/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space.  Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file.  This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously.  So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a quota check
 * program to be run on node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness.  "quota_quantum" sets the maximum time a quota change can be
 * sitting on one node before being synced to the quota file.  (The default is
 * 60 seconds.)  Another knob, "quota_scale", controls how quickly the frequency
 * of quota file syncs increases as the user moves closer to their limit.  The
 * more frequent the syncs, the more accurate the quota enforcement, but that
 * means that there is more contention between the nodes for the quota file.
 * The default value is one.  This sets the maximum theoretical quota overrun
 * (with an infinite number of nodes with infinite bandwidth) to twice the
 * user's limit.  (In practice, the maximum overrun you see should be much
 * less.)  A "quota_scale" number greater than one makes quota syncs more
 * frequent and reduces the maximum overrun.  Numbers less than one (but
 * greater than zero) make quota syncs less frequent.
 *
 * GFS2 quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
 * the quota file, so it is not being constantly read.
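 *
 * Illustrative example (made-up numbers; see need_sync() below for the
 * actual test): with "quota_scale" at the default 1/1, a limit of 1000
 * blocks, a synced usage of 900 blocks, and four journals (nodes), a
 * local unsynced change of 25 blocks triggers a sync, because the
 * projected cluster-wide usage 900 + 25 * 4 * 1/1 reaches the limit.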
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/quota.h>
#include <linux/dqblk_xfs.h>
#include <linux/lockref.h>
#include <linux/list_lru.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/jhash.h>
#include <linux/vmalloc.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "inode.h"
#include "util.h"

#define GFS2_QD_HASH_SHIFT	12
#define GFS2_QD_HASH_SIZE	(1 << GFS2_QD_HASH_SHIFT)
#define GFS2_QD_HASH_MASK	(GFS2_QD_HASH_SIZE - 1)

/* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */
/*                     -> sd_bitmap_lock                              */
static DEFINE_SPINLOCK(qd_lock);
struct list_lru gfs2_qd_lru;

static struct hlist_bl_head qd_hash_table[GFS2_QD_HASH_SIZE];

static unsigned int gfs2_qd_hash(const struct gfs2_sbd *sdp,
				 const struct kqid qid)
{
	unsigned int h;

	h = jhash(&sdp, sizeof(struct gfs2_sbd *), 0);
	h = jhash(&qid, sizeof(struct kqid), h);

	return h & GFS2_QD_HASH_MASK;
}

static inline void spin_lock_bucket(unsigned int hash)
{
	hlist_bl_lock(&qd_hash_table[hash]);
}

static inline void spin_unlock_bucket(unsigned int hash)
{
	hlist_bl_unlock(&qd_hash_table[hash]);
}

static void gfs2_qd_dealloc(struct rcu_head *rcu)
{
	struct gfs2_quota_data *qd = container_of(rcu, struct gfs2_quota_data, qd_rcu);
	kmem_cache_free(gfs2_quotad_cachep, qd);
}

static void gfs2_qd_dispose(struct list_head *list)
{
	struct gfs2_quota_data *qd;
	struct gfs2_sbd *sdp;

	while (!list_empty(list)) {
		qd = list_entry(list->next, struct gfs2_quota_data, qd_lru);
		sdp = qd->qd_gl->gl_name.ln_sbd;

		list_del(&qd->qd_lru);

		/* Free from the filesystem-specific list */
		spin_lock(&qd_lock);
		list_del(&qd->qd_list);
		spin_unlock(&qd_lock);

		spin_lock_bucket(qd->qd_hash);
		hlist_bl_del_rcu(&qd->qd_hlist);
		spin_unlock_bucket(qd->qd_hash);

		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		atomic_dec(&sdp->sd_quota_count);

		/* Delete it from the common reclaim list */
		call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
	}
}


static enum lru_status gfs2_qd_isolate(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *dispose = arg;
	struct gfs2_quota_data *qd = list_entry(item, struct gfs2_quota_data, qd_lru);

	if (!spin_trylock(&qd->qd_lockref.lock))
		return LRU_SKIP;

	if (qd->qd_lockref.count == 0) {
		lockref_mark_dead(&qd->qd_lockref);
		list_lru_isolate_move(lru, &qd->qd_lru, dispose);
	}

	spin_unlock(&qd->qd_lockref.lock);
	return LRU_REMOVED;
}

static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
					 struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	unsigned long freed;

	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	freed = list_lru_shrink_walk(&gfs2_qd_lru, sc,
				     gfs2_qd_isolate, &dispose);

	gfs2_qd_dispose(&dispose);

	return freed;
}

static unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
					  struct shrink_control *sc)
{
	return vfs_pressure_ratio(list_lru_shrink_count(&gfs2_qd_lru, sc));
}

struct shrinker gfs2_qd_shrinker = {
	.count_objects = gfs2_qd_shrink_count,
	.scan_objects = gfs2_qd_shrink_scan,
	.seeks = DEFAULT_SEEKS,
	.flags = SHRINKER_NUMA_AWARE,
};
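
/*
 * The quota file interleaves user and group records by ID.  Illustrative
 * offsets, assuming the 88-byte struct gfs2_quota from gfs2_ondisk.h:
 *
 *	USRQUOTA id 0    -> index 0    -> offset 0
 *	GRPQUOTA id 0    -> index 1    -> offset 88
 *	USRQUOTA id 1000 -> index 2000 -> offset 176000
 *	GRPQUOTA id 1000 -> index 2001 -> offset 176088
 */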
static u64 qd2index(struct gfs2_quota_data *qd)
{
	struct kqid qid = qd->qd_id;
	return (2 * (u64)from_kqid(&init_user_ns, qid)) +
		((qid.type == USRQUOTA) ? 0 : 1);
}

static u64 qd2offset(struct gfs2_quota_data *qd)
{
	u64 offset;

	offset = qd2index(qd);
	offset *= sizeof(struct gfs2_quota);

	return offset;
}

static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, struct kqid qid)
{
	struct gfs2_quota_data *qd;
	int error;

	qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
	if (!qd)
		return NULL;

	qd->qd_sbd = sdp;
	qd->qd_lockref.count = 1;
	spin_lock_init(&qd->qd_lockref.lock);
	qd->qd_id = qid;
	qd->qd_slot = -1;
	INIT_LIST_HEAD(&qd->qd_lru);
	qd->qd_hash = hash;

	error = gfs2_glock_get(sdp, qd2index(qd),
			       &gfs2_quota_glops, CREATE, &qd->qd_gl);
	if (error)
		goto fail;

	return qd;

fail:
	kmem_cache_free(gfs2_quotad_cachep, qd);
	return NULL;
}

static struct gfs2_quota_data *gfs2_qd_search_bucket(unsigned int hash,
						     const struct gfs2_sbd *sdp,
						     struct kqid qid)
{
	struct gfs2_quota_data *qd;
	struct hlist_bl_node *h;

	hlist_bl_for_each_entry_rcu(qd, h, &qd_hash_table[hash], qd_hlist) {
		if (!qid_eq(qd->qd_id, qid))
			continue;
		if (qd->qd_sbd != sdp)
			continue;
		if (lockref_get_not_dead(&qd->qd_lockref)) {
			list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
			return qd;
		}
	}

	return NULL;
}


static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
		  struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd, *new_qd;
	unsigned int hash = gfs2_qd_hash(sdp, qid);

	rcu_read_lock();
	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
	rcu_read_unlock();

	if (qd)
		return 0;

	new_qd = qd_alloc(hash, sdp, qid);
	if (!new_qd)
		return -ENOMEM;

	spin_lock(&qd_lock);
	spin_lock_bucket(hash);
	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
	if (qd == NULL) {
		*qdp = new_qd;
		list_add(&new_qd->qd_list, &sdp->sd_quota_list);
		hlist_bl_add_head_rcu(&new_qd->qd_hlist, &qd_hash_table[hash]);
		atomic_inc(&sdp->sd_quota_count);
	}
	spin_unlock_bucket(hash);
	spin_unlock(&qd_lock);

	if (qd) {
		gfs2_glock_put(new_qd->qd_gl);
		kmem_cache_free(gfs2_quotad_cachep, new_qd);
	}

	return 0;
}


static void qd_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
	gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref));
	lockref_get(&qd->qd_lockref);
}

static void qd_put(struct gfs2_quota_data *qd)
{
	if (lockref_put_or_lock(&qd->qd_lockref))
		return;

	qd->qd_lockref.count = 0;
	list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
	spin_unlock(&qd->qd_lockref.lock);
}
static int slot_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;
	unsigned int bit;
	int error = 0;

	spin_lock(&sdp->sd_bitmap_lock);
	if (qd->qd_slot_count != 0)
		goto out;

	error = -ENOSPC;
	bit = find_first_zero_bit(sdp->sd_quota_bitmap, sdp->sd_quota_slots);
	if (bit < sdp->sd_quota_slots) {
		set_bit(bit, sdp->sd_quota_bitmap);
		qd->qd_slot = bit;
		error = 0;
out:
		qd->qd_slot_count++;
	}
	spin_unlock(&sdp->sd_bitmap_lock);

	return error;
}

static void slot_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;

	spin_lock(&sdp->sd_bitmap_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;
	spin_unlock(&sdp->sd_bitmap_lock);
}

static void slot_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;

	spin_lock(&sdp->sd_bitmap_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	if (!--qd->qd_slot_count) {
		BUG_ON(!test_and_clear_bit(qd->qd_slot, sdp->sd_quota_bitmap));
		qd->qd_slot = -1;
	}
	spin_unlock(&sdp->sd_bitmap_lock);
}
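
/*
 * Each slot maps to a fixed position in the per-node quota change file.
 * Rough example, assuming 4 KiB blocks and the structure sizes from
 * gfs2_ondisk.h (24-byte gfs2_meta_header, 16-byte gfs2_quota_change),
 * which give sd_qc_per_block = (4096 - 24) / 16 = 254.  bh_get() below
 * would then map, e.g.:
 *
 *	slot 300 -> block 300 / 254 = 1, offset 300 % 254 = 46
 */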
static int bh_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int block, offset;
	struct buffer_head *bh;
	int error;
	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

	mutex_lock(&sdp->sd_quota_mutex);

	if (qd->qd_bh_count++) {
		mutex_unlock(&sdp->sd_quota_mutex);
		return 0;
	}

	block = qd->qd_slot / sdp->sd_qc_per_block;
	offset = qd->qd_slot % sdp->sd_qc_per_block;

	bh_map.b_size = 1 << ip->i_inode.i_blkbits;
	error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
	if (error)
		goto fail;
	error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, 0, &bh);
	if (error)
		goto fail;
	error = -EIO;
	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
		goto fail_brelse;

	qd->qd_bh = bh;
	qd->qd_bh_qc = (struct gfs2_quota_change *)
		(bh->b_data + sizeof(struct gfs2_meta_header) +
		 offset * sizeof(struct gfs2_quota_change));

	mutex_unlock(&sdp->sd_quota_mutex);

	return 0;

fail_brelse:
	brelse(bh);
fail:
	qd->qd_bh_count--;
	mutex_unlock(&sdp->sd_quota_mutex);
	return error;
}

static void bh_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_assert(sdp, qd->qd_bh_count);
	if (!--qd->qd_bh_count) {
		brelse(qd->qd_bh);
		qd->qd_bh = NULL;
		qd->qd_bh_qc = NULL;
	}
	mutex_unlock(&sdp->sd_quota_mutex);
}

static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
			 u64 *sync_gen)
{
	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
	    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
	    (sync_gen && (qd->qd_sync_gen >= *sync_gen)))
		return 0;

	if (!lockref_get_not_dead(&qd->qd_lockref))
		return 0;

	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
	set_bit(QDF_LOCKED, &qd->qd_flags);
	qd->qd_change_sync = qd->qd_change;
	slot_hold(qd);
	return 1;
}

static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL;
	int error;
	int found = 0;

	*qdp = NULL;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&qd_lock);

	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
		found = qd_check_sync(sdp, qd, &sdp->sd_quota_sync_gen);
		if (found)
			break;
	}

	if (!found)
		qd = NULL;

	spin_unlock(&qd_lock);

	if (qd) {
		gfs2_assert_warn(sdp, qd->qd_change_sync);
		error = bh_get(qd);
		if (error) {
			clear_bit(QDF_LOCKED, &qd->qd_flags);
			slot_put(qd);
			qd_put(qd);
			return error;
		}
	}

	*qdp = qd;

	return 0;
}

static void qd_unlock(struct gfs2_quota_data *qd)
{
	gfs2_assert_warn(qd->qd_gl->gl_name.ln_sbd,
			 test_bit(QDF_LOCKED, &qd->qd_flags));
	clear_bit(QDF_LOCKED, &qd->qd_flags);
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}

static int qdsb_get(struct gfs2_sbd *sdp, struct kqid qid,
		    struct gfs2_quota_data **qdp)
{
	int error;

	error = qd_get(sdp, qid, qdp);
	if (error)
		return error;

	error = slot_get(*qdp);
	if (error)
		goto fail;

	error = bh_get(*qdp);
	if (error)
		goto fail_slot;

	return 0;

fail_slot:
	slot_put(*qdp);
fail:
	qd_put(*qdp);
	return error;
}

static void qdsb_put(struct gfs2_quota_data *qd)
{
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}

/**
 * gfs2_qa_alloc - make sure we have a quota allocations data structure,
 *                 if necessary
 * @ip: the inode for this reservation
 */
int gfs2_qa_alloc(struct gfs2_inode *ip)
{
	int error = 0;
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return 0;

	down_write(&ip->i_rw_mutex);
	if (ip->i_qadata == NULL) {
		ip->i_qadata = kmem_cache_zalloc(gfs2_qadata_cachep, GFP_NOFS);
		if (!ip->i_qadata)
			error = -ENOMEM;
	}
	up_write(&ip->i_rw_mutex);
	return error;
}

void gfs2_qa_delete(struct gfs2_inode *ip, atomic_t *wcount)
{
	down_write(&ip->i_rw_mutex);
	if (ip->i_qadata && ((wcount == NULL) || (atomic_read(wcount) <= 1))) {
		kmem_cache_free(gfs2_qadata_cachep, ip->i_qadata);
		ip->i_qadata = NULL;
	}
	up_write(&ip->i_rw_mutex);
}

int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data **qd;
	int error;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return 0;

	if (ip->i_qadata == NULL) {
		error = gfs2_rsqa_alloc(ip);
		if (error)
			return error;
	}

	qd = ip->i_qadata->qa_qd;

	if (gfs2_assert_warn(sdp, !ip->i_qadata->qa_qd_num) ||
	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
		return -EIO;

	error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd);
	if (error)
		goto out;
	ip->i_qadata->qa_qd_num++;
	qd++;

	error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd);
	if (error)
		goto out;
	ip->i_qadata->qa_qd_num++;
	qd++;

	if (!uid_eq(uid, NO_UID_QUOTA_CHANGE) &&
	    !uid_eq(uid, ip->i_inode.i_uid)) {
		error = qdsb_get(sdp, make_kqid_uid(uid), qd);
		if (error)
			goto out;
		ip->i_qadata->qa_qd_num++;
		qd++;
	}

	if (!gid_eq(gid, NO_GID_QUOTA_CHANGE) &&
	    !gid_eq(gid, ip->i_inode.i_gid)) {
		error = qdsb_get(sdp, make_kqid_gid(gid), qd);
		if (error)
			goto out;
		ip->i_qadata->qa_qd_num++;
		qd++;
	}

out:
	if (error)
		gfs2_quota_unhold(ip);
	return error;
}

void gfs2_quota_unhold(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	u32 x;

	if (ip->i_qadata == NULL)
		return;
	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
		qdsb_put(ip->i_qadata->qa_qd[x]);
		ip->i_qadata->qa_qd[x] = NULL;
	}
	ip->i_qadata->qa_qd_num = 0;
}

static int sort_qd(const void *a, const void *b)
{
	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;

	if (qid_lt(qd_a->qd_id, qd_b->qd_id))
		return -1;
	if (qid_lt(qd_b->qd_id, qd_a->qd_id))
		return 1;
	return 0;
}

static void do_qc(struct gfs2_quota_data *qd, s64 change)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	struct gfs2_quota_change *qc = qd->qd_bh_qc;
	s64 x;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_trans_add_meta(ip->i_gl, qd->qd_bh);

	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
		qc->qc_change = 0;
		qc->qc_flags = 0;
		if (qd->qd_id.type == USRQUOTA)
			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
		qc->qc_id = cpu_to_be32(from_kqid(&init_user_ns, qd->qd_id));
	}

	x = be64_to_cpu(qc->qc_change) + change;
	qc->qc_change = cpu_to_be64(x);

	spin_lock(&qd_lock);
	qd->qd_change = x;
	spin_unlock(&qd_lock);

	if (!x) {
		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
		clear_bit(QDF_CHANGE, &qd->qd_flags);
		qc->qc_flags = 0;
		qc->qc_id = 0;
		slot_put(qd);
		qd_put(qd);
	} else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
		qd_hold(qd);
		slot_hold(qd);
	}

	if (change < 0) /* Reset quiet flag if we freed some blocks */
		clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);
	mutex_unlock(&sdp->sd_quota_mutex);
}
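
/*
 * Quota records are not naturally aligned to pages, so a write may cross
 * a page boundary.  Illustrative example, assuming a 4096-byte page and
 * an 88-byte struct gfs2_quota: the record for index 46 starts at file
 * offset 46 * 88 = 4048, so gfs2_write_disk_quota() below splits it into
 * 48 bytes on the first page and 40 bytes at offset 0 of the next one.
 */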
static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index,
				  unsigned off, void *buf, unsigned bytes)
{
	struct inode *inode = &ip->i_inode;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct buffer_head *bh;
	void *kaddr;
	u64 blk;
	unsigned bsize = sdp->sd_sb.sb_bsize, bnum = 0, boff = 0;
	unsigned to_write = bytes, pg_off = off;
	int done = 0;

	blk = index << (PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift);
	boff = off % bsize;

	page = find_or_create_page(mapping, index, GFP_NOFS);
	if (!page)
		return -ENOMEM;
	if (!page_has_buffers(page))
		create_empty_buffers(page, bsize, 0);

	bh = page_buffers(page);
	while (!done) {
		/* Find the beginning block within the page */
		if (pg_off >= ((bnum * bsize) + bsize)) {
			bh = bh->b_this_page;
			bnum++;
			blk++;
			continue;
		}
		if (!buffer_mapped(bh)) {
			gfs2_block_map(inode, blk, bh, 1);
			if (!buffer_mapped(bh))
				goto unlock_out;
			/* If it's a newly allocated disk block, zero it */
			if (buffer_new(bh))
				zero_user(page, bnum * bsize, bh->b_size);
		}
		if (PageUptodate(page))
			set_buffer_uptodate(bh);
		if (!buffer_uptodate(bh)) {
			ll_rw_block(REQ_OP_READ, REQ_META, 1, &bh);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				goto unlock_out;
		}
		gfs2_trans_add_data(ip->i_gl, bh);

		/* If we need to write to the next block as well */
		if (to_write > (bsize - boff)) {
			pg_off += (bsize - boff);
			to_write -= (bsize - boff);
			boff = pg_off % bsize;
			continue;
		}
		done = 1;
	}

	/* Write to the page, now that we have setup the buffer(s) */
	kaddr = kmap_atomic(page);
	memcpy(kaddr + off, buf, bytes);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);
	unlock_page(page);
	put_page(page);

	return 0;

unlock_out:
	unlock_page(page);
	put_page(page);
	return -EIO;
}

static int gfs2_write_disk_quota(struct gfs2_inode *ip, struct gfs2_quota *qp,
				 loff_t loc)
{
	unsigned long pg_beg;
	unsigned pg_off, nbytes, overflow = 0;
	int pg_oflow = 0, error;
	void *ptr;

	nbytes = sizeof(struct gfs2_quota);

	pg_beg = loc >> PAGE_SHIFT;
	pg_off = loc % PAGE_SIZE;

	/* If the quota straddles a page boundary, split the write in two */
	if ((pg_off + nbytes) > PAGE_SIZE) {
		pg_oflow = 1;
		overflow = (pg_off + nbytes) - PAGE_SIZE;
	}

	ptr = qp;
	error = gfs2_write_buf_to_page(ip, pg_beg, pg_off, ptr,
				       nbytes - overflow);
	/* If there's an overflow, write the remaining bytes to the next page */
	if (!error && pg_oflow)
		error = gfs2_write_buf_to_page(ip, pg_beg + 1, 0,
					       ptr + nbytes - overflow,
					       overflow);
	return error;
}

/**
 * gfs2_adjust_quota - adjust record of current block usage
 * @ip: The quota inode
 * @loc: Offset of the entry in the quota file
 * @change: The amount of usage change to record
 * @qd: The quota data
 * @fdq: The updated limits to record
 *
 * This function was mostly borrowed from gfs2_block_truncate_page which was
 * in turn mostly borrowed from ext3
 *
 * Returns: 0 or -ve on error
 */

static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
			     s64 change, struct gfs2_quota_data *qd,
			     struct qc_dqblk *fdq)
{
	struct inode *inode = &ip->i_inode;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_quota q;
	int err;
	u64 size;

	if (gfs2_is_stuffed(ip)) {
		err = gfs2_unstuff_dinode(ip, NULL);
		if (err)
			return err;
	}

	memset(&q, 0, sizeof(struct gfs2_quota));
	err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q));
	if (err < 0)
		return err;

	loc -= sizeof(q); /* gfs2_internal_read would've advanced the loc ptr */
	err = -EIO;
	be64_add_cpu(&q.qu_value, change);
	if (((s64)be64_to_cpu(q.qu_value)) < 0)
		q.qu_value = 0; /* Never go negative on quota usage */
	qd->qd_qb.qb_value = q.qu_value;
	if (fdq) {
		if (fdq->d_fieldmask & QC_SPC_SOFT) {
			q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift);
			qd->qd_qb.qb_warn = q.qu_warn;
		}
		if (fdq->d_fieldmask & QC_SPC_HARD) {
			q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift);
			qd->qd_qb.qb_limit = q.qu_limit;
		}
		if (fdq->d_fieldmask & QC_SPACE) {
			q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift);
			qd->qd_qb.qb_value = q.qu_value;
		}
	}

	err = gfs2_write_disk_quota(ip, &q, loc);
	if (!err) {
		size = loc + sizeof(struct gfs2_quota);
		if (size > inode->i_size)
			i_size_write(inode, size);
		inode->i_mtime = inode->i_atime = CURRENT_TIME;
		mark_inode_dirty(inode);
		set_bit(QDF_REFRESH, &qd->qd_flags);
	}

	return err;
}
static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_alloc_parms ap = { .aflags = 0, };
	unsigned int data_blocks, ind_blocks;
	struct gfs2_holder *ghs, i_gh;
	unsigned int qx, x;
	struct gfs2_quota_data *qd;
	unsigned reserved;
	loff_t offset;
	unsigned int nalloc = 0, blocks;
	int error;

	error = gfs2_rsqa_alloc(ip);
	if (error)
		return error;

	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
			       &data_blocks, &ind_blocks);

	ghs = kmalloc(num_qd * sizeof(struct gfs2_holder), GFP_NOFS);
	if (!ghs)
		return -ENOMEM;

	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
	inode_lock(&ip->i_inode);
	for (qx = 0; qx < num_qd; qx++) {
		error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &ghs[qx]);
		if (error)
			goto out;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out;

	for (x = 0; x < num_qd; x++) {
		offset = qd2offset(qda[x]);
		if (gfs2_write_alloc_required(ip, offset,
					      sizeof(struct gfs2_quota)))
			nalloc++;
	}

	/*
	 * 1 blk for unstuffing inode if stuffed. We add this extra
	 * block to the reservation unconditionally. If the inode
	 * doesn't need unstuffing, the block will be released to the
	 * rgrp since it won't be allocated during the transaction
	 */
	/* +3 in the end for unstuffing block, inode size update block
	 * and another block in case quota straddles page boundary and
	 * two blocks need to be updated instead of 1 */
	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;

	reserved = 1 + (nalloc * (data_blocks + ind_blocks));
	ap.target = reserved;
	error = gfs2_inplace_reserve(ip, &ap);
	if (error)
		goto out_alloc;

	if (nalloc)
		blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS;

	error = gfs2_trans_begin(sdp, blocks, 0);
	if (error)
		goto out_ipres;

	for (x = 0; x < num_qd; x++) {
		qd = qda[x];
		offset = qd2offset(qd);
		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
		if (error)
			goto out_end_trans;

		do_qc(qd, -qd->qd_change_sync);
		set_bit(QDF_REFRESH, &qd->qd_flags);
	}

	error = 0;

out_end_trans:
	gfs2_trans_end(sdp);
out_ipres:
	gfs2_inplace_release(ip);
out_alloc:
	gfs2_glock_dq_uninit(&i_gh);
out:
	while (qx--)
		gfs2_glock_dq_uninit(&ghs[qx]);
	inode_unlock(&ip->i_inode);
	kfree(ghs);
	gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl, NORMAL_FLUSH);
	return error;
}

static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_quota q;
	struct gfs2_quota_lvb *qlvb;
	loff_t pos;
	int error;

	memset(&q, 0, sizeof(struct gfs2_quota));
	pos = qd2offset(qd);
	error = gfs2_internal_read(ip, (char *)&q, &pos, sizeof(q));
	if (error < 0)
		return error;

	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
	qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
	qlvb->__pad = 0;
	qlvb->qb_limit = q.qu_limit;
	qlvb->qb_warn = q.qu_warn;
	qlvb->qb_value = q.qu_value;
	qd->qd_qb = *qlvb;

	return 0;
}
static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
		    struct gfs2_holder *q_gh)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_holder i_gh;
	int error;

restart:
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
	if (error)
		return error;

	if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
		force_refresh = FORCE;

	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;

	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
		gfs2_glock_dq_uninit(q_gh);
		error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, q_gh);
		if (error)
			return error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
		if (error)
			goto fail;

		error = update_qd(sdp, qd);
		if (error)
			goto fail_gunlock;

		gfs2_glock_dq_uninit(&i_gh);
		gfs2_glock_dq_uninit(q_gh);
		force_refresh = 0;
		goto restart;
	}

	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
fail:
	gfs2_glock_dq_uninit(q_gh);
	return error;
}

int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qd;
	u32 x;
	int error = 0;

	if (capable(CAP_SYS_RESOURCE) ||
	    sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	error = gfs2_quota_hold(ip, uid, gid);
	if (error)
		return error;

	sort(ip->i_qadata->qa_qd, ip->i_qadata->qa_qd_num,
	     sizeof(struct gfs2_quota_data *), sort_qd, NULL);

	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
		qd = ip->i_qadata->qa_qd[x];
		error = do_glock(qd, NO_FORCE, &ip->i_qadata->qa_qd_ghs[x]);
		if (error)
			break;
	}

	if (!error)
		set_bit(GIF_QD_LOCKED, &ip->i_flags);
	else {
		while (x--)
			gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]);
		gfs2_quota_unhold(ip);
	}

	return error;
}

static int need_sync(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
	struct gfs2_tune *gt = &sdp->sd_tune;
	s64 value;
	unsigned int num, den;
	int do_sync = 1;

	if (!qd->qd_qb.qb_limit)
		return 0;

	spin_lock(&qd_lock);
	value = qd->qd_change;
	spin_unlock(&qd_lock);

	spin_lock(&gt->gt_spin);
	num = gt->gt_quota_scale_num;
	den = gt->gt_quota_scale_den;
	spin_unlock(&gt->gt_spin);

	if (value < 0)
		do_sync = 0;
	else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
		 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
		do_sync = 0;
	else {
		value *= gfs2_jindex_size(sdp) * num;
		value = div_s64(value, den);
		value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
		if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
			do_sync = 0;
	}

	return do_sync;
}
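
/*
 * Worked example for need_sync() (illustrative numbers): with a limit of
 * 1000 blocks, a synced qb_value of 900, four journals, and the default
 * scale of 1/1, a local qd_change of 25 projects to
 * 900 + 25 * 4 * 1/1 = 1000, which is not below the limit, so the change
 * is written back when gfs2_quota_unlock() drops the lock.
 */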
void gfs2_quota_unlock(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qda[4];
	unsigned int count = 0;
	u32 x;
	int found;

	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
		goto out;

	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
		struct gfs2_quota_data *qd;
		int sync;

		qd = ip->i_qadata->qa_qd[x];
		sync = need_sync(qd);

		gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]);
		if (!sync)
			continue;

		spin_lock(&qd_lock);
		found = qd_check_sync(sdp, qd, NULL);
		spin_unlock(&qd_lock);

		if (!found)
			continue;

		gfs2_assert_warn(sdp, qd->qd_change_sync);
		if (bh_get(qd)) {
			clear_bit(QDF_LOCKED, &qd->qd_flags);
			slot_put(qd);
			qd_put(qd);
			continue;
		}

		qda[count++] = qd;
	}

	if (count) {
		do_sync(count, qda);
		for (x = 0; x < count; x++)
			qd_unlock(qda[x]);
	}

out:
	gfs2_quota_unhold(ip);
}

#define MAX_LINE 256

static int print_message(struct gfs2_quota_data *qd, char *type)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;

	fs_info(sdp, "quota %s for %s %u\n",
		type,
		(qd->qd_id.type == USRQUOTA) ? "user" : "group",
		from_kqid(&init_user_ns, qd->qd_id));

	return 0;
}

/**
 * gfs2_quota_check - check if allocating new blocks will exceed quota
 * @ip:  The inode for which this check is being performed
 * @uid: The uid to check against
 * @gid: The gid to check against
 * @ap:  The allocation parameters. ap->target contains the requested
 *       blocks. ap->min_target, if set, contains the minimum blks
 *       requested.
 *
 * Returns: 0 on success.
 *                  min_req = ap->min_target ? ap->min_target : ap->target;
 *                  quota must allow at least min_req blks for success and
 *                  ap->allowed is set to the number of blocks allowed
 *
 *          -EDQUOT otherwise, quota violation. ap->allowed is set to number
 *                  of blocks available.
 */
int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid,
		     struct gfs2_alloc_parms *ap)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qd;
	s64 value, warn, limit;
	u32 x;
	int error = 0;

	ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */
	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
		return 0;

	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
		qd = ip->i_qadata->qa_qd[x];

		if (!(qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
		      qid_eq(qd->qd_id, make_kqid_gid(gid))))
			continue;

		warn = (s64)be64_to_cpu(qd->qd_qb.qb_warn);
		limit = (s64)be64_to_cpu(qd->qd_qb.qb_limit);
		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
		spin_lock(&qd_lock);
		value += qd->qd_change;
		spin_unlock(&qd_lock);

		if (limit > 0 && (limit - value) < ap->allowed)
			ap->allowed = limit - value;
		/* If we can't meet the target */
		if (limit && limit < (value + (s64)ap->target)) {
			/* If no min_target specified or we don't meet
			 * min_target, return -EDQUOT */
			if (!ap->min_target || ap->min_target > ap->allowed) {
				if (!test_and_set_bit(QDF_QMSG_QUIET,
						      &qd->qd_flags)) {
					print_message(qd, "exceeded");
					quota_send_warning(qd->qd_id,
							   sdp->sd_vfs->s_dev,
							   QUOTA_NL_BHARDWARN);
				}
				error = -EDQUOT;
				break;
			}
		} else if (warn && warn < value &&
			   time_after_eq(jiffies, qd->qd_last_warn +
					 gfs2_tune_get(sdp, gt_quota_warn_period)
					 * HZ)) {
			quota_send_warning(qd->qd_id,
					   sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
			error = print_message(qd, "warning");
			qd->qd_last_warn = jiffies;
		}
	}
	return error;
}
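
/*
 * Worked example for gfs2_quota_check() above (illustrative numbers):
 * with limit = 1000, value + qd_change = 990, ap->target = 20 and
 * ap->min_target = 5, the full target would overrun the limit, but
 * ap->allowed is set to 10 and, since min_target <= allowed, the
 * allocation still succeeds with the reduced allowance.
 */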
void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
		       kuid_t uid, kgid_t gid)
{
	struct gfs2_quota_data *qd;
	u32 x;
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON ||
	    gfs2_assert_warn(sdp, change))
		return;
	if (ip->i_diskflags & GFS2_DIF_SYSTEM)
		return;

	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
		qd = ip->i_qadata->qa_qd[x];

		if (qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
		    qid_eq(qd->qd_id, make_kqid_gid(gid))) {
			do_qc(qd, change);
		}
	}
}

int gfs2_quota_sync(struct super_block *sb, int type)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_quota_data **qda;
	unsigned int max_qd = PAGE_SIZE/sizeof(struct gfs2_holder);
	unsigned int num_qd;
	unsigned int x;
	int error = 0;

	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
	if (!qda)
		return -ENOMEM;

	mutex_lock(&sdp->sd_quota_sync_mutex);
	sdp->sd_quota_sync_gen++;

	do {
		num_qd = 0;

		for (;;) {
			error = qd_fish(sdp, qda + num_qd);
			if (error || !qda[num_qd])
				break;
			if (++num_qd == max_qd)
				break;
		}

		if (num_qd) {
			if (!error)
				error = do_sync(num_qd, qda);
			if (!error)
				for (x = 0; x < num_qd; x++)
					qda[x]->qd_sync_gen =
						sdp->sd_quota_sync_gen;

			for (x = 0; x < num_qd; x++)
				qd_unlock(qda[x]);
		}
	} while (!error && num_qd == max_qd);

	mutex_unlock(&sdp->sd_quota_sync_mutex);
	kfree(qda);

	return error;
}

int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid)
{
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	error = qd_get(sdp, qid, &qd);
	if (error)
		return error;

	error = do_glock(qd, FORCE, &q_gh);
	if (!error)
		gfs2_glock_dq_uninit(&q_gh);

	qd_put(qd);
	return error;
}
int gfs2_quota_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	u64 size = i_size_read(sdp->sd_qc_inode);
	unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
	unsigned int x, slot = 0;
	unsigned int found = 0;
	unsigned int hash;
	unsigned int bm_size;
	u64 dblock;
	u32 extlen = 0;
	int error;

	if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
		return -EIO;

	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
	bm_size = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * sizeof(unsigned long));
	bm_size *= sizeof(unsigned long);
	error = -ENOMEM;
	sdp->sd_quota_bitmap = kzalloc(bm_size, GFP_NOFS | __GFP_NOWARN);
	if (sdp->sd_quota_bitmap == NULL)
		sdp->sd_quota_bitmap = __vmalloc(bm_size, GFP_NOFS |
						 __GFP_ZERO, PAGE_KERNEL);
	if (!sdp->sd_quota_bitmap)
		return error;

	for (x = 0; x < blocks; x++) {
		struct buffer_head *bh;
		const struct gfs2_quota_change *qc;
		unsigned int y;

		if (!extlen) {
			int new = 0;
			error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen);
			if (error)
				goto fail;
		}
		error = -EIO;
		bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
		if (!bh)
			goto fail;
		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
			brelse(bh);
			goto fail;
		}

		qc = (const struct gfs2_quota_change *)(bh->b_data + sizeof(struct gfs2_meta_header));
		for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
		     y++, slot++) {
			struct gfs2_quota_data *qd;
			s64 qc_change = be64_to_cpu(qc->qc_change);
			u32 qc_flags = be32_to_cpu(qc->qc_flags);
			enum quota_type qtype = (qc_flags & GFS2_QCF_USER) ?
						USRQUOTA : GRPQUOTA;
			struct kqid qc_id = make_kqid(&init_user_ns, qtype,
						      be32_to_cpu(qc->qc_id));
			qc++;
			if (!qc_change)
				continue;

			hash = gfs2_qd_hash(sdp, qc_id);
			qd = qd_alloc(hash, sdp, qc_id);
			if (qd == NULL) {
				brelse(bh);
				goto fail;
			}

			set_bit(QDF_CHANGE, &qd->qd_flags);
			qd->qd_change = qc_change;
			qd->qd_slot = slot;
			qd->qd_slot_count = 1;

			spin_lock(&qd_lock);
			BUG_ON(test_and_set_bit(slot, sdp->sd_quota_bitmap));
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			spin_unlock(&qd_lock);

			spin_lock_bucket(hash);
			hlist_bl_add_head_rcu(&qd->qd_hlist, &qd_hash_table[hash]);
			spin_unlock_bucket(hash);

			found++;
		}

		brelse(bh);
		dblock++;
		extlen--;
	}

	if (found)
		fs_info(sdp, "found %u quota changes\n", found);

	return 0;

fail:
	gfs2_quota_cleanup(sdp);
	return error;
}

void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
	struct list_head *head = &sdp->sd_quota_list;
	struct gfs2_quota_data *qd;

	spin_lock(&qd_lock);
	while (!list_empty(head)) {
		qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);

		list_del(&qd->qd_list);

		/* Also remove if this qd exists in the reclaim list */
		list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
		atomic_dec(&sdp->sd_quota_count);
		spin_unlock(&qd_lock);

		spin_lock_bucket(qd->qd_hash);
		hlist_bl_del_rcu(&qd->qd_hlist);
		spin_unlock_bucket(qd->qd_hash);

		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);

		spin_lock(&qd_lock);
	}
	spin_unlock(&qd_lock);

	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));

	kvfree(sdp->sd_quota_bitmap);
	sdp->sd_quota_bitmap = NULL;
}

static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
{
	if (error == 0 || error == -EROFS)
		return;
	if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
		fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
}

static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
			       int (*fxn)(struct super_block *sb, int type),
			       unsigned long t, unsigned long *timeo,
			       unsigned int *new_timeo)
{
	if (t >= *timeo) {
		int error = fxn(sdp->sd_vfs, 0);
		quotad_error(sdp, msg, error);
		*timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
	} else {
		*timeo -= t;
	}
}

static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;

	while (1) {
		ip = NULL;
		spin_lock(&sdp->sd_trunc_lock);
		if (!list_empty(&sdp->sd_trunc_list)) {
			ip = list_entry(sdp->sd_trunc_list.next,
					struct gfs2_inode, i_trunc_list);
			list_del_init(&ip->i_trunc_list);
		}
		spin_unlock(&sdp->sd_trunc_lock);
		if (ip == NULL)
			return;
		gfs2_glock_finish_truncate(ip);
	}
}

void gfs2_wake_up_statfs(struct gfs2_sbd *sdp)
{
	if (!sdp->sd_statfs_force_sync) {
		sdp->sd_statfs_force_sync = 1;
		wake_up(&sdp->sd_quota_wait);
	}
}


/**
 * gfs2_quotad - Write cached quota changes into the quota file
 * @sdp: Pointer to GFS2 superblock
 *
 */
int gfs2_quotad(void *data)
{
	struct gfs2_sbd *sdp = data;
	struct gfs2_tune *tune = &sdp->sd_tune;
	unsigned long statfs_timeo = 0;
	unsigned long quotad_timeo = 0;
	unsigned long t = 0;
	DEFINE_WAIT(wait);
	int empty;

	while (!kthread_should_stop()) {

		/* Update the master statfs file */
		if (sdp->sd_statfs_force_sync) {
			int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
			quotad_error(sdp, "statfs", error);
			statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
		}
		else
			quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
					   &statfs_timeo,
					   &tune->gt_statfs_quantum);

		/* Update quota file */
		quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
				   &quotad_timeo, &tune->gt_quota_quantum);

		/* Check for & recover partially truncated inodes */
		quotad_check_trunc_list(sdp);

		try_to_freeze();

		t = min(quotad_timeo, statfs_timeo);

		prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
		spin_lock(&sdp->sd_trunc_lock);
		empty = list_empty(&sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		if (empty && !sdp->sd_statfs_force_sync)
			t -= schedule_timeout(t);
		else
			t = 0;
		finish_wait(&sdp->sd_quota_wait, &wait);
	}

	return 0;
}

static int gfs2_quota_get_state(struct super_block *sb, struct qc_state *state)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	memset(state, 0, sizeof(*state));

	switch (sdp->sd_args.ar_quota) {
	case GFS2_QUOTA_ON:
		state->s_state[USRQUOTA].flags |= QCI_LIMITS_ENFORCED;
		state->s_state[GRPQUOTA].flags |= QCI_LIMITS_ENFORCED;
		/*FALLTHRU*/
	case GFS2_QUOTA_ACCOUNT:
		state->s_state[USRQUOTA].flags |= QCI_ACCT_ENABLED |
						  QCI_SYSFILE;
		state->s_state[GRPQUOTA].flags |= QCI_ACCT_ENABLED |
						  QCI_SYSFILE;
		break;
	case GFS2_QUOTA_OFF:
		break;
	}
	if (sdp->sd_quota_inode) {
		state->s_state[USRQUOTA].ino =
					GFS2_I(sdp->sd_quota_inode)->i_no_addr;
		state->s_state[USRQUOTA].blocks = sdp->sd_quota_inode->i_blocks;
	}
	state->s_state[USRQUOTA].nextents = 1;	/* unsupported */
	state->s_state[GRPQUOTA] = state->s_state[USRQUOTA];
	state->s_incoredqs = list_lru_count(&gfs2_qd_lru);
	return 0;
}

static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
			  struct qc_dqblk *fdq)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_quota_lvb *qlvb;
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	memset(fdq, 0, sizeof(*fdq));

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return -ESRCH; /* Crazy XFS error code */

	if ((qid.type != USRQUOTA) &&
	    (qid.type != GRPQUOTA))
		return -EINVAL;

	error = qd_get(sdp, qid, &qd);
	if (error)
		return error;
	error = do_glock(qd, FORCE, &q_gh);
	if (error)
		goto out;

	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
	fdq->d_spc_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_sb.sb_bsize_shift;
	fdq->d_spc_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_sb.sb_bsize_shift;
	fdq->d_space = be64_to_cpu(qlvb->qb_value) << sdp->sd_sb.sb_bsize_shift;

	gfs2_glock_dq_uninit(&q_gh);
out:
	qd_put(qd);
	return error;
}

/* GFS2 only supports a subset of the XFS fields */
#define GFS2_FIELDMASK (QC_SPC_SOFT|QC_SPC_HARD|QC_SPACE)

static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
			  struct qc_dqblk *fdq)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh, i_gh;
	unsigned int data_blocks, ind_blocks;
	unsigned int blocks = 0;
	int alloc_required;
	loff_t offset;
	int error;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return -ESRCH; /* Crazy XFS error code */

	if ((qid.type != USRQUOTA) &&
	    (qid.type != GRPQUOTA))
		return -EINVAL;

	if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
		return -EINVAL;

	error = qd_get(sdp, qid, &qd);
	if (error)
		return error;

	error = gfs2_rsqa_alloc(ip);
	if (error)
		goto out_put;

	inode_lock(&ip->i_inode);
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
	if (error)
		goto out_unlockput;
	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out_q;

	/* Check for existing entry, if none then alloc new blocks */
	error = update_qd(sdp, qd);
	if (error)
		goto out_i;

	/* If nothing has changed, this is a no-op */
	if ((fdq->d_fieldmask & QC_SPC_SOFT) &&
	    ((fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
		fdq->d_fieldmask ^= QC_SPC_SOFT;

	if ((fdq->d_fieldmask & QC_SPC_HARD) &&
	    ((fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
		fdq->d_fieldmask ^= QC_SPC_HARD;

	if ((fdq->d_fieldmask & QC_SPACE) &&
	    ((fdq->d_space >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
		fdq->d_fieldmask ^= QC_SPACE;

	if (fdq->d_fieldmask == 0)
		goto out_i;

	offset = qd2offset(qd);
	alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota));
	if (gfs2_is_stuffed(ip))
		alloc_required = 1;
	if (alloc_required) {
		struct gfs2_alloc_parms ap = { .aflags = 0, };
		gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
				       &data_blocks, &ind_blocks);
		blocks = 1 + data_blocks + ind_blocks;
		ap.target = blocks;
		error = gfs2_inplace_reserve(ip, &ap);
		if (error)
			goto out_i;
		blocks += gfs2_rg_blocks(ip, blocks);
	}

	/* Some quotas span block boundaries and can update two blocks,
	   adding an extra block to the transaction to handle such quotas */
	error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0);
	if (error)
		goto out_release;

	/* Apply changes */
	error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);
	if (!error)
		clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);

	gfs2_trans_end(sdp);
out_release:
	if (alloc_required)
		gfs2_inplace_release(ip);
out_i:
	gfs2_glock_dq_uninit(&i_gh);
out_q:
	gfs2_glock_dq_uninit(&q_gh);
out_unlockput:
	inode_unlock(&ip->i_inode);
out_put:
	qd_put(qd);
	return error;
}

const struct quotactl_ops gfs2_quotactl_ops = {
	.quota_sync	= gfs2_quota_sync,
	.get_state	= gfs2_quota_get_state,
	.get_dqblk	= gfs2_get_dqblk,
	.set_dqblk	= gfs2_set_dqblk,
};

void __init gfs2_quota_hash_init(void)
{
	unsigned i;

	for (i = 0; i < GFS2_QD_HASH_SIZE; i++)
		INIT_HLIST_BL_HEAD(&qd_hash_table[i]);
}