/*
 * Implementation of the diskquota system for the LINUX operating system. QUOTA
 * is implemented using the BSD system call interface as the means of
 * communication with the user level. This file contains the generic routines
 * called by the different filesystems on allocation of an inode or block.
 * These routines take care of the administration needed to have a consistent
 * diskquota tracking system. The ideas of both user and group quotas are based
 * on the Melbourne quota system as used on BSD derived systems. The internal
 * implementation is based on one of the several variants of the LINUX
 * inode-subsystem with added complexity of the diskquota system.
 *
 * Author:	Marco van Wieringen <mvw@planets.elm.net>
 *
 * Fixes:   Dmitry Gorodchanin <pgmdsg@ibi.com>, 11 Feb 96
 *
 *		Revised list management to avoid races
 *		-- Bill Hawes, <whawes@star.net>, 9/98
 *
 *		Fixed races in dquot_transfer(), dqget() and dquot_alloc_...().
 *		As the consequence the locking was moved from dquot_decr_...(),
 *		dquot_incr_...() to calling functions.
 *		invalidate_dquots() now writes modified dquots.
 *		Serialized quota_off() and quota_on() for mount point.
 *		Fixed a few bugs in grow_dquots().
 *		Fixed deadlock in write_dquot() - we no longer account quotas on
 *		quota files
 *		remove_dquot_ref() moved to inode.c - it now traverses through inodes
 *		add_dquot_ref() restarts after blocking
 *		Added check for bogus uid and fixed check for group in quotactl.
 *		Jan Kara, <jack@suse.cz>, sponsored by SuSE CR, 10-11/99
 *
 *		Used struct list_head instead of own list struct
 *		Invalidation of referenced dquots is no longer possible
 *		Improved free_dquots list management
 *		Quota and i_blocks are now updated in one place to avoid races
 *		Warnings are now delayed so we won't block in critical section
 *		Write updated not to require dquot lock
 *		Jan Kara, <jack@suse.cz>, 9/2000
 *
 *		Added dynamic quota structure allocation
 *		Jan Kara <jack@suse.cz> 12/2000
 *
 *		Rewritten quota interface. Implemented new quota format and
 *		formats registering.
 *		Jan Kara, <jack@suse.cz>, 2001,2002
 *
 *		New SMP locking.
 *		Jan Kara, <jack@suse.cz>, 10/2002
 *
 *		Added journalled quota support, fix lock inversion problems
 *		Jan Kara, <jack@suse.cz>, 2003,2004
 *
 * (C) Copyright 1994 - 1997 Marco van Wieringen
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/mm.h>
#include <linux/time.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/tty.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/security.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/namei.h>
#include <linux/capability.h>
#include <linux/quotaops.h>
#include "../internal.h" /* ugh */

#include <linux/uaccess.h>

/*
 * There are three quota SMP locks. dq_list_lock protects all lists with quotas
 * and quota formats.
 * dq_data_lock protects data from dq_dqb and also mem_dqinfo structures and
 * also guards consistency of dquot->dq_dqb with inode->i_blocks, i_bytes.
 * Updates of i_blocks and i_bytes are themselves guarded by i_lock acquired
 * directly in inode_add_bytes() and inode_sub_bytes(). dq_state_lock protects
 * modifications of quota state (on quotaon and quotaoff) and readers who care
 * about latest values take it as well.
 *
 * The spinlock ordering is hence: dq_data_lock > dq_list_lock > i_lock,
 *   dq_list_lock > dq_state_lock
 *
 * Note that some things (e.g. sb pointer, type, id) don't change during
 * the life of the dquot structure and so needn't be protected by a lock.
 *
 * Operations accessing dquots via inode pointers are protected by dquot_srcu.
 * Operations reading the pointers need srcu_read_lock(&dquot_srcu), and
 * synchronize_srcu(&dquot_srcu) is called after clearing pointers from
 * inode and before dropping dquot references to avoid use of dquots after
 * they are freed. dq_data_lock is used to serialize the pointer setting and
 * clearing operations.
 * Special care needs to be taken about S_NOQUOTA inode flag (marking that
 * inode is a quota file). Functions adding pointers from inode to dquots have
 * to check this flag under dq_data_lock and then (if S_NOQUOTA is not set) they
 * have to do all pointer modifications before dropping dq_data_lock. This makes
 * sure they cannot race with quotaon which first sets S_NOQUOTA flag and
 * then drops all pointers to dquots from an inode.
 *
 * Each dquot has its dq_lock mutex. Locked dquots might not be referenced
 * from inodes (dquot_alloc_space() and such don't check the dq_lock).
 * Currently dquot is locked only when it is being read to memory (or space for
 * it is being allocated) on the first dqget() and when it is being released on
 * the last dqput(). The allocation and release operations are serialized by
 * the dq_lock and by checking the use count in dquot_release(). Write
 * operations on dquots don't hold dq_lock as they copy data under dq_data_lock
 * spinlock to internal buffers before writing.
 *
 * Lock ordering (including related VFS locks) is the following:
 *   dqonoff_mutex > i_mutex > journal_lock > dquot->dq_lock > dqio_mutex
 * dqonoff_mutex > i_mutex comes from dquot_quota_sync, dquot_enable, etc.
 */

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock);
__cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
EXPORT_SYMBOL(dq_data_lock);
DEFINE_STATIC_SRCU(dquot_srcu);

void __quota_error(struct super_block *sb, const char *func,
		   const char *fmt, ...)
{
	if (printk_ratelimit()) {
		va_list args;
		struct va_format vaf;

		va_start(args, fmt);

		vaf.fmt = fmt;
		vaf.va = &args;

		printk(KERN_ERR "Quota error (device %s): %s: %pV\n",
		       sb->s_id, func, &vaf);

		va_end(args);
	}
}
EXPORT_SYMBOL(__quota_error);
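
/*
 * A minimal sketch of how a filesystem would report a quota problem through
 * the helper above. Callers normally go through the quota_error() macro,
 * which supplies __func__ for them; the message text below is illustrative
 * only:
 *
 *	quota_error(sb, "Corrupted quota data for id %u", id);
 *
 * The output is gated by printk_ratelimit(), so a storm of errors from a
 * damaged quota file will not flood the log.
 */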

#if defined(CONFIG_QUOTA_DEBUG) || defined(CONFIG_PRINT_QUOTA_WARNING)
static char *quotatypes[] = INITQFNAMES;
#endif
static struct quota_format_type *quota_formats;	/* List of registered formats */
static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;

/* SLAB cache for dquot structures */
static struct kmem_cache *dquot_cachep;

int register_quota_format(struct quota_format_type *fmt)
{
	spin_lock(&dq_list_lock);
	fmt->qf_next = quota_formats;
	quota_formats = fmt;
	spin_unlock(&dq_list_lock);
	return 0;
}
EXPORT_SYMBOL(register_quota_format);

void unregister_quota_format(struct quota_format_type *fmt)
{
	struct quota_format_type **actqf;

	spin_lock(&dq_list_lock);
	for (actqf = &quota_formats; *actqf && *actqf != fmt;
	     actqf = &(*actqf)->qf_next)
		;
	if (*actqf)
		*actqf = (*actqf)->qf_next;
	spin_unlock(&dq_list_lock);
}
EXPORT_SYMBOL(unregister_quota_format);

static struct quota_format_type *find_quota_format(int id)
{
	struct quota_format_type *actqf;

	spin_lock(&dq_list_lock);
	for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
	     actqf = actqf->qf_next)
		;
	if (!actqf || !try_module_get(actqf->qf_owner)) {
		int qm;

		spin_unlock(&dq_list_lock);

		for (qm = 0; module_names[qm].qm_fmt_id &&
			     module_names[qm].qm_fmt_id != id; qm++)
			;
		if (!module_names[qm].qm_fmt_id ||
		    request_module(module_names[qm].qm_mod_name))
			return NULL;

		spin_lock(&dq_list_lock);
		for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
		     actqf = actqf->qf_next)
			;
		if (actqf && !try_module_get(actqf->qf_owner))
			actqf = NULL;
	}
	spin_unlock(&dq_list_lock);
	return actqf;
}

static void put_quota_format(struct quota_format_type *fmt)
{
	module_put(fmt->qf_owner);
}

/*
 * Dquot List Management:
 * The quota code uses three lists for dquot management: the inuse_list,
 * free_dquots, and dquot_hash[] array. A single dquot structure may be
 * on all three lists, depending on its current state.
 *
 * All dquots are placed to the end of inuse_list when first created, and this
 * list is used for invalidate operation, which must look at every dquot.
 *
 * Unused dquots (dq_count == 0) are added to the free_dquots list when freed,
 * and this list is searched whenever we need an available dquot. Dquots are
 * removed from the list as soon as they are used again, and
 * dqstats.free_dquots gives the number of dquots on the list. When
 * dquot is invalidated it's completely released from memory.
 *
 * Dquots with a specific identity (device, type and id) are placed on
 * one of the dquot_hash[] hash chains. This provides an efficient search
 * mechanism to locate a specific dquot.
 */
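
/*
 * Sketch of a dquot's life across those lists (informal, state names are
 * descriptive only):
 *
 *	dqget(new id)  -> on inuse_list + dquot_hash	(dq_count == 1)
 *	last dqput()   -> also on free_dquots		(dq_count == 0)
 *	dqget(again)   -> removed from free_dquots	(dq_count == 1)
 *	shrinker /
 *	invalidate     -> removed from all three lists and destroyed
 *
 * All of these transitions happen under dq_list_lock.
 */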

static LIST_HEAD(inuse_list);
static LIST_HEAD(free_dquots);
static unsigned int dq_hash_bits, dq_hash_mask;
static struct hlist_head *dquot_hash;

struct dqstats dqstats;
EXPORT_SYMBOL(dqstats);

static qsize_t inode_get_rsv_space(struct inode *inode);
static int __dquot_initialize(struct inode *inode, int type);

static inline unsigned int
hashfn(const struct super_block *sb, struct kqid qid)
{
	unsigned int id = from_kqid(&init_user_ns, qid);
	int type = qid.type;
	unsigned long tmp;

	tmp = (((unsigned long)sb>>L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type);
	return (tmp + (tmp >> dq_hash_bits)) & dq_hash_mask;
}

/*
 * Following list functions expect dq_list_lock to be held
 */
static inline void insert_dquot_hash(struct dquot *dquot)
{
	struct hlist_head *head;
	head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id);
	hlist_add_head(&dquot->dq_hash, head);
}

static inline void remove_dquot_hash(struct dquot *dquot)
{
	hlist_del_init(&dquot->dq_hash);
}

static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb,
				struct kqid qid)
{
	struct hlist_node *node;
	struct dquot *dquot;

	hlist_for_each (node, dquot_hash+hashent) {
		dquot = hlist_entry(node, struct dquot, dq_hash);
		if (dquot->dq_sb == sb && qid_eq(dquot->dq_id, qid))
			return dquot;
	}
	return NULL;
}

/* Add a dquot to the tail of the free list */
static inline void put_dquot_last(struct dquot *dquot)
{
	list_add_tail(&dquot->dq_free, &free_dquots);
	dqstats_inc(DQST_FREE_DQUOTS);
}

static inline void remove_free_dquot(struct dquot *dquot)
{
	if (list_empty(&dquot->dq_free))
		return;
	list_del_init(&dquot->dq_free);
	dqstats_dec(DQST_FREE_DQUOTS);
}

static inline void put_inuse(struct dquot *dquot)
{
	/* We add to the back of inuse list so we don't have to restart
	 * when traversing this list and we block */
	list_add_tail(&dquot->dq_inuse, &inuse_list);
	dqstats_inc(DQST_ALLOC_DQUOTS);
}

static inline void remove_inuse(struct dquot *dquot)
{
	dqstats_dec(DQST_ALLOC_DQUOTS);
	list_del(&dquot->dq_inuse);
}
/*
 * End of list functions needing dq_list_lock
 */

static void wait_on_dquot(struct dquot *dquot)
{
	mutex_lock(&dquot->dq_lock);
	mutex_unlock(&dquot->dq_lock);
}

static inline int dquot_dirty(struct dquot *dquot)
{
	return test_bit(DQ_MOD_B, &dquot->dq_flags);
}

static inline int mark_dquot_dirty(struct dquot *dquot)
{
	return dquot->dq_sb->dq_op->mark_dirty(dquot);
}

/* Mark dquot dirty in atomic manner, and return its old dirty flag state */
int dquot_mark_dquot_dirty(struct dquot *dquot)
{
	int ret = 1;

	/* If quota is dirty already, we don't have to acquire dq_list_lock */
	if (test_bit(DQ_MOD_B, &dquot->dq_flags))
		return 1;

	spin_lock(&dq_list_lock);
	if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) {
		list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)->
				info[dquot->dq_id.type].dqi_dirty_list);
		ret = 0;
	}
	spin_unlock(&dq_list_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_mark_dquot_dirty);
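
/*
 * How ->mark_dirty is wired up varies by filesystem. The plain (non-journalled)
 * dquot_operations defined later in this file point ->mark_dirty straight at
 * dquot_mark_dquot_dirty(). A journalling filesystem typically wraps it so the
 * dquot is also committed under a transaction; roughly (hypothetical wrapper
 * names, modelled on the ext3/ext4-style journalled quota approach):
 *
 *	static int fs_mark_dquot_dirty(struct dquot *dquot)
 *	{
 *		if (fs_is_journalled_quota(dquot->dq_sb, dquot->dq_id.type)) {
 *			dquot_mark_dquot_dirty(dquot);
 *			return fs_write_dquot(dquot);	// goes via journal
 *		}
 *		return dquot_mark_dquot_dirty(dquot);
 *	}
 */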

/* Dirtify all the dquots - this can block when journalling */
static inline int mark_all_dquot_dirty(struct dquot * const *dquot)
{
	int ret, err, cnt;

	ret = err = 0;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (dquot[cnt])
			/* Even in case of error we have to continue */
			ret = mark_dquot_dirty(dquot[cnt]);
		if (!err)
			err = ret;
	}
	return err;
}

static inline void dqput_all(struct dquot **dquot)
{
	unsigned int cnt;

	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		dqput(dquot[cnt]);
}

/* This function needs dq_list_lock */
static inline int clear_dquot_dirty(struct dquot *dquot)
{
	if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags))
		return 0;
	list_del_init(&dquot->dq_dirty);
	return 1;
}

void mark_info_dirty(struct super_block *sb, int type)
{
	set_bit(DQF_INFO_DIRTY_B, &sb_dqopt(sb)->info[type].dqi_flags);
}
EXPORT_SYMBOL(mark_info_dirty);

/*
 * Read dquot from disk and alloc space for it
 */

int dquot_acquire(struct dquot *dquot)
{
	int ret = 0, ret2 = 0;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dquot->dq_lock);
	mutex_lock(&dqopt->dqio_mutex);
	if (!test_bit(DQ_READ_B, &dquot->dq_flags))
		ret = dqopt->ops[dquot->dq_id.type]->read_dqblk(dquot);
	if (ret < 0)
		goto out_iolock;
	set_bit(DQ_READ_B, &dquot->dq_flags);
	/* Instantiate dquot if needed */
	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && !dquot->dq_off) {
		ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
		/* Write the info if needed */
		if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
			ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
					dquot->dq_sb, dquot->dq_id.type);
		}
		if (ret < 0)
			goto out_iolock;
		if (ret2 < 0) {
			ret = ret2;
			goto out_iolock;
		}
	}
	set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_iolock:
	mutex_unlock(&dqopt->dqio_mutex);
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_acquire);

/*
 * Write dquot to disk
 */
int dquot_commit(struct dquot *dquot)
{
	int ret = 0;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dqopt->dqio_mutex);
	spin_lock(&dq_list_lock);
	if (!clear_dquot_dirty(dquot)) {
		spin_unlock(&dq_list_lock);
		goto out_sem;
	}
	spin_unlock(&dq_list_lock);
	/* An inactive dquot can only exist if there was an error during
	 * read/init => we'd better not write it */
	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
		ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
	else
		ret = -EIO;
out_sem:
	mutex_unlock(&dqopt->dqio_mutex);
	return ret;
}
EXPORT_SYMBOL(dquot_commit);

/*
 * Release dquot
 */
int dquot_release(struct dquot *dquot)
{
	int ret = 0, ret2 = 0;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dquot->dq_lock);
	/* Check whether we are not racing with some other dqget() */
	if (atomic_read(&dquot->dq_count) > 1)
		goto out_dqlock;
	mutex_lock(&dqopt->dqio_mutex);
	if (dqopt->ops[dquot->dq_id.type]->release_dqblk) {
		ret = dqopt->ops[dquot->dq_id.type]->release_dqblk(dquot);
		/* Write the info */
		if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
			ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
						dquot->dq_sb, dquot->dq_id.type);
		}
		if (ret >= 0)
			ret = ret2;
	}
	clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
	mutex_unlock(&dqopt->dqio_mutex);
out_dqlock:
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_release);
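
/*
 * The read_dqblk/commit_dqblk/release_dqblk/write_file_info hooks used above
 * come from the format driver registered via register_quota_format(). A
 * minimal sketch of such a driver, with hypothetical "myfmt_" callbacks (the
 * real vfsv0/vfsv1 drivers live elsewhere in the quota code):
 *
 *	static const struct quota_format_ops myfmt_ops = {
 *		.check_quota_file = myfmt_check_quota_file,
 *		.read_file_info   = myfmt_read_file_info,
 *		.write_file_info  = myfmt_write_file_info,
 *		.read_dqblk       = myfmt_read_dqblk,
 *		.commit_dqblk     = myfmt_commit_dqblk,
 *		.release_dqblk    = myfmt_release_dqblk,
 *	};
 *
 *	static struct quota_format_type myfmt_format = {
 *		.qf_fmt_id = QFMT_VFS_V1,	// example id only
 *		.qf_ops    = &myfmt_ops,
 *		.qf_owner  = THIS_MODULE,
 *	};
 *
 *	// in module init: register_quota_format(&myfmt_format);
 */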

void dquot_destroy(struct dquot *dquot)
{
	kmem_cache_free(dquot_cachep, dquot);
}
EXPORT_SYMBOL(dquot_destroy);

static inline void do_destroy_dquot(struct dquot *dquot)
{
	dquot->dq_sb->dq_op->destroy_dquot(dquot);
}

/* Invalidate all dquots on the list. Note that this function is called after
 * quota is disabled and pointers from inodes removed so there cannot be new
 * quota users. There can still be some users of quotas due to inodes being
 * just deleted or pruned by prune_icache() (those are not attached to any
 * list) or parallel quotactl call. We have to wait for such users.
 */
static void invalidate_dquots(struct super_block *sb, int type)
{
	struct dquot *dquot, *tmp;

restart:
	spin_lock(&dq_list_lock);
	list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
		if (dquot->dq_sb != sb)
			continue;
		if (dquot->dq_id.type != type)
			continue;
		/* Wait for dquot users */
		if (atomic_read(&dquot->dq_count)) {
			DEFINE_WAIT(wait);

			dqgrab(dquot);
			prepare_to_wait(&dquot->dq_wait_unused, &wait,
					TASK_UNINTERRUPTIBLE);
			spin_unlock(&dq_list_lock);
			/* Once dqput() wakes us up, we know it's time to free
			 * the dquot.
			 * IMPORTANT: we rely on the fact that there is always
			 * at most one process waiting for dquot to free.
			 * Otherwise dq_count would be > 1 and we would never
			 * wake up.
			 */
			if (atomic_read(&dquot->dq_count) > 1)
				schedule();
			finish_wait(&dquot->dq_wait_unused, &wait);
			dqput(dquot);
			/* At this moment the dquot need not exist anymore
			 * (it could have been reclaimed by prune_dqcache()).
			 * Hence we must restart. */
			goto restart;
		}
		/*
		 * Quota now has no users and it has been written on last
		 * dqput()
		 */
		remove_dquot_hash(dquot);
		remove_free_dquot(dquot);
		remove_inuse(dquot);
		do_destroy_dquot(dquot);
	}
	spin_unlock(&dq_list_lock);
}

/* Call callback for every active dquot on given filesystem */
int dquot_scan_active(struct super_block *sb,
		      int (*fn)(struct dquot *dquot, unsigned long priv),
		      unsigned long priv)
{
	struct dquot *dquot, *old_dquot = NULL;
	int ret = 0;

	mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
	spin_lock(&dq_list_lock);
	list_for_each_entry(dquot, &inuse_list, dq_inuse) {
		if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
			continue;
		if (dquot->dq_sb != sb)
			continue;
		/* Now we have active dquot so we can just increase use count */
		atomic_inc(&dquot->dq_count);
		spin_unlock(&dq_list_lock);
		dqstats_inc(DQST_LOOKUPS);
		dqput(old_dquot);
		old_dquot = dquot;
		/*
		 * ->release_dquot() can be racing with us. Our reference
		 * protects us from new calls to it so just wait for any
		 * outstanding call and recheck the DQ_ACTIVE_B after that.
		 */
		wait_on_dquot(dquot);
		if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
			ret = fn(dquot, priv);
			if (ret < 0)
				goto out;
		}
		spin_lock(&dq_list_lock);
		/* We are safe to continue now because our dquot could not
		 * be moved out of the inuse list while we hold the reference */
	}
	spin_unlock(&dq_list_lock);
out:
	dqput(old_dquot);
	mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
	return ret;
}
EXPORT_SYMBOL(dquot_scan_active);
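
/*
 * Usage sketch for dquot_scan_active(): the callback runs once per active
 * dquot with a reference held, and may block. A hypothetical caller that
 * counts active ids on a filesystem could look like:
 *
 *	static int count_one(struct dquot *dquot, unsigned long priv)
 *	{
 *		(*(unsigned long *)priv)++;
 *		return 0;		// a negative return aborts the scan
 *	}
 *
 *	unsigned long n = 0;
 *	dquot_scan_active(sb, count_one, (unsigned long)&n);
 */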

/* Write all dquot structures to quota files */
int dquot_writeback_dquots(struct super_block *sb, int type)
{
	struct list_head *dirty;
	struct dquot *dquot;
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	int err, ret = 0;

	mutex_lock(&dqopt->dqonoff_mutex);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;
		spin_lock(&dq_list_lock);
		dirty = &dqopt->info[cnt].dqi_dirty_list;
		while (!list_empty(dirty)) {
			dquot = list_first_entry(dirty, struct dquot,
						 dq_dirty);
			/* A dirty but inactive dquot can only be a bad one */
			if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
				clear_dquot_dirty(dquot);
				continue;
			}
			/* Now we have active dquot from which someone is
			 * holding reference so we can safely just increase
			 * use count */
			dqgrab(dquot);
			spin_unlock(&dq_list_lock);
			dqstats_inc(DQST_LOOKUPS);
			err = sb->dq_op->write_dquot(dquot);
			if (!ret && err)
				ret = err;
			dqput(dquot);
			spin_lock(&dq_list_lock);
		}
		spin_unlock(&dq_list_lock);
	}

	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt)
		    && info_dirty(&dqopt->info[cnt]))
			sb->dq_op->write_info(sb, cnt);
	dqstats_inc(DQST_SYNCS);
	mutex_unlock(&dqopt->dqonoff_mutex);

	return ret;
}
EXPORT_SYMBOL(dquot_writeback_dquots);

/* Write all dquot structures to disk and make them visible from userspace */
int dquot_quota_sync(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	int ret;

	ret = dquot_writeback_dquots(sb, type);
	if (ret)
		return ret;
	if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
		return 0;

	/* This is not very clever (and fast) but currently I don't know about
	 * any other simple way of getting quota data to disk and we must get
	 * them there for the changes to be visible to userspace... */
	if (sb->s_op->sync_fs)
		sb->s_op->sync_fs(sb, 1);
	sync_blockdev(sb->s_bdev);

	/*
	 * Now when everything is written we can discard the pagecache so
	 * that userspace sees the changes.
	 */
	mutex_lock(&dqopt->dqonoff_mutex);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;
		mutex_lock(&dqopt->files[cnt]->i_mutex);
		truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
		mutex_unlock(&dqopt->files[cnt]->i_mutex);
	}
	mutex_unlock(&dqopt->dqonoff_mutex);

	return 0;
}
EXPORT_SYMBOL(dquot_quota_sync);
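
/*
 * The dqcache shrinker below lets the VM reclaim unused dquots (those parked
 * on free_dquots) under memory pressure. It is registered with
 * register_shrinker() from the quota initialization code, which is outside
 * this excerpt.
 */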

static unsigned long
dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct list_head *head;
	struct dquot *dquot;
	unsigned long freed = 0;

	spin_lock(&dq_list_lock);
	head = free_dquots.prev;
	while (head != &free_dquots && sc->nr_to_scan) {
		dquot = list_entry(head, struct dquot, dq_free);
		remove_dquot_hash(dquot);
		remove_free_dquot(dquot);
		remove_inuse(dquot);
		do_destroy_dquot(dquot);
		sc->nr_to_scan--;
		freed++;
		head = free_dquots.prev;
	}
	spin_unlock(&dq_list_lock);
	return freed;
}

static unsigned long
dqcache_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	return vfs_pressure_ratio(
	percpu_counter_read_positive(&dqstats.counter[DQST_FREE_DQUOTS]));
}

static struct shrinker dqcache_shrinker = {
	.count_objects = dqcache_shrink_count,
	.scan_objects = dqcache_shrink_scan,
	.seeks = DEFAULT_SEEKS,
};

/*
 * Put reference to dquot
 */
void dqput(struct dquot *dquot)
{
	int ret;

	if (!dquot)
		return;
#ifdef CONFIG_QUOTA_DEBUG
	if (!atomic_read(&dquot->dq_count)) {
		quota_error(dquot->dq_sb, "trying to free free dquot of %s %d",
			    quotatypes[dquot->dq_id.type],
			    from_kqid(&init_user_ns, dquot->dq_id));
		BUG();
	}
#endif
	dqstats_inc(DQST_DROPS);
we_slept:
	spin_lock(&dq_list_lock);
	if (atomic_read(&dquot->dq_count) > 1) {
		/* We have more than one user... nothing to do */
		atomic_dec(&dquot->dq_count);
		/* Releasing dquot during quotaoff phase? */
		if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_id.type) &&
		    atomic_read(&dquot->dq_count) == 1)
			wake_up(&dquot->dq_wait_unused);
		spin_unlock(&dq_list_lock);
		return;
	}
	/* Need to release dquot? */
	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && dquot_dirty(dquot)) {
		spin_unlock(&dq_list_lock);
		/* Commit dquot before releasing */
		ret = dquot->dq_sb->dq_op->write_dquot(dquot);
		if (ret < 0) {
			quota_error(dquot->dq_sb, "Can't write quota structure"
				    " (error %d). Quota may get out of sync!",
				    ret);
			/*
			 * We clear dirty bit anyway, so that we avoid
			 * infinite loop here
			 */
			spin_lock(&dq_list_lock);
			clear_dquot_dirty(dquot);
			spin_unlock(&dq_list_lock);
		}
		goto we_slept;
	}
	/* Clear flag in case dquot was inactive (something bad happened) */
	clear_dquot_dirty(dquot);
	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
		spin_unlock(&dq_list_lock);
		dquot->dq_sb->dq_op->release_dquot(dquot);
		goto we_slept;
	}
	atomic_dec(&dquot->dq_count);
#ifdef CONFIG_QUOTA_DEBUG
	/* sanity check */
	BUG_ON(!list_empty(&dquot->dq_free));
#endif
	put_dquot_last(dquot);
	spin_unlock(&dq_list_lock);
}
EXPORT_SYMBOL(dqput);
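
/*
 * Informal summary of the loop above when the last reference is dropped:
 *
 *	dq_count == 1, dirty	-> ->write_dquot(), retry
 *	dq_count == 1, active	-> ->release_dquot(), retry
 *	dq_count == 1, inactive	-> dq_count--, park on free_dquots
 *
 * Each blocking step drops dq_list_lock, so the state is re-examined from
 * scratch on every pass (the "we_slept" label).
 */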
Quota may get out of sync!", 769 ret); 770 /* 771 * We clear dirty bit anyway, so that we avoid 772 * infinite loop here 773 */ 774 spin_lock(&dq_list_lock); 775 clear_dquot_dirty(dquot); 776 spin_unlock(&dq_list_lock); 777 } 778 goto we_slept; 779 } 780 /* Clear flag in case dquot was inactive (something bad happened) */ 781 clear_dquot_dirty(dquot); 782 if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) { 783 spin_unlock(&dq_list_lock); 784 dquot->dq_sb->dq_op->release_dquot(dquot); 785 goto we_slept; 786 } 787 atomic_dec(&dquot->dq_count); 788 #ifdef CONFIG_QUOTA_DEBUG 789 /* sanity check */ 790 BUG_ON(!list_empty(&dquot->dq_free)); 791 #endif 792 put_dquot_last(dquot); 793 spin_unlock(&dq_list_lock); 794 } 795 EXPORT_SYMBOL(dqput); 796 797 struct dquot *dquot_alloc(struct super_block *sb, int type) 798 { 799 return kmem_cache_zalloc(dquot_cachep, GFP_NOFS); 800 } 801 EXPORT_SYMBOL(dquot_alloc); 802 803 static struct dquot *get_empty_dquot(struct super_block *sb, int type) 804 { 805 struct dquot *dquot; 806 807 dquot = sb->dq_op->alloc_dquot(sb, type); 808 if(!dquot) 809 return NULL; 810 811 mutex_init(&dquot->dq_lock); 812 INIT_LIST_HEAD(&dquot->dq_free); 813 INIT_LIST_HEAD(&dquot->dq_inuse); 814 INIT_HLIST_NODE(&dquot->dq_hash); 815 INIT_LIST_HEAD(&dquot->dq_dirty); 816 init_waitqueue_head(&dquot->dq_wait_unused); 817 dquot->dq_sb = sb; 818 dquot->dq_id = make_kqid_invalid(type); 819 atomic_set(&dquot->dq_count, 1); 820 821 return dquot; 822 } 823 824 /* 825 * Get reference to dquot 826 * 827 * Locking is slightly tricky here. We are guarded from parallel quotaoff() 828 * destroying our dquot by: 829 * a) checking for quota flags under dq_list_lock and 830 * b) getting a reference to dquot before we release dq_list_lock 831 */ 832 struct dquot *dqget(struct super_block *sb, struct kqid qid) 833 { 834 unsigned int hashent = hashfn(sb, qid); 835 struct dquot *dquot, *empty = NULL; 836 837 if (!sb_has_quota_active(sb, qid.type)) 838 return ERR_PTR(-ESRCH); 839 we_slept: 840 spin_lock(&dq_list_lock); 841 spin_lock(&dq_state_lock); 842 if (!sb_has_quota_active(sb, qid.type)) { 843 spin_unlock(&dq_state_lock); 844 spin_unlock(&dq_list_lock); 845 dquot = ERR_PTR(-ESRCH); 846 goto out; 847 } 848 spin_unlock(&dq_state_lock); 849 850 dquot = find_dquot(hashent, sb, qid); 851 if (!dquot) { 852 if (!empty) { 853 spin_unlock(&dq_list_lock); 854 empty = get_empty_dquot(sb, qid.type); 855 if (!empty) 856 schedule(); /* Try to wait for a moment... */ 857 goto we_slept; 858 } 859 dquot = empty; 860 empty = NULL; 861 dquot->dq_id = qid; 862 /* all dquots go on the inuse_list */ 863 put_inuse(dquot); 864 /* hash it first so it can be found */ 865 insert_dquot_hash(dquot); 866 spin_unlock(&dq_list_lock); 867 dqstats_inc(DQST_LOOKUPS); 868 } else { 869 if (!atomic_read(&dquot->dq_count)) 870 remove_free_dquot(dquot); 871 atomic_inc(&dquot->dq_count); 872 spin_unlock(&dq_list_lock); 873 dqstats_inc(DQST_CACHE_HITS); 874 dqstats_inc(DQST_LOOKUPS); 875 } 876 /* Wait for dq_lock - after this we know that either dquot_release() is 877 * already finished or it will be canceled due to dq_count > 1 test */ 878 wait_on_dquot(dquot); 879 /* Read the dquot / allocate space in quota file */ 880 if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) { 881 int err; 882 883 err = sb->dq_op->acquire_dquot(dquot); 884 if (err < 0) { 885 dqput(dquot); 886 dquot = ERR_PTR(err); 887 goto out; 888 } 889 } 890 #ifdef CONFIG_QUOTA_DEBUG 891 BUG_ON(!dquot->dq_sb); /* Has somebody invalidated entry under us? 

static inline struct dquot **i_dquot(struct inode *inode)
{
	return inode->i_sb->s_op->get_dquots(inode);
}

static int dqinit_needed(struct inode *inode, int type)
{
	struct dquot * const *dquots;
	int cnt;

	if (IS_NOQUOTA(inode))
		return 0;

	dquots = i_dquot(inode);
	if (type != -1)
		return !dquots[type];
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (!dquots[cnt])
			return 1;
	return 0;
}

/* This routine is guarded by dqonoff_mutex */
static void add_dquot_ref(struct super_block *sb, int type)
{
	struct inode *inode, *old_inode = NULL;
#ifdef CONFIG_QUOTA_DEBUG
	int reserved = 0;
#endif

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
		    !atomic_read(&inode->i_writecount) ||
		    !dqinit_needed(inode, type)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&sb->s_inode_list_lock);

#ifdef CONFIG_QUOTA_DEBUG
		if (unlikely(inode_get_rsv_space(inode) > 0))
			reserved = 1;
#endif
		iput(old_inode);
		__dquot_initialize(inode, type);

		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the
		 * s_inode_list_lock. We cannot iput the inode now as we can be
		 * holding the last reference and we cannot iput it under
		 * s_inode_list_lock. So we keep the reference and iput it
		 * later.
		 */
		old_inode = inode;
		spin_lock(&sb->s_inode_list_lock);
	}
	spin_unlock(&sb->s_inode_list_lock);
	iput(old_inode);

#ifdef CONFIG_QUOTA_DEBUG
	if (reserved) {
		quota_error(sb, "Writes happened before quota was turned on "
			"thus quota information is probably inconsistent. "
			"Please run quotacheck(8)");
	}
#endif
}

/*
 * Remove references to dquots from inode and add dquot to list for freeing
 * if we have the last reference to dquot
 */
static void remove_inode_dquot_ref(struct inode *inode, int type,
				   struct list_head *tofree_head)
{
	struct dquot **dquots = i_dquot(inode);
	struct dquot *dquot = dquots[type];

	if (!dquot)
		return;

	dquots[type] = NULL;
	if (list_empty(&dquot->dq_free)) {
		/*
		 * The inode still has reference to dquot so it can't be in the
		 * free list
		 */
		spin_lock(&dq_list_lock);
		list_add(&dquot->dq_free, tofree_head);
		spin_unlock(&dq_list_lock);
	} else {
		/*
		 * Dquot is already in a list to put so we won't drop the last
		 * reference here.
		 */
		dqput(dquot);
	}
}

/*
 * Free list of dquots
 * Dquots are removed from inodes and no new references can be got so we are
 * the only ones holding reference
 */
static void put_dquot_list(struct list_head *tofree_head)
{
	struct list_head *act_head;
	struct dquot *dquot;

	act_head = tofree_head->next;
	while (act_head != tofree_head) {
		dquot = list_entry(act_head, struct dquot, dq_free);
		act_head = act_head->next;
		/* Remove dquot from the list so we won't have problems... */
		list_del_init(&dquot->dq_free);
		dqput(dquot);
	}
}

static void remove_dquot_ref(struct super_block *sb, int type,
			     struct list_head *tofree_head)
{
	struct inode *inode;
	int reserved = 0;

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		/*
		 * We have to scan also I_NEW inodes because they can already
		 * have quota pointer initialized. Luckily, we need to touch
		 * only quota pointers and these have separate locking
		 * (dq_data_lock).
		 */
		spin_lock(&dq_data_lock);
		if (!IS_NOQUOTA(inode)) {
			if (unlikely(inode_get_rsv_space(inode) > 0))
				reserved = 1;
			remove_inode_dquot_ref(inode, type, tofree_head);
		}
		spin_unlock(&dq_data_lock);
	}
	spin_unlock(&sb->s_inode_list_lock);
#ifdef CONFIG_QUOTA_DEBUG
	if (reserved) {
		printk(KERN_WARNING "VFS (%s): Writes happened after quota"
			" was disabled thus quota information is probably "
			"inconsistent. Please run quotacheck(8).\n", sb->s_id);
	}
#endif
}

/* Gather all references from inodes and drop them */
static void drop_dquot_ref(struct super_block *sb, int type)
{
	LIST_HEAD(tofree_head);

	if (sb->dq_op) {
		remove_dquot_ref(sb, type, &tofree_head);
		synchronize_srcu(&dquot_srcu);
		put_dquot_list(&tofree_head);
	}
}
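
/*
 * Note the ordering in drop_dquot_ref(): pointers are first cleared from all
 * inodes (under dq_data_lock), then synchronize_srcu() waits for every
 * srcu_read_lock(&dquot_srcu) section that might still dereference them, and
 * only then are the references dropped. This is the teardown half of the
 * dquot_srcu scheme described at the top of this file.
 */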

static inline void dquot_incr_inodes(struct dquot *dquot, qsize_t number)
{
	dquot->dq_dqb.dqb_curinodes += number;
}

static inline void dquot_incr_space(struct dquot *dquot, qsize_t number)
{
	dquot->dq_dqb.dqb_curspace += number;
}

static inline void dquot_resv_space(struct dquot *dquot, qsize_t number)
{
	dquot->dq_dqb.dqb_rsvspace += number;
}

/*
 * Claim reserved quota space
 */
static void dquot_claim_reserved_space(struct dquot *dquot, qsize_t number)
{
	if (dquot->dq_dqb.dqb_rsvspace < number) {
		WARN_ON_ONCE(1);
		number = dquot->dq_dqb.dqb_rsvspace;
	}
	dquot->dq_dqb.dqb_curspace += number;
	dquot->dq_dqb.dqb_rsvspace -= number;
}

static void dquot_reclaim_reserved_space(struct dquot *dquot, qsize_t number)
{
	if (WARN_ON_ONCE(dquot->dq_dqb.dqb_curspace < number))
		number = dquot->dq_dqb.dqb_curspace;
	dquot->dq_dqb.dqb_rsvspace += number;
	dquot->dq_dqb.dqb_curspace -= number;
}

static inline
void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
{
	if (dquot->dq_dqb.dqb_rsvspace >= number)
		dquot->dq_dqb.dqb_rsvspace -= number;
	else {
		WARN_ON_ONCE(1);
		dquot->dq_dqb.dqb_rsvspace = 0;
	}
}

static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
{
	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
	    dquot->dq_dqb.dqb_curinodes >= number)
		dquot->dq_dqb.dqb_curinodes -= number;
	else
		dquot->dq_dqb.dqb_curinodes = 0;
	if (dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit)
		dquot->dq_dqb.dqb_itime = (time_t) 0;
	clear_bit(DQ_INODES_B, &dquot->dq_flags);
}

static void dquot_decr_space(struct dquot *dquot, qsize_t number)
{
	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
	    dquot->dq_dqb.dqb_curspace >= number)
		dquot->dq_dqb.dqb_curspace -= number;
	else
		dquot->dq_dqb.dqb_curspace = 0;
	if (dquot->dq_dqb.dqb_curspace <= dquot->dq_dqb.dqb_bsoftlimit)
		dquot->dq_dqb.dqb_btime = (time_t) 0;
	clear_bit(DQ_BLKS_B, &dquot->dq_flags);
}

struct dquot_warn {
	struct super_block *w_sb;
	struct kqid w_dq_id;
	short w_type;
};

static int warning_issued(struct dquot *dquot, const int warntype)
{
	int flag = (warntype == QUOTA_NL_BHARDWARN ||
		warntype == QUOTA_NL_BSOFTLONGWARN) ? DQ_BLKS_B :
		((warntype == QUOTA_NL_IHARDWARN ||
		warntype == QUOTA_NL_ISOFTLONGWARN) ? DQ_INODES_B : 0);

	if (!flag)
		return 0;
	return test_and_set_bit(flag, &dquot->dq_flags);
}

#ifdef CONFIG_PRINT_QUOTA_WARNING
static int flag_print_warnings = 1;

static int need_print_warning(struct dquot_warn *warn)
{
	if (!flag_print_warnings)
		return 0;

	switch (warn->w_dq_id.type) {
	case USRQUOTA:
		return uid_eq(current_fsuid(), warn->w_dq_id.uid);
	case GRPQUOTA:
		return in_group_p(warn->w_dq_id.gid);
	case PRJQUOTA:
		return 1;
	}
	return 0;
}

/* Print warning to the user who exceeded quota */
static void print_warning(struct dquot_warn *warn)
{
	char *msg = NULL;
	struct tty_struct *tty;
	int warntype = warn->w_type;

	if (warntype == QUOTA_NL_IHARDBELOW ||
	    warntype == QUOTA_NL_ISOFTBELOW ||
	    warntype == QUOTA_NL_BHARDBELOW ||
	    warntype == QUOTA_NL_BSOFTBELOW || !need_print_warning(warn))
		return;

	tty = get_current_tty();
	if (!tty)
		return;
	tty_write_message(tty, warn->w_sb->s_id);
	if (warntype == QUOTA_NL_ISOFTWARN || warntype == QUOTA_NL_BSOFTWARN)
		tty_write_message(tty, ": warning, ");
	else
		tty_write_message(tty, ": write failed, ");
	tty_write_message(tty, quotatypes[warn->w_dq_id.type]);
	switch (warntype) {
	case QUOTA_NL_IHARDWARN:
		msg = " file limit reached.\r\n";
		break;
	case QUOTA_NL_ISOFTLONGWARN:
		msg = " file quota exceeded too long.\r\n";
		break;
	case QUOTA_NL_ISOFTWARN:
		msg = " file quota exceeded.\r\n";
		break;
	case QUOTA_NL_BHARDWARN:
		msg = " block limit reached.\r\n";
		break;
	case QUOTA_NL_BSOFTLONGWARN:
		msg = " block quota exceeded too long.\r\n";
		break;
	case QUOTA_NL_BSOFTWARN:
		msg = " block quota exceeded.\r\n";
		break;
	}
	tty_write_message(tty, msg);
	tty_kref_put(tty);
}
#endif

static void prepare_warning(struct dquot_warn *warn, struct dquot *dquot,
			    int warntype)
{
	if (warning_issued(dquot, warntype))
		return;
	warn->w_type = warntype;
	warn->w_sb = dquot->dq_sb;
	warn->w_dq_id = dquot->dq_id;
}

/*
 * Write warnings to the console and send warning messages over netlink.
 *
 * Note that this function can call into tty and networking code.
 */
static void flush_warnings(struct dquot_warn *warn)
{
	int i;

	for (i = 0; i < MAXQUOTAS; i++) {
		if (warn[i].w_type == QUOTA_NL_NOWARN)
			continue;
#ifdef CONFIG_PRINT_QUOTA_WARNING
		print_warning(&warn[i]);
#endif
		quota_send_warning(warn[i].w_dq_id,
				   warn[i].w_sb->s_dev, warn[i].w_type);
	}
}

static int ignore_hardlimit(struct dquot *dquot)
{
	struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];

	return capable(CAP_SYS_RESOURCE) &&
	       (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
		!(info->dqi_flags & DQF_ROOT_SQUASH));
}

/* needs dq_data_lock */
static int check_idq(struct dquot *dquot, qsize_t inodes,
		     struct dquot_warn *warn)
{
	qsize_t newinodes = dquot->dq_dqb.dqb_curinodes + inodes;

	if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type) ||
	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
		return 0;

	if (dquot->dq_dqb.dqb_ihardlimit &&
	    newinodes > dquot->dq_dqb.dqb_ihardlimit &&
	    !ignore_hardlimit(dquot)) {
		prepare_warning(warn, dquot, QUOTA_NL_IHARDWARN);
		return -EDQUOT;
	}

	if (dquot->dq_dqb.dqb_isoftlimit &&
	    newinodes > dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_itime &&
	    get_seconds() >= dquot->dq_dqb.dqb_itime &&
	    !ignore_hardlimit(dquot)) {
		prepare_warning(warn, dquot, QUOTA_NL_ISOFTLONGWARN);
		return -EDQUOT;
	}

	if (dquot->dq_dqb.dqb_isoftlimit &&
	    newinodes > dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_itime == 0) {
		prepare_warning(warn, dquot, QUOTA_NL_ISOFTWARN);
		dquot->dq_dqb.dqb_itime = get_seconds() +
		    sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type].dqi_igrace;
	}

	return 0;
}

/* needs dq_data_lock */
static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc,
		     struct dquot_warn *warn)
{
	qsize_t tspace;
	struct super_block *sb = dquot->dq_sb;

	if (!sb_has_quota_limits_enabled(sb, dquot->dq_id.type) ||
	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
		return 0;

	tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace
		+ space;

	if (dquot->dq_dqb.dqb_bhardlimit &&
	    tspace > dquot->dq_dqb.dqb_bhardlimit &&
	    !ignore_hardlimit(dquot)) {
		if (!prealloc)
			prepare_warning(warn, dquot, QUOTA_NL_BHARDWARN);
		return -EDQUOT;
	}

	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    tspace > dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_btime &&
	    get_seconds() >= dquot->dq_dqb.dqb_btime &&
	    !ignore_hardlimit(dquot)) {
		if (!prealloc)
			prepare_warning(warn, dquot, QUOTA_NL_BSOFTLONGWARN);
		return -EDQUOT;
	}

	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    tspace > dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_btime == 0) {
		if (!prealloc) {
			prepare_warning(warn, dquot, QUOTA_NL_BSOFTWARN);
			dquot->dq_dqb.dqb_btime = get_seconds() +
			    sb_dqopt(sb)->info[dquot->dq_id.type].dqi_bgrace;
		} else {
			/*
			 * We don't allow preallocation to exceed the softlimit,
			 * so exceeding it will always be reported
			 */
			return -EDQUOT;
		}
	}

	return 0;
}
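
/*
 * Worked example of the soft limit logic above: with bsoftlimit = 100 MiB and
 * dqi_bgrace = 7 days, the first allocation that pushes usage past 100 MiB
 * still succeeds but sets dqb_btime = now + 7 days and queues a
 * QUOTA_NL_BSOFTWARN. Further allocations keep succeeding until either the
 * hard limit is hit (immediate -EDQUOT) or now >= dqb_btime, after which soft
 * limit overruns also fail with -EDQUOT (QUOTA_NL_BSOFTLONGWARN). Dropping
 * back to or below the soft limit clears dqb_btime again, in
 * dquot_decr_space().
 */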

static int info_idq_free(struct dquot *dquot, qsize_t inodes)
{
	qsize_t newinodes;

	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
	    dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit ||
	    !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type))
		return QUOTA_NL_NOWARN;

	newinodes = dquot->dq_dqb.dqb_curinodes - inodes;
	if (newinodes <= dquot->dq_dqb.dqb_isoftlimit)
		return QUOTA_NL_ISOFTBELOW;
	if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit &&
	    newinodes < dquot->dq_dqb.dqb_ihardlimit)
		return QUOTA_NL_IHARDBELOW;
	return QUOTA_NL_NOWARN;
}

static int info_bdq_free(struct dquot *dquot, qsize_t space)
{
	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
	    dquot->dq_dqb.dqb_curspace <= dquot->dq_dqb.dqb_bsoftlimit)
		return QUOTA_NL_NOWARN;

	if (dquot->dq_dqb.dqb_curspace - space <= dquot->dq_dqb.dqb_bsoftlimit)
		return QUOTA_NL_BSOFTBELOW;
	if (dquot->dq_dqb.dqb_curspace >= dquot->dq_dqb.dqb_bhardlimit &&
	    dquot->dq_dqb.dqb_curspace - space < dquot->dq_dqb.dqb_bhardlimit)
		return QUOTA_NL_BHARDBELOW;
	return QUOTA_NL_NOWARN;
}

static int dquot_active(const struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (IS_NOQUOTA(inode))
		return 0;
	return sb_any_quota_loaded(sb) & ~sb_any_quota_suspended(sb);
}

/*
 * Initialize quota pointers in inode
 *
 * It is better to call this function outside of any transaction as it
 * might need a lot of space in journal for dquot structure allocation.
 */
static int __dquot_initialize(struct inode *inode, int type)
{
	int cnt, init_needed = 0;
	struct dquot **dquots, *got[MAXQUOTAS];
	struct super_block *sb = inode->i_sb;
	qsize_t rsv;
	int ret = 0;

	if (!dquot_active(inode))
		return 0;

	dquots = i_dquot(inode);

	/* First get references to structures we might need. */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		struct kqid qid;
		kprojid_t projid;
		int rc;
		struct dquot *dquot;

		got[cnt] = NULL;
		if (type != -1 && cnt != type)
			continue;
		/*
		 * The i_dquot should have been initialized in most cases,
		 * we check it without locking here to avoid unnecessary
		 * dqget()/dqput() calls.
		 */
		if (dquots[cnt])
			continue;

		if (!sb_has_quota_active(sb, cnt))
			continue;

		init_needed = 1;

		switch (cnt) {
		case USRQUOTA:
			qid = make_kqid_uid(inode->i_uid);
			break;
		case GRPQUOTA:
			qid = make_kqid_gid(inode->i_gid);
			break;
		case PRJQUOTA:
			rc = inode->i_sb->dq_op->get_projid(inode, &projid);
			if (rc)
				continue;
			qid = make_kqid_projid(projid);
			break;
		}
		dquot = dqget(sb, qid);
		if (IS_ERR(dquot)) {
			/* We raced with somebody turning quotas off... */
			if (PTR_ERR(dquot) != -ESRCH) {
				ret = PTR_ERR(dquot);
				goto out_put;
			}
			dquot = NULL;
		}
		got[cnt] = dquot;
	}

	/* All required i_dquot pointers have been initialized */
	if (!init_needed)
		return 0;

	spin_lock(&dq_data_lock);
	if (IS_NOQUOTA(inode))
		goto out_lock;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		/* Avoid races with quotaoff() */
		if (!sb_has_quota_active(sb, cnt))
			continue;
		/* We could race with quotaon or dqget() could have failed */
		if (!got[cnt])
			continue;
		if (!dquots[cnt]) {
			dquots[cnt] = got[cnt];
			got[cnt] = NULL;
			/*
			 * Make quota reservation system happy if someone
			 * did a write before quota was turned on
			 */
			rsv = inode_get_rsv_space(inode);
			if (unlikely(rsv))
				dquot_resv_space(dquots[cnt], rsv);
		}
	}
out_lock:
	spin_unlock(&dq_data_lock);
out_put:
	/* Drop unused references */
	dqput_all(got);

	return ret;
}

int dquot_initialize(struct inode *inode)
{
	return __dquot_initialize(inode, -1);
}
EXPORT_SYMBOL(dquot_initialize);
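
/*
 * Filesystems typically call dquot_initialize() from paths that are about to
 * account something against the inode's owner: create, unlink, setattr and
 * open-for-write (see dquot_file_open() below). Calling it early, outside a
 * transaction, is preferred since dqget() may need to read or extend the
 * quota file.
 */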

/*
 * Release all quotas referenced by inode.
 *
 * This function can only be called on inode free or when converting a file
 * to a quota file. In either case there are no other users of the i_dquot
 * pointers, so we needn't call synchronize_srcu() after clearing i_dquot.
 */
static void __dquot_drop(struct inode *inode)
{
	int cnt;
	struct dquot **dquots = i_dquot(inode);
	struct dquot *put[MAXQUOTAS];

	spin_lock(&dq_data_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		put[cnt] = dquots[cnt];
		dquots[cnt] = NULL;
	}
	spin_unlock(&dq_data_lock);
	dqput_all(put);
}

void dquot_drop(struct inode *inode)
{
	struct dquot * const *dquots;
	int cnt;

	if (IS_NOQUOTA(inode))
		return;

	/*
	 * Test before calling to rule out calls from proc and such
	 * where we are not allowed to block. Note that this is
	 * actually reliable test even without the lock - the caller
	 * must assure that nobody can come after the DQUOT_DROP and
	 * add quota pointers back anyway.
	 */
	dquots = i_dquot(inode);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (dquots[cnt])
			break;
	}

	if (cnt < MAXQUOTAS)
		__dquot_drop(inode);
}
EXPORT_SYMBOL(dquot_drop);

/*
 * inode_reserved_space is managed internally by quota, and protected by
 * i_lock similar to i_blocks+i_bytes.
 */
static qsize_t *inode_reserved_space(struct inode *inode)
{
	/* Filesystem must explicitly define its own method in order to use
	 * quota reservation interface */
	BUG_ON(!inode->i_sb->dq_op->get_reserved_space);
	return inode->i_sb->dq_op->get_reserved_space(inode);
}

void inode_add_rsv_space(struct inode *inode, qsize_t number)
{
	spin_lock(&inode->i_lock);
	*inode_reserved_space(inode) += number;
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(inode_add_rsv_space);

void inode_claim_rsv_space(struct inode *inode, qsize_t number)
{
	spin_lock(&inode->i_lock);
	*inode_reserved_space(inode) -= number;
	__inode_add_bytes(inode, number);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(inode_claim_rsv_space);

void inode_reclaim_rsv_space(struct inode *inode, qsize_t number)
{
	spin_lock(&inode->i_lock);
	*inode_reserved_space(inode) += number;
	__inode_sub_bytes(inode, number);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(inode_reclaim_rsv_space);

void inode_sub_rsv_space(struct inode *inode, qsize_t number)
{
	spin_lock(&inode->i_lock);
	*inode_reserved_space(inode) -= number;
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(inode_sub_rsv_space);

static qsize_t inode_get_rsv_space(struct inode *inode)
{
	qsize_t ret;

	if (!inode->i_sb->dq_op->get_reserved_space)
		return 0;
	spin_lock(&inode->i_lock);
	ret = *inode_reserved_space(inode);
	spin_unlock(&inode->i_lock);
	return ret;
}

static void inode_incr_space(struct inode *inode, qsize_t number,
			     int reserve)
{
	if (reserve)
		inode_add_rsv_space(inode, number);
	else
		inode_add_bytes(inode, number);
}

static void inode_decr_space(struct inode *inode, qsize_t number, int reserve)
{
	if (reserve)
		inode_sub_rsv_space(inode, number);
	else
		inode_sub_bytes(inode, number);
}
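
/*
 * The reservation helpers above support delayed-allocation filesystems. A
 * rough sketch of the lifecycle, using the entry points defined below (and
 * their quotaops.h wrappers):
 *
 *	write begins   -> __dquot_alloc_space(inode, n, DQUOT_SPACE_RESERVE)
 *			  (checked against limits, counted as dqb_rsvspace)
 *	blocks mapped  -> dquot_claim_space_nodirty(inode, n)
 *			  (rsvspace -> curspace, i_bytes updated)
 *	write aborted  -> __dquot_free_space(inode, n, DQUOT_SPACE_RESERVE)
 *
 * dquot_reclaim_space_nodirty() goes the other way, turning already claimed
 * space back into a reservation.
 */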

/*
 * This function updates the i_blocks+i_bytes fields and the quota information
 * (together with appropriate checks).
 *
 * NOTE: We absolutely rely on the fact that caller dirties the inode
 * (usually helpers in quotaops.h care about this) and holds a handle for
 * the current transaction so that dquot write and inode write go into the
 * same transaction.
 */

/*
 * This operation can block, but only after everything is updated
 */
int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
{
	int cnt, ret = 0, index;
	struct dquot_warn warn[MAXQUOTAS];
	int reserve = flags & DQUOT_SPACE_RESERVE;
	struct dquot **dquots;

	if (!dquot_active(inode)) {
		inode_incr_space(inode, number, reserve);
		goto out;
	}

	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		warn[cnt].w_type = QUOTA_NL_NOWARN;

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&dq_data_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!dquots[cnt])
			continue;
		ret = check_bdq(dquots[cnt], number,
				!(flags & DQUOT_SPACE_WARN), &warn[cnt]);
		if (ret && !(flags & DQUOT_SPACE_NOFAIL)) {
			spin_unlock(&dq_data_lock);
			goto out_flush_warn;
		}
	}
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!dquots[cnt])
			continue;
		if (reserve)
			dquot_resv_space(dquots[cnt], number);
		else
			dquot_incr_space(dquots[cnt], number);
	}
	inode_incr_space(inode, number, reserve);
	spin_unlock(&dq_data_lock);

	if (reserve)
		goto out_flush_warn;
	mark_all_dquot_dirty(dquots);
out_flush_warn:
	srcu_read_unlock(&dquot_srcu, index);
	flush_warnings(warn);
out:
	return ret;
}
EXPORT_SYMBOL(__dquot_alloc_space);

/*
 * This operation can block, but only after everything is updated
 */
int dquot_alloc_inode(struct inode *inode)
{
	int cnt, ret = 0, index;
	struct dquot_warn warn[MAXQUOTAS];
	struct dquot * const *dquots;

	if (!dquot_active(inode))
		return 0;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		warn[cnt].w_type = QUOTA_NL_NOWARN;

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&dq_data_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!dquots[cnt])
			continue;
		ret = check_idq(dquots[cnt], 1, &warn[cnt]);
		if (ret)
			goto warn_put_all;
	}

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!dquots[cnt])
			continue;
		dquot_incr_inodes(dquots[cnt], 1);
	}

warn_put_all:
	spin_unlock(&dq_data_lock);
	if (ret == 0)
		mark_all_dquot_dirty(dquots);
	srcu_read_unlock(&dquot_srcu, index);
	flush_warnings(warn);
	return ret;
}
EXPORT_SYMBOL(dquot_alloc_inode);

/*
 * Convert in-memory reserved quotas to real consumed quotas
 */
int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
{
	struct dquot **dquots;
	int cnt, index;

	if (!dquot_active(inode)) {
		inode_claim_rsv_space(inode, number);
		return 0;
	}

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&dq_data_lock);
	/* Claim reserved quotas to allocated quotas */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (dquots[cnt])
			dquot_claim_reserved_space(dquots[cnt], number);
	}
	/* Update inode bytes */
	inode_claim_rsv_space(inode, number);
	spin_unlock(&dq_data_lock);
	mark_all_dquot_dirty(dquots);
	srcu_read_unlock(&dquot_srcu, index);
	return 0;
}
EXPORT_SYMBOL(dquot_claim_space_nodirty);

/*
 * Convert allocated space back to in-memory reserved quotas
 */
void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
{
	struct dquot **dquots;
	int cnt, index;

	if (!dquot_active(inode)) {
		inode_reclaim_rsv_space(inode, number);
		return;
	}

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&dq_data_lock);
	/* Return allocated quotas back to reserved quotas */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (dquots[cnt])
			dquot_reclaim_reserved_space(dquots[cnt], number);
	}
	/* Update inode bytes */
	inode_reclaim_rsv_space(inode, number);
	spin_unlock(&dq_data_lock);
	mark_all_dquot_dirty(dquots);
	srcu_read_unlock(&dquot_srcu, index);
	return;
}
EXPORT_SYMBOL(dquot_reclaim_space_nodirty);

/*
 * This operation can block, but only after everything is updated
 */
void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
{
	unsigned int cnt;
	struct dquot_warn warn[MAXQUOTAS];
	struct dquot **dquots;
	int reserve = flags & DQUOT_SPACE_RESERVE, index;

	if (!dquot_active(inode)) {
		inode_decr_space(inode, number, reserve);
		return;
	}

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&dq_data_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		int wtype;

		warn[cnt].w_type = QUOTA_NL_NOWARN;
		if (!dquots[cnt])
			continue;
		wtype = info_bdq_free(dquots[cnt], number);
		if (wtype != QUOTA_NL_NOWARN)
			prepare_warning(&warn[cnt], dquots[cnt], wtype);
		if (reserve)
			dquot_free_reserved_space(dquots[cnt], number);
		else
			dquot_decr_space(dquots[cnt], number);
	}
	inode_decr_space(inode, number, reserve);
	spin_unlock(&dq_data_lock);

	if (reserve)
		goto out_unlock;
	mark_all_dquot_dirty(dquots);
out_unlock:
	srcu_read_unlock(&dquot_srcu, index);
	flush_warnings(warn);
}
EXPORT_SYMBOL(__dquot_free_space);

/*
 * This operation can block, but only after everything is updated
 */
void dquot_free_inode(struct inode *inode)
{
	unsigned int cnt;
	struct dquot_warn warn[MAXQUOTAS];
	struct dquot * const *dquots;
	int index;

	if (!dquot_active(inode))
		return;

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&dq_data_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		int wtype;

		warn[cnt].w_type = QUOTA_NL_NOWARN;
		if (!dquots[cnt])
			continue;
		wtype = info_idq_free(dquots[cnt], 1);
		if (wtype != QUOTA_NL_NOWARN)
			prepare_warning(&warn[cnt], dquots[cnt], wtype);
		dquot_decr_inodes(dquots[cnt], 1);
	}
	spin_unlock(&dq_data_lock);
	mark_all_dquot_dirty(dquots);
	srcu_read_unlock(&dquot_srcu, index);
	flush_warnings(warn);
}
EXPORT_SYMBOL(dquot_free_inode);

/*
 * Transfer the number of inodes and blocks from one diskquota to another.
 * On success, dquot references in transfer_to are consumed and references
 * to original dquots that need to be released are placed there. On failure,
 * references are kept untouched.
 *
 * This operation can block, but only after everything is updated
 * A transaction must be started when entering this function.
 *
 * We are holding reference on transfer_from & transfer_to, no need to
 * protect them by srcu_read_lock().
 */
int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
{
	qsize_t space, cur_space;
	qsize_t rsv_space = 0;
	struct dquot *transfer_from[MAXQUOTAS] = {};
	int cnt, ret = 0;
	char is_valid[MAXQUOTAS] = {};
	struct dquot_warn warn_to[MAXQUOTAS];
	struct dquot_warn warn_from_inodes[MAXQUOTAS];
	struct dquot_warn warn_from_space[MAXQUOTAS];

	if (IS_NOQUOTA(inode))
		return 0;
	/* Initialize the arrays */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		warn_to[cnt].w_type = QUOTA_NL_NOWARN;
		warn_from_inodes[cnt].w_type = QUOTA_NL_NOWARN;
		warn_from_space[cnt].w_type = QUOTA_NL_NOWARN;
	}

	spin_lock(&dq_data_lock);
	if (IS_NOQUOTA(inode)) {	/* File without quota accounting? */
		spin_unlock(&dq_data_lock);
		return 0;
	}
	cur_space = inode_get_bytes(inode);
	rsv_space = inode_get_rsv_space(inode);
	space = cur_space + rsv_space;
	/* Build the transfer_from list and check the limits */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		/*
		 * Skip changes for same uid or gid or for turned off quota-type.
		 */
		if (!transfer_to[cnt])
			continue;
		/* Avoid races with quotaoff() */
		if (!sb_has_quota_active(inode->i_sb, cnt))
			continue;
		is_valid[cnt] = 1;
		transfer_from[cnt] = i_dquot(inode)[cnt];
		ret = check_idq(transfer_to[cnt], 1, &warn_to[cnt]);
		if (ret)
			goto over_quota;
		ret = check_bdq(transfer_to[cnt], space, 0, &warn_to[cnt]);
		if (ret)
			goto over_quota;
	}

	/*
	 * Finally perform the needed transfer from transfer_from to transfer_to
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!is_valid[cnt])
			continue;
		/* Due to IO error we might not have transfer_from[] structure */
		if (transfer_from[cnt]) {
			int wtype;

			wtype = info_idq_free(transfer_from[cnt], 1);
			if (wtype != QUOTA_NL_NOWARN)
				prepare_warning(&warn_from_inodes[cnt],
						transfer_from[cnt], wtype);
			wtype = info_bdq_free(transfer_from[cnt], space);
			if (wtype != QUOTA_NL_NOWARN)
				prepare_warning(&warn_from_space[cnt],
						transfer_from[cnt], wtype);
			dquot_decr_inodes(transfer_from[cnt], 1);
			dquot_decr_space(transfer_from[cnt], cur_space);
			dquot_free_reserved_space(transfer_from[cnt],
						  rsv_space);
		}

		dquot_incr_inodes(transfer_to[cnt], 1);
		dquot_incr_space(transfer_to[cnt], cur_space);
		dquot_resv_space(transfer_to[cnt], rsv_space);

		i_dquot(inode)[cnt] = transfer_to[cnt];
	}
	spin_unlock(&dq_data_lock);

	mark_all_dquot_dirty(transfer_from);
	mark_all_dquot_dirty(transfer_to);
	flush_warnings(warn_to);
	flush_warnings(warn_from_inodes);
	flush_warnings(warn_from_space);
	/* Pass back references to put */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (is_valid[cnt])
			transfer_to[cnt] = transfer_from[cnt];
	return 0;
over_quota:
	spin_unlock(&dq_data_lock);
	flush_warnings(warn_to);
	return ret;
}
EXPORT_SYMBOL(__dquot_transfer);
/*
 * Wrapper for transferring ownership of an inode for uid/gid only.
 * Called from FSXXX_setattr()
 */
int dquot_transfer(struct inode *inode, struct iattr *iattr)
{
        struct dquot *transfer_to[MAXQUOTAS] = {};
        struct dquot *dquot;
        struct super_block *sb = inode->i_sb;
        int ret;

        if (!dquot_active(inode))
                return 0;

        if (iattr->ia_valid & ATTR_UID &&
            !uid_eq(iattr->ia_uid, inode->i_uid)) {
                dquot = dqget(sb, make_kqid_uid(iattr->ia_uid));
                if (IS_ERR(dquot)) {
                        if (PTR_ERR(dquot) != -ESRCH) {
                                ret = PTR_ERR(dquot);
                                goto out_put;
                        }
                        dquot = NULL;
                }
                transfer_to[USRQUOTA] = dquot;
        }
        if (iattr->ia_valid & ATTR_GID &&
            !gid_eq(iattr->ia_gid, inode->i_gid)) {
                dquot = dqget(sb, make_kqid_gid(iattr->ia_gid));
                if (IS_ERR(dquot)) {
                        if (PTR_ERR(dquot) != -ESRCH) {
                                ret = PTR_ERR(dquot);
                                goto out_put;
                        }
                        dquot = NULL;
                }
                transfer_to[GRPQUOTA] = dquot;
        }
        ret = __dquot_transfer(inode, transfer_to);
out_put:
        dqput_all(transfer_to);
        return ret;
}
EXPORT_SYMBOL(dquot_transfer);

/*
 * Write info of quota file to disk
 */
int dquot_commit_info(struct super_block *sb, int type)
{
        int ret;
        struct quota_info *dqopt = sb_dqopt(sb);

        mutex_lock(&dqopt->dqio_mutex);
        ret = dqopt->ops[type]->write_file_info(sb, type);
        mutex_unlock(&dqopt->dqio_mutex);
        return ret;
}
EXPORT_SYMBOL(dquot_commit_info);

/*
 * Definitions of diskquota operations.
 */
const struct dquot_operations dquot_operations = {
        .write_dquot    = dquot_commit,
        .acquire_dquot  = dquot_acquire,
        .release_dquot  = dquot_release,
        .mark_dirty     = dquot_mark_dquot_dirty,
        .write_info     = dquot_commit_info,
        .alloc_dquot    = dquot_alloc,
        .destroy_dquot  = dquot_destroy,
};
EXPORT_SYMBOL(dquot_operations);

/*
 * Generic helper for ->open on filesystems supporting disk quotas.
 */
int dquot_file_open(struct inode *inode, struct file *file)
{
        int error;

        error = generic_file_open(inode, file);
        if (!error && (file->f_mode & FMODE_WRITE))
                dquot_initialize(inode);
        return error;
}
EXPORT_SYMBOL(dquot_file_open);
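
/*
 * Illustrative sketch (not part of this file): a filesystem opts into the
 * generic dquot machinery from its fill_super(), pointing the superblock
 * at dquot_operations above and at one of the quotactl_ops tables defined
 * later in this file, and routing regular file opens through
 * dquot_file_open(). myfs_fill_super() and myfs_file_operations are
 * hypothetical names.
 *
 *      static int myfs_fill_super(struct super_block *sb, void *data,
 *                                 int silent)
 *      {
 *              ...
 *              sb->dq_op = &dquot_operations;
 *              sb->s_qcop = &dquot_quotactl_ops;
 *              sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
 *              ...
 *      }
 *
 *      const struct file_operations myfs_file_operations = {
 *              .open = dquot_file_open,
 *              ...
 *      };
 */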
/*
 * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount)
 */
int dquot_disable(struct super_block *sb, int type, unsigned int flags)
{
        int cnt, ret = 0;
        struct quota_info *dqopt = sb_dqopt(sb);
        struct inode *toputinode[MAXQUOTAS];

        /* Cannot turn off usage accounting without turning off limits, or
         * suspend quotas and simultaneously turn quotas off. */
        if ((flags & DQUOT_USAGE_ENABLED && !(flags & DQUOT_LIMITS_ENABLED))
            || (flags & DQUOT_SUSPENDED && flags & (DQUOT_LIMITS_ENABLED |
                                                    DQUOT_USAGE_ENABLED)))
                return -EINVAL;

        /* We need to serialize quota_off() for the device */
        mutex_lock(&dqopt->dqonoff_mutex);

        /*
         * Skip everything if there's nothing to do. We have to do this because
         * sometimes we are called when fill_super() failed and calling
         * sync_fs() in such cases does no good.
         */
        if (!sb_any_quota_loaded(sb)) {
                mutex_unlock(&dqopt->dqonoff_mutex);
                return 0;
        }
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                toputinode[cnt] = NULL;
                if (type != -1 && cnt != type)
                        continue;
                if (!sb_has_quota_loaded(sb, cnt))
                        continue;

                if (flags & DQUOT_SUSPENDED) {
                        spin_lock(&dq_state_lock);
                        dqopt->flags |=
                                dquot_state_flag(DQUOT_SUSPENDED, cnt);
                        spin_unlock(&dq_state_lock);
                } else {
                        spin_lock(&dq_state_lock);
                        dqopt->flags &= ~dquot_state_flag(flags, cnt);
                        /* Turning off suspended quotas? */
                        if (!sb_has_quota_loaded(sb, cnt) &&
                            sb_has_quota_suspended(sb, cnt)) {
                                dqopt->flags &= ~dquot_state_flag(
                                                        DQUOT_SUSPENDED, cnt);
                                spin_unlock(&dq_state_lock);
                                iput(dqopt->files[cnt]);
                                dqopt->files[cnt] = NULL;
                                continue;
                        }
                        spin_unlock(&dq_state_lock);
                }

                /* We still have to keep quota loaded? */
                if (sb_has_quota_loaded(sb, cnt) && !(flags & DQUOT_SUSPENDED))
                        continue;

                /* Note: these are blocking operations */
                drop_dquot_ref(sb, cnt);
                invalidate_dquots(sb, cnt);
                /*
                 * Now all dquots should be invalidated, all writes done, so we
                 * should be the only users of the info. No locks needed.
                 */
                if (info_dirty(&dqopt->info[cnt]))
                        sb->dq_op->write_info(sb, cnt);
                if (dqopt->ops[cnt]->free_file_info)
                        dqopt->ops[cnt]->free_file_info(sb, cnt);
                put_quota_format(dqopt->info[cnt].dqi_format);

                toputinode[cnt] = dqopt->files[cnt];
                if (!sb_has_quota_loaded(sb, cnt))
                        dqopt->files[cnt] = NULL;
                dqopt->info[cnt].dqi_flags = 0;
                dqopt->info[cnt].dqi_igrace = 0;
                dqopt->info[cnt].dqi_bgrace = 0;
                dqopt->ops[cnt] = NULL;
        }
        mutex_unlock(&dqopt->dqonoff_mutex);

        /* Skip syncing and setting flags if quota files are hidden */
        if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
                goto put_inodes;

        /* Sync the superblock so that buffers with quota data are written to
         * disk (and so userspace sees correct data afterwards). */
        if (sb->s_op->sync_fs)
                sb->s_op->sync_fs(sb, 1);
        sync_blockdev(sb->s_bdev);
        /* Now the quota files are just ordinary files and we can set the
         * inode flags back. Moreover we discard the pagecache so that
         * userspace sees the writes we did bypassing the pagecache. We
         * must also discard the blockdev buffers so that we see the
         * changes done by userspace on the next quotaon() */
        for (cnt = 0; cnt < MAXQUOTAS; cnt++)
                if (toputinode[cnt]) {
                        mutex_lock(&dqopt->dqonoff_mutex);
                        /* If quota was reenabled in the meantime, we have
                         * nothing to do */
                        if (!sb_has_quota_loaded(sb, cnt)) {
                                mutex_lock(&toputinode[cnt]->i_mutex);
                                toputinode[cnt]->i_flags &= ~(S_IMMUTABLE |
                                                  S_NOATIME | S_NOQUOTA);
                                truncate_inode_pages(&toputinode[cnt]->i_data,
                                                     0);
                                mutex_unlock(&toputinode[cnt]->i_mutex);
                                mark_inode_dirty_sync(toputinode[cnt]);
                        }
                        mutex_unlock(&dqopt->dqonoff_mutex);
                }
        if (sb->s_bdev)
                invalidate_bdev(sb->s_bdev);
put_inodes:
        for (cnt = 0; cnt < MAXQUOTAS; cnt++)
                if (toputinode[cnt]) {
                        /* On remount RO, we keep the inode pointer so that we
                         * can reenable quota on the subsequent remount RW. We
                         * have to check the 'flags' variable and not use the
                         * sb_has_ functions because another quotaon / quotaoff
                         * could change global state before we got here. We
                         * refuse to suspend quotas when there is a pending
                         * delete on the quota file... */
                        if (!(flags & DQUOT_SUSPENDED))
                                iput(toputinode[cnt]);
                        else if (!toputinode[cnt]->i_nlink)
                                ret = -EBUSY;
                }
        return ret;
}
EXPORT_SYMBOL(dquot_disable);
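
/*
 * Illustrative examples: given the flag checks at the top of
 * dquot_disable() above, the three valid ways to call it are:
 *
 *      dquot_disable(sb, USRQUOTA,
 *                    DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
 *              - turn user quotas fully off
 *
 *      dquot_disable(sb, -1, DQUOT_LIMITS_ENABLED);
 *              - stop enforcing limits for all types, keep accounting
 *
 *      dquot_disable(sb, -1, DQUOT_SUSPENDED);
 *              - suspend all types, e.g. before a remount read-only
 *
 * Passing DQUOT_USAGE_ENABLED without DQUOT_LIMITS_ENABLED, or combining
 * DQUOT_SUSPENDED with either of the other flags, returns -EINVAL.
 */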
int dquot_quota_off(struct super_block *sb, int type)
{
        return dquot_disable(sb, type,
                             DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
}
EXPORT_SYMBOL(dquot_quota_off);
/*
 * Turn quotas on for a device
 */

/*
 * Helper function to turn quotas on when we already have the inode of the
 * quota file and no quota information is loaded.
 */
static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
        unsigned int flags)
{
        struct quota_format_type *fmt = find_quota_format(format_id);
        struct super_block *sb = inode->i_sb;
        struct quota_info *dqopt = sb_dqopt(sb);
        int error;
        int oldflags = -1;

        if (!fmt)
                return -ESRCH;
        if (!S_ISREG(inode->i_mode)) {
                error = -EACCES;
                goto out_fmt;
        }
        if (IS_RDONLY(inode)) {
                error = -EROFS;
                goto out_fmt;
        }
        if (!sb->s_op->quota_write || !sb->s_op->quota_read ||
            (type == PRJQUOTA && sb->dq_op->get_projid == NULL)) {
                error = -EINVAL;
                goto out_fmt;
        }
        /* Usage always has to be set... */
        if (!(flags & DQUOT_USAGE_ENABLED)) {
                error = -EINVAL;
                goto out_fmt;
        }

        if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
                /* As we bypass the pagecache we must now flush all the
                 * dirty data and invalidate caches so that the kernel sees
                 * changes from userspace. It is not enough to just flush
                 * the quota file since, if blocksize < pagesize, invalidation
                 * of the cache could fail because of other unrelated dirty
                 * data */
                sync_filesystem(sb);
                invalidate_bdev(sb->s_bdev);
        }
        mutex_lock(&dqopt->dqonoff_mutex);
        if (sb_has_quota_loaded(sb, type)) {
                error = -EBUSY;
                goto out_lock;
        }

        if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
                /* We don't want quota and atime on quota files (deadlocks
                 * possible). Also nobody should write to the file - we use
                 * special IO operations which ignore the immutable bit. */
                mutex_lock(&inode->i_mutex);
                oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE |
                                             S_NOQUOTA);
                inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE;
                mutex_unlock(&inode->i_mutex);
                /*
                 * When S_NOQUOTA is set, remove dquot references as no more
                 * references can be added
                 */
                __dquot_drop(inode);
        }

        error = -EIO;
        dqopt->files[type] = igrab(inode);
        if (!dqopt->files[type])
                goto out_lock;
        error = -EINVAL;
        if (!fmt->qf_ops->check_quota_file(sb, type))
                goto out_file_init;

        dqopt->ops[type] = fmt->qf_ops;
        dqopt->info[type].dqi_format = fmt;
        dqopt->info[type].dqi_fmt_id = format_id;
        INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
        mutex_lock(&dqopt->dqio_mutex);
        error = dqopt->ops[type]->read_file_info(sb, type);
        if (error < 0) {
                mutex_unlock(&dqopt->dqio_mutex);
                goto out_file_init;
        }
        if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
                dqopt->info[type].dqi_flags |= DQF_SYS_FILE;
        mutex_unlock(&dqopt->dqio_mutex);
        spin_lock(&dq_state_lock);
        dqopt->flags |= dquot_state_flag(flags, type);
        spin_unlock(&dq_state_lock);

        add_dquot_ref(sb, type);
        mutex_unlock(&dqopt->dqonoff_mutex);

        return 0;

out_file_init:
        dqopt->files[type] = NULL;
        iput(inode);
out_lock:
        if (oldflags != -1) {
                mutex_lock(&inode->i_mutex);
                /* Set the flags back (in the case of accidental quotaon()
                 * on a wrong file we don't want to mess up the flags) */
                inode->i_flags &= ~(S_NOATIME | S_NOQUOTA | S_IMMUTABLE);
                inode->i_flags |= oldflags;
                mutex_unlock(&inode->i_mutex);
        }
        mutex_unlock(&dqopt->dqonoff_mutex);
out_fmt:
        put_quota_format(fmt);

        return error;
}

/* Reenable quotas on remount RW */
int dquot_resume(struct super_block *sb, int type)
{
        struct quota_info *dqopt = sb_dqopt(sb);
        struct inode *inode;
        int ret = 0, cnt;
        unsigned int flags;

        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                if (type != -1 && cnt != type)
                        continue;

                mutex_lock(&dqopt->dqonoff_mutex);
                if (!sb_has_quota_suspended(sb, cnt)) {
                        mutex_unlock(&dqopt->dqonoff_mutex);
                        continue;
                }
                inode = dqopt->files[cnt];
                dqopt->files[cnt] = NULL;
                spin_lock(&dq_state_lock);
                flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED |
                                                        DQUOT_LIMITS_ENABLED,
                                                        cnt);
                dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, cnt);
                spin_unlock(&dq_state_lock);
                mutex_unlock(&dqopt->dqonoff_mutex);

                flags = dquot_generic_flag(flags, cnt);
                ret = vfs_load_quota_inode(inode, cnt,
                                           dqopt->info[cnt].dqi_fmt_id, flags);
                iput(inode);
        }

        return ret;
}
EXPORT_SYMBOL(dquot_resume);
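
/*
 * Illustrative sketch (not part of this file): dquot_resume() above pairs
 * with the dquot_suspend() wrapper from <linux/quotaops.h> in a
 * filesystem's ->remount_fs() method. myfs_remount() is hypothetical.
 *
 *      static int myfs_remount(struct super_block *sb, int *flags, char *data)
 *      {
 *              int err;
 *
 *              if (*flags & MS_RDONLY)
 *                      err = dquot_suspend(sb, -1);
 *              else
 *                      err = dquot_resume(sb, -1);
 *              return err;
 *      }
 */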
int dquot_quota_on(struct super_block *sb, int type, int format_id,
                   struct path *path)
{
        int error = security_quota_on(path->dentry);

        if (error)
                return error;
        /* Quota file not on the same filesystem? */
        if (path->dentry->d_sb != sb)
                error = -EXDEV;
        else
                error = vfs_load_quota_inode(d_inode(path->dentry), type,
                                             format_id, DQUOT_USAGE_ENABLED |
                                             DQUOT_LIMITS_ENABLED);
        return error;
}
EXPORT_SYMBOL(dquot_quota_on);

/*
 * More powerful function for turning on quotas, allowing setting
 * of individual quota flags
 */
int dquot_enable(struct inode *inode, int type, int format_id,
                 unsigned int flags)
{
        int ret = 0;
        struct super_block *sb = inode->i_sb;
        struct quota_info *dqopt = sb_dqopt(sb);

        /* Just unsuspend quotas? */
        BUG_ON(flags & DQUOT_SUSPENDED);

        if (!flags)
                return 0;
        /* Just updating flags needed? */
        if (sb_has_quota_loaded(sb, type)) {
                mutex_lock(&dqopt->dqonoff_mutex);
                /* Now do a reliable test... */
                if (!sb_has_quota_loaded(sb, type)) {
                        mutex_unlock(&dqopt->dqonoff_mutex);
                        goto load_quota;
                }
                if (flags & DQUOT_USAGE_ENABLED &&
                    sb_has_quota_usage_enabled(sb, type)) {
                        ret = -EBUSY;
                        goto out_lock;
                }
                if (flags & DQUOT_LIMITS_ENABLED &&
                    sb_has_quota_limits_enabled(sb, type)) {
                        ret = -EBUSY;
                        goto out_lock;
                }
                spin_lock(&dq_state_lock);
                sb_dqopt(sb)->flags |= dquot_state_flag(flags, type);
                spin_unlock(&dq_state_lock);
out_lock:
                mutex_unlock(&dqopt->dqonoff_mutex);
                return ret;
        }

load_quota:
        return vfs_load_quota_inode(inode, type, format_id, flags);
}
EXPORT_SYMBOL(dquot_enable);

/*
 * This function is used when a filesystem needs to initialize quotas at
 * mount time.
 */
int dquot_quota_on_mount(struct super_block *sb, char *qf_name,
                         int format_id, int type)
{
        struct dentry *dentry;
        int error;

        mutex_lock(&d_inode(sb->s_root)->i_mutex);
        dentry = lookup_one_len(qf_name, sb->s_root, strlen(qf_name));
        mutex_unlock(&d_inode(sb->s_root)->i_mutex);
        if (IS_ERR(dentry))
                return PTR_ERR(dentry);

        if (d_really_is_negative(dentry)) {
                error = -ENOENT;
                goto out;
        }

        error = security_quota_on(dentry);
        if (!error)
                error = vfs_load_quota_inode(d_inode(dentry), type, format_id,
                                DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);

out:
        dput(dentry);
        return error;
}
EXPORT_SYMBOL(dquot_quota_on_mount);
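
/*
 * Illustrative example (assumptions noted): a filesystem using journalled
 * quota would call dquot_quota_on_mount() during mount with the quota
 * file name it parsed from its mount options, e.g.:
 *
 *      err = dquot_quota_on_mount(sb, "aquota.user", QFMT_VFS_V1, USRQUOTA);
 *
 * where "aquota.user" and QFMT_VFS_V1 stand in for whatever qf_name and
 * format id the filesystem was configured with.
 */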
static int dquot_quota_enable(struct super_block *sb, unsigned int flags)
{
        int ret;
        int type;
        struct quota_info *dqopt = sb_dqopt(sb);

        if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE))
                return -ENOSYS;
        /* Accounting cannot be turned on while fs is mounted */
        flags &= ~(FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT);
        if (!flags)
                return -EINVAL;
        for (type = 0; type < MAXQUOTAS; type++) {
                if (!(flags & qtype_enforce_flag(type)))
                        continue;
                /* Can't enforce without accounting */
                if (!sb_has_quota_usage_enabled(sb, type))
                        return -EINVAL;
                ret = dquot_enable(dqopt->files[type], type,
                                   dqopt->info[type].dqi_fmt_id,
                                   DQUOT_LIMITS_ENABLED);
                if (ret < 0)
                        goto out_err;
        }
        return 0;
out_err:
        /* Back out the enforcement enablement we already did */
        for (type--; type >= 0; type--) {
                if (flags & qtype_enforce_flag(type))
                        dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
        }
        /* Error code translation for better compatibility with XFS */
        if (ret == -EBUSY)
                ret = -EEXIST;
        return ret;
}

static int dquot_quota_disable(struct super_block *sb, unsigned int flags)
{
        int ret;
        int type;
        struct quota_info *dqopt = sb_dqopt(sb);

        if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE))
                return -ENOSYS;
        /*
         * We don't support turning off accounting via quotactl. In principle
         * the quota infrastructure can do this, but filesystems don't expect
         * userspace to be able to do it.
         */
        if (flags &
                  (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT))
                return -EOPNOTSUPP;

        /* Filter out limits not enabled */
        for (type = 0; type < MAXQUOTAS; type++)
                if (!sb_has_quota_limits_enabled(sb, type))
                        flags &= ~qtype_enforce_flag(type);
        /* Nothing left? */
        if (!flags)
                return -EEXIST;
        for (type = 0; type < MAXQUOTAS; type++) {
                if (flags & qtype_enforce_flag(type)) {
                        ret = dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
                        if (ret < 0)
                                goto out_err;
                }
        }
        return 0;
out_err:
        /* Back out the enforcement disabling we already did */
        for (type--; type >= 0; type--) {
                if (flags & qtype_enforce_flag(type))
                        dquot_enable(dqopt->files[type], type,
                                     dqopt->info[type].dqi_fmt_id,
                                     DQUOT_LIMITS_ENABLED);
        }
        return ret;
}

/* Generic routine for getting common part of quota structure */
static void do_get_dqblk(struct dquot *dquot, struct qc_dqblk *di)
{
        struct mem_dqblk *dm = &dquot->dq_dqb;

        memset(di, 0, sizeof(*di));
        spin_lock(&dq_data_lock);
        di->d_spc_hardlimit = dm->dqb_bhardlimit;
        di->d_spc_softlimit = dm->dqb_bsoftlimit;
        di->d_ino_hardlimit = dm->dqb_ihardlimit;
        di->d_ino_softlimit = dm->dqb_isoftlimit;
        di->d_space = dm->dqb_curspace + dm->dqb_rsvspace;
        di->d_ino_count = dm->dqb_curinodes;
        di->d_spc_timer = dm->dqb_btime;
        di->d_ino_timer = dm->dqb_itime;
        spin_unlock(&dq_data_lock);
}

int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
                    struct qc_dqblk *di)
{
        struct dquot *dquot;

        dquot = dqget(sb, qid);
        if (IS_ERR(dquot))
                return PTR_ERR(dquot);
        do_get_dqblk(dquot, di);
        dqput(dquot);

        return 0;
}
EXPORT_SYMBOL(dquot_get_dqblk);
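
/*
 * Illustrative sketch (not part of this file): reading usage and limits
 * for one user through dquot_get_dqblk() above. myfs_over_space_limit()
 * is a hypothetical helper; note that a zero d_spc_hardlimit means "no
 * limit" and that d_space already includes reserved space.
 *
 *      static bool myfs_over_space_limit(struct super_block *sb, kuid_t uid)
 *      {
 *              struct qc_dqblk di;
 *
 *              if (dquot_get_dqblk(sb, make_kqid_uid(uid), &di))
 *                      return false;
 *              return di.d_spc_hardlimit && di.d_space >= di.d_spc_hardlimit;
 *      }
 */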
#define VFS_QC_MASK \
        (QC_SPACE | QC_SPC_SOFT | QC_SPC_HARD | \
         QC_INO_COUNT | QC_INO_SOFT | QC_INO_HARD | \
         QC_SPC_TIMER | QC_INO_TIMER)

/* Generic routine for setting common part of quota structure */
static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di)
{
        struct mem_dqblk *dm = &dquot->dq_dqb;
        int check_blim = 0, check_ilim = 0;
        struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];

        if (di->d_fieldmask & ~VFS_QC_MASK)
                return -EINVAL;

        if (((di->d_fieldmask & QC_SPC_SOFT) &&
             di->d_spc_softlimit > dqi->dqi_max_spc_limit) ||
            ((di->d_fieldmask & QC_SPC_HARD) &&
             di->d_spc_hardlimit > dqi->dqi_max_spc_limit) ||
            ((di->d_fieldmask & QC_INO_SOFT) &&
             (di->d_ino_softlimit > dqi->dqi_max_ino_limit)) ||
            ((di->d_fieldmask & QC_INO_HARD) &&
             (di->d_ino_hardlimit > dqi->dqi_max_ino_limit)))
                return -ERANGE;

        spin_lock(&dq_data_lock);
        if (di->d_fieldmask & QC_SPACE) {
                dm->dqb_curspace = di->d_space - dm->dqb_rsvspace;
                check_blim = 1;
                set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
        }

        if (di->d_fieldmask & QC_SPC_SOFT)
                dm->dqb_bsoftlimit = di->d_spc_softlimit;
        if (di->d_fieldmask & QC_SPC_HARD)
                dm->dqb_bhardlimit = di->d_spc_hardlimit;
        if (di->d_fieldmask & (QC_SPC_SOFT | QC_SPC_HARD)) {
                check_blim = 1;
                set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
        }

        if (di->d_fieldmask & QC_INO_COUNT) {
                dm->dqb_curinodes = di->d_ino_count;
                check_ilim = 1;
                set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
        }

        if (di->d_fieldmask & QC_INO_SOFT)
                dm->dqb_isoftlimit = di->d_ino_softlimit;
        if (di->d_fieldmask & QC_INO_HARD)
                dm->dqb_ihardlimit = di->d_ino_hardlimit;
        if (di->d_fieldmask & (QC_INO_SOFT | QC_INO_HARD)) {
                check_ilim = 1;
                set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
        }

        if (di->d_fieldmask & QC_SPC_TIMER) {
                dm->dqb_btime = di->d_spc_timer;
                check_blim = 1;
                set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
        }

        if (di->d_fieldmask & QC_INO_TIMER) {
                dm->dqb_itime = di->d_ino_timer;
                check_ilim = 1;
                set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
        }

        if (check_blim) {
                if (!dm->dqb_bsoftlimit ||
                    dm->dqb_curspace < dm->dqb_bsoftlimit) {
                        dm->dqb_btime = 0;
                        clear_bit(DQ_BLKS_B, &dquot->dq_flags);
                } else if (!(di->d_fieldmask & QC_SPC_TIMER))
                        /* Set grace only if user hasn't provided his own... */
                        dm->dqb_btime = get_seconds() + dqi->dqi_bgrace;
        }
        if (check_ilim) {
                if (!dm->dqb_isoftlimit ||
                    dm->dqb_curinodes < dm->dqb_isoftlimit) {
                        dm->dqb_itime = 0;
                        clear_bit(DQ_INODES_B, &dquot->dq_flags);
                } else if (!(di->d_fieldmask & QC_INO_TIMER))
                        /* Set grace only if user hasn't provided his own... */
                        dm->dqb_itime = get_seconds() + dqi->dqi_igrace;
        }
        if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit ||
            dm->dqb_isoftlimit)
                clear_bit(DQ_FAKE_B, &dquot->dq_flags);
        else
                set_bit(DQ_FAKE_B, &dquot->dq_flags);
        spin_unlock(&dq_data_lock);
        mark_dquot_dirty(dquot);

        return 0;
}
int dquot_set_dqblk(struct super_block *sb, struct kqid qid,
                    struct qc_dqblk *di)
{
        struct dquot *dquot;
        int rc;

        dquot = dqget(sb, qid);
        if (IS_ERR(dquot)) {
                rc = PTR_ERR(dquot);
                goto out;
        }
        rc = do_set_dqblk(dquot, di);
        dqput(dquot);
out:
        return rc;
}
EXPORT_SYMBOL(dquot_set_dqblk);
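
/*
 * Illustrative sketch (not part of this file): setting block limits for
 * one user via dquot_set_dqblk(). The 1 GiB / 2 GiB values are arbitrary
 * and myfs_set_space_limits() is hypothetical. Only the fields named in
 * d_fieldmask are applied, and, per do_set_dqblk() above, lowering the
 * soft limit below current usage starts the grace period unless the
 * caller also sets QC_SPC_TIMER.
 *
 *      static int myfs_set_space_limits(struct super_block *sb, kuid_t uid)
 *      {
 *              struct qc_dqblk di = {
 *                      .d_fieldmask     = QC_SPC_SOFT | QC_SPC_HARD,
 *                      .d_spc_softlimit = 1ULL << 30,
 *                      .d_spc_hardlimit = 2ULL << 30,
 *              };
 *
 *              return dquot_set_dqblk(sb, make_kqid_uid(uid), &di);
 *      }
 */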
/* Generic routine for getting common part of quota file information */
int dquot_get_state(struct super_block *sb, struct qc_state *state)
{
        struct mem_dqinfo *mi;
        struct qc_type_state *tstate;
        struct quota_info *dqopt = sb_dqopt(sb);
        int type;

        mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
        memset(state, 0, sizeof(*state));
        for (type = 0; type < MAXQUOTAS; type++) {
                if (!sb_has_quota_active(sb, type))
                        continue;
                tstate = state->s_state + type;
                mi = sb_dqopt(sb)->info + type;
                tstate->flags = QCI_ACCT_ENABLED;
                spin_lock(&dq_data_lock);
                if (mi->dqi_flags & DQF_SYS_FILE)
                        tstate->flags |= QCI_SYSFILE;
                if (mi->dqi_flags & DQF_ROOT_SQUASH)
                        tstate->flags |= QCI_ROOT_SQUASH;
                if (sb_has_quota_limits_enabled(sb, type))
                        tstate->flags |= QCI_LIMITS_ENFORCED;
                tstate->spc_timelimit = mi->dqi_bgrace;
                tstate->ino_timelimit = mi->dqi_igrace;
                tstate->ino = dqopt->files[type]->i_ino;
                tstate->blocks = dqopt->files[type]->i_blocks;
                tstate->nextents = 1;   /* We don't know... */
                spin_unlock(&dq_data_lock);
        }
        mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
        return 0;
}
EXPORT_SYMBOL(dquot_get_state);

/* Generic routine for setting common part of quota file information */
int dquot_set_dqinfo(struct super_block *sb, int type, struct qc_info *ii)
{
        struct mem_dqinfo *mi;
        int err = 0;

        if ((ii->i_fieldmask & QC_WARNS_MASK) ||
            (ii->i_fieldmask & QC_RT_SPC_TIMER))
                return -EINVAL;
        mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
        if (!sb_has_quota_active(sb, type)) {
                err = -ESRCH;
                goto out;
        }
        mi = sb_dqopt(sb)->info + type;
        if (ii->i_fieldmask & QC_FLAGS) {
                if ((ii->i_flags & QCI_ROOT_SQUASH &&
                     mi->dqi_format->qf_fmt_id != QFMT_VFS_OLD)) {
                        err = -EINVAL;
                        goto out;
                }
        }
        spin_lock(&dq_data_lock);
        if (ii->i_fieldmask & QC_SPC_TIMER)
                mi->dqi_bgrace = ii->i_spc_timelimit;
        if (ii->i_fieldmask & QC_INO_TIMER)
                mi->dqi_igrace = ii->i_ino_timelimit;
        if (ii->i_fieldmask & QC_FLAGS) {
                if (ii->i_flags & QCI_ROOT_SQUASH)
                        mi->dqi_flags |= DQF_ROOT_SQUASH;
                else
                        mi->dqi_flags &= ~DQF_ROOT_SQUASH;
        }
        spin_unlock(&dq_data_lock);
        mark_info_dirty(sb, type);
        /* Force write to disk */
        sb->dq_op->write_info(sb, type);
out:
        mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
        return err;
}
EXPORT_SYMBOL(dquot_set_dqinfo);

const struct quotactl_ops dquot_quotactl_ops = {
        .quota_on       = dquot_quota_on,
        .quota_off      = dquot_quota_off,
        .quota_sync     = dquot_quota_sync,
        .get_state      = dquot_get_state,
        .set_info       = dquot_set_dqinfo,
        .get_dqblk      = dquot_get_dqblk,
        .set_dqblk      = dquot_set_dqblk
};
EXPORT_SYMBOL(dquot_quotactl_ops);

const struct quotactl_ops dquot_quotactl_sysfile_ops = {
        .quota_enable   = dquot_quota_enable,
        .quota_disable  = dquot_quota_disable,
        .quota_sync     = dquot_quota_sync,
        .get_state      = dquot_get_state,
        .set_info       = dquot_set_dqinfo,
        .get_dqblk      = dquot_get_dqblk,
        .set_dqblk      = dquot_set_dqblk
};
EXPORT_SYMBOL(dquot_quotactl_sysfile_ops);
static int do_proc_dqstats(struct ctl_table *table, int write,
                           void __user *buffer, size_t *lenp, loff_t *ppos)
{
        unsigned int type = (int *)table->data - dqstats.stat;

        /* Update global table */
        dqstats.stat[type] =
                        percpu_counter_sum_positive(&dqstats.counter[type]);
        return proc_dointvec(table, write, buffer, lenp, ppos);
}

static struct ctl_table fs_dqstats_table[] = {
        {
                .procname       = "lookups",
                .data           = &dqstats.stat[DQST_LOOKUPS],
                .maxlen         = sizeof(int),
                .mode           = 0444,
                .proc_handler   = do_proc_dqstats,
        },
        {
                .procname       = "drops",
                .data           = &dqstats.stat[DQST_DROPS],
                .maxlen         = sizeof(int),
                .mode           = 0444,
                .proc_handler   = do_proc_dqstats,
        },
        {
                .procname       = "reads",
                .data           = &dqstats.stat[DQST_READS],
                .maxlen         = sizeof(int),
                .mode           = 0444,
                .proc_handler   = do_proc_dqstats,
        },
        {
                .procname       = "writes",
                .data           = &dqstats.stat[DQST_WRITES],
                .maxlen         = sizeof(int),
                .mode           = 0444,
                .proc_handler   = do_proc_dqstats,
        },
        {
                .procname       = "cache_hits",
                .data           = &dqstats.stat[DQST_CACHE_HITS],
                .maxlen         = sizeof(int),
                .mode           = 0444,
                .proc_handler   = do_proc_dqstats,
        },
        {
                .procname       = "allocated_dquots",
                .data           = &dqstats.stat[DQST_ALLOC_DQUOTS],
                .maxlen         = sizeof(int),
                .mode           = 0444,
                .proc_handler   = do_proc_dqstats,
        },
        {
                .procname       = "free_dquots",
                .data           = &dqstats.stat[DQST_FREE_DQUOTS],
                .maxlen         = sizeof(int),
                .mode           = 0444,
                .proc_handler   = do_proc_dqstats,
        },
        {
                .procname       = "syncs",
                .data           = &dqstats.stat[DQST_SYNCS],
                .maxlen         = sizeof(int),
                .mode           = 0444,
                .proc_handler   = do_proc_dqstats,
        },
#ifdef CONFIG_PRINT_QUOTA_WARNING
        {
                .procname       = "warnings",
                .data           = &flag_print_warnings,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
#endif
        { },
};

static struct ctl_table fs_table[] = {
        {
                .procname       = "quota",
                .mode           = 0555,
                .child          = fs_dqstats_table,
        },
        { },
};

static struct ctl_table sys_table[] = {
        {
                .procname       = "fs",
                .mode           = 0555,
                .child          = fs_table,
        },
        { },
};

static int __init dquot_init(void)
{
        int i, ret;
        unsigned long nr_hash, order;

        printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__);

        register_sysctl_table(sys_table);

        dquot_cachep = kmem_cache_create("dquot",
                        sizeof(struct dquot), sizeof(unsigned long) * 4,
                        (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
                                SLAB_MEM_SPREAD|SLAB_PANIC),
                        NULL);

        order = 0;
        dquot_hash = (struct hlist_head *)__get_free_pages(GFP_ATOMIC, order);
        if (!dquot_hash)
                panic("Cannot create dquot hash table");

        for (i = 0; i < _DQST_DQSTAT_LAST; i++) {
                ret = percpu_counter_init(&dqstats.counter[i], 0, GFP_KERNEL);
                if (ret)
                        panic("Cannot create dquot stat counters");
        }

        /* Find power-of-two hlist_heads which can fit into allocation */
        nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
        dq_hash_bits = 0;
        do {
                dq_hash_bits++;
        } while (nr_hash >> dq_hash_bits);
        dq_hash_bits--;

        nr_hash = 1UL << dq_hash_bits;
        dq_hash_mask = nr_hash - 1;
        for (i = 0; i < nr_hash; i++)
                INIT_HLIST_HEAD(dquot_hash + i);

        pr_info("VFS: Dquot-cache hash table entries: %ld (order %ld,"
                " %ld bytes)\n", nr_hash, order, (PAGE_SIZE << order));

        register_shrinker(&dqcache_shrinker);

        return 0;
}
module_init(dquot_init);
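
/*
 * Worked example for the hash sizing in dquot_init() above (assuming
 * 4 KiB pages, order 0 and an 8-byte struct hlist_head): nr_hash starts
 * as 4096 / 8 = 512 buckets; the loop stops once 512 >> 10 == 0, so
 * after the decrement dq_hash_bits is 9 (512 == 1 << 9) and
 * dq_hash_mask is 511.
 */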