/*
 * Implementation of the diskquota system for the LINUX operating system. QUOTA
 * is implemented using the BSD system call interface as the means of
 * communication with the user level. This file contains the generic routines
 * called by the different filesystems on allocation of an inode or block.
 * These routines take care of the administration needed to have a consistent
 * diskquota tracking system. The ideas of both user and group quotas are based
 * on the Melbourne quota system as used on BSD derived systems. The internal
 * implementation is based on one of the several variants of the LINUX
 * inode-subsystem with added complexity of the diskquota system.
 *
 * Author:	Marco van Wieringen <mvw@planets.elm.net>
 *
 * Fixes:	Dmitry Gorodchanin <pgmdsg@ibi.com>, 11 Feb 96
 *
 *		Revised list management to avoid races
 *		-- Bill Hawes, <whawes@star.net>, 9/98
 *
 *		Fixed races in dquot_transfer(), dqget() and dquot_alloc_...().
 *		As a consequence the locking was moved from dquot_decr_...(),
 *		dquot_incr_...() to calling functions.
 *		invalidate_dquots() now writes modified dquots.
 *		Serialized quota_off() and quota_on() for mount point.
 *		Fixed a few bugs in grow_dquots().
 *		Fixed deadlock in write_dquot() - we no longer account quotas on
 *		quota files
 *		remove_dquot_ref() moved to inode.c - it now traverses through inodes
 *		add_dquot_ref() restarts after blocking
 *		Added check for bogus uid and fixed check for group in quotactl.
 *		Jan Kara, <jack@suse.cz>, sponsored by SuSE CR, 10-11/99
 *
 *		Used struct list_head instead of own list struct
 *		Invalidation of referenced dquots is no longer possible
 *		Improved free_dquots list management
 *		Quota and i_blocks are now updated in one place to avoid races
 *		Warnings are now delayed so we won't block in critical section
 *		Write updated not to require dquot lock
 *		Jan Kara, <jack@suse.cz>, 9/2000
 *
 *		Added dynamic quota structure allocation
 *		Jan Kara <jack@suse.cz> 12/2000
 *
 *		Rewritten quota interface. Implemented new quota format and
 *		formats registering.
 *		Jan Kara, <jack@suse.cz>, 2001,2002
 *
 *		New SMP locking.
 *		Jan Kara, <jack@suse.cz>, 10/2002
 *
 *		Added journalled quota support, fix lock inversion problems
 *		Jan Kara, <jack@suse.cz>, 2003,2004
 *
 * (C) Copyright 1994 - 1997 Marco van Wieringen
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/mm.h>
#include <linux/time.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/tty.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/security.h>
#include <linux/kmod.h>
#include <linux/namei.h>
#include <linux/buffer_head.h>
#include <linux/capability.h>
#include <linux/quotaops.h>
#include <linux/writeback.h> /* for inode_lock, oddly enough.. */

#include <asm/uaccess.h>

/*
 * There are three quota SMP locks. dq_list_lock protects all lists with quotas
 * and quota formats.
 * dq_data_lock protects data from dq_dqb and also mem_dqinfo structures and
 * also guards consistency of dquot->dq_dqb with inode->i_blocks, i_bytes.
 * i_blocks and i_bytes updates themselves are guarded by i_lock acquired
 * directly in inode_add_bytes() and inode_sub_bytes(). dq_state_lock protects
 * modifications of quota state (on quotaon and quotaoff) and readers who care
 * about latest values take it as well.
 *
 * The spinlock ordering is hence: dq_data_lock > dq_list_lock > i_lock,
 *   dq_list_lock > dq_state_lock
 *
 * Note that some things (eg. sb pointer, type, id) don't change during
 * the life of the dquot structure and so needn't be protected by a lock
 *
 * Any operation working on dquots via inode pointers must hold dqptr_sem. If
 * the operation is just reading pointers from the inode (or not using them at
 * all) the read lock is enough. If pointers are altered the function must hold
 * the write lock. Special care needs to be taken about the S_NOQUOTA inode
 * flag (marking that the inode is a quota file). Functions adding pointers
 * from an inode to dquots have to check this flag under dqptr_sem and then
 * (if S_NOQUOTA is not set) they have to do all pointer modifications before
 * dropping dqptr_sem. This makes sure they cannot race with quotaon which
 * first sets S_NOQUOTA flag and then drops all pointers to dquots from an
 * inode.
 *
 * Each dquot has its dq_lock mutex. Locked dquots might not be referenced
 * from inodes (dquot_alloc_space() and such don't check the dq_lock).
 * Currently a dquot is locked only when it is being read to memory (or space
 * for it is being allocated) on the first dqget() and when it is being
 * released on the last dqput(). The allocation and release operations are
 * serialized by the dq_lock and by checking the use count in dquot_release().
 * Write operations on dquots don't hold dq_lock as they copy data under
 * dq_data_lock spinlock to internal buffers before writing.
 *
 * Lock ordering (including related VFS locks) is the following:
 *   i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_lock >
 *   dqio_mutex
 * The lock ordering of dqptr_sem imposed by quota code is only dqonoff_sem >
 * dqptr_sem. But a filesystem has to reckon with the fact that functions such
 * as dquot_alloc_space() acquire dqptr_sem and they usually have to be called
 * from inside a transaction to keep filesystem consistency after a crash. Also
 * filesystems usually want to do some IO on the dquot from ->mark_dirty which
 * is called with dqptr_sem held.
 * i_mutex on quota files is special (it's below dqio_mutex)
 */
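/*
 * Example (illustrative sketch, not part of the original code): a helper
 * updating both quota usage and i_bytes must take the locks in the order
 * stated above, dq_data_lock before i_lock:
 *
 *	spin_lock(&dq_data_lock);
 *	dquot->dq_dqb.dqb_curspace += nr;
 *	spin_lock(&inode->i_lock);
 *	__inode_add_bytes(inode, nr);
 *	spin_unlock(&inode->i_lock);
 *	spin_unlock(&dq_data_lock);
 *
 * This is the pattern dquot_claim_space_nodirty() below follows (via
 * inode_claim_rsv_space()); taking the locks in the opposite order could
 * deadlock.
 */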
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock);
__cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
EXPORT_SYMBOL(dq_data_lock);

void __quota_error(struct super_block *sb, const char *func,
		   const char *fmt, ...)
{
	if (printk_ratelimit()) {
		va_list args;
		struct va_format vaf;

		va_start(args, fmt);

		vaf.fmt = fmt;
		vaf.va = &args;

		printk(KERN_ERR "Quota error (device %s): %s: %pV\n",
		       sb->s_id, func, &vaf);

		va_end(args);
	}
}
EXPORT_SYMBOL(__quota_error);

#if defined(CONFIG_QUOTA_DEBUG) || defined(CONFIG_PRINT_QUOTA_WARNING)
static char *quotatypes[] = INITQFNAMES;
#endif
static struct quota_format_type *quota_formats;	/* List of registered formats */
static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;

/* SLAB cache for dquot structures */
static struct kmem_cache *dquot_cachep;

int register_quota_format(struct quota_format_type *fmt)
{
	spin_lock(&dq_list_lock);
	fmt->qf_next = quota_formats;
	quota_formats = fmt;
	spin_unlock(&dq_list_lock);
	return 0;
}
EXPORT_SYMBOL(register_quota_format);

void unregister_quota_format(struct quota_format_type *fmt)
{
	struct quota_format_type **actqf;

	spin_lock(&dq_list_lock);
	for (actqf = &quota_formats; *actqf && *actqf != fmt;
	     actqf = &(*actqf)->qf_next)
		;
	if (*actqf)
		*actqf = (*actqf)->qf_next;
	spin_unlock(&dq_list_lock);
}
EXPORT_SYMBOL(unregister_quota_format);

static struct quota_format_type *find_quota_format(int id)
{
	struct quota_format_type *actqf;

	spin_lock(&dq_list_lock);
	for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
	     actqf = actqf->qf_next)
		;
	if (!actqf || !try_module_get(actqf->qf_owner)) {
		int qm;

		spin_unlock(&dq_list_lock);

		for (qm = 0; module_names[qm].qm_fmt_id &&
			     module_names[qm].qm_fmt_id != id; qm++)
			;
		if (!module_names[qm].qm_fmt_id ||
		    request_module(module_names[qm].qm_mod_name))
			return NULL;

		spin_lock(&dq_list_lock);
		for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
		     actqf = actqf->qf_next)
			;
		if (actqf && !try_module_get(actqf->qf_owner))
			actqf = NULL;
	}
	spin_unlock(&dq_list_lock);
	return actqf;
}

static void put_quota_format(struct quota_format_type *fmt)
{
	module_put(fmt->qf_owner);
}
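/*
 * Example (hypothetical sketch): a quota format module registers itself on
 * load and unregisters on unload, roughly as follows ("my_format_ops" and
 * the format id are placeholders; see fs/quota/quota_v2.c for a real user
 * of this interface):
 *
 *	static struct quota_format_type my_format = {
 *		.qf_fmt_id = QFMT_VFS_V0,
 *		.qf_ops    = &my_format_ops,
 *		.qf_owner  = THIS_MODULE,
 *	};
 *
 *	static int __init my_format_init(void)
 *	{
 *		return register_quota_format(&my_format);
 *	}
 *
 *	static void __exit my_format_exit(void)
 *	{
 *		unregister_quota_format(&my_format);
 *	}
 */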
/*
 * Dquot List Management:
 * The quota code uses three lists for dquot management: the inuse_list,
 * free_dquots, and dquot_hash[] array. A single dquot structure may be
 * on all three lists, depending on its current state.
 *
 * All dquots are placed to the end of inuse_list when first created, and this
 * list is used for the invalidate operation, which must look at every dquot.
 *
 * Unused dquots (dq_count == 0) are added to the free_dquots list when freed,
 * and this list is searched whenever we need an available dquot. Dquots are
 * removed from the list as soon as they are used again, and
 * dqstats.free_dquots gives the number of dquots on the list. When a
 * dquot is invalidated it's completely released from memory.
 *
 * Dquots with a specific identity (device, type and id) are placed on
 * one of the dquot_hash[] hash chains. This provides an efficient search
 * mechanism to locate a specific dquot.
 */

static LIST_HEAD(inuse_list);
static LIST_HEAD(free_dquots);
static unsigned int dq_hash_bits, dq_hash_mask;
static struct hlist_head *dquot_hash;

struct dqstats dqstats;
EXPORT_SYMBOL(dqstats);

static qsize_t inode_get_rsv_space(struct inode *inode);
static void __dquot_initialize(struct inode *inode, int type);

static inline unsigned int
hashfn(const struct super_block *sb, unsigned int id, int type)
{
	unsigned long tmp;

	tmp = (((unsigned long)sb>>L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type);
	return (tmp + (tmp >> dq_hash_bits)) & dq_hash_mask;
}

/*
 * Following list functions expect dq_list_lock to be held
 */
static inline void insert_dquot_hash(struct dquot *dquot)
{
	struct hlist_head *head;
	head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id, dquot->dq_type);
	hlist_add_head(&dquot->dq_hash, head);
}

static inline void remove_dquot_hash(struct dquot *dquot)
{
	hlist_del_init(&dquot->dq_hash);
}

static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb,
				unsigned int id, int type)
{
	struct hlist_node *node;
	struct dquot *dquot;

	hlist_for_each (node, dquot_hash+hashent) {
		dquot = hlist_entry(node, struct dquot, dq_hash);
		if (dquot->dq_sb == sb && dquot->dq_id == id &&
		    dquot->dq_type == type)
			return dquot;
	}
	return NULL;
}

/* Add a dquot to the tail of the free list */
static inline void put_dquot_last(struct dquot *dquot)
{
	list_add_tail(&dquot->dq_free, &free_dquots);
	dqstats_inc(DQST_FREE_DQUOTS);
}

static inline void remove_free_dquot(struct dquot *dquot)
{
	if (list_empty(&dquot->dq_free))
		return;
	list_del_init(&dquot->dq_free);
	dqstats_dec(DQST_FREE_DQUOTS);
}

static inline void put_inuse(struct dquot *dquot)
{
	/* We add to the back of inuse list so we don't have to restart
	 * when traversing this list and we block */
	list_add_tail(&dquot->dq_inuse, &inuse_list);
	dqstats_inc(DQST_ALLOC_DQUOTS);
}

static inline void remove_inuse(struct dquot *dquot)
{
	dqstats_dec(DQST_ALLOC_DQUOTS);
	list_del(&dquot->dq_inuse);
}
/*
 * End of list functions needing dq_list_lock
 */

static void wait_on_dquot(struct dquot *dquot)
{
	mutex_lock(&dquot->dq_lock);
	mutex_unlock(&dquot->dq_lock);
}

static inline int dquot_dirty(struct dquot *dquot)
{
	return test_bit(DQ_MOD_B, &dquot->dq_flags);
}

static inline int mark_dquot_dirty(struct dquot *dquot)
{
	return dquot->dq_sb->dq_op->mark_dirty(dquot);
}

/* Mark dquot dirty in atomic manner, and return its old dirty flag state */
int dquot_mark_dquot_dirty(struct dquot *dquot)
{
	int ret = 1;

	/* If quota is dirty already, we don't have to acquire dq_list_lock */
	if (test_bit(DQ_MOD_B, &dquot->dq_flags))
		return 1;

	spin_lock(&dq_list_lock);
	if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) {
		list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)->
				info[dquot->dq_type].dqi_dirty_list);
		ret = 0;
	}
	spin_unlock(&dq_list_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_mark_dquot_dirty);
/* Dirtify all the dquots - this can block when journalling */
static inline int mark_all_dquot_dirty(struct dquot * const *dquot)
{
	int ret, err, cnt;

	ret = err = 0;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (dquot[cnt])
			/* Even in case of error we have to continue */
			ret = mark_dquot_dirty(dquot[cnt]);
		if (!err)
			err = ret;
	}
	return err;
}

static inline void dqput_all(struct dquot **dquot)
{
	unsigned int cnt;

	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		dqput(dquot[cnt]);
}

/* This function needs dq_list_lock */
static inline int clear_dquot_dirty(struct dquot *dquot)
{
	if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags))
		return 0;
	list_del_init(&dquot->dq_dirty);
	return 1;
}

void mark_info_dirty(struct super_block *sb, int type)
{
	set_bit(DQF_INFO_DIRTY_B, &sb_dqopt(sb)->info[type].dqi_flags);
}
EXPORT_SYMBOL(mark_info_dirty);

/*
 * Read dquot from disk and alloc space for it
 */

int dquot_acquire(struct dquot *dquot)
{
	int ret = 0, ret2 = 0;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dquot->dq_lock);
	mutex_lock(&dqopt->dqio_mutex);
	if (!test_bit(DQ_READ_B, &dquot->dq_flags))
		ret = dqopt->ops[dquot->dq_type]->read_dqblk(dquot);
	if (ret < 0)
		goto out_iolock;
	set_bit(DQ_READ_B, &dquot->dq_flags);
	/* Instantiate dquot if needed */
	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && !dquot->dq_off) {
		ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
		/* Write the info if needed */
		if (info_dirty(&dqopt->info[dquot->dq_type])) {
			ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
						dquot->dq_sb, dquot->dq_type);
		}
		if (ret < 0)
			goto out_iolock;
		if (ret2 < 0) {
			ret = ret2;
			goto out_iolock;
		}
	}
	set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_iolock:
	mutex_unlock(&dqopt->dqio_mutex);
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_acquire);

/*
 * Write dquot to disk
 */
int dquot_commit(struct dquot *dquot)
{
	int ret = 0, ret2 = 0;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dqopt->dqio_mutex);
	spin_lock(&dq_list_lock);
	if (!clear_dquot_dirty(dquot)) {
		spin_unlock(&dq_list_lock);
		goto out_sem;
	}
	spin_unlock(&dq_list_lock);
	/* A dquot can be inactive only if there was an error during read/init
	 * => we'd better not write it */
	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
		ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
		if (info_dirty(&dqopt->info[dquot->dq_type])) {
			ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
						dquot->dq_sb, dquot->dq_type);
		}
		if (ret >= 0)
			ret = ret2;
	}
out_sem:
	mutex_unlock(&dqopt->dqio_mutex);
	return ret;
}
EXPORT_SYMBOL(dquot_commit);

/*
 * Release dquot
 */
int dquot_release(struct dquot *dquot)
{
	int ret = 0, ret2 = 0;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dquot->dq_lock);
	/* Check whether we are not racing with some other dqget() */
	if (atomic_read(&dquot->dq_count) > 1)
		goto out_dqlock;
	mutex_lock(&dqopt->dqio_mutex);
	if (dqopt->ops[dquot->dq_type]->release_dqblk) {
		ret = dqopt->ops[dquot->dq_type]->release_dqblk(dquot);
		/* Write the info */
		if (info_dirty(&dqopt->info[dquot->dq_type])) {
			ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
						dquot->dq_sb, dquot->dq_type);
		}
		if (ret >= 0)
			ret = ret2;
	}
	clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
	mutex_unlock(&dqopt->dqio_mutex);
out_dqlock:
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_release);

void dquot_destroy(struct dquot *dquot)
{
	kmem_cache_free(dquot_cachep, dquot);
}
EXPORT_SYMBOL(dquot_destroy);

static inline void do_destroy_dquot(struct dquot *dquot)
{
	dquot->dq_sb->dq_op->destroy_dquot(dquot);
}

/* Invalidate all dquots on the list. Note that this function is called after
 * quota is disabled and pointers from inodes removed so there cannot be new
 * quota users. There can still be some users of quotas due to inodes being
 * just deleted or pruned by prune_icache() (those are not attached to any
 * list) or a parallel quotactl call. We have to wait for such users.
 */
static void invalidate_dquots(struct super_block *sb, int type)
{
	struct dquot *dquot, *tmp;

restart:
	spin_lock(&dq_list_lock);
	list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
		if (dquot->dq_sb != sb)
			continue;
		if (dquot->dq_type != type)
			continue;
		/* Wait for dquot users */
		if (atomic_read(&dquot->dq_count)) {
			DEFINE_WAIT(wait);

			atomic_inc(&dquot->dq_count);
			prepare_to_wait(&dquot->dq_wait_unused, &wait,
					TASK_UNINTERRUPTIBLE);
			spin_unlock(&dq_list_lock);
			/* Once dqput() wakes us up, we know it's time to free
			 * the dquot.
			 * IMPORTANT: we rely on the fact that there is always
			 * at most one process waiting for dquot to free.
			 * Otherwise dq_count would be > 1 and we would never
			 * wake up.
			 */
			if (atomic_read(&dquot->dq_count) > 1)
				schedule();
			finish_wait(&dquot->dq_wait_unused, &wait);
			dqput(dquot);
			/* At this moment the dquot need not exist (it could
			 * have been reclaimed by prune_dqcache()). Hence we
			 * must restart.
			 */
			goto restart;
		}
		/*
		 * Quota now has no users and it has been written on last
		 * dqput()
		 */
		remove_dquot_hash(dquot);
		remove_free_dquot(dquot);
		remove_inuse(dquot);
		do_destroy_dquot(dquot);
	}
	spin_unlock(&dq_list_lock);
}

/* Call callback for every active dquot on given filesystem */
int dquot_scan_active(struct super_block *sb,
		      int (*fn)(struct dquot *dquot, unsigned long priv),
		      unsigned long priv)
{
	struct dquot *dquot, *old_dquot = NULL;
	int ret = 0;

	mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
	spin_lock(&dq_list_lock);
	list_for_each_entry(dquot, &inuse_list, dq_inuse) {
		if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
			continue;
		if (dquot->dq_sb != sb)
			continue;
		/* Now we have active dquot so we can just increase use count */
		atomic_inc(&dquot->dq_count);
		spin_unlock(&dq_list_lock);
		dqstats_inc(DQST_LOOKUPS);
		dqput(old_dquot);
		old_dquot = dquot;
		ret = fn(dquot, priv);
		if (ret < 0)
			goto out;
		spin_lock(&dq_list_lock);
		/* We are safe to continue now because our dquot could not
		 * be moved out of the inuse list while we hold the reference */
	}
	spin_unlock(&dq_list_lock);
out:
	dqput(old_dquot);
	mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
	return ret;
}
EXPORT_SYMBOL(dquot_scan_active);
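/*
 * Example (illustrative sketch): a filesystem could use dquot_scan_active()
 * to push every active dquot to disk with a callback along these lines
 * ("my_write_cb" is a placeholder):
 *
 *	static int my_write_cb(struct dquot *dquot, unsigned long priv)
 *	{
 *		return dquot->dq_sb->dq_op->write_dquot(dquot);
 *	}
 *
 *	err = dquot_scan_active(sb, my_write_cb, 0);
 *
 * The dquot passed to the callback is guaranteed active and referenced for
 * the duration of the call. The callback may block; the scan itself holds
 * dqonoff_mutex, so it must not be invoked from a context that already
 * holds it.
 */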
int dquot_quota_sync(struct super_block *sb, int type, int wait)
{
	struct list_head *dirty;
	struct dquot *dquot;
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;

	mutex_lock(&dqopt->dqonoff_mutex);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;
		spin_lock(&dq_list_lock);
		dirty = &dqopt->info[cnt].dqi_dirty_list;
		while (!list_empty(dirty)) {
			dquot = list_first_entry(dirty, struct dquot,
						 dq_dirty);
			/* Dirty and inactive can be only bad dquot... */
			if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
				clear_dquot_dirty(dquot);
				continue;
			}
			/* Now we have active dquot from which someone is
			 * holding reference so we can safely just increase
			 * use count */
			atomic_inc(&dquot->dq_count);
			spin_unlock(&dq_list_lock);
			dqstats_inc(DQST_LOOKUPS);
			sb->dq_op->write_dquot(dquot);
			dqput(dquot);
			spin_lock(&dq_list_lock);
		}
		spin_unlock(&dq_list_lock);
	}

	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt)
		    && info_dirty(&dqopt->info[cnt]))
			sb->dq_op->write_info(sb, cnt);
	dqstats_inc(DQST_SYNCS);
	mutex_unlock(&dqopt->dqonoff_mutex);

	if (!wait || (sb_dqopt(sb)->flags & DQUOT_QUOTA_SYS_FILE))
		return 0;

	/* This is not very clever (and fast) but currently I don't know about
	 * any other simple way of getting quota data to disk and we must get
	 * them there for them to be visible to userspace... */
	if (sb->s_op->sync_fs)
		sb->s_op->sync_fs(sb, 1);
	sync_blockdev(sb->s_bdev);

	/*
	 * Now when everything is written we can discard the pagecache so
	 * that userspace sees the changes.
	 */
	mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;
		mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex,
				  I_MUTEX_QUOTA);
		truncate_inode_pages(&sb_dqopt(sb)->files[cnt]->i_data, 0);
		mutex_unlock(&sb_dqopt(sb)->files[cnt]->i_mutex);
	}
	mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);

	return 0;
}
EXPORT_SYMBOL(dquot_quota_sync);

/* Free unused dquots from cache */
static void prune_dqcache(int count)
{
	struct list_head *head;
	struct dquot *dquot;

	head = free_dquots.prev;
	while (head != &free_dquots && count) {
		dquot = list_entry(head, struct dquot, dq_free);
		remove_dquot_hash(dquot);
		remove_free_dquot(dquot);
		remove_inuse(dquot);
		do_destroy_dquot(dquot);
		count--;
		head = free_dquots.prev;
	}
}

/*
 * This is called from kswapd when we think we need some
 * more memory
 */
static int shrink_dqcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
{
	if (nr) {
		spin_lock(&dq_list_lock);
		prune_dqcache(nr);
		spin_unlock(&dq_list_lock);
	}
	return ((unsigned)
		percpu_counter_read_positive(&dqstats.counter[DQST_FREE_DQUOTS])
		/100) * sysctl_vfs_cache_pressure;
}

static struct shrinker dqcache_shrinker = {
	.shrink = shrink_dqcache_memory,
	.seeks = DEFAULT_SEEKS,
};

/*
 * Put reference to dquot
 * NOTE: If you change this function please check whether dqput_blocks() works right...
 */
void dqput(struct dquot *dquot)
{
	int ret;

	if (!dquot)
		return;
#ifdef CONFIG_QUOTA_DEBUG
	if (!atomic_read(&dquot->dq_count)) {
		quota_error(dquot->dq_sb, "trying to free free dquot of %s %d",
			    quotatypes[dquot->dq_type], dquot->dq_id);
		BUG();
	}
#endif
	dqstats_inc(DQST_DROPS);
we_slept:
	spin_lock(&dq_list_lock);
	if (atomic_read(&dquot->dq_count) > 1) {
		/* We have more than one user... nothing to do */
		atomic_dec(&dquot->dq_count);
		/* Releasing dquot during quotaoff phase? */
		if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_type) &&
		    atomic_read(&dquot->dq_count) == 1)
			wake_up(&dquot->dq_wait_unused);
		spin_unlock(&dq_list_lock);
		return;
	}
	/* Need to release dquot? */
	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && dquot_dirty(dquot)) {
		spin_unlock(&dq_list_lock);
		/* Commit dquot before releasing */
		ret = dquot->dq_sb->dq_op->write_dquot(dquot);
		if (ret < 0) {
			quota_error(dquot->dq_sb, "Can't write quota structure"
				    " (error %d). Quota may get out of sync!",
				    ret);
			/*
			 * We clear dirty bit anyway, so that we avoid
			 * infinite loop here
			 */
			spin_lock(&dq_list_lock);
			clear_dquot_dirty(dquot);
			spin_unlock(&dq_list_lock);
		}
		goto we_slept;
	}
	/* Clear flag in case dquot was inactive (something bad happened) */
	clear_dquot_dirty(dquot);
	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
		spin_unlock(&dq_list_lock);
		dquot->dq_sb->dq_op->release_dquot(dquot);
		goto we_slept;
	}
	atomic_dec(&dquot->dq_count);
#ifdef CONFIG_QUOTA_DEBUG
	/* sanity check */
	BUG_ON(!list_empty(&dquot->dq_free));
#endif
	put_dquot_last(dquot);
	spin_unlock(&dq_list_lock);
}
EXPORT_SYMBOL(dqput);

struct dquot *dquot_alloc(struct super_block *sb, int type)
{
	return kmem_cache_zalloc(dquot_cachep, GFP_NOFS);
}
EXPORT_SYMBOL(dquot_alloc);

static struct dquot *get_empty_dquot(struct super_block *sb, int type)
{
	struct dquot *dquot;

	dquot = sb->dq_op->alloc_dquot(sb, type);
	if (!dquot)
		return NULL;

	mutex_init(&dquot->dq_lock);
	INIT_LIST_HEAD(&dquot->dq_free);
	INIT_LIST_HEAD(&dquot->dq_inuse);
	INIT_HLIST_NODE(&dquot->dq_hash);
	INIT_LIST_HEAD(&dquot->dq_dirty);
	init_waitqueue_head(&dquot->dq_wait_unused);
	dquot->dq_sb = sb;
	dquot->dq_type = type;
	atomic_set(&dquot->dq_count, 1);

	return dquot;
}

/*
 * Get reference to dquot
 *
 * Locking is slightly tricky here. We are guarded from parallel quotaoff()
 * destroying our dquot by:
 *   a) checking for quota flags under dq_list_lock and
 *   b) getting a reference to dquot before we release dq_list_lock
 */
struct dquot *dqget(struct super_block *sb, unsigned int id, int type)
{
	unsigned int hashent = hashfn(sb, id, type);
	struct dquot *dquot = NULL, *empty = NULL;

	if (!sb_has_quota_active(sb, type))
		return NULL;
we_slept:
	spin_lock(&dq_list_lock);
	spin_lock(&dq_state_lock);
	if (!sb_has_quota_active(sb, type)) {
		spin_unlock(&dq_state_lock);
		spin_unlock(&dq_list_lock);
		goto out;
	}
	spin_unlock(&dq_state_lock);

	dquot = find_dquot(hashent, sb, id, type);
	if (!dquot) {
		if (!empty) {
			spin_unlock(&dq_list_lock);
			empty = get_empty_dquot(sb, type);
			if (!empty)
				schedule();	/* Try to wait for a moment... */
			goto we_slept;
		}
		dquot = empty;
		empty = NULL;
		dquot->dq_id = id;
		/* all dquots go on the inuse_list */
		put_inuse(dquot);
		/* hash it first so it can be found */
		insert_dquot_hash(dquot);
		spin_unlock(&dq_list_lock);
		dqstats_inc(DQST_LOOKUPS);
	} else {
		if (!atomic_read(&dquot->dq_count))
			remove_free_dquot(dquot);
		atomic_inc(&dquot->dq_count);
		spin_unlock(&dq_list_lock);
		dqstats_inc(DQST_CACHE_HITS);
		dqstats_inc(DQST_LOOKUPS);
	}
	/* Wait for dq_lock - after this we know that either dquot_release() is
	 * already finished or it will be canceled due to dq_count > 1 test */
	wait_on_dquot(dquot);
	/* Read the dquot / allocate space in quota file */
	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) &&
	    sb->dq_op->acquire_dquot(dquot) < 0) {
		dqput(dquot);
		dquot = NULL;
		goto out;
	}
#ifdef CONFIG_QUOTA_DEBUG
	BUG_ON(!dquot->dq_sb);	/* Has somebody invalidated entry under us? */
#endif
out:
	if (empty)
		do_destroy_dquot(empty);

	return dquot;
}
EXPORT_SYMBOL(dqget);
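/*
 * Example (illustrative sketch): typical reference handling around dqget().
 * A caller inspecting usage for one user id might do:
 *
 *	struct dquot *dquot = dqget(sb, uid, USRQUOTA);
 *
 *	if (dquot) {
 *		spin_lock(&dq_data_lock);
 *		... read dquot->dq_dqb fields ...
 *		spin_unlock(&dq_data_lock);
 *		dqput(dquot);
 *	}
 *
 * dqget() returns NULL when quota is not active on sb (or on allocation /
 * IO failure); every successful dqget() must be paired with exactly one
 * dqput().
 */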
static int dqinit_needed(struct inode *inode, int type)
{
	int cnt;

	if (IS_NOQUOTA(inode))
		return 0;
	if (type != -1)
		return !inode->i_dquot[type];
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (!inode->i_dquot[cnt])
			return 1;
	return 0;
}

/* This routine is guarded by dqonoff_mutex mutex */
static void add_dquot_ref(struct super_block *sb, int type)
{
	struct inode *inode, *old_inode = NULL;
#ifdef CONFIG_QUOTA_DEBUG
	int reserved = 0;
#endif

	spin_lock(&inode_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW))
			continue;
#ifdef CONFIG_QUOTA_DEBUG
		if (unlikely(inode_get_rsv_space(inode) > 0))
			reserved = 1;
#endif
		if (!atomic_read(&inode->i_writecount))
			continue;
		if (!dqinit_needed(inode, type))
			continue;

		__iget(inode);
		spin_unlock(&inode_lock);

		iput(old_inode);
		__dquot_initialize(inode, type);
		/* We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the inode_lock.
		 * We cannot iput the inode now as we can be holding the last
		 * reference and we cannot iput it under inode_lock. So we
		 * keep the reference and iput it later. */
		old_inode = inode;
		spin_lock(&inode_lock);
	}
	spin_unlock(&inode_lock);
	iput(old_inode);

#ifdef CONFIG_QUOTA_DEBUG
	if (reserved) {
		quota_error(sb, "Writes happened before quota was turned on "
			    "thus quota information is probably inconsistent. "
			    "Please run quotacheck(8)");
	}
#endif
}

/*
 * Return 0 if dqput() won't block.
 * (note that 1 doesn't necessarily mean blocking)
 */
static inline int dqput_blocks(struct dquot *dquot)
{
	if (atomic_read(&dquot->dq_count) <= 1)
		return 1;
	return 0;
}

/*
 * Remove references to dquots from inode and add dquot to list for freeing
 * if we have the last reference to dquot
 * We can't race with anybody because we hold dqptr_sem for writing...
 */
static int remove_inode_dquot_ref(struct inode *inode, int type,
				  struct list_head *tofree_head)
{
	struct dquot *dquot = inode->i_dquot[type];

	inode->i_dquot[type] = NULL;
	if (dquot) {
		if (dqput_blocks(dquot)) {
#ifdef CONFIG_QUOTA_DEBUG
			if (atomic_read(&dquot->dq_count) != 1)
				quota_error(inode->i_sb, "Adding dquot with "
					    "dq_count %d to dispose list",
					    atomic_read(&dquot->dq_count));
#endif
			spin_lock(&dq_list_lock);
			/* As the dquot currently has users it can't be on
			 * the free list... */
			list_add(&dquot->dq_free, tofree_head);
			spin_unlock(&dq_list_lock);
			return 1;
		}
		else
			dqput(dquot);	/* We have guaranteed we won't block */
	}
	return 0;
}

/*
 * Free list of dquots
 * Dquots are removed from inodes and no new references can be got so we are
 * the only ones holding reference
 */
static void put_dquot_list(struct list_head *tofree_head)
{
	struct list_head *act_head;
	struct dquot *dquot;

	act_head = tofree_head->next;
	while (act_head != tofree_head) {
		dquot = list_entry(act_head, struct dquot, dq_free);
		act_head = act_head->next;
		/* Remove dquot from the list so we won't have problems... */
		list_del_init(&dquot->dq_free);
		dqput(dquot);
	}
}

static void remove_dquot_ref(struct super_block *sb, int type,
			     struct list_head *tofree_head)
{
	struct inode *inode;
	int reserved = 0;

	spin_lock(&inode_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		/*
		 * We have to scan also I_NEW inodes because they can already
		 * have quota pointer initialized. Luckily, we need to touch
		 * only quota pointers and these have separate locking
		 * (dqptr_sem).
		 */
		if (!IS_NOQUOTA(inode)) {
			if (unlikely(inode_get_rsv_space(inode) > 0))
				reserved = 1;
			remove_inode_dquot_ref(inode, type, tofree_head);
		}
	}
	spin_unlock(&inode_lock);
#ifdef CONFIG_QUOTA_DEBUG
	if (reserved) {
		printk(KERN_WARNING "VFS (%s): Writes happened after quota"
			" was disabled thus quota information is probably "
			"inconsistent. Please run quotacheck(8).\n", sb->s_id);
	}
#endif
}

/* Gather all references from inodes and drop them */
static void drop_dquot_ref(struct super_block *sb, int type)
{
	LIST_HEAD(tofree_head);

	if (sb->dq_op) {
		down_write(&sb_dqopt(sb)->dqptr_sem);
		remove_dquot_ref(sb, type, &tofree_head);
		up_write(&sb_dqopt(sb)->dqptr_sem);
		put_dquot_list(&tofree_head);
	}
}

static inline void dquot_incr_inodes(struct dquot *dquot, qsize_t number)
{
	dquot->dq_dqb.dqb_curinodes += number;
}

static inline void dquot_incr_space(struct dquot *dquot, qsize_t number)
{
	dquot->dq_dqb.dqb_curspace += number;
}

static inline void dquot_resv_space(struct dquot *dquot, qsize_t number)
{
	dquot->dq_dqb.dqb_rsvspace += number;
}

/*
 * Claim reserved quota space
 */
static void dquot_claim_reserved_space(struct dquot *dquot, qsize_t number)
{
	if (dquot->dq_dqb.dqb_rsvspace < number) {
		WARN_ON_ONCE(1);
		number = dquot->dq_dqb.dqb_rsvspace;
	}
	dquot->dq_dqb.dqb_curspace += number;
	dquot->dq_dqb.dqb_rsvspace -= number;
}

static inline
void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
{
	if (dquot->dq_dqb.dqb_rsvspace >= number)
		dquot->dq_dqb.dqb_rsvspace -= number;
	else {
		WARN_ON_ONCE(1);
		dquot->dq_dqb.dqb_rsvspace = 0;
	}
}

static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
{
	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
	    dquot->dq_dqb.dqb_curinodes >= number)
		dquot->dq_dqb.dqb_curinodes -= number;
	else
		dquot->dq_dqb.dqb_curinodes = 0;
	if (dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit)
		dquot->dq_dqb.dqb_itime = (time_t) 0;
	clear_bit(DQ_INODES_B, &dquot->dq_flags);
}

static void dquot_decr_space(struct dquot *dquot, qsize_t number)
{
	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
	    dquot->dq_dqb.dqb_curspace >= number)
		dquot->dq_dqb.dqb_curspace -= number;
	else
		dquot->dq_dqb.dqb_curspace = 0;
	if (dquot->dq_dqb.dqb_curspace <= dquot->dq_dqb.dqb_bsoftlimit)
		dquot->dq_dqb.dqb_btime = (time_t) 0;
	clear_bit(DQ_BLKS_B, &dquot->dq_flags);
}

static int warning_issued(struct dquot *dquot, const int warntype)
{
	int flag = (warntype == QUOTA_NL_BHARDWARN ||
		warntype == QUOTA_NL_BSOFTLONGWARN) ?
		DQ_BLKS_B :
		((warntype == QUOTA_NL_IHARDWARN ||
		warntype == QUOTA_NL_ISOFTLONGWARN) ? DQ_INODES_B : 0);

	if (!flag)
		return 0;
	return test_and_set_bit(flag, &dquot->dq_flags);
}

#ifdef CONFIG_PRINT_QUOTA_WARNING
static int flag_print_warnings = 1;

static int need_print_warning(struct dquot *dquot)
{
	if (!flag_print_warnings)
		return 0;

	switch (dquot->dq_type) {
		case USRQUOTA:
			return current_fsuid() == dquot->dq_id;
		case GRPQUOTA:
			return in_group_p(dquot->dq_id);
	}
	return 0;
}

/* Print warning to the user who exceeded quota */
static void print_warning(struct dquot *dquot, const int warntype)
{
	char *msg = NULL;
	struct tty_struct *tty;

	if (warntype == QUOTA_NL_IHARDBELOW ||
	    warntype == QUOTA_NL_ISOFTBELOW ||
	    warntype == QUOTA_NL_BHARDBELOW ||
	    warntype == QUOTA_NL_BSOFTBELOW || !need_print_warning(dquot))
		return;

	tty = get_current_tty();
	if (!tty)
		return;
	tty_write_message(tty, dquot->dq_sb->s_id);
	if (warntype == QUOTA_NL_ISOFTWARN || warntype == QUOTA_NL_BSOFTWARN)
		tty_write_message(tty, ": warning, ");
	else
		tty_write_message(tty, ": write failed, ");
	tty_write_message(tty, quotatypes[dquot->dq_type]);
	switch (warntype) {
		case QUOTA_NL_IHARDWARN:
			msg = " file limit reached.\r\n";
			break;
		case QUOTA_NL_ISOFTLONGWARN:
			msg = " file quota exceeded too long.\r\n";
			break;
		case QUOTA_NL_ISOFTWARN:
			msg = " file quota exceeded.\r\n";
			break;
		case QUOTA_NL_BHARDWARN:
			msg = " block limit reached.\r\n";
			break;
		case QUOTA_NL_BSOFTLONGWARN:
			msg = " block quota exceeded too long.\r\n";
			break;
		case QUOTA_NL_BSOFTWARN:
			msg = " block quota exceeded.\r\n";
			break;
	}
	tty_write_message(tty, msg);
	tty_kref_put(tty);
}
#endif

/*
 * Write warnings to the console and send warning messages over netlink.
 *
 * Note that this function can sleep.
 */
static void flush_warnings(struct dquot *const *dquots, char *warntype)
{
	struct dquot *dq;
	int i;

	for (i = 0; i < MAXQUOTAS; i++) {
		dq = dquots[i];
		if (dq && warntype[i] != QUOTA_NL_NOWARN &&
		    !warning_issued(dq, warntype[i])) {
#ifdef CONFIG_PRINT_QUOTA_WARNING
			print_warning(dq, warntype[i]);
#endif
			quota_send_warning(dq->dq_type, dq->dq_id,
					   dq->dq_sb->s_dev, warntype[i]);
		}
	}
}

static int ignore_hardlimit(struct dquot *dquot)
{
	struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type];

	return capable(CAP_SYS_RESOURCE) &&
	       (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
		!(info->dqi_flags & V1_DQF_RSQUASH));
}

/* needs dq_data_lock */
static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype)
{
	qsize_t newinodes = dquot->dq_dqb.dqb_curinodes + inodes;

	*warntype = QUOTA_NL_NOWARN;
	if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) ||
	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
		return 0;

	if (dquot->dq_dqb.dqb_ihardlimit &&
	    newinodes > dquot->dq_dqb.dqb_ihardlimit &&
	    !ignore_hardlimit(dquot)) {
		*warntype = QUOTA_NL_IHARDWARN;
		return -EDQUOT;
	}

	if (dquot->dq_dqb.dqb_isoftlimit &&
	    newinodes > dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_itime &&
	    get_seconds() >= dquot->dq_dqb.dqb_itime &&
	    !ignore_hardlimit(dquot)) {
		*warntype = QUOTA_NL_ISOFTLONGWARN;
		return -EDQUOT;
	}

	if (dquot->dq_dqb.dqb_isoftlimit &&
	    newinodes > dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_itime == 0) {
		*warntype = QUOTA_NL_ISOFTWARN;
		dquot->dq_dqb.dqb_itime = get_seconds() +
		    sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace;
	}

	return 0;
}

/* needs dq_data_lock */
static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *warntype)
{
	qsize_t tspace;
	struct super_block *sb = dquot->dq_sb;

	*warntype = QUOTA_NL_NOWARN;
	if (!sb_has_quota_limits_enabled(sb, dquot->dq_type) ||
	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
		return 0;

	tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace
		+ space;

	if (dquot->dq_dqb.dqb_bhardlimit &&
	    tspace > dquot->dq_dqb.dqb_bhardlimit &&
	    !ignore_hardlimit(dquot)) {
		if (!prealloc)
			*warntype = QUOTA_NL_BHARDWARN;
		return -EDQUOT;
	}

	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    tspace > dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_btime &&
	    get_seconds() >= dquot->dq_dqb.dqb_btime &&
	    !ignore_hardlimit(dquot)) {
		if (!prealloc)
			*warntype = QUOTA_NL_BSOFTLONGWARN;
		return -EDQUOT;
	}

	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    tspace > dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_btime == 0) {
		if (!prealloc) {
			*warntype = QUOTA_NL_BSOFTWARN;
			dquot->dq_dqb.dqb_btime = get_seconds() +
			    sb_dqopt(sb)->info[dquot->dq_type].dqi_bgrace;
		}
		else
			/*
			 * We don't allow preallocation to exceed softlimit so
			 * exceeding will always be printed
			 */
			return -EDQUOT;
	}

	return 0;
}

static int info_idq_free(struct dquot *dquot, qsize_t inodes)
{
	qsize_t newinodes;

	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
	    dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit ||
	    !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type))
		return QUOTA_NL_NOWARN;

	newinodes = dquot->dq_dqb.dqb_curinodes - inodes;
	if (newinodes <= dquot->dq_dqb.dqb_isoftlimit)
		return QUOTA_NL_ISOFTBELOW;
	if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit &&
	    newinodes < dquot->dq_dqb.dqb_ihardlimit)
		return QUOTA_NL_IHARDBELOW;
	return QUOTA_NL_NOWARN;
}

static int info_bdq_free(struct dquot *dquot, qsize_t space)
{
	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
	    dquot->dq_dqb.dqb_curspace <= dquot->dq_dqb.dqb_bsoftlimit)
		return QUOTA_NL_NOWARN;

	if (dquot->dq_dqb.dqb_curspace - space <= dquot->dq_dqb.dqb_bsoftlimit)
		return QUOTA_NL_BSOFTBELOW;
	if (dquot->dq_dqb.dqb_curspace >= dquot->dq_dqb.dqb_bhardlimit &&
	    dquot->dq_dqb.dqb_curspace - space < dquot->dq_dqb.dqb_bhardlimit)
		return QUOTA_NL_BHARDBELOW;
	return QUOTA_NL_NOWARN;
}

static int dquot_active(const struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (IS_NOQUOTA(inode))
		return 0;
	return sb_any_quota_loaded(sb) & ~sb_any_quota_suspended(sb);
}

/*
 * Initialize quota pointers in inode
 *
 * We do things in a slightly complicated way but by that we avoid calling
 * dqget() and thus filesystem callbacks under dqptr_sem.
 *
 * It is better to call this function outside of any transaction as it
 * might need a lot of space in journal for dquot structure allocation.
 */
static void __dquot_initialize(struct inode *inode, int type)
{
	unsigned int id = 0;
	int cnt;
	struct dquot *got[MAXQUOTAS];
	struct super_block *sb = inode->i_sb;
	qsize_t rsv;

	/* First test before acquiring mutex - solves deadlocks when we
	 * re-enter the quota code and are already holding the mutex */
	if (!dquot_active(inode))
		return;

	/* First get references to structures we might need. */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		got[cnt] = NULL;
		if (type != -1 && cnt != type)
			continue;
		switch (cnt) {
		case USRQUOTA:
			id = inode->i_uid;
			break;
		case GRPQUOTA:
			id = inode->i_gid;
			break;
		}
		got[cnt] = dqget(sb, id, cnt);
	}

	down_write(&sb_dqopt(sb)->dqptr_sem);
	if (IS_NOQUOTA(inode))
		goto out_err;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		/* Avoid races with quotaoff() */
		if (!sb_has_quota_active(sb, cnt))
			continue;
		/* We could race with quotaon or dqget() could have failed */
		if (!got[cnt])
			continue;
		if (!inode->i_dquot[cnt]) {
			inode->i_dquot[cnt] = got[cnt];
			got[cnt] = NULL;
			/*
			 * Make quota reservation system happy if someone
			 * did a write before quota was turned on
			 */
			rsv = inode_get_rsv_space(inode);
			if (unlikely(rsv))
				dquot_resv_space(inode->i_dquot[cnt], rsv);
		}
	}
out_err:
	up_write(&sb_dqopt(sb)->dqptr_sem);
	/* Drop unused references */
	dqput_all(got);
}

void dquot_initialize(struct inode *inode)
{
	__dquot_initialize(inode, -1);
}
EXPORT_SYMBOL(dquot_initialize);

/*
 * Release all quotas referenced by inode
 */
static void __dquot_drop(struct inode *inode)
{
	int cnt;
	struct dquot *put[MAXQUOTAS];

	down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		put[cnt] = inode->i_dquot[cnt];
		inode->i_dquot[cnt] = NULL;
	}
	up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
	dqput_all(put);
}

void dquot_drop(struct inode *inode)
{
	int cnt;

	if (IS_NOQUOTA(inode))
		return;

	/*
	 * Test before calling to rule out calls from proc and such
	 * where we are not allowed to block. Note that this is
	 * actually a reliable test even without the lock - the caller
	 * must assure that nobody can come after the DQUOT_DROP and
	 * add quota pointers back anyway.
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (inode->i_dquot[cnt])
			break;
	}

	if (cnt < MAXQUOTAS)
		__dquot_drop(inode);
}
EXPORT_SYMBOL(dquot_drop);

/*
 * inode_reserved_space is managed internally by quota, and protected by
 * i_lock similar to i_blocks+i_bytes.
 */
static qsize_t *inode_reserved_space(struct inode * inode)
{
	/* Filesystem must explicitly define its own method in order to use
	 * quota reservation interface */
	BUG_ON(!inode->i_sb->dq_op->get_reserved_space);
	return inode->i_sb->dq_op->get_reserved_space(inode);
}

void inode_add_rsv_space(struct inode *inode, qsize_t number)
{
	spin_lock(&inode->i_lock);
	*inode_reserved_space(inode) += number;
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(inode_add_rsv_space);

void inode_claim_rsv_space(struct inode *inode, qsize_t number)
{
	spin_lock(&inode->i_lock);
	*inode_reserved_space(inode) -= number;
	__inode_add_bytes(inode, number);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(inode_claim_rsv_space);

void inode_sub_rsv_space(struct inode *inode, qsize_t number)
{
	spin_lock(&inode->i_lock);
	*inode_reserved_space(inode) -= number;
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(inode_sub_rsv_space);

static qsize_t inode_get_rsv_space(struct inode *inode)
{
	qsize_t ret;

	if (!inode->i_sb->dq_op->get_reserved_space)
		return 0;
	spin_lock(&inode->i_lock);
	ret = *inode_reserved_space(inode);
	spin_unlock(&inode->i_lock);
	return ret;
}

static void inode_incr_space(struct inode *inode, qsize_t number,
				int reserve)
{
	if (reserve)
		inode_add_rsv_space(inode, number);
	else
		inode_add_bytes(inode, number);
}

static void inode_decr_space(struct inode *inode, qsize_t number, int reserve)
{
	if (reserve)
		inode_sub_rsv_space(inode, number);
	else
		inode_sub_bytes(inode, number);
}

/*
 * This function updates i_blocks+i_bytes fields and quota information
 * (together with appropriate checks).
 *
 * NOTE: We absolutely rely on the fact that caller dirties the inode
 * (usually helpers in quotaops.h care about this) and holds a handle for
 * the current transaction so that dquot write and inode write go into the
 * same transaction.
 */
/*
 * This operation can block, but only after everything is updated
 */
int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
{
	int cnt, ret = 0;
	char warntype[MAXQUOTAS];
	int warn = flags & DQUOT_SPACE_WARN;
	int reserve = flags & DQUOT_SPACE_RESERVE;
	int nofail = flags & DQUOT_SPACE_NOFAIL;

	/*
	 * First test before acquiring mutex - solves deadlocks when we
	 * re-enter the quota code and are already holding the mutex
	 */
	if (!dquot_active(inode)) {
		inode_incr_space(inode, number, reserve);
		goto out;
	}

	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		warntype[cnt] = QUOTA_NL_NOWARN;

	spin_lock(&dq_data_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!inode->i_dquot[cnt])
			continue;
		ret = check_bdq(inode->i_dquot[cnt], number, !warn,
				warntype+cnt);
		if (ret && !nofail) {
			spin_unlock(&dq_data_lock);
			goto out_flush_warn;
		}
	}
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!inode->i_dquot[cnt])
			continue;
		if (reserve)
			dquot_resv_space(inode->i_dquot[cnt], number);
		else
			dquot_incr_space(inode->i_dquot[cnt], number);
	}
	inode_incr_space(inode, number, reserve);
	spin_unlock(&dq_data_lock);

	if (reserve)
		goto out_flush_warn;
	mark_all_dquot_dirty(inode->i_dquot);
out_flush_warn:
	flush_warnings(inode->i_dquot, warntype);
	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
out:
	return ret;
}
EXPORT_SYMBOL(__dquot_alloc_space);
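/*
 * Example (illustrative sketch): filesystems normally reach this function
 * through the wrappers in <linux/quotaops.h>, e.g. dquot_alloc_block()
 * (which passes DQUOT_SPACE_WARN). Accounting a block allocation inside a
 * transaction might look roughly like:
 *
 *	ret = dquot_alloc_block(inode, nr_blocks);
 *	if (ret)
 *		goto out;
 *	... allocate the blocks on disk ...
 *	if (failed)
 *		dquot_free_block(inode, nr_blocks);
 *
 * i.e. quota is charged first and given back if the on-disk allocation
 * cannot be completed.
 */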
/*
 * This operation can block, but only after everything is updated
 */
int dquot_alloc_inode(const struct inode *inode)
{
	int cnt, ret = 0;
	char warntype[MAXQUOTAS];

	/* First test before acquiring mutex - solves deadlocks when we
	 * re-enter the quota code and are already holding the mutex */
	if (!dquot_active(inode))
		return 0;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		warntype[cnt] = QUOTA_NL_NOWARN;
	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	spin_lock(&dq_data_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!inode->i_dquot[cnt])
			continue;
		ret = check_idq(inode->i_dquot[cnt], 1, warntype + cnt);
		if (ret)
			goto warn_put_all;
	}

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!inode->i_dquot[cnt])
			continue;
		dquot_incr_inodes(inode->i_dquot[cnt], 1);
	}

warn_put_all:
	spin_unlock(&dq_data_lock);
	if (ret == 0)
		mark_all_dquot_dirty(inode->i_dquot);
	flush_warnings(inode->i_dquot, warntype);
	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	return ret;
}
EXPORT_SYMBOL(dquot_alloc_inode);

/*
 * Convert in-memory reserved quotas to real consumed quotas
 */
int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
{
	int cnt;

	if (!dquot_active(inode)) {
		inode_claim_rsv_space(inode, number);
		return 0;
	}

	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	spin_lock(&dq_data_lock);
	/* Claim reserved quotas to allocated quotas */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (inode->i_dquot[cnt])
			dquot_claim_reserved_space(inode->i_dquot[cnt],
						   number);
	}
	/* Update inode bytes */
	inode_claim_rsv_space(inode, number);
	spin_unlock(&dq_data_lock);
	mark_all_dquot_dirty(inode->i_dquot);
	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	return 0;
}
EXPORT_SYMBOL(dquot_claim_space_nodirty);

/*
 * This operation can block, but only after everything is updated
 */
void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
{
	unsigned int cnt;
	char warntype[MAXQUOTAS];
	int reserve = flags & DQUOT_SPACE_RESERVE;

	/* First test before acquiring mutex - solves deadlocks when we
	 * re-enter the quota code and are already holding the mutex */
	if (!dquot_active(inode)) {
		inode_decr_space(inode, number, reserve);
		return;
	}

	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	spin_lock(&dq_data_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!inode->i_dquot[cnt])
			continue;
		warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number);
		if (reserve)
			dquot_free_reserved_space(inode->i_dquot[cnt], number);
		else
			dquot_decr_space(inode->i_dquot[cnt], number);
	}
	inode_decr_space(inode, number, reserve);
	spin_unlock(&dq_data_lock);

	if (reserve)
		goto out_unlock;
	mark_all_dquot_dirty(inode->i_dquot);
out_unlock:
	flush_warnings(inode->i_dquot, warntype);
	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
}
EXPORT_SYMBOL(__dquot_free_space);

/*
 * This operation can block, but only after everything is updated
 */
void dquot_free_inode(const struct inode *inode)
{
	unsigned int cnt;
	char warntype[MAXQUOTAS];

	/* First test before acquiring mutex - solves deadlocks when we
	 * re-enter the quota code and are already holding the mutex */
	if (!dquot_active(inode))
		return;

	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	spin_lock(&dq_data_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!inode->i_dquot[cnt])
			continue;
		warntype[cnt] = info_idq_free(inode->i_dquot[cnt], 1);
		dquot_decr_inodes(inode->i_dquot[cnt], 1);
	}
	spin_unlock(&dq_data_lock);
	mark_all_dquot_dirty(inode->i_dquot);
	flush_warnings(inode->i_dquot, warntype);
	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
}
EXPORT_SYMBOL(dquot_free_inode);

/*
 * Transfer the number of inodes and blocks from one diskquota to another.
 * On success, dquot references in transfer_to are consumed and references
 * to original dquots that need to be released are placed there. On failure,
 * references are kept untouched.
 *
 * This operation can block, but only after everything is updated
 * A transaction must be started when entering this function.
 *
 */
int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
{
	qsize_t space, cur_space;
	qsize_t rsv_space = 0;
	struct dquot *transfer_from[MAXQUOTAS] = {};
	int cnt, ret = 0;
	char is_valid[MAXQUOTAS] = {};
	char warntype_to[MAXQUOTAS];
	char warntype_from_inodes[MAXQUOTAS], warntype_from_space[MAXQUOTAS];

	/* First test before acquiring mutex - solves deadlocks when we
	 * re-enter the quota code and are already holding the mutex */
	if (IS_NOQUOTA(inode))
		return 0;
	/* Initialize the arrays */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		warntype_to[cnt] = QUOTA_NL_NOWARN;
	down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
	if (IS_NOQUOTA(inode)) {	/* File without quota accounting? */
		up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
		return 0;
	}
	spin_lock(&dq_data_lock);
	cur_space = inode_get_bytes(inode);
	rsv_space = inode_get_rsv_space(inode);
	space = cur_space + rsv_space;
	/* Build the transfer_from list and check the limits */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		/*
		 * Skip changes for same uid or gid or for turned off quota-type.
		 */
		if (!transfer_to[cnt])
			continue;
		/* Avoid races with quotaoff() */
		if (!sb_has_quota_active(inode->i_sb, cnt))
			continue;
		is_valid[cnt] = 1;
		transfer_from[cnt] = inode->i_dquot[cnt];
		ret = check_idq(transfer_to[cnt], 1, warntype_to + cnt);
		if (ret)
			goto over_quota;
		ret = check_bdq(transfer_to[cnt], space, 0, warntype_to + cnt);
		if (ret)
			goto over_quota;
	}

	/*
	 * Finally perform the needed transfer from transfer_from to transfer_to
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!is_valid[cnt])
			continue;
		/* Due to IO error we might not have transfer_from[] structure */
		if (transfer_from[cnt]) {
			warntype_from_inodes[cnt] =
				info_idq_free(transfer_from[cnt], 1);
			warntype_from_space[cnt] =
				info_bdq_free(transfer_from[cnt], space);
			dquot_decr_inodes(transfer_from[cnt], 1);
			dquot_decr_space(transfer_from[cnt], cur_space);
			dquot_free_reserved_space(transfer_from[cnt],
						  rsv_space);
		}

		dquot_incr_inodes(transfer_to[cnt], 1);
		dquot_incr_space(transfer_to[cnt], cur_space);
		dquot_resv_space(transfer_to[cnt], rsv_space);

		inode->i_dquot[cnt] = transfer_to[cnt];
	}
	spin_unlock(&dq_data_lock);
	up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);

	mark_all_dquot_dirty(transfer_from);
	mark_all_dquot_dirty(transfer_to);
	flush_warnings(transfer_to, warntype_to);
	flush_warnings(transfer_from, warntype_from_inodes);
	flush_warnings(transfer_from, warntype_from_space);
	/* Pass back references to put */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (is_valid[cnt])
			transfer_to[cnt] = transfer_from[cnt];
	return 0;
over_quota:
	spin_unlock(&dq_data_lock);
	up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
	flush_warnings(transfer_to, warntype_to);
	return ret;
}
EXPORT_SYMBOL(__dquot_transfer);

/* Wrapper for transferring ownership of an inode for uid/gid only
 * Called from FSXXX_setattr()
 */
int dquot_transfer(struct inode *inode, struct iattr *iattr)
{
	struct dquot *transfer_to[MAXQUOTAS] = {};
	struct super_block *sb = inode->i_sb;
	int ret;

	if (!dquot_active(inode))
		return 0;

	if (iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid)
		transfer_to[USRQUOTA] = dqget(sb, iattr->ia_uid, USRQUOTA);
	if (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)
		transfer_to[GRPQUOTA] = dqget(sb, iattr->ia_gid, GRPQUOTA);

	ret = __dquot_transfer(inode, transfer_to);
	dqput_all(transfer_to);
	return ret;
}
EXPORT_SYMBOL(dquot_transfer);
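/*
 * Example (illustrative sketch): a typical FSXXX_setattr() would transfer
 * quota before changing the owner, roughly:
 *
 *	if (is_quota_modification(inode, attr))
 *		dquot_initialize(inode);
 *	if ((attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
 *	    (attr->ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
 *		error = dquot_transfer(inode, attr);
 *		if (error)
 *			return error;
 *	}
 *
 * (is_quota_modification() is a helper in <linux/quotaops.h>; see e.g.
 * ext2_setattr() for a real caller.)
 */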
 * Definitions of diskquota operations.
 */
const struct dquot_operations dquot_operations = {
	.write_dquot	= dquot_commit,
	.acquire_dquot	= dquot_acquire,
	.release_dquot	= dquot_release,
	.mark_dirty	= dquot_mark_dquot_dirty,
	.write_info	= dquot_commit_info,
	.alloc_dquot	= dquot_alloc,
	.destroy_dquot	= dquot_destroy,
};
EXPORT_SYMBOL(dquot_operations);

/*
 * Generic helper for ->open on filesystems supporting disk quotas.
 */
int dquot_file_open(struct inode *inode, struct file *file)
{
	int error;

	error = generic_file_open(inode, file);
	if (!error && (file->f_mode & FMODE_WRITE))
		dquot_initialize(inode);
	return error;
}
EXPORT_SYMBOL(dquot_file_open);
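
/*
 * Example (sketch, for a hypothetical "foofs"): a filesystem opts into
 * these generic implementations by pointing its operations at them,
 * typically from fill_super(); dquot_quotactl_ops is defined further
 * down in this file:
 *
 *	sb->dq_op = &dquot_operations;
 *	sb->s_qcop = &dquot_quotactl_ops;
 *
 *	static const struct file_operations foofs_file_operations = {
 *		...
 *		.open = dquot_file_open,
 *	};
 *
 * Filesystems doing journalled quotas (e.g. ext3/ext4) instead provide
 * their own dquot_operations wrapping dquot_commit() and friends in a
 * transaction.
 */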

/*
 * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount)
 */
int dquot_disable(struct super_block *sb, int type, unsigned int flags)
{
	int cnt, ret = 0;
	struct quota_info *dqopt = sb_dqopt(sb);
	struct inode *toputinode[MAXQUOTAS];

	/* Cannot turn off usage accounting without also turning off limits,
	 * and cannot suspend quotas while simultaneously turning them off. */
	if ((flags & DQUOT_USAGE_ENABLED && !(flags & DQUOT_LIMITS_ENABLED))
	    || (flags & DQUOT_SUSPENDED && flags & (DQUOT_LIMITS_ENABLED |
	    DQUOT_USAGE_ENABLED)))
		return -EINVAL;

	/* We need to serialize quota_off() for device */
	mutex_lock(&dqopt->dqonoff_mutex);

	/*
	 * Skip everything if there's nothing to do. We have to do this because
	 * sometimes we are called when fill_super() failed and calling
	 * sync_fs() in such cases does no good.
	 */
	if (!sb_any_quota_loaded(sb)) {
		mutex_unlock(&dqopt->dqonoff_mutex);
		return 0;
	}
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		toputinode[cnt] = NULL;
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_loaded(sb, cnt))
			continue;

		if (flags & DQUOT_SUSPENDED) {
			spin_lock(&dq_state_lock);
			dqopt->flags |=
				dquot_state_flag(DQUOT_SUSPENDED, cnt);
			spin_unlock(&dq_state_lock);
		} else {
			spin_lock(&dq_state_lock);
			dqopt->flags &= ~dquot_state_flag(flags, cnt);
			/* Turning off suspended quotas? */
			if (!sb_has_quota_loaded(sb, cnt) &&
			    sb_has_quota_suspended(sb, cnt)) {
				dqopt->flags &= ~dquot_state_flag(
							DQUOT_SUSPENDED, cnt);
				spin_unlock(&dq_state_lock);
				iput(dqopt->files[cnt]);
				dqopt->files[cnt] = NULL;
				continue;
			}
			spin_unlock(&dq_state_lock);
		}

		/* We still have to keep quota loaded? */
		if (sb_has_quota_loaded(sb, cnt) && !(flags & DQUOT_SUSPENDED))
			continue;

		/* Note: these are blocking operations */
		drop_dquot_ref(sb, cnt);
		invalidate_dquots(sb, cnt);
		/*
		 * Now all dquots should be invalidated, all writes done so we
		 * should be only users of the info. No locks needed.
		 */
		if (info_dirty(&dqopt->info[cnt]))
			sb->dq_op->write_info(sb, cnt);
		if (dqopt->ops[cnt]->free_file_info)
			dqopt->ops[cnt]->free_file_info(sb, cnt);
		put_quota_format(dqopt->info[cnt].dqi_format);

		toputinode[cnt] = dqopt->files[cnt];
		if (!sb_has_quota_loaded(sb, cnt))
			dqopt->files[cnt] = NULL;
		dqopt->info[cnt].dqi_flags = 0;
		dqopt->info[cnt].dqi_igrace = 0;
		dqopt->info[cnt].dqi_bgrace = 0;
		dqopt->ops[cnt] = NULL;
	}
	mutex_unlock(&dqopt->dqonoff_mutex);

	/* Skip syncing and setting flags if quota files are hidden */
	if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
		goto put_inodes;

	/* Sync the superblock so that buffers with quota data are written to
	 * disk (and so userspace sees correct data afterwards). */
	if (sb->s_op->sync_fs)
		sb->s_op->sync_fs(sb, 1);
	sync_blockdev(sb->s_bdev);
	/* Now the quota files are just ordinary files and we can set the
	 * inode flags back. Moreover we discard the pagecache so that
	 * userspace sees the writes we did bypassing the pagecache. We
	 * must also discard the blockdev buffers so that we see the
	 * changes done by userspace on the next quotaon() */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (toputinode[cnt]) {
			mutex_lock(&dqopt->dqonoff_mutex);
			/* If quota was reenabled in the meantime, we have
			 * nothing to do */
			if (!sb_has_quota_loaded(sb, cnt)) {
				mutex_lock_nested(&toputinode[cnt]->i_mutex,
						  I_MUTEX_QUOTA);
				toputinode[cnt]->i_flags &= ~(S_IMMUTABLE |
						S_NOATIME | S_NOQUOTA);
				truncate_inode_pages(&toputinode[cnt]->i_data,
						     0);
				mutex_unlock(&toputinode[cnt]->i_mutex);
				mark_inode_dirty_sync(toputinode[cnt]);
			}
			mutex_unlock(&dqopt->dqonoff_mutex);
		}
	if (sb->s_bdev)
		invalidate_bdev(sb->s_bdev);
put_inodes:
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (toputinode[cnt]) {
			/* On remount RO, we keep the inode pointer so that we
			 * can reenable quota on the subsequent remount RW. We
			 * have to check the 'flags' variable and cannot use the
			 * sb_has_* helpers because another quotaon / quotaoff
			 * could change the global state before we got here. We
			 * refuse to suspend quotas when there is a pending
			 * delete on the quota file... */
			if (!(flags & DQUOT_SUSPENDED))
				iput(toputinode[cnt]);
			else if (!toputinode[cnt]->i_nlink)
				ret = -EBUSY;
		}
	return ret;
}
EXPORT_SYMBOL(dquot_disable);

int dquot_quota_off(struct super_block *sb, int type)
{
	return dquot_disable(sb, type,
			     DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
}
EXPORT_SYMBOL(dquot_quota_off);
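
/*
 * The flag combinations dquot_disable() accepts follow from the -EINVAL
 * check at its top. A few examples (sketch):
 *
 *	full quotaoff, as done by dquot_quota_off() above:
 *		dquot_disable(sb, type,
 *			      DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
 *	suspend all types, e.g. on remount read-only:
 *		dquot_disable(sb, -1, DQUOT_SUSPENDED);
 *	stop enforcing limits but keep accounting usage:
 *		dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
 *
 * Passing DQUOT_USAGE_ENABLED without DQUOT_LIMITS_ENABLED, or combining
 * DQUOT_SUSPENDED with either of them, is rejected.
 */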

/*
 * Turn quotas on for a device
 */

/*
 * Helper function to turn quotas on when we already have the inode of
 * the quota file and no quota information is loaded.
 */
static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
				unsigned int flags)
{
	struct quota_format_type *fmt = find_quota_format(format_id);
	struct super_block *sb = inode->i_sb;
	struct quota_info *dqopt = sb_dqopt(sb);
	int error;
	int oldflags = -1;

	if (!fmt)
		return -ESRCH;
	if (!S_ISREG(inode->i_mode)) {
		error = -EACCES;
		goto out_fmt;
	}
	if (IS_RDONLY(inode)) {
		error = -EROFS;
		goto out_fmt;
	}
	if (!sb->s_op->quota_write || !sb->s_op->quota_read) {
		error = -EINVAL;
		goto out_fmt;
	}
	/* Usage always has to be set... */
	if (!(flags & DQUOT_USAGE_ENABLED)) {
		error = -EINVAL;
		goto out_fmt;
	}

	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
		/* As we bypass the pagecache we must now flush all the
		 * dirty data and invalidate caches so that kernel sees
		 * changes from userspace. It is not enough to just flush
		 * the quota file since if blocksize < pagesize, invalidation
		 * of the cache could fail because of other unrelated dirty
		 * data */
		sync_filesystem(sb);
		invalidate_bdev(sb->s_bdev);
	}
	mutex_lock(&dqopt->dqonoff_mutex);
	if (sb_has_quota_loaded(sb, type)) {
		error = -EBUSY;
		goto out_lock;
	}

	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
		/* We don't want quota and atime on quota files (deadlocks
		 * possible) Also nobody should write to the file - we use
		 * special IO operations which ignore the immutable bit. */
		mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
		oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE |
					     S_NOQUOTA);
		inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE;
		mutex_unlock(&inode->i_mutex);
		/*
		 * When S_NOQUOTA is set, remove dquot references as no more
		 * references can be added
		 */
		__dquot_drop(inode);
	}

	error = -EIO;
	dqopt->files[type] = igrab(inode);
	if (!dqopt->files[type])
		goto out_lock;
	error = -EINVAL;
	if (!fmt->qf_ops->check_quota_file(sb, type))
		goto out_file_init;

	dqopt->ops[type] = fmt->qf_ops;
	dqopt->info[type].dqi_format = fmt;
	dqopt->info[type].dqi_fmt_id = format_id;
	INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
	mutex_lock(&dqopt->dqio_mutex);
	error = dqopt->ops[type]->read_file_info(sb, type);
	if (error < 0) {
		mutex_unlock(&dqopt->dqio_mutex);
		goto out_file_init;
	}
	mutex_unlock(&dqopt->dqio_mutex);
	spin_lock(&dq_state_lock);
	dqopt->flags |= dquot_state_flag(flags, type);
	spin_unlock(&dq_state_lock);

	add_dquot_ref(sb, type);
	mutex_unlock(&dqopt->dqonoff_mutex);

	return 0;

out_file_init:
	dqopt->files[type] = NULL;
	iput(inode);
out_lock:
	if (oldflags != -1) {
		mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
		/* Set the flags back (in the case of accidental quotaon()
		 * on a wrong file we don't want to mess up the flags) */
		inode->i_flags &= ~(S_NOATIME | S_NOQUOTA | S_IMMUTABLE);
		inode->i_flags |= oldflags;
		mutex_unlock(&inode->i_mutex);
	}
	mutex_unlock(&dqopt->dqonoff_mutex);
out_fmt:
	put_quota_format(fmt);

	return error;
}

/* Reenable quotas on remount RW */
int dquot_resume(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	struct inode *inode;
	int ret = 0, cnt;
	unsigned int flags;

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;

		mutex_lock(&dqopt->dqonoff_mutex);
		if (!sb_has_quota_suspended(sb, cnt)) {
			mutex_unlock(&dqopt->dqonoff_mutex);
			continue;
		}
		inode = dqopt->files[cnt];
		dqopt->files[cnt] = NULL;
		spin_lock(&dq_state_lock);
		flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED |
							DQUOT_LIMITS_ENABLED,
							cnt);
		dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, cnt);
		spin_unlock(&dq_state_lock);
		mutex_unlock(&dqopt->dqonoff_mutex);

		flags = dquot_generic_flag(flags, cnt);
		ret = vfs_load_quota_inode(inode, cnt,
					   dqopt->info[cnt].dqi_fmt_id, flags);
		iput(inode);
	}

	return ret;
}
EXPORT_SYMBOL(dquot_resume);

int dquot_quota_on(struct super_block *sb, int type, int format_id,
		   struct path *path)
{
	int error = security_quota_on(path->dentry);
	if (error)
		return error;
	/* Quota file not on the same filesystem? */
	if (path->mnt->mnt_sb != sb)
		error = -EXDEV;
	else
		error = vfs_load_quota_inode(path->dentry->d_inode, type,
					     format_id, DQUOT_USAGE_ENABLED |
					     DQUOT_LIMITS_ENABLED);
	return error;
}
EXPORT_SYMBOL(dquot_quota_on);

/*
 * More powerful function for turning on quotas allowing setting
 * of individual quota flags
 */
int dquot_enable(struct inode *inode, int type, int format_id,
		 unsigned int flags)
{
	int ret = 0;
	struct super_block *sb = inode->i_sb;
	struct quota_info *dqopt = sb_dqopt(sb);

	/* Just unsuspend quotas? */
	BUG_ON(flags & DQUOT_SUSPENDED);

	if (!flags)
		return 0;
	/* Just updating flags needed? */
	if (sb_has_quota_loaded(sb, type)) {
		mutex_lock(&dqopt->dqonoff_mutex);
		/* Now do a reliable test... */
		if (!sb_has_quota_loaded(sb, type)) {
			mutex_unlock(&dqopt->dqonoff_mutex);
			goto load_quota;
		}
		if (flags & DQUOT_USAGE_ENABLED &&
		    sb_has_quota_usage_enabled(sb, type)) {
			ret = -EBUSY;
			goto out_lock;
		}
		if (flags & DQUOT_LIMITS_ENABLED &&
		    sb_has_quota_limits_enabled(sb, type)) {
			ret = -EBUSY;
			goto out_lock;
		}
		spin_lock(&dq_state_lock);
		sb_dqopt(sb)->flags |= dquot_state_flag(flags, type);
		spin_unlock(&dq_state_lock);
out_lock:
		mutex_unlock(&dqopt->dqonoff_mutex);
		return ret;
	}

load_quota:
	return vfs_load_quota_inode(inode, type, format_id, flags);
}
EXPORT_SYMBOL(dquot_enable);
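
/*
 * Example (sketch): the expected pairing of suspend and resume in a
 * hypothetical ->remount_fs() implementation; dquot_suspend() is the
 * quotaops.h wrapper for dquot_disable(sb, type, DQUOT_SUSPENDED):
 *
 *	if (*flags & MS_RDONLY)
 *		dquot_suspend(sb, -1);
 *	else
 *		dquot_resume(sb, -1);
 */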

/*
 * This function is used when a filesystem needs to initialize quotas
 * at mount time.
 */
int dquot_quota_on_mount(struct super_block *sb, char *qf_name,
			 int format_id, int type)
{
	struct dentry *dentry;
	int error;

	mutex_lock(&sb->s_root->d_inode->i_mutex);
	dentry = lookup_one_len(qf_name, sb->s_root, strlen(qf_name));
	mutex_unlock(&sb->s_root->d_inode->i_mutex);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	if (!dentry->d_inode) {
		error = -ENOENT;
		goto out;
	}

	error = security_quota_on(dentry);
	if (!error)
		error = vfs_load_quota_inode(dentry->d_inode, type, format_id,
				DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);

out:
	dput(dentry);
	return error;
}
EXPORT_SYMBOL(dquot_quota_on_mount);

static inline qsize_t qbtos(qsize_t blocks)
{
	return blocks << QIF_DQBLKSIZE_BITS;
}

static inline qsize_t stoqb(qsize_t space)
{
	return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS;
}

/* Generic routine for getting common part of quota structure */
static void do_get_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
{
	struct mem_dqblk *dm = &dquot->dq_dqb;

	memset(di, 0, sizeof(*di));
	di->d_version = FS_DQUOT_VERSION;
	di->d_flags = dquot->dq_type == USRQUOTA ?
			FS_USER_QUOTA : FS_GROUP_QUOTA;
	di->d_id = dquot->dq_id;

	spin_lock(&dq_data_lock);
	di->d_blk_hardlimit = stoqb(dm->dqb_bhardlimit);
	di->d_blk_softlimit = stoqb(dm->dqb_bsoftlimit);
	di->d_ino_hardlimit = dm->dqb_ihardlimit;
	di->d_ino_softlimit = dm->dqb_isoftlimit;
	di->d_bcount = dm->dqb_curspace + dm->dqb_rsvspace;
	di->d_icount = dm->dqb_curinodes;
	di->d_btimer = dm->dqb_btime;
	di->d_itimer = dm->dqb_itime;
	spin_unlock(&dq_data_lock);
}

int dquot_get_dqblk(struct super_block *sb, int type, qid_t id,
		    struct fs_disk_quota *di)
{
	struct dquot *dquot;

	dquot = dqget(sb, id, type);
	if (!dquot)
		return -ESRCH;
	do_get_dqblk(dquot, di);
	dqput(dquot);

	return 0;
}
EXPORT_SYMBOL(dquot_get_dqblk);
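
/*
 * Worked example for the conversions above, assuming QIF_DQBLKSIZE_BITS
 * is 10 (1KiB quota blocks): qbtos(3) == 3072 bytes, stoqb(3072) == 3 and
 * stoqb(3073) == 4 (space is rounded up to whole blocks). Note that
 * do_get_dqblk() reports the limits in quota blocks via stoqb() but
 * d_bcount in bytes, straight from dqb_curspace + dqb_rsvspace.
 */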

#define VFS_FS_DQ_MASK \
	(FS_DQ_BCOUNT | FS_DQ_BSOFT | FS_DQ_BHARD | \
	 FS_DQ_ICOUNT | FS_DQ_ISOFT | FS_DQ_IHARD | \
	 FS_DQ_BTIMER | FS_DQ_ITIMER)

/* Generic routine for setting common part of quota structure */
static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
{
	struct mem_dqblk *dm = &dquot->dq_dqb;
	int check_blim = 0, check_ilim = 0;
	struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type];

	if (di->d_fieldmask & ~VFS_FS_DQ_MASK)
		return -EINVAL;

	if (((di->d_fieldmask & FS_DQ_BSOFT) &&
	     (di->d_blk_softlimit > dqi->dqi_maxblimit)) ||
	    ((di->d_fieldmask & FS_DQ_BHARD) &&
	     (di->d_blk_hardlimit > dqi->dqi_maxblimit)) ||
	    ((di->d_fieldmask & FS_DQ_ISOFT) &&
	     (di->d_ino_softlimit > dqi->dqi_maxilimit)) ||
	    ((di->d_fieldmask & FS_DQ_IHARD) &&
	     (di->d_ino_hardlimit > dqi->dqi_maxilimit)))
		return -ERANGE;

	spin_lock(&dq_data_lock);
	if (di->d_fieldmask & FS_DQ_BCOUNT) {
		dm->dqb_curspace = di->d_bcount - dm->dqb_rsvspace;
		check_blim = 1;
		set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & FS_DQ_BSOFT)
		dm->dqb_bsoftlimit = qbtos(di->d_blk_softlimit);
	if (di->d_fieldmask & FS_DQ_BHARD)
		dm->dqb_bhardlimit = qbtos(di->d_blk_hardlimit);
	if (di->d_fieldmask & (FS_DQ_BSOFT | FS_DQ_BHARD)) {
		check_blim = 1;
		set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & FS_DQ_ICOUNT) {
		dm->dqb_curinodes = di->d_icount;
		check_ilim = 1;
		set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & FS_DQ_ISOFT)
		dm->dqb_isoftlimit = di->d_ino_softlimit;
	if (di->d_fieldmask & FS_DQ_IHARD)
		dm->dqb_ihardlimit = di->d_ino_hardlimit;
	if (di->d_fieldmask & (FS_DQ_ISOFT | FS_DQ_IHARD)) {
		check_ilim = 1;
		set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & FS_DQ_BTIMER) {
		dm->dqb_btime = di->d_btimer;
		check_blim = 1;
		set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & FS_DQ_ITIMER) {
		dm->dqb_itime = di->d_itimer;
		check_ilim = 1;
		set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
	}

	if (check_blim) {
		if (!dm->dqb_bsoftlimit ||
		    dm->dqb_curspace < dm->dqb_bsoftlimit) {
			dm->dqb_btime = 0;
			clear_bit(DQ_BLKS_B, &dquot->dq_flags);
		} else if (!(di->d_fieldmask & FS_DQ_BTIMER))
			/* Set grace only if user hasn't provided his own... */
			dm->dqb_btime = get_seconds() + dqi->dqi_bgrace;
	}
	if (check_ilim) {
		if (!dm->dqb_isoftlimit ||
		    dm->dqb_curinodes < dm->dqb_isoftlimit) {
			dm->dqb_itime = 0;
			clear_bit(DQ_INODES_B, &dquot->dq_flags);
		} else if (!(di->d_fieldmask & FS_DQ_ITIMER))
			/* Set grace only if user hasn't provided his own... */
			dm->dqb_itime = get_seconds() + dqi->dqi_igrace;
	}
	if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit ||
	    dm->dqb_isoftlimit)
		clear_bit(DQ_FAKE_B, &dquot->dq_flags);
	else
		set_bit(DQ_FAKE_B, &dquot->dq_flags);
	spin_unlock(&dq_data_lock);
	mark_dquot_dirty(dquot);

	return 0;
}

int dquot_set_dqblk(struct super_block *sb, int type, qid_t id,
		    struct fs_disk_quota *di)
{
	struct dquot *dquot;
	int rc;

	dquot = dqget(sb, id, type);
	if (!dquot) {
		rc = -ESRCH;
		goto out;
	}
	rc = do_set_dqblk(dquot, di);
	dqput(dquot);
out:
	return rc;
}
EXPORT_SYMBOL(dquot_set_dqblk);
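
/*
 * Example (sketch): setting a 1GiB soft / 2GiB hard block limit for one
 * user. Limits in struct fs_disk_quota are given in quota blocks, so with
 * 1KiB blocks 1GiB is 1024 * 1024:
 *
 *	struct fs_disk_quota di = {
 *		.d_version = FS_DQUOT_VERSION,
 *		.d_fieldmask = FS_DQ_BSOFT | FS_DQ_BHARD,
 *		.d_blk_softlimit = 1024 * 1024,
 *		.d_blk_hardlimit = 2 * 1024 * 1024,
 *	};
 *	int err = dquot_set_dqblk(sb, USRQUOTA, uid, &di);
 *
 * Fields outside d_fieldmask are ignored; bits outside VFS_FS_DQ_MASK
 * make do_set_dqblk() return -EINVAL.
 */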

/* Generic routine for getting common part of quota file information */
int dquot_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
{
	struct mem_dqinfo *mi;

	mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
	if (!sb_has_quota_active(sb, type)) {
		mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
		return -ESRCH;
	}
	mi = sb_dqopt(sb)->info + type;
	spin_lock(&dq_data_lock);
	ii->dqi_bgrace = mi->dqi_bgrace;
	ii->dqi_igrace = mi->dqi_igrace;
	ii->dqi_flags = mi->dqi_flags & DQF_MASK;
	ii->dqi_valid = IIF_ALL;
	spin_unlock(&dq_data_lock);
	mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
	return 0;
}
EXPORT_SYMBOL(dquot_get_dqinfo);

/* Generic routine for setting common part of quota file information */
int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
{
	struct mem_dqinfo *mi;
	int err = 0;

	mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
	if (!sb_has_quota_active(sb, type)) {
		err = -ESRCH;
		goto out;
	}
	mi = sb_dqopt(sb)->info + type;
	spin_lock(&dq_data_lock);
	if (ii->dqi_valid & IIF_BGRACE)
		mi->dqi_bgrace = ii->dqi_bgrace;
	if (ii->dqi_valid & IIF_IGRACE)
		mi->dqi_igrace = ii->dqi_igrace;
	if (ii->dqi_valid & IIF_FLAGS)
		mi->dqi_flags = (mi->dqi_flags & ~DQF_MASK) |
				(ii->dqi_flags & DQF_MASK);
	spin_unlock(&dq_data_lock);
	mark_info_dirty(sb, type);
	/* Force write to disk */
	sb->dq_op->write_info(sb, type);
out:
	mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
	return err;
}
EXPORT_SYMBOL(dquot_set_dqinfo);

const struct quotactl_ops dquot_quotactl_ops = {
	.quota_on	= dquot_quota_on,
	.quota_off	= dquot_quota_off,
	.quota_sync	= dquot_quota_sync,
	.get_info	= dquot_get_dqinfo,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk
};
EXPORT_SYMBOL(dquot_quotactl_ops);

static int do_proc_dqstats(struct ctl_table *table, int write,
			   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	unsigned int type = (int *)table->data - dqstats.stat;

	/* Update global table */
	dqstats.stat[type] =
			percpu_counter_sum_positive(&dqstats.counter[type]);
	return proc_dointvec(table, write, buffer, lenp, ppos);
}

static ctl_table fs_dqstats_table[] = {
	{
		.procname	= "lookups",
		.data		= &dqstats.stat[DQST_LOOKUPS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "drops",
		.data		= &dqstats.stat[DQST_DROPS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "reads",
		.data		= &dqstats.stat[DQST_READS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "writes",
		.data		= &dqstats.stat[DQST_WRITES],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "cache_hits",
		.data		= &dqstats.stat[DQST_CACHE_HITS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "allocated_dquots",
		.data		= &dqstats.stat[DQST_ALLOC_DQUOTS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "free_dquots",
		.data		= &dqstats.stat[DQST_FREE_DQUOTS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "syncs",
		.data		= &dqstats.stat[DQST_SYNCS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
#ifdef CONFIG_PRINT_QUOTA_WARNING
	{
		.procname	= "warnings",
		.data		= &flag_print_warnings,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
#endif
	{ },
};

static ctl_table fs_table[] = {
	{
		.procname	= "quota",
		.mode		= 0555,
		.child		= fs_dqstats_table,
	},
	{ },
};

static ctl_table sys_table[] = {
	{
		.procname	= "fs",
		.mode		= 0555,
		.child		= fs_table,
	},
	{ },
};
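
/*
 * The nested ctl_table definitions above register the statistics as
 * sysctls under /proc/sys/fs/quota/, so they can be read e.g. with
 * "cat /proc/sys/fs/quota/allocated_dquots" (and, when
 * CONFIG_PRINT_QUOTA_WARNING is set, console warnings can be toggled
 * via .../warnings).
 */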

static int __init dquot_init(void)
{
	int i, ret;
	unsigned long nr_hash, order;

	printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__);

	register_sysctl_table(sys_table);

	dquot_cachep = kmem_cache_create("dquot",
			sizeof(struct dquot), sizeof(unsigned long) * 4,
			(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
				SLAB_MEM_SPREAD|SLAB_PANIC),
			NULL);

	order = 0;
	dquot_hash = (struct hlist_head *)__get_free_pages(GFP_ATOMIC, order);
	if (!dquot_hash)
		panic("Cannot create dquot hash table");

	for (i = 0; i < _DQST_DQSTAT_LAST; i++) {
		ret = percpu_counter_init(&dqstats.counter[i], 0);
		if (ret)
			panic("Cannot create dquot stat counters");
	}

	/* Find power-of-two hlist_heads which can fit into allocation */
	nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
	dq_hash_bits = 0;
	do {
		dq_hash_bits++;
	} while (nr_hash >> dq_hash_bits);
	dq_hash_bits--;

	nr_hash = 1UL << dq_hash_bits;
	dq_hash_mask = nr_hash - 1;
	for (i = 0; i < nr_hash; i++)
		INIT_HLIST_HEAD(dquot_hash + i);

	printk(KERN_INFO "Dquot-cache hash table entries: %ld (order %ld, %ld bytes)\n",
	       nr_hash, order, (PAGE_SIZE << order));

	register_shrinker(&dqcache_shrinker);

	return 0;
}
module_init(dquot_init);