xref: /openbmc/linux/fs/quota/dquot.c (revision c469070a)
1 /*
2  * Implementation of the diskquota system for the LINUX operating system. QUOTA
3  * is implemented using the BSD system call interface as the means of
4  * communication with the user level. This file contains the generic routines
5  * called by the different filesystems on allocation of an inode or block.
6  * These routines take care of the administration needed to have a consistent
7  * diskquota tracking system. The ideas of both user and group quotas are based
8  * on the Melbourne quota system as used on BSD derived systems. The internal
9  * implementation is based on one of the several variants of the LINUX
10  * inode-subsystem with added complexity of the diskquota system.
11  *
12  * Author:	Marco van Wieringen <mvw@planets.elm.net>
13  *
14  * Fixes:   Dmitry Gorodchanin <pgmdsg@ibi.com>, 11 Feb 96
15  *
16  *		Revised list management to avoid races
17  *		-- Bill Hawes, <whawes@star.net>, 9/98
18  *
19  *		Fixed races in dquot_transfer(), dqget() and dquot_alloc_...().
20  *		As the consequence the locking was moved from dquot_decr_...(),
21  *		dquot_incr_...() to calling functions.
22  *		invalidate_dquots() now writes modified dquots.
23  *		Serialized quota_off() and quota_on() for mount point.
24  *		Fixed a few bugs in grow_dquots().
25  *		Fixed deadlock in write_dquot() - we no longer account quotas on
26  *		quota files
27  *		remove_dquot_ref() moved to inode.c - it now traverses through inodes
28  *		add_dquot_ref() restarts after blocking
29  *		Added check for bogus uid and fixed check for group in quotactl.
30  *		Jan Kara, <jack@suse.cz>, sponsored by SuSE CR, 10-11/99
31  *
32  *		Used struct list_head instead of own list struct
33  *		Invalidation of referenced dquots is no longer possible
34  *		Improved free_dquots list management
35  *		Quota and i_blocks are now updated in one place to avoid races
36  *		Warnings are now delayed so we won't block in critical section
37  *		Write updated not to require dquot lock
38  *		Jan Kara, <jack@suse.cz>, 9/2000
39  *
40  *		Added dynamic quota structure allocation
41  *		Jan Kara <jack@suse.cz> 12/2000
42  *
43  *		Rewritten quota interface. Implemented new quota format and
44  *		formats registering.
45  *		Jan Kara, <jack@suse.cz>, 2001,2002
46  *
47  *		New SMP locking.
48  *		Jan Kara, <jack@suse.cz>, 10/2002
49  *
50  *		Added journalled quota support, fix lock inversion problems
51  *		Jan Kara, <jack@suse.cz>, 2003,2004
52  *
53  * (C) Copyright 1994 - 1997 Marco van Wieringen
54  */
55 
56 #include <linux/errno.h>
57 #include <linux/kernel.h>
58 #include <linux/fs.h>
59 #include <linux/mount.h>
60 #include <linux/mm.h>
61 #include <linux/time.h>
62 #include <linux/types.h>
63 #include <linux/string.h>
64 #include <linux/fcntl.h>
65 #include <linux/stat.h>
66 #include <linux/tty.h>
67 #include <linux/file.h>
68 #include <linux/slab.h>
69 #include <linux/sysctl.h>
70 #include <linux/init.h>
71 #include <linux/module.h>
72 #include <linux/proc_fs.h>
73 #include <linux/security.h>
74 #include <linux/kmod.h>
75 #include <linux/namei.h>
76 #include <linux/buffer_head.h>
77 #include <linux/capability.h>
78 #include <linux/quotaops.h>
79 #include <linux/writeback.h> /* for inode_lock, oddly enough.. */
80 
81 #include <asm/uaccess.h>
82 
83 #define __DQUOT_PARANOIA
84 
85 /*
86  * There are three quota SMP locks. dq_list_lock protects all lists with quotas
87  * and quota formats, dqstats structure containing statistics about the lists
88  * dq_data_lock protects data from dq_dqb and also mem_dqinfo structures and
89  * also guards consistency of dquot->dq_dqb with inode->i_blocks, i_bytes.
90  * i_blocks and i_bytes updates itself are guarded by i_lock acquired directly
91  * in inode_add_bytes() and inode_sub_bytes(). dq_state_lock protects
92  * modifications of quota state (on quotaon and quotaoff) and readers who care
93  * about latest values take it as well.
94  *
95  * The spinlock ordering is hence: dq_data_lock > dq_list_lock > i_lock,
96  *   dq_list_lock > dq_state_lock
97  *
 98  * Note that some things (eg. sb pointer, type, id) do not change during
 99  * the life of the dquot structure and so need not be protected by a lock
100  *
101  * Any operation working on dquots via inode pointers must hold dqptr_sem.  If
102  * operation is just reading pointers from inode (or not using them at all) the
103  * read lock is enough. If pointers are altered function must hold write lock.
104  * Special care needs to be taken about S_NOQUOTA inode flag (marking that
105  * inode is a quota file). Functions adding pointers from inode to dquots have
106  * to check this flag under dqptr_sem and then (if S_NOQUOTA is not set) they
107  * have to do all pointer modifications before dropping dqptr_sem. This makes
108  * sure they cannot race with quotaon which first sets S_NOQUOTA flag and
109  * then drops all pointers to dquots from an inode.
110  *
111  * Each dquot has its dq_lock mutex. Locked dquots might not be referenced
112  * from inodes (dquot_alloc_space() and such don't check the dq_lock).
113  * Currently dquot is locked only when it is being read to memory (or space for
114  * it is being allocated) on the first dqget() and when it is being released on
 115  * the last dqput(). The allocation and release operations are serialized by
116  * the dq_lock and by checking the use count in dquot_release().  Write
117  * operations on dquots don't hold dq_lock as they copy data under dq_data_lock
118  * spinlock to internal buffers before writing.
119  *
120  * Lock ordering (including related VFS locks) is the following:
121  *   i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_lock >
122  *   dqio_mutex
123  * The lock ordering of dqptr_sem imposed by quota code is only dqonoff_sem >
124  * dqptr_sem. But filesystem has to count with the fact that functions such as
125  * dquot_alloc_space() acquire dqptr_sem and they usually have to be called
126  * from inside a transaction to keep filesystem consistency after a crash. Also
127  * filesystems usually want to do some IO on dquot from ->mark_dirty which is
128  * called with dqptr_sem held.
129  * i_mutex on quota files is special (it's below dqio_mutex)
130  */
131 
/* Protects the dquot lists (inuse/free/hash/dirty), the format list and
 * dqstats -- see the locking comment above. */
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
/* Protects quota state transitions (quotaon/quotaoff). */
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock);
/* Protects dq_dqb usage data and mem_dqinfo; exported for filesystems. */
__cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
EXPORT_SYMBOL(dq_data_lock);
136 
/* Quota type names used in diagnostic messages (see dqput()). */
static char *quotatypes[] = INITQFNAMES;
static struct quota_format_type *quota_formats;	/* List of registered formats */
/* Maps format ids to module names for on-demand loading (find_quota_format) */
static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;

/* SLAB cache for dquot structures */
static struct kmem_cache *dquot_cachep;
143 
/*
 * Add a quota format to the head of the global format list.
 * Always returns 0; the list is protected by dq_list_lock.
 */
int register_quota_format(struct quota_format_type *fmt)
{
	spin_lock(&dq_list_lock);
	fmt->qf_next = quota_formats;
	quota_formats = fmt;
	spin_unlock(&dq_list_lock);
	return 0;
}
EXPORT_SYMBOL(register_quota_format);
153 
154 void unregister_quota_format(struct quota_format_type *fmt)
155 {
156 	struct quota_format_type **actqf;
157 
158 	spin_lock(&dq_list_lock);
159 	for (actqf = &quota_formats; *actqf && *actqf != fmt;
160 	     actqf = &(*actqf)->qf_next)
161 		;
162 	if (*actqf)
163 		*actqf = (*actqf)->qf_next;
164 	spin_unlock(&dq_list_lock);
165 }
166 EXPORT_SYMBOL(unregister_quota_format);
167 
/*
 * Look up a registered quota format by id and pin its owning module.
 * If the format is not present (or its module is going away), try to
 * load it via request_module() using the module_names[] table and
 * search again. Returns the format with a module reference held, or
 * NULL on failure.
 */
static struct quota_format_type *find_quota_format(int id)
{
	struct quota_format_type *actqf;

	spin_lock(&dq_list_lock);
	for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
	     actqf = actqf->qf_next)
		;
	if (!actqf || !try_module_get(actqf->qf_owner)) {
		int qm;

		/* request_module() sleeps, so drop the spinlock first. */
		spin_unlock(&dq_list_lock);

		for (qm = 0; module_names[qm].qm_fmt_id &&
			     module_names[qm].qm_fmt_id != id; qm++)
			;
		if (!module_names[qm].qm_fmt_id ||
		    request_module(module_names[qm].qm_mod_name))
			return NULL;

		/* Retry the lookup now that the module may be loaded. */
		spin_lock(&dq_list_lock);
		for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
		     actqf = actqf->qf_next)
			;
		if (actqf && !try_module_get(actqf->qf_owner))
			actqf = NULL;
	}
	spin_unlock(&dq_list_lock);
	return actqf;
}
198 
/* Drop the module reference taken by find_quota_format(). */
static void put_quota_format(struct quota_format_type *fmt)
{
	module_put(fmt->qf_owner);
}
203 
204 /*
205  * Dquot List Management:
206  * The quota code uses three lists for dquot management: the inuse_list,
207  * free_dquots, and dquot_hash[] array. A single dquot structure may be
208  * on all three lists, depending on its current state.
209  *
210  * All dquots are placed to the end of inuse_list when first created, and this
211  * list is used for invalidate operation, which must look at every dquot.
212  *
213  * Unused dquots (dq_count == 0) are added to the free_dquots list when freed,
214  * and this list is searched whenever we need an available dquot.  Dquots are
215  * removed from the list as soon as they are used again, and
216  * dqstats.free_dquots gives the number of dquots on the list. When
217  * dquot is invalidated it's completely released from memory.
218  *
219  * Dquots with a specific identity (device, type and id) are placed on
 220  * one of the dquot_hash[] hash chains. This provides an efficient search
221  * mechanism to locate a specific dquot.
222  */
223 
/* All allocated dquots, in allocation order (see put_inuse()). */
static LIST_HEAD(inuse_list);
/* Dquots with dq_count == 0, reclaimable by prune_dqcache(). */
static LIST_HEAD(free_dquots);
/* Hash table geometry -- presumably set up at init time; the setup code
 * is outside this chunk. */
static unsigned int dq_hash_bits, dq_hash_mask;
static struct hlist_head *dquot_hash;

struct dqstats dqstats;
EXPORT_SYMBOL(dqstats);
231 
/*
 * Hash a (sb, id, type) triple into an index into dquot_hash[].
 * The sb pointer is shifted right by L1_CACHE_SHIFT to discard low
 * pointer bits that carry little entropy.
 */
static inline unsigned int
hashfn(const struct super_block *sb, unsigned int id, int type)
{
	unsigned long tmp;

	tmp = (((unsigned long)sb>>L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type);
	return (tmp + (tmp >> dq_hash_bits)) & dq_hash_mask;
}
240 
/*
 * Following list functions expect dq_list_lock to be held
 */

/* Link a dquot into the hash chain for its (sb, id, type) identity. */
static inline void insert_dquot_hash(struct dquot *dquot)
{
	struct hlist_head *head;
	head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id, dquot->dq_type);
	hlist_add_head(&dquot->dq_hash, head);
}
250 
/* Unhash a dquot; _init keeps the node in a consistent state afterwards. */
static inline void remove_dquot_hash(struct dquot *dquot)
{
	hlist_del_init(&dquot->dq_hash);
}
255 
256 static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb,
257 				unsigned int id, int type)
258 {
259 	struct hlist_node *node;
260 	struct dquot *dquot;
261 
262 	hlist_for_each (node, dquot_hash+hashent) {
263 		dquot = hlist_entry(node, struct dquot, dq_hash);
264 		if (dquot->dq_sb == sb && dquot->dq_id == id &&
265 		    dquot->dq_type == type)
266 			return dquot;
267 	}
268 	return NULL;
269 }
270 
/* Add a dquot to the tail of the free list */
static inline void put_dquot_last(struct dquot *dquot)
{
	list_add_tail(&dquot->dq_free, &free_dquots);
	dqstats.free_dquots++;	/* stats counter, guarded by dq_list_lock */
}
277 
278 static inline void remove_free_dquot(struct dquot *dquot)
279 {
280 	if (list_empty(&dquot->dq_free))
281 		return;
282 	list_del_init(&dquot->dq_free);
283 	dqstats.free_dquots--;
284 }
285 
/* Account a newly created dquot on the global inuse list. */
static inline void put_inuse(struct dquot *dquot)
{
	/* We add to the back of inuse list so we don't have to restart
	 * when traversing this list and we block */
	list_add_tail(&dquot->dq_inuse, &inuse_list);
	dqstats.allocated_dquots++;
}
293 
/* Unaccount a dquot that is about to be destroyed. */
static inline void remove_inuse(struct dquot *dquot)
{
	dqstats.allocated_dquots--;
	list_del(&dquot->dq_inuse);
}
299 /*
300  * End of list functions needing dq_list_lock
301  */
302 
/*
 * Wait until any concurrent holder of dq_lock (dquot_acquire() or
 * dquot_release()) has finished, without keeping the lock ourselves.
 */
static void wait_on_dquot(struct dquot *dquot)
{
	mutex_lock(&dquot->dq_lock);
	mutex_unlock(&dquot->dq_lock);
}
308 
/* Is this dquot marked modified (DQ_MOD_B set)? */
static inline int dquot_dirty(struct dquot *dquot)
{
	return test_bit(DQ_MOD_B, &dquot->dq_flags);
}
313 
/* Dirty a dquot through the filesystem's mark_dirty method. */
static inline int mark_dquot_dirty(struct dquot *dquot)
{
	return dquot->dq_sb->dq_op->mark_dirty(dquot);
}
318 
/*
 * Default ->mark_dirty implementation: set DQ_MOD_B and, on the clean
 * to dirty transition, put the dquot on its per-type dirty list.
 * Always returns 0.
 */
int dquot_mark_dquot_dirty(struct dquot *dquot)
{
	spin_lock(&dq_list_lock);
	if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags))
		list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)->
				info[dquot->dq_type].dqi_dirty_list);
	spin_unlock(&dq_list_lock);
	return 0;
}
EXPORT_SYMBOL(dquot_mark_dquot_dirty);
329 
330 /* Dirtify all the dquots - this can block when journalling */
331 static inline int mark_all_dquot_dirty(struct dquot * const *dquot)
332 {
333 	int ret, err, cnt;
334 
335 	ret = err = 0;
336 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
337 		if (dquot[cnt])
338 			/* Even in case of error we have to continue */
339 			ret = mark_dquot_dirty(dquot[cnt]);
340 		if (!err)
341 			err = ret;
342 	}
343 	return err;
344 }
345 
346 static inline void dqput_all(struct dquot **dquot)
347 {
348 	unsigned int cnt;
349 
350 	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
351 		dqput(dquot[cnt]);
352 }
353 
/* This function needs dq_list_lock */
/* Returns 1 when the dquot was dirty and has now been taken off the
 * dirty list, 0 when it was already clean. */
static inline int clear_dquot_dirty(struct dquot *dquot)
{
	if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags))
		return 0;
	list_del_init(&dquot->dq_dirty);
	return 1;
}
362 
/* Flag the per-type quota file info as needing a write-out. */
void mark_info_dirty(struct super_block *sb, int type)
{
	set_bit(DQF_INFO_DIRTY_B, &sb_dqopt(sb)->info[type].dqi_flags);
}
EXPORT_SYMBOL(mark_info_dirty);
368 
/*
 *	Read dquot from disk and alloc space for it
 *
 * Serialized against dquot_release() by dq_lock; quota-file I/O runs
 * under dqio_mutex. Returns 0 on success or a negative error from the
 * format callbacks.
 */

int dquot_acquire(struct dquot *dquot)
{
	int ret = 0, ret2 = 0;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dquot->dq_lock);
	mutex_lock(&dqopt->dqio_mutex);
	/* Read on-disk data only once (DQ_READ_B latches). */
	if (!test_bit(DQ_READ_B, &dquot->dq_flags))
		ret = dqopt->ops[dquot->dq_type]->read_dqblk(dquot);
	if (ret < 0)
		goto out_iolock;
	set_bit(DQ_READ_B, &dquot->dq_flags);
	/* Instantiate dquot if needed */
	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && !dquot->dq_off) {
		ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
		/* Write the info if needed */
		if (info_dirty(&dqopt->info[dquot->dq_type])) {
			ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
						dquot->dq_sb, dquot->dq_type);
		}
		if (ret < 0)
			goto out_iolock;
		/* commit succeeded but info write failed -- report that */
		if (ret2 < 0) {
			ret = ret2;
			goto out_iolock;
		}
	}
	set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_iolock:
	mutex_unlock(&dqopt->dqio_mutex);
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_acquire);
407 
/*
 *	Write dquot to disk
 *
 * Clears the dirty state (under dq_list_lock) and commits the usage
 * data via the format's commit_dqblk callback; quota-file info is
 * flushed too when dirty. A dquot someone else already committed is
 * skipped. Returns 0 or a negative error.
 */
int dquot_commit(struct dquot *dquot)
{
	int ret = 0, ret2 = 0;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dqopt->dqio_mutex);
	spin_lock(&dq_list_lock);
	if (!clear_dquot_dirty(dquot)) {
		/* Already clean -- nothing to write. */
		spin_unlock(&dq_list_lock);
		goto out_sem;
	}
	spin_unlock(&dq_list_lock);
	/* Inactive dquot can be only if there was error during read/init
	 * => we have better not writing it */
	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
		ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
		if (info_dirty(&dqopt->info[dquot->dq_type])) {
			ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
						dquot->dq_sb, dquot->dq_type);
		}
		if (ret >= 0)
			ret = ret2;
	}
out_sem:
	mutex_unlock(&dqopt->dqio_mutex);
	return ret;
}
EXPORT_SYMBOL(dquot_commit);
439 
/*
 *	Release dquot
 *
 * Frees the dquot's on-disk footprint via release_dqblk and clears
 * DQ_ACTIVE_B. Bails out if another dqget() raced in (dq_count > 1),
 * in which case the dquot stays active. Returns 0 or a negative error.
 */
int dquot_release(struct dquot *dquot)
{
	int ret = 0, ret2 = 0;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dquot->dq_lock);
	/* Check whether we are not racing with some other dqget() */
	if (atomic_read(&dquot->dq_count) > 1)
		goto out_dqlock;
	mutex_lock(&dqopt->dqio_mutex);
	if (dqopt->ops[dquot->dq_type]->release_dqblk) {
		ret = dqopt->ops[dquot->dq_type]->release_dqblk(dquot);
		/* Write the info */
		if (info_dirty(&dqopt->info[dquot->dq_type])) {
			ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
						dquot->dq_sb, dquot->dq_type);
		}
		if (ret >= 0)
			ret = ret2;
	}
	clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
	mutex_unlock(&dqopt->dqio_mutex);
out_dqlock:
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_release);
470 
/* Default ->destroy_dquot: return the dquot to the slab cache. */
void dquot_destroy(struct dquot *dquot)
{
	kmem_cache_free(dquot_cachep, dquot);
}
EXPORT_SYMBOL(dquot_destroy);
476 
/* Destroy a dquot through the filesystem's destroy_dquot method. */
static inline void do_destroy_dquot(struct dquot *dquot)
{
	dquot->dq_sb->dq_op->destroy_dquot(dquot);
}
481 
/* Invalidate all dquots on the list. Note that this function is called after
 * quota is disabled and pointers from inodes removed so there cannot be new
 * quota users. There can still be some users of quotas due to inodes being
 * just deleted or pruned by prune_icache() (those are not attached to any
 * list) or parallel quotactl call. We have to wait for such users.
 */
static void invalidate_dquots(struct super_block *sb, int type)
{
	struct dquot *dquot, *tmp;

restart:
	spin_lock(&dq_list_lock);
	list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
		/* Only dquots of the given filesystem and type are affected. */
		if (dquot->dq_sb != sb)
			continue;
		if (dquot->dq_type != type)
			continue;
		/* Wait for dquot users */
		if (atomic_read(&dquot->dq_count)) {
			DEFINE_WAIT(wait);

			/* Extra reference keeps the dquot alive while we sleep. */
			atomic_inc(&dquot->dq_count);
			prepare_to_wait(&dquot->dq_wait_unused, &wait,
					TASK_UNINTERRUPTIBLE);
			spin_unlock(&dq_list_lock);
			/* Once dqput() wakes us up, we know it's time to free
			 * the dquot.
			 * IMPORTANT: we rely on the fact that there is always
			 * at most one process waiting for dquot to free.
			 * Otherwise dq_count would be > 1 and we would never
			 * wake up.
			 */
			if (atomic_read(&dquot->dq_count) > 1)
				schedule();
			finish_wait(&dquot->dq_wait_unused, &wait);
			dqput(dquot);
			/* At this moment the dquot need not exist (it could be
			 * reclaimed by prune_dqcache(). Hence we must
			 * restart. */
			goto restart;
		}
		/*
		 * Quota now has no users and it has been written on last
		 * dqput()
		 */
		remove_dquot_hash(dquot);
		remove_free_dquot(dquot);
		remove_inuse(dquot);
		do_destroy_dquot(dquot);
	}
	spin_unlock(&dq_list_lock);
}
534 
/* Call callback for every active dquot on given filesystem.
 * Stops and returns the first negative value @fn produces; otherwise
 * returns 0. Holds dqonoff_mutex for the whole scan so quota cannot be
 * turned off underneath us. */
int dquot_scan_active(struct super_block *sb,
		      int (*fn)(struct dquot *dquot, unsigned long priv),
		      unsigned long priv)
{
	struct dquot *dquot, *old_dquot = NULL;
	int ret = 0;

	mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
	spin_lock(&dq_list_lock);
	list_for_each_entry(dquot, &inuse_list, dq_inuse) {
		if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
			continue;
		if (dquot->dq_sb != sb)
			continue;
		/* Now we have active dquot so we can just increase use count */
		atomic_inc(&dquot->dq_count);
		dqstats.lookups++;
		spin_unlock(&dq_list_lock);
		/* Drop the previous iteration's reference outside the lock. */
		dqput(old_dquot);
		old_dquot = dquot;
		ret = fn(dquot, priv);
		if (ret < 0)
			goto out;
		spin_lock(&dq_list_lock);
		/* We are safe to continue now because our dquot could not
		 * be moved out of the inuse list while we hold the reference */
	}
	spin_unlock(&dq_list_lock);
out:
	dqput(old_dquot);
	mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
	return ret;
}
EXPORT_SYMBOL(dquot_scan_active);
570 
/*
 * Write all dirty dquots (and dirty quota-file info) of @sb to disk.
 * @type selects a single quota type, or -1 for all types. Always
 * returns 0.
 */
int vfs_quota_sync(struct super_block *sb, int type)
{
	struct list_head *dirty;
	struct dquot *dquot;
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;

	mutex_lock(&dqopt->dqonoff_mutex);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;
		spin_lock(&dq_list_lock);
		dirty = &dqopt->info[cnt].dqi_dirty_list;
		/* Drain the per-type dirty list, writing each entry. */
		while (!list_empty(dirty)) {
			dquot = list_first_entry(dirty, struct dquot,
						 dq_dirty);
			/* Dirty and inactive can be only bad dquot... */
			if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
				clear_dquot_dirty(dquot);
				continue;
			}
			/* Now we have active dquot from which someone is
 			 * holding reference so we can safely just increase
			 * use count */
			atomic_inc(&dquot->dq_count);
			dqstats.lookups++;
			spin_unlock(&dq_list_lock);
			sb->dq_op->write_dquot(dquot);
			dqput(dquot);
			spin_lock(&dq_list_lock);
		}
		spin_unlock(&dq_list_lock);
	}

	/* Flush quota-file info blocks that are still dirty. */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt)
		    && info_dirty(&dqopt->info[cnt]))
			sb->dq_op->write_info(sb, cnt);
	spin_lock(&dq_list_lock);
	dqstats.syncs++;
	spin_unlock(&dq_list_lock);
	mutex_unlock(&dqopt->dqonoff_mutex);

	return 0;
}
EXPORT_SYMBOL(vfs_quota_sync);
619 
620 /* Free unused dquots from cache */
621 static void prune_dqcache(int count)
622 {
623 	struct list_head *head;
624 	struct dquot *dquot;
625 
626 	head = free_dquots.prev;
627 	while (head != &free_dquots && count) {
628 		dquot = list_entry(head, struct dquot, dq_free);
629 		remove_dquot_hash(dquot);
630 		remove_free_dquot(dquot);
631 		remove_inuse(dquot);
632 		do_destroy_dquot(dquot);
633 		count--;
634 		head = free_dquots.prev;
635 	}
636 }
637 
638 /*
639  * This is called from kswapd when we think we need some
640  * more memory
641  */
642 
643 static int shrink_dqcache_memory(int nr, gfp_t gfp_mask)
644 {
645 	if (nr) {
646 		spin_lock(&dq_list_lock);
647 		prune_dqcache(nr);
648 		spin_unlock(&dq_list_lock);
649 	}
650 	return (dqstats.free_dquots / 100) * sysctl_vfs_cache_pressure;
651 }
652 
/* Hooks the dquot cache into the VM shrinker machinery. */
static struct shrinker dqcache_shrinker = {
	.shrink = shrink_dqcache_memory,
	.seeks = DEFAULT_SEEKS,
};
657 
/*
 * Put reference to dquot
 * NOTE: If you change this function please check whether dqput_blocks() works right...
 *
 * Dropping the last reference first commits a dirty dquot, then
 * releases it, re-checking the count each time we had to sleep
 * (the we_slept loop), and finally parks it on the free list.
 */
void dqput(struct dquot *dquot)
{
	int ret;

	if (!dquot)
		return;
#ifdef __DQUOT_PARANOIA
	if (!atomic_read(&dquot->dq_count)) {
		printk("VFS: dqput: trying to free free dquot\n");
		printk("VFS: device %s, dquot of %s %d\n",
			dquot->dq_sb->s_id,
			quotatypes[dquot->dq_type],
			dquot->dq_id);
		BUG();
	}
#endif

	spin_lock(&dq_list_lock);
	dqstats.drops++;
	spin_unlock(&dq_list_lock);
we_slept:
	spin_lock(&dq_list_lock);
	if (atomic_read(&dquot->dq_count) > 1) {
		/* We have more than one user... nothing to do */
		atomic_dec(&dquot->dq_count);
		/* Releasing dquot during quotaoff phase? */
		if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_type) &&
		    atomic_read(&dquot->dq_count) == 1)
			wake_up(&dquot->dq_wait_unused);
		spin_unlock(&dq_list_lock);
		return;
	}
	/* Need to release dquot? */
	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && dquot_dirty(dquot)) {
		spin_unlock(&dq_list_lock);
		/* Commit dquot before releasing */
		ret = dquot->dq_sb->dq_op->write_dquot(dquot);
		if (ret < 0) {
			printk(KERN_ERR "VFS: cannot write quota structure on "
				"device %s (error %d). Quota may get out of "
				"sync!\n", dquot->dq_sb->s_id, ret);
			/*
			 * We clear dirty bit anyway, so that we avoid
			 * infinite loop here
			 */
			spin_lock(&dq_list_lock);
			clear_dquot_dirty(dquot);
			spin_unlock(&dq_list_lock);
		}
		goto we_slept;
	}
	/* Clear flag in case dquot was inactive (something bad happened) */
	clear_dquot_dirty(dquot);
	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
		spin_unlock(&dq_list_lock);
		dquot->dq_sb->dq_op->release_dquot(dquot);
		goto we_slept;
	}
	atomic_dec(&dquot->dq_count);
#ifdef __DQUOT_PARANOIA
	/* sanity check */
	BUG_ON(!list_empty(&dquot->dq_free));
#endif
	put_dquot_last(dquot);
	spin_unlock(&dq_list_lock);
}
EXPORT_SYMBOL(dqput);
729 
/* Default ->alloc_dquot: zeroed allocation from the slab cache. */
struct dquot *dquot_alloc(struct super_block *sb, int type)
{
	return kmem_cache_zalloc(dquot_cachep, GFP_NOFS);
}
EXPORT_SYMBOL(dquot_alloc);
735 
/*
 * Allocate a fresh dquot via the filesystem's alloc_dquot method and
 * initialize its locks, list heads and identity. The new dquot starts
 * with one reference. Returns NULL on allocation failure.
 */
static struct dquot *get_empty_dquot(struct super_block *sb, int type)
{
	struct dquot *dquot;

	dquot = sb->dq_op->alloc_dquot(sb, type);
	if(!dquot)
		return NULL;

	mutex_init(&dquot->dq_lock);
	INIT_LIST_HEAD(&dquot->dq_free);
	INIT_LIST_HEAD(&dquot->dq_inuse);
	INIT_HLIST_NODE(&dquot->dq_hash);
	INIT_LIST_HEAD(&dquot->dq_dirty);
	init_waitqueue_head(&dquot->dq_wait_unused);
	dquot->dq_sb = sb;
	dquot->dq_type = type;
	/* Caller owns the initial reference. */
	atomic_set(&dquot->dq_count, 1);

	return dquot;
}
756 
/*
 * Get reference to dquot
 *
 * Locking is slightly tricky here. We are guarded from parallel quotaoff()
 * destroying our dquot by:
 *   a) checking for quota flags under dq_list_lock and
 *   b) getting a reference to dquot before we release dq_list_lock
 *
 * Returns a referenced, DQ_ACTIVE dquot, or NULL when quota is off for
 * this (sb, type) or the dquot could not be read in.
 */
struct dquot *dqget(struct super_block *sb, unsigned int id, int type)
{
	unsigned int hashent = hashfn(sb, id, type);
	struct dquot *dquot = NULL, *empty = NULL;

	/* Cheap unlocked pre-check; re-checked under locks below. */
        if (!sb_has_quota_active(sb, type))
		return NULL;
we_slept:
	spin_lock(&dq_list_lock);
	spin_lock(&dq_state_lock);
	if (!sb_has_quota_active(sb, type)) {
		spin_unlock(&dq_state_lock);
		spin_unlock(&dq_list_lock);
		goto out;
	}
	spin_unlock(&dq_state_lock);

	dquot = find_dquot(hashent, sb, id, type);
	if (!dquot) {
		if (!empty) {
			/* Allocate outside the spinlock, then retry lookup. */
			spin_unlock(&dq_list_lock);
			empty = get_empty_dquot(sb, type);
			if (!empty)
				schedule();	/* Try to wait for a moment... */
			goto we_slept;
		}
		dquot = empty;
		empty = NULL;
		dquot->dq_id = id;
		/* all dquots go on the inuse_list */
		put_inuse(dquot);
		/* hash it first so it can be found */
		insert_dquot_hash(dquot);
		dqstats.lookups++;
		spin_unlock(&dq_list_lock);
	} else {
		if (!atomic_read(&dquot->dq_count))
			remove_free_dquot(dquot);
		atomic_inc(&dquot->dq_count);
		dqstats.cache_hits++;
		dqstats.lookups++;
		spin_unlock(&dq_list_lock);
	}
	/* Wait for dq_lock - after this we know that either dquot_release() is
	 * already finished or it will be canceled due to dq_count > 1 test */
	wait_on_dquot(dquot);
	/* Read the dquot / allocate space in quota file */
	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) &&
	    sb->dq_op->acquire_dquot(dquot) < 0) {
		dqput(dquot);
		dquot = NULL;
		goto out;
	}
#ifdef __DQUOT_PARANOIA
	BUG_ON(!dquot->dq_sb);	/* Has somebody invalidated entry under us? */
#endif
out:
	/* Free the pre-allocated dquot if the lookup found an existing one. */
	if (empty)
		do_destroy_dquot(empty);

	return dquot;
}
EXPORT_SYMBOL(dqget);
828 
829 static int dqinit_needed(struct inode *inode, int type)
830 {
831 	int cnt;
832 
833 	if (IS_NOQUOTA(inode))
834 		return 0;
835 	if (type != -1)
836 		return !inode->i_dquot[type];
837 	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
838 		if (!inode->i_dquot[cnt])
839 			return 1;
840 	return 0;
841 }
842 
/* This routine is guarded by dqonoff_mutex mutex */
/* Attach dquot pointers to every writable inode of @sb that still
 * lacks them, dropping inode_lock around the (possibly blocking)
 * ->initialize call. */
static void add_dquot_ref(struct super_block *sb, int type)
{
	struct inode *inode, *old_inode = NULL;

	spin_lock(&inode_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		/* Skip inodes being torn down or not yet set up. */
		if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
			continue;
		if (!atomic_read(&inode->i_writecount))
			continue;
		if (!dqinit_needed(inode, type))
			continue;

		__iget(inode);
		spin_unlock(&inode_lock);

		iput(old_inode);
		sb->dq_op->initialize(inode, type);
		/* We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the inode_lock.
		 * We cannot iput the inode now as we can be holding the last
		 * reference and we cannot iput it under inode_lock. So we
		 * keep the reference and iput it later. */
		old_inode = inode;
		spin_lock(&inode_lock);
	}
	spin_unlock(&inode_lock);
	iput(old_inode);
}
873 
874 /*
875  * Return 0 if dqput() won't block.
876  * (note that 1 doesn't necessarily mean blocking)
877  */
878 static inline int dqput_blocks(struct dquot *dquot)
879 {
880 	if (atomic_read(&dquot->dq_count) <= 1)
881 		return 1;
882 	return 0;
883 }
884 
/*
 * Remove references to dquots from inode and add dquot to list for freeing
 * if we have the last reference to dquot
 * We can't race with anybody because we hold dqptr_sem for writing...
 *
 * Returns 1 when the dquot was deferred to @tofree_head, 0 otherwise.
 */
static int remove_inode_dquot_ref(struct inode *inode, int type,
				  struct list_head *tofree_head)
{
	struct dquot *dquot = inode->i_dquot[type];

	inode->i_dquot[type] = NULL;
	if (dquot) {
		if (dqput_blocks(dquot)) {
#ifdef __DQUOT_PARANOIA
			if (atomic_read(&dquot->dq_count) != 1)
				printk(KERN_WARNING "VFS: Adding dquot with dq_count %d to dispose list.\n", atomic_read(&dquot->dq_count));
#endif
			spin_lock(&dq_list_lock);
			/* As dquot must have currently users it can't be on
			 * the free list... */
			list_add(&dquot->dq_free, tofree_head);
			spin_unlock(&dq_list_lock);
			return 1;
		}
		else
			dqput(dquot);   /* We have guaranteed we won't block */
	}
	return 0;
}
914 
915 /*
916  * Free list of dquots
917  * Dquots are removed from inodes and no new references can be got so we are
918  * the only ones holding reference
919  */
920 static void put_dquot_list(struct list_head *tofree_head)
921 {
922 	struct list_head *act_head;
923 	struct dquot *dquot;
924 
925 	act_head = tofree_head->next;
926 	while (act_head != tofree_head) {
927 		dquot = list_entry(act_head, struct dquot, dq_free);
928 		act_head = act_head->next;
929 		/* Remove dquot from the list so we won't have problems... */
930 		list_del_init(&dquot->dq_free);
931 		dqput(dquot);
932 	}
933 }
934 
/* Strip dquot pointers of the given type from every inode of @sb,
 * collecting dquots whose last reference would block into
 * @tofree_head (see remove_inode_dquot_ref()). */
static void remove_dquot_ref(struct super_block *sb, int type,
		struct list_head *tofree_head)
{
	struct inode *inode;

	spin_lock(&inode_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		/*
		 *  We have to scan also I_NEW inodes because they can already
		 *  have quota pointer initialized. Luckily, we need to touch
		 *  only quota pointers and these have separate locking
		 *  (dqptr_sem).
		 */
		if (!IS_NOQUOTA(inode))
			remove_inode_dquot_ref(inode, type, tofree_head);
	}
	spin_unlock(&inode_lock);
}
953 
954 /* Gather all references from inodes and drop them */
955 static void drop_dquot_ref(struct super_block *sb, int type)
956 {
957 	LIST_HEAD(tofree_head);
958 
959 	if (sb->dq_op) {
960 		down_write(&sb_dqopt(sb)->dqptr_sem);
961 		remove_dquot_ref(sb, type, &tofree_head);
962 		up_write(&sb_dqopt(sb)->dqptr_sem);
963 		put_dquot_list(&tofree_head);
964 	}
965 }
966 
/* Add @number to the dquot's inode usage (caller holds dq_data_lock). */
static inline void dquot_incr_inodes(struct dquot *dquot, qsize_t number)
{
	dquot->dq_dqb.dqb_curinodes += number;
}
971 
/* Add @number bytes to the dquot's space usage. */
static inline void dquot_incr_space(struct dquot *dquot, qsize_t number)
{
	dquot->dq_dqb.dqb_curspace += number;
}
976 
/* Reserve @number bytes against the dquot (not yet claimed as used). */
static inline void dquot_resv_space(struct dquot *dquot, qsize_t number)
{
	dquot->dq_dqb.dqb_rsvspace += number;
}
981 
982 /*
983  * Claim reserved quota space
984  */
985 static void dquot_claim_reserved_space(struct dquot *dquot,
986 						qsize_t number)
987 {
988 	WARN_ON(dquot->dq_dqb.dqb_rsvspace < number);
989 	dquot->dq_dqb.dqb_curspace += number;
990 	dquot->dq_dqb.dqb_rsvspace -= number;
991 }
992 
993 static inline
994 void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
995 {
996 	dquot->dq_dqb.dqb_rsvspace -= number;
997 }
998 
999 static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
1000 {
1001 	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
1002 	    dquot->dq_dqb.dqb_curinodes >= number)
1003 		dquot->dq_dqb.dqb_curinodes -= number;
1004 	else
1005 		dquot->dq_dqb.dqb_curinodes = 0;
1006 	if (dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit)
1007 		dquot->dq_dqb.dqb_itime = (time_t) 0;
1008 	clear_bit(DQ_INODES_B, &dquot->dq_flags);
1009 }
1010 
1011 static void dquot_decr_space(struct dquot *dquot, qsize_t number)
1012 {
1013 	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
1014 	    dquot->dq_dqb.dqb_curspace >= number)
1015 		dquot->dq_dqb.dqb_curspace -= number;
1016 	else
1017 		dquot->dq_dqb.dqb_curspace = 0;
1018 	if (dquot->dq_dqb.dqb_curspace <= dquot->dq_dqb.dqb_bsoftlimit)
1019 		dquot->dq_dqb.dqb_btime = (time_t) 0;
1020 	clear_bit(DQ_BLKS_B, &dquot->dq_flags);
1021 }
1022 
1023 static int warning_issued(struct dquot *dquot, const int warntype)
1024 {
1025 	int flag = (warntype == QUOTA_NL_BHARDWARN ||
1026 		warntype == QUOTA_NL_BSOFTLONGWARN) ? DQ_BLKS_B :
1027 		((warntype == QUOTA_NL_IHARDWARN ||
1028 		warntype == QUOTA_NL_ISOFTLONGWARN) ? DQ_INODES_B : 0);
1029 
1030 	if (!flag)
1031 		return 0;
1032 	return test_and_set_bit(flag, &dquot->dq_flags);
1033 }
1034 
1035 #ifdef CONFIG_PRINT_QUOTA_WARNING
1036 static int flag_print_warnings = 1;
1037 
1038 static int need_print_warning(struct dquot *dquot)
1039 {
1040 	if (!flag_print_warnings)
1041 		return 0;
1042 
1043 	switch (dquot->dq_type) {
1044 		case USRQUOTA:
1045 			return current_fsuid() == dquot->dq_id;
1046 		case GRPQUOTA:
1047 			return in_group_p(dquot->dq_id);
1048 	}
1049 	return 0;
1050 }
1051 
/* Print warning to user which exceeded quota */
static void print_warning(struct dquot *dquot, const int warntype)
{
	char *msg = NULL;
	struct tty_struct *tty;

	/* "Usage dropped below limit" events never warrant a console message */
	if (warntype == QUOTA_NL_IHARDBELOW ||
	    warntype == QUOTA_NL_ISOFTBELOW ||
	    warntype == QUOTA_NL_BHARDBELOW ||
	    warntype == QUOTA_NL_BSOFTBELOW || !need_print_warning(dquot))
		return;

	tty = get_current_tty();
	if (!tty)
		return;
	tty_write_message(tty, dquot->dq_sb->s_id);
	/* Soft-limit warnings are advisory; everything else failed a write */
	if (warntype == QUOTA_NL_ISOFTWARN || warntype == QUOTA_NL_BSOFTWARN)
		tty_write_message(tty, ": warning, ");
	else
		tty_write_message(tty, ": write failed, ");
	tty_write_message(tty, quotatypes[dquot->dq_type]);
	switch (warntype) {
		case QUOTA_NL_IHARDWARN:
			msg = " file limit reached.\r\n";
			break;
		case QUOTA_NL_ISOFTLONGWARN:
			msg = " file quota exceeded too long.\r\n";
			break;
		case QUOTA_NL_ISOFTWARN:
			msg = " file quota exceeded.\r\n";
			break;
		case QUOTA_NL_BHARDWARN:
			msg = " block limit reached.\r\n";
			break;
		case QUOTA_NL_BSOFTLONGWARN:
			msg = " block quota exceeded too long.\r\n";
			break;
		case QUOTA_NL_BSOFTWARN:
			msg = " block quota exceeded.\r\n";
			break;
	}
	tty_write_message(tty, msg);
	/* Drop the reference taken by get_current_tty() */
	tty_kref_put(tty);
}
#endif
1097 
1098 /*
1099  * Write warnings to the console and send warning messages over netlink.
1100  *
1101  * Note that this function can sleep.
1102  */
1103 static void flush_warnings(struct dquot *const *dquots, char *warntype)
1104 {
1105 	struct dquot *dq;
1106 	int i;
1107 
1108 	for (i = 0; i < MAXQUOTAS; i++) {
1109 		dq = dquots[i];
1110 		if (dq && warntype[i] != QUOTA_NL_NOWARN &&
1111 		    !warning_issued(dq, warntype[i])) {
1112 #ifdef CONFIG_PRINT_QUOTA_WARNING
1113 			print_warning(dq, warntype[i]);
1114 #endif
1115 			quota_send_warning(dq->dq_type, dq->dq_id,
1116 					   dq->dq_sb->s_dev, warntype[i]);
1117 		}
1118 	}
1119 }
1120 
/*
 * May the current task exceed hard limits? Tasks with CAP_SYS_RESOURCE
 * may, unless the old v1 quota format has root-squash enabled.
 */
static int ignore_hardlimit(struct dquot *dquot)
{
	struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type];

	return capable(CAP_SYS_RESOURCE) &&
	       (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
		!(info->dqi_flags & V1_DQF_RSQUASH));
}
1129 
/* needs dq_data_lock */
/*
 * Check whether @inodes more inodes may be charged to @dquot.
 * Returns QUOTA_OK or NO_QUOTA and sets *warntype to the warning to issue.
 * Starts the inode grace period when the soft limit is first exceeded.
 */
static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype)
{
	qsize_t newinodes = dquot->dq_dqb.dqb_curinodes + inodes;

	*warntype = QUOTA_NL_NOWARN;
	/* No limits enforced, or fake quota entry? Always allow. */
	if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) ||
	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
		return QUOTA_OK;

	/* Hard limit would be exceeded */
	if (dquot->dq_dqb.dqb_ihardlimit &&
	    newinodes > dquot->dq_dqb.dqb_ihardlimit &&
            !ignore_hardlimit(dquot)) {
		*warntype = QUOTA_NL_IHARDWARN;
		return NO_QUOTA;
	}

	/* Soft limit exceeded and the grace period has expired */
	if (dquot->dq_dqb.dqb_isoftlimit &&
	    newinodes > dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_itime &&
	    get_seconds() >= dquot->dq_dqb.dqb_itime &&
            !ignore_hardlimit(dquot)) {
		*warntype = QUOTA_NL_ISOFTLONGWARN;
		return NO_QUOTA;
	}

	/* First crossing of the soft limit: warn and start the grace period */
	if (dquot->dq_dqb.dqb_isoftlimit &&
	    newinodes > dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_itime == 0) {
		*warntype = QUOTA_NL_ISOFTWARN;
		dquot->dq_dqb.dqb_itime = get_seconds() +
		    sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace;
	}

	return QUOTA_OK;
}
1166 
/* needs dq_data_lock */
/*
 * Check whether @space more bytes may be charged to @dquot.
 * Returns QUOTA_OK or NO_QUOTA and sets *warntype. With @prealloc set no
 * warning is generated and exceeding the soft limit is simply refused.
 */
static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *warntype)
{
	qsize_t tspace;
	struct super_block *sb = dquot->dq_sb;

	*warntype = QUOTA_NL_NOWARN;
	if (!sb_has_quota_limits_enabled(sb, dquot->dq_type) ||
	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
		return QUOTA_OK;

	/* Reserved space counts against the limits as well */
	tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace
		+ space;

	/* Hard limit would be exceeded */
	if (dquot->dq_dqb.dqb_bhardlimit &&
	    tspace > dquot->dq_dqb.dqb_bhardlimit &&
            !ignore_hardlimit(dquot)) {
		if (!prealloc)
			*warntype = QUOTA_NL_BHARDWARN;
		return NO_QUOTA;
	}

	/* Soft limit exceeded and the grace period has expired */
	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    tspace > dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_btime &&
	    get_seconds() >= dquot->dq_dqb.dqb_btime &&
            !ignore_hardlimit(dquot)) {
		if (!prealloc)
			*warntype = QUOTA_NL_BSOFTLONGWARN;
		return NO_QUOTA;
	}

	/* First crossing of the soft limit */
	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    tspace > dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_btime == 0) {
		if (!prealloc) {
			*warntype = QUOTA_NL_BSOFTWARN;
			dquot->dq_dqb.dqb_btime = get_seconds() +
			    sb_dqopt(sb)->info[dquot->dq_type].dqi_bgrace;
		}
		else
			/*
			 * We don't allow preallocation to exceed softlimit so exceeding will
			 * be always printed
			 */
			return NO_QUOTA;
	}

	return QUOTA_OK;
}
1217 
1218 static int info_idq_free(struct dquot *dquot, qsize_t inodes)
1219 {
1220 	qsize_t newinodes;
1221 
1222 	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
1223 	    dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit ||
1224 	    !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type))
1225 		return QUOTA_NL_NOWARN;
1226 
1227 	newinodes = dquot->dq_dqb.dqb_curinodes - inodes;
1228 	if (newinodes <= dquot->dq_dqb.dqb_isoftlimit)
1229 		return QUOTA_NL_ISOFTBELOW;
1230 	if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit &&
1231 	    newinodes < dquot->dq_dqb.dqb_ihardlimit)
1232 		return QUOTA_NL_IHARDBELOW;
1233 	return QUOTA_NL_NOWARN;
1234 }
1235 
1236 static int info_bdq_free(struct dquot *dquot, qsize_t space)
1237 {
1238 	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
1239 	    dquot->dq_dqb.dqb_curspace <= dquot->dq_dqb.dqb_bsoftlimit)
1240 		return QUOTA_NL_NOWARN;
1241 
1242 	if (dquot->dq_dqb.dqb_curspace - space <= dquot->dq_dqb.dqb_bsoftlimit)
1243 		return QUOTA_NL_BSOFTBELOW;
1244 	if (dquot->dq_dqb.dqb_curspace >= dquot->dq_dqb.dqb_bhardlimit &&
1245 	    dquot->dq_dqb.dqb_curspace - space < dquot->dq_dqb.dqb_bhardlimit)
1246 		return QUOTA_NL_BHARDBELOW;
1247 	return QUOTA_NL_NOWARN;
1248 }
/*
 *	Initialize quota pointers in inode
 *	We do things in a bit complicated way but by that we avoid calling
 *	dqget() and thus filesystem callbacks under dqptr_sem.
 */
int dquot_initialize(struct inode *inode, int type)
{
	unsigned int id = 0;
	int cnt, ret = 0;
	struct dquot *got[MAXQUOTAS] = { NULL, NULL };
	struct super_block *sb = inode->i_sb;

	/* First test before acquiring mutex - solves deadlocks when we
         * re-enter the quota code and are already holding the mutex */
	if (IS_NOQUOTA(inode))
		return 0;

	/* First get references to structures we might need. */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		switch (cnt) {
		case USRQUOTA:
			id = inode->i_uid;
			break;
		case GRPQUOTA:
			id = inode->i_gid;
			break;
		}
		/* dqget() may call into the filesystem - no dqptr_sem yet */
		got[cnt] = dqget(sb, id, cnt);
	}

	down_write(&sb_dqopt(sb)->dqptr_sem);
	/* Quota may have been turned off while we were getting references */
	if (IS_NOQUOTA(inode))
		goto out_err;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		/* Avoid races with quotaoff() */
		if (!sb_has_quota_active(sb, cnt))
			continue;
		if (!inode->i_dquot[cnt]) {
			/* Hand our reference over to the inode */
			inode->i_dquot[cnt] = got[cnt];
			got[cnt] = NULL;
		}
	}
out_err:
	up_write(&sb_dqopt(sb)->dqptr_sem);
	/* Drop unused references */
	dqput_all(got);
	return ret;
}
EXPORT_SYMBOL(dquot_initialize);
1302 
1303 /*
1304  * 	Release all quotas referenced by inode
1305  */
1306 int dquot_drop(struct inode *inode)
1307 {
1308 	int cnt;
1309 	struct dquot *put[MAXQUOTAS];
1310 
1311 	down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1312 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1313 		put[cnt] = inode->i_dquot[cnt];
1314 		inode->i_dquot[cnt] = NULL;
1315 	}
1316 	up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1317 	dqput_all(put);
1318 	return 0;
1319 }
1320 EXPORT_SYMBOL(dquot_drop);
1321 
1322 /* Wrapper to remove references to quota structures from inode */
1323 void vfs_dq_drop(struct inode *inode)
1324 {
1325 	/* Here we can get arbitrary inode from clear_inode() so we have
1326 	 * to be careful. OTOH we don't need locking as quota operations
1327 	 * are allowed to change only at mount time */
1328 	if (!IS_NOQUOTA(inode) && inode->i_sb && inode->i_sb->dq_op
1329 	    && inode->i_sb->dq_op->drop) {
1330 		int cnt;
1331 		/* Test before calling to rule out calls from proc and such
1332                  * where we are not allowed to block. Note that this is
1333 		 * actually reliable test even without the lock - the caller
1334 		 * must assure that nobody can come after the DQUOT_DROP and
1335 		 * add quota pointers back anyway */
1336 		for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1337 			if (inode->i_dquot[cnt])
1338 				break;
1339 		if (cnt < MAXQUOTAS)
1340 			inode->i_sb->dq_op->drop(inode);
1341 	}
1342 }
1343 EXPORT_SYMBOL(vfs_dq_drop);
1344 
/*
 * inode_reserved_space is managed internally by quota, and protected by
 * i_lock similar to i_blocks+i_bytes.
 */
static qsize_t *inode_reserved_space(struct inode * inode)
{
	/* Filesystem must explicitly define it's own method in order to use
	 * quota reservation interface */
	BUG_ON(!inode->i_sb->dq_op->get_reserved_space);
	return inode->i_sb->dq_op->get_reserved_space(inode);
}

/* Grow the per-inode reserved-space counter by @number bytes */
void inode_add_rsv_space(struct inode *inode, qsize_t number)
{
	spin_lock(&inode->i_lock);
	*inode_reserved_space(inode) += number;
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(inode_add_rsv_space);

/* Convert @number reserved bytes into really allocated ones */
void inode_claim_rsv_space(struct inode *inode, qsize_t number)
{
	spin_lock(&inode->i_lock);
	*inode_reserved_space(inode) -= number;
	__inode_add_bytes(inode, number);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(inode_claim_rsv_space);

/* Shrink the per-inode reserved-space counter by @number bytes */
void inode_sub_rsv_space(struct inode *inode, qsize_t number)
{
	spin_lock(&inode->i_lock);
	*inode_reserved_space(inode) -= number;
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(inode_sub_rsv_space);

/* Read the reserved space; 0 when the fs has no reservation support */
static qsize_t inode_get_rsv_space(struct inode *inode)
{
	qsize_t ret;

	if (!inode->i_sb->dq_op->get_reserved_space)
		return 0;
	spin_lock(&inode->i_lock);
	ret = *inode_reserved_space(inode);
	spin_unlock(&inode->i_lock);
	return ret;
}

/* Account @number new bytes either as reserved or as allocated space */
static void inode_incr_space(struct inode *inode, qsize_t number,
				int reserve)
{
	if (reserve)
		inode_add_rsv_space(inode, number);
	else
		inode_add_bytes(inode, number);
}

/* Undo inode_incr_space() */
static void inode_decr_space(struct inode *inode, qsize_t number, int reserve)
{
	if (reserve)
		inode_sub_rsv_space(inode, number);
	else
		inode_sub_bytes(inode, number);
}
1410 
/*
 * Following four functions update i_blocks+i_bytes fields and
 * quota information (together with appropriate checks)
 * NOTE: We absolutely rely on the fact that caller dirties
 * the inode (usually macros in quotaops.h care about this) and
 * holds a handle for the current transaction so that dquot write and
 * inode write go into the same transaction.
 */

/*
 * This operation can block, but only after everything is updated
 */
int __dquot_alloc_space(struct inode *inode, qsize_t number,
			int warn, int reserve)
{
	int cnt, ret = QUOTA_OK;
	char warntype[MAXQUOTAS];

	/*
	 * First test before acquiring mutex - solves deadlocks when we
	 * re-enter the quota code and are already holding the mutex
	 */
	if (IS_NOQUOTA(inode)) {
		inode_incr_space(inode, number, reserve);
		goto out;
	}

	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		warntype[cnt] = QUOTA_NL_NOWARN;

	spin_lock(&dq_data_lock);
	/* First pass: check limits for every quota type... */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!inode->i_dquot[cnt])
			continue;
		if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt)
		    == NO_QUOTA) {
			ret = NO_QUOTA;
			spin_unlock(&dq_data_lock);
			goto out_flush_warn;
		}
	}
	/* ...second pass: charge them, so usage is never charged partially */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!inode->i_dquot[cnt])
			continue;
		if (reserve)
			dquot_resv_space(inode->i_dquot[cnt], number);
		else
			dquot_incr_space(inode->i_dquot[cnt], number);
	}
	inode_incr_space(inode, number, reserve);
	spin_unlock(&dq_data_lock);

	/* Reservations don't change on-disk usage, nothing to write back */
	if (reserve)
		goto out_flush_warn;
	mark_all_dquot_dirty(inode->i_dquot);
out_flush_warn:
	flush_warnings(inode->i_dquot, warntype);
	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
out:
	return ret;
}
1473 
/* Allocate @number bytes of quota-accounted space (may block) */
int dquot_alloc_space(struct inode *inode, qsize_t number, int warn)
{
	return __dquot_alloc_space(inode, number, warn, 0);
}
EXPORT_SYMBOL(dquot_alloc_space);

/* Reserve @number bytes of space without allocating them yet */
int dquot_reserve_space(struct inode *inode, qsize_t number, int warn)
{
	return __dquot_alloc_space(inode, number, warn, 1);
}
EXPORT_SYMBOL(dquot_reserve_space);
1485 
/*
 * This operation can block, but only after everything is updated
 */
int dquot_alloc_inode(const struct inode *inode, qsize_t number)
{
	int cnt, ret = NO_QUOTA;
	char warntype[MAXQUOTAS];

	/* First test before acquiring mutex - solves deadlocks when we
         * re-enter the quota code and are already holding the mutex */
	if (IS_NOQUOTA(inode))
		return QUOTA_OK;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		warntype[cnt] = QUOTA_NL_NOWARN;
	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	spin_lock(&dq_data_lock);
	/* First pass: check limits for every quota type... */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!inode->i_dquot[cnt])
			continue;
		if (check_idq(inode->i_dquot[cnt], number, warntype+cnt)
		    == NO_QUOTA)
			goto warn_put_all;
	}

	/* ...second pass: charge them, so usage is never charged partially */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!inode->i_dquot[cnt])
			continue;
		dquot_incr_inodes(inode->i_dquot[cnt], number);
	}
	ret = QUOTA_OK;
warn_put_all:
	spin_unlock(&dq_data_lock);
	if (ret == QUOTA_OK)
		mark_all_dquot_dirty(inode->i_dquot);
	flush_warnings(inode->i_dquot, warntype);
	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	return ret;
}
EXPORT_SYMBOL(dquot_alloc_inode);
1525 
/* Convert a space reservation of @number bytes into a real allocation */
int dquot_claim_space(struct inode *inode, qsize_t number)
{
	int cnt;
	int ret = QUOTA_OK;

	if (IS_NOQUOTA(inode)) {
		inode_claim_rsv_space(inode, number);
		goto out;
	}

	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	spin_lock(&dq_data_lock);
	/* Claim reserved quotas to allocated quotas */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (inode->i_dquot[cnt])
			dquot_claim_reserved_space(inode->i_dquot[cnt],
							number);
	}
	/* Update inode bytes */
	inode_claim_rsv_space(inode, number);
	spin_unlock(&dq_data_lock);
	/* Claiming changes on-disk usage, so the dquots must be written */
	mark_all_dquot_dirty(inode->i_dquot);
	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
out:
	return ret;
}
EXPORT_SYMBOL(dquot_claim_space);
1553 
/*
 * This operation can block, but only after everything is updated
 */
int __dquot_free_space(struct inode *inode, qsize_t number, int reserve)
{
	unsigned int cnt;
	char warntype[MAXQUOTAS];

	/* First test before acquiring mutex - solves deadlocks when we
         * re-enter the quota code and are already holding the mutex */
	if (IS_NOQUOTA(inode)) {
		inode_decr_space(inode, number, reserve);
		return QUOTA_OK;
	}

	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	spin_lock(&dq_data_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!inode->i_dquot[cnt])
			continue;
		/* Freeing may drop usage below a limit - pick the warning.
		 * Entries with a NULL dquot stay uninitialized, but
		 * flush_warnings() skips them via its NULL-dquot check. */
		warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number);
		if (reserve)
			dquot_free_reserved_space(inode->i_dquot[cnt], number);
		else
			dquot_decr_space(inode->i_dquot[cnt], number);
	}
	inode_decr_space(inode, number, reserve);
	spin_unlock(&dq_data_lock);

	/* Reservations never reached disk, nothing to write back */
	if (reserve)
		goto out_unlock;
	mark_all_dquot_dirty(inode->i_dquot);
out_unlock:
	flush_warnings(inode->i_dquot, warntype);
	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	return QUOTA_OK;
}

/* Free @number bytes of really allocated space */
int dquot_free_space(struct inode *inode, qsize_t number)
{
	return  __dquot_free_space(inode, number, 0);
}
EXPORT_SYMBOL(dquot_free_space);
1597 
/*
 * Release reserved quota space
 * (the reservation never reached disk, so no dquot is marked dirty)
 */
void dquot_release_reserved_space(struct inode *inode, qsize_t number)
{
	__dquot_free_space(inode, number, 1);

}
EXPORT_SYMBOL(dquot_release_reserved_space);
1607 
/*
 * This operation can block, but only after everything is updated
 */
int dquot_free_inode(const struct inode *inode, qsize_t number)
{
	unsigned int cnt;
	char warntype[MAXQUOTAS];

	/* First test before acquiring mutex - solves deadlocks when we
         * re-enter the quota code and are already holding the mutex */
	if (IS_NOQUOTA(inode))
		return QUOTA_OK;

	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	spin_lock(&dq_data_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!inode->i_dquot[cnt])
			continue;
		/* Dropping below a limit may warrant a "below" warning */
		warntype[cnt] = info_idq_free(inode->i_dquot[cnt], number);
		dquot_decr_inodes(inode->i_dquot[cnt], number);
	}
	spin_unlock(&dq_data_lock);
	mark_all_dquot_dirty(inode->i_dquot);
	flush_warnings(inode->i_dquot, warntype);
	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	return QUOTA_OK;
}
EXPORT_SYMBOL(dquot_free_inode);
1636 
/*
 * Transfer the number of inode and blocks from one diskquota to an other.
 *
 * This operation can block, but only after everything is updated
 * A transaction must be started when entering this function.
 */
int dquot_transfer(struct inode *inode, struct iattr *iattr)
{
	qsize_t space, cur_space;
	qsize_t rsv_space = 0;
	struct dquot *transfer_from[MAXQUOTAS];
	struct dquot *transfer_to[MAXQUOTAS];
	int cnt, ret = QUOTA_OK;
	int chuid = iattr->ia_valid & ATTR_UID && inode->i_uid != iattr->ia_uid,
	    chgid = iattr->ia_valid & ATTR_GID && inode->i_gid != iattr->ia_gid;
	char warntype_to[MAXQUOTAS];
	char warntype_from_inodes[MAXQUOTAS], warntype_from_space[MAXQUOTAS];

	/* First test before acquiring mutex - solves deadlocks when we
         * re-enter the quota code and are already holding the mutex */
	if (IS_NOQUOTA(inode))
		return QUOTA_OK;
	/* Initialize the arrays */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		transfer_from[cnt] = NULL;
		transfer_to[cnt] = NULL;
		warntype_to[cnt] = QUOTA_NL_NOWARN;
	}
	/* Get target dquots before taking dqptr_sem - dqget() can block */
	if (chuid)
		transfer_to[USRQUOTA] = dqget(inode->i_sb, iattr->ia_uid,
					      USRQUOTA);
	if (chgid)
		transfer_to[GRPQUOTA] = dqget(inode->i_sb, iattr->ia_gid,
					      GRPQUOTA);

	down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
	if (IS_NOQUOTA(inode)) {	/* File without quota accounting? */
		up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
		goto put_all;
	}
	spin_lock(&dq_data_lock);
	cur_space = inode_get_bytes(inode);
	rsv_space = inode_get_rsv_space(inode);
	space = cur_space + rsv_space;
	/* Build the transfer_from list and check the limits */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!transfer_to[cnt])
			continue;
		transfer_from[cnt] = inode->i_dquot[cnt];
		if (check_idq(transfer_to[cnt], 1, warntype_to + cnt) ==
		    NO_QUOTA || check_bdq(transfer_to[cnt], space, 0,
		    warntype_to + cnt) == NO_QUOTA)
			goto over_quota;
	}

	/*
	 * Finally perform the needed transfer from transfer_from to transfer_to
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		/*
		 * Skip changes for same uid or gid or for turned off quota-type.
		 */
		if (!transfer_to[cnt])
			continue;

		/* Due to IO error we might not have transfer_from[] structure */
		if (transfer_from[cnt]) {
			warntype_from_inodes[cnt] =
				info_idq_free(transfer_from[cnt], 1);
			warntype_from_space[cnt] =
				info_bdq_free(transfer_from[cnt], space);
			dquot_decr_inodes(transfer_from[cnt], 1);
			dquot_decr_space(transfer_from[cnt], cur_space);
			dquot_free_reserved_space(transfer_from[cnt],
						  rsv_space);
		}

		dquot_incr_inodes(transfer_to[cnt], 1);
		dquot_incr_space(transfer_to[cnt], cur_space);
		dquot_resv_space(transfer_to[cnt], rsv_space);

		inode->i_dquot[cnt] = transfer_to[cnt];
	}
	spin_unlock(&dq_data_lock);
	up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);

	mark_all_dquot_dirty(transfer_from);
	mark_all_dquot_dirty(transfer_to);
	/* The reference we got is transferred to the inode */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		transfer_to[cnt] = NULL;
warn_put_all:
	/* warntype_from_* entries for NULL dquots stay uninitialized but
	 * flush_warnings() never reads them (NULL-dquot short-circuit) */
	flush_warnings(transfer_to, warntype_to);
	flush_warnings(transfer_from, warntype_from_inodes);
	flush_warnings(transfer_from, warntype_from_space);
put_all:
	dqput_all(transfer_from);
	dqput_all(transfer_to);
	return ret;
over_quota:
	spin_unlock(&dq_data_lock);
	up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
	/* Clear dquot pointers we don't want to dqput() */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		transfer_from[cnt] = NULL;
	ret = NO_QUOTA;
	goto warn_put_all;
}
EXPORT_SYMBOL(dquot_transfer);
1746 
1747 /* Wrapper for transferring ownership of an inode */
1748 int vfs_dq_transfer(struct inode *inode, struct iattr *iattr)
1749 {
1750 	if (sb_any_quota_active(inode->i_sb) && !IS_NOQUOTA(inode)) {
1751 		vfs_dq_init(inode);
1752 		if (inode->i_sb->dq_op->transfer(inode, iattr) == NO_QUOTA)
1753 			return 1;
1754 	}
1755 	return 0;
1756 }
1757 EXPORT_SYMBOL(vfs_dq_transfer);
1758 
/*
 * Write info of quota file to disk
 */
int dquot_commit_info(struct super_block *sb, int type)
{
	int ret;
	struct quota_info *dqopt = sb_dqopt(sb);

	/* dqio_mutex serializes all quota file I/O */
	mutex_lock(&dqopt->dqio_mutex);
	ret = dqopt->ops[type]->write_file_info(sb, type);
	mutex_unlock(&dqopt->dqio_mutex);
	return ret;
}
EXPORT_SYMBOL(dquot_commit_info);
1773 
/*
 * Definitions of diskquota operations.
 * Generic operation table for filesystems that use the VFS quota code.
 */
const struct dquot_operations dquot_operations = {
	.initialize	= dquot_initialize,
	.drop		= dquot_drop,
	.alloc_space	= dquot_alloc_space,
	.alloc_inode	= dquot_alloc_inode,
	.free_space	= dquot_free_space,
	.free_inode	= dquot_free_inode,
	.transfer	= dquot_transfer,
	.write_dquot	= dquot_commit,
	.acquire_dquot	= dquot_acquire,
	.release_dquot	= dquot_release,
	.mark_dirty	= dquot_mark_dquot_dirty,
	.write_info	= dquot_commit_info,
	.alloc_dquot	= dquot_alloc,
	.destroy_dquot	= dquot_destroy,
};
1793 
/*
 * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount)
 */
int vfs_quota_disable(struct super_block *sb, int type, unsigned int flags)
{
	int cnt, ret = 0;
	struct quota_info *dqopt = sb_dqopt(sb);
	struct inode *toputinode[MAXQUOTAS];

	/* Cannot turn off usage accounting without turning off limits, or
	 * suspend quotas and simultaneously turn quotas off. */
	if ((flags & DQUOT_USAGE_ENABLED && !(flags & DQUOT_LIMITS_ENABLED))
	    || (flags & DQUOT_SUSPENDED && flags & (DQUOT_LIMITS_ENABLED |
	    DQUOT_USAGE_ENABLED)))
		return -EINVAL;

	/* We need to serialize quota_off() for device */
	mutex_lock(&dqopt->dqonoff_mutex);

	/*
	 * Skip everything if there's nothing to do. We have to do this because
	 * sometimes we are called when fill_super() failed and calling
	 * sync_fs() in such cases does no good.
	 */
	if (!sb_any_quota_loaded(sb)) {
		mutex_unlock(&dqopt->dqonoff_mutex);
		return 0;
	}
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		toputinode[cnt] = NULL;
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_loaded(sb, cnt))
			continue;

		if (flags & DQUOT_SUSPENDED) {
			spin_lock(&dq_state_lock);
			dqopt->flags |=
				dquot_state_flag(DQUOT_SUSPENDED, cnt);
			spin_unlock(&dq_state_lock);
		} else {
			spin_lock(&dq_state_lock);
			dqopt->flags &= ~dquot_state_flag(flags, cnt);
			/* Turning off suspended quotas? */
			if (!sb_has_quota_loaded(sb, cnt) &&
			    sb_has_quota_suspended(sb, cnt)) {
				dqopt->flags &=	~dquot_state_flag(
							DQUOT_SUSPENDED, cnt);
				spin_unlock(&dq_state_lock);
				iput(dqopt->files[cnt]);
				dqopt->files[cnt] = NULL;
				continue;
			}
			spin_unlock(&dq_state_lock);
		}

		/* We still have to keep quota loaded? */
		if (sb_has_quota_loaded(sb, cnt) && !(flags & DQUOT_SUSPENDED))
			continue;

		/* Note: these are blocking operations */
		drop_dquot_ref(sb, cnt);
		invalidate_dquots(sb, cnt);
		/*
		 * Now all dquots should be invalidated, all writes done so we
		 * should be only users of the info. No locks needed.
		 */
		if (info_dirty(&dqopt->info[cnt]))
			sb->dq_op->write_info(sb, cnt);
		if (dqopt->ops[cnt]->free_file_info)
			dqopt->ops[cnt]->free_file_info(sb, cnt);
		put_quota_format(dqopt->info[cnt].dqi_format);

		/* When suspending, the inode stays referenced via dqopt->files
		 * so quota can be re-enabled on remount RW */
		toputinode[cnt] = dqopt->files[cnt];
		if (!sb_has_quota_loaded(sb, cnt))
			dqopt->files[cnt] = NULL;
		dqopt->info[cnt].dqi_flags = 0;
		dqopt->info[cnt].dqi_igrace = 0;
		dqopt->info[cnt].dqi_bgrace = 0;
		dqopt->ops[cnt] = NULL;
	}
	mutex_unlock(&dqopt->dqonoff_mutex);

	/* Skip syncing and setting flags if quota files are hidden */
	if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
		goto put_inodes;

	/* Sync the superblock so that buffers with quota data are written to
	 * disk (and so userspace sees correct data afterwards). */
	if (sb->s_op->sync_fs)
		sb->s_op->sync_fs(sb, 1);
	sync_blockdev(sb->s_bdev);
	/* Now the quota files are just ordinary files and we can set the
	 * inode flags back. Moreover we discard the pagecache so that
	 * userspace sees the writes we did bypassing the pagecache. We
	 * must also discard the blockdev buffers so that we see the
	 * changes done by userspace on the next quotaon() */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (toputinode[cnt]) {
			mutex_lock(&dqopt->dqonoff_mutex);
			/* If quota was reenabled in the meantime, we have
			 * nothing to do */
			if (!sb_has_quota_loaded(sb, cnt)) {
				mutex_lock_nested(&toputinode[cnt]->i_mutex,
						  I_MUTEX_QUOTA);
				toputinode[cnt]->i_flags &= ~(S_IMMUTABLE |
				  S_NOATIME | S_NOQUOTA);
				truncate_inode_pages(&toputinode[cnt]->i_data,
						     0);
				mutex_unlock(&toputinode[cnt]->i_mutex);
				mark_inode_dirty(toputinode[cnt]);
			}
			mutex_unlock(&dqopt->dqonoff_mutex);
		}
	if (sb->s_bdev)
		invalidate_bdev(sb->s_bdev);
put_inodes:
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (toputinode[cnt]) {
			/* On remount RO, we keep the inode pointer so that we
			 * can reenable quota on the subsequent remount RW. We
			 * have to check 'flags' variable and not use sb_has_
			 * function because another quotaon / quotaoff could
			 * change global state before we got here. We refuse
			 * to suspend quotas when there is pending delete on
			 * the quota file... */
			if (!(flags & DQUOT_SUSPENDED))
				iput(toputinode[cnt]);
			else if (!toputinode[cnt]->i_nlink)
				ret = -EBUSY;
		}
	return ret;
}
EXPORT_SYMBOL(vfs_quota_disable);
1928 
1929 int vfs_quota_off(struct super_block *sb, int type, int remount)
1930 {
1931 	return vfs_quota_disable(sb, type, remount ? DQUOT_SUSPENDED :
1932 				 (DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED));
1933 }
1934 EXPORT_SYMBOL(vfs_quota_off);
1935 /*
1936  *	Turn quotas on on a device
1937  */
1938 
1939 /*
1940  * Helper function to turn quotas on when we already have the inode of
1941  * quota file and no quota information is loaded.
1942  */
1943 static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
1944 	unsigned int flags)
1945 {
1946 	struct quota_format_type *fmt = find_quota_format(format_id);
1947 	struct super_block *sb = inode->i_sb;
1948 	struct quota_info *dqopt = sb_dqopt(sb);
1949 	int error;
1950 	int oldflags = -1;
1951 
1952 	if (!fmt)
1953 		return -ESRCH;
1954 	if (!S_ISREG(inode->i_mode)) {
1955 		error = -EACCES;
1956 		goto out_fmt;
1957 	}
1958 	if (IS_RDONLY(inode)) {
1959 		error = -EROFS;
1960 		goto out_fmt;
1961 	}
1962 	if (!sb->s_op->quota_write || !sb->s_op->quota_read) {
1963 		error = -EINVAL;
1964 		goto out_fmt;
1965 	}
1966 	/* Usage always has to be set... */
1967 	if (!(flags & DQUOT_USAGE_ENABLED)) {
1968 		error = -EINVAL;
1969 		goto out_fmt;
1970 	}
1971 
1972 	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
1973 		/* As we bypass the pagecache we must now flush the inode so
1974 		 * that we see all the changes from userspace... */
1975 		write_inode_now(inode, 1);
1976 		/* And now flush the block cache so that kernel sees the
1977 		 * changes */
1978 		invalidate_bdev(sb->s_bdev);
1979 	}
1980 	mutex_lock(&dqopt->dqonoff_mutex);
1981 	if (sb_has_quota_loaded(sb, type)) {
1982 		error = -EBUSY;
1983 		goto out_lock;
1984 	}
1985 
1986 	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
1987 		/* We don't want quota and atime on quota files (deadlocks
1988 		 * possible) Also nobody should write to the file - we use
1989 		 * special IO operations which ignore the immutable bit. */
1990 		mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
1991 		oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE |
1992 					     S_NOQUOTA);
1993 		inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE;
1994 		mutex_unlock(&inode->i_mutex);
1995 		/*
1996 		 * When S_NOQUOTA is set, remove dquot references as no more
1997 		 * references can be added
1998 		 */
1999 		sb->dq_op->drop(inode);
2000 	}
2001 
2002 	error = -EIO;
2003 	dqopt->files[type] = igrab(inode);
2004 	if (!dqopt->files[type])
2005 		goto out_lock;
2006 	error = -EINVAL;
2007 	if (!fmt->qf_ops->check_quota_file(sb, type))
2008 		goto out_file_init;
2009 
2010 	dqopt->ops[type] = fmt->qf_ops;
2011 	dqopt->info[type].dqi_format = fmt;
2012 	dqopt->info[type].dqi_fmt_id = format_id;
2013 	INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
2014 	mutex_lock(&dqopt->dqio_mutex);
2015 	error = dqopt->ops[type]->read_file_info(sb, type);
2016 	if (error < 0) {
2017 		mutex_unlock(&dqopt->dqio_mutex);
2018 		goto out_file_init;
2019 	}
2020 	mutex_unlock(&dqopt->dqio_mutex);
2021 	spin_lock(&dq_state_lock);
2022 	dqopt->flags |= dquot_state_flag(flags, type);
2023 	spin_unlock(&dq_state_lock);
2024 
2025 	add_dquot_ref(sb, type);
2026 	mutex_unlock(&dqopt->dqonoff_mutex);
2027 
2028 	return 0;
2029 
2030 out_file_init:
2031 	dqopt->files[type] = NULL;
2032 	iput(inode);
2033 out_lock:
2034 	if (oldflags != -1) {
2035 		mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
2036 		/* Set the flags back (in the case of accidental quotaon()
2037 		 * on a wrong file we don't want to mess up the flags) */
2038 		inode->i_flags &= ~(S_NOATIME | S_NOQUOTA | S_IMMUTABLE);
2039 		inode->i_flags |= oldflags;
2040 		mutex_unlock(&inode->i_mutex);
2041 	}
2042 	mutex_unlock(&dqopt->dqonoff_mutex);
2043 out_fmt:
2044 	put_quota_format(fmt);
2045 
2046 	return error;
2047 }
2048 
/* Reenable quotas on remount RW */
static int vfs_quota_on_remount(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	struct inode *inode;
	int ret;
	unsigned int flags;

	mutex_lock(&dqopt->dqonoff_mutex);
	/* Nothing to do unless this type was suspended by quota off. */
	if (!sb_has_quota_suspended(sb, type)) {
		mutex_unlock(&dqopt->dqonoff_mutex);
		return 0;
	}
	/* Take over the inode reference stashed when quota was suspended. */
	inode = dqopt->files[type];
	dqopt->files[type] = NULL;
	spin_lock(&dq_state_lock);
	/* Remember which of usage / limits were enabled before suspend... */
	flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED |
						DQUOT_LIMITS_ENABLED, type);
	/* ...and clear all per-type state bits before reloading. */
	dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, type);
	spin_unlock(&dq_state_lock);
	mutex_unlock(&dqopt->dqonoff_mutex);

	/* Convert the per-type state bits back into generic DQUOT_* flags. */
	flags = dquot_generic_flag(flags, type);
	ret = vfs_load_quota_inode(inode, type, dqopt->info[type].dqi_fmt_id,
				   flags);
	/* Drop our reference; vfs_load_quota_inode grabbed its own. */
	iput(inode);

	return ret;
}
2078 
2079 int vfs_quota_on_path(struct super_block *sb, int type, int format_id,
2080 		      struct path *path)
2081 {
2082 	int error = security_quota_on(path->dentry);
2083 	if (error)
2084 		return error;
2085 	/* Quota file not on the same filesystem? */
2086 	if (path->mnt->mnt_sb != sb)
2087 		error = -EXDEV;
2088 	else
2089 		error = vfs_load_quota_inode(path->dentry->d_inode, type,
2090 					     format_id, DQUOT_USAGE_ENABLED |
2091 					     DQUOT_LIMITS_ENABLED);
2092 	return error;
2093 }
2094 EXPORT_SYMBOL(vfs_quota_on_path);
2095 
2096 int vfs_quota_on(struct super_block *sb, int type, int format_id, char *name,
2097 		 int remount)
2098 {
2099 	struct path path;
2100 	int error;
2101 
2102 	if (remount)
2103 		return vfs_quota_on_remount(sb, type);
2104 
2105 	error = kern_path(name, LOOKUP_FOLLOW, &path);
2106 	if (!error) {
2107 		error = vfs_quota_on_path(sb, type, format_id, &path);
2108 		path_put(&path);
2109 	}
2110 	return error;
2111 }
2112 EXPORT_SYMBOL(vfs_quota_on);
2113 
2114 /*
2115  * More powerful function for turning on quotas allowing setting
2116  * of individual quota flags
2117  */
2118 int vfs_quota_enable(struct inode *inode, int type, int format_id,
2119 		unsigned int flags)
2120 {
2121 	int ret = 0;
2122 	struct super_block *sb = inode->i_sb;
2123 	struct quota_info *dqopt = sb_dqopt(sb);
2124 
2125 	/* Just unsuspend quotas? */
2126 	if (flags & DQUOT_SUSPENDED)
2127 		return vfs_quota_on_remount(sb, type);
2128 	if (!flags)
2129 		return 0;
2130 	/* Just updating flags needed? */
2131 	if (sb_has_quota_loaded(sb, type)) {
2132 		mutex_lock(&dqopt->dqonoff_mutex);
2133 		/* Now do a reliable test... */
2134 		if (!sb_has_quota_loaded(sb, type)) {
2135 			mutex_unlock(&dqopt->dqonoff_mutex);
2136 			goto load_quota;
2137 		}
2138 		if (flags & DQUOT_USAGE_ENABLED &&
2139 		    sb_has_quota_usage_enabled(sb, type)) {
2140 			ret = -EBUSY;
2141 			goto out_lock;
2142 		}
2143 		if (flags & DQUOT_LIMITS_ENABLED &&
2144 		    sb_has_quota_limits_enabled(sb, type)) {
2145 			ret = -EBUSY;
2146 			goto out_lock;
2147 		}
2148 		spin_lock(&dq_state_lock);
2149 		sb_dqopt(sb)->flags |= dquot_state_flag(flags, type);
2150 		spin_unlock(&dq_state_lock);
2151 out_lock:
2152 		mutex_unlock(&dqopt->dqonoff_mutex);
2153 		return ret;
2154 	}
2155 
2156 load_quota:
2157 	return vfs_load_quota_inode(inode, type, format_id, flags);
2158 }
2159 EXPORT_SYMBOL(vfs_quota_enable);
2160 
2161 /*
2162  * This function is used when filesystem needs to initialize quotas
2163  * during mount time.
2164  */
2165 int vfs_quota_on_mount(struct super_block *sb, char *qf_name,
2166 		int format_id, int type)
2167 {
2168 	struct dentry *dentry;
2169 	int error;
2170 
2171 	mutex_lock(&sb->s_root->d_inode->i_mutex);
2172 	dentry = lookup_one_len(qf_name, sb->s_root, strlen(qf_name));
2173 	mutex_unlock(&sb->s_root->d_inode->i_mutex);
2174 	if (IS_ERR(dentry))
2175 		return PTR_ERR(dentry);
2176 
2177 	if (!dentry->d_inode) {
2178 		error = -ENOENT;
2179 		goto out;
2180 	}
2181 
2182 	error = security_quota_on(dentry);
2183 	if (!error)
2184 		error = vfs_load_quota_inode(dentry->d_inode, type, format_id,
2185 				DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
2186 
2187 out:
2188 	dput(dentry);
2189 	return error;
2190 }
2191 EXPORT_SYMBOL(vfs_quota_on_mount);
2192 
2193 /* Wrapper to turn on quotas when remounting rw */
2194 int vfs_dq_quota_on_remount(struct super_block *sb)
2195 {
2196 	int cnt;
2197 	int ret = 0, err;
2198 
2199 	if (!sb->s_qcop || !sb->s_qcop->quota_on)
2200 		return -ENOSYS;
2201 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
2202 		err = sb->s_qcop->quota_on(sb, cnt, 0, NULL, 1);
2203 		if (err < 0 && !ret)
2204 			ret = err;
2205 	}
2206 	return ret;
2207 }
2208 EXPORT_SYMBOL(vfs_dq_quota_on_remount);
2209 
/* Convert a count of quota blocks to a byte count (space). */
static inline qsize_t qbtos(qsize_t blocks)
{
	return blocks << QIF_DQBLKSIZE_BITS;
}
2214 
/* Convert a byte count (space) to quota blocks, rounding up. */
static inline qsize_t stoqb(qsize_t space)
{
	return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS;
}
2219 
2220 /* Generic routine for getting common part of quota structure */
2221 static void do_get_dqblk(struct dquot *dquot, struct if_dqblk *di)
2222 {
2223 	struct mem_dqblk *dm = &dquot->dq_dqb;
2224 
2225 	spin_lock(&dq_data_lock);
2226 	di->dqb_bhardlimit = stoqb(dm->dqb_bhardlimit);
2227 	di->dqb_bsoftlimit = stoqb(dm->dqb_bsoftlimit);
2228 	di->dqb_curspace = dm->dqb_curspace + dm->dqb_rsvspace;
2229 	di->dqb_ihardlimit = dm->dqb_ihardlimit;
2230 	di->dqb_isoftlimit = dm->dqb_isoftlimit;
2231 	di->dqb_curinodes = dm->dqb_curinodes;
2232 	di->dqb_btime = dm->dqb_btime;
2233 	di->dqb_itime = dm->dqb_itime;
2234 	di->dqb_valid = QIF_ALL;
2235 	spin_unlock(&dq_data_lock);
2236 }
2237 
2238 int vfs_get_dqblk(struct super_block *sb, int type, qid_t id,
2239 		  struct if_dqblk *di)
2240 {
2241 	struct dquot *dquot;
2242 
2243 	dquot = dqget(sb, id, type);
2244 	if (!dquot)
2245 		return -ESRCH;
2246 	do_get_dqblk(dquot, di);
2247 	dqput(dquot);
2248 
2249 	return 0;
2250 }
2251 EXPORT_SYMBOL(vfs_get_dqblk);
2252 
/* Generic routine for setting common part of quota structure */
static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
{
	struct mem_dqblk *dm = &dquot->dq_dqb;
	int check_blim = 0, check_ilim = 0;
	struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type];

	/* Reject limits the quota format cannot represent on disk. */
	if ((di->dqb_valid & QIF_BLIMITS &&
	     (di->dqb_bhardlimit > dqi->dqi_maxblimit ||
	      di->dqb_bsoftlimit > dqi->dqi_maxblimit)) ||
	    (di->dqb_valid & QIF_ILIMITS &&
	     (di->dqb_ihardlimit > dqi->dqi_maxilimit ||
	      di->dqb_isoftlimit > dqi->dqi_maxilimit)))
		return -ERANGE;

	spin_lock(&dq_data_lock);
	if (di->dqb_valid & QIF_SPACE) {
		/* Userspace reports curspace + rsvspace (see do_get_dqblk),
		 * so subtract the reservation back out when storing. */
		dm->dqb_curspace = di->dqb_curspace - dm->dqb_rsvspace;
		check_blim = 1;
		__set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
	}
	if (di->dqb_valid & QIF_BLIMITS) {
		/* Block limits arrive in quota blocks; store as bytes. */
		dm->dqb_bsoftlimit = qbtos(di->dqb_bsoftlimit);
		dm->dqb_bhardlimit = qbtos(di->dqb_bhardlimit);
		check_blim = 1;
		__set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
	}
	if (di->dqb_valid & QIF_INODES) {
		dm->dqb_curinodes = di->dqb_curinodes;
		check_ilim = 1;
		__set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
	}
	if (di->dqb_valid & QIF_ILIMITS) {
		dm->dqb_isoftlimit = di->dqb_isoftlimit;
		dm->dqb_ihardlimit = di->dqb_ihardlimit;
		check_ilim = 1;
		__set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
	}
	if (di->dqb_valid & QIF_BTIME) {
		dm->dqb_btime = di->dqb_btime;
		check_blim = 1;
		__set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
	}
	if (di->dqb_valid & QIF_ITIME) {
		dm->dqb_itime = di->dqb_itime;
		check_ilim = 1;
		__set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
	}

	/* Recompute grace times after any change affecting block usage. */
	if (check_blim) {
		if (!dm->dqb_bsoftlimit ||
		    dm->dqb_curspace < dm->dqb_bsoftlimit) {
			/* Below soft limit (or none): clear grace deadline. */
			dm->dqb_btime = 0;
			clear_bit(DQ_BLKS_B, &dquot->dq_flags);
		} else if (!(di->dqb_valid & QIF_BTIME))
			/* Set grace only if user hasn't provided his own... */
			dm->dqb_btime = get_seconds() + dqi->dqi_bgrace;
	}
	/* Same recomputation for inode usage. */
	if (check_ilim) {
		if (!dm->dqb_isoftlimit ||
		    dm->dqb_curinodes < dm->dqb_isoftlimit) {
			dm->dqb_itime = 0;
			clear_bit(DQ_INODES_B, &dquot->dq_flags);
		} else if (!(di->dqb_valid & QIF_ITIME))
			/* Set grace only if user hasn't provided his own... */
			dm->dqb_itime = get_seconds() + dqi->dqi_igrace;
	}
	/* A dquot with no limits at all is only a usage tracker ("fake"). */
	if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit ||
	    dm->dqb_isoftlimit)
		clear_bit(DQ_FAKE_B, &dquot->dq_flags);
	else
		set_bit(DQ_FAKE_B, &dquot->dq_flags);
	spin_unlock(&dq_data_lock);
	mark_dquot_dirty(dquot);

	return 0;
}
2330 
2331 int vfs_set_dqblk(struct super_block *sb, int type, qid_t id,
2332 		  struct if_dqblk *di)
2333 {
2334 	struct dquot *dquot;
2335 	int rc;
2336 
2337 	dquot = dqget(sb, id, type);
2338 	if (!dquot) {
2339 		rc = -ESRCH;
2340 		goto out;
2341 	}
2342 	rc = do_set_dqblk(dquot, di);
2343 	dqput(dquot);
2344 out:
2345 	return rc;
2346 }
2347 EXPORT_SYMBOL(vfs_set_dqblk);
2348 
2349 /* Generic routine for getting common part of quota file information */
2350 int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
2351 {
2352 	struct mem_dqinfo *mi;
2353 
2354 	mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
2355 	if (!sb_has_quota_active(sb, type)) {
2356 		mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
2357 		return -ESRCH;
2358 	}
2359 	mi = sb_dqopt(sb)->info + type;
2360 	spin_lock(&dq_data_lock);
2361 	ii->dqi_bgrace = mi->dqi_bgrace;
2362 	ii->dqi_igrace = mi->dqi_igrace;
2363 	ii->dqi_flags = mi->dqi_flags & DQF_MASK;
2364 	ii->dqi_valid = IIF_ALL;
2365 	spin_unlock(&dq_data_lock);
2366 	mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
2367 	return 0;
2368 }
2369 EXPORT_SYMBOL(vfs_get_dqinfo);
2370 
2371 /* Generic routine for setting common part of quota file information */
2372 int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
2373 {
2374 	struct mem_dqinfo *mi;
2375 	int err = 0;
2376 
2377 	mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
2378 	if (!sb_has_quota_active(sb, type)) {
2379 		err = -ESRCH;
2380 		goto out;
2381 	}
2382 	mi = sb_dqopt(sb)->info + type;
2383 	spin_lock(&dq_data_lock);
2384 	if (ii->dqi_valid & IIF_BGRACE)
2385 		mi->dqi_bgrace = ii->dqi_bgrace;
2386 	if (ii->dqi_valid & IIF_IGRACE)
2387 		mi->dqi_igrace = ii->dqi_igrace;
2388 	if (ii->dqi_valid & IIF_FLAGS)
2389 		mi->dqi_flags = (mi->dqi_flags & ~DQF_MASK) |
2390 				(ii->dqi_flags & DQF_MASK);
2391 	spin_unlock(&dq_data_lock);
2392 	mark_info_dirty(sb, type);
2393 	/* Force write to disk */
2394 	sb->dq_op->write_info(sb, type);
2395 out:
2396 	mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
2397 	return err;
2398 }
2399 EXPORT_SYMBOL(vfs_set_dqinfo);
2400 
/*
 * Generic quotactl operations for filesystems using the standard
 * dquot machinery in this file.
 */
const struct quotactl_ops vfs_quotactl_ops = {
	.quota_on	= vfs_quota_on,
	.quota_off	= vfs_quota_off,
	.quota_sync	= vfs_quota_sync,
	.get_info	= vfs_get_dqinfo,
	.set_info	= vfs_set_dqinfo,
	.get_dqblk	= vfs_get_dqblk,
	.set_dqblk	= vfs_set_dqblk
};
2410 
/*
 * Sysctl table exposing dquot cache statistics (read-only) and, when
 * CONFIG_PRINT_QUOTA_WARNING is set, the writable flag controlling
 * quota warning printing.
 */
static ctl_table fs_dqstats_table[] = {
	{
		.procname	= "lookups",
		.data		= &dqstats.lookups,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "drops",
		.data		= &dqstats.drops,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "reads",
		.data		= &dqstats.reads,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "writes",
		.data		= &dqstats.writes,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "cache_hits",
		.data		= &dqstats.cache_hits,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "allocated_dquots",
		.data		= &dqstats.allocated_dquots,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "free_dquots",
		.data		= &dqstats.free_dquots,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "syncs",
		.data		= &dqstats.syncs,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= proc_dointvec,
	},
#ifdef CONFIG_PRINT_QUOTA_WARNING
	{
		.procname	= "warnings",
		.data		= &flag_print_warnings,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
#endif
	{ },
};
2479 
/* "quota" directory under "fs", holding the stats table above. */
static ctl_table fs_table[] = {
	{
		.procname	= "quota",
		.mode		= 0555,
		.child		= fs_dqstats_table,
	},
	{ },
};
2488 
/* Top-level "fs" sysctl directory registered by dquot_init(). */
static ctl_table sys_table[] = {
	{
		.procname	= "fs",
		.mode		= 0555,
		.child		= fs_table,
	},
	{ },
};
2497 
2498 static int __init dquot_init(void)
2499 {
2500 	int i;
2501 	unsigned long nr_hash, order;
2502 
2503 	printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__);
2504 
2505 	register_sysctl_table(sys_table);
2506 
2507 	dquot_cachep = kmem_cache_create("dquot",
2508 			sizeof(struct dquot), sizeof(unsigned long) * 4,
2509 			(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
2510 				SLAB_MEM_SPREAD|SLAB_PANIC),
2511 			NULL);
2512 
2513 	order = 0;
2514 	dquot_hash = (struct hlist_head *)__get_free_pages(GFP_ATOMIC, order);
2515 	if (!dquot_hash)
2516 		panic("Cannot create dquot hash table");
2517 
2518 	/* Find power-of-two hlist_heads which can fit into allocation */
2519 	nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
2520 	dq_hash_bits = 0;
2521 	do {
2522 		dq_hash_bits++;
2523 	} while (nr_hash >> dq_hash_bits);
2524 	dq_hash_bits--;
2525 
2526 	nr_hash = 1UL << dq_hash_bits;
2527 	dq_hash_mask = nr_hash - 1;
2528 	for (i = 0; i < nr_hash; i++)
2529 		INIT_HLIST_HEAD(dquot_hash + i);
2530 
2531 	printk("Dquot-cache hash table entries: %ld (order %ld, %ld bytes)\n",
2532 			nr_hash, order, (PAGE_SIZE << order));
2533 
2534 	register_shrinker(&dqcache_shrinker);
2535 
2536 	return 0;
2537 }
2538 module_init(dquot_init);
2539