xref: /openbmc/linux/fs/quota/dquot.c (revision 02412847)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Implementation of the diskquota system for the LINUX operating system. QUOTA
4  * is implemented using the BSD system call interface as the means of
5  * communication with the user level. This file contains the generic routines
6  * called by the different filesystems on allocation of an inode or block.
7  * These routines take care of the administration needed to have a consistent
8  * diskquota tracking system. The ideas of both user and group quotas are based
9  * on the Melbourne quota system as used on BSD derived systems. The internal
10  * implementation is based on one of the several variants of the LINUX
11  * inode-subsystem with added complexity of the diskquota system.
12  *
13  * Author:	Marco van Wieringen <mvw@planets.elm.net>
14  *
15  * Fixes:   Dmitry Gorodchanin <pgmdsg@ibi.com>, 11 Feb 96
16  *
17  *		Revised list management to avoid races
18  *		-- Bill Hawes, <whawes@star.net>, 9/98
19  *
20  *		Fixed races in dquot_transfer(), dqget() and dquot_alloc_...().
21  *		As the consequence the locking was moved from dquot_decr_...(),
22  *		dquot_incr_...() to calling functions.
23  *		invalidate_dquots() now writes modified dquots.
24  *		Serialized quota_off() and quota_on() for mount point.
25  *		Fixed a few bugs in grow_dquots().
26  *		Fixed deadlock in write_dquot() - we no longer account quotas on
27  *		quota files
28  *		remove_dquot_ref() moved to inode.c - it now traverses through inodes
29  *		add_dquot_ref() restarts after blocking
30  *		Added check for bogus uid and fixed check for group in quotactl.
31  *		Jan Kara, <jack@suse.cz>, sponsored by SuSE CR, 10-11/99
32  *
33  *		Used struct list_head instead of own list struct
34  *		Invalidation of referenced dquots is no longer possible
35  *		Improved free_dquots list management
36  *		Quota and i_blocks are now updated in one place to avoid races
37  *		Warnings are now delayed so we won't block in critical section
38  *		Write updated not to require dquot lock
39  *		Jan Kara, <jack@suse.cz>, 9/2000
40  *
41  *		Added dynamic quota structure allocation
42  *		Jan Kara <jack@suse.cz> 12/2000
43  *
44  *		Rewritten quota interface. Implemented new quota format and
45  *		formats registering.
46  *		Jan Kara, <jack@suse.cz>, 2001,2002
47  *
48  *		New SMP locking.
49  *		Jan Kara, <jack@suse.cz>, 10/2002
50  *
51  *		Added journalled quota support, fix lock inversion problems
52  *		Jan Kara, <jack@suse.cz>, 2003,2004
53  *
54  * (C) Copyright 1994 - 1997 Marco van Wieringen
55  */
56 
57 #include <linux/errno.h>
58 #include <linux/kernel.h>
59 #include <linux/fs.h>
60 #include <linux/mount.h>
61 #include <linux/mm.h>
62 #include <linux/time.h>
63 #include <linux/types.h>
64 #include <linux/string.h>
65 #include <linux/fcntl.h>
66 #include <linux/stat.h>
67 #include <linux/tty.h>
68 #include <linux/file.h>
69 #include <linux/slab.h>
70 #include <linux/sysctl.h>
71 #include <linux/init.h>
72 #include <linux/module.h>
73 #include <linux/proc_fs.h>
74 #include <linux/security.h>
75 #include <linux/sched.h>
76 #include <linux/cred.h>
77 #include <linux/kmod.h>
78 #include <linux/namei.h>
79 #include <linux/capability.h>
80 #include <linux/quotaops.h>
81 #include <linux/blkdev.h>
82 #include <linux/sched/mm.h>
83 #include "../internal.h" /* ugh */
84 
85 #include <linux/uaccess.h>
86 
87 /*
88  * There are five quota SMP locks:
89  * * dq_list_lock protects all lists with quotas and quota formats.
90  * * dquot->dq_dqb_lock protects data from dq_dqb
91  * * inode->i_lock protects inode->i_blocks, i_bytes and also guards
92  *   consistency of dquot->dq_dqb with inode->i_blocks, i_bytes so that
93  *   dquot_transfer() can stabilize amount it transfers
94  * * dq_data_lock protects mem_dqinfo structures and modifications of dquot
95  *   pointers in the inode
96  * * dq_state_lock protects modifications of quota state (on quotaon and
97  *   quotaoff) and readers who care about latest values take it as well.
98  *
99  * The spinlock ordering is hence:
100  *   dq_data_lock > dq_list_lock > i_lock > dquot->dq_dqb_lock,
101  *   dq_list_lock > dq_state_lock
102  *
103  * Note that some things (eg. sb pointer, type, id) doesn't change during
104  * the life of the dquot structure and so needn't to be protected by a lock
105  *
106  * Operation accessing dquots via inode pointers are protected by dquot_srcu.
107  * Operation of reading pointer needs srcu_read_lock(&dquot_srcu), and
108  * synchronize_srcu(&dquot_srcu) is called after clearing pointers from
109  * inode and before dropping dquot references to avoid use of dquots after
110  * they are freed. dq_data_lock is used to serialize the pointer setting and
111  * clearing operations.
112  * Special care needs to be taken about S_NOQUOTA inode flag (marking that
113  * inode is a quota file). Functions adding pointers from inode to dquots have
114  * to check this flag under dq_data_lock and then (if S_NOQUOTA is not set) they
115  * have to do all pointer modifications before dropping dq_data_lock. This makes
116  * sure they cannot race with quotaon which first sets S_NOQUOTA flag and
117  * then drops all pointers to dquots from an inode.
118  *
119  * Each dquot has its dq_lock mutex.  Dquot is locked when it is being read to
120  * memory (or space for it is being allocated) on the first dqget(), when it is
121  * being written out, and when it is being released on the last dqput(). The
122  * allocation and release operations are serialized by the dq_lock and by
123  * checking the use count in dquot_release().
124  *
125  * Lock ordering (including related VFS locks) is the following:
126  *   s_umount > i_mutex > journal_lock > dquot->dq_lock > dqio_sem
127  */
128 
/* Protects the global dquot lists (inuse_list, free_dquots, hash chains,
 * per-type dirty lists) and the quota_formats list. */
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
/* Protects quota enable/disable state transitions (quotaon/quotaoff). */
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock);
/* Protects mem_dqinfo and i_dquot pointer updates; see header comment. */
__cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
EXPORT_SYMBOL(dq_data_lock);
/* SRCU guarding dquot access via inode pointers; see header comment. */
DEFINE_STATIC_SRCU(dquot_srcu);

/* Waitqueue used by invalidate_dquots() to wait for the last reference. */
static DECLARE_WAIT_QUEUE_HEAD(dquot_ref_wq);
136 
137 void __quota_error(struct super_block *sb, const char *func,
138 		   const char *fmt, ...)
139 {
140 	if (printk_ratelimit()) {
141 		va_list args;
142 		struct va_format vaf;
143 
144 		va_start(args, fmt);
145 
146 		vaf.fmt = fmt;
147 		vaf.va = &args;
148 
149 		printk(KERN_ERR "Quota error (device %s): %s: %pV\n",
150 		       sb->s_id, func, &vaf);
151 
152 		va_end(args);
153 	}
154 }
155 EXPORT_SYMBOL(__quota_error);
156 
#if defined(CONFIG_QUOTA_DEBUG) || defined(CONFIG_PRINT_QUOTA_WARNING)
/* Human-readable quota type names, used in diagnostics/warnings. */
static char *quotatypes[] = INITQFNAMES;
#endif
static struct quota_format_type *quota_formats;	/* List of registered formats */
static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;

/* SLAB cache for dquot structures */
static struct kmem_cache *dquot_cachep;
165 
/*
 * Register a quota format so filesystems can use it. The format is
 * pushed onto the head of the global quota_formats list under
 * dq_list_lock. Always succeeds (returns 0).
 */
int register_quota_format(struct quota_format_type *fmt)
{
	spin_lock(&dq_list_lock);
	fmt->qf_next = quota_formats;
	quota_formats = fmt;
	spin_unlock(&dq_list_lock);
	return 0;
}
174 EXPORT_SYMBOL(register_quota_format);
175 
176 void unregister_quota_format(struct quota_format_type *fmt)
177 {
178 	struct quota_format_type **actqf;
179 
180 	spin_lock(&dq_list_lock);
181 	for (actqf = &quota_formats; *actqf && *actqf != fmt;
182 	     actqf = &(*actqf)->qf_next)
183 		;
184 	if (*actqf)
185 		*actqf = (*actqf)->qf_next;
186 	spin_unlock(&dq_list_lock);
187 }
188 EXPORT_SYMBOL(unregister_quota_format);
189 
/*
 * Find a registered quota format by id and pin its owning module.
 * If the format is not registered, try loading the matching module
 * (from module_names[]) and search again. Returns NULL when the format
 * cannot be found or its module cannot be pinned.
 */
static struct quota_format_type *find_quota_format(int id)
{
	struct quota_format_type *actqf;

	spin_lock(&dq_list_lock);
	for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
	     actqf = actqf->qf_next)
		;
	if (!actqf || !try_module_get(actqf->qf_owner)) {
		int qm;

		/* Drop the spinlock: request_module() may sleep. */
		spin_unlock(&dq_list_lock);

		for (qm = 0; module_names[qm].qm_fmt_id &&
			     module_names[qm].qm_fmt_id != id; qm++)
			;
		if (!module_names[qm].qm_fmt_id ||
		    request_module(module_names[qm].qm_mod_name))
			return NULL;

		/* Re-search: the module load should have registered it. */
		spin_lock(&dq_list_lock);
		for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
		     actqf = actqf->qf_next)
			;
		if (actqf && !try_module_get(actqf->qf_owner))
			actqf = NULL;
	}
	spin_unlock(&dq_list_lock);
	return actqf;
}
220 
/* Drop the module reference taken by find_quota_format(). */
static void put_quota_format(struct quota_format_type *fmt)
{
	module_put(fmt->qf_owner);
}
225 
226 /*
227  * Dquot List Management:
228  * The quota code uses four lists for dquot management: the inuse_list,
229  * free_dquots, dqi_dirty_list, and dquot_hash[] array. A single dquot
230  * structure may be on some of those lists, depending on its current state.
231  *
232  * All dquots are placed to the end of inuse_list when first created, and this
233  * list is used for invalidate operation, which must look at every dquot.
234  *
235  * Unused dquots (dq_count == 0) are added to the free_dquots list when freed,
236  * and this list is searched whenever we need an available dquot.  Dquots are
237  * removed from the list as soon as they are used again, and
238  * dqstats.free_dquots gives the number of dquots on the list. When
239  * dquot is invalidated it's completely released from memory.
240  *
241  * Dirty dquots are added to the dqi_dirty_list of quota_info when mark
242  * dirtied, and this list is searched when writing dirty dquots back to
243  * quota file. Note that some filesystems do dirty dquot tracking on their
244  * own (e.g. in a journal) and thus don't use dqi_dirty_list.
245  *
246  * Dquots with a specific identity (device, type and id) are placed on
 * one of the dquot_hash[] hash chains. This provides an efficient search
248  * mechanism to locate a specific dquot.
249  */
250 
/* Every allocated dquot; traversed by invalidate_dquots(). */
static LIST_HEAD(inuse_list);
/* Unreferenced dquots (dq_count == 0), reclaimable by the shrinker. */
static LIST_HEAD(free_dquots);
static unsigned int dq_hash_bits, dq_hash_mask;
static struct hlist_head *dquot_hash;

/* Global quota statistics. */
struct dqstats dqstats;
EXPORT_SYMBOL(dqstats);

static qsize_t inode_get_rsv_space(struct inode *inode);
static qsize_t __inode_get_rsv_space(struct inode *inode);
static int __dquot_initialize(struct inode *inode, int type);
262 
263 static inline unsigned int
264 hashfn(const struct super_block *sb, struct kqid qid)
265 {
266 	unsigned int id = from_kqid(&init_user_ns, qid);
267 	int type = qid.type;
268 	unsigned long tmp;
269 
270 	tmp = (((unsigned long)sb>>L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type);
271 	return (tmp + (tmp >> dq_hash_bits)) & dq_hash_mask;
272 }
273 
274 /*
275  * Following list functions expect dq_list_lock to be held
276  */
277 static inline void insert_dquot_hash(struct dquot *dquot)
278 {
279 	struct hlist_head *head;
280 	head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id);
281 	hlist_add_head(&dquot->dq_hash, head);
282 }
283 
/* Unhash the dquot; _init variant keeps the node safely re-checkable. */
static inline void remove_dquot_hash(struct dquot *dquot)
{
	hlist_del_init(&dquot->dq_hash);
}
288 
289 static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb,
290 				struct kqid qid)
291 {
292 	struct dquot *dquot;
293 
294 	hlist_for_each_entry(dquot, dquot_hash+hashent, dq_hash)
295 		if (dquot->dq_sb == sb && qid_eq(dquot->dq_id, qid))
296 			return dquot;
297 
298 	return NULL;
299 }
300 
/* Add an unreferenced dquot to the tail of the free list.
 * Caller must hold dq_list_lock. */
static inline void put_dquot_last(struct dquot *dquot)
{
	list_add_tail(&dquot->dq_free, &free_dquots);
	dqstats_inc(DQST_FREE_DQUOTS);
}
307 
308 static inline void remove_free_dquot(struct dquot *dquot)
309 {
310 	if (list_empty(&dquot->dq_free))
311 		return;
312 	list_del_init(&dquot->dq_free);
313 	dqstats_dec(DQST_FREE_DQUOTS);
314 }
315 
/* Add a newly created dquot to inuse_list. Caller holds dq_list_lock. */
static inline void put_inuse(struct dquot *dquot)
{
	/* We add to the back of inuse list so we don't have to restart
	 * when traversing this list and we block */
	list_add_tail(&dquot->dq_inuse, &inuse_list);
	dqstats_inc(DQST_ALLOC_DQUOTS);
}
323 
/* Remove a dquot from inuse_list before destruction.
 * Caller must hold dq_list_lock. */
static inline void remove_inuse(struct dquot *dquot)
{
	dqstats_dec(DQST_ALLOC_DQUOTS);
	list_del(&dquot->dq_inuse);
}
329 /*
330  * End of list functions needing dq_list_lock
331  */
332 
/*
 * Wait until any read/init/release currently holding dq_lock finishes.
 * Acquiring and immediately dropping the mutex is enough: we only need
 * to synchronize with a concurrent holder, not to keep the lock.
 */
static void wait_on_dquot(struct dquot *dquot)
{
	mutex_lock(&dquot->dq_lock);
	mutex_unlock(&dquot->dq_lock);
}
338 
/* Test whether the dquot has modifications not yet written to disk. */
static inline int dquot_dirty(struct dquot *dquot)
{
	return test_bit(DQ_MOD_B, &dquot->dq_flags);
}
343 
/* Dirty the dquot via the filesystem's method (may journal instead). */
static inline int mark_dquot_dirty(struct dquot *dquot)
{
	return dquot->dq_sb->dq_op->mark_dirty(dquot);
}
348 
/* Mark dquot dirty in atomic manner, and return its old dirty flag state */
int dquot_mark_dquot_dirty(struct dquot *dquot)
{
	int ret = 1;

	/* Inactive dquots have no valid on-disk state to write back. */
	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
		return 0;

	/* Filesystems tracking dirtiness themselves skip dqi_dirty_list. */
	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY)
		return test_and_set_bit(DQ_MOD_B, &dquot->dq_flags);

	/* If quota is dirty already, we don't have to acquire dq_list_lock */
	if (test_bit(DQ_MOD_B, &dquot->dq_flags))
		return 1;

	spin_lock(&dq_list_lock);
	/* Recheck under the lock; only the winner adds to the dirty list. */
	if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) {
		list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)->
				info[dquot->dq_id.type].dqi_dirty_list);
		ret = 0;
	}
	spin_unlock(&dq_list_lock);
	return ret;
}
373 EXPORT_SYMBOL(dquot_mark_dquot_dirty);
374 
/*
 * Dirtify all the dquots - this can block when journalling.
 * All non-NULL slots are attempted even after a failure; the value
 * returned is the first non-zero result latched into err (later results
 * do not overwrite it).
 */
static inline int mark_all_dquot_dirty(struct dquot * const *dquot)
{
	int ret, err, cnt;

	ret = err = 0;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (dquot[cnt])
			/* Even in case of error we have to continue */
			ret = mark_dquot_dirty(dquot[cnt]);
		if (!err)
			err = ret;
	}
	return err;
}
390 
391 static inline void dqput_all(struct dquot **dquot)
392 {
393 	unsigned int cnt;
394 
395 	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
396 		dqput(dquot[cnt]);
397 }
398 
/*
 * Clear the dirty bit and, unless DQUOT_NOLIST_DIRTY, unlink the dquot
 * from its dqi_dirty_list. Returns the previous dirty state: 1 when we
 * actually cleared it, 0 when it was already clean.
 */
static inline int clear_dquot_dirty(struct dquot *dquot)
{
	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY)
		return test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags);

	spin_lock(&dq_list_lock);
	if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags)) {
		spin_unlock(&dq_list_lock);
		return 0;
	}
	list_del_init(&dquot->dq_dirty);
	spin_unlock(&dq_list_lock);
	return 1;
}
413 
/* Mark the per-type quota info as needing a write to the quota file. */
void mark_info_dirty(struct super_block *sb, int type)
{
	spin_lock(&dq_data_lock);
	sb_dqopt(sb)->info[type].dqi_flags |= DQF_INFO_DIRTY;
	spin_unlock(&dq_data_lock);
}
420 EXPORT_SYMBOL(mark_info_dirty);
421 
422 /*
423  *	Read dquot from disk and alloc space for it
424  */
425 
int dquot_acquire(struct dquot *dquot)
{
	int ret = 0, ret2 = 0;
	unsigned int memalloc;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dquot->dq_lock);
	/* Quota file I/O must not recurse back into the filesystem. */
	memalloc = memalloc_nofs_save();
	if (!test_bit(DQ_READ_B, &dquot->dq_flags)) {
		ret = dqopt->ops[dquot->dq_id.type]->read_dqblk(dquot);
		if (ret < 0)
			goto out_iolock;
	}
	/* Make sure flags update is visible after dquot has been filled */
	smp_mb__before_atomic();
	set_bit(DQ_READ_B, &dquot->dq_flags);
	/* Instantiate dquot if needed (no on-disk structure yet) */
	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && !dquot->dq_off) {
		ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
		/* Write the info if needed */
		if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
			ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
					dquot->dq_sb, dquot->dq_id.type);
		}
		if (ret < 0)
			goto out_iolock;
		if (ret2 < 0) {
			ret = ret2;
			goto out_iolock;
		}
	}
	/*
	 * Make sure flags update is visible after on-disk struct has been
	 * allocated. Paired with smp_rmb() in dqget().
	 */
	smp_mb__before_atomic();
	set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_iolock:
	memalloc_nofs_restore(memalloc);
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
468 EXPORT_SYMBOL(dquot_acquire);
469 
470 /*
471  *	Write dquot to disk
472  */
int dquot_commit(struct dquot *dquot)
{
	int ret = 0;
	unsigned int memalloc;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dquot->dq_lock);
	/* Quota file I/O must not recurse back into the filesystem. */
	memalloc = memalloc_nofs_save();
	/* Nothing to do if the dquot was not dirty. */
	if (!clear_dquot_dirty(dquot))
		goto out_lock;
	/* An inactive dquot can only exist if read/init failed
	 * => better not write it */
	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
		ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
	else
		ret = -EIO;
out_lock:
	memalloc_nofs_restore(memalloc);
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
494 EXPORT_SYMBOL(dquot_commit);
495 
496 /*
497  *	Release dquot
498  */
int dquot_release(struct dquot *dquot)
{
	int ret = 0, ret2 = 0;
	unsigned int memalloc;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dquot->dq_lock);
	/* Quota file I/O must not recurse back into the filesystem. */
	memalloc = memalloc_nofs_save();
	/* Check whether we are not racing with some other dqget() */
	if (dquot_is_busy(dquot))
		goto out_dqlock;
	if (dqopt->ops[dquot->dq_id.type]->release_dqblk) {
		ret = dqopt->ops[dquot->dq_id.type]->release_dqblk(dquot);
		/* Write the info */
		if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
			ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
						dquot->dq_sb, dquot->dq_id.type);
		}
		/* Prefer the release error; fall back to the info error. */
		if (ret >= 0)
			ret = ret2;
	}
	clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_dqlock:
	memalloc_nofs_restore(memalloc);
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
526 EXPORT_SYMBOL(dquot_release);
527 
/* Default ->destroy_dquot(): return the dquot to the slab cache. */
void dquot_destroy(struct dquot *dquot)
{
	kmem_cache_free(dquot_cachep, dquot);
}
532 EXPORT_SYMBOL(dquot_destroy);
533 
/* Destroy a dquot via the filesystem's (possibly overridden) method. */
static inline void do_destroy_dquot(struct dquot *dquot)
{
	dquot->dq_sb->dq_op->destroy_dquot(dquot);
}
538 
539 /* Invalidate all dquots on the list. Note that this function is called after
540  * quota is disabled and pointers from inodes removed so there cannot be new
541  * quota users. There can still be some users of quotas due to inodes being
542  * just deleted or pruned by prune_icache() (those are not attached to any
543  * list) or parallel quotactl call. We have to wait for such users.
544  */
static void invalidate_dquots(struct super_block *sb, int type)
{
	struct dquot *dquot, *tmp;

restart:
	spin_lock(&dq_list_lock);
	list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
		if (dquot->dq_sb != sb)
			continue;
		if (dquot->dq_id.type != type)
			continue;
		/* Wait for dquot users */
		if (atomic_read(&dquot->dq_count)) {
			/* Take a reference so the dquot cannot go away
			 * while we sleep. */
			atomic_inc(&dquot->dq_count);
			spin_unlock(&dq_list_lock);
			/*
			 * Once dqput() wakes us up, we know it's time to free
			 * the dquot.
			 * IMPORTANT: we rely on the fact that there is always
			 * at most one process waiting for dquot to free.
			 * Otherwise dq_count would be > 1 and we would never
			 * wake up.
			 */
			wait_event(dquot_ref_wq,
				   atomic_read(&dquot->dq_count) == 1);
			dqput(dquot);
			/* At this moment the dquot need not exist anymore
			 * (it could have been reclaimed by prune_dqcache()).
			 * Hence we must restart. */
			goto restart;
		}
		/*
		 * Quota now has no users and it has been written on last
		 * dqput()
		 */
		remove_dquot_hash(dquot);
		remove_free_dquot(dquot);
		remove_inuse(dquot);
		do_destroy_dquot(dquot);
	}
	spin_unlock(&dq_list_lock);
}
587 
588 /* Call callback for every active dquot on given filesystem */
int dquot_scan_active(struct super_block *sb,
		      int (*fn)(struct dquot *dquot, unsigned long priv),
		      unsigned long priv)
{
	struct dquot *dquot, *old_dquot = NULL;
	int ret = 0;

	WARN_ON_ONCE(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&dq_list_lock);
	list_for_each_entry(dquot, &inuse_list, dq_inuse) {
		if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
			continue;
		if (dquot->dq_sb != sb)
			continue;
		/* Now we have active dquot so we can just increase use count */
		atomic_inc(&dquot->dq_count);
		spin_unlock(&dq_list_lock);
		/* Drop the previous iteration's reference only after taking
		 * the new one, so our position in inuse_list stays pinned. */
		dqput(old_dquot);
		old_dquot = dquot;
		/*
		 * ->release_dquot() can be racing with us. Our reference
		 * protects us from new calls to it so just wait for any
		 * outstanding call and recheck the DQ_ACTIVE_B after that.
		 */
		wait_on_dquot(dquot);
		if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
			ret = fn(dquot, priv);
			if (ret < 0)
				goto out;
		}
		spin_lock(&dq_list_lock);
		/* We are safe to continue now because our dquot could not
		 * be moved out of the inuse list while we hold the reference */
	}
	spin_unlock(&dq_list_lock);
out:
	dqput(old_dquot);
	return ret;
}
629 EXPORT_SYMBOL(dquot_scan_active);
630 
/*
 * Write one dquot via the filesystem's ->write_dquot() method.
 * On failure the dirty bit is cleared anyway so callers iterating over
 * dirty dquots cannot loop forever on an unwritable entry.
 */
static inline int dquot_write_dquot(struct dquot *dquot)
{
	int ret = dquot->dq_sb->dq_op->write_dquot(dquot);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't write quota structure "
			    "(error %d). Quota may get out of sync!", ret);
		/* Clear dirty bit anyway to avoid infinite loop. */
		clear_dquot_dirty(dquot);
	}
	return ret;
}
642 
643 /* Write all dquot structures to quota files */
int dquot_writeback_dquots(struct super_block *sb, int type)
{
	struct list_head dirty;
	struct dquot *dquot;
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	int err, ret = 0;

	WARN_ON_ONCE(!rwsem_is_locked(&sb->s_umount));

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;
		spin_lock(&dq_list_lock);
		/* Move list away to avoid livelock. */
		list_replace_init(&dqopt->info[cnt].dqi_dirty_list, &dirty);
		while (!list_empty(&dirty)) {
			dquot = list_first_entry(&dirty, struct dquot,
						 dq_dirty);

			WARN_ON(!test_bit(DQ_ACTIVE_B, &dquot->dq_flags));

			/* Now we have active dquot from which someone is
			 * holding reference so we can safely just increase
			 * use count */
			dqgrab(dquot);
			spin_unlock(&dq_list_lock);
			err = dquot_write_dquot(dquot);
			/* Remember the first error but keep writing. */
			if (err && !ret)
				ret = err;
			dqput(dquot);
			spin_lock(&dq_list_lock);
		}
		spin_unlock(&dq_list_lock);
	}

	/* Finally push out dirty per-type info structures as well. */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt)
		    && info_dirty(&dqopt->info[cnt]))
			sb->dq_op->write_info(sb, cnt);
	dqstats_inc(DQST_SYNCS);

	return ret;
}
690 EXPORT_SYMBOL(dquot_writeback_dquots);
691 
692 /* Write all dquot structures to disk and make them visible from userspace */
int dquot_quota_sync(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	int ret;

	ret = dquot_writeback_dquots(sb, type);
	if (ret)
		return ret;
	/* Hidden system quota files have no user-visible pagecache. */
	if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
		return 0;

	/* This is not very clever (and fast) but currently I don't know about
	 * any other simple way of getting quota data to disk and we must get
	 * them there for userspace to be visible... */
	if (sb->s_op->sync_fs) {
		ret = sb->s_op->sync_fs(sb, 1);
		if (ret)
			return ret;
	}
	ret = sync_blockdev(sb->s_bdev);
	if (ret)
		return ret;

	/*
	 * Now when everything is written we can discard the pagecache so
	 * that userspace sees the changes.
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;
		inode_lock(dqopt->files[cnt]);
		truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
		inode_unlock(dqopt->files[cnt]);
	}

	return 0;
}
733 EXPORT_SYMBOL(dquot_quota_sync);
734 
/* Shrinker scan: destroy up to nr_to_scan unused dquots, oldest first. */
static unsigned long
dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct dquot *dquot;
	unsigned long freed = 0;

	spin_lock(&dq_list_lock);
	while (!list_empty(&free_dquots) && sc->nr_to_scan) {
		dquot = list_first_entry(&free_dquots, struct dquot, dq_free);
		remove_dquot_hash(dquot);
		remove_free_dquot(dquot);
		remove_inuse(dquot);
		do_destroy_dquot(dquot);
		sc->nr_to_scan--;
		freed++;
	}
	spin_unlock(&dq_list_lock);
	return freed;
}
754 
/* Shrinker count: scaled number of freeable (unused) dquots. */
static unsigned long
dqcache_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	return vfs_pressure_ratio(
	percpu_counter_read_positive(&dqstats.counter[DQST_FREE_DQUOTS]));
}
761 
/* Memory shrinker that reclaims unused dquots under memory pressure. */
static struct shrinker dqcache_shrinker = {
	.count_objects = dqcache_shrink_count,
	.scan_objects = dqcache_shrink_scan,
	.seeks = DEFAULT_SEEKS,
};
767 
768 /*
769  * Put reference to dquot
770  */
void dqput(struct dquot *dquot)
{
	/* dqput(NULL) is a no-op, like kfree(NULL). */
	if (!dquot)
		return;
#ifdef CONFIG_QUOTA_DEBUG
	if (!atomic_read(&dquot->dq_count)) {
		quota_error(dquot->dq_sb, "trying to free free dquot of %s %d",
			    quotatypes[dquot->dq_id.type],
			    from_kqid(&init_user_ns, dquot->dq_id));
		BUG();
	}
#endif
	dqstats_inc(DQST_DROPS);
we_slept:
	spin_lock(&dq_list_lock);
	if (atomic_read(&dquot->dq_count) > 1) {
		/* We have more than one user... nothing to do */
		atomic_dec(&dquot->dq_count);
		/* Releasing dquot during quotaoff phase? */
		if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_id.type) &&
		    atomic_read(&dquot->dq_count) == 1)
			wake_up(&dquot_ref_wq);
		spin_unlock(&dq_list_lock);
		return;
	}
	/* We are the last holder: write back and release before freeing. */
	if (dquot_dirty(dquot)) {
		spin_unlock(&dq_list_lock);
		/* Commit dquot before releasing */
		dquot_write_dquot(dquot);
		goto we_slept;
	}
	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
		spin_unlock(&dq_list_lock);
		dquot->dq_sb->dq_op->release_dquot(dquot);
		goto we_slept;
	}
	atomic_dec(&dquot->dq_count);
#ifdef CONFIG_QUOTA_DEBUG
	/* sanity check */
	BUG_ON(!list_empty(&dquot->dq_free));
#endif
	put_dquot_last(dquot);
	spin_unlock(&dq_list_lock);
}
816 EXPORT_SYMBOL(dqput);
817 
/* Default ->alloc_dquot(): zeroed allocation from the slab cache. */
struct dquot *dquot_alloc(struct super_block *sb, int type)
{
	return kmem_cache_zalloc(dquot_cachep, GFP_NOFS);
}
822 EXPORT_SYMBOL(dquot_alloc);
823 
824 static struct dquot *get_empty_dquot(struct super_block *sb, int type)
825 {
826 	struct dquot *dquot;
827 
828 	dquot = sb->dq_op->alloc_dquot(sb, type);
829 	if(!dquot)
830 		return NULL;
831 
832 	mutex_init(&dquot->dq_lock);
833 	INIT_LIST_HEAD(&dquot->dq_free);
834 	INIT_LIST_HEAD(&dquot->dq_inuse);
835 	INIT_HLIST_NODE(&dquot->dq_hash);
836 	INIT_LIST_HEAD(&dquot->dq_dirty);
837 	dquot->dq_sb = sb;
838 	dquot->dq_id = make_kqid_invalid(type);
839 	atomic_set(&dquot->dq_count, 1);
840 	spin_lock_init(&dquot->dq_dqb_lock);
841 
842 	return dquot;
843 }
844 
845 /*
846  * Get reference to dquot
847  *
848  * Locking is slightly tricky here. We are guarded from parallel quotaoff()
849  * destroying our dquot by:
850  *   a) checking for quota flags under dq_list_lock and
851  *   b) getting a reference to dquot before we release dq_list_lock
852  */
struct dquot *dqget(struct super_block *sb, struct kqid qid)
{
	unsigned int hashent = hashfn(sb, qid);
	struct dquot *dquot, *empty = NULL;

	if (!qid_has_mapping(sb->s_user_ns, qid))
		return ERR_PTR(-EINVAL);

        if (!sb_has_quota_active(sb, qid.type))
		return ERR_PTR(-ESRCH);
we_slept:
	spin_lock(&dq_list_lock);
	spin_lock(&dq_state_lock);
	/* Recheck under dq_state_lock: quotaoff may have run meanwhile. */
	if (!sb_has_quota_active(sb, qid.type)) {
		spin_unlock(&dq_state_lock);
		spin_unlock(&dq_list_lock);
		dquot = ERR_PTR(-ESRCH);
		goto out;
	}
	spin_unlock(&dq_state_lock);

	dquot = find_dquot(hashent, sb, qid);
	if (!dquot) {
		if (!empty) {
			/* Allocation may sleep; drop the lock and retry. */
			spin_unlock(&dq_list_lock);
			empty = get_empty_dquot(sb, qid.type);
			if (!empty)
				schedule();	/* Try to wait for a moment... */
			goto we_slept;
		}
		dquot = empty;
		empty = NULL;
		dquot->dq_id = qid;
		/* all dquots go on the inuse_list */
		put_inuse(dquot);
		/* hash it first so it can be found */
		insert_dquot_hash(dquot);
		spin_unlock(&dq_list_lock);
		dqstats_inc(DQST_LOOKUPS);
	} else {
		if (!atomic_read(&dquot->dq_count))
			remove_free_dquot(dquot);
		atomic_inc(&dquot->dq_count);
		spin_unlock(&dq_list_lock);
		dqstats_inc(DQST_CACHE_HITS);
		dqstats_inc(DQST_LOOKUPS);
	}
	/* Wait for dq_lock - after this we know that either dquot_release() is
	 * already finished or it will be canceled due to dq_count > 1 test */
	wait_on_dquot(dquot);
	/* Read the dquot / allocate space in quota file */
	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
		int err;

		err = sb->dq_op->acquire_dquot(dquot);
		if (err < 0) {
			dqput(dquot);
			dquot = ERR_PTR(err);
			goto out;
		}
	}
	/*
	 * Make sure following reads see filled structure - paired with
	 * smp_mb__before_atomic() in dquot_acquire().
	 */
	smp_rmb();
#ifdef CONFIG_QUOTA_DEBUG
	BUG_ON(!dquot->dq_sb);	/* Has somebody invalidated entry under us? */
#endif
out:
	/* Free the spare allocation if the hash lookup won the race. */
	if (empty)
		do_destroy_dquot(empty);

	return dquot;
}
928 EXPORT_SYMBOL(dqget);
929 
/* Return the filesystem-provided array of dquot pointers for @inode. */
static inline struct dquot **i_dquot(struct inode *inode)
{
	return inode->i_sb->s_op->get_dquots(inode);
}
934 
935 static int dqinit_needed(struct inode *inode, int type)
936 {
937 	struct dquot * const *dquots;
938 	int cnt;
939 
940 	if (IS_NOQUOTA(inode))
941 		return 0;
942 
943 	dquots = i_dquot(inode);
944 	if (type != -1)
945 		return !dquots[type];
946 	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
947 		if (!dquots[cnt])
948 			return 1;
949 	return 0;
950 }
951 
/*
 * Attach dquot references to every writable inode of @sb that needs them
 * (called on quotaon). This routine is guarded by s_umount semaphore.
 */
static int add_dquot_ref(struct super_block *sb, int type)
{
	struct inode *inode, *old_inode = NULL;
#ifdef CONFIG_QUOTA_DEBUG
	int reserved = 0;
#endif
	int err = 0;

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		/* Skip dying/new inodes, inodes nobody has open for write,
		 * and inodes that already have all needed dquots. */
		spin_lock(&inode->i_lock);
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
		    !atomic_read(&inode->i_writecount) ||
		    !dqinit_needed(inode, type)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&sb->s_inode_list_lock);

#ifdef CONFIG_QUOTA_DEBUG
		if (unlikely(inode_get_rsv_space(inode) > 0))
			reserved = 1;
#endif
		iput(old_inode);
		err = __dquot_initialize(inode, type);
		if (err) {
			iput(inode);
			goto out;
		}

		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the
		 * s_inode_list_lock. We cannot iput the inode now as we can be
		 * holding the last reference and we cannot iput it under
		 * s_inode_list_lock. So we keep the reference and iput it
		 * later.
		 */
		old_inode = inode;
		cond_resched();
		spin_lock(&sb->s_inode_list_lock);
	}
	spin_unlock(&sb->s_inode_list_lock);
	iput(old_inode);
out:
#ifdef CONFIG_QUOTA_DEBUG
	if (reserved) {
		quota_error(sb, "Writes happened before quota was turned on "
			"thus quota information is probably inconsistent. "
			"Please run quotacheck(8)");
	}
#endif
	return err;
}
1009 
1010 /*
1011  * Remove references to dquots from inode and add dquot to list for freeing
1012  * if we have the last reference to dquot
1013  */
static void remove_inode_dquot_ref(struct inode *inode, int type,
				   struct list_head *tofree_head)
{
	struct dquot **dquots = i_dquot(inode);
	struct dquot *dquot = dquots[type];

	/* Nothing attached for this quota type. */
	if (!dquot)
		return;

	dquots[type] = NULL;
	if (list_empty(&dquot->dq_free)) {
		/*
		 * The inode still has reference to dquot so it can't be in the
		 * free list
		 */
		spin_lock(&dq_list_lock);
		list_add(&dquot->dq_free, tofree_head);
		spin_unlock(&dq_list_lock);
	} else {
		/*
		 * Dquot is already in a list to put so we won't drop the last
		 * reference here.
		 */
		dqput(dquot);
	}
}
1040 
1041 /*
1042  * Free list of dquots
1043  * Dquots are removed from inodes and no new references can be got so we are
1044  * the only ones holding reference
1045  */
1046 static void put_dquot_list(struct list_head *tofree_head)
1047 {
1048 	struct list_head *act_head;
1049 	struct dquot *dquot;
1050 
1051 	act_head = tofree_head->next;
1052 	while (act_head != tofree_head) {
1053 		dquot = list_entry(act_head, struct dquot, dq_free);
1054 		act_head = act_head->next;
1055 		/* Remove dquot from the list so we won't have problems... */
1056 		list_del_init(&dquot->dq_free);
1057 		dqput(dquot);
1058 	}
1059 }
1060 
/*
 * Strip dquot references of @type from every inode on @sb, collecting
 * dquots to be released on @tofree_head (see remove_inode_dquot_ref()).
 */
static void remove_dquot_ref(struct super_block *sb, int type,
		struct list_head *tofree_head)
{
	struct inode *inode;
#ifdef CONFIG_QUOTA_DEBUG
	int reserved = 0;	/* saw an inode with reserved space? */
#endif

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		/*
		 *  We have to scan also I_NEW inodes because they can already
		 *  have quota pointer initialized. Luckily, we need to touch
		 *  only quota pointers and these have separate locking
		 *  (dq_data_lock).
		 */
		spin_lock(&dq_data_lock);
		if (!IS_NOQUOTA(inode)) {
#ifdef CONFIG_QUOTA_DEBUG
			if (unlikely(inode_get_rsv_space(inode) > 0))
				reserved = 1;
#endif
			remove_inode_dquot_ref(inode, type, tofree_head);
		}
		spin_unlock(&dq_data_lock);
	}
	spin_unlock(&sb->s_inode_list_lock);
#ifdef CONFIG_QUOTA_DEBUG
	if (reserved) {
		printk(KERN_WARNING "VFS (%s): Writes happened after quota"
			" was disabled thus quota information is probably "
			"inconsistent. Please run quotacheck(8).\n", sb->s_id);
	}
#endif
}
1096 
1097 /* Gather all references from inodes and drop them */
1098 static void drop_dquot_ref(struct super_block *sb, int type)
1099 {
1100 	LIST_HEAD(tofree_head);
1101 
1102 	if (sb->dq_op) {
1103 		remove_dquot_ref(sb, type, &tofree_head);
1104 		synchronize_srcu(&dquot_srcu);
1105 		put_dquot_list(&tofree_head);
1106 	}
1107 }
1108 
1109 static inline
1110 void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
1111 {
1112 	if (dquot->dq_dqb.dqb_rsvspace >= number)
1113 		dquot->dq_dqb.dqb_rsvspace -= number;
1114 	else {
1115 		WARN_ON_ONCE(1);
1116 		dquot->dq_dqb.dqb_rsvspace = 0;
1117 	}
1118 	if (dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace <=
1119 	    dquot->dq_dqb.dqb_bsoftlimit)
1120 		dquot->dq_dqb.dqb_btime = (time64_t) 0;
1121 	clear_bit(DQ_BLKS_B, &dquot->dq_flags);
1122 }
1123 
1124 static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
1125 {
1126 	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
1127 	    dquot->dq_dqb.dqb_curinodes >= number)
1128 		dquot->dq_dqb.dqb_curinodes -= number;
1129 	else
1130 		dquot->dq_dqb.dqb_curinodes = 0;
1131 	if (dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit)
1132 		dquot->dq_dqb.dqb_itime = (time64_t) 0;
1133 	clear_bit(DQ_INODES_B, &dquot->dq_flags);
1134 }
1135 
1136 static void dquot_decr_space(struct dquot *dquot, qsize_t number)
1137 {
1138 	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
1139 	    dquot->dq_dqb.dqb_curspace >= number)
1140 		dquot->dq_dqb.dqb_curspace -= number;
1141 	else
1142 		dquot->dq_dqb.dqb_curspace = 0;
1143 	if (dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace <=
1144 	    dquot->dq_dqb.dqb_bsoftlimit)
1145 		dquot->dq_dqb.dqb_btime = (time64_t) 0;
1146 	clear_bit(DQ_BLKS_B, &dquot->dq_flags);
1147 }
1148 
/*
 * A deferred quota warning: captured while holding locks and emitted
 * later by flush_warnings() so we never call into tty/netlink code from
 * a critical section.
 */
struct dquot_warn {
	struct super_block *w_sb;	/* filesystem the event happened on */
	struct kqid w_dq_id;		/* id (user/group/project) to warn */
	short w_type;			/* QUOTA_NL_* type; QUOTA_NL_NOWARN if unused */
};
1154 
1155 static int warning_issued(struct dquot *dquot, const int warntype)
1156 {
1157 	int flag = (warntype == QUOTA_NL_BHARDWARN ||
1158 		warntype == QUOTA_NL_BSOFTLONGWARN) ? DQ_BLKS_B :
1159 		((warntype == QUOTA_NL_IHARDWARN ||
1160 		warntype == QUOTA_NL_ISOFTLONGWARN) ? DQ_INODES_B : 0);
1161 
1162 	if (!flag)
1163 		return 0;
1164 	return test_and_set_bit(flag, &dquot->dq_flags);
1165 }
1166 
#ifdef CONFIG_PRINT_QUOTA_WARNING
/* Non-zero => print warnings to the tty; presumably toggled externally
 * (not visible in this file) — TODO confirm where it is wired up. */
static int flag_print_warnings = 1;

/* Should the current task receive a tty warning for this quota id? */
static int need_print_warning(struct dquot_warn *warn)
{
	if (!flag_print_warnings)
		return 0;

	switch (warn->w_dq_id.type) {
		case USRQUOTA:
			return uid_eq(current_fsuid(), warn->w_dq_id.uid);
		case GRPQUOTA:
			return in_group_p(warn->w_dq_id.gid);
		case PRJQUOTA:
			return 1;
	}
	return 0;
}
1185 
/* Print warning to user which exceeded quota */
static void print_warning(struct dquot_warn *warn)
{
	char *msg = NULL;
	struct tty_struct *tty;
	int warntype = warn->w_type;

	/* "Back below limit" events are never printed to the tty. */
	if (warntype == QUOTA_NL_IHARDBELOW ||
	    warntype == QUOTA_NL_ISOFTBELOW ||
	    warntype == QUOTA_NL_BHARDBELOW ||
	    warntype == QUOTA_NL_BSOFTBELOW || !need_print_warning(warn))
		return;

	/* No controlling tty => nowhere to print. */
	tty = get_current_tty();
	if (!tty)
		return;
	tty_write_message(tty, warn->w_sb->s_id);
	if (warntype == QUOTA_NL_ISOFTWARN || warntype == QUOTA_NL_BSOFTWARN)
		tty_write_message(tty, ": warning, ");
	else
		tty_write_message(tty, ": write failed, ");
	tty_write_message(tty, quotatypes[warn->w_dq_id.type]);
	switch (warntype) {
		case QUOTA_NL_IHARDWARN:
			msg = " file limit reached.\r\n";
			break;
		case QUOTA_NL_ISOFTLONGWARN:
			msg = " file quota exceeded too long.\r\n";
			break;
		case QUOTA_NL_ISOFTWARN:
			msg = " file quota exceeded.\r\n";
			break;
		case QUOTA_NL_BHARDWARN:
			msg = " block limit reached.\r\n";
			break;
		case QUOTA_NL_BSOFTLONGWARN:
			msg = " block quota exceeded too long.\r\n";
			break;
		case QUOTA_NL_BSOFTWARN:
			msg = " block quota exceeded.\r\n";
			break;
	}
	tty_write_message(tty, msg);
	tty_kref_put(tty);
}
#endif
1232 
1233 static void prepare_warning(struct dquot_warn *warn, struct dquot *dquot,
1234 			    int warntype)
1235 {
1236 	if (warning_issued(dquot, warntype))
1237 		return;
1238 	warn->w_type = warntype;
1239 	warn->w_sb = dquot->dq_sb;
1240 	warn->w_dq_id = dquot->dq_id;
1241 }
1242 
1243 /*
1244  * Write warnings to the console and send warning messages over netlink.
1245  *
1246  * Note that this function can call into tty and networking code.
1247  */
1248 static void flush_warnings(struct dquot_warn *warn)
1249 {
1250 	int i;
1251 
1252 	for (i = 0; i < MAXQUOTAS; i++) {
1253 		if (warn[i].w_type == QUOTA_NL_NOWARN)
1254 			continue;
1255 #ifdef CONFIG_PRINT_QUOTA_WARNING
1256 		print_warning(&warn[i]);
1257 #endif
1258 		quota_send_warning(warn[i].w_dq_id,
1259 				   warn[i].w_sb->s_dev, warn[i].w_type);
1260 	}
1261 }
1262 
1263 static int ignore_hardlimit(struct dquot *dquot)
1264 {
1265 	struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
1266 
1267 	return capable(CAP_SYS_RESOURCE) &&
1268 	       (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
1269 		!(info->dqi_flags & DQF_ROOT_SQUASH));
1270 }
1271 
/*
 * Try to charge @inodes more inodes to @dquot, enforcing inode limits.
 * On failure fills @warn and returns -EDQUOT; on success updates
 * dqb_curinodes.  Grace time starts when the soft limit is first crossed.
 */
static int dquot_add_inodes(struct dquot *dquot, qsize_t inodes,
			    struct dquot_warn *warn)
{
	qsize_t newinodes;
	int ret = 0;

	spin_lock(&dquot->dq_dqb_lock);
	newinodes = dquot->dq_dqb.dqb_curinodes + inodes;
	/* No limit enforcement for fake dquots or when limits are disabled. */
	if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type) ||
	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
		goto add;

	/* Hard limit would be exceeded and the caller may not ignore it. */
	if (dquot->dq_dqb.dqb_ihardlimit &&
	    newinodes > dquot->dq_dqb.dqb_ihardlimit &&
            !ignore_hardlimit(dquot)) {
		prepare_warning(warn, dquot, QUOTA_NL_IHARDWARN);
		ret = -EDQUOT;
		goto out;
	}

	/* Soft limit exceeded and the grace period has already expired. */
	if (dquot->dq_dqb.dqb_isoftlimit &&
	    newinodes > dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_itime &&
	    ktime_get_real_seconds() >= dquot->dq_dqb.dqb_itime &&
            !ignore_hardlimit(dquot)) {
		prepare_warning(warn, dquot, QUOTA_NL_ISOFTLONGWARN);
		ret = -EDQUOT;
		goto out;
	}

	/* First crossing of the soft limit: warn and start the grace time. */
	if (dquot->dq_dqb.dqb_isoftlimit &&
	    newinodes > dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_itime == 0) {
		prepare_warning(warn, dquot, QUOTA_NL_ISOFTWARN);
		dquot->dq_dqb.dqb_itime = ktime_get_real_seconds() +
		    sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type].dqi_igrace;
	}
add:
	dquot->dq_dqb.dqb_curinodes = newinodes;

out:
	spin_unlock(&dquot->dq_dqb_lock);
	return ret;
}
1316 
/*
 * Try to charge @space bytes of usage plus @rsv_space bytes of reservation
 * to @dquot, enforcing block limits.  On failure fills @warn (when
 * DQUOT_SPACE_WARN is set) and returns -EDQUOT; DQUOT_SPACE_NOFAIL forces
 * success after warning/grace-time handling has run.
 */
static int dquot_add_space(struct dquot *dquot, qsize_t space,
			   qsize_t rsv_space, unsigned int flags,
			   struct dquot_warn *warn)
{
	qsize_t tspace;
	struct super_block *sb = dquot->dq_sb;
	int ret = 0;

	spin_lock(&dquot->dq_dqb_lock);
	/* No limit enforcement for fake dquots or when limits are disabled. */
	if (!sb_has_quota_limits_enabled(sb, dquot->dq_id.type) ||
	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
		goto finish;

	/* Total space after the charge: usage + reservation + new amounts. */
	tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace
		+ space + rsv_space;

	/* Hard limit would be exceeded and the caller may not ignore it. */
	if (dquot->dq_dqb.dqb_bhardlimit &&
	    tspace > dquot->dq_dqb.dqb_bhardlimit &&
            !ignore_hardlimit(dquot)) {
		if (flags & DQUOT_SPACE_WARN)
			prepare_warning(warn, dquot, QUOTA_NL_BHARDWARN);
		ret = -EDQUOT;
		goto finish;
	}

	/* Soft limit exceeded and the grace period has already expired. */
	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    tspace > dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_btime &&
	    ktime_get_real_seconds() >= dquot->dq_dqb.dqb_btime &&
            !ignore_hardlimit(dquot)) {
		if (flags & DQUOT_SPACE_WARN)
			prepare_warning(warn, dquot, QUOTA_NL_BSOFTWARN);
		ret = -EDQUOT;
		goto finish;
	}

	/* First crossing of the soft limit. */
	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    tspace > dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_btime == 0) {
		if (flags & DQUOT_SPACE_WARN) {
			prepare_warning(warn, dquot, QUOTA_NL_BSOFTWARN);
			dquot->dq_dqb.dqb_btime = ktime_get_real_seconds() +
			    sb_dqopt(sb)->info[dquot->dq_id.type].dqi_bgrace;
		} else {
			/*
			 * Preallocation (no DQUOT_SPACE_WARN) is not allowed
			 * to push usage over the soft limit — fail instead of
			 * silently starting the grace period.
			 */
			ret = -EDQUOT;
			goto finish;
		}
	}
finish:
	/*
	 * We have to be careful and go through warning generation & grace time
	 * setting even if DQUOT_SPACE_NOFAIL is set. That's why we check it
	 * only here...
	 */
	if (flags & DQUOT_SPACE_NOFAIL)
		ret = 0;
	if (!ret) {
		dquot->dq_dqb.dqb_rsvspace += rsv_space;
		dquot->dq_dqb.dqb_curspace += space;
	}
	spin_unlock(&dquot->dq_dqb_lock);
	return ret;
}
1384 
1385 static int info_idq_free(struct dquot *dquot, qsize_t inodes)
1386 {
1387 	qsize_t newinodes;
1388 
1389 	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
1390 	    dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit ||
1391 	    !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type))
1392 		return QUOTA_NL_NOWARN;
1393 
1394 	newinodes = dquot->dq_dqb.dqb_curinodes - inodes;
1395 	if (newinodes <= dquot->dq_dqb.dqb_isoftlimit)
1396 		return QUOTA_NL_ISOFTBELOW;
1397 	if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit &&
1398 	    newinodes < dquot->dq_dqb.dqb_ihardlimit)
1399 		return QUOTA_NL_IHARDBELOW;
1400 	return QUOTA_NL_NOWARN;
1401 }
1402 
1403 static int info_bdq_free(struct dquot *dquot, qsize_t space)
1404 {
1405 	qsize_t tspace;
1406 
1407 	tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace;
1408 
1409 	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
1410 	    tspace <= dquot->dq_dqb.dqb_bsoftlimit)
1411 		return QUOTA_NL_NOWARN;
1412 
1413 	if (tspace - space <= dquot->dq_dqb.dqb_bsoftlimit)
1414 		return QUOTA_NL_BSOFTBELOW;
1415 	if (tspace >= dquot->dq_dqb.dqb_bhardlimit &&
1416 	    tspace - space < dquot->dq_dqb.dqb_bhardlimit)
1417 		return QUOTA_NL_BHARDBELOW;
1418 	return QUOTA_NL_NOWARN;
1419 }
1420 
1421 static int dquot_active(const struct inode *inode)
1422 {
1423 	struct super_block *sb = inode->i_sb;
1424 
1425 	if (IS_NOQUOTA(inode))
1426 		return 0;
1427 	return sb_any_quota_loaded(sb) & ~sb_any_quota_suspended(sb);
1428 }
1429 
1430 /*
1431  * Initialize quota pointers in inode
1432  *
1433  * It is better to call this function outside of any transaction as it
1434  * might need a lot of space in journal for dquot structure allocation.
1435  */
static int __dquot_initialize(struct inode *inode, int type)
{
	int cnt, init_needed = 0;
	struct dquot **dquots, *got[MAXQUOTAS] = {};
	struct super_block *sb = inode->i_sb;
	qsize_t rsv;
	int ret = 0;

	if (!dquot_active(inode))
		return 0;

	dquots = i_dquot(inode);

	/* First get references to structures we might need. */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		struct kqid qid;
		kprojid_t projid;
		int rc;
		struct dquot *dquot;

		if (type != -1 && cnt != type)
			continue;
		/*
		 * The i_dquot should have been initialized in most cases,
		 * we check it without locking here to avoid unnecessary
		 * dqget()/dqput() calls.
		 */
		if (dquots[cnt])
			continue;

		if (!sb_has_quota_active(sb, cnt))
			continue;

		init_needed = 1;

		/* Build the quota id for this type from the inode's owner. */
		switch (cnt) {
		case USRQUOTA:
			qid = make_kqid_uid(inode->i_uid);
			break;
		case GRPQUOTA:
			qid = make_kqid_gid(inode->i_gid);
			break;
		case PRJQUOTA:
			rc = inode->i_sb->dq_op->get_projid(inode, &projid);
			if (rc)
				continue;
			qid = make_kqid_projid(projid);
			break;
		}
		dquot = dqget(sb, qid);
		if (IS_ERR(dquot)) {
			/* We raced with somebody turning quotas off... */
			if (PTR_ERR(dquot) != -ESRCH) {
				ret = PTR_ERR(dquot);
				goto out_put;
			}
			dquot = NULL;
		}
		got[cnt] = dquot;
	}

	/* All required i_dquot has been initialized */
	if (!init_needed)
		return 0;

	/* Attach the references under dq_data_lock, rechecking for races. */
	spin_lock(&dq_data_lock);
	if (IS_NOQUOTA(inode))
		goto out_lock;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		/* Avoid races with quotaoff() */
		if (!sb_has_quota_active(sb, cnt))
			continue;
		/* We could race with quotaon or dqget() could have failed */
		if (!got[cnt])
			continue;
		if (!dquots[cnt]) {
			dquots[cnt] = got[cnt];
			got[cnt] = NULL;
			/*
			 * Make quota reservation system happy if someone
			 * did a write before quota was turned on
			 */
			rsv = inode_get_rsv_space(inode);
			if (unlikely(rsv)) {
				spin_lock(&inode->i_lock);
				/* Get reservation again under proper lock */
				rsv = __inode_get_rsv_space(inode);
				spin_lock(&dquots[cnt]->dq_dqb_lock);
				dquots[cnt]->dq_dqb.dqb_rsvspace += rsv;
				spin_unlock(&dquots[cnt]->dq_dqb_lock);
				spin_unlock(&inode->i_lock);
			}
		}
	}
out_lock:
	spin_unlock(&dq_data_lock);
out_put:
	/* Drop unused references */
	dqput_all(got);

	return ret;
}
1540 
/* Attach dquots of every quota type to @inode (-1 == all types). */
int dquot_initialize(struct inode *inode)
{
	int ret = __dquot_initialize(inode, -1);

	return ret;
}
EXPORT_SYMBOL(dquot_initialize);
1546 
1547 bool dquot_initialize_needed(struct inode *inode)
1548 {
1549 	struct dquot **dquots;
1550 	int i;
1551 
1552 	if (!dquot_active(inode))
1553 		return false;
1554 
1555 	dquots = i_dquot(inode);
1556 	for (i = 0; i < MAXQUOTAS; i++)
1557 		if (!dquots[i] && sb_has_quota_active(inode->i_sb, i))
1558 			return true;
1559 	return false;
1560 }
1561 EXPORT_SYMBOL(dquot_initialize_needed);
1562 
1563 /*
1564  * Release all quotas referenced by inode.
1565  *
 * This function is only called on inode free or when converting
 * a file to a quota file; there are no other users of the i_dquot
 * in either case, so we need not call synchronize_srcu() after
 * clearing i_dquot.
1570  */
1571 static void __dquot_drop(struct inode *inode)
1572 {
1573 	int cnt;
1574 	struct dquot **dquots = i_dquot(inode);
1575 	struct dquot *put[MAXQUOTAS];
1576 
1577 	spin_lock(&dq_data_lock);
1578 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1579 		put[cnt] = dquots[cnt];
1580 		dquots[cnt] = NULL;
1581 	}
1582 	spin_unlock(&dq_data_lock);
1583 	dqput_all(put);
1584 }
1585 
1586 void dquot_drop(struct inode *inode)
1587 {
1588 	struct dquot * const *dquots;
1589 	int cnt;
1590 
1591 	if (IS_NOQUOTA(inode))
1592 		return;
1593 
1594 	/*
1595 	 * Test before calling to rule out calls from proc and such
1596 	 * where we are not allowed to block. Note that this is
1597 	 * actually reliable test even without the lock - the caller
1598 	 * must assure that nobody can come after the DQUOT_DROP and
1599 	 * add quota pointers back anyway.
1600 	 */
1601 	dquots = i_dquot(inode);
1602 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1603 		if (dquots[cnt])
1604 			break;
1605 	}
1606 
1607 	if (cnt < MAXQUOTAS)
1608 		__dquot_drop(inode);
1609 }
1610 EXPORT_SYMBOL(dquot_drop);
1611 
1612 /*
1613  * inode_reserved_space is managed internally by quota, and protected by
1614  * i_lock similar to i_blocks+i_bytes.
1615  */
1616 static qsize_t *inode_reserved_space(struct inode * inode)
1617 {
1618 	/* Filesystem must explicitly define it's own method in order to use
1619 	 * quota reservation interface */
1620 	BUG_ON(!inode->i_sb->dq_op->get_reserved_space);
1621 	return inode->i_sb->dq_op->get_reserved_space(inode);
1622 }
1623 
1624 static qsize_t __inode_get_rsv_space(struct inode *inode)
1625 {
1626 	if (!inode->i_sb->dq_op->get_reserved_space)
1627 		return 0;
1628 	return *inode_reserved_space(inode);
1629 }
1630 
1631 static qsize_t inode_get_rsv_space(struct inode *inode)
1632 {
1633 	qsize_t ret;
1634 
1635 	if (!inode->i_sb->dq_op->get_reserved_space)
1636 		return 0;
1637 	spin_lock(&inode->i_lock);
1638 	ret = __inode_get_rsv_space(inode);
1639 	spin_unlock(&inode->i_lock);
1640 	return ret;
1641 }
1642 
1643 /*
1644  * This functions updates i_blocks+i_bytes fields and quota information
1645  * (together with appropriate checks).
1646  *
1647  * NOTE: We absolutely rely on the fact that caller dirties the inode
1648  * (usually helpers in quotaops.h care about this) and holds a handle for
1649  * the current transaction so that dquot write and inode write go into the
1650  * same transaction.
1651  */
1652 
1653 /*
1654  * This operation can block, but only after everything is updated
1655  */
/*
 * Charge @number bytes of usage (or of reservation, when
 * DQUOT_SPACE_RESERVE is set in @flags) against all dquots attached to
 * @inode and update the inode's byte counters.  Returns 0 or -EDQUOT;
 * partial charges are rolled back on failure.
 */
int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
{
	int cnt, ret = 0, index;
	struct dquot_warn warn[MAXQUOTAS];
	int reserve = flags & DQUOT_SPACE_RESERVE;
	struct dquot **dquots;

	if (!dquot_active(inode)) {
		/* No quota accounting: only update the inode counters. */
		if (reserve) {
			spin_lock(&inode->i_lock);
			*inode_reserved_space(inode) += number;
			spin_unlock(&inode->i_lock);
		} else {
			inode_add_bytes(inode, number);
		}
		goto out;
	}

	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		warn[cnt].w_type = QUOTA_NL_NOWARN;

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&inode->i_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!dquots[cnt])
			continue;
		if (reserve) {
			ret = dquot_add_space(dquots[cnt], 0, number, flags,
					      &warn[cnt]);
		} else {
			ret = dquot_add_space(dquots[cnt], number, 0, flags,
					      &warn[cnt]);
		}
		if (ret) {
			/* Back out changes we already did */
			for (cnt--; cnt >= 0; cnt--) {
				if (!dquots[cnt])
					continue;
				spin_lock(&dquots[cnt]->dq_dqb_lock);
				if (reserve)
					dquot_free_reserved_space(dquots[cnt],
								  number);
				else
					dquot_decr_space(dquots[cnt], number);
				spin_unlock(&dquots[cnt]->dq_dqb_lock);
			}
			spin_unlock(&inode->i_lock);
			goto out_flush_warn;
		}
	}
	if (reserve)
		*inode_reserved_space(inode) += number;
	else
		__inode_add_bytes(inode, number);
	spin_unlock(&inode->i_lock);

	/* Reservations do not change on-disk usage, so nothing to dirty. */
	if (reserve)
		goto out_flush_warn;
	mark_all_dquot_dirty(dquots);
out_flush_warn:
	srcu_read_unlock(&dquot_srcu, index);
	flush_warnings(warn);
out:
	return ret;
}
EXPORT_SYMBOL(__dquot_alloc_space);
1723 
1724 /*
1725  * This operation can block, but only after everything is updated
1726  */
/*
 * Charge one inode to every dquot attached to @inode.  Returns 0 or
 * -EDQUOT; partial charges are rolled back on failure.
 */
int dquot_alloc_inode(struct inode *inode)
{
	int cnt, ret = 0, index;
	struct dquot_warn warn[MAXQUOTAS];
	struct dquot * const *dquots;

	if (!dquot_active(inode))
		return 0;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		warn[cnt].w_type = QUOTA_NL_NOWARN;

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&inode->i_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!dquots[cnt])
			continue;
		ret = dquot_add_inodes(dquots[cnt], 1, &warn[cnt]);
		if (ret) {
			for (cnt--; cnt >= 0; cnt--) {
				if (!dquots[cnt])
					continue;
				/* Back out changes we already did */
				spin_lock(&dquots[cnt]->dq_dqb_lock);
				dquot_decr_inodes(dquots[cnt], 1);
				spin_unlock(&dquots[cnt]->dq_dqb_lock);
			}
			goto warn_put_all;
		}
	}

warn_put_all:
	spin_unlock(&inode->i_lock);
	if (ret == 0)
		mark_all_dquot_dirty(dquots);
	srcu_read_unlock(&dquot_srcu, index);
	flush_warnings(warn);
	return ret;
}
EXPORT_SYMBOL(dquot_alloc_inode);
1767 
1768 /*
1769  * Convert in-memory reserved quotas to real consumed quotas
1770  */
/*
 * Move @number bytes from reservation to real usage on all attached
 * dquots and in the inode's counters.  Always returns 0; a reservation
 * underflow is clamped with a one-time warning.
 */
int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
{
	struct dquot **dquots;
	int cnt, index;

	if (!dquot_active(inode)) {
		/* No quota accounting: only shuffle the inode counters. */
		spin_lock(&inode->i_lock);
		*inode_reserved_space(inode) -= number;
		__inode_add_bytes(inode, number);
		spin_unlock(&inode->i_lock);
		return 0;
	}

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&inode->i_lock);
	/* Claim reserved quotas to allocated quotas */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (dquots[cnt]) {
			struct dquot *dquot = dquots[cnt];

			spin_lock(&dquot->dq_dqb_lock);
			if (WARN_ON_ONCE(dquot->dq_dqb.dqb_rsvspace < number))
				number = dquot->dq_dqb.dqb_rsvspace;
			dquot->dq_dqb.dqb_curspace += number;
			dquot->dq_dqb.dqb_rsvspace -= number;
			spin_unlock(&dquot->dq_dqb_lock);
		}
	}
	/* Update inode bytes */
	*inode_reserved_space(inode) -= number;
	__inode_add_bytes(inode, number);
	spin_unlock(&inode->i_lock);
	mark_all_dquot_dirty(dquots);
	srcu_read_unlock(&dquot_srcu, index);
	return 0;
}
EXPORT_SYMBOL(dquot_claim_space_nodirty);
1809 
1810 /*
1811  * Convert allocated space back to in-memory reserved quotas
1812  */
1813 void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
1814 {
1815 	struct dquot **dquots;
1816 	int cnt, index;
1817 
1818 	if (!dquot_active(inode)) {
1819 		spin_lock(&inode->i_lock);
1820 		*inode_reserved_space(inode) += number;
1821 		__inode_sub_bytes(inode, number);
1822 		spin_unlock(&inode->i_lock);
1823 		return;
1824 	}
1825 
1826 	dquots = i_dquot(inode);
1827 	index = srcu_read_lock(&dquot_srcu);
1828 	spin_lock(&inode->i_lock);
1829 	/* Claim reserved quotas to allocated quotas */
1830 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1831 		if (dquots[cnt]) {
1832 			struct dquot *dquot = dquots[cnt];
1833 
1834 			spin_lock(&dquot->dq_dqb_lock);
1835 			if (WARN_ON_ONCE(dquot->dq_dqb.dqb_curspace < number))
1836 				number = dquot->dq_dqb.dqb_curspace;
1837 			dquot->dq_dqb.dqb_rsvspace += number;
1838 			dquot->dq_dqb.dqb_curspace -= number;
1839 			spin_unlock(&dquot->dq_dqb_lock);
1840 		}
1841 	}
1842 	/* Update inode bytes */
1843 	*inode_reserved_space(inode) += number;
1844 	__inode_sub_bytes(inode, number);
1845 	spin_unlock(&inode->i_lock);
1846 	mark_all_dquot_dirty(dquots);
1847 	srcu_read_unlock(&dquot_srcu, index);
1848 	return;
1849 }
1850 EXPORT_SYMBOL(dquot_reclaim_space_nodirty);
1851 
1852 /*
1853  * This operation can block, but only after everything is updated
1854  */
/*
 * Release @number bytes of usage (or of reservation, when
 * DQUOT_SPACE_RESERVE is set in @flags) from all dquots attached to
 * @inode and from the inode's counters, queueing "back below limit"
 * warnings as appropriate.
 */
void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
{
	unsigned int cnt;
	struct dquot_warn warn[MAXQUOTAS];
	struct dquot **dquots;
	int reserve = flags & DQUOT_SPACE_RESERVE, index;

	if (!dquot_active(inode)) {
		/* No quota accounting: only update the inode counters. */
		if (reserve) {
			spin_lock(&inode->i_lock);
			*inode_reserved_space(inode) -= number;
			spin_unlock(&inode->i_lock);
		} else {
			inode_sub_bytes(inode, number);
		}
		return;
	}

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&inode->i_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		int wtype;

		warn[cnt].w_type = QUOTA_NL_NOWARN;
		if (!dquots[cnt])
			continue;
		spin_lock(&dquots[cnt]->dq_dqb_lock);
		/* Check for a "back below limit" transition before freeing. */
		wtype = info_bdq_free(dquots[cnt], number);
		if (wtype != QUOTA_NL_NOWARN)
			prepare_warning(&warn[cnt], dquots[cnt], wtype);
		if (reserve)
			dquot_free_reserved_space(dquots[cnt], number);
		else
			dquot_decr_space(dquots[cnt], number);
		spin_unlock(&dquots[cnt]->dq_dqb_lock);
	}
	if (reserve)
		*inode_reserved_space(inode) -= number;
	else
		__inode_sub_bytes(inode, number);
	spin_unlock(&inode->i_lock);

	/* Reservations do not change on-disk usage, so nothing to dirty. */
	if (reserve)
		goto out_unlock;
	mark_all_dquot_dirty(dquots);
out_unlock:
	srcu_read_unlock(&dquot_srcu, index);
	flush_warnings(warn);
}
EXPORT_SYMBOL(__dquot_free_space);
1906 
1907 /*
1908  * This operation can block, but only after everything is updated
1909  */
/*
 * Release one inode from every dquot attached to @inode, queueing
 * "back below limit" warnings as appropriate.
 */
void dquot_free_inode(struct inode *inode)
{
	unsigned int cnt;
	struct dquot_warn warn[MAXQUOTAS];
	struct dquot * const *dquots;
	int index;

	if (!dquot_active(inode))
		return;

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&inode->i_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		int wtype;

		warn[cnt].w_type = QUOTA_NL_NOWARN;
		if (!dquots[cnt])
			continue;
		spin_lock(&dquots[cnt]->dq_dqb_lock);
		/* Check for a "back below limit" transition before freeing. */
		wtype = info_idq_free(dquots[cnt], 1);
		if (wtype != QUOTA_NL_NOWARN)
			prepare_warning(&warn[cnt], dquots[cnt], wtype);
		dquot_decr_inodes(dquots[cnt], 1);
		spin_unlock(&dquots[cnt]->dq_dqb_lock);
	}
	spin_unlock(&inode->i_lock);
	mark_all_dquot_dirty(dquots);
	srcu_read_unlock(&dquot_srcu, index);
	flush_warnings(warn);
}
EXPORT_SYMBOL(dquot_free_inode);
1942 
1943 /*
1944  * Transfer the number of inode and blocks from one diskquota to an other.
1945  * On success, dquot references in transfer_to are consumed and references
1946  * to original dquots that need to be released are placed there. On failure,
1947  * references are kept untouched.
1948  *
1949  * This operation can block, but only after everything is updated
1950  * A transaction must be started when entering this function.
1951  *
1952  * We are holding reference on transfer_from & transfer_to, no need to
1953  * protect them by srcu_read_lock().
1954  */
int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
{
	qsize_t cur_space;
	qsize_t rsv_space = 0;
	qsize_t inode_usage = 1;
	struct dquot *transfer_from[MAXQUOTAS] = {};
	int cnt, ret = 0;
	char is_valid[MAXQUOTAS] = {};
	struct dquot_warn warn_to[MAXQUOTAS];
	struct dquot_warn warn_from_inodes[MAXQUOTAS];
	struct dquot_warn warn_from_space[MAXQUOTAS];

	if (IS_NOQUOTA(inode))
		return 0;

	/* Ask the filesystem how many inode units this inode consumes. */
	if (inode->i_sb->dq_op->get_inode_usage) {
		ret = inode->i_sb->dq_op->get_inode_usage(inode, &inode_usage);
		if (ret)
			return ret;
	}

	/* Initialize the arrays */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		warn_to[cnt].w_type = QUOTA_NL_NOWARN;
		warn_from_inodes[cnt].w_type = QUOTA_NL_NOWARN;
		warn_from_space[cnt].w_type = QUOTA_NL_NOWARN;
	}

	spin_lock(&dq_data_lock);
	spin_lock(&inode->i_lock);
	if (IS_NOQUOTA(inode)) {	/* File without quota accounting? */
		spin_unlock(&inode->i_lock);
		spin_unlock(&dq_data_lock);
		return 0;
	}
	cur_space = __inode_get_bytes(inode);
	rsv_space = __inode_get_rsv_space(inode);
	/*
	 * Build the transfer_from list, check limits, and update usage in
	 * the target structures.
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		/*
		 * Skip changes for same uid or gid or for turned off quota-type.
		 */
		if (!transfer_to[cnt])
			continue;
		/* Avoid races with quotaoff() */
		if (!sb_has_quota_active(inode->i_sb, cnt))
			continue;
		is_valid[cnt] = 1;
		transfer_from[cnt] = i_dquot(inode)[cnt];
		ret = dquot_add_inodes(transfer_to[cnt], inode_usage,
				       &warn_to[cnt]);
		if (ret)
			goto over_quota;
		ret = dquot_add_space(transfer_to[cnt], cur_space, rsv_space,
				      DQUOT_SPACE_WARN, &warn_to[cnt]);
		if (ret) {
			/* Undo the inode charge done just above. */
			spin_lock(&transfer_to[cnt]->dq_dqb_lock);
			dquot_decr_inodes(transfer_to[cnt], inode_usage);
			spin_unlock(&transfer_to[cnt]->dq_dqb_lock);
			goto over_quota;
		}
	}

	/* Decrease usage for source structures and update quota pointers */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!is_valid[cnt])
			continue;
		/* Due to IO error we might not have transfer_from[] structure */
		if (transfer_from[cnt]) {
			int wtype;

			spin_lock(&transfer_from[cnt]->dq_dqb_lock);
			wtype = info_idq_free(transfer_from[cnt], inode_usage);
			if (wtype != QUOTA_NL_NOWARN)
				prepare_warning(&warn_from_inodes[cnt],
						transfer_from[cnt], wtype);
			wtype = info_bdq_free(transfer_from[cnt],
					      cur_space + rsv_space);
			if (wtype != QUOTA_NL_NOWARN)
				prepare_warning(&warn_from_space[cnt],
						transfer_from[cnt], wtype);
			dquot_decr_inodes(transfer_from[cnt], inode_usage);
			dquot_decr_space(transfer_from[cnt], cur_space);
			dquot_free_reserved_space(transfer_from[cnt],
						  rsv_space);
			spin_unlock(&transfer_from[cnt]->dq_dqb_lock);
		}
		i_dquot(inode)[cnt] = transfer_to[cnt];
	}
	spin_unlock(&inode->i_lock);
	spin_unlock(&dq_data_lock);

	mark_all_dquot_dirty(transfer_from);
	mark_all_dquot_dirty(transfer_to);
	flush_warnings(warn_to);
	flush_warnings(warn_from_inodes);
	flush_warnings(warn_from_space);
	/* Pass back references to put */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (is_valid[cnt])
			transfer_to[cnt] = transfer_from[cnt];
	return 0;
over_quota:
	/* Back out changes we already did */
	for (cnt--; cnt >= 0; cnt--) {
		if (!is_valid[cnt])
			continue;
		spin_lock(&transfer_to[cnt]->dq_dqb_lock);
		dquot_decr_inodes(transfer_to[cnt], inode_usage);
		dquot_decr_space(transfer_to[cnt], cur_space);
		dquot_free_reserved_space(transfer_to[cnt], rsv_space);
		spin_unlock(&transfer_to[cnt]->dq_dqb_lock);
	}
	spin_unlock(&inode->i_lock);
	spin_unlock(&dq_data_lock);
	flush_warnings(warn_to);
	return ret;
}
EXPORT_SYMBOL(__dquot_transfer);
2077 
2078 /* Wrapper for transferring ownership of an inode for uid/gid only
2079  * Called from FSXXX_setattr()
2080  */
2081 int dquot_transfer(struct mnt_idmap *idmap, struct inode *inode,
2082 		   struct iattr *iattr)
2083 {
2084 	struct dquot *transfer_to[MAXQUOTAS] = {};
2085 	struct dquot *dquot;
2086 	struct super_block *sb = inode->i_sb;
2087 	int ret;
2088 
2089 	if (!dquot_active(inode))
2090 		return 0;
2091 
2092 	if (i_uid_needs_update(idmap, iattr, inode)) {
2093 		kuid_t kuid = from_vfsuid(idmap, i_user_ns(inode),
2094 					  iattr->ia_vfsuid);
2095 
2096 		dquot = dqget(sb, make_kqid_uid(kuid));
2097 		if (IS_ERR(dquot)) {
2098 			if (PTR_ERR(dquot) != -ESRCH) {
2099 				ret = PTR_ERR(dquot);
2100 				goto out_put;
2101 			}
2102 			dquot = NULL;
2103 		}
2104 		transfer_to[USRQUOTA] = dquot;
2105 	}
2106 	if (i_gid_needs_update(idmap, iattr, inode)) {
2107 		kgid_t kgid = from_vfsgid(idmap, i_user_ns(inode),
2108 					  iattr->ia_vfsgid);
2109 
2110 		dquot = dqget(sb, make_kqid_gid(kgid));
2111 		if (IS_ERR(dquot)) {
2112 			if (PTR_ERR(dquot) != -ESRCH) {
2113 				ret = PTR_ERR(dquot);
2114 				goto out_put;
2115 			}
2116 			dquot = NULL;
2117 		}
2118 		transfer_to[GRPQUOTA] = dquot;
2119 	}
2120 	ret = __dquot_transfer(inode, transfer_to);
2121 out_put:
2122 	dqput_all(transfer_to);
2123 	return ret;
2124 }
2125 EXPORT_SYMBOL(dquot_transfer);
2126 
2127 /*
2128  * Write info of quota file to disk
2129  */
2130 int dquot_commit_info(struct super_block *sb, int type)
2131 {
2132 	struct quota_info *dqopt = sb_dqopt(sb);
2133 
2134 	return dqopt->ops[type]->write_file_info(sb, type);
2135 }
2136 EXPORT_SYMBOL(dquot_commit_info);
2137 
2138 int dquot_get_next_id(struct super_block *sb, struct kqid *qid)
2139 {
2140 	struct quota_info *dqopt = sb_dqopt(sb);
2141 
2142 	if (!sb_has_quota_active(sb, qid->type))
2143 		return -ESRCH;
2144 	if (!dqopt->ops[qid->type]->get_next_id)
2145 		return -ENOSYS;
2146 	return dqopt->ops[qid->type]->get_next_id(sb, qid);
2147 }
2148 EXPORT_SYMBOL(dquot_get_next_id);
2149 
2150 /*
2151  * Definitions of diskquota operations.
2152  */
/*
 * Generic callback table wired up by filesystems that rely on the VFS
 * quota implementation in this file without overriding any operation.
 */
const struct dquot_operations dquot_operations = {
	.write_dquot	= dquot_commit,
	.acquire_dquot	= dquot_acquire,
	.release_dquot	= dquot_release,
	.mark_dirty	= dquot_mark_dquot_dirty,
	.write_info	= dquot_commit_info,
	.alloc_dquot	= dquot_alloc,
	.destroy_dquot	= dquot_destroy,
	.get_next_id	= dquot_get_next_id,
};
2163 EXPORT_SYMBOL(dquot_operations);
2164 
2165 /*
2166  * Generic helper for ->open on filesystems supporting disk quotas.
2167  */
2168 int dquot_file_open(struct inode *inode, struct file *file)
2169 {
2170 	int error;
2171 
2172 	error = generic_file_open(inode, file);
2173 	if (!error && (file->f_mode & FMODE_WRITE))
2174 		error = dquot_initialize(inode);
2175 	return error;
2176 }
2177 EXPORT_SYMBOL(dquot_file_open);
2178 
2179 static void vfs_cleanup_quota_inode(struct super_block *sb, int type)
2180 {
2181 	struct quota_info *dqopt = sb_dqopt(sb);
2182 	struct inode *inode = dqopt->files[type];
2183 
2184 	if (!inode)
2185 		return;
2186 	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
2187 		inode_lock(inode);
2188 		inode->i_flags &= ~S_NOQUOTA;
2189 		inode_unlock(inode);
2190 	}
2191 	dqopt->files[type] = NULL;
2192 	iput(inode);
2193 }
2194 
2195 /*
2196  * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount)
2197  */
int dquot_disable(struct super_block *sb, int type, unsigned int flags)
{
	int cnt;
	struct quota_info *dqopt = sb_dqopt(sb);

	/* s_umount should be held in exclusive mode */
	if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
		up_read(&sb->s_umount);

	/* Cannot turn off usage accounting without turning off limits, or
	 * suspend quotas and simultaneously turn quotas off. */
	if ((flags & DQUOT_USAGE_ENABLED && !(flags & DQUOT_LIMITS_ENABLED))
	    || (flags & DQUOT_SUSPENDED && flags & (DQUOT_LIMITS_ENABLED |
	    DQUOT_USAGE_ENABLED)))
		return -EINVAL;

	/*
	 * Skip everything if there's nothing to do. We have to do this because
	 * sometimes we are called when fill_super() failed and calling
	 * sync_fs() in such cases does no good.
	 */
	if (!sb_any_quota_loaded(sb))
		return 0;

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_loaded(sb, cnt))
			continue;

		if (flags & DQUOT_SUSPENDED) {
			/*
			 * Suspending: set only the SUSPENDED bit; the
			 * usage/limits bits are preserved for dquot_resume().
			 */
			spin_lock(&dq_state_lock);
			dqopt->flags |=
				dquot_state_flag(DQUOT_SUSPENDED, cnt);
			spin_unlock(&dq_state_lock);
		} else {
			spin_lock(&dq_state_lock);
			dqopt->flags &= ~dquot_state_flag(flags, cnt);
			/* Turning off suspended quotas? */
			if (!sb_has_quota_loaded(sb, cnt) &&
			    sb_has_quota_suspended(sb, cnt)) {
				dqopt->flags &=	~dquot_state_flag(
							DQUOT_SUSPENDED, cnt);
				spin_unlock(&dq_state_lock);
				/*
				 * Quota was only suspended, so there are no
				 * dquots or dirty info to flush - just drop
				 * the pinned quota inode.
				 */
				vfs_cleanup_quota_inode(sb, cnt);
				continue;
			}
			spin_unlock(&dq_state_lock);
		}

		/* We still have to keep quota loaded? */
		if (sb_has_quota_loaded(sb, cnt) && !(flags & DQUOT_SUSPENDED))
			continue;

		/* Note: these are blocking operations */
		drop_dquot_ref(sb, cnt);
		invalidate_dquots(sb, cnt);
		/*
		 * Now all dquots should be invalidated, all writes done so we
		 * should be only users of the info. No locks needed.
		 */
		if (info_dirty(&dqopt->info[cnt]))
			sb->dq_op->write_info(sb, cnt);
		if (dqopt->ops[cnt]->free_file_info)
			dqopt->ops[cnt]->free_file_info(sb, cnt);
		put_quota_format(dqopt->info[cnt].dqi_format);
		/* Reset the per-type info so a later quotaon starts clean. */
		dqopt->info[cnt].dqi_flags = 0;
		dqopt->info[cnt].dqi_igrace = 0;
		dqopt->info[cnt].dqi_bgrace = 0;
		dqopt->ops[cnt] = NULL;
	}

	/* Skip syncing and setting flags if quota files are hidden */
	if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
		goto put_inodes;

	/* Sync the superblock so that buffers with quota data are written to
	 * disk (and so userspace sees correct data afterwards). */
	if (sb->s_op->sync_fs)
		sb->s_op->sync_fs(sb, 1);
	sync_blockdev(sb->s_bdev);
	/* Now the quota files are just ordinary files and we can set the
	 * inode flags back. Moreover we discard the pagecache so that
	 * userspace sees the writes we did bypassing the pagecache. We
	 * must also discard the blockdev buffers so that we see the
	 * changes done by userspace on the next quotaon() */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (!sb_has_quota_loaded(sb, cnt) && dqopt->files[cnt]) {
			inode_lock(dqopt->files[cnt]);
			truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
			inode_unlock(dqopt->files[cnt]);
		}
	if (sb->s_bdev)
		invalidate_bdev(sb->s_bdev);
put_inodes:
	/* We are done when suspending quotas */
	if (flags & DQUOT_SUSPENDED)
		return 0;

	/* Drop the quota inodes of all types that are now fully unloaded. */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (!sb_has_quota_loaded(sb, cnt))
			vfs_cleanup_quota_inode(sb, cnt);
	return 0;
}
2302 EXPORT_SYMBOL(dquot_disable);
2303 
/* Fully turn quota off: disable both usage accounting and limits. */
int dquot_quota_off(struct super_block *sb, int type)
{
	return dquot_disable(sb, type,
			     DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
}
2309 EXPORT_SYMBOL(dquot_quota_off);
2310 
2311 /*
2312  *	Turn quotas on on a device
2313  */
2314 
2315 static int vfs_setup_quota_inode(struct inode *inode, int type)
2316 {
2317 	struct super_block *sb = inode->i_sb;
2318 	struct quota_info *dqopt = sb_dqopt(sb);
2319 
2320 	if (is_bad_inode(inode))
2321 		return -EUCLEAN;
2322 	if (!S_ISREG(inode->i_mode))
2323 		return -EACCES;
2324 	if (IS_RDONLY(inode))
2325 		return -EROFS;
2326 	if (sb_has_quota_loaded(sb, type))
2327 		return -EBUSY;
2328 
2329 	dqopt->files[type] = igrab(inode);
2330 	if (!dqopt->files[type])
2331 		return -EIO;
2332 	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
2333 		/* We don't want quota and atime on quota files (deadlocks
2334 		 * possible) Also nobody should write to the file - we use
2335 		 * special IO operations which ignore the immutable bit. */
2336 		inode_lock(inode);
2337 		inode->i_flags |= S_NOQUOTA;
2338 		inode_unlock(inode);
2339 		/*
2340 		 * When S_NOQUOTA is set, remove dquot references as no more
2341 		 * references can be added
2342 		 */
2343 		__dquot_drop(inode);
2344 	}
2345 	return 0;
2346 }
2347 
/*
 * Load quota information of format @format_id for @type and enable quotas on
 * @sb according to @flags (DQUOT_USAGE_ENABLED, DQUOT_LIMITS_ENABLED). The
 * caller is expected to have set up the quota inode beforehand (see
 * dquot_load_quota_inode()). Returns 0 or a negative errno.
 */
int dquot_load_quota_sb(struct super_block *sb, int type, int format_id,
	unsigned int flags)
{
	/* find_quota_format() takes a format reference; dropped on error. */
	struct quota_format_type *fmt = find_quota_format(format_id);
	struct quota_info *dqopt = sb_dqopt(sb);
	int error;

	/* Just unsuspend quotas? */
	BUG_ON(flags & DQUOT_SUSPENDED);
	/* s_umount should be held in exclusive mode */
	if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
		up_read(&sb->s_umount);

	if (!fmt)
		return -ESRCH;
	if (!sb->s_op->quota_write || !sb->s_op->quota_read ||
	    (type == PRJQUOTA && sb->dq_op->get_projid == NULL)) {
		error = -EINVAL;
		goto out_fmt;
	}
	/* Filesystems outside of init_user_ns not yet supported */
	if (sb->s_user_ns != &init_user_ns) {
		error = -EINVAL;
		goto out_fmt;
	}
	/* Usage always has to be set... */
	if (!(flags & DQUOT_USAGE_ENABLED)) {
		error = -EINVAL;
		goto out_fmt;
	}
	if (sb_has_quota_loaded(sb, type)) {
		error = -EBUSY;
		goto out_fmt;
	}

	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
		/* As we bypass the pagecache we must now flush all the
		 * dirty data and invalidate caches so that kernel sees
		 * changes from userspace. It is not enough to just flush
		 * the quota file since if blocksize < pagesize, invalidation
		 * of the cache could fail because of other unrelated dirty
		 * data */
		sync_filesystem(sb);
		invalidate_bdev(sb->s_bdev);
	}

	error = -EINVAL;
	if (!fmt->qf_ops->check_quota_file(sb, type))
		goto out_fmt;

	/* Wire up the format operations and read the quota file header. */
	dqopt->ops[type] = fmt->qf_ops;
	dqopt->info[type].dqi_format = fmt;
	dqopt->info[type].dqi_fmt_id = format_id;
	INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
	error = dqopt->ops[type]->read_file_info(sb, type);
	if (error < 0)
		goto out_fmt;
	if (dqopt->flags & DQUOT_QUOTA_SYS_FILE) {
		spin_lock(&dq_data_lock);
		dqopt->info[type].dqi_flags |= DQF_SYS_FILE;
		spin_unlock(&dq_data_lock);
	}
	/* Publish the enabled state before attaching dquots to inodes. */
	spin_lock(&dq_state_lock);
	dqopt->flags |= dquot_state_flag(flags, type);
	spin_unlock(&dq_state_lock);

	error = add_dquot_ref(sb, type);
	if (error)
		/* Tear everything down again on failure. */
		dquot_disable(sb, type,
			      DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);

	return error;
out_fmt:
	put_quota_format(fmt);

	return error;
}
2425 EXPORT_SYMBOL(dquot_load_quota_sb);
2426 
2427 /*
2428  * More powerful function for turning on quotas on given quota inode allowing
2429  * setting of individual quota flags
2430  */
2431 int dquot_load_quota_inode(struct inode *inode, int type, int format_id,
2432 	unsigned int flags)
2433 {
2434 	int err;
2435 
2436 	err = vfs_setup_quota_inode(inode, type);
2437 	if (err < 0)
2438 		return err;
2439 	err = dquot_load_quota_sb(inode->i_sb, type, format_id, flags);
2440 	if (err < 0)
2441 		vfs_cleanup_quota_inode(inode->i_sb, type);
2442 	return err;
2443 }
2444 EXPORT_SYMBOL(dquot_load_quota_inode);
2445 
2446 /* Reenable quotas on remount RW */
int dquot_resume(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	int ret = 0, cnt;
	unsigned int flags;

	/* s_umount should be held in exclusive mode */
	if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
		up_read(&sb->s_umount);

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_suspended(sb, cnt))
			continue;

		/*
		 * Remember which of usage/limits were enabled before the
		 * suspend, then clear all state bits for this type;
		 * dquot_load_quota_sb() will set them again.
		 */
		spin_lock(&dq_state_lock);
		flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED |
							DQUOT_LIMITS_ENABLED,
							cnt);
		dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, cnt);
		spin_unlock(&dq_state_lock);

		flags = dquot_generic_flag(flags, cnt);
		ret = dquot_load_quota_sb(sb, cnt, dqopt->info[cnt].dqi_fmt_id,
					  flags);
		if (ret < 0)
			vfs_cleanup_quota_inode(sb, cnt);
	}

	/* NOTE: only the error of the last processed type is returned. */
	return ret;
}
2479 EXPORT_SYMBOL(dquot_resume);
2480 
2481 int dquot_quota_on(struct super_block *sb, int type, int format_id,
2482 		   const struct path *path)
2483 {
2484 	int error = security_quota_on(path->dentry);
2485 	if (error)
2486 		return error;
2487 	/* Quota file not on the same filesystem? */
2488 	if (path->dentry->d_sb != sb)
2489 		error = -EXDEV;
2490 	else
2491 		error = dquot_load_quota_inode(d_inode(path->dentry), type,
2492 					     format_id, DQUOT_USAGE_ENABLED |
2493 					     DQUOT_LIMITS_ENABLED);
2494 	return error;
2495 }
2496 EXPORT_SYMBOL(dquot_quota_on);
2497 
2498 /*
2499  * This function is used when filesystem needs to initialize quotas
2500  * during mount time.
2501  */
2502 int dquot_quota_on_mount(struct super_block *sb, char *qf_name,
2503 		int format_id, int type)
2504 {
2505 	struct dentry *dentry;
2506 	int error;
2507 
2508 	dentry = lookup_positive_unlocked(qf_name, sb->s_root, strlen(qf_name));
2509 	if (IS_ERR(dentry))
2510 		return PTR_ERR(dentry);
2511 
2512 	error = security_quota_on(dentry);
2513 	if (!error)
2514 		error = dquot_load_quota_inode(d_inode(dentry), type, format_id,
2515 				DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
2516 
2517 	dput(dentry);
2518 	return error;
2519 }
2520 EXPORT_SYMBOL(dquot_quota_on_mount);
2521 
/*
 * Turn on limit enforcement for the types selected by @flags. Only valid
 * for filesystems with hidden system quota files (DQUOT_QUOTA_SYS_FILE);
 * accounting itself cannot be toggled while mounted.
 */
static int dquot_quota_enable(struct super_block *sb, unsigned int flags)
{
	int ret;
	int type;
	struct quota_info *dqopt = sb_dqopt(sb);

	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE))
		return -ENOSYS;
	/* Accounting cannot be turned on while fs is mounted */
	flags &= ~(FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT);
	if (!flags)
		return -EINVAL;
	for (type = 0; type < MAXQUOTAS; type++) {
		if (!(flags & qtype_enforce_flag(type)))
			continue;
		/* Can't enforce without accounting */
		if (!sb_has_quota_usage_enabled(sb, type)) {
			ret = -EINVAL;
			goto out_err;
		}
		/* Enforcement already on for this type? */
		if (sb_has_quota_limits_enabled(sb, type)) {
			ret = -EBUSY;
			goto out_err;
		}
		spin_lock(&dq_state_lock);
		dqopt->flags |= dquot_state_flag(DQUOT_LIMITS_ENABLED, type);
		spin_unlock(&dq_state_lock);
	}
	return 0;
out_err:
	/* Backout enforcement enablement we already did */
	for (type--; type >= 0; type--)  {
		if (flags & qtype_enforce_flag(type))
			dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
	}
	/* Error code translation for better compatibility with XFS */
	if (ret == -EBUSY)
		ret = -EEXIST;
	return ret;
}
2562 
/*
 * Turn off limit enforcement for the types selected by @flags. Accounting
 * cannot be disabled through this interface; see the comment below.
 */
static int dquot_quota_disable(struct super_block *sb, unsigned int flags)
{
	int ret;
	int type;
	struct quota_info *dqopt = sb_dqopt(sb);

	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE))
		return -ENOSYS;
	/*
	 * We don't support turning off accounting via quotactl. In principle
	 * quota infrastructure can do this but filesystems don't expect
	 * userspace to be able to do it.
	 */
	if (flags &
		  (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT))
		return -EOPNOTSUPP;

	/* Filter out limits not enabled */
	for (type = 0; type < MAXQUOTAS; type++)
		if (!sb_has_quota_limits_enabled(sb, type))
			flags &= ~qtype_enforce_flag(type);
	/* Nothing left? */
	if (!flags)
		return -EEXIST;
	for (type = 0; type < MAXQUOTAS; type++) {
		if (flags & qtype_enforce_flag(type)) {
			ret = dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
			if (ret < 0)
				goto out_err;
		}
	}
	return 0;
out_err:
	/* Backout enforcement disabling we already did */
	for (type--; type >= 0; type--)  {
		if (flags & qtype_enforce_flag(type)) {
			/* Re-set the state flag cleared by dquot_disable(). */
			spin_lock(&dq_state_lock);
			dqopt->flags |=
				dquot_state_flag(DQUOT_LIMITS_ENABLED, type);
			spin_unlock(&dq_state_lock);
		}
	}
	return ret;
}
2607 
2608 /* Generic routine for getting common part of quota structure */
2609 static void do_get_dqblk(struct dquot *dquot, struct qc_dqblk *di)
2610 {
2611 	struct mem_dqblk *dm = &dquot->dq_dqb;
2612 
2613 	memset(di, 0, sizeof(*di));
2614 	spin_lock(&dquot->dq_dqb_lock);
2615 	di->d_spc_hardlimit = dm->dqb_bhardlimit;
2616 	di->d_spc_softlimit = dm->dqb_bsoftlimit;
2617 	di->d_ino_hardlimit = dm->dqb_ihardlimit;
2618 	di->d_ino_softlimit = dm->dqb_isoftlimit;
2619 	di->d_space = dm->dqb_curspace + dm->dqb_rsvspace;
2620 	di->d_ino_count = dm->dqb_curinodes;
2621 	di->d_spc_timer = dm->dqb_btime;
2622 	di->d_ino_timer = dm->dqb_itime;
2623 	spin_unlock(&dquot->dq_dqb_lock);
2624 }
2625 
2626 int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
2627 		    struct qc_dqblk *di)
2628 {
2629 	struct dquot *dquot;
2630 
2631 	dquot = dqget(sb, qid);
2632 	if (IS_ERR(dquot))
2633 		return PTR_ERR(dquot);
2634 	do_get_dqblk(dquot, di);
2635 	dqput(dquot);
2636 
2637 	return 0;
2638 }
2639 EXPORT_SYMBOL(dquot_get_dqblk);
2640 
2641 int dquot_get_next_dqblk(struct super_block *sb, struct kqid *qid,
2642 			 struct qc_dqblk *di)
2643 {
2644 	struct dquot *dquot;
2645 	int err;
2646 
2647 	if (!sb->dq_op->get_next_id)
2648 		return -ENOSYS;
2649 	err = sb->dq_op->get_next_id(sb, qid);
2650 	if (err < 0)
2651 		return err;
2652 	dquot = dqget(sb, *qid);
2653 	if (IS_ERR(dquot))
2654 		return PTR_ERR(dquot);
2655 	do_get_dqblk(dquot, di);
2656 	dqput(dquot);
2657 
2658 	return 0;
2659 }
2660 EXPORT_SYMBOL(dquot_get_next_dqblk);
2661 
2662 #define VFS_QC_MASK \
2663 	(QC_SPACE | QC_SPC_SOFT | QC_SPC_HARD | \
2664 	 QC_INO_COUNT | QC_INO_SOFT | QC_INO_HARD | \
2665 	 QC_SPC_TIMER | QC_INO_TIMER)
2666 
2667 /* Generic routine for setting common part of quota structure */
/*
 * Apply the fields selected by di->d_fieldmask to @dquot's in-memory quota
 * data and maintain the grace timers and DQ_FAKE_B state accordingly.
 * Returns 0, -EINVAL for unsupported fields or -ERANGE for over-limit
 * values.
 */
static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di)
{
	struct mem_dqblk *dm = &dquot->dq_dqb;
	int check_blim = 0, check_ilim = 0;
	struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];

	if (di->d_fieldmask & ~VFS_QC_MASK)
		return -EINVAL;

	/* Reject limits exceeding what the quota format can store. */
	if (((di->d_fieldmask & QC_SPC_SOFT) &&
	     di->d_spc_softlimit > dqi->dqi_max_spc_limit) ||
	    ((di->d_fieldmask & QC_SPC_HARD) &&
	     di->d_spc_hardlimit > dqi->dqi_max_spc_limit) ||
	    ((di->d_fieldmask & QC_INO_SOFT) &&
	     (di->d_ino_softlimit > dqi->dqi_max_ino_limit)) ||
	    ((di->d_fieldmask & QC_INO_HARD) &&
	     (di->d_ino_hardlimit > dqi->dqi_max_ino_limit)))
		return -ERANGE;

	spin_lock(&dquot->dq_dqb_lock);
	if (di->d_fieldmask & QC_SPACE) {
		/* d_space includes reservations; store only the used part. */
		dm->dqb_curspace = di->d_space - dm->dqb_rsvspace;
		check_blim = 1;
		set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_SPC_SOFT)
		dm->dqb_bsoftlimit = di->d_spc_softlimit;
	if (di->d_fieldmask & QC_SPC_HARD)
		dm->dqb_bhardlimit = di->d_spc_hardlimit;
	if (di->d_fieldmask & (QC_SPC_SOFT | QC_SPC_HARD)) {
		check_blim = 1;
		set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_INO_COUNT) {
		dm->dqb_curinodes = di->d_ino_count;
		check_ilim = 1;
		set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_INO_SOFT)
		dm->dqb_isoftlimit = di->d_ino_softlimit;
	if (di->d_fieldmask & QC_INO_HARD)
		dm->dqb_ihardlimit = di->d_ino_hardlimit;
	if (di->d_fieldmask & (QC_INO_SOFT | QC_INO_HARD)) {
		check_ilim = 1;
		set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_SPC_TIMER) {
		dm->dqb_btime = di->d_spc_timer;
		check_blim = 1;
		set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_INO_TIMER) {
		dm->dqb_itime = di->d_ino_timer;
		check_ilim = 1;
		set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
	}

	/* Recompute the space grace timer after any space-related change. */
	if (check_blim) {
		if (!dm->dqb_bsoftlimit ||
		    dm->dqb_curspace + dm->dqb_rsvspace <= dm->dqb_bsoftlimit) {
			dm->dqb_btime = 0;
			clear_bit(DQ_BLKS_B, &dquot->dq_flags);
		} else if (!(di->d_fieldmask & QC_SPC_TIMER))
			/* Set grace only if user hasn't provided his own... */
			dm->dqb_btime = ktime_get_real_seconds() + dqi->dqi_bgrace;
	}
	/* ...and likewise for the inode grace timer. */
	if (check_ilim) {
		if (!dm->dqb_isoftlimit ||
		    dm->dqb_curinodes <= dm->dqb_isoftlimit) {
			dm->dqb_itime = 0;
			clear_bit(DQ_INODES_B, &dquot->dq_flags);
		} else if (!(di->d_fieldmask & QC_INO_TIMER))
			/* Set grace only if user hasn't provided his own... */
			dm->dqb_itime = ktime_get_real_seconds() + dqi->dqi_igrace;
	}
	/* A dquot with no limits at all is a "fake" entry. */
	if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit ||
	    dm->dqb_isoftlimit)
		clear_bit(DQ_FAKE_B, &dquot->dq_flags);
	else
		set_bit(DQ_FAKE_B, &dquot->dq_flags);
	spin_unlock(&dquot->dq_dqb_lock);
	mark_dquot_dirty(dquot);

	return 0;
}
2758 
2759 int dquot_set_dqblk(struct super_block *sb, struct kqid qid,
2760 		  struct qc_dqblk *di)
2761 {
2762 	struct dquot *dquot;
2763 	int rc;
2764 
2765 	dquot = dqget(sb, qid);
2766 	if (IS_ERR(dquot)) {
2767 		rc = PTR_ERR(dquot);
2768 		goto out;
2769 	}
2770 	rc = do_set_dqblk(dquot, di);
2771 	dqput(dquot);
2772 out:
2773 	return rc;
2774 }
2775 EXPORT_SYMBOL(dquot_set_dqblk);
2776 
2777 /* Generic routine for getting common part of quota file information */
int dquot_get_state(struct super_block *sb, struct qc_state *state)
{
	struct mem_dqinfo *mi;
	struct qc_type_state *tstate;
	struct quota_info *dqopt = sb_dqopt(sb);
	int type;

	memset(state, 0, sizeof(*state));
	for (type = 0; type < MAXQUOTAS; type++) {
		if (!sb_has_quota_active(sb, type))
			continue;
		tstate = state->s_state + type;
		mi = sb_dqopt(sb)->info + type;
		tstate->flags = QCI_ACCT_ENABLED;
		/* dqi_flags and grace times are protected by dq_data_lock. */
		spin_lock(&dq_data_lock);
		if (mi->dqi_flags & DQF_SYS_FILE)
			tstate->flags |= QCI_SYSFILE;
		if (mi->dqi_flags & DQF_ROOT_SQUASH)
			tstate->flags |= QCI_ROOT_SQUASH;
		if (sb_has_quota_limits_enabled(sb, type))
			tstate->flags |= QCI_LIMITS_ENFORCED;
		tstate->spc_timelimit = mi->dqi_bgrace;
		tstate->ino_timelimit = mi->dqi_igrace;
		/* files[type] may be NULL, e.g. for hidden system files. */
		if (dqopt->files[type]) {
			tstate->ino = dqopt->files[type]->i_ino;
			tstate->blocks = dqopt->files[type]->i_blocks;
		}
		tstate->nextents = 1;	/* We don't know... */
		spin_unlock(&dq_data_lock);
	}
	return 0;
}
2810 EXPORT_SYMBOL(dquot_get_state);
2811 
2812 /* Generic routine for setting common part of quota file information */
int dquot_set_dqinfo(struct super_block *sb, int type, struct qc_info *ii)
{
	struct mem_dqinfo *mi;

	/* Warning limits and rt-space timers are not supported here. */
	if ((ii->i_fieldmask & QC_WARNS_MASK) ||
	    (ii->i_fieldmask & QC_RT_SPC_TIMER))
		return -EINVAL;
	if (!sb_has_quota_active(sb, type))
		return -ESRCH;
	mi = sb_dqopt(sb)->info + type;
	if (ii->i_fieldmask & QC_FLAGS) {
		/* Root squashing is only supported by the old VFS format. */
		if ((ii->i_flags & QCI_ROOT_SQUASH &&
		     mi->dqi_format->qf_fmt_id != QFMT_VFS_OLD))
			return -EINVAL;
	}
	spin_lock(&dq_data_lock);
	if (ii->i_fieldmask & QC_SPC_TIMER)
		mi->dqi_bgrace = ii->i_spc_timelimit;
	if (ii->i_fieldmask & QC_INO_TIMER)
		mi->dqi_igrace = ii->i_ino_timelimit;
	if (ii->i_fieldmask & QC_FLAGS) {
		if (ii->i_flags & QCI_ROOT_SQUASH)
			mi->dqi_flags |= DQF_ROOT_SQUASH;
		else
			mi->dqi_flags &= ~DQF_ROOT_SQUASH;
	}
	spin_unlock(&dq_data_lock);
	mark_info_dirty(sb, type);
	/* Force write to disk */
	return sb->dq_op->write_info(sb, type);
}
2844 EXPORT_SYMBOL(dquot_set_dqinfo);
2845 
/*
 * quotactl operations for filesystems keeping quota in hidden system files;
 * enable/disable here only toggle limit enforcement (see the helpers above).
 */
const struct quotactl_ops dquot_quotactl_sysfile_ops = {
	.quota_enable	= dquot_quota_enable,
	.quota_disable	= dquot_quota_disable,
	.quota_sync	= dquot_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,
	.set_dqblk	= dquot_set_dqblk
};
2856 EXPORT_SYMBOL(dquot_quotactl_sysfile_ops);
2857 
/*
 * sysctl handler for the quota statistics counters: fold the per-cpu
 * counter into the plain table entry before the generic handler reads it.
 */
static int do_proc_dqstats(struct ctl_table *table, int write,
		     void *buffer, size_t *lenp, loff_t *ppos)
{
	/* Recover the counter index from the entry's offset in dqstats.stat */
	unsigned int type = (unsigned long *)table->data - dqstats.stat;
	s64 value = percpu_counter_sum(&dqstats.counter[type]);

	/* Filter negative values for non-monotonic counters */
	if (value < 0 && (type == DQST_ALLOC_DQUOTS ||
			  type == DQST_FREE_DQUOTS))
		value = 0;

	/* Update global table */
	dqstats.stat[type] = value;
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
2873 
/*
 * Read-only statistics exported under /proc/sys/fs/quota/ (registered in
 * dquot_init()); every entry is served by do_proc_dqstats() above.
 */
static struct ctl_table fs_dqstats_table[] = {
	{
		.procname	= "lookups",
		.data		= &dqstats.stat[DQST_LOOKUPS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "drops",
		.data		= &dqstats.stat[DQST_DROPS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "reads",
		.data		= &dqstats.stat[DQST_READS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "writes",
		.data		= &dqstats.stat[DQST_WRITES],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "cache_hits",
		.data		= &dqstats.stat[DQST_CACHE_HITS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "allocated_dquots",
		.data		= &dqstats.stat[DQST_ALLOC_DQUOTS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "free_dquots",
		.data		= &dqstats.stat[DQST_FREE_DQUOTS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "syncs",
		.data		= &dqstats.stat[DQST_SYNCS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
#ifdef CONFIG_PRINT_QUOTA_WARNING
	{
		.procname	= "warnings",
		.data		= &flag_print_warnings,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
#endif
	{ },
};
2942 
/*
 * One-time initialization of the quota subsystem: sysctls, the dquot slab
 * cache, the dquot hash table, statistics counters and the cache shrinker.
 * Failures are fatal (panic) since quota cannot operate without them.
 */
static int __init dquot_init(void)
{
	int i, ret;
	unsigned long nr_hash, order;

	printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__);

	register_sysctl_init("fs/quota", fs_dqstats_table);

	/* SLAB_PANIC: no error check needed, allocation failure panics. */
	dquot_cachep = kmem_cache_create("dquot",
			sizeof(struct dquot), sizeof(unsigned long) * 4,
			(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
				SLAB_MEM_SPREAD|SLAB_PANIC),
			NULL);

	/* order 0: the hash table occupies a single page. */
	order = 0;
	dquot_hash = (struct hlist_head *)__get_free_pages(GFP_KERNEL, order);
	if (!dquot_hash)
		panic("Cannot create dquot hash table");

	for (i = 0; i < _DQST_DQSTAT_LAST; i++) {
		ret = percpu_counter_init(&dqstats.counter[i], 0, GFP_KERNEL);
		if (ret)
			panic("Cannot create dquot stat counters");
	}

	/* Find power-of-two hlist_heads which can fit into allocation */
	nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
	dq_hash_bits = ilog2(nr_hash);

	nr_hash = 1UL << dq_hash_bits;
	dq_hash_mask = nr_hash - 1;
	for (i = 0; i < nr_hash; i++)
		INIT_HLIST_HEAD(dquot_hash + i);

	pr_info("VFS: Dquot-cache hash table entries: %ld (order %ld,"
		" %ld bytes)\n", nr_hash, order, (PAGE_SIZE << order));

	if (register_shrinker(&dqcache_shrinker, "dquota-cache"))
		panic("Cannot register dquot shrinker");

	return 0;
}
2986 fs_initcall(dquot_init);
2987