// SPDX-License-Identifier: GPL-2.0
/*
 * Implementation of the diskquota system for the LINUX operating system. QUOTA
 * is implemented using the BSD system call interface as the means of
 * communication with the user level. This file contains the generic routines
 * called by the different filesystems on allocation of an inode or block.
 * These routines take care of the administration needed to have a consistent
 * diskquota tracking system. The ideas of both user and group quotas are based
 * on the Melbourne quota system as used on BSD derived systems. The internal
 * implementation is based on one of the several variants of the LINUX
 * inode-subsystem with added complexity of the diskquota system.
 *
 * Author:	Marco van Wieringen <mvw@planets.elm.net>
 *
 * Fixes:   Dmitry Gorodchanin <pgmdsg@ibi.com>, 11 Feb 96
 *
 *		Revised list management to avoid races
 *		-- Bill Hawes, <whawes@star.net>, 9/98
 *
 *		Fixed races in dquot_transfer(), dqget() and dquot_alloc_...().
 *		As the consequence the locking was moved from dquot_decr_...(),
 *		dquot_incr_...() to calling functions.
 *		invalidate_dquots() now writes modified dquots.
 *		Serialized quota_off() and quota_on() for mount point.
 *		Fixed a few bugs in grow_dquots().
 *		Fixed deadlock in write_dquot() - we no longer account quotas on
 *		quota files
 *		remove_dquot_ref() moved to inode.c - it now traverses through inodes
 *		add_dquot_ref() restarts after blocking
 *		Added check for bogus uid and fixed check for group in quotactl.
 *		Jan Kara, <jack@suse.cz>, sponsored by SuSE CR, 10-11/99
 *
 *		Used struct list_head instead of own list struct
 *		Invalidation of referenced dquots is no longer possible
 *		Improved free_dquots list management
 *		Quota and i_blocks are now updated in one place to avoid races
 *		Warnings are now delayed so we won't block in critical section
 *		Write updated not to require dquot lock
 *		Jan Kara, <jack@suse.cz>, 9/2000
 *
 *		Added dynamic quota structure allocation
 *		Jan Kara <jack@suse.cz> 12/2000
 *
 *		Rewritten quota interface. Implemented new quota format and
 *		formats registering.
 *		Jan Kara, <jack@suse.cz>, 2001,2002
 *
 *		New SMP locking.
 *		Jan Kara, <jack@suse.cz>, 10/2002
 *
 *		Added journalled quota support, fix lock inversion problems
 *		Jan Kara, <jack@suse.cz>, 2003,2004
 *
 * (C) Copyright 1994 - 1997 Marco van Wieringen
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/mm.h>
#include <linux/time.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/tty.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/security.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/kmod.h>
#include <linux/namei.h>
#include <linux/capability.h>
#include <linux/quotaops.h>
#include <linux/blkdev.h>
#include <linux/sched/mm.h>
#include "../internal.h" /* ugh */

#include <linux/uaccess.h>

/*
 * There are five quota SMP locks:
 * * dq_list_lock protects all lists with quotas and quota formats.
 * * dquot->dq_dqb_lock protects data from dq_dqb
 * * inode->i_lock protects inode->i_blocks, i_bytes and also guards
 *   consistency of dquot->dq_dqb with inode->i_blocks, i_bytes so that
 *   dquot_transfer() can stabilize the amount it transfers
 * * dq_data_lock protects mem_dqinfo structures and modifications of dquot
 *   pointers in the inode
 * * dq_state_lock protects modifications of quota state (on quotaon and
 *   quotaoff) and readers who care about the latest values take it as well.
 *
 * The spinlock ordering is hence:
 *   dq_data_lock > dq_list_lock > i_lock > dquot->dq_dqb_lock,
 *   dq_list_lock > dq_state_lock
 *
 * Note that some things (e.g. the sb pointer, type, id) don't change during
 * the life of the dquot structure and so need not be protected by a lock.
 *
 * Operations accessing dquots via inode pointers are protected by dquot_srcu.
 * Reading a pointer needs srcu_read_lock(&dquot_srcu), and
 * synchronize_srcu(&dquot_srcu) is called after clearing pointers from
 * inode and before dropping dquot references to avoid use of dquots after
 * they are freed. dq_data_lock is used to serialize the pointer setting and
 * clearing operations.
 * Special care needs to be taken about the S_NOQUOTA inode flag (marking that
 * an inode is a quota file). Functions adding pointers from inode to dquots have
 * to check this flag under dq_data_lock and then (if S_NOQUOTA is not set) they
 * have to do all pointer modifications before dropping dq_data_lock. This makes
 * sure they cannot race with quotaon which first sets S_NOQUOTA flag and
 * then drops all pointers to dquots from an inode.
 *
 * Each dquot has its dq_lock mutex.  A dquot is locked when it is being read into
 * memory (or space for it is being allocated) on the first dqget(), when it is
 * being written out, and when it is being released on the last dqput(). The
 * allocation and release operations are serialized by the dq_lock and by
 * checking the use count in dquot_release().
 *
 * Lock ordering (including related VFS locks) is the following:
 *   s_umount > i_mutex > journal_lock > dquot->dq_lock > dqio_sem
 */
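
/*
 * A minimal sketch of the read side of the dquot_srcu protocol described
 * above (illustrative only; the real users below, e.g. __dquot_alloc_space(),
 * follow this same shape, with "cnt" as the quota type index):
 *
 *	struct dquot __rcu **dquots = i_dquot(inode);
 *	struct dquot *dquot;
 *	int idx;
 *
 *	idx = srcu_read_lock(&dquot_srcu);
 *	dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
 *	if (dquot) {
 *		// ... account against dquot under dquot->dq_dqb_lock ...
 *	}
 *	srcu_read_unlock(&dquot_srcu, idx);
 */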

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock);
__cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
EXPORT_SYMBOL(dq_data_lock);
DEFINE_STATIC_SRCU(dquot_srcu);

static DECLARE_WAIT_QUEUE_HEAD(dquot_ref_wq);

void __quota_error(struct super_block *sb, const char *func,
		   const char *fmt, ...)
{
	if (printk_ratelimit()) {
		va_list args;
		struct va_format vaf;

		va_start(args, fmt);

		vaf.fmt = fmt;
		vaf.va = &args;

		printk(KERN_ERR "Quota error (device %s): %s: %pV\n",
		       sb->s_id, func, &vaf);

		va_end(args);
	}
}
EXPORT_SYMBOL(__quota_error);

#if defined(CONFIG_QUOTA_DEBUG) || defined(CONFIG_PRINT_QUOTA_WARNING)
static char *quotatypes[] = INITQFNAMES;
#endif
static struct quota_format_type *quota_formats;	/* List of registered formats */
static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;

/* SLAB cache for dquot structures */
static struct kmem_cache *dquot_cachep;

int register_quota_format(struct quota_format_type *fmt)
{
	spin_lock(&dq_list_lock);
	fmt->qf_next = quota_formats;
	quota_formats = fmt;
	spin_unlock(&dq_list_lock);
	return 0;
}
EXPORT_SYMBOL(register_quota_format);

void unregister_quota_format(struct quota_format_type *fmt)
{
	struct quota_format_type **actqf;

	spin_lock(&dq_list_lock);
	for (actqf = &quota_formats; *actqf && *actqf != fmt;
	     actqf = &(*actqf)->qf_next)
		;
	if (*actqf)
		*actqf = (*actqf)->qf_next;
	spin_unlock(&dq_list_lock);
}
EXPORT_SYMBOL(unregister_quota_format);

static struct quota_format_type *find_quota_format(int id)
{
	struct quota_format_type *actqf;

	spin_lock(&dq_list_lock);
	for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
	     actqf = actqf->qf_next)
		;
	if (!actqf || !try_module_get(actqf->qf_owner)) {
		int qm;

		spin_unlock(&dq_list_lock);

		for (qm = 0; module_names[qm].qm_fmt_id &&
			     module_names[qm].qm_fmt_id != id; qm++)
			;
		if (!module_names[qm].qm_fmt_id ||
		    request_module(module_names[qm].qm_mod_name))
			return NULL;

		spin_lock(&dq_list_lock);
		for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
		     actqf = actqf->qf_next)
			;
		if (actqf && !try_module_get(actqf->qf_owner))
			actqf = NULL;
	}
	spin_unlock(&dq_list_lock);
	return actqf;
}

static void put_quota_format(struct quota_format_type *fmt)
{
	module_put(fmt->qf_owner);
}

/*
 * Dquot List Management:
 * The quota code uses five lists for dquot management: the inuse_list,
 * releasing_dquots, free_dquots, dqi_dirty_list, and dquot_hash[] array.
 * A single dquot structure may be on some of those lists, depending on
 * its current state.
 *
 * All dquots are placed at the end of inuse_list when first created, and this
 * list is used for invalidate operation, which must look at every dquot.
 *
 * When the last reference of a dquot is dropped, the dquot is added to
 * releasing_dquots. We'll then queue a work item which will call
 * synchronize_srcu() and after that perform the final cleanup of all the
 * dquots on the list. Each cleaned up dquot is moved to the free_dquots list.
 * Both releasing_dquots and free_dquots use the dq_free list_head in the dquot
 * struct.
 *
 * Unused and cleaned up dquots are in the free_dquots list and this list is
 * searched whenever we need an available dquot. Dquots are removed from the
 * list as soon as they are used again and dqstats.free_dquots gives the number
 * of dquots on the list. When a dquot is invalidated it's completely released
 * from memory.
 *
 * Dirty dquots are added to the dqi_dirty_list of quota_info when marked
 * dirty, and this list is searched when writing dirty dquots back to the
 * quota file. Note that some filesystems do dirty dquot tracking on their
 * own (e.g. in a journal) and thus don't use dqi_dirty_list.
 *
 * Dquots with a specific identity (device, type and id) are placed on
 * one of the dquot_hash[] hash chains. This provides an efficient search
 * mechanism to locate a specific dquot.
 */
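
/*
 * A sketch of one dquot's list membership over its life, under the rules
 * above (illustrative only):
 *
 *	dqget()       -> on inuse_list + dquot_hash[] (off free_dquots if reused)
 *	mark dirty    -> also on dqi_dirty_list (unless DQUOT_NOLIST_DIRTY)
 *	last dqput()  -> moved to releasing_dquots, DQ_RELEASING_B set
 *	work function -> written back if dirty, released, moved to free_dquots
 *	shrinker / invalidate_dquots() -> unhashed, unlinked and freed
 */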

static LIST_HEAD(inuse_list);
static LIST_HEAD(free_dquots);
static LIST_HEAD(releasing_dquots);
static unsigned int dq_hash_bits, dq_hash_mask;
static struct hlist_head *dquot_hash;

struct dqstats dqstats;
EXPORT_SYMBOL(dqstats);

static qsize_t inode_get_rsv_space(struct inode *inode);
static qsize_t __inode_get_rsv_space(struct inode *inode);
static int __dquot_initialize(struct inode *inode, int type);

static void quota_release_workfn(struct work_struct *work);
static DECLARE_DELAYED_WORK(quota_release_work, quota_release_workfn);

static inline unsigned int
hashfn(const struct super_block *sb, struct kqid qid)
{
	unsigned int id = from_kqid(&init_user_ns, qid);
	int type = qid.type;
	unsigned long tmp;

	tmp = (((unsigned long)sb>>L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type);
	return (tmp + (tmp >> dq_hash_bits)) & dq_hash_mask;
}

/*
 * Following list functions expect dq_list_lock to be held
 */
static inline void insert_dquot_hash(struct dquot *dquot)
{
	struct hlist_head *head;
	head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id);
	hlist_add_head(&dquot->dq_hash, head);
}

static inline void remove_dquot_hash(struct dquot *dquot)
{
	hlist_del_init(&dquot->dq_hash);
}

static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb,
				struct kqid qid)
{
	struct dquot *dquot;

	hlist_for_each_entry(dquot, dquot_hash+hashent, dq_hash)
		if (dquot->dq_sb == sb && qid_eq(dquot->dq_id, qid))
			return dquot;

	return NULL;
}

/* Add a dquot to the tail of the free list */
static inline void put_dquot_last(struct dquot *dquot)
{
	list_add_tail(&dquot->dq_free, &free_dquots);
	dqstats_inc(DQST_FREE_DQUOTS);
}

static inline void put_releasing_dquots(struct dquot *dquot)
{
	list_add_tail(&dquot->dq_free, &releasing_dquots);
	set_bit(DQ_RELEASING_B, &dquot->dq_flags);
}

static inline void remove_free_dquot(struct dquot *dquot)
{
	if (list_empty(&dquot->dq_free))
		return;
	list_del_init(&dquot->dq_free);
	if (!test_bit(DQ_RELEASING_B, &dquot->dq_flags))
		dqstats_dec(DQST_FREE_DQUOTS);
	else
		clear_bit(DQ_RELEASING_B, &dquot->dq_flags);
}

static inline void put_inuse(struct dquot *dquot)
{
	/* We add to the back of the inuse list so we don't have to restart
	 * when traversing this list if we block */
	list_add_tail(&dquot->dq_inuse, &inuse_list);
	dqstats_inc(DQST_ALLOC_DQUOTS);
}

static inline void remove_inuse(struct dquot *dquot)
{
	dqstats_dec(DQST_ALLOC_DQUOTS);
	list_del(&dquot->dq_inuse);
}
/*
 * End of list functions needing dq_list_lock
 */

static void wait_on_dquot(struct dquot *dquot)
{
	mutex_lock(&dquot->dq_lock);
	mutex_unlock(&dquot->dq_lock);
}

static inline int dquot_active(struct dquot *dquot)
{
	return test_bit(DQ_ACTIVE_B, &dquot->dq_flags);
}

static inline int dquot_dirty(struct dquot *dquot)
{
	return test_bit(DQ_MOD_B, &dquot->dq_flags);
}

static inline int mark_dquot_dirty(struct dquot *dquot)
{
	return dquot->dq_sb->dq_op->mark_dirty(dquot);
}

/* Mark dquot dirty in atomic manner, and return its old dirty flag state */
int dquot_mark_dquot_dirty(struct dquot *dquot)
{
	int ret = 1;

	if (!dquot_active(dquot))
		return 0;

	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY)
		return test_and_set_bit(DQ_MOD_B, &dquot->dq_flags);

	/* If quota is dirty already, we don't have to acquire dq_list_lock */
	if (dquot_dirty(dquot))
		return 1;

	spin_lock(&dq_list_lock);
	if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) {
		list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)->
				info[dquot->dq_id.type].dqi_dirty_list);
		ret = 0;
	}
	spin_unlock(&dq_list_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_mark_dquot_dirty);
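
/*
 * A sketch of how a filesystem with its own journalled dirty tracking might
 * override ->mark_dirty while keeping the generic flag handling (illustrative
 * only; the myfs_* name is hypothetical):
 *
 *	static int myfs_mark_dquot_dirty(struct dquot *dquot)
 *	{
 *		// queue the dquot update into the running transaction here
 *		return dquot_mark_dquot_dirty(dquot);
 *	}
 *
 * Such filesystems typically also set DQUOT_NOLIST_DIRTY so the dirty dquot
 * is tracked in the journal instead of on dqi_dirty_list.
 */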

/* Dirtify all the dquots - this can block when journalling */
static inline int mark_all_dquot_dirty(struct dquot __rcu * const *dquots)
{
	int ret, err, cnt;
	struct dquot *dquot;

	ret = err = 0;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
		if (dquot)
			/* Even in case of error we have to continue */
			ret = mark_dquot_dirty(dquot);
		if (!err)
			err = ret;
	}
	return err;
}

static inline void dqput_all(struct dquot **dquot)
{
	unsigned int cnt;

	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		dqput(dquot[cnt]);
}

static inline int clear_dquot_dirty(struct dquot *dquot)
{
	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY)
		return test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags);

	spin_lock(&dq_list_lock);
	if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags)) {
		spin_unlock(&dq_list_lock);
		return 0;
	}
	list_del_init(&dquot->dq_dirty);
	spin_unlock(&dq_list_lock);
	return 1;
}

void mark_info_dirty(struct super_block *sb, int type)
{
	spin_lock(&dq_data_lock);
	sb_dqopt(sb)->info[type].dqi_flags |= DQF_INFO_DIRTY;
	spin_unlock(&dq_data_lock);
}
EXPORT_SYMBOL(mark_info_dirty);

/*
 *	Read dquot from disk and alloc space for it
 */

int dquot_acquire(struct dquot *dquot)
{
	int ret = 0, ret2 = 0;
	unsigned int memalloc;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dquot->dq_lock);
	memalloc = memalloc_nofs_save();
	if (!test_bit(DQ_READ_B, &dquot->dq_flags)) {
		ret = dqopt->ops[dquot->dq_id.type]->read_dqblk(dquot);
		if (ret < 0)
			goto out_iolock;
	}
	/* Make sure flags update is visible after dquot has been filled */
	smp_mb__before_atomic();
	set_bit(DQ_READ_B, &dquot->dq_flags);
	/* Instantiate dquot if needed */
	if (!dquot_active(dquot) && !dquot->dq_off) {
		ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
		/* Write the info if needed */
		if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
			ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
					dquot->dq_sb, dquot->dq_id.type);
		}
		if (ret < 0)
			goto out_iolock;
		if (ret2 < 0) {
			ret = ret2;
			goto out_iolock;
		}
	}
	/*
	 * Make sure flags update is visible after on-disk struct has been
	 * allocated. Paired with smp_rmb() in dqget().
	 */
	smp_mb__before_atomic();
	set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_iolock:
	memalloc_nofs_restore(memalloc);
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_acquire);
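
/*
 * dquot_acquire(), dquot_commit() and dquot_release() are the generic
 * backends a filesystem can plug into its dquot_operations. A minimal
 * sketch of such wiring (illustrative; myfs_dquot_operations is a
 * hypothetical name, and further hooks like ->get_next_id may be needed):
 *
 *	static const struct dquot_operations myfs_dquot_operations = {
 *		.write_dquot	= dquot_commit,
 *		.acquire_dquot	= dquot_acquire,
 *		.release_dquot	= dquot_release,
 *		.mark_dirty	= dquot_mark_dquot_dirty,
 *		.alloc_dquot	= dquot_alloc,
 *		.destroy_dquot	= dquot_destroy,
 *	};
 */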

/*
 *	Write dquot to disk
 */
int dquot_commit(struct dquot *dquot)
{
	int ret = 0;
	unsigned int memalloc;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dquot->dq_lock);
	memalloc = memalloc_nofs_save();
	if (!clear_dquot_dirty(dquot))
		goto out_lock;
	/* A dquot can be inactive only if there was an error during read/init
	 * => we'd better not write it */
	if (dquot_active(dquot))
		ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
	else
		ret = -EIO;
out_lock:
	memalloc_nofs_restore(memalloc);
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_commit);

/*
 *	Release dquot
 */
int dquot_release(struct dquot *dquot)
{
	int ret = 0, ret2 = 0;
	unsigned int memalloc;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dquot->dq_lock);
	memalloc = memalloc_nofs_save();
	/* Check whether we are not racing with some other dqget() */
	if (dquot_is_busy(dquot))
		goto out_dqlock;
	if (dqopt->ops[dquot->dq_id.type]->release_dqblk) {
		ret = dqopt->ops[dquot->dq_id.type]->release_dqblk(dquot);
		/* Write the info */
		if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
			ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
						dquot->dq_sb, dquot->dq_id.type);
		}
		if (ret >= 0)
			ret = ret2;
	}
	clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_dqlock:
	memalloc_nofs_restore(memalloc);
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_release);

void dquot_destroy(struct dquot *dquot)
{
	kmem_cache_free(dquot_cachep, dquot);
}
EXPORT_SYMBOL(dquot_destroy);

static inline void do_destroy_dquot(struct dquot *dquot)
{
	dquot->dq_sb->dq_op->destroy_dquot(dquot);
}

/* Invalidate all dquots on the list. Note that this function is called after
 * quota is disabled and pointers from inodes removed so there cannot be new
 * quota users. There can still be some users of quotas due to inodes being
 * just deleted or pruned by prune_icache() (those are not attached to any
 * list) or parallel quotactl call. We have to wait for such users.
 */
static void invalidate_dquots(struct super_block *sb, int type)
{
	struct dquot *dquot, *tmp;

restart:
	flush_delayed_work(&quota_release_work);

	spin_lock(&dq_list_lock);
	list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
		if (dquot->dq_sb != sb)
			continue;
		if (dquot->dq_id.type != type)
			continue;
		/* Wait for dquot users */
		if (atomic_read(&dquot->dq_count)) {
			atomic_inc(&dquot->dq_count);
			spin_unlock(&dq_list_lock);
			/*
			 * Once dqput() wakes us up, we know it's time to free
			 * the dquot.
			 * IMPORTANT: we rely on the fact that there is always
			 * at most one process waiting for dquot to free.
			 * Otherwise dq_count would be > 1 and we would never
			 * wake up.
			 */
			wait_event(dquot_ref_wq,
				   atomic_read(&dquot->dq_count) == 1);
			dqput(dquot);
			/* At this moment the dquot need not exist (it could be
			 * reclaimed by prune_dqcache()). Hence we must
			 * restart. */
			goto restart;
		}
		/*
		 * The last user already dropped its reference but the dquot
		 * didn't get fully cleaned up yet. Restart the scan which
		 * flushes the work cleaning up released dquots.
		 */
		if (test_bit(DQ_RELEASING_B, &dquot->dq_flags)) {
			spin_unlock(&dq_list_lock);
			goto restart;
		}
		/*
		 * Quota now has no users and it has been written on last
		 * dqput()
		 */
		remove_dquot_hash(dquot);
		remove_free_dquot(dquot);
		remove_inuse(dquot);
		do_destroy_dquot(dquot);
	}
	spin_unlock(&dq_list_lock);
}

/* Call callback for every active dquot on given filesystem */
int dquot_scan_active(struct super_block *sb,
		      int (*fn)(struct dquot *dquot, unsigned long priv),
		      unsigned long priv)
{
	struct dquot *dquot, *old_dquot = NULL;
	int ret = 0;

	WARN_ON_ONCE(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&dq_list_lock);
	list_for_each_entry(dquot, &inuse_list, dq_inuse) {
		if (!dquot_active(dquot))
			continue;
		if (dquot->dq_sb != sb)
			continue;
		/* Now we have active dquot so we can just increase use count */
		atomic_inc(&dquot->dq_count);
		spin_unlock(&dq_list_lock);
		dqput(old_dquot);
		old_dquot = dquot;
		/*
		 * ->release_dquot() can be racing with us. Our reference
		 * protects us from new calls to it so just wait for any
		 * outstanding call and recheck the DQ_ACTIVE_B after that.
		 */
		wait_on_dquot(dquot);
		if (dquot_active(dquot)) {
			ret = fn(dquot, priv);
			if (ret < 0)
				goto out;
		}
		spin_lock(&dq_list_lock);
		/* We are safe to continue now because our dquot could not
		 * be moved out of the inuse list while we hold the reference */
	}
	spin_unlock(&dq_list_lock);
out:
	dqput(old_dquot);
	return ret;
}
EXPORT_SYMBOL(dquot_scan_active);

static inline int dquot_write_dquot(struct dquot *dquot)
{
	int ret = dquot->dq_sb->dq_op->write_dquot(dquot);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't write quota structure "
			    "(error %d). Quota may get out of sync!", ret);
		/* Clear dirty bit anyway to avoid infinite loop. */
		clear_dquot_dirty(dquot);
	}
	return ret;
}

/* Write all dquot structures to quota files */
int dquot_writeback_dquots(struct super_block *sb, int type)
{
	struct list_head dirty;
	struct dquot *dquot;
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	int err, ret = 0;

	WARN_ON_ONCE(!rwsem_is_locked(&sb->s_umount));

	flush_delayed_work(&quota_release_work);

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;
		spin_lock(&dq_list_lock);
		/* Move list away to avoid livelock. */
		list_replace_init(&dqopt->info[cnt].dqi_dirty_list, &dirty);
		while (!list_empty(&dirty)) {
			dquot = list_first_entry(&dirty, struct dquot,
						 dq_dirty);

			WARN_ON(!dquot_active(dquot));
			/* If the dquot is releasing we should not touch it */
			if (test_bit(DQ_RELEASING_B, &dquot->dq_flags)) {
				spin_unlock(&dq_list_lock);
				flush_delayed_work(&quota_release_work);
				spin_lock(&dq_list_lock);
				continue;
			}

			/* Now we have an active dquot on which someone is
			 * holding a reference, so we can safely just increase
			 * the use count */
			dqgrab(dquot);
			spin_unlock(&dq_list_lock);
			err = dquot_write_dquot(dquot);
			if (err && !ret)
				ret = err;
			dqput(dquot);
			spin_lock(&dq_list_lock);
		}
		spin_unlock(&dq_list_lock);
	}

	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt)
		    && info_dirty(&dqopt->info[cnt]))
			sb->dq_op->write_info(sb, cnt);
	dqstats_inc(DQST_SYNCS);

	return ret;
}
EXPORT_SYMBOL(dquot_writeback_dquots);

/* Write all dquot structures to disk and make them visible from userspace */
int dquot_quota_sync(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	int ret;

	ret = dquot_writeback_dquots(sb, type);
	if (ret)
		return ret;
	if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
		return 0;

	/* This is not very clever (and fast) but currently I don't know about
	 * any other simple way of getting quota data to disk and we must get
	 * it there for it to be visible to userspace... */
	if (sb->s_op->sync_fs) {
		ret = sb->s_op->sync_fs(sb, 1);
		if (ret)
			return ret;
	}
	ret = sync_blockdev(sb->s_bdev);
	if (ret)
		return ret;

	/*
	 * Now when everything is written we can discard the pagecache so
	 * that userspace sees the changes.
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;
		inode_lock(dqopt->files[cnt]);
		truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
		inode_unlock(dqopt->files[cnt]);
	}

	return 0;
}
EXPORT_SYMBOL(dquot_quota_sync);

static unsigned long
dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct dquot *dquot;
	unsigned long freed = 0;

	spin_lock(&dq_list_lock);
	while (!list_empty(&free_dquots) && sc->nr_to_scan) {
		dquot = list_first_entry(&free_dquots, struct dquot, dq_free);
		remove_dquot_hash(dquot);
		remove_free_dquot(dquot);
		remove_inuse(dquot);
		do_destroy_dquot(dquot);
		sc->nr_to_scan--;
		freed++;
	}
	spin_unlock(&dq_list_lock);
	return freed;
}

static unsigned long
dqcache_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	return vfs_pressure_ratio(
	percpu_counter_read_positive(&dqstats.counter[DQST_FREE_DQUOTS]));
}

static struct shrinker dqcache_shrinker = {
	.count_objects = dqcache_shrink_count,
	.scan_objects = dqcache_shrink_scan,
	.seeks = DEFAULT_SEEKS,
};

/*
 * Safely release dquot and put reference to dquot.
 */
static void quota_release_workfn(struct work_struct *work)
{
	struct dquot *dquot;
	struct list_head rls_head;

	spin_lock(&dq_list_lock);
	/* Exchange the list head to avoid livelock. */
	list_replace_init(&releasing_dquots, &rls_head);
	spin_unlock(&dq_list_lock);
	synchronize_srcu(&dquot_srcu);

restart:
	spin_lock(&dq_list_lock);
	while (!list_empty(&rls_head)) {
		dquot = list_first_entry(&rls_head, struct dquot, dq_free);
		WARN_ON_ONCE(atomic_read(&dquot->dq_count));
		/*
		 * Note that DQ_RELEASING_B protects us from racing with
		 * invalidate_dquots() calls so we are safe to work with the
		 * dquot even after we drop dq_list_lock.
		 */
		if (dquot_dirty(dquot)) {
			spin_unlock(&dq_list_lock);
			/* Commit dquot before releasing */
			dquot_write_dquot(dquot);
			goto restart;
		}
		if (dquot_active(dquot)) {
			spin_unlock(&dq_list_lock);
			dquot->dq_sb->dq_op->release_dquot(dquot);
			goto restart;
		}
		/* Dquot is inactive and clean, now move it to free list */
		remove_free_dquot(dquot);
		put_dquot_last(dquot);
	}
	spin_unlock(&dq_list_lock);
}

/*
 * Put reference to dquot
 */
void dqput(struct dquot *dquot)
{
	if (!dquot)
		return;
#ifdef CONFIG_QUOTA_DEBUG
	if (!atomic_read(&dquot->dq_count)) {
		quota_error(dquot->dq_sb, "trying to free free dquot of %s %d",
			    quotatypes[dquot->dq_id.type],
			    from_kqid(&init_user_ns, dquot->dq_id));
		BUG();
	}
#endif
	dqstats_inc(DQST_DROPS);

	spin_lock(&dq_list_lock);
	if (atomic_read(&dquot->dq_count) > 1) {
		/* We have more than one user... nothing to do */
		atomic_dec(&dquot->dq_count);
		/* Releasing dquot during quotaoff phase? */
		if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_id.type) &&
		    atomic_read(&dquot->dq_count) == 1)
			wake_up(&dquot_ref_wq);
		spin_unlock(&dq_list_lock);
		return;
	}

	/* Need to release dquot? */
#ifdef CONFIG_QUOTA_DEBUG
	/* sanity check */
	BUG_ON(!list_empty(&dquot->dq_free));
#endif
	put_releasing_dquots(dquot);
	atomic_dec(&dquot->dq_count);
	spin_unlock(&dq_list_lock);
	queue_delayed_work(system_unbound_wq, &quota_release_work, 1);
}
EXPORT_SYMBOL(dqput);

struct dquot *dquot_alloc(struct super_block *sb, int type)
{
	return kmem_cache_zalloc(dquot_cachep, GFP_NOFS);
}
EXPORT_SYMBOL(dquot_alloc);

static struct dquot *get_empty_dquot(struct super_block *sb, int type)
{
	struct dquot *dquot;

	dquot = sb->dq_op->alloc_dquot(sb, type);
	if (!dquot)
		return NULL;

	mutex_init(&dquot->dq_lock);
	INIT_LIST_HEAD(&dquot->dq_free);
	INIT_LIST_HEAD(&dquot->dq_inuse);
	INIT_HLIST_NODE(&dquot->dq_hash);
	INIT_LIST_HEAD(&dquot->dq_dirty);
	dquot->dq_sb = sb;
	dquot->dq_id = make_kqid_invalid(type);
	atomic_set(&dquot->dq_count, 1);
	spin_lock_init(&dquot->dq_dqb_lock);

	return dquot;
}

/*
 * Get reference to dquot
 *
 * Locking is slightly tricky here. We are guarded from parallel quotaoff()
 * destroying our dquot by:
 *   a) checking for quota flags under dq_list_lock and
 *   b) getting a reference to dquot before we release dq_list_lock
 */
struct dquot *dqget(struct super_block *sb, struct kqid qid)
{
	unsigned int hashent = hashfn(sb, qid);
	struct dquot *dquot, *empty = NULL;

	if (!qid_has_mapping(sb->s_user_ns, qid))
		return ERR_PTR(-EINVAL);

	if (!sb_has_quota_active(sb, qid.type))
		return ERR_PTR(-ESRCH);
we_slept:
	spin_lock(&dq_list_lock);
	spin_lock(&dq_state_lock);
	if (!sb_has_quota_active(sb, qid.type)) {
		spin_unlock(&dq_state_lock);
		spin_unlock(&dq_list_lock);
		dquot = ERR_PTR(-ESRCH);
		goto out;
	}
	spin_unlock(&dq_state_lock);

	dquot = find_dquot(hashent, sb, qid);
	if (!dquot) {
		if (!empty) {
			spin_unlock(&dq_list_lock);
			empty = get_empty_dquot(sb, qid.type);
			if (!empty)
				schedule();	/* Try to wait for a moment... */
			goto we_slept;
		}
		dquot = empty;
		empty = NULL;
		dquot->dq_id = qid;
		/* all dquots go on the inuse_list */
		put_inuse(dquot);
		/* hash it first so it can be found */
		insert_dquot_hash(dquot);
		spin_unlock(&dq_list_lock);
		dqstats_inc(DQST_LOOKUPS);
	} else {
		if (!atomic_read(&dquot->dq_count))
			remove_free_dquot(dquot);
		atomic_inc(&dquot->dq_count);
		spin_unlock(&dq_list_lock);
		dqstats_inc(DQST_CACHE_HITS);
		dqstats_inc(DQST_LOOKUPS);
	}
	/* Wait for dq_lock - after this we know that either dquot_release() is
	 * already finished or it will be canceled due to dq_count > 0 test */
	wait_on_dquot(dquot);
	/* Read the dquot / allocate space in quota file */
	if (!dquot_active(dquot)) {
		int err;

		err = sb->dq_op->acquire_dquot(dquot);
		if (err < 0) {
			dqput(dquot);
			dquot = ERR_PTR(err);
			goto out;
		}
	}
	/*
	 * Make sure following reads see filled structure - paired with
	 * smp_mb__before_atomic() in dquot_acquire().
	 */
	smp_rmb();
	/* Has somebody invalidated entry under us? */
	WARN_ON_ONCE(hlist_unhashed(&dquot->dq_hash));
out:
	if (empty)
		do_destroy_dquot(empty);

	return dquot;
}
EXPORT_SYMBOL(dqget);
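
/*
 * A minimal sketch of the dqget()/dqput() pairing a caller follows
 * (illustrative only; error handling trimmed to the essentials):
 *
 *	struct dquot *dquot;
 *
 *	dquot = dqget(sb, make_kqid_uid(inode->i_uid));
 *	if (IS_ERR(dquot))
 *		return PTR_ERR(dquot);	// -ESRCH if quota was turned off
 *	// ... use dquot; its fields stay valid while the reference is held
 *	dqput(dquot);
 */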
1009  
i_dquot(struct inode * inode)1010  static inline struct dquot __rcu **i_dquot(struct inode *inode)
1011  {
1012  	return inode->i_sb->s_op->get_dquots(inode);
1013  }
1014  
dqinit_needed(struct inode * inode,int type)1015  static int dqinit_needed(struct inode *inode, int type)
1016  {
1017  	struct dquot __rcu * const *dquots;
1018  	int cnt;
1019  
1020  	if (IS_NOQUOTA(inode))
1021  		return 0;
1022  
1023  	dquots = i_dquot(inode);
1024  	if (type != -1)
1025  		return !dquots[type];
1026  	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1027  		if (!dquots[cnt])
1028  			return 1;
1029  	return 0;
1030  }
1031  
/* This routine is guarded by s_umount semaphore */
static int add_dquot_ref(struct super_block *sb, int type)
{
	struct inode *inode, *old_inode = NULL;
#ifdef CONFIG_QUOTA_DEBUG
	int reserved = 0;
#endif
	int err = 0;

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
		    !atomic_read(&inode->i_writecount) ||
		    !dqinit_needed(inode, type)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&sb->s_inode_list_lock);

#ifdef CONFIG_QUOTA_DEBUG
		if (unlikely(inode_get_rsv_space(inode) > 0))
			reserved = 1;
#endif
		iput(old_inode);
		err = __dquot_initialize(inode, type);
		if (err) {
			iput(inode);
			goto out;
		}

		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the
		 * s_inode_list_lock. We cannot iput the inode now as we can be
		 * holding the last reference and we cannot iput it under
		 * s_inode_list_lock. So we keep the reference and iput it
		 * later.
		 */
		old_inode = inode;
		cond_resched();
		spin_lock(&sb->s_inode_list_lock);
	}
	spin_unlock(&sb->s_inode_list_lock);
	iput(old_inode);
out:
#ifdef CONFIG_QUOTA_DEBUG
	if (reserved) {
		quota_error(sb, "Writes happened before quota was turned on "
			"thus quota information is probably inconsistent. "
			"Please run quotacheck(8)");
	}
#endif
	return err;
}

static void remove_dquot_ref(struct super_block *sb, int type)
{
	struct inode *inode;
#ifdef CONFIG_QUOTA_DEBUG
	int reserved = 0;
#endif

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		/*
		 *  We also have to scan I_NEW inodes because they can already
		 *  have their quota pointers initialized. Luckily, we need to
		 *  touch only the quota pointers and these have separate
		 *  locking (dq_data_lock).
		 */
		spin_lock(&dq_data_lock);
		if (!IS_NOQUOTA(inode)) {
			struct dquot __rcu **dquots = i_dquot(inode);
			struct dquot *dquot = srcu_dereference_check(
				dquots[type], &dquot_srcu,
				lockdep_is_held(&dq_data_lock));

#ifdef CONFIG_QUOTA_DEBUG
			if (unlikely(inode_get_rsv_space(inode) > 0))
				reserved = 1;
#endif
			rcu_assign_pointer(dquots[type], NULL);
			if (dquot)
				dqput(dquot);
		}
		spin_unlock(&dq_data_lock);
	}
	spin_unlock(&sb->s_inode_list_lock);
#ifdef CONFIG_QUOTA_DEBUG
	if (reserved) {
		printk(KERN_WARNING "VFS (%s): Writes happened after quota"
			" was disabled thus quota information is probably "
			"inconsistent. Please run quotacheck(8).\n", sb->s_id);
	}
#endif
}

/* Gather all references from inodes and drop them */
static void drop_dquot_ref(struct super_block *sb, int type)
{
	if (sb->dq_op)
		remove_dquot_ref(sb, type);
}

static inline
void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
{
	if (dquot->dq_dqb.dqb_rsvspace >= number)
		dquot->dq_dqb.dqb_rsvspace -= number;
	else {
		WARN_ON_ONCE(1);
		dquot->dq_dqb.dqb_rsvspace = 0;
	}
	if (dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace <=
	    dquot->dq_dqb.dqb_bsoftlimit)
		dquot->dq_dqb.dqb_btime = (time64_t) 0;
	clear_bit(DQ_BLKS_B, &dquot->dq_flags);
}

static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
{
	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
	    dquot->dq_dqb.dqb_curinodes >= number)
		dquot->dq_dqb.dqb_curinodes -= number;
	else
		dquot->dq_dqb.dqb_curinodes = 0;
	if (dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit)
		dquot->dq_dqb.dqb_itime = (time64_t) 0;
	clear_bit(DQ_INODES_B, &dquot->dq_flags);
}

static void dquot_decr_space(struct dquot *dquot, qsize_t number)
{
	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
	    dquot->dq_dqb.dqb_curspace >= number)
		dquot->dq_dqb.dqb_curspace -= number;
	else
		dquot->dq_dqb.dqb_curspace = 0;
	if (dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace <=
	    dquot->dq_dqb.dqb_bsoftlimit)
		dquot->dq_dqb.dqb_btime = (time64_t) 0;
	clear_bit(DQ_BLKS_B, &dquot->dq_flags);
}

struct dquot_warn {
	struct super_block *w_sb;
	struct kqid w_dq_id;
	short w_type;
};

static int warning_issued(struct dquot *dquot, const int warntype)
{
	int flag = (warntype == QUOTA_NL_BHARDWARN ||
		warntype == QUOTA_NL_BSOFTLONGWARN) ? DQ_BLKS_B :
		((warntype == QUOTA_NL_IHARDWARN ||
		warntype == QUOTA_NL_ISOFTLONGWARN) ? DQ_INODES_B : 0);

	if (!flag)
		return 0;
	return test_and_set_bit(flag, &dquot->dq_flags);
}

#ifdef CONFIG_PRINT_QUOTA_WARNING
static int flag_print_warnings = 1;

static int need_print_warning(struct dquot_warn *warn)
{
	if (!flag_print_warnings)
		return 0;

	switch (warn->w_dq_id.type) {
		case USRQUOTA:
			return uid_eq(current_fsuid(), warn->w_dq_id.uid);
		case GRPQUOTA:
			return in_group_p(warn->w_dq_id.gid);
		case PRJQUOTA:
			return 1;
	}
	return 0;
}

/* Print warning to the user who exceeded quota */
static void print_warning(struct dquot_warn *warn)
{
	char *msg = NULL;
	struct tty_struct *tty;
	int warntype = warn->w_type;

	if (warntype == QUOTA_NL_IHARDBELOW ||
	    warntype == QUOTA_NL_ISOFTBELOW ||
	    warntype == QUOTA_NL_BHARDBELOW ||
	    warntype == QUOTA_NL_BSOFTBELOW || !need_print_warning(warn))
		return;

	tty = get_current_tty();
	if (!tty)
		return;
	tty_write_message(tty, warn->w_sb->s_id);
	if (warntype == QUOTA_NL_ISOFTWARN || warntype == QUOTA_NL_BSOFTWARN)
		tty_write_message(tty, ": warning, ");
	else
		tty_write_message(tty, ": write failed, ");
	tty_write_message(tty, quotatypes[warn->w_dq_id.type]);
	switch (warntype) {
		case QUOTA_NL_IHARDWARN:
			msg = " file limit reached.\r\n";
			break;
		case QUOTA_NL_ISOFTLONGWARN:
			msg = " file quota exceeded too long.\r\n";
			break;
		case QUOTA_NL_ISOFTWARN:
			msg = " file quota exceeded.\r\n";
			break;
		case QUOTA_NL_BHARDWARN:
			msg = " block limit reached.\r\n";
			break;
		case QUOTA_NL_BSOFTLONGWARN:
			msg = " block quota exceeded too long.\r\n";
			break;
		case QUOTA_NL_BSOFTWARN:
			msg = " block quota exceeded.\r\n";
			break;
	}
	tty_write_message(tty, msg);
	tty_kref_put(tty);
}
#endif

static void prepare_warning(struct dquot_warn *warn, struct dquot *dquot,
			    int warntype)
{
	if (warning_issued(dquot, warntype))
		return;
	warn->w_type = warntype;
	warn->w_sb = dquot->dq_sb;
	warn->w_dq_id = dquot->dq_id;
}

/*
 * Write warnings to the console and send warning messages over netlink.
 *
 * Note that this function can call into tty and networking code.
 */
static void flush_warnings(struct dquot_warn *warn)
{
	int i;

	for (i = 0; i < MAXQUOTAS; i++) {
		if (warn[i].w_type == QUOTA_NL_NOWARN)
			continue;
#ifdef CONFIG_PRINT_QUOTA_WARNING
		print_warning(&warn[i]);
#endif
		quota_send_warning(warn[i].w_dq_id,
				   warn[i].w_sb->s_dev, warn[i].w_type);
	}
}

static int ignore_hardlimit(struct dquot *dquot)
{
	struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];

	return capable(CAP_SYS_RESOURCE) &&
	       (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
		!(info->dqi_flags & DQF_ROOT_SQUASH));
}

static int dquot_add_inodes(struct dquot *dquot, qsize_t inodes,
			    struct dquot_warn *warn)
{
	qsize_t newinodes;
	int ret = 0;

	spin_lock(&dquot->dq_dqb_lock);
	newinodes = dquot->dq_dqb.dqb_curinodes + inodes;
	if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type) ||
	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
		goto add;

	if (dquot->dq_dqb.dqb_ihardlimit &&
	    newinodes > dquot->dq_dqb.dqb_ihardlimit &&
	    !ignore_hardlimit(dquot)) {
		prepare_warning(warn, dquot, QUOTA_NL_IHARDWARN);
		ret = -EDQUOT;
		goto out;
	}

	if (dquot->dq_dqb.dqb_isoftlimit &&
	    newinodes > dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_itime &&
	    ktime_get_real_seconds() >= dquot->dq_dqb.dqb_itime &&
	    !ignore_hardlimit(dquot)) {
		prepare_warning(warn, dquot, QUOTA_NL_ISOFTLONGWARN);
		ret = -EDQUOT;
		goto out;
	}

	if (dquot->dq_dqb.dqb_isoftlimit &&
	    newinodes > dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_itime == 0) {
		prepare_warning(warn, dquot, QUOTA_NL_ISOFTWARN);
		dquot->dq_dqb.dqb_itime = ktime_get_real_seconds() +
		    sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type].dqi_igrace;
	}
add:
	dquot->dq_dqb.dqb_curinodes = newinodes;

out:
	spin_unlock(&dquot->dq_dqb_lock);
	return ret;
}

static int dquot_add_space(struct dquot *dquot, qsize_t space,
			   qsize_t rsv_space, unsigned int flags,
			   struct dquot_warn *warn)
{
	qsize_t tspace;
	struct super_block *sb = dquot->dq_sb;
	int ret = 0;

	spin_lock(&dquot->dq_dqb_lock);
	if (!sb_has_quota_limits_enabled(sb, dquot->dq_id.type) ||
	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
		goto finish;

	tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace
		+ space + rsv_space;

	if (dquot->dq_dqb.dqb_bhardlimit &&
	    tspace > dquot->dq_dqb.dqb_bhardlimit &&
	    !ignore_hardlimit(dquot)) {
		if (flags & DQUOT_SPACE_WARN)
			prepare_warning(warn, dquot, QUOTA_NL_BHARDWARN);
		ret = -EDQUOT;
		goto finish;
	}

	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    tspace > dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_btime &&
	    ktime_get_real_seconds() >= dquot->dq_dqb.dqb_btime &&
	    !ignore_hardlimit(dquot)) {
		if (flags & DQUOT_SPACE_WARN)
			prepare_warning(warn, dquot, QUOTA_NL_BSOFTLONGWARN);
		ret = -EDQUOT;
		goto finish;
	}

	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    tspace > dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_btime == 0) {
		if (flags & DQUOT_SPACE_WARN) {
			prepare_warning(warn, dquot, QUOTA_NL_BSOFTWARN);
			dquot->dq_dqb.dqb_btime = ktime_get_real_seconds() +
			    sb_dqopt(sb)->info[dquot->dq_id.type].dqi_bgrace;
		} else {
			/*
			 * We don't allow preallocation to exceed the softlimit
			 * so exceeding will always be printed
			 */
			ret = -EDQUOT;
			goto finish;
		}
	}
finish:
	/*
	 * We have to be careful and go through warning generation & grace time
	 * setting even if DQUOT_SPACE_NOFAIL is set. That's why we check it
	 * only here...
	 */
	if (flags & DQUOT_SPACE_NOFAIL)
		ret = 0;
	if (!ret) {
		dquot->dq_dqb.dqb_rsvspace += rsv_space;
		dquot->dq_dqb.dqb_curspace += space;
	}
	spin_unlock(&dquot->dq_dqb_lock);
	return ret;
}
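
/*
 * Worked example of the soft-limit grace logic above (the values are made
 * up): with dqb_bsoftlimit = 100 MB and dqi_bgrace = 7 days, the first
 * allocation that pushes usage past 100 MB sets dqb_btime = now + 7 days and
 * only warns (QUOTA_NL_BSOFTWARN). Further allocations keep succeeding until
 * either the hard limit is hit or the 7 days pass; after that,
 * dquot_add_space() returns -EDQUOT (QUOTA_NL_BSOFTLONGWARN). Dropping back
 * to or under the soft limit resets dqb_btime to 0 (see dquot_decr_space()).
 */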

static int info_idq_free(struct dquot *dquot, qsize_t inodes)
{
	qsize_t newinodes;

	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
	    dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit ||
	    !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type))
		return QUOTA_NL_NOWARN;

	newinodes = dquot->dq_dqb.dqb_curinodes - inodes;
	if (newinodes <= dquot->dq_dqb.dqb_isoftlimit)
		return QUOTA_NL_ISOFTBELOW;
	if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit &&
	    newinodes < dquot->dq_dqb.dqb_ihardlimit)
		return QUOTA_NL_IHARDBELOW;
	return QUOTA_NL_NOWARN;
}

static int info_bdq_free(struct dquot *dquot, qsize_t space)
{
	qsize_t tspace;

	tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace;

	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
	    tspace <= dquot->dq_dqb.dqb_bsoftlimit)
		return QUOTA_NL_NOWARN;

	if (tspace - space <= dquot->dq_dqb.dqb_bsoftlimit)
		return QUOTA_NL_BSOFTBELOW;
	if (tspace >= dquot->dq_dqb.dqb_bhardlimit &&
	    tspace - space < dquot->dq_dqb.dqb_bhardlimit)
		return QUOTA_NL_BHARDBELOW;
	return QUOTA_NL_NOWARN;
}

static int inode_quota_active(const struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (IS_NOQUOTA(inode))
		return 0;
	return sb_any_quota_loaded(sb) & ~sb_any_quota_suspended(sb);
}

/*
 * Initialize quota pointers in inode
 *
 * It is better to call this function outside of any transaction as it
 * might need a lot of space in journal for dquot structure allocation.
 */
static int __dquot_initialize(struct inode *inode, int type)
{
	int cnt, init_needed = 0;
	struct dquot __rcu **dquots;
	struct dquot *got[MAXQUOTAS] = {};
	struct super_block *sb = inode->i_sb;
	qsize_t rsv;
	int ret = 0;

	if (!inode_quota_active(inode))
		return 0;

	dquots = i_dquot(inode);

	/* First get references to structures we might need. */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		struct kqid qid;
		kprojid_t projid;
		int rc;
		struct dquot *dquot;

		if (type != -1 && cnt != type)
			continue;
		/*
		 * The i_dquot should have been initialized in most cases;
		 * we check it without locking here to avoid unnecessary
		 * dqget()/dqput() calls.
		 */
		if (dquots[cnt])
			continue;

		if (!sb_has_quota_active(sb, cnt))
			continue;

		init_needed = 1;

		switch (cnt) {
		case USRQUOTA:
			qid = make_kqid_uid(inode->i_uid);
			break;
		case GRPQUOTA:
			qid = make_kqid_gid(inode->i_gid);
			break;
		case PRJQUOTA:
			rc = inode->i_sb->dq_op->get_projid(inode, &projid);
			if (rc)
				continue;
			qid = make_kqid_projid(projid);
			break;
		}
		dquot = dqget(sb, qid);
		if (IS_ERR(dquot)) {
			/* We raced with somebody turning quotas off... */
			if (PTR_ERR(dquot) != -ESRCH) {
				ret = PTR_ERR(dquot);
				goto out_put;
			}
			dquot = NULL;
		}
		got[cnt] = dquot;
	}

	/* All required i_dquot pointers have been initialized */
	if (!init_needed)
		return 0;

	spin_lock(&dq_data_lock);
	if (IS_NOQUOTA(inode))
		goto out_lock;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		/* Avoid races with quotaoff() */
		if (!sb_has_quota_active(sb, cnt))
			continue;
		/* We could race with quotaon or dqget() could have failed */
		if (!got[cnt])
			continue;
		if (!dquots[cnt]) {
			rcu_assign_pointer(dquots[cnt], got[cnt]);
			got[cnt] = NULL;
			/*
			 * Make quota reservation system happy if someone
			 * did a write before quota was turned on
			 */
			rsv = inode_get_rsv_space(inode);
			if (unlikely(rsv)) {
				struct dquot *dquot = srcu_dereference_check(
					dquots[cnt], &dquot_srcu,
					lockdep_is_held(&dq_data_lock));

				spin_lock(&inode->i_lock);
				/* Get reservation again under proper lock */
				rsv = __inode_get_rsv_space(inode);
				spin_lock(&dquot->dq_dqb_lock);
				dquot->dq_dqb.dqb_rsvspace += rsv;
				spin_unlock(&dquot->dq_dqb_lock);
				spin_unlock(&inode->i_lock);
			}
		}
	}
out_lock:
	spin_unlock(&dq_data_lock);
out_put:
	/* Drop unused references */
	dqput_all(got);

	return ret;
}

int dquot_initialize(struct inode *inode)
{
	return __dquot_initialize(inode, -1);
}
EXPORT_SYMBOL(dquot_initialize);
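
/*
 * Filesystems typically call dquot_initialize() before any operation that
 * will charge quota against an inode. A minimal sketch of such a call site
 * (illustrative only; myfs_create is a hypothetical example):
 *
 *	static int myfs_create(struct inode *dir, struct dentry *dentry, ...)
 *	{
 *		int err = dquot_initialize(dir);
 *
 *		if (err)
 *			return err;
 *		// ... allocate the new inode, which charges quota ...
 *	}
 */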

bool dquot_initialize_needed(struct inode *inode)
{
	struct dquot __rcu **dquots;
	int i;

	if (!inode_quota_active(inode))
		return false;

	dquots = i_dquot(inode);
	for (i = 0; i < MAXQUOTAS; i++)
		if (!dquots[i] && sb_has_quota_active(inode->i_sb, i))
			return true;
	return false;
}
EXPORT_SYMBOL(dquot_initialize_needed);

/*
 * Release all quotas referenced by inode.
 *
 * This function is only called on inode free or when converting
 * a file to a quota file. In both cases there are no other users
 * of the i_dquot pointers, so we need not call synchronize_srcu()
 * after clearing i_dquot.
 */
static void __dquot_drop(struct inode *inode)
{
	int cnt;
	struct dquot __rcu **dquots = i_dquot(inode);
	struct dquot *put[MAXQUOTAS];

	spin_lock(&dq_data_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		put[cnt] = srcu_dereference_check(dquots[cnt], &dquot_srcu,
					lockdep_is_held(&dq_data_lock));
		rcu_assign_pointer(dquots[cnt], NULL);
	}
	spin_unlock(&dq_data_lock);
	dqput_all(put);
}

void dquot_drop(struct inode *inode)
{
	struct dquot __rcu * const *dquots;
	int cnt;

	if (IS_NOQUOTA(inode))
		return;

	/*
	 * Test before calling to rule out calls from proc and such
	 * where we are not allowed to block. Note that this is
	 * actually a reliable test even without the lock - the caller
	 * must assure that nobody can come after the DQUOT_DROP and
	 * add quota pointers back anyway.
	 */
	dquots = i_dquot(inode);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (dquots[cnt])
			break;
	}

	if (cnt < MAXQUOTAS)
		__dquot_drop(inode);
}
EXPORT_SYMBOL(dquot_drop);
1647  
1648  /*
1649   * inode_reserved_space is managed internally by quota, and protected by
1650   * i_lock similar to i_blocks+i_bytes.
1651   */
1652  static qsize_t *inode_reserved_space(struct inode * inode)
1653  {
1654  	/* Filesystem must explicitly define its own method in order to use
1655  	 * the quota reservation interface */
1656  	BUG_ON(!inode->i_sb->dq_op->get_reserved_space);
1657  	return inode->i_sb->dq_op->get_reserved_space(inode);
1658  }
1659  
1660  static qsize_t __inode_get_rsv_space(struct inode *inode)
1661  {
1662  	if (!inode->i_sb->dq_op->get_reserved_space)
1663  		return 0;
1664  	return *inode_reserved_space(inode);
1665  }
1666  
1667  static qsize_t inode_get_rsv_space(struct inode *inode)
1668  {
1669  	qsize_t ret;
1670  
1671  	if (!inode->i_sb->dq_op->get_reserved_space)
1672  		return 0;
1673  	spin_lock(&inode->i_lock);
1674  	ret = __inode_get_rsv_space(inode);
1675  	spin_unlock(&inode->i_lock);
1676  	return ret;
1677  }
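
/*
 * Editorial sketch, not part of the original file: a filesystem opts in
 * to the reservation interface by returning a pointer to a qsize_t it
 * owns (the myfs_* names and i_reserved_bytes field are hypothetical):
 *
 *	static qsize_t *myfs_get_reserved_space(struct inode *inode)
 *	{
 *		return &MYFS_I(inode)->i_reserved_bytes;
 *	}
 *
 * and by setting .get_reserved_space = myfs_get_reserved_space in its
 * struct dquot_operations. The field is then serialized by i_lock as
 * described above.
 */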
1678  
1679  /*
1680   * This function updates the i_blocks+i_bytes fields and quota information
1681   * (together with appropriate checks).
1682   *
1683   * NOTE: We absolutely rely on the fact that caller dirties the inode
1684   * (usually helpers in quotaops.h care about this) and holds a handle for
1685   * the current transaction so that dquot write and inode write go into the
1686   * same transaction.
1687   */
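
/*
 * Editorial sketch, not part of the original file: the typical calling
 * pattern from a journalling filesystem's block allocation path, using
 * the <linux/quotaops.h> wrappers (the myfs_* names are hypothetical):
 *
 *	handle = myfs_journal_start(inode->i_sb);
 *	err = dquot_alloc_block(inode, nr);	// may return -EDQUOT
 *	if (!err) {
 *		myfs_allocate_blocks(inode, nr);
 *		mark_inode_dirty(inode);	// inode + dquot share the handle
 *	}
 *	myfs_journal_stop(handle);
 */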
1688  
1689  /*
1690   * This operation can block, but only after everything is updated
1691   */
1692  int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
1693  {
1694  	int cnt, ret = 0, index;
1695  	struct dquot_warn warn[MAXQUOTAS];
1696  	int reserve = flags & DQUOT_SPACE_RESERVE;
1697  	struct dquot __rcu **dquots;
1698  	struct dquot *dquot;
1699  
1700  	if (!inode_quota_active(inode)) {
1701  		if (reserve) {
1702  			spin_lock(&inode->i_lock);
1703  			*inode_reserved_space(inode) += number;
1704  			spin_unlock(&inode->i_lock);
1705  		} else {
1706  			inode_add_bytes(inode, number);
1707  		}
1708  		goto out;
1709  	}
1710  
1711  	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1712  		warn[cnt].w_type = QUOTA_NL_NOWARN;
1713  
1714  	dquots = i_dquot(inode);
1715  	index = srcu_read_lock(&dquot_srcu);
1716  	spin_lock(&inode->i_lock);
1717  	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1718  		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
1719  		if (!dquot)
1720  			continue;
1721  		if (reserve) {
1722  			ret = dquot_add_space(dquot, 0, number, flags, &warn[cnt]);
1723  		} else {
1724  			ret = dquot_add_space(dquot, number, 0, flags, &warn[cnt]);
1725  		}
1726  		if (ret) {
1727  			/* Back out changes we already did */
1728  			for (cnt--; cnt >= 0; cnt--) {
1729  				dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
1730  				if (!dquot)
1731  					continue;
1732  				spin_lock(&dquot->dq_dqb_lock);
1733  				if (reserve)
1734  					dquot_free_reserved_space(dquot, number);
1735  				else
1736  					dquot_decr_space(dquot, number);
1737  				spin_unlock(&dquot->dq_dqb_lock);
1738  			}
1739  			spin_unlock(&inode->i_lock);
1740  			goto out_flush_warn;
1741  		}
1742  	}
1743  	if (reserve)
1744  		*inode_reserved_space(inode) += number;
1745  	else
1746  		__inode_add_bytes(inode, number);
1747  	spin_unlock(&inode->i_lock);
1748  
1749  	if (reserve)
1750  		goto out_flush_warn;
1751  	mark_all_dquot_dirty(dquots);
1752  out_flush_warn:
1753  	srcu_read_unlock(&dquot_srcu, index);
1754  	flush_warnings(warn);
1755  out:
1756  	return ret;
1757  }
1758  EXPORT_SYMBOL(__dquot_alloc_space);
1759  
1760  /*
1761   * This operation can block, but only after everything is updated
1762   */
1763  int dquot_alloc_inode(struct inode *inode)
1764  {
1765  	int cnt, ret = 0, index;
1766  	struct dquot_warn warn[MAXQUOTAS];
1767  	struct dquot __rcu * const *dquots;
1768  	struct dquot *dquot;
1769  
1770  	if (!inode_quota_active(inode))
1771  		return 0;
1772  	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1773  		warn[cnt].w_type = QUOTA_NL_NOWARN;
1774  
1775  	dquots = i_dquot(inode);
1776  	index = srcu_read_lock(&dquot_srcu);
1777  	spin_lock(&inode->i_lock);
1778  	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1779  		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
1780  		if (!dquot)
1781  			continue;
1782  		ret = dquot_add_inodes(dquot, 1, &warn[cnt]);
1783  		if (ret) {
1784  			for (cnt--; cnt >= 0; cnt--) {
1785  				dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
1786  				if (!dquot)
1787  					continue;
1788  				/* Back out changes we already did */
1789  				spin_lock(&dquot->dq_dqb_lock);
1790  				dquot_decr_inodes(dquot, 1);
1791  				spin_unlock(&dquot->dq_dqb_lock);
1792  			}
1793  			goto warn_put_all;
1794  		}
1795  	}
1796  
1797  warn_put_all:
1798  	spin_unlock(&inode->i_lock);
1799  	if (ret == 0)
1800  		mark_all_dquot_dirty(dquots);
1801  	srcu_read_unlock(&dquot_srcu, index);
1802  	flush_warnings(warn);
1803  	return ret;
1804  }
1805  EXPORT_SYMBOL(dquot_alloc_inode);
1806  
1807  /*
1808   * Convert in-memory reserved quotas to real consumed quotas
1809   */
1810  int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
1811  {
1812  	struct dquot __rcu **dquots;
1813  	struct dquot *dquot;
1814  	int cnt, index;
1815  
1816  	if (!inode_quota_active(inode)) {
1817  		spin_lock(&inode->i_lock);
1818  		*inode_reserved_space(inode) -= number;
1819  		__inode_add_bytes(inode, number);
1820  		spin_unlock(&inode->i_lock);
1821  		return 0;
1822  	}
1823  
1824  	dquots = i_dquot(inode);
1825  	index = srcu_read_lock(&dquot_srcu);
1826  	spin_lock(&inode->i_lock);
1827  	/* Convert reserved quotas to allocated quotas */
1828  	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1829  		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
1830  		if (dquot) {
1831  			spin_lock(&dquot->dq_dqb_lock);
1832  			if (WARN_ON_ONCE(dquot->dq_dqb.dqb_rsvspace < number))
1833  				number = dquot->dq_dqb.dqb_rsvspace;
1834  			dquot->dq_dqb.dqb_curspace += number;
1835  			dquot->dq_dqb.dqb_rsvspace -= number;
1836  			spin_unlock(&dquot->dq_dqb_lock);
1837  		}
1838  	}
1839  	/* Update inode bytes */
1840  	*inode_reserved_space(inode) -= number;
1841  	__inode_add_bytes(inode, number);
1842  	spin_unlock(&inode->i_lock);
1843  	mark_all_dquot_dirty(dquots);
1844  	srcu_read_unlock(&dquot_srcu, index);
1845  	return 0;
1846  }
1847  EXPORT_SYMBOL(dquot_claim_space_nodirty);
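
/*
 * Editorial sketch, not part of the original file: the delayed
 * allocation round trip served by the claim/reclaim helpers, via the
 * <linux/quotaops.h> wrappers:
 *
 *	err = dquot_reserve_block(inode, nr);	// at buffered-write time
 *	if (err)
 *		return err;
 *	// later, at writeback, once blocks are actually allocated:
 *	err = dquot_claim_block(inode, nr);
 *
 * dquot_reclaim_block() is the inverse of the claim and ends up in
 * dquot_reclaim_space_nodirty() below.
 */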
1848  
1849  /*
1850   * Convert allocated space back to in-memory reserved quotas
1851   */
1852  void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
1853  {
1854  	struct dquot __rcu **dquots;
1855  	struct dquot *dquot;
1856  	int cnt, index;
1857  
1858  	if (!inode_quota_active(inode)) {
1859  		spin_lock(&inode->i_lock);
1860  		*inode_reserved_space(inode) += number;
1861  		__inode_sub_bytes(inode, number);
1862  		spin_unlock(&inode->i_lock);
1863  		return;
1864  	}
1865  
1866  	dquots = i_dquot(inode);
1867  	index = srcu_read_lock(&dquot_srcu);
1868  	spin_lock(&inode->i_lock);
1869  	/* Convert allocated quotas back to reserved quotas */
1870  	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1871  		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
1872  		if (dquot) {
1873  			spin_lock(&dquot->dq_dqb_lock);
1874  			if (WARN_ON_ONCE(dquot->dq_dqb.dqb_curspace < number))
1875  				number = dquot->dq_dqb.dqb_curspace;
1876  			dquot->dq_dqb.dqb_rsvspace += number;
1877  			dquot->dq_dqb.dqb_curspace -= number;
1878  			spin_unlock(&dquot->dq_dqb_lock);
1879  		}
1880  	}
1881  	/* Update inode bytes */
1882  	*inode_reserved_space(inode) += number;
1883  	__inode_sub_bytes(inode, number);
1884  	spin_unlock(&inode->i_lock);
1885  	mark_all_dquot_dirty(dquots);
1886  	srcu_read_unlock(&dquot_srcu, index);
1887  	return;
1888  }
1889  EXPORT_SYMBOL(dquot_reclaim_space_nodirty);
1890  
1891  /*
1892   * This operation can block, but only after everything is updated
1893   */
1894  void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
1895  {
1896  	unsigned int cnt;
1897  	struct dquot_warn warn[MAXQUOTAS];
1898  	struct dquot __rcu **dquots;
1899  	struct dquot *dquot;
1900  	int reserve = flags & DQUOT_SPACE_RESERVE, index;
1901  
1902  	if (!inode_quota_active(inode)) {
1903  		if (reserve) {
1904  			spin_lock(&inode->i_lock);
1905  			*inode_reserved_space(inode) -= number;
1906  			spin_unlock(&inode->i_lock);
1907  		} else {
1908  			inode_sub_bytes(inode, number);
1909  		}
1910  		return;
1911  	}
1912  
1913  	dquots = i_dquot(inode);
1914  	index = srcu_read_lock(&dquot_srcu);
1915  	spin_lock(&inode->i_lock);
1916  	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1917  		int wtype;
1918  
1919  		warn[cnt].w_type = QUOTA_NL_NOWARN;
1920  		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
1921  		if (!dquot)
1922  			continue;
1923  		spin_lock(&dquot->dq_dqb_lock);
1924  		wtype = info_bdq_free(dquot, number);
1925  		if (wtype != QUOTA_NL_NOWARN)
1926  			prepare_warning(&warn[cnt], dquot, wtype);
1927  		if (reserve)
1928  			dquot_free_reserved_space(dquot, number);
1929  		else
1930  			dquot_decr_space(dquot, number);
1931  		spin_unlock(&dquot->dq_dqb_lock);
1932  	}
1933  	if (reserve)
1934  		*inode_reserved_space(inode) -= number;
1935  	else
1936  		__inode_sub_bytes(inode, number);
1937  	spin_unlock(&inode->i_lock);
1938  
1939  	if (reserve)
1940  		goto out_unlock;
1941  	mark_all_dquot_dirty(dquots);
1942  out_unlock:
1943  	srcu_read_unlock(&dquot_srcu, index);
1944  	flush_warnings(warn);
1945  }
1946  EXPORT_SYMBOL(__dquot_free_space);
1947  
1948  /*
1949   * This operation can block, but only after everything is updated
1950   */
1951  void dquot_free_inode(struct inode *inode)
1952  {
1953  	unsigned int cnt;
1954  	struct dquot_warn warn[MAXQUOTAS];
1955  	struct dquot __rcu * const *dquots;
1956  	struct dquot *dquot;
1957  	int index;
1958  
1959  	if (!inode_quota_active(inode))
1960  		return;
1961  
1962  	dquots = i_dquot(inode);
1963  	index = srcu_read_lock(&dquot_srcu);
1964  	spin_lock(&inode->i_lock);
1965  	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1966  		int wtype;
1967  		warn[cnt].w_type = QUOTA_NL_NOWARN;
1968  		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
1969  		if (!dquot)
1970  			continue;
1971  		spin_lock(&dquot->dq_dqb_lock);
1972  		wtype = info_idq_free(dquot, 1);
1973  		if (wtype != QUOTA_NL_NOWARN)
1974  			prepare_warning(&warn[cnt], dquot, wtype);
1975  		dquot_decr_inodes(dquot, 1);
1976  		spin_unlock(&dquot->dq_dqb_lock);
1977  	}
1978  	spin_unlock(&inode->i_lock);
1979  	mark_all_dquot_dirty(dquots);
1980  	srcu_read_unlock(&dquot_srcu, index);
1981  	flush_warnings(warn);
1982  }
1983  EXPORT_SYMBOL(dquot_free_inode);
1984  
1985  /*
1986   * Transfer the number of inodes and blocks from one diskquota to another.
1987   * On success, dquot references in transfer_to are consumed and references
1988   * to original dquots that need to be released are placed there. On failure,
1989   * references are kept untouched.
1990   *
1991   * This operation can block, but only after everything is updated
1992   * A transaction must be started when entering this function.
1993   *
1994   * We are holding reference on transfer_from & transfer_to, no need to
1995   * protect them by srcu_read_lock().
1996   */
1997  int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
1998  {
1999  	qsize_t cur_space;
2000  	qsize_t rsv_space = 0;
2001  	qsize_t inode_usage = 1;
2002  	struct dquot __rcu **dquots;
2003  	struct dquot *transfer_from[MAXQUOTAS] = {};
2004  	int cnt, index, ret = 0;
2005  	char is_valid[MAXQUOTAS] = {};
2006  	struct dquot_warn warn_to[MAXQUOTAS];
2007  	struct dquot_warn warn_from_inodes[MAXQUOTAS];
2008  	struct dquot_warn warn_from_space[MAXQUOTAS];
2009  
2010  	if (IS_NOQUOTA(inode))
2011  		return 0;
2012  
2013  	if (inode->i_sb->dq_op->get_inode_usage) {
2014  		ret = inode->i_sb->dq_op->get_inode_usage(inode, &inode_usage);
2015  		if (ret)
2016  			return ret;
2017  	}
2018  
2019  	/* Initialize the arrays */
2020  	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
2021  		warn_to[cnt].w_type = QUOTA_NL_NOWARN;
2022  		warn_from_inodes[cnt].w_type = QUOTA_NL_NOWARN;
2023  		warn_from_space[cnt].w_type = QUOTA_NL_NOWARN;
2024  	}
2025  
2026  	spin_lock(&dq_data_lock);
2027  	spin_lock(&inode->i_lock);
2028  	if (IS_NOQUOTA(inode)) {	/* File without quota accounting? */
2029  		spin_unlock(&inode->i_lock);
2030  		spin_unlock(&dq_data_lock);
2031  		return 0;
2032  	}
2033  	cur_space = __inode_get_bytes(inode);
2034  	rsv_space = __inode_get_rsv_space(inode);
2035  	dquots = i_dquot(inode);
2036  	/*
2037  	 * Build the transfer_from list, check limits, and update usage in
2038  	 * the target structures.
2039  	 */
2040  	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
2041  		/*
2042  		 * Skip changes for the same uid or gid, or for a turned-off quota type.
2043  		 */
2044  		if (!transfer_to[cnt])
2045  			continue;
2046  		/* Avoid races with quotaoff() */
2047  		if (!sb_has_quota_active(inode->i_sb, cnt))
2048  			continue;
2049  		is_valid[cnt] = 1;
2050  		transfer_from[cnt] = srcu_dereference_check(dquots[cnt],
2051  				&dquot_srcu, lockdep_is_held(&dq_data_lock));
2052  		ret = dquot_add_inodes(transfer_to[cnt], inode_usage,
2053  				       &warn_to[cnt]);
2054  		if (ret)
2055  			goto over_quota;
2056  		ret = dquot_add_space(transfer_to[cnt], cur_space, rsv_space,
2057  				      DQUOT_SPACE_WARN, &warn_to[cnt]);
2058  		if (ret) {
2059  			spin_lock(&transfer_to[cnt]->dq_dqb_lock);
2060  			dquot_decr_inodes(transfer_to[cnt], inode_usage);
2061  			spin_unlock(&transfer_to[cnt]->dq_dqb_lock);
2062  			goto over_quota;
2063  		}
2064  	}
2065  
2066  	/* Decrease usage for source structures and update quota pointers */
2067  	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
2068  		if (!is_valid[cnt])
2069  			continue;
2070  		/* Due to an IO error we might not have a transfer_from[] structure */
2071  		if (transfer_from[cnt]) {
2072  			int wtype;
2073  
2074  			spin_lock(&transfer_from[cnt]->dq_dqb_lock);
2075  			wtype = info_idq_free(transfer_from[cnt], inode_usage);
2076  			if (wtype != QUOTA_NL_NOWARN)
2077  				prepare_warning(&warn_from_inodes[cnt],
2078  						transfer_from[cnt], wtype);
2079  			wtype = info_bdq_free(transfer_from[cnt],
2080  					      cur_space + rsv_space);
2081  			if (wtype != QUOTA_NL_NOWARN)
2082  				prepare_warning(&warn_from_space[cnt],
2083  						transfer_from[cnt], wtype);
2084  			dquot_decr_inodes(transfer_from[cnt], inode_usage);
2085  			dquot_decr_space(transfer_from[cnt], cur_space);
2086  			dquot_free_reserved_space(transfer_from[cnt],
2087  						  rsv_space);
2088  			spin_unlock(&transfer_from[cnt]->dq_dqb_lock);
2089  		}
2090  		rcu_assign_pointer(dquots[cnt], transfer_to[cnt]);
2091  	}
2092  	spin_unlock(&inode->i_lock);
2093  	spin_unlock(&dq_data_lock);
2094  
2095  	/*
2096  	 * These arrays are local and we hold dquot references so we don't need
2097  	 * the srcu protection but still take dquot_srcu to avoid warning in
2098  	 * mark_all_dquot_dirty().
2099  	 */
2100  	index = srcu_read_lock(&dquot_srcu);
2101  	mark_all_dquot_dirty((struct dquot __rcu **)transfer_from);
2102  	mark_all_dquot_dirty((struct dquot __rcu **)transfer_to);
2103  	srcu_read_unlock(&dquot_srcu, index);
2104  
2105  	flush_warnings(warn_to);
2106  	flush_warnings(warn_from_inodes);
2107  	flush_warnings(warn_from_space);
2108  	/* Pass back references to put */
2109  	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
2110  		if (is_valid[cnt])
2111  			transfer_to[cnt] = transfer_from[cnt];
2112  	return 0;
2113  over_quota:
2114  	/* Back out changes we already did */
2115  	for (cnt--; cnt >= 0; cnt--) {
2116  		if (!is_valid[cnt])
2117  			continue;
2118  		spin_lock(&transfer_to[cnt]->dq_dqb_lock);
2119  		dquot_decr_inodes(transfer_to[cnt], inode_usage);
2120  		dquot_decr_space(transfer_to[cnt], cur_space);
2121  		dquot_free_reserved_space(transfer_to[cnt], rsv_space);
2122  		spin_unlock(&transfer_to[cnt]->dq_dqb_lock);
2123  	}
2124  	spin_unlock(&inode->i_lock);
2125  	spin_unlock(&dq_data_lock);
2126  	flush_warnings(warn_to);
2127  	return ret;
2128  }
2129  EXPORT_SYMBOL(__dquot_transfer);
2130  
2131  /* Wrapper for transferring ownership of an inode for uid/gid only.
2132   * Called from FSXXX_setattr()
2133   */
2134  int dquot_transfer(struct mnt_idmap *idmap, struct inode *inode,
2135  		   struct iattr *iattr)
2136  {
2137  	struct dquot *transfer_to[MAXQUOTAS] = {};
2138  	struct dquot *dquot;
2139  	struct super_block *sb = inode->i_sb;
2140  	int ret;
2141  
2142  	if (!inode_quota_active(inode))
2143  		return 0;
2144  
2145  	if (i_uid_needs_update(idmap, iattr, inode)) {
2146  		kuid_t kuid = from_vfsuid(idmap, i_user_ns(inode),
2147  					  iattr->ia_vfsuid);
2148  
2149  		dquot = dqget(sb, make_kqid_uid(kuid));
2150  		if (IS_ERR(dquot)) {
2151  			if (PTR_ERR(dquot) != -ESRCH) {
2152  				ret = PTR_ERR(dquot);
2153  				goto out_put;
2154  			}
2155  			dquot = NULL;
2156  		}
2157  		transfer_to[USRQUOTA] = dquot;
2158  	}
2159  	if (i_gid_needs_update(idmap, iattr, inode)) {
2160  		kgid_t kgid = from_vfsgid(idmap, i_user_ns(inode),
2161  					  iattr->ia_vfsgid);
2162  
2163  		dquot = dqget(sb, make_kqid_gid(kgid));
2164  		if (IS_ERR(dquot)) {
2165  			if (PTR_ERR(dquot) != -ESRCH) {
2166  				ret = PTR_ERR(dquot);
2167  				goto out_put;
2168  			}
2169  			dquot = NULL;
2170  		}
2171  		transfer_to[GRPQUOTA] = dquot;
2172  	}
2173  	ret = __dquot_transfer(inode, transfer_to);
2174  out_put:
2175  	dqput_all(transfer_to);
2176  	return ret;
2177  }
2178  EXPORT_SYMBOL(dquot_transfer);
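
/*
 * Editorial sketch, not part of the original file: the usual call site
 * in a filesystem's ->setattr, before the new owner is committed:
 *
 *	if (is_quota_modification(idmap, inode, attr)) {
 *		error = dquot_initialize(inode);
 *		if (error)
 *			return error;
 *	}
 *	if (i_uid_needs_update(idmap, attr, inode) ||
 *	    i_gid_needs_update(idmap, attr, inode)) {
 *		error = dquot_transfer(idmap, inode, attr);
 *		if (error)
 *			return error;
 *	}
 */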
2179  
2180  /*
2181   * Write info of quota file to disk
2182   */
2183  int dquot_commit_info(struct super_block *sb, int type)
2184  {
2185  	struct quota_info *dqopt = sb_dqopt(sb);
2186  
2187  	return dqopt->ops[type]->write_file_info(sb, type);
2188  }
2189  EXPORT_SYMBOL(dquot_commit_info);
2190  
2191  int dquot_get_next_id(struct super_block *sb, struct kqid *qid)
2192  {
2193  	struct quota_info *dqopt = sb_dqopt(sb);
2194  
2195  	if (!sb_has_quota_active(sb, qid->type))
2196  		return -ESRCH;
2197  	if (!dqopt->ops[qid->type]->get_next_id)
2198  		return -ENOSYS;
2199  	return dqopt->ops[qid->type]->get_next_id(sb, qid);
2200  }
2201  EXPORT_SYMBOL(dquot_get_next_id);
2202  
2203  /*
2204   * Definitions of diskquota operations.
2205   */
2206  const struct dquot_operations dquot_operations = {
2207  	.write_dquot	= dquot_commit,
2208  	.acquire_dquot	= dquot_acquire,
2209  	.release_dquot	= dquot_release,
2210  	.mark_dirty	= dquot_mark_dquot_dirty,
2211  	.write_info	= dquot_commit_info,
2212  	.alloc_dquot	= dquot_alloc,
2213  	.destroy_dquot	= dquot_destroy,
2214  	.get_next_id	= dquot_get_next_id,
2215  };
2216  EXPORT_SYMBOL(dquot_operations);
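
/*
 * Editorial sketch, not part of the original file: a filesystem using
 * the generic implementation installs these from its fill_super; the
 * quota-type mask shown is an assumption:
 *
 *	sb->dq_op = &dquot_operations;
 *	sb->s_qcop = &dquot_quotactl_sysfile_ops;	// defined below
 *	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
 */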
2217  
2218  /*
2219   * Generic helper for ->open on filesystems supporting disk quotas.
2220   */
2221  int dquot_file_open(struct inode *inode, struct file *file)
2222  {
2223  	int error;
2224  
2225  	error = generic_file_open(inode, file);
2226  	if (!error && (file->f_mode & FMODE_WRITE))
2227  		error = dquot_initialize(inode);
2228  	return error;
2229  }
2230  EXPORT_SYMBOL(dquot_file_open);
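
/*
 * Editorial sketch, not part of the original file: typically wired up
 * directly (the myfs_* name is hypothetical):
 *
 *	const struct file_operations myfs_file_operations = {
 *		.open		= dquot_file_open,
 *		.read_iter	= generic_file_read_iter,
 *		.write_iter	= generic_file_write_iter,
 *	};
 *
 * so that quotas are initialized before the first write to the file.
 */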
2231  
2232  static void vfs_cleanup_quota_inode(struct super_block *sb, int type)
2233  {
2234  	struct quota_info *dqopt = sb_dqopt(sb);
2235  	struct inode *inode = dqopt->files[type];
2236  
2237  	if (!inode)
2238  		return;
2239  	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
2240  		inode_lock(inode);
2241  		inode->i_flags &= ~S_NOQUOTA;
2242  		inode_unlock(inode);
2243  	}
2244  	dqopt->files[type] = NULL;
2245  	iput(inode);
2246  }
2247  
2248  /*
2249   * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount)
2250   */
2251  int dquot_disable(struct super_block *sb, int type, unsigned int flags)
2252  {
2253  	int cnt;
2254  	struct quota_info *dqopt = sb_dqopt(sb);
2255  
2256  	/* s_umount should be held in exclusive mode */
2257  	if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
2258  		up_read(&sb->s_umount);
2259  
2260  	/* We cannot turn off usage accounting without turning off limits,
2261  	 * nor suspend quotas while simultaneously turning them off. */
2262  	if ((flags & DQUOT_USAGE_ENABLED && !(flags & DQUOT_LIMITS_ENABLED))
2263  	    || (flags & DQUOT_SUSPENDED && flags & (DQUOT_LIMITS_ENABLED |
2264  	    DQUOT_USAGE_ENABLED)))
2265  		return -EINVAL;
2266  
2267  	/*
2268  	 * Skip everything if there's nothing to do. We have to do this because
2269  	 * sometimes we are called when fill_super() failed and calling
2270  	 * sync_fs() in such cases does no good.
2271  	 */
2272  	if (!sb_any_quota_loaded(sb))
2273  		return 0;
2274  
2275  	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
2276  		if (type != -1 && cnt != type)
2277  			continue;
2278  		if (!sb_has_quota_loaded(sb, cnt))
2279  			continue;
2280  
2281  		if (flags & DQUOT_SUSPENDED) {
2282  			spin_lock(&dq_state_lock);
2283  			dqopt->flags |=
2284  				dquot_state_flag(DQUOT_SUSPENDED, cnt);
2285  			spin_unlock(&dq_state_lock);
2286  		} else {
2287  			spin_lock(&dq_state_lock);
2288  			dqopt->flags &= ~dquot_state_flag(flags, cnt);
2289  			/* Turning off suspended quotas? */
2290  			if (!sb_has_quota_loaded(sb, cnt) &&
2291  			    sb_has_quota_suspended(sb, cnt)) {
2292  				dqopt->flags &=	~dquot_state_flag(
2293  							DQUOT_SUSPENDED, cnt);
2294  				spin_unlock(&dq_state_lock);
2295  				vfs_cleanup_quota_inode(sb, cnt);
2296  				continue;
2297  			}
2298  			spin_unlock(&dq_state_lock);
2299  		}
2300  
2301  		/* Do we still have to keep quota loaded? */
2302  		if (sb_has_quota_loaded(sb, cnt) && !(flags & DQUOT_SUSPENDED))
2303  			continue;
2304  
2305  		/* Note: these are blocking operations */
2306  		drop_dquot_ref(sb, cnt);
2307  		invalidate_dquots(sb, cnt);
2308  		/*
2309  		 * Now all dquots should be invalidated and all writes done, so
2310  		 * we should be the only users of the info. No locks needed.
2311  		 */
2312  		if (info_dirty(&dqopt->info[cnt]))
2313  			sb->dq_op->write_info(sb, cnt);
2314  		if (dqopt->ops[cnt]->free_file_info)
2315  			dqopt->ops[cnt]->free_file_info(sb, cnt);
2316  		put_quota_format(dqopt->info[cnt].dqi_format);
2317  		dqopt->info[cnt].dqi_flags = 0;
2318  		dqopt->info[cnt].dqi_igrace = 0;
2319  		dqopt->info[cnt].dqi_bgrace = 0;
2320  		dqopt->ops[cnt] = NULL;
2321  	}
2322  
2323  	/* Skip syncing and setting flags if quota files are hidden */
2324  	if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
2325  		goto put_inodes;
2326  
2327  	/* Sync the superblock so that buffers with quota data are written to
2328  	 * disk (and so userspace sees correct data afterwards). */
2329  	if (sb->s_op->sync_fs)
2330  		sb->s_op->sync_fs(sb, 1);
2331  	sync_blockdev(sb->s_bdev);
2332  	/* Now the quota files are just ordinary files and we can set the
2333  	 * inode flags back. Moreover we discard the pagecache so that
2334  	 * userspace sees the writes we did bypassing the pagecache. We
2335  	 * must also discard the blockdev buffers so that we see the
2336  	 * changes done by userspace on the next quotaon(). */
2337  	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
2338  		if (!sb_has_quota_loaded(sb, cnt) && dqopt->files[cnt]) {
2339  			inode_lock(dqopt->files[cnt]);
2340  			truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
2341  			inode_unlock(dqopt->files[cnt]);
2342  		}
2343  	if (sb->s_bdev)
2344  		invalidate_bdev(sb->s_bdev);
2345  put_inodes:
2346  	/* We are done when suspending quotas */
2347  	if (flags & DQUOT_SUSPENDED)
2348  		return 0;
2349  
2350  	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
2351  		if (!sb_has_quota_loaded(sb, cnt))
2352  			vfs_cleanup_quota_inode(sb, cnt);
2353  	return 0;
2354  }
2355  EXPORT_SYMBOL(dquot_disable);
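
/*
 * Editorial sketch, not part of the original file: a umount path turns
 * everything off in one call, while a remount read-only path suspends
 * instead via the dquot_suspend() wrapper from <linux/quotaops.h>:
 *
 *	dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
 *	// or, on remount-ro:
 *	dquot_suspend(sb, -1);	// passes DQUOT_SUSPENDED for all types
 */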
2356  
2357  int dquot_quota_off(struct super_block *sb, int type)
2358  {
2359  	return dquot_disable(sb, type,
2360  			     DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
2361  }
2362  EXPORT_SYMBOL(dquot_quota_off);
2363  
2364  /*
2365   *	Turn quotas on for a device
2366   */
2367  
2368  static int vfs_setup_quota_inode(struct inode *inode, int type)
2369  {
2370  	struct super_block *sb = inode->i_sb;
2371  	struct quota_info *dqopt = sb_dqopt(sb);
2372  
2373  	if (is_bad_inode(inode))
2374  		return -EUCLEAN;
2375  	if (!S_ISREG(inode->i_mode))
2376  		return -EACCES;
2377  	if (IS_RDONLY(inode))
2378  		return -EROFS;
2379  	if (sb_has_quota_loaded(sb, type))
2380  		return -EBUSY;
2381  
2382  	/*
2383  	 * Quota files should never be encrypted.  They should be thought of as
2384  	 * filesystem metadata, not user data.  New-style internal quota files
2385  	 * cannot be encrypted by users anyway, but old-style external quota
2386  	 * files could potentially be incorrectly created in an encrypted
2387  	 * directory, hence this explicit check.  Some reasons why encrypted
2388  	 * quota files don't work include: (1) some filesystems that support
2389  	 * encryption don't handle it in their quota_read and quota_write, and
2390  	 * (2) cleaning up encrypted quota files at unmount would need special
2391  	 * consideration, as quota files are cleaned up later than user files.
2392  	 */
2393  	if (IS_ENCRYPTED(inode))
2394  		return -EINVAL;
2395  
2396  	dqopt->files[type] = igrab(inode);
2397  	if (!dqopt->files[type])
2398  		return -EIO;
2399  	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
2400  		/* We don't want quota and atime on quota files (deadlocks
2401  		 * possible). Also, nobody should write to the file - we use
2402  		 * special IO operations which ignore the immutable bit. */
2403  		inode_lock(inode);
2404  		inode->i_flags |= S_NOQUOTA;
2405  		inode_unlock(inode);
2406  		/*
2407  		 * When S_NOQUOTA is set, remove dquot references as no more
2408  		 * references can be added
2409  		 */
2410  		__dquot_drop(inode);
2411  	}
2412  	return 0;
2413  }
2414  
2415  int dquot_load_quota_sb(struct super_block *sb, int type, int format_id,
2416  	unsigned int flags)
2417  {
2418  	struct quota_format_type *fmt = find_quota_format(format_id);
2419  	struct quota_info *dqopt = sb_dqopt(sb);
2420  	int error;
2421  
2422  	lockdep_assert_held_write(&sb->s_umount);
2423  
2424  	/* Just unsuspend quotas? */
2425  	BUG_ON(flags & DQUOT_SUSPENDED);
2426  
2427  	if (!fmt)
2428  		return -ESRCH;
2429  	if (!sb->dq_op || !sb->s_qcop ||
2430  	    (type == PRJQUOTA && sb->dq_op->get_projid == NULL)) {
2431  		error = -EINVAL;
2432  		goto out_fmt;
2433  	}
2434  	/* Filesystems outside of init_user_ns not yet supported */
2435  	if (sb->s_user_ns != &init_user_ns) {
2436  		error = -EINVAL;
2437  		goto out_fmt;
2438  	}
2439  	/* Usage always has to be set... */
2440  	if (!(flags & DQUOT_USAGE_ENABLED)) {
2441  		error = -EINVAL;
2442  		goto out_fmt;
2443  	}
2444  	if (sb_has_quota_loaded(sb, type)) {
2445  		error = -EBUSY;
2446  		goto out_fmt;
2447  	}
2448  
2449  	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
2450  		/* As we bypass the pagecache we must now flush all the
2451  		 * dirty data and invalidate caches so that kernel sees
2452  		 * dirty data and invalidate caches so that the kernel sees
2453  		 * the quota file since if blocksize < pagesize, invalidation
2454  		 * of the cache could fail because of other unrelated dirty
2455  		 * data */
2456  		 * data. */
2457  		invalidate_bdev(sb->s_bdev);
2458  	}
2459  
2460  	error = -EINVAL;
2461  	if (!fmt->qf_ops->check_quota_file(sb, type))
2462  		goto out_fmt;
2463  
2464  	dqopt->ops[type] = fmt->qf_ops;
2465  	dqopt->info[type].dqi_format = fmt;
2466  	dqopt->info[type].dqi_fmt_id = format_id;
2467  	INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
2468  	error = dqopt->ops[type]->read_file_info(sb, type);
2469  	if (error < 0)
2470  		goto out_fmt;
2471  	if (dqopt->flags & DQUOT_QUOTA_SYS_FILE) {
2472  		spin_lock(&dq_data_lock);
2473  		dqopt->info[type].dqi_flags |= DQF_SYS_FILE;
2474  		spin_unlock(&dq_data_lock);
2475  	}
2476  	spin_lock(&dq_state_lock);
2477  	dqopt->flags |= dquot_state_flag(flags, type);
2478  	spin_unlock(&dq_state_lock);
2479  
2480  	error = add_dquot_ref(sb, type);
2481  	if (error)
2482  		dquot_disable(sb, type,
2483  			      DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
2484  
2485  	return error;
2486  out_fmt:
2487  	put_quota_format(fmt);
2488  
2489  	return error;
2490  }
2491  EXPORT_SYMBOL(dquot_load_quota_sb);
2492  
2493  /*
2494   * More powerful function for turning on quotas on a given quota inode,
2495   * allowing individual quota flags to be set
2496   */
2497  int dquot_load_quota_inode(struct inode *inode, int type, int format_id,
2498  	unsigned int flags)
2499  {
2500  	int err;
2501  
2502  	err = vfs_setup_quota_inode(inode, type);
2503  	if (err < 0)
2504  		return err;
2505  	err = dquot_load_quota_sb(inode->i_sb, type, format_id, flags);
2506  	if (err < 0)
2507  		vfs_cleanup_quota_inode(inode->i_sb, type);
2508  	return err;
2509  }
2510  EXPORT_SYMBOL(dquot_load_quota_inode);
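
/*
 * Editorial sketch, not part of the original file: enabling a hidden
 * (system-file) quota inode as journalled-quota filesystems do; the
 * format id shown is an assumption:
 *
 *	err = dquot_load_quota_inode(qf_inode, type, QFMT_VFS_V1,
 *				     DQUOT_USAGE_ENABLED |
 *				     DQUOT_LIMITS_ENABLED);
 */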
2511  
2512  /* Reenable quotas on remount RW */
2513  int dquot_resume(struct super_block *sb, int type)
2514  {
2515  	struct quota_info *dqopt = sb_dqopt(sb);
2516  	int ret = 0, cnt;
2517  	unsigned int flags;
2518  
2519  	/* s_umount should be held in exclusive mode */
2520  	if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
2521  		up_read(&sb->s_umount);
2522  
2523  	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
2524  		if (type != -1 && cnt != type)
2525  			continue;
2526  		if (!sb_has_quota_suspended(sb, cnt))
2527  			continue;
2528  
2529  		spin_lock(&dq_state_lock);
2530  		flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED |
2531  							DQUOT_LIMITS_ENABLED,
2532  							cnt);
2533  		dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, cnt);
2534  		spin_unlock(&dq_state_lock);
2535  
2536  		flags = dquot_generic_flag(flags, cnt);
2537  		ret = dquot_load_quota_sb(sb, cnt, dqopt->info[cnt].dqi_fmt_id,
2538  					  flags);
2539  		if (ret < 0)
2540  			vfs_cleanup_quota_inode(sb, cnt);
2541  	}
2542  
2543  	return ret;
2544  }
2545  EXPORT_SYMBOL(dquot_resume);
2546  
2547  int dquot_quota_on(struct super_block *sb, int type, int format_id,
2548  		   const struct path *path)
2549  {
2550  	int error = security_quota_on(path->dentry);
2551  	if (error)
2552  		return error;
2553  	/* Quota file not on the same filesystem? */
2554  	if (path->dentry->d_sb != sb)
2555  		error = -EXDEV;
2556  	else
2557  		error = dquot_load_quota_inode(d_inode(path->dentry), type,
2558  					     format_id, DQUOT_USAGE_ENABLED |
2559  					     DQUOT_LIMITS_ENABLED);
2560  	return error;
2561  }
2562  EXPORT_SYMBOL(dquot_quota_on);
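
/*
 * Editorial sketch, not part of the original file: filesystems with
 * visible quota files typically expose the generic routines through
 * their quotactl operations (the myfs_* name is hypothetical):
 *
 *	const struct quotactl_ops myfs_quotactl_ops = {
 *		.quota_on	= dquot_quota_on,
 *		.quota_off	= dquot_quota_off,
 *		.quota_sync	= dquot_quota_sync,
 *		.get_state	= dquot_get_state,
 *		.set_info	= dquot_set_dqinfo,
 *		.get_dqblk	= dquot_get_dqblk,
 *		.set_dqblk	= dquot_set_dqblk,
 *	};
 */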
2563  
2564  /*
2565   * This function is used when a filesystem needs to initialize quotas
2566   * at mount time.
2567   */
2568  int dquot_quota_on_mount(struct super_block *sb, char *qf_name,
2569  		int format_id, int type)
2570  {
2571  	struct dentry *dentry;
2572  	int error;
2573  
2574  	dentry = lookup_positive_unlocked(qf_name, sb->s_root, strlen(qf_name));
2575  	if (IS_ERR(dentry))
2576  		return PTR_ERR(dentry);
2577  
2578  	error = security_quota_on(dentry);
2579  	if (!error)
2580  		error = dquot_load_quota_inode(d_inode(dentry), type, format_id,
2581  				DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
2582  
2583  	dput(dentry);
2584  	return error;
2585  }
2586  EXPORT_SYMBOL(dquot_quota_on_mount);
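
/*
 * Editorial sketch, not part of the original file: typically invoked
 * for quota files named via mount options (the sbi->qf_names and
 * sbi->qf_format_id fields are hypothetical):
 *
 *	err = dquot_quota_on_mount(sb, sbi->qf_names[type],
 *				   sbi->qf_format_id, type);
 */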
2587  
2588  static int dquot_quota_enable(struct super_block *sb, unsigned int flags)
2589  {
2590  	int ret;
2591  	int type;
2592  	struct quota_info *dqopt = sb_dqopt(sb);
2593  
2594  	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE))
2595  		return -ENOSYS;
2596  	/* Accounting cannot be turned on while fs is mounted */
2597  	flags &= ~(FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT);
2598  	if (!flags)
2599  		return -EINVAL;
2600  	for (type = 0; type < MAXQUOTAS; type++) {
2601  		if (!(flags & qtype_enforce_flag(type)))
2602  			continue;
2603  		/* Can't enforce without accounting */
2604  		if (!sb_has_quota_usage_enabled(sb, type)) {
2605  			ret = -EINVAL;
2606  			goto out_err;
2607  		}
2608  		if (sb_has_quota_limits_enabled(sb, type)) {
2609  			ret = -EBUSY;
2610  			goto out_err;
2611  		}
2612  		spin_lock(&dq_state_lock);
2613  		dqopt->flags |= dquot_state_flag(DQUOT_LIMITS_ENABLED, type);
2614  		spin_unlock(&dq_state_lock);
2615  	}
2616  	return 0;
2617  out_err:
2618  	/* Back out the enforcement enablement we already did */
2619  	for (type--; type >= 0; type--)  {
2620  		if (flags & qtype_enforce_flag(type))
2621  			dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
2622  	}
2623  	/* Error code translation for better compatibility with XFS */
2624  	if (ret == -EBUSY)
2625  		ret = -EEXIST;
2626  	return ret;
2627  }
2628  
2629  static int dquot_quota_disable(struct super_block *sb, unsigned int flags)
2630  {
2631  	int ret;
2632  	int type;
2633  	struct quota_info *dqopt = sb_dqopt(sb);
2634  
2635  	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE))
2636  		return -ENOSYS;
2637  	/*
2638  	 * We don't support turning off accounting via quotactl. In principle
2639  	 * quota infrastructure can do this but filesystems don't expect
2640  	 * userspace to be able to do it.
2641  	 */
2642  	if (flags &
2643  		  (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT))
2644  		return -EOPNOTSUPP;
2645  
2646  	/* Filter out limits not enabled */
2647  	for (type = 0; type < MAXQUOTAS; type++)
2648  		if (!sb_has_quota_limits_enabled(sb, type))
2649  			flags &= ~qtype_enforce_flag(type);
2650  	/* Nothing left? */
2651  	if (!flags)
2652  		return -EEXIST;
2653  	for (type = 0; type < MAXQUOTAS; type++) {
2654  		if (flags & qtype_enforce_flag(type)) {
2655  			ret = dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
2656  			if (ret < 0)
2657  				goto out_err;
2658  		}
2659  	}
2660  	return 0;
2661  out_err:
2662  	/* Back out the enforcement disabling we already did */
2663  	for (type--; type >= 0; type--)  {
2664  		if (flags & qtype_enforce_flag(type)) {
2665  			spin_lock(&dq_state_lock);
2666  			dqopt->flags |=
2667  				dquot_state_flag(DQUOT_LIMITS_ENABLED, type);
2668  			spin_unlock(&dq_state_lock);
2669  		}
2670  	}
2671  	return ret;
2672  }
2673  
2674  /* Generic routine for getting common part of quota structure */
2675  static void do_get_dqblk(struct dquot *dquot, struct qc_dqblk *di)
2676  {
2677  	struct mem_dqblk *dm = &dquot->dq_dqb;
2678  
2679  	memset(di, 0, sizeof(*di));
2680  	spin_lock(&dquot->dq_dqb_lock);
2681  	di->d_spc_hardlimit = dm->dqb_bhardlimit;
2682  	di->d_spc_softlimit = dm->dqb_bsoftlimit;
2683  	di->d_ino_hardlimit = dm->dqb_ihardlimit;
2684  	di->d_ino_softlimit = dm->dqb_isoftlimit;
2685  	di->d_space = dm->dqb_curspace + dm->dqb_rsvspace;
2686  	di->d_ino_count = dm->dqb_curinodes;
2687  	di->d_spc_timer = dm->dqb_btime;
2688  	di->d_ino_timer = dm->dqb_itime;
2689  	spin_unlock(&dquot->dq_dqb_lock);
2690  }
2691  
2692  int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
2693  		    struct qc_dqblk *di)
2694  {
2695  	struct dquot *dquot;
2696  
2697  	dquot = dqget(sb, qid);
2698  	if (IS_ERR(dquot))
2699  		return PTR_ERR(dquot);
2700  	do_get_dqblk(dquot, di);
2701  	dqput(dquot);
2702  
2703  	return 0;
2704  }
2705  EXPORT_SYMBOL(dquot_get_dqblk);
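
/*
 * Editorial sketch, not part of the original file: an in-kernel query
 * of one user's usage and limits:
 *
 *	struct qc_dqblk di;
 *	int err = dquot_get_dqblk(sb, make_kqid_uid(uid), &di);
 *	// on success, di.d_space and di.d_ino_count hold current usage
 */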
2706  
2707  int dquot_get_next_dqblk(struct super_block *sb, struct kqid *qid,
2708  			 struct qc_dqblk *di)
2709  {
2710  	struct dquot *dquot;
2711  	int err;
2712  
2713  	if (!sb->dq_op->get_next_id)
2714  		return -ENOSYS;
2715  	err = sb->dq_op->get_next_id(sb, qid);
2716  	if (err < 0)
2717  		return err;
2718  	dquot = dqget(sb, *qid);
2719  	if (IS_ERR(dquot))
2720  		return PTR_ERR(dquot);
2721  	do_get_dqblk(dquot, di);
2722  	dqput(dquot);
2723  
2724  	return 0;
2725  }
2726  EXPORT_SYMBOL(dquot_get_next_dqblk);
2727  
2728  #define VFS_QC_MASK \
2729  	(QC_SPACE | QC_SPC_SOFT | QC_SPC_HARD | \
2730  	 QC_INO_COUNT | QC_INO_SOFT | QC_INO_HARD | \
2731  	 QC_SPC_TIMER | QC_INO_TIMER)
2732  
2733  /* Generic routine for setting common part of quota structure */
2734  static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di)
2735  {
2736  	struct mem_dqblk *dm = &dquot->dq_dqb;
2737  	int check_blim = 0, check_ilim = 0;
2738  	struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
2739  
2740  	if (di->d_fieldmask & ~VFS_QC_MASK)
2741  		return -EINVAL;
2742  
2743  	if (((di->d_fieldmask & QC_SPC_SOFT) &&
2744  	     di->d_spc_softlimit > dqi->dqi_max_spc_limit) ||
2745  	    ((di->d_fieldmask & QC_SPC_HARD) &&
2746  	     di->d_spc_hardlimit > dqi->dqi_max_spc_limit) ||
2747  	    ((di->d_fieldmask & QC_INO_SOFT) &&
2748  	     (di->d_ino_softlimit > dqi->dqi_max_ino_limit)) ||
2749  	    ((di->d_fieldmask & QC_INO_HARD) &&
2750  	     (di->d_ino_hardlimit > dqi->dqi_max_ino_limit)))
2751  		return -ERANGE;
2752  
2753  	spin_lock(&dquot->dq_dqb_lock);
2754  	if (di->d_fieldmask & QC_SPACE) {
2755  		dm->dqb_curspace = di->d_space - dm->dqb_rsvspace;
2756  		check_blim = 1;
2757  		set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
2758  	}
2759  
2760  	if (di->d_fieldmask & QC_SPC_SOFT)
2761  		dm->dqb_bsoftlimit = di->d_spc_softlimit;
2762  	if (di->d_fieldmask & QC_SPC_HARD)
2763  		dm->dqb_bhardlimit = di->d_spc_hardlimit;
2764  	if (di->d_fieldmask & (QC_SPC_SOFT | QC_SPC_HARD)) {
2765  		check_blim = 1;
2766  		set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
2767  	}
2768  
2769  	if (di->d_fieldmask & QC_INO_COUNT) {
2770  		dm->dqb_curinodes = di->d_ino_count;
2771  		check_ilim = 1;
2772  		set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
2773  	}
2774  
2775  	if (di->d_fieldmask & QC_INO_SOFT)
2776  		dm->dqb_isoftlimit = di->d_ino_softlimit;
2777  	if (di->d_fieldmask & QC_INO_HARD)
2778  		dm->dqb_ihardlimit = di->d_ino_hardlimit;
2779  	if (di->d_fieldmask & (QC_INO_SOFT | QC_INO_HARD)) {
2780  		check_ilim = 1;
2781  		set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
2782  	}
2783  
2784  	if (di->d_fieldmask & QC_SPC_TIMER) {
2785  		dm->dqb_btime = di->d_spc_timer;
2786  		check_blim = 1;
2787  		set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
2788  	}
2789  
2790  	if (di->d_fieldmask & QC_INO_TIMER) {
2791  		dm->dqb_itime = di->d_ino_timer;
2792  		check_ilim = 1;
2793  		set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
2794  	}
2795  
2796  	if (check_blim) {
2797  		if (!dm->dqb_bsoftlimit ||
2798  		    dm->dqb_curspace + dm->dqb_rsvspace <= dm->dqb_bsoftlimit) {
2799  			dm->dqb_btime = 0;
2800  			clear_bit(DQ_BLKS_B, &dquot->dq_flags);
2801  		} else if (!(di->d_fieldmask & QC_SPC_TIMER))
2802  			/* Set grace only if user hasn't provided his own... */
2803  			dm->dqb_btime = ktime_get_real_seconds() + dqi->dqi_bgrace;
2804  	}
2805  	if (check_ilim) {
2806  		if (!dm->dqb_isoftlimit ||
2807  		    dm->dqb_curinodes <= dm->dqb_isoftlimit) {
2808  			dm->dqb_itime = 0;
2809  			clear_bit(DQ_INODES_B, &dquot->dq_flags);
2810  		} else if (!(di->d_fieldmask & QC_INO_TIMER))
2811  			/* Set grace only if user hasn't provided his own... */
2812  			dm->dqb_itime = ktime_get_real_seconds() + dqi->dqi_igrace;
2813  	}
2814  	if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit ||
2815  	    dm->dqb_isoftlimit)
2816  		clear_bit(DQ_FAKE_B, &dquot->dq_flags);
2817  	else
2818  		set_bit(DQ_FAKE_B, &dquot->dq_flags);
2819  	spin_unlock(&dquot->dq_dqb_lock);
2820  	mark_dquot_dirty(dquot);
2821  
2822  	return 0;
2823  }
2824  
2825  int dquot_set_dqblk(struct super_block *sb, struct kqid qid,
2826  		  struct qc_dqblk *di)
2827  {
2828  	struct dquot *dquot;
2829  	int rc;
2830  
2831  	dquot = dqget(sb, qid);
2832  	if (IS_ERR(dquot)) {
2833  		rc = PTR_ERR(dquot);
2834  		goto out;
2835  	}
2836  	rc = do_set_dqblk(dquot, di);
2837  	dqput(dquot);
2838  out:
2839  	return rc;
2840  }
2841  EXPORT_SYMBOL(dquot_set_dqblk);
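
/*
 * Editorial sketch, not part of the original file: setting a 1 GiB
 * block soft limit for a user (the d_spc_* fields are in bytes):
 *
 *	struct qc_dqblk di = {
 *		.d_fieldmask	 = QC_SPC_SOFT,
 *		.d_spc_softlimit = 1024ULL * 1024 * 1024,
 *	};
 *	err = dquot_set_dqblk(sb, make_kqid_uid(uid), &di);
 */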
2842  
2843  /* Generic routine for getting common part of quota file information */
2844  int dquot_get_state(struct super_block *sb, struct qc_state *state)
2845  {
2846  	struct mem_dqinfo *mi;
2847  	struct qc_type_state *tstate;
2848  	struct quota_info *dqopt = sb_dqopt(sb);
2849  	int type;
2850  
2851  	memset(state, 0, sizeof(*state));
2852  	for (type = 0; type < MAXQUOTAS; type++) {
2853  		if (!sb_has_quota_active(sb, type))
2854  			continue;
2855  		tstate = state->s_state + type;
2856  		mi = sb_dqopt(sb)->info + type;
2857  		tstate->flags = QCI_ACCT_ENABLED;
2858  		spin_lock(&dq_data_lock);
2859  		if (mi->dqi_flags & DQF_SYS_FILE)
2860  			tstate->flags |= QCI_SYSFILE;
2861  		if (mi->dqi_flags & DQF_ROOT_SQUASH)
2862  			tstate->flags |= QCI_ROOT_SQUASH;
2863  		if (sb_has_quota_limits_enabled(sb, type))
2864  			tstate->flags |= QCI_LIMITS_ENFORCED;
2865  		tstate->spc_timelimit = mi->dqi_bgrace;
2866  		tstate->ino_timelimit = mi->dqi_igrace;
2867  		if (dqopt->files[type]) {
2868  			tstate->ino = dqopt->files[type]->i_ino;
2869  			tstate->blocks = dqopt->files[type]->i_blocks;
2870  		}
2871  		tstate->nextents = 1;	/* We don't know... */
2872  		spin_unlock(&dq_data_lock);
2873  	}
2874  	return 0;
2875  }
2876  EXPORT_SYMBOL(dquot_get_state);
2877  
2878  /* Generic routine for setting common part of quota file information */
2879  int dquot_set_dqinfo(struct super_block *sb, int type, struct qc_info *ii)
2880  {
2881  	struct mem_dqinfo *mi;
2882  
2883  	if ((ii->i_fieldmask & QC_WARNS_MASK) ||
2884  	    (ii->i_fieldmask & QC_RT_SPC_TIMER))
2885  		return -EINVAL;
2886  	if (!sb_has_quota_active(sb, type))
2887  		return -ESRCH;
2888  	mi = sb_dqopt(sb)->info + type;
2889  	if (ii->i_fieldmask & QC_FLAGS) {
2890  		if ((ii->i_flags & QCI_ROOT_SQUASH &&
2891  		     mi->dqi_format->qf_fmt_id != QFMT_VFS_OLD))
2892  			return -EINVAL;
2893  	}
2894  	spin_lock(&dq_data_lock);
2895  	if (ii->i_fieldmask & QC_SPC_TIMER)
2896  		mi->dqi_bgrace = ii->i_spc_timelimit;
2897  	if (ii->i_fieldmask & QC_INO_TIMER)
2898  		mi->dqi_igrace = ii->i_ino_timelimit;
2899  	if (ii->i_fieldmask & QC_FLAGS) {
2900  		if (ii->i_flags & QCI_ROOT_SQUASH)
2901  			mi->dqi_flags |= DQF_ROOT_SQUASH;
2902  		else
2903  			mi->dqi_flags &= ~DQF_ROOT_SQUASH;
2904  	}
2905  	spin_unlock(&dq_data_lock);
2906  	mark_info_dirty(sb, type);
2907  	/* Force write to disk */
2908  	return sb->dq_op->write_info(sb, type);
2909  }
2910  EXPORT_SYMBOL(dquot_set_dqinfo);
2911  
2912  const struct quotactl_ops dquot_quotactl_sysfile_ops = {
2913  	.quota_enable	= dquot_quota_enable,
2914  	.quota_disable	= dquot_quota_disable,
2915  	.quota_sync	= dquot_quota_sync,
2916  	.get_state	= dquot_get_state,
2917  	.set_info	= dquot_set_dqinfo,
2918  	.get_dqblk	= dquot_get_dqblk,
2919  	.get_nextdqblk	= dquot_get_next_dqblk,
2920  	.set_dqblk	= dquot_set_dqblk
2921  };
2922  EXPORT_SYMBOL(dquot_quotactl_sysfile_ops);
2923  
2924  static int do_proc_dqstats(struct ctl_table *table, int write,
2925  		     void *buffer, size_t *lenp, loff_t *ppos)
2926  {
2927  	unsigned int type = (unsigned long *)table->data - dqstats.stat;
2928  	s64 value = percpu_counter_sum(&dqstats.counter[type]);
2929  
2930  	/* Filter negative values for non-monotonic counters */
2931  	if (value < 0 && (type == DQST_ALLOC_DQUOTS ||
2932  			  type == DQST_FREE_DQUOTS))
2933  		value = 0;
2934  
2935  	/* Update global table */
2936  	dqstats.stat[type] = value;
2937  	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
2938  }
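
/*
 * The counters summed above surface through the table below as
 * /proc/sys/fs/quota/<name>, e.g. "cat /proc/sys/fs/quota/cache_hits".
 */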
2939  
2940  static struct ctl_table fs_dqstats_table[] = {
2941  	{
2942  		.procname	= "lookups",
2943  		.data		= &dqstats.stat[DQST_LOOKUPS],
2944  		.maxlen		= sizeof(unsigned long),
2945  		.mode		= 0444,
2946  		.proc_handler	= do_proc_dqstats,
2947  	},
2948  	{
2949  		.procname	= "drops",
2950  		.data		= &dqstats.stat[DQST_DROPS],
2951  		.maxlen		= sizeof(unsigned long),
2952  		.mode		= 0444,
2953  		.proc_handler	= do_proc_dqstats,
2954  	},
2955  	{
2956  		.procname	= "reads",
2957  		.data		= &dqstats.stat[DQST_READS],
2958  		.maxlen		= sizeof(unsigned long),
2959  		.mode		= 0444,
2960  		.proc_handler	= do_proc_dqstats,
2961  	},
2962  	{
2963  		.procname	= "writes",
2964  		.data		= &dqstats.stat[DQST_WRITES],
2965  		.maxlen		= sizeof(unsigned long),
2966  		.mode		= 0444,
2967  		.proc_handler	= do_proc_dqstats,
2968  	},
2969  	{
2970  		.procname	= "cache_hits",
2971  		.data		= &dqstats.stat[DQST_CACHE_HITS],
2972  		.maxlen		= sizeof(unsigned long),
2973  		.mode		= 0444,
2974  		.proc_handler	= do_proc_dqstats,
2975  	},
2976  	{
2977  		.procname	= "allocated_dquots",
2978  		.data		= &dqstats.stat[DQST_ALLOC_DQUOTS],
2979  		.maxlen		= sizeof(unsigned long),
2980  		.mode		= 0444,
2981  		.proc_handler	= do_proc_dqstats,
2982  	},
2983  	{
2984  		.procname	= "free_dquots",
2985  		.data		= &dqstats.stat[DQST_FREE_DQUOTS],
2986  		.maxlen		= sizeof(unsigned long),
2987  		.mode		= 0444,
2988  		.proc_handler	= do_proc_dqstats,
2989  	},
2990  	{
2991  		.procname	= "syncs",
2992  		.data		= &dqstats.stat[DQST_SYNCS],
2993  		.maxlen		= sizeof(unsigned long),
2994  		.mode		= 0444,
2995  		.proc_handler	= do_proc_dqstats,
2996  	},
2997  #ifdef CONFIG_PRINT_QUOTA_WARNING
2998  	{
2999  		.procname	= "warnings",
3000  		.data		= &flag_print_warnings,
3001  		.maxlen		= sizeof(int),
3002  		.mode		= 0644,
3003  		.proc_handler	= proc_dointvec,
3004  	},
3005  #endif
3006  	{ },
3007  };
3008  
3009  static int __init dquot_init(void)
3010  {
3011  	int i, ret;
3012  	unsigned long nr_hash, order;
3013  
3014  	printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__);
3015  
3016  	register_sysctl_init("fs/quota", fs_dqstats_table);
3017  
3018  	dquot_cachep = kmem_cache_create("dquot",
3019  			sizeof(struct dquot), sizeof(unsigned long) * 4,
3020  			(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
3021  				SLAB_MEM_SPREAD|SLAB_PANIC),
3022  			NULL);
3023  
3024  	order = 0;
3025  	dquot_hash = (struct hlist_head *)__get_free_pages(GFP_KERNEL, order);
3026  	if (!dquot_hash)
3027  		panic("Cannot create dquot hash table");
3028  
3029  	for (i = 0; i < _DQST_DQSTAT_LAST; i++) {
3030  		ret = percpu_counter_init(&dqstats.counter[i], 0, GFP_KERNEL);
3031  		if (ret)
3032  			panic("Cannot create dquot stat counters");
3033  	}
3034  
3035  	/* Find power-of-two hlist_heads which can fit into allocation */
3036  	nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
3037  	dq_hash_bits = ilog2(nr_hash);
3038  
3039  	nr_hash = 1UL << dq_hash_bits;
3040  	dq_hash_mask = nr_hash - 1;
3041  	for (i = 0; i < nr_hash; i++)
3042  		INIT_HLIST_HEAD(dquot_hash + i);
3043  
3044  	pr_info("VFS: Dquot-cache hash table entries: %ld (order %ld,"
3045  		" %ld bytes)\n", nr_hash, order, (PAGE_SIZE << order));
3046  
3047  	if (register_shrinker(&dqcache_shrinker, "dquota-cache"))
3048  		panic("Cannot register dquot shrinker");
3049  
3050  	return 0;
3051  }
3052  fs_initcall(dquot_init);
3053