xref: /openbmc/linux/fs/quota/quota.c (revision 83a530e1)
1  // SPDX-License-Identifier: GPL-2.0
2  /*
3   * Quota code necessary even when VFS quota support is not compiled
4   * into the kernel.  The interesting stuff is over in dquot.c; here
5   * we have symbols for initial quotactl(2) handling, the sysctl(2)
6   * variables, etc. - things needed even when quota support is disabled.
7   */
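/*
 * Illustrative sketch (not part of the original source; the device path and
 * user-space headers are assumptions): the code in this file is reached from
 * user space through the quotactl(2) syscall, e.g. to query the caller's own
 * user quota:
 *
 *	struct if_dqblk dqb;
 *
 *	quotactl(QCMD(Q_GETQUOTA, USRQUOTA), "/dev/sda1", geteuid(),
 *		 (caddr_t)&dqb);
 */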
8  
9  #include <linux/fs.h>
10  #include <linux/namei.h>
11  #include <linux/slab.h>
12  #include <asm/current.h>
13  #include <linux/uaccess.h>
14  #include <linux/kernel.h>
15  #include <linux/security.h>
16  #include <linux/syscalls.h>
17  #include <linux/capability.h>
18  #include <linux/quotaops.h>
19  #include <linux/types.h>
20  #include <linux/writeback.h>
21  
22  static int check_quotactl_permission(struct super_block *sb, int type, int cmd,
23  				     qid_t id)
24  {
25  	switch (cmd) {
26  	/* these commands do not require any special privileges */
27  	case Q_GETFMT:
28  	case Q_SYNC:
29  	case Q_GETINFO:
30  	case Q_XGETQSTAT:
31  	case Q_XGETQSTATV:
32  	case Q_XQUOTASYNC:
33  		break;
34  	/* allow querying information for dquots we "own" */
35  	case Q_GETQUOTA:
36  	case Q_XGETQUOTA:
37  		if ((type == USRQUOTA && uid_eq(current_euid(), make_kuid(current_user_ns(), id))) ||
38  		    (type == GRPQUOTA && in_egroup_p(make_kgid(current_user_ns(), id))))
39  			break;
40  		/*FALLTHROUGH*/
41  	default:
42  		if (!capable(CAP_SYS_ADMIN))
43  			return -EPERM;
44  	}
45  
46  	return security_quotactl(cmd, type, id, sb);
47  }
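/*
 * Illustrative sketch of the policy above (device path is an assumption):
 * an unprivileged process may query its own usage,
 *
 *	quotactl(QCMD(Q_GETQUOTA, USRQUOTA), "/dev/sda1", geteuid(), addr);
 *
 * while querying another id, or any of the setting / on-off commands,
 * additionally requires CAP_SYS_ADMIN; every command still has to pass
 * security_quotactl().
 */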
48  
49  static void quota_sync_one(struct super_block *sb, void *arg)
50  {
51  	int type = *(int *)arg;
52  
53  	if (sb->s_qcop && sb->s_qcop->quota_sync &&
54  	    (sb->s_quota_types & (1 << type)))
55  		sb->s_qcop->quota_sync(sb, type);
56  }
57  
58  static int quota_sync_all(int type)
59  {
60  	int ret;
61  
62  	if (type >= MAXQUOTAS)
63  		return -EINVAL;
64  	ret = security_quotactl(Q_SYNC, type, 0, NULL);
65  	if (!ret)
66  		iterate_supers(quota_sync_one, &type);
67  	return ret;
68  }
69  
70  unsigned int qtype_enforce_flag(int type)
71  {
72  	switch (type) {
73  	case USRQUOTA:
74  		return FS_QUOTA_UDQ_ENFD;
75  	case GRPQUOTA:
76  		return FS_QUOTA_GDQ_ENFD;
77  	case PRJQUOTA:
78  		return FS_QUOTA_PDQ_ENFD;
79  	}
80  	return 0;
81  }
82  
83  static int quota_quotaon(struct super_block *sb, int type, qid_t id,
84  		         const struct path *path)
85  {
86  	if (!sb->s_qcop->quota_on && !sb->s_qcop->quota_enable)
87  		return -ENOSYS;
88  	if (sb->s_qcop->quota_enable)
89  		return sb->s_qcop->quota_enable(sb, qtype_enforce_flag(type));
90  	if (IS_ERR(path))
91  		return PTR_ERR(path);
92  	return sb->s_qcop->quota_on(sb, type, id, path);
93  }
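/*
 * Illustrative sketch (device path, mount point and format are assumptions):
 * for filesystems using visible quota files, user space turns quotas on with
 * something like
 *
 *	quotactl(QCMD(Q_QUOTAON, USRQUOTA), "/dev/sda1", QFMT_VFS_V1,
 *		 "/mnt/aquota.user");
 *
 * whereas filesystems that provide ->quota_enable (quota kept in hidden
 * system files) ignore the path and quota_quotaon() merely switches on
 * enforcement for the given type.
 */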
94  
95  static int quota_quotaoff(struct super_block *sb, int type)
96  {
97  	if (!sb->s_qcop->quota_off && !sb->s_qcop->quota_disable)
98  		return -ENOSYS;
99  	if (sb->s_qcop->quota_disable)
100  		return sb->s_qcop->quota_disable(sb, qtype_enforce_flag(type));
101  	return sb->s_qcop->quota_off(sb, type);
102  }
103  
104  static int quota_getfmt(struct super_block *sb, int type, void __user *addr)
105  {
106  	__u32 fmt;
107  
108  	if (!sb_has_quota_active(sb, type))
109  		return -ESRCH;
110  	fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id;
111  	if (copy_to_user(addr, &fmt, sizeof(fmt)))
112  		return -EFAULT;
113  	return 0;
114  }
115  
116  static int quota_getinfo(struct super_block *sb, int type, void __user *addr)
117  {
118  	struct qc_state state;
119  	struct qc_type_state *tstate;
120  	struct if_dqinfo uinfo;
121  	int ret;
122  
123  	/* This checks whether qc_state has enough entries... */
124  	BUILD_BUG_ON(MAXQUOTAS > XQM_MAXQUOTAS);
125  	if (!sb->s_qcop->get_state)
126  		return -ENOSYS;
127  	ret = sb->s_qcop->get_state(sb, &state);
128  	if (ret)
129  		return ret;
130  	tstate = state.s_state + type;
131  	if (!(tstate->flags & QCI_ACCT_ENABLED))
132  		return -ESRCH;
133  	memset(&uinfo, 0, sizeof(uinfo));
134  	uinfo.dqi_bgrace = tstate->spc_timelimit;
135  	uinfo.dqi_igrace = tstate->ino_timelimit;
136  	if (tstate->flags & QCI_SYSFILE)
137  		uinfo.dqi_flags |= DQF_SYS_FILE;
138  	if (tstate->flags & QCI_ROOT_SQUASH)
139  		uinfo.dqi_flags |= DQF_ROOT_SQUASH;
140  	uinfo.dqi_valid = IIF_ALL;
141  	if (copy_to_user(addr, &uinfo, sizeof(uinfo)))
142  		return -EFAULT;
143  	return 0;
144  }
145  
146  static int quota_setinfo(struct super_block *sb, int type, void __user *addr)
147  {
148  	struct if_dqinfo info;
149  	struct qc_info qinfo;
150  
151  	if (copy_from_user(&info, addr, sizeof(info)))
152  		return -EFAULT;
153  	if (!sb->s_qcop->set_info)
154  		return -ENOSYS;
155  	if (info.dqi_valid & ~(IIF_FLAGS | IIF_BGRACE | IIF_IGRACE))
156  		return -EINVAL;
157  	memset(&qinfo, 0, sizeof(qinfo));
158  	if (info.dqi_valid & IIF_FLAGS) {
159  		if (info.dqi_flags & ~DQF_SETINFO_MASK)
160  			return -EINVAL;
161  		if (info.dqi_flags & DQF_ROOT_SQUASH)
162  			qinfo.i_flags |= QCI_ROOT_SQUASH;
163  		qinfo.i_fieldmask |= QC_FLAGS;
164  	}
165  	if (info.dqi_valid & IIF_BGRACE) {
166  		qinfo.i_spc_timelimit = info.dqi_bgrace;
167  		qinfo.i_fieldmask |= QC_SPC_TIMER;
168  	}
169  	if (info.dqi_valid & IIF_IGRACE) {
170  		qinfo.i_ino_timelimit = info.dqi_igrace;
171  		qinfo.i_fieldmask |= QC_INO_TIMER;
172  	}
173  	return sb->s_qcop->set_info(sb, type, &qinfo);
174  }
175  
176  static inline qsize_t qbtos(qsize_t blocks)
177  {
178  	return blocks << QIF_DQBLKSIZE_BITS;
179  }
180  
181  static inline qsize_t stoqb(qsize_t space)
182  {
183  	return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS;
184  }
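/*
 * Worked example for the helpers above (values follow from
 * QIF_DQBLKSIZE_BITS == 10, i.e. a quota block is 1024 bytes):
 * qbtos(3) == 3072, stoqb(3072) == 3, and the round-up in stoqb()
 * gives stoqb(3073) == 4.
 */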
185  
186  static void copy_to_if_dqblk(struct if_dqblk *dst, struct qc_dqblk *src)
187  {
188  	memset(dst, 0, sizeof(*dst));
189  	dst->dqb_bhardlimit = stoqb(src->d_spc_hardlimit);
190  	dst->dqb_bsoftlimit = stoqb(src->d_spc_softlimit);
191  	dst->dqb_curspace = src->d_space;
192  	dst->dqb_ihardlimit = src->d_ino_hardlimit;
193  	dst->dqb_isoftlimit = src->d_ino_softlimit;
194  	dst->dqb_curinodes = src->d_ino_count;
195  	dst->dqb_btime = src->d_spc_timer;
196  	dst->dqb_itime = src->d_ino_timer;
197  	dst->dqb_valid = QIF_ALL;
198  }
199  
200  static int quota_getquota(struct super_block *sb, int type, qid_t id,
201  			  void __user *addr)
202  {
203  	struct kqid qid;
204  	struct qc_dqblk fdq;
205  	struct if_dqblk idq;
206  	int ret;
207  
208  	if (!sb->s_qcop->get_dqblk)
209  		return -ENOSYS;
210  	qid = make_kqid(current_user_ns(), type, id);
211  	if (!qid_has_mapping(sb->s_user_ns, qid))
212  		return -EINVAL;
213  	ret = sb->s_qcop->get_dqblk(sb, qid, &fdq);
214  	if (ret)
215  		return ret;
216  	copy_to_if_dqblk(&idq, &fdq);
217  	if (copy_to_user(addr, &idq, sizeof(idq)))
218  		return -EFAULT;
219  	return 0;
220  }
221  
222  /*
223   * Return quota for next active quota >= this id, if any exists,
224   * otherwise return -ENOENT via ->get_nextdqblk.
225   */
226  static int quota_getnextquota(struct super_block *sb, int type, qid_t id,
227  			  void __user *addr)
228  {
229  	struct kqid qid;
230  	struct qc_dqblk fdq;
231  	struct if_nextdqblk idq;
232  	int ret;
233  
234  	if (!sb->s_qcop->get_nextdqblk)
235  		return -ENOSYS;
236  	qid = make_kqid(current_user_ns(), type, id);
237  	if (!qid_has_mapping(sb->s_user_ns, qid))
238  		return -EINVAL;
239  	ret = sb->s_qcop->get_nextdqblk(sb, &qid, &fdq);
240  	if (ret)
241  		return ret;
242  	/* struct if_nextdqblk is a superset of struct if_dqblk */
243  	copy_to_if_dqblk((struct if_dqblk *)&idq, &fdq);
244  	idq.dqb_id = from_kqid(current_user_ns(), qid);
245  	if (copy_to_user(addr, &idq, sizeof(idq)))
246  		return -EFAULT;
247  	return 0;
248  }
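/*
 * Illustrative user-space iteration sketch (device path is an assumption):
 * all allocated user quotas can be enumerated by chaining the returned ids
 * until the filesystem reports ENOENT:
 *
 *	struct if_nextdqblk ndq;
 *	unsigned int id = 0;
 *
 *	while (!quotactl(QCMD(Q_GETNEXTQUOTA, USRQUOTA), "/dev/sda1",
 *			 id, (caddr_t)&ndq))
 *		id = ndq.dqb_id + 1;
 */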
249  
250  static void copy_from_if_dqblk(struct qc_dqblk *dst, struct if_dqblk *src)
251  {
252  	dst->d_spc_hardlimit = qbtos(src->dqb_bhardlimit);
253  	dst->d_spc_softlimit = qbtos(src->dqb_bsoftlimit);
254  	dst->d_space = src->dqb_curspace;
255  	dst->d_ino_hardlimit = src->dqb_ihardlimit;
256  	dst->d_ino_softlimit = src->dqb_isoftlimit;
257  	dst->d_ino_count = src->dqb_curinodes;
258  	dst->d_spc_timer = src->dqb_btime;
259  	dst->d_ino_timer = src->dqb_itime;
260  
261  	dst->d_fieldmask = 0;
262  	if (src->dqb_valid & QIF_BLIMITS)
263  		dst->d_fieldmask |= QC_SPC_SOFT | QC_SPC_HARD;
264  	if (src->dqb_valid & QIF_SPACE)
265  		dst->d_fieldmask |= QC_SPACE;
266  	if (src->dqb_valid & QIF_ILIMITS)
267  		dst->d_fieldmask |= QC_INO_SOFT | QC_INO_HARD;
268  	if (src->dqb_valid & QIF_INODES)
269  		dst->d_fieldmask |= QC_INO_COUNT;
270  	if (src->dqb_valid & QIF_BTIME)
271  		dst->d_fieldmask |= QC_SPC_TIMER;
272  	if (src->dqb_valid & QIF_ITIME)
273  		dst->d_fieldmask |= QC_INO_TIMER;
274  }
275  
276  static int quota_setquota(struct super_block *sb, int type, qid_t id,
277  			  void __user *addr)
278  {
279  	struct qc_dqblk fdq;
280  	struct if_dqblk idq;
281  	struct kqid qid;
282  
283  	if (copy_from_user(&idq, addr, sizeof(idq)))
284  		return -EFAULT;
285  	if (!sb->s_qcop->set_dqblk)
286  		return -ENOSYS;
287  	qid = make_kqid(current_user_ns(), type, id);
288  	if (!qid_has_mapping(sb->s_user_ns, qid))
289  		return -EINVAL;
290  	copy_from_if_dqblk(&fdq, &idq);
291  	return sb->s_qcop->set_dqblk(sb, qid, &fdq);
292  }
293  
294  static int quota_enable(struct super_block *sb, void __user *addr)
295  {
296  	__u32 flags;
297  
298  	if (copy_from_user(&flags, addr, sizeof(flags)))
299  		return -EFAULT;
300  	if (!sb->s_qcop->quota_enable)
301  		return -ENOSYS;
302  	return sb->s_qcop->quota_enable(sb, flags);
303  }
304  
305  static int quota_disable(struct super_block *sb, void __user *addr)
306  {
307  	__u32 flags;
308  
309  	if (copy_from_user(&flags, addr, sizeof(flags)))
310  		return -EFAULT;
311  	if (!sb->s_qcop->quota_disable)
312  		return -ENOSYS;
313  	return sb->s_qcop->quota_disable(sb, flags);
314  }
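/*
 * Illustrative sketch (device path is an assumption): with the XFS-style
 * interface the on/off commands take a flags word instead of a path, e.g.
 *
 *	__u32 flags = FS_QUOTA_UDQ_ENFD;
 *
 *	quotactl(QCMD(Q_XQUOTAOFF, USRQUOTA), "/dev/sda1", 0, (caddr_t)&flags);
 *
 * which ends up in quota_disable() above.
 */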
315  
316  static int quota_state_to_flags(struct qc_state *state)
317  {
318  	int flags = 0;
319  
320  	if (state->s_state[USRQUOTA].flags & QCI_ACCT_ENABLED)
321  		flags |= FS_QUOTA_UDQ_ACCT;
322  	if (state->s_state[USRQUOTA].flags & QCI_LIMITS_ENFORCED)
323  		flags |= FS_QUOTA_UDQ_ENFD;
324  	if (state->s_state[GRPQUOTA].flags & QCI_ACCT_ENABLED)
325  		flags |= FS_QUOTA_GDQ_ACCT;
326  	if (state->s_state[GRPQUOTA].flags & QCI_LIMITS_ENFORCED)
327  		flags |= FS_QUOTA_GDQ_ENFD;
328  	if (state->s_state[PRJQUOTA].flags & QCI_ACCT_ENABLED)
329  		flags |= FS_QUOTA_PDQ_ACCT;
330  	if (state->s_state[PRJQUOTA].flags & QCI_LIMITS_ENFORCED)
331  		flags |= FS_QUOTA_PDQ_ENFD;
332  	return flags;
333  }
334  
335  static int quota_getstate(struct super_block *sb, struct fs_quota_stat *fqs)
336  {
337  	int type;
338  	struct qc_state state;
339  	int ret;
340  
341  	memset(&state, 0, sizeof(struct qc_state));
342  	ret = sb->s_qcop->get_state(sb, &state);
343  	if (ret < 0)
344  		return ret;
345  
346  	memset(fqs, 0, sizeof(*fqs));
347  	fqs->qs_version = FS_QSTAT_VERSION;
348  	fqs->qs_flags = quota_state_to_flags(&state);
349  	/* No quota enabled? */
350  	if (!fqs->qs_flags)
351  		return -ENOSYS;
352  	fqs->qs_incoredqs = state.s_incoredqs;
353  	/*
354  	 * GETXSTATE quotactl has space for just one set of time limits so
355  	 * report them for the first enabled quota type
356  	 */
357  	for (type = 0; type < XQM_MAXQUOTAS; type++)
358  		if (state.s_state[type].flags & QCI_ACCT_ENABLED)
359  			break;
360  	BUG_ON(type == XQM_MAXQUOTAS);
361  	fqs->qs_btimelimit = state.s_state[type].spc_timelimit;
362  	fqs->qs_itimelimit = state.s_state[type].ino_timelimit;
363  	fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit;
364  	fqs->qs_bwarnlimit = state.s_state[type].spc_warnlimit;
365  	fqs->qs_iwarnlimit = state.s_state[type].ino_warnlimit;
366  
367  	/* Inodes may be allocated even if inactive; copy out if present */
368  	if (state.s_state[USRQUOTA].ino) {
369  		fqs->qs_uquota.qfs_ino = state.s_state[USRQUOTA].ino;
370  		fqs->qs_uquota.qfs_nblks = state.s_state[USRQUOTA].blocks;
371  		fqs->qs_uquota.qfs_nextents = state.s_state[USRQUOTA].nextents;
372  	}
373  	if (state.s_state[GRPQUOTA].ino) {
374  		fqs->qs_gquota.qfs_ino = state.s_state[GRPQUOTA].ino;
375  		fqs->qs_gquota.qfs_nblks = state.s_state[GRPQUOTA].blocks;
376  		fqs->qs_gquota.qfs_nextents = state.s_state[GRPQUOTA].nextents;
377  	}
378  	if (state.s_state[PRJQUOTA].ino) {
379  		/*
380  		 * Q_XGETQSTAT doesn't have room for both group and project
381  		 * quotas.  So, allow the project quota values to be copied out
382  		 * only if there is no group quota information available.
383  		 */
384  		if (!(state.s_state[GRPQUOTA].flags & QCI_ACCT_ENABLED)) {
385  			fqs->qs_gquota.qfs_ino = state.s_state[PRJQUOTA].ino;
386  			fqs->qs_gquota.qfs_nblks =
387  					state.s_state[PRJQUOTA].blocks;
388  			fqs->qs_gquota.qfs_nextents =
389  					state.s_state[PRJQUOTA].nextents;
390  		}
391  	}
392  	return 0;
393  }
394  
395  static int quota_getxstate(struct super_block *sb, void __user *addr)
396  {
397  	struct fs_quota_stat fqs;
398  	int ret;
399  
400  	if (!sb->s_qcop->get_state)
401  		return -ENOSYS;
402  	ret = quota_getstate(sb, &fqs);
403  	if (!ret && copy_to_user(addr, &fqs, sizeof(fqs)))
404  		return -EFAULT;
405  	return ret;
406  }
407  
408  static int quota_getstatev(struct super_block *sb, struct fs_quota_statv *fqs)
409  {
410  	int type;
411  	struct qc_state state;
412  	int ret;
413  
414  	memset(&state, 0, sizeof (struct qc_state));
415  	memset(&state, 0, sizeof(struct qc_state));
416  	if (ret < 0)
417  		return ret;
418  
419  	memset(fqs, 0, sizeof(*fqs));
420  	fqs->qs_version = FS_QSTAT_VERSION;
421  	fqs->qs_flags = quota_state_to_flags(&state);
422  	/* No quota enabled? */
423  	if (!fqs->qs_flags)
424  		return -ENOSYS;
425  	fqs->qs_incoredqs = state.s_incoredqs;
426  	/*
427  	 * GETXSTATV quotactl has space for just one set of time limits so
428  	 * report them for the first enabled quota type
429  	 */
430  	for (type = 0; type < XQM_MAXQUOTAS; type++)
431  		if (state.s_state[type].flags & QCI_ACCT_ENABLED)
432  			break;
433  	BUG_ON(type == XQM_MAXQUOTAS);
434  	fqs->qs_btimelimit = state.s_state[type].spc_timelimit;
435  	fqs->qs_itimelimit = state.s_state[type].ino_timelimit;
436  	fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit;
437  	fqs->qs_bwarnlimit = state.s_state[type].spc_warnlimit;
438  	fqs->qs_iwarnlimit = state.s_state[type].ino_warnlimit;
439  
440  	/* Inodes may be allocated even if inactive; copy out if present */
441  	if (state.s_state[USRQUOTA].ino) {
442  		fqs->qs_uquota.qfs_ino = state.s_state[USRQUOTA].ino;
443  		fqs->qs_uquota.qfs_nblks = state.s_state[USRQUOTA].blocks;
444  		fqs->qs_uquota.qfs_nextents = state.s_state[USRQUOTA].nextents;
445  	}
446  	if (state.s_state[GRPQUOTA].ino) {
447  		fqs->qs_gquota.qfs_ino = state.s_state[GRPQUOTA].ino;
448  		fqs->qs_gquota.qfs_nblks = state.s_state[GRPQUOTA].blocks;
449  		fqs->qs_gquota.qfs_nextents = state.s_state[GRPQUOTA].nextents;
450  	}
451  	if (state.s_state[PRJQUOTA].ino) {
452  		fqs->qs_pquota.qfs_ino = state.s_state[PRJQUOTA].ino;
453  		fqs->qs_pquota.qfs_nblks = state.s_state[PRJQUOTA].blocks;
454  		fqs->qs_pquota.qfs_nextents = state.s_state[PRJQUOTA].nextents;
455  	}
456  	return 0;
457  }
458  
459  static int quota_getxstatev(struct super_block *sb, void __user *addr)
460  {
461  	struct fs_quota_statv fqs;
462  	int ret;
463  
464  	if (!sb->s_qcop->get_state)
465  		return -ENOSYS;
466  
467  	memset(&fqs, 0, sizeof(fqs));
468  	if (copy_from_user(&fqs, addr, 1)) /* Just read qs_version */
469  		return -EFAULT;
470  
471  	/* If this kernel doesn't support the user-specified version, fail */
472  	switch (fqs.qs_version) {
473  	case FS_QSTATV_VERSION1:
474  		break;
475  	default:
476  		return -EINVAL;
477  	}
478  	ret = quota_getstatev(sb, &fqs);
479  	if (!ret && copy_to_user(addr, &fqs, sizeof(fqs)))
480  		return -EFAULT;
481  	return ret;
482  }
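/*
 * Illustrative caller-side sketch (device path is an assumption): the caller
 * zeroes a struct fs_quota_statv and sets the version field before issuing
 * Q_XGETQSTATV, e.g.
 *
 *	struct fs_quota_statv sv = { .qs_version = FS_QSTATV_VERSION1 };
 *
 *	quotactl(QCMD(Q_XGETQSTATV, USRQUOTA), "/dev/sda1", 0, (caddr_t)&sv);
 *
 * Only FS_QSTATV_VERSION1 is accepted by the switch above.
 */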
483  
484  /*
485   * XFS defines BBTOB and BTOBB macros inside fs/xfs/ and we cannot move them
486   * out of there as xfsprogs relies on those definitions being in that header
487   * file. So just define the same conversion helpers here for quota purposes.
488   */
489  #define XFS_BB_SHIFT 9
490  
491  static inline u64 quota_bbtob(u64 blocks)
492  {
493  	return blocks << XFS_BB_SHIFT;
494  }
495  
496  static inline u64 quota_btobb(u64 bytes)
497  {
498  	return (bytes + (1 << XFS_BB_SHIFT) - 1) >> XFS_BB_SHIFT;
499  }
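/*
 * Worked example for the helpers above (XFS_BB_SHIFT == 9, i.e. a basic
 * block is 512 bytes): quota_bbtob(3) == 1536, quota_btobb(1536) == 3,
 * and the round-up gives quota_btobb(1537) == 4.
 */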
500  
501  static void copy_from_xfs_dqblk(struct qc_dqblk *dst, struct fs_disk_quota *src)
502  {
503  	dst->d_spc_hardlimit = quota_bbtob(src->d_blk_hardlimit);
504  	dst->d_spc_softlimit = quota_bbtob(src->d_blk_softlimit);
505  	dst->d_ino_hardlimit = src->d_ino_hardlimit;
506  	dst->d_ino_softlimit = src->d_ino_softlimit;
507  	dst->d_space = quota_bbtob(src->d_bcount);
508  	dst->d_ino_count = src->d_icount;
509  	dst->d_ino_timer = src->d_itimer;
510  	dst->d_spc_timer = src->d_btimer;
511  	dst->d_ino_warns = src->d_iwarns;
512  	dst->d_spc_warns = src->d_bwarns;
513  	dst->d_rt_spc_hardlimit = quota_bbtob(src->d_rtb_hardlimit);
514  	dst->d_rt_spc_softlimit = quota_bbtob(src->d_rtb_softlimit);
515  	dst->d_rt_space = quota_bbtob(src->d_rtbcount);
516  	dst->d_rt_spc_timer = src->d_rtbtimer;
517  	dst->d_rt_spc_warns = src->d_rtbwarns;
518  	dst->d_fieldmask = 0;
519  	if (src->d_fieldmask & FS_DQ_ISOFT)
520  		dst->d_fieldmask |= QC_INO_SOFT;
521  	if (src->d_fieldmask & FS_DQ_IHARD)
522  		dst->d_fieldmask |= QC_INO_HARD;
523  	if (src->d_fieldmask & FS_DQ_BSOFT)
524  		dst->d_fieldmask |= QC_SPC_SOFT;
525  	if (src->d_fieldmask & FS_DQ_BHARD)
526  		dst->d_fieldmask |= QC_SPC_HARD;
527  	if (src->d_fieldmask & FS_DQ_RTBSOFT)
528  		dst->d_fieldmask |= QC_RT_SPC_SOFT;
529  	if (src->d_fieldmask & FS_DQ_RTBHARD)
530  		dst->d_fieldmask |= QC_RT_SPC_HARD;
531  	if (src->d_fieldmask & FS_DQ_BTIMER)
532  		dst->d_fieldmask |= QC_SPC_TIMER;
533  	if (src->d_fieldmask & FS_DQ_ITIMER)
534  		dst->d_fieldmask |= QC_INO_TIMER;
535  	if (src->d_fieldmask & FS_DQ_RTBTIMER)
536  		dst->d_fieldmask |= QC_RT_SPC_TIMER;
537  	if (src->d_fieldmask & FS_DQ_BWARNS)
538  		dst->d_fieldmask |= QC_SPC_WARNS;
539  	if (src->d_fieldmask & FS_DQ_IWARNS)
540  		dst->d_fieldmask |= QC_INO_WARNS;
541  	if (src->d_fieldmask & FS_DQ_RTBWARNS)
542  		dst->d_fieldmask |= QC_RT_SPC_WARNS;
543  	if (src->d_fieldmask & FS_DQ_BCOUNT)
544  		dst->d_fieldmask |= QC_SPACE;
545  	if (src->d_fieldmask & FS_DQ_ICOUNT)
546  		dst->d_fieldmask |= QC_INO_COUNT;
547  	if (src->d_fieldmask & FS_DQ_RTBCOUNT)
548  		dst->d_fieldmask |= QC_RT_SPACE;
549  }
550  
551  static void copy_qcinfo_from_xfs_dqblk(struct qc_info *dst,
552  				       struct fs_disk_quota *src)
553  {
554  	memset(dst, 0, sizeof(*dst));
555  	dst->i_spc_timelimit = src->d_btimer;
556  	dst->i_ino_timelimit = src->d_itimer;
557  	dst->i_rt_spc_timelimit = src->d_rtbtimer;
558  	dst->i_ino_warnlimit = src->d_iwarns;
559  	dst->i_spc_warnlimit = src->d_bwarns;
560  	dst->i_rt_spc_warnlimit = src->d_rtbwarns;
561  	if (src->d_fieldmask & FS_DQ_BWARNS)
562  		dst->i_fieldmask |= QC_SPC_WARNS;
563  	if (src->d_fieldmask & FS_DQ_IWARNS)
564  		dst->i_fieldmask |= QC_INO_WARNS;
565  	if (src->d_fieldmask & FS_DQ_RTBWARNS)
566  		dst->i_fieldmask |= QC_RT_SPC_WARNS;
567  	if (src->d_fieldmask & FS_DQ_BTIMER)
568  		dst->i_fieldmask |= QC_SPC_TIMER;
569  	if (src->d_fieldmask & FS_DQ_ITIMER)
570  		dst->i_fieldmask |= QC_INO_TIMER;
571  	if (src->d_fieldmask & FS_DQ_RTBTIMER)
572  		dst->i_fieldmask |= QC_RT_SPC_TIMER;
573  }
574  
575  static int quota_setxquota(struct super_block *sb, int type, qid_t id,
576  			   void __user *addr)
577  {
578  	struct fs_disk_quota fdq;
579  	struct qc_dqblk qdq;
580  	struct kqid qid;
581  
582  	if (copy_from_user(&fdq, addr, sizeof(fdq)))
583  		return -EFAULT;
584  	if (!sb->s_qcop->set_dqblk)
585  		return -ENOSYS;
586  	qid = make_kqid(current_user_ns(), type, id);
587  	if (!qid_has_mapping(sb->s_user_ns, qid))
588  		return -EINVAL;
589  	/* Are we actually setting timer / warning limits for all users? */
590  	if (from_kqid(sb->s_user_ns, qid) == 0 &&
591  	    fdq.d_fieldmask & (FS_DQ_WARNS_MASK | FS_DQ_TIMER_MASK)) {
592  		struct qc_info qinfo;
593  		int ret;
594  
595  		if (!sb->s_qcop->set_info)
596  			return -EINVAL;
597  		copy_qcinfo_from_xfs_dqblk(&qinfo, &fdq);
598  		ret = sb->s_qcop->set_info(sb, type, &qinfo);
599  		if (ret)
600  			return ret;
601  		/* These are already done */
602  		fdq.d_fieldmask &= ~(FS_DQ_WARNS_MASK | FS_DQ_TIMER_MASK);
603  	}
604  	copy_from_xfs_dqblk(&qdq, &fdq);
605  	return sb->s_qcop->set_dqblk(sb, qid, &qdq);
606  }
607  
608  static void copy_to_xfs_dqblk(struct fs_disk_quota *dst, struct qc_dqblk *src,
609  			      int type, qid_t id)
610  {
611  	memset(dst, 0, sizeof(*dst));
612  	dst->d_version = FS_DQUOT_VERSION;
613  	dst->d_id = id;
614  	if (type == USRQUOTA)
615  		dst->d_flags = FS_USER_QUOTA;
616  	else if (type == PRJQUOTA)
617  		dst->d_flags = FS_PROJ_QUOTA;
618  	else
619  		dst->d_flags = FS_GROUP_QUOTA;
620  	dst->d_blk_hardlimit = quota_btobb(src->d_spc_hardlimit);
621  	dst->d_blk_softlimit = quota_btobb(src->d_spc_softlimit);
622  	dst->d_ino_hardlimit = src->d_ino_hardlimit;
623  	dst->d_ino_softlimit = src->d_ino_softlimit;
624  	dst->d_bcount = quota_btobb(src->d_space);
625  	dst->d_icount = src->d_ino_count;
626  	dst->d_itimer = src->d_ino_timer;
627  	dst->d_btimer = src->d_spc_timer;
628  	dst->d_iwarns = src->d_ino_warns;
629  	dst->d_bwarns = src->d_spc_warns;
630  	dst->d_rtb_hardlimit = quota_btobb(src->d_rt_spc_hardlimit);
631  	dst->d_rtb_softlimit = quota_btobb(src->d_rt_spc_softlimit);
632  	dst->d_rtbcount = quota_btobb(src->d_rt_space);
633  	dst->d_rtbtimer = src->d_rt_spc_timer;
634  	dst->d_rtbwarns = src->d_rt_spc_warns;
635  }
636  
637  static int quota_getxquota(struct super_block *sb, int type, qid_t id,
638  			   void __user *addr)
639  {
640  	struct fs_disk_quota fdq;
641  	struct qc_dqblk qdq;
642  	struct kqid qid;
643  	int ret;
644  
645  	if (!sb->s_qcop->get_dqblk)
646  		return -ENOSYS;
647  	qid = make_kqid(current_user_ns(), type, id);
648  	if (!qid_has_mapping(sb->s_user_ns, qid))
649  		return -EINVAL;
650  	ret = sb->s_qcop->get_dqblk(sb, qid, &qdq);
651  	if (ret)
652  		return ret;
653  	copy_to_xfs_dqblk(&fdq, &qdq, type, id);
654  	if (copy_to_user(addr, &fdq, sizeof(fdq)))
655  		return -EFAULT;
656  	return ret;
657  }
658  
659  /*
660   * Return quota for next active quota >= this id, if any exists,
661   * otherwise return -ENOENT via ->get_nextdqblk.
662   */
663  static int quota_getnextxquota(struct super_block *sb, int type, qid_t id,
664  			    void __user *addr)
665  {
666  	struct fs_disk_quota fdq;
667  	struct qc_dqblk qdq;
668  	struct kqid qid;
669  	qid_t id_out;
670  	int ret;
671  
672  	if (!sb->s_qcop->get_nextdqblk)
673  		return -ENOSYS;
674  	qid = make_kqid(current_user_ns(), type, id);
675  	if (!qid_has_mapping(sb->s_user_ns, qid))
676  		return -EINVAL;
677  	ret = sb->s_qcop->get_nextdqblk(sb, &qid, &qdq);
678  	if (ret)
679  		return ret;
680  	id_out = from_kqid(current_user_ns(), qid);
681  	copy_to_xfs_dqblk(&fdq, &qdq, type, id_out);
682  	if (copy_to_user(addr, &fdq, sizeof(fdq)))
683  		return -EFAULT;
684  	return ret;
685  }
686  
687  static int quota_rmxquota(struct super_block *sb, void __user *addr)
688  {
689  	__u32 flags;
690  
691  	if (copy_from_user(&flags, addr, sizeof(flags)))
692  		return -EFAULT;
693  	if (!sb->s_qcop->rm_xquota)
694  		return -ENOSYS;
695  	return sb->s_qcop->rm_xquota(sb, flags);
696  }
697  
698  /* Copy parameters and call proper function */
699  static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
700  		       void __user *addr, const struct path *path)
701  {
702  	int ret;
703  
704  	if (type >= (XQM_COMMAND(cmd) ? XQM_MAXQUOTAS : MAXQUOTAS))
705  		return -EINVAL;
706  	/*
707  	 * Quota not supported on this fs? Check this before s_quota_types
708  	 * since they needn't be set if quota is not supported at all.
709  	 */
710  	if (!sb->s_qcop)
711  		return -ENOSYS;
712  	if (!(sb->s_quota_types & (1 << type)))
713  		return -EINVAL;
714  
715  	ret = check_quotactl_permission(sb, type, cmd, id);
716  	if (ret < 0)
717  		return ret;
718  
719  	switch (cmd) {
720  	case Q_QUOTAON:
721  		return quota_quotaon(sb, type, id, path);
722  	case Q_QUOTAOFF:
723  		return quota_quotaoff(sb, type);
724  	case Q_GETFMT:
725  		return quota_getfmt(sb, type, addr);
726  	case Q_GETINFO:
727  		return quota_getinfo(sb, type, addr);
728  	case Q_SETINFO:
729  		return quota_setinfo(sb, type, addr);
730  	case Q_GETQUOTA:
731  		return quota_getquota(sb, type, id, addr);
732  	case Q_GETNEXTQUOTA:
733  		return quota_getnextquota(sb, type, id, addr);
734  	case Q_SETQUOTA:
735  		return quota_setquota(sb, type, id, addr);
736  	case Q_SYNC:
737  		if (!sb->s_qcop->quota_sync)
738  			return -ENOSYS;
739  		return sb->s_qcop->quota_sync(sb, type);
740  	case Q_XQUOTAON:
741  		return quota_enable(sb, addr);
742  	case Q_XQUOTAOFF:
743  		return quota_disable(sb, addr);
744  	case Q_XQUOTARM:
745  		return quota_rmxquota(sb, addr);
746  	case Q_XGETQSTAT:
747  		return quota_getxstate(sb, addr);
748  	case Q_XGETQSTATV:
749  		return quota_getxstatev(sb, addr);
750  	case Q_XSETQLIM:
751  		return quota_setxquota(sb, type, id, addr);
752  	case Q_XGETQUOTA:
753  		return quota_getxquota(sb, type, id, addr);
754  	case Q_XGETNEXTQUOTA:
755  		return quota_getnextxquota(sb, type, id, addr);
756  	case Q_XQUOTASYNC:
757  		if (sb_rdonly(sb))
758  			return -EROFS;
759  		/* XFS quotas are fully coherent now, making this call a noop */
760  		return 0;
761  	default:
762  		return -EINVAL;
763  	}
764  }
765  
766  #ifdef CONFIG_BLOCK
767  
768  /* Return 1 if 'cmd' will block on a frozen filesystem */
769  static int quotactl_cmd_write(int cmd)
770  {
771  	/*
772  	 * We cannot allow Q_GETQUOTA and Q_GETNEXTQUOTA without write access
773  	 * as dquot_acquire() may allocate space for a new structure and OCFS2
774  	 * needs to increment the on-disk use count.
775  	 */
776  	switch (cmd) {
777  	case Q_GETFMT:
778  	case Q_GETINFO:
779  	case Q_SYNC:
780  	case Q_XGETQSTAT:
781  	case Q_XGETQSTATV:
782  	case Q_XGETQUOTA:
783  	case Q_XGETNEXTQUOTA:
784  	case Q_XQUOTASYNC:
785  		return 0;
786  	}
787  	return 1;
788  }
789  #endif /* CONFIG_BLOCK */
790  
791  /* Return true if quotactl command is manipulating quota on/off state */
792  static bool quotactl_cmd_onoff(int cmd)
793  {
794  	return (cmd == Q_QUOTAON) || (cmd == Q_QUOTAOFF);
795  }
796  
797  /*
798   * look up a superblock on which quota ops will be performed
799   * - use the name of a block device to find the superblock thereon
800   */
801  static struct super_block *quotactl_block(const char __user *special, int cmd)
802  {
803  #ifdef CONFIG_BLOCK
804  	struct block_device *bdev;
805  	struct super_block *sb;
806  	struct filename *tmp = getname(special);
807  
808  	if (IS_ERR(tmp))
809  		return ERR_CAST(tmp);
810  	bdev = lookup_bdev(tmp->name);
811  	putname(tmp);
812  	if (IS_ERR(bdev))
813  		return ERR_CAST(bdev);
814  	if (quotactl_cmd_onoff(cmd))
815  		sb = get_super_exclusive_thawed(bdev);
816  	else if (quotactl_cmd_write(cmd))
817  		sb = get_super_thawed(bdev);
818  	else
819  		sb = get_super(bdev);
820  	bdput(bdev);
821  	if (!sb)
822  		return ERR_PTR(-ENODEV);
823  
824  	return sb;
825  #else
826  	return ERR_PTR(-ENODEV);
827  #endif
828  }
829  
830  /*
831   * This is the system call interface. It communicates with
832   * user-level programs. Currently it only supports disk quota
833   * calls. Maybe we need to add process quotas etc. in the future,
834   * but we should probably use rlimits for that.
835   */
836  int kernel_quotactl(unsigned int cmd, const char __user *special,
837  		    qid_t id, void __user *addr)
838  {
839  	uint cmds, type;
840  	struct super_block *sb = NULL;
841  	struct path path, *pathp = NULL;
842  	int ret;
843  
844  	cmds = cmd >> SUBCMDSHIFT;
845  	type = cmd & SUBCMDMASK;
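	/*
	 * Illustrative decode (not part of the original source): user space
	 * packs the command as QCMD(cmd, type) == (cmd << SUBCMDSHIFT) | type,
	 * so e.g. QCMD(Q_GETQUOTA, USRQUOTA) yields cmds == Q_GETQUOTA and
	 * type == USRQUOTA after the two lines above.
	 */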
846  
847  	/*
848  	 * As a special case, Q_SYNC can be called without a specific device.
849  	 * It will iterate all superblocks that have quota enabled and call
850  	 * the sync action on each of them.
851  	 */
852  	if (!special) {
853  		if (cmds == Q_SYNC)
854  			return quota_sync_all(type);
855  		return -ENODEV;
856  	}
857  
858  	/*
859  	 * The path for quotaon has to be resolved before grabbing the superblock
860  	 * because that takes the s_umount semaphore, which may also be needed by
861  	 * path resolution (think about autofs), and thus deadlocks could arise.
862  	 */
863  	if (cmds == Q_QUOTAON) {
864  		ret = user_path_at(AT_FDCWD, addr, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
865  		if (ret)
866  			pathp = ERR_PTR(ret);
867  		else
868  			pathp = &path;
869  	}
870  
871  	sb = quotactl_block(special, cmds);
872  	if (IS_ERR(sb)) {
873  		ret = PTR_ERR(sb);
874  		goto out;
875  	}
876  
877  	ret = do_quotactl(sb, type, cmds, id, addr, pathp);
878  
879  	if (!quotactl_cmd_onoff(cmds))
880  		drop_super(sb);
881  	else
882  		drop_super_exclusive(sb);
883  out:
884  	if (pathp && !IS_ERR(pathp))
885  		path_put(pathp);
886  	return ret;
887  }
888  
889  SYSCALL_DEFINE4(quotactl, unsigned int, cmd, const char __user *, special,
890  		qid_t, id, void __user *, addr)
891  {
892  	return kernel_quotactl(cmd, special, id, addr);
893  }
894
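/*
 * Illustrative sketch: because kernel_quotactl() treats a NULL device
 * specially for Q_SYNC, user space can ask every filesystem with user quotas
 * enabled to write back its quota state via
 *
 *	quotactl(QCMD(Q_SYNC, USRQUOTA), NULL, 0, NULL);
 *
 * Any other command issued without a device fails with -ENODEV.
 */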