// SPDX-License-Identifier: GPL-2.0
/*
 * Quota code necessary even when VFS quota support is not compiled
 * into the kernel. The interesting stuff is over in dquot.c, here
 * we have symbols for initial quotactl(2) handling, the sysctl(2)
 * variables, etc - things needed even when quota support disabled.
 */

#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <asm/current.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/capability.h>
#include <linux/quotaops.h>
#include <linux/types.h>
#include <linux/mount.h>
#include <linux/writeback.h>
#include <linux/nospec.h>
#include "compat.h"
#include "../internal.h"

/*
 * Check whether the caller may issue quotactl command @cmd for quota
 * @type and id @id on @sb.  Read-only global queries are unprivileged,
 * users may query dquots they "own" (euid/egroup match), everything
 * else needs CAP_SYS_ADMIN.  The LSM hook gets the final say either way.
 */
static int check_quotactl_permission(struct super_block *sb, int type, int cmd,
				     qid_t id)
{
	switch (cmd) {
	/* these commands do not require any special privilegues */
	case Q_GETFMT:
	case Q_SYNC:
	case Q_GETINFO:
	case Q_XGETQSTAT:
	case Q_XGETQSTATV:
	case Q_XQUOTASYNC:
		break;
	/* allow to query information for dquots we "own" */
	case Q_GETQUOTA:
	case Q_XGETQUOTA:
		if ((type == USRQUOTA && uid_eq(current_euid(),
						make_kuid(current_user_ns(), id))) ||
		    (type == GRPQUOTA && in_egroup_p(make_kgid(current_user_ns(), id))))
			break;
		fallthrough;
	default:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
	}

	return security_quotactl(cmd, type, id, sb);
}

/*
 * iterate_supers() callback: sync quotas of one type (passed via @arg)
 * on a single superblock, if the fs supports it for that type.
 */
static void quota_sync_one(struct super_block *sb, void *arg)
{
	int type = *(int *)arg;

	if (sb->s_qcop && sb->s_qcop->quota_sync &&
	    (sb->s_quota_types & (1 << type)))
		sb->s_qcop->quota_sync(sb, type);
}

/*
 * Q_SYNC with no device given: after an LSM check, write out quota
 * information of @type on every mounted superblock.
 */
static int quota_sync_all(int type)
{
	int ret;

	ret = security_quotactl(Q_SYNC, type, 0, NULL);
	if (!ret)
		iterate_supers(quota_sync_one, &type);
	return ret;
}

/*
 * Map a quota type to the corresponding XFS-style enforcement flag;
 * returns 0 for an unknown type.
 */
unsigned int qtype_enforce_flag(int type)
{
	switch (type) {
	case USRQUOTA:
		return FS_QUOTA_UDQ_ENFD;
	case GRPQUOTA:
		return FS_QUOTA_GDQ_ENFD;
	case PRJQUOTA:
		return FS_QUOTA_PDQ_ENFD;
	}
	return 0;
}

/*
 * Q_QUOTAON: prefer the modern ->quota_enable op (no quota file path
 * needed); fall back to ->quota_on with the user-supplied @path.
 * @path may be an ERR_PTR when path lookup failed in the caller; the
 * error is only propagated if ->quota_on would actually need it.
 */
static int quota_quotaon(struct super_block *sb, int type, qid_t id,
			 const struct path *path)
{
	if (!sb->s_qcop->quota_on && !sb->s_qcop->quota_enable)
		return -ENOSYS;
	if (sb->s_qcop->quota_enable)
		return sb->s_qcop->quota_enable(sb, qtype_enforce_flag(type));
	if (IS_ERR(path))
		return PTR_ERR(path);
	return sb->s_qcop->quota_on(sb, type, id, path);
}

/* Q_QUOTAOFF: prefer ->quota_disable, fall back to ->quota_off. */
static int quota_quotaoff(struct super_block *sb, int type)
{
	if (!sb->s_qcop->quota_off && !sb->s_qcop->quota_disable)
		return -ENOSYS;
	if (sb->s_qcop->quota_disable)
		return sb->s_qcop->quota_disable(sb, qtype_enforce_flag(type));
	return sb->s_qcop->quota_off(sb, type);
}

/* Q_GETFMT: copy the active quota format id for @type to userspace. */
static int quota_getfmt(struct super_block *sb, int type, void __user *addr)
{
	__u32 fmt;

	if (!sb_has_quota_active(sb, type))
		return -ESRCH;
	fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id;
	if (copy_to_user(addr, &fmt, sizeof(fmt)))
		return -EFAULT;
	return 0;
}

/*
 * Q_GETINFO: build an if_dqinfo from the filesystem's qc_state for
 * @type and copy it to userspace.  -ESRCH if accounting for that type
 * is not enabled.
 */
static int quota_getinfo(struct super_block *sb, int type, void __user *addr)
{
	struct qc_state state;
	struct qc_type_state *tstate;
	struct if_dqinfo uinfo;
	int ret;

	if (!sb->s_qcop->get_state)
		return -ENOSYS;
	ret = sb->s_qcop->get_state(sb, &state);
	if (ret)
		return ret;
	tstate = state.s_state + type;
	if (!(tstate->flags & QCI_ACCT_ENABLED))
		return -ESRCH;
	memset(&uinfo, 0, sizeof(uinfo));
	uinfo.dqi_bgrace = tstate->spc_timelimit;
	uinfo.dqi_igrace = tstate->ino_timelimit;
	if (tstate->flags & QCI_SYSFILE)
		uinfo.dqi_flags |= DQF_SYS_FILE;
	if (tstate->flags & QCI_ROOT_SQUASH)
		uinfo.dqi_flags |= DQF_ROOT_SQUASH;
	uinfo.dqi_valid = IIF_ALL;
	if (copy_to_user(addr, &uinfo, sizeof(uinfo)))
		return -EFAULT;
	return 0;
}

/*
 * Q_SETINFO: translate an if_dqinfo from userspace into a qc_info and
 * hand it to the filesystem.  Only flags/bgrace/igrace may be set.
 */
static int quota_setinfo(struct super_block *sb, int type, void __user *addr)
{
	struct if_dqinfo info;
	struct qc_info qinfo;

	if (copy_from_user(&info, addr, sizeof(info)))
		return -EFAULT;
	if (!sb->s_qcop->set_info)
		return -ENOSYS;
	if (info.dqi_valid & ~(IIF_FLAGS | IIF_BGRACE | IIF_IGRACE))
		return -EINVAL;
	memset(&qinfo, 0, sizeof(qinfo));
	if (info.dqi_valid & IIF_FLAGS) {
		if (info.dqi_flags & ~DQF_SETINFO_MASK)
			return -EINVAL;
		if (info.dqi_flags & DQF_ROOT_SQUASH)
			qinfo.i_flags |= QCI_ROOT_SQUASH;
		qinfo.i_fieldmask |= QC_FLAGS;
	}
	if (info.dqi_valid & IIF_BGRACE) {
		qinfo.i_spc_timelimit = info.dqi_bgrace;
		qinfo.i_fieldmask |= QC_SPC_TIMER;
	}
	if (info.dqi_valid & IIF_IGRACE) {
		qinfo.i_ino_timelimit = info.dqi_igrace;
		qinfo.i_fieldmask |= QC_INO_TIMER;
	}
	return sb->s_qcop->set_info(sb, type, &qinfo);
}

/* Quota blocks -> bytes. */
static inline qsize_t qbtos(qsize_t blocks)
{
	return blocks << QIF_DQBLKSIZE_BITS;
}

/* Bytes -> quota blocks, rounding up. */
static inline qsize_t stoqb(qsize_t space)
{
	return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS;
}

/* Translate internal qc_dqblk (bytes) to user-visible if_dqblk (blocks). */
static void copy_to_if_dqblk(struct if_dqblk *dst, struct qc_dqblk *src)
{
	memset(dst, 0, sizeof(*dst));
	dst->dqb_bhardlimit = stoqb(src->d_spc_hardlimit);
	dst->dqb_bsoftlimit = stoqb(src->d_spc_softlimit);
	dst->dqb_curspace = src->d_space;
	dst->dqb_ihardlimit = src->d_ino_hardlimit;
	dst->dqb_isoftlimit = src->d_ino_softlimit;
	dst->dqb_curinodes = src->d_ino_count;
	dst->dqb_btime = src->d_spc_timer;
	dst->dqb_itime = src->d_ino_timer;
	dst->dqb_valid = QIF_ALL;
}

/*
 * Q_GETQUOTA: fetch one dquot and copy it to userspace, applying the
 * compat layout fixup for 32-bit callers with different 64-bit
 * alignment (dqb_valid sits at a different offset there).
 */
static int quota_getquota(struct super_block *sb, int type, qid_t id,
			  void __user *addr)
{
	struct kqid qid;
	struct qc_dqblk fdq;
	struct if_dqblk idq;
	int ret;

	if (!sb->s_qcop->get_dqblk)
		return -ENOSYS;
	qid = make_kqid(current_user_ns(), type, id);
	if (!qid_has_mapping(sb->s_user_ns, qid))
		return -EINVAL;
	ret = sb->s_qcop->get_dqblk(sb, qid, &fdq);
	if (ret)
		return ret;
	copy_to_if_dqblk(&idq, &fdq);

	if (compat_need_64bit_alignment_fixup()) {
		struct compat_if_dqblk __user *compat_dqblk = addr;

		if (copy_to_user(compat_dqblk, &idq, sizeof(*compat_dqblk)))
			return -EFAULT;
		if (put_user(idq.dqb_valid, &compat_dqblk->dqb_valid))
			return -EFAULT;
	} else {
		if (copy_to_user(addr, &idq, sizeof(idq)))
			return -EFAULT;
	}
	return 0;
}

/*
 * Return quota for next active quota >= this id, if any exists,
 * otherwise return -ENOENT via ->get_nextdqblk
 */
static int quota_getnextquota(struct super_block *sb, int type, qid_t id,
			      void __user *addr)
{
	struct kqid qid;
	struct qc_dqblk fdq;
	struct if_nextdqblk idq;
	int ret;

	if (!sb->s_qcop->get_nextdqblk)
		return -ENOSYS;
	qid = make_kqid(current_user_ns(), type, id);
	if (!qid_has_mapping(sb->s_user_ns, qid))
		return -EINVAL;
	ret = sb->s_qcop->get_nextdqblk(sb, &qid, &fdq);
	if (ret)
		return ret;
	/* struct if_nextdqblk is a superset of struct if_dqblk */
	copy_to_if_dqblk((struct if_dqblk *)&idq, &fdq);
	idq.dqb_id = from_kqid(current_user_ns(), qid);
	if (copy_to_user(addr, &idq, sizeof(idq)))
		return -EFAULT;
	return 0;
}

/*
 * Translate user-visible if_dqblk (blocks) into internal qc_dqblk
 * (bytes), converting the dqb_valid bits into a qc fieldmask.
 */
static void copy_from_if_dqblk(struct qc_dqblk *dst, struct if_dqblk *src)
{
	dst->d_spc_hardlimit = qbtos(src->dqb_bhardlimit);
	dst->d_spc_softlimit = qbtos(src->dqb_bsoftlimit);
	dst->d_space = src->dqb_curspace;
	dst->d_ino_hardlimit = src->dqb_ihardlimit;
	dst->d_ino_softlimit = src->dqb_isoftlimit;
	dst->d_ino_count = src->dqb_curinodes;
	dst->d_spc_timer = src->dqb_btime;
	dst->d_ino_timer = src->dqb_itime;

	dst->d_fieldmask = 0;
	if (src->dqb_valid & QIF_BLIMITS)
		dst->d_fieldmask |= QC_SPC_SOFT | QC_SPC_HARD;
	if (src->dqb_valid & QIF_SPACE)
		dst->d_fieldmask |= QC_SPACE;
	if (src->dqb_valid & QIF_ILIMITS)
		dst->d_fieldmask |= QC_INO_SOFT | QC_INO_HARD;
	if (src->dqb_valid & QIF_INODES)
		dst->d_fieldmask |= QC_INO_COUNT;
	if (src->dqb_valid & QIF_BTIME)
		dst->d_fieldmask |= QC_SPC_TIMER;
	if (src->dqb_valid & QIF_ITIME)
		dst->d_fieldmask |= QC_INO_TIMER;
}

/*
 * Q_SETQUOTA: read an if_dqblk from userspace (with compat alignment
 * fixup where needed) and apply it via ->set_dqblk.
 */
static int quota_setquota(struct super_block *sb, int type, qid_t id,
			  void __user *addr)
{
	struct qc_dqblk fdq;
	struct if_dqblk idq;
	struct kqid qid;

	if (compat_need_64bit_alignment_fixup()) {
		struct compat_if_dqblk __user *compat_dqblk = addr;

		if (copy_from_user(&idq, compat_dqblk, sizeof(*compat_dqblk)) ||
		    get_user(idq.dqb_valid, &compat_dqblk->dqb_valid))
			return -EFAULT;
	} else {
		if (copy_from_user(&idq, addr, sizeof(idq)))
			return -EFAULT;
	}
	if (!sb->s_qcop->set_dqblk)
		return -ENOSYS;
	qid = make_kqid(current_user_ns(), type, id);
	if (!qid_has_mapping(sb->s_user_ns, qid))
		return -EINVAL;
	copy_from_if_dqblk(&fdq, &idq);
	return sb->s_qcop->set_dqblk(sb, qid, &fdq);
}

/* Q_XQUOTAON: enable quota with XFS-style flags read from userspace. */
static int quota_enable(struct super_block *sb, void __user *addr)
{
	__u32 flags;

	if (copy_from_user(&flags, addr, sizeof(flags)))
		return -EFAULT;
	if (!sb->s_qcop->quota_enable)
		return -ENOSYS;
	return sb->s_qcop->quota_enable(sb, flags);
}

/* Q_XQUOTAOFF: disable quota with XFS-style flags read from userspace. */
static int quota_disable(struct super_block *sb, void __user *addr)
{
	__u32 flags;

	if (copy_from_user(&flags, addr, sizeof(flags)))
		return -EFAULT;
	if (!sb->s_qcop->quota_disable)
		return -ENOSYS;
	return sb->s_qcop->quota_disable(sb, flags);
}

/* Map per-type qc_state flags into the XFS quota-state flag word. */
static int quota_state_to_flags(struct qc_state *state)
{
	int flags = 0;

	if (state->s_state[USRQUOTA].flags & QCI_ACCT_ENABLED)
		flags |= FS_QUOTA_UDQ_ACCT;
	if (state->s_state[USRQUOTA].flags & QCI_LIMITS_ENFORCED)
		flags |= FS_QUOTA_UDQ_ENFD;
	if (state->s_state[GRPQUOTA].flags & QCI_ACCT_ENABLED)
		flags |= FS_QUOTA_GDQ_ACCT;
	if (state->s_state[GRPQUOTA].flags & QCI_LIMITS_ENFORCED)
		flags |= FS_QUOTA_GDQ_ENFD;
	if (state->s_state[PRJQUOTA].flags & QCI_ACCT_ENABLED)
		flags |= FS_QUOTA_PDQ_ACCT;
	if (state->s_state[PRJQUOTA].flags & QCI_LIMITS_ENFORCED)
		flags |= FS_QUOTA_PDQ_ENFD;
	return flags;
}

/*
 * Fill a v1 fs_quota_stat from the filesystem's qc_state.  Returns
 * -ENOSYS when no quota type is enabled at all.
 */
static int quota_getstate(struct super_block *sb, int type,
			  struct fs_quota_stat *fqs)
{
	struct qc_state state;
	int ret;

	memset(&state, 0, sizeof (struct qc_state));
	ret = sb->s_qcop->get_state(sb, &state);
	if (ret < 0)
		return ret;

	memset(fqs, 0, sizeof(*fqs));
	fqs->qs_version = FS_QSTAT_VERSION;
	fqs->qs_flags = quota_state_to_flags(&state);
	/* No quota enabled? */
	if (!fqs->qs_flags)
		return -ENOSYS;
	fqs->qs_incoredqs = state.s_incoredqs;

	fqs->qs_btimelimit = state.s_state[type].spc_timelimit;
	fqs->qs_itimelimit = state.s_state[type].ino_timelimit;
	fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit;
	fqs->qs_bwarnlimit = state.s_state[type].spc_warnlimit;
	fqs->qs_iwarnlimit = state.s_state[type].ino_warnlimit;

	/* Inodes may be allocated even if inactive; copy out if present */
	if (state.s_state[USRQUOTA].ino) {
		fqs->qs_uquota.qfs_ino = state.s_state[USRQUOTA].ino;
		fqs->qs_uquota.qfs_nblks = state.s_state[USRQUOTA].blocks;
		fqs->qs_uquota.qfs_nextents = state.s_state[USRQUOTA].nextents;
	}
	if (state.s_state[GRPQUOTA].ino) {
		fqs->qs_gquota.qfs_ino = state.s_state[GRPQUOTA].ino;
		fqs->qs_gquota.qfs_nblks = state.s_state[GRPQUOTA].blocks;
		fqs->qs_gquota.qfs_nextents = state.s_state[GRPQUOTA].nextents;
	}
	if (state.s_state[PRJQUOTA].ino) {
		/*
		 * Q_XGETQSTAT doesn't have room for both group and project
		 * quotas. So, allow the project quota values to be copied out
		 * only if there is no group quota information available.
		 */
		if (!(state.s_state[GRPQUOTA].flags & QCI_ACCT_ENABLED)) {
			fqs->qs_gquota.qfs_ino = state.s_state[PRJQUOTA].ino;
			fqs->qs_gquota.qfs_nblks =
					state.s_state[PRJQUOTA].blocks;
			fqs->qs_gquota.qfs_nextents =
					state.s_state[PRJQUOTA].nextents;
		}
	}
	return 0;
}

/*
 * Copy one fs_qfilestat to a compat-layout user buffer; qfs_nextents
 * is re-stored at its compat offset after the bulk copy.
 */
static int compat_copy_fs_qfilestat(struct compat_fs_qfilestat __user *to,
		struct fs_qfilestat *from)
{
	if (copy_to_user(to, from, sizeof(*to)) ||
	    put_user(from->qfs_nextents, &to->qfs_nextents))
		return -EFAULT;
	return 0;
}

/* Copy an fs_quota_stat field by field into the compat user layout. */
static int compat_copy_fs_quota_stat(struct compat_fs_quota_stat __user *to,
		struct fs_quota_stat *from)
{
	if (put_user(from->qs_version, &to->qs_version) ||
	    put_user(from->qs_flags, &to->qs_flags) ||
	    put_user(from->qs_pad, &to->qs_pad) ||
	    compat_copy_fs_qfilestat(&to->qs_uquota, &from->qs_uquota) ||
	    compat_copy_fs_qfilestat(&to->qs_gquota, &from->qs_gquota) ||
	    put_user(from->qs_incoredqs, &to->qs_incoredqs) ||
	    put_user(from->qs_btimelimit, &to->qs_btimelimit) ||
	    put_user(from->qs_itimelimit, &to->qs_itimelimit) ||
	    put_user(from->qs_rtbtimelimit, &to->qs_rtbtimelimit) ||
	    put_user(from->qs_bwarnlimit, &to->qs_bwarnlimit) ||
	    put_user(from->qs_iwarnlimit, &to->qs_iwarnlimit))
		return -EFAULT;
	return 0;
}

/* Q_XGETQSTAT: build an fs_quota_stat and copy it out (compat-aware). */
static int quota_getxstate(struct super_block *sb, int type, void __user *addr)
{
	struct fs_quota_stat fqs;
	int ret;

	if (!sb->s_qcop->get_state)
		return -ENOSYS;
	ret = quota_getstate(sb, type, &fqs);
	if (ret)
		return ret;

	if (compat_need_64bit_alignment_fixup())
		return compat_copy_fs_quota_stat(addr, &fqs);
	if (copy_to_user(addr, &fqs, sizeof(fqs)))
		return -EFAULT;
	return 0;
}

/*
 * Fill an fs_quota_statv from the filesystem's qc_state; unlike the
 * v1 variant, project quota gets its own qs_pquota slot.  Returns
 * -ENOSYS when no quota type is enabled at all.
 */
static int quota_getstatev(struct super_block *sb, int type,
			   struct fs_quota_statv *fqs)
{
	struct qc_state state;
	int ret;

	memset(&state, 0, sizeof (struct qc_state));
	ret = sb->s_qcop->get_state(sb, &state);
	if (ret < 0)
		return ret;

	memset(fqs, 0, sizeof(*fqs));
	fqs->qs_version = FS_QSTAT_VERSION;
	fqs->qs_flags = quota_state_to_flags(&state);
	/* No quota enabled? */
	if (!fqs->qs_flags)
		return -ENOSYS;
	fqs->qs_incoredqs = state.s_incoredqs;

	fqs->qs_btimelimit = state.s_state[type].spc_timelimit;
	fqs->qs_itimelimit = state.s_state[type].ino_timelimit;
	fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit;
	fqs->qs_bwarnlimit = state.s_state[type].spc_warnlimit;
	fqs->qs_iwarnlimit = state.s_state[type].ino_warnlimit;

	/* Inodes may be allocated even if inactive; copy out if present */
	if (state.s_state[USRQUOTA].ino) {
		fqs->qs_uquota.qfs_ino = state.s_state[USRQUOTA].ino;
		fqs->qs_uquota.qfs_nblks = state.s_state[USRQUOTA].blocks;
		fqs->qs_uquota.qfs_nextents = state.s_state[USRQUOTA].nextents;
	}
	if (state.s_state[GRPQUOTA].ino) {
		fqs->qs_gquota.qfs_ino = state.s_state[GRPQUOTA].ino;
		fqs->qs_gquota.qfs_nblks = state.s_state[GRPQUOTA].blocks;
		fqs->qs_gquota.qfs_nextents = state.s_state[GRPQUOTA].nextents;
	}
	if (state.s_state[PRJQUOTA].ino) {
		fqs->qs_pquota.qfs_ino = state.s_state[PRJQUOTA].ino;
		fqs->qs_pquota.qfs_nblks = state.s_state[PRJQUOTA].blocks;
		fqs->qs_pquota.qfs_nextents = state.s_state[PRJQUOTA].nextents;
	}
	return 0;
}

/*
 * Q_XGETQSTATV: versioned statv interface.  Only the first byte
 * (qs_version) is read from userspace to pick the output format.
 */
static int quota_getxstatev(struct super_block *sb, int type, void __user *addr)
{
	struct fs_quota_statv fqs;
	int ret;

	if (!sb->s_qcop->get_state)
		return -ENOSYS;

	memset(&fqs, 0, sizeof(fqs));
	if (copy_from_user(&fqs, addr, 1)) /* Just read qs_version */
		return -EFAULT;

	/* If this kernel doesn't support user specified version, fail */
	switch (fqs.qs_version) {
	case FS_QSTATV_VERSION1:
		break;
	default:
		return -EINVAL;
	}
	ret = quota_getstatev(sb, type, &fqs);
	if (!ret && copy_to_user(addr, &fqs, sizeof(fqs)))
		return -EFAULT;
	return ret;
}

/*
 * XFS defines BBTOB and BTOBB macros inside fs/xfs/ and we cannot move them
 * out of there as xfsprogs rely on definitions being in that header file. So
 * just define same functions here for quota purposes.
 */
#define XFS_BB_SHIFT 9

/* Basic blocks (512B) -> bytes. */
static inline u64 quota_bbtob(u64 blocks)
{
	return blocks << XFS_BB_SHIFT;
}

/* Bytes -> basic blocks, rounding up. */
static inline u64 quota_btobb(u64 bytes)
{
	return (bytes + (1 << XFS_BB_SHIFT) - 1) >> XFS_BB_SHIFT;
}

/*
 * Reassemble a 64-bit timestamp from the split low/high on-disk fields;
 * the high 8 bits are only valid when FS_DQ_BIGTIME is set.
 */
static inline s64 copy_from_xfs_dqblk_ts(const struct fs_disk_quota *d,
		__s32 timer, __s8 timer_hi)
{
	if (d->d_fieldmask & FS_DQ_BIGTIME)
		return (u32)timer | (s64)timer_hi << 32;
	return timer;
}

/*
 * Translate an XFS fs_disk_quota (basic blocks) into an internal
 * qc_dqblk (bytes), including the FS_DQ_* -> QC_* fieldmask bits.
 */
static void copy_from_xfs_dqblk(struct qc_dqblk *dst, struct fs_disk_quota *src)
{
	dst->d_spc_hardlimit = quota_bbtob(src->d_blk_hardlimit);
	dst->d_spc_softlimit = quota_bbtob(src->d_blk_softlimit);
	dst->d_ino_hardlimit = src->d_ino_hardlimit;
	dst->d_ino_softlimit = src->d_ino_softlimit;
	dst->d_space = quota_bbtob(src->d_bcount);
	dst->d_ino_count = src->d_icount;
	dst->d_ino_timer = copy_from_xfs_dqblk_ts(src, src->d_itimer,
						  src->d_itimer_hi);
	dst->d_spc_timer = copy_from_xfs_dqblk_ts(src, src->d_btimer,
						  src->d_btimer_hi);
	dst->d_ino_warns = src->d_iwarns;
	dst->d_spc_warns = src->d_bwarns;
	dst->d_rt_spc_hardlimit = quota_bbtob(src->d_rtb_hardlimit);
	dst->d_rt_spc_softlimit = quota_bbtob(src->d_rtb_softlimit);
	dst->d_rt_space = quota_bbtob(src->d_rtbcount);
	dst->d_rt_spc_timer = copy_from_xfs_dqblk_ts(src, src->d_rtbtimer,
						     src->d_rtbtimer_hi);
	dst->d_rt_spc_warns = src->d_rtbwarns;
	dst->d_fieldmask = 0;
	if (src->d_fieldmask & FS_DQ_ISOFT)
		dst->d_fieldmask |= QC_INO_SOFT;
	if (src->d_fieldmask & FS_DQ_IHARD)
		dst->d_fieldmask |= QC_INO_HARD;
	if (src->d_fieldmask & FS_DQ_BSOFT)
		dst->d_fieldmask |= QC_SPC_SOFT;
	if (src->d_fieldmask & FS_DQ_BHARD)
		dst->d_fieldmask |= QC_SPC_HARD;
	if (src->d_fieldmask & FS_DQ_RTBSOFT)
		dst->d_fieldmask |= QC_RT_SPC_SOFT;
	if (src->d_fieldmask & FS_DQ_RTBHARD)
		dst->d_fieldmask |= QC_RT_SPC_HARD;
	if (src->d_fieldmask & FS_DQ_BTIMER)
		dst->d_fieldmask |= QC_SPC_TIMER;
	if (src->d_fieldmask & FS_DQ_ITIMER)
		dst->d_fieldmask |= QC_INO_TIMER;
	if (src->d_fieldmask & FS_DQ_RTBTIMER)
		dst->d_fieldmask |= QC_RT_SPC_TIMER;
	if (src->d_fieldmask & FS_DQ_BWARNS)
		dst->d_fieldmask |= QC_SPC_WARNS;
	if (src->d_fieldmask & FS_DQ_IWARNS)
		dst->d_fieldmask |= QC_INO_WARNS;
	if (src->d_fieldmask & FS_DQ_RTBWARNS)
		dst->d_fieldmask |= QC_RT_SPC_WARNS;
	if (src->d_fieldmask & FS_DQ_BCOUNT)
		dst->d_fieldmask |= QC_SPACE;
	if (src->d_fieldmask & FS_DQ_ICOUNT)
		dst->d_fieldmask |= QC_INO_COUNT;
	if (src->d_fieldmask & FS_DQ_RTBCOUNT)
		dst->d_fieldmask |= QC_RT_SPACE;
}

/*
 * Extract the global (per-type, not per-dquot) timer/warn limits from
 * an fs_disk_quota into a qc_info for ->set_info.
 */
static void copy_qcinfo_from_xfs_dqblk(struct qc_info *dst,
				       struct fs_disk_quota *src)
{
	memset(dst, 0, sizeof(*dst));
	dst->i_spc_timelimit = src->d_btimer;
	dst->i_ino_timelimit = src->d_itimer;
	dst->i_rt_spc_timelimit = src->d_rtbtimer;
	dst->i_ino_warnlimit = src->d_iwarns;
	dst->i_spc_warnlimit = src->d_bwarns;
	dst->i_rt_spc_warnlimit = src->d_rtbwarns;
	if (src->d_fieldmask & FS_DQ_BWARNS)
		dst->i_fieldmask |= QC_SPC_WARNS;
	if (src->d_fieldmask & FS_DQ_IWARNS)
		dst->i_fieldmask |= QC_INO_WARNS;
	if (src->d_fieldmask & FS_DQ_RTBWARNS)
		dst->i_fieldmask |= QC_RT_SPC_WARNS;
	if (src->d_fieldmask & FS_DQ_BTIMER)
		dst->i_fieldmask |= QC_SPC_TIMER;
	if (src->d_fieldmask & FS_DQ_ITIMER)
		dst->i_fieldmask |= QC_INO_TIMER;
	if (src->d_fieldmask & FS_DQ_RTBTIMER)
		dst->i_fieldmask |= QC_RT_SPC_TIMER;
}

/*
 * Q_XSETQLIM: set XFS-style quota limits.  Timer/warn limits written
 * for id 0 are global and routed through ->set_info first; the
 * remaining per-dquot fields then go through ->set_dqblk.
 */
static int quota_setxquota(struct super_block *sb, int type, qid_t id,
			   void __user *addr)
{
	struct fs_disk_quota fdq;
	struct qc_dqblk qdq;
	struct kqid qid;

	if (copy_from_user(&fdq, addr, sizeof(fdq)))
		return -EFAULT;
	if (!sb->s_qcop->set_dqblk)
		return -ENOSYS;
	qid = make_kqid(current_user_ns(), type, id);
	if (!qid_has_mapping(sb->s_user_ns, qid))
		return -EINVAL;
	/* Are we actually setting timer / warning limits for all users? */
	if (from_kqid(sb->s_user_ns, qid) == 0 &&
	    fdq.d_fieldmask & (FS_DQ_WARNS_MASK | FS_DQ_TIMER_MASK)) {
		struct qc_info qinfo;
		int ret;

		if (!sb->s_qcop->set_info)
			return -EINVAL;
		copy_qcinfo_from_xfs_dqblk(&qinfo, &fdq);
		ret = sb->s_qcop->set_info(sb, type, &qinfo);
		if (ret)
			return ret;
		/* These are already done */
		fdq.d_fieldmask &= ~(FS_DQ_WARNS_MASK | FS_DQ_TIMER_MASK);
	}
	copy_from_xfs_dqblk(&qdq, &fdq);
	return sb->s_qcop->set_dqblk(sb, qid, &qdq);
}

/*
 * Split a 64-bit timestamp into the low/high on-disk fields; the high
 * part is only stored when FS_DQ_BIGTIME is set in @d.
 */
static inline void copy_to_xfs_dqblk_ts(const struct fs_disk_quota *d,
		__s32 *timer_lo, __s8 *timer_hi, s64 timer)
{
	*timer_lo = timer;
	if (d->d_fieldmask & FS_DQ_BIGTIME)
		*timer_hi = timer >> 32;
}

/* Does this timestamp overflow a plain 32-bit field? */
static inline bool want_bigtime(s64 timer)
{
	return timer > S32_MAX || timer < S32_MIN;
}

/*
 * Translate an internal qc_dqblk (bytes) into an XFS fs_disk_quota
 * (basic blocks), enabling FS_DQ_BIGTIME when any timer needs it.
 */
static void copy_to_xfs_dqblk(struct fs_disk_quota *dst, struct qc_dqblk *src,
			      int type, qid_t id)
{
	memset(dst, 0, sizeof(*dst));
	if (want_bigtime(src->d_ino_timer) || want_bigtime(src->d_spc_timer) ||
	    want_bigtime(src->d_rt_spc_timer))
		dst->d_fieldmask |= FS_DQ_BIGTIME;
	dst->d_version = FS_DQUOT_VERSION;
	dst->d_id = id;
	if (type == USRQUOTA)
		dst->d_flags = FS_USER_QUOTA;
	else if (type == PRJQUOTA)
		dst->d_flags = FS_PROJ_QUOTA;
	else
		dst->d_flags = FS_GROUP_QUOTA;
	dst->d_blk_hardlimit = quota_btobb(src->d_spc_hardlimit);
	dst->d_blk_softlimit = quota_btobb(src->d_spc_softlimit);
	dst->d_ino_hardlimit = src->d_ino_hardlimit;
	dst->d_ino_softlimit = src->d_ino_softlimit;
	dst->d_bcount = quota_btobb(src->d_space);
	dst->d_icount = src->d_ino_count;
	copy_to_xfs_dqblk_ts(dst, &dst->d_itimer, &dst->d_itimer_hi,
			     src->d_ino_timer);
	copy_to_xfs_dqblk_ts(dst, &dst->d_btimer, &dst->d_btimer_hi,
			     src->d_spc_timer);
	dst->d_iwarns = src->d_ino_warns;
	dst->d_bwarns = src->d_spc_warns;
	dst->d_rtb_hardlimit = quota_btobb(src->d_rt_spc_hardlimit);
	dst->d_rtb_softlimit = quota_btobb(src->d_rt_spc_softlimit);
	dst->d_rtbcount = quota_btobb(src->d_rt_space);
	copy_to_xfs_dqblk_ts(dst, &dst->d_rtbtimer, &dst->d_rtbtimer_hi,
			     src->d_rt_spc_timer);
	dst->d_rtbwarns = src->d_rt_spc_warns;
}

/* Q_XGETQUOTA: fetch one dquot and copy it out in XFS format. */
static int quota_getxquota(struct super_block *sb, int type, qid_t id,
			   void __user *addr)
{
	struct fs_disk_quota fdq;
	struct qc_dqblk qdq;
	struct kqid qid;
	int ret;

	if (!sb->s_qcop->get_dqblk)
		return -ENOSYS;
	qid = make_kqid(current_user_ns(), type, id);
	if (!qid_has_mapping(sb->s_user_ns, qid))
		return -EINVAL;
	ret = sb->s_qcop->get_dqblk(sb, qid, &qdq);
	if (ret)
		return ret;
	copy_to_xfs_dqblk(&fdq, &qdq, type, id);
	if (copy_to_user(addr, &fdq, sizeof(fdq)))
		return -EFAULT;
	return ret;
}

/*
 * Return quota for next active quota >= this id, if any exists,
 * otherwise return -ENOENT via ->get_nextdqblk.
 */
static int quota_getnextxquota(struct super_block *sb, int type, qid_t id,
			       void __user *addr)
{
	struct fs_disk_quota fdq;
	struct qc_dqblk qdq;
	struct kqid qid;
	qid_t id_out;
	int ret;

	if (!sb->s_qcop->get_nextdqblk)
		return -ENOSYS;
	qid = make_kqid(current_user_ns(), type, id);
	if (!qid_has_mapping(sb->s_user_ns, qid))
		return -EINVAL;
	ret = sb->s_qcop->get_nextdqblk(sb, &qid, &qdq);
	if (ret)
		return ret;
	id_out = from_kqid(current_user_ns(), qid);
	copy_to_xfs_dqblk(&fdq, &qdq, type, id_out);
	if (copy_to_user(addr, &fdq, sizeof(fdq)))
		return -EFAULT;
	return ret;
}

/* Q_XQUOTARM: free disk space taken by the given types' quota data. */
static int quota_rmxquota(struct super_block *sb, void __user *addr)
{
	__u32 flags;

	if (copy_from_user(&flags, addr, sizeof(flags)))
		return -EFAULT;
	if (!sb->s_qcop->rm_xquota)
		return -ENOSYS;
	return sb->s_qcop->rm_xquota(sb, flags);
}

/* Copy parameters and call proper function */
static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
		       void __user *addr, const struct path *path)
{
	int ret;

	/* type was range-checked by the callers; this kills speculation */
	type = array_index_nospec(type, MAXQUOTAS);
	/*
	 * Quota not supported on this fs? Check this before s_quota_types
	 * since they needn't be set if quota is not supported at all.
	 */
	if (!sb->s_qcop)
		return -ENOSYS;
	if (!(sb->s_quota_types & (1 << type)))
		return -EINVAL;

	ret = check_quotactl_permission(sb, type, cmd, id);
	if (ret < 0)
		return ret;

	switch (cmd) {
	case Q_QUOTAON:
		return quota_quotaon(sb, type, id, path);
	case Q_QUOTAOFF:
		return quota_quotaoff(sb, type);
	case Q_GETFMT:
		return quota_getfmt(sb, type, addr);
	case Q_GETINFO:
		return quota_getinfo(sb, type, addr);
	case Q_SETINFO:
		return quota_setinfo(sb, type, addr);
	case Q_GETQUOTA:
		return quota_getquota(sb, type, id, addr);
	case Q_GETNEXTQUOTA:
		return quota_getnextquota(sb, type, id, addr);
	case Q_SETQUOTA:
		return quota_setquota(sb, type, id, addr);
	case Q_SYNC:
		if (!sb->s_qcop->quota_sync)
			return -ENOSYS;
		return sb->s_qcop->quota_sync(sb, type);
	case Q_XQUOTAON:
		return quota_enable(sb, addr);
	case Q_XQUOTAOFF:
		return quota_disable(sb, addr);
	case Q_XQUOTARM:
		return quota_rmxquota(sb, addr);
	case Q_XGETQSTAT:
		return quota_getxstate(sb, type, addr);
	case Q_XGETQSTATV:
		return quota_getxstatev(sb, type, addr);
	case Q_XSETQLIM:
		return quota_setxquota(sb, type, id, addr);
	case Q_XGETQUOTA:
		return quota_getxquota(sb, type, id, addr);
	case Q_XGETNEXTQUOTA:
		return quota_getnextxquota(sb, type, id, addr);
	case Q_XQUOTASYNC:
		if (sb_rdonly(sb))
			return -EROFS;
		/* XFS quotas are fully coherent now, making this call a noop */
		return 0;
	default:
		return -EINVAL;
	}
}

/* Return 1 if 'cmd' will block on frozen filesystem */
static int quotactl_cmd_write(int cmd)
{
	/*
	 * We cannot allow Q_GETQUOTA and Q_GETNEXTQUOTA without write access
	 * as dquot_acquire() may allocate space for new structure and OCFS2
	 * needs to increment on-disk use count.
	 */
	switch (cmd) {
	case Q_GETFMT:
	case Q_GETINFO:
	case Q_SYNC:
	case Q_XGETQSTAT:
	case Q_XGETQSTATV:
	case Q_XGETQUOTA:
	case Q_XGETNEXTQUOTA:
	case Q_XQUOTASYNC:
		return 0;
	}
	return 1;
}

/* Return true if quotactl command is manipulating quota on/off state */
static bool quotactl_cmd_onoff(int cmd)
{
	return (cmd == Q_QUOTAON) || (cmd == Q_QUOTAOFF) ||
		 (cmd == Q_XQUOTAON) || (cmd == Q_XQUOTAOFF);
}

/*
 * look up a superblock on which quota ops will be performed
 * - use the name of a block device to find the superblock thereon
 */
static struct super_block *quotactl_block(const char __user *special, int cmd)
{
#ifdef CONFIG_BLOCK
	struct super_block *sb;
	struct filename *tmp = getname(special);
	bool excl = false, thawed = false;
	int error;
	dev_t dev;

	if (IS_ERR(tmp))
		return ERR_CAST(tmp);
	error = lookup_bdev(tmp->name, &dev);
	putname(tmp);
	if (error)
		return ERR_PTR(error);

	/* on/off commands need s_umount exclusively; writers need a thawed fs */
	if (quotactl_cmd_onoff(cmd)) {
		excl = true;
		thawed = true;
	} else if (quotactl_cmd_write(cmd)) {
		thawed = true;
	}

retry:
	sb = user_get_super(dev, excl);
	if (!sb)
		return ERR_PTR(-ENODEV);
	if (thawed && sb->s_writers.frozen != SB_UNFROZEN) {
		/* drop s_umount before sleeping, then re-lookup the sb */
		if (excl)
			up_write(&sb->s_umount);
		else
			up_read(&sb->s_umount);
		wait_event(sb->s_writers.wait_unfrozen,
			   sb->s_writers.frozen == SB_UNFROZEN);
		put_super(sb);
		goto retry;
	}
	return sb;

#else
	return ERR_PTR(-ENODEV);
#endif
}

/*
 * This is the system call interface. This communicates with
 * the user-level programs. Currently this only supports diskquota
 * calls. Maybe we need to add the process quotas etc. in the future,
 * but we probably should use rlimits for that.
 */
SYSCALL_DEFINE4(quotactl, unsigned int, cmd, const char __user *, special,
		qid_t, id, void __user *, addr)
{
	uint cmds, type;
	struct super_block *sb = NULL;
	struct path path, *pathp = NULL;
	int ret;

	cmds = cmd >> SUBCMDSHIFT;
	type = cmd & SUBCMDMASK;

	if (type >= MAXQUOTAS)
		return -EINVAL;

	/*
	 * As a special case Q_SYNC can be called without a specific device.
	 * It will iterate all superblocks that have quota enabled and call
	 * the sync action on each of them.
	 */
	if (!special) {
		if (cmds == Q_SYNC)
			return quota_sync_all(type);
		return -ENODEV;
	}

	/*
	 * Path for quotaon has to be resolved before grabbing superblock
	 * because that gets s_umount sem which is also possibly needed by path
	 * resolution (think about autofs) and thus deadlocks could arise.
	 */
	if (cmds == Q_QUOTAON) {
		ret = user_path_at(AT_FDCWD, addr, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
		if (ret)
			pathp = ERR_PTR(ret);
		else
			pathp = &path;
	}

	sb = quotactl_block(special, cmds);
	if (IS_ERR(sb)) {
		ret = PTR_ERR(sb);
		goto out;
	}

	ret = do_quotactl(sb, type, cmds, id, addr, pathp);

	/* on/off commands held s_umount exclusively (see quotactl_block()) */
	if (!quotactl_cmd_onoff(cmds))
		drop_super(sb);
	else
		drop_super_exclusive(sb);
out:
	if (pathp && !IS_ERR(pathp))
		path_put(pathp);
	return ret;
}

SYSCALL_DEFINE4(quotactl_path, unsigned int, cmd, const char __user *,
		mountpoint, qid_t, id, void __user *, addr)
{
	struct super_block *sb;
	struct path mountpath;
	unsigned int cmds = cmd >> SUBCMDSHIFT;
	unsigned int type = cmd & SUBCMDMASK;
	int ret;

	if (type >= MAXQUOTAS)
		return -EINVAL;

	ret = user_path_at(AT_FDCWD, mountpoint,
			     LOOKUP_FOLLOW | LOOKUP_AUTOMOUNT, &mountpath);
	if (ret)
		return ret;

	sb = mountpath.mnt->mnt_sb;

	if (quotactl_cmd_write(cmds)) {
		ret = mnt_want_write(mountpath.mnt);
		if (ret)
			goto out;
	}

	if (quotactl_cmd_onoff(cmds))
		down_write(&sb->s_umount);
	else
		down_read(&sb->s_umount);

	/* Q_QUOTAON would need a path we don't have -> pass an error ptr */
	ret = do_quotactl(sb, type, cmds, id, addr, ERR_PTR(-EINVAL));

	if (quotactl_cmd_onoff(cmds))
		up_write(&sb->s_umount);
	else
		up_read(&sb->s_umount);

	if (quotactl_cmd_write(cmds))
		mnt_drop_write(mountpath.mnt);
out:
	path_put(&mountpath);

	return ret;
}