// SPDX-License-Identifier: GPL-2.0
/*
 * Quota code necessary even when VFS quota support is not compiled
 * into the kernel. The interesting stuff is over in dquot.c, here
 * we have symbols for initial quotactl(2) handling, the sysctl(2)
 * variables, etc - things needed even when quota support disabled.
 */

#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <asm/current.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/capability.h>
#include <linux/quotaops.h>
#include <linux/types.h>
#include <linux/writeback.h>
#include <linux/nospec.h>

/*
 * Check whether the current task may perform quotactl command @cmd for
 * quota type @type and id @id on superblock @sb.
 *
 * Read-only status commands need no privilege, querying a dquot the
 * caller "owns" (matching euid / egroup in the caller's user namespace)
 * is allowed, and everything else requires CAP_SYS_ADMIN.  In all cases
 * the security module gets the final say via security_quotactl().
 *
 * Returns 0 if access is permitted, -EPERM or an LSM error otherwise.
 */
static int check_quotactl_permission(struct super_block *sb, int type, int cmd,
				     qid_t id)
{
	switch (cmd) {
	/* these commands do not require any special privileges */
	case Q_GETFMT:
	case Q_SYNC:
	case Q_GETINFO:
	case Q_XGETQSTAT:
	case Q_XGETQSTATV:
	case Q_XQUOTASYNC:
		break;
	/* allow to query information for dquots we "own" */
	case Q_GETQUOTA:
	case Q_XGETQUOTA:
		if ((type == USRQUOTA && uid_eq(current_euid(), make_kuid(current_user_ns(), id))) ||
		    (type == GRPQUOTA && in_egroup_p(make_kgid(current_user_ns(), id))))
			break;
		fallthrough;
	default:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
	}

	return security_quotactl(cmd, type, id, sb);
}

/*
 * iterate_supers() callback: sync quotas of the type pointed to by @arg
 * on @sb, provided the filesystem implements ->quota_sync and actually
 * supports that quota type.  Errors from ->quota_sync are ignored here
 * (best effort across all superblocks).
 */
static void quota_sync_one(struct super_block *sb, void *arg)
{
	int type = *(int *)arg;

	if (sb->s_qcop && sb->s_qcop->quota_sync &&
	    (sb->s_quota_types & (1 << type)))
		sb->s_qcop->quota_sync(sb, type);
}

/*
 * Sync quotas of @type on every superblock in the system.  Used for
 * Q_SYNC issued without a specific device.  Returns the LSM verdict;
 * the per-superblock sync itself is best effort.
 */
static int quota_sync_all(int type)
{
	int ret;

	ret = security_quotactl(Q_SYNC, type, 0, NULL);
	if (!ret)
		iterate_supers(quota_sync_one, &type);
	return ret;
}

/*
 * Translate a quota type into the corresponding XFS-style enforcement
 * flag used by ->quota_enable / ->quota_disable.  Returns 0 for an
 * unrecognized type.
 */
unsigned int qtype_enforce_flag(int type)
{
	switch (type) {
	case USRQUOTA:
		return FS_QUOTA_UDQ_ENFD;
	case GRPQUOTA:
		return FS_QUOTA_GDQ_ENFD;
	case PRJQUOTA:
		return FS_QUOTA_PDQ_ENFD;
	}
	return 0;
}

/*
 * Handle Q_QUOTAON.  Filesystems with always-on accounting implement
 * ->quota_enable (which just turns on enforcement and ignores @id and
 * @path); legacy filesystems implement ->quota_on with the path of the
 * quota file.  @path may carry an ERR_PTR from path resolution in the
 * caller — propagate that error only when ->quota_on is actually needed.
 */
static int quota_quotaon(struct super_block *sb, int type, qid_t id,
			 const struct path *path)
{
	if (!sb->s_qcop->quota_on && !sb->s_qcop->quota_enable)
		return -ENOSYS;
	if (sb->s_qcop->quota_enable)
		return sb->s_qcop->quota_enable(sb, qtype_enforce_flag(type));
	if (IS_ERR(path))
		return PTR_ERR(path);
	return sb->s_qcop->quota_on(sb, type, id, path);
}

/*
 * Handle Q_QUOTAOFF — mirror image of quota_quotaon(): prefer
 * ->quota_disable (turns off enforcement only), fall back to
 * ->quota_off.
 */
static int quota_quotaoff(struct super_block *sb, int type)
{
	if (!sb->s_qcop->quota_off && !sb->s_qcop->quota_disable)
		return -ENOSYS;
	if (sb->s_qcop->quota_disable)
		return sb->s_qcop->quota_disable(sb, qtype_enforce_flag(type));
	return sb->s_qcop->quota_off(sb, type);
}

/*
 * Handle Q_GETFMT: copy the active quota format id for @type to
 * userspace.  Returns -ESRCH when quota of this type is not active.
 */
static int quota_getfmt(struct super_block *sb, int type, void __user *addr)
{
	__u32 fmt;

	if (!sb_has_quota_active(sb, type))
		return -ESRCH;
	fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id;
	if (copy_to_user(addr, &fmt, sizeof(fmt)))
		return -EFAULT;
	return 0;
}

/*
 * Handle Q_GETINFO: build a struct if_dqinfo for userspace from the
 * filesystem's ->get_state information for quota type @type.
 * Returns -ESRCH when accounting for this type is not enabled.
 */
static int quota_getinfo(struct super_block *sb, int type, void __user *addr)
{
	struct qc_state state;
	struct qc_type_state *tstate;
	struct if_dqinfo uinfo;
	int ret;

	if (!sb->s_qcop->get_state)
		return -ENOSYS;
	ret = sb->s_qcop->get_state(sb, &state);
	if (ret)
		return ret;
	tstate = state.s_state + type;
	if (!(tstate->flags & QCI_ACCT_ENABLED))
		return -ESRCH;
	memset(&uinfo, 0, sizeof(uinfo));
	uinfo.dqi_bgrace = tstate->spc_timelimit;
	uinfo.dqi_igrace = tstate->ino_timelimit;
	if (tstate->flags & QCI_SYSFILE)
		uinfo.dqi_flags |= DQF_SYS_FILE;
	if (tstate->flags & QCI_ROOT_SQUASH)
		uinfo.dqi_flags |= DQF_ROOT_SQUASH;
	uinfo.dqi_valid = IIF_ALL;
	if (copy_to_user(addr, &uinfo, sizeof(uinfo)))
		return -EFAULT;
	return 0;
}

/*
 * Handle Q_SETINFO: validate the userspace struct if_dqinfo (only
 * flags / block grace / inode grace may be set, and only
 * DQF_SETINFO_MASK flag bits are accepted) and translate it into a
 * struct qc_info for the filesystem's ->set_info.
 */
static int quota_setinfo(struct super_block *sb, int type, void __user *addr)
{
	struct if_dqinfo info;
	struct qc_info qinfo;

	if (copy_from_user(&info, addr, sizeof(info)))
		return -EFAULT;
	if (!sb->s_qcop->set_info)
		return -ENOSYS;
	if (info.dqi_valid & ~(IIF_FLAGS | IIF_BGRACE | IIF_IGRACE))
		return -EINVAL;
	memset(&qinfo, 0, sizeof(qinfo));
	if (info.dqi_valid & IIF_FLAGS) {
		if (info.dqi_flags & ~DQF_SETINFO_MASK)
			return -EINVAL;
		if (info.dqi_flags & DQF_ROOT_SQUASH)
			qinfo.i_flags |= QCI_ROOT_SQUASH;
		qinfo.i_fieldmask |= QC_FLAGS;
	}
	if (info.dqi_valid & IIF_BGRACE) {
		qinfo.i_spc_timelimit = info.dqi_bgrace;
		qinfo.i_fieldmask |= QC_SPC_TIMER;
	}
	if (info.dqi_valid & IIF_IGRACE) {
		qinfo.i_ino_timelimit = info.dqi_igrace;
		qinfo.i_fieldmask |= QC_INO_TIMER;
	}
	return sb->s_qcop->set_info(sb, type, &qinfo);
}

/* Convert quota blocks to bytes. */
static inline qsize_t qbtos(qsize_t blocks)
{
	return blocks << QIF_DQBLKSIZE_BITS;
}

/* Convert bytes to quota blocks, rounding up. */
static inline qsize_t stoqb(qsize_t space)
{
	return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS;
}

/*
 * Fill a userspace-format struct if_dqblk from the kernel-internal
 * struct qc_dqblk.  Space limits are converted from bytes to quota
 * blocks; all fields are marked valid.
 */
static void copy_to_if_dqblk(struct if_dqblk *dst, struct qc_dqblk *src)
{
	memset(dst, 0, sizeof(*dst));
	dst->dqb_bhardlimit = stoqb(src->d_spc_hardlimit);
	dst->dqb_bsoftlimit = stoqb(src->d_spc_softlimit);
	dst->dqb_curspace = src->d_space;
	dst->dqb_ihardlimit = src->d_ino_hardlimit;
	dst->dqb_isoftlimit = src->d_ino_softlimit;
	dst->dqb_curinodes = src->d_ino_count;
	dst->dqb_btime = src->d_spc_timer;
	dst->dqb_itime = src->d_ino_timer;
	dst->dqb_valid = QIF_ALL;
}

/*
 * Handle Q_GETQUOTA: fetch limits and usage for (@type, @id) via
 * ->get_dqblk and copy them to userspace.  The id is interpreted in the
 * caller's user namespace and must map into the superblock's namespace.
 */
static int quota_getquota(struct super_block *sb, int type, qid_t id,
			  void __user *addr)
{
	struct kqid qid;
	struct qc_dqblk fdq;
	struct if_dqblk idq;
	int ret;

	if (!sb->s_qcop->get_dqblk)
		return -ENOSYS;
	qid = make_kqid(current_user_ns(), type, id);
	if (!qid_has_mapping(sb->s_user_ns, qid))
		return -EINVAL;
	ret = sb->s_qcop->get_dqblk(sb, qid, &fdq);
	if (ret)
		return ret;
	copy_to_if_dqblk(&idq, &fdq);
	if (copy_to_user(addr, &idq, sizeof(idq)))
		return -EFAULT;
	return 0;
}

/*
 * Handle Q_GETNEXTQUOTA: return quota for next active quota >= this id,
 * if any exists, otherwise -ENOENT via ->get_nextdqblk.  The id of the
 * quota actually found is returned in dqb_id.
 */
static int quota_getnextquota(struct super_block *sb, int type, qid_t id,
			      void __user *addr)
{
	struct kqid qid;
	struct qc_dqblk fdq;
	struct if_nextdqblk idq;
	int ret;

	if (!sb->s_qcop->get_nextdqblk)
		return -ENOSYS;
	qid = make_kqid(current_user_ns(), type, id);
	if (!qid_has_mapping(sb->s_user_ns, qid))
		return -EINVAL;
	ret = sb->s_qcop->get_nextdqblk(sb, &qid, &fdq);
	if (ret)
		return ret;
	/* struct if_nextdqblk is a superset of struct if_dqblk */
	copy_to_if_dqblk((struct if_dqblk *)&idq, &fdq);
	idq.dqb_id = from_kqid(current_user_ns(), qid);
	if (copy_to_user(addr, &idq, sizeof(idq)))
		return -EFAULT;
	return 0;
}

/*
 * Translate a userspace struct if_dqblk into the kernel-internal
 * struct qc_dqblk, converting quota blocks to bytes and QIF_* validity
 * bits into the QC_* fieldmask.
 */
static void copy_from_if_dqblk(struct qc_dqblk *dst, struct if_dqblk *src)
{
	dst->d_spc_hardlimit = qbtos(src->dqb_bhardlimit);
	dst->d_spc_softlimit = qbtos(src->dqb_bsoftlimit);
	dst->d_space = src->dqb_curspace;
	dst->d_ino_hardlimit = src->dqb_ihardlimit;
	dst->d_ino_softlimit = src->dqb_isoftlimit;
	dst->d_ino_count = src->dqb_curinodes;
	dst->d_spc_timer = src->dqb_btime;
	dst->d_ino_timer = src->dqb_itime;

	dst->d_fieldmask = 0;
	if (src->dqb_valid & QIF_BLIMITS)
		dst->d_fieldmask |= QC_SPC_SOFT | QC_SPC_HARD;
	if (src->dqb_valid & QIF_SPACE)
		dst->d_fieldmask |= QC_SPACE;
	if (src->dqb_valid & QIF_ILIMITS)
		dst->d_fieldmask |= QC_INO_SOFT | QC_INO_HARD;
	if (src->dqb_valid & QIF_INODES)
		dst->d_fieldmask |= QC_INO_COUNT;
	if (src->dqb_valid & QIF_BTIME)
		dst->d_fieldmask |= QC_SPC_TIMER;
	if (src->dqb_valid & QIF_ITIME)
		dst->d_fieldmask |= QC_INO_TIMER;
}
/*
 * Handle Q_SETQUOTA: copy a struct if_dqblk from userspace, validate
 * the id mapping, translate it into a struct qc_dqblk and hand it to
 * the filesystem's ->set_dqblk.
 */
static int quota_setquota(struct super_block *sb, int type, qid_t id,
			  void __user *addr)
{
	struct qc_dqblk fdq;
	struct if_dqblk idq;
	struct kqid qid;

	if (copy_from_user(&idq, addr, sizeof(idq)))
		return -EFAULT;
	if (!sb->s_qcop->set_dqblk)
		return -ENOSYS;
	qid = make_kqid(current_user_ns(), type, id);
	if (!qid_has_mapping(sb->s_user_ns, qid))
		return -EINVAL;
	copy_from_if_dqblk(&fdq, &idq);
	return sb->s_qcop->set_dqblk(sb, qid, &fdq);
}

/*
 * Handle Q_XQUOTAON: pass the userspace XFS-style enable flags straight
 * to ->quota_enable.  Flag validation is left to the filesystem.
 */
static int quota_enable(struct super_block *sb, void __user *addr)
{
	__u32 flags;

	if (copy_from_user(&flags, addr, sizeof(flags)))
		return -EFAULT;
	if (!sb->s_qcop->quota_enable)
		return -ENOSYS;
	return sb->s_qcop->quota_enable(sb, flags);
}

/*
 * Handle Q_XQUOTAOFF: counterpart of quota_enable() using
 * ->quota_disable.
 */
static int quota_disable(struct super_block *sb, void __user *addr)
{
	__u32 flags;

	if (copy_from_user(&flags, addr, sizeof(flags)))
		return -EFAULT;
	if (!sb->s_qcop->quota_disable)
		return -ENOSYS;
	return sb->s_qcop->quota_disable(sb, flags);
}

/*
 * Convert per-type QCI_* state flags into the combined XFS-style
 * FS_QUOTA_*_ACCT / FS_QUOTA_*_ENFD flag word reported by Q_XGETQSTAT
 * and Q_XGETQSTATV.
 */
static int quota_state_to_flags(struct qc_state *state)
{
	int flags = 0;

	if (state->s_state[USRQUOTA].flags & QCI_ACCT_ENABLED)
		flags |= FS_QUOTA_UDQ_ACCT;
	if (state->s_state[USRQUOTA].flags & QCI_LIMITS_ENFORCED)
		flags |= FS_QUOTA_UDQ_ENFD;
	if (state->s_state[GRPQUOTA].flags & QCI_ACCT_ENABLED)
		flags |= FS_QUOTA_GDQ_ACCT;
	if (state->s_state[GRPQUOTA].flags & QCI_LIMITS_ENFORCED)
		flags |= FS_QUOTA_GDQ_ENFD;
	if (state->s_state[PRJQUOTA].flags & QCI_ACCT_ENABLED)
		flags |= FS_QUOTA_PDQ_ACCT;
	if (state->s_state[PRJQUOTA].flags & QCI_LIMITS_ENFORCED)
		flags |= FS_QUOTA_PDQ_ENFD;
	return flags;
}

/*
 * Build the struct fs_quota_stat answer for Q_XGETQSTAT from the
 * filesystem's ->get_state.  Time/warn limits are reported for @type;
 * quota-file inode information is reported for all types that have one.
 * Returns -ENOSYS when no quota is enabled at all.
 */
static int quota_getstate(struct super_block *sb, int type,
			  struct fs_quota_stat *fqs)
{
	struct qc_state state;
	int ret;

	memset(&state, 0, sizeof (struct qc_state));
	ret = sb->s_qcop->get_state(sb, &state);
	if (ret < 0)
		return ret;

	memset(fqs, 0, sizeof(*fqs));
	fqs->qs_version = FS_QSTAT_VERSION;
	fqs->qs_flags = quota_state_to_flags(&state);
	/* No quota enabled? */
	if (!fqs->qs_flags)
		return -ENOSYS;
	fqs->qs_incoredqs = state.s_incoredqs;

	fqs->qs_btimelimit = state.s_state[type].spc_timelimit;
	fqs->qs_itimelimit = state.s_state[type].ino_timelimit;
	fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit;
	fqs->qs_bwarnlimit = state.s_state[type].spc_warnlimit;
	fqs->qs_iwarnlimit = state.s_state[type].ino_warnlimit;

	/* Inodes may be allocated even if inactive; copy out if present */
	if (state.s_state[USRQUOTA].ino) {
		fqs->qs_uquota.qfs_ino = state.s_state[USRQUOTA].ino;
		fqs->qs_uquota.qfs_nblks = state.s_state[USRQUOTA].blocks;
		fqs->qs_uquota.qfs_nextents = state.s_state[USRQUOTA].nextents;
	}
	if (state.s_state[GRPQUOTA].ino) {
		fqs->qs_gquota.qfs_ino = state.s_state[GRPQUOTA].ino;
		fqs->qs_gquota.qfs_nblks = state.s_state[GRPQUOTA].blocks;
		fqs->qs_gquota.qfs_nextents = state.s_state[GRPQUOTA].nextents;
	}
	if (state.s_state[PRJQUOTA].ino) {
		/*
		 * Q_XGETQSTAT doesn't have room for both group and project
		 * quotas. So, allow the project quota values to be copied out
		 * only if there is no group quota information available.
		 */
		if (!(state.s_state[GRPQUOTA].flags & QCI_ACCT_ENABLED)) {
			fqs->qs_gquota.qfs_ino = state.s_state[PRJQUOTA].ino;
			fqs->qs_gquota.qfs_nblks =
					state.s_state[PRJQUOTA].blocks;
			fqs->qs_gquota.qfs_nextents =
					state.s_state[PRJQUOTA].nextents;
		}
	}
	return 0;
}

/*
 * Handle Q_XGETQSTAT: build the stat structure and copy it to
 * userspace.
 */
static int quota_getxstate(struct super_block *sb, int type, void __user *addr)
{
	struct fs_quota_stat fqs;
	int ret;

	if (!sb->s_qcop->get_state)
		return -ENOSYS;
	ret = quota_getstate(sb, type, &fqs);
	if (!ret && copy_to_user(addr, &fqs, sizeof(fqs)))
		return -EFAULT;
	return ret;
}

/*
 * Build the struct fs_quota_statv answer for Q_XGETQSTATV — like
 * quota_getstate() but with separate room for project quota, so no
 * group/project aliasing is needed.
 */
static int quota_getstatev(struct super_block *sb, int type,
			   struct fs_quota_statv *fqs)
{
	struct qc_state state;
	int ret;

	memset(&state, 0, sizeof (struct qc_state));
	ret = sb->s_qcop->get_state(sb, &state);
	if (ret < 0)
		return ret;

	memset(fqs, 0, sizeof(*fqs));
	fqs->qs_version = FS_QSTAT_VERSION;
	fqs->qs_flags = quota_state_to_flags(&state);
	/* No quota enabled? */
	if (!fqs->qs_flags)
		return -ENOSYS;
	fqs->qs_incoredqs = state.s_incoredqs;

	fqs->qs_btimelimit = state.s_state[type].spc_timelimit;
	fqs->qs_itimelimit = state.s_state[type].ino_timelimit;
	fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit;
	fqs->qs_bwarnlimit = state.s_state[type].spc_warnlimit;
	fqs->qs_iwarnlimit = state.s_state[type].ino_warnlimit;

	/* Inodes may be allocated even if inactive; copy out if present */
	if (state.s_state[USRQUOTA].ino) {
		fqs->qs_uquota.qfs_ino = state.s_state[USRQUOTA].ino;
		fqs->qs_uquota.qfs_nblks = state.s_state[USRQUOTA].blocks;
		fqs->qs_uquota.qfs_nextents = state.s_state[USRQUOTA].nextents;
	}
	if (state.s_state[GRPQUOTA].ino) {
		fqs->qs_gquota.qfs_ino = state.s_state[GRPQUOTA].ino;
		fqs->qs_gquota.qfs_nblks = state.s_state[GRPQUOTA].blocks;
		fqs->qs_gquota.qfs_nextents = state.s_state[GRPQUOTA].nextents;
	}
	if (state.s_state[PRJQUOTA].ino) {
		fqs->qs_pquota.qfs_ino = state.s_state[PRJQUOTA].ino;
		fqs->qs_pquota.qfs_nblks = state.s_state[PRJQUOTA].blocks;
		fqs->qs_pquota.qfs_nextents = state.s_state[PRJQUOTA].nextents;
	}
	return 0;
}

/*
 * Handle Q_XGETQSTATV: userspace passes in the version it wants in
 * qs_version (only the first byte is read here); reject versions this
 * kernel does not know about, then fill and copy out the structure.
 */
static int quota_getxstatev(struct super_block *sb, int type, void __user *addr)
{
	struct fs_quota_statv fqs;
	int ret;

	if (!sb->s_qcop->get_state)
		return -ENOSYS;

	memset(&fqs, 0, sizeof(fqs));
	if (copy_from_user(&fqs, addr, 1)) /* Just read qs_version */
		return -EFAULT;

	/* If this kernel doesn't support user specified version, fail */
	switch (fqs.qs_version) {
	case FS_QSTATV_VERSION1:
		break;
	default:
		return -EINVAL;
	}
	ret = quota_getstatev(sb, type, &fqs);
	if (!ret && copy_to_user(addr, &fqs, sizeof(fqs)))
		return -EFAULT;
	return ret;
}

/*
 * XFS defines BBTOB and BTOBB macros inside fs/xfs/ and we cannot move them
 * out of there as xfsprogs rely on definitions being in that header file. So
 * just define same functions here for quota purposes.
 */
#define XFS_BB_SHIFT 9

/* Convert 512-byte basic blocks to bytes. */
static inline u64 quota_bbtob(u64 blocks)
{
	return blocks << XFS_BB_SHIFT;
}

/* Convert bytes to 512-byte basic blocks, rounding up. */
static inline u64 quota_btobb(u64 bytes)
{
	return (bytes + (1 << XFS_BB_SHIFT) - 1) >> XFS_BB_SHIFT;
}

/*
 * Assemble a 64-bit timestamp from the split low/high timer fields of a
 * struct fs_disk_quota.  The high 8 bits are only meaningful when the
 * caller set FS_DQ_BIGTIME.
 */
static inline s64 copy_from_xfs_dqblk_ts(const struct fs_disk_quota *d,
					 __s32 timer, __s8 timer_hi)
{
	if (d->d_fieldmask & FS_DQ_BIGTIME)
		return (u32)timer | (s64)timer_hi << 32;
	return timer;
}

/*
 * Translate a userspace XFS-style struct fs_disk_quota into the
 * kernel-internal struct qc_dqblk: basic blocks become bytes, split
 * timers are reassembled, and FS_DQ_* mask bits map to QC_* bits.
 */
static void copy_from_xfs_dqblk(struct qc_dqblk *dst, struct fs_disk_quota *src)
{
	dst->d_spc_hardlimit = quota_bbtob(src->d_blk_hardlimit);
	dst->d_spc_softlimit = quota_bbtob(src->d_blk_softlimit);
	dst->d_ino_hardlimit = src->d_ino_hardlimit;
	dst->d_ino_softlimit = src->d_ino_softlimit;
	dst->d_space = quota_bbtob(src->d_bcount);
	dst->d_ino_count = src->d_icount;
	dst->d_ino_timer = copy_from_xfs_dqblk_ts(src, src->d_itimer,
						  src->d_itimer_hi);
	dst->d_spc_timer = copy_from_xfs_dqblk_ts(src, src->d_btimer,
						  src->d_btimer_hi);
	dst->d_ino_warns = src->d_iwarns;
	dst->d_spc_warns = src->d_bwarns;
	dst->d_rt_spc_hardlimit = quota_bbtob(src->d_rtb_hardlimit);
	dst->d_rt_spc_softlimit = quota_bbtob(src->d_rtb_softlimit);
	dst->d_rt_space = quota_bbtob(src->d_rtbcount);
	dst->d_rt_spc_timer = copy_from_xfs_dqblk_ts(src, src->d_rtbtimer,
						     src->d_rtbtimer_hi);
	dst->d_rt_spc_warns = src->d_rtbwarns;
	dst->d_fieldmask = 0;
	if (src->d_fieldmask & FS_DQ_ISOFT)
		dst->d_fieldmask |= QC_INO_SOFT;
	if (src->d_fieldmask & FS_DQ_IHARD)
		dst->d_fieldmask |= QC_INO_HARD;
	if (src->d_fieldmask & FS_DQ_BSOFT)
		dst->d_fieldmask |= QC_SPC_SOFT;
	if (src->d_fieldmask & FS_DQ_BHARD)
		dst->d_fieldmask |= QC_SPC_HARD;
	if (src->d_fieldmask & FS_DQ_RTBSOFT)
		dst->d_fieldmask |= QC_RT_SPC_SOFT;
	if (src->d_fieldmask & FS_DQ_RTBHARD)
		dst->d_fieldmask |= QC_RT_SPC_HARD;
	if (src->d_fieldmask & FS_DQ_BTIMER)
		dst->d_fieldmask |= QC_SPC_TIMER;
	if (src->d_fieldmask & FS_DQ_ITIMER)
		dst->d_fieldmask |= QC_INO_TIMER;
	if (src->d_fieldmask & FS_DQ_RTBTIMER)
		dst->d_fieldmask |= QC_RT_SPC_TIMER;
	if (src->d_fieldmask & FS_DQ_BWARNS)
		dst->d_fieldmask |= QC_SPC_WARNS;
	if (src->d_fieldmask & FS_DQ_IWARNS)
		dst->d_fieldmask |= QC_INO_WARNS;
	if (src->d_fieldmask & FS_DQ_RTBWARNS)
		dst->d_fieldmask |= QC_RT_SPC_WARNS;
	if (src->d_fieldmask & FS_DQ_BCOUNT)
		dst->d_fieldmask |= QC_SPACE;
	if (src->d_fieldmask & FS_DQ_ICOUNT)
		dst->d_fieldmask |= QC_INO_COUNT;
	if (src->d_fieldmask & FS_DQ_RTBCOUNT)
		dst->d_fieldmask |= QC_RT_SPACE;
}

/*
 * Extract the grace-time and warn-limit defaults from a struct
 * fs_disk_quota into a struct qc_info for ->set_info.  Used when
 * Q_XSETQLIM for id 0 is setting filesystem-wide defaults.
 */
static void copy_qcinfo_from_xfs_dqblk(struct qc_info *dst,
				       struct fs_disk_quota *src)
{
	memset(dst, 0, sizeof(*dst));
	dst->i_spc_timelimit = src->d_btimer;
	dst->i_ino_timelimit = src->d_itimer;
	dst->i_rt_spc_timelimit = src->d_rtbtimer;
	dst->i_ino_warnlimit = src->d_iwarns;
	dst->i_spc_warnlimit = src->d_bwarns;
	dst->i_rt_spc_warnlimit = src->d_rtbwarns;
	if (src->d_fieldmask & FS_DQ_BWARNS)
		dst->i_fieldmask |= QC_SPC_WARNS;
	if (src->d_fieldmask & FS_DQ_IWARNS)
		dst->i_fieldmask |= QC_INO_WARNS;
	if (src->d_fieldmask & FS_DQ_RTBWARNS)
		dst->i_fieldmask |= QC_RT_SPC_WARNS;
	if (src->d_fieldmask & FS_DQ_BTIMER)
		dst->i_fieldmask |= QC_SPC_TIMER;
	if (src->d_fieldmask & FS_DQ_ITIMER)
		dst->i_fieldmask |= QC_INO_TIMER;
	if (src->d_fieldmask & FS_DQ_RTBTIMER)
		dst->i_fieldmask |= QC_RT_SPC_TIMER;
}

/*
 * Handle Q_XSETQLIM.  Setting timers / warning limits for id 0 means
 * setting the filesystem-wide defaults, which goes through ->set_info;
 * those bits are then stripped before the remaining per-id limits are
 * passed to ->set_dqblk.
 */
static int quota_setxquota(struct super_block *sb, int type, qid_t id,
			   void __user *addr)
{
	struct fs_disk_quota fdq;
	struct qc_dqblk qdq;
	struct kqid qid;

	if (copy_from_user(&fdq, addr, sizeof(fdq)))
		return -EFAULT;
	if (!sb->s_qcop->set_dqblk)
		return -ENOSYS;
	qid = make_kqid(current_user_ns(), type, id);
	if (!qid_has_mapping(sb->s_user_ns, qid))
		return -EINVAL;
	/* Are we actually setting timer / warning limits for all users? */
	if (from_kqid(sb->s_user_ns, qid) == 0 &&
	    fdq.d_fieldmask & (FS_DQ_WARNS_MASK | FS_DQ_TIMER_MASK)) {
		struct qc_info qinfo;
		int ret;

		if (!sb->s_qcop->set_info)
			return -EINVAL;
		copy_qcinfo_from_xfs_dqblk(&qinfo, &fdq);
		ret = sb->s_qcop->set_info(sb, type, &qinfo);
		if (ret)
			return ret;
		/* These are already done */
		fdq.d_fieldmask &= ~(FS_DQ_WARNS_MASK | FS_DQ_TIMER_MASK);
	}
	copy_from_xfs_dqblk(&qdq, &fdq);
	return sb->s_qcop->set_dqblk(sb, qid, &qdq);
}

/*
 * Split a 64-bit timestamp into the low/high timer fields of a struct
 * fs_disk_quota.  The high bits are stored only when FS_DQ_BIGTIME has
 * been set on the destination.
 */
static inline void copy_to_xfs_dqblk_ts(const struct fs_disk_quota *d,
					__s32 *timer_lo, __s8 *timer_hi, s64 timer)
{
	*timer_lo = timer;
	if (d->d_fieldmask & FS_DQ_BIGTIME)
		*timer_hi = timer >> 32;
}

/* Does this timestamp need the bigtime (split 40-bit) representation? */
static inline bool want_bigtime(s64 timer)
{
	return timer > S32_MAX || timer < S32_MIN;
}

/*
 * Fill a userspace XFS-style struct fs_disk_quota from the
 * kernel-internal struct qc_dqblk for quota (@type, @id): bytes become
 * basic blocks and timers get the bigtime encoding when they don't fit
 * in 32 bits.
 */
static void copy_to_xfs_dqblk(struct fs_disk_quota *dst, struct qc_dqblk *src,
			      int type, qid_t id)
{
	memset(dst, 0, sizeof(*dst));
	if (want_bigtime(src->d_ino_timer) || want_bigtime(src->d_spc_timer) ||
	    want_bigtime(src->d_rt_spc_timer))
		dst->d_fieldmask |= FS_DQ_BIGTIME;
	dst->d_version = FS_DQUOT_VERSION;
	dst->d_id = id;
	if (type == USRQUOTA)
		dst->d_flags = FS_USER_QUOTA;
	else if (type == PRJQUOTA)
		dst->d_flags = FS_PROJ_QUOTA;
	else
		dst->d_flags = FS_GROUP_QUOTA;
	dst->d_blk_hardlimit = quota_btobb(src->d_spc_hardlimit);
	dst->d_blk_softlimit = quota_btobb(src->d_spc_softlimit);
	dst->d_ino_hardlimit = src->d_ino_hardlimit;
	dst->d_ino_softlimit = src->d_ino_softlimit;
	dst->d_bcount = quota_btobb(src->d_space);
	dst->d_icount = src->d_ino_count;
	copy_to_xfs_dqblk_ts(dst, &dst->d_itimer, &dst->d_itimer_hi,
			     src->d_ino_timer);
	copy_to_xfs_dqblk_ts(dst, &dst->d_btimer, &dst->d_btimer_hi,
			     src->d_spc_timer);
	dst->d_iwarns = src->d_ino_warns;
	dst->d_bwarns = src->d_spc_warns;
	dst->d_rtb_hardlimit = quota_btobb(src->d_rt_spc_hardlimit);
	dst->d_rtb_softlimit = quota_btobb(src->d_rt_spc_softlimit);
	dst->d_rtbcount = quota_btobb(src->d_rt_space);
	copy_to_xfs_dqblk_ts(dst, &dst->d_rtbtimer, &dst->d_rtbtimer_hi,
			     src->d_rt_spc_timer);
	dst->d_rtbwarns = src->d_rt_spc_warns;
}

/*
 * Handle Q_XGETQUOTA: fetch limits and usage for (@type, @id) via
 * ->get_dqblk and copy them to userspace in XFS format.
 */
static int quota_getxquota(struct super_block *sb, int type, qid_t id,
			   void __user *addr)
{
	struct fs_disk_quota fdq;
	struct qc_dqblk qdq;
	struct kqid qid;
	int ret;

	if (!sb->s_qcop->get_dqblk)
		return -ENOSYS;
	qid = make_kqid(current_user_ns(), type, id);
	if (!qid_has_mapping(sb->s_user_ns, qid))
		return -EINVAL;
	ret = sb->s_qcop->get_dqblk(sb, qid, &qdq);
	if (ret)
		return ret;
	copy_to_xfs_dqblk(&fdq, &qdq, type, id);
	if (copy_to_user(addr, &fdq, sizeof(fdq)))
		return -EFAULT;
	return ret;
}

/*
 * Handle Q_XGETNEXTQUOTA: return quota for next active quota >= this
 * id, if any exists, otherwise -ENOENT via ->get_nextdqblk.  The id
 * actually found is reported back in d_id.
 */
static int quota_getnextxquota(struct super_block *sb, int type, qid_t id,
			       void __user *addr)
{
	struct fs_disk_quota fdq;
	struct qc_dqblk qdq;
	struct kqid qid;
	qid_t id_out;
	int ret;

	if (!sb->s_qcop->get_nextdqblk)
		return -ENOSYS;
	qid = make_kqid(current_user_ns(), type, id);
	if (!qid_has_mapping(sb->s_user_ns, qid))
		return -EINVAL;
	ret = sb->s_qcop->get_nextdqblk(sb, &qid, &qdq);
	if (ret)
		return ret;
	id_out = from_kqid(current_user_ns(), qid);
	copy_to_xfs_dqblk(&fdq, &qdq, type, id_out);
	if (copy_to_user(addr, &fdq, sizeof(fdq)))
		return -EFAULT;
	return ret;
}

/*
 * Handle Q_XQUOTARM: ask the filesystem to free disk space used by the
 * quota files indicated by the userspace flags word.
 */
static int quota_rmxquota(struct super_block *sb, void __user *addr)
{
	__u32 flags;

	if (copy_from_user(&flags, addr, sizeof(flags)))
		return -EFAULT;
	if (!sb->s_qcop->rm_xquota)
		return -ENOSYS;
	return sb->s_qcop->rm_xquota(sb, flags);
}

/*
 * Copy parameters and call proper function.  Central dispatcher: checks
 * that the filesystem supports quota of @type at all, runs the
 * permission check, then routes @cmd to its handler.  @type is
 * sanitized with array_index_nospec() since it later indexes
 * fixed-size per-type arrays under speculation.
 */
static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
		       void __user *addr, const struct path *path)
{
	int ret;

	type = array_index_nospec(type, MAXQUOTAS);
	/*
	 * Quota not supported on this fs? Check this before s_quota_types
	 * since they needn't be set if quota is not supported at all.
	 */
	if (!sb->s_qcop)
		return -ENOSYS;
	if (!(sb->s_quota_types & (1 << type)))
		return -EINVAL;

	ret = check_quotactl_permission(sb, type, cmd, id);
	if (ret < 0)
		return ret;

	switch (cmd) {
	case Q_QUOTAON:
		return quota_quotaon(sb, type, id, path);
	case Q_QUOTAOFF:
		return quota_quotaoff(sb, type);
	case Q_GETFMT:
		return quota_getfmt(sb, type, addr);
	case Q_GETINFO:
		return quota_getinfo(sb, type, addr);
	case Q_SETINFO:
		return quota_setinfo(sb, type, addr);
	case Q_GETQUOTA:
		return quota_getquota(sb, type, id, addr);
	case Q_GETNEXTQUOTA:
		return quota_getnextquota(sb, type, id, addr);
	case Q_SETQUOTA:
		return quota_setquota(sb, type, id, addr);
	case Q_SYNC:
		if (!sb->s_qcop->quota_sync)
			return -ENOSYS;
		return sb->s_qcop->quota_sync(sb, type);
	case Q_XQUOTAON:
		return quota_enable(sb, addr);
	case Q_XQUOTAOFF:
		return quota_disable(sb, addr);
	case Q_XQUOTARM:
		return quota_rmxquota(sb, addr);
	case Q_XGETQSTAT:
		return quota_getxstate(sb, type, addr);
	case Q_XGETQSTATV:
		return quota_getxstatev(sb, type, addr);
	case Q_XSETQLIM:
		return quota_setxquota(sb, type, id, addr);
	case Q_XGETQUOTA:
		return quota_getxquota(sb, type, id, addr);
	case Q_XGETNEXTQUOTA:
		return quota_getnextxquota(sb, type, id, addr);
	case Q_XQUOTASYNC:
		if (sb_rdonly(sb))
			return -EROFS;
		/* XFS quotas are fully coherent now, making this call a noop */
		return 0;
	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_BLOCK

/* Return 1 if 'cmd' will block on frozen filesystem */
static int quotactl_cmd_write(int cmd)
{
	/*
	 * We cannot allow Q_GETQUOTA and Q_GETNEXTQUOTA without write access
	 * as dquot_acquire() may allocate space for new structure and OCFS2
	 * needs to increment on-disk use count.
	 */
	switch (cmd) {
	case Q_GETFMT:
	case Q_GETINFO:
	case Q_SYNC:
	case Q_XGETQSTAT:
	case Q_XGETQSTATV:
	case Q_XGETQUOTA:
	case Q_XGETNEXTQUOTA:
	case Q_XQUOTASYNC:
		return 0;
	}
	return 1;
}
#endif /* CONFIG_BLOCK */

/* Return true if quotactl command is manipulating quota on/off state */
static bool quotactl_cmd_onoff(int cmd)
{
	return (cmd == Q_QUOTAON) || (cmd == Q_QUOTAOFF) ||
		 (cmd == Q_XQUOTAON) || (cmd == Q_XQUOTAOFF);
}

/*
 * look up a superblock on which quota ops will be performed
 * - use the name of a block device to find the superblock thereon
 *
 * On/off commands take the superblock exclusively; commands that may
 * write wait for a frozen filesystem to thaw first.  The caller must
 * drop the returned reference with drop_super() or
 * drop_super_exclusive() to match (see kernel_quotactl()).
 */
static struct super_block *quotactl_block(const char __user *special, int cmd)
{
#ifdef CONFIG_BLOCK
	struct block_device *bdev;
	struct super_block *sb;
	struct filename *tmp = getname(special);

	if (IS_ERR(tmp))
		return ERR_CAST(tmp);
	bdev = lookup_bdev(tmp->name);
	putname(tmp);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);
	if (quotactl_cmd_onoff(cmd))
		sb = get_super_exclusive_thawed(bdev);
	else if (quotactl_cmd_write(cmd))
		sb = get_super_thawed(bdev);
	else
		sb = get_super(bdev);
	bdput(bdev);
	if (!sb)
		return ERR_PTR(-ENODEV);

	return sb;
#else
	return ERR_PTR(-ENODEV);
#endif
}

/*
 * This is the system call interface. This communicates with
 * the user-level programs. Currently this only supports diskquota
 * calls. Maybe we need to add the process quotas etc. in the future,
 * but we probably should use rlimits for that.
 */
int kernel_quotactl(unsigned int cmd, const char __user *special,
		    qid_t id, void __user *addr)
{
	uint cmds, type;
	struct super_block *sb = NULL;
	struct path path, *pathp = NULL;
	int ret;

	cmds = cmd >> SUBCMDSHIFT;
	type = cmd & SUBCMDMASK;

	if (type >= MAXQUOTAS)
		return -EINVAL;

	/*
	 * As a special case Q_SYNC can be called without a specific device.
	 * It will iterate all superblocks that have quota enabled and call
	 * the sync action on each of them.
	 */
	if (!special) {
		if (cmds == Q_SYNC)
			return quota_sync_all(type);
		return -ENODEV;
	}

	/*
	 * Path for quotaon has to be resolved before grabbing superblock
	 * because that gets s_umount sem which is also possibly needed by path
	 * resolution (think about autofs) and thus deadlocks could arise.
	 */
	if (cmds == Q_QUOTAON) {
		ret = user_path_at(AT_FDCWD, addr, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
		if (ret)
			pathp = ERR_PTR(ret);
		else
			pathp = &path;
	}

	sb = quotactl_block(special, cmds);
	if (IS_ERR(sb)) {
		ret = PTR_ERR(sb);
		goto out;
	}

	ret = do_quotactl(sb, type, cmds, id, addr, pathp);

	/* Drop the reference in the mode quotactl_block() took it. */
	if (!quotactl_cmd_onoff(cmds))
		drop_super(sb);
	else
		drop_super_exclusive(sb);
out:
	if (pathp && !IS_ERR(pathp))
		path_put(pathp);
	return ret;
}

SYSCALL_DEFINE4(quotactl, unsigned int, cmd, const char __user *, special,
		qid_t, id, void __user *, addr)
{
	return kernel_quotactl(cmd, special, id, addr);
}