// SPDX-License-Identifier: GPL-2.0
/*
 * Quota code necessary even when VFS quota support is not compiled
 * into the kernel. The interesting stuff is over in dquot.c, here
 * we have symbols for initial quotactl(2) handling, the sysctl(2)
 * variables, etc - things needed even when quota support disabled.
 */

#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <asm/current.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/capability.h>
#include <linux/quotaops.h>
#include <linux/types.h>
#include <linux/writeback.h>

/*
 * Check whether the current task may issue quotactl command @cmd for quota
 * @type and @id on @sb.  Read-only state queries need no privilege, a user
 * may query quotas it "owns", everything else requires CAP_SYS_ADMIN.  The
 * security module gets the final say via security_quotactl().
 */
static int check_quotactl_permission(struct super_block *sb, int type, int cmd,
				     qid_t id)
{
	switch (cmd) {
	/* these commands do not require any special privileges */
	case Q_GETFMT:
	case Q_SYNC:
	case Q_GETINFO:
	case Q_XGETQSTAT:
	case Q_XGETQSTATV:
	case Q_XQUOTASYNC:
		break;
	/* allow to query information for dquots we "own" */
	case Q_GETQUOTA:
	case Q_XGETQUOTA:
		if ((type == USRQUOTA && uid_eq(current_euid(), make_kuid(current_user_ns(), id))) ||
		    (type == GRPQUOTA && in_egroup_p(make_kgid(current_user_ns(), id))))
			break;
		/* fall through - not our own dquot, need CAP_SYS_ADMIN */
	default:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
	}

	return security_quotactl(cmd, type, id, sb);
}

/*
 * iterate_supers() callback: sync quotas of one type on a single superblock,
 * provided the filesystem implements ->quota_sync and supports that type.
 */
static void quota_sync_one(struct super_block *sb, void *arg)
{
	int type = *(int *)arg;

	if (sb->s_qcop && sb->s_qcop->quota_sync &&
	    (sb->s_quota_types & (1 << type)))
		sb->s_qcop->quota_sync(sb, type);
}

/*
 * Q_SYNC without a device argument: sync quotas of @type on every
 * superblock that has them enabled.
 */
static int quota_sync_all(int type)
{
	int ret;

	if (type >= MAXQUOTAS)
		return -EINVAL;
	ret = security_quotactl(Q_SYNC, type, 0, NULL);
	if (!ret)
		iterate_supers(quota_sync_one, &type);
	return ret;
}

/*
 * Map a quota type to the corresponding XFS-style enforcement flag
 * (0 for unknown types).
 */
unsigned int qtype_enforce_flag(int type)
{
	switch (type) {
	case USRQUOTA:
		return FS_QUOTA_UDQ_ENFD;
	case GRPQUOTA:
		return FS_QUOTA_GDQ_ENFD;
	case PRJQUOTA:
		return FS_QUOTA_PDQ_ENFD;
	}
	return 0;
}

/*
 * Q_QUOTAON: filesystems with always-on accounting implement
 * ->quota_enable and need no quota file; others are turned on via
 * ->quota_on with the resolved @path.  A path resolution error is
 * reported only when the path is actually needed.
 */
static int quota_quotaon(struct super_block *sb, int type, qid_t id,
			 const struct path *path)
{
	if (!sb->s_qcop->quota_on && !sb->s_qcop->quota_enable)
		return -ENOSYS;
	if (sb->s_qcop->quota_enable)
		return sb->s_qcop->quota_enable(sb, qtype_enforce_flag(type));
	if (IS_ERR(path))
		return PTR_ERR(path);
	return sb->s_qcop->quota_on(sb, type, id, path);
}

/* Q_QUOTAOFF: counterpart of quota_quotaon(). */
static int quota_quotaoff(struct super_block *sb, int type)
{
	if (!sb->s_qcop->quota_off && !sb->s_qcop->quota_disable)
		return -ENOSYS;
	if (sb->s_qcop->quota_disable)
		return sb->s_qcop->quota_disable(sb, qtype_enforce_flag(type));
	return sb->s_qcop->quota_off(sb, type);
}

/*
 * Q_GETFMT: copy the id of the active quota format for @type to userspace.
 * -ESRCH when quota is not active for @type.
 */
static int quota_getfmt(struct super_block *sb, int type, void __user *addr)
{
	__u32 fmt;

	if (!sb_has_quota_active(sb, type))
		return -ESRCH;
	fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id;
	if (copy_to_user(addr, &fmt, sizeof(fmt)))
		return -EFAULT;
	return 0;
}

/*
 * Q_GETINFO: translate the generic qc_state entry for @type into the
 * legacy if_dqinfo layout and copy it to userspace.  -ESRCH when
 * accounting is not enabled for @type.
 */
static int quota_getinfo(struct super_block *sb, int type, void __user *addr)
{
	struct qc_state state;
	struct qc_type_state *tstate;
	struct if_dqinfo uinfo;
	int ret;

	if (!sb->s_qcop->get_state)
		return -ENOSYS;
	ret = sb->s_qcop->get_state(sb, &state);
	if (ret)
		return ret;
	tstate = state.s_state + type;
	if (!(tstate->flags & QCI_ACCT_ENABLED))
		return -ESRCH;
	memset(&uinfo, 0, sizeof(uinfo));
	uinfo.dqi_bgrace = tstate->spc_timelimit;
	uinfo.dqi_igrace = tstate->ino_timelimit;
	if (tstate->flags & QCI_SYSFILE)
		uinfo.dqi_flags |= DQF_SYS_FILE;
	if (tstate->flags & QCI_ROOT_SQUASH)
		uinfo.dqi_flags |= DQF_ROOT_SQUASH;
	uinfo.dqi_valid = IIF_ALL;
	if (copy_to_user(addr, &uinfo, sizeof(uinfo)))
		return -EFAULT;
	return 0;
}

/*
 * Q_SETINFO: validate the user-supplied if_dqinfo and convert the
 * requested changes (flags, block/inode grace times) into a qc_info
 * for ->set_info.  Only IIF_FLAGS/IIF_BGRACE/IIF_IGRACE may be set.
 */
static int quota_setinfo(struct super_block *sb, int type, void __user *addr)
{
	struct if_dqinfo info;
	struct qc_info qinfo;

	if (copy_from_user(&info, addr, sizeof(info)))
		return -EFAULT;
	if (!sb->s_qcop->set_info)
		return -ENOSYS;
	if (info.dqi_valid & ~(IIF_FLAGS | IIF_BGRACE | IIF_IGRACE))
		return -EINVAL;
	memset(&qinfo, 0, sizeof(qinfo));
	if (info.dqi_valid & IIF_FLAGS) {
		if (info.dqi_flags & ~DQF_SETINFO_MASK)
			return -EINVAL;
		if (info.dqi_flags & DQF_ROOT_SQUASH)
			qinfo.i_flags |= QCI_ROOT_SQUASH;
		qinfo.i_fieldmask |= QC_FLAGS;
	}
	if (info.dqi_valid & IIF_BGRACE) {
		qinfo.i_spc_timelimit = info.dqi_bgrace;
		qinfo.i_fieldmask |= QC_SPC_TIMER;
	}
	if (info.dqi_valid & IIF_IGRACE) {
		qinfo.i_ino_timelimit = info.dqi_igrace;
		qinfo.i_fieldmask |= QC_INO_TIMER;
	}
	return sb->s_qcop->set_info(sb, type, &qinfo);
}

/* Convert quota blocks (QIF_DQBLKSIZE units) to bytes. */
static inline qsize_t qbtos(qsize_t blocks)
{
	return blocks << QIF_DQBLKSIZE_BITS;
}

/* Convert bytes to quota blocks, rounding up. */
static inline qsize_t stoqb(qsize_t space)
{
	return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS;
}

/*
 * Fill a legacy if_dqblk from the generic qc_dqblk.  Space limits are
 * reported in quota blocks, current usage stays in bytes (dqb_curspace).
 */
static void copy_to_if_dqblk(struct if_dqblk *dst, struct qc_dqblk *src)
{
	memset(dst, 0, sizeof(*dst));
	dst->dqb_bhardlimit = stoqb(src->d_spc_hardlimit);
	dst->dqb_bsoftlimit = stoqb(src->d_spc_softlimit);
	dst->dqb_curspace = src->d_space;
	dst->dqb_ihardlimit = src->d_ino_hardlimit;
	dst->dqb_isoftlimit = src->d_ino_softlimit;
	dst->dqb_curinodes = src->d_ino_count;
	dst->dqb_btime = src->d_spc_timer;
	dst->dqb_itime = src->d_ino_timer;
	dst->dqb_valid = QIF_ALL;
}

/*
 * Q_GETQUOTA: fetch usage and limits for (@type, @id) via ->get_dqblk
 * and copy them to userspace in if_dqblk format.
 */
static int quota_getquota(struct super_block *sb, int type, qid_t id,
			  void __user *addr)
{
	struct kqid qid;
	struct qc_dqblk fdq;
	struct if_dqblk idq;
	int ret;

	if (!sb->s_qcop->get_dqblk)
		return -ENOSYS;
	qid = make_kqid(current_user_ns(), type, id);
	if (!qid_has_mapping(sb->s_user_ns, qid))
		return -EINVAL;
	ret = sb->s_qcop->get_dqblk(sb, qid, &fdq);
	if (ret)
		return ret;
	copy_to_if_dqblk(&idq, &fdq);
	if (copy_to_user(addr, &idq, sizeof(idq)))
		return -EFAULT;
	return 0;
}

/*
 * Return quota for next active quota >= this id, if any exists,
 * otherwise return -ENOENT via ->get_nextdqblk
 */
static int quota_getnextquota(struct super_block *sb, int type, qid_t id,
			      void __user *addr)
{
	struct kqid qid;
	struct qc_dqblk fdq;
	struct if_nextdqblk idq;
	int ret;

	if (!sb->s_qcop->get_nextdqblk)
		return -ENOSYS;
	qid = make_kqid(current_user_ns(), type, id);
	if (!qid_has_mapping(sb->s_user_ns, qid))
		return -EINVAL;
	ret = sb->s_qcop->get_nextdqblk(sb, &qid, &fdq);
	if (ret)
		return ret;
	/* struct if_nextdqblk is a superset of struct if_dqblk */
	copy_to_if_dqblk((struct if_dqblk *)&idq, &fdq);
	idq.dqb_id = from_kqid(current_user_ns(), qid);
	if (copy_to_user(addr, &idq, sizeof(idq)))
		return -EFAULT;
	return 0;
}

/*
 * Translate a user-supplied if_dqblk into the generic qc_dqblk, building
 * d_fieldmask from the QIF_* validity bits so the filesystem only applies
 * the fields userspace actually set.
 */
static void copy_from_if_dqblk(struct qc_dqblk *dst, struct if_dqblk *src)
{
	dst->d_spc_hardlimit = qbtos(src->dqb_bhardlimit);
	dst->d_spc_softlimit = qbtos(src->dqb_bsoftlimit);
	dst->d_space = src->dqb_curspace;
	dst->d_ino_hardlimit = src->dqb_ihardlimit;
	dst->d_ino_softlimit = src->dqb_isoftlimit;
	dst->d_ino_count = src->dqb_curinodes;
	dst->d_spc_timer = src->dqb_btime;
	dst->d_ino_timer = src->dqb_itime;

	dst->d_fieldmask = 0;
	if (src->dqb_valid & QIF_BLIMITS)
		dst->d_fieldmask |= QC_SPC_SOFT | QC_SPC_HARD;
	if (src->dqb_valid & QIF_SPACE)
		dst->d_fieldmask |= QC_SPACE;
	if (src->dqb_valid & QIF_ILIMITS)
		dst->d_fieldmask |= QC_INO_SOFT | QC_INO_HARD;
	if (src->dqb_valid & QIF_INODES)
		dst->d_fieldmask |= QC_INO_COUNT;
	if (src->dqb_valid & QIF_BTIME)
		dst->d_fieldmask |= QC_SPC_TIMER;
	if (src->dqb_valid & QIF_ITIME)
		dst->d_fieldmask |= QC_INO_TIMER;
}

/* Q_SETQUOTA: set limits/usage for (@type, @id) from a user if_dqblk. */
static int quota_setquota(struct super_block *sb, int type, qid_t id,
			  void __user *addr)
{
	struct qc_dqblk fdq;
	struct if_dqblk idq;
	struct kqid qid;

	if (copy_from_user(&idq, addr, sizeof(idq)))
		return -EFAULT;
	if (!sb->s_qcop->set_dqblk)
		return -ENOSYS;
	qid = make_kqid(current_user_ns(), type, id);
	if (!qid_has_mapping(sb->s_user_ns, qid))
		return -EINVAL;
	copy_from_if_dqblk(&fdq, &idq);
	return sb->s_qcop->set_dqblk(sb, qid, &fdq);
}

/* Q_XQUOTAON: enable quota per the user-supplied FS_QUOTA_* flags. */
static int quota_enable(struct super_block *sb, void __user *addr)
{
	__u32 flags;

	if (copy_from_user(&flags, addr, sizeof(flags)))
		return -EFAULT;
	if (!sb->s_qcop->quota_enable)
		return -ENOSYS;
	return sb->s_qcop->quota_enable(sb, flags);
}

/* Q_XQUOTAOFF: disable quota per the user-supplied FS_QUOTA_* flags. */
static int quota_disable(struct super_block *sb, void __user *addr)
{
	__u32 flags;

	if (copy_from_user(&flags, addr, sizeof(flags)))
		return -EFAULT;
	if (!sb->s_qcop->quota_disable)
		return -ENOSYS;
	return sb->s_qcop->quota_disable(sb, flags);
}

/* Convert per-type qc_state flags to XFS-style FS_QUOTA_*_{ACCT,ENFD}. */
static int quota_state_to_flags(struct qc_state *state)
{
	int flags = 0;

	if (state->s_state[USRQUOTA].flags & QCI_ACCT_ENABLED)
		flags |= FS_QUOTA_UDQ_ACCT;
	if (state->s_state[USRQUOTA].flags & QCI_LIMITS_ENFORCED)
		flags |= FS_QUOTA_UDQ_ENFD;
	if (state->s_state[GRPQUOTA].flags & QCI_ACCT_ENABLED)
		flags |= FS_QUOTA_GDQ_ACCT;
	if (state->s_state[GRPQUOTA].flags & QCI_LIMITS_ENFORCED)
		flags |= FS_QUOTA_GDQ_ENFD;
	if (state->s_state[PRJQUOTA].flags & QCI_ACCT_ENABLED)
		flags |= FS_QUOTA_PDQ_ACCT;
	if (state->s_state[PRJQUOTA].flags & QCI_LIMITS_ENFORCED)
		flags |= FS_QUOTA_PDQ_ENFD;
	return flags;
}

/*
 * Build the Q_XGETQSTAT reply from the filesystem's qc_state.
 * Returns -ENOSYS when no quota type is enabled at all.
 */
static int quota_getstate(struct super_block *sb, struct fs_quota_stat *fqs)
{
	int type;
	struct qc_state state;
	int ret;

	memset(&state, 0, sizeof (struct qc_state));
	ret = sb->s_qcop->get_state(sb, &state);
	if (ret < 0)
		return ret;

	memset(fqs, 0, sizeof(*fqs));
	fqs->qs_version = FS_QSTAT_VERSION;
	fqs->qs_flags = quota_state_to_flags(&state);
	/* No quota enabled? */
	if (!fqs->qs_flags)
		return -ENOSYS;
	fqs->qs_incoredqs = state.s_incoredqs;
	/*
	 * GETXSTATE quotactl has space for just one set of time limits so
	 * report them for the first enabled quota type
	 */
	for (type = 0; type < MAXQUOTAS; type++)
		if (state.s_state[type].flags & QCI_ACCT_ENABLED)
			break;
	/* Cannot trigger: the qs_flags check above proved a type is enabled */
	BUG_ON(type == MAXQUOTAS);
	fqs->qs_btimelimit = state.s_state[type].spc_timelimit;
	fqs->qs_itimelimit = state.s_state[type].ino_timelimit;
	fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit;
	fqs->qs_bwarnlimit = state.s_state[type].spc_warnlimit;
	fqs->qs_iwarnlimit = state.s_state[type].ino_warnlimit;

	/* Inodes may be allocated even if inactive; copy out if present */
	if (state.s_state[USRQUOTA].ino) {
		fqs->qs_uquota.qfs_ino = state.s_state[USRQUOTA].ino;
		fqs->qs_uquota.qfs_nblks = state.s_state[USRQUOTA].blocks;
		fqs->qs_uquota.qfs_nextents = state.s_state[USRQUOTA].nextents;
	}
	if (state.s_state[GRPQUOTA].ino) {
		fqs->qs_gquota.qfs_ino = state.s_state[GRPQUOTA].ino;
		fqs->qs_gquota.qfs_nblks = state.s_state[GRPQUOTA].blocks;
		fqs->qs_gquota.qfs_nextents = state.s_state[GRPQUOTA].nextents;
	}
	if (state.s_state[PRJQUOTA].ino) {
		/*
		 * Q_XGETQSTAT doesn't have room for both group and project
		 * quotas. So, allow the project quota values to be copied out
		 * only if there is no group quota information available.
		 */
		if (!(state.s_state[GRPQUOTA].flags & QCI_ACCT_ENABLED)) {
			fqs->qs_gquota.qfs_ino = state.s_state[PRJQUOTA].ino;
			fqs->qs_gquota.qfs_nblks =
					state.s_state[PRJQUOTA].blocks;
			fqs->qs_gquota.qfs_nextents =
					state.s_state[PRJQUOTA].nextents;
		}
	}
	return 0;
}

/* Q_XGETQSTAT: gather quota state and copy it to userspace. */
static int quota_getxstate(struct super_block *sb, void __user *addr)
{
	struct fs_quota_stat fqs;
	int ret;

	if (!sb->s_qcop->get_state)
		return -ENOSYS;
	ret = quota_getstate(sb, &fqs);
	if (!ret && copy_to_user(addr, &fqs, sizeof(fqs)))
		return -EFAULT;
	return ret;
}

/*
 * Build the Q_XGETQSTATV reply (versioned variant with a dedicated
 * project quota slot).  Mirrors quota_getstate().
 */
static int quota_getstatev(struct super_block *sb, struct fs_quota_statv *fqs)
{
	int type;
	struct qc_state state;
	int ret;

	memset(&state, 0, sizeof (struct qc_state));
	ret = sb->s_qcop->get_state(sb, &state);
	if (ret < 0)
		return ret;

	memset(fqs, 0, sizeof(*fqs));
	fqs->qs_version = FS_QSTAT_VERSION;
	fqs->qs_flags = quota_state_to_flags(&state);
	/* No quota enabled? */
	if (!fqs->qs_flags)
		return -ENOSYS;
	fqs->qs_incoredqs = state.s_incoredqs;
	/*
	 * GETXSTATV quotactl has space for just one set of time limits so
	 * report them for the first enabled quota type
	 */
	for (type = 0; type < MAXQUOTAS; type++)
		if (state.s_state[type].flags & QCI_ACCT_ENABLED)
			break;
	/* Cannot trigger: the qs_flags check above proved a type is enabled */
	BUG_ON(type == MAXQUOTAS);
	fqs->qs_btimelimit = state.s_state[type].spc_timelimit;
	fqs->qs_itimelimit = state.s_state[type].ino_timelimit;
	fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit;
	fqs->qs_bwarnlimit = state.s_state[type].spc_warnlimit;
	fqs->qs_iwarnlimit = state.s_state[type].ino_warnlimit;

	/* Inodes may be allocated even if inactive; copy out if present */
	if (state.s_state[USRQUOTA].ino) {
		fqs->qs_uquota.qfs_ino = state.s_state[USRQUOTA].ino;
		fqs->qs_uquota.qfs_nblks = state.s_state[USRQUOTA].blocks;
		fqs->qs_uquota.qfs_nextents = state.s_state[USRQUOTA].nextents;
	}
	if (state.s_state[GRPQUOTA].ino) {
		fqs->qs_gquota.qfs_ino = state.s_state[GRPQUOTA].ino;
		fqs->qs_gquota.qfs_nblks = state.s_state[GRPQUOTA].blocks;
		fqs->qs_gquota.qfs_nextents = state.s_state[GRPQUOTA].nextents;
	}
	if (state.s_state[PRJQUOTA].ino) {
		fqs->qs_pquota.qfs_ino = state.s_state[PRJQUOTA].ino;
		fqs->qs_pquota.qfs_nblks = state.s_state[PRJQUOTA].blocks;
		fqs->qs_pquota.qfs_nextents = state.s_state[PRJQUOTA].nextents;
	}
	return 0;
}

/*
 * Q_XGETQSTATV: validate the user-requested structure version, then
 * gather quota state and copy it back.
 */
static int quota_getxstatev(struct super_block *sb, void __user *addr)
{
	struct fs_quota_statv fqs;
	int ret;

	if (!sb->s_qcop->get_state)
		return -ENOSYS;

	memset(&fqs, 0, sizeof(fqs));
	if (copy_from_user(&fqs, addr, 1)) /* Just read qs_version */
		return -EFAULT;

	/* If this kernel doesn't support user specified version, fail */
	switch (fqs.qs_version) {
	case FS_QSTATV_VERSION1:
		break;
	default:
		return -EINVAL;
	}
	ret = quota_getstatev(sb, &fqs);
	if (!ret && copy_to_user(addr, &fqs, sizeof(fqs)))
		return -EFAULT;
	return ret;
}

/*
 * XFS defines BBTOB and BTOBB macros inside fs/xfs/ and we cannot move them
 * out of there as xfsprogs rely on definitions being in that header file. So
 * just define same functions here for quota purposes.
 */
#define XFS_BB_SHIFT 9

/* Convert 512-byte basic blocks to bytes. */
static inline u64 quota_bbtob(u64 blocks)
{
	return blocks << XFS_BB_SHIFT;
}

/* Convert bytes to 512-byte basic blocks, rounding up. */
static inline u64 quota_btobb(u64 bytes)
{
	return (bytes + (1 << XFS_BB_SHIFT) - 1) >> XFS_BB_SHIFT;
}

/*
 * Translate a user-supplied XFS fs_disk_quota into the generic qc_dqblk,
 * converting basic-block counts to bytes and FS_DQ_* mask bits to their
 * QC_* counterparts.
 */
static void copy_from_xfs_dqblk(struct qc_dqblk *dst, struct fs_disk_quota *src)
{
	dst->d_spc_hardlimit = quota_bbtob(src->d_blk_hardlimit);
	dst->d_spc_softlimit = quota_bbtob(src->d_blk_softlimit);
	dst->d_ino_hardlimit = src->d_ino_hardlimit;
	dst->d_ino_softlimit = src->d_ino_softlimit;
	dst->d_space = quota_bbtob(src->d_bcount);
	dst->d_ino_count = src->d_icount;
	dst->d_ino_timer = src->d_itimer;
	dst->d_spc_timer = src->d_btimer;
	dst->d_ino_warns = src->d_iwarns;
	dst->d_spc_warns = src->d_bwarns;
	dst->d_rt_spc_hardlimit = quota_bbtob(src->d_rtb_hardlimit);
	dst->d_rt_spc_softlimit = quota_bbtob(src->d_rtb_softlimit);
	dst->d_rt_space = quota_bbtob(src->d_rtbcount);
	dst->d_rt_spc_timer = src->d_rtbtimer;
	dst->d_rt_spc_warns = src->d_rtbwarns;
	dst->d_fieldmask = 0;
	if (src->d_fieldmask & FS_DQ_ISOFT)
		dst->d_fieldmask |= QC_INO_SOFT;
	if (src->d_fieldmask & FS_DQ_IHARD)
		dst->d_fieldmask |= QC_INO_HARD;
	if (src->d_fieldmask & FS_DQ_BSOFT)
		dst->d_fieldmask |= QC_SPC_SOFT;
	if (src->d_fieldmask & FS_DQ_BHARD)
		dst->d_fieldmask |= QC_SPC_HARD;
	if (src->d_fieldmask & FS_DQ_RTBSOFT)
		dst->d_fieldmask |= QC_RT_SPC_SOFT;
	if (src->d_fieldmask & FS_DQ_RTBHARD)
		dst->d_fieldmask |= QC_RT_SPC_HARD;
	if (src->d_fieldmask & FS_DQ_BTIMER)
		dst->d_fieldmask |= QC_SPC_TIMER;
	if (src->d_fieldmask & FS_DQ_ITIMER)
		dst->d_fieldmask |= QC_INO_TIMER;
	if (src->d_fieldmask & FS_DQ_RTBTIMER)
		dst->d_fieldmask |= QC_RT_SPC_TIMER;
	if (src->d_fieldmask & FS_DQ_BWARNS)
		dst->d_fieldmask |= QC_SPC_WARNS;
	if (src->d_fieldmask & FS_DQ_IWARNS)
		dst->d_fieldmask |= QC_INO_WARNS;
	if (src->d_fieldmask & FS_DQ_RTBWARNS)
		dst->d_fieldmask |= QC_RT_SPC_WARNS;
	if (src->d_fieldmask & FS_DQ_BCOUNT)
		dst->d_fieldmask |= QC_SPACE;
	if (src->d_fieldmask & FS_DQ_ICOUNT)
		dst->d_fieldmask |= QC_INO_COUNT;
	if (src->d_fieldmask & FS_DQ_RTBCOUNT)
		dst->d_fieldmask |= QC_RT_SPACE;
}

/*
 * Extract the filesystem-wide timer / warning limit settings from an
 * XFS fs_disk_quota into a qc_info suitable for ->set_info.
 */
static void copy_qcinfo_from_xfs_dqblk(struct qc_info *dst,
				       struct fs_disk_quota *src)
{
	memset(dst, 0, sizeof(*dst));
	dst->i_spc_timelimit = src->d_btimer;
	dst->i_ino_timelimit = src->d_itimer;
	dst->i_rt_spc_timelimit = src->d_rtbtimer;
	dst->i_ino_warnlimit = src->d_iwarns;
	dst->i_spc_warnlimit = src->d_bwarns;
	dst->i_rt_spc_warnlimit = src->d_rtbwarns;
	if (src->d_fieldmask & FS_DQ_BWARNS)
		dst->i_fieldmask |= QC_SPC_WARNS;
	if (src->d_fieldmask & FS_DQ_IWARNS)
		dst->i_fieldmask |= QC_INO_WARNS;
	if (src->d_fieldmask & FS_DQ_RTBWARNS)
		dst->i_fieldmask |= QC_RT_SPC_WARNS;
	if (src->d_fieldmask & FS_DQ_BTIMER)
		dst->i_fieldmask |= QC_SPC_TIMER;
	if (src->d_fieldmask & FS_DQ_ITIMER)
		dst->i_fieldmask |= QC_INO_TIMER;
	if (src->d_fieldmask & FS_DQ_RTBTIMER)
		dst->i_fieldmask |= QC_RT_SPC_TIMER;
}

/*
 * Q_XSETQLIM: set XFS-style limits for (@type, @id).  Timer / warning
 * limits set for id 0 are filesystem-wide defaults and are routed through
 * ->set_info first, then stripped from the per-id field mask.
 */
static int quota_setxquota(struct super_block *sb, int type, qid_t id,
			   void __user *addr)
{
	struct fs_disk_quota fdq;
	struct qc_dqblk qdq;
	struct kqid qid;

	if (copy_from_user(&fdq, addr, sizeof(fdq)))
		return -EFAULT;
	if (!sb->s_qcop->set_dqblk)
		return -ENOSYS;
	qid = make_kqid(current_user_ns(), type, id);
	if (!qid_has_mapping(sb->s_user_ns, qid))
		return -EINVAL;
	/* Are we actually setting timer / warning limits for all users? */
	if (from_kqid(sb->s_user_ns, qid) == 0 &&
	    fdq.d_fieldmask & (FS_DQ_WARNS_MASK | FS_DQ_TIMER_MASK)) {
		struct qc_info qinfo;
		int ret;

		if (!sb->s_qcop->set_info)
			return -EINVAL;
		copy_qcinfo_from_xfs_dqblk(&qinfo, &fdq);
		ret = sb->s_qcop->set_info(sb, type, &qinfo);
		if (ret)
			return ret;
		/* These are already done */
		fdq.d_fieldmask &= ~(FS_DQ_WARNS_MASK | FS_DQ_TIMER_MASK);
	}
	copy_from_xfs_dqblk(&qdq, &fdq);
	return sb->s_qcop->set_dqblk(sb, qid, &qdq);
}

/*
 * Fill an XFS fs_disk_quota from the generic qc_dqblk, converting byte
 * values back to 512-byte basic blocks.
 */
static void copy_to_xfs_dqblk(struct fs_disk_quota *dst, struct qc_dqblk *src,
			      int type, qid_t id)
{
	memset(dst, 0, sizeof(*dst));
	dst->d_version = FS_DQUOT_VERSION;
	dst->d_id = id;
	if (type == USRQUOTA)
		dst->d_flags = FS_USER_QUOTA;
	else if (type == PRJQUOTA)
		dst->d_flags = FS_PROJ_QUOTA;
	else
		dst->d_flags = FS_GROUP_QUOTA;
	dst->d_blk_hardlimit = quota_btobb(src->d_spc_hardlimit);
	dst->d_blk_softlimit = quota_btobb(src->d_spc_softlimit);
	dst->d_ino_hardlimit = src->d_ino_hardlimit;
	dst->d_ino_softlimit = src->d_ino_softlimit;
	dst->d_bcount = quota_btobb(src->d_space);
	dst->d_icount = src->d_ino_count;
	dst->d_itimer = src->d_ino_timer;
	dst->d_btimer = src->d_spc_timer;
	dst->d_iwarns = src->d_ino_warns;
	dst->d_bwarns = src->d_spc_warns;
	dst->d_rtb_hardlimit = quota_btobb(src->d_rt_spc_hardlimit);
	dst->d_rtb_softlimit = quota_btobb(src->d_rt_spc_softlimit);
	dst->d_rtbcount = quota_btobb(src->d_rt_space);
	dst->d_rtbtimer = src->d_rt_spc_timer;
	dst->d_rtbwarns = src->d_rt_spc_warns;
}

/* Q_XGETQUOTA: fetch XFS-style usage and limits for (@type, @id). */
static int quota_getxquota(struct super_block *sb, int type, qid_t id,
			   void __user *addr)
{
	struct fs_disk_quota fdq;
	struct qc_dqblk qdq;
	struct kqid qid;
	int ret;

	if (!sb->s_qcop->get_dqblk)
		return -ENOSYS;
	qid = make_kqid(current_user_ns(), type, id);
	if (!qid_has_mapping(sb->s_user_ns, qid))
		return -EINVAL;
	ret = sb->s_qcop->get_dqblk(sb, qid, &qdq);
	if (ret)
		return ret;
	copy_to_xfs_dqblk(&fdq, &qdq, type, id);
	if (copy_to_user(addr, &fdq, sizeof(fdq)))
		return -EFAULT;
	return ret;
}

/*
 * Return quota for next active quota >= this id, if any exists,
 * otherwise return -ENOENT via ->get_nextdqblk.
 */
static int quota_getnextxquota(struct super_block *sb, int type, qid_t id,
			       void __user *addr)
{
	struct fs_disk_quota fdq;
	struct qc_dqblk qdq;
	struct kqid qid;
	qid_t id_out;
	int ret;

	if (!sb->s_qcop->get_nextdqblk)
		return -ENOSYS;
	qid = make_kqid(current_user_ns(), type, id);
	if (!qid_has_mapping(sb->s_user_ns, qid))
		return -EINVAL;
	ret = sb->s_qcop->get_nextdqblk(sb, &qid, &qdq);
	if (ret)
		return ret;
	id_out = from_kqid(current_user_ns(), qid);
	copy_to_xfs_dqblk(&fdq, &qdq, type, id_out);
	if (copy_to_user(addr, &fdq, sizeof(fdq)))
		return -EFAULT;
	return ret;
}

/* Q_XQUOTARM: let the filesystem release space used by quota files. */
static int quota_rmxquota(struct super_block *sb, void __user *addr)
{
	__u32 flags;

	if (copy_from_user(&flags, addr, sizeof(flags)))
		return -EFAULT;
	if (!sb->s_qcop->rm_xquota)
		return -ENOSYS;
	return sb->s_qcop->rm_xquota(sb, flags);
}

/* Copy parameters and call proper function */
static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
		       void __user *addr, const struct path *path)
{
	int ret;

	if (type >= MAXQUOTAS)
		return -EINVAL;
	/*
	 * Quota not supported on this fs? Check this before s_quota_types
	 * since they needn't be set if quota is not supported at all.
	 */
	if (!sb->s_qcop)
		return -ENOSYS;
	if (!(sb->s_quota_types & (1 << type)))
		return -EINVAL;

	ret = check_quotactl_permission(sb, type, cmd, id);
	if (ret < 0)
		return ret;

	switch (cmd) {
	case Q_QUOTAON:
		return quota_quotaon(sb, type, id, path);
	case Q_QUOTAOFF:
		return quota_quotaoff(sb, type);
	case Q_GETFMT:
		return quota_getfmt(sb, type, addr);
	case Q_GETINFO:
		return quota_getinfo(sb, type, addr);
	case Q_SETINFO:
		return quota_setinfo(sb, type, addr);
	case Q_GETQUOTA:
		return quota_getquota(sb, type, id, addr);
	case Q_GETNEXTQUOTA:
		return quota_getnextquota(sb, type, id, addr);
	case Q_SETQUOTA:
		return quota_setquota(sb, type, id, addr);
	case Q_SYNC:
		if (!sb->s_qcop->quota_sync)
			return -ENOSYS;
		return sb->s_qcop->quota_sync(sb, type);
	case Q_XQUOTAON:
		return quota_enable(sb, addr);
	case Q_XQUOTAOFF:
		return quota_disable(sb, addr);
	case Q_XQUOTARM:
		return quota_rmxquota(sb, addr);
	case Q_XGETQSTAT:
		return quota_getxstate(sb, addr);
	case Q_XGETQSTATV:
		return quota_getxstatev(sb, addr);
	case Q_XSETQLIM:
		return quota_setxquota(sb, type, id, addr);
	case Q_XGETQUOTA:
		return quota_getxquota(sb, type, id, addr);
	case Q_XGETNEXTQUOTA:
		return quota_getnextxquota(sb, type, id, addr);
	case Q_XQUOTASYNC:
		if (sb_rdonly(sb))
			return -EROFS;
		/* XFS quotas are fully coherent now, making this call a noop */
		return 0;
	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_BLOCK

/* Return 1 if 'cmd' will block on frozen filesystem */
static int quotactl_cmd_write(int cmd)
{
	/*
	 * We cannot allow Q_GETQUOTA and Q_GETNEXTQUOTA without write access
	 * as dquot_acquire() may allocate space for new structure and OCFS2
	 * needs to increment on-disk use count.
	 */
	switch (cmd) {
	case Q_GETFMT:
	case Q_GETINFO:
	case Q_SYNC:
	case Q_XGETQSTAT:
	case Q_XGETQSTATV:
	case Q_XGETQUOTA:
	case Q_XGETNEXTQUOTA:
	case Q_XQUOTASYNC:
		return 0;
	}
	return 1;
}
#endif /* CONFIG_BLOCK */

/* Return true if quotactl command is manipulating quota on/off state */
static bool quotactl_cmd_onoff(int cmd)
{
	return (cmd == Q_QUOTAON) || (cmd == Q_QUOTAOFF);
}

/*
 * look up a superblock on which quota ops will be performed
 * - use the name of a block device to find the superblock thereon
 */
static struct super_block *quotactl_block(const char __user *special, int cmd)
{
#ifdef CONFIG_BLOCK
	struct block_device *bdev;
	struct super_block *sb;
	struct filename *tmp = getname(special);

	if (IS_ERR(tmp))
		return ERR_CAST(tmp);
	bdev = lookup_bdev(tmp->name);
	putname(tmp);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);
	/* on/off commands take s_umount exclusively; writers wait for thaw */
	if (quotactl_cmd_onoff(cmd))
		sb = get_super_exclusive_thawed(bdev);
	else if (quotactl_cmd_write(cmd))
		sb = get_super_thawed(bdev);
	else
		sb = get_super(bdev);
	bdput(bdev);
	if (!sb)
		return ERR_PTR(-ENODEV);

	return sb;
#else
	return ERR_PTR(-ENODEV);
#endif
}

/*
 * This is the system call interface. This communicates with
 * the user-level programs. Currently this only supports diskquota
 * calls. Maybe we need to add the process quotas etc. in the future,
 * but we probably should use rlimits for that.
 */
int kernel_quotactl(unsigned int cmd, const char __user *special,
		    qid_t id, void __user *addr)
{
	uint cmds, type;
	struct super_block *sb = NULL;
	struct path path, *pathp = NULL;
	int ret;

	cmds = cmd >> SUBCMDSHIFT;
	type = cmd & SUBCMDMASK;

	/*
	 * As a special case Q_SYNC can be called without a specific device.
	 * It will iterate all superblocks that have quota enabled and call
	 * the sync action on each of them.
	 */
	if (!special) {
		if (cmds == Q_SYNC)
			return quota_sync_all(type);
		return -ENODEV;
	}

	/*
	 * Path for quotaon has to be resolved before grabbing superblock
	 * because that gets s_umount sem which is also possibly needed by path
	 * resolution (think about autofs) and thus deadlocks could arise.
	 */
	if (cmds == Q_QUOTAON) {
		ret = user_path_at(AT_FDCWD, addr, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
		if (ret)
			pathp = ERR_PTR(ret);
		else
			pathp = &path;
	}

	sb = quotactl_block(special, cmds);
	if (IS_ERR(sb)) {
		ret = PTR_ERR(sb);
		goto out;
	}

	ret = do_quotactl(sb, type, cmds, id, addr, pathp);

	/* on/off commands grabbed s_umount exclusively in quotactl_block() */
	if (!quotactl_cmd_onoff(cmds))
		drop_super(sb);
	else
		drop_super_exclusive(sb);
out:
	if (pathp && !IS_ERR(pathp))
		path_put(pathp);
	return ret;
}

SYSCALL_DEFINE4(quotactl, unsigned int, cmd, const char __user *, special,
		qid_t, id, void __user *, addr)
{
	return kernel_quotactl(cmd, special, id, addr);
}