/*
 * Quota code necessary even when VFS quota support is not compiled
 * into the kernel. The interesting stuff is over in dquot.c, here
 * we have symbols for initial quotactl(2) handling, the sysctl(2)
 * variables, etc - things needed even when quota support disabled.
 */

#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <asm/current.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/capability.h>
#include <linux/quotaops.h>
#include <linux/types.h>
#include <linux/writeback.h>

/*
 * Check whether the caller is allowed to issue quotactl command 'cmd' of
 * quota type 'type' for id 'id' on superblock 'sb'.  Read-only status
 * queries need no privilege, querying your own user/group quota is allowed,
 * everything else requires CAP_SYS_ADMIN.  Regardless of the capability
 * outcome, the LSM hook security_quotactl() gets the final say.
 * Returns 0 if permitted, -EPERM otherwise (or the LSM's error).
 */
static int check_quotactl_permission(struct super_block *sb, int type, int cmd,
				     qid_t id)
{
	switch (cmd) {
	/* these commands do not require any special privilegues */
	case Q_GETFMT:
	case Q_SYNC:
	case Q_GETINFO:
	case Q_XGETQSTAT:
	case Q_XGETQSTATV:
	case Q_XQUOTASYNC:
		break;
	/* allow to query information for dquots we "own" */
	case Q_GETQUOTA:
	case Q_XGETQUOTA:
		/*
		 * The id is mapped through the caller's user namespace before
		 * comparison with the effective uid / supplementary groups.
		 */
		if ((type == USRQUOTA && uid_eq(current_euid(),
						make_kuid(current_user_ns(), id))) ||
		    (type == GRPQUOTA &&
		     in_egroup_p(make_kgid(current_user_ns(), id))))
			break;
		/*FALLTHROUGH*/
	default:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
	}

	return security_quotactl(cmd, type, id, sb);
}

/*
 * iterate_supers() callback: sync quota (of type *arg) on one superblock,
 * but only if the filesystem has quota ops with ->quota_sync and declares
 * support for this quota type in sb->s_quota_types.
 */
static void quota_sync_one(struct super_block *sb, void *arg)
{
	int type = *(int *)arg;

	if (sb->s_qcop && sb->s_qcop->quota_sync &&
	    (sb->s_quota_types & (1 << type)))
		sb->s_qcop->quota_sync(sb, type);
}

/*
 * Handle Q_SYNC without a specific device: sync quotas of 'type' on all
 * superblocks after asking the LSM (with a NULL sb) for permission.
 */
static int quota_sync_all(int type)
{
	int ret;

	if (type >= MAXQUOTAS)
		return -EINVAL;
	ret = security_quotactl(Q_SYNC, type, 0, NULL);
	if (!ret)
		iterate_supers(quota_sync_one, &type);
	return ret;
}

/*
 * Map a quota type to the corresponding XFS-style "enforcement" flag used
 * by ->quota_enable / ->quota_disable.  Returns 0 for an unknown type.
 */
unsigned int qtype_enforce_flag(int type)
{
	switch (type) {
	case USRQUOTA:
		return FS_QUOTA_UDQ_ENFD;
	case GRPQUOTA:
		return FS_QUOTA_GDQ_ENFD;
	case PRJQUOTA:
		return FS_QUOTA_PDQ_ENFD;
	}
	return 0;
}

/*
 * Q_QUOTAON: turn quotas on.  Filesystems that manage quota state
 * internally provide ->quota_enable (preferred, takes no quota file path);
 * legacy-style filesystems provide ->quota_on and get the user-supplied
 * quota file path.  'path' may carry an error from path resolution which
 * is only propagated if ->quota_on is actually needed.
 */
static int quota_quotaon(struct super_block *sb, int type, qid_t id,
			 struct path *path)
{
	if (!sb->s_qcop->quota_on && !sb->s_qcop->quota_enable)
		return -ENOSYS;
	if (sb->s_qcop->quota_enable)
		return sb->s_qcop->quota_enable(sb, qtype_enforce_flag(type));
	if (IS_ERR(path))
		return PTR_ERR(path);
	return sb->s_qcop->quota_on(sb, type, id, path);
}

/* Q_QUOTAOFF: turn quotas off, preferring ->quota_disable over ->quota_off. */
static int quota_quotaoff(struct super_block *sb, int type)
{
	if (!sb->s_qcop->quota_off && !sb->s_qcop->quota_disable)
		return -ENOSYS;
	if (sb->s_qcop->quota_disable)
		return sb->s_qcop->quota_disable(sb, qtype_enforce_flag(type));
	return sb->s_qcop->quota_off(sb, type);
}

/*
 * Q_GETFMT: copy the active quota format id for 'type' to userspace.
 * dqonoff_mutex protects against concurrent quotaon/quotaoff while the
 * format id is read; -ESRCH if quota of this type is not active.
 */
static int quota_getfmt(struct super_block *sb, int type, void __user *addr)
{
	__u32 fmt;

	mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
	if (!sb_has_quota_active(sb, type)) {
		mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
		return -ESRCH;
	}
	fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id;
	mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
	if (copy_to_user(addr, &fmt, sizeof(fmt)))
		return -EFAULT;
	return 0;
}

/*
 * Q_GETINFO: translate the filesystem's qc_state for 'type' into the
 * userspace struct if_dqinfo (grace times and DQF_* flags).
 * -ESRCH if accounting is not enabled for this type.
 */
static int quota_getinfo(struct super_block *sb, int type, void __user *addr)
{
	struct qc_state state;
	struct qc_type_state *tstate;
	struct if_dqinfo uinfo;
	int ret;

	/* This checks whether qc_state has enough entries... */
	BUILD_BUG_ON(MAXQUOTAS > XQM_MAXQUOTAS);
	if (!sb->s_qcop->get_state)
		return -ENOSYS;
	ret = sb->s_qcop->get_state(sb, &state);
	if (ret)
		return ret;
	tstate = state.s_state + type;
	if (!(tstate->flags & QCI_ACCT_ENABLED))
		return -ESRCH;
	memset(&uinfo, 0, sizeof(uinfo));
	uinfo.dqi_bgrace = tstate->spc_timelimit;
	uinfo.dqi_igrace = tstate->ino_timelimit;
	if (tstate->flags & QCI_SYSFILE)
		uinfo.dqi_flags |= DQF_SYS_FILE;
	if (tstate->flags & QCI_ROOT_SQUASH)
		uinfo.dqi_flags |= DQF_ROOT_SQUASH;
	uinfo.dqi_valid = IIF_ALL;
	if (copy_to_user(addr, &uinfo, sizeof(uinfo)))
		return -EFAULT;
	return 0;
}

/*
 * Q_SETINFO: copy struct if_dqinfo from userspace, validate the valid-bits
 * mask and flags, translate into struct qc_info and pass to ->set_info.
 * Only IIF_FLAGS / IIF_BGRACE / IIF_IGRACE may be set in dqi_valid.
 */
static int quota_setinfo(struct super_block *sb, int type, void __user *addr)
{
	struct if_dqinfo info;
	struct qc_info qinfo;

	if (copy_from_user(&info, addr, sizeof(info)))
		return -EFAULT;
	if (!sb->s_qcop->set_info)
		return -ENOSYS;
	if (info.dqi_valid & ~(IIF_FLAGS | IIF_BGRACE | IIF_IGRACE))
		return -EINVAL;
	memset(&qinfo, 0, sizeof(qinfo));
	if (info.dqi_valid & IIF_FLAGS) {
		if (info.dqi_flags & ~DQF_SETINFO_MASK)
			return -EINVAL;
		if (info.dqi_flags & DQF_ROOT_SQUASH)
			qinfo.i_flags |= QCI_ROOT_SQUASH;
		qinfo.i_fieldmask |= QC_FLAGS;
	}
	if (info.dqi_valid & IIF_BGRACE) {
		qinfo.i_spc_timelimit = info.dqi_bgrace;
		qinfo.i_fieldmask |= QC_SPC_TIMER;
	}
	if (info.dqi_valid & IIF_IGRACE) {
		qinfo.i_ino_timelimit = info.dqi_igrace;
		qinfo.i_fieldmask |= QC_INO_TIMER;
	}
	return sb->s_qcop->set_info(sb, type, &qinfo);
}

/* Quota blocks to bytes (quotactl block size is 1 << QIF_DQBLKSIZE_BITS). */
static inline qsize_t qbtos(qsize_t blocks)
{
	return blocks << QIF_DQBLKSIZE_BITS;
}

/* Bytes to quota blocks, rounding up. */
static inline qsize_t stoqb(qsize_t space)
{
	return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS;
}

/*
 * Fill the userspace-ABI struct if_dqblk from the kernel-internal
 * struct qc_dqblk.  Space limits are converted from bytes to quota blocks
 * (rounding up); current usage dqb_curspace stays in bytes, matching the
 * quotactl(2) ABI.  All dqb_valid bits are set since every field is filled.
 */
static void copy_to_if_dqblk(struct if_dqblk *dst, struct qc_dqblk *src)
{
	memset(dst, 0, sizeof(*dst));
	dst->dqb_bhardlimit = stoqb(src->d_spc_hardlimit);
	dst->dqb_bsoftlimit = stoqb(src->d_spc_softlimit);
	dst->dqb_curspace = src->d_space;
	dst->dqb_ihardlimit = src->d_ino_hardlimit;
	dst->dqb_isoftlimit = src->d_ino_softlimit;
	dst->dqb_curinodes = src->d_ino_count;
	dst->dqb_btime = src->d_spc_timer;
	dst->dqb_itime = src->d_ino_timer;
	dst->dqb_valid = QIF_ALL;
}

/*
 * Q_GETQUOTA: fetch limits/usage for one id.  The id is converted to a
 * kqid in the caller's namespace and must be representable in the
 * superblock's user namespace (s_user_ns), else -EINVAL.
 */
static int quota_getquota(struct super_block *sb, int type, qid_t id,
			  void __user *addr)
{
	struct kqid qid;
	struct qc_dqblk fdq;
	struct if_dqblk idq;
	int ret;

	if (!sb->s_qcop->get_dqblk)
		return -ENOSYS;
	qid = make_kqid(current_user_ns(), type, id);
	if (!qid_has_mapping(sb->s_user_ns, qid))
		return -EINVAL;
	ret = sb->s_qcop->get_dqblk(sb, qid, &fdq);
	if (ret)
		return ret;
	copy_to_if_dqblk(&idq, &fdq);
	if (copy_to_user(addr, &idq, sizeof(idq)))
		return -EFAULT;
	return 0;
}

/*
 * Return quota for next active quota >= this id, if any exists,
 * otherwise return -ENOENT via ->get_nextdqblk
 */
static int quota_getnextquota(struct super_block *sb, int type, qid_t id,
			      void __user *addr)
{
	struct kqid qid;
	struct qc_dqblk fdq;
	struct if_nextdqblk idq;
	int ret;

	if (!sb->s_qcop->get_nextdqblk)
		return -ENOSYS;
	qid = make_kqid(current_user_ns(), type, id);
	if (!qid_has_mapping(sb->s_user_ns, qid))
		return -EINVAL;
	/* ->get_nextdqblk may advance qid to the next allocated id */
	ret = sb->s_qcop->get_nextdqblk(sb, &qid, &fdq);
	if (ret)
		return ret;
	/* struct if_nextdqblk is a superset of struct if_dqblk */
	copy_to_if_dqblk((struct if_dqblk *)&idq, &fdq);
	idq.dqb_id = from_kqid(current_user_ns(), qid);
	if (copy_to_user(addr, &idq, sizeof(idq)))
		return -EFAULT;
	return 0;
}

/*
 * Translate userspace struct if_dqblk into kernel struct qc_dqblk,
 * converting block limits to bytes and mapping the QIF_* valid bits onto
 * the corresponding QC_* fieldmask bits so ->set_dqblk knows which fields
 * the caller actually wants changed.
 */
static void copy_from_if_dqblk(struct qc_dqblk *dst, struct if_dqblk *src)
{
	dst->d_spc_hardlimit = qbtos(src->dqb_bhardlimit);
	dst->d_spc_softlimit = qbtos(src->dqb_bsoftlimit);
	dst->d_space = src->dqb_curspace;
	dst->d_ino_hardlimit = src->dqb_ihardlimit;
	dst->d_ino_softlimit = src->dqb_isoftlimit;
	dst->d_ino_count = src->dqb_curinodes;
	dst->d_spc_timer = src->dqb_btime;
	dst->d_ino_timer = src->dqb_itime;

	dst->d_fieldmask = 0;
	if (src->dqb_valid & QIF_BLIMITS)
		dst->d_fieldmask |= QC_SPC_SOFT | QC_SPC_HARD;
	if (src->dqb_valid & QIF_SPACE)
		dst->d_fieldmask |= QC_SPACE;
	if (src->dqb_valid & QIF_ILIMITS)
		dst->d_fieldmask |= QC_INO_SOFT | QC_INO_HARD;
	if (src->dqb_valid & QIF_INODES)
		dst->d_fieldmask |= QC_INO_COUNT;
	if (src->dqb_valid & QIF_BTIME)
		dst->d_fieldmask |= QC_SPC_TIMER;
	if (src->dqb_valid & QIF_ITIME)
		dst->d_fieldmask |= QC_INO_TIMER;
}

/*
 * Q_SETQUOTA: set limits for one id.  Same namespace mapping rules as
 * quota_getquota(); the actual update is delegated to ->set_dqblk.
 */
static int quota_setquota(struct super_block *sb, int type, qid_t id,
			  void __user *addr)
{
	struct qc_dqblk fdq;
	struct if_dqblk idq;
	struct kqid qid;

	if (copy_from_user(&idq, addr, sizeof(idq)))
		return -EFAULT;
	if (!sb->s_qcop->set_dqblk)
		return -ENOSYS;
	qid = make_kqid(current_user_ns(), type, id);
	if (!qid_has_mapping(sb->s_user_ns, qid))
		return -EINVAL;
	copy_from_if_dqblk(&fdq, &idq);
	return sb->s_qcop->set_dqblk(sb, qid, &fdq);
}

/*
 * Q_XQUOTAON: enable quota with XFS-style FS_QUOTA_* flags read from
 * userspace; passed through to ->quota_enable unvalidated (the filesystem
 * validates the flag combination).
 */
static int quota_enable(struct super_block *sb, void __user *addr)
{
	__u32 flags;

	if (copy_from_user(&flags, addr, sizeof(flags)))
		return -EFAULT;
	if (!sb->s_qcop->quota_enable)
		return -ENOSYS;
	return sb->s_qcop->quota_enable(sb, flags);
}

/* Q_XQUOTAOFF: counterpart of quota_enable(), delegating to ->quota_disable. */
static int quota_disable(struct super_block *sb, void __user *addr)
{
	__u32 flags;

	if (copy_from_user(&flags, addr, sizeof(flags)))
		return -EFAULT;
	if (!sb->s_qcop->quota_disable)
		return -ENOSYS;
	return sb->s_qcop->quota_disable(sb, flags);
}

/*
 * Convert the per-type QCI_ACCT_ENABLED / QCI_LIMITS_ENFORCED state bits
 * into the combined XFS-style FS_QUOTA_*_ACCT / FS_QUOTA_*_ENFD flag word
 * reported by Q_XGETQSTAT(V).
 */
static int quota_state_to_flags(struct qc_state *state)
{
	int flags = 0;

	if (state->s_state[USRQUOTA].flags & QCI_ACCT_ENABLED)
		flags |= FS_QUOTA_UDQ_ACCT;
	if (state->s_state[USRQUOTA].flags & QCI_LIMITS_ENFORCED)
		flags |= FS_QUOTA_UDQ_ENFD;
	if (state->s_state[GRPQUOTA].flags & QCI_ACCT_ENABLED)
		flags |= FS_QUOTA_GDQ_ACCT;
	if (state->s_state[GRPQUOTA].flags & QCI_LIMITS_ENFORCED)
		flags |= FS_QUOTA_GDQ_ENFD;
	if (state->s_state[PRJQUOTA].flags & QCI_ACCT_ENABLED)
		flags |= FS_QUOTA_PDQ_ACCT;
	if (state->s_state[PRJQUOTA].flags & QCI_LIMITS_ENFORCED)
		flags |= FS_QUOTA_PDQ_ENFD;
	return flags;
}

/*
 * Build struct fs_quota_stat (Q_XGETQSTAT payload) from ->get_state.
 * Returns -ENOSYS when no quota type is enabled at all.  Note the ABI
 * quirk handled at the bottom: fs_quota_stat has no project-quota slot,
 * so project data is reported in the group slot when group quota itself
 * is not enabled.
 */
static int quota_getstate(struct super_block *sb, struct fs_quota_stat *fqs)
{
	int type;
	struct qc_state state;
	int ret;

	memset(&state, 0, sizeof (struct qc_state));
	ret = sb->s_qcop->get_state(sb, &state);
	if (ret < 0)
		return ret;

	memset(fqs, 0, sizeof(*fqs));
	fqs->qs_version = FS_QSTAT_VERSION;
	fqs->qs_flags = quota_state_to_flags(&state);
	/* No quota enabled? */
	if (!fqs->qs_flags)
		return -ENOSYS;
	fqs->qs_incoredqs = state.s_incoredqs;
	/*
	 * GETXSTATE quotactl has space for just one set of time limits so
	 * report them for the first enabled quota type
	 */
	for (type = 0; type < XQM_MAXQUOTAS; type++)
		if (state.s_state[type].flags & QCI_ACCT_ENABLED)
			break;
	/* qs_flags != 0 above guarantees at least one enabled type */
	BUG_ON(type == XQM_MAXQUOTAS);
	fqs->qs_btimelimit = state.s_state[type].spc_timelimit;
	fqs->qs_itimelimit = state.s_state[type].ino_timelimit;
	fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit;
	fqs->qs_bwarnlimit = state.s_state[type].spc_warnlimit;
	fqs->qs_iwarnlimit = state.s_state[type].ino_warnlimit;

	/* Inodes may be allocated even if inactive; copy out if present */
	if (state.s_state[USRQUOTA].ino) {
		fqs->qs_uquota.qfs_ino = state.s_state[USRQUOTA].ino;
		fqs->qs_uquota.qfs_nblks = state.s_state[USRQUOTA].blocks;
		fqs->qs_uquota.qfs_nextents = state.s_state[USRQUOTA].nextents;
	}
	if (state.s_state[GRPQUOTA].ino) {
		fqs->qs_gquota.qfs_ino = state.s_state[GRPQUOTA].ino;
		fqs->qs_gquota.qfs_nblks = state.s_state[GRPQUOTA].blocks;
		fqs->qs_gquota.qfs_nextents = state.s_state[GRPQUOTA].nextents;
	}
	if (state.s_state[PRJQUOTA].ino) {
		/*
		 * Q_XGETQSTAT doesn't have room for both group and project
		 * quotas. So, allow the project quota values to be copied out
		 * only if there is no group quota information available.
		 */
		if (!(state.s_state[GRPQUOTA].flags & QCI_ACCT_ENABLED)) {
			fqs->qs_gquota.qfs_ino = state.s_state[PRJQUOTA].ino;
			fqs->qs_gquota.qfs_nblks =
					state.s_state[PRJQUOTA].blocks;
			fqs->qs_gquota.qfs_nextents =
					state.s_state[PRJQUOTA].nextents;
		}
	}
	return 0;
}

/* Q_XGETQSTAT: build fs_quota_stat and copy it to userspace. */
static int quota_getxstate(struct super_block *sb, void __user *addr)
{
	struct fs_quota_stat fqs;
	int ret;

	if (!sb->s_qcop->get_state)
		return -ENOSYS;
	ret = quota_getstate(sb, &fqs);
	if (!ret && copy_to_user(addr, &fqs, sizeof(fqs)))
		return -EFAULT;
	return ret;
}

/*
 * Build struct fs_quota_statv (Q_XGETQSTATV payload) from ->get_state.
 * Unlike fs_quota_stat, this structure has a dedicated project-quota
 * slot, so no group/project aliasing is needed.
 */
static int quota_getstatev(struct super_block *sb, struct fs_quota_statv *fqs)
{
	int type;
	struct qc_state state;
	int ret;

	memset(&state, 0, sizeof (struct qc_state));
	ret = sb->s_qcop->get_state(sb, &state);
	if (ret < 0)
		return ret;

	memset(fqs, 0, sizeof(*fqs));
	fqs->qs_version = FS_QSTAT_VERSION;
	fqs->qs_flags = quota_state_to_flags(&state);
	/* No quota enabled? */
	if (!fqs->qs_flags)
		return -ENOSYS;
	fqs->qs_incoredqs = state.s_incoredqs;
	/*
	 * GETXSTATV quotactl has space for just one set of time limits so
	 * report them for the first enabled quota type
	 */
	for (type = 0; type < XQM_MAXQUOTAS; type++)
		if (state.s_state[type].flags & QCI_ACCT_ENABLED)
			break;
	/* qs_flags != 0 above guarantees at least one enabled type */
	BUG_ON(type == XQM_MAXQUOTAS);
	fqs->qs_btimelimit = state.s_state[type].spc_timelimit;
	fqs->qs_itimelimit = state.s_state[type].ino_timelimit;
	fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit;
	fqs->qs_bwarnlimit = state.s_state[type].spc_warnlimit;
	fqs->qs_iwarnlimit = state.s_state[type].ino_warnlimit;

	/* Inodes may be allocated even if inactive; copy out if present */
	if (state.s_state[USRQUOTA].ino) {
		fqs->qs_uquota.qfs_ino = state.s_state[USRQUOTA].ino;
		fqs->qs_uquota.qfs_nblks = state.s_state[USRQUOTA].blocks;
		fqs->qs_uquota.qfs_nextents = state.s_state[USRQUOTA].nextents;
	}
	if (state.s_state[GRPQUOTA].ino) {
		fqs->qs_gquota.qfs_ino = state.s_state[GRPQUOTA].ino;
		fqs->qs_gquota.qfs_nblks = state.s_state[GRPQUOTA].blocks;
		fqs->qs_gquota.qfs_nextents = state.s_state[GRPQUOTA].nextents;
	}
	if (state.s_state[PRJQUOTA].ino) {
		fqs->qs_pquota.qfs_ino = state.s_state[PRJQUOTA].ino;
		fqs->qs_pquota.qfs_nblks = state.s_state[PRJQUOTA].blocks;
		fqs->qs_pquota.qfs_nextents = state.s_state[PRJQUOTA].nextents;
	}
	return 0;
}

/*
 * Q_XGETQSTATV: versioned variant of Q_XGETQSTAT.  Only the first byte of
 * the user buffer (the version field) is read up front; unknown versions
 * are rejected with -EINVAL before any state is gathered.
 * NOTE(review): the 1-byte read assumes qs_version is the first member and
 * fits in one byte — relies on the fs_quota_statv layout in the UAPI header.
 */
static int quota_getxstatev(struct super_block *sb, void __user *addr)
{
	struct fs_quota_statv fqs;
	int ret;

	if (!sb->s_qcop->get_state)
		return -ENOSYS;

	memset(&fqs, 0, sizeof(fqs));
	if (copy_from_user(&fqs, addr, 1)) /* Just read qs_version */
		return -EFAULT;

	/* If this kernel doesn't support user specified version, fail */
	switch (fqs.qs_version) {
	case FS_QSTATV_VERSION1:
		break;
	default:
		return -EINVAL;
	}
	ret = quota_getstatev(sb, &fqs);
	if (!ret && copy_to_user(addr, &fqs, sizeof(fqs)))
		return -EFAULT;
	return ret;
}

/*
 * XFS defines BBTOB and BTOBB macros inside fs/xfs/ and we cannot move them
 * out of there as xfsprogs rely on definitions being in that header file. So
 * just define same functions here for quota purposes.
 */
#define XFS_BB_SHIFT 9

/* Basic blocks (512 bytes) to bytes. */
static inline u64 quota_bbtob(u64 blocks)
{
	return blocks << XFS_BB_SHIFT;
}

/* Bytes to basic blocks, rounding up. */
static inline u64 quota_btobb(u64 bytes)
{
	return (bytes + (1 << XFS_BB_SHIFT) - 1) >> XFS_BB_SHIFT;
}

/*
 * Translate the XFS-style userspace struct fs_disk_quota into the kernel
 * struct qc_dqblk: all block-denominated fields are converted from
 * 512-byte basic blocks to bytes, and each FS_DQ_* fieldmask bit is mapped
 * onto the corresponding QC_* bit.
 */
static void copy_from_xfs_dqblk(struct qc_dqblk *dst, struct fs_disk_quota *src)
{
	dst->d_spc_hardlimit = quota_bbtob(src->d_blk_hardlimit);
	dst->d_spc_softlimit = quota_bbtob(src->d_blk_softlimit);
	dst->d_ino_hardlimit = src->d_ino_hardlimit;
	dst->d_ino_softlimit = src->d_ino_softlimit;
	dst->d_space = quota_bbtob(src->d_bcount);
	dst->d_ino_count = src->d_icount;
	dst->d_ino_timer = src->d_itimer;
	dst->d_spc_timer = src->d_btimer;
	dst->d_ino_warns = src->d_iwarns;
	dst->d_spc_warns = src->d_bwarns;
	dst->d_rt_spc_hardlimit = quota_bbtob(src->d_rtb_hardlimit);
	dst->d_rt_spc_softlimit = quota_bbtob(src->d_rtb_softlimit);
	dst->d_rt_space = quota_bbtob(src->d_rtbcount);
	dst->d_rt_spc_timer = src->d_rtbtimer;
	dst->d_rt_spc_warns = src->d_rtbwarns;
	dst->d_fieldmask = 0;
	if (src->d_fieldmask & FS_DQ_ISOFT)
		dst->d_fieldmask |= QC_INO_SOFT;
	if (src->d_fieldmask & FS_DQ_IHARD)
		dst->d_fieldmask |= QC_INO_HARD;
	if (src->d_fieldmask & FS_DQ_BSOFT)
		dst->d_fieldmask |= QC_SPC_SOFT;
	if (src->d_fieldmask & FS_DQ_BHARD)
		dst->d_fieldmask |= QC_SPC_HARD;
	if (src->d_fieldmask & FS_DQ_RTBSOFT)
		dst->d_fieldmask |= QC_RT_SPC_SOFT;
	if (src->d_fieldmask & FS_DQ_RTBHARD)
		dst->d_fieldmask |= QC_RT_SPC_HARD;
	if (src->d_fieldmask & FS_DQ_BTIMER)
		dst->d_fieldmask |= QC_SPC_TIMER;
	if (src->d_fieldmask & FS_DQ_ITIMER)
		dst->d_fieldmask |= QC_INO_TIMER;
	if (src->d_fieldmask & FS_DQ_RTBTIMER)
		dst->d_fieldmask |= QC_RT_SPC_TIMER;
	if (src->d_fieldmask & FS_DQ_BWARNS)
		dst->d_fieldmask |= QC_SPC_WARNS;
	if (src->d_fieldmask & FS_DQ_IWARNS)
		dst->d_fieldmask |= QC_INO_WARNS;
	if (src->d_fieldmask & FS_DQ_RTBWARNS)
		dst->d_fieldmask |= QC_RT_SPC_WARNS;
	if (src->d_fieldmask & FS_DQ_BCOUNT)
		dst->d_fieldmask |= QC_SPACE;
	if (src->d_fieldmask & FS_DQ_ICOUNT)
		dst->d_fieldmask |= QC_INO_COUNT;
	if (src->d_fieldmask & FS_DQ_RTBCOUNT)
		dst->d_fieldmask |= QC_RT_SPACE;
}

/*
 * Extract the fs-wide defaults (timers and warn limits) from a
 * fs_disk_quota into struct qc_info.  Used when Q_XSETQLIM on id 0 is
 * really setting filesystem-wide grace times / warning limits.
 * Note: timer/warn values are copied unconditionally, but only the
 * FS_DQ_* bits present in d_fieldmask are reflected in i_fieldmask.
 */
static void copy_qcinfo_from_xfs_dqblk(struct qc_info *dst,
				       struct fs_disk_quota *src)
{
	memset(dst, 0, sizeof(*dst));
	dst->i_spc_timelimit = src->d_btimer;
	dst->i_ino_timelimit = src->d_itimer;
	dst->i_rt_spc_timelimit = src->d_rtbtimer;
	dst->i_ino_warnlimit = src->d_iwarns;
	dst->i_spc_warnlimit = src->d_bwarns;
	dst->i_rt_spc_warnlimit = src->d_rtbwarns;
	if (src->d_fieldmask & FS_DQ_BWARNS)
		dst->i_fieldmask |= QC_SPC_WARNS;
	if (src->d_fieldmask & FS_DQ_IWARNS)
		dst->i_fieldmask |= QC_INO_WARNS;
	if (src->d_fieldmask & FS_DQ_RTBWARNS)
		dst->i_fieldmask |= QC_RT_SPC_WARNS;
	if (src->d_fieldmask & FS_DQ_BTIMER)
		dst->i_fieldmask |= QC_SPC_TIMER;
	if (src->d_fieldmask & FS_DQ_ITIMER)
		dst->i_fieldmask |= QC_INO_TIMER;
	if (src->d_fieldmask & FS_DQ_RTBTIMER)
		dst->i_fieldmask |= QC_RT_SPC_TIMER;
}

/*
 * Q_XSETQLIM: set XFS-style limits for one id.  Setting timers/warn
 * limits on id 0 is the XFS convention for setting the filesystem-wide
 * defaults, which is routed through ->set_info first; the remaining
 * per-id fields then go through ->set_dqblk.
 */
static int quota_setxquota(struct super_block *sb, int type, qid_t id,
			   void __user *addr)
{
	struct fs_disk_quota fdq;
	struct qc_dqblk qdq;
	struct kqid qid;

	if (copy_from_user(&fdq, addr, sizeof(fdq)))
		return -EFAULT;
	if (!sb->s_qcop->set_dqblk)
		return -ENOSYS;
	qid = make_kqid(current_user_ns(), type, id);
	if (!qid_has_mapping(sb->s_user_ns, qid))
		return -EINVAL;
	/* Are we actually setting timer / warning limits for all users? */
	if (from_kqid(sb->s_user_ns, qid) == 0 &&
	    fdq.d_fieldmask & (FS_DQ_WARNS_MASK | FS_DQ_TIMER_MASK)) {
		struct qc_info qinfo;
		int ret;

		if (!sb->s_qcop->set_info)
			return -EINVAL;
		copy_qcinfo_from_xfs_dqblk(&qinfo, &fdq);
		ret = sb->s_qcop->set_info(sb, type, &qinfo);
		if (ret)
			return ret;
		/* These are already done */
		fdq.d_fieldmask &= ~(FS_DQ_WARNS_MASK | FS_DQ_TIMER_MASK);
	}
	copy_from_xfs_dqblk(&qdq, &fdq);
	return sb->s_qcop->set_dqblk(sb, qid, &qdq);
}

/*
 * Fill the XFS-style userspace struct fs_disk_quota from kernel struct
 * qc_dqblk for the given quota type and (already namespace-converted) id.
 * Byte-denominated fields are converted back to 512-byte basic blocks.
 */
static void copy_to_xfs_dqblk(struct fs_disk_quota *dst, struct qc_dqblk *src,
			      int type, qid_t id)
{
	memset(dst, 0, sizeof(*dst));
	dst->d_version = FS_DQUOT_VERSION;
	dst->d_id = id;
	if (type == USRQUOTA)
		dst->d_flags = FS_USER_QUOTA;
	else if (type == PRJQUOTA)
		dst->d_flags = FS_PROJ_QUOTA;
	else
		dst->d_flags = FS_GROUP_QUOTA;
	dst->d_blk_hardlimit = quota_btobb(src->d_spc_hardlimit);
	dst->d_blk_softlimit = quota_btobb(src->d_spc_softlimit);
	dst->d_ino_hardlimit = src->d_ino_hardlimit;
	dst->d_ino_softlimit = src->d_ino_softlimit;
	dst->d_bcount = quota_btobb(src->d_space);
	dst->d_icount = src->d_ino_count;
	dst->d_itimer = src->d_ino_timer;
	dst->d_btimer = src->d_spc_timer;
	dst->d_iwarns = src->d_ino_warns;
	dst->d_bwarns = src->d_spc_warns;
	dst->d_rtb_hardlimit = quota_btobb(src->d_rt_spc_hardlimit);
	dst->d_rtb_softlimit = quota_btobb(src->d_rt_spc_softlimit);
	dst->d_rtbcount = quota_btobb(src->d_rt_space);
	dst->d_rtbtimer = src->d_rt_spc_timer;
	dst->d_rtbwarns = src->d_rt_spc_warns;
}

/* Q_XGETQUOTA: XFS-style counterpart of quota_getquota(). */
static int quota_getxquota(struct super_block *sb, int type, qid_t id,
			   void __user *addr)
{
	struct fs_disk_quota fdq;
	struct qc_dqblk qdq;
	struct kqid qid;
	int ret;

	if (!sb->s_qcop->get_dqblk)
		return -ENOSYS;
	qid = make_kqid(current_user_ns(), type, id);
	if (!qid_has_mapping(sb->s_user_ns, qid))
		return -EINVAL;
	ret = sb->s_qcop->get_dqblk(sb, qid, &qdq);
	if (ret)
		return ret;
	copy_to_xfs_dqblk(&fdq, &qdq, type, id);
	if (copy_to_user(addr, &fdq, sizeof(fdq)))
		return -EFAULT;
	return ret;
}

/*
 * Return quota for next active quota >= this id, if any exists,
 * otherwise return -ENOENT via ->get_nextdqblk.
 */
static int quota_getnextxquota(struct super_block *sb, int type, qid_t id,
			       void __user *addr)
{
	struct fs_disk_quota fdq;
	struct qc_dqblk qdq;
	struct kqid qid;
	qid_t id_out;
	int ret;

	if (!sb->s_qcop->get_nextdqblk)
		return -ENOSYS;
	qid = make_kqid(current_user_ns(), type, id);
	if (!qid_has_mapping(sb->s_user_ns, qid))
		return -EINVAL;
	ret = sb->s_qcop->get_nextdqblk(sb, &qid, &qdq);
	if (ret)
		return ret;
	/* Report the (possibly advanced) id back in the caller's namespace */
	id_out = from_kqid(current_user_ns(), qid);
	copy_to_xfs_dqblk(&fdq, &qdq, type, id_out);
	if (copy_to_user(addr, &fdq, sizeof(fdq)))
		return -EFAULT;
	return ret;
}

/* Q_XQUOTARM: free disk space taken by disabled quotas of the given kinds. */
static int quota_rmxquota(struct super_block *sb, void __user *addr)
{
	__u32 flags;

	if (copy_from_user(&flags, addr, sizeof(flags)))
		return -EFAULT;
	if (!sb->s_qcop->rm_xquota)
		return -ENOSYS;
	return sb->s_qcop->rm_xquota(sb, flags);
}

/* Copy parameters and call proper function */
static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
		       void __user *addr, struct path *path)
{
	int ret;

	/* XQM (XFS) commands allow more quota types than the VFS ones */
	if (type >= (XQM_COMMAND(cmd) ? XQM_MAXQUOTAS : MAXQUOTAS))
		return -EINVAL;
	/*
	 * Quota not supported on this fs? Check this before s_quota_types
	 * since they needn't be set if quota is not supported at all.
	 */
	if (!sb->s_qcop)
		return -ENOSYS;
	if (!(sb->s_quota_types & (1 << type)))
		return -EINVAL;

	ret = check_quotactl_permission(sb, type, cmd, id);
	if (ret < 0)
		return ret;

	switch (cmd) {
	case Q_QUOTAON:
		return quota_quotaon(sb, type, id, path);
	case Q_QUOTAOFF:
		return quota_quotaoff(sb, type);
	case Q_GETFMT:
		return quota_getfmt(sb, type, addr);
	case Q_GETINFO:
		return quota_getinfo(sb, type, addr);
	case Q_SETINFO:
		return quota_setinfo(sb, type, addr);
	case Q_GETQUOTA:
		return quota_getquota(sb, type, id, addr);
	case Q_GETNEXTQUOTA:
		return quota_getnextquota(sb, type, id, addr);
	case Q_SETQUOTA:
		return quota_setquota(sb, type, id, addr);
	case Q_SYNC:
		if (!sb->s_qcop->quota_sync)
			return -ENOSYS;
		return sb->s_qcop->quota_sync(sb, type);
	case Q_XQUOTAON:
		return quota_enable(sb, addr);
	case Q_XQUOTAOFF:
		return quota_disable(sb, addr);
	case Q_XQUOTARM:
		return quota_rmxquota(sb, addr);
	case Q_XGETQSTAT:
		return quota_getxstate(sb, addr);
	case Q_XGETQSTATV:
		return quota_getxstatev(sb, addr);
	case Q_XSETQLIM:
		return quota_setxquota(sb, type, id, addr);
	case Q_XGETQUOTA:
		return quota_getxquota(sb, type, id, addr);
	case Q_XGETNEXTQUOTA:
		return quota_getnextxquota(sb, type, id, addr);
	case Q_XQUOTASYNC:
		if (sb->s_flags & MS_RDONLY)
			return -EROFS;
		/* XFS quotas are fully coherent now, making this call a noop */
		return 0;
	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_BLOCK

/* Return 1 if 'cmd' will block on frozen filesystem */
static int quotactl_cmd_write(int cmd)
{
	/*
	 * We cannot allow Q_GETQUOTA and Q_GETNEXTQUOTA without write access
	 * as dquot_acquire() may allocate space for new structure and OCFS2
	 * needs to increment on-disk use count.
	 */
	switch (cmd) {
	case Q_GETFMT:
	case Q_GETINFO:
	case Q_SYNC:
	case Q_XGETQSTAT:
	case Q_XGETQSTATV:
	case Q_XGETQUOTA:
	case Q_XGETNEXTQUOTA:
	case Q_XQUOTASYNC:
		return 0;
	}
	return 1;
}

#endif /* CONFIG_BLOCK */

/*
 * look up a superblock on which quota ops will be performed
 * - use the name of a block device to find the superblock thereon
 */
static struct super_block *quotactl_block(const char __user *special, int cmd)
{
#ifdef CONFIG_BLOCK
	struct block_device *bdev;
	struct super_block *sb;
	struct filename *tmp = getname(special);

	if (IS_ERR(tmp))
		return ERR_CAST(tmp);
	bdev = lookup_bdev(tmp->name);
	putname(tmp);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);
	/* Commands that may write must wait for a frozen fs to thaw */
	if (quotactl_cmd_write(cmd))
		sb = get_super_thawed(bdev);
	else
		sb = get_super(bdev);
	bdput(bdev);
	if (!sb)
		return ERR_PTR(-ENODEV);

	return sb;
#else
	return ERR_PTR(-ENODEV);
#endif
}

/*
 * This is the system call interface. This communicates with
 * the user-level programs. Currently this only supports diskquota
 * calls. Maybe we need to add the process quotas etc. in the future,
 * but we probably should use rlimits for that.
 */
SYSCALL_DEFINE4(quotactl, unsigned int, cmd, const char __user *, special,
		qid_t, id, void __user *, addr)
{
	uint cmds, type;
	struct super_block *sb = NULL;
	struct path path, *pathp = NULL;
	int ret;

	/* 'cmd' packs the subcommand and the quota type */
	cmds = cmd >> SUBCMDSHIFT;
	type = cmd & SUBCMDMASK;

	/*
	 * As a special case Q_SYNC can be called without a specific device.
	 * It will iterate all superblocks that have quota enabled and call
	 * the sync action on each of them.
	 */
	if (!special) {
		if (cmds == Q_SYNC)
			return quota_sync_all(type);
		return -ENODEV;
	}

	/*
	 * Path for quotaon has to be resolved before grabbing superblock
	 * because that gets s_umount sem which is also possibly needed by path
	 * resolution (think about autofs) and thus deadlocks could arise.
	 */
	if (cmds == Q_QUOTAON) {
		ret = user_path_at(AT_FDCWD, addr, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
		if (ret)
			pathp = ERR_PTR(ret);
		else
			pathp = &path;
	}

	sb = quotactl_block(special, cmds);
	if (IS_ERR(sb)) {
		ret = PTR_ERR(sb);
		goto out;
	}

	ret = do_quotactl(sb, type, cmds, id, addr, pathp);

	drop_super(sb);
out:
	if (pathp && !IS_ERR(pathp))
		path_put(pathp);
	return ret;
}