/*
 * linux/kernel/compat.c
 *
 * Kernel compatibility routines for e.g. 32 bit syscall support
 * on 64 bit kernels.
 *
 * Copyright (C) 2002-2003 Stephen Rothwell, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/linkage.h>
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/signal.h>
#include <linux/sched.h>	/* for MAX_SCHEDULE_TIMEOUT */
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/security.h>
#include <linux/timex.h>
#include <linux/migrate.h>
#include <linux/posix-timers.h>
#include <linux/times.h>
#include <linux/ptrace.h>
#include <linux/gfp.h>

#include <asm/uaccess.h>
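
/*
 * Each wrapper in this file converts between the 32-bit ("compat") layout
 * of a userspace structure and its native counterpart, copying field by
 * field.  For reference, the 32-bit timeval is typically declared in
 * <linux/compat.h> roughly as follows (the exact field types are
 * per-architecture):
 *
 *	struct compat_timeval {
 *		compat_time_t	tv_sec;
 *		s32		tv_usec;
 *	};
 */
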
/*
 * Note that the native side is already converted to a timespec, because
 * that's what we want anyway.
 */
static int compat_get_timeval(struct timespec *o,
		struct compat_timeval __user *i)
{
	long usec;

	if (get_user(o->tv_sec, &i->tv_sec) ||
	    get_user(usec, &i->tv_usec))
		return -EFAULT;
	o->tv_nsec = usec * 1000;
	return 0;
}

static int compat_put_timeval(struct compat_timeval __user *o,
		struct timeval *i)
{
	return (put_user(i->tv_sec, &o->tv_sec) ||
		put_user(i->tv_usec, &o->tv_usec)) ? -EFAULT : 0;
}

static int compat_get_timex(struct timex *txc, struct compat_timex __user *utp)
{
	memset(txc, 0, sizeof(struct timex));

	if (!access_ok(VERIFY_READ, utp, sizeof(struct compat_timex)) ||
			__get_user(txc->modes, &utp->modes) ||
			__get_user(txc->offset, &utp->offset) ||
			__get_user(txc->freq, &utp->freq) ||
			__get_user(txc->maxerror, &utp->maxerror) ||
			__get_user(txc->esterror, &utp->esterror) ||
			__get_user(txc->status, &utp->status) ||
			__get_user(txc->constant, &utp->constant) ||
			__get_user(txc->precision, &utp->precision) ||
			__get_user(txc->tolerance, &utp->tolerance) ||
			__get_user(txc->time.tv_sec, &utp->time.tv_sec) ||
			__get_user(txc->time.tv_usec, &utp->time.tv_usec) ||
			__get_user(txc->tick, &utp->tick) ||
			__get_user(txc->ppsfreq, &utp->ppsfreq) ||
			__get_user(txc->jitter, &utp->jitter) ||
			__get_user(txc->shift, &utp->shift) ||
			__get_user(txc->stabil, &utp->stabil) ||
			__get_user(txc->jitcnt, &utp->jitcnt) ||
			__get_user(txc->calcnt, &utp->calcnt) ||
			__get_user(txc->errcnt, &utp->errcnt) ||
			__get_user(txc->stbcnt, &utp->stbcnt))
		return -EFAULT;

	return 0;
}

static int compat_put_timex(struct compat_timex __user *utp, struct timex *txc)
{
	if (!access_ok(VERIFY_WRITE, utp, sizeof(struct compat_timex)) ||
			__put_user(txc->modes, &utp->modes) ||
			__put_user(txc->offset, &utp->offset) ||
			__put_user(txc->freq, &utp->freq) ||
			__put_user(txc->maxerror, &utp->maxerror) ||
			__put_user(txc->esterror, &utp->esterror) ||
			__put_user(txc->status, &utp->status) ||
			__put_user(txc->constant, &utp->constant) ||
			__put_user(txc->precision, &utp->precision) ||
			__put_user(txc->tolerance, &utp->tolerance) ||
			__put_user(txc->time.tv_sec, &utp->time.tv_sec) ||
			__put_user(txc->time.tv_usec, &utp->time.tv_usec) ||
			__put_user(txc->tick, &utp->tick) ||
			__put_user(txc->ppsfreq, &utp->ppsfreq) ||
			__put_user(txc->jitter, &utp->jitter) ||
			__put_user(txc->shift, &utp->shift) ||
			__put_user(txc->stabil, &utp->stabil) ||
			__put_user(txc->jitcnt, &utp->jitcnt) ||
			__put_user(txc->calcnt, &utp->calcnt) ||
			__put_user(txc->errcnt, &utp->errcnt) ||
			__put_user(txc->stbcnt, &utp->stbcnt) ||
			__put_user(txc->tai, &utp->tai))
		return -EFAULT;
	return 0;
}

asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv,
		struct timezone __user *tz)
{
	if (tv) {
		struct timeval ktv;
		do_gettimeofday(&ktv);
		if (compat_put_timeval(tv, &ktv))
			return -EFAULT;
	}
	if (tz) {
		if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
			return -EFAULT;
	}

	return 0;
}

asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv,
		struct timezone __user *tz)
{
	struct timespec kts;
	struct timezone ktz;

	if (tv) {
		if (compat_get_timeval(&kts, tv))
			return -EFAULT;
	}
	if (tz) {
		if (copy_from_user(&ktz, tz, sizeof(ktz)))
			return -EFAULT;
	}

	return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
}

int get_compat_timespec(struct timespec *ts, const struct compat_timespec __user *cts)
{
	return (!access_ok(VERIFY_READ, cts, sizeof(*cts)) ||
			__get_user(ts->tv_sec, &cts->tv_sec) ||
			__get_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0;
}

int put_compat_timespec(const struct timespec *ts, struct compat_timespec __user *cts)
{
	return (!access_ok(VERIFY_WRITE, cts, sizeof(*cts)) ||
			__put_user(ts->tv_sec, &cts->tv_sec) ||
			__put_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0;
}
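
/*
 * Many of the wrappers below reuse the native syscall implementation by
 * temporarily lifting the user/kernel address limit: set_fs(KERNEL_DS)
 * lets the native code accept a pointer to an on-stack kernel structure
 * as if it came from userspace, and the result is converted back to the
 * 32-bit layout with the put_compat_*() helpers.  The rough shape of the
 * pattern (sys_native() stands for whichever native call is wrapped) is:
 *
 *	oldfs = get_fs();
 *	set_fs(KERNEL_DS);
 *	ret = sys_native(..., (type __user *) &kernel_copy, ...);
 *	set_fs(oldfs);
 *
 * compat_sys_nanosleep() additionally has to cope with being interrupted
 * by a signal: in that case the restart block is redirected to
 * compat_nanosleep_restart() so that the remaining time is written back
 * in the 32-bit format.
 */
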
static long compat_nanosleep_restart(struct restart_block *restart)
{
	struct compat_timespec __user *rmtp;
	struct timespec rmt;
	mm_segment_t oldfs;
	long ret;

	restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
	oldfs = get_fs();
	set_fs(KERNEL_DS);
	ret = hrtimer_nanosleep_restart(restart);
	set_fs(oldfs);

	if (ret) {
		rmtp = restart->nanosleep.compat_rmtp;

		if (rmtp && put_compat_timespec(&rmt, rmtp))
			return -EFAULT;
	}

	return ret;
}

asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
				     struct compat_timespec __user *rmtp)
{
	struct timespec tu, rmt;
	mm_segment_t oldfs;
	long ret;

	if (get_compat_timespec(&tu, rqtp))
		return -EFAULT;

	if (!timespec_valid(&tu))
		return -EINVAL;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	ret = hrtimer_nanosleep(&tu,
				rmtp ? (struct timespec __user *)&rmt : NULL,
				HRTIMER_MODE_REL, CLOCK_MONOTONIC);
	set_fs(oldfs);

	if (ret) {
		struct restart_block *restart
			= &current_thread_info()->restart_block;

		restart->fn = compat_nanosleep_restart;
		restart->nanosleep.compat_rmtp = rmtp;

		if (rmtp && put_compat_timespec(&rmt, rmtp))
			return -EFAULT;
	}

	return ret;
}
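
/*
 * The itimerval helpers fold the four field copies into a single
 * expression: access_ok() validates the whole user structure once and the
 * individual __get_user()/__put_user() results are combined with bitwise
 * OR, so any faulting access makes the helper return non-zero and the
 * callers turn that into -EFAULT.
 */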
static inline long get_compat_itimerval(struct itimerval *o,
		struct compat_itimerval __user *i)
{
	return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
		(__get_user(o->it_interval.tv_sec, &i->it_interval.tv_sec) |
		 __get_user(o->it_interval.tv_usec, &i->it_interval.tv_usec) |
		 __get_user(o->it_value.tv_sec, &i->it_value.tv_sec) |
		 __get_user(o->it_value.tv_usec, &i->it_value.tv_usec)));
}

static inline long put_compat_itimerval(struct compat_itimerval __user *o,
		struct itimerval *i)
{
	return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
		(__put_user(i->it_interval.tv_sec, &o->it_interval.tv_sec) |
		 __put_user(i->it_interval.tv_usec, &o->it_interval.tv_usec) |
		 __put_user(i->it_value.tv_sec, &o->it_value.tv_sec) |
		 __put_user(i->it_value.tv_usec, &o->it_value.tv_usec)));
}

asmlinkage long compat_sys_getitimer(int which,
		struct compat_itimerval __user *it)
{
	struct itimerval kit;
	int error;

	error = do_getitimer(which, &kit);
	if (!error && put_compat_itimerval(it, &kit))
		error = -EFAULT;
	return error;
}

asmlinkage long compat_sys_setitimer(int which,
		struct compat_itimerval __user *in,
		struct compat_itimerval __user *out)
{
	struct itimerval kin, kout;
	int error;

	if (in) {
		if (get_compat_itimerval(&kin, in))
			return -EFAULT;
	} else
		memset(&kin, 0, sizeof(kin));

	error = do_setitimer(which, &kin, out ? &kout : NULL);
	if (error || !out)
		return error;
	if (put_compat_itimerval(out, &kout))
		return -EFAULT;
	return 0;
}

static compat_clock_t clock_t_to_compat_clock_t(clock_t x)
{
	return compat_jiffies_to_clock_t(clock_t_to_jiffies(x));
}

asmlinkage long compat_sys_times(struct compat_tms __user *tbuf)
{
	if (tbuf) {
		struct tms tms;
		struct compat_tms tmp;

		do_sys_times(&tms);
		/* Convert our struct tms to the compat version. */
		tmp.tms_utime = clock_t_to_compat_clock_t(tms.tms_utime);
		tmp.tms_stime = clock_t_to_compat_clock_t(tms.tms_stime);
		tmp.tms_cutime = clock_t_to_compat_clock_t(tms.tms_cutime);
		tmp.tms_cstime = clock_t_to_compat_clock_t(tms.tms_cstime);
		if (copy_to_user(tbuf, &tmp, sizeof(tmp)))
			return -EFAULT;
	}
	force_successful_syscall_return();
	return compat_jiffies_to_clock_t(jiffies);
}

#ifdef __ARCH_WANT_SYS_SIGPENDING

/*
 * Assumption: old_sigset_t and compat_old_sigset_t are both
 * types that can be passed to put_user()/get_user().
 */

asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
{
	old_sigset_t s;
	long ret;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	ret = sys_sigpending((old_sigset_t __user *) &s);
	set_fs(old_fs);
	if (ret == 0)
		ret = put_user(s, set);
	return ret;
}

#endif

#ifdef __ARCH_WANT_SYS_SIGPROCMASK

asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
		compat_old_sigset_t __user *oset)
{
	old_sigset_t s;
	long ret;
	mm_segment_t old_fs;

	if (set && get_user(s, set))
		return -EFAULT;
	old_fs = get_fs();
	set_fs(KERNEL_DS);
	ret = sys_sigprocmask(how,
			      set ? (old_sigset_t __user *) &s : NULL,
			      oset ? (old_sigset_t __user *) &s : NULL);
	set_fs(old_fs);
	if (ret == 0)
		if (oset)
			ret = put_user(s, oset);
	return ret;
}

#endif
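
/*
 * The rlimit wrappers translate the "unlimited" sentinel between ABIs:
 * COMPAT_RLIM_INFINITY from a 32-bit task is widened to RLIM_INFINITY
 * before calling do_prlimit(), and limits that do not fit in the 32-bit
 * fields are clamped to the compat infinity value on the way back to
 * userspace.
 */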
asmlinkage long compat_sys_setrlimit(unsigned int resource,
		struct compat_rlimit __user *rlim)
{
	struct rlimit r;

	if (!access_ok(VERIFY_READ, rlim, sizeof(*rlim)) ||
	    __get_user(r.rlim_cur, &rlim->rlim_cur) ||
	    __get_user(r.rlim_max, &rlim->rlim_max))
		return -EFAULT;

	if (r.rlim_cur == COMPAT_RLIM_INFINITY)
		r.rlim_cur = RLIM_INFINITY;
	if (r.rlim_max == COMPAT_RLIM_INFINITY)
		r.rlim_max = RLIM_INFINITY;
	return do_prlimit(current, resource, &r, NULL);
}

#ifdef COMPAT_RLIM_OLD_INFINITY

asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
		struct compat_rlimit __user *rlim)
{
	struct rlimit r;
	int ret;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	ret = sys_old_getrlimit(resource, &r);
	set_fs(old_fs);

	if (!ret) {
		if (r.rlim_cur > COMPAT_RLIM_OLD_INFINITY)
			r.rlim_cur = COMPAT_RLIM_INFINITY;
		if (r.rlim_max > COMPAT_RLIM_OLD_INFINITY)
			r.rlim_max = COMPAT_RLIM_INFINITY;

		if (!access_ok(VERIFY_WRITE, rlim, sizeof(*rlim)) ||
		    __put_user(r.rlim_cur, &rlim->rlim_cur) ||
		    __put_user(r.rlim_max, &rlim->rlim_max))
			return -EFAULT;
	}
	return ret;
}

#endif

asmlinkage long compat_sys_getrlimit(unsigned int resource,
		struct compat_rlimit __user *rlim)
{
	struct rlimit r;
	int ret;

	ret = do_prlimit(current, resource, NULL, &r);
	if (!ret) {
		if (r.rlim_cur > COMPAT_RLIM_INFINITY)
			r.rlim_cur = COMPAT_RLIM_INFINITY;
		if (r.rlim_max > COMPAT_RLIM_INFINITY)
			r.rlim_max = COMPAT_RLIM_INFINITY;

		if (!access_ok(VERIFY_WRITE, rlim, sizeof(*rlim)) ||
		    __put_user(r.rlim_cur, &rlim->rlim_cur) ||
		    __put_user(r.rlim_max, &rlim->rlim_max))
			return -EFAULT;
	}
	return ret;
}

int put_compat_rusage(const struct rusage *r, struct compat_rusage __user *ru)
{
	if (!access_ok(VERIFY_WRITE, ru, sizeof(*ru)) ||
	    __put_user(r->ru_utime.tv_sec, &ru->ru_utime.tv_sec) ||
	    __put_user(r->ru_utime.tv_usec, &ru->ru_utime.tv_usec) ||
	    __put_user(r->ru_stime.tv_sec, &ru->ru_stime.tv_sec) ||
	    __put_user(r->ru_stime.tv_usec, &ru->ru_stime.tv_usec) ||
	    __put_user(r->ru_maxrss, &ru->ru_maxrss) ||
	    __put_user(r->ru_ixrss, &ru->ru_ixrss) ||
	    __put_user(r->ru_idrss, &ru->ru_idrss) ||
	    __put_user(r->ru_isrss, &ru->ru_isrss) ||
	    __put_user(r->ru_minflt, &ru->ru_minflt) ||
	    __put_user(r->ru_majflt, &ru->ru_majflt) ||
	    __put_user(r->ru_nswap, &ru->ru_nswap) ||
	    __put_user(r->ru_inblock, &ru->ru_inblock) ||
	    __put_user(r->ru_oublock, &ru->ru_oublock) ||
	    __put_user(r->ru_msgsnd, &ru->ru_msgsnd) ||
	    __put_user(r->ru_msgrcv, &ru->ru_msgrcv) ||
	    __put_user(r->ru_nsignals, &ru->ru_nsignals) ||
	    __put_user(r->ru_nvcsw, &ru->ru_nvcsw) ||
	    __put_user(r->ru_nivcsw, &ru->ru_nivcsw))
		return -EFAULT;
	return 0;
}

asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
{
	struct rusage r;
	int ret;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	ret = sys_getrusage(who, (struct rusage __user *) &r);
	set_fs(old_fs);

	if (ret)
		return ret;

	if (put_compat_rusage(&r, ru))
		return -EFAULT;

	return 0;
}

asmlinkage long
compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
	struct compat_rusage __user *ru)
{
	if (!ru) {
		return sys_wait4(pid, stat_addr, options, NULL);
	} else {
		struct rusage r;
		int ret;
		unsigned int status;
		mm_segment_t old_fs = get_fs();

		set_fs(KERNEL_DS);
		ret = sys_wait4(pid,
				(stat_addr ?
				 (unsigned int __user *) &status : NULL),
				options, (struct rusage __user *) &r);
		set_fs(old_fs);

		if (ret > 0) {
			if (put_compat_rusage(&r, ru))
				return -EFAULT;
			if (stat_addr && put_user(status, stat_addr))
				return -EFAULT;
		}
		return ret;
	}
}
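
/*
 * sys_waitid() is invoked here under KERNEL_DS with a kernel siginfo, and
 * the si_code it reports is the bare CLD_* value without the internal
 * __SI_* class bits.  The wrapper tags it with __SI_CHLD afterwards so
 * that copy_siginfo_to_user32() picks the child-exit layout of the
 * siginfo union when producing the 32-bit structure.
 */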
asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
		struct compat_siginfo __user *uinfo, int options,
		struct compat_rusage __user *uru)
{
	siginfo_t info;
	struct rusage ru;
	long ret;
	mm_segment_t old_fs = get_fs();

	memset(&info, 0, sizeof(info));

	set_fs(KERNEL_DS);
	ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
			 uru ? (struct rusage __user *)&ru : NULL);
	set_fs(old_fs);

	if ((ret < 0) || (info.si_signo == 0))
		return ret;

	if (uru) {
		ret = put_compat_rusage(&ru, uru);
		if (ret)
			return ret;
	}

	BUG_ON(info.si_code & __SI_MASK);
	info.si_code |= __SI_CHLD;
	return copy_siginfo_to_user32(uinfo, &info);
}
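
/*
 * The scheduler affinity wrappers take a buffer length in bytes.  For
 * setaffinity a short buffer is accepted and the rest of the kernel
 * cpumask is zeroed; for getaffinity the buffer must cover all possible
 * CPUs and be a multiple of sizeof(compat_ulong_t), and the number of
 * bytes copied back is returned on success.  The bit-level conversion is
 * done by compat_get_bitmap()/compat_put_bitmap() further down in this
 * file, which take their size argument in bits (hence the "* 8" below).
 */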
static int compat_get_user_cpu_mask(compat_ulong_t __user *user_mask_ptr,
				    unsigned len, struct cpumask *new_mask)
{
	unsigned long *k;

	if (len < cpumask_size())
		memset(new_mask, 0, cpumask_size());
	else if (len > cpumask_size())
		len = cpumask_size();

	k = cpumask_bits(new_mask);
	return compat_get_bitmap(k, user_mask_ptr, len * 8);
}

asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid,
					     unsigned int len,
					     compat_ulong_t __user *user_mask_ptr)
{
	cpumask_var_t new_mask;
	int retval;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
		return -ENOMEM;

	retval = compat_get_user_cpu_mask(user_mask_ptr, len, new_mask);
	if (retval)
		goto out;

	retval = sched_setaffinity(pid, new_mask);
out:
	free_cpumask_var(new_mask);
	return retval;
}

asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len,
					     compat_ulong_t __user *user_mask_ptr)
{
	int ret;
	cpumask_var_t mask;

	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
		return -EINVAL;
	if (len & (sizeof(compat_ulong_t)-1))
		return -EINVAL;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	ret = sched_getaffinity(pid, mask);
	if (ret == 0) {
		size_t retlen = min_t(size_t, len, cpumask_size());

		if (compat_put_bitmap(user_mask_ptr, cpumask_bits(mask), retlen * 8))
			ret = -EFAULT;
		else
			ret = retlen;
	}
	free_cpumask_var(mask);

	return ret;
}

int get_compat_itimerspec(struct itimerspec *dst,
			  const struct compat_itimerspec __user *src)
{
	if (get_compat_timespec(&dst->it_interval, &src->it_interval) ||
	    get_compat_timespec(&dst->it_value, &src->it_value))
		return -EFAULT;
	return 0;
}

int put_compat_itimerspec(struct compat_itimerspec __user *dst,
			  const struct itimerspec *src)
{
	if (put_compat_timespec(&src->it_interval, &dst->it_interval) ||
	    put_compat_timespec(&src->it_value, &dst->it_value))
		return -EFAULT;
	return 0;
}

long compat_sys_timer_create(clockid_t which_clock,
			     struct compat_sigevent __user *timer_event_spec,
			     timer_t __user *created_timer_id)
{
	struct sigevent __user *event = NULL;

	if (timer_event_spec) {
		struct sigevent kevent;

		event = compat_alloc_user_space(sizeof(*event));
		if (get_compat_sigevent(&kevent, timer_event_spec) ||
		    copy_to_user(event, &kevent, sizeof(*event)))
			return -EFAULT;
	}

	return sys_timer_create(which_clock, event, created_timer_id);
}

long compat_sys_timer_settime(timer_t timer_id, int flags,
			      struct compat_itimerspec __user *new,
			      struct compat_itimerspec __user *old)
{
	long err;
	mm_segment_t oldfs;
	struct itimerspec newts, oldts;

	if (!new)
		return -EINVAL;
	if (get_compat_itimerspec(&newts, new))
		return -EFAULT;
	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = sys_timer_settime(timer_id, flags,
				(struct itimerspec __user *) &newts,
				(struct itimerspec __user *) &oldts);
	set_fs(oldfs);
	if (!err && old && put_compat_itimerspec(old, &oldts))
		return -EFAULT;
	return err;
}

long compat_sys_timer_gettime(timer_t timer_id,
			      struct compat_itimerspec __user *setting)
{
	long err;
	mm_segment_t oldfs;
	struct itimerspec ts;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = sys_timer_gettime(timer_id,
				(struct itimerspec __user *) &ts);
	set_fs(oldfs);
	if (!err && put_compat_itimerspec(setting, &ts))
		return -EFAULT;
	return err;
}

long compat_sys_clock_settime(clockid_t which_clock,
			      struct compat_timespec __user *tp)
{
	long err;
	mm_segment_t oldfs;
	struct timespec ts;

	if (get_compat_timespec(&ts, tp))
		return -EFAULT;
	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = sys_clock_settime(which_clock,
				(struct timespec __user *) &ts);
	set_fs(oldfs);
	return err;
}

long compat_sys_clock_gettime(clockid_t which_clock,
			      struct compat_timespec __user *tp)
{
	long err;
	mm_segment_t oldfs;
	struct timespec ts;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = sys_clock_gettime(which_clock,
				(struct timespec __user *) &ts);
	set_fs(oldfs);
	if (!err && put_compat_timespec(&ts, tp))
		return -EFAULT;
	return err;
}

long compat_sys_clock_adjtime(clockid_t which_clock,
			      struct compat_timex __user *utp)
{
	struct timex txc;
	mm_segment_t oldfs;
	int err, ret;

	err = compat_get_timex(&txc, utp);
	if (err)
		return err;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
	set_fs(oldfs);

	err = compat_put_timex(utp, &txc);
	if (err)
		return err;

	return ret;
}

long compat_sys_clock_getres(clockid_t which_clock,
			     struct compat_timespec __user *tp)
{
	long err;
	mm_segment_t oldfs;
	struct timespec ts;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = sys_clock_getres(which_clock,
			       (struct timespec __user *) &ts);
	set_fs(oldfs);
	if (!err && tp && put_compat_timespec(&ts, tp))
		return -EFAULT;
	return err;
}
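
/*
 * As with compat_sys_nanosleep() above, a clock_nanosleep() interrupted
 * by a signal must report the unslept time in the 32-bit layout.  The
 * restart block is therefore pointed at compat_clock_nanosleep_restart(),
 * which repeats the KERNEL_DS round trip and converts the remaining time
 * with put_compat_timespec() before re-arming itself.
 */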
static long compat_clock_nanosleep_restart(struct restart_block *restart)
{
	long err;
	mm_segment_t oldfs;
	struct timespec tu;
	struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;

	restart->nanosleep.rmtp = (struct timespec __user *) &tu;
	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = clock_nanosleep_restart(restart);
	set_fs(oldfs);

	if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
	    put_compat_timespec(&tu, rmtp))
		return -EFAULT;

	if (err == -ERESTART_RESTARTBLOCK) {
		restart->fn = compat_clock_nanosleep_restart;
		restart->nanosleep.compat_rmtp = rmtp;
	}
	return err;
}

long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
			    struct compat_timespec __user *rqtp,
			    struct compat_timespec __user *rmtp)
{
	long err;
	mm_segment_t oldfs;
	struct timespec in, out;
	struct restart_block *restart;

	if (get_compat_timespec(&in, rqtp))
		return -EFAULT;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = sys_clock_nanosleep(which_clock, flags,
				  (struct timespec __user *) &in,
				  (struct timespec __user *) &out);
	set_fs(oldfs);

	if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
	    put_compat_timespec(&out, rmtp))
		return -EFAULT;

	if (err == -ERESTART_RESTARTBLOCK) {
		restart = &current_thread_info()->restart_block;
		restart->fn = compat_clock_nanosleep_restart;
		restart->nanosleep.compat_rmtp = rmtp;
	}
	return err;
}

/*
 * We currently only need the following fields from the sigevent
 * structure: sigev_value, sigev_signo, sigev_notify and (sometimes
 * sigev_notify_thread_id).  The others are handled in user mode.
 * We also assume that copying sigev_value.sival_int is sufficient
 * to keep all the bits of sigev_value.sival_ptr intact.
 */
int get_compat_sigevent(struct sigevent *event,
			const struct compat_sigevent __user *u_event)
{
	memset(event, 0, sizeof(*event));
	return (!access_ok(VERIFY_READ, u_event, sizeof(*u_event)) ||
		__get_user(event->sigev_value.sival_int,
			   &u_event->sigev_value.sival_int) ||
		__get_user(event->sigev_signo, &u_event->sigev_signo) ||
		__get_user(event->sigev_notify, &u_event->sigev_notify) ||
		__get_user(event->sigev_notify_thread_id,
			   &u_event->sigev_notify_thread_id))
		? -EFAULT : 0;
}
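
/*
 * compat_get_bitmap() and compat_put_bitmap() convert between a native
 * bitmap of unsigned longs and a userspace array of compat_ulong_t.  On a
 * 64-bit kernel each native word corresponds to two 32-bit compat words,
 * so, for example, bit 40 of mask[0] lives in bit 8 of umask[1].  Compat
 * words past the requested bitmap size are read as zero and are not
 * written back.  compat_put_bitmap() shifts the native word down in two
 * half-sized steps so that the shift count never reaches the full width
 * of the type, which would be undefined if compat_ulong_t were as wide as
 * unsigned long.
 */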
long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
		       unsigned long bitmap_size)
{
	int i, j;
	unsigned long m;
	compat_ulong_t um;
	unsigned long nr_compat_longs;

	/* align bitmap up to nearest compat_long_t boundary */
	bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG);

	if (!access_ok(VERIFY_READ, umask, bitmap_size / 8))
		return -EFAULT;

	nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size);

	for (i = 0; i < BITS_TO_LONGS(bitmap_size); i++) {
		m = 0;

		for (j = 0; j < sizeof(m)/sizeof(um); j++) {
			/*
			 * We don't want to read past the end of the userspace
			 * bitmap. We must however ensure the end of the
			 * kernel bitmap is zeroed.
			 */
			if (nr_compat_longs-- > 0) {
				if (__get_user(um, umask))
					return -EFAULT;
			} else {
				um = 0;
			}

			umask++;
			m |= (long)um << (j * BITS_PER_COMPAT_LONG);
		}
		*mask++ = m;
	}

	return 0;
}

long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
		       unsigned long bitmap_size)
{
	int i, j;
	unsigned long m;
	compat_ulong_t um;
	unsigned long nr_compat_longs;

	/* align bitmap up to nearest compat_long_t boundary */
	bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG);

	if (!access_ok(VERIFY_WRITE, umask, bitmap_size / 8))
		return -EFAULT;

	nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size);

	for (i = 0; i < BITS_TO_LONGS(bitmap_size); i++) {
		m = *mask++;

		for (j = 0; j < sizeof(m)/sizeof(um); j++) {
			um = m;

			/*
			 * We don't want to write past the end of the userspace
			 * bitmap.
			 */
			if (nr_compat_longs-- > 0) {
				if (__put_user(um, umask))
					return -EFAULT;
			}

			umask++;
			m >>= 4*sizeof(um);
			m >>= 4*sizeof(um);
		}
	}

	return 0;
}
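
/*
 * sigset_from_compat() assembles each 64-bit signal word from two
 * consecutive 32-bit compat words; the switch cases deliberately fall
 * through so that all words up to _NSIG_WORDS are filled in.
 */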
void
sigset_from_compat(sigset_t *set, compat_sigset_t *compat)
{
	switch (_NSIG_WORDS) {
	case 4: set->sig[3] = compat->sig[6] | (((long)compat->sig[7]) << 32);
	case 3: set->sig[2] = compat->sig[4] | (((long)compat->sig[5]) << 32);
	case 2: set->sig[1] = compat->sig[2] | (((long)compat->sig[3]) << 32);
	case 1: set->sig[0] = compat->sig[0] | (((long)compat->sig[1]) << 32);
	}
}
EXPORT_SYMBOL_GPL(sigset_from_compat);

asmlinkage long
compat_sys_rt_sigtimedwait(compat_sigset_t __user *uthese,
		struct compat_siginfo __user *uinfo,
		struct compat_timespec __user *uts, compat_size_t sigsetsize)
{
	compat_sigset_t s32;
	sigset_t s;
	struct timespec t;
	siginfo_t info;
	long ret;

	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&s32, uthese, sizeof(compat_sigset_t)))
		return -EFAULT;
	sigset_from_compat(&s, &s32);

	if (uts) {
		if (get_compat_timespec(&t, uts))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user32(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}

asmlinkage long
compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid, compat_pid_t pid, int sig,
			     struct compat_siginfo __user *uinfo)
{
	siginfo_t info;

	if (copy_siginfo_from_user32(&info, uinfo))
		return -EFAULT;
	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}

#ifdef __ARCH_WANT_COMPAT_SYS_TIME

/* compat_time_t is a 32 bit "long" and needs to get converted. */

asmlinkage long compat_sys_time(compat_time_t __user *tloc)
{
	compat_time_t i;
	struct timeval tv;

	do_gettimeofday(&tv);
	i = tv.tv_sec;

	if (tloc) {
		if (put_user(i, tloc))
			return -EFAULT;
	}
	force_successful_syscall_return();
	return i;
}

asmlinkage long compat_sys_stime(compat_time_t __user *tptr)
{
	struct timespec tv;
	int err;

	if (get_user(tv.tv_sec, tptr))
		return -EFAULT;

	tv.tv_nsec = 0;

	err = security_settime(&tv, NULL);
	if (err)
		return err;

	do_settimeofday(&tv);
	return 0;
}

#endif /* __ARCH_WANT_COMPAT_SYS_TIME */

#ifdef __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
asmlinkage long compat_sys_rt_sigsuspend(compat_sigset_t __user *unewset, compat_size_t sigsetsize)
{
	sigset_t newset;
	compat_sigset_t newset32;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
		return -EFAULT;
	sigset_from_compat(&newset, &newset32);
	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));

	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}
#endif /* __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND */

asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp)
{
	struct timex txc;
	int err, ret;

	err = compat_get_timex(&txc, utp);
	if (err)
		return err;

	ret = do_adjtimex(&txc);

	err = compat_put_timex(utp, &txc);
	if (err)
		return err;

	return ret;
}
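
/*
 * The NUMA wrappers below marshal their page-pointer and node-mask
 * arguments through a temporary native-format buffer obtained from
 * compat_alloc_user_space() (defined at the bottom of this file): the
 * compat data is read in, rewritten in the 64-bit layout, copied out to
 * the scratch user buffer and then handed to the native syscall.
 */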
#ifdef CONFIG_NUMA
asmlinkage long compat_sys_move_pages(pid_t pid, unsigned long nr_pages,
		compat_uptr_t __user *pages32,
		const int __user *nodes,
		int __user *status,
		int flags)
{
	const void __user * __user *pages;
	int i;

	pages = compat_alloc_user_space(nr_pages * sizeof(void *));
	for (i = 0; i < nr_pages; i++) {
		compat_uptr_t p;

		if (get_user(p, pages32 + i) ||
		    put_user(compat_ptr(p), pages + i))
			return -EFAULT;
	}
	return sys_move_pages(pid, nr_pages, pages, nodes, status, flags);
}

asmlinkage long compat_sys_migrate_pages(compat_pid_t pid,
			compat_ulong_t maxnode,
			const compat_ulong_t __user *old_nodes,
			const compat_ulong_t __user *new_nodes)
{
	unsigned long __user *old = NULL;
	unsigned long __user *new = NULL;
	nodemask_t tmp_mask;
	unsigned long nr_bits;
	unsigned long size;

	nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES);
	size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
	if (old_nodes) {
		if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits))
			return -EFAULT;
		old = compat_alloc_user_space(new_nodes ? size * 2 : size);
		if (new_nodes)
			new = old + size / sizeof(unsigned long);
		if (copy_to_user(old, nodes_addr(tmp_mask), size))
			return -EFAULT;
	}
	if (new_nodes) {
		if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits))
			return -EFAULT;
		if (new == NULL)
			new = compat_alloc_user_space(size);
		if (copy_to_user(new, nodes_addr(tmp_mask), size))
			return -EFAULT;
	}
	return sys_migrate_pages(pid, nr_bits + 1, old, new);
}
#endif

struct compat_sysinfo {
	s32 uptime;
	u32 loads[3];
	u32 totalram;
	u32 freeram;
	u32 sharedram;
	u32 bufferram;
	u32 totalswap;
	u32 freeswap;
	u16 procs;
	u16 pad;
	u32 totalhigh;
	u32 freehigh;
	u32 mem_unit;
	char _f[20-2*sizeof(u32)-sizeof(int)];
};

asmlinkage long
compat_sys_sysinfo(struct compat_sysinfo __user *info)
{
	struct sysinfo s;

	do_sysinfo(&s);

	/* Check to see if any memory value is too large for 32-bit and scale
	 * down if needed
	 */
	if ((s.totalram >> 32) || (s.totalswap >> 32)) {
		int bitcount = 0;

		while (s.mem_unit < PAGE_SIZE) {
			s.mem_unit <<= 1;
			bitcount++;
		}

		s.totalram >>= bitcount;
		s.freeram >>= bitcount;
		s.sharedram >>= bitcount;
		s.bufferram >>= bitcount;
		s.totalswap >>= bitcount;
		s.freeswap >>= bitcount;
		s.totalhigh >>= bitcount;
		s.freehigh >>= bitcount;
	}

	if (!access_ok(VERIFY_WRITE, info, sizeof(struct compat_sysinfo)) ||
	    __put_user(s.uptime, &info->uptime) ||
	    __put_user(s.loads[0], &info->loads[0]) ||
	    __put_user(s.loads[1], &info->loads[1]) ||
	    __put_user(s.loads[2], &info->loads[2]) ||
	    __put_user(s.totalram, &info->totalram) ||
	    __put_user(s.freeram, &info->freeram) ||
	    __put_user(s.sharedram, &info->sharedram) ||
	    __put_user(s.bufferram, &info->bufferram) ||
	    __put_user(s.totalswap, &info->totalswap) ||
	    __put_user(s.freeswap, &info->freeswap) ||
	    __put_user(s.procs, &info->procs) ||
	    __put_user(s.totalhigh, &info->totalhigh) ||
	    __put_user(s.freehigh, &info->freehigh) ||
	    __put_user(s.mem_unit, &info->mem_unit))
		return -EFAULT;

	return 0;
}

/*
 * Allocate user-space memory for the duration of a single system call,
 * in order to marshall parameters inside a compat thunk.
 */
void __user *compat_alloc_user_space(unsigned long len)
{
	void __user *ptr;

	/* If len would occupy more than half of the entire compat space... */
	if (unlikely(len > (((compat_uptr_t)~0) >> 1)))
		return NULL;

	ptr = arch_compat_alloc_user_space(len);

	if (unlikely(!access_ok(VERIFY_WRITE, ptr, len)))
		return NULL;

	return ptr;
}
EXPORT_SYMBOL_GPL(compat_alloc_user_space);