/*
 *  arch/s390x/kernel/linux32.c
 *
 *  S390 version
 *    Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Gerhard Tonn (ton@de.ibm.com)
 *               Thomas Spatzier (tspat@de.ibm.com)
 *
 *  Conversion between 31bit and 64bit native syscalls.
 *
 *  Heavily inspired by the 32-bit Sparc compat code which is
 *  Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 *  Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 *
 */


#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/signal.h>
#include <linux/resource.h>
#include <linux/times.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/quota.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/personality.h>
#include <linux/stat.h>
#include <linux/filter.h>
#include <linux/highmem.h>
#include <linux/highuid.h>
#include <linux/mman.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/icmpv6.h>
#include <linux/syscalls.h>
#include <linux/sysctl.h>
#include <linux/binfmts.h>
#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/vfs.h>
#include <linux/ptrace.h>
#include <linux/fadvise.h>
#include <linux/ipc.h>

#include <asm/types.h>
#include <asm/uaccess.h>

#include <net/scm.h>
#include <net/sock.h>

#include "compat_linux.h"

long psw_user32_bits = (PSW_BASE32_BITS | PSW_MASK_DAT | PSW_ASC_HOME |
			PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
			PSW_MASK_PSTATE | PSW_DEFAULT_KEY);
long psw32_user_bits = (PSW32_BASE_BITS | PSW32_MASK_DAT | PSW32_ASC_HOME |
			PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK |
			PSW32_MASK_PSTATE);

/* For this source file, we want overflow handling. */

#undef high2lowuid
#undef high2lowgid
#undef low2highuid
#undef low2highgid
#undef SET_UID16
#undef SET_GID16
#undef NEW_TO_OLD_UID
#undef NEW_TO_OLD_GID
#undef SET_OLDSTAT_UID
#undef SET_OLDSTAT_GID
#undef SET_STAT_UID
#undef SET_STAT_GID

#define high2lowuid(uid) ((uid) > 65535) ? (u16)overflowuid : (u16)(uid)
#define high2lowgid(gid) ((gid) > 65535) ? (u16)overflowgid : (u16)(gid)
#define low2highuid(uid) ((uid) == (u16)-1) ? (uid_t)-1 : (uid_t)(uid)
#define low2highgid(gid) ((gid) == (u16)-1) ? (gid_t)-1 : (gid_t)(gid)
#define SET_UID16(var, uid)	var = high2lowuid(uid)
#define SET_GID16(var, gid)	var = high2lowgid(gid)
#define NEW_TO_OLD_UID(uid)	high2lowuid(uid)
#define NEW_TO_OLD_GID(gid)	high2lowgid(gid)
#define SET_OLDSTAT_UID(stat, uid)	(stat).st_uid = high2lowuid(uid)
#define SET_OLDSTAT_GID(stat, gid)	(stat).st_gid = high2lowgid(gid)
#define SET_STAT_UID(stat, uid)		(stat).st_uid = high2lowuid(uid)
#define SET_STAT_GID(stat, gid)		(stat).st_gid = high2lowgid(gid)

asmlinkage long sys32_chown16(const char __user * filename, u16 user, u16 group)
{
	return sys_chown(filename, low2highuid(user), low2highgid(group));
}

asmlinkage long sys32_lchown16(const char __user * filename, u16 user, u16 group)
{
	return sys_lchown(filename, low2highuid(user), low2highgid(group));
}

asmlinkage long sys32_fchown16(unsigned int fd, u16 user, u16 group)
{
	return sys_fchown(fd, low2highuid(user), low2highgid(group));
}

asmlinkage long sys32_setregid16(u16 rgid, u16 egid)
{
	return sys_setregid(low2highgid(rgid), low2highgid(egid));
}

asmlinkage long sys32_setgid16(u16 gid)
{
	return sys_setgid((gid_t)gid);
}

asmlinkage long sys32_setreuid16(u16 ruid, u16 euid)
{
	return sys_setreuid(low2highuid(ruid), low2highuid(euid));
}

asmlinkage long sys32_setuid16(u16 uid)
{
	return sys_setuid((uid_t)uid);
}

asmlinkage long sys32_setresuid16(u16 ruid, u16 euid, u16 suid)
{
	return sys_setresuid(low2highuid(ruid), low2highuid(euid),
			     low2highuid(suid));
}

asmlinkage long sys32_getresuid16(u16 __user *ruid, u16 __user *euid, u16 __user *suid)
{
	int retval;

	if (!(retval = put_user(high2lowuid(current->cred->uid), ruid)) &&
	    !(retval = put_user(high2lowuid(current->cred->euid), euid)))
		retval = put_user(high2lowuid(current->cred->suid), suid);

	return retval;
}

asmlinkage long sys32_setresgid16(u16 rgid, u16 egid, u16 sgid)
{
	return sys_setresgid(low2highgid(rgid), low2highgid(egid),
			     low2highgid(sgid));
}

asmlinkage long sys32_getresgid16(u16 __user *rgid, u16 __user *egid, u16 __user *sgid)
{
	int retval;

	if (!(retval = put_user(high2lowgid(current->cred->gid), rgid)) &&
	    !(retval = put_user(high2lowgid(current->cred->egid), egid)))
		retval = put_user(high2lowgid(current->cred->sgid), sgid);

	return retval;
}

asmlinkage long sys32_setfsuid16(u16 uid)
{
	return sys_setfsuid((uid_t)uid);
}

asmlinkage long sys32_setfsgid16(u16 gid)
{
	return sys_setfsgid((gid_t)gid);
}

static int groups16_to_user(u16 __user *grouplist, struct group_info *group_info)
{
	int i;
	u16 group;

	for (i = 0; i < group_info->ngroups; i++) {
		group = (u16)GROUP_AT(group_info, i);
		if (put_user(group, grouplist+i))
			return -EFAULT;
	}

	return 0;
}

static int groups16_from_user(struct group_info *group_info, u16 __user *grouplist)
{
	int i;
	u16 group;

	for (i = 0; i < group_info->ngroups; i++) {
		if (get_user(group, grouplist+i))
			return -EFAULT;
		GROUP_AT(group_info, i) = (gid_t)group;
	}

	return 0;
}

asmlinkage long sys32_getgroups16(int gidsetsize, u16 __user *grouplist)
{
	int i;

	if (gidsetsize < 0)
		return -EINVAL;

	get_group_info(current->cred->group_info);
	i = current->cred->group_info->ngroups;
	if (gidsetsize) {
		if (i > gidsetsize) {
			i = -EINVAL;
			goto out;
		}
		if (groups16_to_user(grouplist, current->cred->group_info)) {
			i = -EFAULT;
			goto out;
		}
	}
out:
	put_group_info(current->cred->group_info);
	return i;
}

asmlinkage long sys32_setgroups16(int gidsetsize, u16 __user *grouplist)
{
	struct group_info *group_info;
	int retval;

	if (!capable(CAP_SETGID))
		return -EPERM;
	if ((unsigned)gidsetsize > NGROUPS_MAX)
		return -EINVAL;

	group_info = groups_alloc(gidsetsize);
	if (!group_info)
		return -ENOMEM;
	retval = groups16_from_user(group_info, grouplist);
	if (retval) {
		put_group_info(group_info);
		return retval;
	}

	retval = set_current_groups(group_info);
	put_group_info(group_info);

	return retval;
}

asmlinkage long sys32_getuid16(void)
{
	return high2lowuid(current->cred->uid);
}

asmlinkage long sys32_geteuid16(void)
{
	return high2lowuid(current->cred->euid);
}

asmlinkage long sys32_getgid16(void)
{
	return high2lowgid(current->cred->gid);
}

asmlinkage long sys32_getegid16(void)
{
	return high2lowgid(current->cred->egid);
}

/*
 * sys32_ipc() is the de-multiplexer for the SysV IPC calls in 32bit emulation.
 *
 * This is really horribly ugly.
 */
#ifdef CONFIG_SYSVIPC
asmlinkage long sys32_ipc(u32 call, int first, int second, int third, u32 ptr)
{
	if (call >> 16)		/* hack for backward compatibility */
		return -EINVAL;

	call &= 0xffff;

	switch (call) {
	case SEMTIMEDOP:
		return compat_sys_semtimedop(first, compat_ptr(ptr),
					     second, compat_ptr(third));
	case SEMOP:
		/* struct sembuf is the same on 32 and 64bit :)) */
		return sys_semtimedop(first, compat_ptr(ptr),
				      second, NULL);
	case SEMGET:
		return sys_semget(first, second, third);
	case SEMCTL:
		return compat_sys_semctl(first, second, third,
					 compat_ptr(ptr));
	case MSGSND:
		return compat_sys_msgsnd(first, second, third,
					 compat_ptr(ptr));
	case MSGRCV:
		return compat_sys_msgrcv(first, second, 0, third,
					 0, compat_ptr(ptr));
	case MSGGET:
		return sys_msgget((key_t) first, second);
	case MSGCTL:
		return compat_sys_msgctl(first, second, compat_ptr(ptr));
	case SHMAT:
		return compat_sys_shmat(first, second, third,
					0, compat_ptr(ptr));
	case SHMDT:
		return sys_shmdt(compat_ptr(ptr));
	case SHMGET:
		return sys_shmget(first, (unsigned)second, third);
	case SHMCTL:
		return compat_sys_shmctl(first, second, compat_ptr(ptr));
	}

	return -ENOSYS;
}
#endif

asmlinkage long sys32_truncate64(const char __user * path, unsigned long high, unsigned long low)
{
	if ((int)high < 0)
		return -EINVAL;
	else
		return sys_truncate(path, (high << 32) | low);
}

asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long high, unsigned long low)
{
	if ((int)high < 0)
		return -EINVAL;
	else
		return sys_ftruncate(fd, (high << 32) | low);
}

asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
				struct compat_timespec __user *interval)
{
	struct timespec t;
	int ret;
	mm_segment_t old_fs = get_fs ();

	set_fs (KERNEL_DS);
	ret = sys_sched_rr_get_interval(pid,
					(struct timespec __force __user *) &t);
	set_fs (old_fs);
	if (put_compat_timespec(&t, interval))
		return -EFAULT;
	return ret;
}

asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
			compat_sigset_t __user *oset, size_t sigsetsize)
{
	sigset_t s;
	compat_sigset_t s32;
	int ret;
	mm_segment_t old_fs = get_fs();

	if (set) {
		if (copy_from_user (&s32, set, sizeof(compat_sigset_t)))
			return -EFAULT;
		switch (_NSIG_WORDS) {
		case 4: s.sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
		case 3: s.sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
		case 2: s.sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
		case 1: s.sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
		}
	}
	set_fs (KERNEL_DS);
	ret = sys_rt_sigprocmask(how,
				 set ? (sigset_t __force __user *) &s : NULL,
				 oset ? (sigset_t __force __user *) &s : NULL,
				 sigsetsize);
	set_fs (old_fs);
	if (ret) return ret;
	if (oset) {
		switch (_NSIG_WORDS) {
		case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3];
		case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2];
		case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1];
		case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0];
		}
		if (copy_to_user (oset, &s32, sizeof(compat_sigset_t)))
			return -EFAULT;
	}
	return 0;
}

asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
				size_t sigsetsize)
{
	sigset_t s;
	compat_sigset_t s32;
	int ret;
	mm_segment_t old_fs = get_fs();

	set_fs (KERNEL_DS);
	ret = sys_rt_sigpending((sigset_t __force __user *) &s, sigsetsize);
	set_fs (old_fs);
	if (!ret) {
		switch (_NSIG_WORDS) {
		case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3];
		case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2];
		case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1];
		case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0];
		}
		if (copy_to_user (set, &s32, sizeof(compat_sigset_t)))
			return -EFAULT;
	}
	return ret;
}

asmlinkage long
sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *uinfo)
{
	siginfo_t info;
	int ret;
	mm_segment_t old_fs = get_fs();

	if (copy_siginfo_from_user32(&info, uinfo))
		return -EFAULT;
	set_fs (KERNEL_DS);
	ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force __user *) &info);
	set_fs (old_fs);
	return ret;
}

/*
 * sys32_execve() executes a new program after the asm stub has set
 * things up for us.  This should basically do what I want it to.
 */
asmlinkage long sys32_execve(char __user *name, compat_uptr_t __user *argv,
			     compat_uptr_t __user *envp)
{
	struct pt_regs *regs = task_pt_regs(current);
	char *filename;
	long rc;

	filename = getname(name);
	rc = PTR_ERR(filename);
	if (IS_ERR(filename))
		return rc;
	rc = compat_do_execve(filename, argv, envp, regs);
	if (rc)
		goto out;
	current->thread.fp_regs.fpc = 0;
	asm volatile("sfpc %0,0" : : "d" (0));
	rc = regs->gprs[2];
out:
	putname(filename);
	return rc;
}

asmlinkage long sys32_pread64(unsigned int fd, char __user *ubuf,
				size_t count, u32 poshi, u32 poslo)
{
	if ((compat_ssize_t) count < 0)
		return -EINVAL;
	return sys_pread64(fd, ubuf, count, ((loff_t)AA(poshi) << 32) | AA(poslo));
}

asmlinkage long sys32_pwrite64(unsigned int fd, const char __user *ubuf,
				size_t count, u32 poshi, u32 poslo)
{
	if ((compat_ssize_t) count < 0)
		return -EINVAL;
	return sys_pwrite64(fd, ubuf, count, ((loff_t)AA(poshi) << 32) | AA(poslo));
}

asmlinkage compat_ssize_t sys32_readahead(int fd, u32 offhi, u32 offlo, s32 count)
{
	return sys_readahead(fd, ((loff_t)AA(offhi) << 32) | AA(offlo), count);
}

asmlinkage long sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offset, size_t count)
{
	mm_segment_t old_fs = get_fs();
	int ret;
	off_t of;

	if (offset && get_user(of, offset))
		return -EFAULT;

	set_fs(KERNEL_DS);
	ret = sys_sendfile(out_fd, in_fd,
			   offset ? (off_t __force __user *) &of : NULL, count);
	set_fs(old_fs);

	if (offset && put_user(of, offset))
		return -EFAULT;

	return ret;
}

asmlinkage long sys32_sendfile64(int out_fd, int in_fd,
				compat_loff_t __user *offset, s32 count)
{
	mm_segment_t old_fs = get_fs();
	int ret;
	loff_t lof;

	if (offset && get_user(lof, offset))
		return -EFAULT;

	set_fs(KERNEL_DS);
	ret = sys_sendfile64(out_fd, in_fd,
			     offset ? (loff_t __force __user *) &lof : NULL,
			     count);
	set_fs(old_fs);

	if (offset && put_user(lof, offset))
		return -EFAULT;

	return ret;
}

struct stat64_emu31 {
	unsigned long long	st_dev;
	unsigned int		__pad1;
#define STAT64_HAS_BROKEN_ST_INO	1
	u32			__st_ino;
	unsigned int		st_mode;
	unsigned int		st_nlink;
	u32			st_uid;
	u32			st_gid;
	unsigned long long	st_rdev;
	unsigned int		__pad3;
	long			st_size;
	u32			st_blksize;
	unsigned char		__pad4[4];
	u32			__pad5;		/* future possible st_blocks high bits */
	u32			st_blocks;	/* Number 512-byte blocks allocated. */
	u32			st_atime;
	u32			__pad6;
	u32			st_mtime;
	u32			__pad7;
	u32			st_ctime;
	u32			__pad8;		/* will be high 32 bits of ctime someday */
	unsigned long		st_ino;
};

static int cp_stat64(struct stat64_emu31 __user *ubuf, struct kstat *stat)
{
	struct stat64_emu31 tmp;

	memset(&tmp, 0, sizeof(tmp));

	tmp.st_dev = huge_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	tmp.__st_ino = (u32)stat->ino;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = (unsigned int)stat->nlink;
	tmp.st_uid = stat->uid;
	tmp.st_gid = stat->gid;
	tmp.st_rdev = huge_encode_dev(stat->rdev);
	tmp.st_size = stat->size;
	tmp.st_blksize = (u32)stat->blksize;
	tmp.st_blocks = (u32)stat->blocks;
	tmp.st_atime = (u32)stat->atime.tv_sec;
	tmp.st_mtime = (u32)stat->mtime.tv_sec;
	tmp.st_ctime = (u32)stat->ctime.tv_sec;

	return copy_to_user(ubuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

asmlinkage long sys32_stat64(char __user * filename, struct stat64_emu31 __user * statbuf)
{
	struct kstat stat;
	int ret = vfs_stat(filename, &stat);
	if (!ret)
		ret = cp_stat64(statbuf, &stat);
	return ret;
}

asmlinkage long sys32_lstat64(char __user * filename, struct stat64_emu31 __user * statbuf)
{
	struct kstat stat;
	int ret = vfs_lstat(filename, &stat);
	if (!ret)
		ret = cp_stat64(statbuf, &stat);
	return ret;
}

asmlinkage long sys32_fstat64(unsigned long fd, struct stat64_emu31 __user * statbuf)
{
	struct kstat stat;
	int ret = vfs_fstat(fd, &stat);
	if (!ret)
		ret = cp_stat64(statbuf, &stat);
	return ret;
}

asmlinkage long sys32_fstatat64(unsigned int dfd, char __user *filename,
				struct stat64_emu31 __user* statbuf, int flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_stat64(statbuf, &stat);
}

/*
 * Linux/i386 didn't use to be able to handle more than
 * 4 system call parameters, so these system calls used a memory
 * block for parameter passing.
 */

struct mmap_arg_struct_emu31 {
	u32	addr;
	u32	len;
	u32	prot;
	u32	flags;
	u32	fd;
	u32	offset;
};

asmlinkage unsigned long
old32_mmap(struct mmap_arg_struct_emu31 __user *arg)
{
	struct mmap_arg_struct_emu31 a;
	int error = -EFAULT;

	if (copy_from_user(&a, arg, sizeof(a)))
		goto out;

	error = -EINVAL;
	if (a.offset & ~PAGE_MASK)
		goto out;

	error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
			       a.offset >> PAGE_SHIFT);
out:
	return error;
}

asmlinkage long
sys32_mmap2(struct mmap_arg_struct_emu31 __user *arg)
{
	struct mmap_arg_struct_emu31 a;
	int error = -EFAULT;

	if (copy_from_user(&a, arg, sizeof(a)))
		goto out;
	error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
out:
	return error;
}

asmlinkage long sys32_read(unsigned int fd, char __user * buf, size_t count)
{
	if ((compat_ssize_t) count < 0)
		return -EINVAL;

	return sys_read(fd, buf, count);
}

asmlinkage long sys32_write(unsigned int fd, char __user * buf, size_t count)
{
	if ((compat_ssize_t) count < 0)
		return -EINVAL;

	return sys_write(fd, buf, count);
}

/*
 * 31 bit emulation wrapper functions for sys_fadvise64/fadvise64_64.
 * These need to rewrite the advise values for POSIX_FADV_{DONTNEED,NOREUSE}
 * because the 31 bit values differ from the 64 bit values.
 */

asmlinkage long
sys32_fadvise64(int fd, loff_t offset, size_t len, int advise)
{
	if (advise == 4)
		advise = POSIX_FADV_DONTNEED;
	else if (advise == 5)
		advise = POSIX_FADV_NOREUSE;
	return sys_fadvise64(fd, offset, len, advise);
}

struct fadvise64_64_args {
	int fd;
	long long offset;
	long long len;
	int advice;
};

asmlinkage long
sys32_fadvise64_64(struct fadvise64_64_args __user *args)
{
	struct fadvise64_64_args a;

	if (copy_from_user(&a, args, sizeof(a)))
		return -EFAULT;
	if (a.advice == 4)
		a.advice = POSIX_FADV_DONTNEED;
	else if (a.advice == 5)
		a.advice = POSIX_FADV_NOREUSE;
	return sys_fadvise64_64(a.fd, a.offset, a.len, a.advice);
}
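
/*
 * Illustrative sketch, not part of the original file: the hi/lo register
 * splitting that sys32_truncate64, sys32_pread64, sys32_pwrite64 and
 * sys32_readahead above reassemble with ((loff_t)hi << 32) | lo, redone as
 * a tiny stand-alone user-space program.  The sample offset value is an
 * assumption; kept under #if 0 so it never affects the kernel build.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t offset = 0x123456789aULL;	/* a file offset above 4 GiB */
	uint32_t hi = offset >> 32;		/* passed in one 31-bit register */
	uint32_t lo = offset & 0xffffffffUL;	/* passed in the next register */
	uint64_t joined = ((uint64_t)hi << 32) | lo;	/* kernel-side rejoin */

	printf("hi=%#x lo=%#x rejoined=%#llx\n", hi, lo,
	       (unsigned long long)joined);
	return joined == offset ? 0 : 1;
}
#endif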
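
/*
 * Illustrative sketch, not part of the original file: the high2lowuid()
 * overflow mapping used by the 16-bit getuid/getresuid wrappers above,
 * shown as a small user-space program.  The value 65534 merely stands in
 * for the kernel's overflowuid default and is an assumption here.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint16_t demo_high2lowuid(uint32_t uid, uint16_t overflowuid)
{
	/* IDs that do not fit in 16 bits are reported as the overflow ID. */
	return uid > 65535 ? overflowuid : (uint16_t)uid;
}

int main(void)
{
	printf("%u -> %u\n", 1000u, demo_high2lowuid(1000, 65534));
	printf("%u -> %u\n", 70000u, demo_high2lowuid(70000, 65534));
	return 0;
}
#endif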
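
/*
 * Illustrative sketch, not part of the original file: the 64-bit <-> 2x32-bit
 * sigset word conversion performed by the _NSIG_WORDS fall-through switches
 * in sys32_rt_sigprocmask/sys32_rt_sigpending above, shown for a single word.
 * The sample bit patterns are arbitrary; kept under #if 0 so it never affects
 * the kernel build.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t s32[2] = { 0x00010002u, 0x00030004u };	/* compat layout */
	uint64_t s64;

	/* widen: low compat word first, high word shifted up (case 1 above) */
	s64 = s32[0] | ((uint64_t)s32[1] << 32);
	/* narrow again: the reverse split used when copying back out */
	printf("s64=%#llx low=%#x high=%#x\n", (unsigned long long)s64,
	       (uint32_t)s64, (uint32_t)(s64 >> 32));
	return 0;
}
#endif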