// SPDX-License-Identifier: GPL-2.0
/*
 * Ptrace user space interface.
 *
 * Copyright IBM Corp. 1999, 2010
 * Author(s): Denis Joseph Barrow
 *            Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include "asm/ptrace.h"
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/elf.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/seccomp.h>
#include <linux/compat.h>
#include <trace/syscall.h>
#include <asm/page.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/switch_to.h>
#include <asm/runtime_instr.h>
#include <asm/facility.h>

#include "entry.h"

#ifdef CONFIG_COMPAT
#include "compat_ptrace.h"
#endif

/*
 * Recompute the CPU control registers that depend on per-task debug
 * state: CR0 (transactional-execution control), CR2 (TX diagnostic and
 * guarded-storage enable) and CR9-11 (PER mask/start/end), and the PER
 * bit in the task's PSW.  Control registers are only reloaded when
 * their value actually changed, since __ctl_load is expensive.
 *
 * NOTE(review): callers presumably invoke this only for the task
 * running on the current CPU (the __ctl_* ops act on this CPU) —
 * confirm against call sites.
 */
void update_cr_regs(struct task_struct *task)
{
	struct pt_regs *regs = task_pt_regs(task);
	struct thread_struct *thread = &task->thread;
	struct per_regs old, new;
	union ctlreg0 cr0_old, cr0_new;
	union ctlreg2 cr2_old, cr2_new;
	int cr0_changed, cr2_changed;

	__ctl_store(cr0_old.val, 0, 0);
	__ctl_store(cr2_old.val, 2, 2);
	cr0_new = cr0_old;
	cr2_new = cr2_old;
	/* Take care of the enable/disable of transactional execution. */
	if (MACHINE_HAS_TE) {
		/* Set or clear transaction execution TXC bit 8. */
		cr0_new.tcx = 1;
		if (task->thread.per_flags & PER_FLAG_NO_TE)
			cr0_new.tcx = 0;
		/* Set or clear transaction execution TDC bits 62 and 63. */
		cr2_new.tdc = 0;
		if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
			if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND)
				cr2_new.tdc = 1;
			else
				cr2_new.tdc = 2;
		}
	}
	/* Take care of enable/disable of guarded storage. */
	if (MACHINE_HAS_GS) {
		cr2_new.gse = 0;
		if (task->thread.gs_cb)
			cr2_new.gse = 1;
	}
	/* Load control register 0/2 iff changed */
	cr0_changed = cr0_new.val != cr0_old.val;
	cr2_changed = cr2_new.val != cr2_old.val;
	if (cr0_changed)
		__ctl_load(cr0_new.val, 0, 0);
	if (cr2_changed)
		__ctl_load(cr2_new.val, 2, 2);
	/* Copy user specified PER registers */
	new.control = thread->per_user.control;
	new.start = thread->per_user.start;
	new.end = thread->per_user.end;

	/* merge TIF_SINGLE_STEP into user specified PER registers. */
	if (test_tsk_thread_flag(task, TIF_SINGLE_STEP) ||
	    test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) {
		if (test_tsk_thread_flag(task, TIF_BLOCK_STEP))
			new.control |= PER_EVENT_BRANCH;
		else
			new.control |= PER_EVENT_IFETCH;
		new.control |= PER_CONTROL_SUSPENSION;
		new.control |= PER_EVENT_TRANSACTION_END;
		if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP))
			new.control |= PER_EVENT_IFETCH;
		/* Single stepping watches the whole address space. */
		new.start = 0;
		new.end = -1UL;
	}

	/* Take care of the PER enablement bit in the PSW. */
	if (!(new.control & PER_EVENT_MASK)) {
		regs->psw.mask &= ~PSW_MASK_PER;
		return;
	}
	regs->psw.mask |= PSW_MASK_PER;
	__ctl_store(old, 9, 11);
	if (memcmp(&new, &old, sizeof(struct per_regs)) != 0)
		__ctl_load(new, 9, 11);
}

/* Arm hardware single stepping (per-instruction PER events). */
void user_enable_single_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
	set_tsk_thread_flag(task, TIF_SINGLE_STEP);
}

/* Disarm both single stepping and block stepping. */
void user_disable_single_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
}

/* Arm block stepping (PER successful-branch events instead of ifetch). */
void user_enable_block_step(struct task_struct *task)
{
	set_tsk_thread_flag(task, TIF_SINGLE_STEP);
	set_tsk_thread_flag(task, TIF_BLOCK_STEP);
}
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Clear all debugging related fields.
 */
void ptrace_disable(struct task_struct *task)
{
	memset(&task->thread.per_user, 0, sizeof(task->thread.per_user));
	memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
	clear_tsk_thread_flag(task, TIF_PER_TRAP);
	task->thread.per_flags = 0;
}

/* User-area offsets must be 8-byte aligned (except the acrs window). */
#define __ADDR_MASK 7

/*
 * Read one word from the per_info part of the user area.  The NULL
 * "dummy" pointer is only used to compute field offsets; it is never
 * dereferenced.  While single stepping is active the active PER set
 * is synthesized (ifetch events over the whole address space) rather
 * than taken from the user specified registers.
 */
static inline unsigned long __peek_user_per(struct task_struct *child,
					    addr_t addr)
{
	struct per_struct_kernel *dummy = NULL;

	if (addr == (addr_t) &dummy->cr9)
		/* Control bits of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			PER_EVENT_IFETCH : child->thread.per_user.control;
	else if (addr == (addr_t) &dummy->cr10)
		/* Start address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			0 : child->thread.per_user.start;
	else if (addr == (addr_t) &dummy->cr11)
		/* End address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			-1UL : child->thread.per_user.end;
	else if (addr == (addr_t) &dummy->bits)
		/* Single-step bit. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			(1UL << (BITS_PER_LONG - 1)) : 0;
	else if (addr == (addr_t) &dummy->starting_addr)
		/* Start address of the user specified per set. */
		return child->thread.per_user.start;
	else if (addr == (addr_t) &dummy->ending_addr)
		/* End address of the user specified per set. */
		return child->thread.per_user.end;
	else if (addr == (addr_t) &dummy->perc_atmid)
		/* PER code, ATMID and AI of the last PER trap */
		return (unsigned long)
			child->thread.per_event.cause << (BITS_PER_LONG - 16);
	else if (addr == (addr_t) &dummy->address)
		/* Address of the last PER trap */
		return child->thread.per_event.address;
	else if (addr == (addr_t) &dummy->access_id)
		/* Access id of the last PER trap */
		return (unsigned long)
			child->thread.per_event.paid << (BITS_PER_LONG - 8);
	return 0;
}

/*
 * Read the word at offset addr from the user area of a process. The
 * trouble here is that the information is littered over different
 * locations. The process registers are found on the kernel stack,
 * the floating point stuff and the trace settings are stored in
 * the task structure. In addition the different structures in
 * struct user contain pad bytes that should be read as zeroes.
 * Lovely...
 */
static unsigned long __peek_user(struct task_struct *child, addr_t addr)
{
	struct user *dummy = NULL;
	addr_t offset, tmp;

	if (addr < (addr_t) &dummy->regs.acrs) {
		/*
		 * psw and gprs are stored on the stack
		 */
		tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
		if (addr == (addr_t) &dummy->regs.psw.mask) {
			/* Return a clean psw mask. */
			tmp &= PSW_MASK_USER | PSW_MASK_RI;
			tmp |= PSW_USER_BITS;
		}

	} else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.acrs;
		/*
		 * Very special case: old & broken 64 bit gdb reading
		 * from acrs[15]. Result is a 64 bit value. Read the
		 * 32 bit acrs[15] value and shift it by 32. Sick...
		 */
		if (addr == (addr_t) &dummy->regs.acrs[15])
			tmp = ((unsigned long) child->thread.acrs[15]) << 32;
		else
			tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);

	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		tmp = (addr_t) task_pt_regs(child)->orig_gpr2;

	} else if (addr < (addr_t) &dummy->regs.fp_regs) {
		/*
		 * prevent reads of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		tmp = 0;

	} else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) {
		/*
		 * floating point control reg. is in the thread structure
		 */
		tmp = child->thread.fpu.fpc;
		tmp <<= BITS_PER_LONG - 32;

	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are either in child->thread.fpu
		 * or the child->thread.fpu.vxrs array
		 */
		offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
		if (MACHINE_HAS_VX)
			/* fprs alias the high halves of vxrs 0-15. */
			tmp = *(addr_t *)
			       ((addr_t) child->thread.fpu.vxrs + 2*offset);
		else
			tmp = *(addr_t *)
			       ((addr_t) child->thread.fpu.fprs + offset);

	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy->regs.per_info;
		tmp = __peek_user_per(child, addr);

	} else
		tmp = 0;

	return tmp;
}

/* PTRACE_PEEKUSR: validate alignment/range, then copy the word out. */
static int
peek_user(struct task_struct *child, addr_t addr, addr_t data)
{
	addr_t tmp, mask;

	/*
	 * Stupid gdb peeks/pokes the access registers in 64 bit with
	 * an alignment of 4. Programmers from hell...
	 */
	mask = __ADDR_MASK;
	if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
	    addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
		mask = 3;
	if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
		return -EIO;

	tmp = __peek_user(child, addr);
	return put_user(tmp, (addr_t __user *) data);
}

/*
 * Write one word into the per_info part of the user area.  Only the
 * three debugger-writable fields are honored; everything else is
 * silently ignored (see the comment below).
 */
static inline void __poke_user_per(struct task_struct *child,
				   addr_t addr, addr_t data)
{
	struct per_struct_kernel *dummy = NULL;

	/*
	 * There are only three fields in the per_info struct that the
	 * debugger user can write to.
	 * 1) cr9: the debugger wants to set a new PER event mask
	 * 2) starting_addr: the debugger wants to set a new starting
	 *    address to use with the PER event mask.
	 * 3) ending_addr: the debugger wants to set a new ending
	 *    address to use with the PER event mask.
	 * The user specified PER event mask and the start and end
	 * addresses are used only if single stepping is not in effect.
	 * Writes to any other field in per_info are ignored.
	 */
	if (addr == (addr_t) &dummy->cr9)
		/* PER event mask of the user specified per set. */
		child->thread.per_user.control =
			data & (PER_EVENT_MASK | PER_CONTROL_MASK);
	else if (addr == (addr_t) &dummy->starting_addr)
		/* Starting address of the user specified per set. */
		child->thread.per_user.start = data;
	else if (addr == (addr_t) &dummy->ending_addr)
		/* Ending address of the user specified per set. */
		child->thread.per_user.end = data;
}
328 */ 329 static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) 330 { 331 struct user *dummy = NULL; 332 addr_t offset; 333 334 335 if (addr < (addr_t) &dummy->regs.acrs) { 336 struct pt_regs *regs = task_pt_regs(child); 337 /* 338 * psw and gprs are stored on the stack 339 */ 340 if (addr == (addr_t) &dummy->regs.psw.mask) { 341 unsigned long mask = PSW_MASK_USER; 342 343 mask |= is_ri_task(child) ? PSW_MASK_RI : 0; 344 if ((data ^ PSW_USER_BITS) & ~mask) 345 /* Invalid psw mask. */ 346 return -EINVAL; 347 if ((data & PSW_MASK_ASC) == PSW_ASC_HOME) 348 /* Invalid address-space-control bits */ 349 return -EINVAL; 350 if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA)) 351 /* Invalid addressing mode bits */ 352 return -EINVAL; 353 } 354 355 if (test_pt_regs_flag(regs, PIF_SYSCALL) && 356 addr == offsetof(struct user, regs.gprs[2])) { 357 struct pt_regs *regs = task_pt_regs(child); 358 359 regs->int_code = 0x20000 | (data & 0xffff); 360 } 361 *(addr_t *)((addr_t) ®s->psw + addr) = data; 362 } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) { 363 /* 364 * access registers are stored in the thread structure 365 */ 366 offset = addr - (addr_t) &dummy->regs.acrs; 367 /* 368 * Very special case: old & broken 64 bit gdb writing 369 * to acrs[15] with a 64 bit value. Ignore the lower 370 * half of the value and write the upper 32 bit to 371 * acrs[15]. Sick... 372 */ 373 if (addr == (addr_t) &dummy->regs.acrs[15]) 374 child->thread.acrs[15] = (unsigned int) (data >> 32); 375 else 376 *(addr_t *)((addr_t) &child->thread.acrs + offset) = data; 377 378 } else if (addr == (addr_t) &dummy->regs.orig_gpr2) { 379 /* 380 * orig_gpr2 is stored on the kernel stack 381 */ 382 task_pt_regs(child)->orig_gpr2 = data; 383 384 } else if (addr < (addr_t) &dummy->regs.fp_regs) { 385 /* 386 * prevent writes of padding hole between 387 * orig_gpr2 and fp_regs on s390. 
388 */ 389 return 0; 390 391 } else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) { 392 /* 393 * floating point control reg. is in the thread structure 394 */ 395 if ((unsigned int) data != 0 || 396 test_fp_ctl(data >> (BITS_PER_LONG - 32))) 397 return -EINVAL; 398 child->thread.fpu.fpc = data >> (BITS_PER_LONG - 32); 399 400 } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) { 401 /* 402 * floating point regs. are either in child->thread.fpu 403 * or the child->thread.fpu.vxrs array 404 */ 405 offset = addr - (addr_t) &dummy->regs.fp_regs.fprs; 406 if (MACHINE_HAS_VX) 407 *(addr_t *)((addr_t) 408 child->thread.fpu.vxrs + 2*offset) = data; 409 else 410 *(addr_t *)((addr_t) 411 child->thread.fpu.fprs + offset) = data; 412 413 } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) { 414 /* 415 * Handle access to the per_info structure. 416 */ 417 addr -= (addr_t) &dummy->regs.per_info; 418 __poke_user_per(child, addr, data); 419 420 } 421 422 return 0; 423 } 424 425 static int poke_user(struct task_struct *child, addr_t addr, addr_t data) 426 { 427 addr_t mask; 428 429 /* 430 * Stupid gdb peeks/pokes the access registers in 64 bit with 431 * an alignment of 4. Programmers from hell indeed... 432 */ 433 mask = __ADDR_MASK; 434 if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs && 435 addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2) 436 mask = 3; 437 if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) 438 return -EIO; 439 440 return __poke_user(child, addr, data); 441 } 442 443 long arch_ptrace(struct task_struct *child, long request, 444 unsigned long addr, unsigned long data) 445 { 446 ptrace_area parea; 447 int copied, ret; 448 449 switch (request) { 450 case PTRACE_PEEKUSR: 451 /* read the word at location addr in the USER area. 
*/ 452 return peek_user(child, addr, data); 453 454 case PTRACE_POKEUSR: 455 /* write the word at location addr in the USER area */ 456 return poke_user(child, addr, data); 457 458 case PTRACE_PEEKUSR_AREA: 459 case PTRACE_POKEUSR_AREA: 460 if (copy_from_user(&parea, (void __force __user *) addr, 461 sizeof(parea))) 462 return -EFAULT; 463 addr = parea.kernel_addr; 464 data = parea.process_addr; 465 copied = 0; 466 while (copied < parea.len) { 467 if (request == PTRACE_PEEKUSR_AREA) 468 ret = peek_user(child, addr, data); 469 else { 470 addr_t utmp; 471 if (get_user(utmp, 472 (addr_t __force __user *) data)) 473 return -EFAULT; 474 ret = poke_user(child, addr, utmp); 475 } 476 if (ret) 477 return ret; 478 addr += sizeof(unsigned long); 479 data += sizeof(unsigned long); 480 copied += sizeof(unsigned long); 481 } 482 return 0; 483 case PTRACE_GET_LAST_BREAK: 484 put_user(child->thread.last_break, 485 (unsigned long __user *) data); 486 return 0; 487 case PTRACE_ENABLE_TE: 488 if (!MACHINE_HAS_TE) 489 return -EIO; 490 child->thread.per_flags &= ~PER_FLAG_NO_TE; 491 return 0; 492 case PTRACE_DISABLE_TE: 493 if (!MACHINE_HAS_TE) 494 return -EIO; 495 child->thread.per_flags |= PER_FLAG_NO_TE; 496 child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND; 497 return 0; 498 case PTRACE_TE_ABORT_RAND: 499 if (!MACHINE_HAS_TE || (child->thread.per_flags & PER_FLAG_NO_TE)) 500 return -EIO; 501 switch (data) { 502 case 0UL: 503 child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND; 504 break; 505 case 1UL: 506 child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND; 507 child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND_TEND; 508 break; 509 case 2UL: 510 child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND; 511 child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND_TEND; 512 break; 513 default: 514 return -EINVAL; 515 } 516 return 0; 517 default: 518 return ptrace_request(child, request, addr, data); 519 } 520 } 521 522 #ifdef CONFIG_COMPAT 523 /* 524 * Now the fun part starts... 
a 31 bit program running in the 525 * 31 bit emulation tracing another program. PTRACE_PEEKTEXT, 526 * PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy 527 * to handle, the difference to the 64 bit versions of the requests 528 * is that the access is done in multiples of 4 byte instead of 529 * 8 bytes (sizeof(unsigned long) on 31/64 bit). 530 * The ugly part are PTRACE_PEEKUSR, PTRACE_PEEKUSR_AREA, 531 * PTRACE_POKEUSR and PTRACE_POKEUSR_AREA. If the traced program 532 * is a 31 bit program too, the content of struct user can be 533 * emulated. A 31 bit program peeking into the struct user of 534 * a 64 bit program is a no-no. 535 */ 536 537 /* 538 * Same as peek_user_per but for a 31 bit program. 539 */ 540 static inline __u32 __peek_user_per_compat(struct task_struct *child, 541 addr_t addr) 542 { 543 struct compat_per_struct_kernel *dummy32 = NULL; 544 545 if (addr == (addr_t) &dummy32->cr9) 546 /* Control bits of the active per set. */ 547 return (__u32) test_thread_flag(TIF_SINGLE_STEP) ? 548 PER_EVENT_IFETCH : child->thread.per_user.control; 549 else if (addr == (addr_t) &dummy32->cr10) 550 /* Start address of the active per set. */ 551 return (__u32) test_thread_flag(TIF_SINGLE_STEP) ? 552 0 : child->thread.per_user.start; 553 else if (addr == (addr_t) &dummy32->cr11) 554 /* End address of the active per set. */ 555 return test_thread_flag(TIF_SINGLE_STEP) ? 556 PSW32_ADDR_INSN : child->thread.per_user.end; 557 else if (addr == (addr_t) &dummy32->bits) 558 /* Single-step bit. */ 559 return (__u32) test_thread_flag(TIF_SINGLE_STEP) ? 560 0x80000000 : 0; 561 else if (addr == (addr_t) &dummy32->starting_addr) 562 /* Start address of the user specified per set. */ 563 return (__u32) child->thread.per_user.start; 564 else if (addr == (addr_t) &dummy32->ending_addr) 565 /* End address of the user specified per set. 
*/ 566 return (__u32) child->thread.per_user.end; 567 else if (addr == (addr_t) &dummy32->perc_atmid) 568 /* PER code, ATMID and AI of the last PER trap */ 569 return (__u32) child->thread.per_event.cause << 16; 570 else if (addr == (addr_t) &dummy32->address) 571 /* Address of the last PER trap */ 572 return (__u32) child->thread.per_event.address; 573 else if (addr == (addr_t) &dummy32->access_id) 574 /* Access id of the last PER trap */ 575 return (__u32) child->thread.per_event.paid << 24; 576 return 0; 577 } 578 579 /* 580 * Same as peek_user but for a 31 bit program. 581 */ 582 static u32 __peek_user_compat(struct task_struct *child, addr_t addr) 583 { 584 struct compat_user *dummy32 = NULL; 585 addr_t offset; 586 __u32 tmp; 587 588 if (addr < (addr_t) &dummy32->regs.acrs) { 589 struct pt_regs *regs = task_pt_regs(child); 590 /* 591 * psw and gprs are stored on the stack 592 */ 593 if (addr == (addr_t) &dummy32->regs.psw.mask) { 594 /* Fake a 31 bit psw mask. */ 595 tmp = (__u32)(regs->psw.mask >> 32); 596 tmp &= PSW32_MASK_USER | PSW32_MASK_RI; 597 tmp |= PSW32_USER_BITS; 598 } else if (addr == (addr_t) &dummy32->regs.psw.addr) { 599 /* Fake a 31 bit psw address. */ 600 tmp = (__u32) regs->psw.addr | 601 (__u32)(regs->psw.mask & PSW_MASK_BA); 602 } else { 603 /* gpr 0-15 */ 604 tmp = *(__u32 *)((addr_t) ®s->psw + addr*2 + 4); 605 } 606 } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) { 607 /* 608 * access registers are stored in the thread structure 609 */ 610 offset = addr - (addr_t) &dummy32->regs.acrs; 611 tmp = *(__u32*)((addr_t) &child->thread.acrs + offset); 612 613 } else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) { 614 /* 615 * orig_gpr2 is stored on the kernel stack 616 */ 617 tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4); 618 619 } else if (addr < (addr_t) &dummy32->regs.fp_regs) { 620 /* 621 * prevent reads of padding hole between 622 * orig_gpr2 and fp_regs on s390. 
623 */ 624 tmp = 0; 625 626 } else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) { 627 /* 628 * floating point control reg. is in the thread structure 629 */ 630 tmp = child->thread.fpu.fpc; 631 632 } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) { 633 /* 634 * floating point regs. are either in child->thread.fpu 635 * or the child->thread.fpu.vxrs array 636 */ 637 offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs; 638 if (MACHINE_HAS_VX) 639 tmp = *(__u32 *) 640 ((addr_t) child->thread.fpu.vxrs + 2*offset); 641 else 642 tmp = *(__u32 *) 643 ((addr_t) child->thread.fpu.fprs + offset); 644 645 } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) { 646 /* 647 * Handle access to the per_info structure. 648 */ 649 addr -= (addr_t) &dummy32->regs.per_info; 650 tmp = __peek_user_per_compat(child, addr); 651 652 } else 653 tmp = 0; 654 655 return tmp; 656 } 657 658 static int peek_user_compat(struct task_struct *child, 659 addr_t addr, addr_t data) 660 { 661 __u32 tmp; 662 663 if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user) - 3) 664 return -EIO; 665 666 tmp = __peek_user_compat(child, addr); 667 return put_user(tmp, (__u32 __user *) data); 668 } 669 670 /* 671 * Same as poke_user_per but for a 31 bit program. 672 */ 673 static inline void __poke_user_per_compat(struct task_struct *child, 674 addr_t addr, __u32 data) 675 { 676 struct compat_per_struct_kernel *dummy32 = NULL; 677 678 if (addr == (addr_t) &dummy32->cr9) 679 /* PER event mask of the user specified per set. */ 680 child->thread.per_user.control = 681 data & (PER_EVENT_MASK | PER_CONTROL_MASK); 682 else if (addr == (addr_t) &dummy32->starting_addr) 683 /* Starting address of the user specified per set. */ 684 child->thread.per_user.start = data; 685 else if (addr == (addr_t) &dummy32->ending_addr) 686 /* Ending address of the user specified per set. */ 687 child->thread.per_user.end = data; 688 } 689 690 /* 691 * Same as poke_user but for a 31 bit program. 
692 */ 693 static int __poke_user_compat(struct task_struct *child, 694 addr_t addr, addr_t data) 695 { 696 struct compat_user *dummy32 = NULL; 697 __u32 tmp = (__u32) data; 698 addr_t offset; 699 700 if (addr < (addr_t) &dummy32->regs.acrs) { 701 struct pt_regs *regs = task_pt_regs(child); 702 /* 703 * psw, gprs, acrs and orig_gpr2 are stored on the stack 704 */ 705 if (addr == (addr_t) &dummy32->regs.psw.mask) { 706 __u32 mask = PSW32_MASK_USER; 707 708 mask |= is_ri_task(child) ? PSW32_MASK_RI : 0; 709 /* Build a 64 bit psw mask from 31 bit mask. */ 710 if ((tmp ^ PSW32_USER_BITS) & ~mask) 711 /* Invalid psw mask. */ 712 return -EINVAL; 713 if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME) 714 /* Invalid address-space-control bits */ 715 return -EINVAL; 716 regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | 717 (regs->psw.mask & PSW_MASK_BA) | 718 (__u64)(tmp & mask) << 32; 719 } else if (addr == (addr_t) &dummy32->regs.psw.addr) { 720 /* Build a 64 bit psw address from 31 bit address. */ 721 regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN; 722 /* Transfer 31 bit amode bit to psw mask. 
*/ 723 regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) | 724 (__u64)(tmp & PSW32_ADDR_AMODE); 725 } else { 726 if (test_pt_regs_flag(regs, PIF_SYSCALL) && 727 addr == offsetof(struct compat_user, regs.gprs[2])) { 728 struct pt_regs *regs = task_pt_regs(child); 729 730 regs->int_code = 0x20000 | (data & 0xffff); 731 } 732 /* gpr 0-15 */ 733 *(__u32*)((addr_t) ®s->psw + addr*2 + 4) = tmp; 734 } 735 } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) { 736 /* 737 * access registers are stored in the thread structure 738 */ 739 offset = addr - (addr_t) &dummy32->regs.acrs; 740 *(__u32*)((addr_t) &child->thread.acrs + offset) = tmp; 741 742 } else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) { 743 /* 744 * orig_gpr2 is stored on the kernel stack 745 */ 746 *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp; 747 748 } else if (addr < (addr_t) &dummy32->regs.fp_regs) { 749 /* 750 * prevent writess of padding hole between 751 * orig_gpr2 and fp_regs on s390. 752 */ 753 return 0; 754 755 } else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) { 756 /* 757 * floating point control reg. is in the thread structure 758 */ 759 if (test_fp_ctl(tmp)) 760 return -EINVAL; 761 child->thread.fpu.fpc = data; 762 763 } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) { 764 /* 765 * floating point regs. are either in child->thread.fpu 766 * or the child->thread.fpu.vxrs array 767 */ 768 offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs; 769 if (MACHINE_HAS_VX) 770 *(__u32 *)((addr_t) 771 child->thread.fpu.vxrs + 2*offset) = tmp; 772 else 773 *(__u32 *)((addr_t) 774 child->thread.fpu.fprs + offset) = tmp; 775 776 } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) { 777 /* 778 * Handle access to the per_info structure. 
779 */ 780 addr -= (addr_t) &dummy32->regs.per_info; 781 __poke_user_per_compat(child, addr, data); 782 } 783 784 return 0; 785 } 786 787 static int poke_user_compat(struct task_struct *child, 788 addr_t addr, addr_t data) 789 { 790 if (!is_compat_task() || (addr & 3) || 791 addr > sizeof(struct compat_user) - 3) 792 return -EIO; 793 794 return __poke_user_compat(child, addr, data); 795 } 796 797 long compat_arch_ptrace(struct task_struct *child, compat_long_t request, 798 compat_ulong_t caddr, compat_ulong_t cdata) 799 { 800 unsigned long addr = caddr; 801 unsigned long data = cdata; 802 compat_ptrace_area parea; 803 int copied, ret; 804 805 switch (request) { 806 case PTRACE_PEEKUSR: 807 /* read the word at location addr in the USER area. */ 808 return peek_user_compat(child, addr, data); 809 810 case PTRACE_POKEUSR: 811 /* write the word at location addr in the USER area */ 812 return poke_user_compat(child, addr, data); 813 814 case PTRACE_PEEKUSR_AREA: 815 case PTRACE_POKEUSR_AREA: 816 if (copy_from_user(&parea, (void __force __user *) addr, 817 sizeof(parea))) 818 return -EFAULT; 819 addr = parea.kernel_addr; 820 data = parea.process_addr; 821 copied = 0; 822 while (copied < parea.len) { 823 if (request == PTRACE_PEEKUSR_AREA) 824 ret = peek_user_compat(child, addr, data); 825 else { 826 __u32 utmp; 827 if (get_user(utmp, 828 (__u32 __force __user *) data)) 829 return -EFAULT; 830 ret = poke_user_compat(child, addr, utmp); 831 } 832 if (ret) 833 return ret; 834 addr += sizeof(unsigned int); 835 data += sizeof(unsigned int); 836 copied += sizeof(unsigned int); 837 } 838 return 0; 839 case PTRACE_GET_LAST_BREAK: 840 put_user(child->thread.last_break, 841 (unsigned int __user *) data); 842 return 0; 843 } 844 return compat_ptrace_request(child, request, addr, data); 845 } 846 #endif 847 848 /* 849 * user_regset definitions. 
/* Dump the general register set via repeated __peek_user reads. */
static int s390_regs_get(struct task_struct *target,
			 const struct user_regset *regset,
			 struct membuf to)
{
	unsigned pos;
	if (target == current)
		/* Flush live access registers into the thread struct. */
		save_access_regs(target->thread.acrs);

	for (pos = 0; pos < sizeof(s390_regs); pos += sizeof(long))
		membuf_store(&to, __peek_user(target, pos));
	return 0;
}

/* Store the general register set via repeated __poke_user writes. */
static int s390_regs_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	int rc = 0;

	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		const unsigned long *k = kbuf;
		while (count > 0 && !rc) {
			rc = __poke_user(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const unsigned long __user *u = ubuf;
		while (count > 0 && !rc) {
			unsigned long word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			rc = __poke_user(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	if (rc == 0 && target == current)
		/* Make the new access register values take effect now. */
		restore_access_regs(target->thread.acrs);

	return rc;
}

/* Dump fpc + 16 floating point registers as an _s390_fp_regs blob. */
static int s390_fpregs_get(struct task_struct *target,
			   const struct user_regset *regset,
			   struct membuf to)
{
	_s390_fp_regs fp_regs;

	if (target == current)
		save_fpu_regs();

	fp_regs.fpc = target->thread.fpu.fpc;
	fpregs_store(&fp_regs, &target->thread.fpu);

	return membuf_write(&to, &fp_regs, sizeof(fp_regs));
}

/* Store fpc (validated first) and the floating point registers. */
static int s390_fpregs_set(struct task_struct *target,
			   const struct user_regset *regset, unsigned int pos,
			   unsigned int count, const void *kbuf,
			   const void __user *ubuf)
{
	int rc = 0;
	freg_t fprs[__NUM_FPRS];

	if (target == current)
		save_fpu_regs();

	if (MACHINE_HAS_VX)
		convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
	else
		memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));

	/* If setting FPC, must validate it first. */
	if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
		u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ufpc,
					0, offsetof(s390_fp_regs, fprs));
		if (rc)
			return rc;
		if (ufpc[1] != 0 || test_fp_ctl(ufpc[0]))
			return -EINVAL;
		target->thread.fpu.fpc = ufpc[0];
	}

	if (rc == 0 && count > 0)
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					fprs, offsetof(s390_fp_regs, fprs), -1);
	if (rc)
		return rc;

	if (MACHINE_HAS_VX)
		convert_fp_to_vx(target->thread.fpu.vxrs, fprs);
	else
		memcpy(target->thread.fpu.fprs, &fprs, sizeof(fprs));

	return rc;
}

/* Report the last breaking-event address. */
static int s390_last_break_get(struct task_struct *target,
			       const struct user_regset *regset,
			       struct membuf to)
{
	return membuf_store(&to, target->thread.last_break);
}

/* The last-break address is read-only; writes are accepted and ignored. */
static int s390_last_break_set(struct task_struct *target,
			       const struct user_regset *regset,
			       unsigned int pos, unsigned int count,
			       const void *kbuf, const void __user *ubuf)
{
	return 0;
}

/* Dump the 256 byte transaction diagnostic block, if one was stored. */
static int s390_tdb_get(struct task_struct *target,
			const struct user_regset *regset,
			struct membuf to)
{
	struct pt_regs *regs = task_pt_regs(target);

	/* int_code bit 0x200 flags that trap_tdb contains valid data. */
	if (!(regs->int_code & 0x200))
		return -ENODATA;
	return membuf_write(&to, target->thread.trap_tdb, 256);
}

/* The TDB is read-only; writes are accepted and ignored. */
static int s390_tdb_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	return 0;
}

/* Dump the low halves of vector registers 0-15 (the non-fpr parts). */
static int s390_vxrs_low_get(struct task_struct *target,
			     const struct user_regset *regset,
			     struct membuf to)
{
	__u64 vxrs[__NUM_VXRS_LOW];
	int i;

	if (!MACHINE_HAS_VX)
		return -ENODEV;
	if (target == current)
		save_fpu_regs();
	for (i = 0; i < __NUM_VXRS_LOW; i++)
		/* The "+ 1" selects the second 64 bit word of each vxr. */
		vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
	return membuf_write(&to, vxrs, sizeof(vxrs));
}

/* Store the low halves of vector registers 0-15. */
static int s390_vxrs_low_set(struct task_struct *target,
			     const struct user_regset *regset,
			     unsigned int pos, unsigned int count,
			     const void *kbuf, const void __user *ubuf)
{
	__u64 vxrs[__NUM_VXRS_LOW];
	int i, rc;

	if (!MACHINE_HAS_VX)
		return -ENODEV;
	if (target == current)
		save_fpu_regs();

	for (i = 0; i < __NUM_VXRS_LOW; i++)
		vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);

	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
	if (rc == 0)
		for (i = 0; i < __NUM_VXRS_LOW; i++)
			*((__u64 *)(target->thread.fpu.vxrs + i) + 1) = vxrs[i];

	return rc;
}

/* Dump vector registers 16-31 in full. */
static int s390_vxrs_high_get(struct task_struct *target,
			      const struct user_regset *regset,
			      struct membuf to)
{
	if (!MACHINE_HAS_VX)
		return -ENODEV;
	if (target == current)
		save_fpu_regs();
	return membuf_write(&to, target->thread.fpu.vxrs + __NUM_VXRS_LOW,
			    __NUM_VXRS_HIGH * sizeof(__vector128));
}

/* Store vector registers 16-31 in full. */
static int s390_vxrs_high_set(struct task_struct *target,
			      const struct user_regset *regset,
			      unsigned int pos, unsigned int count,
			      const void *kbuf, const void __user *ubuf)
{
	int rc;

	if (!MACHINE_HAS_VX)
		return -ENODEV;
	if (target == current)
		save_fpu_regs();

	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				target->thread.fpu.vxrs + __NUM_VXRS_LOW, 0, -1);
	return rc;
}

/* Report the saved system call number. */
static int s390_system_call_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	return membuf_store(&to, target->thread.system_call);
}

/* Overwrite the saved system call number. */
static int s390_system_call_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	unsigned int *data = &target->thread.system_call;
	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  data, 0, sizeof(unsigned int));
}

/* Dump the guarded-storage control block, if the task has one. */
static int s390_gs_cb_get(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to)
{
	struct gs_cb *data = target->thread.gs_cb;

	if (!MACHINE_HAS_GS)
		return -ENODEV;
	if (!data)
		return -ENODATA;
	if (target == current)
		/* Refresh the saved copy from the live registers. */
		save_gs_cb(data);
	return membuf_write(&to, data, sizeof(struct gs_cb));
}

/* Install a guarded-storage control block, allocating one if needed. */
static int s390_gs_cb_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct gs_cb gs_cb = { }, *data = NULL;
	int rc;

	if (!MACHINE_HAS_GS)
		return -ENODEV;
	if (!target->thread.gs_cb) {
		/* Allocate before disabling preemption below. */
		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;
	}
	if (!target->thread.gs_cb)
		gs_cb.gsd = 25;
	else if (target == current)
		save_gs_cb(&gs_cb);
	else
		gs_cb = *target->thread.gs_cb;
	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				&gs_cb, 0, sizeof(gs_cb));
	if (rc) {
		kfree(data);
		return -EFAULT;
	}
	preempt_disable();
	if (!target->thread.gs_cb)
		target->thread.gs_cb = data;
	*target->thread.gs_cb = gs_cb;
	if (target == current) {
		/* Enable guarded storage (CR2 bit 4) and load the block. */
		__ctl_set_bit(2, 4);
		restore_gs_cb(target->thread.gs_cb);
	}
	preempt_enable();
	return rc;
}

/* Dump the guarded-storage broadcast control block, if present. */
static int s390_gs_bc_get(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to)
{
	struct gs_cb *data = target->thread.gs_bc_cb;

	if (!MACHINE_HAS_GS)
		return -ENODEV;
	if (!data)
		return -ENODATA;
	return membuf_write(&to, data, sizeof(struct gs_cb));
}

/* Store the guarded-storage broadcast block, allocating on first use. */
static int s390_gs_bc_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct gs_cb *data = target->thread.gs_bc_cb;

	if (!MACHINE_HAS_GS)
		return -ENODEV;
	if (!data) {
		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;
		target->thread.gs_bc_cb = data;
	}
	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  data, 0, sizeof(struct gs_cb));
}

/*
 * Validate a runtime-instrumentation control block supplied by user
 * space: fixed bits must have their architected values, reserved
 * fields must be zero and the origin/limit/current addresses must be
 * consistent.
 */
static bool is_ri_cb_valid(struct runtime_instr_cb *cb)
{
	return (cb->rca & 0x1f) == 0 &&
		(cb->roa & 0xfff) == 0 &&
		(cb->rla & 0xfff) == 0xfff &&
		cb->s == 1 &&
		cb->k == 1 &&
		cb->h == 0 &&
		cb->reserved1 == 0 &&
		cb->ps == 1 &&
		cb->qs == 0 &&
		cb->pc == 1 &&
		cb->qc == 0 &&
		cb->reserved2 == 0 &&
		cb->reserved3 == 0 &&
		cb->reserved4 == 0 &&
		cb->reserved5 == 0 &&
		cb->reserved6 == 0 &&
		cb->reserved7 == 0 &&
		cb->reserved8 == 0 &&
		cb->rla >= cb->roa &&
		cb->rca >= cb->roa &&
		cb->rca <= cb->rla+1 &&
		cb->m < 3;
}

/* Dump the runtime-instrumentation control block, if present. */
static int s390_runtime_instr_get(struct task_struct *target,
				  const struct user_regset *regset,
				  struct membuf to)
{
	struct runtime_instr_cb *data = target->thread.ri_cb;

	if (!test_facility(64))
		return -ENODEV;
	if (!data)
		return -ENODATA;

	return membuf_write(&to, data, sizeof(struct runtime_instr_cb));
}

/* Install a runtime-instrumentation control block (continues below). */
static int s390_runtime_instr_set(struct task_struct *target,
				  const struct user_regset *regset,
				  unsigned int pos, unsigned int count,
				  const void *kbuf, const void __user *ubuf)
{
	struct runtime_instr_cb ri_cb = { }, *data = NULL;
	int rc;

	if (!test_facility(64))
		return -ENODEV;

	if (!target->thread.ri_cb) {
		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;
	}

	if
(target->thread.ri_cb) { 1222 if (target == current) 1223 store_runtime_instr_cb(&ri_cb); 1224 else 1225 ri_cb = *target->thread.ri_cb; 1226 } 1227 1228 rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 1229 &ri_cb, 0, sizeof(struct runtime_instr_cb)); 1230 if (rc) { 1231 kfree(data); 1232 return -EFAULT; 1233 } 1234 1235 if (!is_ri_cb_valid(&ri_cb)) { 1236 kfree(data); 1237 return -EINVAL; 1238 } 1239 /* 1240 * Override access key in any case, since user space should 1241 * not be able to set it, nor should it care about it. 1242 */ 1243 ri_cb.key = PAGE_DEFAULT_KEY >> 4; 1244 preempt_disable(); 1245 if (!target->thread.ri_cb) 1246 target->thread.ri_cb = data; 1247 *target->thread.ri_cb = ri_cb; 1248 if (target == current) 1249 load_runtime_instr_cb(target->thread.ri_cb); 1250 preempt_enable(); 1251 1252 return 0; 1253 } 1254 1255 static const struct user_regset s390_regsets[] = { 1256 { 1257 .core_note_type = NT_PRSTATUS, 1258 .n = sizeof(s390_regs) / sizeof(long), 1259 .size = sizeof(long), 1260 .align = sizeof(long), 1261 .regset_get = s390_regs_get, 1262 .set = s390_regs_set, 1263 }, 1264 { 1265 .core_note_type = NT_PRFPREG, 1266 .n = sizeof(s390_fp_regs) / sizeof(long), 1267 .size = sizeof(long), 1268 .align = sizeof(long), 1269 .regset_get = s390_fpregs_get, 1270 .set = s390_fpregs_set, 1271 }, 1272 { 1273 .core_note_type = NT_S390_SYSTEM_CALL, 1274 .n = 1, 1275 .size = sizeof(unsigned int), 1276 .align = sizeof(unsigned int), 1277 .regset_get = s390_system_call_get, 1278 .set = s390_system_call_set, 1279 }, 1280 { 1281 .core_note_type = NT_S390_LAST_BREAK, 1282 .n = 1, 1283 .size = sizeof(long), 1284 .align = sizeof(long), 1285 .regset_get = s390_last_break_get, 1286 .set = s390_last_break_set, 1287 }, 1288 { 1289 .core_note_type = NT_S390_TDB, 1290 .n = 1, 1291 .size = 256, 1292 .align = 1, 1293 .regset_get = s390_tdb_get, 1294 .set = s390_tdb_set, 1295 }, 1296 { 1297 .core_note_type = NT_S390_VXRS_LOW, 1298 .n = __NUM_VXRS_LOW, 1299 .size = 
sizeof(__u64), 1300 .align = sizeof(__u64), 1301 .regset_get = s390_vxrs_low_get, 1302 .set = s390_vxrs_low_set, 1303 }, 1304 { 1305 .core_note_type = NT_S390_VXRS_HIGH, 1306 .n = __NUM_VXRS_HIGH, 1307 .size = sizeof(__vector128), 1308 .align = sizeof(__vector128), 1309 .regset_get = s390_vxrs_high_get, 1310 .set = s390_vxrs_high_set, 1311 }, 1312 { 1313 .core_note_type = NT_S390_GS_CB, 1314 .n = sizeof(struct gs_cb) / sizeof(__u64), 1315 .size = sizeof(__u64), 1316 .align = sizeof(__u64), 1317 .regset_get = s390_gs_cb_get, 1318 .set = s390_gs_cb_set, 1319 }, 1320 { 1321 .core_note_type = NT_S390_GS_BC, 1322 .n = sizeof(struct gs_cb) / sizeof(__u64), 1323 .size = sizeof(__u64), 1324 .align = sizeof(__u64), 1325 .regset_get = s390_gs_bc_get, 1326 .set = s390_gs_bc_set, 1327 }, 1328 { 1329 .core_note_type = NT_S390_RI_CB, 1330 .n = sizeof(struct runtime_instr_cb) / sizeof(__u64), 1331 .size = sizeof(__u64), 1332 .align = sizeof(__u64), 1333 .regset_get = s390_runtime_instr_get, 1334 .set = s390_runtime_instr_set, 1335 }, 1336 }; 1337 1338 static const struct user_regset_view user_s390_view = { 1339 .name = "s390x", 1340 .e_machine = EM_S390, 1341 .regsets = s390_regsets, 1342 .n = ARRAY_SIZE(s390_regsets) 1343 }; 1344 1345 #ifdef CONFIG_COMPAT 1346 static int s390_compat_regs_get(struct task_struct *target, 1347 const struct user_regset *regset, 1348 struct membuf to) 1349 { 1350 unsigned n; 1351 1352 if (target == current) 1353 save_access_regs(target->thread.acrs); 1354 1355 for (n = 0; n < sizeof(s390_compat_regs); n += sizeof(compat_ulong_t)) 1356 membuf_store(&to, __peek_user_compat(target, n)); 1357 return 0; 1358 } 1359 1360 static int s390_compat_regs_set(struct task_struct *target, 1361 const struct user_regset *regset, 1362 unsigned int pos, unsigned int count, 1363 const void *kbuf, const void __user *ubuf) 1364 { 1365 int rc = 0; 1366 1367 if (target == current) 1368 save_access_regs(target->thread.acrs); 1369 1370 if (kbuf) { 1371 const compat_ulong_t *k 
= kbuf; 1372 while (count > 0 && !rc) { 1373 rc = __poke_user_compat(target, pos, *k++); 1374 count -= sizeof(*k); 1375 pos += sizeof(*k); 1376 } 1377 } else { 1378 const compat_ulong_t __user *u = ubuf; 1379 while (count > 0 && !rc) { 1380 compat_ulong_t word; 1381 rc = __get_user(word, u++); 1382 if (rc) 1383 break; 1384 rc = __poke_user_compat(target, pos, word); 1385 count -= sizeof(*u); 1386 pos += sizeof(*u); 1387 } 1388 } 1389 1390 if (rc == 0 && target == current) 1391 restore_access_regs(target->thread.acrs); 1392 1393 return rc; 1394 } 1395 1396 static int s390_compat_regs_high_get(struct task_struct *target, 1397 const struct user_regset *regset, 1398 struct membuf to) 1399 { 1400 compat_ulong_t *gprs_high; 1401 int i; 1402 1403 gprs_high = (compat_ulong_t *)task_pt_regs(target)->gprs; 1404 for (i = 0; i < NUM_GPRS; i++, gprs_high += 2) 1405 membuf_store(&to, *gprs_high); 1406 return 0; 1407 } 1408 1409 static int s390_compat_regs_high_set(struct task_struct *target, 1410 const struct user_regset *regset, 1411 unsigned int pos, unsigned int count, 1412 const void *kbuf, const void __user *ubuf) 1413 { 1414 compat_ulong_t *gprs_high; 1415 int rc = 0; 1416 1417 gprs_high = (compat_ulong_t *) 1418 &task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)]; 1419 if (kbuf) { 1420 const compat_ulong_t *k = kbuf; 1421 while (count > 0) { 1422 *gprs_high = *k++; 1423 *gprs_high += 2; 1424 count -= sizeof(*k); 1425 } 1426 } else { 1427 const compat_ulong_t __user *u = ubuf; 1428 while (count > 0 && !rc) { 1429 unsigned long word; 1430 rc = __get_user(word, u++); 1431 if (rc) 1432 break; 1433 *gprs_high = word; 1434 *gprs_high += 2; 1435 count -= sizeof(*u); 1436 } 1437 } 1438 1439 return rc; 1440 } 1441 1442 static int s390_compat_last_break_get(struct task_struct *target, 1443 const struct user_regset *regset, 1444 struct membuf to) 1445 { 1446 compat_ulong_t last_break = target->thread.last_break; 1447 1448 return membuf_store(&to, (unsigned long)last_break); 
1449 } 1450 1451 static int s390_compat_last_break_set(struct task_struct *target, 1452 const struct user_regset *regset, 1453 unsigned int pos, unsigned int count, 1454 const void *kbuf, const void __user *ubuf) 1455 { 1456 return 0; 1457 } 1458 1459 static const struct user_regset s390_compat_regsets[] = { 1460 { 1461 .core_note_type = NT_PRSTATUS, 1462 .n = sizeof(s390_compat_regs) / sizeof(compat_long_t), 1463 .size = sizeof(compat_long_t), 1464 .align = sizeof(compat_long_t), 1465 .regset_get = s390_compat_regs_get, 1466 .set = s390_compat_regs_set, 1467 }, 1468 { 1469 .core_note_type = NT_PRFPREG, 1470 .n = sizeof(s390_fp_regs) / sizeof(compat_long_t), 1471 .size = sizeof(compat_long_t), 1472 .align = sizeof(compat_long_t), 1473 .regset_get = s390_fpregs_get, 1474 .set = s390_fpregs_set, 1475 }, 1476 { 1477 .core_note_type = NT_S390_SYSTEM_CALL, 1478 .n = 1, 1479 .size = sizeof(compat_uint_t), 1480 .align = sizeof(compat_uint_t), 1481 .regset_get = s390_system_call_get, 1482 .set = s390_system_call_set, 1483 }, 1484 { 1485 .core_note_type = NT_S390_LAST_BREAK, 1486 .n = 1, 1487 .size = sizeof(long), 1488 .align = sizeof(long), 1489 .regset_get = s390_compat_last_break_get, 1490 .set = s390_compat_last_break_set, 1491 }, 1492 { 1493 .core_note_type = NT_S390_TDB, 1494 .n = 1, 1495 .size = 256, 1496 .align = 1, 1497 .regset_get = s390_tdb_get, 1498 .set = s390_tdb_set, 1499 }, 1500 { 1501 .core_note_type = NT_S390_VXRS_LOW, 1502 .n = __NUM_VXRS_LOW, 1503 .size = sizeof(__u64), 1504 .align = sizeof(__u64), 1505 .regset_get = s390_vxrs_low_get, 1506 .set = s390_vxrs_low_set, 1507 }, 1508 { 1509 .core_note_type = NT_S390_VXRS_HIGH, 1510 .n = __NUM_VXRS_HIGH, 1511 .size = sizeof(__vector128), 1512 .align = sizeof(__vector128), 1513 .regset_get = s390_vxrs_high_get, 1514 .set = s390_vxrs_high_set, 1515 }, 1516 { 1517 .core_note_type = NT_S390_HIGH_GPRS, 1518 .n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t), 1519 .size = sizeof(compat_long_t), 1520 .align = 
sizeof(compat_long_t), 1521 .regset_get = s390_compat_regs_high_get, 1522 .set = s390_compat_regs_high_set, 1523 }, 1524 { 1525 .core_note_type = NT_S390_GS_CB, 1526 .n = sizeof(struct gs_cb) / sizeof(__u64), 1527 .size = sizeof(__u64), 1528 .align = sizeof(__u64), 1529 .regset_get = s390_gs_cb_get, 1530 .set = s390_gs_cb_set, 1531 }, 1532 { 1533 .core_note_type = NT_S390_GS_BC, 1534 .n = sizeof(struct gs_cb) / sizeof(__u64), 1535 .size = sizeof(__u64), 1536 .align = sizeof(__u64), 1537 .regset_get = s390_gs_bc_get, 1538 .set = s390_gs_bc_set, 1539 }, 1540 { 1541 .core_note_type = NT_S390_RI_CB, 1542 .n = sizeof(struct runtime_instr_cb) / sizeof(__u64), 1543 .size = sizeof(__u64), 1544 .align = sizeof(__u64), 1545 .regset_get = s390_runtime_instr_get, 1546 .set = s390_runtime_instr_set, 1547 }, 1548 }; 1549 1550 static const struct user_regset_view user_s390_compat_view = { 1551 .name = "s390", 1552 .e_machine = EM_S390, 1553 .regsets = s390_compat_regsets, 1554 .n = ARRAY_SIZE(s390_compat_regsets) 1555 }; 1556 #endif 1557 1558 const struct user_regset_view *task_user_regset_view(struct task_struct *task) 1559 { 1560 #ifdef CONFIG_COMPAT 1561 if (test_tsk_thread_flag(task, TIF_31BIT)) 1562 return &user_s390_compat_view; 1563 #endif 1564 return &user_s390_view; 1565 } 1566 1567 static const char *gpr_names[NUM_GPRS] = { 1568 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", 1569 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", 1570 }; 1571 1572 unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset) 1573 { 1574 if (offset >= NUM_GPRS) 1575 return 0; 1576 return regs->gprs[offset]; 1577 } 1578 1579 int regs_query_register_offset(const char *name) 1580 { 1581 unsigned long offset; 1582 1583 if (!name || *name != 'r') 1584 return -EINVAL; 1585 if (kstrtoul(name + 1, 10, &offset)) 1586 return -EINVAL; 1587 if (offset >= NUM_GPRS) 1588 return -EINVAL; 1589 return offset; 1590 } 1591 1592 const char *regs_query_register_name(unsigned int offset) 1593 
{ 1594 if (offset >= NUM_GPRS) 1595 return NULL; 1596 return gpr_names[offset]; 1597 } 1598 1599 static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr) 1600 { 1601 unsigned long ksp = kernel_stack_pointer(regs); 1602 1603 return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1)); 1604 } 1605 1606 /** 1607 * regs_get_kernel_stack_nth() - get Nth entry of the stack 1608 * @regs:pt_regs which contains kernel stack pointer. 1609 * @n:stack entry number. 1610 * 1611 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which 1612 * is specifined by @regs. If the @n th entry is NOT in the kernel stack, 1613 * this returns 0. 1614 */ 1615 unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n) 1616 { 1617 unsigned long addr; 1618 1619 addr = kernel_stack_pointer(regs) + n * sizeof(long); 1620 if (!regs_within_kernel_stack(regs, addr)) 1621 return 0; 1622 return *(unsigned long *)addr; 1623 } 1624