1 /* 2 * S/390 memory access helper routines 3 * 4 * Copyright (c) 2009 Ulrich Hecht 5 * Copyright (c) 2009 Alexander Graf 6 * 7 * This library is free software; you can redistribute it and/or 8 * modify it under the terms of the GNU Lesser General Public 9 * License as published by the Free Software Foundation; either 10 * version 2.1 of the License, or (at your option) any later version. 11 * 12 * This library is distributed in the hope that it will be useful, 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 * Lesser General Public License for more details. 16 * 17 * You should have received a copy of the GNU Lesser General Public 18 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 19 */ 20 21 #include "qemu/osdep.h" 22 #include "qemu/log.h" 23 #include "cpu.h" 24 #include "s390x-internal.h" 25 #include "tcg_s390x.h" 26 #include "exec/helper-proto.h" 27 #include "exec/exec-all.h" 28 #include "exec/page-protection.h" 29 #include "exec/cpu_ldst.h" 30 #include "hw/core/tcg-cpu-ops.h" 31 #include "qemu/int128.h" 32 #include "qemu/atomic128.h" 33 34 #if !defined(CONFIG_USER_ONLY) 35 #include "hw/s390x/storage-keys.h" 36 #include "hw/boards.h" 37 #endif 38 39 #ifdef CONFIG_USER_ONLY 40 # define user_or_likely(X) true 41 #else 42 # define user_or_likely(X) likely(X) 43 #endif 44 45 /*****************************************************************************/ 46 /* Softmmu support */ 47 48 /* #define DEBUG_HELPER */ 49 #ifdef DEBUG_HELPER 50 #define HELPER_LOG(x...) qemu_log(x) 51 #else 52 #define HELPER_LOG(x...) 53 #endif 54 55 static inline bool psw_key_valid(CPUS390XState *env, uint8_t psw_key) 56 { 57 uint16_t pkm = env->cregs[3] >> 16; 58 59 if (env->psw.mask & PSW_MASK_PSTATE) { 60 /* PSW key has range 0..15, it is valid if the bit is 1 in the PKM */ 61 return pkm & (0x8000 >> psw_key); 62 } 63 return true; 64 } 65 66 static bool is_destructive_overlap(CPUS390XState *env, uint64_t dest, 67 uint64_t src, uint32_t len) 68 { 69 if (!len || src == dest) { 70 return false; 71 } 72 /* Take care of wrapping at the end of address space. */ 73 if (unlikely(wrap_address(env, src + len - 1) < src)) { 74 return dest > src || dest <= wrap_address(env, src + len - 1); 75 } 76 return dest > src && dest <= src + len - 1; 77 } 78 79 /* Trigger a SPECIFICATION exception if an address or a length is not 80 naturally aligned. */ 81 static inline void check_alignment(CPUS390XState *env, uint64_t v, 82 int wordsize, uintptr_t ra) 83 { 84 if (v % wordsize) { 85 tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); 86 } 87 } 88 89 /* Load a value from memory according to its size. */ 90 static inline uint64_t cpu_ldusize_data_ra(CPUS390XState *env, uint64_t addr, 91 int wordsize, uintptr_t ra) 92 { 93 switch (wordsize) { 94 case 1: 95 return cpu_ldub_data_ra(env, addr, ra); 96 case 2: 97 return cpu_lduw_data_ra(env, addr, ra); 98 default: 99 abort(); 100 } 101 } 102 103 /* Store a to memory according to its size. */ 104 static inline void cpu_stsize_data_ra(CPUS390XState *env, uint64_t addr, 105 uint64_t value, int wordsize, 106 uintptr_t ra) 107 { 108 switch (wordsize) { 109 case 1: 110 cpu_stb_data_ra(env, addr, value, ra); 111 break; 112 case 2: 113 cpu_stw_data_ra(env, addr, value, ra); 114 break; 115 default: 116 abort(); 117 } 118 } 119 120 /* An access covers at most 4096 bytes and therefore at most two pages. */ 121 typedef struct S390Access { 122 target_ulong vaddr1; 123 target_ulong vaddr2; 124 void *haddr1; 125 void *haddr2; 126 uint16_t size1; 127 uint16_t size2; 128 /* 129 * If we can't access the host page directly, we'll have to do I/O access 130 * via ld/st helpers. These are internal details, so we store the 131 * mmu idx to do the access here instead of passing it around in the 132 * helpers. 133 */ 134 int mmu_idx; 135 } S390Access; 136 137 /* 138 * With nonfault=1, return the PGM_ exception that would have been injected 139 * into the guest; return 0 if no exception was detected. 140 * 141 * For !CONFIG_USER_ONLY, the TEC is stored stored to env->tlb_fill_tec. 142 * For CONFIG_USER_ONLY, the faulting address is stored to env->__excp_addr. 143 */ 144 static inline int s390_probe_access(CPUArchState *env, target_ulong addr, 145 int size, MMUAccessType access_type, 146 int mmu_idx, bool nonfault, 147 void **phost, uintptr_t ra) 148 { 149 int flags = probe_access_flags(env, addr, 0, access_type, mmu_idx, 150 nonfault, phost, ra); 151 152 if (unlikely(flags & TLB_INVALID_MASK)) { 153 #ifdef CONFIG_USER_ONLY 154 /* Address is in TEC in system mode; see s390_cpu_record_sigsegv. */ 155 env->__excp_addr = addr & TARGET_PAGE_MASK; 156 return (page_get_flags(addr) & PAGE_VALID 157 ? PGM_PROTECTION : PGM_ADDRESSING); 158 #else 159 return env->tlb_fill_exc; 160 #endif 161 } 162 163 #ifndef CONFIG_USER_ONLY 164 if (unlikely(flags & TLB_WATCHPOINT)) { 165 /* S390 does not presently use transaction attributes. */ 166 cpu_check_watchpoint(env_cpu(env), addr, size, 167 MEMTXATTRS_UNSPECIFIED, 168 (access_type == MMU_DATA_STORE 169 ? BP_MEM_WRITE : BP_MEM_READ), ra); 170 } 171 #endif 172 173 return 0; 174 } 175 176 static int access_prepare_nf(S390Access *access, CPUS390XState *env, 177 bool nonfault, vaddr vaddr1, int size, 178 MMUAccessType access_type, 179 int mmu_idx, uintptr_t ra) 180 { 181 int size1, size2, exc; 182 183 assert(size > 0 && size <= 4096); 184 185 size1 = MIN(size, -(vaddr1 | TARGET_PAGE_MASK)), 186 size2 = size - size1; 187 188 memset(access, 0, sizeof(*access)); 189 access->vaddr1 = vaddr1; 190 access->size1 = size1; 191 access->size2 = size2; 192 access->mmu_idx = mmu_idx; 193 194 exc = s390_probe_access(env, vaddr1, size1, access_type, mmu_idx, nonfault, 195 &access->haddr1, ra); 196 if (unlikely(exc)) { 197 return exc; 198 } 199 if (unlikely(size2)) { 200 /* The access crosses page boundaries. */ 201 vaddr vaddr2 = wrap_address(env, vaddr1 + size1); 202 203 access->vaddr2 = vaddr2; 204 exc = s390_probe_access(env, vaddr2, size2, access_type, mmu_idx, 205 nonfault, &access->haddr2, ra); 206 if (unlikely(exc)) { 207 return exc; 208 } 209 } 210 return 0; 211 } 212 213 static inline void access_prepare(S390Access *ret, CPUS390XState *env, 214 vaddr vaddr, int size, 215 MMUAccessType access_type, int mmu_idx, 216 uintptr_t ra) 217 { 218 int exc = access_prepare_nf(ret, env, false, vaddr, size, 219 access_type, mmu_idx, ra); 220 assert(!exc); 221 } 222 223 /* Helper to handle memset on a single page. */ 224 static void do_access_memset(CPUS390XState *env, vaddr vaddr, char *haddr, 225 uint8_t byte, uint16_t size, int mmu_idx, 226 uintptr_t ra) 227 { 228 if (user_or_likely(haddr)) { 229 memset(haddr, byte, size); 230 } else { 231 MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); 232 for (int i = 0; i < size; i++) { 233 cpu_stb_mmu(env, vaddr + i, byte, oi, ra); 234 } 235 } 236 } 237 238 static void access_memset(CPUS390XState *env, S390Access *desta, 239 uint8_t byte, uintptr_t ra) 240 { 241 242 do_access_memset(env, desta->vaddr1, desta->haddr1, byte, desta->size1, 243 desta->mmu_idx, ra); 244 if (likely(!desta->size2)) { 245 return; 246 } 247 do_access_memset(env, desta->vaddr2, desta->haddr2, byte, desta->size2, 248 desta->mmu_idx, ra); 249 } 250 251 static uint8_t access_get_byte(CPUS390XState *env, S390Access *access, 252 int offset, uintptr_t ra) 253 { 254 target_ulong vaddr = access->vaddr1; 255 void *haddr = access->haddr1; 256 257 if (unlikely(offset >= access->size1)) { 258 offset -= access->size1; 259 vaddr = access->vaddr2; 260 haddr = access->haddr2; 261 } 262 263 if (user_or_likely(haddr)) { 264 return ldub_p(haddr + offset); 265 } else { 266 MemOpIdx oi = make_memop_idx(MO_UB, access->mmu_idx); 267 return cpu_ldb_mmu(env, vaddr + offset, oi, ra); 268 } 269 } 270 271 static void access_set_byte(CPUS390XState *env, S390Access *access, 272 int offset, uint8_t byte, uintptr_t ra) 273 { 274 target_ulong vaddr = access->vaddr1; 275 void *haddr = access->haddr1; 276 277 if (unlikely(offset >= access->size1)) { 278 offset -= access->size1; 279 vaddr = access->vaddr2; 280 haddr = access->haddr2; 281 } 282 283 if (user_or_likely(haddr)) { 284 stb_p(haddr + offset, byte); 285 } else { 286 MemOpIdx oi = make_memop_idx(MO_UB, access->mmu_idx); 287 cpu_stb_mmu(env, vaddr + offset, byte, oi, ra); 288 } 289 } 290 291 /* 292 * Move data with the same semantics as memmove() in case ranges don't overlap 293 * or src > dest. Undefined behavior on destructive overlaps. 294 */ 295 static void access_memmove(CPUS390XState *env, S390Access *desta, 296 S390Access *srca, uintptr_t ra) 297 { 298 int len = desta->size1 + desta->size2; 299 300 assert(len == srca->size1 + srca->size2); 301 302 /* Fallback to slow access in case we don't have access to all host pages */ 303 if (user_or_likely(desta->haddr1 && 304 srca->haddr1 && 305 (!desta->size2 || desta->haddr2) && 306 (!srca->size2 || srca->haddr2))) { 307 int diff = desta->size1 - srca->size1; 308 309 if (likely(diff == 0)) { 310 memmove(desta->haddr1, srca->haddr1, srca->size1); 311 if (unlikely(srca->size2)) { 312 memmove(desta->haddr2, srca->haddr2, srca->size2); 313 } 314 } else if (diff > 0) { 315 memmove(desta->haddr1, srca->haddr1, srca->size1); 316 memmove(desta->haddr1 + srca->size1, srca->haddr2, diff); 317 if (likely(desta->size2)) { 318 memmove(desta->haddr2, srca->haddr2 + diff, desta->size2); 319 } 320 } else { 321 diff = -diff; 322 memmove(desta->haddr1, srca->haddr1, desta->size1); 323 memmove(desta->haddr2, srca->haddr1 + desta->size1, diff); 324 if (likely(srca->size2)) { 325 memmove(desta->haddr2 + diff, srca->haddr2, srca->size2); 326 } 327 } 328 } else { 329 for (int i = 0; i < len; i++) { 330 uint8_t byte = access_get_byte(env, srca, i, ra); 331 access_set_byte(env, desta, i, byte, ra); 332 } 333 } 334 } 335 336 static int mmu_idx_from_as(uint8_t as) 337 { 338 switch (as) { 339 case AS_PRIMARY: 340 return MMU_PRIMARY_IDX; 341 case AS_SECONDARY: 342 return MMU_SECONDARY_IDX; 343 case AS_HOME: 344 return MMU_HOME_IDX; 345 default: 346 /* FIXME AS_ACCREG */ 347 g_assert_not_reached(); 348 } 349 } 350 351 /* and on array */ 352 static uint32_t do_helper_nc(CPUS390XState *env, uint32_t l, uint64_t dest, 353 uint64_t src, uintptr_t ra) 354 { 355 const int mmu_idx = s390x_env_mmu_index(env, false); 356 S390Access srca1, srca2, desta; 357 uint32_t i; 358 uint8_t c = 0; 359 360 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n", 361 __func__, l, dest, src); 362 363 /* NC always processes one more byte than specified - maximum is 256 */ 364 l++; 365 366 access_prepare(&srca1, env, src, l, MMU_DATA_LOAD, mmu_idx, ra); 367 access_prepare(&srca2, env, dest, l, MMU_DATA_LOAD, mmu_idx, ra); 368 access_prepare(&desta, env, dest, l, MMU_DATA_STORE, mmu_idx, ra); 369 for (i = 0; i < l; i++) { 370 const uint8_t x = access_get_byte(env, &srca1, i, ra) & 371 access_get_byte(env, &srca2, i, ra); 372 373 c |= x; 374 access_set_byte(env, &desta, i, x, ra); 375 } 376 return c != 0; 377 } 378 379 uint32_t HELPER(nc)(CPUS390XState *env, uint32_t l, uint64_t dest, 380 uint64_t src) 381 { 382 return do_helper_nc(env, l, dest, src, GETPC()); 383 } 384 385 /* xor on array */ 386 static uint32_t do_helper_xc(CPUS390XState *env, uint32_t l, uint64_t dest, 387 uint64_t src, uintptr_t ra) 388 { 389 const int mmu_idx = s390x_env_mmu_index(env, false); 390 S390Access srca1, srca2, desta; 391 uint32_t i; 392 uint8_t c = 0; 393 394 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n", 395 __func__, l, dest, src); 396 397 /* XC always processes one more byte than specified - maximum is 256 */ 398 l++; 399 400 access_prepare(&srca1, env, src, l, MMU_DATA_LOAD, mmu_idx, ra); 401 access_prepare(&srca2, env, dest, l, MMU_DATA_LOAD, mmu_idx, ra); 402 access_prepare(&desta, env, dest, l, MMU_DATA_STORE, mmu_idx, ra); 403 404 /* xor with itself is the same as memset(0) */ 405 if (src == dest) { 406 access_memset(env, &desta, 0, ra); 407 return 0; 408 } 409 410 for (i = 0; i < l; i++) { 411 const uint8_t x = access_get_byte(env, &srca1, i, ra) ^ 412 access_get_byte(env, &srca2, i, ra); 413 414 c |= x; 415 access_set_byte(env, &desta, i, x, ra); 416 } 417 return c != 0; 418 } 419 420 uint32_t HELPER(xc)(CPUS390XState *env, uint32_t l, uint64_t dest, 421 uint64_t src) 422 { 423 return do_helper_xc(env, l, dest, src, GETPC()); 424 } 425 426 /* or on array */ 427 static uint32_t do_helper_oc(CPUS390XState *env, uint32_t l, uint64_t dest, 428 uint64_t src, uintptr_t ra) 429 { 430 const int mmu_idx = s390x_env_mmu_index(env, false); 431 S390Access srca1, srca2, desta; 432 uint32_t i; 433 uint8_t c = 0; 434 435 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n", 436 __func__, l, dest, src); 437 438 /* OC always processes one more byte than specified - maximum is 256 */ 439 l++; 440 441 access_prepare(&srca1, env, src, l, MMU_DATA_LOAD, mmu_idx, ra); 442 access_prepare(&srca2, env, dest, l, MMU_DATA_LOAD, mmu_idx, ra); 443 access_prepare(&desta, env, dest, l, MMU_DATA_STORE, mmu_idx, ra); 444 for (i = 0; i < l; i++) { 445 const uint8_t x = access_get_byte(env, &srca1, i, ra) | 446 access_get_byte(env, &srca2, i, ra); 447 448 c |= x; 449 access_set_byte(env, &desta, i, x, ra); 450 } 451 return c != 0; 452 } 453 454 uint32_t HELPER(oc)(CPUS390XState *env, uint32_t l, uint64_t dest, 455 uint64_t src) 456 { 457 return do_helper_oc(env, l, dest, src, GETPC()); 458 } 459 460 /* memmove */ 461 static uint32_t do_helper_mvc(CPUS390XState *env, uint32_t l, uint64_t dest, 462 uint64_t src, uintptr_t ra) 463 { 464 const int mmu_idx = s390x_env_mmu_index(env, false); 465 S390Access srca, desta; 466 uint32_t i; 467 468 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n", 469 __func__, l, dest, src); 470 471 /* MVC always copies one more byte than specified - maximum is 256 */ 472 l++; 473 474 access_prepare(&srca, env, src, l, MMU_DATA_LOAD, mmu_idx, ra); 475 access_prepare(&desta, env, dest, l, MMU_DATA_STORE, mmu_idx, ra); 476 477 /* 478 * "When the operands overlap, the result is obtained as if the operands 479 * were processed one byte at a time". Only non-destructive overlaps 480 * behave like memmove(). 481 */ 482 if (dest == src + 1) { 483 access_memset(env, &desta, access_get_byte(env, &srca, 0, ra), ra); 484 } else if (!is_destructive_overlap(env, dest, src, l)) { 485 access_memmove(env, &desta, &srca, ra); 486 } else { 487 for (i = 0; i < l; i++) { 488 uint8_t byte = access_get_byte(env, &srca, i, ra); 489 490 access_set_byte(env, &desta, i, byte, ra); 491 } 492 } 493 494 return env->cc_op; 495 } 496 497 void HELPER(mvc)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src) 498 { 499 do_helper_mvc(env, l, dest, src, GETPC()); 500 } 501 502 /* move right to left */ 503 void HELPER(mvcrl)(CPUS390XState *env, uint64_t l, uint64_t dest, uint64_t src) 504 { 505 const int mmu_idx = s390x_env_mmu_index(env, false); 506 const uint64_t ra = GETPC(); 507 S390Access srca, desta; 508 int32_t i; 509 510 /* MVCRL always copies one more byte than specified - maximum is 256 */ 511 l &= 0xff; 512 l++; 513 514 access_prepare(&srca, env, src, l, MMU_DATA_LOAD, mmu_idx, ra); 515 access_prepare(&desta, env, dest, l, MMU_DATA_STORE, mmu_idx, ra); 516 517 for (i = l - 1; i >= 0; i--) { 518 uint8_t byte = access_get_byte(env, &srca, i, ra); 519 access_set_byte(env, &desta, i, byte, ra); 520 } 521 } 522 523 /* move inverse */ 524 void HELPER(mvcin)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src) 525 { 526 const int mmu_idx = s390x_env_mmu_index(env, false); 527 S390Access srca, desta; 528 uintptr_t ra = GETPC(); 529 int i; 530 531 /* MVCIN always copies one more byte than specified - maximum is 256 */ 532 l++; 533 534 src = wrap_address(env, src - l + 1); 535 access_prepare(&srca, env, src, l, MMU_DATA_LOAD, mmu_idx, ra); 536 access_prepare(&desta, env, dest, l, MMU_DATA_STORE, mmu_idx, ra); 537 for (i = 0; i < l; i++) { 538 const uint8_t x = access_get_byte(env, &srca, l - i - 1, ra); 539 540 access_set_byte(env, &desta, i, x, ra); 541 } 542 } 543 544 /* move numerics */ 545 void HELPER(mvn)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src) 546 { 547 const int mmu_idx = s390x_env_mmu_index(env, false); 548 S390Access srca1, srca2, desta; 549 uintptr_t ra = GETPC(); 550 int i; 551 552 /* MVN always copies one more byte than specified - maximum is 256 */ 553 l++; 554 555 access_prepare(&srca1, env, src, l, MMU_DATA_LOAD, mmu_idx, ra); 556 access_prepare(&srca2, env, dest, l, MMU_DATA_LOAD, mmu_idx, ra); 557 access_prepare(&desta, env, dest, l, MMU_DATA_STORE, mmu_idx, ra); 558 for (i = 0; i < l; i++) { 559 const uint8_t x = (access_get_byte(env, &srca1, i, ra) & 0x0f) | 560 (access_get_byte(env, &srca2, i, ra) & 0xf0); 561 562 access_set_byte(env, &desta, i, x, ra); 563 } 564 } 565 566 /* move with offset */ 567 void HELPER(mvo)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src) 568 { 569 const int mmu_idx = s390x_env_mmu_index(env, false); 570 /* MVO always processes one more byte than specified - maximum is 16 */ 571 const int len_dest = (l >> 4) + 1; 572 const int len_src = (l & 0xf) + 1; 573 uintptr_t ra = GETPC(); 574 uint8_t byte_dest, byte_src; 575 S390Access srca, desta; 576 int i, j; 577 578 access_prepare(&srca, env, src, len_src, MMU_DATA_LOAD, mmu_idx, ra); 579 access_prepare(&desta, env, dest, len_dest, MMU_DATA_STORE, mmu_idx, ra); 580 581 /* Handle rightmost byte */ 582 byte_dest = cpu_ldub_data_ra(env, dest + len_dest - 1, ra); 583 byte_src = access_get_byte(env, &srca, len_src - 1, ra); 584 byte_dest = (byte_dest & 0x0f) | (byte_src << 4); 585 access_set_byte(env, &desta, len_dest - 1, byte_dest, ra); 586 587 /* Process remaining bytes from right to left */ 588 for (i = len_dest - 2, j = len_src - 2; i >= 0; i--, j--) { 589 byte_dest = byte_src >> 4; 590 if (j >= 0) { 591 byte_src = access_get_byte(env, &srca, j, ra); 592 } else { 593 byte_src = 0; 594 } 595 byte_dest |= byte_src << 4; 596 access_set_byte(env, &desta, i, byte_dest, ra); 597 } 598 } 599 600 /* move zones */ 601 void HELPER(mvz)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src) 602 { 603 const int mmu_idx = s390x_env_mmu_index(env, false); 604 S390Access srca1, srca2, desta; 605 uintptr_t ra = GETPC(); 606 int i; 607 608 /* MVZ always copies one more byte than specified - maximum is 256 */ 609 l++; 610 611 access_prepare(&srca1, env, src, l, MMU_DATA_LOAD, mmu_idx, ra); 612 access_prepare(&srca2, env, dest, l, MMU_DATA_LOAD, mmu_idx, ra); 613 access_prepare(&desta, env, dest, l, MMU_DATA_STORE, mmu_idx, ra); 614 for (i = 0; i < l; i++) { 615 const uint8_t x = (access_get_byte(env, &srca1, i, ra) & 0xf0) | 616 (access_get_byte(env, &srca2, i, ra) & 0x0f); 617 618 access_set_byte(env, &desta, i, x, ra); 619 } 620 } 621 622 /* compare unsigned byte arrays */ 623 static uint32_t do_helper_clc(CPUS390XState *env, uint32_t l, uint64_t s1, 624 uint64_t s2, uintptr_t ra) 625 { 626 uint32_t i; 627 uint32_t cc = 0; 628 629 HELPER_LOG("%s l %d s1 %" PRIx64 " s2 %" PRIx64 "\n", 630 __func__, l, s1, s2); 631 632 for (i = 0; i <= l; i++) { 633 uint8_t x = cpu_ldub_data_ra(env, s1 + i, ra); 634 uint8_t y = cpu_ldub_data_ra(env, s2 + i, ra); 635 HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y); 636 if (x < y) { 637 cc = 1; 638 break; 639 } else if (x > y) { 640 cc = 2; 641 break; 642 } 643 } 644 645 HELPER_LOG("\n"); 646 return cc; 647 } 648 649 uint32_t HELPER(clc)(CPUS390XState *env, uint32_t l, uint64_t s1, uint64_t s2) 650 { 651 return do_helper_clc(env, l, s1, s2, GETPC()); 652 } 653 654 /* compare logical under mask */ 655 uint32_t HELPER(clm)(CPUS390XState *env, uint32_t r1, uint32_t mask, 656 uint64_t addr) 657 { 658 uintptr_t ra = GETPC(); 659 uint32_t cc = 0; 660 661 HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64 "\n", __func__, r1, 662 mask, addr); 663 664 if (!mask) { 665 /* Recognize access exceptions for the first byte */ 666 probe_read(env, addr, 1, s390x_env_mmu_index(env, false), ra); 667 } 668 669 while (mask) { 670 if (mask & 8) { 671 uint8_t d = cpu_ldub_data_ra(env, addr, ra); 672 uint8_t r = extract32(r1, 24, 8); 673 HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64 ") ", mask, r, d, 674 addr); 675 if (r < d) { 676 cc = 1; 677 break; 678 } else if (r > d) { 679 cc = 2; 680 break; 681 } 682 addr++; 683 } 684 mask = (mask << 1) & 0xf; 685 r1 <<= 8; 686 } 687 688 HELPER_LOG("\n"); 689 return cc; 690 } 691 692 static inline uint64_t get_address(CPUS390XState *env, int reg) 693 { 694 return wrap_address(env, env->regs[reg]); 695 } 696 697 /* 698 * Store the address to the given register, zeroing out unused leftmost 699 * bits in bit positions 32-63 (24-bit and 31-bit mode only). 700 */ 701 static inline void set_address_zero(CPUS390XState *env, int reg, 702 uint64_t address) 703 { 704 if (env->psw.mask & PSW_MASK_64) { 705 env->regs[reg] = address; 706 } else { 707 if (!(env->psw.mask & PSW_MASK_32)) { 708 address &= 0x00ffffff; 709 } else { 710 address &= 0x7fffffff; 711 } 712 env->regs[reg] = deposit64(env->regs[reg], 0, 32, address); 713 } 714 } 715 716 static inline void set_address(CPUS390XState *env, int reg, uint64_t address) 717 { 718 if (env->psw.mask & PSW_MASK_64) { 719 /* 64-Bit mode */ 720 env->regs[reg] = address; 721 } else { 722 if (!(env->psw.mask & PSW_MASK_32)) { 723 /* 24-Bit mode. According to the PoO it is implementation 724 dependent if bits 32-39 remain unchanged or are set to 725 zeros. Choose the former so that the function can also be 726 used for TRT. */ 727 env->regs[reg] = deposit64(env->regs[reg], 0, 24, address); 728 } else { 729 /* 31-Bit mode. According to the PoO it is implementation 730 dependent if bit 32 remains unchanged or is set to zero. 731 Choose the latter so that the function can also be used for 732 TRT. */ 733 address &= 0x7fffffff; 734 env->regs[reg] = deposit64(env->regs[reg], 0, 32, address); 735 } 736 } 737 } 738 739 static inline uint64_t wrap_length32(CPUS390XState *env, uint64_t length) 740 { 741 if (!(env->psw.mask & PSW_MASK_64)) { 742 return (uint32_t)length; 743 } 744 return length; 745 } 746 747 static inline uint64_t wrap_length31(CPUS390XState *env, uint64_t length) 748 { 749 if (!(env->psw.mask & PSW_MASK_64)) { 750 /* 24-Bit and 31-Bit mode */ 751 length &= 0x7fffffff; 752 } 753 return length; 754 } 755 756 static inline uint64_t get_length(CPUS390XState *env, int reg) 757 { 758 return wrap_length31(env, env->regs[reg]); 759 } 760 761 static inline void set_length(CPUS390XState *env, int reg, uint64_t length) 762 { 763 if (env->psw.mask & PSW_MASK_64) { 764 /* 64-Bit mode */ 765 env->regs[reg] = length; 766 } else { 767 /* 24-Bit and 31-Bit mode */ 768 env->regs[reg] = deposit64(env->regs[reg], 0, 32, length); 769 } 770 } 771 772 /* search string (c is byte to search, r2 is string, r1 end of string) */ 773 void HELPER(srst)(CPUS390XState *env, uint32_t r1, uint32_t r2) 774 { 775 uintptr_t ra = GETPC(); 776 uint64_t end, str; 777 uint32_t len; 778 uint8_t v, c = env->regs[0]; 779 780 /* Bits 32-55 must contain all 0. */ 781 if (env->regs[0] & 0xffffff00u) { 782 tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); 783 } 784 785 str = get_address(env, r2); 786 end = get_address(env, r1); 787 788 /* Lest we fail to service interrupts in a timely manner, limit the 789 amount of work we're willing to do. For now, let's cap at 8k. */ 790 for (len = 0; len < 0x2000; ++len) { 791 if (str + len == end) { 792 /* Character not found. R1 & R2 are unmodified. */ 793 env->cc_op = 2; 794 return; 795 } 796 v = cpu_ldub_data_ra(env, str + len, ra); 797 if (v == c) { 798 /* Character found. Set R1 to the location; R2 is unmodified. */ 799 env->cc_op = 1; 800 set_address(env, r1, str + len); 801 return; 802 } 803 } 804 805 /* CPU-determined bytes processed. Advance R2 to next byte to process. */ 806 env->cc_op = 3; 807 set_address(env, r2, str + len); 808 } 809 810 void HELPER(srstu)(CPUS390XState *env, uint32_t r1, uint32_t r2) 811 { 812 uintptr_t ra = GETPC(); 813 uint32_t len; 814 uint16_t v, c = env->regs[0]; 815 uint64_t end, str, adj_end; 816 817 /* Bits 32-47 of R0 must be zero. */ 818 if (env->regs[0] & 0xffff0000u) { 819 tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); 820 } 821 822 str = get_address(env, r2); 823 end = get_address(env, r1); 824 825 /* If the LSB of the two addresses differ, use one extra byte. */ 826 adj_end = end + ((str ^ end) & 1); 827 828 /* Lest we fail to service interrupts in a timely manner, limit the 829 amount of work we're willing to do. For now, let's cap at 8k. */ 830 for (len = 0; len < 0x2000; len += 2) { 831 if (str + len == adj_end) { 832 /* End of input found. */ 833 env->cc_op = 2; 834 return; 835 } 836 v = cpu_lduw_data_ra(env, str + len, ra); 837 if (v == c) { 838 /* Character found. Set R1 to the location; R2 is unmodified. */ 839 env->cc_op = 1; 840 set_address(env, r1, str + len); 841 return; 842 } 843 } 844 845 /* CPU-determined bytes processed. Advance R2 to next byte to process. */ 846 env->cc_op = 3; 847 set_address(env, r2, str + len); 848 } 849 850 /* unsigned string compare (c is string terminator) */ 851 Int128 HELPER(clst)(CPUS390XState *env, uint64_t c, uint64_t s1, uint64_t s2) 852 { 853 uintptr_t ra = GETPC(); 854 uint32_t len; 855 856 c = c & 0xff; 857 s1 = wrap_address(env, s1); 858 s2 = wrap_address(env, s2); 859 860 /* Lest we fail to service interrupts in a timely manner, limit the 861 amount of work we're willing to do. For now, let's cap at 8k. */ 862 for (len = 0; len < 0x2000; ++len) { 863 uint8_t v1 = cpu_ldub_data_ra(env, s1 + len, ra); 864 uint8_t v2 = cpu_ldub_data_ra(env, s2 + len, ra); 865 if (v1 == v2) { 866 if (v1 == c) { 867 /* Equal. CC=0, and don't advance the registers. */ 868 env->cc_op = 0; 869 return int128_make128(s2, s1); 870 } 871 } else { 872 /* Unequal. CC={1,2}, and advance the registers. Note that 873 the terminator need not be zero, but the string that contains 874 the terminator is by definition "low". */ 875 env->cc_op = (v1 == c ? 1 : v2 == c ? 2 : v1 < v2 ? 1 : 2); 876 return int128_make128(s2 + len, s1 + len); 877 } 878 } 879 880 /* CPU-determined bytes equal; advance the registers. */ 881 env->cc_op = 3; 882 return int128_make128(s2 + len, s1 + len); 883 } 884 885 /* move page */ 886 uint32_t HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint32_t r1, uint32_t r2) 887 { 888 const uint64_t src = get_address(env, r2) & TARGET_PAGE_MASK; 889 const uint64_t dst = get_address(env, r1) & TARGET_PAGE_MASK; 890 const int mmu_idx = s390x_env_mmu_index(env, false); 891 const bool f = extract64(r0, 11, 1); 892 const bool s = extract64(r0, 10, 1); 893 const bool cco = extract64(r0, 8, 1); 894 uintptr_t ra = GETPC(); 895 S390Access srca, desta; 896 int exc; 897 898 if ((f && s) || extract64(r0, 12, 4)) { 899 tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC()); 900 } 901 902 /* 903 * We always manually handle exceptions such that we can properly store 904 * r1/r2 to the lowcore on page-translation exceptions. 905 * 906 * TODO: Access key handling 907 */ 908 exc = access_prepare_nf(&srca, env, true, src, TARGET_PAGE_SIZE, 909 MMU_DATA_LOAD, mmu_idx, ra); 910 if (exc) { 911 if (cco) { 912 return 2; 913 } 914 goto inject_exc; 915 } 916 exc = access_prepare_nf(&desta, env, true, dst, TARGET_PAGE_SIZE, 917 MMU_DATA_STORE, mmu_idx, ra); 918 if (exc) { 919 if (cco && exc != PGM_PROTECTION) { 920 return 1; 921 } 922 goto inject_exc; 923 } 924 access_memmove(env, &desta, &srca, ra); 925 return 0; /* data moved */ 926 inject_exc: 927 #if !defined(CONFIG_USER_ONLY) 928 if (exc != PGM_ADDRESSING) { 929 stq_phys(env_cpu(env)->as, env->psa + offsetof(LowCore, trans_exc_code), 930 env->tlb_fill_tec); 931 } 932 if (exc == PGM_PAGE_TRANS) { 933 stb_phys(env_cpu(env)->as, env->psa + offsetof(LowCore, op_access_id), 934 r1 << 4 | r2); 935 } 936 #endif 937 tcg_s390_program_interrupt(env, exc, ra); 938 } 939 940 /* string copy */ 941 uint32_t HELPER(mvst)(CPUS390XState *env, uint32_t r1, uint32_t r2) 942 { 943 const int mmu_idx = s390x_env_mmu_index(env, false); 944 const uint64_t d = get_address(env, r1); 945 const uint64_t s = get_address(env, r2); 946 const uint8_t c = env->regs[0]; 947 const int len = MIN(-(d | TARGET_PAGE_MASK), -(s | TARGET_PAGE_MASK)); 948 S390Access srca, desta; 949 uintptr_t ra = GETPC(); 950 int i; 951 952 if (env->regs[0] & 0xffffff00ull) { 953 tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); 954 } 955 956 /* 957 * Our access should not exceed single pages, as we must not report access 958 * exceptions exceeding the actually copied range (which we don't know at 959 * this point). We might over-indicate watchpoints within the pages 960 * (if we ever care, we have to limit processing to a single byte). 961 */ 962 access_prepare(&srca, env, s, len, MMU_DATA_LOAD, mmu_idx, ra); 963 access_prepare(&desta, env, d, len, MMU_DATA_STORE, mmu_idx, ra); 964 for (i = 0; i < len; i++) { 965 const uint8_t v = access_get_byte(env, &srca, i, ra); 966 967 access_set_byte(env, &desta, i, v, ra); 968 if (v == c) { 969 set_address_zero(env, r1, d + i); 970 return 1; 971 } 972 } 973 set_address_zero(env, r1, d + len); 974 set_address_zero(env, r2, s + len); 975 return 3; 976 } 977 978 /* load access registers r1 to r3 from memory at a2 */ 979 void HELPER(lam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3) 980 { 981 uintptr_t ra = GETPC(); 982 int i; 983 984 if (a2 & 0x3) { 985 tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); 986 } 987 988 for (i = r1;; i = (i + 1) % 16) { 989 env->aregs[i] = cpu_ldl_data_ra(env, a2, ra); 990 a2 += 4; 991 992 if (i == r3) { 993 break; 994 } 995 } 996 } 997 998 /* store access registers r1 to r3 in memory at a2 */ 999 void HELPER(stam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3) 1000 { 1001 uintptr_t ra = GETPC(); 1002 int i; 1003 1004 if (a2 & 0x3) { 1005 tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); 1006 } 1007 1008 for (i = r1;; i = (i + 1) % 16) { 1009 cpu_stl_data_ra(env, a2, env->aregs[i], ra); 1010 a2 += 4; 1011 1012 if (i == r3) { 1013 break; 1014 } 1015 } 1016 } 1017 1018 /* move long helper */ 1019 static inline uint32_t do_mvcl(CPUS390XState *env, 1020 uint64_t *dest, uint64_t *destlen, 1021 uint64_t *src, uint64_t *srclen, 1022 uint16_t pad, int wordsize, uintptr_t ra) 1023 { 1024 const int mmu_idx = s390x_env_mmu_index(env, false); 1025 int len = MIN(*destlen, -(*dest | TARGET_PAGE_MASK)); 1026 S390Access srca, desta; 1027 int i, cc; 1028 1029 if (*destlen == *srclen) { 1030 cc = 0; 1031 } else if (*destlen < *srclen) { 1032 cc = 1; 1033 } else { 1034 cc = 2; 1035 } 1036 1037 if (!*destlen) { 1038 return cc; 1039 } 1040 1041 /* 1042 * Only perform one type of type of operation (move/pad) at a time. 1043 * Stay within single pages. 1044 */ 1045 if (*srclen) { 1046 /* Copy the src array */ 1047 len = MIN(MIN(*srclen, -(*src | TARGET_PAGE_MASK)), len); 1048 *destlen -= len; 1049 *srclen -= len; 1050 access_prepare(&srca, env, *src, len, MMU_DATA_LOAD, mmu_idx, ra); 1051 access_prepare(&desta, env, *dest, len, MMU_DATA_STORE, mmu_idx, ra); 1052 access_memmove(env, &desta, &srca, ra); 1053 *src = wrap_address(env, *src + len); 1054 *dest = wrap_address(env, *dest + len); 1055 } else if (wordsize == 1) { 1056 /* Pad the remaining area */ 1057 *destlen -= len; 1058 access_prepare(&desta, env, *dest, len, MMU_DATA_STORE, mmu_idx, ra); 1059 access_memset(env, &desta, pad, ra); 1060 *dest = wrap_address(env, *dest + len); 1061 } else { 1062 access_prepare(&desta, env, *dest, len, MMU_DATA_STORE, mmu_idx, ra); 1063 1064 /* The remaining length selects the padding byte. */ 1065 for (i = 0; i < len; (*destlen)--, i++) { 1066 if (*destlen & 1) { 1067 access_set_byte(env, &desta, i, pad, ra); 1068 } else { 1069 access_set_byte(env, &desta, i, pad >> 8, ra); 1070 } 1071 } 1072 *dest = wrap_address(env, *dest + len); 1073 } 1074 1075 return *destlen ? 3 : cc; 1076 } 1077 1078 /* move long */ 1079 uint32_t HELPER(mvcl)(CPUS390XState *env, uint32_t r1, uint32_t r2) 1080 { 1081 const int mmu_idx = s390x_env_mmu_index(env, false); 1082 uintptr_t ra = GETPC(); 1083 uint64_t destlen = env->regs[r1 + 1] & 0xffffff; 1084 uint64_t dest = get_address(env, r1); 1085 uint64_t srclen = env->regs[r2 + 1] & 0xffffff; 1086 uint64_t src = get_address(env, r2); 1087 uint8_t pad = env->regs[r2 + 1] >> 24; 1088 CPUState *cs = env_cpu(env); 1089 S390Access srca, desta; 1090 uint32_t cc, cur_len; 1091 1092 if (is_destructive_overlap(env, dest, src, MIN(srclen, destlen))) { 1093 cc = 3; 1094 } else if (srclen == destlen) { 1095 cc = 0; 1096 } else if (destlen < srclen) { 1097 cc = 1; 1098 } else { 1099 cc = 2; 1100 } 1101 1102 /* We might have to zero-out some bits even if there was no action. */ 1103 if (unlikely(!destlen || cc == 3)) { 1104 set_address_zero(env, r2, src); 1105 set_address_zero(env, r1, dest); 1106 return cc; 1107 } else if (!srclen) { 1108 set_address_zero(env, r2, src); 1109 } 1110 1111 /* 1112 * Only perform one type of type of operation (move/pad) in one step. 1113 * Stay within single pages. 1114 */ 1115 while (destlen) { 1116 cur_len = MIN(destlen, -(dest | TARGET_PAGE_MASK)); 1117 if (!srclen) { 1118 access_prepare(&desta, env, dest, cur_len, 1119 MMU_DATA_STORE, mmu_idx, ra); 1120 access_memset(env, &desta, pad, ra); 1121 } else { 1122 cur_len = MIN(MIN(srclen, -(src | TARGET_PAGE_MASK)), cur_len); 1123 1124 access_prepare(&srca, env, src, cur_len, 1125 MMU_DATA_LOAD, mmu_idx, ra); 1126 access_prepare(&desta, env, dest, cur_len, 1127 MMU_DATA_STORE, mmu_idx, ra); 1128 access_memmove(env, &desta, &srca, ra); 1129 src = wrap_address(env, src + cur_len); 1130 srclen -= cur_len; 1131 env->regs[r2 + 1] = deposit64(env->regs[r2 + 1], 0, 24, srclen); 1132 set_address_zero(env, r2, src); 1133 } 1134 dest = wrap_address(env, dest + cur_len); 1135 destlen -= cur_len; 1136 env->regs[r1 + 1] = deposit64(env->regs[r1 + 1], 0, 24, destlen); 1137 set_address_zero(env, r1, dest); 1138 1139 /* 1140 * MVCL is interruptible. Return to the main loop if requested after 1141 * writing back all state to registers. If no interrupt will get 1142 * injected, we'll end up back in this handler and continue processing 1143 * the remaining parts. 1144 */ 1145 if (destlen && unlikely(cpu_loop_exit_requested(cs))) { 1146 cpu_loop_exit_restore(cs, ra); 1147 } 1148 } 1149 return cc; 1150 } 1151 1152 /* move long extended */ 1153 uint32_t HELPER(mvcle)(CPUS390XState *env, uint32_t r1, uint64_t a2, 1154 uint32_t r3) 1155 { 1156 uintptr_t ra = GETPC(); 1157 uint64_t destlen = get_length(env, r1 + 1); 1158 uint64_t dest = get_address(env, r1); 1159 uint64_t srclen = get_length(env, r3 + 1); 1160 uint64_t src = get_address(env, r3); 1161 uint8_t pad = a2; 1162 uint32_t cc; 1163 1164 cc = do_mvcl(env, &dest, &destlen, &src, &srclen, pad, 1, ra); 1165 1166 set_length(env, r1 + 1, destlen); 1167 set_length(env, r3 + 1, srclen); 1168 set_address(env, r1, dest); 1169 set_address(env, r3, src); 1170 1171 return cc; 1172 } 1173 1174 /* move long unicode */ 1175 uint32_t HELPER(mvclu)(CPUS390XState *env, uint32_t r1, uint64_t a2, 1176 uint32_t r3) 1177 { 1178 uintptr_t ra = GETPC(); 1179 uint64_t destlen = get_length(env, r1 + 1); 1180 uint64_t dest = get_address(env, r1); 1181 uint64_t srclen = get_length(env, r3 + 1); 1182 uint64_t src = get_address(env, r3); 1183 uint16_t pad = a2; 1184 uint32_t cc; 1185 1186 cc = do_mvcl(env, &dest, &destlen, &src, &srclen, pad, 2, ra); 1187 1188 set_length(env, r1 + 1, destlen); 1189 set_length(env, r3 + 1, srclen); 1190 set_address(env, r1, dest); 1191 set_address(env, r3, src); 1192 1193 return cc; 1194 } 1195 1196 /* compare logical long helper */ 1197 static inline uint32_t do_clcl(CPUS390XState *env, 1198 uint64_t *src1, uint64_t *src1len, 1199 uint64_t *src3, uint64_t *src3len, 1200 uint16_t pad, uint64_t limit, 1201 int wordsize, uintptr_t ra) 1202 { 1203 uint64_t len = MAX(*src1len, *src3len); 1204 uint32_t cc = 0; 1205 1206 check_alignment(env, *src1len | *src3len, wordsize, ra); 1207 1208 if (!len) { 1209 return cc; 1210 } 1211 1212 /* Lest we fail to service interrupts in a timely manner, limit the 1213 amount of work we're willing to do. */ 1214 if (len > limit) { 1215 len = limit; 1216 cc = 3; 1217 } 1218 1219 for (; len; len -= wordsize) { 1220 uint16_t v1 = pad; 1221 uint16_t v3 = pad; 1222 1223 if (*src1len) { 1224 v1 = cpu_ldusize_data_ra(env, *src1, wordsize, ra); 1225 } 1226 if (*src3len) { 1227 v3 = cpu_ldusize_data_ra(env, *src3, wordsize, ra); 1228 } 1229 1230 if (v1 != v3) { 1231 cc = (v1 < v3) ? 1 : 2; 1232 break; 1233 } 1234 1235 if (*src1len) { 1236 *src1 += wordsize; 1237 *src1len -= wordsize; 1238 } 1239 if (*src3len) { 1240 *src3 += wordsize; 1241 *src3len -= wordsize; 1242 } 1243 } 1244 1245 return cc; 1246 } 1247 1248 1249 /* compare logical long */ 1250 uint32_t HELPER(clcl)(CPUS390XState *env, uint32_t r1, uint32_t r2) 1251 { 1252 uintptr_t ra = GETPC(); 1253 uint64_t src1len = extract64(env->regs[r1 + 1], 0, 24); 1254 uint64_t src1 = get_address(env, r1); 1255 uint64_t src3len = extract64(env->regs[r2 + 1], 0, 24); 1256 uint64_t src3 = get_address(env, r2); 1257 uint8_t pad = env->regs[r2 + 1] >> 24; 1258 uint32_t cc; 1259 1260 cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, -1, 1, ra); 1261 1262 env->regs[r1 + 1] = deposit64(env->regs[r1 + 1], 0, 24, src1len); 1263 env->regs[r2 + 1] = deposit64(env->regs[r2 + 1], 0, 24, src3len); 1264 set_address(env, r1, src1); 1265 set_address(env, r2, src3); 1266 1267 return cc; 1268 } 1269 1270 /* compare logical long extended memcompare insn with padding */ 1271 uint32_t HELPER(clcle)(CPUS390XState *env, uint32_t r1, uint64_t a2, 1272 uint32_t r3) 1273 { 1274 uintptr_t ra = GETPC(); 1275 uint64_t src1len = get_length(env, r1 + 1); 1276 uint64_t src1 = get_address(env, r1); 1277 uint64_t src3len = get_length(env, r3 + 1); 1278 uint64_t src3 = get_address(env, r3); 1279 uint8_t pad = a2; 1280 uint32_t cc; 1281 1282 cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, 0x2000, 1, ra); 1283 1284 set_length(env, r1 + 1, src1len); 1285 set_length(env, r3 + 1, src3len); 1286 set_address(env, r1, src1); 1287 set_address(env, r3, src3); 1288 1289 return cc; 1290 } 1291 1292 /* compare logical long unicode memcompare insn with padding */ 1293 uint32_t HELPER(clclu)(CPUS390XState *env, uint32_t r1, uint64_t a2, 1294 uint32_t r3) 1295 { 1296 uintptr_t ra = GETPC(); 1297 uint64_t src1len = get_length(env, r1 + 1); 1298 uint64_t src1 = get_address(env, r1); 1299 uint64_t src3len = get_length(env, r3 + 1); 1300 uint64_t src3 = get_address(env, r3); 1301 uint16_t pad = a2; 1302 uint32_t cc = 0; 1303 1304 cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, 0x1000, 2, ra); 1305 1306 set_length(env, r1 + 1, src1len); 1307 set_length(env, r3 + 1, src3len); 1308 set_address(env, r1, src1); 1309 set_address(env, r3, src3); 1310 1311 return cc; 1312 } 1313 1314 /* checksum */ 1315 Int128 HELPER(cksm)(CPUS390XState *env, uint64_t r1, 1316 uint64_t src, uint64_t src_len) 1317 { 1318 uintptr_t ra = GETPC(); 1319 uint64_t max_len, len; 1320 uint64_t cksm = (uint32_t)r1; 1321 1322 /* Lest we fail to service interrupts in a timely manner, limit the 1323 amount of work we're willing to do. For now, let's cap at 8k. */ 1324 max_len = (src_len > 0x2000 ? 0x2000 : src_len); 1325 1326 /* Process full words as available. */ 1327 for (len = 0; len + 4 <= max_len; len += 4, src += 4) { 1328 cksm += (uint32_t)cpu_ldl_data_ra(env, src, ra); 1329 } 1330 1331 switch (max_len - len) { 1332 case 1: 1333 cksm += cpu_ldub_data_ra(env, src, ra) << 24; 1334 len += 1; 1335 break; 1336 case 2: 1337 cksm += cpu_lduw_data_ra(env, src, ra) << 16; 1338 len += 2; 1339 break; 1340 case 3: 1341 cksm += cpu_lduw_data_ra(env, src, ra) << 16; 1342 cksm += cpu_ldub_data_ra(env, src + 2, ra) << 8; 1343 len += 3; 1344 break; 1345 } 1346 1347 /* Fold the carry from the checksum. Note that we can see carry-out 1348 during folding more than once (but probably not more than twice). */ 1349 while (cksm > 0xffffffffull) { 1350 cksm = (uint32_t)cksm + (cksm >> 32); 1351 } 1352 1353 /* Indicate whether or not we've processed everything. */ 1354 env->cc_op = (len == src_len ? 0 : 3); 1355 1356 /* Return both cksm and processed length. */ 1357 return int128_make128(cksm, len); 1358 } 1359 1360 void HELPER(pack)(CPUS390XState *env, uint32_t len, uint64_t dest, uint64_t src) 1361 { 1362 uintptr_t ra = GETPC(); 1363 int len_dest = len >> 4; 1364 int len_src = len & 0xf; 1365 uint8_t b; 1366 1367 dest += len_dest; 1368 src += len_src; 1369 1370 /* last byte is special, it only flips the nibbles */ 1371 b = cpu_ldub_data_ra(env, src, ra); 1372 cpu_stb_data_ra(env, dest, (b << 4) | (b >> 4), ra); 1373 src--; 1374 len_src--; 1375 1376 /* now pack every value */ 1377 while (len_dest > 0) { 1378 b = 0; 1379 1380 if (len_src >= 0) { 1381 b = cpu_ldub_data_ra(env, src, ra) & 0x0f; 1382 src--; 1383 len_src--; 1384 } 1385 if (len_src >= 0) { 1386 b |= cpu_ldub_data_ra(env, src, ra) << 4; 1387 src--; 1388 len_src--; 1389 } 1390 1391 len_dest--; 1392 dest--; 1393 cpu_stb_data_ra(env, dest, b, ra); 1394 } 1395 } 1396 1397 static inline void do_pkau(CPUS390XState *env, uint64_t dest, uint64_t src, 1398 uint32_t srclen, int ssize, uintptr_t ra) 1399 { 1400 int i; 1401 /* The destination operand is always 16 bytes long. */ 1402 const int destlen = 16; 1403 1404 /* The operands are processed from right to left. */ 1405 src += srclen - 1; 1406 dest += destlen - 1; 1407 1408 for (i = 0; i < destlen; i++) { 1409 uint8_t b = 0; 1410 1411 /* Start with a positive sign */ 1412 if (i == 0) { 1413 b = 0xc; 1414 } else if (srclen > ssize) { 1415 b = cpu_ldub_data_ra(env, src, ra) & 0x0f; 1416 src -= ssize; 1417 srclen -= ssize; 1418 } 1419 1420 if (srclen > ssize) { 1421 b |= cpu_ldub_data_ra(env, src, ra) << 4; 1422 src -= ssize; 1423 srclen -= ssize; 1424 } 1425 1426 cpu_stb_data_ra(env, dest, b, ra); 1427 dest--; 1428 } 1429 } 1430 1431 1432 void HELPER(pka)(CPUS390XState *env, uint64_t dest, uint64_t src, 1433 uint32_t srclen) 1434 { 1435 do_pkau(env, dest, src, srclen, 1, GETPC()); 1436 } 1437 1438 void HELPER(pku)(CPUS390XState *env, uint64_t dest, uint64_t src, 1439 uint32_t srclen) 1440 { 1441 do_pkau(env, dest, src, srclen, 2, GETPC()); 1442 } 1443 1444 void HELPER(unpk)(CPUS390XState *env, uint32_t len, uint64_t dest, 1445 uint64_t src) 1446 { 1447 uintptr_t ra = GETPC(); 1448 int len_dest = len >> 4; 1449 int len_src = len & 0xf; 1450 uint8_t b; 1451 int second_nibble = 0; 1452 1453 dest += len_dest; 1454 src += len_src; 1455 1456 /* last byte is special, it only flips the nibbles */ 1457 b = cpu_ldub_data_ra(env, src, ra); 1458 cpu_stb_data_ra(env, dest, (b << 4) | (b >> 4), ra); 1459 src--; 1460 len_src--; 1461 1462 /* now pad every nibble with 0xf0 */ 1463 1464 while (len_dest > 0) { 1465 uint8_t cur_byte = 0; 1466 1467 if (len_src > 0) { 1468 cur_byte = cpu_ldub_data_ra(env, src, ra); 1469 } 1470 1471 len_dest--; 1472 dest--; 1473 1474 /* only advance one nibble at a time */ 1475 if (second_nibble) { 1476 cur_byte >>= 4; 1477 len_src--; 1478 src--; 1479 } 1480 second_nibble = !second_nibble; 1481 1482 /* digit */ 1483 cur_byte = (cur_byte & 0xf); 1484 /* zone bits */ 1485 cur_byte |= 0xf0; 1486 1487 cpu_stb_data_ra(env, dest, cur_byte, ra); 1488 } 1489 } 1490 1491 static inline uint32_t do_unpkau(CPUS390XState *env, uint64_t dest, 1492 uint32_t destlen, int dsize, uint64_t src, 1493 uintptr_t ra) 1494 { 1495 int i; 1496 uint32_t cc; 1497 uint8_t b; 1498 /* The source operand is always 16 bytes long. */ 1499 const int srclen = 16; 1500 1501 /* The operands are processed from right to left. */ 1502 src += srclen - 1; 1503 dest += destlen - dsize; 1504 1505 /* Check for the sign. */ 1506 b = cpu_ldub_data_ra(env, src, ra); 1507 src--; 1508 switch (b & 0xf) { 1509 case 0xa: 1510 case 0xc: 1511 case 0xe ... 0xf: 1512 cc = 0; /* plus */ 1513 break; 1514 case 0xb: 1515 case 0xd: 1516 cc = 1; /* minus */ 1517 break; 1518 default: 1519 case 0x0 ... 0x9: 1520 cc = 3; /* invalid */ 1521 break; 1522 } 1523 1524 /* Now pad every nibble with 0x30, advancing one nibble at a time. */ 1525 for (i = 0; i < destlen; i += dsize) { 1526 if (i == (31 * dsize)) { 1527 /* If length is 32/64 bytes, the leftmost byte is 0. */ 1528 b = 0; 1529 } else if (i % (2 * dsize)) { 1530 b = cpu_ldub_data_ra(env, src, ra); 1531 src--; 1532 } else { 1533 b >>= 4; 1534 } 1535 cpu_stsize_data_ra(env, dest, 0x30 + (b & 0xf), dsize, ra); 1536 dest -= dsize; 1537 } 1538 1539 return cc; 1540 } 1541 1542 uint32_t HELPER(unpka)(CPUS390XState *env, uint64_t dest, uint32_t destlen, 1543 uint64_t src) 1544 { 1545 return do_unpkau(env, dest, destlen, 1, src, GETPC()); 1546 } 1547 1548 uint32_t HELPER(unpku)(CPUS390XState *env, uint64_t dest, uint32_t destlen, 1549 uint64_t src) 1550 { 1551 return do_unpkau(env, dest, destlen, 2, src, GETPC()); 1552 } 1553 1554 uint32_t HELPER(tp)(CPUS390XState *env, uint64_t dest, uint32_t destlen) 1555 { 1556 uintptr_t ra = GETPC(); 1557 uint32_t cc = 0; 1558 int i; 1559 1560 for (i = 0; i < destlen; i++) { 1561 uint8_t b = cpu_ldub_data_ra(env, dest + i, ra); 1562 /* digit */ 1563 cc |= (b & 0xf0) > 0x90 ? 2 : 0; 1564 1565 if (i == (destlen - 1)) { 1566 /* sign */ 1567 cc |= (b & 0xf) < 0xa ? 1 : 0; 1568 } else { 1569 /* digit */ 1570 cc |= (b & 0xf) > 0x9 ? 2 : 0; 1571 } 1572 } 1573 1574 return cc; 1575 } 1576 1577 static uint32_t do_helper_tr(CPUS390XState *env, uint32_t len, uint64_t array, 1578 uint64_t trans, uintptr_t ra) 1579 { 1580 uint32_t i; 1581 1582 for (i = 0; i <= len; i++) { 1583 uint8_t byte = cpu_ldub_data_ra(env, array + i, ra); 1584 uint8_t new_byte = cpu_ldub_data_ra(env, trans + byte, ra); 1585 cpu_stb_data_ra(env, array + i, new_byte, ra); 1586 } 1587 1588 return env->cc_op; 1589 } 1590 1591 void HELPER(tr)(CPUS390XState *env, uint32_t len, uint64_t array, 1592 uint64_t trans) 1593 { 1594 do_helper_tr(env, len, array, trans, GETPC()); 1595 } 1596 1597 Int128 HELPER(tre)(CPUS390XState *env, uint64_t array, 1598 uint64_t len, uint64_t trans) 1599 { 1600 uintptr_t ra = GETPC(); 1601 uint8_t end = env->regs[0] & 0xff; 1602 uint64_t l = len; 1603 uint64_t i; 1604 uint32_t cc = 0; 1605 1606 if (!(env->psw.mask & PSW_MASK_64)) { 1607 array &= 0x7fffffff; 1608 l = (uint32_t)l; 1609 } 1610 1611 /* Lest we fail to service interrupts in a timely manner, limit the 1612 amount of work we're willing to do. For now, let's cap at 8k. */ 1613 if (l > 0x2000) { 1614 l = 0x2000; 1615 cc = 3; 1616 } 1617 1618 for (i = 0; i < l; i++) { 1619 uint8_t byte, new_byte; 1620 1621 byte = cpu_ldub_data_ra(env, array + i, ra); 1622 1623 if (byte == end) { 1624 cc = 1; 1625 break; 1626 } 1627 1628 new_byte = cpu_ldub_data_ra(env, trans + byte, ra); 1629 cpu_stb_data_ra(env, array + i, new_byte, ra); 1630 } 1631 1632 env->cc_op = cc; 1633 return int128_make128(len - i, array + i); 1634 } 1635 1636 static inline uint32_t do_helper_trt(CPUS390XState *env, int len, 1637 uint64_t array, uint64_t trans, 1638 int inc, uintptr_t ra) 1639 { 1640 int i; 1641 1642 for (i = 0; i <= len; i++) { 1643 uint8_t byte = cpu_ldub_data_ra(env, array + i * inc, ra); 1644 uint8_t sbyte = cpu_ldub_data_ra(env, trans + byte, ra); 1645 1646 if (sbyte != 0) { 1647 set_address(env, 1, array + i * inc); 1648 env->regs[2] = deposit64(env->regs[2], 0, 8, sbyte); 1649 return (i == len) ? 2 : 1; 1650 } 1651 } 1652 1653 return 0; 1654 } 1655 1656 static uint32_t do_helper_trt_fwd(CPUS390XState *env, uint32_t len, 1657 uint64_t array, uint64_t trans, 1658 uintptr_t ra) 1659 { 1660 return do_helper_trt(env, len, array, trans, 1, ra); 1661 } 1662 1663 uint32_t HELPER(trt)(CPUS390XState *env, uint32_t len, uint64_t array, 1664 uint64_t trans) 1665 { 1666 return do_helper_trt(env, len, array, trans, 1, GETPC()); 1667 } 1668 1669 static uint32_t do_helper_trt_bkwd(CPUS390XState *env, uint32_t len, 1670 uint64_t array, uint64_t trans, 1671 uintptr_t ra) 1672 { 1673 return do_helper_trt(env, len, array, trans, -1, ra); 1674 } 1675 1676 uint32_t HELPER(trtr)(CPUS390XState *env, uint32_t len, uint64_t array, 1677 uint64_t trans) 1678 { 1679 return do_helper_trt(env, len, array, trans, -1, GETPC()); 1680 } 1681 1682 /* Translate one/two to one/two */ 1683 uint32_t HELPER(trXX)(CPUS390XState *env, uint32_t r1, uint32_t r2, 1684 uint32_t tst, uint32_t sizes) 1685 { 1686 uintptr_t ra = GETPC(); 1687 int dsize = (sizes & 1) ? 1 : 2; 1688 int ssize = (sizes & 2) ? 1 : 2; 1689 uint64_t tbl = get_address(env, 1); 1690 uint64_t dst = get_address(env, r1); 1691 uint64_t len = get_length(env, r1 + 1); 1692 uint64_t src = get_address(env, r2); 1693 uint32_t cc = 3; 1694 int i; 1695 1696 /* The lower address bits of TBL are ignored. For TROO, TROT, it's 1697 the low 3 bits (double-word aligned). For TRTO, TRTT, it's either 1698 the low 12 bits (4K, without ETF2-ENH) or 3 bits (with ETF2-ENH). */ 1699 if (ssize == 2 && !s390_has_feat(S390_FEAT_ETF2_ENH)) { 1700 tbl &= -4096; 1701 } else { 1702 tbl &= -8; 1703 } 1704 1705 check_alignment(env, len, ssize, ra); 1706 1707 /* Lest we fail to service interrupts in a timely manner, */ 1708 /* limit the amount of work we're willing to do. */ 1709 for (i = 0; i < 0x2000; i++) { 1710 uint16_t sval = cpu_ldusize_data_ra(env, src, ssize, ra); 1711 uint64_t tble = tbl + (sval * dsize); 1712 uint16_t dval = cpu_ldusize_data_ra(env, tble, dsize, ra); 1713 if (dval == tst) { 1714 cc = 1; 1715 break; 1716 } 1717 cpu_stsize_data_ra(env, dst, dval, dsize, ra); 1718 1719 len -= ssize; 1720 src += ssize; 1721 dst += dsize; 1722 1723 if (len == 0) { 1724 cc = 0; 1725 break; 1726 } 1727 } 1728 1729 set_address(env, r1, dst); 1730 set_length(env, r1 + 1, len); 1731 set_address(env, r2, src); 1732 1733 return cc; 1734 } 1735 1736 static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1, 1737 uint64_t a2, bool parallel) 1738 { 1739 uint32_t mem_idx = s390x_env_mmu_index(env, false); 1740 MemOpIdx oi16 = make_memop_idx(MO_TE | MO_128, mem_idx); 1741 MemOpIdx oi8 = make_memop_idx(MO_TE | MO_64, mem_idx); 1742 MemOpIdx oi4 = make_memop_idx(MO_TE | MO_32, mem_idx); 1743 MemOpIdx oi2 = make_memop_idx(MO_TE | MO_16, mem_idx); 1744 MemOpIdx oi1 = make_memop_idx(MO_8, mem_idx); 1745 uintptr_t ra = GETPC(); 1746 uint32_t fc = extract32(env->regs[0], 0, 8); 1747 uint32_t sc = extract32(env->regs[0], 8, 8); 1748 uint64_t pl = get_address(env, 1) & -16; 1749 uint64_t svh, svl; 1750 uint32_t cc; 1751 1752 /* Sanity check the function code and storage characteristic. */ 1753 if (fc > 1 || sc > 3) { 1754 if (!s390_has_feat(S390_FEAT_COMPARE_AND_SWAP_AND_STORE_2)) { 1755 goto spec_exception; 1756 } 1757 if (fc > 2 || sc > 4 || (fc == 2 && (r3 & 1))) { 1758 goto spec_exception; 1759 } 1760 } 1761 1762 /* Sanity check the alignments. */ 1763 if (extract32(a1, 0, fc + 2) || extract32(a2, 0, sc)) { 1764 goto spec_exception; 1765 } 1766 1767 /* Sanity check writability of the store address. */ 1768 probe_write(env, a2, 1 << sc, mem_idx, ra); 1769 1770 /* 1771 * Note that the compare-and-swap is atomic, and the store is atomic, 1772 * but the complete operation is not. Therefore we do not need to 1773 * assert serial context in order to implement this. That said, 1774 * restart early if we can't support either operation that is supposed 1775 * to be atomic. 1776 */ 1777 if (parallel) { 1778 uint32_t max = 2; 1779 #ifdef CONFIG_ATOMIC64 1780 max = 3; 1781 #endif 1782 if ((HAVE_CMPXCHG128 ? 0 : fc + 2 > max) || 1783 (HAVE_ATOMIC128_RW ? 0 : sc > max)) { 1784 cpu_loop_exit_atomic(env_cpu(env), ra); 1785 } 1786 } 1787 1788 /* 1789 * All loads happen before all stores. For simplicity, load the entire 1790 * store value area from the parameter list. 1791 */ 1792 svh = cpu_ldq_mmu(env, pl + 16, oi8, ra); 1793 svl = cpu_ldq_mmu(env, pl + 24, oi8, ra); 1794 1795 switch (fc) { 1796 case 0: 1797 { 1798 uint32_t nv = cpu_ldl_mmu(env, pl, oi4, ra); 1799 uint32_t cv = env->regs[r3]; 1800 uint32_t ov; 1801 1802 if (parallel) { 1803 ov = cpu_atomic_cmpxchgl_be_mmu(env, a1, cv, nv, oi4, ra); 1804 } else { 1805 ov = cpu_ldl_mmu(env, a1, oi4, ra); 1806 cpu_stl_mmu(env, a1, (ov == cv ? nv : ov), oi4, ra); 1807 } 1808 cc = (ov != cv); 1809 env->regs[r3] = deposit64(env->regs[r3], 32, 32, ov); 1810 } 1811 break; 1812 1813 case 1: 1814 { 1815 uint64_t nv = cpu_ldq_mmu(env, pl, oi8, ra); 1816 uint64_t cv = env->regs[r3]; 1817 uint64_t ov; 1818 1819 if (parallel) { 1820 #ifdef CONFIG_ATOMIC64 1821 ov = cpu_atomic_cmpxchgq_be_mmu(env, a1, cv, nv, oi8, ra); 1822 #else 1823 /* Note that we asserted !parallel above. */ 1824 g_assert_not_reached(); 1825 #endif 1826 } else { 1827 ov = cpu_ldq_mmu(env, a1, oi8, ra); 1828 cpu_stq_mmu(env, a1, (ov == cv ? nv : ov), oi8, ra); 1829 } 1830 cc = (ov != cv); 1831 env->regs[r3] = ov; 1832 } 1833 break; 1834 1835 case 2: 1836 { 1837 Int128 nv = cpu_ld16_mmu(env, pl, oi16, ra); 1838 Int128 cv = int128_make128(env->regs[r3 + 1], env->regs[r3]); 1839 Int128 ov; 1840 1841 if (!parallel) { 1842 ov = cpu_ld16_mmu(env, a1, oi16, ra); 1843 cc = !int128_eq(ov, cv); 1844 if (cc) { 1845 nv = ov; 1846 } 1847 cpu_st16_mmu(env, a1, nv, oi16, ra); 1848 } else if (HAVE_CMPXCHG128) { 1849 ov = cpu_atomic_cmpxchgo_be_mmu(env, a1, cv, nv, oi16, ra); 1850 cc = !int128_eq(ov, cv); 1851 } else { 1852 /* Note that we asserted !parallel above. */ 1853 g_assert_not_reached(); 1854 } 1855 1856 env->regs[r3 + 0] = int128_gethi(ov); 1857 env->regs[r3 + 1] = int128_getlo(ov); 1858 } 1859 break; 1860 1861 default: 1862 g_assert_not_reached(); 1863 } 1864 1865 /* Store only if the comparison succeeded. Note that above we use a pair 1866 of 64-bit big-endian loads, so for sc < 3 we must extract the value 1867 from the most-significant bits of svh. */ 1868 if (cc == 0) { 1869 switch (sc) { 1870 case 0: 1871 cpu_stb_mmu(env, a2, svh >> 56, oi1, ra); 1872 break; 1873 case 1: 1874 cpu_stw_mmu(env, a2, svh >> 48, oi2, ra); 1875 break; 1876 case 2: 1877 cpu_stl_mmu(env, a2, svh >> 32, oi4, ra); 1878 break; 1879 case 3: 1880 cpu_stq_mmu(env, a2, svh, oi8, ra); 1881 break; 1882 case 4: 1883 cpu_st16_mmu(env, a2, int128_make128(svl, svh), oi16, ra); 1884 break; 1885 default: 1886 g_assert_not_reached(); 1887 } 1888 } 1889 1890 return cc; 1891 1892 spec_exception: 1893 tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); 1894 } 1895 1896 uint32_t HELPER(csst)(CPUS390XState *env, uint32_t r3, uint64_t a1, uint64_t a2) 1897 { 1898 return do_csst(env, r3, a1, a2, false); 1899 } 1900 1901 uint32_t HELPER(csst_parallel)(CPUS390XState *env, uint32_t r3, uint64_t a1, 1902 uint64_t a2) 1903 { 1904 return do_csst(env, r3, a1, a2, true); 1905 } 1906 1907 #if !defined(CONFIG_USER_ONLY) 1908 void HELPER(lctlg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3) 1909 { 1910 uintptr_t ra = GETPC(); 1911 bool PERchanged = false; 1912 uint64_t src = a2; 1913 uint32_t i; 1914 1915 if (src & 0x7) { 1916 tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); 1917 } 1918 1919 for (i = r1;; i = (i + 1) % 16) { 1920 uint64_t val = cpu_ldq_data_ra(env, src, ra); 1921 if (env->cregs[i] != val && i >= 9 && i <= 11) { 1922 PERchanged = true; 1923 } 1924 env->cregs[i] = val; 1925 HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n", 1926 i, src, val); 1927 src += sizeof(uint64_t); 1928 1929 if (i == r3) { 1930 break; 1931 } 1932 } 1933 1934 if (PERchanged && env->psw.mask & PSW_MASK_PER) { 1935 s390_cpu_recompute_watchpoints(env_cpu(env)); 1936 } 1937 1938 tlb_flush(env_cpu(env)); 1939 } 1940 1941 void HELPER(lctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3) 1942 { 1943 uintptr_t ra = GETPC(); 1944 bool PERchanged = false; 1945 uint64_t src = a2; 1946 uint32_t i; 1947 1948 if (src & 0x3) { 1949 tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); 1950 } 1951 1952 for (i = r1;; i = (i + 1) % 16) { 1953 uint32_t val = cpu_ldl_data_ra(env, src, ra); 1954 if ((uint32_t)env->cregs[i] != val && i >= 9 && i <= 11) { 1955 PERchanged = true; 1956 } 1957 env->cregs[i] = deposit64(env->cregs[i], 0, 32, val); 1958 HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%x\n", i, src, val); 1959 src += sizeof(uint32_t); 1960 1961 if (i == r3) { 1962 break; 1963 } 1964 } 1965 1966 if (PERchanged && env->psw.mask & PSW_MASK_PER) { 1967 s390_cpu_recompute_watchpoints(env_cpu(env)); 1968 } 1969 1970 tlb_flush(env_cpu(env)); 1971 } 1972 1973 void HELPER(stctg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3) 1974 { 1975 uintptr_t ra = GETPC(); 1976 uint64_t dest = a2; 1977 uint32_t i; 1978 1979 if (dest & 0x7) { 1980 tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); 1981 } 1982 1983 for (i = r1;; i = (i + 1) % 16) { 1984 cpu_stq_data_ra(env, dest, env->cregs[i], ra); 1985 dest += sizeof(uint64_t); 1986 1987 if (i == r3) { 1988 break; 1989 } 1990 } 1991 } 1992 1993 void HELPER(stctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3) 1994 { 1995 uintptr_t ra = GETPC(); 1996 uint64_t dest = a2; 1997 uint32_t i; 1998 1999 if (dest & 0x3) { 2000 tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); 2001 } 2002 2003 for (i = r1;; i = (i + 1) % 16) { 2004 cpu_stl_data_ra(env, dest, env->cregs[i], ra); 2005 dest += sizeof(uint32_t); 2006 2007 if (i == r3) { 2008 break; 2009 } 2010 } 2011 } 2012 2013 uint32_t HELPER(testblock)(CPUS390XState *env, uint64_t real_addr) 2014 { 2015 uintptr_t ra = GETPC(); 2016 int i; 2017 2018 real_addr = wrap_address(env, real_addr) & TARGET_PAGE_MASK; 2019 2020 for (i = 0; i < TARGET_PAGE_SIZE; i += 8) { 2021 cpu_stq_mmuidx_ra(env, real_addr + i, 0, MMU_REAL_IDX, ra); 2022 } 2023 2024 return 0; 2025 } 2026 2027 uint32_t HELPER(tprot)(CPUS390XState *env, uint64_t a1, uint64_t a2) 2028 { 2029 S390CPU *cpu = env_archcpu(env); 2030 CPUState *cs = env_cpu(env); 2031 2032 /* 2033 * TODO: we currently don't handle all access protection types 2034 * (including access-list and key-controlled) as well as AR mode. 2035 */ 2036 if (!s390_cpu_virt_mem_check_write(cpu, a1, 0, 1)) { 2037 /* Fetching permitted; storing permitted */ 2038 return 0; 2039 } 2040 2041 if (env->int_pgm_code == PGM_PROTECTION) { 2042 /* retry if reading is possible */ 2043 cs->exception_index = -1; 2044 if (!s390_cpu_virt_mem_check_read(cpu, a1, 0, 1)) { 2045 /* Fetching permitted; storing not permitted */ 2046 return 1; 2047 } 2048 } 2049 2050 switch (env->int_pgm_code) { 2051 case PGM_PROTECTION: 2052 /* Fetching not permitted; storing not permitted */ 2053 cs->exception_index = -1; 2054 return 2; 2055 case PGM_ADDRESSING: 2056 case PGM_TRANS_SPEC: 2057 /* exceptions forwarded to the guest */ 2058 s390_cpu_virt_mem_handle_exc(cpu, GETPC()); 2059 return 0; 2060 } 2061 2062 /* Translation not available */ 2063 cs->exception_index = -1; 2064 return 3; 2065 } 2066 2067 /* insert storage key extended */ 2068 uint64_t HELPER(iske)(CPUS390XState *env, uint64_t r2) 2069 { 2070 static S390SKeysState *ss; 2071 static S390SKeysClass *skeyclass; 2072 uint64_t addr = wrap_address(env, r2); 2073 uint8_t key; 2074 int rc; 2075 2076 addr = mmu_real2abs(env, addr); 2077 if (!mmu_absolute_addr_valid(addr, false)) { 2078 tcg_s390_program_interrupt(env, PGM_ADDRESSING, GETPC()); 2079 } 2080 2081 if (unlikely(!ss)) { 2082 ss = s390_get_skeys_device(); 2083 skeyclass = S390_SKEYS_GET_CLASS(ss); 2084 if (skeyclass->enable_skeys && !skeyclass->enable_skeys(ss)) { 2085 tlb_flush_all_cpus_synced(env_cpu(env)); 2086 } 2087 } 2088 2089 rc = s390_skeys_get(ss, addr / TARGET_PAGE_SIZE, 1, &key); 2090 if (rc) { 2091 return 0; 2092 } 2093 return key; 2094 } 2095 2096 /* set storage key extended */ 2097 void HELPER(sske)(CPUS390XState *env, uint64_t r1, uint64_t r2) 2098 { 2099 static S390SKeysState *ss; 2100 static S390SKeysClass *skeyclass; 2101 uint64_t addr = wrap_address(env, r2); 2102 uint8_t key; 2103 2104 addr = mmu_real2abs(env, addr); 2105 if (!mmu_absolute_addr_valid(addr, false)) { 2106 tcg_s390_program_interrupt(env, PGM_ADDRESSING, GETPC()); 2107 } 2108 2109 if (unlikely(!ss)) { 2110 ss = s390_get_skeys_device(); 2111 skeyclass = S390_SKEYS_GET_CLASS(ss); 2112 if (skeyclass->enable_skeys && !skeyclass->enable_skeys(ss)) { 2113 tlb_flush_all_cpus_synced(env_cpu(env)); 2114 } 2115 } 2116 2117 key = r1 & 0xfe; 2118 s390_skeys_set(ss, addr / TARGET_PAGE_SIZE, 1, &key); 2119 /* 2120 * As we can only flush by virtual address and not all the entries 2121 * that point to a physical address we have to flush the whole TLB. 2122 */ 2123 tlb_flush_all_cpus_synced(env_cpu(env)); 2124 } 2125 2126 /* reset reference bit extended */ 2127 uint32_t HELPER(rrbe)(CPUS390XState *env, uint64_t r2) 2128 { 2129 uint64_t addr = wrap_address(env, r2); 2130 static S390SKeysState *ss; 2131 static S390SKeysClass *skeyclass; 2132 uint8_t re, key; 2133 int rc; 2134 2135 addr = mmu_real2abs(env, addr); 2136 if (!mmu_absolute_addr_valid(addr, false)) { 2137 tcg_s390_program_interrupt(env, PGM_ADDRESSING, GETPC()); 2138 } 2139 2140 if (unlikely(!ss)) { 2141 ss = s390_get_skeys_device(); 2142 skeyclass = S390_SKEYS_GET_CLASS(ss); 2143 if (skeyclass->enable_skeys && !skeyclass->enable_skeys(ss)) { 2144 tlb_flush_all_cpus_synced(env_cpu(env)); 2145 } 2146 } 2147 2148 rc = s390_skeys_get(ss, addr / TARGET_PAGE_SIZE, 1, &key); 2149 if (rc) { 2150 return 0; 2151 } 2152 2153 re = key & (SK_R | SK_C); 2154 key &= ~SK_R; 2155 2156 rc = s390_skeys_set(ss, addr / TARGET_PAGE_SIZE, 1, &key); 2157 if (rc) { 2158 return 0; 2159 } 2160 /* 2161 * As we can only flush by virtual address and not all the entries 2162 * that point to a physical address we have to flush the whole TLB. 2163 */ 2164 tlb_flush_all_cpus_synced(env_cpu(env)); 2165 2166 /* 2167 * cc 2168 * 2169 * 0 Reference bit zero; change bit zero 2170 * 1 Reference bit zero; change bit one 2171 * 2 Reference bit one; change bit zero 2172 * 3 Reference bit one; change bit one 2173 */ 2174 2175 return re >> 1; 2176 } 2177 2178 uint32_t HELPER(mvcs)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2, 2179 uint64_t key) 2180 { 2181 const uint8_t psw_as = (env->psw.mask & PSW_MASK_ASC) >> PSW_SHIFT_ASC; 2182 S390Access srca, desta; 2183 uintptr_t ra = GETPC(); 2184 int cc = 0; 2185 2186 HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n", 2187 __func__, l, a1, a2); 2188 2189 if (!(env->psw.mask & PSW_MASK_DAT) || !(env->cregs[0] & CR0_SECONDARY) || 2190 psw_as == AS_HOME || psw_as == AS_ACCREG) { 2191 s390_program_interrupt(env, PGM_SPECIAL_OP, ra); 2192 } 2193 2194 if (!psw_key_valid(env, (key >> 4) & 0xf)) { 2195 s390_program_interrupt(env, PGM_PRIVILEGED, ra); 2196 } 2197 2198 l = wrap_length32(env, l); 2199 if (l > 256) { 2200 /* max 256 */ 2201 l = 256; 2202 cc = 3; 2203 } else if (!l) { 2204 return cc; 2205 } 2206 2207 access_prepare(&srca, env, a2, l, MMU_DATA_LOAD, MMU_PRIMARY_IDX, ra); 2208 access_prepare(&desta, env, a1, l, MMU_DATA_STORE, MMU_SECONDARY_IDX, ra); 2209 access_memmove(env, &desta, &srca, ra); 2210 return cc; 2211 } 2212 2213 uint32_t HELPER(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2, 2214 uint64_t key) 2215 { 2216 const uint8_t psw_as = (env->psw.mask & PSW_MASK_ASC) >> PSW_SHIFT_ASC; 2217 S390Access srca, desta; 2218 uintptr_t ra = GETPC(); 2219 int cc = 0; 2220 2221 HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n", 2222 __func__, l, a1, a2); 2223 2224 if (!(env->psw.mask & PSW_MASK_DAT) || !(env->cregs[0] & CR0_SECONDARY) || 2225 psw_as == AS_HOME || psw_as == AS_ACCREG) { 2226 s390_program_interrupt(env, PGM_SPECIAL_OP, ra); 2227 } 2228 2229 if (!psw_key_valid(env, (key >> 4) & 0xf)) { 2230 s390_program_interrupt(env, PGM_PRIVILEGED, ra); 2231 } 2232 2233 l = wrap_length32(env, l); 2234 if (l > 256) { 2235 /* max 256 */ 2236 l = 256; 2237 cc = 3; 2238 } else if (!l) { 2239 return cc; 2240 } 2241 access_prepare(&srca, env, a2, l, MMU_DATA_LOAD, MMU_SECONDARY_IDX, ra); 2242 access_prepare(&desta, env, a1, l, MMU_DATA_STORE, MMU_PRIMARY_IDX, ra); 2243 access_memmove(env, &desta, &srca, ra); 2244 return cc; 2245 } 2246 2247 void HELPER(idte)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint32_t m4) 2248 { 2249 CPUState *cs = env_cpu(env); 2250 const uintptr_t ra = GETPC(); 2251 uint64_t table, entry, raddr; 2252 uint16_t entries, i, index = 0; 2253 2254 if (r2 & 0xff000) { 2255 tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); 2256 } 2257 2258 if (!(r2 & 0x800)) { 2259 /* invalidation-and-clearing operation */ 2260 table = r1 & ASCE_ORIGIN; 2261 entries = (r2 & 0x7ff) + 1; 2262 2263 switch (r1 & ASCE_TYPE_MASK) { 2264 case ASCE_TYPE_REGION1: 2265 index = (r2 >> 53) & 0x7ff; 2266 break; 2267 case ASCE_TYPE_REGION2: 2268 index = (r2 >> 42) & 0x7ff; 2269 break; 2270 case ASCE_TYPE_REGION3: 2271 index = (r2 >> 31) & 0x7ff; 2272 break; 2273 case ASCE_TYPE_SEGMENT: 2274 index = (r2 >> 20) & 0x7ff; 2275 break; 2276 } 2277 for (i = 0; i < entries; i++) { 2278 /* addresses are not wrapped in 24/31bit mode but table index is */ 2279 raddr = table + ((index + i) & 0x7ff) * sizeof(entry); 2280 entry = cpu_ldq_mmuidx_ra(env, raddr, MMU_REAL_IDX, ra); 2281 if (!(entry & REGION_ENTRY_I)) { 2282 /* we are allowed to not store if already invalid */ 2283 entry |= REGION_ENTRY_I; 2284 cpu_stq_mmuidx_ra(env, raddr, entry, MMU_REAL_IDX, ra); 2285 } 2286 } 2287 } 2288 2289 /* We simply flush the complete tlb, therefore we can ignore r3. */ 2290 if (m4 & 1) { 2291 tlb_flush(cs); 2292 } else { 2293 tlb_flush_all_cpus_synced(cs); 2294 } 2295 } 2296 2297 /* invalidate pte */ 2298 void HELPER(ipte)(CPUS390XState *env, uint64_t pto, uint64_t vaddr, 2299 uint32_t m4) 2300 { 2301 CPUState *cs = env_cpu(env); 2302 const uintptr_t ra = GETPC(); 2303 uint64_t page = vaddr & TARGET_PAGE_MASK; 2304 uint64_t pte_addr, pte; 2305 2306 /* Compute the page table entry address */ 2307 pte_addr = (pto & SEGMENT_ENTRY_ORIGIN); 2308 pte_addr += VADDR_PAGE_TX(vaddr) * 8; 2309 2310 /* Mark the page table entry as invalid */ 2311 pte = cpu_ldq_mmuidx_ra(env, pte_addr, MMU_REAL_IDX, ra); 2312 pte |= PAGE_ENTRY_I; 2313 cpu_stq_mmuidx_ra(env, pte_addr, pte, MMU_REAL_IDX, ra); 2314 2315 /* XXX we exploit the fact that Linux passes the exact virtual 2316 address here - it's not obliged to! */ 2317 if (m4 & 1) { 2318 if (vaddr & ~VADDR_PAGE_TX_MASK) { 2319 tlb_flush_page(cs, page); 2320 /* XXX 31-bit hack */ 2321 tlb_flush_page(cs, page ^ 0x80000000); 2322 } else { 2323 /* looks like we don't have a valid virtual address */ 2324 tlb_flush(cs); 2325 } 2326 } else { 2327 if (vaddr & ~VADDR_PAGE_TX_MASK) { 2328 tlb_flush_page_all_cpus_synced(cs, page); 2329 /* XXX 31-bit hack */ 2330 tlb_flush_page_all_cpus_synced(cs, page ^ 0x80000000); 2331 } else { 2332 /* looks like we don't have a valid virtual address */ 2333 tlb_flush_all_cpus_synced(cs); 2334 } 2335 } 2336 } 2337 2338 /* flush local tlb */ 2339 void HELPER(ptlb)(CPUS390XState *env) 2340 { 2341 tlb_flush(env_cpu(env)); 2342 } 2343 2344 /* flush global tlb */ 2345 void HELPER(purge)(CPUS390XState *env) 2346 { 2347 tlb_flush_all_cpus_synced(env_cpu(env)); 2348 } 2349 2350 /* load real address */ 2351 uint64_t HELPER(lra)(CPUS390XState *env, uint64_t r1, uint64_t addr) 2352 { 2353 uint64_t asc = env->psw.mask & PSW_MASK_ASC; 2354 uint64_t ret, tec; 2355 int flags, exc, cc; 2356 2357 /* XXX incomplete - has more corner cases */ 2358 if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) { 2359 tcg_s390_program_interrupt(env, PGM_SPECIAL_OP, GETPC()); 2360 } 2361 2362 exc = mmu_translate(env, addr, MMU_S390_LRA, asc, &ret, &flags, &tec); 2363 if (exc) { 2364 cc = 3; 2365 ret = (r1 & 0xFFFFFFFF00000000ULL) | exc | 0x80000000; 2366 } else { 2367 cc = 0; 2368 ret |= addr & ~TARGET_PAGE_MASK; 2369 } 2370 2371 env->cc_op = cc; 2372 return ret; 2373 } 2374 #endif 2375 2376 /* Execute instruction. This instruction executes an insn modified with 2377 the contents of r1. It does not change the executed instruction in memory; 2378 it does not change the program counter. 2379 2380 Perform this by recording the modified instruction in env->ex_value. 2381 This will be noticed by cpu_get_tb_cpu_state and thus tb translation. 2382 */ 2383 void HELPER(ex)(CPUS390XState *env, uint32_t ilen, uint64_t r1, uint64_t addr) 2384 { 2385 uint64_t insn; 2386 uint8_t opc; 2387 2388 /* EXECUTE targets must be at even addresses. */ 2389 if (addr & 1) { 2390 tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC()); 2391 } 2392 2393 insn = cpu_lduw_code(env, addr); 2394 opc = insn >> 8; 2395 2396 /* Or in the contents of R1[56:63]. */ 2397 insn |= r1 & 0xff; 2398 2399 /* Load the rest of the instruction. */ 2400 insn <<= 48; 2401 switch (get_ilen(opc)) { 2402 case 2: 2403 break; 2404 case 4: 2405 insn |= (uint64_t)cpu_lduw_code(env, addr + 2) << 32; 2406 break; 2407 case 6: 2408 insn |= (uint64_t)(uint32_t)cpu_ldl_code(env, addr + 2) << 16; 2409 break; 2410 default: 2411 g_assert_not_reached(); 2412 } 2413 2414 /* The very most common cases can be sped up by avoiding a new TB. */ 2415 if ((opc & 0xf0) == 0xd0) { 2416 typedef uint32_t (*dx_helper)(CPUS390XState *, uint32_t, uint64_t, 2417 uint64_t, uintptr_t); 2418 static const dx_helper dx[16] = { 2419 [0x0] = do_helper_trt_bkwd, 2420 [0x2] = do_helper_mvc, 2421 [0x4] = do_helper_nc, 2422 [0x5] = do_helper_clc, 2423 [0x6] = do_helper_oc, 2424 [0x7] = do_helper_xc, 2425 [0xc] = do_helper_tr, 2426 [0xd] = do_helper_trt_fwd, 2427 }; 2428 dx_helper helper = dx[opc & 0xf]; 2429 2430 if (helper) { 2431 uint32_t l = extract64(insn, 48, 8); 2432 uint32_t b1 = extract64(insn, 44, 4); 2433 uint32_t d1 = extract64(insn, 32, 12); 2434 uint32_t b2 = extract64(insn, 28, 4); 2435 uint32_t d2 = extract64(insn, 16, 12); 2436 uint64_t a1 = wrap_address(env, (b1 ? env->regs[b1] : 0) + d1); 2437 uint64_t a2 = wrap_address(env, (b2 ? env->regs[b2] : 0) + d2); 2438 2439 env->cc_op = helper(env, l, a1, a2, 0); 2440 env->psw.addr += ilen; 2441 return; 2442 } 2443 } else if (opc == 0x0a) { 2444 env->int_svc_code = extract64(insn, 48, 8); 2445 env->int_svc_ilen = ilen; 2446 helper_exception(env, EXCP_SVC); 2447 g_assert_not_reached(); 2448 } 2449 2450 /* Record the insn we want to execute as well as the ilen to use 2451 during the execution of the target insn. This will also ensure 2452 that ex_value is non-zero, which flags that we are in a state 2453 that requires such execution. */ 2454 env->ex_value = insn | ilen; 2455 env->ex_target = addr; 2456 } 2457 2458 uint32_t HELPER(mvcos)(CPUS390XState *env, uint64_t dest, uint64_t src, 2459 uint64_t len) 2460 { 2461 const uint8_t psw_key = (env->psw.mask & PSW_MASK_KEY) >> PSW_SHIFT_KEY; 2462 const uint8_t psw_as = (env->psw.mask & PSW_MASK_ASC) >> PSW_SHIFT_ASC; 2463 const uint64_t r0 = env->regs[0]; 2464 const uintptr_t ra = GETPC(); 2465 uint8_t dest_key, dest_as, dest_k, dest_a; 2466 uint8_t src_key, src_as, src_k, src_a; 2467 uint64_t val; 2468 int cc = 0; 2469 2470 HELPER_LOG("%s dest %" PRIx64 ", src %" PRIx64 ", len %" PRIx64 "\n", 2471 __func__, dest, src, len); 2472 2473 if (!(env->psw.mask & PSW_MASK_DAT)) { 2474 tcg_s390_program_interrupt(env, PGM_SPECIAL_OP, ra); 2475 } 2476 2477 /* OAC (operand access control) for the first operand -> dest */ 2478 val = (r0 & 0xffff0000ULL) >> 16; 2479 dest_key = (val >> 12) & 0xf; 2480 dest_as = (val >> 6) & 0x3; 2481 dest_k = (val >> 1) & 0x1; 2482 dest_a = val & 0x1; 2483 2484 /* OAC (operand access control) for the second operand -> src */ 2485 val = (r0 & 0x0000ffffULL); 2486 src_key = (val >> 12) & 0xf; 2487 src_as = (val >> 6) & 0x3; 2488 src_k = (val >> 1) & 0x1; 2489 src_a = val & 0x1; 2490 2491 if (!dest_k) { 2492 dest_key = psw_key; 2493 } 2494 if (!src_k) { 2495 src_key = psw_key; 2496 } 2497 if (!dest_a) { 2498 dest_as = psw_as; 2499 } 2500 if (!src_a) { 2501 src_as = psw_as; 2502 } 2503 2504 if (dest_a && dest_as == AS_HOME && (env->psw.mask & PSW_MASK_PSTATE)) { 2505 tcg_s390_program_interrupt(env, PGM_SPECIAL_OP, ra); 2506 } 2507 if (!(env->cregs[0] & CR0_SECONDARY) && 2508 (dest_as == AS_SECONDARY || src_as == AS_SECONDARY)) { 2509 tcg_s390_program_interrupt(env, PGM_SPECIAL_OP, ra); 2510 } 2511 if (!psw_key_valid(env, dest_key) || !psw_key_valid(env, src_key)) { 2512 tcg_s390_program_interrupt(env, PGM_PRIVILEGED, ra); 2513 } 2514 2515 len = wrap_length32(env, len); 2516 if (len > 4096) { 2517 cc = 3; 2518 len = 4096; 2519 } 2520 2521 /* FIXME: AR-mode and proper problem state mode (using PSW keys) missing */ 2522 if (src_as == AS_ACCREG || dest_as == AS_ACCREG || 2523 (env->psw.mask & PSW_MASK_PSTATE)) { 2524 qemu_log_mask(LOG_UNIMP, "%s: AR-mode and PSTATE support missing\n", 2525 __func__); 2526 tcg_s390_program_interrupt(env, PGM_ADDRESSING, ra); 2527 } 2528 2529 /* FIXME: Access using correct keys and AR-mode */ 2530 if (len) { 2531 S390Access srca, desta; 2532 2533 access_prepare(&srca, env, src, len, MMU_DATA_LOAD, 2534 mmu_idx_from_as(src_as), ra); 2535 access_prepare(&desta, env, dest, len, MMU_DATA_STORE, 2536 mmu_idx_from_as(dest_as), ra); 2537 2538 access_memmove(env, &desta, &srca, ra); 2539 } 2540 2541 return cc; 2542 } 2543 2544 /* Decode a Unicode character. A return value < 0 indicates success, storing 2545 the UTF-32 result into OCHAR and the input length into OLEN. A return 2546 value >= 0 indicates failure, and the CC value to be returned. */ 2547 typedef int (*decode_unicode_fn)(CPUS390XState *env, uint64_t addr, 2548 uint64_t ilen, bool enh_check, uintptr_t ra, 2549 uint32_t *ochar, uint32_t *olen); 2550 2551 /* Encode a Unicode character. A return value < 0 indicates success, storing 2552 the bytes into ADDR and the output length into OLEN. A return value >= 0 2553 indicates failure, and the CC value to be returned. */ 2554 typedef int (*encode_unicode_fn)(CPUS390XState *env, uint64_t addr, 2555 uint64_t ilen, uintptr_t ra, uint32_t c, 2556 uint32_t *olen); 2557 2558 static int decode_utf8(CPUS390XState *env, uint64_t addr, uint64_t ilen, 2559 bool enh_check, uintptr_t ra, 2560 uint32_t *ochar, uint32_t *olen) 2561 { 2562 uint8_t s0, s1, s2, s3; 2563 uint32_t c, l; 2564 2565 if (ilen < 1) { 2566 return 0; 2567 } 2568 s0 = cpu_ldub_data_ra(env, addr, ra); 2569 if (s0 <= 0x7f) { 2570 /* one byte character */ 2571 l = 1; 2572 c = s0; 2573 } else if (s0 <= (enh_check ? 0xc1 : 0xbf)) { 2574 /* invalid character */ 2575 return 2; 2576 } else if (s0 <= 0xdf) { 2577 /* two byte character */ 2578 l = 2; 2579 if (ilen < 2) { 2580 return 0; 2581 } 2582 s1 = cpu_ldub_data_ra(env, addr + 1, ra); 2583 c = s0 & 0x1f; 2584 c = (c << 6) | (s1 & 0x3f); 2585 if (enh_check && (s1 & 0xc0) != 0x80) { 2586 return 2; 2587 } 2588 } else if (s0 <= 0xef) { 2589 /* three byte character */ 2590 l = 3; 2591 if (ilen < 3) { 2592 return 0; 2593 } 2594 s1 = cpu_ldub_data_ra(env, addr + 1, ra); 2595 s2 = cpu_ldub_data_ra(env, addr + 2, ra); 2596 c = s0 & 0x0f; 2597 c = (c << 6) | (s1 & 0x3f); 2598 c = (c << 6) | (s2 & 0x3f); 2599 /* Fold the byte-by-byte range descriptions in the PoO into 2600 tests against the complete value. It disallows encodings 2601 that could be smaller, and the UTF-16 surrogates. */ 2602 if (enh_check 2603 && ((s1 & 0xc0) != 0x80 2604 || (s2 & 0xc0) != 0x80 2605 || c < 0x1000 2606 || (c >= 0xd800 && c <= 0xdfff))) { 2607 return 2; 2608 } 2609 } else if (s0 <= (enh_check ? 0xf4 : 0xf7)) { 2610 /* four byte character */ 2611 l = 4; 2612 if (ilen < 4) { 2613 return 0; 2614 } 2615 s1 = cpu_ldub_data_ra(env, addr + 1, ra); 2616 s2 = cpu_ldub_data_ra(env, addr + 2, ra); 2617 s3 = cpu_ldub_data_ra(env, addr + 3, ra); 2618 c = s0 & 0x07; 2619 c = (c << 6) | (s1 & 0x3f); 2620 c = (c << 6) | (s2 & 0x3f); 2621 c = (c << 6) | (s3 & 0x3f); 2622 /* See above. */ 2623 if (enh_check 2624 && ((s1 & 0xc0) != 0x80 2625 || (s2 & 0xc0) != 0x80 2626 || (s3 & 0xc0) != 0x80 2627 || c < 0x010000 2628 || c > 0x10ffff)) { 2629 return 2; 2630 } 2631 } else { 2632 /* invalid character */ 2633 return 2; 2634 } 2635 2636 *ochar = c; 2637 *olen = l; 2638 return -1; 2639 } 2640 2641 static int decode_utf16(CPUS390XState *env, uint64_t addr, uint64_t ilen, 2642 bool enh_check, uintptr_t ra, 2643 uint32_t *ochar, uint32_t *olen) 2644 { 2645 uint16_t s0, s1; 2646 uint32_t c, l; 2647 2648 if (ilen < 2) { 2649 return 0; 2650 } 2651 s0 = cpu_lduw_data_ra(env, addr, ra); 2652 if ((s0 & 0xfc00) != 0xd800) { 2653 /* one word character */ 2654 l = 2; 2655 c = s0; 2656 } else { 2657 /* two word character */ 2658 l = 4; 2659 if (ilen < 4) { 2660 return 0; 2661 } 2662 s1 = cpu_lduw_data_ra(env, addr + 2, ra); 2663 c = extract32(s0, 6, 4) + 1; 2664 c = (c << 6) | (s0 & 0x3f); 2665 c = (c << 10) | (s1 & 0x3ff); 2666 if (enh_check && (s1 & 0xfc00) != 0xdc00) { 2667 /* invalid surrogate character */ 2668 return 2; 2669 } 2670 } 2671 2672 *ochar = c; 2673 *olen = l; 2674 return -1; 2675 } 2676 2677 static int decode_utf32(CPUS390XState *env, uint64_t addr, uint64_t ilen, 2678 bool enh_check, uintptr_t ra, 2679 uint32_t *ochar, uint32_t *olen) 2680 { 2681 uint32_t c; 2682 2683 if (ilen < 4) { 2684 return 0; 2685 } 2686 c = cpu_ldl_data_ra(env, addr, ra); 2687 if ((c >= 0xd800 && c <= 0xdbff) || c > 0x10ffff) { 2688 /* invalid unicode character */ 2689 return 2; 2690 } 2691 2692 *ochar = c; 2693 *olen = 4; 2694 return -1; 2695 } 2696 2697 static int encode_utf8(CPUS390XState *env, uint64_t addr, uint64_t ilen, 2698 uintptr_t ra, uint32_t c, uint32_t *olen) 2699 { 2700 uint8_t d[4]; 2701 uint32_t l, i; 2702 2703 if (c <= 0x7f) { 2704 /* one byte character */ 2705 l = 1; 2706 d[0] = c; 2707 } else if (c <= 0x7ff) { 2708 /* two byte character */ 2709 l = 2; 2710 d[1] = 0x80 | extract32(c, 0, 6); 2711 d[0] = 0xc0 | extract32(c, 6, 5); 2712 } else if (c <= 0xffff) { 2713 /* three byte character */ 2714 l = 3; 2715 d[2] = 0x80 | extract32(c, 0, 6); 2716 d[1] = 0x80 | extract32(c, 6, 6); 2717 d[0] = 0xe0 | extract32(c, 12, 4); 2718 } else { 2719 /* four byte character */ 2720 l = 4; 2721 d[3] = 0x80 | extract32(c, 0, 6); 2722 d[2] = 0x80 | extract32(c, 6, 6); 2723 d[1] = 0x80 | extract32(c, 12, 6); 2724 d[0] = 0xf0 | extract32(c, 18, 3); 2725 } 2726 2727 if (ilen < l) { 2728 return 1; 2729 } 2730 for (i = 0; i < l; ++i) { 2731 cpu_stb_data_ra(env, addr + i, d[i], ra); 2732 } 2733 2734 *olen = l; 2735 return -1; 2736 } 2737 2738 static int encode_utf16(CPUS390XState *env, uint64_t addr, uint64_t ilen, 2739 uintptr_t ra, uint32_t c, uint32_t *olen) 2740 { 2741 uint16_t d0, d1; 2742 2743 if (c <= 0xffff) { 2744 /* one word character */ 2745 if (ilen < 2) { 2746 return 1; 2747 } 2748 cpu_stw_data_ra(env, addr, c, ra); 2749 *olen = 2; 2750 } else { 2751 /* two word character */ 2752 if (ilen < 4) { 2753 return 1; 2754 } 2755 d1 = 0xdc00 | extract32(c, 0, 10); 2756 d0 = 0xd800 | extract32(c, 10, 6); 2757 d0 = deposit32(d0, 6, 4, extract32(c, 16, 5) - 1); 2758 cpu_stw_data_ra(env, addr + 0, d0, ra); 2759 cpu_stw_data_ra(env, addr + 2, d1, ra); 2760 *olen = 4; 2761 } 2762 2763 return -1; 2764 } 2765 2766 static int encode_utf32(CPUS390XState *env, uint64_t addr, uint64_t ilen, 2767 uintptr_t ra, uint32_t c, uint32_t *olen) 2768 { 2769 if (ilen < 4) { 2770 return 1; 2771 } 2772 cpu_stl_data_ra(env, addr, c, ra); 2773 *olen = 4; 2774 return -1; 2775 } 2776 2777 static inline uint32_t convert_unicode(CPUS390XState *env, uint32_t r1, 2778 uint32_t r2, uint32_t m3, uintptr_t ra, 2779 decode_unicode_fn decode, 2780 encode_unicode_fn encode) 2781 { 2782 uint64_t dst = get_address(env, r1); 2783 uint64_t dlen = get_length(env, r1 + 1); 2784 uint64_t src = get_address(env, r2); 2785 uint64_t slen = get_length(env, r2 + 1); 2786 bool enh_check = m3 & 1; 2787 int cc, i; 2788 2789 /* Lest we fail to service interrupts in a timely manner, limit the 2790 amount of work we're willing to do. For now, let's cap at 256. */ 2791 for (i = 0; i < 256; ++i) { 2792 uint32_t c, ilen, olen; 2793 2794 cc = decode(env, src, slen, enh_check, ra, &c, &ilen); 2795 if (unlikely(cc >= 0)) { 2796 break; 2797 } 2798 cc = encode(env, dst, dlen, ra, c, &olen); 2799 if (unlikely(cc >= 0)) { 2800 break; 2801 } 2802 2803 src += ilen; 2804 slen -= ilen; 2805 dst += olen; 2806 dlen -= olen; 2807 cc = 3; 2808 } 2809 2810 set_address(env, r1, dst); 2811 set_length(env, r1 + 1, dlen); 2812 set_address(env, r2, src); 2813 set_length(env, r2 + 1, slen); 2814 2815 return cc; 2816 } 2817 2818 uint32_t HELPER(cu12)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3) 2819 { 2820 return convert_unicode(env, r1, r2, m3, GETPC(), 2821 decode_utf8, encode_utf16); 2822 } 2823 2824 uint32_t HELPER(cu14)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3) 2825 { 2826 return convert_unicode(env, r1, r2, m3, GETPC(), 2827 decode_utf8, encode_utf32); 2828 } 2829 2830 uint32_t HELPER(cu21)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3) 2831 { 2832 return convert_unicode(env, r1, r2, m3, GETPC(), 2833 decode_utf16, encode_utf8); 2834 } 2835 2836 uint32_t HELPER(cu24)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3) 2837 { 2838 return convert_unicode(env, r1, r2, m3, GETPC(), 2839 decode_utf16, encode_utf32); 2840 } 2841 2842 uint32_t HELPER(cu41)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3) 2843 { 2844 return convert_unicode(env, r1, r2, m3, GETPC(), 2845 decode_utf32, encode_utf8); 2846 } 2847 2848 uint32_t HELPER(cu42)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3) 2849 { 2850 return convert_unicode(env, r1, r2, m3, GETPC(), 2851 decode_utf32, encode_utf16); 2852 } 2853 2854 void probe_write_access(CPUS390XState *env, uint64_t addr, uint64_t len, 2855 uintptr_t ra) 2856 { 2857 const int mmu_idx = s390x_env_mmu_index(env, false); 2858 2859 /* test the actual access, not just any access to the page due to LAP */ 2860 while (len) { 2861 const uint64_t pagelen = -(addr | TARGET_PAGE_MASK); 2862 const uint64_t curlen = MIN(pagelen, len); 2863 2864 probe_write(env, addr, curlen, mmu_idx, ra); 2865 addr = wrap_address(env, addr + curlen); 2866 len -= curlen; 2867 } 2868 } 2869 2870 void HELPER(probe_write_access)(CPUS390XState *env, uint64_t addr, uint64_t len) 2871 { 2872 probe_write_access(env, addr, len, GETPC()); 2873 } 2874