--- cputlb.c (be5c4787e9a6eed12fd765d9e890f7cc6cd63220)
+++ cputlb.c (9bf825bf3df4ebae3af51566c8088e3f1249a910)
 /*
  * Common CPU TLB handling
  *
  * Copyright (c) 2003 Fabrice Bellard
  *
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
  * License as published by the Free Software Foundation; either

--- 1186 unchanged lines hidden ---

     }

     return hostaddr;

  stop_the_world:
     cpu_loop_exit_atomic(env_cpu(env), retaddr);
 }

-#ifdef TARGET_WORDS_BIGENDIAN
-#define NEED_BE_BSWAP 0
-#define NEED_LE_BSWAP 1
-#else
-#define NEED_BE_BSWAP 1
-#define NEED_LE_BSWAP 0
-#endif
-
-/*
- * Byte Swap Helper
- *
- * This should all dead code away depending on the build host and
- * access type.
- */
-
-static inline uint64_t handle_bswap(uint64_t val, MemOp op)
-{
-    if ((memop_big_endian(op) && NEED_BE_BSWAP) ||
-        (!memop_big_endian(op) && NEED_LE_BSWAP)) {
-        switch (op & MO_SIZE) {
-        case MO_8: return val;
-        case MO_16: return bswap16(val);
-        case MO_32: return bswap32(val);
-        case MO_64: return bswap64(val);
-        default:
-            g_assert_not_reached();
-        }
-    } else {
-        return val;
-    }
-}
-
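Note: the deleted helper swaps a value only when the MemOp's endianness differs from the compiled-in TARGET_WORDS_BIGENDIAN setting; because NEED_BE_BSWAP and NEED_LE_BSWAP are 0/1 constants, one arm of the predicate folds away at compile time, which is what the "dead code away" comment is promising. A minimal standalone sketch of the same pattern follows; TARGET_BIG, the MO_* values, and the GCC/Clang builtins are simplified stand-ins, not QEMU's definitions.

    #include <assert.h>
    #include <stdint.h>

    #define TARGET_BIG 0                /* stand-in for TARGET_WORDS_BIGENDIAN */
    #define NEED_BE_BSWAP (!TARGET_BIG)
    #define NEED_LE_BSWAP (TARGET_BIG)

    enum { MO_8 = 0, MO_16 = 1, MO_32 = 2, MO_64 = 3, MO_SIZE = 3 };

    static inline uint64_t sketch_bswap(uint64_t val, int op, int big_endian)
    {
        /* With TARGET_BIG fixed, one side of this || is a constant 0,
         * so the compiler drops the whole switch for matching accesses. */
        if ((big_endian && NEED_BE_BSWAP) || (!big_endian && NEED_LE_BSWAP)) {
            switch (op & MO_SIZE) {
            case MO_8:  return val;
            case MO_16: return __builtin_bswap16((uint16_t)val);
            case MO_32: return __builtin_bswap32((uint32_t)val);
            case MO_64: return __builtin_bswap64(val);
            }
            assert(0);                  /* unreachable: op & MO_SIZE is 0..3 */
        }
        return val;
    }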
 /*
  * Load Helpers
  *
  * We support two different access types. SOFTMMU_CODE_ACCESS is
  * specifically for reading instructions from system memory. It is
  * called by the translation loop and in some helpers where the code
  * is disassembled. It shouldn't be called directly by guest code.
  */

--- 57 unchanged lines hidden ---

             tlb_addr = code_read ? entry->addr_code : entry->addr_read;
             tlb_addr &= ~TLB_RECHECK;
             if (!(tlb_addr & ~TARGET_PAGE_MASK)) {
                 /* RAM access */
                 goto do_aligned_access;
             }
         }

-        /* TODO: Merge bswap into io_readx -> memory_region_dispatch_read. */
-        res = io_readx(env, &env_tlb(env)->d[mmu_idx].iotlb[index],
-                       mmu_idx, addr, retaddr, access_type, op);
-        return handle_bswap(res, op);
+        return io_readx(env, &env_tlb(env)->d[mmu_idx].iotlb[index],
+                        mmu_idx, addr, retaddr, access_type, op);
     }

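Note: on the load side the change deletes the caller-side fixup: load_helper now returns io_readx's result as-is and, per the TODO being resolved, the single byte swap is expected to happen further down the I/O path (io_readx -> memory_region_dispatch_read), driven by the MemOp that already travels with the call. A hypothetical sketch of that shape; every name below (dispatch_read, dev_read_cb, the MO_* layout) is illustrative rather than QEMU's actual internals.

    #include <stdint.h>

    enum { MO_SIZE = 3, MO_BSWAP = 8 };      /* assumed flag layout */

    typedef uint64_t (*dev_read_cb)(uint64_t addr, unsigned size);

    /* One swap for the whole I/O path: the dispatcher reconciles the
     * MemOp's endianness with the device's, so callers never re-swap. */
    static uint64_t dispatch_read(dev_read_cb read, uint64_t addr, int op)
    {
        uint64_t val = read(addr, 1u << (op & MO_SIZE));
        if (op & MO_BSWAP) {                 /* endianness mismatch */
            switch (op & MO_SIZE) {
            case 1: val = __builtin_bswap16((uint16_t)val); break;
            case 2: val = __builtin_bswap32((uint32_t)val); break;
            case 3: val = __builtin_bswap64(val); break;
            }
        }
        return val;
    }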
     /* Handle slow unaligned access (it spans two pages or IO).  */
     if (size > 1
         && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
                     >= TARGET_PAGE_SIZE)) {
         target_ulong addr1, addr2;
         uint64_t r1, r2;

--- 226 unchanged lines hidden ---

             tlb_addr = tlb_addr_write(entry);
             tlb_addr &= ~TLB_RECHECK;
             if (!(tlb_addr & ~TARGET_PAGE_MASK)) {
                 /* RAM access */
                 goto do_aligned_access;
             }
         }

-        /* TODO: Merge bswap into io_writex -> memory_region_dispatch_write. */
-        io_writex(env, &env_tlb(env)->d[mmu_idx].iotlb[index], mmu_idx,
-                  handle_bswap(val, op),
-                  addr, retaddr, op);
+        io_writex(env, &env_tlb(env)->d[mmu_idx].iotlb[index], mmu_idx,
+                  val, addr, retaddr, op);
         return;
     }

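Note: the store side is the mirror image of the load hunk above: the old code pre-swapped val with handle_bswap before handing it to io_writex, while the new code passes val through unchanged and relies on the same single swap point behind io_writex -> memory_region_dispatch_write.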
     /* Handle slow unaligned access (it spans two pages or IO).  */
     if (size > 1
         && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
                     >= TARGET_PAGE_SIZE)) {
         int i;

--- 247 unchanged lines hidden ---
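Note: both helpers fall back to their slow path with the same page-span predicate seen in the context above: an access spans two pages exactly when its offset within the page plus size - 1 reaches TARGET_PAGE_SIZE. A self-contained worked example, assuming 4 KiB pages (the constants here are illustrative; the real values are per-target):

    #include <stdint.h>
    #include <stdio.h>

    #define TARGET_PAGE_SIZE 0x1000ULL
    #define TARGET_PAGE_MASK (~(TARGET_PAGE_SIZE - 1))

    static int spans_two_pages(uint64_t addr, unsigned size)
    {
        return size > 1
               && (addr & ~TARGET_PAGE_MASK) + size - 1 >= TARGET_PAGE_SIZE;
    }

    int main(void)
    {
        /* A 4-byte access at 0xffe touches bytes 0xffe..0x1001: two pages. */
        printf("0xffe/4 spans: %d\n", spans_two_pages(0xffe, 4));  /* 1 */
        /* A 4-byte access at 0xff0 touches bytes 0xff0..0xff3: one page. */
        printf("0xff0/4 spans: %d\n", spans_two_pages(0xff0, 4));  /* 0 */
        return 0;
    }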