--- cputlb.c (eed5664238ea5317689cf32426d9318686b2b75c)
+++ cputlb.c (f1be36969de2fb9b6b64397db1098f115210fcd9)
 /*
  * Common CPU TLB handling
  *
  * Copyright (c) 2003 Fabrice Bellard
  *
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
  * License as published by the Free Software Foundation; either

--- 842 unchanged lines hidden ---

     if (ram_addr == RAM_ADDR_INVALID) {
         error_report("Bad ram pointer %p", ptr);
         abort();
     }
     return ram_addr;
 }

 static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
-                         int mmu_idx,
-                         target_ulong addr, uintptr_t retaddr,
-                         bool recheck, MMUAccessType access_type, int size)
+                         int mmu_idx, target_ulong addr, uintptr_t retaddr,
+                         MMUAccessType access_type, int size)
 {
     CPUState *cpu = ENV_GET_CPU(env);
     hwaddr mr_offset;
     MemoryRegionSection *section;
     MemoryRegion *mr;
     uint64_t val;
     bool locked = false;
     MemTxResult r;

-    if (recheck) {
-        /*
-         * This is a TLB_RECHECK access, where the MMU protection
-         * covers a smaller range than a target page, and we must
-         * repeat the MMU check here. This tlb_fill() call might
-         * longjump out if this access should cause a guest exception.
-         */
-        CPUTLBEntry *entry;
-        target_ulong tlb_addr;
-
-        tlb_fill(cpu, addr, size, access_type, mmu_idx, retaddr);
-
-        entry = tlb_entry(env, mmu_idx, addr);
-        tlb_addr = (access_type == MMU_DATA_LOAD ?
-                    entry->addr_read : entry->addr_code);
-        if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) {
-            /* RAM access */
-            uintptr_t haddr = addr + entry->addend;
-
-            return ldn_p((void *)haddr, size);
-        }
-        /* Fall through for handling IO accesses */
-    }
-
     section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
     mr = section->mr;
     mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
     cpu->mem_io_pc = retaddr;
     if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
         cpu_io_recompile(cpu, retaddr);
     }

--- 17 unchanged lines hidden ---

     if (locked) {
         qemu_mutex_unlock_iothread();
     }

     return val;
 }

 static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
-                      int mmu_idx,
-                      uint64_t val, target_ulong addr,
-                      uintptr_t retaddr, bool recheck, int size)
+                      int mmu_idx, uint64_t val, target_ulong addr,
+                      uintptr_t retaddr, int size)
 {
     CPUState *cpu = ENV_GET_CPU(env);
     hwaddr mr_offset;
     MemoryRegionSection *section;
     MemoryRegion *mr;
     bool locked = false;
     MemTxResult r;

-    if (recheck) {
-        /*
-         * This is a TLB_RECHECK access, where the MMU protection
-         * covers a smaller range than a target page, and we must
-         * repeat the MMU check here. This tlb_fill() call might
-         * longjump out if this access should cause a guest exception.
-         */
-        CPUTLBEntry *entry;
-        target_ulong tlb_addr;
-
-        tlb_fill(cpu, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
-
-        entry = tlb_entry(env, mmu_idx, addr);
-        tlb_addr = tlb_addr_write(entry);
-        if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) {
-            /* RAM access */
-            uintptr_t haddr = addr + entry->addend;
-
-            stn_p((void *)haddr, size, val);
-            return;
-        }
-        /* Fall through for handling IO accesses */
-    }
-
     section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
     mr = section->mr;
     mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
     if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
         cpu_io_recompile(cpu, retaddr);
     }
     cpu->mem_io_vaddr = addr;
     cpu->mem_io_pc = retaddr;

--- 242 unchanged lines hidden ---

             bool code_read)
 {
     uintptr_t mmu_idx = get_mmuidx(oi);
     uintptr_t index = tlb_index(env, mmu_idx, addr);
     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
     target_ulong tlb_addr = code_read ? entry->addr_code : entry->addr_read;
     const size_t tlb_off = code_read ?
         offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read);
+    const MMUAccessType access_type =
+        code_read ? MMU_INST_FETCH : MMU_DATA_LOAD;
     unsigned a_bits = get_alignment_bits(get_memop(oi));
     void *haddr;
     uint64_t res;

     /* Handle CPU specific unaligned behaviour */
     if (addr & ((1 << a_bits) - 1)) {
-        cpu_unaligned_access(ENV_GET_CPU(env), addr,
-                             code_read ? MMU_INST_FETCH : MMU_DATA_LOAD,
+        cpu_unaligned_access(ENV_GET_CPU(env), addr, access_type,
                              mmu_idx, retaddr);
     }

     /* If the TLB entry is for a different page, reload and try again. */
     if (!tlb_hit(tlb_addr, addr)) {
         if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
                             addr & TARGET_PAGE_MASK)) {
             tlb_fill(ENV_GET_CPU(env), addr, size,
-                     code_read ? MMU_INST_FETCH : MMU_DATA_LOAD,
-                     mmu_idx, retaddr);
+                     access_type, mmu_idx, retaddr);
             index = tlb_index(env, mmu_idx, addr);
             entry = tlb_entry(env, mmu_idx, addr);
         }
         tlb_addr = code_read ? entry->addr_code : entry->addr_read;
     }

     /* Handle an IO access. */
     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
-        CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
-        uint64_t tmp;
-
         if ((addr & (size - 1)) != 0) {
             goto do_unaligned_access;
         }

-        tmp = io_readx(env, iotlbentry, mmu_idx, addr, retaddr,
-                       tlb_addr & TLB_RECHECK,
-                       code_read ? MMU_INST_FETCH : MMU_DATA_LOAD, size);
-        return handle_bswap(tmp, size, big_endian);
+        if (tlb_addr & TLB_RECHECK) {
+            /*
+             * This is a TLB_RECHECK access, where the MMU protection
+             * covers a smaller range than a target page, and we must
+             * repeat the MMU check here. This tlb_fill() call might
+             * longjump out if this access should cause a guest exception.
+             */
+            tlb_fill(ENV_GET_CPU(env), addr, size,
+                     access_type, mmu_idx, retaddr);
+            index = tlb_index(env, mmu_idx, addr);
+            entry = tlb_entry(env, mmu_idx, addr);
+
+            tlb_addr = code_read ? entry->addr_code : entry->addr_read;
+            tlb_addr &= ~TLB_RECHECK;
+            if (!(tlb_addr & ~TARGET_PAGE_MASK)) {
+                /* RAM access */
+                goto do_aligned_access;
+            }
+        }
+
+        res = io_readx(env, &env->iotlb[mmu_idx][index], mmu_idx, addr,
+                       retaddr, access_type, size);
+        return handle_bswap(res, size, big_endian);
     }

     /* Handle slow unaligned access (it spans two pages or IO). */
     if (size > 1
         && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
                     >= TARGET_PAGE_SIZE)) {
         target_ulong addr1, addr2;
         tcg_target_ulong r1, r2;

--- 10 unchanged lines hidden ---

             res = (r1 << shift) | (r2 >> ((size * 8) - shift));
         } else {
             /* Little-endian combine. */
             res = (r1 >> shift) | (r2 << ((size * 8) - shift));
         }
         return res & MAKE_64BIT_MASK(0, size * 8);
     }

+ do_aligned_access:
     haddr = (void *)((uintptr_t)addr + entry->addend);
-
     switch (size) {
     case 1:
         res = ldub_p(haddr);
         break;
     case 2:
         if (big_endian) {
             res = lduw_be_p(haddr);
         } else {

--- 147 unchanged lines hidden ---

             index = tlb_index(env, mmu_idx, addr);
             entry = tlb_entry(env, mmu_idx, addr);
         }
         tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
     }

     /* Handle an IO access. */
     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
-        CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
-
         if ((addr & (size - 1)) != 0) {
             goto do_unaligned_access;
         }

-        io_writex(env, iotlbentry, mmu_idx,
+        if (tlb_addr & TLB_RECHECK) {
+            /*
+             * This is a TLB_RECHECK access, where the MMU protection
+             * covers a smaller range than a target page, and we must
+             * repeat the MMU check here. This tlb_fill() call might
+             * longjump out if this access should cause a guest exception.
+             */
+            tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
+                     mmu_idx, retaddr);
+            index = tlb_index(env, mmu_idx, addr);
+            entry = tlb_entry(env, mmu_idx, addr);
+
+            tlb_addr = tlb_addr_write(entry);
+            tlb_addr &= ~TLB_RECHECK;
+            if (!(tlb_addr & ~TARGET_PAGE_MASK)) {
+                /* RAM access */
+                goto do_aligned_access;
+            }
+        }
+
+        io_writex(env, &env->iotlb[mmu_idx][index], mmu_idx,
                   handle_bswap(val, size, big_endian),
-                  addr, retaddr, tlb_addr & TLB_RECHECK, size);
+                  addr, retaddr, size);
         return;
     }

     /* Handle slow unaligned access (it spans two pages or IO). */
     if (size > 1
         && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
                     >= TARGET_PAGE_SIZE)) {
         int i;

--- 31 unchanged lines hidden ---

                 /* Little-endian extract. */
                 val8 = val >> (i * 8);
             }
             store_helper(env, addr + i, val8, oi, retaddr, 1, big_endian);
         }
         return;
     }

+ do_aligned_access:
     haddr = (void *)((uintptr_t)addr + entry->addend);
-
     switch (size) {
     case 1:
         stb_p(haddr, val);
         break;
     case 2:
         if (big_endian) {
             stw_be_p(haddr, val);
         } else {

--- 179 unchanged lines hidden ---