cputlb.c: diff of 97e03465f7dac073434373428388eb6e0998ecea (old) vs. 7e0d9973ea665bf459b2dbd173d0e51bc6ca5216 (new)
/*
 * Common CPU TLB handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either

--- 1468 unchanged lines hidden ---

    return false;
}

/* Macro to call the above, with local variables from the use context. */
#define VICTIM_TLB_HIT(TY, ADDR) \
    victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                   (ADDR) & TARGET_PAGE_MASK)
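/*
 * Illustrative sketch, not part of the diff: victim_tlb_hit() (elided in
 * the hidden region above) probes QEMU's small, fully associative cache
 * of recently evicted TLB entries.  The sketch below shows the shape of
 * such a probe with invented names, types, and sizes, matching how the
 * macro above selects a comparator field via offsetof() and passes a
 * page-masked address.  It is standalone, not QEMU's actual code.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define VTLB_SIZE_SKETCH 8                /* invented; real size is elided */

typedef struct {
    uint64_t addr_read, addr_write, addr_code;
    uintptr_t addend;
} TLBEntrySketch;

typedef struct {
    TLBEntrySketch table[256];            /* direct-mapped fast-path TLB */
    TLBEntrySketch vtable[VTLB_SIZE_SKETCH]; /* victim cache */
} CPUTLBSketch;

static bool victim_hit_sketch(CPUTLBSketch *tlb, size_t index,
                              size_t elt_ofs, uint64_t page)
{
    for (size_t i = 0; i < VTLB_SIZE_SKETCH; i++) {
        /* Compare the selected field (read/write/code) at elt_ofs. */
        uint64_t cmp = *(uint64_t *)((char *)&tlb->vtable[i] + elt_ofs);

        if (cmp == page) {
            /* Promote: swap the victim back into the direct-mapped slot. */
            TLBEntrySketch tmp = tlb->table[index];
            tlb->table[index] = tlb->vtable[i];
            tlb->vtable[i] = tmp;
            return true;
        }
    }
    return false;
}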
--- old version (97e03465) ---

/*
 * Return a ram_addr_t for the virtual address for execution.
 *
 * Return -1 if we can't translate and execute from an entire page
 * of RAM. This will force us to execute by loading and translating
 * one insn at a time, without caching.
 *
 * NOTE: This function will trigger an exception if the page is
 * not executable.
 */
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
                                        void **hostp)
{
    uintptr_t mmu_idx = cpu_mmu_index(env, true);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    void *p;

    if (unlikely(!tlb_hit(entry->addr_code, addr))) {
        if (!VICTIM_TLB_HIT(addr_code, addr)) {
            tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);

            if (unlikely(entry->addr_code & TLB_INVALID_MASK)) {
                /*
                 * The MMU protection covers a smaller range than a target
                 * page, so we must redo the MMU check for every insn.
                 */
                return -1;
            }
        }
        assert(tlb_hit(entry->addr_code, addr));
    }

    if (unlikely(entry->addr_code & TLB_MMIO)) {
        /* The region is not backed by RAM. */
        if (hostp) {
            *hostp = NULL;
        }
        return -1;
    }

    p = (void *)((uintptr_t)addr + entry->addend);
    if (hostp) {
        *hostp = p;
    }
    return qemu_ram_addr_from_host_nofail(p);
}

--- end of old version ---
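/*
 * Illustrative sketch, not part of the diff: how a caller typically
 * honors the -1 contract documented above (identical in both versions).
 * can_exec_from_host_sketch is an invented name; the real consumer, the
 * translator's code-fetch path, is outside the visible hunks.
 */
static bool can_exec_from_host_sketch(CPUArchState *env, target_ulong pc,
                                      void **host_pc)
{
    tb_page_addr_t phys = get_page_addr_code_hostp(env, pc, host_pc);

    if (phys == -1) {
        /*
         * MMIO, or protection finer than a page: translate one insn at
         * a time and do not cache the resulting translation block.
         */
        return false;
    }
    /* A whole RAM page: insns can be read directly through *host_pc. */
    return true;
}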
static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
                           CPUIOTLBEntry *iotlbentry, uintptr_t retaddr)
{
    ram_addr_t ram_addr = mem_vaddr + iotlbentry->addr;

    trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);

    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {

--- 139 unchanged lines hidden ---

    flags = probe_access_internal(env, addr, 0, access_type,
                                  mmu_idx, true, &host, 0);

    /* No combination of flags is expected by the caller. */
    return flags ? NULL : host;
}
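/*
 * Illustrative sketch, not part of the diff: the "flags ? NULL : host"
 * tail above encodes the probe contract -- probe_access_internal()
 * returns TLB_* flag bits (TLB_MMIO, TLB_NOTDIRTY, ...) describing
 * special-case handling, and the host pointer is only safe for direct
 * dereference when no flag is set.  probe_and_load_sketch is an
 * invented name.
 */
static bool probe_and_load_sketch(CPUArchState *env, target_ulong addr,
                                  int mmu_idx, uint8_t *out)
{
    void *host;
    int flags = probe_access_internal(env, addr, 0, MMU_DATA_LOAD,
                                      mmu_idx, true /* nonfault */,
                                      &host, 0);

    if (flags) {
        /* Invalid, MMIO, watchpoint, ...: fall back to the slow path. */
        return false;
    }
    *out = *(uint8_t *)host;   /* flags == 0: direct host access is safe */
    return true;
}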
--- new version (7e0d9973) ---

/*
 * Return a ram_addr_t for the virtual address for execution.
 *
 * Return -1 if we can't translate and execute from an entire page
 * of RAM. This will force us to execute by loading and translating
 * one insn at a time, without caching.
 *
 * NOTE: This function will trigger an exception if the page is
 * not executable.
 */
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
                                        void **hostp)
{
    void *p;

    (void)probe_access_internal(env, addr, 1, MMU_INST_FETCH,
                                cpu_mmu_index(env, true), false, &p, 0);
    if (p == NULL) {
        return -1;
    }
    if (hostp) {
        *hostp = p;
    }
    return qemu_ram_addr_from_host_nofail(p);
}

--- end of new version ---
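/*
 * Inferred, not shown in the diff: the two visible call sites pin down
 * the shape of probe_access_internal(); its definition sits in a hidden
 * region, so the parameter names below are educated guesses.
 */
static int probe_access_internal(CPUArchState *env, target_ulong addr,
                                 int fault_size, MMUAccessType access_type,
                                 int mmu_idx, bool nonfault,
                                 void **phost, uintptr_t retaddr);

/*
 * With nonfault == false, as in the new get_page_addr_code_hostp(), a
 * TLB miss is filled inside the probe (raising the guest fault if the
 * page is not executable), so the old version's explicit victim-TLB
 * probe and refill sequence collapses into a single call.
 */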
#ifdef CONFIG_PLUGIN
/*
 * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure.
 * This should be a hot path as we will have just looked this path up
 * in the softmmu lookup code (or helper). We don't handle re-fills or
 * checking the victim table. This is purely informational.
 *
 * This almost never fails as the memory access being instrumented

--- 927 unchanged lines hidden ---
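/*
 * Illustrative sketch, not part of the diff: the lookup described above
 * backs the public TCG plugin API.  Below is a minimal standalone plugin
 * built against qemu-plugin.h; the API calls are the real public
 * interface, but the plugin itself is invented for illustration.
 */
#include <inttypes.h>
#include <stdio.h>
#include <qemu-plugin.h>

QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;

static void mem_cb(unsigned int vcpu_index, qemu_plugin_meminfo_t info,
                   uint64_t vaddr, void *userdata)
{
    /* Resolves through the tlb_plugin_lookup path sketched above. */
    struct qemu_plugin_hwaddr *hw = qemu_plugin_get_hwaddr(info, vaddr);

    if (hw && !qemu_plugin_hwaddr_is_io(hw)) {
        printf("vaddr 0x%" PRIx64 " -> paddr 0x%" PRIx64 "\n",
               vaddr, qemu_plugin_hwaddr_phys_addr(hw));
    }
}

static void tb_trans_cb(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
{
    size_t n = qemu_plugin_tb_n_insns(tb);

    for (size_t i = 0; i < n; i++) {
        struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
        qemu_plugin_register_vcpu_mem_cb(insn, mem_cb,
                                         QEMU_PLUGIN_CB_NO_REGS,
                                         QEMU_PLUGIN_MEM_RW, NULL);
    }
}

QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
                                           const qemu_info_t *info,
                                           int argc, char **argv)
{
    qemu_plugin_register_vcpu_tb_trans_cb(id, tb_trans_cb);
    return 0;
}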