2 * Common CPU TLB handling
21 #include "qemu/main-loop.h"
22 #include "hw/core/tcg-cpu-ops.h"
23 #include "exec/exec-all.h"
24 #include "exec/page-protection.h"
28 #include "exec/tb-flush.h"
29 #include "exec/memory-internal.h"
31 #include "exec/mmu-access-type.h"
32 #include "exec/tlb-common.h"
35 #include "qemu/error-report.h"
37 #include "exec/helper-proto-common.h"
40 #include "exec/translate-all.h"
42 #include "tb-hash.h"
43 #include "internal-common.h"
44 #include "internal-target.h"
46 #include "qemu/plugin-memory.h"
48 #include "tcg/tcg-ldst.h"
49 #include "tcg/oversized-guest.h"
76 #define assert_cpu_is_self(cpu) do { \ argument
78 g_assert(!(cpu)->created || qemu_cpu_is_self(cpu)); \
90 #define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
94 return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1; in tlb_n_entries()
99 return fast->mask + (1 << CPU_TLB_ENTRY_BITS); in sizeof_tlb()
115 const uint32_t *ptr = (uint32_t *)&entry->addr_idx[access_type]; in tlb_read_idx()
119 const uint64_t *ptr = &entry->addr_idx[access_type]; in tlb_read_idx()
135 static inline uintptr_t tlb_index(CPUState *cpu, uintptr_t mmu_idx, in tlb_index() argument
138 uintptr_t size_mask = cpu->neg.tlb.f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS; in tlb_index()
144 static inline CPUTLBEntry *tlb_entry(CPUState *cpu, uintptr_t mmu_idx, in tlb_entry() argument
147 return &cpu->neg.tlb.f[mmu_idx].table[tlb_index(cpu, mmu_idx, addr)]; in tlb_entry()
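/*
 * Editorial sketch (not part of the excerpted file): the fast TLB encodes
 * its size as a byte mask over the table, so the lookup index is recovered
 * from the virtual page number.  The helper name below is illustrative.
 */
static inline uintptr_t tlb_index_sketch(CPUTLBDescFast *fast, vaddr addr)
{
    /* (n_entries - 1), cf. tlb_n_entries() above */
    uintptr_t size_mask = fast->mask >> CPU_TLB_ENTRY_BITS;

    /* hash on the virtual page number, as tlb_index() is assumed to do */
    return (addr >> TARGET_PAGE_BITS) & size_mask;
}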
153 desc->window_begin_ns = ns; in tlb_window_reset()
154 desc->window_max_entries = max_entries; in tlb_window_reset()
157 static void tb_jmp_cache_clear_page(CPUState *cpu, vaddr page_addr) in tb_jmp_cache_clear_page() argument
159 CPUJumpCache *jc = cpu->tb_jmp_cache; in tb_jmp_cache_clear_page()
168 qatomic_set(&jc->array[i0 + i].tb, NULL); in tb_jmp_cache_clear_page()
173 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
186 * In general, a memory-hungry process can benefit greatly from an appropriately
193 * To achieve near-optimal performance for all kinds of workloads, we:
197 * memory-hungry process will execute again, and its memory hungriness will
206 * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
207 * since in that range performance is likely near-optimal. Recall that the TLB
220 bool window_expired = now > desc->window_begin_ns + window_len_ns; in tlb_mmu_resize_locked()
222 if (desc->n_used_entries > desc->window_max_entries) { in tlb_mmu_resize_locked()
223 desc->window_max_entries = desc->n_used_entries; in tlb_mmu_resize_locked()
225 rate = desc->window_max_entries * 100 / old_size; in tlb_mmu_resize_locked()
230 size_t ceil = pow2ceil(desc->window_max_entries); in tlb_mmu_resize_locked()
231 size_t expected_rate = desc->window_max_entries * 100 / ceil; in tlb_mmu_resize_locked()
240 * expect to get is 35%, which is still in the 30-70% range where in tlb_mmu_resize_locked()
251 tlb_window_reset(desc, now, desc->n_used_entries); in tlb_mmu_resize_locked()
256 g_free(fast->table); in tlb_mmu_resize_locked()
257 g_free(desc->fulltlb); in tlb_mmu_resize_locked()
260 /* desc->n_used_entries is cleared by the caller */ in tlb_mmu_resize_locked()
261 fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS; in tlb_mmu_resize_locked()
262 fast->table = g_try_new(CPUTLBEntry, new_size); in tlb_mmu_resize_locked()
263 desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size); in tlb_mmu_resize_locked()
272 while (fast->table == NULL || desc->fulltlb == NULL) { in tlb_mmu_resize_locked()
278 fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS; in tlb_mmu_resize_locked()
280 g_free(fast->table); in tlb_mmu_resize_locked()
281 g_free(desc->fulltlb); in tlb_mmu_resize_locked()
282 fast->table = g_try_new(CPUTLBEntry, new_size); in tlb_mmu_resize_locked()
283 desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size); in tlb_mmu_resize_locked()
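/*
 * Worked example of the resize arithmetic above (editorial illustration,
 * using only the formulas visible in this excerpt):
 *
 *   old_size = 1024, window_max_entries = 250, window expired:
 *     rate          = 250 * 100 / 1024 = 24%   -> below 30%, shrink candidate
 *     ceil          = pow2ceil(250)    = 256
 *     expected_rate = 250 * 100 / 256  = 97%   -> shrinking that far would
 *       soon force a grow; the "35%" remark above implies the target is
 *       doubled to 512 instead, for an expected use rate of about 48%.
 *
 *   old_size = 1024, window_max_entries = 900:
 *     rate = 900 * 100 / 1024 = 87%            -> above 70%, the TLB is grown
 *       (the growth branch itself is not shown in this excerpt).
 */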
289 desc->n_used_entries = 0; in tlb_mmu_flush_locked()
290 desc->large_page_addr = -1; in tlb_mmu_flush_locked()
291 desc->large_page_mask = -1; in tlb_mmu_flush_locked()
292 desc->vindex = 0; in tlb_mmu_flush_locked()
293 memset(fast->table, -1, sizeof_tlb(fast)); in tlb_mmu_flush_locked()
294 memset(desc->vtable, -1, sizeof(desc->vtable)); in tlb_mmu_flush_locked()
297 static void tlb_flush_one_mmuidx_locked(CPUState *cpu, int mmu_idx, in tlb_flush_one_mmuidx_locked() argument
300 CPUTLBDesc *desc = &cpu->neg.tlb.d[mmu_idx]; in tlb_flush_one_mmuidx_locked()
301 CPUTLBDescFast *fast = &cpu->neg.tlb.f[mmu_idx]; in tlb_flush_one_mmuidx_locked()
312 desc->n_used_entries = 0; in tlb_mmu_init()
313 fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS; in tlb_mmu_init()
314 fast->table = g_new(CPUTLBEntry, n_entries); in tlb_mmu_init()
315 desc->fulltlb = g_new(CPUTLBEntryFull, n_entries); in tlb_mmu_init()
319 static inline void tlb_n_used_entries_inc(CPUState *cpu, uintptr_t mmu_idx) in tlb_n_used_entries_inc() argument
321 cpu->neg.tlb.d[mmu_idx].n_used_entries++; in tlb_n_used_entries_inc()
324 static inline void tlb_n_used_entries_dec(CPUState *cpu, uintptr_t mmu_idx) in tlb_n_used_entries_dec() argument
326 cpu->neg.tlb.d[mmu_idx].n_used_entries--; in tlb_n_used_entries_dec()
329 void tlb_init(CPUState *cpu) in tlb_init() argument
334 qemu_spin_init(&cpu->neg.tlb.c.lock); in tlb_init()
337 cpu->neg.tlb.c.dirty = 0; in tlb_init()
340 tlb_mmu_init(&cpu->neg.tlb.d[i], &cpu->neg.tlb.f[i], now); in tlb_init()
344 void tlb_destroy(CPUState *cpu) in tlb_destroy() argument
348 qemu_spin_destroy(&cpu->neg.tlb.c.lock); in tlb_destroy()
350 CPUTLBDesc *desc = &cpu->neg.tlb.d[i]; in tlb_destroy()
351 CPUTLBDescFast *fast = &cpu->neg.tlb.f[i]; in tlb_destroy()
353 g_free(fast->table); in tlb_destroy()
354 g_free(desc->fulltlb); in tlb_destroy()
360 * If the wait flag is set then the src cpu's helper will be queued as
368 CPUState *cpu; in flush_all_helper() local
370 CPU_FOREACH(cpu) { in flush_all_helper()
371 if (cpu != src) { in flush_all_helper()
372 async_run_on_cpu(cpu, fn, d); in flush_all_helper()
377 static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data) in tlb_flush_by_mmuidx_async_work() argument
383 assert_cpu_is_self(cpu); in tlb_flush_by_mmuidx_async_work()
387 qemu_spin_lock(&cpu->neg.tlb.c.lock); in tlb_flush_by_mmuidx_async_work()
389 all_dirty = cpu->neg.tlb.c.dirty; in tlb_flush_by_mmuidx_async_work()
392 cpu->neg.tlb.c.dirty = all_dirty; in tlb_flush_by_mmuidx_async_work()
394 for (work = to_clean; work != 0; work &= work - 1) { in tlb_flush_by_mmuidx_async_work()
396 tlb_flush_one_mmuidx_locked(cpu, mmu_idx, now); in tlb_flush_by_mmuidx_async_work()
399 qemu_spin_unlock(&cpu->neg.tlb.c.lock); in tlb_flush_by_mmuidx_async_work()
401 tcg_flush_jmp_cache(cpu); in tlb_flush_by_mmuidx_async_work()
404 qatomic_set(&cpu->neg.tlb.c.full_flush_count, in tlb_flush_by_mmuidx_async_work()
405 cpu->neg.tlb.c.full_flush_count + 1); in tlb_flush_by_mmuidx_async_work()
407 qatomic_set(&cpu->neg.tlb.c.part_flush_count, in tlb_flush_by_mmuidx_async_work()
408 cpu->neg.tlb.c.part_flush_count + ctpop16(to_clean)); in tlb_flush_by_mmuidx_async_work()
410 qatomic_set(&cpu->neg.tlb.c.elide_flush_count, in tlb_flush_by_mmuidx_async_work()
411 cpu->neg.tlb.c.elide_flush_count + in tlb_flush_by_mmuidx_async_work()
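/*
 * Editorial note: the "work &= work - 1" loop above is the usual set-bit
 * iteration idiom; each pass clears the lowest set bit of the idxmap and
 * (in the full source, presumably via ctz32()) passes that bit's index to
 * tlb_flush_one_mmuidx_locked().  For example, to_clean = 0x0a (bits 1 and
 * 3 set) flushes mmu_idx 1, then mmu_idx 3, then the loop terminates.
 */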
417 void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap) in tlb_flush_by_mmuidx() argument
421 assert_cpu_is_self(cpu); in tlb_flush_by_mmuidx()
423 tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap)); in tlb_flush_by_mmuidx()
426 void tlb_flush(CPUState *cpu) in tlb_flush() argument
428 tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS); in tlb_flush()
452 return (page == (tlb_entry->addr_read & mask) || in tlb_hit_page_mask_anyprot()
454 page == (tlb_entry->addr_code & mask)); in tlb_hit_page_mask_anyprot()
459 return tlb_hit_page_mask_anyprot(tlb_entry, page, -1); in tlb_hit_page_anyprot()
463 * tlb_entry_is_empty - return true if the entry is not in use
468 return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1; in tlb_entry_is_empty()
477 memset(tlb_entry, -1, sizeof(*tlb_entry)); in tlb_flush_entry_mask_locked()
485 return tlb_flush_entry_mask_locked(tlb_entry, page, -1); in tlb_flush_entry_locked()
489 static void tlb_flush_vtlb_page_mask_locked(CPUState *cpu, int mmu_idx, in tlb_flush_vtlb_page_mask_locked() argument
493 CPUTLBDesc *d = &cpu->neg.tlb.d[mmu_idx]; in tlb_flush_vtlb_page_mask_locked()
496 assert_cpu_is_self(cpu); in tlb_flush_vtlb_page_mask_locked()
498 if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) { in tlb_flush_vtlb_page_mask_locked()
499 tlb_n_used_entries_dec(cpu, mmu_idx); in tlb_flush_vtlb_page_mask_locked()
504 static inline void tlb_flush_vtlb_page_locked(CPUState *cpu, int mmu_idx, in tlb_flush_vtlb_page_locked() argument
507 tlb_flush_vtlb_page_mask_locked(cpu, mmu_idx, page, -1); in tlb_flush_vtlb_page_locked()
510 static void tlb_flush_page_locked(CPUState *cpu, int midx, vaddr page) in tlb_flush_page_locked() argument
512 vaddr lp_addr = cpu->neg.tlb.d[midx].large_page_addr; in tlb_flush_page_locked()
513 vaddr lp_mask = cpu->neg.tlb.d[midx].large_page_mask; in tlb_flush_page_locked()
520 tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime()); in tlb_flush_page_locked()
522 if (tlb_flush_entry_locked(tlb_entry(cpu, midx, page), page)) { in tlb_flush_page_locked()
523 tlb_n_used_entries_dec(cpu, midx); in tlb_flush_page_locked()
525 tlb_flush_vtlb_page_locked(cpu, midx, page); in tlb_flush_page_locked()
531 * @cpu: cpu on which to flush
536 * at @addr from the tlbs indicated by @idxmap from @cpu.
538 static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu, in tlb_flush_page_by_mmuidx_async_0() argument
544 assert_cpu_is_self(cpu); in tlb_flush_page_by_mmuidx_async_0()
548 qemu_spin_lock(&cpu->neg.tlb.c.lock); in tlb_flush_page_by_mmuidx_async_0()
551 tlb_flush_page_locked(cpu, mmu_idx, addr); in tlb_flush_page_by_mmuidx_async_0()
554 qemu_spin_unlock(&cpu->neg.tlb.c.lock); in tlb_flush_page_by_mmuidx_async_0()
560 tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE); in tlb_flush_page_by_mmuidx_async_0()
561 tb_jmp_cache_clear_page(cpu, addr); in tlb_flush_page_by_mmuidx_async_0()
566 * @cpu: cpu on which to flush
574 static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu, in tlb_flush_page_by_mmuidx_async_1() argument
581 tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap); in tlb_flush_page_by_mmuidx_async_1()
591 * @cpu: cpu on which to flush
599 static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu, in tlb_flush_page_by_mmuidx_async_2() argument
604 tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap); in tlb_flush_page_by_mmuidx_async_2()
608 void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr, uint16_t idxmap) in tlb_flush_page_by_mmuidx() argument
612 assert_cpu_is_self(cpu); in tlb_flush_page_by_mmuidx()
617 tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap); in tlb_flush_page_by_mmuidx()
620 void tlb_flush_page(CPUState *cpu, vaddr addr) in tlb_flush_page() argument
622 tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS); in tlb_flush_page()
647 /* Allocate a separate data block for each destination cpu. */ in tlb_flush_page_by_mmuidx_all_cpus_synced()
651 d->addr = addr; in tlb_flush_page_by_mmuidx_all_cpus_synced()
652 d->idxmap = idxmap; in tlb_flush_page_by_mmuidx_all_cpus_synced()
659 d->addr = addr; in tlb_flush_page_by_mmuidx_all_cpus_synced()
660 d->idxmap = idxmap; in tlb_flush_page_by_mmuidx_all_cpus_synced()
671 static void tlb_flush_range_locked(CPUState *cpu, int midx, in tlb_flush_range_locked() argument
675 CPUTLBDesc *d = &cpu->neg.tlb.d[midx]; in tlb_flush_range_locked()
676 CPUTLBDescFast *f = &cpu->neg.tlb.f[midx]; in tlb_flush_range_locked()
689 if (mask < f->mask || len > f->mask) { in tlb_flush_range_locked()
693 tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime()); in tlb_flush_range_locked()
702 if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) { in tlb_flush_range_locked()
705 midx, d->large_page_addr, d->large_page_mask); in tlb_flush_range_locked()
706 tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime()); in tlb_flush_range_locked()
712 CPUTLBEntry *entry = tlb_entry(cpu, midx, page); in tlb_flush_range_locked()
715 tlb_n_used_entries_dec(cpu, midx); in tlb_flush_range_locked()
717 tlb_flush_vtlb_page_mask_locked(cpu, midx, page, mask); in tlb_flush_range_locked()
728 static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu, in tlb_flush_range_by_mmuidx_async_0() argument
733 assert_cpu_is_self(cpu); in tlb_flush_range_by_mmuidx_async_0()
738 qemu_spin_lock(&cpu->neg.tlb.c.lock); in tlb_flush_range_by_mmuidx_async_0()
741 tlb_flush_range_locked(cpu, mmu_idx, d.addr, d.len, d.bits); in tlb_flush_range_by_mmuidx_async_0()
744 qemu_spin_unlock(&cpu->neg.tlb.c.lock); in tlb_flush_range_by_mmuidx_async_0()
751 tcg_flush_jmp_cache(cpu); in tlb_flush_range_by_mmuidx_async_0()
759 d.addr -= TARGET_PAGE_SIZE; in tlb_flush_range_by_mmuidx_async_0()
761 tb_jmp_cache_clear_page(cpu, d.addr); in tlb_flush_range_by_mmuidx_async_0()
766 static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu, in tlb_flush_range_by_mmuidx_async_1() argument
770 tlb_flush_range_by_mmuidx_async_0(cpu, *d); in tlb_flush_range_by_mmuidx_async_1()
774 void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr, in tlb_flush_range_by_mmuidx() argument
780 assert_cpu_is_self(cpu); in tlb_flush_range_by_mmuidx()
787 tlb_flush_page_by_mmuidx(cpu, addr, idxmap); in tlb_flush_range_by_mmuidx()
792 tlb_flush_by_mmuidx(cpu, idxmap); in tlb_flush_range_by_mmuidx()
802 tlb_flush_range_by_mmuidx_async_0(cpu, d); in tlb_flush_range_by_mmuidx()
805 void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr, in tlb_flush_page_bits_by_mmuidx() argument
808 tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits); in tlb_flush_page_bits_by_mmuidx()
840 /* Allocate a separate data block for each destination cpu. */ in tlb_flush_range_by_mmuidx_all_cpus_synced()
891 * te->addr_write with qatomic_set. We don't need to worry about this for
899 uintptr_t addr = tlb_entry->addr_write; in tlb_reset_dirty_range_locked()
904 addr += tlb_entry->addend; in tlb_reset_dirty_range_locked()
905 if ((addr - start) < length) { in tlb_reset_dirty_range_locked()
907 uint32_t *ptr_write = (uint32_t *)&tlb_entry->addr_write; in tlb_reset_dirty_range_locked()
911 tlb_entry->addr_write |= TLB_NOTDIRTY; in tlb_reset_dirty_range_locked()
913 qatomic_set(&tlb_entry->addr_write, in tlb_reset_dirty_range_locked()
914 tlb_entry->addr_write | TLB_NOTDIRTY); in tlb_reset_dirty_range_locked()
932 * thing actually updated is the target TLB entry ->addr_write flags.
934 void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length) in tlb_reset_dirty() argument
938 qemu_spin_lock(&cpu->neg.tlb.c.lock); in tlb_reset_dirty()
941 unsigned int n = tlb_n_entries(&cpu->neg.tlb.f[mmu_idx]); in tlb_reset_dirty()
944 tlb_reset_dirty_range_locked(&cpu->neg.tlb.f[mmu_idx].table[i], in tlb_reset_dirty()
949 tlb_reset_dirty_range_locked(&cpu->neg.tlb.d[mmu_idx].vtable[i], in tlb_reset_dirty()
953 qemu_spin_unlock(&cpu->neg.tlb.c.lock); in tlb_reset_dirty()
960 if (tlb_entry->addr_write == (addr | TLB_NOTDIRTY)) { in tlb_set_dirty1_locked()
961 tlb_entry->addr_write = addr; in tlb_set_dirty1_locked()
967 static void tlb_set_dirty(CPUState *cpu, vaddr addr) in tlb_set_dirty() argument
971 assert_cpu_is_self(cpu); in tlb_set_dirty()
974 qemu_spin_lock(&cpu->neg.tlb.c.lock); in tlb_set_dirty()
976 tlb_set_dirty1_locked(tlb_entry(cpu, mmu_idx, addr), addr); in tlb_set_dirty()
982 tlb_set_dirty1_locked(&cpu->neg.tlb.d[mmu_idx].vtable[k], addr); in tlb_set_dirty()
985 qemu_spin_unlock(&cpu->neg.tlb.c.lock); in tlb_set_dirty()
990 static void tlb_add_large_page(CPUState *cpu, int mmu_idx, in tlb_add_large_page() argument
993 vaddr lp_addr = cpu->neg.tlb.d[mmu_idx].large_page_addr; in tlb_add_large_page()
994 vaddr lp_mask = ~(size - 1); in tlb_add_large_page()
996 if (lp_addr == (vaddr)-1) { in tlb_add_large_page()
1003 lp_mask &= cpu->neg.tlb.d[mmu_idx].large_page_mask; in tlb_add_large_page()
1008 cpu->neg.tlb.d[mmu_idx].large_page_addr = lp_addr & lp_mask; in tlb_add_large_page()
1009 cpu->neg.tlb.d[mmu_idx].large_page_mask = lp_mask; in tlb_add_large_page()
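/*
 * Worked example for tlb_add_large_page() (editorial illustration):
 * recording a 2 MiB page at addr = 0x7f0000345000 gives
 *
 *   lp_mask         = ~(0x200000 - 1) = 0xffffffffffe00000
 *   large_page_addr = addr & lp_mask  = 0x7f0000200000
 *
 * If a different large page was already tracked, the masks are AND-ed
 * together (see above), so the single (addr, mask) pair conservatively
 * covers both regions; tlb_flush_page_locked() earlier in this listing
 * then falls back to flushing the whole mmu_idx when a page in that range
 * is flushed.
 */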
1023 address = -1; in tlb_set_compare()
1026 ent->addr_idx[access_type] = address; in tlb_set_compare()
1027 full->slow_flags[access_type] = flags; in tlb_set_compare()
1035 * Called from TCG-generated code, which is under an RCU read-side
1038 void tlb_set_page_full(CPUState *cpu, int mmu_idx, in tlb_set_page_full() argument
1041 CPUTLB *tlb = &cpu->neg.tlb; in tlb_set_page_full()
1042 CPUTLBDesc *desc = &tlb->d[mmu_idx]; in tlb_set_page_full()
1052 assert_cpu_is_self(cpu); in tlb_set_page_full()
1054 if (full->lg_page_size <= TARGET_PAGE_BITS) { in tlb_set_page_full()
1057 sz = (hwaddr)1 << full->lg_page_size; in tlb_set_page_full()
1058 tlb_add_large_page(cpu, mmu_idx, addr, sz); in tlb_set_page_full()
1061 paddr_page = full->phys_addr & TARGET_PAGE_MASK; in tlb_set_page_full()
1063 prot = full->prot; in tlb_set_page_full()
1064 asidx = cpu_asidx_from_attrs(cpu, full->attrs); in tlb_set_page_full()
1065 section = address_space_translate_for_iotlb(cpu, asidx, paddr_page, in tlb_set_page_full()
1066 &xlat, &sz, full->attrs, &prot); in tlb_set_page_full()
1071 addr, full->phys_addr, prot, mmu_idx); in tlb_set_page_full()
1073 read_flags = full->tlb_fill_flags; in tlb_set_page_full()
1074 if (full->lg_page_size < TARGET_PAGE_BITS) { in tlb_set_page_full()
1079 is_ram = memory_region_is_ram(section->mr); in tlb_set_page_full()
1080 is_romd = memory_region_is_romd(section->mr); in tlb_set_page_full()
1084 addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat; in tlb_set_page_full()
1092 iotlb = memory_region_get_ram_addr(section->mr) + xlat; in tlb_set_page_full()
1099 if (section->readonly) { in tlb_set_page_full()
1107 iotlb = memory_region_section_get_iotlb(cpu, section) + xlat; in tlb_set_page_full()
1119 wp_flags = cpu_watchpoint_address_matches(cpu, addr_page, in tlb_set_page_full()
1122 index = tlb_index(cpu, mmu_idx, addr_page); in tlb_set_page_full()
1123 te = tlb_entry(cpu, mmu_idx, addr_page); in tlb_set_page_full()
1132 qemu_spin_lock(&tlb->c.lock); in tlb_set_page_full()
1135 tlb->c.dirty |= 1 << mmu_idx; in tlb_set_page_full()
1138 tlb_flush_vtlb_page_locked(cpu, mmu_idx, addr_page); in tlb_set_page_full()
1145 unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE; in tlb_set_page_full()
1146 CPUTLBEntry *tv = &desc->vtable[vidx]; in tlb_set_page_full()
1150 desc->vfulltlb[vidx] = desc->fulltlb[index]; in tlb_set_page_full()
1151 tlb_n_used_entries_dec(cpu, mmu_idx); in tlb_set_page_full()
1159 * - a physical section number in the lower TARGET_PAGE_BITS in tlb_set_page_full()
1160 * - the offset within section->mr of the page base (I/O, ROMD) with the in tlb_set_page_full()
1164 * (non-page-aligned) vaddr of the eventual memory access to get in tlb_set_page_full()
1169 desc->fulltlb[index] = *full; in tlb_set_page_full()
1170 full = &desc->fulltlb[index]; in tlb_set_page_full()
1171 full->xlat_section = iotlb - addr_page; in tlb_set_page_full()
1172 full->phys_addr = paddr_page; in tlb_set_page_full()
1175 tn.addend = addend - addr_page; in tlb_set_page_full()
1196 tlb_n_used_entries_inc(cpu, mmu_idx); in tlb_set_page_full()
1197 qemu_spin_unlock(&tlb->c.lock); in tlb_set_page_full()
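/*
 * Editorial sketch of the xlat_section arithmetic stored just above: the
 * fill path records the translation relative to the page base,
 *
 *   full->xlat_section = iotlb - addr_page;
 *
 * so a later, possibly non-page-aligned access recovers its RAM or I/O
 * offset by simply adding the faulting virtual address, as notdirty_write()
 * does further down:
 *
 *   ram_addr = mem_vaddr + full->xlat_section
 *            = iotlb + (mem_vaddr - addr_page)   // page base + page offset
 */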
1200 void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr, in tlb_set_page_with_attrs() argument
1212 tlb_set_page_full(cpu, mmu_idx, addr, &full); in tlb_set_page_with_attrs()
1215 void tlb_set_page(CPUState *cpu, vaddr addr, in tlb_set_page() argument
1219 tlb_set_page_with_attrs(cpu, addr, paddr, MEMTXATTRS_UNSPECIFIED, in tlb_set_page()
1229 static bool tlb_fill_align(CPUState *cpu, vaddr addr, MMUAccessType type, in tlb_fill_align() argument
1233 const TCGCPUOps *ops = cpu->cc->tcg_ops; in tlb_fill_align()
1236 if (ops->tlb_fill_align) { in tlb_fill_align()
1237 if (ops->tlb_fill_align(cpu, &full, addr, type, mmu_idx, in tlb_fill_align()
1239 tlb_set_page_full(cpu, mmu_idx, addr, &full); in tlb_fill_align()
1244 if (addr & ((1u << memop_alignment_bits(memop)) - 1)) { in tlb_fill_align()
1245 ops->do_unaligned_access(cpu, addr, type, mmu_idx, ra); in tlb_fill_align()
1247 if (ops->tlb_fill(cpu, addr, size, type, mmu_idx, probe, ra)) { in tlb_fill_align()
1255 static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr, in cpu_unaligned_access() argument
1259 cpu->cc->tcg_ops->do_unaligned_access(cpu, addr, access_type, in cpu_unaligned_access()
1264 io_prepare(hwaddr *out_offset, CPUState *cpu, hwaddr xlat, in io_prepare() argument
1270 section = iotlb_to_section(cpu, xlat, attrs); in io_prepare()
1272 cpu->mem_io_pc = retaddr; in io_prepare()
1273 if (!cpu->neg.can_do_io) { in io_prepare()
1274 cpu_io_recompile(cpu, retaddr); in io_prepare()
1281 static void io_failed(CPUState *cpu, CPUTLBEntryFull *full, vaddr addr, in io_failed() argument
1285 if (!cpu->ignore_memory_transaction_failures in io_failed()
1286 && cpu->cc->tcg_ops->do_transaction_failed) { in io_failed()
1287 hwaddr physaddr = full->phys_addr | (addr & ~TARGET_PAGE_MASK); in io_failed()
1289 cpu->cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size, in io_failed()
1291 full->attrs, response, retaddr); in io_failed()
1297 static bool victim_tlb_hit(CPUState *cpu, size_t mmu_idx, size_t index, in victim_tlb_hit() argument
1302 assert_cpu_is_self(cpu); in victim_tlb_hit()
1304 CPUTLBEntry *vtlb = &cpu->neg.tlb.d[mmu_idx].vtable[vidx]; in victim_tlb_hit()
1309 CPUTLBEntry tmptlb, *tlb = &cpu->neg.tlb.f[mmu_idx].table[index]; in victim_tlb_hit()
1311 qemu_spin_lock(&cpu->neg.tlb.c.lock); in victim_tlb_hit()
1315 qemu_spin_unlock(&cpu->neg.tlb.c.lock); in victim_tlb_hit()
1317 CPUTLBEntryFull *f1 = &cpu->neg.tlb.d[mmu_idx].fulltlb[index]; in victim_tlb_hit()
1318 CPUTLBEntryFull *f2 = &cpu->neg.tlb.d[mmu_idx].vfulltlb[vidx]; in victim_tlb_hit()
1327 static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size, in notdirty_write() argument
1330 ram_addr_t ram_addr = mem_vaddr + full->xlat_section; in notdirty_write()
1347 tlb_set_dirty(cpu, mem_vaddr); in notdirty_write()
1351 static int probe_access_internal(CPUState *cpu, vaddr addr, in probe_access_internal() argument
1357 uintptr_t index = tlb_index(cpu, mmu_idx, addr); in probe_access_internal()
1358 CPUTLBEntry *entry = tlb_entry(cpu, mmu_idx, addr); in probe_access_internal()
1362 bool force_mmio = check_mem_cbs && cpu_plugin_mem_cbs_enabled(cpu); in probe_access_internal()
1366 if (!victim_tlb_hit(cpu, mmu_idx, index, access_type, page_addr)) { in probe_access_internal()
1367 if (!tlb_fill_align(cpu, addr, access_type, mmu_idx, in probe_access_internal()
1369 /* Non-faulting page table read failed. */ in probe_access_internal()
1376 index = tlb_index(cpu, mmu_idx, addr); in probe_access_internal()
1377 entry = tlb_entry(cpu, mmu_idx, addr); in probe_access_internal()
1390 *pfull = full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index]; in probe_access_internal()
1391 flags |= full->slow_flags[access_type]; in probe_access_internal()
1393 /* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */ in probe_access_internal()
1401 *phost = (void *)((uintptr_t)addr + entry->addend); in probe_access_internal()
1455 g_assert(-(addr | TARGET_PAGE_MASK) >= size); in probe_access_flags()
1478 g_assert(-(addr | TARGET_PAGE_MASK) >= size); in probe_access()
1495 full->attrs, wp_access, retaddr); in probe_access()
1524 * Return -1 if we can't translate and execute from an entire page
1541 return -1; in get_page_addr_code_hostp()
1544 if (full->lg_page_size < TARGET_PAGE_BITS) { in get_page_addr_code_hostp()
1545 return -1; in get_page_addr_code_hostp()
1561 * in the softmmu lookup code (or helper). We don't handle re-fills or
1569 bool tlb_plugin_lookup(CPUState *cpu, vaddr addr, int mmu_idx, in tlb_plugin_lookup() argument
1572 CPUTLBEntry *tlbe = tlb_entry(cpu, mmu_idx, addr); in tlb_plugin_lookup()
1573 uintptr_t index = tlb_index(cpu, mmu_idx, addr); in tlb_plugin_lookup()
1582 full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index]; in tlb_plugin_lookup()
1583 data->phys_addr = full->phys_addr | (addr & ~TARGET_PAGE_MASK); in tlb_plugin_lookup()
1588 iotlb_to_section(cpu, full->xlat_section & ~TARGET_PAGE_MASK, in tlb_plugin_lookup()
1589 full->attrs); in tlb_plugin_lookup()
1590 data->is_io = true; in tlb_plugin_lookup()
1591 data->mr = section->mr; in tlb_plugin_lookup()
1593 data->is_io = false; in tlb_plugin_lookup()
1594 data->mr = NULL; in tlb_plugin_lookup()
1621 * @cpu: generic cpu state
1633 static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data, MemOp memop, in mmu_lookup1() argument
1636 vaddr addr = data->addr; in mmu_lookup1()
1637 uintptr_t index = tlb_index(cpu, mmu_idx, addr); in mmu_lookup1()
1638 CPUTLBEntry *entry = tlb_entry(cpu, mmu_idx, addr); in mmu_lookup1()
1646 if (!victim_tlb_hit(cpu, mmu_idx, index, access_type, in mmu_lookup1()
1648 tlb_fill_align(cpu, addr, access_type, mmu_idx, in mmu_lookup1()
1649 memop, data->size, false, ra); in mmu_lookup1()
1651 index = tlb_index(cpu, mmu_idx, addr); in mmu_lookup1()
1652 entry = tlb_entry(cpu, mmu_idx, addr); in mmu_lookup1()
1657 full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index]; in mmu_lookup1()
1659 flags |= full->slow_flags[access_type]; in mmu_lookup1()
1675 if (unlikely(addr & ((1 << a_bits) - 1))) { in mmu_lookup1()
1676 cpu_unaligned_access(cpu, addr, access_type, mmu_idx, ra); in mmu_lookup1()
1680 data->full = full; in mmu_lookup1()
1681 data->flags = flags; in mmu_lookup1()
1683 data->haddr = (void *)((uintptr_t)addr + entry->addend); in mmu_lookup1()
1690 * @cpu: generic cpu state
1698 static void mmu_watch_or_dirty(CPUState *cpu, MMULookupPageData *data, in mmu_watch_or_dirty() argument
1701 CPUTLBEntryFull *full = data->full; in mmu_watch_or_dirty()
1702 vaddr addr = data->addr; in mmu_watch_or_dirty()
1703 int flags = data->flags; in mmu_watch_or_dirty()
1704 int size = data->size; in mmu_watch_or_dirty()
1709 cpu_check_watchpoint(cpu, addr, size, full->attrs, wp, ra); in mmu_watch_or_dirty()
1715 notdirty_write(cpu, addr, size, full, ra); in mmu_watch_or_dirty()
1718 data->flags = flags; in mmu_watch_or_dirty()
1723 * @cpu: generic cpu state
1733 static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi, in mmu_lookup() argument
1739 l->memop = get_memop(oi); in mmu_lookup()
1740 l->mmu_idx = get_mmuidx(oi); in mmu_lookup()
1742 tcg_debug_assert(l->mmu_idx < NB_MMU_MODES); in mmu_lookup()
1744 l->page[0].addr = addr; in mmu_lookup()
1745 l->page[0].size = memop_size(l->memop); in mmu_lookup()
1746 l->page[1].addr = (addr + l->page[0].size - 1) & TARGET_PAGE_MASK; in mmu_lookup()
1747 l->page[1].size = 0; in mmu_lookup()
1748 crosspage = (addr ^ l->page[1].addr) & TARGET_PAGE_MASK; in mmu_lookup()
1751 mmu_lookup1(cpu, &l->page[0], l->memop, l->mmu_idx, type, ra); in mmu_lookup()
1753 flags = l->page[0].flags; in mmu_lookup()
1755 mmu_watch_or_dirty(cpu, &l->page[0], type, ra); in mmu_lookup()
1758 l->memop ^= MO_BSWAP; in mmu_lookup()
1762 int size0 = l->page[1].addr - addr; in mmu_lookup()
1763 l->page[1].size = l->page[0].size - size0; in mmu_lookup()
1764 l->page[0].size = size0; in mmu_lookup()
1770 mmu_lookup1(cpu, &l->page[0], l->memop, l->mmu_idx, type, ra); in mmu_lookup()
1771 if (mmu_lookup1(cpu, &l->page[1], 0, l->mmu_idx, type, ra)) { in mmu_lookup()
1772 uintptr_t index = tlb_index(cpu, l->mmu_idx, addr); in mmu_lookup()
1773 l->page[0].full = &cpu->neg.tlb.d[l->mmu_idx].fulltlb[index]; in mmu_lookup()
1776 flags = l->page[0].flags | l->page[1].flags; in mmu_lookup()
1778 mmu_watch_or_dirty(cpu, &l->page[0], type, ra); in mmu_lookup()
1779 mmu_watch_or_dirty(cpu, &l->page[1], type, ra); in mmu_lookup()
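/*
 * Worked example of the cross-page test in mmu_lookup() (editorial
 * illustration, assuming 4 KiB target pages): a 4-byte access at
 * addr = 0x40000ffe gives
 *
 *   page[1].addr = (0x40000ffe + 4 - 1) & TARGET_PAGE_MASK = 0x40001000
 *   crosspage    = (0x40000ffe ^ 0x40001000) & TARGET_PAGE_MASK != 0
 *
 * so the access is split: size0 = 0x40001000 - 0x40000ffe = 2 bytes on
 * page[0] and the remaining 2 bytes on page[1].
 */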
1797 static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi, in atomic_mmu_lookup() argument
1812 retaddr -= GETPC_ADJ; in atomic_mmu_lookup()
1814 index = tlb_index(cpu, mmu_idx, addr); in atomic_mmu_lookup()
1815 tlbe = tlb_entry(cpu, mmu_idx, addr); in atomic_mmu_lookup()
1820 if (!victim_tlb_hit(cpu, mmu_idx, index, MMU_DATA_STORE, in atomic_mmu_lookup()
1822 tlb_fill_align(cpu, addr, MMU_DATA_STORE, mmu_idx, in atomic_mmu_lookup()
1825 index = tlb_index(cpu, mmu_idx, addr); in atomic_mmu_lookup()
1826 tlbe = tlb_entry(cpu, mmu_idx, addr); in atomic_mmu_lookup()
1832 * Let the guest notice RMW on a write-only page. in atomic_mmu_lookup()
1835 * but addr_read will only be -1 if PAGE_READ was unset. in atomic_mmu_lookup()
1837 if (unlikely(tlbe->addr_read == -1)) { in atomic_mmu_lookup()
1838 tlb_fill_align(cpu, addr, MMU_DATA_LOAD, mmu_idx, in atomic_mmu_lookup()
1849 if (!did_tlb_fill && (addr & ((1 << memop_alignment_bits(mop)) - 1))) { in atomic_mmu_lookup()
1850 cpu_unaligned_access(cpu, addr, MMU_DATA_STORE, mmu_idx, retaddr); in atomic_mmu_lookup()
1854 if (unlikely(addr & (size - 1))) { in atomic_mmu_lookup()
1859 * mark an exception and exit the cpu loop. in atomic_mmu_lookup()
1865 tlb_addr |= tlbe->addr_read; in atomic_mmu_lookup()
1867 /* Notice an IO access or a needs-MMU-lookup access */ in atomic_mmu_lookup()
1870 support this apart from stop-the-world. */ in atomic_mmu_lookup()
1874 hostaddr = (void *)((uintptr_t)addr + tlbe->addend); in atomic_mmu_lookup()
1875 full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index]; in atomic_mmu_lookup()
1878 notdirty_write(cpu, addr, size, full, retaddr); in atomic_mmu_lookup()
1884 if (full->slow_flags[MMU_DATA_STORE] & TLB_WATCHPOINT) { in atomic_mmu_lookup()
1887 if (full->slow_flags[MMU_DATA_LOAD] & TLB_WATCHPOINT) { in atomic_mmu_lookup()
1891 cpu_check_watchpoint(cpu, addr, size, in atomic_mmu_lookup()
1892 full->attrs, wp_flags, retaddr); in atomic_mmu_lookup()
1899 cpu_loop_exit_atomic(cpu, retaddr); in atomic_mmu_lookup()
1911 * complication of ABI-specific return type promotion and always
1913 * tcg_target_long, except in the case of a 32-bit host and 64-bit
1921 * @cpu: generic cpu state
1930 * Load @size bytes from @addr, which is memory-mapped i/o.
1931 * The bytes are concatenated in big-endian order with @ret_be.
1933 static uint64_t int_ld_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full, in int_ld_mmio_beN() argument
1944 /* Read aligned pieces up to 8 bytes. */ in int_ld_mmio_beN()
1945 this_mop = ctz32(size | (int)addr | 8); in int_ld_mmio_beN()
1950 this_mop, full->attrs); in int_ld_mmio_beN()
1952 io_failed(cpu, full, addr, this_size, type, mmu_idx, r, ra); in int_ld_mmio_beN()
1954 if (this_size == 8) { in int_ld_mmio_beN()
1958 ret_be = (ret_be << (this_size * 8)) | val; in int_ld_mmio_beN()
1961 size -= this_size; in int_ld_mmio_beN()
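/*
 * Worked example of the MMIO read loop above (editorial illustration):
 * ctz32(size | addr | 8) selects the largest naturally aligned piece of at
 * most 8 bytes.  A 6-byte big-endian read at an address ending in ...0x02:
 *
 *   pass 1: ctz32(6 | 2 | 8) = 1 -> 2-byte read, ret_be = val0
 *   pass 2: ctz32(4 | 4 | 8) = 2 -> 4-byte read, ret_be = (val0 << 32) | val1
 *
 * Each piece is shifted in below the bytes already read, which is exactly
 * big-endian concatenation.
 */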
1967 static uint64_t do_ld_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full, in do_ld_mmio_beN() argument
1976 tcg_debug_assert(size > 0 && size <= 8); in do_ld_mmio_beN()
1978 attrs = full->attrs; in do_ld_mmio_beN()
1979 section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra); in do_ld_mmio_beN()
1980 mr = section->mr; in do_ld_mmio_beN()
1983 return int_ld_mmio_beN(cpu, full, ret_be, addr, size, mmu_idx, in do_ld_mmio_beN()
1987 static Int128 do_ld16_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full, in do_ld16_mmio_beN() argument
1997 tcg_debug_assert(size > 8 && size <= 16); in do_ld16_mmio_beN()
1999 attrs = full->attrs; in do_ld16_mmio_beN()
2000 section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra); in do_ld16_mmio_beN()
2001 mr = section->mr; in do_ld16_mmio_beN()
2004 a = int_ld_mmio_beN(cpu, full, ret_be, addr, size - 8, mmu_idx, in do_ld16_mmio_beN()
2006 b = int_ld_mmio_beN(cpu, full, ret_be, addr + size - 8, 8, mmu_idx, in do_ld16_mmio_beN()
2007 MMU_DATA_LOAD, ra, mr, mr_offset + size - 8); in do_ld16_mmio_beN()
2016 * Load @p->size bytes from @p->haddr, which is RAM.
2017 * The bytes are concatenated in big-endian order with @ret_be.
2021 uint8_t *haddr = p->haddr; in do_ld_bytes_beN()
2022 int i, size = p->size; in do_ld_bytes_beN()
2025 ret_be = (ret_be << 8) | haddr[i]; in do_ld_bytes_beN()
2039 void *haddr = p->haddr; in do_ld_parts_beN()
2040 int size = p->size; in do_ld_parts_beN()
2066 ret_be = (ret_be << 8) | x; in do_ld_parts_beN()
2073 size -= n; in do_ld_parts_beN()
2088 int o = p->addr & 3; in do_ld_whole_be4()
2089 uint32_t x = load_atomic4(p->haddr - o); in do_ld_whole_be4()
2092 x <<= o * 8; in do_ld_whole_be4()
2093 x >>= (4 - p->size) * 8; in do_ld_whole_be4()
2094 return (ret_be << (p->size * 8)) | x; in do_ld_whole_be4()
2105 static uint64_t do_ld_whole_be8(CPUState *cpu, uintptr_t ra, in do_ld_whole_be8() argument
2108 int o = p->addr & 7; in do_ld_whole_be8()
2109 uint64_t x = load_atomic8_or_exit(cpu, ra, p->haddr - o); in do_ld_whole_be8()
2112 x <<= o * 8; in do_ld_whole_be8()
2113 x >>= (8 - p->size) * 8; in do_ld_whole_be8()
2114 return (ret_be << (p->size * 8)) | x; in do_ld_whole_be8()
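/*
 * Worked example for do_ld_whole_be8() (editorial illustration): for a
 * 2-byte load at offset o = 6 within an aligned 8-byte quantity, the whole
 * aligned word is loaded atomically; treating x as those 8 bytes in
 * big-endian order (the lines omitted from this excerpt are assumed to
 * normalise it), the shifts trim it down:
 *
 *   x <<= 6 * 8;         // drop the 6 bytes that precede the requested data
 *   x >>= (8 - 2) * 8;   // keep only the 2 requested bytes
 *   return (ret_be << 16) | x;
 */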
2125 static Int128 do_ld_whole_be16(CPUState *cpu, uintptr_t ra, in do_ld_whole_be16() argument
2128 int o = p->addr & 15; in do_ld_whole_be16()
2129 Int128 x, y = load_atomic16_or_exit(cpu, ra, p->haddr - o); in do_ld_whole_be16()
2130 int size = p->size; in do_ld_whole_be16()
2135 y = int128_lshift(y, o * 8); in do_ld_whole_be16()
2136 y = int128_urshift(y, (16 - size) * 8); in do_ld_whole_be16()
2138 x = int128_lshift(x, size * 8); in do_ld_whole_be16()
2145 static uint64_t do_ld_beN(CPUState *cpu, MMULookupPageData *p, in do_ld_beN() argument
2152 if (unlikely(p->flags & TLB_MMIO)) { in do_ld_beN()
2153 return do_ld_mmio_beN(cpu, p->full, ret_be, p->addr, p->size, in do_ld_beN()
2169 tmp = tmp ? tmp - 1 : 0; in do_ld_beN()
2172 ? p->size == half_size in do_ld_beN()
2173 : p->size >= half_size) { in do_ld_beN()
2174 if (!HAVE_al8_fast && p->size < 4) { in do_ld_beN()
2177 return do_ld_whole_be8(cpu, ra, p, ret_be); in do_ld_beN()
2193 * Wrapper for the above, for 8 < size < 16.
2195 static Int128 do_ld16_beN(CPUState *cpu, MMULookupPageData *p, in do_ld16_beN() argument
2198 int size = p->size; in do_ld16_beN()
2202 if (unlikely(p->flags & TLB_MMIO)) { in do_ld16_beN()
2203 return do_ld16_mmio_beN(cpu, p->full, a, p->addr, size, mmu_idx, ra); in do_ld16_beN()
2213 p->size = size - 8; in do_ld16_beN()
2215 p->haddr += size - 8; in do_ld16_beN()
2216 p->size = 8; in do_ld16_beN()
2221 /* Since size > 8, this is the half that must be atomic. */ in do_ld16_beN()
2222 return do_ld_whole_be16(cpu, ra, p, a); in do_ld16_beN()
2226 * Since size > 8, both halves are misaligned, in do_ld16_beN()
2232 p->size = size - 8; in do_ld16_beN()
2234 b = ldq_be_p(p->haddr + size - 8); in do_ld16_beN()
2244 static uint8_t do_ld_1(CPUState *cpu, MMULookupPageData *p, int mmu_idx, in do_ld_1() argument
2247 if (unlikely(p->flags & TLB_MMIO)) { in do_ld_1()
2248 return do_ld_mmio_beN(cpu, p->full, 0, p->addr, 1, mmu_idx, type, ra); in do_ld_1()
2250 return *(uint8_t *)p->haddr; in do_ld_1()
2254 static uint16_t do_ld_2(CPUState *cpu, MMULookupPageData *p, int mmu_idx, in do_ld_2() argument
2259 if (unlikely(p->flags & TLB_MMIO)) { in do_ld_2()
2260 ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 2, mmu_idx, type, ra); in do_ld_2()
2266 ret = load_atom_2(cpu, ra, p->haddr, memop); in do_ld_2()
2274 static uint32_t do_ld_4(CPUState *cpu, MMULookupPageData *p, int mmu_idx, in do_ld_4() argument
2279 if (unlikely(p->flags & TLB_MMIO)) { in do_ld_4()
2280 ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 4, mmu_idx, type, ra); in do_ld_4()
2286 ret = load_atom_4(cpu, ra, p->haddr, memop); in do_ld_4()
2294 static uint64_t do_ld_8(CPUState *cpu, MMULookupPageData *p, int mmu_idx, in do_ld_8() argument
2299 if (unlikely(p->flags & TLB_MMIO)) { in do_ld_8()
2300 ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 8, mmu_idx, type, ra); in do_ld_8()
2306 ret = load_atom_8(cpu, ra, p->haddr, memop); in do_ld_8()
2314 static uint8_t do_ld1_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi, in do_ld1_mmu() argument
2321 crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l); in do_ld1_mmu()
2324 return do_ld_1(cpu, &l.page[0], l.mmu_idx, access_type, ra); in do_ld1_mmu()
2327 static uint16_t do_ld2_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi, in do_ld2_mmu() argument
2336 crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l); in do_ld2_mmu()
2338 return do_ld_2(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra); in do_ld2_mmu()
2341 a = do_ld_1(cpu, &l.page[0], l.mmu_idx, access_type, ra); in do_ld2_mmu()
2342 b = do_ld_1(cpu, &l.page[1], l.mmu_idx, access_type, ra); in do_ld2_mmu()
2345 ret = a | (b << 8); in do_ld2_mmu()
2347 ret = b | (a << 8); in do_ld2_mmu()
2352 static uint32_t do_ld4_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi, in do_ld4_mmu() argument
2360 crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l); in do_ld4_mmu()
2362 return do_ld_4(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra); in do_ld4_mmu()
2365 ret = do_ld_beN(cpu, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra); in do_ld4_mmu()
2366 ret = do_ld_beN(cpu, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra); in do_ld4_mmu()
2373 static uint64_t do_ld8_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi, in do_ld8_mmu() argument
2381 crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l); in do_ld8_mmu()
2383 return do_ld_8(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra); in do_ld8_mmu()
2386 ret = do_ld_beN(cpu, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra); in do_ld8_mmu()
2387 ret = do_ld_beN(cpu, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra); in do_ld8_mmu()
2394 static Int128 do_ld16_mmu(CPUState *cpu, vaddr addr, in do_ld16_mmu() argument
2404 crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_LOAD, &l); in do_ld16_mmu()
2407 ret = do_ld16_mmio_beN(cpu, l.page[0].full, 0, addr, 16, in do_ld16_mmu()
2414 ret = load_atom_16(cpu, ra, l.page[0].haddr, l.memop); in do_ld16_mmu()
2423 if (first == 8) { in do_ld16_mmu()
2426 a = do_ld_8(cpu, &l.page[0], l.mmu_idx, MMU_DATA_LOAD, mop8, ra); in do_ld16_mmu()
2427 b = do_ld_8(cpu, &l.page[1], l.mmu_idx, MMU_DATA_LOAD, mop8, ra); in do_ld16_mmu()
2436 if (first < 8) { in do_ld16_mmu()
2437 a = do_ld_beN(cpu, &l.page[0], 0, l.mmu_idx, in do_ld16_mmu()
2439 ret = do_ld16_beN(cpu, &l.page[1], a, l.mmu_idx, l.memop, ra); in do_ld16_mmu()
2441 ret = do_ld16_beN(cpu, &l.page[0], 0, l.mmu_idx, l.memop, ra); in do_ld16_mmu()
2443 ret = int128_lshift(ret, l.page[1].size * 8); in do_ld16_mmu()
2445 b = do_ld_beN(cpu, &l.page[1], b, l.mmu_idx, in do_ld16_mmu()
2461 * @cpu: generic cpu state
2470 * Store @size bytes at @addr, which is memory-mapped i/o.
2471 * The bytes to store are extracted in little-endian order from @val_le;
2472 * return the bytes of @val_le beyond @p->size that have not been stored.
2474 static uint64_t int_st_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full, in int_st_mmio_leN() argument
2484 /* Store aligned pieces up to 8 bytes. */ in int_st_mmio_leN()
2485 this_mop = ctz32(size | (int)addr | 8); in int_st_mmio_leN()
2490 this_mop, full->attrs); in int_st_mmio_leN()
2492 io_failed(cpu, full, addr, this_size, MMU_DATA_STORE, in int_st_mmio_leN()
2495 if (this_size == 8) { in int_st_mmio_leN()
2499 val_le >>= this_size * 8; in int_st_mmio_leN()
2502 size -= this_size; in int_st_mmio_leN()
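/*
 * Editorial note: the store loop above is the little-endian mirror of
 * int_ld_mmio_beN(); after each aligned piece is written, val_le is shifted
 * right by the bytes consumed, so the caller gets back exactly the bytes
 * not yet stored.  For example, storing the low 3 bytes of 0xddccbbaa
 * writes aa, bb, cc to increasing addresses and returns 0xdd, which the
 * page-crossing store paths (do_st_leN()/do_st16_leN() below) hand to the
 * second page.
 */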
2508 static uint64_t do_st_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full, in do_st_mmio_leN() argument
2517 tcg_debug_assert(size > 0 && size <= 8); in do_st_mmio_leN()
2519 attrs = full->attrs; in do_st_mmio_leN()
2520 section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra); in do_st_mmio_leN()
2521 mr = section->mr; in do_st_mmio_leN()
2524 return int_st_mmio_leN(cpu, full, val_le, addr, size, mmu_idx, in do_st_mmio_leN()
2528 static uint64_t do_st16_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full, in do_st16_mmio_leN() argument
2537 tcg_debug_assert(size > 8 && size <= 16); in do_st16_mmio_leN()
2539 attrs = full->attrs; in do_st16_mmio_leN()
2540 section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra); in do_st16_mmio_leN()
2541 mr = section->mr; in do_st16_mmio_leN()
2544 int_st_mmio_leN(cpu, full, int128_getlo(val_le), addr, 8, in do_st16_mmio_leN()
2546 return int_st_mmio_leN(cpu, full, int128_gethi(val_le), addr + 8, in do_st16_mmio_leN()
2547 size - 8, mmu_idx, ra, mr, mr_offset + 8); in do_st16_mmio_leN()
2553 static uint64_t do_st_leN(CPUState *cpu, MMULookupPageData *p, in do_st_leN() argument
2560 if (unlikely(p->flags & TLB_MMIO)) { in do_st_leN()
2561 return do_st_mmio_leN(cpu, p->full, val_le, p->addr, in do_st_leN()
2562 p->size, mmu_idx, ra); in do_st_leN()
2563 } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { in do_st_leN()
2564 return val_le >> (p->size * 8); in do_st_leN()
2574 return store_parts_leN(p->haddr, p->size, val_le); in do_st_leN()
2579 tmp = tmp ? tmp - 1 : 0; in do_st_leN()
2582 ? p->size == half_size in do_st_leN()
2583 : p->size >= half_size) { in do_st_leN()
2584 if (!HAVE_al8_fast && p->size <= 4) { in do_st_leN()
2585 return store_whole_le4(p->haddr, p->size, val_le); in do_st_leN()
2587 return store_whole_le8(p->haddr, p->size, val_le); in do_st_leN()
2589 cpu_loop_exit_atomic(cpu, ra); in do_st_leN()
2597 return store_bytes_leN(p->haddr, p->size, val_le); in do_st_leN()
2605 * Wrapper for the above, for 8 < size < 16.
2607 static uint64_t do_st16_leN(CPUState *cpu, MMULookupPageData *p, in do_st16_leN() argument
2611 int size = p->size; in do_st16_leN()
2614 if (unlikely(p->flags & TLB_MMIO)) { in do_st16_leN()
2615 return do_st16_mmio_leN(cpu, p->full, val_le, p->addr, in do_st16_leN()
2617 } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { in do_st16_leN()
2618 return int128_gethi(val_le) >> ((size - 8) * 8); in do_st16_leN()
2628 store_parts_leN(p->haddr, 8, int128_getlo(val_le)); in do_st16_leN()
2629 return store_parts_leN(p->haddr + 8, p->size - 8, in do_st16_leN()
2633 /* Since size > 8, this is the half that must be atomic. */ in do_st16_leN()
2635 cpu_loop_exit_atomic(cpu, ra); in do_st16_leN()
2637 return store_whole_le16(p->haddr, p->size, val_le); in do_st16_leN()
2641 * Since size > 8, both halves are misaligned, in do_st16_leN()
2647 stq_le_p(p->haddr, int128_getlo(val_le)); in do_st16_leN()
2648 return store_bytes_leN(p->haddr + 8, p->size - 8, in do_st16_leN()
2656 static void do_st_1(CPUState *cpu, MMULookupPageData *p, uint8_t val, in do_st_1() argument
2659 if (unlikely(p->flags & TLB_MMIO)) { in do_st_1()
2660 do_st_mmio_leN(cpu, p->full, val, p->addr, 1, mmu_idx, ra); in do_st_1()
2661 } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { in do_st_1()
2664 *(uint8_t *)p->haddr = val; in do_st_1()
2668 static void do_st_2(CPUState *cpu, MMULookupPageData *p, uint16_t val, in do_st_2() argument
2671 if (unlikely(p->flags & TLB_MMIO)) { in do_st_2()
2675 do_st_mmio_leN(cpu, p->full, val, p->addr, 2, mmu_idx, ra); in do_st_2()
2676 } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { in do_st_2()
2683 store_atom_2(cpu, ra, p->haddr, memop, val); in do_st_2()
2687 static void do_st_4(CPUState *cpu, MMULookupPageData *p, uint32_t val, in do_st_4() argument
2690 if (unlikely(p->flags & TLB_MMIO)) { in do_st_4()
2694 do_st_mmio_leN(cpu, p->full, val, p->addr, 4, mmu_idx, ra); in do_st_4()
2695 } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { in do_st_4()
2702 store_atom_4(cpu, ra, p->haddr, memop, val); in do_st_4()
2706 static void do_st_8(CPUState *cpu, MMULookupPageData *p, uint64_t val, in do_st_8() argument
2709 if (unlikely(p->flags & TLB_MMIO)) { in do_st_8()
2713 do_st_mmio_leN(cpu, p->full, val, p->addr, 8, mmu_idx, ra); in do_st_8()
2714 } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { in do_st_8()
2721 store_atom_8(cpu, ra, p->haddr, memop, val); in do_st_8()
2725 static void do_st1_mmu(CPUState *cpu, vaddr addr, uint8_t val, in do_st1_mmu() argument
2732 crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l); in do_st1_mmu()
2735 do_st_1(cpu, &l.page[0], val, l.mmu_idx, ra); in do_st1_mmu()
2738 static void do_st2_mmu(CPUState *cpu, vaddr addr, uint16_t val, in do_st2_mmu() argument
2746 crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l); in do_st2_mmu()
2748 do_st_2(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra); in do_st2_mmu()
2753 a = val, b = val >> 8; in do_st2_mmu()
2755 b = val, a = val >> 8; in do_st2_mmu()
2757 do_st_1(cpu, &l.page[0], a, l.mmu_idx, ra); in do_st2_mmu()
2758 do_st_1(cpu, &l.page[1], b, l.mmu_idx, ra); in do_st2_mmu()
2761 static void do_st4_mmu(CPUState *cpu, vaddr addr, uint32_t val, in do_st4_mmu() argument
2768 crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l); in do_st4_mmu()
2770 do_st_4(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra); in do_st4_mmu()
2778 val = do_st_leN(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra); in do_st4_mmu()
2779 (void) do_st_leN(cpu, &l.page[1], val, l.mmu_idx, l.memop, ra); in do_st4_mmu()
2782 static void do_st8_mmu(CPUState *cpu, vaddr addr, uint64_t val, in do_st8_mmu() argument
2789 crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l); in do_st8_mmu()
2791 do_st_8(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra); in do_st8_mmu()
2799 val = do_st_leN(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra); in do_st8_mmu()
2800 (void) do_st_leN(cpu, &l.page[1], val, l.mmu_idx, l.memop, ra); in do_st8_mmu()
2803 static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val, in do_st16_mmu() argument
2812 crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l); in do_st16_mmu()
2818 do_st16_mmio_leN(cpu, l.page[0].full, val, addr, 16, l.mmu_idx, ra); in do_st16_mmu()
2826 store_atom_16(cpu, ra, l.page[0].haddr, l.memop, val); in do_st16_mmu()
2832 if (first == 8) { in do_st16_mmu()
2843 do_st_8(cpu, &l.page[0], a, l.mmu_idx, mop8, ra); in do_st16_mmu()
2844 do_st_8(cpu, &l.page[1], b, l.mmu_idx, mop8, ra); in do_st16_mmu()
2851 if (first < 8) { in do_st16_mmu()
2852 do_st_leN(cpu, &l.page[0], int128_getlo(val), l.mmu_idx, l.memop, ra); in do_st16_mmu()
2853 val = int128_urshift(val, first * 8); in do_st16_mmu()
2854 do_st16_leN(cpu, &l.page[1], val, l.mmu_idx, l.memop, ra); in do_st16_mmu()
2856 b = do_st16_leN(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra); in do_st16_mmu()
2857 do_st_leN(cpu, &l.page[1], b, l.mmu_idx, l.memop, ra); in do_st16_mmu()
2885 #define DATA_SIZE 8