1 /*
2 * Common CPU TLB handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "qemu/main-loop.h"
22 #include "hw/core/tcg-cpu-ops.h"
23 #include "exec/exec-all.h"
24 #include "exec/page-protection.h"
25 #include "exec/memory.h"
26 #include "exec/cpu_ldst.h"
27 #include "exec/cputlb.h"
28 #include "exec/tb-flush.h"
29 #include "exec/memory-internal.h"
30 #include "exec/ram_addr.h"
31 #include "exec/mmu-access-type.h"
32 #include "exec/tlb-common.h"
33 #include "exec/vaddr.h"
34 #include "tcg/tcg.h"
35 #include "qemu/error-report.h"
36 #include "exec/log.h"
37 #include "exec/helper-proto-common.h"
38 #include "qemu/atomic.h"
39 #include "qemu/atomic128.h"
40 #include "exec/translate-all.h"
41 #include "trace.h"
42 #include "tb-hash.h"
43 #include "internal-common.h"
44 #include "internal-target.h"
45 #ifdef CONFIG_PLUGIN
46 #include "qemu/plugin-memory.h"
47 #endif
48 #include "tcg/tcg-ldst.h"
49 #include "tcg/oversized-guest.h"
50
51 /* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
52 /* #define DEBUG_TLB */
53 /* #define DEBUG_TLB_LOG */
54
55 #ifdef DEBUG_TLB
56 # define DEBUG_TLB_GATE 1
57 # ifdef DEBUG_TLB_LOG
58 # define DEBUG_TLB_LOG_GATE 1
59 # else
60 # define DEBUG_TLB_LOG_GATE 0
61 # endif
62 #else
63 # define DEBUG_TLB_GATE 0
64 # define DEBUG_TLB_LOG_GATE 0
65 #endif
66
67 #define tlb_debug(fmt, ...) do { \
68 if (DEBUG_TLB_LOG_GATE) { \
69 qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
70 ## __VA_ARGS__); \
71 } else if (DEBUG_TLB_GATE) { \
72 fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
73 } \
74 } while (0)
75
76 #define assert_cpu_is_self(cpu) do { \
77 if (DEBUG_TLB_GATE) { \
78 g_assert(!(cpu)->created || qemu_cpu_is_self(cpu)); \
79 } \
80 } while (0)
81
82 /* run_on_cpu_data.target_ptr should always be big enough for a
83 * vaddr even on 32 bit builds
84 */
85 QEMU_BUILD_BUG_ON(sizeof(vaddr) > sizeof(run_on_cpu_data));
86
87 /* We currently can't handle more than 16 bits in the MMUIDX bitmask.
88 */
89 QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
90 #define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
91
static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
93 {
94 return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
95 }
96
static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
98 {
99 return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
100 }
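/*
 * A quick worked example of the mask encoding (illustrative only,
 * assuming a 32-byte CPUTLBEntry, i.e. CPU_TLB_ENTRY_BITS == 5):
 * with 256 entries, fast->mask == (256 - 1) << 5 == 0x1fe0, so
 * tlb_n_entries() returns (0x1fe0 >> 5) + 1 == 256 and
 * sizeof_tlb() returns 0x1fe0 + 32 == 8192 bytes.
 */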
101
static inline uint64_t tlb_read_idx(const CPUTLBEntry *entry,
103 MMUAccessType access_type)
104 {
105 /* Do not rearrange the CPUTLBEntry structure members. */
106 QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_read) !=
107 MMU_DATA_LOAD * sizeof(uint64_t));
108 QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_write) !=
109 MMU_DATA_STORE * sizeof(uint64_t));
110 QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_code) !=
111 MMU_INST_FETCH * sizeof(uint64_t));
112
113 #if TARGET_LONG_BITS == 32
114 /* Use qatomic_read, in case of addr_write; only care about low bits. */
115 const uint32_t *ptr = (uint32_t *)&entry->addr_idx[access_type];
116 ptr += HOST_BIG_ENDIAN;
117 return qatomic_read(ptr);
118 #else
119 const uint64_t *ptr = &entry->addr_idx[access_type];
120 # if TCG_OVERSIZED_GUEST
121 return *ptr;
122 # else
123 /* ofs might correspond to .addr_write, so use qatomic_read */
124 return qatomic_read(ptr);
125 # endif
126 #endif
127 }
128
static inline uint64_t tlb_addr_write(const CPUTLBEntry *entry)
130 {
131 return tlb_read_idx(entry, MMU_DATA_STORE);
132 }
133
134 /* Find the TLB index corresponding to the mmu_idx + address pair. */
static inline uintptr_t tlb_index(CPUState *cpu, uintptr_t mmu_idx,
136 vaddr addr)
137 {
138 uintptr_t size_mask = cpu->neg.tlb.f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS;
139
140 return (addr >> TARGET_PAGE_BITS) & size_mask;
141 }
142
143 /* Find the TLB entry corresponding to the mmu_idx + address pair. */
static inline CPUTLBEntry *tlb_entry(CPUState *cpu, uintptr_t mmu_idx,
145 vaddr addr)
146 {
147 return &cpu->neg.tlb.f[mmu_idx].table[tlb_index(cpu, mmu_idx, addr)];
148 }
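/*
 * Illustrative example of the direct-mapped lookup, assuming
 * TARGET_PAGE_BITS == 12 and a 256-entry TLB (size_mask == 0xff):
 * addr == 0x00402345 selects index (0x00402345 >> 12) & 0xff == 0x02,
 * so consecutive virtual pages land in consecutive TLB slots and
 * aliases only occur between pages a multiple of 256 pages apart.
 */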
149
static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
151 size_t max_entries)
152 {
153 desc->window_begin_ns = ns;
154 desc->window_max_entries = max_entries;
155 }
156
static void tb_jmp_cache_clear_page(CPUState *cpu, vaddr page_addr)
158 {
159 CPUJumpCache *jc = cpu->tb_jmp_cache;
160 int i, i0;
161
162 if (unlikely(!jc)) {
163 return;
164 }
165
166 i0 = tb_jmp_cache_hash_page(page_addr);
167 for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
168 qatomic_set(&jc->array[i0 + i].tb, NULL);
169 }
170 }
171
172 /**
173 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
174 * @desc: The CPUTLBDesc portion of the TLB
175 * @fast: The CPUTLBDescFast portion of the same TLB
176 *
 * Called with tlb_lock held.
178 *
179 * We have two main constraints when resizing a TLB: (1) we only resize it
180 * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
181 * the array or unnecessarily flushing it), which means we do not control how
182 * frequently the resizing can occur; (2) we don't have access to the guest's
183 * future scheduling decisions, and therefore have to decide the magnitude of
184 * the resize based on past observations.
185 *
186 * In general, a memory-hungry process can benefit greatly from an appropriately
187 * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
188 * we just have to make the TLB as large as possible; while an oversized TLB
189 * results in minimal TLB miss rates, it also takes longer to be flushed
190 * (flushes can be _very_ frequent), and the reduced locality can also hurt
191 * performance.
192 *
193 * To achieve near-optimal performance for all kinds of workloads, we:
194 *
195 * 1. Aggressively increase the size of the TLB when the use rate of the
196 * TLB being flushed is high, since it is likely that in the near future this
197 * memory-hungry process will execute again, and its memory hungriness will
198 * probably be similar.
199 *
200 * 2. Slowly reduce the size of the TLB as the use rate declines over a
201 * reasonably large time window. The rationale is that if in such a time window
202 * we have not observed a high TLB use rate, it is likely that we won't observe
203 * it in the near future. In that case, once a time window expires we downsize
204 * the TLB to match the maximum use rate observed in the window.
205 *
206 * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
207 * since in that range performance is likely near-optimal. Recall that the TLB
208 * is direct mapped, so we want the use rate to be low (or at least not too
209 * high), since otherwise we are likely to have a significant amount of
210 * conflict misses.
211 */
static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
213 int64_t now)
214 {
215 size_t old_size = tlb_n_entries(fast);
216 size_t rate;
217 size_t new_size = old_size;
218 int64_t window_len_ms = 100;
219 int64_t window_len_ns = window_len_ms * 1000 * 1000;
220 bool window_expired = now > desc->window_begin_ns + window_len_ns;
221
222 if (desc->n_used_entries > desc->window_max_entries) {
223 desc->window_max_entries = desc->n_used_entries;
224 }
225 rate = desc->window_max_entries * 100 / old_size;
226
227 if (rate > 70) {
228 new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
229 } else if (rate < 30 && window_expired) {
230 size_t ceil = pow2ceil(desc->window_max_entries);
231 size_t expected_rate = desc->window_max_entries * 100 / ceil;
232
233 /*
234 * Avoid undersizing when the max number of entries seen is just below
235 * a pow2. For instance, if max_entries == 1025, the expected use rate
236 * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
237 * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
238 * later. Thus, make sure that the expected use rate remains below 70%.
239 * (and since we double the size, that means the lowest rate we'd
240 * expect to get is 35%, which is still in the 30-70% range where
241 * we consider that the size is appropriate.)
242 */
243 if (expected_rate > 70) {
244 ceil *= 2;
245 }
246 new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
247 }
248
249 if (new_size == old_size) {
250 if (window_expired) {
251 tlb_window_reset(desc, now, desc->n_used_entries);
252 }
253 return;
254 }
255
256 g_free(fast->table);
257 g_free(desc->fulltlb);
258
259 tlb_window_reset(desc, now, 0);
260 /* desc->n_used_entries is cleared by the caller */
261 fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
262 fast->table = g_try_new(CPUTLBEntry, new_size);
263 desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
264
265 /*
266 * If the allocations fail, try smaller sizes. We just freed some
267 * memory, so going back to half of new_size has a good chance of working.
268 * Increased memory pressure elsewhere in the system might cause the
269 * allocations to fail though, so we progressively reduce the allocation
270 * size, aborting if we cannot even allocate the smallest TLB we support.
271 */
272 while (fast->table == NULL || desc->fulltlb == NULL) {
273 if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
274 error_report("%s: %s", __func__, strerror(errno));
275 abort();
276 }
277 new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
278 fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
279
280 g_free(fast->table);
281 g_free(desc->fulltlb);
282 fast->table = g_try_new(CPUTLBEntry, new_size);
283 desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
284 }
285 }
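/*
 * Worked example of the sizing heuristic above (illustrative values):
 * with old_size == 1024 and window_max_entries == 800 the use rate is
 * 800 * 100 / 1024 == 78%, so the TLB grows to 2048 entries (capped at
 * 1 << CPU_TLB_DYN_MAX_BITS).  If instead only 200 entries were used
 * across an expired window, the rate is 19%; pow2ceil(200) == 256 would
 * give an expected rate of 200 * 100 / 256 == 78%, which is above 70%,
 * so the ceiling is doubled and the TLB shrinks to 512 entries
 * (bounded below by 1 << CPU_TLB_DYN_MIN_BITS).
 */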
286
static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
288 {
289 desc->n_used_entries = 0;
290 desc->large_page_addr = -1;
291 desc->large_page_mask = -1;
292 desc->vindex = 0;
293 memset(fast->table, -1, sizeof_tlb(fast));
294 memset(desc->vtable, -1, sizeof(desc->vtable));
295 }
296
static void tlb_flush_one_mmuidx_locked(CPUState *cpu, int mmu_idx,
298 int64_t now)
299 {
300 CPUTLBDesc *desc = &cpu->neg.tlb.d[mmu_idx];
301 CPUTLBDescFast *fast = &cpu->neg.tlb.f[mmu_idx];
302
303 tlb_mmu_resize_locked(desc, fast, now);
304 tlb_mmu_flush_locked(desc, fast);
305 }
306
static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
308 {
309 size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;
310
311 tlb_window_reset(desc, now, 0);
312 desc->n_used_entries = 0;
313 fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
314 fast->table = g_new(CPUTLBEntry, n_entries);
315 desc->fulltlb = g_new(CPUTLBEntryFull, n_entries);
316 tlb_mmu_flush_locked(desc, fast);
317 }
318
static inline void tlb_n_used_entries_inc(CPUState *cpu, uintptr_t mmu_idx)
320 {
321 cpu->neg.tlb.d[mmu_idx].n_used_entries++;
322 }
323
static inline void tlb_n_used_entries_dec(CPUState *cpu, uintptr_t mmu_idx)
325 {
326 cpu->neg.tlb.d[mmu_idx].n_used_entries--;
327 }
328
void tlb_init(CPUState *cpu)
330 {
331 int64_t now = get_clock_realtime();
332 int i;
333
334 qemu_spin_init(&cpu->neg.tlb.c.lock);
335
336 /* All tlbs are initialized flushed. */
337 cpu->neg.tlb.c.dirty = 0;
338
339 for (i = 0; i < NB_MMU_MODES; i++) {
340 tlb_mmu_init(&cpu->neg.tlb.d[i], &cpu->neg.tlb.f[i], now);
341 }
342 }
343
void tlb_destroy(CPUState *cpu)
345 {
346 int i;
347
348 qemu_spin_destroy(&cpu->neg.tlb.c.lock);
349 for (i = 0; i < NB_MMU_MODES; i++) {
350 CPUTLBDesc *desc = &cpu->neg.tlb.d[i];
351 CPUTLBDescFast *fast = &cpu->neg.tlb.f[i];
352
353 g_free(fast->table);
354 g_free(desc->fulltlb);
355 }
356 }
357
358 /* flush_all_helper: run fn across all cpus
359 *
360 * If the wait flag is set then the src cpu's helper will be queued as
361 * "safe" work and the loop exited creating a synchronisation point
362 * where all queued work will be finished before execution starts
363 * again.
364 */
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
366 run_on_cpu_data d)
367 {
368 CPUState *cpu;
369
370 CPU_FOREACH(cpu) {
371 if (cpu != src) {
372 async_run_on_cpu(cpu, fn, d);
373 }
374 }
375 }
376
static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
378 {
379 uint16_t asked = data.host_int;
380 uint16_t all_dirty, work, to_clean;
381 int64_t now = get_clock_realtime();
382
383 assert_cpu_is_self(cpu);
384
385 tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);
386
387 qemu_spin_lock(&cpu->neg.tlb.c.lock);
388
389 all_dirty = cpu->neg.tlb.c.dirty;
390 to_clean = asked & all_dirty;
391 all_dirty &= ~to_clean;
392 cpu->neg.tlb.c.dirty = all_dirty;
393
394 for (work = to_clean; work != 0; work &= work - 1) {
395 int mmu_idx = ctz32(work);
396 tlb_flush_one_mmuidx_locked(cpu, mmu_idx, now);
397 }
398
399 qemu_spin_unlock(&cpu->neg.tlb.c.lock);
400
401 tcg_flush_jmp_cache(cpu);
402
403 if (to_clean == ALL_MMUIDX_BITS) {
404 qatomic_set(&cpu->neg.tlb.c.full_flush_count,
405 cpu->neg.tlb.c.full_flush_count + 1);
406 } else {
407 qatomic_set(&cpu->neg.tlb.c.part_flush_count,
408 cpu->neg.tlb.c.part_flush_count + ctpop16(to_clean));
409 if (to_clean != asked) {
410 qatomic_set(&cpu->neg.tlb.c.elide_flush_count,
411 cpu->neg.tlb.c.elide_flush_count +
412 ctpop16(asked & ~to_clean));
413 }
414 }
415 }
416
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
418 {
419 tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);
420
421 assert_cpu_is_self(cpu);
422
423 tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
424 }
425
void tlb_flush(CPUState *cpu)
427 {
428 tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
429 }
430
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
432 {
433 const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
434
435 tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
436
437 flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
438 async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
439 }
440
void tlb_flush_all_cpus_synced(CPUState *src_cpu)
442 {
443 tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
444 }
445
static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
447 vaddr page, vaddr mask)
448 {
449 page &= mask;
450 mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK;
451
452 return (page == (tlb_entry->addr_read & mask) ||
453 page == (tlb_addr_write(tlb_entry) & mask) ||
454 page == (tlb_entry->addr_code & mask));
455 }
456
static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry, vaddr page)
458 {
459 return tlb_hit_page_mask_anyprot(tlb_entry, page, -1);
460 }
461
462 /**
463 * tlb_entry_is_empty - return true if the entry is not in use
464 * @te: pointer to CPUTLBEntry
465 */
static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
467 {
468 return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
469 }
470
471 /* Called with tlb_c.lock held */
static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
473 vaddr page,
474 vaddr mask)
475 {
476 if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) {
477 memset(tlb_entry, -1, sizeof(*tlb_entry));
478 return true;
479 }
480 return false;
481 }
482
static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry, vaddr page)
484 {
485 return tlb_flush_entry_mask_locked(tlb_entry, page, -1);
486 }
487
488 /* Called with tlb_c.lock held */
static void tlb_flush_vtlb_page_mask_locked(CPUState *cpu, int mmu_idx,
490 vaddr page,
491 vaddr mask)
492 {
493 CPUTLBDesc *d = &cpu->neg.tlb.d[mmu_idx];
494 int k;
495
496 assert_cpu_is_self(cpu);
497 for (k = 0; k < CPU_VTLB_SIZE; k++) {
498 if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) {
499 tlb_n_used_entries_dec(cpu, mmu_idx);
500 }
501 }
502 }
503
static inline void tlb_flush_vtlb_page_locked(CPUState *cpu, int mmu_idx,
505 vaddr page)
506 {
507 tlb_flush_vtlb_page_mask_locked(cpu, mmu_idx, page, -1);
508 }
509
static void tlb_flush_page_locked(CPUState *cpu, int midx, vaddr page)
511 {
512 vaddr lp_addr = cpu->neg.tlb.d[midx].large_page_addr;
513 vaddr lp_mask = cpu->neg.tlb.d[midx].large_page_mask;
514
515 /* Check if we need to flush due to large pages. */
516 if ((page & lp_mask) == lp_addr) {
517 tlb_debug("forcing full flush midx %d (%016"
518 VADDR_PRIx "/%016" VADDR_PRIx ")\n",
519 midx, lp_addr, lp_mask);
520 tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime());
521 } else {
522 if (tlb_flush_entry_locked(tlb_entry(cpu, midx, page), page)) {
523 tlb_n_used_entries_dec(cpu, midx);
524 }
525 tlb_flush_vtlb_page_locked(cpu, midx, page);
526 }
527 }
528
529 /**
530 * tlb_flush_page_by_mmuidx_async_0:
531 * @cpu: cpu on which to flush
532 * @addr: page of virtual address to flush
533 * @idxmap: set of mmu_idx to flush
534 *
535 * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
536 * at @addr from the tlbs indicated by @idxmap from @cpu.
537 */
static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
539 vaddr addr,
540 uint16_t idxmap)
541 {
542 int mmu_idx;
543
544 assert_cpu_is_self(cpu);
545
546 tlb_debug("page addr: %016" VADDR_PRIx " mmu_map:0x%x\n", addr, idxmap);
547
548 qemu_spin_lock(&cpu->neg.tlb.c.lock);
549 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
550 if ((idxmap >> mmu_idx) & 1) {
551 tlb_flush_page_locked(cpu, mmu_idx, addr);
552 }
553 }
554 qemu_spin_unlock(&cpu->neg.tlb.c.lock);
555
556 /*
557 * Discard jump cache entries for any tb which might potentially
558 * overlap the flushed page, which includes the previous.
559 */
560 tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
561 tb_jmp_cache_clear_page(cpu, addr);
562 }
563
564 /**
565 * tlb_flush_page_by_mmuidx_async_1:
566 * @cpu: cpu on which to flush
567 * @data: encoded addr + idxmap
568 *
569 * Helper for tlb_flush_page_by_mmuidx and friends, called through
570 * async_run_on_cpu. The idxmap parameter is encoded in the page
571 * offset of the target_ptr field. This limits the set of mmu_idx
572 * that can be passed via this method.
573 */
static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
575 run_on_cpu_data data)
576 {
577 vaddr addr_and_idxmap = data.target_ptr;
578 vaddr addr = addr_and_idxmap & TARGET_PAGE_MASK;
579 uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;
580
581 tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
582 }
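/*
 * Illustrative note on the encoding used above: when idxmap fits in the
 * page offset (idxmap < TARGET_PAGE_SIZE, checked by the caller), both
 * values travel in a single target_ptr.  For example, with 4 KiB pages,
 * addr 0x7f001000 and idxmap 0x0003 are sent as the single value
 * 0x7f001003 and recovered as 0x7f001000 and 0x0003 by the masks above.
 */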
583
584 typedef struct {
585 vaddr addr;
586 uint16_t idxmap;
587 } TLBFlushPageByMMUIdxData;
588
589 /**
590 * tlb_flush_page_by_mmuidx_async_2:
591 * @cpu: cpu on which to flush
592 * @data: allocated addr + idxmap
593 *
594 * Helper for tlb_flush_page_by_mmuidx and friends, called through
595 * async_run_on_cpu. The addr+idxmap parameters are stored in a
596 * TLBFlushPageByMMUIdxData structure that has been allocated
597 * specifically for this helper. Free the structure when done.
598 */
static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
600 run_on_cpu_data data)
601 {
602 TLBFlushPageByMMUIdxData *d = data.host_ptr;
603
604 tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
605 g_free(d);
606 }
607
void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr, uint16_t idxmap)
609 {
610 tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%" PRIx16 "\n", addr, idxmap);
611
612 assert_cpu_is_self(cpu);
613
614 /* This should already be page aligned */
615 addr &= TARGET_PAGE_MASK;
616
617 tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
618 }
619
void tlb_flush_page(CPUState *cpu, vaddr addr)
621 {
622 tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
623 }
624
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
626 vaddr addr,
627 uint16_t idxmap)
628 {
629 tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap);
630
631 /* This should already be page aligned */
632 addr &= TARGET_PAGE_MASK;
633
634 /*
635 * Allocate memory to hold addr+idxmap only when needed.
636 * See tlb_flush_page_by_mmuidx for details.
637 */
638 if (idxmap < TARGET_PAGE_SIZE) {
639 flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
640 RUN_ON_CPU_TARGET_PTR(addr | idxmap));
641 async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
642 RUN_ON_CPU_TARGET_PTR(addr | idxmap));
643 } else {
644 CPUState *dst_cpu;
645 TLBFlushPageByMMUIdxData *d;
646
647 /* Allocate a separate data block for each destination cpu. */
648 CPU_FOREACH(dst_cpu) {
649 if (dst_cpu != src_cpu) {
650 d = g_new(TLBFlushPageByMMUIdxData, 1);
651 d->addr = addr;
652 d->idxmap = idxmap;
653 async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
654 RUN_ON_CPU_HOST_PTR(d));
655 }
656 }
657
658 d = g_new(TLBFlushPageByMMUIdxData, 1);
659 d->addr = addr;
660 d->idxmap = idxmap;
661 async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
662 RUN_ON_CPU_HOST_PTR(d));
663 }
664 }
665
void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr)
667 {
668 tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
669 }
670
static void tlb_flush_range_locked(CPUState *cpu, int midx,
672 vaddr addr, vaddr len,
673 unsigned bits)
674 {
675 CPUTLBDesc *d = &cpu->neg.tlb.d[midx];
676 CPUTLBDescFast *f = &cpu->neg.tlb.f[midx];
677 vaddr mask = MAKE_64BIT_MASK(0, bits);
678
679 /*
680 * If @bits is smaller than the tlb size, there may be multiple entries
681 * within the TLB; otherwise all addresses that match under @mask hit
682 * the same TLB entry.
683 * TODO: Perhaps allow bits to be a few bits less than the size.
684 * For now, just flush the entire TLB.
685 *
686 * If @len is larger than the tlb size, then it will take longer to
687 * test all of the entries in the TLB than it will to flush it all.
688 */
689 if (mask < f->mask || len > f->mask) {
690 tlb_debug("forcing full flush midx %d ("
691 "%016" VADDR_PRIx "/%016" VADDR_PRIx "+%016" VADDR_PRIx ")\n",
692 midx, addr, mask, len);
693 tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime());
694 return;
695 }
696
697 /*
698 * Check if we need to flush due to large pages.
699 * Because large_page_mask contains all 1's from the msb,
700 * we only need to test the end of the range.
701 */
702 if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) {
703 tlb_debug("forcing full flush midx %d ("
704 "%016" VADDR_PRIx "/%016" VADDR_PRIx ")\n",
705 midx, d->large_page_addr, d->large_page_mask);
706 tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime());
707 return;
708 }
709
710 for (vaddr i = 0; i < len; i += TARGET_PAGE_SIZE) {
711 vaddr page = addr + i;
712 CPUTLBEntry *entry = tlb_entry(cpu, midx, page);
713
714 if (tlb_flush_entry_mask_locked(entry, page, mask)) {
715 tlb_n_used_entries_dec(cpu, midx);
716 }
717 tlb_flush_vtlb_page_mask_locked(cpu, midx, page, mask);
718 }
719 }
720
721 typedef struct {
722 vaddr addr;
723 vaddr len;
724 uint16_t idxmap;
725 uint16_t bits;
726 } TLBFlushRangeData;
727
static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
729 TLBFlushRangeData d)
730 {
731 int mmu_idx;
732
733 assert_cpu_is_self(cpu);
734
735 tlb_debug("range: %016" VADDR_PRIx "/%u+%016" VADDR_PRIx " mmu_map:0x%x\n",
736 d.addr, d.bits, d.len, d.idxmap);
737
738 qemu_spin_lock(&cpu->neg.tlb.c.lock);
739 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
740 if ((d.idxmap >> mmu_idx) & 1) {
741 tlb_flush_range_locked(cpu, mmu_idx, d.addr, d.len, d.bits);
742 }
743 }
744 qemu_spin_unlock(&cpu->neg.tlb.c.lock);
745
746 /*
747 * If the length is larger than the jump cache size, then it will take
748 * longer to clear each entry individually than it will to clear it all.
749 */
750 if (d.len >= (TARGET_PAGE_SIZE * TB_JMP_CACHE_SIZE)) {
751 tcg_flush_jmp_cache(cpu);
752 return;
753 }
754
755 /*
756 * Discard jump cache entries for any tb which might potentially
757 * overlap the flushed pages, which includes the previous.
758 */
759 d.addr -= TARGET_PAGE_SIZE;
760 for (vaddr i = 0, n = d.len / TARGET_PAGE_SIZE + 1; i < n; i++) {
761 tb_jmp_cache_clear_page(cpu, d.addr);
762 d.addr += TARGET_PAGE_SIZE;
763 }
764 }
765
static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu,
767 run_on_cpu_data data)
768 {
769 TLBFlushRangeData *d = data.host_ptr;
770 tlb_flush_range_by_mmuidx_async_0(cpu, *d);
771 g_free(d);
772 }
773
void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
775 vaddr len, uint16_t idxmap,
776 unsigned bits)
777 {
778 TLBFlushRangeData d;
779
780 assert_cpu_is_self(cpu);
781
782 /*
783 * If all bits are significant, and len is small,
784 * this devolves to tlb_flush_page.
785 */
786 if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
787 tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
788 return;
789 }
790 /* If no page bits are significant, this devolves to tlb_flush. */
791 if (bits < TARGET_PAGE_BITS) {
792 tlb_flush_by_mmuidx(cpu, idxmap);
793 return;
794 }
795
796 /* This should already be page aligned */
797 d.addr = addr & TARGET_PAGE_MASK;
798 d.len = len;
799 d.idxmap = idxmap;
800 d.bits = bits;
801
802 tlb_flush_range_by_mmuidx_async_0(cpu, d);
803 }
804
void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
806 uint16_t idxmap, unsigned bits)
807 {
808 tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits);
809 }
810
void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
812 vaddr addr,
813 vaddr len,
814 uint16_t idxmap,
815 unsigned bits)
816 {
817 TLBFlushRangeData d, *p;
818 CPUState *dst_cpu;
819
820 /*
821 * If all bits are significant, and len is small,
822 * this devolves to tlb_flush_page.
823 */
824 if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
825 tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
826 return;
827 }
828 /* If no page bits are significant, this devolves to tlb_flush. */
829 if (bits < TARGET_PAGE_BITS) {
830 tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
831 return;
832 }
833
834 /* This should already be page aligned */
835 d.addr = addr & TARGET_PAGE_MASK;
836 d.len = len;
837 d.idxmap = idxmap;
838 d.bits = bits;
839
840 /* Allocate a separate data block for each destination cpu. */
841 CPU_FOREACH(dst_cpu) {
842 if (dst_cpu != src_cpu) {
843 p = g_memdup(&d, sizeof(d));
844 async_run_on_cpu(dst_cpu, tlb_flush_range_by_mmuidx_async_1,
845 RUN_ON_CPU_HOST_PTR(p));
846 }
847 }
848
849 p = g_memdup(&d, sizeof(d));
850 async_safe_run_on_cpu(src_cpu, tlb_flush_range_by_mmuidx_async_1,
851 RUN_ON_CPU_HOST_PTR(p));
852 }
853
void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
855 vaddr addr,
856 uint16_t idxmap,
857 unsigned bits)
858 {
859 tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE,
860 idxmap, bits);
861 }
862
863 /* update the TLBs so that writes to code in the virtual page 'addr'
864 can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
866 {
867 cpu_physical_memory_test_and_clear_dirty(ram_addr & TARGET_PAGE_MASK,
868 TARGET_PAGE_SIZE,
869 DIRTY_MEMORY_CODE);
870 }
871
872 /* update the TLB so that writes in physical page 'phys_addr' are no longer
873 tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
875 {
876 cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
877 }
878
879
880 /*
881 * Dirty write flag handling
882 *
883 * When the TCG code writes to a location it looks up the address in
884 * the TLB and uses that data to compute the final address. If any of
885 * the lower bits of the address are set then the slow path is forced.
886 * There are a number of reasons to do this but for normal RAM the
887 * most usual is detecting writes to code regions which may invalidate
888 * generated code.
889 *
890 * Other vCPUs might be reading their TLBs during guest execution, so we update
891 * te->addr_write with qatomic_set. We don't need to worry about this for
892 * oversized guests as MTTCG is disabled for them.
893 *
894 * Called with tlb_c.lock held.
895 */
static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
897 uintptr_t start, uintptr_t length)
898 {
899 uintptr_t addr = tlb_entry->addr_write;
900
901 if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
902 TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
903 addr &= TARGET_PAGE_MASK;
904 addr += tlb_entry->addend;
905 if ((addr - start) < length) {
906 #if TARGET_LONG_BITS == 32
907 uint32_t *ptr_write = (uint32_t *)&tlb_entry->addr_write;
908 ptr_write += HOST_BIG_ENDIAN;
909 qatomic_set(ptr_write, *ptr_write | TLB_NOTDIRTY);
910 #elif TCG_OVERSIZED_GUEST
911 tlb_entry->addr_write |= TLB_NOTDIRTY;
912 #else
913 qatomic_set(&tlb_entry->addr_write,
914 tlb_entry->addr_write | TLB_NOTDIRTY);
915 #endif
916 }
917 }
918 }
919
920 /*
921 * Called with tlb_c.lock held.
922 * Called only from the vCPU context, i.e. the TLB's owner thread.
923 */
static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
925 {
926 *d = *s;
927 }
928
929 /* This is a cross vCPU call (i.e. another vCPU resetting the flags of
930 * the target vCPU).
931 * We must take tlb_c.lock to avoid racing with another vCPU update. The only
932 * thing actually updated is the target TLB entry ->addr_write flags.
933 */
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
935 {
936 int mmu_idx;
937
938 qemu_spin_lock(&cpu->neg.tlb.c.lock);
939 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
940 unsigned int i;
941 unsigned int n = tlb_n_entries(&cpu->neg.tlb.f[mmu_idx]);
942
943 for (i = 0; i < n; i++) {
944 tlb_reset_dirty_range_locked(&cpu->neg.tlb.f[mmu_idx].table[i],
945 start1, length);
946 }
947
948 for (i = 0; i < CPU_VTLB_SIZE; i++) {
949 tlb_reset_dirty_range_locked(&cpu->neg.tlb.d[mmu_idx].vtable[i],
950 start1, length);
951 }
952 }
953 qemu_spin_unlock(&cpu->neg.tlb.c.lock);
954 }
955
956 /* Called with tlb_c.lock held */
static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
958 vaddr addr)
959 {
960 if (tlb_entry->addr_write == (addr | TLB_NOTDIRTY)) {
961 tlb_entry->addr_write = addr;
962 }
963 }
964
965 /* update the TLB corresponding to virtual page vaddr
966 so that it is no longer dirty */
static void tlb_set_dirty(CPUState *cpu, vaddr addr)
968 {
969 int mmu_idx;
970
971 assert_cpu_is_self(cpu);
972
973 addr &= TARGET_PAGE_MASK;
974 qemu_spin_lock(&cpu->neg.tlb.c.lock);
975 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
976 tlb_set_dirty1_locked(tlb_entry(cpu, mmu_idx, addr), addr);
977 }
978
979 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
980 int k;
981 for (k = 0; k < CPU_VTLB_SIZE; k++) {
982 tlb_set_dirty1_locked(&cpu->neg.tlb.d[mmu_idx].vtable[k], addr);
983 }
984 }
985 qemu_spin_unlock(&cpu->neg.tlb.c.lock);
986 }
987
988 /* Our TLB does not support large pages, so remember the area covered by
989 large pages and trigger a full TLB flush if these are invalidated. */
static void tlb_add_large_page(CPUState *cpu, int mmu_idx,
991 vaddr addr, uint64_t size)
992 {
993 vaddr lp_addr = cpu->neg.tlb.d[mmu_idx].large_page_addr;
994 vaddr lp_mask = ~(size - 1);
995
996 if (lp_addr == (vaddr)-1) {
997 /* No previous large page. */
998 lp_addr = addr;
999 } else {
1000 /* Extend the existing region to include the new page.
1001 This is a compromise between unnecessary flushes and
1002 the cost of maintaining a full variable size TLB. */
1003 lp_mask &= cpu->neg.tlb.d[mmu_idx].large_page_mask;
1004 while (((lp_addr ^ addr) & lp_mask) != 0) {
1005 lp_mask <<= 1;
1006 }
1007 }
1008 cpu->neg.tlb.d[mmu_idx].large_page_addr = lp_addr & lp_mask;
1009 cpu->neg.tlb.d[mmu_idx].large_page_mask = lp_mask;
1010 }
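/*
 * Illustrative example: if a 2 MiB page at 0x00200000 is already
 * recorded and another 2 MiB page at 0x00600000 is added, the loop
 * above widens the mask until both addresses match, leaving
 * large_page_addr == 0 and a mask covering an 8 MiB region; a later
 * tlb_flush_page() anywhere in that region then forces a full flush
 * of this mmu_idx.
 */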
1011
static inline void tlb_set_compare(CPUTLBEntryFull *full, CPUTLBEntry *ent,
1013 vaddr address, int flags,
1014 MMUAccessType access_type, bool enable)
1015 {
1016 if (enable) {
1017 address |= flags & TLB_FLAGS_MASK;
1018 flags &= TLB_SLOW_FLAGS_MASK;
1019 if (flags) {
1020 address |= TLB_FORCE_SLOW;
1021 }
1022 } else {
1023 address = -1;
1024 flags = 0;
1025 }
1026 ent->addr_idx[access_type] = address;
1027 full->slow_flags[access_type] = flags;
1028 }
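/*
 * Sketch of the split performed above (no new behaviour implied): bits
 * in TLB_FLAGS_MASK stay in the comparator word itself where the fast
 * path can see them, while the remaining bits are parked in
 * full->slow_flags[access_type] and merely signalled via
 * TLB_FORCE_SLOW, so a fast-path hit only ever inspects one word.
 */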
1029
1030 /*
1031 * Add a new TLB entry. At most one entry for a given virtual address
1032 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
1033 * supplied size is only used by tlb_flush_page.
1034 *
1035 * Called from TCG-generated code, which is under an RCU read-side
1036 * critical section.
1037 */
void tlb_set_page_full(CPUState *cpu, int mmu_idx,
1039 vaddr addr, CPUTLBEntryFull *full)
1040 {
1041 CPUTLB *tlb = &cpu->neg.tlb;
1042 CPUTLBDesc *desc = &tlb->d[mmu_idx];
1043 MemoryRegionSection *section;
1044 unsigned int index, read_flags, write_flags;
1045 uintptr_t addend;
1046 CPUTLBEntry *te, tn;
1047 hwaddr iotlb, xlat, sz, paddr_page;
1048 vaddr addr_page;
1049 int asidx, wp_flags, prot;
1050 bool is_ram, is_romd;
1051
1052 assert_cpu_is_self(cpu);
1053
1054 if (full->lg_page_size <= TARGET_PAGE_BITS) {
1055 sz = TARGET_PAGE_SIZE;
1056 } else {
1057 sz = (hwaddr)1 << full->lg_page_size;
1058 tlb_add_large_page(cpu, mmu_idx, addr, sz);
1059 }
1060 addr_page = addr & TARGET_PAGE_MASK;
1061 paddr_page = full->phys_addr & TARGET_PAGE_MASK;
1062
1063 prot = full->prot;
1064 asidx = cpu_asidx_from_attrs(cpu, full->attrs);
1065 section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
1066 &xlat, &sz, full->attrs, &prot);
1067 assert(sz >= TARGET_PAGE_SIZE);
1068
1069 tlb_debug("vaddr=%016" VADDR_PRIx " paddr=0x" HWADDR_FMT_plx
1070 " prot=%x idx=%d\n",
1071 addr, full->phys_addr, prot, mmu_idx);
1072
1073 read_flags = full->tlb_fill_flags;
1074 if (full->lg_page_size < TARGET_PAGE_BITS) {
1075 /* Repeat the MMU check and TLB fill on every access. */
1076 read_flags |= TLB_INVALID_MASK;
1077 }
1078
1079 is_ram = memory_region_is_ram(section->mr);
1080 is_romd = memory_region_is_romd(section->mr);
1081
1082 if (is_ram || is_romd) {
1083 /* RAM and ROMD both have associated host memory. */
1084 addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
1085 } else {
1086 /* I/O does not; force the host address to NULL. */
1087 addend = 0;
1088 }
1089
1090 write_flags = read_flags;
1091 if (is_ram) {
1092 iotlb = memory_region_get_ram_addr(section->mr) + xlat;
1093 assert(!(iotlb & ~TARGET_PAGE_MASK));
1094 /*
1095 * Computing is_clean is expensive; avoid all that unless
1096 * the page is actually writable.
1097 */
1098 if (prot & PAGE_WRITE) {
1099 if (section->readonly) {
1100 write_flags |= TLB_DISCARD_WRITE;
1101 } else if (cpu_physical_memory_is_clean(iotlb)) {
1102 write_flags |= TLB_NOTDIRTY;
1103 }
1104 }
1105 } else {
1106 /* I/O or ROMD */
1107 iotlb = memory_region_section_get_iotlb(cpu, section) + xlat;
1108 /*
1109 * Writes to romd devices must go through MMIO to enable write.
1110 * Reads to romd devices go through the ram_ptr found above,
1111 * but of course reads to I/O must go through MMIO.
1112 */
1113 write_flags |= TLB_MMIO;
1114 if (!is_romd) {
1115 read_flags = write_flags;
1116 }
1117 }
1118
1119 wp_flags = cpu_watchpoint_address_matches(cpu, addr_page,
1120 TARGET_PAGE_SIZE);
1121
1122 index = tlb_index(cpu, mmu_idx, addr_page);
1123 te = tlb_entry(cpu, mmu_idx, addr_page);
1124
1125 /*
1126 * Hold the TLB lock for the rest of the function. We could acquire/release
1127 * the lock several times in the function, but it is faster to amortize the
1128 * acquisition cost by acquiring it just once. Note that this leads to
1129 * a longer critical section, but this is not a concern since the TLB lock
1130 * is unlikely to be contended.
1131 */
1132 qemu_spin_lock(&tlb->c.lock);
1133
1134 /* Note that the tlb is no longer clean. */
1135 tlb->c.dirty |= 1 << mmu_idx;
1136
1137 /* Make sure there's no cached translation for the new page. */
1138 tlb_flush_vtlb_page_locked(cpu, mmu_idx, addr_page);
1139
1140 /*
1141 * Only evict the old entry to the victim tlb if it's for a
1142 * different page; otherwise just overwrite the stale data.
1143 */
1144 if (!tlb_hit_page_anyprot(te, addr_page) && !tlb_entry_is_empty(te)) {
1145 unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
1146 CPUTLBEntry *tv = &desc->vtable[vidx];
1147
1148 /* Evict the old entry into the victim tlb. */
1149 copy_tlb_helper_locked(tv, te);
1150 desc->vfulltlb[vidx] = desc->fulltlb[index];
1151 tlb_n_used_entries_dec(cpu, mmu_idx);
1152 }
1153
1154 /* refill the tlb */
1155 /*
1156 * When memory region is ram, iotlb contains a TARGET_PAGE_BITS
1157 * aligned ram_addr_t of the page base of the target RAM.
1158 * Otherwise, iotlb contains
1159 * - a physical section number in the lower TARGET_PAGE_BITS
1160 * - the offset within section->mr of the page base (I/O, ROMD) with the
1161 * TARGET_PAGE_BITS masked off.
1162 * We subtract addr_page (which is page aligned and thus won't
1163 * disturb the low bits) to give an offset which can be added to the
1164 * (non-page-aligned) vaddr of the eventual memory access to get
1165 * the MemoryRegion offset for the access. Note that the vaddr we
1166 * subtract here is that of the page base, and not the same as the
1167 * vaddr we add back in io_prepare()/get_page_addr_code().
1168 */
1169 desc->fulltlb[index] = *full;
1170 full = &desc->fulltlb[index];
1171 full->xlat_section = iotlb - addr_page;
1172 full->phys_addr = paddr_page;
1173
1174 /* Now calculate the new entry */
1175 tn.addend = addend - addr_page;
1176
1177 tlb_set_compare(full, &tn, addr_page, read_flags,
1178 MMU_INST_FETCH, prot & PAGE_EXEC);
1179
1180 if (wp_flags & BP_MEM_READ) {
1181 read_flags |= TLB_WATCHPOINT;
1182 }
1183 tlb_set_compare(full, &tn, addr_page, read_flags,
1184 MMU_DATA_LOAD, prot & PAGE_READ);
1185
1186 if (prot & PAGE_WRITE_INV) {
1187 write_flags |= TLB_INVALID_MASK;
1188 }
1189 if (wp_flags & BP_MEM_WRITE) {
1190 write_flags |= TLB_WATCHPOINT;
1191 }
1192 tlb_set_compare(full, &tn, addr_page, write_flags,
1193 MMU_DATA_STORE, prot & PAGE_WRITE);
1194
1195 copy_tlb_helper_locked(te, &tn);
1196 tlb_n_used_entries_inc(cpu, mmu_idx);
1197 qemu_spin_unlock(&tlb->c.lock);
1198 }
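/*
 * Worked example of the xlat_section arithmetic described above
 * (illustrative numbers): for a RAM page, if the page's ram_addr_t is
 * 0x01234000 and addr_page is 0x7f005000, then full->xlat_section ==
 * 0x01234000 - 0x7f005000.  A later store to vaddr 0x7f005123 recovers
 * the ram address as 0x7f005123 + xlat_section == 0x01234123 (see
 * notdirty_write()), without another page-table walk.
 */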
1199
void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
1201 hwaddr paddr, MemTxAttrs attrs, int prot,
1202 int mmu_idx, uint64_t size)
1203 {
1204 CPUTLBEntryFull full = {
1205 .phys_addr = paddr,
1206 .attrs = attrs,
1207 .prot = prot,
1208 .lg_page_size = ctz64(size)
1209 };
1210
1211 assert(is_power_of_2(size));
1212 tlb_set_page_full(cpu, mmu_idx, addr, &full);
1213 }
1214
void tlb_set_page(CPUState *cpu, vaddr addr,
1216 hwaddr paddr, int prot,
1217 int mmu_idx, uint64_t size)
1218 {
1219 tlb_set_page_with_attrs(cpu, addr, paddr, MEMTXATTRS_UNSPECIFIED,
1220 prot, mmu_idx, size);
1221 }
1222
1223 /*
1224 * Note: tlb_fill_align() can trigger a resize of the TLB.
1225 * This means that all of the caller's prior references to the TLB table
1226 * (e.g. CPUTLBEntry pointers) must be discarded and looked up again
1227 * (e.g. via tlb_entry()).
1228 */
static bool tlb_fill_align(CPUState *cpu, vaddr addr, MMUAccessType type,
1230 int mmu_idx, MemOp memop, int size,
1231 bool probe, uintptr_t ra)
1232 {
1233 const TCGCPUOps *ops = cpu->cc->tcg_ops;
1234 CPUTLBEntryFull full;
1235
1236 if (ops->tlb_fill_align) {
1237 if (ops->tlb_fill_align(cpu, &full, addr, type, mmu_idx,
1238 memop, size, probe, ra)) {
1239 tlb_set_page_full(cpu, mmu_idx, addr, &full);
1240 return true;
1241 }
1242 } else {
1243 /* Legacy behaviour is alignment before paging. */
1244 if (addr & ((1u << memop_alignment_bits(memop)) - 1)) {
1245 ops->do_unaligned_access(cpu, addr, type, mmu_idx, ra);
1246 }
1247 if (ops->tlb_fill(cpu, addr, size, type, mmu_idx, probe, ra)) {
1248 return true;
1249 }
1250 }
1251 assert(probe);
1252 return false;
1253 }
1254
static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
1256 MMUAccessType access_type,
1257 int mmu_idx, uintptr_t retaddr)
1258 {
1259 cpu->cc->tcg_ops->do_unaligned_access(cpu, addr, access_type,
1260 mmu_idx, retaddr);
1261 }
1262
1263 static MemoryRegionSection *
io_prepare(hwaddr *out_offset, CPUState *cpu, hwaddr xlat,
1265 MemTxAttrs attrs, vaddr addr, uintptr_t retaddr)
1266 {
1267 MemoryRegionSection *section;
1268 hwaddr mr_offset;
1269
1270 section = iotlb_to_section(cpu, xlat, attrs);
1271 mr_offset = (xlat & TARGET_PAGE_MASK) + addr;
1272 cpu->mem_io_pc = retaddr;
1273 if (!cpu->neg.can_do_io) {
1274 cpu_io_recompile(cpu, retaddr);
1275 }
1276
1277 *out_offset = mr_offset;
1278 return section;
1279 }
1280
static void io_failed(CPUState *cpu, CPUTLBEntryFull *full, vaddr addr,
1282 unsigned size, MMUAccessType access_type, int mmu_idx,
1283 MemTxResult response, uintptr_t retaddr)
1284 {
1285 if (!cpu->ignore_memory_transaction_failures
1286 && cpu->cc->tcg_ops->do_transaction_failed) {
1287 hwaddr physaddr = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
1288
1289 cpu->cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size,
1290 access_type, mmu_idx,
1291 full->attrs, response, retaddr);
1292 }
1293 }
1294
1295 /* Return true if ADDR is present in the victim tlb, and has been copied
1296 back to the main tlb. */
static bool victim_tlb_hit(CPUState *cpu, size_t mmu_idx, size_t index,
1298 MMUAccessType access_type, vaddr page)
1299 {
1300 size_t vidx;
1301
1302 assert_cpu_is_self(cpu);
1303 for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
1304 CPUTLBEntry *vtlb = &cpu->neg.tlb.d[mmu_idx].vtable[vidx];
1305 uint64_t cmp = tlb_read_idx(vtlb, access_type);
1306
1307 if (cmp == page) {
1308 /* Found entry in victim tlb, swap tlb and iotlb. */
1309 CPUTLBEntry tmptlb, *tlb = &cpu->neg.tlb.f[mmu_idx].table[index];
1310
1311 qemu_spin_lock(&cpu->neg.tlb.c.lock);
1312 copy_tlb_helper_locked(&tmptlb, tlb);
1313 copy_tlb_helper_locked(tlb, vtlb);
1314 copy_tlb_helper_locked(vtlb, &tmptlb);
1315 qemu_spin_unlock(&cpu->neg.tlb.c.lock);
1316
1317 CPUTLBEntryFull *f1 = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
1318 CPUTLBEntryFull *f2 = &cpu->neg.tlb.d[mmu_idx].vfulltlb[vidx];
1319 CPUTLBEntryFull tmpf;
1320 tmpf = *f1; *f1 = *f2; *f2 = tmpf;
1321 return true;
1322 }
1323 }
1324 return false;
1325 }
1326
static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
1328 CPUTLBEntryFull *full, uintptr_t retaddr)
1329 {
1330 ram_addr_t ram_addr = mem_vaddr + full->xlat_section;
1331
1332 trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
1333
1334 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
1335 tb_invalidate_phys_range_fast(ram_addr, size, retaddr);
1336 }
1337
1338 /*
1339 * Set both VGA and migration bits for simplicity and to remove
1340 * the notdirty callback faster.
1341 */
1342 cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE);
1343
1344 /* We remove the notdirty callback only if the code has been flushed. */
1345 if (!cpu_physical_memory_is_clean(ram_addr)) {
1346 trace_memory_notdirty_set_dirty(mem_vaddr);
1347 tlb_set_dirty(cpu, mem_vaddr);
1348 }
1349 }
1350
static int probe_access_internal(CPUState *cpu, vaddr addr,
1352 int fault_size, MMUAccessType access_type,
1353 int mmu_idx, bool nonfault,
1354 void **phost, CPUTLBEntryFull **pfull,
1355 uintptr_t retaddr, bool check_mem_cbs)
1356 {
1357 uintptr_t index = tlb_index(cpu, mmu_idx, addr);
1358 CPUTLBEntry *entry = tlb_entry(cpu, mmu_idx, addr);
1359 uint64_t tlb_addr = tlb_read_idx(entry, access_type);
1360 vaddr page_addr = addr & TARGET_PAGE_MASK;
1361 int flags = TLB_FLAGS_MASK & ~TLB_FORCE_SLOW;
1362 bool force_mmio = check_mem_cbs && cpu_plugin_mem_cbs_enabled(cpu);
1363 CPUTLBEntryFull *full;
1364
1365 if (!tlb_hit_page(tlb_addr, page_addr)) {
1366 if (!victim_tlb_hit(cpu, mmu_idx, index, access_type, page_addr)) {
1367 if (!tlb_fill_align(cpu, addr, access_type, mmu_idx,
1368 0, fault_size, nonfault, retaddr)) {
1369 /* Non-faulting page table read failed. */
1370 *phost = NULL;
1371 *pfull = NULL;
1372 return TLB_INVALID_MASK;
1373 }
1374
1375 /* TLB resize via tlb_fill_align may have moved the entry. */
1376 index = tlb_index(cpu, mmu_idx, addr);
1377 entry = tlb_entry(cpu, mmu_idx, addr);
1378
1379 /*
1380 * With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately,
1381 * to force the next access through tlb_fill_align. We've just
1382 * called tlb_fill_align, so we know that this entry *is* valid.
1383 */
1384 flags &= ~TLB_INVALID_MASK;
1385 }
1386 tlb_addr = tlb_read_idx(entry, access_type);
1387 }
1388 flags &= tlb_addr;
1389
1390 *pfull = full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
1391 flags |= full->slow_flags[access_type];
1392
1393 /* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */
1394 if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY | TLB_CHECK_ALIGNED))
1395 || (access_type != MMU_INST_FETCH && force_mmio)) {
1396 *phost = NULL;
1397 return TLB_MMIO;
1398 }
1399
1400 /* Everything else is RAM. */
1401 *phost = (void *)((uintptr_t)addr + entry->addend);
1402 return flags;
1403 }
1404
int probe_access_full(CPUArchState *env, vaddr addr, int size,
1406 MMUAccessType access_type, int mmu_idx,
1407 bool nonfault, void **phost, CPUTLBEntryFull **pfull,
1408 uintptr_t retaddr)
1409 {
1410 int flags = probe_access_internal(env_cpu(env), addr, size, access_type,
1411 mmu_idx, nonfault, phost, pfull, retaddr,
1412 true);
1413
1414 /* Handle clean RAM pages. */
1415 if (unlikely(flags & TLB_NOTDIRTY)) {
1416 int dirtysize = size == 0 ? 1 : size;
1417 notdirty_write(env_cpu(env), addr, dirtysize, *pfull, retaddr);
1418 flags &= ~TLB_NOTDIRTY;
1419 }
1420
1421 return flags;
1422 }
1423
int probe_access_full_mmu(CPUArchState *env, vaddr addr, int size,
1425 MMUAccessType access_type, int mmu_idx,
1426 void **phost, CPUTLBEntryFull **pfull)
1427 {
1428 void *discard_phost;
1429 CPUTLBEntryFull *discard_tlb;
1430
1431 /* privately handle users that don't need full results */
1432 phost = phost ? phost : &discard_phost;
1433 pfull = pfull ? pfull : &discard_tlb;
1434
1435 int flags = probe_access_internal(env_cpu(env), addr, size, access_type,
1436 mmu_idx, true, phost, pfull, 0, false);
1437
1438 /* Handle clean RAM pages. */
1439 if (unlikely(flags & TLB_NOTDIRTY)) {
1440 int dirtysize = size == 0 ? 1 : size;
1441 notdirty_write(env_cpu(env), addr, dirtysize, *pfull, 0);
1442 flags &= ~TLB_NOTDIRTY;
1443 }
1444
1445 return flags;
1446 }
1447
int probe_access_flags(CPUArchState *env, vaddr addr, int size,
1449 MMUAccessType access_type, int mmu_idx,
1450 bool nonfault, void **phost, uintptr_t retaddr)
1451 {
1452 CPUTLBEntryFull *full;
1453 int flags;
1454
1455 g_assert(-(addr | TARGET_PAGE_MASK) >= size);
1456
1457 flags = probe_access_internal(env_cpu(env), addr, size, access_type,
1458 mmu_idx, nonfault, phost, &full, retaddr,
1459 true);
1460
1461 /* Handle clean RAM pages. */
1462 if (unlikely(flags & TLB_NOTDIRTY)) {
1463 int dirtysize = size == 0 ? 1 : size;
1464 notdirty_write(env_cpu(env), addr, dirtysize, full, retaddr);
1465 flags &= ~TLB_NOTDIRTY;
1466 }
1467
1468 return flags;
1469 }
1470
void *probe_access(CPUArchState *env, vaddr addr, int size,
1472 MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
1473 {
1474 CPUTLBEntryFull *full;
1475 void *host;
1476 int flags;
1477
1478 g_assert(-(addr | TARGET_PAGE_MASK) >= size);
1479
1480 flags = probe_access_internal(env_cpu(env), addr, size, access_type,
1481 mmu_idx, false, &host, &full, retaddr,
1482 true);
1483
1484 /* Per the interface, size == 0 merely faults the access. */
1485 if (size == 0) {
1486 return NULL;
1487 }
1488
1489 if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) {
1490 /* Handle watchpoints. */
1491 if (flags & TLB_WATCHPOINT) {
1492 int wp_access = (access_type == MMU_DATA_STORE
1493 ? BP_MEM_WRITE : BP_MEM_READ);
1494 cpu_check_watchpoint(env_cpu(env), addr, size,
1495 full->attrs, wp_access, retaddr);
1496 }
1497
1498 /* Handle clean RAM pages. */
1499 if (flags & TLB_NOTDIRTY) {
1500 notdirty_write(env_cpu(env), addr, size, full, retaddr);
1501 }
1502 }
1503
1504 return host;
1505 }
1506
void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
1508 MMUAccessType access_type, int mmu_idx)
1509 {
1510 CPUTLBEntryFull *full;
1511 void *host;
1512 int flags;
1513
1514 flags = probe_access_internal(env_cpu(env), addr, 0, access_type,
1515 mmu_idx, true, &host, &full, 0, false);
1516
1517 /* No combination of flags are expected by the caller. */
1518 return flags ? NULL : host;
1519 }
1520
1521 /*
1522 * Return a ram_addr_t for the virtual address for execution.
1523 *
1524 * Return -1 if we can't translate and execute from an entire page
1525 * of RAM. This will force us to execute by loading and translating
1526 * one insn at a time, without caching.
1527 *
1528 * NOTE: This function will trigger an exception if the page is
1529 * not executable.
1530 */
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
1532 void **hostp)
1533 {
1534 CPUTLBEntryFull *full;
1535 void *p;
1536
1537 (void)probe_access_internal(env_cpu(env), addr, 1, MMU_INST_FETCH,
1538 cpu_mmu_index(env_cpu(env), true), false,
1539 &p, &full, 0, false);
1540 if (p == NULL) {
1541 return -1;
1542 }
1543
1544 if (full->lg_page_size < TARGET_PAGE_BITS) {
1545 return -1;
1546 }
1547
1548 if (hostp) {
1549 *hostp = p;
1550 }
1551 return qemu_ram_addr_from_host_nofail(p);
1552 }
1553
1554 /* Load/store with atomicity primitives. */
1555 #include "ldst_atomicity.c.inc"
1556
1557 #ifdef CONFIG_PLUGIN
1558 /*
1559 * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure.
1560 * This should be a hot path as we will have just looked this path up
1561 * in the softmmu lookup code (or helper). We don't handle re-fills or
1562 * checking the victim table. This is purely informational.
1563 *
1564 * The one corner case is i/o write, which can cause changes to the
1565 * address space. Those changes, and the corresponding tlb flush,
1566 * should be delayed until the next TB, so even then this ought not fail.
1567 * But check, Just in Case.
1568 */
bool tlb_plugin_lookup(CPUState *cpu, vaddr addr, int mmu_idx,
1570 bool is_store, struct qemu_plugin_hwaddr *data)
1571 {
1572 CPUTLBEntry *tlbe = tlb_entry(cpu, mmu_idx, addr);
1573 uintptr_t index = tlb_index(cpu, mmu_idx, addr);
1574 MMUAccessType access_type = is_store ? MMU_DATA_STORE : MMU_DATA_LOAD;
1575 uint64_t tlb_addr = tlb_read_idx(tlbe, access_type);
1576 CPUTLBEntryFull *full;
1577
1578 if (unlikely(!tlb_hit(tlb_addr, addr))) {
1579 return false;
1580 }
1581
1582 full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
1583 data->phys_addr = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
1584
1585 /* We must have an iotlb entry for MMIO */
1586 if (tlb_addr & TLB_MMIO) {
1587 MemoryRegionSection *section =
1588 iotlb_to_section(cpu, full->xlat_section & ~TARGET_PAGE_MASK,
1589 full->attrs);
1590 data->is_io = true;
1591 data->mr = section->mr;
1592 } else {
1593 data->is_io = false;
1594 data->mr = NULL;
1595 }
1596 return true;
1597 }
1598 #endif
1599
1600 /*
1601 * Probe for a load/store operation.
 * Return the host address and, in @flags, the TLB flags.
1603 */
1604
1605 typedef struct MMULookupPageData {
1606 CPUTLBEntryFull *full;
1607 void *haddr;
1608 vaddr addr;
1609 int flags;
1610 int size;
1611 } MMULookupPageData;
1612
1613 typedef struct MMULookupLocals {
1614 MMULookupPageData page[2];
1615 MemOp memop;
1616 int mmu_idx;
1617 } MMULookupLocals;
1618
1619 /**
1620 * mmu_lookup1: translate one page
1621 * @cpu: generic cpu state
1622 * @data: lookup parameters
1623 * @memop: memory operation for the access, or 0
1624 * @mmu_idx: virtual address context
1625 * @access_type: load/store/code
1626 * @ra: return address into tcg generated code, or 0
1627 *
1628 * Resolve the translation for the one page at @data.addr, filling in
1629 * the rest of @data with the results. If the translation fails,
1630 * tlb_fill_align will longjmp out. Return true if the softmmu tlb for
1631 * @mmu_idx may have been resized.
1632 */
1633 static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data, MemOp memop,
1634 int mmu_idx, MMUAccessType access_type, uintptr_t ra)
1635 {
1636 vaddr addr = data->addr;
1637 uintptr_t index = tlb_index(cpu, mmu_idx, addr);
1638 CPUTLBEntry *entry = tlb_entry(cpu, mmu_idx, addr);
1639 uint64_t tlb_addr = tlb_read_idx(entry, access_type);
1640 bool maybe_resized = false;
1641 CPUTLBEntryFull *full;
1642 int flags;
1643
1644 /* If the TLB entry is for a different page, reload and try again. */
1645 if (!tlb_hit(tlb_addr, addr)) {
1646 if (!victim_tlb_hit(cpu, mmu_idx, index, access_type,
1647 addr & TARGET_PAGE_MASK)) {
1648 tlb_fill_align(cpu, addr, access_type, mmu_idx,
1649 memop, data->size, false, ra);
1650 maybe_resized = true;
1651 index = tlb_index(cpu, mmu_idx, addr);
1652 entry = tlb_entry(cpu, mmu_idx, addr);
1653 }
1654 tlb_addr = tlb_read_idx(entry, access_type) & ~TLB_INVALID_MASK;
1655 }
1656
1657 full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
1658 flags = tlb_addr & (TLB_FLAGS_MASK & ~TLB_FORCE_SLOW);
1659 flags |= full->slow_flags[access_type];
1660
1661 if (likely(!maybe_resized)) {
1662 /* Alignment has not been checked by tlb_fill_align. */
1663 int a_bits = memop_alignment_bits(memop);
1664
1665 /*
1666 * This alignment check differs from the one above, in that this is
1667 * based on the atomicity of the operation. The intended use case is
1668 * the ARM memory type field of each PTE, where access to pages with
1669 * Device memory type requires alignment.
1670 */
1671 if (unlikely(flags & TLB_CHECK_ALIGNED)) {
1672 int at_bits = memop_atomicity_bits(memop);
1673 a_bits = MAX(a_bits, at_bits);
1674 }
1675 if (unlikely(addr & ((1 << a_bits) - 1))) {
1676 cpu_unaligned_access(cpu, addr, access_type, mmu_idx, ra);
1677 }
1678 }
1679
1680 data->full = full;
1681 data->flags = flags;
1682 /* Compute haddr speculatively; depending on flags it might be invalid. */
1683 data->haddr = (void *)((uintptr_t)addr + entry->addend);
1684
1685 return maybe_resized;
1686 }
1687
1688 /**
1689 * mmu_watch_or_dirty
1690 * @cpu: generic cpu state
1691 * @data: lookup parameters
1692 * @access_type: load/store/code
1693 * @ra: return address into tcg generated code, or 0
1694 *
1695 * Trigger watchpoints for @data.addr:@data.size;
1696 * record writes to protected clean pages.
1697 */
1698 static void mmu_watch_or_dirty(CPUState *cpu, MMULookupPageData *data,
1699 MMUAccessType access_type, uintptr_t ra)
1700 {
1701 CPUTLBEntryFull *full = data->full;
1702 vaddr addr = data->addr;
1703 int flags = data->flags;
1704 int size = data->size;
1705
1706 /* On watchpoint hit, this will longjmp out. */
1707 if (flags & TLB_WATCHPOINT) {
1708 int wp = access_type == MMU_DATA_STORE ? BP_MEM_WRITE : BP_MEM_READ;
1709 cpu_check_watchpoint(cpu, addr, size, full->attrs, wp, ra);
1710 flags &= ~TLB_WATCHPOINT;
1711 }
1712
1713 /* Note that notdirty is only set for writes. */
1714 if (flags & TLB_NOTDIRTY) {
1715 notdirty_write(cpu, addr, size, full, ra);
1716 flags &= ~TLB_NOTDIRTY;
1717 }
1718 data->flags = flags;
1719 }
1720
1721 /**
1722 * mmu_lookup: translate page(s)
1723 * @cpu: generic cpu state
1724 * @addr: virtual address
1725 * @oi: combined mmu_idx and MemOp
1726 * @ra: return address into tcg generated code, or 0
1727 * @access_type: load/store/code
1728 * @l: output result
1729 *
1730 * Resolve the translation for the page(s) beginning at @addr, for MemOp.size
1731 * bytes. Return true if the lookup crosses a page boundary.
1732 */
1733 static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
1734 uintptr_t ra, MMUAccessType type, MMULookupLocals *l)
1735 {
1736 bool crosspage;
1737 int flags;
1738
1739 l->memop = get_memop(oi);
1740 l->mmu_idx = get_mmuidx(oi);
1741
1742 tcg_debug_assert(l->mmu_idx < NB_MMU_MODES);
1743
1744 l->page[0].addr = addr;
1745 l->page[0].size = memop_size(l->memop);
1746 l->page[1].addr = (addr + l->page[0].size - 1) & TARGET_PAGE_MASK;
1747 l->page[1].size = 0;
1748 crosspage = (addr ^ l->page[1].addr) & TARGET_PAGE_MASK;
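/*
 * Worked example, assuming 4 KiB pages: an 8-byte access at
 * addr == 0x1ffc gives page[1].addr == 0x2000, so crosspage is
 * non-zero and the access is split below into size0 == 4 bytes on
 * the first page and 4 bytes on the second.
 */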
1749
1750 if (likely(!crosspage)) {
1751 mmu_lookup1(cpu, &l->page[0], l->memop, l->mmu_idx, type, ra);
1752
1753 flags = l->page[0].flags;
1754 if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) {
1755 mmu_watch_or_dirty(cpu, &l->page[0], type, ra);
1756 }
1757 if (unlikely(flags & TLB_BSWAP)) {
1758 l->memop ^= MO_BSWAP;
1759 }
1760 } else {
1761 /* Finish computing the page-crossing split. */
1762 int size0 = l->page[1].addr - addr;
1763 l->page[1].size = l->page[0].size - size0;
1764 l->page[0].size = size0;
1765
1766 /*
1767 * Lookup both pages, recognizing exceptions from either. If the
1768 * second lookup potentially resized, refresh first CPUTLBEntryFull.
1769 */
1770 mmu_lookup1(cpu, &l->page[0], l->memop, l->mmu_idx, type, ra);
1771 if (mmu_lookup1(cpu, &l->page[1], 0, l->mmu_idx, type, ra)) {
1772 uintptr_t index = tlb_index(cpu, l->mmu_idx, addr);
1773 l->page[0].full = &cpu->neg.tlb.d[l->mmu_idx].fulltlb[index];
1774 }
1775
1776 flags = l->page[0].flags | l->page[1].flags;
1777 if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) {
1778 mmu_watch_or_dirty(cpu, &l->page[0], type, ra);
1779 mmu_watch_or_dirty(cpu, &l->page[1], type, ra);
1780 }
1781
1782 /*
1783 * Since target/sparc is the only user of TLB_BSWAP, and all
1784 * Sparc accesses are aligned, any treatment across two pages
1785 * would be arbitrary. Refuse it until there's a use.
1786 */
1787 tcg_debug_assert((flags & TLB_BSWAP) == 0);
1788 }
1789
1790 return crosspage;
1791 }
1792
1793 /*
1794 * Probe for an atomic operation. Do not allow unaligned or i/o
1795 * operations to proceed. Return the host address.
1796 */
1797 static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
1798 int size, uintptr_t retaddr)
1799 {
1800 uintptr_t mmu_idx = get_mmuidx(oi);
1801 MemOp mop = get_memop(oi);
1802 uintptr_t index;
1803 CPUTLBEntry *tlbe;
1804 vaddr tlb_addr;
1805 void *hostaddr;
1806 CPUTLBEntryFull *full;
1807 bool did_tlb_fill = false;
1808
1809 tcg_debug_assert(mmu_idx < NB_MMU_MODES);
1810
1811 /* Adjust the given return address. */
1812 retaddr -= GETPC_ADJ;
1813
1814 index = tlb_index(cpu, mmu_idx, addr);
1815 tlbe = tlb_entry(cpu, mmu_idx, addr);
1816
1817 /* Check TLB entry and enforce page permissions. */
1818 tlb_addr = tlb_addr_write(tlbe);
1819 if (!tlb_hit(tlb_addr, addr)) {
1820 if (!victim_tlb_hit(cpu, mmu_idx, index, MMU_DATA_STORE,
1821 addr & TARGET_PAGE_MASK)) {
1822 tlb_fill_align(cpu, addr, MMU_DATA_STORE, mmu_idx,
1823 mop, size, false, retaddr);
1824 did_tlb_fill = true;
1825 index = tlb_index(cpu, mmu_idx, addr);
1826 tlbe = tlb_entry(cpu, mmu_idx, addr);
1827 }
1828 tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
1829 }
1830
1831 /*
1832 * Let the guest notice RMW on a write-only page.
1833 * We have just verified that the page is writable.
1834 * Subpage lookups may have left TLB_INVALID_MASK set,
1835 * but addr_read will only be -1 if PAGE_READ was unset.
1836 */
1837 if (unlikely(tlbe->addr_read == -1)) {
1838 tlb_fill_align(cpu, addr, MMU_DATA_LOAD, mmu_idx,
1839 0, size, false, retaddr);
1840 /*
1841 * Since we don't support reads and writes to different
1842 * addresses, and we do have the proper page loaded for
1843 * write, this shouldn't ever return.
1844 */
1845 g_assert_not_reached();
1846 }
1847
1848 /* Enforce guest required alignment, if not handled by tlb_fill_align. */
1849 if (!did_tlb_fill && (addr & ((1 << memop_alignment_bits(mop)) - 1))) {
1850 cpu_unaligned_access(cpu, addr, MMU_DATA_STORE, mmu_idx, retaddr);
1851 }
1852
1853 /* Enforce qemu required alignment. */
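/*
 * E.g. a 4-byte atomic access with (addr & 3) != 0 reaches this check
 * even when the guest ISA allows it unaligned; rather than emulate a
 * misaligned atomic inline, punt to the stop-the-world path below.
 */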
1854 if (unlikely(addr & (size - 1))) {
1855 /*
1856 * We get here if guest alignment was not requested, or was not
1857 * enforced by cpu_unaligned_access or tlb_fill_align above.
1858 * We might widen the access and emulate, but for now
1859 * mark an exception and exit the cpu loop.
1860 */
1861 goto stop_the_world;
1862 }
1863
1864 /* Collect tlb flags for read. */
1865 tlb_addr |= tlbe->addr_read;
1866
1867 /* Notice an IO access or a needs-MMU-lookup access */
1868 if (unlikely(tlb_addr & (TLB_MMIO | TLB_DISCARD_WRITE))) {
1869 /* There's really nothing that can be done to
1870 support this apart from stop-the-world. */
1871 goto stop_the_world;
1872 }
1873
1874 hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
1875 full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
1876
1877 if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
1878 notdirty_write(cpu, addr, size, full, retaddr);
1879 }
1880
1881 if (unlikely(tlb_addr & TLB_FORCE_SLOW)) {
1882 int wp_flags = 0;
1883
1884 if (full->slow_flags[MMU_DATA_STORE] & TLB_WATCHPOINT) {
1885 wp_flags |= BP_MEM_WRITE;
1886 }
1887 if (full->slow_flags[MMU_DATA_LOAD] & TLB_WATCHPOINT) {
1888 wp_flags |= BP_MEM_READ;
1889 }
1890 if (wp_flags) {
1891 cpu_check_watchpoint(cpu, addr, size,
1892 full->attrs, wp_flags, retaddr);
1893 }
1894 }
1895
1896 return hostaddr;
1897
1898 stop_the_world:
1899 cpu_loop_exit_atomic(cpu, retaddr);
1900 }
1901
1902 /*
1903 * Load Helpers
1904 *
1905 * We support two different access types. SOFTMMU_CODE_ACCESS is
1906 * specifically for reading instructions from system memory. It is
1907 * called by the translation loop and in some helpers where the code
1908 * is disassembled. It shouldn't be called directly by guest code.
1909 *
1910 * For the benefit of TCG generated code, we want to avoid the
1911 * complication of ABI-specific return type promotion and always
1912 * return a value extended to the register size of the host. This is
1913 * tcg_target_long, except in the case of a 32-bit host and 64-bit
1914 * data, and for that we always have uint64_t.
1915 *
1916 * We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
1917 */
1918
1919 /**
1920 * do_ld_mmio_beN:
1921 * @cpu: generic cpu state
1922 * @full: page parameters
1923 * @ret_be: accumulated data
1924 * @addr: virtual address
1925 * @size: number of bytes
1926 * @mmu_idx: virtual address context
1927 * @ra: return address into tcg generated code, or 0
1928 * Context: BQL held
1929 *
1930 * Load @size bytes from @addr, which is memory-mapped i/o.
1931 * The bytes are concatenated in big-endian order with @ret_be.
1932 */
1933 static uint64_t int_ld_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
1934 uint64_t ret_be, vaddr addr, int size,
1935 int mmu_idx, MMUAccessType type, uintptr_t ra,
1936 MemoryRegion *mr, hwaddr mr_offset)
1937 {
1938 do {
1939 MemOp this_mop;
1940 unsigned this_size;
1941 uint64_t val;
1942 MemTxResult r;
1943
1944 /* Read aligned pieces up to 8 bytes. */
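/*
 * E.g. with addr % 8 == 2 and size == 6, the first pass computes
 * ctz32(6 | 2 | 8) == 1 and issues a 2-byte read; the second pass
 * computes ctz32(4 | 4 | 8) == 2, issues a 4-byte read, and the
 * loop terminates with size == 0.
 */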
1945 this_mop = ctz32(size | (int)addr | 8);
1946 this_size = 1 << this_mop;
1947 this_mop |= MO_BE;
1948
1949 r = memory_region_dispatch_read(mr, mr_offset, &val,
1950 this_mop, full->attrs);
1951 if (unlikely(r != MEMTX_OK)) {
1952 io_failed(cpu, full, addr, this_size, type, mmu_idx, r, ra);
1953 }
1954 if (this_size == 8) {
1955 return val;
1956 }
1957
1958 ret_be = (ret_be << (this_size * 8)) | val;
1959 addr += this_size;
1960 mr_offset += this_size;
1961 size -= this_size;
1962 } while (size);
1963
1964 return ret_be;
1965 }
1966
1967 static uint64_t do_ld_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
1968 uint64_t ret_be, vaddr addr, int size,
1969 int mmu_idx, MMUAccessType type, uintptr_t ra)
1970 {
1971 MemoryRegionSection *section;
1972 MemoryRegion *mr;
1973 hwaddr mr_offset;
1974 MemTxAttrs attrs;
1975
1976 tcg_debug_assert(size > 0 && size <= 8);
1977
1978 attrs = full->attrs;
1979 section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
1980 mr = section->mr;
1981
1982 BQL_LOCK_GUARD();
1983 return int_ld_mmio_beN(cpu, full, ret_be, addr, size, mmu_idx,
1984 type, ra, mr, mr_offset);
1985 }
1986
1987 static Int128 do_ld16_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
1988 uint64_t ret_be, vaddr addr, int size,
1989 int mmu_idx, uintptr_t ra)
1990 {
1991 MemoryRegionSection *section;
1992 MemoryRegion *mr;
1993 hwaddr mr_offset;
1994 MemTxAttrs attrs;
1995 uint64_t a, b;
1996
1997 tcg_debug_assert(size > 8 && size <= 16);
1998
1999 attrs = full->attrs;
2000 section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
2001 mr = section->mr;
2002
2003 BQL_LOCK_GUARD();
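/*
 * The lower-addressed (size - 8) bytes form the most significant part
 * of a big-endian value, so they become the high half of the Int128
 * below; the final 8 bytes become the low half.
 */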
2004 a = int_ld_mmio_beN(cpu, full, ret_be, addr, size - 8, mmu_idx,
2005 MMU_DATA_LOAD, ra, mr, mr_offset);
2006 b = int_ld_mmio_beN(cpu, full, ret_be, addr + size - 8, 8, mmu_idx,
2007 MMU_DATA_LOAD, ra, mr, mr_offset + size - 8);
2008 return int128_make128(b, a);
2009 }
2010
2011 /**
2012 * do_ld_bytes_beN
2013 * @p: translation parameters
2014 * @ret_be: accumulated data
2015 *
2016 * Load @p->size bytes from @p->haddr, which is RAM.
2017 * The bytes are concatenated in big-endian order with @ret_be.
2018 */
2019 static uint64_t do_ld_bytes_beN(MMULookupPageData *p, uint64_t ret_be)
2020 {
2021 uint8_t *haddr = p->haddr;
2022 int i, size = p->size;
2023
2024 for (i = 0; i < size; i++) {
2025 ret_be = (ret_be << 8) | haddr[i];
2026 }
2027 return ret_be;
2028 }
2029
2030 /**
2031 * do_ld_parts_beN
2032 * @p: translation parameters
2033 * @ret_be: accumulated data
2034 *
2035 * As do_ld_bytes_beN, but atomically on each aligned part.
2036 */
2037 static uint64_t do_ld_parts_beN(MMULookupPageData *p, uint64_t ret_be)
2038 {
2039 void *haddr = p->haddr;
2040 int size = p->size;
2041
2042 do {
2043 uint64_t x;
2044 int n;
2045
2046 /*
2047 * Find minimum of alignment and size.
2048 * This is slightly stronger than required by MO_ATOM_SUBALIGN, which
2049 * would have only checked the low bits of addr|size once at the start,
2050 * but is just as easy.
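 *
 * For example, a haddr with low bits ...110 and size == 6 takes
 * case 6 first (one atomic 2-byte load); haddr is then 4-aligned and
 * the remaining 4 bytes take case 4 (one atomic 4-byte load).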
2051 */
2052 switch (((uintptr_t)haddr | size) & 7) {
2053 case 4:
2054 x = cpu_to_be32(load_atomic4(haddr));
2055 ret_be = (ret_be << 32) | x;
2056 n = 4;
2057 break;
2058 case 2:
2059 case 6:
2060 x = cpu_to_be16(load_atomic2(haddr));
2061 ret_be = (ret_be << 16) | x;
2062 n = 2;
2063 break;
2064 default:
2065 x = *(uint8_t *)haddr;
2066 ret_be = (ret_be << 8) | x;
2067 n = 1;
2068 break;
2069 case 0:
2070 g_assert_not_reached();
2071 }
2072 haddr += n;
2073 size -= n;
2074 } while (size != 0);
2075 return ret_be;
2076 }
2077
2078 /**
2079 * do_ld_whole_be4
2080 * @p: translation parameters
2081 * @ret_be: accumulated data
2082 *
2083 * As do_ld_bytes_beN, but with one atomic load.
2084 * Four aligned bytes are guaranteed to cover the load.
2085 */
2086 static uint64_t do_ld_whole_be4(MMULookupPageData *p, uint64_t ret_be)
2087 {
2088 int o = p->addr & 3;
2089 uint32_t x = load_atomic4(p->haddr - o);
2090
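/*
 * E.g. p->addr ends in ...01 and p->size == 2: o == 1, so the aligned
 * word containing the data is loaded once; the left shift discards the
 * byte below the start, the right shift leaves the two wanted bytes in
 * the low half, and they are merged into ret_be.
 */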
2091 x = cpu_to_be32(x);
2092 x <<= o * 8;
2093 x >>= (4 - p->size) * 8;
2094 return (ret_be << (p->size * 8)) | x;
2095 }
2096
2097 /**
2098 * do_ld_whole_be8
2099 * @p: translation parameters
2100 * @ret_be: accumulated data
2101 *
2102 * As do_ld_bytes_beN, but with one atomic load.
2103 * Eight aligned bytes are guaranteed to cover the load.
2104 */
2105 static uint64_t do_ld_whole_be8(CPUState *cpu, uintptr_t ra,
2106 MMULookupPageData *p, uint64_t ret_be)
2107 {
2108 int o = p->addr & 7;
2109 uint64_t x = load_atomic8_or_exit(cpu, ra, p->haddr - o);
2110
2111 x = cpu_to_be64(x);
2112 x <<= o * 8;
2113 x >>= (8 - p->size) * 8;
2114 return (ret_be << (p->size * 8)) | x;
2115 }
2116
2117 /**
2118 * do_ld_whole_be16
2119 * @p: translation parameters
2120 * @ret_be: accumulated data
2121 *
2122 * As do_ld_bytes_beN, but with one atomic load.
2123 * 16 aligned bytes are guaranteed to cover the load.
2124 */
2125 static Int128 do_ld_whole_be16(CPUState *cpu, uintptr_t ra,
2126 MMULookupPageData *p, uint64_t ret_be)
2127 {
2128 int o = p->addr & 15;
2129 Int128 x, y = load_atomic16_or_exit(cpu, ra, p->haddr - o);
2130 int size = p->size;
2131
2132 if (!HOST_BIG_ENDIAN) {
2133 y = bswap128(y);
2134 }
2135 y = int128_lshift(y, o * 8);
2136 y = int128_urshift(y, (16 - size) * 8);
2137 x = int128_make64(ret_be);
2138 x = int128_lshift(x, size * 8);
2139 return int128_or(x, y);
2140 }
2141
2142 /*
2143 * Wrapper for the above.
2144 */
2145 static uint64_t do_ld_beN(CPUState *cpu, MMULookupPageData *p,
2146 uint64_t ret_be, int mmu_idx, MMUAccessType type,
2147 MemOp mop, uintptr_t ra)
2148 {
2149 MemOp atom;
2150 unsigned tmp, half_size;
2151
2152 if (unlikely(p->flags & TLB_MMIO)) {
2153 return do_ld_mmio_beN(cpu, p->full, ret_be, p->addr, p->size,
2154 mmu_idx, type, ra);
2155 }
2156
2157 /*
2158 * It is a given that we cross a page and therefore there is no
2159 * atomicity for the load as a whole, but subobjects may need attention.
2160 */
2161 atom = mop & MO_ATOM_MASK;
2162 switch (atom) {
2163 case MO_ATOM_SUBALIGN:
2164 return do_ld_parts_beN(p, ret_be);
2165
2166 case MO_ATOM_IFALIGN_PAIR:
2167 case MO_ATOM_WITHIN16_PAIR:
2168 tmp = mop & MO_SIZE;
2169 tmp = tmp ? tmp - 1 : 0;
2170 half_size = 1 << tmp;
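/*
 * E.g. for an 8-byte MO_ATOM_IFALIGN_PAIR load, half_size is 4:
 * only a split of exactly 4 bytes per page still guarantees that
 * each half can be loaded atomically, via the whole-word helpers
 * below; any other split falls through to the byte loop.
 */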
2171 if (atom == MO_ATOM_IFALIGN_PAIR
2172 ? p->size == half_size
2173 : p->size >= half_size) {
2174 if (!HAVE_al8_fast && p->size < 4) {
2175 return do_ld_whole_be4(p, ret_be);
2176 } else {
2177 return do_ld_whole_be8(cpu, ra, p, ret_be);
2178 }
2179 }
2180 /* fall through */
2181
2182 case MO_ATOM_IFALIGN:
2183 case MO_ATOM_WITHIN16:
2184 case MO_ATOM_NONE:
2185 return do_ld_bytes_beN(p, ret_be);
2186
2187 default:
2188 g_assert_not_reached();
2189 }
2190 }
2191
2192 /*
2193 * Wrapper for the above, for 8 < size < 16.
2194 */
2195 static Int128 do_ld16_beN(CPUState *cpu, MMULookupPageData *p,
2196 uint64_t a, int mmu_idx, MemOp mop, uintptr_t ra)
2197 {
2198 int size = p->size;
2199 uint64_t b;
2200 MemOp atom;
2201
2202 if (unlikely(p->flags & TLB_MMIO)) {
2203 return do_ld16_mmio_beN(cpu, p->full, a, p->addr, size, mmu_idx, ra);
2204 }
2205
2206 /*
2207 * It is a given that we cross a page and therefore there is no
2208 * atomicity for the load as a whole, but subobjects may need attention.
2209 */
2210 atom = mop & MO_ATOM_MASK;
2211 switch (atom) {
2212 case MO_ATOM_SUBALIGN:
2213 p->size = size - 8;
2214 a = do_ld_parts_beN(p, a);
2215 p->haddr += size - 8;
2216 p->size = 8;
2217 b = do_ld_parts_beN(p, 0);
2218 break;
2219
2220 case MO_ATOM_WITHIN16_PAIR:
2221 /* Since size > 8, this is the half that must be atomic. */
2222 return do_ld_whole_be16(cpu, ra, p, a);
2223
2224 case MO_ATOM_IFALIGN_PAIR:
2225 /*
2226 * Since size > 8, both halves are misaligned,
2227 * and so neither is atomic.
2228 */
2229 case MO_ATOM_IFALIGN:
2230 case MO_ATOM_WITHIN16:
2231 case MO_ATOM_NONE:
2232 p->size = size - 8;
2233 a = do_ld_bytes_beN(p, a);
2234 b = ldq_be_p(p->haddr + size - 8);
2235 break;
2236
2237 default:
2238 g_assert_not_reached();
2239 }
2240
2241 return int128_make128(b, a);
2242 }
2243
2244 static uint8_t do_ld_1(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
2245 MMUAccessType type, uintptr_t ra)
2246 {
2247 if (unlikely(p->flags & TLB_MMIO)) {
2248 return do_ld_mmio_beN(cpu, p->full, 0, p->addr, 1, mmu_idx, type, ra);
2249 } else {
2250 return *(uint8_t *)p->haddr;
2251 }
2252 }
2253
2254 static uint16_t do_ld_2(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
2255 MMUAccessType type, MemOp memop, uintptr_t ra)
2256 {
2257 uint16_t ret;
2258
2259 if (unlikely(p->flags & TLB_MMIO)) {
2260 ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 2, mmu_idx, type, ra);
2261 if ((memop & MO_BSWAP) == MO_LE) {
2262 ret = bswap16(ret);
2263 }
2264 } else {
2265 /* Perform the load host endian, then swap if necessary. */
2266 ret = load_atom_2(cpu, ra, p->haddr, memop);
2267 if (memop & MO_BSWAP) {
2268 ret = bswap16(ret);
2269 }
2270 }
2271 return ret;
2272 }
2273
2274 static uint32_t do_ld_4(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
2275 MMUAccessType type, MemOp memop, uintptr_t ra)
2276 {
2277 uint32_t ret;
2278
2279 if (unlikely(p->flags & TLB_MMIO)) {
2280 ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 4, mmu_idx, type, ra);
2281 if ((memop & MO_BSWAP) == MO_LE) {
2282 ret = bswap32(ret);
2283 }
2284 } else {
2285 /* Perform the load host endian. */
2286 ret = load_atom_4(cpu, ra, p->haddr, memop);
2287 if (memop & MO_BSWAP) {
2288 ret = bswap32(ret);
2289 }
2290 }
2291 return ret;
2292 }
2293
2294 static uint64_t do_ld_8(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
2295 MMUAccessType type, MemOp memop, uintptr_t ra)
2296 {
2297 uint64_t ret;
2298
2299 if (unlikely(p->flags & TLB_MMIO)) {
2300 ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 8, mmu_idx, type, ra);
2301 if ((memop & MO_BSWAP) == MO_LE) {
2302 ret = bswap64(ret);
2303 }
2304 } else {
2305 /* Perform the load host endian. */
2306 ret = load_atom_8(cpu, ra, p->haddr, memop);
2307 if (memop & MO_BSWAP) {
2308 ret = bswap64(ret);
2309 }
2310 }
2311 return ret;
2312 }
2313
2314 static uint8_t do_ld1_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
2315 uintptr_t ra, MMUAccessType access_type)
2316 {
2317 MMULookupLocals l;
2318 bool crosspage;
2319
2320 cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2321 crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
2322 tcg_debug_assert(!crosspage);
2323
2324 return do_ld_1(cpu, &l.page[0], l.mmu_idx, access_type, ra);
2325 }
2326
2327 static uint16_t do_ld2_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
2328 uintptr_t ra, MMUAccessType access_type)
2329 {
2330 MMULookupLocals l;
2331 bool crosspage;
2332 uint16_t ret;
2333 uint8_t a, b;
2334
2335 cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2336 crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
2337 if (likely(!crosspage)) {
2338 return do_ld_2(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
2339 }
2340
2341 a = do_ld_1(cpu, &l.page[0], l.mmu_idx, access_type, ra);
2342 b = do_ld_1(cpu, &l.page[1], l.mmu_idx, access_type, ra);
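/*
 * a is the byte from the lower-addressed page, b the byte from the
 * higher-addressed page; combine them according to the requested
 * endianness.
 */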
2343
2344 if ((l.memop & MO_BSWAP) == MO_LE) {
2345 ret = a | (b << 8);
2346 } else {
2347 ret = b | (a << 8);
2348 }
2349 return ret;
2350 }
2351
2352 static uint32_t do_ld4_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
2353 uintptr_t ra, MMUAccessType access_type)
2354 {
2355 MMULookupLocals l;
2356 bool crosspage;
2357 uint32_t ret;
2358
2359 cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2360 crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
2361 if (likely(!crosspage)) {
2362 return do_ld_4(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
2363 }
2364
2365 ret = do_ld_beN(cpu, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra);
2366 ret = do_ld_beN(cpu, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra);
2367 if ((l.memop & MO_BSWAP) == MO_LE) {
2368 ret = bswap32(ret);
2369 }
2370 return ret;
2371 }
2372
2373 static uint64_t do_ld8_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
2374 uintptr_t ra, MMUAccessType access_type)
2375 {
2376 MMULookupLocals l;
2377 bool crosspage;
2378 uint64_t ret;
2379
2380 cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2381 crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
2382 if (likely(!crosspage)) {
2383 return do_ld_8(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
2384 }
2385
2386 ret = do_ld_beN(cpu, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra);
2387 ret = do_ld_beN(cpu, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra);
2388 if ((l.memop & MO_BSWAP) == MO_LE) {
2389 ret = bswap64(ret);
2390 }
2391 return ret;
2392 }
2393
2394 static Int128 do_ld16_mmu(CPUState *cpu, vaddr addr,
2395 MemOpIdx oi, uintptr_t ra)
2396 {
2397 MMULookupLocals l;
2398 bool crosspage;
2399 uint64_t a, b;
2400 Int128 ret;
2401 int first;
2402
2403 cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2404 crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_LOAD, &l);
2405 if (likely(!crosspage)) {
2406 if (unlikely(l.page[0].flags & TLB_MMIO)) {
2407 ret = do_ld16_mmio_beN(cpu, l.page[0].full, 0, addr, 16,
2408 l.mmu_idx, ra);
2409 if ((l.memop & MO_BSWAP) == MO_LE) {
2410 ret = bswap128(ret);
2411 }
2412 } else {
2413 /* Perform the load host endian. */
2414 ret = load_atom_16(cpu, ra, l.page[0].haddr, l.memop);
2415 if (l.memop & MO_BSWAP) {
2416 ret = bswap128(ret);
2417 }
2418 }
2419 return ret;
2420 }
2421
2422 first = l.page[0].size;
2423 if (first == 8) {
2424 MemOp mop8 = (l.memop & ~MO_SIZE) | MO_64;
2425
2426 a = do_ld_8(cpu, &l.page[0], l.mmu_idx, MMU_DATA_LOAD, mop8, ra);
2427 b = do_ld_8(cpu, &l.page[1], l.mmu_idx, MMU_DATA_LOAD, mop8, ra);
2428 if ((mop8 & MO_BSWAP) == MO_LE) {
2429 ret = int128_make128(a, b);
2430 } else {
2431 ret = int128_make128(b, a);
2432 }
2433 return ret;
2434 }
2435
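/*
 * Uneven split: when first < 8, the first page holds only the most
 * significant bytes (do_ld_beN) and the second page supplies the rest
 * via do_ld16_beN; when first > 8 the roles are reversed, and the
 * partial result is realigned before the low bytes are filled in by
 * do_ld_beN on the second page.
 */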
2436 if (first < 8) {
2437 a = do_ld_beN(cpu, &l.page[0], 0, l.mmu_idx,
2438 MMU_DATA_LOAD, l.memop, ra);
2439 ret = do_ld16_beN(cpu, &l.page[1], a, l.mmu_idx, l.memop, ra);
2440 } else {
2441 ret = do_ld16_beN(cpu, &l.page[0], 0, l.mmu_idx, l.memop, ra);
2442 b = int128_getlo(ret);
2443 ret = int128_lshift(ret, l.page[1].size * 8);
2444 a = int128_gethi(ret);
2445 b = do_ld_beN(cpu, &l.page[1], b, l.mmu_idx,
2446 MMU_DATA_LOAD, l.memop, ra);
2447 ret = int128_make128(b, a);
2448 }
2449 if ((l.memop & MO_BSWAP) == MO_LE) {
2450 ret = bswap128(ret);
2451 }
2452 return ret;
2453 }
2454
2455 /*
2456 * Store Helpers
2457 */
2458
2459 /**
2460 * do_st_mmio_leN:
2461 * @cpu: generic cpu state
2462 * @full: page parameters
2463 * @val_le: data to store
2464 * @addr: virtual address
2465 * @size: number of bytes
2466 * @mmu_idx: virtual address context
2467 * @ra: return address into tcg generated code, or 0
2468 * Context: BQL held
2469 *
2470 * Store @size bytes at @addr, which is memory-mapped i/o.
2471 * The bytes to store are extracted in little-endian order from @val_le;
2472 * return the bytes of @val_le beyond @size that have not been stored.
2473 */
2474 static uint64_t int_st_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
2475 uint64_t val_le, vaddr addr, int size,
2476 int mmu_idx, uintptr_t ra,
2477 MemoryRegion *mr, hwaddr mr_offset)
2478 {
2479 do {
2480 MemOp this_mop;
2481 unsigned this_size;
2482 MemTxResult r;
2483
2484 /* Store aligned pieces up to 8 bytes. */
2485 this_mop = ctz32(size | (int)addr | 8);
2486 this_size = 1 << this_mop;
2487 this_mop |= MO_LE;
2488
2489 r = memory_region_dispatch_write(mr, mr_offset, val_le,
2490 this_mop, full->attrs);
2491 if (unlikely(r != MEMTX_OK)) {
2492 io_failed(cpu, full, addr, this_size, MMU_DATA_STORE,
2493 mmu_idx, r, ra);
2494 }
2495 if (this_size == 8) {
2496 return 0;
2497 }
2498
2499 val_le >>= this_size * 8;
2500 addr += this_size;
2501 mr_offset += this_size;
2502 size -= this_size;
2503 } while (size);
2504
2505 return val_le;
2506 }
2507
2508 static uint64_t do_st_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
2509 uint64_t val_le, vaddr addr, int size,
2510 int mmu_idx, uintptr_t ra)
2511 {
2512 MemoryRegionSection *section;
2513 hwaddr mr_offset;
2514 MemoryRegion *mr;
2515 MemTxAttrs attrs;
2516
2517 tcg_debug_assert(size > 0 && size <= 8);
2518
2519 attrs = full->attrs;
2520 section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
2521 mr = section->mr;
2522
2523 BQL_LOCK_GUARD();
2524 return int_st_mmio_leN(cpu, full, val_le, addr, size, mmu_idx,
2525 ra, mr, mr_offset);
2526 }
2527
2528 static uint64_t do_st16_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
2529 Int128 val_le, vaddr addr, int size,
2530 int mmu_idx, uintptr_t ra)
2531 {
2532 MemoryRegionSection *section;
2533 MemoryRegion *mr;
2534 hwaddr mr_offset;
2535 MemTxAttrs attrs;
2536
2537 tcg_debug_assert(size > 8 && size <= 16);
2538
2539 attrs = full->attrs;
2540 section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
2541 mr = section->mr;
2542
2543 BQL_LOCK_GUARD();
2544 int_st_mmio_leN(cpu, full, int128_getlo(val_le), addr, 8,
2545 mmu_idx, ra, mr, mr_offset);
2546 return int_st_mmio_leN(cpu, full, int128_gethi(val_le), addr + 8,
2547 size - 8, mmu_idx, ra, mr, mr_offset + 8);
2548 }
2549
2550 /*
2551 * Wrapper for the above.
2552 */
2553 static uint64_t do_st_leN(CPUState *cpu, MMULookupPageData *p,
2554 uint64_t val_le, int mmu_idx,
2555 MemOp mop, uintptr_t ra)
2556 {
2557 MemOp atom;
2558 unsigned tmp, half_size;
2559
2560 if (unlikely(p->flags & TLB_MMIO)) {
2561 return do_st_mmio_leN(cpu, p->full, val_le, p->addr,
2562 p->size, mmu_idx, ra);
2563 } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
2564 return val_le >> (p->size * 8);
2565 }
2566
2567 /*
2568 * It is a given that we cross a page and therefore there is no atomicity
2569 * for the store as a whole, but subobjects may need attention.
2570 */
2571 atom = mop & MO_ATOM_MASK;
2572 switch (atom) {
2573 case MO_ATOM_SUBALIGN:
2574 return store_parts_leN(p->haddr, p->size, val_le);
2575
2576 case MO_ATOM_IFALIGN_PAIR:
2577 case MO_ATOM_WITHIN16_PAIR:
2578 tmp = mop & MO_SIZE;
2579 tmp = tmp ? tmp - 1 : 0;
2580 half_size = 1 << tmp;
2581 if (atom == MO_ATOM_IFALIGN_PAIR
2582 ? p->size == half_size
2583 : p->size >= half_size) {
2584 if (!HAVE_al8_fast && p->size <= 4) {
2585 return store_whole_le4(p->haddr, p->size, val_le);
2586 } else if (HAVE_al8) {
2587 return store_whole_le8(p->haddr, p->size, val_le);
2588 } else {
2589 cpu_loop_exit_atomic(cpu, ra);
2590 }
2591 }
2592 /* fall through */
2593
2594 case MO_ATOM_IFALIGN:
2595 case MO_ATOM_WITHIN16:
2596 case MO_ATOM_NONE:
2597 return store_bytes_leN(p->haddr, p->size, val_le);
2598
2599 default:
2600 g_assert_not_reached();
2601 }
2602 }
2603
2604 /*
2605 * Wrapper for the above, for 8 < size < 16.
2606 */
2607 static uint64_t do_st16_leN(CPUState *cpu, MMULookupPageData *p,
2608 Int128 val_le, int mmu_idx,
2609 MemOp mop, uintptr_t ra)
2610 {
2611 int size = p->size;
2612 MemOp atom;
2613
2614 if (unlikely(p->flags & TLB_MMIO)) {
2615 return do_st16_mmio_leN(cpu, p->full, val_le, p->addr,
2616 size, mmu_idx, ra);
2617 } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
2618 return int128_gethi(val_le) >> ((size - 8) * 8);
2619 }
2620
2621 /*
2622 * It is a given that we cross a page and therefore there is no atomicity
2623 * for the store as a whole, but subobjects may need attention.
2624 */
2625 atom = mop & MO_ATOM_MASK;
2626 switch (atom) {
2627 case MO_ATOM_SUBALIGN:
2628 store_parts_leN(p->haddr, 8, int128_getlo(val_le));
2629 return store_parts_leN(p->haddr + 8, p->size - 8,
2630 int128_gethi(val_le));
2631
2632 case MO_ATOM_WITHIN16_PAIR:
2633 /* Since size > 8, this is the half that must be atomic. */
2634 if (!HAVE_CMPXCHG128) {
2635 cpu_loop_exit_atomic(cpu, ra);
2636 }
2637 return store_whole_le16(p->haddr, p->size, val_le);
2638
2639 case MO_ATOM_IFALIGN_PAIR:
2640 /*
2641 * Since size > 8, both halves are misaligned,
2642 * and so neither is atomic.
2643 */
2644 case MO_ATOM_IFALIGN:
2645 case MO_ATOM_WITHIN16:
2646 case MO_ATOM_NONE:
2647 stq_le_p(p->haddr, int128_getlo(val_le));
2648 return store_bytes_leN(p->haddr + 8, p->size - 8,
2649 int128_gethi(val_le));
2650
2651 default:
2652 g_assert_not_reached();
2653 }
2654 }
2655
2656 static void do_st_1(CPUState *cpu, MMULookupPageData *p, uint8_t val,
2657 int mmu_idx, uintptr_t ra)
2658 {
2659 if (unlikely(p->flags & TLB_MMIO)) {
2660 do_st_mmio_leN(cpu, p->full, val, p->addr, 1, mmu_idx, ra);
2661 } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
2662 /* nothing */
2663 } else {
2664 *(uint8_t *)p->haddr = val;
2665 }
2666 }
2667
2668 static void do_st_2(CPUState *cpu, MMULookupPageData *p, uint16_t val,
2669 int mmu_idx, MemOp memop, uintptr_t ra)
2670 {
2671 if (unlikely(p->flags & TLB_MMIO)) {
2672 if ((memop & MO_BSWAP) != MO_LE) {
2673 val = bswap16(val);
2674 }
2675 do_st_mmio_leN(cpu, p->full, val, p->addr, 2, mmu_idx, ra);
2676 } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
2677 /* nothing */
2678 } else {
2679 /* Swap to host endian if necessary, then store. */
2680 if (memop & MO_BSWAP) {
2681 val = bswap16(val);
2682 }
2683 store_atom_2(cpu, ra, p->haddr, memop, val);
2684 }
2685 }
2686
2687 static void do_st_4(CPUState *cpu, MMULookupPageData *p, uint32_t val,
2688 int mmu_idx, MemOp memop, uintptr_t ra)
2689 {
2690 if (unlikely(p->flags & TLB_MMIO)) {
2691 if ((memop & MO_BSWAP) != MO_LE) {
2692 val = bswap32(val);
2693 }
2694 do_st_mmio_leN(cpu, p->full, val, p->addr, 4, mmu_idx, ra);
2695 } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
2696 /* nothing */
2697 } else {
2698 /* Swap to host endian if necessary, then store. */
2699 if (memop & MO_BSWAP) {
2700 val = bswap32(val);
2701 }
2702 store_atom_4(cpu, ra, p->haddr, memop, val);
2703 }
2704 }
2705
2706 static void do_st_8(CPUState *cpu, MMULookupPageData *p, uint64_t val,
2707 int mmu_idx, MemOp memop, uintptr_t ra)
2708 {
2709 if (unlikely(p->flags & TLB_MMIO)) {
2710 if ((memop & MO_BSWAP) != MO_LE) {
2711 val = bswap64(val);
2712 }
2713 do_st_mmio_leN(cpu, p->full, val, p->addr, 8, mmu_idx, ra);
2714 } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
2715 /* nothing */
2716 } else {
2717 /* Swap to host endian if necessary, then store. */
2718 if (memop & MO_BSWAP) {
2719 val = bswap64(val);
2720 }
2721 store_atom_8(cpu, ra, p->haddr, memop, val);
2722 }
2723 }
2724
2725 static void do_st1_mmu(CPUState *cpu, vaddr addr, uint8_t val,
2726 MemOpIdx oi, uintptr_t ra)
2727 {
2728 MMULookupLocals l;
2729 bool crosspage;
2730
2731 cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
2732 crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
2733 tcg_debug_assert(!crosspage);
2734
2735 do_st_1(cpu, &l.page[0], val, l.mmu_idx, ra);
2736 }
2737
2738 static void do_st2_mmu(CPUState *cpu, vaddr addr, uint16_t val,
2739 MemOpIdx oi, uintptr_t ra)
2740 {
2741 MMULookupLocals l;
2742 bool crosspage;
2743 uint8_t a, b;
2744
2745 cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
2746 crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
2747 if (likely(!crosspage)) {
2748 do_st_2(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
2749 return;
2750 }
2751
2752 if ((l.memop & MO_BSWAP) == MO_LE) {
2753 a = val, b = val >> 8;
2754 } else {
2755 b = val, a = val >> 8;
2756 }
2757 do_st_1(cpu, &l.page[0], a, l.mmu_idx, ra);
2758 do_st_1(cpu, &l.page[1], b, l.mmu_idx, ra);
2759 }
2760
2761 static void do_st4_mmu(CPUState *cpu, vaddr addr, uint32_t val,
2762 MemOpIdx oi, uintptr_t ra)
2763 {
2764 MMULookupLocals l;
2765 bool crosspage;
2766
2767 cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
2768 crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
2769 if (likely(!crosspage)) {
2770 do_st_4(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
2771 return;
2772 }
2773
2774 /* Swap to little endian for simplicity, then store by bytes. */
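/*
 * do_st_leN stores the low page[0].size bytes and returns the
 * remaining bytes of val shifted down, so the second call simply
 * continues with the leftover on the second page.
 */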
2775 if ((l.memop & MO_BSWAP) != MO_LE) {
2776 val = bswap32(val);
2777 }
2778 val = do_st_leN(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
2779 (void) do_st_leN(cpu, &l.page[1], val, l.mmu_idx, l.memop, ra);
2780 }
2781
2782 static void do_st8_mmu(CPUState *cpu, vaddr addr, uint64_t val,
2783 MemOpIdx oi, uintptr_t ra)
2784 {
2785 MMULookupLocals l;
2786 bool crosspage;
2787
2788 cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
2789 crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
2790 if (likely(!crosspage)) {
2791 do_st_8(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
2792 return;
2793 }
2794
2795 /* Swap to little endian for simplicity, then store by bytes. */
2796 if ((l.memop & MO_BSWAP) != MO_LE) {
2797 val = bswap64(val);
2798 }
2799 val = do_st_leN(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
2800 (void) do_st_leN(cpu, &l.page[1], val, l.mmu_idx, l.memop, ra);
2801 }
2802
2803 static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val,
2804 MemOpIdx oi, uintptr_t ra)
2805 {
2806 MMULookupLocals l;
2807 bool crosspage;
2808 uint64_t a, b;
2809 int first;
2810
2811 cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
2812 crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
2813 if (likely(!crosspage)) {
2814 if (unlikely(l.page[0].flags & TLB_MMIO)) {
2815 if ((l.memop & MO_BSWAP) != MO_LE) {
2816 val = bswap128(val);
2817 }
2818 do_st16_mmio_leN(cpu, l.page[0].full, val, addr, 16, l.mmu_idx, ra);
2819 } else if (unlikely(l.page[0].flags & TLB_DISCARD_WRITE)) {
2820 /* nothing */
2821 } else {
2822 /* Swap to host endian if necessary, then store. */
2823 if (l.memop & MO_BSWAP) {
2824 val = bswap128(val);
2825 }
2826 store_atom_16(cpu, ra, l.page[0].haddr, l.memop, val);
2827 }
2828 return;
2829 }
2830
2831 first = l.page[0].size;
2832 if (first == 8) {
2833 MemOp mop8 = (l.memop & ~(MO_SIZE | MO_BSWAP)) | MO_64;
2834
2835 if (l.memop & MO_BSWAP) {
2836 val = bswap128(val);
2837 }
2838 if (HOST_BIG_ENDIAN) {
2839 b = int128_getlo(val), a = int128_gethi(val);
2840 } else {
2841 a = int128_getlo(val), b = int128_gethi(val);
2842 }
2843 do_st_8(cpu, &l.page[0], a, l.mmu_idx, mop8, ra);
2844 do_st_8(cpu, &l.page[1], b, l.mmu_idx, mop8, ra);
2845 return;
2846 }
2847
2848 if ((l.memop & MO_BSWAP) != MO_LE) {
2849 val = bswap128(val);
2850 }
2851 if (first < 8) {
2852 do_st_leN(cpu, &l.page[0], int128_getlo(val), l.mmu_idx, l.memop, ra);
2853 val = int128_urshift(val, first * 8);
2854 do_st16_leN(cpu, &l.page[1], val, l.mmu_idx, l.memop, ra);
2855 } else {
2856 b = do_st16_leN(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
2857 do_st_leN(cpu, &l.page[1], b, l.mmu_idx, l.memop, ra);
2858 }
2859 }
2860
2861 #include "ldst_common.c.inc"
2862
2863 /*
2864 * First set of functions passes in OI and RETADDR.
2865 * This makes them callable from other helpers.
2866 */
2867
2868 #define ATOMIC_NAME(X) \
2869 glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
2870
2871 #define ATOMIC_MMU_CLEANUP
2872
2873 #include "atomic_common.c.inc"
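/*
 * Each DATA_SIZE/atomic_template.h pair below expands into the atomic
 * helpers for one access width (e.g. cpu_atomic_cmpxchgl_le_mmu() for
 * DATA_SIZE 4), all of which obtain their host address through
 * atomic_mmu_lookup() above.
 */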
2874
2875 #define DATA_SIZE 1
2876 #include "atomic_template.h"
2877
2878 #define DATA_SIZE 2
2879 #include "atomic_template.h"
2880
2881 #define DATA_SIZE 4
2882 #include "atomic_template.h"
2883
2884 #ifdef CONFIG_ATOMIC64
2885 #define DATA_SIZE 8
2886 #include "atomic_template.h"
2887 #endif
2888
2889 #if defined(CONFIG_ATOMIC128) || HAVE_CMPXCHG128
2890 #define DATA_SIZE 16
2891 #include "atomic_template.h"
2892 #endif
2893
2894 /* Code access functions. */
2895
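/*
 * Hypothetical example of how a target's translator might use these:
 *
 *     uint32_t insn = cpu_ldl_code(env, pc);
 *
 * This fetches through do_ld4_mmu() with MMU_INST_FETCH and the
 * instruction-fetch mmu index from cpu_mmu_index(cs, true), so it
 * obeys the same softmmu translation as any other access.
 */
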
2896 uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
2897 {
2898 CPUState *cs = env_cpu(env);
2899 MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(cs, true));
2900 return do_ld1_mmu(cs, addr, oi, 0, MMU_INST_FETCH);
2901 }
2902
2903 uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
2904 {
2905 CPUState *cs = env_cpu(env);
2906 MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(cs, true));
2907 return do_ld2_mmu(cs, addr, oi, 0, MMU_INST_FETCH);
2908 }
2909
2910 uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
2911 {
2912 CPUState *cs = env_cpu(env);
2913 MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(cs, true));
2914 return do_ld4_mmu(cs, addr, oi, 0, MMU_INST_FETCH);
2915 }
2916
2917 uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
2918 {
2919 CPUState *cs = env_cpu(env);
2920 MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(cs, true));
2921 return do_ld8_mmu(cs, addr, oi, 0, MMU_INST_FETCH);
2922 }
2923
2924 uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
2925 MemOpIdx oi, uintptr_t retaddr)
2926 {
2927 return do_ld1_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
2928 }
2929
2930 uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
2931 MemOpIdx oi, uintptr_t retaddr)
2932 {
2933 return do_ld2_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
2934 }
2935
2936 uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
2937 MemOpIdx oi, uintptr_t retaddr)
2938 {
2939 return do_ld4_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
2940 }
2941
2942 uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
2943 MemOpIdx oi, uintptr_t retaddr)
2944 {
2945 return do_ld8_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
2946 }
2947