/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Access guest memory in blocks. */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/cpu_ldst.h"
#include "exec/exec-all.h"
#include "access.h"

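/*
 * Probe the host mapping of a guest memory block of up to one page,
 * for the given MMU index, and fill in *ret.  If the block crosses a
 * page boundary, the second page is probed separately; when the two
 * host mappings are not contiguous, the second host address is kept
 * in ret->haddr2 (system emulation only).
 */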
void access_prepare_mmu(X86Access *ret, CPUX86State *env,
                        vaddr vaddr, unsigned size,
                        MMUAccessType type, int mmu_idx, uintptr_t ra)
{
    int size1, size2;
    void *haddr1, *haddr2;

    assert(size > 0 && size <= TARGET_PAGE_SIZE);

    size1 = MIN(size, -(vaddr | TARGET_PAGE_MASK));
    size2 = size - size1;

    memset(ret, 0, sizeof(*ret));
    ret->vaddr = vaddr;
    ret->size = size;
    ret->size1 = size1;
    ret->mmu_idx = mmu_idx;
    ret->env = env;
    ret->ra = ra;

    haddr1 = probe_access(env, vaddr, size1, type, mmu_idx, ra);
    ret->haddr1 = haddr1;

    if (unlikely(size2)) {
        haddr2 = probe_access(env, vaddr + size1, size2, type, mmu_idx, ra);
        if (haddr2 == haddr1 + size1) {
            ret->size1 = size;
        } else {
#ifdef CONFIG_USER_ONLY
            g_assert_not_reached();
#else
            ret->haddr2 = haddr2;
#endif
        }
    }
}

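/* As access_prepare_mmu, but using the current CPU's data MMU index. */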
void access_prepare(X86Access *ret, CPUX86State *env, vaddr vaddr,
                    unsigned size, MMUAccessType type, uintptr_t ra)
{
    int mmu_idx = cpu_mmu_index(env_cpu(env), false);
    access_prepare_mmu(ret, env, vaddr, size, type, mmu_idx, ra);
}

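/*
 * Return a host pointer for ADDR within the prepared block, or NULL if
 * the access must take the slow path (no direct host mapping, or the
 * LEN bytes straddle the page boundary without a contiguous mapping).
 */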
static void *access_ptr(X86Access *ac, vaddr addr, unsigned len)
{
    vaddr offset = addr - ac->vaddr;

    assert(addr >= ac->vaddr);

    /* No haddr means probe_access wants to force slow path */
    if (!ac->haddr1) {
        return NULL;
    }

#ifdef CONFIG_USER_ONLY
    assert(offset <= ac->size1 - len);
    return ac->haddr1 + offset;
#else
    if (likely(offset <= ac->size1 - len)) {
        return ac->haddr1 + offset;
    }
    assert(offset <= ac->size - len);
    /*
     * If the address is not naturally aligned, it might span both pages.
     * Only return ac->haddr2 if the area is entirely within the second page,
     * otherwise fall back to slow accesses.
     */
    if (likely(offset >= ac->size1)) {
        return ac->haddr2 + (offset - ac->size1);
    }
    return NULL;
#endif
}

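/*
 * Loads: read a little-endian value of the given width from the block,
 * through the host pointer when one is available, otherwise through the
 * slow-path cpu_ld*_mmuidx_ra accessors.
 */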
uint8_t access_ldb(X86Access *ac, vaddr addr)
{
    void *p = access_ptr(ac, addr, sizeof(uint8_t));

    if (likely(p)) {
        return ldub_p(p);
    }
    return cpu_ldub_mmuidx_ra(ac->env, addr, ac->mmu_idx, ac->ra);
}

uint16_t access_ldw(X86Access *ac, vaddr addr)
{
    void *p = access_ptr(ac, addr, sizeof(uint16_t));

    if (likely(p)) {
        return lduw_le_p(p);
    }
    return cpu_lduw_le_mmuidx_ra(ac->env, addr, ac->mmu_idx, ac->ra);
}

uint32_t access_ldl(X86Access *ac, vaddr addr)
{
    void *p = access_ptr(ac, addr, sizeof(uint32_t));

    if (likely(p)) {
        return ldl_le_p(p);
    }
    return cpu_ldl_le_mmuidx_ra(ac->env, addr, ac->mmu_idx, ac->ra);
}

uint64_t access_ldq(X86Access *ac, vaddr addr)
{
    void *p = access_ptr(ac, addr, sizeof(uint64_t));

    if (likely(p)) {
        return ldq_le_p(p);
    }
    return cpu_ldq_le_mmuidx_ra(ac->env, addr, ac->mmu_idx, ac->ra);
}

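/*
 * Stores: the little-endian counterparts of the loads above, again
 * falling back to the cpu_st*_mmuidx_ra accessors when no host pointer
 * is available.
 */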
void access_stb(X86Access *ac, vaddr addr, uint8_t val)
{
    void *p = access_ptr(ac, addr, sizeof(uint8_t));

    if (likely(p)) {
        stb_p(p, val);
    } else {
        cpu_stb_mmuidx_ra(ac->env, addr, val, ac->mmu_idx, ac->ra);
    }
}

void access_stw(X86Access *ac, vaddr addr, uint16_t val)
{
    void *p = access_ptr(ac, addr, sizeof(uint16_t));

    if (likely(p)) {
        stw_le_p(p, val);
    } else {
        cpu_stw_le_mmuidx_ra(ac->env, addr, val, ac->mmu_idx, ac->ra);
    }
}

void access_stl(X86Access *ac, vaddr addr, uint32_t val)
{
    void *p = access_ptr(ac, addr, sizeof(uint32_t));

    if (likely(p)) {
        stl_le_p(p, val);
    } else {
        cpu_stl_le_mmuidx_ra(ac->env, addr, val, ac->mmu_idx, ac->ra);
    }
}

void access_stq(X86Access *ac, vaddr addr, uint64_t val)
{
    void *p = access_ptr(ac, addr, sizeof(uint64_t));

    if (likely(p)) {
        stq_le_p(p, val);
    } else {
        cpu_stq_le_mmuidx_ra(ac->env, addr, val, ac->mmu_idx, ac->ra);
    }
}

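/*
 * Usage sketch (illustrative only; read_guest_pair is a hypothetical
 * caller, not part of this API): prepare the whole block once, then
 * perform the individual accesses through it.
 *
 *     static void read_guest_pair(CPUX86State *env, vaddr ptr,
 *                                 uint64_t *lo, uint64_t *hi, uintptr_t ra)
 *     {
 *         X86Access ac;
 *
 *         access_prepare(&ac, env, ptr, 16, MMU_DATA_LOAD, ra);
 *         *lo = access_ldq(&ac, ptr);
 *         *hi = access_ldq(&ac, ptr + 8);
 *     }
 */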