/*
 *  Microblaze MMU emulation for qemu.
 *
 *  Copyright (c) 2009 Edgar E. Iglesias
 *  Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"

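/*
 * Decode the SIZE field of a TLB tag entry into a page size in bytes:
 * 0 -> 1K, 1 -> 4K, 2 -> 16K and so on up to 7 -> 16M, each step 4x larger.
 */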
static unsigned int tlb_decode_size(unsigned int f)
{
    static const unsigned int sizes[] = {
        1 * 1024, 4 * 1024, 16 * 1024, 64 * 1024, 256 * 1024,
        1 * 1024 * 1024, 4 * 1024 * 1024, 16 * 1024 * 1024
    };
    assert(f < ARRAY_SIZE(sizes));
    return sizes[f];
}

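/*
 * Flush every QEMU TLB page covered by the guest TLB entry at index idx,
 * if that entry is currently valid.
 */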
static void mmu_flush_idx(CPUMBState *env, unsigned int idx)
{
    CPUState *cs = env_cpu(env);
    MicroBlazeMMU *mmu = &env->mmu;
    unsigned int tlb_size;
    uint32_t tlb_tag, end, t;

    t = mmu->rams[RAM_TAG][idx];
    if (!(t & TLB_VALID)) {
        return;
    }

    tlb_tag = t & TLB_EPN_MASK;
    tlb_size = tlb_decode_size((t & TLB_PAGESZ_MASK) >> 7);
    end = tlb_tag + tlb_size;

    while (tlb_tag < end) {
        tlb_flush_page(cs, tlb_tag);
        tlb_tag += TARGET_PAGE_SIZE;
    }
}

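/*
 * The PID register is about to change.  Flush QEMU's cached translations
 * for every valid TLB entry tagged with the current PID; entries with a
 * TID of zero are global and are left alone.
 */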
static void mmu_change_pid(CPUMBState *env, unsigned int newpid)
{
    MicroBlazeMMU *mmu = &env->mmu;
    unsigned int i;
    uint32_t t;

    if (newpid & ~0xff) {
        qemu_log_mask(LOG_GUEST_ERROR, "Illegal rpid=%x\n", newpid);
    }

    for (i = 0; i < ARRAY_SIZE(mmu->rams[RAM_TAG]); i++) {
        /* Lookup and decode.  */
        t = mmu->rams[RAM_TAG][i];
        if (t & TLB_VALID) {
            if (mmu->tids[i] &&
                ((mmu->regs[MMU_R_PID] & 0xff) == mmu->tids[i])) {
                mmu_flush_idx(env, i);
            }
        }
    }
}

/*
 * Look up vaddr in the guest TLB and fill in *lu.  Returns 1 on a hit,
 * 0 on a miss or protection violation (see lu->err).
 * rw - 0 = read, 1 = write, 2 = fetch.
 */
unsigned int mmu_translate(MicroBlazeCPU *cpu, MicroBlazeMMULookup *lu,
                           target_ulong vaddr, MMUAccessType rw, int mmu_idx)
{
    MicroBlazeMMU *mmu = &cpu->env.mmu;
    unsigned int i, hit = 0;
    unsigned int tlb_ex = 0, tlb_wr = 0, tlb_zsel;
    uint64_t tlb_tag, tlb_rpn, mask;
    uint32_t tlb_size, t0;

    lu->err = ERR_MISS;
    for (i = 0; i < ARRAY_SIZE(mmu->rams[RAM_TAG]); i++) {
        uint64_t t, d;

        /* Lookup and decode.  */
        t = mmu->rams[RAM_TAG][i];
        if (t & TLB_VALID) {
            tlb_size = tlb_decode_size((t & TLB_PAGESZ_MASK) >> 7);
            if (tlb_size < TARGET_PAGE_SIZE) {
                qemu_log_mask(LOG_UNIMP, "%d pages not supported\n", tlb_size);
                abort();
            }

            mask = ~((uint64_t)tlb_size - 1);
            tlb_tag = t & TLB_EPN_MASK;
            if ((vaddr & mask) != (tlb_tag & mask)) {
                continue;
            }
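            /*
             * A TID of zero marks a global entry; otherwise the entry
             * only matches when tagged with the current PID.
             */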
            if (mmu->tids[i]
                && ((mmu->regs[MMU_R_PID] & 0xff) != mmu->tids[i])) {
                continue;
            }

            /* Bring in the data part.  */
            d = mmu->rams[RAM_DATA][i];
            tlb_ex = d & TLB_EX;
            tlb_wr = d & TLB_WR;

            /* Now let's see if there is a zone that overrides the protbits.  */
            tlb_zsel = (d >> 4) & 0xf;
            t0 = mmu->regs[MMU_R_ZPR] >> (30 - (tlb_zsel * 2));
            t0 &= 0x3;

            if (tlb_zsel > cpu->cfg.mmu_zones) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "tlb zone select out of range! %d\n", tlb_zsel);
                t0 = 1; /* Ignore.  */
            }

            if (cpu->cfg.mmu == 1) {
                t0 = 1; /* Zones are disabled.  */
            }

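            /*
             * Zone protection field (2 bits per zone in ZPR):
             *   0 - no user access; kernel obeys the TLB protection bits
             *   1 - user and kernel both obey the TLB protection bits
             *   2 - kernel gets full access; user obeys the TLB bits
             *   3 - full access for everyone
             */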
            switch (t0) {
                case 0:
                    if (mmu_idx == MMU_USER_IDX) {
                        continue;
                    }
                    break;
                case 2:
                    if (mmu_idx != MMU_USER_IDX) {
                        tlb_ex = 1;
                        tlb_wr = 1;
                    }
                    break;
                case 3:
                    tlb_ex = 1;
                    tlb_wr = 1;
                    break;
                default:
                    break;
            }

            lu->err = ERR_PROT;
            lu->prot = PAGE_READ;
            if (tlb_wr) {
                lu->prot |= PAGE_WRITE;
            } else if (rw == 1) {
                goto done;
            }
            if (tlb_ex) {
                lu->prot |= PAGE_EXEC;
            } else if (rw == 2) {
                goto done;
            }

            tlb_rpn = d & TLB_RPN_MASK;

            lu->vaddr = tlb_tag;
            lu->paddr = tlb_rpn & cpu->cfg.addr_mask;
            lu->size = tlb_size;
            lu->err = ERR_HIT;
            lu->idx = i;
            hit = 1;
            goto done;
        }
    }
done:
    qemu_log_mask(CPU_LOG_MMU,
                  "MMU vaddr=%" PRIx64 " rw=%d tlb_wr=%d tlb_ex=%d hit=%d\n",
                  vaddr, rw, tlb_wr, tlb_ex, hit);
    return hit;
}

/* Reads of the MMU's special regs end up here.  */
uint32_t mmu_read(CPUMBState *env, bool ext, uint32_t rn)
{
    MicroBlazeCPU *cpu = env_archcpu(env);
    unsigned int i;
    uint32_t r = 0;

    if (cpu->cfg.mmu < 2 || !cpu->cfg.mmu_tlb_access) {
        qemu_log_mask(LOG_GUEST_ERROR, "MMU access on MMU-less system\n");
        return 0;
    }
    if (ext && rn != MMU_R_TLBLO) {
        qemu_log_mask(LOG_GUEST_ERROR, "Extended access only to TLBLO.\n");
        return 0;
    }

    switch (rn) {
        /* Reads of HI/LO trigger reads from the mmu rams.  */
        case MMU_R_TLBLO:
        case MMU_R_TLBHI:
            if (!(cpu->cfg.mmu_tlb_access & 1)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Invalid access to MMU reg %d\n", rn);
                return 0;
            }

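            /*
             * TLBX selects which TLB entry the TLBLO/TLBHI windows access.
             * Reading TLBHI also loads the entry's TID into the PID register.
             */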
            i = env->mmu.regs[MMU_R_TLBX] & 0xff;
            r = extract64(env->mmu.rams[rn & 1][i], ext * 32, 32);
            if (rn == MMU_R_TLBHI) {
                env->mmu.regs[MMU_R_PID] = env->mmu.tids[i];
            }
            break;
        case MMU_R_PID:
        case MMU_R_ZPR:
            if (!(cpu->cfg.mmu_tlb_access & 1)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Invalid access to MMU reg %d\n", rn);
                return 0;
            }
            r = env->mmu.regs[rn];
            break;
        case MMU_R_TLBX:
            r = env->mmu.regs[rn];
            break;
        case MMU_R_TLBSX:
            qemu_log_mask(LOG_GUEST_ERROR, "TLBSX is write-only.\n");
            break;
        default:
            qemu_log_mask(LOG_GUEST_ERROR, "Invalid MMU register %d.\n", rn);
            break;
    }
    qemu_log_mask(CPU_LOG_MMU, "%s rn=%d=%x\n", __func__, rn, r);
    return r;
}

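/*
 * Writes to the MMU's special regs end up here.  TLBLO/TLBHI writes update
 * the TLB rams, PID and ZPR changes flush cached translations, and TLBSX
 * searches the TLB for a matching entry.
 */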
void mmu_write(CPUMBState *env, bool ext, uint32_t rn, uint32_t v)
{
    MicroBlazeCPU *cpu = env_archcpu(env);
    uint64_t tmp64;
    unsigned int i;

    qemu_log_mask(CPU_LOG_MMU,
                  "%s rn=%d=%x old=%x\n", __func__, rn, v,
                  rn < 3 ? env->mmu.regs[rn] : env->mmu.regs[MMU_R_TLBX]);

    if (cpu->cfg.mmu < 2 || !cpu->cfg.mmu_tlb_access) {
        qemu_log_mask(LOG_GUEST_ERROR, "MMU access on MMU-less system\n");
        return;
    }
    if (ext && rn != MMU_R_TLBLO) {
        qemu_log_mask(LOG_GUEST_ERROR, "Extended access only to TLBLO.\n");
        return;
    }

    switch (rn) {
        /* Writes to HI/LO trigger writes to the mmu rams.  */
        case MMU_R_TLBLO:
        case MMU_R_TLBHI:
            i = env->mmu.regs[MMU_R_TLBX] & 0xff;
            if (rn == MMU_R_TLBHI) {
                if (i < 3 && !(v & TLB_VALID) && qemu_loglevel_mask(~0)) {
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "invalidating index %x at pc=%x\n",
                                  i, env->pc);
                }
                env->mmu.tids[i] = env->mmu.regs[MMU_R_PID] & 0xff;
                mmu_flush_idx(env, i);
            }
            tmp64 = env->mmu.rams[rn & 1][i];
            env->mmu.rams[rn & 1][i] = deposit64(tmp64, ext * 32, 32, v);
            break;
        case MMU_R_ZPR:
            if (cpu->cfg.mmu_tlb_access <= 1) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Invalid access to MMU reg %d\n", rn);
                return;
            }

            /*
             * Changes to the zone protection reg flush the QEMU TLB.
             * Fortunately, these are very uncommon.
             */
            if (v != env->mmu.regs[rn]) {
                tlb_flush(env_cpu(env));
            }
            env->mmu.regs[rn] = v;
            break;
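        /*
         * A PID change flushes QEMU's cached translations for the old PID
         * (see mmu_change_pid) before the register is updated.
         */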
        case MMU_R_PID:
            if (cpu->cfg.mmu_tlb_access <= 1) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Invalid access to MMU reg %d\n", rn);
                return;
            }

            if (v != env->mmu.regs[rn]) {
                mmu_change_pid(env, v);
                env->mmu.regs[rn] = v;
            }
            break;
        case MMU_R_TLBX:
            /* Bit 31 is read-only.  */
            env->mmu.regs[rn] = deposit32(env->mmu.regs[rn], 0, 31, v);
            break;
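        /*
         * TLBSX searches the TLB for an entry matching the given virtual
         * address; on a hit TLBX is set to the matching index, on a miss
         * the MISS bit in TLBX is set instead.
         */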
        case MMU_R_TLBSX:
        {
            MicroBlazeMMULookup lu;
            int hit;

            if (cpu->cfg.mmu_tlb_access <= 1) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Invalid access to MMU reg %d\n", rn);
                return;
            }

            hit = mmu_translate(cpu, &lu, v & TLB_EPN_MASK,
                                0, cpu_mmu_index(env_cpu(env), false));
            if (hit) {
                env->mmu.regs[MMU_R_TLBX] = lu.idx;
            } else {
                env->mmu.regs[MMU_R_TLBX] |= R_TBLX_MISS_MASK;
            }
            break;
        }
        default:
            qemu_log_mask(LOG_GUEST_ERROR, "Invalid MMU register %d.\n", rn);
            break;
    }
}

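/* Reset the MMU: clear all of the special registers.  */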
void mmu_init(MicroBlazeMMU *mmu)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(mmu->regs); i++) {
        mmu->regs[i] = 0;
    }
}