xref: /openbmc/qemu/target/ppc/timebase_helper.c (revision 0ca94b2f11223d41258e6a7a046e5ccde831de46)
1 /*
2  *  PowerPC emulation helpers for QEMU.
3  *
4  *  Copyright (c) 2003-2007 Jocelyn Mayer
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #include "qemu/osdep.h"
20 #include "cpu.h"
21 #include "hw/ppc/ppc.h"
22 #include "exec/helper-proto.h"
23 #include "exec/exec-all.h"
24 #include "qemu/log.h"
25 #include "qemu/main-loop.h"
26 
27 /*****************************************************************************/
28 /* SPR accesses */
29 
30 target_ulong helper_load_tbl(CPUPPCState *env)
31 {
32     return (target_ulong)cpu_ppc_load_tbl(env);
33 }
34 
35 target_ulong helper_load_tbu(CPUPPCState *env)
36 {
37     return cpu_ppc_load_tbu(env);
38 }
39 
40 target_ulong helper_load_atbl(CPUPPCState *env)
41 {
42     return (target_ulong)cpu_ppc_load_atbl(env);
43 }
44 
45 target_ulong helper_load_atbu(CPUPPCState *env)
46 {
47     return cpu_ppc_load_atbu(env);
48 }
49 
50 target_ulong helper_load_vtb(CPUPPCState *env)
51 {
52     return cpu_ppc_load_vtb(env);
53 }
54 
55 #if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
56 target_ulong helper_load_purr(CPUPPCState *env)
57 {
58     return (target_ulong)cpu_ppc_load_purr(env);
59 }
60 
61 void helper_store_purr(CPUPPCState *env, target_ulong val)
62 {
63     CPUState *cs = env_cpu(env);
64     CPUState *ccs;
65     uint32_t nr_threads = cs->nr_threads;
66 
67     if (nr_threads == 1 || !(env->flags & POWERPC_FLAG_SMT_1LPAR)) {
68         cpu_ppc_store_purr(env, val);
69         return;
70     }
71 
72     THREAD_SIBLING_FOREACH(cs, ccs) {
73         CPUPPCState *cenv = &POWERPC_CPU(ccs)->env;
74         cpu_ppc_store_purr(cenv, val);
75     }
76 }
77 #endif
78 
79 #if !defined(CONFIG_USER_ONLY)
80 void helper_store_tbl(CPUPPCState *env, target_ulong val)
81 {
82     CPUState *cs = env_cpu(env);
83     CPUState *ccs;
84     uint32_t nr_threads = cs->nr_threads;
85 
86     if (nr_threads == 1 || !(env->flags & POWERPC_FLAG_SMT_1LPAR)) {
87         cpu_ppc_store_tbl(env, val);
88         return;
89     }
90 
91     THREAD_SIBLING_FOREACH(cs, ccs) {
92         CPUPPCState *cenv = &POWERPC_CPU(ccs)->env;
93         cpu_ppc_store_tbl(cenv, val);
94     }
95 }
96 
97 void helper_store_tbu(CPUPPCState *env, target_ulong val)
98 {
99     CPUState *cs = env_cpu(env);
100     CPUState *ccs;
101     uint32_t nr_threads = cs->nr_threads;
102 
103     if (nr_threads == 1 || !(env->flags & POWERPC_FLAG_SMT_1LPAR)) {
104         cpu_ppc_store_tbu(env, val);
105         return;
106     }
107 
108     THREAD_SIBLING_FOREACH(cs, ccs) {
109         CPUPPCState *cenv = &POWERPC_CPU(ccs)->env;
110         cpu_ppc_store_tbu(cenv, val);
111     }
112 }
113 
void helper_store_atbl(CPUPPCState *env, target_ulong val)
{
    /* mtspr ATBL: store the lower word of the alternate time base. */
    cpu_ppc_store_atbl(env, val);
}
118 
void helper_store_atbu(CPUPPCState *env, target_ulong val)
{
    /* mtspr ATBU: store the upper word of the alternate time base. */
    cpu_ppc_store_atbu(env, val);
}
123 
124 target_ulong helper_load_decr(CPUPPCState *env)
125 {
126     return cpu_ppc_load_decr(env);
127 }
128 
void helper_store_decr(CPUPPCState *env, target_ulong val)
{
    /* mtspr DEC: store the decrementer. */
    cpu_ppc_store_decr(env, val);
}
133 
134 target_ulong helper_load_hdecr(CPUPPCState *env)
135 {
136     return cpu_ppc_load_hdecr(env);
137 }
138 
139 void helper_store_hdecr(CPUPPCState *env, target_ulong val)
140 {
141     CPUState *cs = env_cpu(env);
142     CPUState *ccs;
143     uint32_t nr_threads = cs->nr_threads;
144 
145     if (nr_threads == 1 || !(env->flags & POWERPC_FLAG_SMT_1LPAR)) {
146         cpu_ppc_store_hdecr(env, val);
147         return;
148     }
149 
150     THREAD_SIBLING_FOREACH(cs, ccs) {
151         CPUPPCState *cenv = &POWERPC_CPU(ccs)->env;
152         cpu_ppc_store_hdecr(cenv, val);
153     }
154 }
155 
156 void helper_store_vtb(CPUPPCState *env, target_ulong val)
157 {
158     CPUState *cs = env_cpu(env);
159     CPUState *ccs;
160     uint32_t nr_threads = cs->nr_threads;
161 
162     if (nr_threads == 1 || !(env->flags & POWERPC_FLAG_SMT_1LPAR)) {
163         cpu_ppc_store_vtb(env, val);
164         return;
165     }
166 
167     THREAD_SIBLING_FOREACH(cs, ccs) {
168         CPUPPCState *cenv = &POWERPC_CPU(ccs)->env;
169         cpu_ppc_store_vtb(cenv, val);
170     }
171 }
172 
173 void helper_store_tbu40(CPUPPCState *env, target_ulong val)
174 {
175     CPUState *cs = env_cpu(env);
176     CPUState *ccs;
177     uint32_t nr_threads = cs->nr_threads;
178 
179     if (nr_threads == 1 || !(env->flags & POWERPC_FLAG_SMT_1LPAR)) {
180         cpu_ppc_store_tbu40(env, val);
181         return;
182     }
183 
184     THREAD_SIBLING_FOREACH(cs, ccs) {
185         CPUPPCState *cenv = &POWERPC_CPU(ccs)->env;
186         cpu_ppc_store_tbu40(cenv, val);
187     }
188 }
189 
190 target_ulong helper_load_40x_pit(CPUPPCState *env)
191 {
192     return load_40x_pit(env);
193 }
194 
void helper_store_40x_pit(CPUPPCState *env, target_ulong val)
{
    /* mtspr PIT (PPC 40x): store the programmable interval timer. */
    store_40x_pit(env, val);
}
199 
void helper_store_40x_tcr(CPUPPCState *env, target_ulong val)
{
    /* mtspr TCR (PPC 40x): store the timer control register. */
    store_40x_tcr(env, val);
}
204 
void helper_store_40x_tsr(CPUPPCState *env, target_ulong val)
{
    /* mtspr TSR (PPC 40x): store the timer status register. */
    store_40x_tsr(env, val);
}
209 
void helper_store_booke_tcr(CPUPPCState *env, target_ulong val)
{
    /* mtspr TCR (BookE): store the timer control register. */
    store_booke_tcr(env, val);
}
214 
void helper_store_booke_tsr(CPUPPCState *env, target_ulong val)
{
    /* mtspr TSR (BookE): store the timer status register. */
    store_booke_tsr(env, val);
}
219 
220 #if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
221 /*
222  * qemu-user breaks with pnv headers, so they go under ifdefs for now.
223  * A clean up may be to move powernv specific registers and helpers into
224  * target/ppc/pnv_helper.c
225  */
226 #include "hw/ppc/pnv_core.h"
227 /*
228  * POWER processor Timebase Facility
229  */
230 
231 /*
232  * The TBST is the timebase state machine, which is a per-core machine that
233  * is used to synchronize the core TB with the ChipTOD. States 3,4,5 are
234  * not used in POWER8/9/10.
235  *
236  * The state machine gets driven by writes to TFMR SPR from the core, and
237  * by signals from the ChipTOD. The state machine table for common
238  * transitions is as follows (according to hardware specs, not necessarily
239  * this implementation):
240  *
241  * | Cur            | Event                            | New |
242  * +----------------+----------------------------------+-----+
243  * | 0 RESET        | TFMR |= LOAD_TOD_MOD             | 1   |
244  * | 1 SEND_TOD_MOD | "immediate transition"           | 2   |
245  * | 2 NOT_SET      | mttbu/mttbu40/mttbl              | 2   |
246  * | 2 NOT_SET      | TFMR |= MOVE_CHIP_TOD_TO_TB      | 6   |
247  * | 6 SYNC_WAIT    | "sync pulse from ChipTOD"        | 7   |
248  * | 7 GET_TOD      | ChipTOD xscom MOVE_TOD_TO_TB_REG | 8   |
249  * | 8 TB_RUNNING   | mttbu/mttbu40                    | 8   |
250  * | 8 TB_RUNNING   | TFMR |= LOAD_TOD_MOD             | 1   |
251  * | 8 TB_RUNNING   | mttbl                            | 9   |
252  * | 9 TB_ERROR     | TFMR |= CLEAR_TB_ERRORS          | 0   |
253  *
254  * - LOAD_TOD_MOD will also move states 2,6 to state 1, omitted from table
255  *   because it's not a typical init flow.
256  *
257  * - The ERROR state can be entered from most/all other states on invalid
258  *   states (e.g., if some TFMR control bit is set from a state where it's
259  *   not listed to cause a transition away from), omitted to avoid clutter.
260  *
261  * Note: mttbl causes a timebase error because this inevitably causes
262  * ticks to be lost and TB to become unsynchronized, whereas TB can be
263  * adjusted using mttbu* without losing ticks. mttbl behaviour is not
264  * modelled.
265  *
266  * Note: the TB state machine does not actually cause any real TB adjustment!
267  * TB starts out synchronized across all vCPUs (hardware threads) in
 * QEMU, so for now the purpose of the TBST and ChipTOD model is simply
269  * to step through firmware initialisation sequences.
270  */
271 static unsigned int tfmr_get_tb_state(uint64_t tfmr)
272 {
273     return (tfmr & TFMR_TBST_ENCODED) >> (63 - 31);
274 }
275 
276 static uint64_t tfmr_new_tb_state(uint64_t tfmr, unsigned int tbst)
277 {
278     tfmr &= ~TFMR_TBST_LAST;
279     tfmr |= (tfmr & TFMR_TBST_ENCODED) >> 4; /* move state to last state */
280     tfmr &= ~TFMR_TBST_ENCODED;
281     tfmr |= (uint64_t)tbst << (63 - 31); /* move new state to state */
282 
283     if (tbst == TBST_TB_RUNNING) {
284         tfmr |= TFMR_TB_VALID;
285     } else {
286         tfmr &= ~TFMR_TB_VALID;
287     }
288 
289     return tfmr;
290 }
291 
292 static void write_tfmr(CPUPPCState *env, target_ulong val)
293 {
294     CPUState *cs = env_cpu(env);
295 
296     if (cs->nr_threads == 1) {
297         env->spr[SPR_TFMR] = val;
298     } else {
299         CPUState *ccs;
300         THREAD_SIBLING_FOREACH(cs, ccs) {
301             CPUPPCState *cenv = &POWERPC_CPU(ccs)->env;
302             cenv->spr[SPR_TFMR] = val;
303         }
304     }
305 }
306 
307 static PnvCoreTODState *cpu_get_tbst(PowerPCCPU *cpu)
308 {
309     PnvCore *pc = pnv_cpu_state(cpu)->pnv_core;
310 
311     return &pc->tod_state;
312 }
313 
/*
 * Advance the per-core timebase state machine by one step. Called on each
 * mfspr TFMR; progress is driven by the two countdown timers in tod_state
 * rather than by real time.
 */
static void tb_state_machine_step(CPUPPCState *env)
{
    PowerPCCPU *cpu = env_archcpu(env);
    PnvCoreTODState *tod_state = cpu_get_tbst(cpu);
    uint64_t tfmr = env->spr[SPR_TFMR];
    unsigned int tbst = tfmr_get_tb_state(tfmr);

    /* Nothing to do outside ECLIPZ mode or once the machine is in error. */
    if (!(tfmr & TFMR_TB_ECLIPZ) || tbst == TBST_TB_ERROR) {
        return;
    }

    /*
     * The sync pulse timer models the periodic ChipTOD sync pulse: once it
     * expires, TB_SYNC_OCCURED becomes visible (it is re-armed and cleared
     * by mtspr TFMR in helper_store_tfmr).
     */
    if (tod_state->tb_sync_pulse_timer) {
        tod_state->tb_sync_pulse_timer--;
    } else {
        tfmr |= TFMR_TB_SYNC_OCCURED;
        write_tfmr(env, tfmr);
    }

    /* Delay state transitions for a few mfspr to mimic hardware latency. */
    if (tod_state->tb_state_timer) {
        tod_state->tb_state_timer--;
        return;
    }

    if (tfmr & TFMR_LOAD_TOD_MOD) {
        tfmr &= ~TFMR_LOAD_TOD_MOD;
        if (tbst == TBST_GET_TOD) {
            /* LOAD_TOD_MOD is not valid while waiting for the TOD. */
            tfmr = tfmr_new_tb_state(tfmr, TBST_TB_ERROR);
            tfmr |= TFMR_FIRMWARE_CONTROL_ERROR;
        } else {
            tfmr = tfmr_new_tb_state(tfmr, TBST_SEND_TOD_MOD);
            /* State seems to transition immediately */
            tfmr = tfmr_new_tb_state(tfmr, TBST_NOT_SET);
        }
    } else if (tfmr & TFMR_MOVE_CHIP_TOD_TO_TB) {
        if (tbst == TBST_SYNC_WAIT) {
            tfmr = tfmr_new_tb_state(tfmr, TBST_GET_TOD);
            tod_state->tb_state_timer = 3;
        } else if (tbst == TBST_GET_TOD) {
            /* Wait until the ChipTOD side has delivered the TOD value. */
            if (tod_state->tod_sent_to_tb) {
                tfmr = tfmr_new_tb_state(tfmr, TBST_TB_RUNNING);
                tfmr &= ~TFMR_MOVE_CHIP_TOD_TO_TB;
                tod_state->tb_ready_for_tod = 0;
                tod_state->tod_sent_to_tb = 0;
            }
        } else {
            qemu_log_mask(LOG_GUEST_ERROR, "TFMR error: MOVE_CHIP_TOD_TO_TB "
                          "state machine in invalid state 0x%x\n", tbst);
            tfmr = tfmr_new_tb_state(tfmr, TBST_TB_ERROR);
            tfmr |= TFMR_FIRMWARE_CONTROL_ERROR;
            tod_state->tb_ready_for_tod = 0;
        }
    }

    write_tfmr(env, tfmr);
}
369 
370 target_ulong helper_load_tfmr(CPUPPCState *env)
371 {
372     tb_state_machine_step(env);
373 
374     return env->spr[SPR_TFMR] | TFMR_TB_ECLIPZ;
375 }
376 
377 void helper_store_tfmr(CPUPPCState *env, target_ulong val)
378 {
379     PowerPCCPU *cpu = env_archcpu(env);
380     PnvCoreTODState *tod_state = cpu_get_tbst(cpu);
381     uint64_t tfmr = env->spr[SPR_TFMR];
382     uint64_t clear_on_write;
383     unsigned int tbst = tfmr_get_tb_state(tfmr);
384 
385     if (!(val & TFMR_TB_ECLIPZ)) {
386         qemu_log_mask(LOG_UNIMP, "TFMR non-ECLIPZ mode not implemented\n");
387         tfmr &= ~TFMR_TBST_ENCODED;
388         tfmr &= ~TFMR_TBST_LAST;
389         goto out;
390     }
391 
392     /* Update control bits */
393     tfmr = (tfmr & ~TFMR_CONTROL_MASK) | (val & TFMR_CONTROL_MASK);
394 
395     /* Several bits are clear-on-write, only one is implemented so far */
396     clear_on_write = val & TFMR_FIRMWARE_CONTROL_ERROR;
397     tfmr &= ~clear_on_write;
398 
399     /*
400      * mtspr always clears this. The sync pulse timer makes it come back
401      * after the second mfspr.
402      */
403     tfmr &= ~TFMR_TB_SYNC_OCCURED;
404     tod_state->tb_sync_pulse_timer = 1;
405 
406     if (((tfmr | val) & (TFMR_LOAD_TOD_MOD | TFMR_MOVE_CHIP_TOD_TO_TB)) ==
407                         (TFMR_LOAD_TOD_MOD | TFMR_MOVE_CHIP_TOD_TO_TB)) {
408         qemu_log_mask(LOG_GUEST_ERROR, "TFMR error: LOAD_TOD_MOD and "
409                                        "MOVE_CHIP_TOD_TO_TB both set\n");
410         tfmr = tfmr_new_tb_state(tfmr, TBST_TB_ERROR);
411         tfmr |= TFMR_FIRMWARE_CONTROL_ERROR;
412         tod_state->tb_ready_for_tod = 0;
413         goto out;
414     }
415 
416     if (tfmr & TFMR_CLEAR_TB_ERRORS) {
417         /*
418          * Workbook says TFMR_CLEAR_TB_ERRORS should be written twice.
419          * This is not simulated/required here.
420          */
421         tfmr = tfmr_new_tb_state(tfmr, TBST_RESET);
422         tfmr &= ~TFMR_CLEAR_TB_ERRORS;
423         tfmr &= ~TFMR_LOAD_TOD_MOD;
424         tfmr &= ~TFMR_MOVE_CHIP_TOD_TO_TB;
425         tfmr &= ~TFMR_FIRMWARE_CONTROL_ERROR; /* XXX: should this be cleared? */
426         tod_state->tb_ready_for_tod = 0;
427         tod_state->tod_sent_to_tb = 0;
428         goto out;
429     }
430 
431     if (tbst == TBST_TB_ERROR) {
432         qemu_log_mask(LOG_GUEST_ERROR, "TFMR error: mtspr TFMR in TB_ERROR"
433                                        " state\n");
434         tfmr |= TFMR_FIRMWARE_CONTROL_ERROR;
435         return;
436     }
437 
438     if (tfmr & TFMR_LOAD_TOD_MOD) {
439         /* Wait for an arbitrary 3 mfspr until the next state transition. */
440         tod_state->tb_state_timer = 3;
441     } else if (tfmr & TFMR_MOVE_CHIP_TOD_TO_TB) {
442         if (tbst == TBST_NOT_SET) {
443             tfmr = tfmr_new_tb_state(tfmr, TBST_SYNC_WAIT);
444             tod_state->tb_ready_for_tod = 1;
445             tod_state->tb_state_timer = 3; /* arbitrary */
446         } else {
447             qemu_log_mask(LOG_GUEST_ERROR, "TFMR error: MOVE_CHIP_TOD_TO_TB "
448                                            "not in TB not set state 0x%x\n",
449                                            tbst);
450             tfmr = tfmr_new_tb_state(tfmr, TBST_TB_ERROR);
451             tfmr |= TFMR_FIRMWARE_CONTROL_ERROR;
452             tod_state->tb_ready_for_tod = 0;
453         }
454     }
455 
456 out:
457     write_tfmr(env, tfmr);
458 }
459 #endif
460 
461 /*****************************************************************************/
462 /* Embedded PowerPC specific helpers */
463 
464 /* XXX: to be improved to check access rights when in user-mode */
465 target_ulong helper_load_dcr(CPUPPCState *env, target_ulong dcrn)
466 {
467     uint32_t val = 0;
468 
469     if (unlikely(env->dcr_env == NULL)) {
470         qemu_log_mask(LOG_GUEST_ERROR, "No DCR environment\n");
471         raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
472                                POWERPC_EXCP_INVAL |
473                                POWERPC_EXCP_INVAL_INVAL, GETPC());
474     } else {
475         int ret;
476 
477         bql_lock();
478         ret = ppc_dcr_read(env->dcr_env, (uint32_t)dcrn, &val);
479         bql_unlock();
480         if (unlikely(ret != 0)) {
481             qemu_log_mask(LOG_GUEST_ERROR, "DCR read error %d %03x\n",
482                           (uint32_t)dcrn, (uint32_t)dcrn);
483             raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
484                                    POWERPC_EXCP_INVAL |
485                                    POWERPC_EXCP_INVAL_INVAL, GETPC());
486         }
487     }
488     return val;
489 }
490 
491 void helper_store_dcr(CPUPPCState *env, target_ulong dcrn, target_ulong val)
492 {
493     if (unlikely(env->dcr_env == NULL)) {
494         qemu_log_mask(LOG_GUEST_ERROR, "No DCR environment\n");
495         raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
496                                POWERPC_EXCP_INVAL |
497                                POWERPC_EXCP_INVAL_INVAL, GETPC());
498     } else {
499         int ret;
500         bql_lock();
501         ret = ppc_dcr_write(env->dcr_env, (uint32_t)dcrn, (uint32_t)val);
502         bql_unlock();
503         if (unlikely(ret != 0)) {
504             qemu_log_mask(LOG_GUEST_ERROR, "DCR write error %d %03x\n",
505                           (uint32_t)dcrn, (uint32_t)dcrn);
506             raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
507                                    POWERPC_EXCP_INVAL |
508                                    POWERPC_EXCP_INVAL_INVAL, GETPC());
509         }
510     }
511 }
512 #endif
513