/*
 * QEMU OpenRISC timer support
 *
 * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
 *                         Zhizhou Zhang <etouzh@gmail.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "migration/vmstate.h"
#include "qemu/timer.h"
#include "sysemu/reset.h"

#define TIMER_PERIOD 50 /* 50 ns period for 20 MHz timer */

/* Tick Timer global state to allow all cores to be in sync */
typedef struct OR1KTimerState {
    uint32_t ttcr;      /* Tick Timer Count Register, shared by all cores */
    uint64_t last_clk;  /* virtual-clock time of the last ttcr update */
} OR1KTimerState;

static OR1KTimerState *or1k_timer;

/* Set the shared tick timer counter (TTCR). */
void cpu_openrisc_count_set(OpenRISCCPU *cpu, uint32_t val)
{
    or1k_timer->ttcr = val;
}

/* Return the current value of the shared tick timer counter (TTCR). */
uint32_t cpu_openrisc_count_get(OpenRISCCPU *cpu)
{
    return or1k_timer->ttcr;
}

/* Add elapsed ticks to ttcr */
void cpu_openrisc_count_update(OpenRISCCPU *cpu)
{
    uint64_t now;

    if (!cpu->env.is_counting) {
        return;
    }
    now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    or1k_timer->ttcr += (uint32_t)((now - or1k_timer->last_clk)
                                    / TIMER_PERIOD);
    or1k_timer->last_clk = now;
}
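
/*
 * As a concrete example of the arithmetic above: with TIMER_PERIOD = 50 ns,
 * a virtual-clock delta of 1,000,000 ns (1 ms) adds 1,000,000 / 50 = 20,000
 * ticks to ttcr, consistent with the 20 MHz tick rate.
 */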

/* Update the next timeout from the difference between ttmr and ttcr */
void cpu_openrisc_timer_update(OpenRISCCPU *cpu)
{
    uint32_t wait;
    uint64_t now, next;

    if (!cpu->env.is_counting) {
        return;
    }

    cpu_openrisc_count_update(cpu);
    now = or1k_timer->last_clk;

    if ((cpu->env.ttmr & TTMR_TP) <= (or1k_timer->ttcr & TTMR_TP)) {
        wait = TTMR_TP - (or1k_timer->ttcr & TTMR_TP) + 1;
        wait += cpu->env.ttmr & TTMR_TP;
    } else {
        wait = (cpu->env.ttmr & TTMR_TP) - (or1k_timer->ttcr & TTMR_TP);
    }
    next = now + (uint64_t)wait * TIMER_PERIOD;
    timer_mod(cpu->env.timer, next);
}
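
/*
 * Note on the wrap-around branch above: TTMR_TP masks the 28-bit time-period
 * field, so matching is done modulo TTMR_TP + 1.  For example, if
 * ttcr & TTMR_TP == TTMR_TP - 1 and ttmr & TTMR_TP == 2, the match value is
 * behind the count, so the timeout is 2 ticks (to wrap the low bits back to
 * zero, then two more to reach the match), i.e. 4 * TIMER_PERIOD ns.
 */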

/* Enable the tick timer and account for time elapsed since last_clk. */
void cpu_openrisc_count_start(OpenRISCCPU *cpu)
{
    cpu->env.is_counting = 1;
    cpu_openrisc_count_update(cpu);
}

/* Cancel the pending timeout, fold in the final ticks and stop counting. */
void cpu_openrisc_count_stop(OpenRISCCPU *cpu)
{
    timer_del(cpu->env.timer);
    cpu_openrisc_count_update(cpu);
    cpu->env.is_counting = 0;
}

/*
 * Timer expiry callback: raise the tick timer interrupt when TTMR_IE is set,
 * apply the configured mode (TIMER_INTR restarts the count from zero,
 * TIMER_SHOT stops counting, TIMER_NONE and TIMER_CONT leave ttcr alone),
 * then schedule the next timeout and kick the CPU.
 */
static void openrisc_timer_cb(void *opaque)
{
    OpenRISCCPU *cpu = opaque;

    if ((cpu->env.ttmr & TTMR_IE) &&
         timer_expired(cpu->env.timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL))) {
        CPUState *cs = CPU(cpu);

        cpu->env.ttmr |= TTMR_IP;
        cs->interrupt_request |= CPU_INTERRUPT_TIMER;
    }

    switch (cpu->env.ttmr & TTMR_M) {
    case TIMER_NONE:
        break;
    case TIMER_INTR:
        or1k_timer->ttcr = 0;
        break;
    case TIMER_SHOT:
        cpu_openrisc_count_stop(cpu);
        break;
    case TIMER_CONT:
        break;
    }

    cpu_openrisc_timer_update(cpu);
    qemu_cpu_kick(CPU(cpu));
}

/* Reset the per CPU counter state. */
static void openrisc_count_reset(void *opaque)
{
    OpenRISCCPU *cpu = opaque;

    if (cpu->env.is_counting) {
        cpu_openrisc_count_stop(cpu);
    }
    cpu->env.ttmr = 0x00000000;
}

/* Reset the global timer state. */
static void openrisc_timer_reset(void *opaque)
{
    or1k_timer->ttcr = 0x00000000;
    or1k_timer->last_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
}

static const VMStateDescription vmstate_or1k_timer = {
    .name = "or1k_timer",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(ttcr, OR1KTimerState),
        VMSTATE_UINT64(last_clk, OR1KTimerState),
        VMSTATE_END_OF_LIST()
    }
};

/* Per-CPU init; the shared timer state and its hooks are registered once. */
void cpu_openrisc_clock_init(OpenRISCCPU *cpu)
{
    cpu->env.timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &openrisc_timer_cb, cpu);

    qemu_register_reset(openrisc_count_reset, cpu);
    if (or1k_timer == NULL) {
        or1k_timer = g_new0(OR1KTimerState, 1);
        qemu_register_reset(openrisc_timer_reset, cpu);
        vmstate_register(NULL, 0, &vmstate_or1k_timer, or1k_timer);
    }
}
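
/*
 * Expected call flow (a sketch, not defined in this file): machine setup
 * calls cpu_openrisc_clock_init() once per CPU, and the TTMR/TTCR SPR
 * helpers are assumed to drive the counter roughly as follows:
 *
 *     cpu_openrisc_count_update(cpu);            // reading TTCR
 *     val = cpu_openrisc_count_get(cpu);
 *
 *     cpu_openrisc_count_set(cpu, val);          // writing TTCR
 *
 *     cpu_openrisc_count_start(cpu);             // TTMR enables a timer mode
 *     cpu_openrisc_timer_update(cpu);            // reprogram the next timeout
 */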