xref: /openbmc/qemu/hw/openrisc/cputimer.c (revision 79b38d61)
1 /*
2  * QEMU OpenRISC timer support
3  *
4  * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
5  *                         Zhizhou Zhang <etouzh@gmail.com>
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "migration/vmstate.h"
24 #include "qemu/timer.h"
25 #include "sysemu/reset.h"
26 
27 #define TIMER_PERIOD 50 /* 50 ns period for 20 MHz timer */
28 
29 /* Tick Timer global state to allow all cores to be in sync */
30 typedef struct OR1KTimerState {
31     uint32_t ttcr;
32     uint32_t ttcr_offset;
33     uint64_t clk_offset;
34 } OR1KTimerState;
35 
36 static OR1KTimerState *or1k_timer;
37 
/*
 * Set the tick timer counter to @val.
 *
 * Records @val as both the current counter value and the new counting
 * base, and rebases clk_offset to the current virtual-clock time so
 * that subsequent elapsed-tick calculations start from this instant.
 */
void cpu_openrisc_count_set(OpenRISCCPU *cpu, uint32_t val)
{
    uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);

    or1k_timer->ttcr = val;
    or1k_timer->ttcr_offset = val;
    or1k_timer->clk_offset = now;
}
44 
/* Return the most recently computed tick timer counter (TTCR) value.
 * Callers that need an up-to-date value call cpu_openrisc_count_update()
 * first; this accessor does not refresh the counter itself. */
uint32_t cpu_openrisc_count_get(OpenRISCCPU *cpu)
{
    return or1k_timer->ttcr;
}
49 
50 /* Add elapsed ticks to ttcr */
/*
 * Refresh ttcr from the virtual clock.
 *
 * Converts the nanoseconds elapsed since clk_offset into timer ticks
 * (rounding up) and adds them to the recorded counter base.  Does
 * nothing while the timer is not counting.
 */
void cpu_openrisc_count_update(OpenRISCCPU *cpu)
{
    uint64_t elapsed_ns;

    if (!cpu->env.is_counting) {
        return;
    }

    elapsed_ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - or1k_timer->clk_offset;
    or1k_timer->ttcr = or1k_timer->ttcr_offset +
        DIV_ROUND_UP(elapsed_ns, TIMER_PERIOD);
}
62 
63 /* Update the next timeout time as difference between ttmr and ttcr */
/*
 * Re-arm the QEMU timer for the next TTCR == TTMR match.
 *
 * Computes how many ticks remain until the counter (modulo the TTMR_TP
 * period mask) reaches the match value programmed in ttmr, accounting
 * for wrap-around, and programs the per-CPU timer accordingly.  No-op
 * while the timer is not counting.
 */
void cpu_openrisc_timer_update(OpenRISCCPU *cpu)
{
    uint32_t match, count, wait;
    uint64_t now;

    if (!cpu->env.is_counting) {
        return;
    }

    cpu_openrisc_count_update(cpu);
    now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);

    match = cpu->env.ttmr & TTMR_TP;
    count = or1k_timer->ttcr & TTMR_TP;
    if (match <= count) {
        /* Match is behind (or at) the counter: wrap through TTMR_TP first. */
        wait = (TTMR_TP - count + 1) + match;
    } else {
        wait = match - count;
    }

    timer_mod(cpu->env.timer, now + (uint64_t)wait * TIMER_PERIOD);
}
85 
/* Start the tick timer counting.
 * The flag must be set before the update call: cpu_openrisc_count_update()
 * returns immediately while is_counting is clear. */
void cpu_openrisc_count_start(OpenRISCCPU *cpu)
{
    cpu->env.is_counting = 1;
    cpu_openrisc_count_update(cpu);
}
91 
/* Stop the tick timer.
 * Order matters: cancel the pending QEMU timer, capture the final ttcr
 * value while is_counting is still set (otherwise the update is a no-op),
 * then clear the counting flag. */
void cpu_openrisc_count_stop(OpenRISCCPU *cpu)
{
    timer_del(cpu->env.timer);
    cpu_openrisc_count_update(cpu);
    cpu->env.is_counting = 0;
}
98 
openrisc_timer_cb(void * opaque)99 static void openrisc_timer_cb(void *opaque)
100 {
101     OpenRISCCPU *cpu = opaque;
102 
103     if ((cpu->env.ttmr & TTMR_IE) &&
104          timer_expired(cpu->env.timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL))) {
105         CPUState *cs = CPU(cpu);
106 
107         cpu->env.ttmr |= TTMR_IP;
108         cs->interrupt_request |= CPU_INTERRUPT_TIMER;
109     }
110 
111     switch (cpu->env.ttmr & TTMR_M) {
112     case TIMER_NONE:
113         break;
114     case TIMER_INTR:
115         /* Zero the count by applying a negative offset to the counter */
116         or1k_timer->ttcr_offset -= (cpu->env.ttmr & TTMR_TP);
117         break;
118     case TIMER_SHOT:
119         cpu_openrisc_count_stop(cpu);
120         break;
121     case TIMER_CONT:
122         break;
123     }
124 
125     cpu_openrisc_timer_update(cpu);
126     qemu_cpu_kick(CPU(cpu));
127 }
128 
129 /* Reset the per CPU counter state. */
openrisc_count_reset(void * opaque)130 static void openrisc_count_reset(void *opaque)
131 {
132     OpenRISCCPU *cpu = opaque;
133 
134     if (cpu->env.is_counting) {
135         cpu_openrisc_count_stop(cpu);
136     }
137     cpu->env.ttmr = 0x00000000;
138 }
139 
140 /* Reset the global timer state. */
openrisc_timer_reset(void * opaque)141 static void openrisc_timer_reset(void *opaque)
142 {
143     OpenRISCCPU *cpu = opaque;
144     cpu_openrisc_count_set(cpu, 0);
145 }
146 
/* Migration description for the shared tick-timer state (version 2). */
static const VMStateDescription vmstate_or1k_timer = {
    .name = "or1k_timer",
    .version_id = 2,
    .minimum_version_id = 2,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(ttcr, OR1KTimerState),
        VMSTATE_UINT32(ttcr_offset, OR1KTimerState),
        VMSTATE_UINT64(clk_offset, OR1KTimerState),
        VMSTATE_END_OF_LIST()
    }
};
158 
/*
 * Per-CPU timer initialisation.
 *
 * Creates the CPU's virtual-clock timer and registers its reset handler.
 * The first CPU to be initialised additionally allocates the shared
 * OR1KTimerState, registers the global reset handler, and registers the
 * state for migration.
 */
void cpu_openrisc_clock_init(OpenRISCCPU *cpu)
{
    cpu->env.timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &openrisc_timer_cb, cpu);
    qemu_register_reset(openrisc_count_reset, cpu);

    if (!or1k_timer) {
        or1k_timer = g_new0(OR1KTimerState, 1);
        qemu_register_reset(openrisc_timer_reset, cpu);
        vmstate_register(NULL, 0, &vmstate_or1k_timer, or1k_timer);
    }
}
170