1fe0007f3SPhilippe Mathieu-Daudé /*
2fe0007f3SPhilippe Mathieu-Daudé * CPU thread main loop - common bits for user and system mode emulation
3fe0007f3SPhilippe Mathieu-Daudé *
4fe0007f3SPhilippe Mathieu-Daudé * Copyright (c) 2003-2005 Fabrice Bellard
5fe0007f3SPhilippe Mathieu-Daudé *
6fe0007f3SPhilippe Mathieu-Daudé * This library is free software; you can redistribute it and/or
7fe0007f3SPhilippe Mathieu-Daudé * modify it under the terms of the GNU Lesser General Public
8fe0007f3SPhilippe Mathieu-Daudé * License as published by the Free Software Foundation; either
9fe0007f3SPhilippe Mathieu-Daudé * version 2.1 of the License, or (at your option) any later version.
10fe0007f3SPhilippe Mathieu-Daudé *
11fe0007f3SPhilippe Mathieu-Daudé * This library is distributed in the hope that it will be useful,
12fe0007f3SPhilippe Mathieu-Daudé * but WITHOUT ANY WARRANTY; without even the implied warranty of
13fe0007f3SPhilippe Mathieu-Daudé * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14fe0007f3SPhilippe Mathieu-Daudé * Lesser General Public License for more details.
15fe0007f3SPhilippe Mathieu-Daudé *
16fe0007f3SPhilippe Mathieu-Daudé * You should have received a copy of the GNU Lesser General Public
17fe0007f3SPhilippe Mathieu-Daudé * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18fe0007f3SPhilippe Mathieu-Daudé */
19fe0007f3SPhilippe Mathieu-Daudé
20fe0007f3SPhilippe Mathieu-Daudé #include "qemu/osdep.h"
21fe0007f3SPhilippe Mathieu-Daudé #include "qemu/main-loop.h"
22fe0007f3SPhilippe Mathieu-Daudé #include "exec/cpu-common.h"
23fe0007f3SPhilippe Mathieu-Daudé #include "hw/core/cpu.h"
24fe0007f3SPhilippe Mathieu-Daudé #include "sysemu/cpus.h"
25fe0007f3SPhilippe Mathieu-Daudé #include "qemu/lockable.h"
26fe0007f3SPhilippe Mathieu-Daudé #include "trace/trace-root.h"
27fe0007f3SPhilippe Mathieu-Daudé
/* Protects the global CPU list and the exclusive-section state below. */
QemuMutex qemu_cpu_list_lock;
/* Signalled in cpu_exec_end() when the last counted CPU stops; waited on
 * by start_exclusive(). */
static QemuCond exclusive_cond;
/* Broadcast by end_exclusive(); waited on in exclusive_idle(). */
static QemuCond exclusive_resume;
/* Broadcast by process_queued_cpu_work(); waited on in do_run_on_cpu(). */
static QemuCond qemu_work_cond;

/* >= 1 if a thread is inside start_exclusive/end_exclusive. Written
 * under qemu_cpu_list_lock, read with atomic operations.
 */
static int pending_cpus;
37fe0007f3SPhilippe Mathieu-Daudé
/* (Re-)initialize the CPU-list lock, condition variables and exclusive
 * bookkeeping.  Safe to call more than once per process. */
void qemu_init_cpu_list(void)
{
    /* This is needed because qemu_init_cpu_list is also called by the
     * child process in a fork. */
    pending_cpus = 0;

    qemu_mutex_init(&qemu_cpu_list_lock);
    qemu_cond_init(&exclusive_cond);
    qemu_cond_init(&exclusive_resume);
    qemu_cond_init(&qemu_work_cond);
}
49fe0007f3SPhilippe Mathieu-Daudé
/* Acquire the global CPU-list lock. */
void cpu_list_lock(void)
{
    qemu_mutex_lock(&qemu_cpu_list_lock);
}
54fe0007f3SPhilippe Mathieu-Daudé
/* Release the global CPU-list lock. */
void cpu_list_unlock(void)
{
    qemu_mutex_unlock(&qemu_cpu_list_lock);
}
59fe0007f3SPhilippe Mathieu-Daudé
60fe0007f3SPhilippe Mathieu-Daudé
cpu_get_free_index(void)6118530e7cSHarsh Prateek Bora int cpu_get_free_index(void)
62fe0007f3SPhilippe Mathieu-Daudé {
63fe0007f3SPhilippe Mathieu-Daudé CPUState *some_cpu;
64fe0007f3SPhilippe Mathieu-Daudé int max_cpu_index = 0;
65fe0007f3SPhilippe Mathieu-Daudé
66fe0007f3SPhilippe Mathieu-Daudé CPU_FOREACH(some_cpu) {
67fe0007f3SPhilippe Mathieu-Daudé if (some_cpu->cpu_index >= max_cpu_index) {
68fe0007f3SPhilippe Mathieu-Daudé max_cpu_index = some_cpu->cpu_index + 1;
69fe0007f3SPhilippe Mathieu-Daudé }
70fe0007f3SPhilippe Mathieu-Daudé }
71fe0007f3SPhilippe Mathieu-Daudé return max_cpu_index;
72fe0007f3SPhilippe Mathieu-Daudé }
73fe0007f3SPhilippe Mathieu-Daudé
/* Global list of all CPUs; mutated under qemu_cpu_list_lock using the
 * RCU-friendly QTAILQ insert/remove variants. */
CPUTailQ cpus_queue = QTAILQ_HEAD_INITIALIZER(cpus_queue);
/* Incremented on every cpu_list_add()/cpu_list_remove(). */
static unsigned int cpu_list_generation_id;

/* Return the current CPU-list generation counter; two differing reads
 * indicate the CPU list changed in between. */
unsigned int cpu_list_generation_id_get(void)
{
    return cpu_list_generation_id;
}
81fe0007f3SPhilippe Mathieu-Daudé
/*
 * Append @cpu to the global CPU list, assigning a fresh cpu_index when the
 * caller left it as UNASSIGNED_CPU_INDEX.  Mixing auto-assigned and
 * caller-chosen indexes is forbidden once auto-assignment has happened.
 */
void cpu_list_add(CPUState *cpu)
{
    /* Latches to true the first time an index is auto-assigned. */
    static bool cpu_index_auto_assigned;

    QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
    if (cpu->cpu_index != UNASSIGNED_CPU_INDEX) {
        /* Caller chose the index; reject mixing with automatic mode. */
        assert(!cpu_index_auto_assigned);
    } else {
        cpu_index_auto_assigned = true;
        cpu->cpu_index = cpu_get_free_index();
        assert(cpu->cpu_index != UNASSIGNED_CPU_INDEX);
    }
    QTAILQ_INSERT_TAIL_RCU(&cpus_queue, cpu, node);
    cpu_list_generation_id++;
}
97fe0007f3SPhilippe Mathieu-Daudé
/*
 * Unlink @cpu from the global CPU list and mark its index unassigned.
 * A CPU that was never linked (cpu_exec_init() not called) is ignored.
 */
void cpu_list_remove(CPUState *cpu)
{
    QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
    if (QTAILQ_IN_USE(cpu, node)) {
        QTAILQ_REMOVE_RCU(&cpus_queue, cpu, node);
        cpu->cpu_index = UNASSIGNED_CPU_INDEX;
        cpu_list_generation_id++;
    }
}
110fe0007f3SPhilippe Mathieu-Daudé
qemu_get_cpu(int index)111fe0007f3SPhilippe Mathieu-Daudé CPUState *qemu_get_cpu(int index)
112fe0007f3SPhilippe Mathieu-Daudé {
113fe0007f3SPhilippe Mathieu-Daudé CPUState *cpu;
114fe0007f3SPhilippe Mathieu-Daudé
115fe0007f3SPhilippe Mathieu-Daudé CPU_FOREACH(cpu) {
116fe0007f3SPhilippe Mathieu-Daudé if (cpu->cpu_index == index) {
117fe0007f3SPhilippe Mathieu-Daudé return cpu;
118fe0007f3SPhilippe Mathieu-Daudé }
119fe0007f3SPhilippe Mathieu-Daudé }
120fe0007f3SPhilippe Mathieu-Daudé
121fe0007f3SPhilippe Mathieu-Daudé return NULL;
122fe0007f3SPhilippe Mathieu-Daudé }
123fe0007f3SPhilippe Mathieu-Daudé
/* current CPU in the current thread. It is only valid inside cpu_exec() */
__thread CPUState *current_cpu;

/* One unit of deferred work queued on cpu->work_list. */
struct qemu_work_item {
    QSIMPLEQ_ENTRY(qemu_work_item) node;
    run_on_cpu_func func;        /* callback executed on the target CPU */
    run_on_cpu_data data;        /* opaque argument handed to func */
    bool free, exclusive, done;  /* free: heap-owned, g_free after running;
                                  * exclusive: run under start/end_exclusive;
                                  * done: completion flag for sync waiters */
};
133fe0007f3SPhilippe Mathieu-Daudé
/*
 * Append @wi to @cpu's work list under work_mutex, then kick the CPU so
 * it notices the pending item.
 */
static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi)
{
    WITH_QEMU_LOCK_GUARD(&cpu->work_mutex) {
        QSIMPLEQ_INSERT_TAIL(&cpu->work_list, wi, node);
        wi->done = false;
    }

    qemu_cpu_kick(cpu);
}
143fe0007f3SPhilippe Mathieu-Daudé
/*
 * Run func(cpu, data) on @cpu and wait for completion.
 *
 * If @cpu is the calling thread's own CPU, func runs synchronously right
 * here.  Otherwise a stack-allocated work item is queued on the target CPU
 * and this thread sleeps on qemu_work_cond; @mutex is the lock currently
 * held by the caller and is dropped for the duration of each wait.
 */
void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data,
                   QemuMutex *mutex)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(cpu)) {
        func(cpu, data);
        return;
    }

    /* Stack-allocated: free = false, we block until done instead. */
    wi.func = func;
    wi.data = data;
    wi.done = false;
    wi.free = false;
    wi.exclusive = false;

    queue_work_on_cpu(cpu, &wi);
    while (!qatomic_load_acquire(&wi.done)) {
        CPUState *self_cpu = current_cpu;

        /* NOTE(review): current_cpu is saved and restored around the wait,
         * presumably because code run while this thread sleeps can clobber
         * the thread-local — confirm before relying on it elsewhere. */
        qemu_cond_wait(&qemu_work_cond, mutex);
        current_cpu = self_cpu;
    }
}
168fe0007f3SPhilippe Mathieu-Daudé
/*
 * Queue func(cpu, data) to run asynchronously on @cpu and return
 * immediately.  The work item is heap-allocated and freed after it runs.
 */
void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
{
    struct qemu_work_item *work = g_new0(struct qemu_work_item, 1);

    work->free = true;   /* heap-owned: released after execution */
    work->data = data;
    work->func = func;

    queue_work_on_cpu(cpu, work);
}
180fe0007f3SPhilippe Mathieu-Daudé
/* Wait for pending exclusive operations to complete. The CPU list lock
   must be held.  Sleeps on exclusive_resume, which end_exclusive()
   broadcasts after resetting pending_cpus to 0. */
static inline void exclusive_idle(void)
{
    while (pending_cpus) {
        qemu_cond_wait(&exclusive_resume, &qemu_cpu_list_lock);
    }
}
189fe0007f3SPhilippe Mathieu-Daudé
/* Start an exclusive operation.
   Must only be called from outside cpu_exec.  Returns with all other CPUs
   stopped; pair with end_exclusive().  Re-entrant on the same CPU via
   exclusive_context_count. */
void start_exclusive(void)
{
    CPUState *other_cpu;
    int running_cpus;

    /* Ensure we are not running, or start_exclusive will be blocked. */
    g_assert(!current_cpu->running);

    /* Nested call: the outermost start_exclusive already stopped the
     * other CPUs; just bump the nesting count. */
    if (current_cpu->exclusive_context_count) {
        current_cpu->exclusive_context_count++;
        return;
    }

    qemu_mutex_lock(&qemu_cpu_list_lock);
    /* Wait for an exclusive section owned by another thread to finish. */
    exclusive_idle();

    /* Make all other cpus stop executing. */
    qatomic_set(&pending_cpus, 1);

    /* Write pending_cpus before reading other_cpu->running. */
    smp_mb();
    running_cpus = 0;
    CPU_FOREACH(other_cpu) {
        if (qatomic_read(&other_cpu->running)) {
            /* has_waiter tells cpu_exec_end this CPU is counted in
             * pending_cpus and must signal exclusive_cond when it stops. */
            other_cpu->has_waiter = true;
            running_cpus++;
            qemu_cpu_kick(other_cpu);
        }
    }

    /* pending_cpus = still-running CPUs plus one for this thread. */
    qatomic_set(&pending_cpus, running_cpus + 1);
    while (pending_cpus > 1) {
        qemu_cond_wait(&exclusive_cond, &qemu_cpu_list_lock);
    }

    /* Can release mutex, no one will enter another exclusive
     * section until end_exclusive resets pending_cpus to 0.
     */
    qemu_mutex_unlock(&qemu_cpu_list_lock);

    current_cpu->exclusive_context_count = 1;
}
234fe0007f3SPhilippe Mathieu-Daudé
/* Finish an exclusive operation.  Only the outermost of nested
   start_exclusive/end_exclusive pairs actually releases the other CPUs. */
void end_exclusive(void)
{
    current_cpu->exclusive_context_count--;
    if (current_cpu->exclusive_context_count) {
        return;
    }

    qemu_mutex_lock(&qemu_cpu_list_lock);
    qatomic_set(&pending_cpus, 0);
    /* Wake every thread blocked in exclusive_idle(). */
    qemu_cond_broadcast(&exclusive_resume);
    qemu_mutex_unlock(&qemu_cpu_list_lock);
}
248fe0007f3SPhilippe Mathieu-Daudé
/* Wait for exclusive ops to finish, and begin cpu execution.
 * Sets cpu->running; the lock-free fast path below relies on the paired
 * memory barriers here and in start_exclusive(). */
void cpu_exec_start(CPUState *cpu)
{
    qatomic_set(&cpu->running, true);

    /* Write cpu->running before reading pending_cpus. */
    smp_mb();

    /* 1. start_exclusive saw cpu->running == true and pending_cpus >= 1.
     * After taking the lock we'll see cpu->has_waiter == true and run---not
     * for long because start_exclusive kicked us.  cpu_exec_end will
     * decrement pending_cpus and signal the waiter.
     *
     * 2. start_exclusive saw cpu->running == false but pending_cpus >= 1.
     * This includes the case when an exclusive item is running now.
     * Then we'll see cpu->has_waiter == false and wait for the item to
     * complete.
     *
     * 3. pending_cpus == 0.  Then start_exclusive is definitely going to
     * see cpu->running == true, and it will kick the CPU.
     */
    if (unlikely(qatomic_read(&pending_cpus))) {
        QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
        if (!cpu->has_waiter) {
            /* Not counted in pending_cpus, let the exclusive item
             * run.  Since we have the lock, just set cpu->running to true
             * while holding it; no need to check pending_cpus again.
             */
            qatomic_set(&cpu->running, false);
            exclusive_idle();
            /* Now pending_cpus is zero.  */
            qatomic_set(&cpu->running, true);
        } else {
            /* Counted in pending_cpus, go ahead and release the
             * waiter at cpu_exec_end.
             */
        }
    }
}
288fe0007f3SPhilippe Mathieu-Daudé
/* Mark cpu as not executing, and release pending exclusive ops.
 * Mirror image of cpu_exec_start(); the barrier pairs with the one in
 * start_exclusive(). */
void cpu_exec_end(CPUState *cpu)
{
    qatomic_set(&cpu->running, false);

    /* Write cpu->running before reading pending_cpus. */
    smp_mb();

    /* 1. start_exclusive saw cpu->running == true.  Then it will increment
     * pending_cpus and wait for exclusive_cond.  After taking the lock
     * we'll see cpu->has_waiter == true.
     *
     * 2. start_exclusive saw cpu->running == false but here pending_cpus >= 1.
     * This includes the case when an exclusive item started after setting
     * cpu->running to false and before we read pending_cpus.  Then we'll see
     * cpu->has_waiter == false and not touch pending_cpus.  The next call to
     * cpu_exec_start will run exclusive_idle if still necessary, thus waiting
     * for the item to complete.
     *
     * 3. pending_cpus == 0.  Then start_exclusive is definitely going to
     * see cpu->running == false, and it can ignore this CPU until the
     * next cpu_exec_start.
     */
    if (unlikely(qatomic_read(&pending_cpus))) {
        QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
        if (cpu->has_waiter) {
            cpu->has_waiter = false;
            qatomic_set(&pending_cpus, pending_cpus - 1);
            /* We were the last counted CPU: wake start_exclusive(). */
            if (pending_cpus == 1) {
                qemu_cond_signal(&exclusive_cond);
            }
        }
    }
}
323fe0007f3SPhilippe Mathieu-Daudé
/*
 * Queue func(cpu, data) to run asynchronously on @cpu inside an exclusive
 * section (all other CPUs stopped).  Returns immediately; the heap-owned
 * item is freed after it runs.
 */
void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func,
                           run_on_cpu_data data)
{
    struct qemu_work_item *work = g_new0(struct qemu_work_item, 1);

    work->func = func;
    work->data = data;
    /* Run under start_exclusive()/end_exclusive(), then g_free'd. */
    work->exclusive = true;
    work->free = true;

    queue_work_on_cpu(cpu, work);
}
337fe0007f3SPhilippe Mathieu-Daudé
/*
 * Discard every work item still queued on @cpu without running it,
 * freeing the heap-owned ones (wi->free).
 */
void free_queued_cpu_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    while ((wi = QSIMPLEQ_FIRST(&cpu->work_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&cpu->work_list, node);
        if (wi->free) {
            g_free(wi);
        }
    }
}
348f8b64d35SAkihiko Odaki
/*
 * Drain @cpu's work list, executing each item.  work_mutex is dropped
 * while an item's callback runs; exclusive items additionally run outside
 * the BQL, bracketed by start_exclusive()/end_exclusive().  Synchronous
 * waiters in do_run_on_cpu() are woken via qemu_work_cond at the end.
 */
void process_queued_cpu_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    qemu_mutex_lock(&cpu->work_mutex);
    if (QSIMPLEQ_EMPTY(&cpu->work_list)) {
        qemu_mutex_unlock(&cpu->work_mutex);
        return;
    }
    while (!QSIMPLEQ_EMPTY(&cpu->work_list)) {
        wi = QSIMPLEQ_FIRST(&cpu->work_list);
        QSIMPLEQ_REMOVE_HEAD(&cpu->work_list, node);
        /* Drop the list lock while the callback runs. */
        qemu_mutex_unlock(&cpu->work_mutex);
        if (wi->exclusive) {
            /* Running work items outside the BQL avoids the following deadlock:
             * 1) start_exclusive() is called with the BQL taken while another
             * CPU is running; 2) cpu_exec in the other CPU tries to takes the
             * BQL, so it goes to sleep; start_exclusive() is sleeping too, so
             * neither CPU can proceed.
             */
            bql_unlock();
            start_exclusive();
            wi->func(cpu, wi->data);
            end_exclusive();
            bql_lock();
        } else {
            wi->func(cpu, wi->data);
        }
        qemu_mutex_lock(&cpu->work_mutex);
        if (wi->free) {
            g_free(wi);
        } else {
            /* Release-store pairs with the acquire-load in do_run_on_cpu. */
            qatomic_store_release(&wi->done, true);
        }
    }
    qemu_mutex_unlock(&cpu->work_mutex);
    qemu_cond_broadcast(&qemu_work_cond);
}
387fe0007f3SPhilippe Mathieu-Daudé
388fe0007f3SPhilippe Mathieu-Daudé /* Add a breakpoint. */
cpu_breakpoint_insert(CPUState * cpu,vaddr pc,int flags,CPUBreakpoint ** breakpoint)389fe0007f3SPhilippe Mathieu-Daudé int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
390fe0007f3SPhilippe Mathieu-Daudé CPUBreakpoint **breakpoint)
391fe0007f3SPhilippe Mathieu-Daudé {
392fe0007f3SPhilippe Mathieu-Daudé CPUClass *cc = CPU_GET_CLASS(cpu);
393fe0007f3SPhilippe Mathieu-Daudé CPUBreakpoint *bp;
394fe0007f3SPhilippe Mathieu-Daudé
395fe0007f3SPhilippe Mathieu-Daudé if (cc->gdb_adjust_breakpoint) {
396fe0007f3SPhilippe Mathieu-Daudé pc = cc->gdb_adjust_breakpoint(cpu, pc);
397fe0007f3SPhilippe Mathieu-Daudé }
398fe0007f3SPhilippe Mathieu-Daudé
399fe0007f3SPhilippe Mathieu-Daudé bp = g_malloc(sizeof(*bp));
400fe0007f3SPhilippe Mathieu-Daudé
401fe0007f3SPhilippe Mathieu-Daudé bp->pc = pc;
402fe0007f3SPhilippe Mathieu-Daudé bp->flags = flags;
403fe0007f3SPhilippe Mathieu-Daudé
404fe0007f3SPhilippe Mathieu-Daudé /* keep all GDB-injected breakpoints in front */
405fe0007f3SPhilippe Mathieu-Daudé if (flags & BP_GDB) {
406fe0007f3SPhilippe Mathieu-Daudé QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
407fe0007f3SPhilippe Mathieu-Daudé } else {
408fe0007f3SPhilippe Mathieu-Daudé QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
409fe0007f3SPhilippe Mathieu-Daudé }
410fe0007f3SPhilippe Mathieu-Daudé
411fe0007f3SPhilippe Mathieu-Daudé if (breakpoint) {
412fe0007f3SPhilippe Mathieu-Daudé *breakpoint = bp;
413fe0007f3SPhilippe Mathieu-Daudé }
414fe0007f3SPhilippe Mathieu-Daudé
415fe0007f3SPhilippe Mathieu-Daudé trace_breakpoint_insert(cpu->cpu_index, pc, flags);
416fe0007f3SPhilippe Mathieu-Daudé return 0;
417fe0007f3SPhilippe Mathieu-Daudé }
418fe0007f3SPhilippe Mathieu-Daudé
419fe0007f3SPhilippe Mathieu-Daudé /* Remove a specific breakpoint. */
cpu_breakpoint_remove(CPUState * cpu,vaddr pc,int flags)420fe0007f3SPhilippe Mathieu-Daudé int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
421fe0007f3SPhilippe Mathieu-Daudé {
422fe0007f3SPhilippe Mathieu-Daudé CPUClass *cc = CPU_GET_CLASS(cpu);
423fe0007f3SPhilippe Mathieu-Daudé CPUBreakpoint *bp;
424fe0007f3SPhilippe Mathieu-Daudé
425fe0007f3SPhilippe Mathieu-Daudé if (cc->gdb_adjust_breakpoint) {
426fe0007f3SPhilippe Mathieu-Daudé pc = cc->gdb_adjust_breakpoint(cpu, pc);
427fe0007f3SPhilippe Mathieu-Daudé }
428fe0007f3SPhilippe Mathieu-Daudé
429fe0007f3SPhilippe Mathieu-Daudé QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
430fe0007f3SPhilippe Mathieu-Daudé if (bp->pc == pc && bp->flags == flags) {
431fe0007f3SPhilippe Mathieu-Daudé cpu_breakpoint_remove_by_ref(cpu, bp);
432fe0007f3SPhilippe Mathieu-Daudé return 0;
433fe0007f3SPhilippe Mathieu-Daudé }
434fe0007f3SPhilippe Mathieu-Daudé }
435fe0007f3SPhilippe Mathieu-Daudé return -ENOENT;
436fe0007f3SPhilippe Mathieu-Daudé }
437fe0007f3SPhilippe Mathieu-Daudé
/* Remove a specific breakpoint by reference.  Unlinks @bp from the CPU's
 * list, emits the trace event (which still reads @bp) and frees it. */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *bp)
{
    QTAILQ_REMOVE(&cpu->breakpoints, bp, entry);

    trace_breakpoint_remove(cpu->cpu_index, bp->pc, bp->flags);
    g_free(bp);
}
446fe0007f3SPhilippe Mathieu-Daudé
447fe0007f3SPhilippe Mathieu-Daudé /* Remove all matching breakpoints. */
cpu_breakpoint_remove_all(CPUState * cpu,int mask)448fe0007f3SPhilippe Mathieu-Daudé void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
449fe0007f3SPhilippe Mathieu-Daudé {
450fe0007f3SPhilippe Mathieu-Daudé CPUBreakpoint *bp, *next;
451fe0007f3SPhilippe Mathieu-Daudé
452fe0007f3SPhilippe Mathieu-Daudé QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
453fe0007f3SPhilippe Mathieu-Daudé if (bp->flags & mask) {
454fe0007f3SPhilippe Mathieu-Daudé cpu_breakpoint_remove_by_ref(cpu, bp);
455fe0007f3SPhilippe Mathieu-Daudé }
456fe0007f3SPhilippe Mathieu-Daudé }
457fe0007f3SPhilippe Mathieu-Daudé }
458