xref: /openbmc/qemu/cpu-common.c (revision fe0007f3)
1*fe0007f3SPhilippe Mathieu-Daudé /*
2*fe0007f3SPhilippe Mathieu-Daudé  * CPU thread main loop - common bits for user and system mode emulation
3*fe0007f3SPhilippe Mathieu-Daudé  *
4*fe0007f3SPhilippe Mathieu-Daudé  *  Copyright (c) 2003-2005 Fabrice Bellard
5*fe0007f3SPhilippe Mathieu-Daudé  *
6*fe0007f3SPhilippe Mathieu-Daudé  * This library is free software; you can redistribute it and/or
7*fe0007f3SPhilippe Mathieu-Daudé  * modify it under the terms of the GNU Lesser General Public
8*fe0007f3SPhilippe Mathieu-Daudé  * License as published by the Free Software Foundation; either
9*fe0007f3SPhilippe Mathieu-Daudé  * version 2.1 of the License, or (at your option) any later version.
10*fe0007f3SPhilippe Mathieu-Daudé  *
11*fe0007f3SPhilippe Mathieu-Daudé  * This library is distributed in the hope that it will be useful,
12*fe0007f3SPhilippe Mathieu-Daudé  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13*fe0007f3SPhilippe Mathieu-Daudé  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14*fe0007f3SPhilippe Mathieu-Daudé  * Lesser General Public License for more details.
15*fe0007f3SPhilippe Mathieu-Daudé  *
16*fe0007f3SPhilippe Mathieu-Daudé  * You should have received a copy of the GNU Lesser General Public
17*fe0007f3SPhilippe Mathieu-Daudé  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18*fe0007f3SPhilippe Mathieu-Daudé  */
19*fe0007f3SPhilippe Mathieu-Daudé 
20*fe0007f3SPhilippe Mathieu-Daudé #include "qemu/osdep.h"
21*fe0007f3SPhilippe Mathieu-Daudé #include "qemu/main-loop.h"
22*fe0007f3SPhilippe Mathieu-Daudé #include "exec/cpu-common.h"
23*fe0007f3SPhilippe Mathieu-Daudé #include "hw/core/cpu.h"
24*fe0007f3SPhilippe Mathieu-Daudé #include "sysemu/cpus.h"
25*fe0007f3SPhilippe Mathieu-Daudé #include "qemu/lockable.h"
26*fe0007f3SPhilippe Mathieu-Daudé #include "trace/trace-root.h"
27*fe0007f3SPhilippe Mathieu-Daudé 
/* Protects the global CPU list and the exclusive-section state below. */
QemuMutex qemu_cpu_list_lock;
static QemuCond exclusive_cond;    /* signalled when the last running CPU parks */
static QemuCond exclusive_resume;  /* broadcast when an exclusive section ends */
static QemuCond qemu_work_cond;    /* broadcast after queued work items complete */

/* >= 1 if a thread is inside start_exclusive/end_exclusive.  Written
 * under qemu_cpu_list_lock, read with atomic operations.
 */
static int pending_cpus;
37*fe0007f3SPhilippe Mathieu-Daudé 
/* Initialize (or re-initialize) the CPU list lock and condition variables. */
void qemu_init_cpu_list(void)
{
    /* This is needed because qemu_init_cpu_list is also called by the
     * child process in a fork: the child starts with no exclusive
     * section in progress, whatever the parent was doing.  */
    pending_cpus = 0;

    qemu_mutex_init(&qemu_cpu_list_lock);
    qemu_cond_init(&exclusive_cond);
    qemu_cond_init(&exclusive_resume);
    qemu_cond_init(&qemu_work_cond);
}
49*fe0007f3SPhilippe Mathieu-Daudé 
/* Acquire the CPU list lock (wrapper for callers outside this file). */
void cpu_list_lock(void)
{
    qemu_mutex_lock(&qemu_cpu_list_lock);
}
54*fe0007f3SPhilippe Mathieu-Daudé 
/* Release the CPU list lock (wrapper for callers outside this file). */
void cpu_list_unlock(void)
{
    qemu_mutex_unlock(&qemu_cpu_list_lock);
}
59*fe0007f3SPhilippe Mathieu-Daudé 
60*fe0007f3SPhilippe Mathieu-Daudé static bool cpu_index_auto_assigned;
61*fe0007f3SPhilippe Mathieu-Daudé 
62*fe0007f3SPhilippe Mathieu-Daudé static int cpu_get_free_index(void)
63*fe0007f3SPhilippe Mathieu-Daudé {
64*fe0007f3SPhilippe Mathieu-Daudé     CPUState *some_cpu;
65*fe0007f3SPhilippe Mathieu-Daudé     int max_cpu_index = 0;
66*fe0007f3SPhilippe Mathieu-Daudé 
67*fe0007f3SPhilippe Mathieu-Daudé     cpu_index_auto_assigned = true;
68*fe0007f3SPhilippe Mathieu-Daudé     CPU_FOREACH(some_cpu) {
69*fe0007f3SPhilippe Mathieu-Daudé         if (some_cpu->cpu_index >= max_cpu_index) {
70*fe0007f3SPhilippe Mathieu-Daudé             max_cpu_index = some_cpu->cpu_index + 1;
71*fe0007f3SPhilippe Mathieu-Daudé         }
72*fe0007f3SPhilippe Mathieu-Daudé     }
73*fe0007f3SPhilippe Mathieu-Daudé     return max_cpu_index;
74*fe0007f3SPhilippe Mathieu-Daudé }
75*fe0007f3SPhilippe Mathieu-Daudé 
/* Global list of all CPUs; traversed with CPU_FOREACH under RCU. */
CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* Bumped (under qemu_cpu_list_lock) on every cpu_list_add/cpu_list_remove,
 * so readers can detect that the CPU list changed between two reads. */
static unsigned int cpu_list_generation_id;

unsigned int cpu_list_generation_id_get(void)
{
    return cpu_list_generation_id;
}
83*fe0007f3SPhilippe Mathieu-Daudé 
84*fe0007f3SPhilippe Mathieu-Daudé void cpu_list_add(CPUState *cpu)
85*fe0007f3SPhilippe Mathieu-Daudé {
86*fe0007f3SPhilippe Mathieu-Daudé     QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
87*fe0007f3SPhilippe Mathieu-Daudé     if (cpu->cpu_index == UNASSIGNED_CPU_INDEX) {
88*fe0007f3SPhilippe Mathieu-Daudé         cpu->cpu_index = cpu_get_free_index();
89*fe0007f3SPhilippe Mathieu-Daudé         assert(cpu->cpu_index != UNASSIGNED_CPU_INDEX);
90*fe0007f3SPhilippe Mathieu-Daudé     } else {
91*fe0007f3SPhilippe Mathieu-Daudé         assert(!cpu_index_auto_assigned);
92*fe0007f3SPhilippe Mathieu-Daudé     }
93*fe0007f3SPhilippe Mathieu-Daudé     QTAILQ_INSERT_TAIL_RCU(&cpus, cpu, node);
94*fe0007f3SPhilippe Mathieu-Daudé     cpu_list_generation_id++;
95*fe0007f3SPhilippe Mathieu-Daudé }
96*fe0007f3SPhilippe Mathieu-Daudé 
97*fe0007f3SPhilippe Mathieu-Daudé void cpu_list_remove(CPUState *cpu)
98*fe0007f3SPhilippe Mathieu-Daudé {
99*fe0007f3SPhilippe Mathieu-Daudé     QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
100*fe0007f3SPhilippe Mathieu-Daudé     if (!QTAILQ_IN_USE(cpu, node)) {
101*fe0007f3SPhilippe Mathieu-Daudé         /* there is nothing to undo since cpu_exec_init() hasn't been called */
102*fe0007f3SPhilippe Mathieu-Daudé         return;
103*fe0007f3SPhilippe Mathieu-Daudé     }
104*fe0007f3SPhilippe Mathieu-Daudé 
105*fe0007f3SPhilippe Mathieu-Daudé     QTAILQ_REMOVE_RCU(&cpus, cpu, node);
106*fe0007f3SPhilippe Mathieu-Daudé     cpu->cpu_index = UNASSIGNED_CPU_INDEX;
107*fe0007f3SPhilippe Mathieu-Daudé     cpu_list_generation_id++;
108*fe0007f3SPhilippe Mathieu-Daudé }
109*fe0007f3SPhilippe Mathieu-Daudé 
110*fe0007f3SPhilippe Mathieu-Daudé CPUState *qemu_get_cpu(int index)
111*fe0007f3SPhilippe Mathieu-Daudé {
112*fe0007f3SPhilippe Mathieu-Daudé     CPUState *cpu;
113*fe0007f3SPhilippe Mathieu-Daudé 
114*fe0007f3SPhilippe Mathieu-Daudé     CPU_FOREACH(cpu) {
115*fe0007f3SPhilippe Mathieu-Daudé         if (cpu->cpu_index == index) {
116*fe0007f3SPhilippe Mathieu-Daudé             return cpu;
117*fe0007f3SPhilippe Mathieu-Daudé         }
118*fe0007f3SPhilippe Mathieu-Daudé     }
119*fe0007f3SPhilippe Mathieu-Daudé 
120*fe0007f3SPhilippe Mathieu-Daudé     return NULL;
121*fe0007f3SPhilippe Mathieu-Daudé }
122*fe0007f3SPhilippe Mathieu-Daudé 
/* current CPU in the current thread. It is only valid inside cpu_exec() */
__thread CPUState *current_cpu;

/* A deferred call queued on a CPU's work_list by queue_work_on_cpu(). */
struct qemu_work_item {
    QSIMPLEQ_ENTRY(qemu_work_item) node;
    run_on_cpu_func func;       /* callback to run on the target CPU */
    run_on_cpu_data data;       /* opaque argument handed to func */
    /* free: heap-allocated, freed by process_queued_cpu_work;
     * exclusive: run the callback inside start/end_exclusive;
     * done: set (release) when the callback has finished, for waiters. */
    bool free, exclusive, done;
};
132*fe0007f3SPhilippe Mathieu-Daudé 
/* Append @wi to @cpu's work list and kick the CPU so it notices.
 * wi->done is cleared under work_mutex; process_queued_cpu_work sets it
 * once the item has run (for items the caller waits on).
 */
static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi)
{
    qemu_mutex_lock(&cpu->work_mutex);
    QSIMPLEQ_INSERT_TAIL(&cpu->work_list, wi, node);
    wi->done = false;
    qemu_mutex_unlock(&cpu->work_mutex);

    /* Wake the target CPU so it drains its work list promptly. */
    qemu_cpu_kick(cpu);
}
142*fe0007f3SPhilippe Mathieu-Daudé 
/* Run func(cpu, data) on @cpu and wait for it to complete.  If @cpu is
 * the calling thread's own CPU the function runs immediately; otherwise
 * the work item is queued (on the stack -- safe because we block until
 * it is done) and the caller sleeps on qemu_work_cond, releasing @mutex
 * (which must be held on entry) while waiting.
 */
void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data,
                   QemuMutex *mutex)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(cpu)) {
        func(cpu, data);
        return;
    }

    wi.func = func;
    wi.data = data;
    wi.done = false;
    wi.free = false;        /* stack-allocated: waiter owns the storage */
    wi.exclusive = false;

    queue_work_on_cpu(cpu, &wi);
    /* Pairs with the store-release of wi.done in process_queued_cpu_work. */
    while (!qatomic_load_acquire(&wi.done)) {
        CPUState *self_cpu = current_cpu;

        /* NOTE(review): current_cpu is saved and restored around the wait --
         * presumably qemu_cond_wait can clobber the thread-local in some
         * configuration; confirm before relying on it being preserved. */
        qemu_cond_wait(&qemu_work_cond, mutex);
        current_cpu = self_cpu;
    }
}
167*fe0007f3SPhilippe Mathieu-Daudé 
168*fe0007f3SPhilippe Mathieu-Daudé void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
169*fe0007f3SPhilippe Mathieu-Daudé {
170*fe0007f3SPhilippe Mathieu-Daudé     struct qemu_work_item *wi;
171*fe0007f3SPhilippe Mathieu-Daudé 
172*fe0007f3SPhilippe Mathieu-Daudé     wi = g_new0(struct qemu_work_item, 1);
173*fe0007f3SPhilippe Mathieu-Daudé     wi->func = func;
174*fe0007f3SPhilippe Mathieu-Daudé     wi->data = data;
175*fe0007f3SPhilippe Mathieu-Daudé     wi->free = true;
176*fe0007f3SPhilippe Mathieu-Daudé 
177*fe0007f3SPhilippe Mathieu-Daudé     queue_work_on_cpu(cpu, wi);
178*fe0007f3SPhilippe Mathieu-Daudé }
179*fe0007f3SPhilippe Mathieu-Daudé 
/* Wait for pending exclusive operations to complete.  The CPU list lock
   must be held; it is dropped and re-taken by the condition wait.  */
static inline void exclusive_idle(void)
{
    while (pending_cpus) {
        qemu_cond_wait(&exclusive_resume, &qemu_cpu_list_lock);
    }
}
188*fe0007f3SPhilippe Mathieu-Daudé 
/* Start an exclusive operation.
   Must only be called from outside cpu_exec.  Supports nesting: an inner
   call just bumps exclusive_context_count and returns.  */
void start_exclusive(void)
{
    CPUState *other_cpu;
    int running_cpus;

    /* Re-entrant call from within an exclusive section: count it. */
    if (current_cpu->exclusive_context_count) {
        current_cpu->exclusive_context_count++;
        return;
    }

    qemu_mutex_lock(&qemu_cpu_list_lock);
    /* Wait out any exclusive section already in progress. */
    exclusive_idle();

    /* Make all other cpus stop executing.  */
    qatomic_set(&pending_cpus, 1);

    /* Write pending_cpus before reading other_cpu->running.  */
    smp_mb();
    running_cpus = 0;
    CPU_FOREACH(other_cpu) {
        if (qatomic_read(&other_cpu->running)) {
            /* has_waiter tells cpu_exec_end it is counted in pending_cpus
             * and must signal exclusive_cond when it stops. */
            other_cpu->has_waiter = true;
            running_cpus++;
            qemu_cpu_kick(other_cpu);
        }
    }

    /* pending_cpus = running CPUs still to park, plus 1 for ourselves;
     * each cpu_exec_end decrements it and signals at 1. */
    qatomic_set(&pending_cpus, running_cpus + 1);
    while (pending_cpus > 1) {
        qemu_cond_wait(&exclusive_cond, &qemu_cpu_list_lock);
    }

    /* Can release mutex, no one will enter another exclusive
     * section until end_exclusive resets pending_cpus to 0.
     */
    qemu_mutex_unlock(&qemu_cpu_list_lock);

    current_cpu->exclusive_context_count = 1;
}
230*fe0007f3SPhilippe Mathieu-Daudé 
/* Finish an exclusive operation.  Only the outermost of a nested
   start_exclusive/end_exclusive pair actually releases the section.  */
void end_exclusive(void)
{
    current_cpu->exclusive_context_count--;
    if (current_cpu->exclusive_context_count) {
        return;
    }

    qemu_mutex_lock(&qemu_cpu_list_lock);
    qatomic_set(&pending_cpus, 0);
    /* Wake every CPU parked in exclusive_idle. */
    qemu_cond_broadcast(&exclusive_resume);
    qemu_mutex_unlock(&qemu_cpu_list_lock);
}
244*fe0007f3SPhilippe Mathieu-Daudé 
/* Wait for exclusive ops to finish, and begin cpu execution.
 * Marks @cpu as running; the fast path (no exclusive section pending)
 * takes no lock at all.  */
void cpu_exec_start(CPUState *cpu)
{
    qatomic_set(&cpu->running, true);

    /* Write cpu->running before reading pending_cpus.  */
    smp_mb();

    /* 1. start_exclusive saw cpu->running == true and pending_cpus >= 1.
     * After taking the lock we'll see cpu->has_waiter == true and run---not
     * for long because start_exclusive kicked us.  cpu_exec_end will
     * decrement pending_cpus and signal the waiter.
     *
     * 2. start_exclusive saw cpu->running == false but pending_cpus >= 1.
     * This includes the case when an exclusive item is running now.
     * Then we'll see cpu->has_waiter == false and wait for the item to
     * complete.
     *
     * 3. pending_cpus == 0.  Then start_exclusive is definitely going to
     * see cpu->running == true, and it will kick the CPU.
     */
    if (unlikely(qatomic_read(&pending_cpus))) {
        QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
        if (!cpu->has_waiter) {
            /* Not counted in pending_cpus, let the exclusive item
             * run.  Since we have the lock, just set cpu->running to true
             * while holding it; no need to check pending_cpus again.
             */
            qatomic_set(&cpu->running, false);
            exclusive_idle();
            /* Now pending_cpus is zero.  */
            qatomic_set(&cpu->running, true);
        } else {
            /* Counted in pending_cpus, go ahead and release the
             * waiter at cpu_exec_end.
             */
        }
    }
}
284*fe0007f3SPhilippe Mathieu-Daudé 
/* Mark cpu as not executing, and release pending exclusive ops.
 * Mirror image of cpu_exec_start: if this CPU was counted by a waiter
 * in start_exclusive, decrement pending_cpus and signal when we are
 * the last one.  */
void cpu_exec_end(CPUState *cpu)
{
    qatomic_set(&cpu->running, false);

    /* Write cpu->running before reading pending_cpus.  */
    smp_mb();

    /* 1. start_exclusive saw cpu->running == true.  Then it will increment
     * pending_cpus and wait for exclusive_cond.  After taking the lock
     * we'll see cpu->has_waiter == true.
     *
     * 2. start_exclusive saw cpu->running == false but here pending_cpus >= 1.
     * This includes the case when an exclusive item started after setting
     * cpu->running to false and before we read pending_cpus.  Then we'll see
     * cpu->has_waiter == false and not touch pending_cpus.  The next call to
     * cpu_exec_start will run exclusive_idle if still necessary, thus waiting
     * for the item to complete.
     *
     * 3. pending_cpus == 0.  Then start_exclusive is definitely going to
     * see cpu->running == false, and it can ignore this CPU until the
     * next cpu_exec_start.
     */
    if (unlikely(qatomic_read(&pending_cpus))) {
        QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
        if (cpu->has_waiter) {
            cpu->has_waiter = false;
            qatomic_set(&pending_cpus, pending_cpus - 1);
            /* pending_cpus == 1 means only the exclusive requester is left. */
            if (pending_cpus == 1) {
                qemu_cond_signal(&exclusive_cond);
            }
        }
    }
}
319*fe0007f3SPhilippe Mathieu-Daudé 
320*fe0007f3SPhilippe Mathieu-Daudé void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func,
321*fe0007f3SPhilippe Mathieu-Daudé                            run_on_cpu_data data)
322*fe0007f3SPhilippe Mathieu-Daudé {
323*fe0007f3SPhilippe Mathieu-Daudé     struct qemu_work_item *wi;
324*fe0007f3SPhilippe Mathieu-Daudé 
325*fe0007f3SPhilippe Mathieu-Daudé     wi = g_new0(struct qemu_work_item, 1);
326*fe0007f3SPhilippe Mathieu-Daudé     wi->func = func;
327*fe0007f3SPhilippe Mathieu-Daudé     wi->data = data;
328*fe0007f3SPhilippe Mathieu-Daudé     wi->free = true;
329*fe0007f3SPhilippe Mathieu-Daudé     wi->exclusive = true;
330*fe0007f3SPhilippe Mathieu-Daudé 
331*fe0007f3SPhilippe Mathieu-Daudé     queue_work_on_cpu(cpu, wi);
332*fe0007f3SPhilippe Mathieu-Daudé }
333*fe0007f3SPhilippe Mathieu-Daudé 
/* Drain @cpu's work list, running each queued item.  Called on the CPU's
 * own thread.  work_mutex is dropped around each callback so that the
 * callback may itself queue work; waiters are notified at the end.  */
void process_queued_cpu_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    qemu_mutex_lock(&cpu->work_mutex);
    if (QSIMPLEQ_EMPTY(&cpu->work_list)) {
        qemu_mutex_unlock(&cpu->work_mutex);
        return;
    }
    while (!QSIMPLEQ_EMPTY(&cpu->work_list)) {
        wi = QSIMPLEQ_FIRST(&cpu->work_list);
        QSIMPLEQ_REMOVE_HEAD(&cpu->work_list, node);
        /* Run the callback without holding work_mutex. */
        qemu_mutex_unlock(&cpu->work_mutex);
        if (wi->exclusive) {
            /* Running work items outside the BQL avoids the following deadlock:
             * 1) start_exclusive() is called with the BQL taken while another
             * CPU is running; 2) cpu_exec in the other CPU tries to takes the
             * BQL, so it goes to sleep; start_exclusive() is sleeping too, so
             * neither CPU can proceed.
             */
            qemu_mutex_unlock_iothread();
            start_exclusive();
            wi->func(cpu, wi->data);
            end_exclusive();
            qemu_mutex_lock_iothread();
        } else {
            wi->func(cpu, wi->data);
        }
        qemu_mutex_lock(&cpu->work_mutex);
        if (wi->free) {
            /* Async item (no waiter): we own the storage. */
            g_free(wi);
        } else {
            /* Sync item: release-store pairs with the acquire-load in
             * do_run_on_cpu's wait loop. */
            qatomic_store_release(&wi->done, true);
        }
    }
    qemu_mutex_unlock(&cpu->work_mutex);
    /* Wake every thread blocked in do_run_on_cpu. */
    qemu_cond_broadcast(&qemu_work_cond);
}
372*fe0007f3SPhilippe Mathieu-Daudé 
373*fe0007f3SPhilippe Mathieu-Daudé /* Add a breakpoint.  */
374*fe0007f3SPhilippe Mathieu-Daudé int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
375*fe0007f3SPhilippe Mathieu-Daudé                           CPUBreakpoint **breakpoint)
376*fe0007f3SPhilippe Mathieu-Daudé {
377*fe0007f3SPhilippe Mathieu-Daudé     CPUClass *cc = CPU_GET_CLASS(cpu);
378*fe0007f3SPhilippe Mathieu-Daudé     CPUBreakpoint *bp;
379*fe0007f3SPhilippe Mathieu-Daudé 
380*fe0007f3SPhilippe Mathieu-Daudé     if (cc->gdb_adjust_breakpoint) {
381*fe0007f3SPhilippe Mathieu-Daudé         pc = cc->gdb_adjust_breakpoint(cpu, pc);
382*fe0007f3SPhilippe Mathieu-Daudé     }
383*fe0007f3SPhilippe Mathieu-Daudé 
384*fe0007f3SPhilippe Mathieu-Daudé     bp = g_malloc(sizeof(*bp));
385*fe0007f3SPhilippe Mathieu-Daudé 
386*fe0007f3SPhilippe Mathieu-Daudé     bp->pc = pc;
387*fe0007f3SPhilippe Mathieu-Daudé     bp->flags = flags;
388*fe0007f3SPhilippe Mathieu-Daudé 
389*fe0007f3SPhilippe Mathieu-Daudé     /* keep all GDB-injected breakpoints in front */
390*fe0007f3SPhilippe Mathieu-Daudé     if (flags & BP_GDB) {
391*fe0007f3SPhilippe Mathieu-Daudé         QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
392*fe0007f3SPhilippe Mathieu-Daudé     } else {
393*fe0007f3SPhilippe Mathieu-Daudé         QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
394*fe0007f3SPhilippe Mathieu-Daudé     }
395*fe0007f3SPhilippe Mathieu-Daudé 
396*fe0007f3SPhilippe Mathieu-Daudé     if (breakpoint) {
397*fe0007f3SPhilippe Mathieu-Daudé         *breakpoint = bp;
398*fe0007f3SPhilippe Mathieu-Daudé     }
399*fe0007f3SPhilippe Mathieu-Daudé 
400*fe0007f3SPhilippe Mathieu-Daudé     trace_breakpoint_insert(cpu->cpu_index, pc, flags);
401*fe0007f3SPhilippe Mathieu-Daudé     return 0;
402*fe0007f3SPhilippe Mathieu-Daudé }
403*fe0007f3SPhilippe Mathieu-Daudé 
404*fe0007f3SPhilippe Mathieu-Daudé /* Remove a specific breakpoint.  */
405*fe0007f3SPhilippe Mathieu-Daudé int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
406*fe0007f3SPhilippe Mathieu-Daudé {
407*fe0007f3SPhilippe Mathieu-Daudé     CPUClass *cc = CPU_GET_CLASS(cpu);
408*fe0007f3SPhilippe Mathieu-Daudé     CPUBreakpoint *bp;
409*fe0007f3SPhilippe Mathieu-Daudé 
410*fe0007f3SPhilippe Mathieu-Daudé     if (cc->gdb_adjust_breakpoint) {
411*fe0007f3SPhilippe Mathieu-Daudé         pc = cc->gdb_adjust_breakpoint(cpu, pc);
412*fe0007f3SPhilippe Mathieu-Daudé     }
413*fe0007f3SPhilippe Mathieu-Daudé 
414*fe0007f3SPhilippe Mathieu-Daudé     QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
415*fe0007f3SPhilippe Mathieu-Daudé         if (bp->pc == pc && bp->flags == flags) {
416*fe0007f3SPhilippe Mathieu-Daudé             cpu_breakpoint_remove_by_ref(cpu, bp);
417*fe0007f3SPhilippe Mathieu-Daudé             return 0;
418*fe0007f3SPhilippe Mathieu-Daudé         }
419*fe0007f3SPhilippe Mathieu-Daudé     }
420*fe0007f3SPhilippe Mathieu-Daudé     return -ENOENT;
421*fe0007f3SPhilippe Mathieu-Daudé }
422*fe0007f3SPhilippe Mathieu-Daudé 
/* Remove a specific breakpoint by reference.  Unlinks @bp from the
 * CPU's list and frees it; @bp must belong to @cpu's list.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *bp)
{
    QTAILQ_REMOVE(&cpu->breakpoints, bp, entry);

    /* Trace before freeing: the event still reads bp's fields. */
    trace_breakpoint_remove(cpu->cpu_index, bp->pc, bp->flags);
    g_free(bp);
}
431*fe0007f3SPhilippe Mathieu-Daudé 
432*fe0007f3SPhilippe Mathieu-Daudé /* Remove all matching breakpoints. */
433*fe0007f3SPhilippe Mathieu-Daudé void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
434*fe0007f3SPhilippe Mathieu-Daudé {
435*fe0007f3SPhilippe Mathieu-Daudé     CPUBreakpoint *bp, *next;
436*fe0007f3SPhilippe Mathieu-Daudé 
437*fe0007f3SPhilippe Mathieu-Daudé     QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
438*fe0007f3SPhilippe Mathieu-Daudé         if (bp->flags & mask) {
439*fe0007f3SPhilippe Mathieu-Daudé             cpu_breakpoint_remove_by_ref(cpu, bp);
440*fe0007f3SPhilippe Mathieu-Daudé         }
441*fe0007f3SPhilippe Mathieu-Daudé     }
442*fe0007f3SPhilippe Mathieu-Daudé }
443