xref: /openbmc/qemu/monitor/qmp.c (revision 3e6bed61)
1 /*
2  * QEMU monitor
3  *
4  * Copyright (c) 2003-2004 Fabrice Bellard
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to deal
8  * in the Software without restriction, including without limitation the rights
9  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10  * copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22  * THE SOFTWARE.
23  */
24 
25 #include "qemu/osdep.h"
26 
27 #include "chardev/char-io.h"
28 #include "monitor-internal.h"
29 #include "qapi/error.h"
30 #include "qapi/qapi-commands-control.h"
31 #include "qapi/qmp/qdict.h"
32 #include "qapi/qmp/qjson.h"
33 #include "qapi/qmp/qlist.h"
34 #include "trace.h"
35 
/*
 * One queued item of work for the QMP dispatcher: either a parsed
 * request to execute, or a parse/validation error to report back.
 */
struct QMPRequest {
    /* Owner of the request */
    MonitorQMP *mon;
    /*
     * Request object to be handled or Error to be reported
     * (exactly one of them is non-null)
     */
    QObject *req;
    Error *err;
};
typedef struct QMPRequest QMPRequest;
47 
/* All QMP commands, and the subset available before capability negotiation */
QmpCommandList qmp_commands, qmp_cap_negotiation_commands;
49 
50 static bool qmp_oob_enabled(MonitorQMP *mon)
51 {
52     return mon->capab[QMP_CAPABILITY_OOB];
53 }
54 
55 static void monitor_qmp_caps_reset(MonitorQMP *mon)
56 {
57     memset(mon->capab_offered, 0, sizeof(mon->capab_offered));
58     memset(mon->capab, 0, sizeof(mon->capab));
59     mon->capab_offered[QMP_CAPABILITY_OOB] = mon->common.use_io_thread;
60 }
61 
62 static void qmp_request_free(QMPRequest *req)
63 {
64     qobject_unref(req->req);
65     error_free(req->err);
66     g_free(req);
67 }
68 
69 /* Caller must hold mon->qmp.qmp_queue_lock */
70 static void monitor_qmp_cleanup_req_queue_locked(MonitorQMP *mon)
71 {
72     while (!g_queue_is_empty(mon->qmp_requests)) {
73         qmp_request_free(g_queue_pop_head(mon->qmp_requests));
74     }
75 }
76 
77 static void monitor_qmp_cleanup_queue_and_resume(MonitorQMP *mon)
78 {
79     QEMU_LOCK_GUARD(&mon->qmp_queue_lock);
80 
81     /*
82      * Same condition as in monitor_qmp_dispatcher_co(), but before
83      * removing an element from the queue (hence no `- 1`).
84      * Also, the queue should not be empty either, otherwise the
85      * monitor hasn't been suspended yet (or was already resumed).
86      */
87     bool need_resume = (!qmp_oob_enabled(mon) ||
88         mon->qmp_requests->length == QMP_REQ_QUEUE_LEN_MAX)
89         && !g_queue_is_empty(mon->qmp_requests);
90 
91     monitor_qmp_cleanup_req_queue_locked(mon);
92 
93     if (need_resume) {
94         /*
95          * handle_qmp_command() suspended the monitor because the
96          * request queue filled up, to be resumed when the queue has
97          * space again.  We just emptied it; resume the monitor.
98          *
99          * Without this, the monitor would remain suspended forever
100          * when we get here while the monitor is suspended.  An
101          * unfortunately timed CHR_EVENT_CLOSED can do the trick.
102          */
103         monitor_resume(&mon->common);
104     }
105 
106 }
107 
108 void qmp_send_response(MonitorQMP *mon, const QDict *rsp)
109 {
110     const QObject *data = QOBJECT(rsp);
111     GString *json;
112 
113     json = qobject_to_json_pretty(data, mon->pretty);
114     assert(json != NULL);
115     trace_monitor_qmp_respond(mon, json->str);
116 
117     g_string_append_c(json, '\n');
118     monitor_puts(&mon->common, json->str);
119 
120     g_string_free(json, true);
121 }
122 
123 /*
124  * Emit QMP response @rsp to @mon.
125  * Null @rsp can only happen for commands with QCO_NO_SUCCESS_RESP.
126  * Nothing is emitted then.
127  */
128 static void monitor_qmp_respond(MonitorQMP *mon, QDict *rsp)
129 {
130     if (rsp) {
131         qmp_send_response(mon, rsp);
132     }
133 }
134 
135 /*
136  * Runs outside of coroutine context for OOB commands, but in
137  * coroutine context for everything else.
138  */
139 static void monitor_qmp_dispatch(MonitorQMP *mon, QObject *req)
140 {
141     QDict *rsp;
142     QDict *error;
143 
144     rsp = qmp_dispatch(mon->commands, req, qmp_oob_enabled(mon),
145                        &mon->common);
146 
147     if (mon->commands == &qmp_cap_negotiation_commands) {
148         error = qdict_get_qdict(rsp, "error");
149         if (error
150             && !g_strcmp0(qdict_get_try_str(error, "class"),
151                     QapiErrorClass_str(ERROR_CLASS_COMMAND_NOT_FOUND))) {
152             /* Provide a more useful error message */
153             qdict_del(error, "desc");
154             qdict_put_str(error, "desc", "Expecting capabilities negotiation"
155                           " with 'qmp_capabilities'");
156         }
157     }
158 
159     monitor_qmp_respond(mon, rsp);
160     qobject_unref(rsp);
161 }
162 
163 /*
164  * Pop a QMP request from a monitor request queue.
165  * Return the request, or NULL all request queues are empty.
166  * We are using round-robin fashion to pop the request, to avoid
167  * processing commands only on a very busy monitor.  To achieve that,
168  * when we process one request on a specific monitor, we put that
169  * monitor to the end of mon_list queue.
170  *
171  * Note: if the function returned with non-NULL, then the caller will
172  * be with qmp_mon->qmp_queue_lock held, and the caller is responsible
173  * to release it.
174  */
static QMPRequest *monitor_qmp_requests_pop_any_with_lock(void)
{
    QMPRequest *req_obj = NULL;
    Monitor *mon;
    MonitorQMP *qmp_mon;

    QEMU_LOCK_GUARD(&monitor_lock);

    /* Scan monitors in priority order; first non-empty queue wins */
    QTAILQ_FOREACH(mon, &mon_list, entry) {
        if (!monitor_is_qmp(mon)) {
            continue;
        }

        qmp_mon = container_of(mon, MonitorQMP, common);
        qemu_mutex_lock(&qmp_mon->qmp_queue_lock);
        req_obj = g_queue_pop_head(qmp_mon->qmp_requests);
        if (req_obj) {
            /* With the lock of corresponding queue held */
            break;
        }
        /* Empty queue: release its lock before moving on */
        qemu_mutex_unlock(&qmp_mon->qmp_queue_lock);
    }

    if (req_obj) {
        /*
         * We found one request on the monitor. Degrade this monitor's
         * priority to lowest by re-inserting it to end of queue.
         */
        QTAILQ_REMOVE(&mon_list, mon, entry);
        QTAILQ_INSERT_TAIL(&mon_list, mon, entry);
    }

    /* Non-NULL return: caller still holds req_obj->mon->qmp_queue_lock */
    return req_obj;
}
209 
/*
 * Dispatcher coroutine: loops forever, popping queued QMP requests
 * round-robin across all monitors and executing them in band.
 * Terminates (clearing qmp_dispatcher_co) when shutdown is requested.
 */
void coroutine_fn monitor_qmp_dispatcher_co(void *data)
{
    QMPRequest *req_obj = NULL;
    QDict *rsp;
    bool oob_enabled;
    MonitorQMP *mon;

    while (true) {
        assert(qatomic_mb_read(&qmp_dispatcher_co_busy) == true);

        /*
         * Mark the dispatcher as not busy already here so that we
         * don't miss any new requests coming in the middle of our
         * processing.
         */
        qatomic_mb_set(&qmp_dispatcher_co_busy, false);

        /* On shutdown, don't take any more requests from the queue */
        if (qmp_dispatcher_co_shutdown) {
            qatomic_set(&qmp_dispatcher_co, NULL);
            return;
        }

        while (!(req_obj = monitor_qmp_requests_pop_any_with_lock())) {
            /*
             * No more requests to process.  Wait to be reentered from
             * handle_qmp_command() when it pushes more requests, or
             * from monitor_cleanup() when it requests shutdown.
             */
            if (!qmp_dispatcher_co_shutdown) {
                qemu_coroutine_yield();

                /*
                 * busy must be set to true again by whoever
                 * rescheduled us to avoid double scheduling
                 */
                assert(qatomic_xchg(&qmp_dispatcher_co_busy, false) == true);
            }

            /*
             * qmp_dispatcher_co_shutdown may have changed if we
             * yielded and were reentered from monitor_cleanup()
             */
            if (qmp_dispatcher_co_shutdown) {
                qatomic_set(&qmp_dispatcher_co, NULL);
                return;
            }
        }

        trace_monitor_qmp_in_band_dequeue(req_obj,
                                          req_obj->mon->qmp_requests->length);

        /*
         * @req_obj has a request, we hold req_obj->mon->qmp_queue_lock
         */

        mon = req_obj->mon;

        /*
         * We need to resume the monitor if handle_qmp_command()
         * suspended it.  Two cases:
         * 1. OOB enabled: mon->qmp_requests has no more space
         *    Resume right away, so that OOB commands can get executed while
         *    this request is being processed.
         * 2. OOB disabled: always
         *    Resume only after we're done processing the request,
         * We need to save qmp_oob_enabled() for later, because
         * qmp_qmp_capabilities() can change it.
         */
        oob_enabled = qmp_oob_enabled(mon);
        if (oob_enabled
            && mon->qmp_requests->length == QMP_REQ_QUEUE_LEN_MAX - 1) {
            monitor_resume(&mon->common);
        }

        /*
         * Drop the queue mutex now, before yielding, otherwise we might
         * deadlock if the main thread tries to lock it.
         */
        qemu_mutex_unlock(&mon->qmp_queue_lock);

        if (qatomic_xchg(&qmp_dispatcher_co_busy, true) == true) {
            /*
             * Someone rescheduled us (probably because a new requests
             * came in), but we didn't actually yield. Do that now,
             * only to be immediately reentered and removed from the
             * list of scheduled coroutines.
             */
            qemu_coroutine_yield();
        }

        /*
         * Move the coroutine from iohandler_ctx to qemu_aio_context for
         * executing the command handler so that it can make progress if it
         * involves an AIO_WAIT_WHILE().
         */
        aio_co_schedule(qemu_get_aio_context(), qmp_dispatcher_co);
        qemu_coroutine_yield();

        /* Process request */
        if (req_obj->req) {
            if (trace_event_get_state(TRACE_MONITOR_QMP_CMD_IN_BAND)) {
                QDict *qdict = qobject_to(QDict, req_obj->req);
                QObject *id = qdict ? qdict_get(qdict, "id") : NULL;
                GString *id_json;

                /* Requests without an "id" trace as the empty string */
                id_json = id ? qobject_to_json(id) : g_string_new(NULL);
                trace_monitor_qmp_cmd_in_band(id_json->str);
                g_string_free(id_json, true);
            }
            monitor_qmp_dispatch(mon, req_obj->req);
        } else {
            /* The parser queued an error instead of a request */
            assert(req_obj->err);
            trace_monitor_qmp_err_in_band(error_get_pretty(req_obj->err));
            rsp = qmp_error_response(req_obj->err);
            req_obj->err = NULL;
            monitor_qmp_respond(mon, rsp);
            qobject_unref(rsp);
        }

        if (!oob_enabled) {
            monitor_resume(&mon->common);
        }

        qmp_request_free(req_obj);

        /*
         * Yield and reschedule so the main loop stays responsive.
         *
         * Move back to iohandler_ctx so that nested event loops for
         * qemu_aio_context don't start new monitor commands.
         */
        aio_co_schedule(iohandler_get_aio_context(), qmp_dispatcher_co);
        qemu_coroutine_yield();
    }
}
346 
/*
 * JSON parser callback: a complete request was parsed (@req non-null)
 * or parsing failed (@err non-null).  OOB commands run immediately;
 * everything else is queued for monitor_qmp_dispatcher_co().
 */
static void handle_qmp_command(void *opaque, QObject *req, Error *err)
{
    MonitorQMP *mon = opaque;
    QDict *qdict = qobject_to(QDict, req);
    QMPRequest *req_obj;

    /* Exactly one of @req and @err is set */
    assert(!req != !err);

    if (req && trace_event_get_state_backends(TRACE_HANDLE_QMP_COMMAND)) {
        GString *req_json = qobject_to_json(req);
        trace_handle_qmp_command(mon, req_json->str);
        g_string_free(req_json, true);
    }

    if (qdict && qmp_is_oob(qdict)) {
        /* OOB commands are executed immediately */
        if (trace_event_get_state(TRACE_MONITOR_QMP_CMD_OUT_OF_BAND)) {
            QObject *id = qdict_get(qdict, "id");
            GString *id_json;

            id_json = id ? qobject_to_json(id) : g_string_new(NULL);
            trace_monitor_qmp_cmd_out_of_band(id_json->str);
            g_string_free(id_json, true);
        }
        monitor_qmp_dispatch(mon, req);
        qobject_unref(req);
        return;
    }

    /* In-band: wrap the request (or error) and enqueue it */
    req_obj = g_new0(QMPRequest, 1);
    req_obj->mon = mon;
    req_obj->req = req;
    req_obj->err = err;

    /* Protect qmp_requests and fetching its length. */
    WITH_QEMU_LOCK_GUARD(&mon->qmp_queue_lock) {

        /*
         * Suspend the monitor when we can't queue more requests after
         * this one.  Dequeuing in monitor_qmp_dispatcher_co() or
         * monitor_qmp_cleanup_queue_and_resume() will resume it.
         * Note that when OOB is disabled, we queue at most one command,
         * for backward compatibility.
         */
        if (!qmp_oob_enabled(mon) ||
            mon->qmp_requests->length == QMP_REQ_QUEUE_LEN_MAX - 1) {
            monitor_suspend(&mon->common);
        }

        /*
         * Put the request to the end of queue so that requests will be
         * handled in time order.  Ownership for req_obj, req,
         * etc. will be delivered to the handler side.
         */
        trace_monitor_qmp_in_band_enqueue(req_obj, mon,
                                          mon->qmp_requests->length);
        assert(mon->qmp_requests->length < QMP_REQ_QUEUE_LEN_MAX);
        g_queue_push_tail(mon->qmp_requests, req_obj);
    }

    /* Kick the dispatcher routine, unless it is already running */
    if (!qatomic_xchg(&qmp_dispatcher_co_busy, true)) {
        aio_co_wake(qmp_dispatcher_co);
    }
}
412 
413 static void monitor_qmp_read(void *opaque, const uint8_t *buf, int size)
414 {
415     MonitorQMP *mon = opaque;
416 
417     json_message_parser_feed(&mon->parser, (const char *) buf, size);
418 }
419 
420 static QDict *qmp_greeting(MonitorQMP *mon)
421 {
422     QList *cap_list = qlist_new();
423     QObject *ver = NULL;
424     QDict *args;
425     QMPCapability cap;
426 
427     args = qdict_new();
428     qmp_marshal_query_version(args, &ver, NULL);
429     qobject_unref(args);
430 
431     for (cap = 0; cap < QMP_CAPABILITY__MAX; cap++) {
432         if (mon->capab_offered[cap]) {
433             qlist_append_str(cap_list, QMPCapability_str(cap));
434         }
435     }
436 
437     return qdict_from_jsonf_nofail(
438         "{'QMP': {'version': %p, 'capabilities': %p}}",
439         ver, cap_list);
440 }
441 
442 static void monitor_qmp_event(void *opaque, QEMUChrEvent event)
443 {
444     QDict *data;
445     MonitorQMP *mon = opaque;
446 
447     switch (event) {
448     case CHR_EVENT_OPENED:
449         mon->commands = &qmp_cap_negotiation_commands;
450         monitor_qmp_caps_reset(mon);
451         data = qmp_greeting(mon);
452         qmp_send_response(mon, data);
453         qobject_unref(data);
454         mon_refcount++;
455         break;
456     case CHR_EVENT_CLOSED:
457         /*
458          * Note: this is only useful when the output of the chardev
459          * backend is still open.  For example, when the backend is
460          * stdio, it's possible that stdout is still open when stdin
461          * is closed.
462          */
463         monitor_qmp_cleanup_queue_and_resume(mon);
464         json_message_parser_destroy(&mon->parser);
465         json_message_parser_init(&mon->parser, handle_qmp_command,
466                                  mon, NULL);
467         mon_refcount--;
468         monitor_fdsets_cleanup();
469         break;
470     case CHR_EVENT_BREAK:
471     case CHR_EVENT_MUX_IN:
472     case CHR_EVENT_MUX_OUT:
473         /* Ignore */
474         break;
475     }
476 }
477 
/*
 * Free the QMP-specific parts of @mon.  No other thread may touch
 * @mon anymore when this runs.
 */
void monitor_data_destroy_qmp(MonitorQMP *mon)
{
    json_message_parser_destroy(&mon->parser);
    qemu_mutex_destroy(&mon->qmp_queue_lock);
    /*
     * NOTE(review): the queue lock was just destroyed, so the
     * "_locked" helper runs unlocked here — presumably safe because
     * no one else can reach @mon at destroy time; confirm with callers.
     */
    monitor_qmp_cleanup_req_queue_locked(mon);
    g_queue_free(mon->qmp_requests);
}
485 
486 static void monitor_qmp_setup_handlers_bh(void *opaque)
487 {
488     MonitorQMP *mon = opaque;
489     GMainContext *context;
490 
491     assert(mon->common.use_io_thread);
492     context = iothread_get_g_main_context(mon_iothread);
493     assert(context);
494     qemu_chr_fe_set_handlers(&mon->common.chr, monitor_can_read,
495                              monitor_qmp_read, monitor_qmp_event,
496                              NULL, &mon->common, context, true);
497     monitor_list_append(&mon->common);
498 }
499 
/*
 * Create a QMP monitor on chardev @chr.
 * @pretty: pretty-print JSON responses.
 * On failure, set @errp and free the partially built monitor.
 */
void monitor_init_qmp(Chardev *chr, bool pretty, Error **errp)
{
    MonitorQMP *mon = g_new0(MonitorQMP, 1);

    if (!qemu_chr_fe_init(&mon->common.chr, chr, errp)) {
        g_free(mon);
        return;
    }
    qemu_chr_fe_set_echo(&mon->common.chr, true);

    /* Note: we run QMP monitor in I/O thread when @chr supports that */
    monitor_data_init(&mon->common, true, false,
                      qemu_chr_has_feature(chr, QEMU_CHAR_FEATURE_GCONTEXT));

    mon->pretty = pretty;

    qemu_mutex_init(&mon->qmp_queue_lock);
    mon->qmp_requests = g_queue_new();

    json_message_parser_init(&mon->parser, handle_qmp_command, mon, NULL);
    if (mon->common.use_io_thread) {
        /*
         * Make sure the old iowatch is gone.  It's possible when
         * e.g. the chardev is in client mode, with wait=on.
         */
        remove_fd_in_watch(chr);
        /*
         * We can't call qemu_chr_fe_set_handlers() directly here
         * since chardev might be running in the monitor I/O
         * thread.  Schedule a bottom half.
         */
        aio_bh_schedule_oneshot(iothread_get_aio_context(mon_iothread),
                                monitor_qmp_setup_handlers_bh, mon);
        /* The bottom half will add @mon to @mon_list */
    } else {
        /* Main-loop monitor: install handlers and publish it right away */
        qemu_chr_fe_set_handlers(&mon->common.chr, monitor_can_read,
                                 monitor_qmp_read, monitor_qmp_event,
                                 NULL, &mon->common, NULL, true);
        monitor_list_append(&mon->common);
    }
}
541