xref: /openbmc/qemu/monitor/qmp.c (revision 69430111)
1 /*
2  * QEMU monitor
3  *
4  * Copyright (c) 2003-2004 Fabrice Bellard
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to deal
8  * in the Software without restriction, including without limitation the rights
9  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10  * copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22  * THE SOFTWARE.
23  */
24 
25 #include "qemu/osdep.h"
26 
27 #include "chardev/char-io.h"
28 #include "monitor-internal.h"
29 #include "qapi/error.h"
30 #include "qapi/qapi-commands-control.h"
31 #include "qapi/qmp/qdict.h"
32 #include "qapi/qmp/qjson.h"
33 #include "qapi/qmp/qlist.h"
34 #include "qapi/qmp/qstring.h"
35 #include "trace.h"
36 
/*
 * One unit of work for the QMP dispatcher: either a parsed request to
 * execute, or a parse error to report back on the owning monitor.
 */
struct QMPRequest {
    /* Owner of the request */
    MonitorQMP *mon;
    /*
     * Request object to be handled or Error to be reported
     * (exactly one of them is non-null)
     */
    QObject *req;
    Error *err;
};
typedef struct QMPRequest QMPRequest;
48 
/* All QMP commands, and the subset usable before capabilities negotiation */
QmpCommandList qmp_commands, qmp_cap_negotiation_commands;
50 
51 static bool qmp_oob_enabled(MonitorQMP *mon)
52 {
53     return mon->capab[QMP_CAPABILITY_OOB];
54 }
55 
56 static void monitor_qmp_caps_reset(MonitorQMP *mon)
57 {
58     memset(mon->capab_offered, 0, sizeof(mon->capab_offered));
59     memset(mon->capab, 0, sizeof(mon->capab));
60     mon->capab_offered[QMP_CAPABILITY_OOB] = mon->common.use_io_thread;
61 }
62 
63 static void qmp_request_free(QMPRequest *req)
64 {
65     qobject_unref(req->req);
66     error_free(req->err);
67     g_free(req);
68 }
69 
70 /* Caller must hold mon->qmp.qmp_queue_lock */
71 static void monitor_qmp_cleanup_req_queue_locked(MonitorQMP *mon)
72 {
73     while (!g_queue_is_empty(mon->qmp_requests)) {
74         qmp_request_free(g_queue_pop_head(mon->qmp_requests));
75     }
76 }
77 
/*
 * Flush @mon's request queue, and resume the monitor if queue pressure
 * had suspended it.  Called on CHR_EVENT_CLOSED.
 */
static void monitor_qmp_cleanup_queue_and_resume(MonitorQMP *mon)
{
    qemu_mutex_lock(&mon->qmp_queue_lock);

    /*
     * Same condition as in monitor_qmp_dispatcher_co(), but before
     * removing an element from the queue (hence no `- 1`).
     * Also, the queue should not be empty either, otherwise the
     * monitor hasn't been suspended yet (or was already resumed).
     */
    bool need_resume = (!qmp_oob_enabled(mon) ||
        mon->qmp_requests->length == QMP_REQ_QUEUE_LEN_MAX)
        && !g_queue_is_empty(mon->qmp_requests);

    monitor_qmp_cleanup_req_queue_locked(mon);

    if (need_resume) {
        /*
         * handle_qmp_command() suspended the monitor because the
         * request queue filled up, to be resumed when the queue has
         * space again.  We just emptied it; resume the monitor.
         *
         * Without this, the monitor would remain suspended forever
         * when we get here while the monitor is suspended.  An
         * unfortunately timed CHR_EVENT_CLOSED can do the trick.
         */
        monitor_resume(&mon->common);
    }

    qemu_mutex_unlock(&mon->qmp_queue_lock);
}
109 
110 void qmp_send_response(MonitorQMP *mon, const QDict *rsp)
111 {
112     const QObject *data = QOBJECT(rsp);
113     QString *json;
114 
115     json = mon->pretty ? qobject_to_json_pretty(data) : qobject_to_json(data);
116     assert(json != NULL);
117 
118     qstring_append_chr(json, '\n');
119     monitor_puts(&mon->common, qstring_get_str(json));
120 
121     qobject_unref(json);
122 }
123 
124 /*
125  * Emit QMP response @rsp to @mon.
126  * Null @rsp can only happen for commands with QCO_NO_SUCCESS_RESP.
127  * Nothing is emitted then.
128  */
129 static void monitor_qmp_respond(MonitorQMP *mon, QDict *rsp)
130 {
131     if (rsp) {
132         qmp_send_response(mon, rsp);
133     }
134 }
135 
136 /*
137  * Runs outside of coroutine context for OOB commands, but in
138  * coroutine context for everything else.
139  */
140 static void monitor_qmp_dispatch(MonitorQMP *mon, QObject *req)
141 {
142     QDict *rsp;
143     QDict *error;
144 
145     rsp = qmp_dispatch(mon->commands, req, qmp_oob_enabled(mon),
146                        &mon->common);
147 
148     if (mon->commands == &qmp_cap_negotiation_commands) {
149         error = qdict_get_qdict(rsp, "error");
150         if (error
151             && !g_strcmp0(qdict_get_try_str(error, "class"),
152                     QapiErrorClass_str(ERROR_CLASS_COMMAND_NOT_FOUND))) {
153             /* Provide a more useful error message */
154             qdict_del(error, "desc");
155             qdict_put_str(error, "desc", "Expecting capabilities negotiation"
156                           " with 'qmp_capabilities'");
157         }
158     }
159 
160     monitor_qmp_respond(mon, rsp);
161     qobject_unref(rsp);
162 }
163 
/*
 * Pop a QMP request from a monitor request queue.
 * Return the request, or NULL if all request queues are empty.
 * We are using round-robin fashion to pop the request, to avoid
 * processing commands only on a very busy monitor.  To achieve that,
 * when we process one request on a specific monitor, we put that
 * monitor to the end of mon_list queue.
 *
 * Note: if the function returned with non-NULL, then the caller will
 * be with qmp_mon->qmp_queue_lock held, and the caller is responsible
 * to release it.
 */
static QMPRequest *monitor_qmp_requests_pop_any_with_lock(void)
{
    QMPRequest *req_obj = NULL;
    Monitor *mon;
    MonitorQMP *qmp_mon;

    /* monitor_lock protects mon_list iteration and reordering below */
    qemu_mutex_lock(&monitor_lock);

    QTAILQ_FOREACH(mon, &mon_list, entry) {
        if (!monitor_is_qmp(mon)) {
            continue;
        }

        qmp_mon = container_of(mon, MonitorQMP, common);
        qemu_mutex_lock(&qmp_mon->qmp_queue_lock);
        req_obj = g_queue_pop_head(qmp_mon->qmp_requests);
        if (req_obj) {
            /* With the lock of corresponding queue held */
            break;
        }
        /* Empty queue: drop its lock before trying the next monitor */
        qemu_mutex_unlock(&qmp_mon->qmp_queue_lock);
    }

    if (req_obj) {
        /*
         * We found one request on the monitor. Degrade this monitor's
         * priority to lowest by re-inserting it to end of queue.
         */
        QTAILQ_REMOVE(&mon_list, mon, entry);
        QTAILQ_INSERT_TAIL(&mon_list, mon, entry);
    }

    qemu_mutex_unlock(&monitor_lock);

    return req_obj;
}
212 
/*
 * Dispatcher coroutine: repeatedly pop queued requests from all QMP
 * monitors (round-robin) and execute them, yielding between requests.
 * Terminates only when qmp_dispatcher_co_shutdown is set (by
 * monitor_cleanup(), per the comments below).  @data is unused.
 */
void coroutine_fn monitor_qmp_dispatcher_co(void *data)
{
    QMPRequest *req_obj = NULL;
    QDict *rsp;
    bool need_resume;
    MonitorQMP *mon;

    while (true) {
        /* On entry (and after each loop iteration) we must be marked busy */
        assert(qatomic_mb_read(&qmp_dispatcher_co_busy) == true);

        /*
         * Mark the dispatcher as not busy already here so that we
         * don't miss any new requests coming in the middle of our
         * processing.
         */
        qatomic_mb_set(&qmp_dispatcher_co_busy, false);

        while (!(req_obj = monitor_qmp_requests_pop_any_with_lock())) {
            /*
             * No more requests to process.  Wait to be reentered from
             * handle_qmp_command() when it pushes more requests, or
             * from monitor_cleanup() when it requests shutdown.
             */
            if (!qmp_dispatcher_co_shutdown) {
                qemu_coroutine_yield();

                /*
                 * busy must be set to true again by whoever
                 * rescheduled us to avoid double scheduling
                 */
                assert(qatomic_xchg(&qmp_dispatcher_co_busy, false) == true);
            }

            /*
             * qmp_dispatcher_co_shutdown may have changed if we
             * yielded and were reentered from monitor_cleanup()
             */
            if (qmp_dispatcher_co_shutdown) {
                return;
            }
        }

        if (qatomic_xchg(&qmp_dispatcher_co_busy, true) == true) {
            /*
             * Someone rescheduled us (probably because a new requests
             * came in), but we didn't actually yield. Do that now,
             * only to be immediately reentered and removed from the
             * list of scheduled coroutines.
             */
            qemu_coroutine_yield();
        }

        /*
         * Move the coroutine from iohandler_ctx to qemu_aio_context for
         * executing the command handler so that it can make progress if it
         * involves an AIO_WAIT_WHILE().
         */
        aio_co_schedule(qemu_get_aio_context(), qmp_dispatcher_co);
        qemu_coroutine_yield();

        /* We still hold req_obj->mon's queue lock from the pop above */
        mon = req_obj->mon;
        /* qmp_oob_enabled() might change after "qmp_capabilities" */
        need_resume = !qmp_oob_enabled(mon) ||
            mon->qmp_requests->length == QMP_REQ_QUEUE_LEN_MAX - 1;
        qemu_mutex_unlock(&mon->qmp_queue_lock);
        if (req_obj->req) {
            QDict *qdict = qobject_to(QDict, req_obj->req);
            QObject *id = qdict ? qdict_get(qdict, "id") : NULL;
            trace_monitor_qmp_cmd_in_band(qobject_get_try_str(id) ?: "");
            monitor_qmp_dispatch(mon, req_obj->req);
        } else {
            /* Parse error queued by handle_qmp_command(): report it */
            assert(req_obj->err);
            rsp = qmp_error_response(req_obj->err);
            req_obj->err = NULL;
            monitor_qmp_respond(mon, rsp);
            qobject_unref(rsp);
        }

        if (need_resume) {
            /* Pairs with the monitor_suspend() in handle_qmp_command() */
            monitor_resume(&mon->common);
        }
        qmp_request_free(req_obj);

        /*
         * Yield and reschedule so the main loop stays responsive.
         *
         * Move back to iohandler_ctx so that nested event loops for
         * qemu_aio_context don't start new monitor commands.
         */
        aio_co_schedule(iohandler_get_aio_context(), qmp_dispatcher_co);
        qemu_coroutine_yield();
    }
}
307 
/*
 * JSON parser callback: handle one parsed request @req, or a parse
 * error @err (exactly one of them is non-null).  OOB commands are
 * dispatched immediately; everything else is queued for the
 * dispatcher coroutine.  @opaque is the owning MonitorQMP.
 * Takes ownership of @req and @err.
 */
static void handle_qmp_command(void *opaque, QObject *req, Error *err)
{
    MonitorQMP *mon = opaque;
    QObject *id = NULL;
    QDict *qdict;
    QMPRequest *req_obj;

    assert(!req != !err);

    qdict = qobject_to(QDict, req);
    if (qdict) {
        id = qdict_get(qdict, "id");
    } /* else will fail qmp_dispatch() */

    if (req && trace_event_get_state_backends(TRACE_HANDLE_QMP_COMMAND)) {
        QString *req_json = qobject_to_json(req);
        trace_handle_qmp_command(mon, qstring_get_str(req_json));
        qobject_unref(req_json);
    }

    if (qdict && qmp_is_oob(qdict)) {
        /* OOB commands are executed immediately */
        trace_monitor_qmp_cmd_out_of_band(qobject_get_try_str(id) ?: "");
        monitor_qmp_dispatch(mon, req);
        qobject_unref(req);
        return;
    }

    req_obj = g_new0(QMPRequest, 1);
    req_obj->mon = mon;
    req_obj->req = req;
    req_obj->err = err;

    /* Protect qmp_requests and fetching its length. */
    qemu_mutex_lock(&mon->qmp_queue_lock);

    /*
     * Suspend the monitor when we can't queue more requests after
     * this one.  Dequeuing in monitor_qmp_dispatcher_co() or
     * monitor_qmp_cleanup_queue_and_resume() will resume it.
     * Note that when OOB is disabled, we queue at most one command,
     * for backward compatibility.
     */
    if (!qmp_oob_enabled(mon) ||
        mon->qmp_requests->length == QMP_REQ_QUEUE_LEN_MAX - 1) {
        monitor_suspend(&mon->common);
    }

    /*
     * Put the request to the end of queue so that requests will be
     * handled in time order.  Ownership for req_obj, req,
     * etc. will be delivered to the handler side.
     */
    assert(mon->qmp_requests->length < QMP_REQ_QUEUE_LEN_MAX);
    g_queue_push_tail(mon->qmp_requests, req_obj);
    qemu_mutex_unlock(&mon->qmp_queue_lock);

    /* Kick the dispatcher routine */
    if (!qatomic_xchg(&qmp_dispatcher_co_busy, true)) {
        aio_co_wake(qmp_dispatcher_co);
    }
}
370 
371 static void monitor_qmp_read(void *opaque, const uint8_t *buf, int size)
372 {
373     MonitorQMP *mon = opaque;
374 
375     json_message_parser_feed(&mon->parser, (const char *) buf, size);
376 }
377 
378 static QDict *qmp_greeting(MonitorQMP *mon)
379 {
380     QList *cap_list = qlist_new();
381     QObject *ver = NULL;
382     QDict *args;
383     QMPCapability cap;
384 
385     args = qdict_new();
386     qmp_marshal_query_version(args, &ver, NULL);
387     qobject_unref(args);
388 
389     for (cap = 0; cap < QMP_CAPABILITY__MAX; cap++) {
390         if (mon->capab_offered[cap]) {
391             qlist_append_str(cap_list, QMPCapability_str(cap));
392         }
393     }
394 
395     return qdict_from_jsonf_nofail(
396         "{'QMP': {'version': %p, 'capabilities': %p}}",
397         ver, cap_list);
398 }
399 
/*
 * Chardev event handler for QMP monitors.  On connect, send the
 * greeting and restrict commands to capabilities negotiation; on
 * disconnect, flush pending requests and reset the parser.
 * @opaque is the owning MonitorQMP.
 */
static void monitor_qmp_event(void *opaque, QEMUChrEvent event)
{
    QDict *data;
    MonitorQMP *mon = opaque;

    switch (event) {
    case CHR_EVENT_OPENED:
        /* Only qmp_capabilities is valid until negotiation completes */
        mon->commands = &qmp_cap_negotiation_commands;
        monitor_qmp_caps_reset(mon);
        data = qmp_greeting(mon);
        qmp_send_response(mon, data);
        qobject_unref(data);
        mon_refcount++;
        break;
    case CHR_EVENT_CLOSED:
        /*
         * Note: this is only useful when the output of the chardev
         * backend is still open.  For example, when the backend is
         * stdio, it's possible that stdout is still open when stdin
         * is closed.
         */
        monitor_qmp_cleanup_queue_and_resume(mon);
        /* Start over with a fresh parser for the next connection */
        json_message_parser_destroy(&mon->parser);
        json_message_parser_init(&mon->parser, handle_qmp_command,
                                 mon, NULL);
        mon_refcount--;
        monitor_fdsets_cleanup();
        break;
    case CHR_EVENT_BREAK:
    case CHR_EVENT_MUX_IN:
    case CHR_EVENT_MUX_OUT:
        /* Ignore */
        break;
    }
}
435 
/*
 * Tear down the QMP-specific parts of @mon: parser, queue lock,
 * pending requests, and the request queue itself.
 *
 * NOTE(review): the queue lock is destroyed before the "_locked"
 * cleanup helper runs — presumably safe because no other thread can
 * touch this monitor during teardown; confirm against callers.
 */
void monitor_data_destroy_qmp(MonitorQMP *mon)
{
    json_message_parser_destroy(&mon->parser);
    qemu_mutex_destroy(&mon->qmp_queue_lock);
    monitor_qmp_cleanup_req_queue_locked(mon);
    g_queue_free(mon->qmp_requests);
}
443 
/*
 * Bottom half run in the monitor I/O thread: attach the chardev
 * handlers there and register the monitor on mon_list.
 * @opaque is the MonitorQMP set up by monitor_init_qmp().
 */
static void monitor_qmp_setup_handlers_bh(void *opaque)
{
    MonitorQMP *mon = opaque;
    GMainContext *context;

    assert(mon->common.use_io_thread);
    context = iothread_get_g_main_context(mon_iothread);
    assert(context);
    qemu_chr_fe_set_handlers(&mon->common.chr, monitor_can_read,
                             monitor_qmp_read, monitor_qmp_event,
                             NULL, &mon->common, context, true);
    monitor_list_append(&mon->common);
}
457 
/*
 * Create a QMP monitor on chardev @chr.
 * @pretty selects pretty-printed JSON responses.
 * On chardev init failure, set @errp and free the monitor.
 */
void monitor_init_qmp(Chardev *chr, bool pretty, Error **errp)
{
    MonitorQMP *mon = g_new0(MonitorQMP, 1);

    if (!qemu_chr_fe_init(&mon->common.chr, chr, errp)) {
        g_free(mon);
        return;
    }
    qemu_chr_fe_set_echo(&mon->common.chr, true);

    /* Note: we run QMP monitor in I/O thread when @chr supports that */
    monitor_data_init(&mon->common, true, false,
                      qemu_chr_has_feature(chr, QEMU_CHAR_FEATURE_GCONTEXT));

    mon->pretty = pretty;

    qemu_mutex_init(&mon->qmp_queue_lock);
    mon->qmp_requests = g_queue_new();

    json_message_parser_init(&mon->parser, handle_qmp_command, mon, NULL);
    if (mon->common.use_io_thread) {
        /*
         * Make sure the old iowatch is gone.  It's possible when
         * e.g. the chardev is in client mode, with wait=on.
         */
        remove_fd_in_watch(chr);
        /*
         * We can't call qemu_chr_fe_set_handlers() directly here
         * since chardev might be running in the monitor I/O
         * thread.  Schedule a bottom half.
         */
        aio_bh_schedule_oneshot(iothread_get_aio_context(mon_iothread),
                                monitor_qmp_setup_handlers_bh, mon);
        /* The bottom half will add @mon to @mon_list */
    } else {
        /* Main-context monitor: attach handlers and register right away */
        qemu_chr_fe_set_handlers(&mon->common.chr, monitor_can_read,
                                 monitor_qmp_read, monitor_qmp_event,
                                 NULL, &mon->common, NULL, true);
        monitor_list_append(&mon->common);
    }
}
499