/* xref: /openbmc/qemu/monitor/monitor.c (revision 2df9f571) */
/*
 * QEMU monitor
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "monitor-internal.h"
#include "qapi/error.h"
#include "qapi/opts-visitor.h"
#include "qapi/qapi-emit-events.h"
#include "qapi/qapi-visit-control.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"
#include "qemu/error-report.h"
#include "qemu/option.h"
#include "sysemu/qtest.h"
#include "sysemu/sysemu.h"
#include "trace.h"

/*
 * To prevent flooding clients, events can be throttled. The
 * throttling is calculated globally, rather than per-Monitor
 * instance.
 */
typedef struct MonitorQAPIEventState {
    QAPIEvent event;    /* Throttling state for this event type and... */
    QDict *data;        /* ... data, see qapi_event_throttle_equal() */
    QEMUTimer *timer;   /* Timer for handling delayed events */
    QDict *qdict;       /* Delayed event (if any) */
} MonitorQAPIEventState;

typedef struct {
    int64_t rate;       /* Minimum time (in ns) between two events */
} MonitorQAPIEventConf;

/* Shared monitor I/O thread */
IOThread *mon_iothread;

/* Bottom half to dispatch the requests received from I/O thread */
QEMUBH *qmp_dispatcher_bh;

/* Protects mon_list, monitor_qapi_event_state, monitor_destroyed.  */
QemuMutex monitor_lock;
static GHashTable *monitor_qapi_event_state;

MonitorList mon_list;
int mon_refcount;
static bool monitor_destroyed;

__thread Monitor *cur_mon;

/**
 * Is the current monitor, if any, a QMP monitor?
 */
bool monitor_cur_is_qmp(void)
{
    return cur_mon && monitor_is_qmp(cur_mon);
}

/**
 * Is @mon using readline?
 * Note: not all HMP monitors use readline, e.g., gdbserver has a
 * non-interactive HMP monitor, so readline is not used there.
 */
static inline bool monitor_uses_readline(const MonitorHMP *mon)
{
    return mon->use_readline;
}

static inline bool monitor_is_hmp_non_interactive(const Monitor *mon)
{
    if (monitor_is_qmp(mon)) {
        return false;
    }

    return !monitor_uses_readline(container_of(mon, MonitorHMP, common));
}

static void monitor_flush_locked(Monitor *mon);

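/*
 * Watch callback, invoked when the output chardev can take data again
 * (or hung up): drop the watch and retry flushing the output buffer.
 */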
static gboolean monitor_unblocked(GIOChannel *chan, GIOCondition cond,
                                  void *opaque)
{
    Monitor *mon = opaque;

    qemu_mutex_lock(&mon->mon_lock);
    mon->out_watch = 0;
    monitor_flush_locked(mon);
    qemu_mutex_unlock(&mon->mon_lock);
    return FALSE;
}

/* Caller must hold mon->mon_lock */
static void monitor_flush_locked(Monitor *mon)
{
    int rc;
    size_t len;
    const char *buf;

    if (mon->skip_flush) {
        return;
    }

    buf = qstring_get_str(mon->outbuf);
    len = qstring_get_length(mon->outbuf);

    if (len && !mon->mux_out) {
        rc = qemu_chr_fe_write(&mon->chr, (const uint8_t *) buf, len);
        if ((rc < 0 && errno != EAGAIN) || (rc == len)) {
            /* all flushed or error */
            qobject_unref(mon->outbuf);
            mon->outbuf = qstring_new();
            return;
        }
        if (rc > 0) {
            /* partial write */
            QString *tmp = qstring_from_str(buf + rc);
            qobject_unref(mon->outbuf);
            mon->outbuf = tmp;
        }
        if (mon->out_watch == 0) {
            mon->out_watch =
                qemu_chr_fe_add_watch(&mon->chr, G_IO_OUT | G_IO_HUP,
                                      monitor_unblocked, mon);
        }
    }
}

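/*
 * Flush the output buffer, taking mon->mon_lock on behalf of the
 * caller.
 */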
void monitor_flush(Monitor *mon)
{
    qemu_mutex_lock(&mon->mon_lock);
    monitor_flush_locked(mon);
    qemu_mutex_unlock(&mon->mon_lock);
}

/* flush at every end of line */
int monitor_puts(Monitor *mon, const char *str)
{
    int i;
    char c;

    qemu_mutex_lock(&mon->mon_lock);
    for (i = 0; str[i]; i++) {
        c = str[i];
        if (c == '\n') {
            qstring_append_chr(mon->outbuf, '\r');
        }
        qstring_append_chr(mon->outbuf, c);
        if (c == '\n') {
            monitor_flush_locked(mon);
        }
    }
    qemu_mutex_unlock(&mon->mon_lock);

    return i;
}

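/*
 * Print to @mon like vprintf().
 * HMP only: returns -1 if @mon is null or a QMP monitor, else the
 * number of characters appended to the output buffer.
 */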
int monitor_vprintf(Monitor *mon, const char *fmt, va_list ap)
{
    char *buf;
    int n;

    if (!mon) {
        return -1;
    }

    if (monitor_is_qmp(mon)) {
        return -1;
    }

    buf = g_strdup_vprintf(fmt, ap);
    n = monitor_puts(mon, buf);
    g_free(buf);
    return n;
}

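/*
 * Print to @mon like printf(); see monitor_vprintf() for the return
 * value.  This is the usual way for HMP command handlers to produce
 * output, e.g. monitor_printf(mon, "result: %d\n", value).
 */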
int monitor_printf(Monitor *mon, const char *fmt, ...)
{
    int ret;

    va_list ap;
    va_start(ap, fmt);
    ret = monitor_vprintf(mon, fmt, ap);
    va_end(ap);
    return ret;
}

/*
 * Print to current monitor if we have one, else to stderr.
 */
int error_vprintf(const char *fmt, va_list ap)
{
    if (cur_mon && !monitor_cur_is_qmp()) {
        return monitor_vprintf(cur_mon, fmt, ap);
    }
    return vfprintf(stderr, fmt, ap);
}

int error_vprintf_unless_qmp(const char *fmt, va_list ap)
{
    if (!cur_mon) {
        return vfprintf(stderr, fmt, ap);
    }
    if (!monitor_cur_is_qmp()) {
        return monitor_vprintf(cur_mon, fmt, ap);
    }
    return -1;
}


static MonitorQAPIEventConf monitor_qapi_event_conf[QAPI_EVENT__MAX] = {
    /* Limit guest-triggerable events to 1 per second */
    [QAPI_EVENT_RTC_CHANGE]        = { 1000 * SCALE_MS },
    [QAPI_EVENT_WATCHDOG]          = { 1000 * SCALE_MS },
    [QAPI_EVENT_BALLOON_CHANGE]    = { 1000 * SCALE_MS },
    [QAPI_EVENT_QUORUM_REPORT_BAD] = { 1000 * SCALE_MS },
    [QAPI_EVENT_QUORUM_FAILURE]    = { 1000 * SCALE_MS },
    [QAPI_EVENT_VSERPORT_CHANGE]   = { 1000 * SCALE_MS },
};

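/*
 * Sketch of the resulting behaviour: if a guest changes its RTC ten
 * times within one second, the first RTC_CHANGE event is emitted
 * immediately and a timer is armed; the later changes merely replace
 * the pending event, and when the timer fires only the most recent
 * RTC_CHANGE is delivered (see monitor_qapi_event_queue_no_reenter()
 * and monitor_qapi_event_handler() below).
 */
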
/*
 * Return the clock to use for recording an event's time.
 * It's QEMU_CLOCK_REALTIME, except for qtests it's
 * QEMU_CLOCK_VIRTUAL, to support testing rate limits.
 * Beware: result is invalid before configure_accelerator().
 */
static inline QEMUClockType monitor_get_event_clock(void)
{
    return qtest_enabled() ? QEMU_CLOCK_VIRTUAL : QEMU_CLOCK_REALTIME;
}

/*
 * Broadcast an event to all monitors.
 * @qdict is the event object.  Its member "event" must match @event.
 * Caller must hold monitor_lock.
 */
static void monitor_qapi_event_emit(QAPIEvent event, QDict *qdict)
{
    Monitor *mon;
    MonitorQMP *qmp_mon;

    trace_monitor_protocol_event_emit(event, qdict);
    QTAILQ_FOREACH(mon, &mon_list, entry) {
        if (!monitor_is_qmp(mon)) {
            continue;
        }

        qmp_mon = container_of(mon, MonitorQMP, common);
        if (qmp_mon->commands != &qmp_cap_negotiation_commands) {
            qmp_send_response(qmp_mon, qdict);
        }
    }
}

static void monitor_qapi_event_handler(void *opaque);

/*
 * Queue a new event for emission to Monitor instances,
 * applying any rate limiting if required.
 */
static void
monitor_qapi_event_queue_no_reenter(QAPIEvent event, QDict *qdict)
{
    MonitorQAPIEventConf *evconf;
    MonitorQAPIEventState *evstate;

    assert(event < QAPI_EVENT__MAX);
    evconf = &monitor_qapi_event_conf[event];
    trace_monitor_protocol_event_queue(event, qdict, evconf->rate);

    qemu_mutex_lock(&monitor_lock);

    if (!evconf->rate) {
        /* Unthrottled event */
        monitor_qapi_event_emit(event, qdict);
    } else {
        QDict *data = qobject_to(QDict, qdict_get(qdict, "data"));
        MonitorQAPIEventState key = { .event = event, .data = data };

        evstate = g_hash_table_lookup(monitor_qapi_event_state, &key);
        assert(!evstate || timer_pending(evstate->timer));

        if (evstate) {
            /*
             * Timer is pending for (at least) evconf->rate ns after
             * last send.  Store event for sending when timer fires,
             * replacing a prior stored event if any.
             */
            qobject_unref(evstate->qdict);
            evstate->qdict = qobject_ref(qdict);
        } else {
            /*
             * Last send was (at least) evconf->rate ns ago.
             * Send immediately, and arm the timer to call
             * monitor_qapi_event_handler() in evconf->rate ns.  Any
             * events arriving before then will be delayed until then.
             */
            int64_t now = qemu_clock_get_ns(monitor_get_event_clock());

            monitor_qapi_event_emit(event, qdict);

            evstate = g_new(MonitorQAPIEventState, 1);
            evstate->event = event;
            evstate->data = qobject_ref(data);
            evstate->qdict = NULL;
            evstate->timer = timer_new_ns(monitor_get_event_clock(),
                                          monitor_qapi_event_handler,
                                          evstate);
            g_hash_table_add(monitor_qapi_event_state, evstate);
            timer_mod_ns(evstate->timer, now + evconf->rate);
        }
    }

    qemu_mutex_unlock(&monitor_lock);
}

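/*
 * Emit a QAPI event to all QMP monitors.
 * @qdict is the complete event object; throttling is applied according
 * to monitor_qapi_event_conf[@event], and reentrant calls are queued
 * in thread-local storage.
 */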
void qapi_event_emit(QAPIEvent event, QDict *qdict)
{
    /*
     * monitor_qapi_event_queue_no_reenter() is not reentrant: it
     * would deadlock on monitor_lock.  Work around by queueing
     * events in thread-local storage.
     * TODO: remove this, make it re-enter safe.
     */
    typedef struct MonitorQapiEvent {
        QAPIEvent event;
        QDict *qdict;
        QSIMPLEQ_ENTRY(MonitorQapiEvent) entry;
    } MonitorQapiEvent;
    static __thread QSIMPLEQ_HEAD(, MonitorQapiEvent) event_queue;
    static __thread bool reentered;
    MonitorQapiEvent *ev;

    if (!reentered) {
        QSIMPLEQ_INIT(&event_queue);
    }

    ev = g_new(MonitorQapiEvent, 1);
    ev->qdict = qobject_ref(qdict);
    ev->event = event;
    QSIMPLEQ_INSERT_TAIL(&event_queue, ev, entry);
    if (reentered) {
        return;
    }

    reentered = true;

    while ((ev = QSIMPLEQ_FIRST(&event_queue)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&event_queue, entry);
        monitor_qapi_event_queue_no_reenter(ev->event, ev->qdict);
        qobject_unref(ev->qdict);
        g_free(ev);
    }

    reentered = false;
}

/*
 * This function runs evconf->rate ns after sending a throttled
 * event.
 * If another event has since been stored, send it.
 */
static void monitor_qapi_event_handler(void *opaque)
{
    MonitorQAPIEventState *evstate = opaque;
    MonitorQAPIEventConf *evconf = &monitor_qapi_event_conf[evstate->event];

    trace_monitor_protocol_event_handler(evstate->event, evstate->qdict);
    qemu_mutex_lock(&monitor_lock);

    if (evstate->qdict) {
        int64_t now = qemu_clock_get_ns(monitor_get_event_clock());

        monitor_qapi_event_emit(evstate->event, evstate->qdict);
        qobject_unref(evstate->qdict);
        evstate->qdict = NULL;
        timer_mod_ns(evstate->timer, now + evconf->rate);
    } else {
        g_hash_table_remove(monitor_qapi_event_state, evstate);
        qobject_unref(evstate->data);
        timer_free(evstate->timer);
        g_free(evstate);
    }

    qemu_mutex_unlock(&monitor_lock);
}

static unsigned int qapi_event_throttle_hash(const void *key)
{
    const MonitorQAPIEventState *evstate = key;
    unsigned int hash = evstate->event * 255;

    if (evstate->event == QAPI_EVENT_VSERPORT_CHANGE) {
        hash += g_str_hash(qdict_get_str(evstate->data, "id"));
    }

    if (evstate->event == QAPI_EVENT_QUORUM_REPORT_BAD) {
        hash += g_str_hash(qdict_get_str(evstate->data, "node-name"));
    }

    return hash;
}

static gboolean qapi_event_throttle_equal(const void *a, const void *b)
{
    const MonitorQAPIEventState *eva = a;
    const MonitorQAPIEventState *evb = b;

    if (eva->event != evb->event) {
        return FALSE;
    }

    if (eva->event == QAPI_EVENT_VSERPORT_CHANGE) {
        return !strcmp(qdict_get_str(eva->data, "id"),
                       qdict_get_str(evb->data, "id"));
    }

    if (eva->event == QAPI_EVENT_QUORUM_REPORT_BAD) {
        return !strcmp(qdict_get_str(eva->data, "node-name"),
                       qdict_get_str(evb->data, "node-name"));
    }

    return TRUE;
}

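/*
 * Suspend reading of input from @mon (calls nest; see
 * monitor_resume()).
 * Returns 0 on success, -ENOTTY if @mon is a non-interactive HMP
 * monitor, which cannot be suspended.
 */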
int monitor_suspend(Monitor *mon)
{
    if (monitor_is_hmp_non_interactive(mon)) {
        return -ENOTTY;
    }

    atomic_inc(&mon->suspend_cnt);

    if (mon->use_io_thread) {
        /*
         * Kick I/O thread to make sure this takes effect.  It'll be
         * evaluated again in prepare() of the watch object.
         */
        aio_notify(iothread_get_aio_context(mon_iothread));
    }

    trace_monitor_suspend(mon, 1);
    return 0;
}

static void monitor_accept_input(void *opaque)
{
    Monitor *mon = opaque;

    qemu_chr_fe_accept_input(&mon->chr);
}

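/*
 * Undo one monitor_suspend().  When the suspend count drops to zero,
 * redisplay the HMP prompt (if any) and resume accepting input.
 */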
void monitor_resume(Monitor *mon)
{
    if (monitor_is_hmp_non_interactive(mon)) {
        return;
    }

    if (atomic_dec_fetch(&mon->suspend_cnt) == 0) {
        AioContext *ctx;

        if (mon->use_io_thread) {
            ctx = iothread_get_aio_context(mon_iothread);
        } else {
            ctx = qemu_get_aio_context();
        }

        if (!monitor_is_qmp(mon)) {
            MonitorHMP *hmp_mon = container_of(mon, MonitorHMP, common);
            assert(hmp_mon->rs);
            readline_show_prompt(hmp_mon->rs);
        }

        aio_bh_schedule_oneshot(ctx, monitor_accept_input, mon);
    }

    trace_monitor_suspend(mon, -1);
}

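/*
 * Chardev can-read handler: accept input only while the monitor is not
 * suspended.
 */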
int monitor_can_read(void *opaque)
{
    Monitor *mon = opaque;

    return !atomic_mb_read(&mon->suspend_cnt);
}

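/*
 * Append @mon to the global monitor list, taking ownership of it.
 * If monitor_cleanup() has already started, @mon is destroyed and
 * freed instead.
 */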
void monitor_list_append(Monitor *mon)
{
    qemu_mutex_lock(&monitor_lock);
    /*
     * This prevents inserting new monitors during monitor_cleanup().
     * A cleaner solution would involve the main thread telling other
     * threads to terminate, waiting for their termination.
     */
    if (!monitor_destroyed) {
        QTAILQ_INSERT_HEAD(&mon_list, mon, entry);
        mon = NULL;
    }
    qemu_mutex_unlock(&monitor_lock);

    if (mon) {
        monitor_data_destroy(mon);
        g_free(mon);
    }
}

static void monitor_iothread_init(void)
{
    mon_iothread = iothread_create("mon_iothread", &error_abort);
}

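/*
 * Initialize the common part of a monitor.  When @use_io_thread is
 * set, the shared monitor I/O thread is created on first use.
 */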
void monitor_data_init(Monitor *mon, bool is_qmp, bool skip_flush,
                       bool use_io_thread)
{
    if (use_io_thread && !mon_iothread) {
        monitor_iothread_init();
    }
    qemu_mutex_init(&mon->mon_lock);
    mon->is_qmp = is_qmp;
    mon->outbuf = qstring_new();
    mon->skip_flush = skip_flush;
    mon->use_io_thread = use_io_thread;
}

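/*
 * Counterpart of monitor_data_init(): release the monitor's chardev,
 * output buffer, lock, and QMP- or HMP-specific state.
 */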
void monitor_data_destroy(Monitor *mon)
{
    g_free(mon->mon_cpu_path);
    qemu_chr_fe_deinit(&mon->chr, false);
    if (monitor_is_qmp(mon)) {
        monitor_data_destroy_qmp(container_of(mon, MonitorQMP, common));
    } else {
        readline_free(container_of(mon, MonitorHMP, common)->rs);
    }
    qobject_unref(mon->outbuf);
    qemu_mutex_destroy(&mon->mon_lock);
}

void monitor_cleanup(void)
{
    /*
     * We need to explicitly stop the I/O thread (but not destroy it),
     * clean up the monitor resources, then destroy the I/O thread.  We
     * do it in this order because monitor_data_destroy() below needs to
     * unregister from the chardev, and chardev is not thread-safe yet.
     */
    if (mon_iothread) {
        iothread_stop(mon_iothread);
    }

    /* Flush output buffers and destroy monitors */
    qemu_mutex_lock(&monitor_lock);
    monitor_destroyed = true;
    while (!QTAILQ_EMPTY(&mon_list)) {
        Monitor *mon = QTAILQ_FIRST(&mon_list);
        QTAILQ_REMOVE(&mon_list, mon, entry);
        /* Permit QAPI event emission from character frontend release */
        qemu_mutex_unlock(&monitor_lock);
        monitor_flush(mon);
        monitor_data_destroy(mon);
        qemu_mutex_lock(&monitor_lock);
        g_free(mon);
    }
    qemu_mutex_unlock(&monitor_lock);

    /* QEMUBHs need to be deleted before destroying the I/O thread */
    qemu_bh_delete(qmp_dispatcher_bh);
    qmp_dispatcher_bh = NULL;
    if (mon_iothread) {
        iothread_destroy(mon_iothread);
        mon_iothread = NULL;
    }
}

static void monitor_qapi_event_init(void)
{
    monitor_qapi_event_state = g_hash_table_new(qapi_event_throttle_hash,
                                                qapi_event_throttle_equal);
}

void monitor_init_globals_core(void)
{
    monitor_qapi_event_init();
    qemu_mutex_init(&monitor_lock);

    /*
     * The dispatcher BH must run in the main loop thread, since we
     * have commands assuming that context.  It would be nice to get
     * rid of those assumptions.
     */
    qmp_dispatcher_bh = aio_bh_new(iohandler_get_aio_context(),
                                   monitor_qmp_bh_dispatcher,
                                   NULL);
}

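/*
 * Create a monitor for the chardev named by @opts->chardev.
 * QMP is always allowed; HMP ("readline" mode) only when @allow_hmp is
 * true.  Returns 0 on success, -1 on failure with @errp set.
 */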
int monitor_init(MonitorOptions *opts, bool allow_hmp, Error **errp)
{
    Chardev *chr;
    Error *local_err = NULL;

    chr = qemu_chr_find(opts->chardev);
    if (chr == NULL) {
        error_setg(errp, "chardev \"%s\" not found", opts->chardev);
        return -1;
    }

    if (!opts->has_mode) {
        opts->mode = allow_hmp ? MONITOR_MODE_READLINE : MONITOR_MODE_CONTROL;
    }

    switch (opts->mode) {
    case MONITOR_MODE_CONTROL:
        monitor_init_qmp(chr, opts->pretty, &local_err);
        break;
    case MONITOR_MODE_READLINE:
        if (!allow_hmp) {
            error_setg(errp, "Only QMP is supported");
            return -1;
        }
        if (opts->pretty) {
            warn_report("'pretty' is deprecated for HMP monitors, it has no "
                        "effect and will be removed in future versions");
        }
        monitor_init_hmp(chr, true, &local_err);
        break;
    default:
        g_assert_not_reached();
    }

    if (local_err) {
        error_propagate(errp, local_err);
        return -1;
    }
    return 0;
}

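/*
 * Create a monitor from "-mon" command line options.
 * Returns 0 on success, -1 on failure with @errp set.
 */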
int monitor_init_opts(QemuOpts *opts, Error **errp)
{
    Visitor *v;
    MonitorOptions *options;
    Error *local_err = NULL;

    v = opts_visitor_new(opts);
    visit_type_MonitorOptions(v, NULL, &options, &local_err);
    visit_free(v);

    if (local_err) {
        goto out;
    }

    monitor_init(options, true, &local_err);
    qapi_free_MonitorOptions(options);

out:
    if (local_err) {
        error_propagate(errp, local_err);
        return -1;
    }
    return 0;
}

QemuOptsList qemu_mon_opts = {
    .name = "mon",
    .implied_opt_name = "chardev",
    .head = QTAILQ_HEAD_INITIALIZER(qemu_mon_opts.head),
    .desc = {
        {
            .name = "mode",
            .type = QEMU_OPT_STRING,
        },{
            .name = "chardev",
            .type = QEMU_OPT_STRING,
        },{
            .name = "pretty",
            .type = QEMU_OPT_BOOL,
        },
        { /* end of list */ }
    },
};
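
/*
 * Example usage (a sketch; the exact chardev arguments depend on the
 * QEMU version and build):
 *
 *   -chardev socket,id=mon0,path=/tmp/qmp.sock,server,nowait \
 *   -mon chardev=mon0,mode=control,pretty=on
 *
 * The "-mon" arguments are parsed against qemu_mon_opts above and then
 * handed to monitor_init_opts().
 */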