xref: /openbmc/qemu/monitor/monitor.c (revision 1d95db74)
1 /*
2  * QEMU monitor
3  *
4  * Copyright (c) 2003-2004 Fabrice Bellard
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to deal
8  * in the Software without restriction, including without limitation the rights
9  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10  * copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22  * THE SOFTWARE.
23  */
24 
25 #include "qemu/osdep.h"
26 #include "monitor-internal.h"
27 #include "qapi/error.h"
28 #include "qapi/qapi-emit-events.h"
29 #include "qapi/qmp/qdict.h"
30 #include "qapi/qmp/qstring.h"
31 #include "qemu/error-report.h"
32 #include "qemu/option.h"
33 #include "sysemu/qtest.h"
34 #include "trace.h"
35 
36 /*
37  * To prevent flooding clients, events can be throttled. The
38  * throttling is calculated globally, rather than per-Monitor
39  * instance.
40  */
41 typedef struct MonitorQAPIEventState {
42     QAPIEvent event;    /* Throttling state for this event type and... */
43     QDict *data;        /* ... data, see qapi_event_throttle_equal() */
44     QEMUTimer *timer;   /* Timer for handling delayed events */
45     QDict *qdict;       /* Delayed event (if any) */
46 } MonitorQAPIEventState;
47 
48 typedef struct {
49     int64_t rate;       /* Minimum time (in ns) between two events */
50 } MonitorQAPIEventConf;
51 
/* Shared monitor I/O thread */
IOThread *mon_iothread;

/* Bottom half to dispatch the requests received from I/O thread */
QEMUBH *qmp_dispatcher_bh;

/* Protects mon_list, monitor_qapi_event_state, monitor_destroyed.  */
QemuMutex monitor_lock;
/* Throttling state per event type/data, see monitor_qapi_event_init() */
static GHashTable *monitor_qapi_event_state;

/* All registered monitors; protected by monitor_lock */
MonitorList mon_list;
int mon_refcount;
/* Set by monitor_cleanup(); prevents later monitor_list_append() */
static bool monitor_destroyed;

/* The monitor the current thread is executing a command for, if any */
__thread Monitor *cur_mon;
67 
68 /**
69  * Is the current monitor, if any, a QMP monitor?
70  */
71 bool monitor_cur_is_qmp(void)
72 {
73     return cur_mon && monitor_is_qmp(cur_mon);
74 }
75 
76 /**
77  * Is @mon is using readline?
78  * Note: not all HMP monitors use readline, e.g., gdbserver has a
79  * non-interactive HMP monitor, so readline is not used there.
80  */
81 static inline bool monitor_uses_readline(const Monitor *mon)
82 {
83     return mon->flags & MONITOR_USE_READLINE;
84 }
85 
86 static inline bool monitor_is_hmp_non_interactive(const Monitor *mon)
87 {
88     return !monitor_is_qmp(mon) && !monitor_uses_readline(mon);
89 }
90 
static void monitor_flush_locked(Monitor *mon);

/*
 * Watch callback invoked when the chardev can accept output again:
 * clear the watch handle and retry flushing the pending buffer.
 * Returns FALSE so GLib removes the (one-shot) watch.
 */
static gboolean monitor_unblocked(GIOChannel *chan, GIOCondition cond,
                                  void *opaque)
{
    Monitor *mon = opaque;

    qemu_mutex_lock(&mon->mon_lock);
    mon->out_watch = 0;
    monitor_flush_locked(mon);
    qemu_mutex_unlock(&mon->mon_lock);
    return FALSE;
}
104 
/* Caller must hold mon->mon_lock */
static void monitor_flush_locked(Monitor *mon)
{
    int rc;
    size_t len;
    const char *buf;

    if (mon->skip_flush) {
        return;
    }

    buf = qstring_get_str(mon->outbuf);
    len = qstring_get_length(mon->outbuf);

    /* Only write when there is data and output isn't muxed away */
    if (len && !mon->mux_out) {
        rc = qemu_chr_fe_write(&mon->chr, (const uint8_t *) buf, len);
        if ((rc < 0 && errno != EAGAIN) || (rc == len)) {
            /* all flushed or error */
            qobject_unref(mon->outbuf);
            mon->outbuf = qstring_new();
            return;
        }
        if (rc > 0) {
            /* partial write: keep only the unwritten tail */
            QString *tmp = qstring_from_str(buf + rc);
            qobject_unref(mon->outbuf);
            mon->outbuf = tmp;
        }
        /*
         * Chardev can't take more right now; arm a watch so
         * monitor_unblocked() retries once it becomes writable.
         */
        if (mon->out_watch == 0) {
            mon->out_watch =
                qemu_chr_fe_add_watch(&mon->chr, G_IO_OUT | G_IO_HUP,
                                      monitor_unblocked, mon);
        }
    }
}
140 
/* Flush @mon's output buffer, taking mon->mon_lock internally */
void monitor_flush(Monitor *mon)
{
    qemu_mutex_lock(&mon->mon_lock);
    monitor_flush_locked(mon);
    qemu_mutex_unlock(&mon->mon_lock);
}
147 
148 /* flush at every end of line */
149 int monitor_puts(Monitor *mon, const char *str)
150 {
151     int i;
152     char c;
153 
154     qemu_mutex_lock(&mon->mon_lock);
155     for (i = 0; str[i]; i++) {
156         c = str[i];
157         if (c == '\n') {
158             qstring_append_chr(mon->outbuf, '\r');
159         }
160         qstring_append_chr(mon->outbuf, c);
161         if (c == '\n') {
162             monitor_flush_locked(mon);
163         }
164     }
165     qemu_mutex_unlock(&mon->mon_lock);
166 
167     return i;
168 }
169 
170 int monitor_vprintf(Monitor *mon, const char *fmt, va_list ap)
171 {
172     char *buf;
173     int n;
174 
175     if (!mon) {
176         return -1;
177     }
178 
179     if (monitor_is_qmp(mon)) {
180         return -1;
181     }
182 
183     buf = g_strdup_vprintf(fmt, ap);
184     n = monitor_puts(mon, buf);
185     g_free(buf);
186     return n;
187 }
188 
189 int monitor_printf(Monitor *mon, const char *fmt, ...)
190 {
191     int ret;
192 
193     va_list ap;
194     va_start(ap, fmt);
195     ret = monitor_vprintf(mon, fmt, ap);
196     va_end(ap);
197     return ret;
198 }
199 
200 /*
201  * Print to current monitor if we have one, else to stderr.
202  */
203 int error_vprintf(const char *fmt, va_list ap)
204 {
205     if (cur_mon && !monitor_cur_is_qmp()) {
206         return monitor_vprintf(cur_mon, fmt, ap);
207     }
208     return vfprintf(stderr, fmt, ap);
209 }
210 
211 int error_vprintf_unless_qmp(const char *fmt, va_list ap)
212 {
213     if (!cur_mon) {
214         return vfprintf(stderr, fmt, ap);
215     }
216     if (!monitor_cur_is_qmp()) {
217         return monitor_vprintf(cur_mon, fmt, ap);
218     }
219     return -1;
220 }
221 
222 
/* Throttling rates, indexed by event type; zero means unthrottled */
static MonitorQAPIEventConf monitor_qapi_event_conf[QAPI_EVENT__MAX] = {
    /* Limit guest-triggerable events to 1 per second */
    [QAPI_EVENT_RTC_CHANGE]        = { 1000 * SCALE_MS },
    [QAPI_EVENT_WATCHDOG]          = { 1000 * SCALE_MS },
    [QAPI_EVENT_BALLOON_CHANGE]    = { 1000 * SCALE_MS },
    [QAPI_EVENT_QUORUM_REPORT_BAD] = { 1000 * SCALE_MS },
    [QAPI_EVENT_QUORUM_FAILURE]    = { 1000 * SCALE_MS },
    [QAPI_EVENT_VSERPORT_CHANGE]   = { 1000 * SCALE_MS },
};
232 
233 /*
234  * Return the clock to use for recording an event's time.
235  * It's QEMU_CLOCK_REALTIME, except for qtests it's
236  * QEMU_CLOCK_VIRTUAL, to support testing rate limits.
237  * Beware: result is invalid before configure_accelerator().
238  */
239 static inline QEMUClockType monitor_get_event_clock(void)
240 {
241     return qtest_enabled() ? QEMU_CLOCK_VIRTUAL : QEMU_CLOCK_REALTIME;
242 }
243 
244 /*
245  * Broadcast an event to all monitors.
246  * @qdict is the event object.  Its member "event" must match @event.
247  * Caller must hold monitor_lock.
248  */
249 static void monitor_qapi_event_emit(QAPIEvent event, QDict *qdict)
250 {
251     Monitor *mon;
252     MonitorQMP *qmp_mon;
253 
254     trace_monitor_protocol_event_emit(event, qdict);
255     QTAILQ_FOREACH(mon, &mon_list, entry) {
256         if (!monitor_is_qmp(mon)) {
257             continue;
258         }
259 
260         qmp_mon = container_of(mon, MonitorQMP, common);
261         if (qmp_mon->commands != &qmp_cap_negotiation_commands) {
262             qmp_send_response(qmp_mon, qdict);
263         }
264     }
265 }
266 
static void monitor_qapi_event_handler(void *opaque);

/*
 * Queue a new event for emission to Monitor instances,
 * applying any rate limiting if required.
 */
static void
monitor_qapi_event_queue_no_reenter(QAPIEvent event, QDict *qdict)
{
    MonitorQAPIEventConf *evconf;
    MonitorQAPIEventState *evstate;

    assert(event < QAPI_EVENT__MAX);
    evconf = &monitor_qapi_event_conf[event];
    trace_monitor_protocol_event_queue(event, qdict, evconf->rate);

    qemu_mutex_lock(&monitor_lock);

    if (!evconf->rate) {
        /* Unthrottled event */
        monitor_qapi_event_emit(event, qdict);
    } else {
        QDict *data = qobject_to(QDict, qdict_get(qdict, "data"));
        MonitorQAPIEventState key = { .event = event, .data = data };

        /* Look up throttling state for this event type (and data) */
        evstate = g_hash_table_lookup(monitor_qapi_event_state, &key);
        assert(!evstate || timer_pending(evstate->timer));

        if (evstate) {
            /*
             * Timer is pending for (at least) evconf->rate ns after
             * last send.  Store event for sending when timer fires,
             * replacing a prior stored event if any.
             */
            qobject_unref(evstate->qdict);
            evstate->qdict = qobject_ref(qdict);
        } else {
            /*
             * Last send was (at least) evconf->rate ns ago.
             * Send immediately, and arm the timer to call
             * monitor_qapi_event_handler() in evconf->rate ns.  Any
             * events arriving before then will be delayed until then.
             */
            int64_t now = qemu_clock_get_ns(monitor_get_event_clock());

            monitor_qapi_event_emit(event, qdict);

            evstate = g_new(MonitorQAPIEventState, 1);
            evstate->event = event;
            evstate->data = qobject_ref(data);
            evstate->qdict = NULL;
            evstate->timer = timer_new_ns(monitor_get_event_clock(),
                                          monitor_qapi_event_handler,
                                          evstate);
            g_hash_table_add(monitor_qapi_event_state, evstate);
            timer_mod_ns(evstate->timer, now + evconf->rate);
        }
    }

    qemu_mutex_unlock(&monitor_lock);
}
328 
/*
 * Emit @qdict for @event to all QMP monitors, rate-limited per
 * monitor_qapi_event_conf[].  Reentrant calls on the same thread are
 * queued in thread-local storage and drained by the outermost call.
 */
void qapi_event_emit(QAPIEvent event, QDict *qdict)
{
    /*
     * monitor_qapi_event_queue_no_reenter() is not reentrant: it
     * would deadlock on monitor_lock.  Work around by queueing
     * events in thread-local storage.
     * TODO: remove this, make it re-enter safe.
     */
    typedef struct MonitorQapiEvent {
        QAPIEvent event;
        QDict *qdict;
        QSIMPLEQ_ENTRY(MonitorQapiEvent) entry;
    } MonitorQapiEvent;
    static __thread QSIMPLEQ_HEAD(, MonitorQapiEvent) event_queue;
    static __thread bool reentered;
    MonitorQapiEvent *ev;

    /* Outermost call: (re)initialize this thread's queue head */
    if (!reentered) {
        QSIMPLEQ_INIT(&event_queue);
    }

    ev = g_new(MonitorQapiEvent, 1);
    ev->qdict = qobject_ref(qdict);
    ev->event = event;
    QSIMPLEQ_INSERT_TAIL(&event_queue, ev, entry);
    /* Reentered call: leave the event for the outermost call to drain */
    if (reentered) {
        return;
    }

    reentered = true;

    /* Drain the queue, including events queued while we emit */
    while ((ev = QSIMPLEQ_FIRST(&event_queue)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&event_queue, entry);
        monitor_qapi_event_queue_no_reenter(ev->event, ev->qdict);
        qobject_unref(ev->qdict);
        g_free(ev);
    }

    reentered = false;
}
369 
370 /*
371  * This function runs evconf->rate ns after sending a throttled
372  * event.
373  * If another event has since been stored, send it.
374  */
375 static void monitor_qapi_event_handler(void *opaque)
376 {
377     MonitorQAPIEventState *evstate = opaque;
378     MonitorQAPIEventConf *evconf = &monitor_qapi_event_conf[evstate->event];
379 
380     trace_monitor_protocol_event_handler(evstate->event, evstate->qdict);
381     qemu_mutex_lock(&monitor_lock);
382 
383     if (evstate->qdict) {
384         int64_t now = qemu_clock_get_ns(monitor_get_event_clock());
385 
386         monitor_qapi_event_emit(evstate->event, evstate->qdict);
387         qobject_unref(evstate->qdict);
388         evstate->qdict = NULL;
389         timer_mod_ns(evstate->timer, now + evconf->rate);
390     } else {
391         g_hash_table_remove(monitor_qapi_event_state, evstate);
392         qobject_unref(evstate->data);
393         timer_free(evstate->timer);
394         g_free(evstate);
395     }
396 
397     qemu_mutex_unlock(&monitor_lock);
398 }
399 
400 static unsigned int qapi_event_throttle_hash(const void *key)
401 {
402     const MonitorQAPIEventState *evstate = key;
403     unsigned int hash = evstate->event * 255;
404 
405     if (evstate->event == QAPI_EVENT_VSERPORT_CHANGE) {
406         hash += g_str_hash(qdict_get_str(evstate->data, "id"));
407     }
408 
409     if (evstate->event == QAPI_EVENT_QUORUM_REPORT_BAD) {
410         hash += g_str_hash(qdict_get_str(evstate->data, "node-name"));
411     }
412 
413     return hash;
414 }
415 
416 static gboolean qapi_event_throttle_equal(const void *a, const void *b)
417 {
418     const MonitorQAPIEventState *eva = a;
419     const MonitorQAPIEventState *evb = b;
420 
421     if (eva->event != evb->event) {
422         return FALSE;
423     }
424 
425     if (eva->event == QAPI_EVENT_VSERPORT_CHANGE) {
426         return !strcmp(qdict_get_str(eva->data, "id"),
427                        qdict_get_str(evb->data, "id"));
428     }
429 
430     if (eva->event == QAPI_EVENT_QUORUM_REPORT_BAD) {
431         return !strcmp(qdict_get_str(eva->data, "node-name"),
432                        qdict_get_str(evb->data, "node-name"));
433     }
434 
435     return TRUE;
436 }
437 
/*
 * Suspend @mon: stop accepting input (see monitor_can_read()).
 * Each successful call must be balanced by monitor_resume().
 * Returns 0 on success, -ENOTTY for non-interactive HMP monitors,
 * which cannot be suspended.
 */
int monitor_suspend(Monitor *mon)
{
    if (monitor_is_hmp_non_interactive(mon)) {
        return -ENOTTY;
    }

    atomic_inc(&mon->suspend_cnt);

    if (mon->use_io_thread) {
        /*
         * Kick I/O thread to make sure this takes effect.  It'll be
         * evaluated again in prepare() of the watch object.
         */
        aio_notify(iothread_get_aio_context(mon_iothread));
    }

    trace_monitor_suspend(mon, 1);
    return 0;
}
457 
/* Bottom half: tell the chardev layer @opaque (a Monitor) reads again */
static void monitor_accept_input(void *opaque)
{
    Monitor *mon = opaque;

    qemu_chr_fe_accept_input(&mon->chr);
}
464 
/*
 * Undo one monitor_suspend().  When the suspend count drops to zero,
 * re-show the HMP prompt (if any) and schedule a BH in the monitor's
 * AioContext to resume accepting input.
 */
void monitor_resume(Monitor *mon)
{
    if (monitor_is_hmp_non_interactive(mon)) {
        return;
    }

    if (atomic_dec_fetch(&mon->suspend_cnt) == 0) {
        AioContext *ctx;

        /* Pick the context the monitor's I/O actually runs in */
        if (mon->use_io_thread) {
            ctx = iothread_get_aio_context(mon_iothread);
        } else {
            ctx = qemu_get_aio_context();
        }

        if (!monitor_is_qmp(mon)) {
            MonitorHMP *hmp_mon = container_of(mon, MonitorHMP, common);
            assert(hmp_mon->rs);
            readline_show_prompt(hmp_mon->rs);
        }

        aio_bh_schedule_oneshot(ctx, monitor_accept_input, mon);
    }

    /* Traced unconditionally, mirroring monitor_suspend() */
    trace_monitor_suspend(mon, -1);
}
491 
492 int monitor_can_read(void *opaque)
493 {
494     Monitor *mon = opaque;
495 
496     return !atomic_mb_read(&mon->suspend_cnt);
497 }
498 
/*
 * Insert @mon into mon_list, taking ownership.  If monitor cleanup
 * already started, @mon is destroyed and freed instead of inserted.
 */
void monitor_list_append(Monitor *mon)
{
    qemu_mutex_lock(&monitor_lock);
    /*
     * This prevents inserting new monitors during monitor_cleanup().
     * A cleaner solution would involve the main thread telling other
     * threads to terminate, waiting for their termination.
     */
    if (!monitor_destroyed) {
        QTAILQ_INSERT_HEAD(&mon_list, mon, entry);
        mon = NULL;     /* ownership transferred to mon_list */
    }
    qemu_mutex_unlock(&monitor_lock);

    /* Not inserted: destroy outside the lock */
    if (mon) {
        monitor_data_destroy(mon);
        g_free(mon);
    }
}
518 
/* Create the shared monitor I/O thread; aborts QEMU on failure */
static void monitor_iothread_init(void)
{
    mon_iothread = iothread_create("mon_iothread", &error_abort);
}
523 
524 void monitor_data_init(Monitor *mon, int flags, bool skip_flush,
525                        bool use_io_thread)
526 {
527     if (use_io_thread && !mon_iothread) {
528         monitor_iothread_init();
529     }
530     qemu_mutex_init(&mon->mon_lock);
531     mon->outbuf = qstring_new();
532     mon->skip_flush = skip_flush;
533     mon->use_io_thread = use_io_thread;
534     mon->flags = flags;
535 }
536 
/* Release everything monitor_data_init() and the QMP/HMP setup acquired */
void monitor_data_destroy(Monitor *mon)
{
    g_free(mon->mon_cpu_path);
    /* Detach from the chardev before tearing down protocol state */
    qemu_chr_fe_deinit(&mon->chr, false);
    if (monitor_is_qmp(mon)) {
        monitor_data_destroy_qmp(container_of(mon, MonitorQMP, common));
    } else {
        readline_free(container_of(mon, MonitorHMP, common)->rs);
    }
    qobject_unref(mon->outbuf);
    qemu_mutex_destroy(&mon->mon_lock);
}
549 
550 void monitor_init(Chardev *chr, int flags)
551 {
552     if (flags & MONITOR_USE_CONTROL) {
553         monitor_init_qmp(chr, flags);
554     } else {
555         monitor_init_hmp(chr, flags);
556     }
557 }
558 
/* Tear down all monitors, the QMP dispatcher BH, and the I/O thread */
void monitor_cleanup(void)
{
    /*
     * We need to explicitly stop the I/O thread (but not destroy it),
     * clean up the monitor resources, then destroy the I/O thread since
     * we need to unregister from chardev below in
     * monitor_data_destroy(), and chardev is not thread-safe yet
     */
    if (mon_iothread) {
        iothread_stop(mon_iothread);
    }

    /* Flush output buffers and destroy monitors */
    qemu_mutex_lock(&monitor_lock);
    monitor_destroyed = true;
    while (!QTAILQ_EMPTY(&mon_list)) {
        Monitor *mon = QTAILQ_FIRST(&mon_list);
        QTAILQ_REMOVE(&mon_list, mon, entry);
        /* Permit QAPI event emission from character frontend release */
        qemu_mutex_unlock(&monitor_lock);
        monitor_flush(mon);
        monitor_data_destroy(mon);
        qemu_mutex_lock(&monitor_lock);
        g_free(mon);
    }
    qemu_mutex_unlock(&monitor_lock);

    /* QEMUBHs needs to be deleted before destroying the I/O thread */
    qemu_bh_delete(qmp_dispatcher_bh);
    qmp_dispatcher_bh = NULL;
    if (mon_iothread) {
        iothread_destroy(mon_iothread);
        mon_iothread = NULL;
    }
}
594 
/* Create the hash table tracking per-event throttling state */
static void monitor_qapi_event_init(void)
{
    monitor_qapi_event_state = g_hash_table_new(qapi_event_throttle_hash,
                                                qapi_event_throttle_equal);
}
600 
/* One-time initialization of monitor-core globals (lock, events, BH) */
void monitor_init_globals_core(void)
{
    monitor_qapi_event_init();
    qemu_mutex_init(&monitor_lock);

    /*
     * The dispatcher BH must run in the main loop thread, since we
     * have commands assuming that context.  It would be nice to get
     * rid of those assumptions.
     */
    qmp_dispatcher_bh = aio_bh_new(iohandler_get_aio_context(),
                                   monitor_qmp_bh_dispatcher,
                                   NULL);
}
615 
/* Option descriptions for the "mon" option group (monitor setup) */
QemuOptsList qemu_mon_opts = {
    .name = "mon",
    .implied_opt_name = "chardev",
    .head = QTAILQ_HEAD_INITIALIZER(qemu_mon_opts.head),
    .desc = {
        {
            .name = "mode",
            .type = QEMU_OPT_STRING,
        },{
            .name = "chardev",
            .type = QEMU_OPT_STRING,
        },{
            .name = "pretty",
            .type = QEMU_OPT_BOOL,
        },
        { /* end of list */ }
    },
};
634