/*
 * QEMU monitor
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "monitor-internal.h"
#include "qapi/error.h"
#include "qapi/opts-visitor.h"
#include "qapi/qapi-emit-events.h"
#include "qapi/qapi-visit-control.h"
#include "qapi/qmp/qdict.h"
#include "qemu/error-report.h"
#include "qemu/option.h"
#include "sysemu/qtest.h"
#include "trace.h"

/*
 * To prevent flooding clients, events can be throttled. The
 * throttling is calculated globally, rather than per-Monitor
 * instance.
 */
typedef struct MonitorQAPIEventState {
    QAPIEvent event;    /* Throttling state for this event type and... */
    QDict *data;        /* ... data, see qapi_event_throttle_equal() */
    QEMUTimer *timer;   /* Timer for handling delayed events */
    QDict *qdict;       /* Delayed event (if any) */
} MonitorQAPIEventState;

typedef struct {
    int64_t rate;       /* Minimum time (in ns) between two events */
} MonitorQAPIEventConf;

/* Shared monitor I/O thread */
IOThread *mon_iothread;

/* Coroutine to dispatch the requests received from I/O thread */
Coroutine *qmp_dispatcher_co;

/*
 * Set to true when the dispatcher coroutine should terminate. Protected
 * by monitor_lock.
 */
bool qmp_dispatcher_co_shutdown;

/*
 * Protects mon_list, monitor_qapi_event_state, coroutine_mon,
 * monitor_destroyed.
 */
QemuMutex monitor_lock;
static GHashTable *monitor_qapi_event_state;
static GHashTable *coroutine_mon; /* Maps Coroutine* to Monitor* */

MonitorList mon_list;
static bool monitor_destroyed;

Monitor *monitor_cur(void)
{
    Monitor *mon;

    qemu_mutex_lock(&monitor_lock);
    mon = g_hash_table_lookup(coroutine_mon, qemu_coroutine_self());
    qemu_mutex_unlock(&monitor_lock);

    return mon;
}

/**
 * Sets a new current monitor and returns the old one.
 *
 * If a non-NULL monitor is set for a coroutine, another call
 * resetting it to NULL is required before the coroutine terminates,
 * otherwise a stale entry would remain in the hash table.
 */
Monitor *monitor_set_cur(Coroutine *co, Monitor *mon)
{
    Monitor *old_monitor = monitor_cur();

    qemu_mutex_lock(&monitor_lock);
    if (mon) {
        g_hash_table_replace(coroutine_mon, co, mon);
    } else {
        g_hash_table_remove(coroutine_mon, co);
    }
    qemu_mutex_unlock(&monitor_lock);

    return old_monitor;
}
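/*
 * Illustrative usage sketch (added note, not part of the original
 * file): callers running in a coroutine are expected to pair the two
 * calls, e.g.
 *
 *     Monitor *old_mon = monitor_set_cur(qemu_coroutine_self(), mon);
 *     ... do work with mon as the current monitor ...
 *     monitor_set_cur(qemu_coroutine_self(), old_mon);
 *
 * restoring the previous value (usually NULL) so that no stale
 * Coroutine* -> Monitor* mapping outlives the coroutine.
 */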
/**
 * Is the current monitor, if any, a QMP monitor?
 */
bool monitor_cur_is_qmp(void)
{
    Monitor *cur_mon = monitor_cur();

    return cur_mon && monitor_is_qmp(cur_mon);
}

/**
 * Is @mon using readline?
 * Note: not all HMP monitors use readline, e.g., gdbserver has a
 * non-interactive HMP monitor, so readline is not used there.
 */
static inline bool monitor_uses_readline(const MonitorHMP *mon)
{
    return mon->use_readline;
}

static inline bool monitor_is_hmp_non_interactive(const Monitor *mon)
{
    if (monitor_is_qmp(mon)) {
        return false;
    }

    return !monitor_uses_readline(container_of(mon, MonitorHMP, common));
}

static gboolean monitor_unblocked(void *do_not_use, GIOCondition cond,
                                  void *opaque)
{
    Monitor *mon = opaque;

    QEMU_LOCK_GUARD(&mon->mon_lock);
    mon->out_watch = 0;
    monitor_flush_locked(mon);
    return G_SOURCE_REMOVE;
}

/* Caller must hold mon->mon_lock */
void monitor_flush_locked(Monitor *mon)
{
    int rc;
    size_t len;
    const char *buf;

    if (mon->skip_flush) {
        return;
    }

    buf = mon->outbuf->str;
    len = mon->outbuf->len;

    if (len && !mon->mux_out) {
        rc = qemu_chr_fe_write(&mon->chr, (const uint8_t *) buf, len);
        if ((rc < 0 && errno != EAGAIN) || (rc == len)) {
            /* all flushed or error */
            g_string_truncate(mon->outbuf, 0);
            return;
        }
        if (rc > 0) {
            /* partial write */
            g_string_erase(mon->outbuf, 0, rc);
        }
        if (mon->out_watch == 0) {
            mon->out_watch =
                qemu_chr_fe_add_watch(&mon->chr, G_IO_OUT | G_IO_HUP,
                                      monitor_unblocked, mon);
        }
    }
}

void monitor_flush(Monitor *mon)
{
    QEMU_LOCK_GUARD(&mon->mon_lock);
    monitor_flush_locked(mon);
}

/* flush at every end of line */
int monitor_puts_locked(Monitor *mon, const char *str)
{
    int i;
    char c;

    for (i = 0; str[i]; i++) {
        c = str[i];
        if (c == '\n') {
            g_string_append_c(mon->outbuf, '\r');
        }
        g_string_append_c(mon->outbuf, c);
        if (c == '\n') {
            monitor_flush_locked(mon);
        }
    }

    return i;
}

int monitor_puts(Monitor *mon, const char *str)
{
    QEMU_LOCK_GUARD(&mon->mon_lock);
    return monitor_puts_locked(mon, str);
}

int monitor_vprintf(Monitor *mon, const char *fmt, va_list ap)
{
    char *buf;
    int n;

    if (!mon) {
        return -1;
    }

    if (monitor_is_qmp(mon)) {
        return -1;
    }

    buf = g_strdup_vprintf(fmt, ap);
    n = monitor_puts(mon, buf);
    g_free(buf);
    return n;
}

int monitor_printf(Monitor *mon, const char *fmt, ...)
{
    int ret;

    va_list ap;
    va_start(ap, fmt);
    ret = monitor_vprintf(mon, fmt, ap);
    va_end(ap);
    return ret;
}

void monitor_printc(Monitor *mon, int c)
{
    monitor_printf(mon, "'");
    switch(c) {
    case '\'':
        monitor_printf(mon, "\\'");
        break;
    case '\\':
        monitor_printf(mon, "\\\\");
        break;
    case '\n':
        monitor_printf(mon, "\\n");
        break;
    case '\r':
        monitor_printf(mon, "\\r");
        break;
    default:
        if (c >= 32 && c <= 126) {
            monitor_printf(mon, "%c", c);
        } else {
            monitor_printf(mon, "\\x%02x", c);
        }
        break;
    }
    monitor_printf(mon, "'");
}

/*
 * Print to current monitor if we have one, else to stderr.
 */
int error_vprintf(const char *fmt, va_list ap)
{
    Monitor *cur_mon = monitor_cur();

    if (cur_mon && !monitor_cur_is_qmp()) {
        return monitor_vprintf(cur_mon, fmt, ap);
    }
    return vfprintf(stderr, fmt, ap);
}

int error_vprintf_unless_qmp(const char *fmt, va_list ap)
{
    Monitor *cur_mon = monitor_cur();

    if (!cur_mon) {
        return vfprintf(stderr, fmt, ap);
    }
    if (!monitor_cur_is_qmp()) {
        return monitor_vprintf(cur_mon, fmt, ap);
    }
    return -1;
}

int error_printf_unless_qmp(const char *fmt, ...)
{
    va_list ap;
    int ret;

    va_start(ap, fmt);
    ret = error_vprintf_unless_qmp(fmt, ap);
    va_end(ap);
    return ret;
}

static MonitorQAPIEventConf monitor_qapi_event_conf[QAPI_EVENT__MAX] = {
    /* Limit guest-triggerable events to 1 per second */
    [QAPI_EVENT_RTC_CHANGE]        = { 1000 * SCALE_MS },
    [QAPI_EVENT_WATCHDOG]          = { 1000 * SCALE_MS },
    [QAPI_EVENT_BALLOON_CHANGE]    = { 1000 * SCALE_MS },
    [QAPI_EVENT_QUORUM_REPORT_BAD] = { 1000 * SCALE_MS },
    [QAPI_EVENT_QUORUM_FAILURE]    = { 1000 * SCALE_MS },
    [QAPI_EVENT_VSERPORT_CHANGE]   = { 1000 * SCALE_MS },
    [QAPI_EVENT_MEMORY_DEVICE_SIZE_CHANGE] = { 1000 * SCALE_MS },
    [QAPI_EVENT_HV_BALLOON_STATUS_REPORT] = { 1000 * SCALE_MS },
};

/*
 * Return the clock to use for recording an event's time.
 * It's QEMU_CLOCK_REALTIME, except for qtests it's
 * QEMU_CLOCK_VIRTUAL, to support testing rate limits.
 * Beware: result is invalid before configure_accelerator().
 */
static inline QEMUClockType monitor_get_event_clock(void)
{
    return qtest_enabled() ? QEMU_CLOCK_VIRTUAL : QEMU_CLOCK_REALTIME;
}

/*
 * Broadcast an event to all monitors.
 * @qdict is the event object. Its member "event" must match @event.
 * Caller must hold monitor_lock.
 */
static void monitor_qapi_event_emit(QAPIEvent event, QDict *qdict)
{
    Monitor *mon;
    MonitorQMP *qmp_mon;

    trace_monitor_protocol_event_emit(event, qdict);
    QTAILQ_FOREACH(mon, &mon_list, entry) {
        if (!monitor_is_qmp(mon)) {
            continue;
        }

        qmp_mon = container_of(mon, MonitorQMP, common);
        if (qmp_mon->commands != &qmp_cap_negotiation_commands) {
            qmp_send_response(qmp_mon, qdict);
        }
    }
}

static void monitor_qapi_event_handler(void *opaque);

/*
 * Queue a new event for emission to Monitor instances,
 * applying any rate limiting if required.
 */
static void
monitor_qapi_event_queue_no_reenter(QAPIEvent event, QDict *qdict)
{
    MonitorQAPIEventConf *evconf;
    MonitorQAPIEventState *evstate;

    assert(event < QAPI_EVENT__MAX);
    evconf = &monitor_qapi_event_conf[event];
    trace_monitor_protocol_event_queue(event, qdict, evconf->rate);

    QEMU_LOCK_GUARD(&monitor_lock);

    if (!evconf->rate) {
        /* Unthrottled event */
        monitor_qapi_event_emit(event, qdict);
    } else {
        QDict *data = qobject_to(QDict, qdict_get(qdict, "data"));
        MonitorQAPIEventState key = { .event = event, .data = data };

        evstate = g_hash_table_lookup(monitor_qapi_event_state, &key);
        assert(!evstate || timer_pending(evstate->timer));

        if (evstate) {
            /*
             * Timer is pending for (at least) evconf->rate ns after
             * last send. Store event for sending when timer fires,
             * replacing a prior stored event if any.
             */
            qobject_unref(evstate->qdict);
            evstate->qdict = qobject_ref(qdict);
        } else {
            /*
             * Last send was (at least) evconf->rate ns ago.
             * Send immediately, and arm the timer to call
             * monitor_qapi_event_handler() in evconf->rate ns. Any
             * events arriving before then will be delayed until then.
             */
            int64_t now = qemu_clock_get_ns(monitor_get_event_clock());

            monitor_qapi_event_emit(event, qdict);

            evstate = g_new(MonitorQAPIEventState, 1);
            evstate->event = event;
            evstate->data = qobject_ref(data);
            evstate->qdict = NULL;
            evstate->timer = timer_new_ns(monitor_get_event_clock(),
                                          monitor_qapi_event_handler,
                                          evstate);
            g_hash_table_add(monitor_qapi_event_state, evstate);
            timer_mod_ns(evstate->timer, now + evconf->rate);
        }
    }
}

void qapi_event_emit(QAPIEvent event, QDict *qdict)
{
    /*
     * monitor_qapi_event_queue_no_reenter() is not reentrant: it
     * would deadlock on monitor_lock. Work around by queueing
     * events in thread-local storage.
     * TODO: remove this, make it re-enter safe.
     */
    typedef struct MonitorQapiEvent {
        QAPIEvent event;
        QDict *qdict;
        QSIMPLEQ_ENTRY(MonitorQapiEvent) entry;
    } MonitorQapiEvent;
    static __thread QSIMPLEQ_HEAD(, MonitorQapiEvent) event_queue;
    static __thread bool reentered;
    MonitorQapiEvent *ev;

    if (!reentered) {
        QSIMPLEQ_INIT(&event_queue);
    }

    ev = g_new(MonitorQapiEvent, 1);
    ev->qdict = qobject_ref(qdict);
    ev->event = event;
    QSIMPLEQ_INSERT_TAIL(&event_queue, ev, entry);
    if (reentered) {
        return;
    }

    reentered = true;

    while ((ev = QSIMPLEQ_FIRST(&event_queue)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&event_queue, entry);
        monitor_qapi_event_queue_no_reenter(ev->event, ev->qdict);
        qobject_unref(ev->qdict);
        g_free(ev);
    }

    reentered = false;
}
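/*
 * Illustrative timeline for the throttling above (added note, not
 * part of the original file), assuming an event limited to one per
 * second such as RTC_CHANGE: a burst at t=0, t=0.2s and t=0.7s emits
 * the first event immediately at t=0 and arms the timer for t=1s; the
 * second event is stored, then replaced by the third; when the timer
 * fires at t=1s, monitor_qapi_event_handler() below sends the stored
 * third event and re-arms the timer for t=2s.
 */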
/*
 * This function runs evconf->rate ns after sending a throttled
 * event.
 * If another event has since been stored, send it.
 */
static void monitor_qapi_event_handler(void *opaque)
{
    MonitorQAPIEventState *evstate = opaque;
    MonitorQAPIEventConf *evconf = &monitor_qapi_event_conf[evstate->event];

    trace_monitor_protocol_event_handler(evstate->event, evstate->qdict);
    QEMU_LOCK_GUARD(&monitor_lock);

    if (evstate->qdict) {
        int64_t now = qemu_clock_get_ns(monitor_get_event_clock());

        monitor_qapi_event_emit(evstate->event, evstate->qdict);
        qobject_unref(evstate->qdict);
        evstate->qdict = NULL;
        timer_mod_ns(evstate->timer, now + evconf->rate);
    } else {
        g_hash_table_remove(monitor_qapi_event_state, evstate);
        qobject_unref(evstate->data);
        timer_free(evstate->timer);
        g_free(evstate);
    }
}

static unsigned int qapi_event_throttle_hash(const void *key)
{
    const MonitorQAPIEventState *evstate = key;
    unsigned int hash = evstate->event * 255;

    if (evstate->event == QAPI_EVENT_VSERPORT_CHANGE) {
        hash += g_str_hash(qdict_get_str(evstate->data, "id"));
    }

    if (evstate->event == QAPI_EVENT_QUORUM_REPORT_BAD) {
        hash += g_str_hash(qdict_get_str(evstate->data, "node-name"));
    }

    if (evstate->event == QAPI_EVENT_MEMORY_DEVICE_SIZE_CHANGE) {
        hash += g_str_hash(qdict_get_str(evstate->data, "qom-path"));
    }

    return hash;
}

static gboolean qapi_event_throttle_equal(const void *a, const void *b)
{
    const MonitorQAPIEventState *eva = a;
    const MonitorQAPIEventState *evb = b;

    if (eva->event != evb->event) {
        return FALSE;
    }

    if (eva->event == QAPI_EVENT_VSERPORT_CHANGE) {
        return !strcmp(qdict_get_str(eva->data, "id"),
                       qdict_get_str(evb->data, "id"));
    }

    if (eva->event == QAPI_EVENT_QUORUM_REPORT_BAD) {
        return !strcmp(qdict_get_str(eva->data, "node-name"),
                       qdict_get_str(evb->data, "node-name"));
    }

    if (eva->event == QAPI_EVENT_MEMORY_DEVICE_SIZE_CHANGE) {
        return !strcmp(qdict_get_str(eva->data, "qom-path"),
                       qdict_get_str(evb->data, "qom-path"));
    }

    return TRUE;
}
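/*
 * Added note (illustrative): the two callbacks above define the
 * GHashTable key for monitor_qapi_event_state. Most throttled events
 * are coalesced per event type, but VSERPORT_CHANGE,
 * QUORUM_REPORT_BAD and MEMORY_DEVICE_SIZE_CHANGE are additionally
 * keyed on their "id", "node-name" and "qom-path" members, so events
 * for different devices are rate-limited independently.
 */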
int monitor_suspend(Monitor *mon)
{
    if (monitor_is_hmp_non_interactive(mon)) {
        return -ENOTTY;
    }

    qatomic_inc(&mon->suspend_cnt);

    if (mon->use_io_thread) {
        /*
         * Kick I/O thread to make sure this takes effect. It'll be
         * evaluated again in prepare() of the watch object.
         */
        aio_notify(iothread_get_aio_context(mon_iothread));
    }

    trace_monitor_suspend(mon, 1);
    return 0;
}

static void monitor_accept_input(void *opaque)
{
    Monitor *mon = opaque;

    qemu_mutex_lock(&mon->mon_lock);
    if (!monitor_is_qmp(mon) && mon->reset_seen) {
        MonitorHMP *hmp_mon = container_of(mon, MonitorHMP, common);
        assert(hmp_mon->rs);
        readline_restart(hmp_mon->rs);
        qemu_mutex_unlock(&mon->mon_lock);
        readline_show_prompt(hmp_mon->rs);
    } else {
        qemu_mutex_unlock(&mon->mon_lock);
    }

    qemu_chr_fe_accept_input(&mon->chr);
}

void monitor_resume(Monitor *mon)
{
    if (monitor_is_hmp_non_interactive(mon)) {
        return;
    }

    if (qatomic_dec_fetch(&mon->suspend_cnt) == 0) {
        AioContext *ctx;

        if (mon->use_io_thread) {
            ctx = iothread_get_aio_context(mon_iothread);
        } else {
            ctx = qemu_get_aio_context();
        }

        aio_bh_schedule_oneshot(ctx, monitor_accept_input, mon);
    }

    trace_monitor_suspend(mon, -1);
}

int monitor_can_read(void *opaque)
{
    Monitor *mon = opaque;

    return !qatomic_read(&mon->suspend_cnt);
}

void monitor_list_append(Monitor *mon)
{
    qemu_mutex_lock(&monitor_lock);
    /*
     * This prevents inserting new monitors during monitor_cleanup().
     * A cleaner solution would involve the main thread telling other
     * threads to terminate, waiting for their termination.
     */
    if (!monitor_destroyed) {
        QTAILQ_INSERT_HEAD(&mon_list, mon, entry);
        mon = NULL;
    }
    qemu_mutex_unlock(&monitor_lock);

    if (mon) {
        monitor_data_destroy(mon);
        g_free(mon);
    }
}

static void monitor_iothread_init(void)
{
    mon_iothread = iothread_create("mon_iothread", &error_abort);
}

void monitor_data_init(Monitor *mon, bool is_qmp, bool skip_flush,
                       bool use_io_thread)
{
    if (use_io_thread && !mon_iothread) {
        monitor_iothread_init();
    }
    qemu_mutex_init(&mon->mon_lock);
    mon->is_qmp = is_qmp;
    mon->outbuf = g_string_new(NULL);
    mon->skip_flush = skip_flush;
    mon->use_io_thread = use_io_thread;
}

void monitor_data_destroy(Monitor *mon)
{
    g_free(mon->mon_cpu_path);
    qemu_chr_fe_deinit(&mon->chr, false);
    if (monitor_is_qmp(mon)) {
        monitor_data_destroy_qmp(container_of(mon, MonitorQMP, common));
    } else {
        readline_free(container_of(mon, MonitorHMP, common)->rs);
    }
    g_string_free(mon->outbuf, true);
    qemu_mutex_destroy(&mon->mon_lock);
}
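/*
 * Added note (illustrative): monitor_data_init() and
 * monitor_data_destroy() bracket a monitor's lifetime. The QMP and
 * HMP front ends (monitor_init_qmp()/monitor_init_hmp(), defined
 * elsewhere) call the former when a monitor is created;
 * monitor_cleanup() below calls the latter for every monitor still on
 * mon_list at shutdown.
 */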
void monitor_cleanup(void)
{
    /*
     * The dispatcher needs to stop before destroying the monitor and
     * the I/O thread.
     *
     * We need to poll both qemu_aio_context and iohandler_ctx to make
     * sure that the dispatcher coroutine keeps making progress and
     * eventually terminates. qemu_aio_context is automatically
     * polled by calling AIO_WAIT_WHILE_UNLOCKED on it, but we must poll
     * iohandler_ctx manually.
     *
     * Letting the iothread continue while shutting down the dispatcher
     * means that new requests may still be coming in. This is okay,
     * we'll just leave them in the queue without sending a response
     * and monitor_data_destroy() will free them.
     */
    WITH_QEMU_LOCK_GUARD(&monitor_lock) {
        qmp_dispatcher_co_shutdown = true;
    }
    qmp_dispatcher_co_wake();

    AIO_WAIT_WHILE_UNLOCKED(NULL,
                            (aio_poll(iohandler_get_aio_context(), false),
                             qatomic_read(&qmp_dispatcher_co)));

    /*
     * We need to explicitly stop the I/O thread (but not destroy it),
     * clean up the monitor resources, then destroy the I/O thread since
     * we need to unregister from chardev below in
     * monitor_data_destroy(), and chardev is not thread-safe yet
     */
    if (mon_iothread) {
        iothread_stop(mon_iothread);
    }

    /* Flush output buffers and destroy monitors */
    qemu_mutex_lock(&monitor_lock);
    monitor_destroyed = true;
    while (!QTAILQ_EMPTY(&mon_list)) {
        Monitor *mon = QTAILQ_FIRST(&mon_list);
        QTAILQ_REMOVE(&mon_list, mon, entry);
        /* Permit QAPI event emission from character frontend release */
        qemu_mutex_unlock(&monitor_lock);
        monitor_flush(mon);
        monitor_data_destroy(mon);
        qemu_mutex_lock(&monitor_lock);
        g_free(mon);
    }
    qemu_mutex_unlock(&monitor_lock);

    if (mon_iothread) {
        iothread_destroy(mon_iothread);
        mon_iothread = NULL;
    }
}

static void monitor_qapi_event_init(void)
{
    monitor_qapi_event_state = g_hash_table_new(qapi_event_throttle_hash,
                                                qapi_event_throttle_equal);
}

void monitor_init_globals(void)
{
    monitor_qapi_event_init();
    qemu_mutex_init(&monitor_lock);
    coroutine_mon = g_hash_table_new(NULL, NULL);

    /*
     * The dispatcher BH must run in the main loop thread, since we
     * have commands assuming that context. It would be nice to get
     * rid of those assumptions.
     */
    qmp_dispatcher_co = qemu_coroutine_create(monitor_qmp_dispatcher_co, NULL);
    aio_co_schedule(iohandler_get_aio_context(), qmp_dispatcher_co);
}

int monitor_init(MonitorOptions *opts, bool allow_hmp, Error **errp)
{
    ERRP_GUARD();
    Chardev *chr;

    chr = qemu_chr_find(opts->chardev);
    if (chr == NULL) {
        error_setg(errp, "chardev \"%s\" not found", opts->chardev);
        return -1;
    }

    if (!opts->has_mode) {
        opts->mode = allow_hmp ? MONITOR_MODE_READLINE : MONITOR_MODE_CONTROL;
    }

    switch (opts->mode) {
    case MONITOR_MODE_CONTROL:
        monitor_init_qmp(chr, opts->pretty, errp);
        break;
    case MONITOR_MODE_READLINE:
        if (!allow_hmp) {
            error_setg(errp, "Only QMP is supported");
            return -1;
        }
        if (opts->pretty) {
            error_setg(errp, "'pretty' is not compatible with HMP monitors");
            return -1;
        }
        monitor_init_hmp(chr, true, errp);
        break;
    default:
        g_assert_not_reached();
    }

    return *errp ? -1 : 0;
}

int monitor_init_opts(QemuOpts *opts, Error **errp)
{
    Visitor *v;
    MonitorOptions *options;
    int ret;

    v = opts_visitor_new(opts);
    visit_type_MonitorOptions(v, NULL, &options, errp);
    visit_free(v);
    if (!options) {
        return -1;
    }

    ret = monitor_init(options, true, errp);
    qapi_free_MonitorOptions(options);
    return ret;
}

QemuOptsList qemu_mon_opts = {
    .name = "mon",
    .implied_opt_name = "chardev",
    .head = QTAILQ_HEAD_INITIALIZER(qemu_mon_opts.head),
    .desc = {
        {
            .name = "mode",
            .type = QEMU_OPT_STRING,
        },{
            .name = "chardev",
            .type = QEMU_OPT_STRING,
        },{
            .name = "pretty",
            .type = QEMU_OPT_BOOL,
        },
        { /* end of list */ }
    },
};
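
/*
 * Added note (illustrative): a command line matching qemu_mon_opts
 * above could look like
 *
 *     qemu-system-x86_64 \
 *         -chardev socket,id=mon0,path=/tmp/qmp.sock,server=on,wait=off \
 *         -mon chardev=mon0,mode=control,pretty=on
 *
 * which requests a QMP monitor ("control" mode) with pretty-printed
 * JSON output on the given UNIX socket; mode=readline would request
 * an HMP monitor instead.
 */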