/*
 * QEMU monitor
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "monitor-internal.h"
#include "qapi/error.h"
#include "qapi/opts-visitor.h"
#include "qapi/qapi-emit-events.h"
#include "qapi/qapi-visit-control.h"
#include "qapi/qmp/qdict.h"
#include "qemu/error-report.h"
#include "qemu/option.h"
#include "sysemu/qtest.h"
#include "trace.h"

/*
 * To prevent flooding clients, events can be throttled. The
 * throttling is calculated globally, rather than per-Monitor
 * instance.
 */
typedef struct MonitorQAPIEventState {
    QAPIEvent event;    /* Throttling state for this event type and... */
    QDict *data;        /* ... data, see qapi_event_throttle_equal() */
    QEMUTimer *timer;   /* Timer for handling delayed events */
    QDict *qdict;       /* Delayed event (if any) */
} MonitorQAPIEventState;

typedef struct {
    int64_t rate;       /* Minimum time (in ns) between two events */
} MonitorQAPIEventConf;

/* Shared monitor I/O thread */
IOThread *mon_iothread;

/* Coroutine to dispatch the requests received from I/O thread */
Coroutine *qmp_dispatcher_co;

/* Set to true when the dispatcher coroutine should terminate */
bool qmp_dispatcher_co_shutdown;

/*
 * qmp_dispatcher_co_busy is used for synchronisation between the
 * monitor thread and the main thread to ensure that the dispatcher
 * coroutine never gets scheduled a second time when it's already
 * scheduled (scheduling the same coroutine twice is forbidden).
 *
 * It is true if the coroutine is active and processing requests.
 * Additional requests may then be pushed onto mon->qmp_requests,
 * and @qmp_dispatcher_co_shutdown may be set without further ado.
 * @qmp_dispatcher_co must not be woken up in this case.
 *
 * If false, you also have to set @qmp_dispatcher_co_busy to true and
 * wake up @qmp_dispatcher_co after pushing the new requests.
 *
 * The coroutine will automatically change this variable back to false
 * before it yields. Nobody else may set the variable to false.
 *
 * Access must be atomic for thread safety.
 */
bool qmp_dispatcher_co_busy;
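
/*
 * Sketch of the wake-up sequence the comment above describes.  The
 * actual request enqueuing lives in the QMP-specific monitor code;
 * monitor_cleanup() below uses the same pattern to request shutdown:
 *
 *     if (!qatomic_xchg(&qmp_dispatcher_co_busy, true)) {
 *         aio_co_wake(qmp_dispatcher_co);
 *     }
 */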

/*
 * Protects mon_list, monitor_qapi_event_state, coroutine_mon,
 * monitor_destroyed.
 */
QemuMutex monitor_lock;
static GHashTable *monitor_qapi_event_state;
static GHashTable *coroutine_mon; /* Maps Coroutine* to Monitor* */

MonitorList mon_list;
int mon_refcount;
static bool monitor_destroyed;

Monitor *monitor_cur(void)
{
    Monitor *mon;

    qemu_mutex_lock(&monitor_lock);
    mon = g_hash_table_lookup(coroutine_mon, qemu_coroutine_self());
    qemu_mutex_unlock(&monitor_lock);

    return mon;
}

/**
 * Sets a new current monitor and returns the old one.
 *
 * If a non-NULL monitor is set for a coroutine, another call
 * resetting it to NULL is required before the coroutine terminates,
 * otherwise a stale entry would remain in the hash table.
 */
Monitor *monitor_set_cur(Coroutine *co, Monitor *mon)
{
    Monitor *old_monitor = monitor_cur();

    qemu_mutex_lock(&monitor_lock);
    if (mon) {
        g_hash_table_replace(coroutine_mon, co, mon);
    } else {
        g_hash_table_remove(coroutine_mon, co);
    }
    qemu_mutex_unlock(&monitor_lock);

    return old_monitor;
}

/**
 * Is the current monitor, if any, a QMP monitor?
 */
bool monitor_cur_is_qmp(void)
{
    Monitor *cur_mon = monitor_cur();

    return cur_mon && monitor_is_qmp(cur_mon);
}

/**
 * Is @mon using readline?
 * Note: not all HMP monitors use readline, e.g., gdbserver has a
 * non-interactive HMP monitor, so readline is not used there.
 */
static inline bool monitor_uses_readline(const MonitorHMP *mon)
{
    return mon->use_readline;
}

static inline bool monitor_is_hmp_non_interactive(const Monitor *mon)
{
    if (monitor_is_qmp(mon)) {
        return false;
    }

    return !monitor_uses_readline(container_of(mon, MonitorHMP, common));
}

static void monitor_flush_locked(Monitor *mon);

static gboolean monitor_unblocked(void *do_not_use, GIOCondition cond,
                                  void *opaque)
{
    Monitor *mon = opaque;

    qemu_mutex_lock(&mon->mon_lock);
    mon->out_watch = 0;
    monitor_flush_locked(mon);
    qemu_mutex_unlock(&mon->mon_lock);
    return FALSE;
}

/* Caller must hold mon->mon_lock */
static void monitor_flush_locked(Monitor *mon)
{
    int rc;
    size_t len;
    const char *buf;

    if (mon->skip_flush) {
        return;
    }

    buf = mon->outbuf->str;
    len = mon->outbuf->len;

    if (len && !mon->mux_out) {
        rc = qemu_chr_fe_write(&mon->chr, (const uint8_t *) buf, len);
        if ((rc < 0 && errno != EAGAIN) || (rc == len)) {
            /* all flushed or error */
            g_string_truncate(mon->outbuf, 0);
            return;
        }
        if (rc > 0) {
            /* partial write */
            g_string_erase(mon->outbuf, 0, rc);
        }
        if (mon->out_watch == 0) {
            mon->out_watch =
                qemu_chr_fe_add_watch(&mon->chr, G_IO_OUT | G_IO_HUP,
                                      monitor_unblocked, mon);
        }
    }
}

void monitor_flush(Monitor *mon)
{
    qemu_mutex_lock(&mon->mon_lock);
    monitor_flush_locked(mon);
    qemu_mutex_unlock(&mon->mon_lock);
}

/* flush at every end of line */
int monitor_puts(Monitor *mon, const char *str)
{
    int i;
    char c;

    qemu_mutex_lock(&mon->mon_lock);
    for (i = 0; str[i]; i++) {
        c = str[i];
        if (c == '\n') {
            g_string_append_c(mon->outbuf, '\r');
        }
        g_string_append_c(mon->outbuf, c);
        if (c == '\n') {
            monitor_flush_locked(mon);
        }
    }
    qemu_mutex_unlock(&mon->mon_lock);

    return i;
}
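
/*
 * Format and print a string to @mon.  Returns the length of the
 * formatted string, or -1 when there is no monitor or the monitor is
 * a QMP monitor (QMP output must remain well-formed JSON, so
 * free-form printing is rejected).
 */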
int monitor_vprintf(Monitor *mon, const char *fmt, va_list ap)
{
    char *buf;
    int n;

    if (!mon) {
        return -1;
    }

    if (monitor_is_qmp(mon)) {
        return -1;
    }

    buf = g_strdup_vprintf(fmt, ap);
    n = monitor_puts(mon, buf);
    g_free(buf);
    return n;
}

int monitor_printf(Monitor *mon, const char *fmt, ...)
{
    int ret;

    va_list ap;
    va_start(ap, fmt);
    ret = monitor_vprintf(mon, fmt, ap);
    va_end(ap);
    return ret;
}

/*
 * Print to current monitor if we have one, else to stderr.
 */
int error_vprintf(const char *fmt, va_list ap)
{
    Monitor *cur_mon = monitor_cur();

    if (cur_mon && !monitor_cur_is_qmp()) {
        return monitor_vprintf(cur_mon, fmt, ap);
    }
    return vfprintf(stderr, fmt, ap);
}

int error_vprintf_unless_qmp(const char *fmt, va_list ap)
{
    Monitor *cur_mon = monitor_cur();

    if (!cur_mon) {
        return vfprintf(stderr, fmt, ap);
    }
    if (!monitor_cur_is_qmp()) {
        return monitor_vprintf(cur_mon, fmt, ap);
    }
    return -1;
}

int error_printf_unless_qmp(const char *fmt, ...)
{
    va_list ap;
    int ret;

    va_start(ap, fmt);
    ret = error_vprintf_unless_qmp(fmt, ap);
    va_end(ap);
    return ret;
}

static MonitorQAPIEventConf monitor_qapi_event_conf[QAPI_EVENT__MAX] = {
    /* Limit guest-triggerable events to 1 per second */
    [QAPI_EVENT_RTC_CHANGE] = { 1000 * SCALE_MS },
    [QAPI_EVENT_WATCHDOG] = { 1000 * SCALE_MS },
    [QAPI_EVENT_BALLOON_CHANGE] = { 1000 * SCALE_MS },
    [QAPI_EVENT_QUORUM_REPORT_BAD] = { 1000 * SCALE_MS },
    [QAPI_EVENT_QUORUM_FAILURE] = { 1000 * SCALE_MS },
    [QAPI_EVENT_VSERPORT_CHANGE] = { 1000 * SCALE_MS },
    [QAPI_EVENT_MEMORY_DEVICE_SIZE_CHANGE] = { 1000 * SCALE_MS },
};

/*
 * Return the clock to use for recording an event's time.
 * It's QEMU_CLOCK_REALTIME, except for qtests it's
 * QEMU_CLOCK_VIRTUAL, to support testing rate limits.
 * Beware: result is invalid before configure_accelerator().
 */
static inline QEMUClockType monitor_get_event_clock(void)
{
    return qtest_enabled() ? QEMU_CLOCK_VIRTUAL : QEMU_CLOCK_REALTIME;
}

/*
 * Broadcast an event to all monitors.
 * @qdict is the event object. Its member "event" must match @event.
 * Caller must hold monitor_lock.
 */
static void monitor_qapi_event_emit(QAPIEvent event, QDict *qdict)
{
    Monitor *mon;
    MonitorQMP *qmp_mon;

    trace_monitor_protocol_event_emit(event, qdict);
    QTAILQ_FOREACH(mon, &mon_list, entry) {
        if (!monitor_is_qmp(mon)) {
            continue;
        }

        qmp_mon = container_of(mon, MonitorQMP, common);
        if (qmp_mon->commands != &qmp_cap_negotiation_commands) {
            qmp_send_response(qmp_mon, qdict);
        }
    }
}

static void monitor_qapi_event_handler(void *opaque);
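
/*
 * Throttling state lives in monitor_qapi_event_state, a hash table
 * keyed by event type and, for events that can refer to more than one
 * object (VSERPORT_CHANGE, QUORUM_REPORT_BAD,
 * MEMORY_DEVICE_SIZE_CHANGE), by the identifying member of the event
 * data; see qapi_event_throttle_hash() and qapi_event_throttle_equal()
 * below.
 */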

/*
 * Queue a new event for emission to Monitor instances,
 * applying any rate limiting if required.
 */
static void
monitor_qapi_event_queue_no_reenter(QAPIEvent event, QDict *qdict)
{
    MonitorQAPIEventConf *evconf;
    MonitorQAPIEventState *evstate;

    assert(event < QAPI_EVENT__MAX);
    evconf = &monitor_qapi_event_conf[event];
    trace_monitor_protocol_event_queue(event, qdict, evconf->rate);

    QEMU_LOCK_GUARD(&monitor_lock);

    if (!evconf->rate) {
        /* Unthrottled event */
        monitor_qapi_event_emit(event, qdict);
    } else {
        QDict *data = qobject_to(QDict, qdict_get(qdict, "data"));
        MonitorQAPIEventState key = { .event = event, .data = data };

        evstate = g_hash_table_lookup(monitor_qapi_event_state, &key);
        assert(!evstate || timer_pending(evstate->timer));

        if (evstate) {
            /*
             * Timer is pending for (at least) evconf->rate ns after
             * last send. Store event for sending when timer fires,
             * replacing a prior stored event if any.
             */
            qobject_unref(evstate->qdict);
            evstate->qdict = qobject_ref(qdict);
        } else {
            /*
             * Last send was (at least) evconf->rate ns ago.
             * Send immediately, and arm the timer to call
             * monitor_qapi_event_handler() in evconf->rate ns. Any
             * events arriving before then will be delayed until then.
             */
            int64_t now = qemu_clock_get_ns(monitor_get_event_clock());

            monitor_qapi_event_emit(event, qdict);

            evstate = g_new(MonitorQAPIEventState, 1);
            evstate->event = event;
            evstate->data = qobject_ref(data);
            evstate->qdict = NULL;
            evstate->timer = timer_new_ns(monitor_get_event_clock(),
                                          monitor_qapi_event_handler,
                                          evstate);
            g_hash_table_add(monitor_qapi_event_state, evstate);
            timer_mod_ns(evstate->timer, now + evconf->rate);
        }
    }
}

void qapi_event_emit(QAPIEvent event, QDict *qdict)
{
    /*
     * monitor_qapi_event_queue_no_reenter() is not reentrant: it
     * would deadlock on monitor_lock. Work around by queueing
     * events in thread-local storage.
     * TODO: remove this, make it re-enter safe.
     */
    typedef struct MonitorQapiEvent {
        QAPIEvent event;
        QDict *qdict;
        QSIMPLEQ_ENTRY(MonitorQapiEvent) entry;
    } MonitorQapiEvent;
    static __thread QSIMPLEQ_HEAD(, MonitorQapiEvent) event_queue;
    static __thread bool reentered;
    MonitorQapiEvent *ev;

    if (!reentered) {
        QSIMPLEQ_INIT(&event_queue);
    }

    ev = g_new(MonitorQapiEvent, 1);
    ev->qdict = qobject_ref(qdict);
    ev->event = event;
    QSIMPLEQ_INSERT_TAIL(&event_queue, ev, entry);
    if (reentered) {
        return;
    }

    reentered = true;

    while ((ev = QSIMPLEQ_FIRST(&event_queue)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&event_queue, entry);
        monitor_qapi_event_queue_no_reenter(ev->event, ev->qdict);
        qobject_unref(ev->qdict);
        g_free(ev);
    }

    reentered = false;
}
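
/*
 * Worked example of the throttling above, assuming the 1000 ms rate
 * configured for RTC_CHANGE: the first RTC_CHANGE is emitted
 * immediately and the timer is armed; RTC_CHANGE events arriving
 * within the following second are not emitted but overwrite the
 * pending evstate->qdict, so when the timer fires only the most
 * recent one is sent; if nothing arrived in the meantime, the timer
 * callback simply drops the throttling state.
 */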

/*
 * This function runs evconf->rate ns after sending a throttled
 * event.
 * If another event has since been stored, send it.
 */
static void monitor_qapi_event_handler(void *opaque)
{
    MonitorQAPIEventState *evstate = opaque;
    MonitorQAPIEventConf *evconf = &monitor_qapi_event_conf[evstate->event];

    trace_monitor_protocol_event_handler(evstate->event, evstate->qdict);
    QEMU_LOCK_GUARD(&monitor_lock);

    if (evstate->qdict) {
        int64_t now = qemu_clock_get_ns(monitor_get_event_clock());

        monitor_qapi_event_emit(evstate->event, evstate->qdict);
        qobject_unref(evstate->qdict);
        evstate->qdict = NULL;
        timer_mod_ns(evstate->timer, now + evconf->rate);
    } else {
        g_hash_table_remove(monitor_qapi_event_state, evstate);
        qobject_unref(evstate->data);
        timer_free(evstate->timer);
        g_free(evstate);
    }
}

static unsigned int qapi_event_throttle_hash(const void *key)
{
    const MonitorQAPIEventState *evstate = key;
    unsigned int hash = evstate->event * 255;

    if (evstate->event == QAPI_EVENT_VSERPORT_CHANGE) {
        hash += g_str_hash(qdict_get_str(evstate->data, "id"));
    }

    if (evstate->event == QAPI_EVENT_QUORUM_REPORT_BAD) {
        hash += g_str_hash(qdict_get_str(evstate->data, "node-name"));
    }

    if (evstate->event == QAPI_EVENT_MEMORY_DEVICE_SIZE_CHANGE) {
        hash += g_str_hash(qdict_get_str(evstate->data, "qom-path"));
    }

    return hash;
}

static gboolean qapi_event_throttle_equal(const void *a, const void *b)
{
    const MonitorQAPIEventState *eva = a;
    const MonitorQAPIEventState *evb = b;

    if (eva->event != evb->event) {
        return FALSE;
    }

    if (eva->event == QAPI_EVENT_VSERPORT_CHANGE) {
        return !strcmp(qdict_get_str(eva->data, "id"),
                       qdict_get_str(evb->data, "id"));
    }

    if (eva->event == QAPI_EVENT_QUORUM_REPORT_BAD) {
        return !strcmp(qdict_get_str(eva->data, "node-name"),
                       qdict_get_str(evb->data, "node-name"));
    }

    if (eva->event == QAPI_EVENT_MEMORY_DEVICE_SIZE_CHANGE) {
        return !strcmp(qdict_get_str(eva->data, "qom-path"),
                       qdict_get_str(evb->data, "qom-path"));
    }

    return TRUE;
}
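
/*
 * mon->suspend_cnt is a counter: monitor_suspend() and
 * monitor_resume() calls nest, and input is only accepted again once
 * every suspend has been matched by a resume (see monitor_can_read()
 * below). Non-interactive HMP monitors cannot be suspended.
 */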
int monitor_suspend(Monitor *mon)
{
    if (monitor_is_hmp_non_interactive(mon)) {
        return -ENOTTY;
    }

    qatomic_inc(&mon->suspend_cnt);

    if (mon->use_io_thread) {
        /*
         * Kick I/O thread to make sure this takes effect. It'll be
         * evaluated again in prepare() of the watch object.
         */
        aio_notify(iothread_get_aio_context(mon_iothread));
    }

    trace_monitor_suspend(mon, 1);
    return 0;
}

static void monitor_accept_input(void *opaque)
{
    Monitor *mon = opaque;

    qemu_chr_fe_accept_input(&mon->chr);
}

void monitor_resume(Monitor *mon)
{
    if (monitor_is_hmp_non_interactive(mon)) {
        return;
    }

    if (qatomic_dec_fetch(&mon->suspend_cnt) == 0) {
        AioContext *ctx;

        if (mon->use_io_thread) {
            ctx = iothread_get_aio_context(mon_iothread);
        } else {
            ctx = qemu_get_aio_context();
        }

        if (!monitor_is_qmp(mon)) {
            MonitorHMP *hmp_mon = container_of(mon, MonitorHMP, common);
            assert(hmp_mon->rs);
            readline_show_prompt(hmp_mon->rs);
        }

        aio_bh_schedule_oneshot(ctx, monitor_accept_input, mon);
    }

    trace_monitor_suspend(mon, -1);
}

int monitor_can_read(void *opaque)
{
    Monitor *mon = opaque;

    return !qatomic_mb_read(&mon->suspend_cnt);
}

void monitor_list_append(Monitor *mon)
{
    qemu_mutex_lock(&monitor_lock);
    /*
     * This prevents inserting new monitors during monitor_cleanup().
     * A cleaner solution would involve the main thread telling other
     * threads to terminate, waiting for their termination.
     */
    if (!monitor_destroyed) {
        QTAILQ_INSERT_HEAD(&mon_list, mon, entry);
        mon = NULL;
    }
    qemu_mutex_unlock(&monitor_lock);

    if (mon) {
        monitor_data_destroy(mon);
        g_free(mon);
    }
}

static void monitor_iothread_init(void)
{
    mon_iothread = iothread_create("mon_iothread", &error_abort);
}

void monitor_data_init(Monitor *mon, bool is_qmp, bool skip_flush,
                       bool use_io_thread)
{
    if (use_io_thread && !mon_iothread) {
        monitor_iothread_init();
    }
    qemu_mutex_init(&mon->mon_lock);
    mon->is_qmp = is_qmp;
    mon->outbuf = g_string_new(NULL);
    mon->skip_flush = skip_flush;
    mon->use_io_thread = use_io_thread;
}

void monitor_data_destroy(Monitor *mon)
{
    g_free(mon->mon_cpu_path);
    qemu_chr_fe_deinit(&mon->chr, false);
    if (monitor_is_qmp(mon)) {
        monitor_data_destroy_qmp(container_of(mon, MonitorQMP, common));
    } else {
        readline_free(container_of(mon, MonitorHMP, common)->rs);
    }
    g_string_free(mon->outbuf, true);
    qemu_mutex_destroy(&mon->mon_lock);
}

void monitor_cleanup(void)
{
    /*
     * The dispatcher needs to stop before destroying the monitor and
     * the I/O thread.
     *
     * We need to poll both qemu_aio_context and iohandler_ctx to make
     * sure that the dispatcher coroutine keeps making progress and
     * eventually terminates. qemu_aio_context is automatically
     * polled by calling AIO_WAIT_WHILE on it, but we must poll
     * iohandler_ctx manually.
     *
     * Letting the iothread continue while shutting down the dispatcher
     * means that new requests may still be coming in. This is okay,
     * we'll just leave them in the queue without sending a response
     * and monitor_data_destroy() will free them.
     */
    qmp_dispatcher_co_shutdown = true;
    if (!qatomic_xchg(&qmp_dispatcher_co_busy, true)) {
        aio_co_wake(qmp_dispatcher_co);
    }

    AIO_WAIT_WHILE(qemu_get_aio_context(),
                   (aio_poll(iohandler_get_aio_context(), false),
                    qatomic_mb_read(&qmp_dispatcher_co_busy)));

    /*
     * We need to explicitly stop the I/O thread (but not destroy it),
     * clean up the monitor resources, then destroy the I/O thread since
     * we need to unregister from chardev below in
     * monitor_data_destroy(), and chardev is not thread-safe yet
     */
    if (mon_iothread) {
        iothread_stop(mon_iothread);
    }

    /* Flush output buffers and destroy monitors */
    qemu_mutex_lock(&monitor_lock);
    monitor_destroyed = true;
    while (!QTAILQ_EMPTY(&mon_list)) {
        Monitor *mon = QTAILQ_FIRST(&mon_list);
        QTAILQ_REMOVE(&mon_list, mon, entry);
        /* Permit QAPI event emission from character frontend release */
        qemu_mutex_unlock(&monitor_lock);
        monitor_flush(mon);
        monitor_data_destroy(mon);
        qemu_mutex_lock(&monitor_lock);
        g_free(mon);
    }
    qemu_mutex_unlock(&monitor_lock);

    if (mon_iothread) {
        iothread_destroy(mon_iothread);
        mon_iothread = NULL;
    }
}

static void monitor_qapi_event_init(void)
{
    monitor_qapi_event_state = g_hash_table_new(qapi_event_throttle_hash,
                                                qapi_event_throttle_equal);
}

void monitor_init_globals_core(void)
{
    monitor_qapi_event_init();
    qemu_mutex_init(&monitor_lock);
    coroutine_mon = g_hash_table_new(NULL, NULL);

    /*
     * The dispatcher BH must run in the main loop thread, since we
     * have commands assuming that context. It would be nice to get
     * rid of those assumptions.
     */
    qmp_dispatcher_co = qemu_coroutine_create(monitor_qmp_dispatcher_co, NULL);
    qatomic_mb_set(&qmp_dispatcher_co_busy, true);
    aio_co_schedule(iohandler_get_aio_context(), qmp_dispatcher_co);
}
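
/*
 * monitor_init() and monitor_init_opts() below create the monitors
 * requested with the -mon command-line option (recognized keys are
 * listed in qemu_mon_opts at the end of this file). A hypothetical
 * invocation for a pretty-printing QMP monitor on a UNIX socket could
 * look like:
 *
 *     -chardev socket,id=mon0,path=qmp.sock,server=on,wait=off
 *     -mon chardev=mon0,mode=control,pretty=on
 */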
int monitor_init(MonitorOptions *opts, bool allow_hmp, Error **errp)
{
    Chardev *chr;
    Error *local_err = NULL;

    chr = qemu_chr_find(opts->chardev);
    if (chr == NULL) {
        error_setg(errp, "chardev \"%s\" not found", opts->chardev);
        return -1;
    }

    if (!opts->has_mode) {
        opts->mode = allow_hmp ? MONITOR_MODE_READLINE : MONITOR_MODE_CONTROL;
    }

    switch (opts->mode) {
    case MONITOR_MODE_CONTROL:
        monitor_init_qmp(chr, opts->pretty, &local_err);
        break;
    case MONITOR_MODE_READLINE:
        if (!allow_hmp) {
            error_setg(errp, "Only QMP is supported");
            return -1;
        }
        if (opts->pretty) {
            error_setg(errp, "'pretty' is not compatible with HMP monitors");
            return -1;
        }
        monitor_init_hmp(chr, true, &local_err);
        break;
    default:
        g_assert_not_reached();
    }

    if (local_err) {
        error_propagate(errp, local_err);
        return -1;
    }
    return 0;
}

int monitor_init_opts(QemuOpts *opts, Error **errp)
{
    Visitor *v;
    MonitorOptions *options;
    int ret;

    v = opts_visitor_new(opts);
    visit_type_MonitorOptions(v, NULL, &options, errp);
    visit_free(v);
    if (!options) {
        return -1;
    }

    ret = monitor_init(options, true, errp);
    qapi_free_MonitorOptions(options);
    return ret;
}

QemuOptsList qemu_mon_opts = {
    .name = "mon",
    .implied_opt_name = "chardev",
    .head = QTAILQ_HEAD_INITIALIZER(qemu_mon_opts.head),
    .desc = {
        {
            .name = "mode",
            .type = QEMU_OPT_STRING,
        },{
            .name = "chardev",
            .type = QEMU_OPT_STRING,
        },{
            .name = "pretty",
            .type = QEMU_OPT_BOOL,
        },
        { /* end of list */ }
    },
};