/*
 * QEMU monitor
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "monitor-internal.h"
#include "qapi/error.h"
#include "qapi/opts-visitor.h"
#include "qapi/qapi-emit-events.h"
#include "qapi/qapi-visit-control.h"
#include "qapi/qmp/qdict.h"
#include "qemu/error-report.h"
#include "qemu/option.h"
#include "sysemu/qtest.h"
#include "sysemu/sysemu.h"
#include "trace.h"

/*
 * To prevent flooding clients, events can be throttled. The
 * throttling is calculated globally, rather than per-Monitor
 * instance.
 */
typedef struct MonitorQAPIEventState {
    QAPIEvent event;    /* Throttling state for this event type and... */
    QDict *data;        /* ... data, see qapi_event_throttle_equal() */
    QEMUTimer *timer;   /* Timer for handling delayed events */
    QDict *qdict;       /* Delayed event (if any) */
} MonitorQAPIEventState;

typedef struct {
    int64_t rate;       /* Minimum time (in ns) between two events */
} MonitorQAPIEventConf;

/* Shared monitor I/O thread */
IOThread *mon_iothread;

/* Coroutine to dispatch the requests received from I/O thread */
Coroutine *qmp_dispatcher_co;

/* Set to true when the dispatcher coroutine should terminate */
bool qmp_dispatcher_co_shutdown;

/*
 * qmp_dispatcher_co_busy is used for synchronisation between the
 * monitor thread and the main thread to ensure that the dispatcher
 * coroutine never gets scheduled a second time when it's already
 * scheduled (scheduling the same coroutine twice is forbidden).
 *
 * It is true if the coroutine is active and processing requests.
 * Additional requests may then be pushed onto mon->qmp_requests,
 * and @qmp_dispatcher_co_shutdown may be set without further ado.
 * @qmp_dispatcher_co must not be woken up in this case.
 *
 * If false, you also have to set @qmp_dispatcher_co_busy to true and
 * wake up @qmp_dispatcher_co after pushing the new requests.
 *
 * The coroutine will automatically change this variable back to false
 * before it yields.  Nobody else may set the variable to false.
 *
 * Access must be atomic for thread safety.
 */
bool qmp_dispatcher_co_busy;
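
/*
 * Illustrative sketch of the producer side of this protocol (the real
 * producer is the code that pushes QMP requests onto mon->qmp_requests):
 * after queueing a request, wake the dispatcher only if it is not
 * already busy:
 *
 *     if (!qatomic_xchg(&qmp_dispatcher_co_busy, true)) {
 *         aio_co_wake(qmp_dispatcher_co);
 *     }
 *
 * The atomic exchange ensures at most one wake-up while the coroutine
 * is idle; monitor_cleanup() below uses the same pattern to deliver
 * the shutdown request.
 */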

/*
 * Protects mon_list, monitor_qapi_event_state, coroutine_mon,
 * monitor_destroyed.
 */
QemuMutex monitor_lock;
static GHashTable *monitor_qapi_event_state;
static GHashTable *coroutine_mon; /* Maps Coroutine* to Monitor* */

MonitorList mon_list;
int mon_refcount;
static bool monitor_destroyed;

Monitor *monitor_cur(void)
{
    Monitor *mon;

    qemu_mutex_lock(&monitor_lock);
    mon = g_hash_table_lookup(coroutine_mon, qemu_coroutine_self());
    qemu_mutex_unlock(&monitor_lock);

    return mon;
}

/**
 * Sets a new current monitor and returns the old one.
 *
 * If a non-NULL monitor is set for a coroutine, another call
 * resetting it to NULL is required before the coroutine terminates,
 * otherwise a stale entry would remain in the hash table.
 */
Monitor *monitor_set_cur(Coroutine *co, Monitor *mon)
{
    Monitor *old_monitor = monitor_cur();

    qemu_mutex_lock(&monitor_lock);
    if (mon) {
        g_hash_table_replace(coroutine_mon, co, mon);
    } else {
        g_hash_table_remove(coroutine_mon, co);
    }
    qemu_mutex_unlock(&monitor_lock);

    return old_monitor;
}

/**
 * Is the current monitor, if any, a QMP monitor?
 */
bool monitor_cur_is_qmp(void)
{
    Monitor *cur_mon = monitor_cur();

    return cur_mon && monitor_is_qmp(cur_mon);
}

/**
 * Is @mon using readline?
 * Note: not all HMP monitors use readline, e.g., gdbserver has a
 * non-interactive HMP monitor, so readline is not used there.
 */
static inline bool monitor_uses_readline(const MonitorHMP *mon)
{
    return mon->use_readline;
}

static inline bool monitor_is_hmp_non_interactive(const Monitor *mon)
{
    if (monitor_is_qmp(mon)) {
        return false;
    }

    return !monitor_uses_readline(container_of(mon, MonitorHMP, common));
}

static void monitor_flush_locked(Monitor *mon);

static gboolean monitor_unblocked(GIOChannel *chan, GIOCondition cond,
                                  void *opaque)
{
    Monitor *mon = opaque;

    qemu_mutex_lock(&mon->mon_lock);
    mon->out_watch = 0;
    monitor_flush_locked(mon);
    qemu_mutex_unlock(&mon->mon_lock);
    return FALSE;
}

/* Caller must hold mon->mon_lock */
static void monitor_flush_locked(Monitor *mon)
{
    int rc;
    size_t len;
    const char *buf;

    if (mon->skip_flush) {
        return;
    }

    buf = mon->outbuf->str;
    len = mon->outbuf->len;

    if (len && !mon->mux_out) {
        rc = qemu_chr_fe_write(&mon->chr, (const uint8_t *) buf, len);
        if ((rc < 0 && errno != EAGAIN) || (rc == len)) {
            /* all flushed or error */
            g_string_truncate(mon->outbuf, 0);
            return;
        }
        if (rc > 0) {
            /* partial write */
            g_string_erase(mon->outbuf, 0, rc);
        }
        if (mon->out_watch == 0) {
            mon->out_watch =
                qemu_chr_fe_add_watch(&mon->chr, G_IO_OUT | G_IO_HUP,
                                      monitor_unblocked, mon);
        }
    }
}

void monitor_flush(Monitor *mon)
{
    qemu_mutex_lock(&mon->mon_lock);
    monitor_flush_locked(mon);
    qemu_mutex_unlock(&mon->mon_lock);
}

/* flush at every end of line */
int monitor_puts(Monitor *mon, const char *str)
{
    int i;
    char c;

    qemu_mutex_lock(&mon->mon_lock);
    for (i = 0; str[i]; i++) {
        c = str[i];
        if (c == '\n') {
            g_string_append_c(mon->outbuf, '\r');
        }
        g_string_append_c(mon->outbuf, c);
        if (c == '\n') {
            monitor_flush_locked(mon);
        }
    }
    qemu_mutex_unlock(&mon->mon_lock);

    return i;
}
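
/*
 * Note on the output path: monitor_printf()/monitor_vprintf() below
 * format into a temporary buffer and hand it to monitor_puts(), which
 * appends to mon->outbuf under mon->mon_lock and flushes on every
 * '\n' (written to the character frontend as "\r\n").  If the chardev
 * cannot accept the whole buffer, monitor_flush_locked() keeps the
 * remainder in mon->outbuf and registers an out_watch so that
 * monitor_unblocked() retries once the chardev is writable again.
 */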

int monitor_vprintf(Monitor *mon, const char *fmt, va_list ap)
{
    char *buf;
    int n;

    if (!mon) {
        return -1;
    }

    if (monitor_is_qmp(mon)) {
        return -1;
    }

    buf = g_strdup_vprintf(fmt, ap);
    n = monitor_puts(mon, buf);
    g_free(buf);
    return n;
}

int monitor_printf(Monitor *mon, const char *fmt, ...)
{
    int ret;

    va_list ap;
    va_start(ap, fmt);
    ret = monitor_vprintf(mon, fmt, ap);
    va_end(ap);
    return ret;
}

/*
 * Print to current monitor if we have one, else to stderr.
 */
int error_vprintf(const char *fmt, va_list ap)
{
    Monitor *cur_mon = monitor_cur();

    if (cur_mon && !monitor_cur_is_qmp()) {
        return monitor_vprintf(cur_mon, fmt, ap);
    }
    return vfprintf(stderr, fmt, ap);
}

int error_vprintf_unless_qmp(const char *fmt, va_list ap)
{
    Monitor *cur_mon = monitor_cur();

    if (!cur_mon) {
        return vfprintf(stderr, fmt, ap);
    }
    if (!monitor_cur_is_qmp()) {
        return monitor_vprintf(cur_mon, fmt, ap);
    }
    return -1;
}


static MonitorQAPIEventConf monitor_qapi_event_conf[QAPI_EVENT__MAX] = {
    /* Limit guest-triggerable events to 1 per second */
    [QAPI_EVENT_RTC_CHANGE]        = { 1000 * SCALE_MS },
    [QAPI_EVENT_WATCHDOG]          = { 1000 * SCALE_MS },
    [QAPI_EVENT_BALLOON_CHANGE]    = { 1000 * SCALE_MS },
    [QAPI_EVENT_QUORUM_REPORT_BAD] = { 1000 * SCALE_MS },
    [QAPI_EVENT_QUORUM_FAILURE]    = { 1000 * SCALE_MS },
    [QAPI_EVENT_VSERPORT_CHANGE]   = { 1000 * SCALE_MS },
    [QAPI_EVENT_MEMORY_DEVICE_SIZE_CHANGE] = { 1000 * SCALE_MS },
};

/*
 * Return the clock to use for recording an event's time.
 * It's QEMU_CLOCK_REALTIME, except for qtests it's
 * QEMU_CLOCK_VIRTUAL, to support testing rate limits.
 * Beware: result is invalid before configure_accelerator().
 */
static inline QEMUClockType monitor_get_event_clock(void)
{
    return qtest_enabled() ? QEMU_CLOCK_VIRTUAL : QEMU_CLOCK_REALTIME;
}

/*
 * Broadcast an event to all monitors.
 * @qdict is the event object.  Its member "event" must match @event.
 * Caller must hold monitor_lock.
 */
static void monitor_qapi_event_emit(QAPIEvent event, QDict *qdict)
{
    Monitor *mon;
    MonitorQMP *qmp_mon;

    trace_monitor_protocol_event_emit(event, qdict);
    QTAILQ_FOREACH(mon, &mon_list, entry) {
        if (!monitor_is_qmp(mon)) {
            continue;
        }

        qmp_mon = container_of(mon, MonitorQMP, common);
        if (qmp_mon->commands != &qmp_cap_negotiation_commands) {
            qmp_send_response(qmp_mon, qdict);
        }
    }
}

static void monitor_qapi_event_handler(void *opaque);
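
/*
 * Overview of the throttling scheme implemented below: for an event
 * type with a non-zero rate in monitor_qapi_event_conf[], the first
 * event is emitted immediately and a MonitorQAPIEventState with an
 * armed timer is stored in monitor_qapi_event_state.  Further events
 * of the same type (and, for some event types, the same data key)
 * arriving while the timer is pending merely replace the stored
 * qdict; when the timer fires, monitor_qapi_event_handler() emits the
 * stored event and re-arms the timer, or tears the state down if
 * nothing arrived in the meantime.
 */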

/*
 * Queue a new event for emission to Monitor instances,
 * applying any rate limiting if required.
 */
static void
monitor_qapi_event_queue_no_reenter(QAPIEvent event, QDict *qdict)
{
    MonitorQAPIEventConf *evconf;
    MonitorQAPIEventState *evstate;

    assert(event < QAPI_EVENT__MAX);
    evconf = &monitor_qapi_event_conf[event];
    trace_monitor_protocol_event_queue(event, qdict, evconf->rate);

    qemu_mutex_lock(&monitor_lock);

    if (!evconf->rate) {
        /* Unthrottled event */
        monitor_qapi_event_emit(event, qdict);
    } else {
        QDict *data = qobject_to(QDict, qdict_get(qdict, "data"));
        MonitorQAPIEventState key = { .event = event, .data = data };

        evstate = g_hash_table_lookup(monitor_qapi_event_state, &key);
        assert(!evstate || timer_pending(evstate->timer));

        if (evstate) {
            /*
             * Timer is pending for (at least) evconf->rate ns after
             * last send.  Store event for sending when timer fires,
             * replacing a prior stored event if any.
             */
            qobject_unref(evstate->qdict);
            evstate->qdict = qobject_ref(qdict);
        } else {
            /*
             * Last send was (at least) evconf->rate ns ago.
             * Send immediately, and arm the timer to call
             * monitor_qapi_event_handler() in evconf->rate ns.  Any
             * events arriving before then will be delayed until then.
             */
            int64_t now = qemu_clock_get_ns(monitor_get_event_clock());

            monitor_qapi_event_emit(event, qdict);

            evstate = g_new(MonitorQAPIEventState, 1);
            evstate->event = event;
            evstate->data = qobject_ref(data);
            evstate->qdict = NULL;
            evstate->timer = timer_new_ns(monitor_get_event_clock(),
                                          monitor_qapi_event_handler,
                                          evstate);
            g_hash_table_add(monitor_qapi_event_state, evstate);
            timer_mod_ns(evstate->timer, now + evconf->rate);
        }
    }

    qemu_mutex_unlock(&monitor_lock);
}

void qapi_event_emit(QAPIEvent event, QDict *qdict)
{
    /*
     * monitor_qapi_event_queue_no_reenter() is not reentrant: it
     * would deadlock on monitor_lock.  Work around by queueing
     * events in thread-local storage.
     * TODO: remove this, make it re-enter safe.
     */
    typedef struct MonitorQapiEvent {
        QAPIEvent event;
        QDict *qdict;
        QSIMPLEQ_ENTRY(MonitorQapiEvent) entry;
    } MonitorQapiEvent;
    static __thread QSIMPLEQ_HEAD(, MonitorQapiEvent) event_queue;
    static __thread bool reentered;
    MonitorQapiEvent *ev;

    if (!reentered) {
        QSIMPLEQ_INIT(&event_queue);
    }

    ev = g_new(MonitorQapiEvent, 1);
    ev->qdict = qobject_ref(qdict);
    ev->event = event;
    QSIMPLEQ_INSERT_TAIL(&event_queue, ev, entry);
    if (reentered) {
        return;
    }

    reentered = true;

    while ((ev = QSIMPLEQ_FIRST(&event_queue)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&event_queue, entry);
        monitor_qapi_event_queue_no_reenter(ev->event, ev->qdict);
        qobject_unref(ev->qdict);
        g_free(ev);
    }

    reentered = false;
}
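
/*
 * Callers normally do not use qapi_event_emit() directly; the
 * generated qapi_event_send_FOO() helpers build the event object and
 * call it (see qapi/qapi-emit-events.h).  As an illustrative sketch
 * only, a hand-rolled caller would look roughly like:
 *
 *     QDict *qdict = qdict_new();
 *     qdict_put_str(qdict, "event", "POWERDOWN");
 *     qapi_event_emit(QAPI_EVENT_POWERDOWN, qdict);
 *     qobject_unref(qdict);
 *
 * where the "event" member must match the QAPIEvent argument, as
 * required by monitor_qapi_event_emit() above, and the caller drops
 * its own reference afterwards because qapi_event_emit() takes a
 * reference of its own.
 */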

/*
 * This function runs evconf->rate ns after sending a throttled
 * event.
 * If another event has since been stored, send it.
 */
static void monitor_qapi_event_handler(void *opaque)
{
    MonitorQAPIEventState *evstate = opaque;
    MonitorQAPIEventConf *evconf = &monitor_qapi_event_conf[evstate->event];

    trace_monitor_protocol_event_handler(evstate->event, evstate->qdict);
    qemu_mutex_lock(&monitor_lock);

    if (evstate->qdict) {
        int64_t now = qemu_clock_get_ns(monitor_get_event_clock());

        monitor_qapi_event_emit(evstate->event, evstate->qdict);
        qobject_unref(evstate->qdict);
        evstate->qdict = NULL;
        timer_mod_ns(evstate->timer, now + evconf->rate);
    } else {
        g_hash_table_remove(monitor_qapi_event_state, evstate);
        qobject_unref(evstate->data);
        timer_free(evstate->timer);
        g_free(evstate);
    }

    qemu_mutex_unlock(&monitor_lock);
}

static unsigned int qapi_event_throttle_hash(const void *key)
{
    const MonitorQAPIEventState *evstate = key;
    unsigned int hash = evstate->event * 255;

    if (evstate->event == QAPI_EVENT_VSERPORT_CHANGE) {
        hash += g_str_hash(qdict_get_str(evstate->data, "id"));
    }

    if (evstate->event == QAPI_EVENT_QUORUM_REPORT_BAD) {
        hash += g_str_hash(qdict_get_str(evstate->data, "node-name"));
    }

    return hash;
}

static gboolean qapi_event_throttle_equal(const void *a, const void *b)
{
    const MonitorQAPIEventState *eva = a;
    const MonitorQAPIEventState *evb = b;

    if (eva->event != evb->event) {
        return FALSE;
    }

    if (eva->event == QAPI_EVENT_VSERPORT_CHANGE) {
        return !strcmp(qdict_get_str(eva->data, "id"),
                       qdict_get_str(evb->data, "id"));
    }

    if (eva->event == QAPI_EVENT_QUORUM_REPORT_BAD) {
        return !strcmp(qdict_get_str(eva->data, "node-name"),
                       qdict_get_str(evb->data, "node-name"));
    }

    return TRUE;
}

int monitor_suspend(Monitor *mon)
{
    if (monitor_is_hmp_non_interactive(mon)) {
        return -ENOTTY;
    }

    qatomic_inc(&mon->suspend_cnt);

    if (mon->use_io_thread) {
        /*
         * Kick I/O thread to make sure this takes effect.  It'll be
         * evaluated again in prepare() of the watch object.
         */
        aio_notify(iothread_get_aio_context(mon_iothread));
    }

    trace_monitor_suspend(mon, 1);
    return 0;
}

static void monitor_accept_input(void *opaque)
{
    Monitor *mon = opaque;

    qemu_chr_fe_accept_input(&mon->chr);
}

void monitor_resume(Monitor *mon)
{
    if (monitor_is_hmp_non_interactive(mon)) {
        return;
    }

    if (qatomic_dec_fetch(&mon->suspend_cnt) == 0) {
        AioContext *ctx;

        if (mon->use_io_thread) {
            ctx = iothread_get_aio_context(mon_iothread);
        } else {
            ctx = qemu_get_aio_context();
        }

        if (!monitor_is_qmp(mon)) {
            MonitorHMP *hmp_mon = container_of(mon, MonitorHMP, common);
            assert(hmp_mon->rs);
            readline_show_prompt(hmp_mon->rs);
        }

        aio_bh_schedule_oneshot(ctx, monitor_accept_input, mon);
    }

    trace_monitor_suspend(mon, -1);
}

int monitor_can_read(void *opaque)
{
    Monitor *mon = opaque;

    return !qatomic_mb_read(&mon->suspend_cnt);
}
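
/*
 * Reading from the monitor's chardev is gated by the functions above:
 * mon->suspend_cnt counts outstanding monitor_suspend() calls, and
 * monitor_can_read() returns 0 to the chardev layer while the count
 * is non-zero.  monitor_resume() decrements the count and, once it
 * reaches zero, schedules monitor_accept_input() as a one-shot bottom
 * half in the monitor's AioContext so the chardev starts feeding
 * input again.
 */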

void monitor_list_append(Monitor *mon)
{
    qemu_mutex_lock(&monitor_lock);
    /*
     * This prevents inserting new monitors during monitor_cleanup().
     * A cleaner solution would involve the main thread telling other
     * threads to terminate and waiting for their termination.
     */
    if (!monitor_destroyed) {
        QTAILQ_INSERT_HEAD(&mon_list, mon, entry);
        mon = NULL;
    }
    qemu_mutex_unlock(&monitor_lock);

    if (mon) {
        monitor_data_destroy(mon);
        g_free(mon);
    }
}

static void monitor_iothread_init(void)
{
    mon_iothread = iothread_create("mon_iothread", &error_abort);
}

void monitor_data_init(Monitor *mon, bool is_qmp, bool skip_flush,
                       bool use_io_thread)
{
    if (use_io_thread && !mon_iothread) {
        monitor_iothread_init();
    }
    qemu_mutex_init(&mon->mon_lock);
    mon->is_qmp = is_qmp;
    mon->outbuf = g_string_new(NULL);
    mon->skip_flush = skip_flush;
    mon->use_io_thread = use_io_thread;
}

void monitor_data_destroy(Monitor *mon)
{
    g_free(mon->mon_cpu_path);
    qemu_chr_fe_deinit(&mon->chr, false);
    if (monitor_is_qmp(mon)) {
        monitor_data_destroy_qmp(container_of(mon, MonitorQMP, common));
    } else {
        readline_free(container_of(mon, MonitorHMP, common)->rs);
    }
    g_string_free(mon->outbuf, true);
    qemu_mutex_destroy(&mon->mon_lock);
}

void monitor_cleanup(void)
{
    /*
     * The dispatcher needs to stop before destroying the monitor and
     * the I/O thread.
     *
     * We need to poll both qemu_aio_context and iohandler_ctx to make
     * sure that the dispatcher coroutine keeps making progress and
     * eventually terminates.  qemu_aio_context is automatically
     * polled by calling AIO_WAIT_WHILE on it, but we must poll
     * iohandler_ctx manually.
     *
     * Letting the iothread continue while shutting down the dispatcher
     * means that new requests may still be coming in.  This is okay,
     * we'll just leave them in the queue without sending a response
     * and monitor_data_destroy() will free them.
     */
    qmp_dispatcher_co_shutdown = true;
    if (!qatomic_xchg(&qmp_dispatcher_co_busy, true)) {
        aio_co_wake(qmp_dispatcher_co);
    }

    AIO_WAIT_WHILE(qemu_get_aio_context(),
                   (aio_poll(iohandler_get_aio_context(), false),
                    qatomic_mb_read(&qmp_dispatcher_co_busy)));

    /*
     * We need to explicitly stop the I/O thread (but not destroy it),
     * clean up the monitor resources, then destroy the I/O thread since
     * we need to unregister from chardev below in
     * monitor_data_destroy(), and chardev is not thread-safe yet.
     */
    if (mon_iothread) {
        iothread_stop(mon_iothread);
    }

    /* Flush output buffers and destroy monitors */
    qemu_mutex_lock(&monitor_lock);
    monitor_destroyed = true;
    while (!QTAILQ_EMPTY(&mon_list)) {
        Monitor *mon = QTAILQ_FIRST(&mon_list);
        QTAILQ_REMOVE(&mon_list, mon, entry);
        /* Permit QAPI event emission from character frontend release */
        qemu_mutex_unlock(&monitor_lock);
        monitor_flush(mon);
        monitor_data_destroy(mon);
        qemu_mutex_lock(&monitor_lock);
        g_free(mon);
    }
    qemu_mutex_unlock(&monitor_lock);

    if (mon_iothread) {
        iothread_destroy(mon_iothread);
        mon_iothread = NULL;
    }
}
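
/*
 * monitor_qapi_event_state below is keyed by qapi_event_throttle_hash()
 * and qapi_event_throttle_equal(): most throttled events are coalesced
 * per event type, but VSERPORT_CHANGE is tracked per "id" and
 * QUORUM_REPORT_BAD per "node-name", so unrelated serial ports or
 * block nodes do not suppress each other's events.
 */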

static void monitor_qapi_event_init(void)
{
    monitor_qapi_event_state = g_hash_table_new(qapi_event_throttle_hash,
                                                qapi_event_throttle_equal);
}

void monitor_init_globals_core(void)
{
    monitor_qapi_event_init();
    qemu_mutex_init(&monitor_lock);
    coroutine_mon = g_hash_table_new(NULL, NULL);

    /*
     * The dispatcher coroutine must run in the main loop thread, since
     * we have commands assuming that context.  It would be nice to get
     * rid of those assumptions.
     */
    qmp_dispatcher_co = qemu_coroutine_create(monitor_qmp_dispatcher_co, NULL);
    qatomic_mb_set(&qmp_dispatcher_co_busy, true);
    aio_co_schedule(iohandler_get_aio_context(), qmp_dispatcher_co);
}

int monitor_init(MonitorOptions *opts, bool allow_hmp, Error **errp)
{
    Chardev *chr;
    Error *local_err = NULL;

    chr = qemu_chr_find(opts->chardev);
    if (chr == NULL) {
        error_setg(errp, "chardev \"%s\" not found", opts->chardev);
        return -1;
    }

    if (!opts->has_mode) {
        opts->mode = allow_hmp ? MONITOR_MODE_READLINE : MONITOR_MODE_CONTROL;
    }

    switch (opts->mode) {
    case MONITOR_MODE_CONTROL:
        monitor_init_qmp(chr, opts->pretty, &local_err);
        break;
    case MONITOR_MODE_READLINE:
        if (!allow_hmp) {
            error_setg(errp, "Only QMP is supported");
            return -1;
        }
        if (opts->pretty) {
            warn_report("'pretty' is deprecated for HMP monitors, it has no "
                        "effect and will be removed in future versions");
        }
        monitor_init_hmp(chr, true, &local_err);
        break;
    default:
        g_assert_not_reached();
    }

    if (local_err) {
        error_propagate(errp, local_err);
        return -1;
    }
    return 0;
}

int monitor_init_opts(QemuOpts *opts, Error **errp)
{
    Visitor *v;
    MonitorOptions *options;
    int ret;

    v = opts_visitor_new(opts);
    visit_type_MonitorOptions(v, NULL, &options, errp);
    visit_free(v);
    if (!options) {
        return -1;
    }

    ret = monitor_init(options, true, errp);
    qapi_free_MonitorOptions(options);
    return ret;
}

QemuOptsList qemu_mon_opts = {
    .name = "mon",
    .implied_opt_name = "chardev",
    .head = QTAILQ_HEAD_INITIALIZER(qemu_mon_opts.head),
    .desc = {
        {
            .name = "mode",
            .type = QEMU_OPT_STRING,
        },{
            .name = "chardev",
            .type = QEMU_OPT_STRING,
        },{
            .name = "pretty",
            .type = QEMU_OPT_BOOL,
        },
        { /* end of list */ }
    },
};
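
/*
 * Illustrative command line only (syntax may differ between QEMU
 * versions): the "mon" options group above is typically exercised by
 * pairing a chardev with a monitor instance, e.g.
 *
 *     -chardev socket,id=mon0,path=/tmp/qmp.sock,server=on,wait=off \
 *     -mon chardev=mon0,mode=control,pretty=on
 *
 * "mode" selects QMP ("control") or HMP ("readline"), matching
 * monitor_init() above; "pretty" only has an effect for QMP.
 */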