/*
 * Event loop thread
 *
 * Copyright Red Hat Inc., 2013
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qom/object.h"
#include "qom/object_interfaces.h"
#include "qemu/module.h"
#include "block/aio.h"
#include "block/block.h"
#include "sysemu/iothread.h"
#include "qmp-commands.h"
#include "qemu/error-report.h"
#include "qemu/rcu.h"
#include "qemu/main-loop.h"

typedef ObjectClass IOThreadClass;

#define IOTHREAD_GET_CLASS(obj) \
   OBJECT_GET_CLASS(IOThreadClass, obj, TYPE_IOTHREAD)
#define IOTHREAD_CLASS(klass) \
   OBJECT_CLASS_CHECK(IOThreadClass, klass, TYPE_IOTHREAD)

/* Benchmark results from 2016 on NVMe SSD drives show max polling times around
 * 16-32 microseconds yield IOPS improvements for both iodepth=1 and iodepth=32
 * workloads.
 */
#define IOTHREAD_POLL_MAX_NS_DEFAULT 32768ULL

static __thread IOThread *my_iothread;

AioContext *qemu_get_current_aio_context(void)
{
    return my_iothread ? my_iothread->ctx : qemu_get_aio_context();
}

static void *iothread_run(void *opaque)
{
    IOThread *iothread = opaque;

    rcu_register_thread();

    my_iothread = iothread;
    qemu_mutex_lock(&iothread->init_done_lock);
    iothread->thread_id = qemu_get_thread_id();
    qemu_cond_signal(&iothread->init_done_cond);
    qemu_mutex_unlock(&iothread->init_done_lock);

    while (!atomic_read(&iothread->stopping)) {
        aio_poll(iothread->ctx, true);

        if (atomic_read(&iothread->worker_context)) {
            GMainLoop *loop;

            g_main_context_push_thread_default(iothread->worker_context);
            iothread->main_loop =
                g_main_loop_new(iothread->worker_context, TRUE);
            loop = iothread->main_loop;

            g_main_loop_run(iothread->main_loop);
            iothread->main_loop = NULL;
            g_main_loop_unref(loop);

            g_main_context_pop_thread_default(iothread->worker_context);
            g_main_context_unref(iothread->worker_context);
            iothread->worker_context = NULL;
        }
    }

    rcu_unregister_thread();
    return NULL;
}

static int iothread_stop(Object *object, void *opaque)
{
    IOThread *iothread;

    iothread = (IOThread *)object_dynamic_cast(object, TYPE_IOTHREAD);
    if (!iothread || !iothread->ctx) {
        return 0;
    }
    iothread->stopping = true;
    aio_notify(iothread->ctx);
    if (atomic_read(&iothread->main_loop)) {
        g_main_loop_quit(iothread->main_loop);
    }
    qemu_thread_join(&iothread->thread);
    return 0;
}

static void iothread_instance_init(Object *obj)
{
    IOThread *iothread = IOTHREAD(obj);

    iothread->poll_max_ns = IOTHREAD_POLL_MAX_NS_DEFAULT;
}

static void iothread_instance_finalize(Object *obj)
{
    IOThread *iothread = IOTHREAD(obj);

    iothread_stop(obj, NULL);
    qemu_cond_destroy(&iothread->init_done_cond);
    qemu_mutex_destroy(&iothread->init_done_lock);
    if (!iothread->ctx) {
        return;
    }
    aio_context_unref(iothread->ctx);
}

static void iothread_complete(UserCreatable *obj, Error **errp)
{
    Error *local_error = NULL;
    IOThread *iothread = IOTHREAD(obj);
    char *name, *thread_name;

    iothread->stopping = false;
    iothread->thread_id = -1;
    iothread->ctx = aio_context_new(&local_error);
    if (!iothread->ctx) {
        error_propagate(errp, local_error);
        return;
    }
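    /*
     * Configure adaptive busy-wait polling on the new AioContext.  The values
     * come from the poll-max-ns/poll-grow/poll-shrink properties (poll_max_ns
     * defaults to IOTHREAD_POLL_MAX_NS_DEFAULT, the others to 0).  If the
     * parameters are rejected, the context is torn down again below.
     */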
    aio_context_set_poll_params(iothread->ctx,
                                iothread->poll_max_ns,
                                iothread->poll_grow,
                                iothread->poll_shrink,
                                &local_error);
    if (local_error) {
        error_propagate(errp, local_error);
        aio_context_unref(iothread->ctx);
        iothread->ctx = NULL;
        return;
    }

    qemu_mutex_init(&iothread->init_done_lock);
    qemu_cond_init(&iothread->init_done_cond);
    iothread->once = (GOnce) G_ONCE_INIT;

    /* This assumes we are called from a thread with useful CPU affinity for us
     * to inherit.
     */
    name = object_get_canonical_path_component(OBJECT(obj));
    thread_name = g_strdup_printf("IO %s", name);
    qemu_thread_create(&iothread->thread, thread_name, iothread_run,
                       iothread, QEMU_THREAD_JOINABLE);
    g_free(thread_name);
    g_free(name);

    /* Wait for initialization to complete */
    qemu_mutex_lock(&iothread->init_done_lock);
    while (iothread->thread_id == -1) {
        qemu_cond_wait(&iothread->init_done_cond,
                       &iothread->init_done_lock);
    }
    qemu_mutex_unlock(&iothread->init_done_lock);
}

typedef struct {
    const char *name;
    ptrdiff_t offset; /* field's byte offset in IOThread struct */
} PollParamInfo;

static PollParamInfo poll_max_ns_info = {
    "poll-max-ns", offsetof(IOThread, poll_max_ns),
};
static PollParamInfo poll_grow_info = {
    "poll-grow", offsetof(IOThread, poll_grow),
};
static PollParamInfo poll_shrink_info = {
    "poll-shrink", offsetof(IOThread, poll_shrink),
};

static void iothread_get_poll_param(Object *obj, Visitor *v,
        const char *name, void *opaque, Error **errp)
{
    IOThread *iothread = IOTHREAD(obj);
    PollParamInfo *info = opaque;
    int64_t *field = (void *)iothread + info->offset;

    visit_type_int64(v, name, field, errp);
}

static void iothread_set_poll_param(Object *obj, Visitor *v,
        const char *name, void *opaque, Error **errp)
{
    IOThread *iothread = IOTHREAD(obj);
    PollParamInfo *info = opaque;
    int64_t *field = (void *)iothread + info->offset;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int64(v, name, &value, &local_err);
    if (local_err) {
        goto out;
    }

    if (value < 0) {
        error_setg(&local_err, "%s value must be in range [0, %"PRId64"]",
                   info->name, INT64_MAX);
        goto out;
    }

    *field = value;

    if (iothread->ctx) {
        aio_context_set_poll_params(iothread->ctx,
                                    iothread->poll_max_ns,
                                    iothread->poll_grow,
                                    iothread->poll_shrink,
                                    &local_err);
    }

out:
    error_propagate(errp, local_err);
}

static void iothread_class_init(ObjectClass *klass, void *class_data)
{
    UserCreatableClass *ucc = USER_CREATABLE_CLASS(klass);
    ucc->complete = iothread_complete;

    object_class_property_add(klass, "poll-max-ns", "int",
                              iothread_get_poll_param,
                              iothread_set_poll_param,
                              NULL, &poll_max_ns_info, &error_abort);
    object_class_property_add(klass, "poll-grow", "int",
                              iothread_get_poll_param,
                              iothread_set_poll_param,
                              NULL, &poll_grow_info, &error_abort);
    object_class_property_add(klass, "poll-shrink", "int",
                              iothread_get_poll_param,
                              iothread_set_poll_param,
                              NULL, &poll_shrink_info, &error_abort);
}
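/*
 * IOThread is a user-creatable QOM object: the TYPE_USER_CREATABLE interface
 * below is what lets instances be created with "-object iothread,id=..." on
 * the command line or with the QMP object-add command.  iothread_complete()
 * above runs once all properties have been set and spawns the thread.
 */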
static const TypeInfo iothread_info = {
    .name = TYPE_IOTHREAD,
    .parent = TYPE_OBJECT,
    .class_init = iothread_class_init,
    .instance_size = sizeof(IOThread),
    .instance_init = iothread_instance_init,
    .instance_finalize = iothread_instance_finalize,
    .interfaces = (InterfaceInfo[]) {
        {TYPE_USER_CREATABLE},
        {}
    },
};

static void iothread_register_types(void)
{
    type_register_static(&iothread_info);
}

type_init(iothread_register_types)

char *iothread_get_id(IOThread *iothread)
{
    return object_get_canonical_path_component(OBJECT(iothread));
}

AioContext *iothread_get_aio_context(IOThread *iothread)
{
    return iothread->ctx;
}

static int query_one_iothread(Object *object, void *opaque)
{
    IOThreadInfoList ***prev = opaque;
    IOThreadInfoList *elem;
    IOThreadInfo *info;
    IOThread *iothread;

    iothread = (IOThread *)object_dynamic_cast(object, TYPE_IOTHREAD);
    if (!iothread) {
        return 0;
    }

    info = g_new0(IOThreadInfo, 1);
    info->id = iothread_get_id(iothread);
    info->thread_id = iothread->thread_id;
    info->poll_max_ns = iothread->poll_max_ns;
    info->poll_grow = iothread->poll_grow;
    info->poll_shrink = iothread->poll_shrink;

    elem = g_new0(IOThreadInfoList, 1);
    elem->value = info;
    elem->next = NULL;

    **prev = elem;
    *prev = &elem->next;
    return 0;
}

IOThreadInfoList *qmp_query_iothreads(Error **errp)
{
    IOThreadInfoList *head = NULL;
    IOThreadInfoList **prev = &head;
    Object *container = object_get_objects_root();

    object_child_foreach(container, query_one_iothread, &prev);
    return head;
}

void iothread_stop_all(void)
{
    Object *container = object_get_objects_root();
    BlockDriverState *bs;
    BdrvNextIterator it;

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *ctx = bdrv_get_aio_context(bs);
        if (ctx == qemu_get_aio_context()) {
            continue;
        }
        aio_context_acquire(ctx);
        bdrv_set_aio_context(bs, qemu_get_aio_context());
        aio_context_release(ctx);
    }

    object_child_foreach(container, iothread_stop, NULL);
}

static gpointer iothread_g_main_context_init(gpointer opaque)
{
    AioContext *ctx;
    IOThread *iothread = opaque;
    GSource *source;

    iothread->worker_context = g_main_context_new();

    ctx = iothread_get_aio_context(iothread);
    source = aio_get_g_source(ctx);
    g_source_attach(source, iothread->worker_context);
    g_source_unref(source);

    aio_notify(iothread->ctx);
    return NULL;
}

GMainContext *iothread_get_g_main_context(IOThread *iothread)
{
    g_once(&iothread->once, iothread_g_main_context_init, iothread);

    return iothread->worker_context;
}
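/*
 * Illustrative sketch (not part of this file) of how a caller might attach
 * its own glib event source to an IOThread's GMainContext; "my_tick" is a
 * hypothetical GSourceFunc:
 *
 *     GMainContext *gctx = iothread_get_g_main_context(iothread);
 *     GSource *src = g_timeout_source_new(1000);
 *
 *     g_source_set_callback(src, my_tick, NULL, NULL);
 *     g_source_attach(src, gctx);
 *     g_source_unref(src);
 *
 * The first iothread_get_g_main_context() call lazily creates the
 * GMainContext via g_once() and wakes the event loop with aio_notify(), so
 * iothread_run() starts running a GMainLoop on it.
 */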