--- io.c (b0165585900f050f403cecba9d89adeccf35dd6c)
+++ io.c (d736f119dae6d292e8d60f2e02fa51a79524113e)
 /*
  * Block layer I/O functions
  *
  * Copyright (c) 2003 Fabrice Bellard
  *
  * Permission is hereby granted, free of charge, to any person obtaining a copy
  * of this software and associated documentation files (the "Software"), to deal
  * in the Software without restriction, including without limitation the rights

[... 256 unchanged lines hidden ...]

                             bdrv_co_drain_bh_cb, &data);

     qemu_coroutine_yield();
     /* If we are resumed from some other event (such as an aio completion or a
      * timer callback), it is a bug in the caller that should be fixed. */
     assert(data.done);
 }

-static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
-                                  BdrvChild *parent)
+void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
+                           BdrvChild *parent)
 {
     BdrvChild *child, *next;

     if (qemu_in_coroutine()) {
         bdrv_co_yield_to_drain(bs, true, recursive, parent);
         return;
     }

     /* Stop things in parent-to-child order */
     if (atomic_fetch_inc(&bs->quiesce_counter) == 0) {
         aio_disable_external(bdrv_get_aio_context(bs));
     }

     bdrv_parent_drained_begin(bs, parent);
     bdrv_drain_invoke(bs, true, false);
     bdrv_drain_recurse(bs);

     if (recursive) {
+        bs->recursive_quiesce_counter++;
         QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
             bdrv_do_drained_begin(child->bs, true, child);
         }
     }
 }

 void bdrv_drained_begin(BlockDriverState *bs)
 {
     bdrv_do_drained_begin(bs, false, NULL);
 }

 void bdrv_subtree_drained_begin(BlockDriverState *bs)
 {
     bdrv_do_drained_begin(bs, true, NULL);
 }

-static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
-                                BdrvChild *parent)
+void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
+                         BdrvChild *parent)
 {
     BdrvChild *child, *next;
     int old_quiesce_counter;

     if (qemu_in_coroutine()) {
         bdrv_co_yield_to_drain(bs, false, recursive, parent);
         return;
     }
     assert(bs->quiesce_counter > 0);
     old_quiesce_counter = atomic_fetch_dec(&bs->quiesce_counter);

     /* Re-enable things in child-to-parent order */
     bdrv_drain_invoke(bs, false, false);
     bdrv_parent_drained_end(bs, parent);
     if (old_quiesce_counter == 1) {
         aio_enable_external(bdrv_get_aio_context(bs));
     }

     if (recursive) {
+        bs->recursive_quiesce_counter--;
         QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
             bdrv_do_drained_end(child->bs, true, child);
         }
     }
 }

 void bdrv_drained_end(BlockDriverState *bs)
 {
     bdrv_do_drained_end(bs, false, NULL);
 }

 void bdrv_subtree_drained_end(BlockDriverState *bs)
 {
     bdrv_do_drained_end(bs, true, NULL);
 }

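Note: with bdrv_do_drained_begin()/bdrv_do_drained_end() now exported and the new recursive_quiesce_counter field, a node remembers how many subtree (recursive) drained sections are currently active on it, while quiesce_counter still counts every drain that reached the node. A minimal sketch of the intended invariant, assuming a "parent" node with one attached "child" and no other concurrent drains (both names are placeholders, not part of this patch):

/* Illustration only: shows the counter bookkeeping implied by the code above. */
static void example_counters(BlockDriverState *parent, BlockDriverState *child)
{
    bdrv_subtree_drained_begin(parent);
    bdrv_subtree_drained_begin(parent);

    /* Each recursive drain bumps the subtree counter on the root node... */
    assert(parent->recursive_quiesce_counter == 2);
    /* ...and the plain quiesce counter on every node the drain reached. */
    assert(parent->quiesce_counter == 2);
    assert(child->quiesce_counter == 2);

    bdrv_subtree_drained_end(parent);
    bdrv_subtree_drained_end(parent);

    assert(parent->recursive_quiesce_counter == 0);
    assert(parent->quiesce_counter == 0);
}
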
+void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent)
+{
+    int i;
+
+    for (i = 0; i < new_parent->recursive_quiesce_counter; i++) {
+        bdrv_do_drained_begin(child->bs, true, child);
+    }
+}
+
+void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent)
+{
+    int i;
+
+    for (i = 0; i < old_parent->recursive_quiesce_counter; i++) {
+        bdrv_do_drained_end(child->bs, true, child);
+    }
+}
+
 /*
  * Wait for pending requests to complete on a single BlockDriverState subtree,
  * and suspend block driver's internal I/O until next request arrives.
  *
  * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
  * AioContext.
  *
  * Only this BlockDriverState's AioContext is run, so in-flight requests must

[... 2454 unchanged lines hidden ...]
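The two new helpers are presumably intended for callers that re-parent a node while subtree drains are active: when a BdrvChild moves under a parent that is inside one or more subtree drained sections, the child's subtree has to catch up on those drains, and it has to drop them again when it leaves the old parent. A hedged sketch of such a caller follows; the function name and the elided re-parenting step are illustrative and not taken from this patch:

/* Hypothetical caller, loosely modelled on re-parenting a BdrvChild. */
static void example_reparent(BdrvChild *child,
                             BlockDriverState *old_parent,
                             BlockDriverState *new_parent)
{
    /* Undo the subtree drains that old_parent had imposed on child's subtree */
    bdrv_unapply_subtree_drain(child, old_parent);

    /* ... the actual re-parenting of child would happen here ... */

    /* Bring child's subtree up to new_parent's active subtree drain level */
    bdrv_apply_subtree_drain(child, new_parent);
}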