--- throttle-groups.c (49d2165d7d6b589d1ea28b15a8874c417bdc55ed)
+++ throttle-groups.c (27ccdd52598290f0f8b58be56e235aff7aebfaf3)
 /*
  * QEMU block throttling group infrastructure
  *
  * Copyright (C) Nodalink, EURL. 2014
  * Copyright (C) Igalia, S.L. 2015
  *
  * Authors:
  *   Benoît Canet <benoit.canet@nodalink.com>
--- 16 unchanged lines hidden ---
 #include "qemu/osdep.h"
 #include "sysemu/block-backend.h"
 #include "block/throttle-groups.h"
 #include "qemu/queue.h"
 #include "qemu/thread.h"
 #include "sysemu/qtest.h"
 
 /* The ThrottleGroup structure (with its ThrottleState) is shared
- * among different BlockDriverState and it's independent from
+ * among different BlockBackends and it's independent from
  * AioContext, so in order to use it from different threads it needs
  * its own locking.
  *
  * This locking is however handled internally in this file, so it's
  * transparent to outside users.
  *
  * The whole ThrottleGroup structure is private and invisible to
  * outside users, that only use it through its ThrottleState.
  *
- * In addition to the ThrottleGroup structure, BlockDriverState has
+ * In addition to the ThrottleGroup structure, BlockBackendPublic has
  * fields that need to be accessed by other members of the group and
- * therefore also need to be protected by this lock. Once a BDS is
- * registered in a group those fields can be accessed by other threads
- * any time.
+ * therefore also need to be protected by this lock. Once a
+ * BlockBackend is registered in a group those fields can be accessed
+ * by other threads any time.
  *
  * Again, all this is handled internally and is mostly transparent to
  * the outside. The 'throttle_timers' field however has an additional
  * constraint because it may be temporarily invalid (see for example
  * bdrv_set_aio_context()). Therefore in this file a thread will
- * access some other BDS's timers only after verifying that that BDS
- * has throttled requests in the queue.
+ * access some other BlockBackend's timers only after verifying that
+ * that BlockBackend has throttled requests in the queue.
  */
 typedef struct ThrottleGroup {
     char *name; /* This is constant during the lifetime of the group */
 
     QemuMutex lock; /* This lock protects the following four fields */
     ThrottleState ts;
     QLIST_HEAD(, BlockBackendPublic) head;
     BlockBackend *tokens[2];
--- 73 unchanged lines hidden ---
 /* Get the name from a BlockBackend's ThrottleGroup. The name (and the pointer)
  * is guaranteed to remain constant during the lifetime of the group.
  *
  * @blk: a BlockBackend that is member of a throttling group
  * @ret: the name of the group.
  */
 const char *throttle_group_get_name(BlockBackend *blk)
 {
-    ThrottleGroup *tg = container_of(blk_bs(blk)->throttle_state,
-                                     ThrottleGroup, ts);
+    BlockBackendPublic *blkp = blk_get_public(blk);
+    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
     return tg->name;
 }
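The hunk above shows the patch's central pattern, repeated throughout the file: instead of dereferencing `blk_bs(blk)` to reach the throttling fields, the new code fetches the embedded `BlockBackendPublic` with `blk_get_public()` and recovers owning structures with `container_of()`. A minimal, self-contained sketch of that embed-and-recover idiom (all names here are illustrative, not QEMU's):

```c
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
    ((type *) ((char *) (ptr) - offsetof(type, member)))

typedef struct Public { int pending_reqs[2]; } Public;

typedef struct Backend {
    int id;        /* private to the implementation */
    Public pub;    /* shared with other group members */
} Backend;

static Public *backend_get_public(Backend *b) { return &b->pub; }

static Backend *backend_by_public(Public *p)
{
    return container_of(p, Backend, pub);
}

int main(void)
{
    Backend b = { .id = 42 };
    Public *p = backend_get_public(&b);
    printf("owner id: %d\n", backend_by_public(p)->id); /* prints 42 */
    return 0;
}
```

`blk_by_public()` in the real code plays the same role as `backend_by_public()` here: the group links the public sub-structs into its list yet can always reach the owning BlockBackend.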
 
 /* Return the next BlockBackend in the round-robin sequence, simulating a
  * circular list.
  *
  * This assumes that tg->lock is held.
  *
  * @blk: the current BlockBackend
  * @ret: the next BlockBackend in the sequence
  */
 static BlockBackend *throttle_group_next_blk(BlockBackend *blk)
 {
-    BlockDriverState *bs = blk_bs(blk);
-    ThrottleState *ts = bs->throttle_state;
+    BlockBackendPublic *blkp = blk_get_public(blk);
+    ThrottleState *ts = blkp->throttle_state;
     ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
-    BlockBackendPublic *next = QLIST_NEXT(blk_get_public(blk), round_robin);
+    BlockBackendPublic *next = QLIST_NEXT(blkp, round_robin);
 
     if (!next) {
         next = QLIST_FIRST(&tg->head);
     }
 
     return blk_by_public(next);
 }
 
--- 4 unchanged lines hidden ---
  *
  * @blk: the current BlockBackend
  * @is_write: the type of operation (read/write)
  * @ret: the next BlockBackend with pending requests, or blk if there is
  *       none.
  */
 static BlockBackend *next_throttle_token(BlockBackend *blk, bool is_write)
 {
-    ThrottleGroup *tg = container_of(blk_bs(blk)->throttle_state,
-                                     ThrottleGroup, ts);
+    BlockBackendPublic *blkp = blk_get_public(blk);
+    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
     BlockBackend *token, *start;
 
     start = token = tg->tokens[is_write];
 
     /* get next bs round in round robin style */
     token = throttle_group_next_blk(token);
-    while (token != start && !blk_bs(token)->pending_reqs[is_write]) {
+    while (token != start && !blkp->pending_reqs[is_write]) {
         token = throttle_group_next_blk(token);
     }
 
     /* If no IO are queued for scheduling on the next round robin token
      * then decide the token is the current bs because chances are
      * the current bs get the current request queued.
      */
-    if (token == start && !blk_bs(token)->pending_reqs[is_write]) {
+    if (token == start && !blkp->pending_reqs[is_write]) {
         token = blk;
     }
 
     return token;
 }
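`next_throttle_token()` walks the group members round-robin starting from the current token holder, looking for one with queued requests and falling back to the caller when nobody has any. A toy model of the walk its doc comment describes, over a plain array instead of a QLIST simulated as a circular list (illustrative only):

```c
#include <stdio.h>

#define N 3

static int pending[N] = { 0, 2, 0 };   /* queued requests per member */

static int next_member(int i) { return (i + 1) % N; }

static int next_token(int current, int token)
{
    int start = token;
    int t = next_member(token);

    /* advance until someone has work, or we wrap back to the start */
    while (t != start && !pending[t]) {
        t = next_member(t);
    }
    if (t == start && !pending[t]) {
        t = current;   /* nobody queued: the caller keeps the token */
    }
    return t;
}

int main(void)
{
    printf("next token: %d\n", next_token(0, 0)); /* member 1 has work */
    return 0;
}
```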
 
 /* Check if the next I/O request for a BlockBackend needs to be throttled or
  * not. If there's no timer set in this group, set one and update the token
  * accordingly.
  *
  * This assumes that tg->lock is held.
  *
  * @blk: the current BlockBackend
  * @is_write: the type of operation (read/write)
  * @ret: whether the I/O request needs to be throttled or not
  */
 static bool throttle_group_schedule_timer(BlockBackend *blk, bool is_write)
 {
-    ThrottleState *ts = blk_bs(blk)->throttle_state;
-    ThrottleTimers *tt = &blk_bs(blk)->throttle_timers;
+    BlockBackendPublic *blkp = blk_get_public(blk);
+    ThrottleState *ts = blkp->throttle_state;
+    ThrottleTimers *tt = &blkp->throttle_timers;
     ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
     bool must_wait;
 
-    if (blk_bs(blk)->io_limits_disabled) {
+    if (blkp->io_limits_disabled) {
         return false;
     }
 
     /* Check if any of the timers in this group is already armed */
     if (tg->any_timer_armed[is_write]) {
         return true;
     }
 
--- 12 unchanged lines hidden ---
  *
  * This assumes that tg->lock is held.
  *
  * @blk: the current BlockBackend
  * @is_write: the type of operation (read/write)
  */
 static void schedule_next_request(BlockBackend *blk, bool is_write)
 {
-    BlockDriverState *bs = blk_bs(blk);
-    ThrottleGroup *tg = container_of(bs->throttle_state, ThrottleGroup, ts);
+    BlockBackendPublic *blkp = blk_get_public(blk);
+    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
     bool must_wait;
     BlockBackend *token;
 
     /* Check if there's any pending request to schedule next */
     token = next_throttle_token(blk, is_write);
-    if (!blk_bs(token)->pending_reqs[is_write]) {
+    if (!blkp->pending_reqs[is_write]) {
         return;
     }
 
     /* Set a timer for the request if it needs to be throttled */
     must_wait = throttle_group_schedule_timer(token, is_write);
 
     /* If it doesn't have to wait, queue it for immediate execution */
     if (!must_wait) {
-        /* Give preference to requests from the current bs */
+        /* Give preference to requests from the current blk */
         if (qemu_in_coroutine() &&
-            qemu_co_queue_next(&bs->throttled_reqs[is_write])) {
+            qemu_co_queue_next(&blkp->throttled_reqs[is_write])) {
             token = blk;
         } else {
-            ThrottleTimers *tt = &blk_bs(token)->throttle_timers;
+            ThrottleTimers *tt = &blkp->throttle_timers;
             int64_t now = qemu_clock_get_ns(tt->clock_type);
             timer_mod(tt->timers[is_write], now + 1);
             tg->any_timer_armed[is_write] = true;
         }
         tg->tokens[is_write] = token;
     }
 }
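When the next request cannot be resumed directly from coroutine context, `schedule_next_request()` arms the member's throttle timer one nanosecond in the future, so the request is restarted from the timer callback on the next event-loop iteration. A toy model of that near-immediate `timer_mod()` trick (illustrative, not QEMU's timer API):

```c
#include <stdio.h>
#include <stdint.h>

typedef struct Timer {
    int64_t expire_ns;
    void (*cb)(void *opaque);
    void *opaque;
    int armed;
} Timer;

static void timer_mod(Timer *t, int64_t expire_ns)
{
    t->expire_ns = expire_ns;
    t->armed = 1;
}

/* One event-loop step: fire the timer if its deadline has passed. */
static void loop_step(Timer *t, int64_t now_ns)
{
    if (t->armed && now_ns >= t->expire_ns) {
        t->armed = 0;
        t->cb(t->opaque);
    }
}

static void restart_request(void *opaque)
{
    printf("request %s restarted\n", (const char *) opaque);
}

int main(void)
{
    Timer t = { .cb = restart_request, .opaque = "r1" };
    int64_t now = 1000;

    timer_mod(&t, now + 1);   /* "now + 1": next iteration, not now */
    loop_step(&t, now);       /* not yet expired */
    loop_step(&t, now + 1);   /* fires here */
    return 0;
}
```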
 
--- 7 unchanged lines hidden ---
  */
 void coroutine_fn throttle_group_co_io_limits_intercept(BlockDriverState *bs,
                                                         unsigned int bytes,
                                                         bool is_write)
 {
     bool must_wait;
     BlockBackend *token;
 
-    ThrottleGroup *tg = container_of(bs->throttle_state, ThrottleGroup, ts);
+    BlockBackend *blk = bs->blk;
+    BlockBackendPublic *blkp = blk_get_public(blk);
+    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
     qemu_mutex_lock(&tg->lock);
 
     /* First we check if this I/O has to be throttled. */
-    token = next_throttle_token(bs->blk, is_write);
+    token = next_throttle_token(blk, is_write);
     must_wait = throttle_group_schedule_timer(token, is_write);
 
     /* Wait if there's a timer set or queued requests of this type */
-    if (must_wait || bs->pending_reqs[is_write]) {
-        bs->pending_reqs[is_write]++;
+    if (must_wait || blkp->pending_reqs[is_write]) {
+        blkp->pending_reqs[is_write]++;
         qemu_mutex_unlock(&tg->lock);
-        qemu_co_queue_wait(&bs->throttled_reqs[is_write]);
+        qemu_co_queue_wait(&blkp->throttled_reqs[is_write]);
         qemu_mutex_lock(&tg->lock);
-        bs->pending_reqs[is_write]--;
+        blkp->pending_reqs[is_write]--;
     }
 
     /* The I/O will be executed, so do the accounting */
-    throttle_account(bs->throttle_state, is_write, bytes);
+    throttle_account(blkp->throttle_state, is_write, bytes);
 
     /* Schedule the next request */
-    schedule_next_request(bs->blk, is_write);
+    schedule_next_request(blk, is_write);
 
     qemu_mutex_unlock(&tg->lock);
 }
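Note the shape of the throttled path above: the group lock is dropped before the coroutine parks on `throttled_reqs` and reacquired once it is woken, so other members can keep scheduling in between. A rough pthread analogue of that unlock-wait-relock shape (illustrative; QEMU parks a coroutine rather than blocking a thread):

```c
/* build with: cc demo.c -pthread */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t woken = PTHREAD_COND_INITIALIZER;
static int must_wait = 1;

static void *waiter(void *arg)
{
    pthread_mutex_lock(&lock);
    while (must_wait) {
        /* atomically releases the lock while sleeping and reacquires it
         * on wakeup -- the same unlock/wait/lock shape as above */
        pthread_cond_wait(&woken, &lock);
    }
    printf("request proceeds\n");
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, waiter, NULL);
    pthread_mutex_lock(&lock);
    must_wait = 0;                  /* e.g. the throttle timer fired */
    pthread_cond_signal(&woken);
    pthread_mutex_unlock(&lock);
    pthread_join(t, NULL);
    return 0;
}
```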
 
-void throttle_group_restart_bs(BlockDriverState *bs)
+void throttle_group_restart_blk(BlockBackend *blk)
 {
+    BlockBackendPublic *blkp = blk_get_public(blk);
     int i;
 
     for (i = 0; i < 2; i++) {
-        while (qemu_co_enter_next(&bs->throttled_reqs[i])) {
+        while (qemu_co_enter_next(&blkp->throttled_reqs[i])) {
             ;
         }
     }
 }
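`throttle_group_restart_blk()` simply re-enters every coroutine queued on the BlockBackend, for both directions; `qemu_co_enter_next()` returns false once the queue is empty. The same drain-until-empty shape as a toy model (illustrative):

```c
#include <stdio.h>

static int queued[2] = { 3, 1 };    /* read/write queues */

static int enter_next(int is_write) /* returns 0 when queue is empty */
{
    if (!queued[is_write]) {
        return 0;
    }
    queued[is_write]--;
    return 1;
}

int main(void)
{
    for (int i = 0; i < 2; i++) {
        while (enter_next(i)) {
            ;
        }
    }
    printf("queues drained: %d %d\n", queued[0], queued[1]);
    return 0;
}
```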
 
 /* Update the throttle configuration for a particular group. Similar
  * to throttle_config(), but guarantees atomicity within the
  * throttling group.
  *
  * @bs: a BlockDriverState that is member of the group
  * @cfg: the configuration to set
  */
 void throttle_group_config(BlockDriverState *bs, ThrottleConfig *cfg)
 {
-    ThrottleTimers *tt = &bs->throttle_timers;
-    ThrottleState *ts = bs->throttle_state;
+    BlockBackendPublic *blkp = blk_get_public(bs->blk);
+    ThrottleTimers *tt = &blkp->throttle_timers;
+    ThrottleState *ts = blkp->throttle_state;
     ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
     qemu_mutex_lock(&tg->lock);
     /* throttle_config() cancels the timers */
     if (timer_pending(tt->timers[0])) {
         tg->any_timer_armed[0] = false;
     }
     if (timer_pending(tt->timers[1])) {
         tg->any_timer_armed[1] = false;
     }
     throttle_config(ts, tt, cfg);
     qemu_mutex_unlock(&tg->lock);
 
-    qemu_co_enter_next(&bs->throttled_reqs[0]);
-    qemu_co_enter_next(&bs->throttled_reqs[1]);
+    qemu_co_enter_next(&blkp->throttled_reqs[0]);
+    qemu_co_enter_next(&blkp->throttled_reqs[1]);
 }
 
 /* Get the throttle configuration from a particular group. Similar to
  * throttle_get_config(), but guarantees atomicity within the
  * throttling group.
  *
  * @bs: a BlockDriverState that is member of the group
  * @cfg: the configuration will be written here
  */
 void throttle_group_get_config(BlockDriverState *bs, ThrottleConfig *cfg)
 {
-    ThrottleState *ts = bs->throttle_state;
+    BlockBackendPublic *blkp = blk_get_public(bs->blk);
+    ThrottleState *ts = blkp->throttle_state;
     ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
     qemu_mutex_lock(&tg->lock);
     throttle_get_config(ts, cfg);
     qemu_mutex_unlock(&tg->lock);
 }
 
 /* ThrottleTimers callback. This wakes up a request that was waiting
  * because it had been throttled.
  *
- * @bs: the BlockDriverState whose request had been throttled
+ * @blk: the BlockBackend whose request had been throttled
  * @is_write: the type of operation (read/write)
  */
-static void timer_cb(BlockDriverState *bs, bool is_write)
+static void timer_cb(BlockBackend *blk, bool is_write)
 {
-    ThrottleState *ts = bs->throttle_state;
+    BlockBackendPublic *blkp = blk_get_public(blk);
+    ThrottleState *ts = blkp->throttle_state;
     ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
     bool empty_queue;
 
     /* The timer has just been fired, so we can update the flag */
     qemu_mutex_lock(&tg->lock);
     tg->any_timer_armed[is_write] = false;
     qemu_mutex_unlock(&tg->lock);
 
     /* Run the request that was waiting for this timer */
-    empty_queue = !qemu_co_enter_next(&bs->throttled_reqs[is_write]);
+    empty_queue = !qemu_co_enter_next(&blkp->throttled_reqs[is_write]);
 
     /* If the request queue was empty then we have to take care of
      * scheduling the next one */
     if (empty_queue) {
         qemu_mutex_lock(&tg->lock);
-        schedule_next_request(bs->blk, is_write);
+        schedule_next_request(blk, is_write);
         qemu_mutex_unlock(&tg->lock);
     }
 }
 
 static void read_timer_cb(void *opaque)
 {
     timer_cb(opaque, false);
 }
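`timer_cb()` now receives the BlockBackend directly, while `read_timer_cb()`/`write_timer_cb()` remain thin trampolines that recover the typed argument from the timer's opaque pointer (which `throttle_timers_init()` below fills with `blk` instead of `bs`). The generic shape of such trampolines (illustrative):

```c
#include <stdio.h>

typedef struct Backend { const char *name; } Backend;

static void timer_cb(Backend *b, int is_write)
{
    printf("%s: restart %s request\n", b->name, is_write ? "write" : "read");
}

/* The timer framework only knows `void (*)(void *)`; these trampolines
 * recover the typed argument and add the direction flag. */
static void read_timer_cb(void *opaque)  { timer_cb(opaque, 0); }
static void write_timer_cb(void *opaque) { timer_cb(opaque, 1); }

int main(void)
{
    Backend b = { "blk0" };

    read_timer_cb(&b);
    write_timer_cb(&b);
    return 0;
}
```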
--- 8 unchanged lines hidden ---
  * throttling group with that name does not exist yet, it will be created.
  *
  * @blk: the BlockBackend to insert
  * @groupname: the name of the group
  */
 void throttle_group_register_blk(BlockBackend *blk, const char *groupname)
 {
     int i;
-    BlockDriverState *bs = blk_bs(blk);
+    BlockBackendPublic *blkp = blk_get_public(blk);
     ThrottleState *ts = throttle_group_incref(groupname);
     ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
     int clock_type = QEMU_CLOCK_REALTIME;
 
     if (qtest_enabled()) {
         /* For testing block IO throttling only */
         clock_type = QEMU_CLOCK_VIRTUAL;
     }
 
-    bs->throttle_state = ts;
+    blkp->throttle_state = ts;
 
     qemu_mutex_lock(&tg->lock);
     /* If the ThrottleGroup is new set this BlockBackend as the token */
     for (i = 0; i < 2; i++) {
         if (!tg->tokens[i]) {
             tg->tokens[i] = blk;
         }
     }
 
-    QLIST_INSERT_HEAD(&tg->head, blk_get_public(blk), round_robin);
+    QLIST_INSERT_HEAD(&tg->head, blkp, round_robin);
 
-    throttle_timers_init(&bs->throttle_timers,
-                         bdrv_get_aio_context(bs),
+    throttle_timers_init(&blkp->throttle_timers,
+                         blk_get_aio_context(blk),
                          clock_type,
                          read_timer_cb,
                          write_timer_cb,
-                         bs);
+                         blk);
 
     qemu_mutex_unlock(&tg->lock);
 }
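Registration above makes the new member the token holder when the group was empty; unregistration below hands the token to the round-robin successor, or clears it when the last member leaves. A toy model of that token hand-off (illustrative):

```c
#include <assert.h>
#include <stdio.h>

#define MAX 4
static int members[MAX];            /* 1 = registered */

static int next_member(int i)       /* round-robin successor */
{
    int j = i;

    do {
        j = (j + 1) % MAX;
    } while (!members[j]);
    return j;
}

int main(void)
{
    int token = -1;

    members[0] = 1;                  /* first member joins... */
    if (token < 0) {
        token = 0;                   /* ...and becomes the token */
    }
    members[2] = 1;

    /* member 0 (the token holder) leaves */
    int succ = next_member(0);
    token = (succ == 0) ? -1 : succ; /* the last member clears the token */
    members[0] = 0;

    assert(token == 2);
    printf("token now at member %d\n", token);
    return 0;
}
```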
 
 /* Unregister a BlockBackend from its group, removing it from the list,
  * destroying the timers and setting the throttle_state pointer to NULL.
  *
  * The BlockBackend must not have pending throttled requests, so the caller has
  * to drain them first.
  *
  * The group will be destroyed if it's empty after this operation.
  *
  * @blk: the BlockBackend to remove
  */
 void throttle_group_unregister_blk(BlockBackend *blk)
 {
-    BlockDriverState *bs = blk_bs(blk);
-    ThrottleGroup *tg = container_of(bs->throttle_state, ThrottleGroup, ts);
+    BlockBackendPublic *blkp = blk_get_public(blk);
+    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
     int i;
 
-    assert(bs->pending_reqs[0] == 0 && bs->pending_reqs[1] == 0);
-    assert(qemu_co_queue_empty(&bs->throttled_reqs[0]));
-    assert(qemu_co_queue_empty(&bs->throttled_reqs[1]));
+    assert(blkp->pending_reqs[0] == 0 && blkp->pending_reqs[1] == 0);
+    assert(qemu_co_queue_empty(&blkp->throttled_reqs[0]));
+    assert(qemu_co_queue_empty(&blkp->throttled_reqs[1]));
 
     qemu_mutex_lock(&tg->lock);
     for (i = 0; i < 2; i++) {
         if (tg->tokens[i] == blk) {
             BlockBackend *token = throttle_group_next_blk(blk);
-            /* Take care of the case where this is the last bs in the group */
+            /* Take care of the case where this is the last blk in the group */
             if (token == blk) {
                 token = NULL;
             }
             tg->tokens[i] = token;
         }
     }
 
-    /* remove the current bs from the list */
-    QLIST_REMOVE(blk_get_public(blk), round_robin);
-    throttle_timers_destroy(&bs->throttle_timers);
+    /* remove the current blk from the list */
+    QLIST_REMOVE(blkp, round_robin);
+    throttle_timers_destroy(&blkp->throttle_timers);
     qemu_mutex_unlock(&tg->lock);
 
     throttle_group_unref(&tg->ts);
-    bs->throttle_state = NULL;
+    blkp->throttle_state = NULL;
 }
 
 static void throttle_groups_init(void)
 {
     qemu_mutex_init(&throttle_groups_lock);
 }
 
 block_init(throttle_groups_init);