/*
 * Copyright (C) 2012 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-prison-v1.h"
#include "dm-bio-prison-v2.h"

#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>

/*----------------------------------------------------------------*/

/* Minimum number of cells reserved by the mempool backing each prison. */
#define MIN_CELLS 1024

/*
 * A prison is a set of "cells", keyed by dm_cell_key and held in an
 * rb-tree.  Each cell has a holder bio plus a list of further bios
 * waiting on the same key.  The spinlock protects the tree; it is taken
 * irqsave because cells are manipulated from bio completion context.
 */
struct dm_bio_prison {
	spinlock_t lock;
	mempool_t *cell_pool;	/* backs dm_bio_prison_alloc_cell() */
	struct rb_root cells;	/* active cells, ordered by cmp_keys() */
};

/* Slab cache shared by all prisons; created in dm_bio_prison_init_v1(). */
static struct kmem_cache *_cell_cache;

/*----------------------------------------------------------------*/

/*
 * Allocate and initialise an empty prison; returns NULL on allocation
 * failure.  MIN_CELLS bounds the number of cells in use _concurrently_ —
 * don't confuse it with the number of distinct keys.
 */
struct dm_bio_prison *dm_bio_prison_create(void)
{
	struct dm_bio_prison *prison = kmalloc(sizeof(*prison), GFP_KERNEL);

	if (!prison)
		return NULL;

	spin_lock_init(&prison->lock);

	prison->cell_pool = mempool_create_slab_pool(MIN_CELLS, _cell_cache);
	if (!prison->cell_pool) {
		kfree(prison);
		return NULL;
	}

	prison->cells = RB_ROOT;

	return prison;
}
EXPORT_SYMBOL_GPL(dm_bio_prison_create);

/* Tear down a prison created by dm_bio_prison_create(). */
void dm_bio_prison_destroy(struct dm_bio_prison *prison)
{
	mempool_destroy(prison->cell_pool);
	kfree(prison);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_destroy);

/* Get a cell from the prison's mempool; may sleep depending on @gfp. */
struct dm_bio_prison_cell *dm_bio_prison_alloc_cell(struct dm_bio_prison *prison, gfp_t gfp)
{
	return mempool_alloc(prison->cell_pool, gfp);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_alloc_cell);

/* Return an unused (or released) cell to the mempool. */
void dm_bio_prison_free_cell(struct dm_bio_prison *prison,
			     struct dm_bio_prison_cell *cell)
{
	mempool_free(cell, prison->cell_pool);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_free_cell);

/* Initialise a freshly inserted cell: @holder owns the lock on @key. */
static void __setup_new_cell(struct dm_cell_key *key,
			     struct bio *holder,
			     struct dm_bio_prison_cell *cell)
{
	memcpy(&cell->key, key, sizeof(cell->key));
	cell->holder = holder;
	bio_list_init(&cell->bios);
}

/*
 * Total order on keys: virtual flag, then device, then block range.
 * Overlapping ranges compare equal, so detaining any overlapping range
 * finds the existing cell.
 */
static int cmp_keys(struct dm_cell_key *lhs,
		    struct dm_cell_key *rhs)
{
	if (lhs->virtual < rhs->virtual)
		return -1;

	if (lhs->virtual > rhs->virtual)
		return 1;

	if (lhs->dev < rhs->dev)
		return -1;

	if (lhs->dev > rhs->dev)
		return 1;

	if (lhs->block_end <= rhs->block_begin)
		return -1;

	if (lhs->block_begin >= rhs->block_end)
		return 1;

	return 0;
}

/*
 * Walk the rb-tree looking for @key.  If a matching cell exists,
 * optionally queue @inmate behind its holder, set *cell_result to it and
 * return 1.  Otherwise insert @cell_prealloc as a new cell holding
 * @inmate (which may be NULL — see dm_get_cell()) and return 0.
 * Caller must hold prison->lock.
 */
static int __bio_detain(struct dm_bio_prison *prison,
			struct dm_cell_key *key,
			struct bio *inmate,
			struct dm_bio_prison_cell *cell_prealloc,
			struct dm_bio_prison_cell **cell_result)
{
	int r;
	struct rb_node **new = &prison->cells.rb_node, *parent = NULL;

	while (*new) {
		struct dm_bio_prison_cell *cell =
			rb_entry(*new, struct dm_bio_prison_cell, node);

		r = cmp_keys(key, &cell->key);

		parent = *new;
		if (r < 0)
			new = &((*new)->rb_left);
		else if (r > 0)
			new = &((*new)->rb_right);
		else {
			if (inmate)
				bio_list_add(&cell->bios, inmate);
			*cell_result = cell;
			return 1;
		}
	}

	__setup_new_cell(key, inmate, cell_prealloc);
	*cell_result = cell_prealloc;

	rb_link_node(&cell_prealloc->node, parent, new);
	rb_insert_color(&cell_prealloc->node, &prison->cells);

	return 0;
}

/* Lock-taking wrapper around __bio_detain(). */
static int bio_detain(struct dm_bio_prison *prison,
		      struct dm_cell_key *key,
		      struct bio *inmate,
		      struct dm_bio_prison_cell *cell_prealloc,
		      struct dm_bio_prison_cell **cell_result)
{
	int r;
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	r = __bio_detain(prison, key, inmate, cell_prealloc, cell_result);
	spin_unlock_irqrestore(&prison->lock, flags);

	return r;
}

/*
 * Detain @inmate in the cell for @key.  Returns 1 if the cell already
 * existed (the bio is queued behind the holder), 0 if @inmate became the
 * holder of a new cell built from @cell_prealloc.
 */
int dm_bio_detain(struct dm_bio_prison *prison,
		  struct dm_cell_key *key,
		  struct bio *inmate,
		  struct dm_bio_prison_cell *cell_prealloc,
		  struct dm_bio_prison_cell **cell_result)
{
	return bio_detain(prison, key, inmate, cell_prealloc, cell_result);
}
EXPORT_SYMBOL_GPL(dm_bio_detain);

/*
 * As dm_bio_detain() but with no bio: grabs the cell for @key with a
 * NULL holder.  Returns 1 if the cell was already occupied.
 */
int dm_get_cell(struct dm_bio_prison *prison,
		struct dm_cell_key *key,
		struct dm_bio_prison_cell *cell_prealloc,
		struct dm_bio_prison_cell **cell_result)
{
	return bio_detain(prison, key, NULL, cell_prealloc, cell_result);
}
EXPORT_SYMBOL_GPL(dm_get_cell);

/*
 * @inmates must have been initialised prior to this call.
 * Removes the cell from the tree and, if @inmates is non-NULL, moves the
 * holder (first) and all waiters onto it.  Caller must hold prison->lock.
 */
static void __cell_release(struct dm_bio_prison *prison,
			   struct dm_bio_prison_cell *cell,
			   struct bio_list *inmates)
{
	rb_erase(&cell->node, &prison->cells);

	if (inmates) {
		if (cell->holder)
			bio_list_add(inmates, cell->holder);
		bio_list_merge(inmates, &cell->bios);
	}
}

/* Release a cell, transferring all its bios (holder first) onto @bios. */
void dm_cell_release(struct dm_bio_prison *prison,
		     struct dm_bio_prison_cell *cell,
		     struct bio_list *bios)
{
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	__cell_release(prison, cell, bios);
	spin_unlock_irqrestore(&prison->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_cell_release);

/*
 * Sometimes we don't want the holder, just the additional bios.
 * The cell is removed from the tree; its waiting bios are merged onto
 * @inmates while the holder is simply dropped.  Caller holds prison->lock.
 */
static void __cell_release_no_holder(struct dm_bio_prison *prison,
				     struct dm_bio_prison_cell *cell,
				     struct bio_list *inmates)
{
	rb_erase(&cell->node, &prison->cells);
	bio_list_merge(inmates, &cell->bios);
}

/* Lock-taking wrapper around __cell_release_no_holder(). */
void dm_cell_release_no_holder(struct dm_bio_prison *prison,
			       struct dm_bio_prison_cell *cell,
			       struct bio_list *inmates)
{
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	__cell_release_no_holder(prison, cell, inmates);
	spin_unlock_irqrestore(&prison->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_cell_release_no_holder);

/* Release a cell and complete every bio in it with @error status. */
void dm_cell_error(struct dm_bio_prison *prison,
		   struct dm_bio_prison_cell *cell, blk_status_t error)
{
	struct bio_list bios;
	struct bio *bio;

	bio_list_init(&bios);
	dm_cell_release(prison, cell, &bios);

	while ((bio = bio_list_pop(&bios))) {
		bio->bi_status = error;
		bio_endio(bio);
	}
}
EXPORT_SYMBOL_GPL(dm_cell_error);

/*
 * Call @visit_fn(@context, @cell) and then remove the cell from the
 * prison, both under the prison lock, so visiting and releasing are
 * atomic wrt concurrent detains.  @visit_fn must not sleep.
 */
void dm_cell_visit_release(struct dm_bio_prison *prison,
			   void (*visit_fn)(void *, struct dm_bio_prison_cell *),
			   void *context,
			   struct dm_bio_prison_cell *cell)
{
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	visit_fn(context, cell);
	rb_erase(&cell->node, &prison->cells);
	spin_unlock_irqrestore(&prison->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_cell_visit_release);

/*
 * If the cell has no waiters it is removed from the tree (returns 1);
 * otherwise the first waiter is promoted to holder (returns 0).
 * Caller must hold prison->lock.
 */
static int __promote_or_release(struct dm_bio_prison *prison,
				struct dm_bio_prison_cell *cell)
{
	if (bio_list_empty(&cell->bios)) {
		rb_erase(&cell->node, &prison->cells);
		return 1;
	}

	cell->holder = bio_list_pop(&cell->bios);
	return 0;
}

/* Lock-taking wrapper around __promote_or_release(). */
int dm_cell_promote_or_release(struct dm_bio_prison *prison,
			       struct dm_bio_prison_cell *cell)
{
	int r;
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	r = __promote_or_release(prison, cell);
	spin_unlock_irqrestore(&prison->lock, flags);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_promote_or_release);

/*----------------------------------------------------------------*/

#define DEFERRED_SET_SIZE 64

/*
 * A deferred set is a small ring of entries.  In-flight operations take
 * a reference on the current entry; deferred work is queued on the
 * current entry and only handed back to the caller (swept) once all
 * references on it and every older entry have been dropped.
 */
struct dm_deferred_entry {
	struct dm_deferred_set *ds;	/* owning set */
	unsigned count;			/* outstanding references */
	struct list_head work_items;	/* work queued on this generation */
};

struct dm_deferred_set {
	spinlock_t lock;
	unsigned current_entry;		/* entry new refs/work attach to */
	unsigned sweeper;		/* oldest entry not yet swept */
	struct dm_deferred_entry entries[DEFERRED_SET_SIZE];
};

/* Allocate and initialise a deferred set; returns NULL on failure. */
struct dm_deferred_set *dm_deferred_set_create(void)
{
	int i;
	struct dm_deferred_set *ds;

	ds = kmalloc(sizeof(*ds), GFP_KERNEL);
	if (!ds)
		return NULL;

	spin_lock_init(&ds->lock);
	ds->current_entry = 0;
	ds->sweeper = 0;
	for (i = 0; i < DEFERRED_SET_SIZE; i++) {
		ds->entries[i].ds = ds;
		ds->entries[i].count = 0;
		INIT_LIST_HEAD(&ds->entries[i].work_items);
	}

	return ds;
}
EXPORT_SYMBOL_GPL(dm_deferred_set_create);

/* Free a deferred set.  Callers must have quiesced it first. */
void dm_deferred_set_destroy(struct dm_deferred_set *ds)
{
	kfree(ds);
}
EXPORT_SYMBOL_GPL(dm_deferred_set_destroy);

/*
 * Take a reference on the current entry.  Pair with
 * dm_deferred_entry_dec() on the returned entry.
 */
struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds)
{
	unsigned long flags;
	struct dm_deferred_entry *entry;

	spin_lock_irqsave(&ds->lock, flags);
	entry = ds->entries + ds->current_entry;
	entry->count++;
	spin_unlock_irqrestore(&ds->lock, flags);

	return entry;
}
EXPORT_SYMBOL_GPL(dm_deferred_entry_inc);

/* Next slot in the ring, wrapping at DEFERRED_SET_SIZE. */
static unsigned ds_next(unsigned index)
{
	return (index + 1) % DEFERRED_SET_SIZE;
}

/*
 * Splice the work of every fully-quiesced entry (count == 0), oldest
 * first, onto @head, stopping at the first entry still referenced.  The
 * current entry's work is also collected if it too has quiesced.
 * Caller must hold ds->lock.
 */
static void __sweep(struct dm_deferred_set *ds, struct list_head *head)
{
	while ((ds->sweeper != ds->current_entry) &&
	       !ds->entries[ds->sweeper].count) {
		list_splice_init(&ds->entries[ds->sweeper].work_items, head);
		ds->sweeper = ds_next(ds->sweeper);
	}

	if ((ds->sweeper == ds->current_entry) &&
	    !ds->entries[ds->sweeper].count)
		list_splice_init(&ds->entries[ds->sweeper].work_items, head);
}

/*
 * Drop a reference taken with dm_deferred_entry_inc(), then sweep any
 * now-quiesced work onto @head for the caller to issue.
 */
void dm_deferred_entry_dec(struct dm_deferred_entry *entry, struct list_head *head)
{
	unsigned long flags;

	spin_lock_irqsave(&entry->ds->lock, flags);
	BUG_ON(!entry->count);
	--entry->count;
	__sweep(entry->ds, head);
	spin_unlock_irqrestore(&entry->ds->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_deferred_entry_dec);

/*
 * Returns 1 if deferred or 0 if no pending items to delay job.
 */
int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work)
{
	int r = 1;
	unsigned long flags;
	unsigned next_entry;

	spin_lock_irqsave(&ds->lock, flags);
	if ((ds->sweeper == ds->current_entry) &&
	    !ds->entries[ds->current_entry].count)
		r = 0;	/* nothing in flight: caller may run @work directly */
	else {
		list_add(work, &ds->entries[ds->current_entry].work_items);
		/* open a new generation if the next ring slot is free */
		next_entry = ds_next(ds->current_entry);
		if (!ds->entries[next_entry].count)
			ds->current_entry = next_entry;
	}
	spin_unlock_irqrestore(&ds->lock, flags);

	return r;
}
EXPORT_SYMBOL_GPL(dm_deferred_set_add_work);

/*----------------------------------------------------------------*/

/* Create the slab cache backing all v1 prison cell mempools. */
static int __init dm_bio_prison_init_v1(void)
{
	_cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);
	if (!_cell_cache)
		return -ENOMEM;

	return 0;
}

/* Tear down the v1 cell slab cache. */
static void dm_bio_prison_exit_v1(void)
{
	kmem_cache_destroy(_cell_cache);
	_cell_cache = NULL;
}

/* v1/v2 sub-module hooks, run in order on init and in reverse on exit. */
static int (*_inits[])(void) __initdata = {
	dm_bio_prison_init_v1,
	dm_bio_prison_init_v2,
};

static void (*_exits[])(void) = {
	dm_bio_prison_exit_v1,
	dm_bio_prison_exit_v2,
};

/* Run each sub-init; on failure unwind the ones already initialised. */
static int __init dm_bio_prison_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

      bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_bio_prison_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();
}

/*
 * module hooks
 */
module_init(dm_bio_prison_init);
module_exit(dm_bio_prison_exit);

MODULE_DESCRIPTION(DM_NAME " bio prison");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");