--- dm-table.c (447a8b858e4bda41c394b1bc7fdbc9dc0bdf44f6)
+++ dm-table.c (86f1152b117a404229fd6f08ec3faca779f37b92)
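The hunks below all serve one refactor: struct dm_dev_internal no longer embeds its struct dm_dev but holds a pointer to one, and opening/closing of the underlying block device moves out of this file (open_dev() and close_dev() are deleted) into a pair of mapped_device-level helpers, dm_get_table_device() and dm_put_table_device(). Judging from the md-owned helpers, the intent is evidently to let one open dm_dev be shared rather than opened per table; every `dd->dm_dev.` access accordingly becomes `dd->dm_dev->`.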
 /*
  * Copyright (C) 2001 Sistina Software (UK) Limited.
  * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
  *
  * This file is released under the GPL.
  */

 #include "dm.h"

--- 196 unchanged lines hidden ---

 	}

 	t->mode = mode;
 	t->md = md;
 	*result = t;
 	return 0;
 }

-static void free_devices(struct list_head *devices)
+static void free_devices(struct list_head *devices, struct mapped_device *md)
 {
 	struct list_head *tmp, *next;

 	list_for_each_safe(tmp, next, devices) {
 		struct dm_dev_internal *dd =
 		    list_entry(tmp, struct dm_dev_internal, list);
-		DMWARN("dm_table_destroy: dm_put_device call missing for %s",
-		       dd->dm_dev.name);
+		DMWARN("%s: dm_table_destroy: dm_put_device call missing for %s",
+		       dm_device_name(md), dd->dm_dev->name);
+		dm_put_table_device(md, dd->dm_dev);
 		kfree(dd);
 	}
 }

 void dm_table_destroy(struct dm_table *t)
 {
 	unsigned int i;

--- 12 unchanged lines hidden ---

 		tgt->type->dtr(tgt);

 		dm_put_target_type(tgt->type);
 	}

 	vfree(t->highs);

 	/* free the device list */
-	free_devices(&t->devices);
+	free_devices(&t->devices, t->md);

 	dm_free_md_mempools(t->mempools);

 	kfree(t);
 }

 /*
  * See if we've already got a device in the list.
  */
 static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
 {
 	struct dm_dev_internal *dd;

 	list_for_each_entry (dd, l, list)
-		if (dd->dm_dev.bdev->bd_dev == dev)
+		if (dd->dm_dev->bdev->bd_dev == dev)
 			return dd;

 	return NULL;
 }

 /*
- * Open a device so we can use it as a map destination.
- */
-static int open_dev(struct dm_dev_internal *d, dev_t dev,
-		    struct mapped_device *md)
-{
-	static char *_claim_ptr = "I belong to device-mapper";
-	struct block_device *bdev;
-
-	int r;
-
-	BUG_ON(d->dm_dev.bdev);
-
-	bdev = blkdev_get_by_dev(dev, d->dm_dev.mode | FMODE_EXCL, _claim_ptr);
-	if (IS_ERR(bdev))
-		return PTR_ERR(bdev);
-
-	r = bd_link_disk_holder(bdev, dm_disk(md));
-	if (r) {
-		blkdev_put(bdev, d->dm_dev.mode | FMODE_EXCL);
-		return r;
-	}
-
-	d->dm_dev.bdev = bdev;
-	return 0;
-}
-
-/*
- * Close a device that we've been using.
- */
-static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
-{
-	if (!d->dm_dev.bdev)
-		return;
-
-	bd_unlink_disk_holder(d->dm_dev.bdev, dm_disk(md));
-	blkdev_put(d->dm_dev.bdev, d->dm_dev.mode | FMODE_EXCL);
-	d->dm_dev.bdev = NULL;
-}
-
-/*
  * If possible, this checks an area of a destination device is invalid.
  */
 static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
 				  sector_t start, sector_t len, void *data)
 {
 	struct request_queue *q;
 	struct queue_limits *limits = data;
 	struct block_device *bdev = dev->bdev;

--- 61 unchanged lines hidden ---

  * careful to leave things as they were if we fail to reopen the
  * device and not to touch the existing bdev field in case
  * it is accessed concurrently inside dm_table_any_congested().
  */
 static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
 			struct mapped_device *md)
 {
 	int r;
-	struct dm_dev_internal dd_new, dd_old;
+	struct dm_dev *old_dev, *new_dev;

-	dd_new = dd_old = *dd;
+	old_dev = dd->dm_dev;

-	dd_new.dm_dev.mode |= new_mode;
-	dd_new.dm_dev.bdev = NULL;
-
-	r = open_dev(&dd_new, dd->dm_dev.bdev->bd_dev, md);
+	r = dm_get_table_device(md, dd->dm_dev->bdev->bd_dev,
+				dd->dm_dev->mode | new_mode, &new_dev);
 	if (r)
 		return r;

-	dd->dm_dev.mode |= new_mode;
-	close_dev(&dd_old, md);
+	dd->dm_dev = new_dev;
+	dm_put_table_device(md, old_dev);

 	return 0;
 }

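dm_get_table_device() and dm_put_table_device() themselves are outside this diff (presumably added alongside in dm.c). The prototypes below are a sketch inferred only from the call sites visible here, not copied from a header:

```c
/*
 * Inferred from the call sites above (an assumption, not the quoted header):
 * acquire a dm_dev that is owned and refcounted by the mapped_device,
 * opening the underlying block device with the requested mode if needed,
 * and release that reference again.
 */
int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
			struct dm_dev **result);
void dm_put_table_device(struct mapped_device *md, struct dm_dev *d);
```

Note the ordering in upgrade_mode(): the reference with the widened mode is taken before the old one is dropped, which keeps the promise in the comment above it that a failed upgrade leaves the existing device untouched.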
 /*
  * Add a device to the list, or just increment the usage count if
  * it's already present.
  */

--- 25 unchanged lines hidden ---

 	}

 	dd = find_device(&t->devices, dev);
 	if (!dd) {
 		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
 		if (!dd)
 			return -ENOMEM;

-		dd->dm_dev.mode = mode;
-		dd->dm_dev.bdev = NULL;
-
-		if ((r = open_dev(dd, dev, t->md))) {
+		if ((r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev))) {
 			kfree(dd);
 			return r;
 		}

-		format_dev_t(dd->dm_dev.name, dev);
-
 		atomic_set(&dd->count, 0);
 		list_add(&dd->list, &t->devices);

-	} else if (dd->dm_dev.mode != (mode | dd->dm_dev.mode)) {
+	} else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
 		r = upgrade_mode(dd, mode, t->md);
 		if (r)
 			return r;
 	}
 	atomic_inc(&dd->count);

-	*result = &dd->dm_dev;
+	*result = dd->dm_dev;
 	return 0;
 }
 EXPORT_SYMBOL(dm_get_device);

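For context on the exported API: targets take and drop these references from their constructors and destructors. A minimal, hypothetical target sketch (my_ctx, my_ctr and my_dtr are invented names; the four-argument dm_get_device() matches the symbol exported above):

```c
#include <linux/device-mapper.h>
#include <linux/slab.h>

/* Hypothetical per-target state; not part of this diff. */
struct my_ctx {
	struct dm_dev *dev;
};

static int my_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct my_ctx *mc;
	int r;

	if (argc != 1) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return -ENOMEM;

	/* Takes a reference on the table device; paired with dm_put_device(). */
	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &mc->dev);
	if (r) {
		kfree(mc);
		return r;
	}

	ti->private = mc;
	return 0;
}

static void my_dtr(struct dm_target *ti)
{
	struct my_ctx *mc = ti->private;

	dm_put_device(ti, mc->dev);	/* drops the reference from my_ctr() */
	kfree(mc);
}
```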
 static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
 				sector_t start, sector_t len, void *data)
 {
 	struct queue_limits *limits = data;

--- 28 unchanged lines hidden ---

 	return 0;
 }

 /*
  * Decrement a device's use count and remove it if necessary.
  */
 void dm_put_device(struct dm_target *ti, struct dm_dev *d)
 {
-	struct dm_dev_internal *dd = container_of(d, struct dm_dev_internal,
-						  dm_dev);
+	int found = 0;
+	struct list_head *devices = &ti->table->devices;
+	struct dm_dev_internal *dd;

+	list_for_each_entry(dd, devices, list) {
+		if (dd->dm_dev == d) {
+			found = 1;
+			break;
+		}
+	}
+	if (!found) {
+		DMWARN("%s: device %s not in table devices list",
+		       dm_device_name(ti->table->md), d->name);
+		return;
+	}
 	if (atomic_dec_and_test(&dd->count)) {
-		close_dev(dd, ti->table->md);
+		dm_put_table_device(ti->table->md, d);
 		list_del(&dd->list);
 		kfree(dd);
 	}
 }
 EXPORT_SYMBOL(dm_put_device);

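The lookup loop here compensates for the structural change: while dm_dev was embedded in dm_dev_internal, container_of() could recover the wrapper directly from the dm_dev pointer. With dm_dev now a shared pointer owned by the mapped_device, dm_put_device() instead searches the table's own device list for the matching entry, and warns rather than misbehaving if a caller passes a device that was never added to this table.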
 /*
  * Checks to see if the target joins onto the end of the table.

--- 380 unchanged lines hidden ---

 		return 0;
 	}

 	BUG_ON(!request_based); /* No targets in this table */

 	/* Non-request-stackable devices can't be used for request-based dm */
 	devices = dm_table_get_devices(t);
 	list_for_each_entry(dd, devices, list) {
-		if (!blk_queue_stackable(bdev_get_queue(dd->dm_dev.bdev))) {
+		if (!blk_queue_stackable(bdev_get_queue(dd->dm_dev->bdev))) {
 			DMWARN("table load rejected: including"
 			       " non-request-stackable devices");
 			return -EINVAL;
 		}
 	}

 	/*
 	 * Request-based dm supports only tables that have a single target now.

--- 120 unchanged lines hidden ---

 static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t,
 						    bool match_all)
 {
 	struct list_head *devices = dm_table_get_devices(t);
 	struct dm_dev_internal *dd = NULL;
 	struct gendisk *prev_disk = NULL, *template_disk = NULL;

 	list_for_each_entry(dd, devices, list) {
-		template_disk = dd->dm_dev.bdev->bd_disk;
+		template_disk = dd->dm_dev->bdev->bd_disk;
 		if (!blk_get_integrity(template_disk))
 			goto no_integrity;
 		if (!match_all && !blk_integrity_is_initialized(template_disk))
 			continue; /* skip uninitialized profiles */
 		else if (prev_disk &&
 			 blk_integrity_compare(prev_disk, template_disk) < 0)
 			goto no_integrity;
 		prev_disk = template_disk;

--- 569 unchanged lines hidden ---

 int dm_table_any_congested(struct dm_table *t, int bdi_bits)
 {
 	struct dm_dev_internal *dd;
 	struct list_head *devices = dm_table_get_devices(t);
 	struct dm_target_callbacks *cb;
 	int r = 0;

 	list_for_each_entry(dd, devices, list) {
-		struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
+		struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);
 		char b[BDEVNAME_SIZE];

 		if (likely(q))
 			r |= bdi_congested(&q->backing_dev_info, bdi_bits);
 		else
 			DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
 				     dm_device_name(t->md),
-				     bdevname(dd->dm_dev.bdev, b));
+				     bdevname(dd->dm_dev->bdev, b));
 	}

 	list_for_each_entry(cb, &t->target_callbacks, list)
 		if (cb->congested_fn)
 			r |= cb->congested_fn(cb, bdi_bits);

 	return r;
 }

--- 40 unchanged lines hidden ---