/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef _LINUX_DEVICE_MAPPER_H
#define _LINUX_DEVICE_MAPPER_H

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/dm-ioctl.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>

struct dm_dev;
struct dm_target;
struct dm_table;
struct mapped_device;
struct bio_vec;

/*
 * Type of table, mapped_device's mempool and request_queue
 */
enum dm_queue_mode {
	DM_TYPE_NONE		= 0,
	DM_TYPE_BIO_BASED	= 1,
	DM_TYPE_REQUEST_BASED	= 2,
	DM_TYPE_DAX_BIO_BASED	= 3,
	DM_TYPE_NVME_BIO_BASED	= 4,
};

typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;

union map_info {
	void *ptr;
};

/*
 * In the constructor the target parameter will already have the
 * table, type, begin and len fields filled in.
 */
typedef int (*dm_ctr_fn) (struct dm_target *target,
			  unsigned int argc, char **argv);

/*
 * The destructor doesn't need to free the dm_target, just
 * anything hidden in ti->private.
 */
typedef void (*dm_dtr_fn) (struct dm_target *ti);

/*
 * The map function must return:
 * < 0: error
 * = 0: The target will handle the io by resubmitting it later
 * = 1: simple remap complete
 * = 2: The target wants to push back the io
 */
typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti,
					    struct request *rq,
					    union map_info *map_context,
					    struct request **clone);
typedef void (*dm_release_clone_request_fn) (struct request *clone,
					     union map_info *map_context);

/*
 * Returns:
 * < 0 : error (currently ignored)
 *   0 : ended successfully
 *   1 : for some reason the io has still not completed (eg,
 *       multipath target might want to requeue a failed io).
 *   2 : The target wants to push back the io
 */
typedef int (*dm_endio_fn) (struct dm_target *ti,
			    struct bio *bio, blk_status_t *error);
typedef int (*dm_request_endio_fn) (struct dm_target *ti,
				    struct request *clone, blk_status_t error,
				    union map_info *map_context);

typedef void (*dm_presuspend_fn) (struct dm_target *ti);
typedef void (*dm_presuspend_undo_fn) (struct dm_target *ti);
typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);

typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
			      unsigned status_flags, char *result, unsigned maxlen);

typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv,
			      char *result, unsigned maxlen);

typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev);

typedef int (*dm_report_zones_fn) (struct dm_target *ti, sector_t sector,
				   struct blk_zone *zones,
				   unsigned int *nr_zones);

/*
 * These iteration functions are typically used to check (and combine)
 * properties of underlying devices.
 * E.g. Does at least one underlying device support flush?
 *      Does any underlying device not support WRITE_SAME?
 *
 * The callout function is called once for each contiguous section of
 * an underlying device.  State can be maintained in *data.
 * Return non-zero to stop iterating through any further devices.
 */
typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
					   struct dm_dev *dev,
					   sector_t start, sector_t len,
					   void *data);
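
/*
 * Illustrative sketch only, not part of this interface: a callout that
 * records whether every underlying device advertises a volatile write
 * cache, i.e. whether flushes are meaningful for it.  Returning 0 keeps
 * the iteration going over the remaining devices.  The example_* names
 * are hypothetical.
 *
 *	static int example_device_flushable(struct dm_target *ti,
 *					    struct dm_dev *dev,
 *					    sector_t start, sector_t len,
 *					    void *data)
 *	{
 *		bool *all_flushable = data;
 *		struct request_queue *q = bdev_get_queue(dev->bdev);
 *
 *		if (!q || !test_bit(QUEUE_FLAG_WC, &q->queue_flags))
 *			*all_flushable = false;
 *
 *		return 0;
 *	}
 */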

/*
 * This function must iterate through each section of device used by the
 * target until it encounters a non-zero return code, which it then returns.
 * Returns zero if no callout returned non-zero.
 */
typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
				      iterate_devices_callout_fn fn,
				      void *data);

typedef void (*dm_io_hints_fn) (struct dm_target *ti,
				struct queue_limits *limits);

/*
 * Returns:
 *    0: The target can handle the next I/O immediately.
 *    1: The target can't handle the next I/O immediately.
 */
typedef int (*dm_busy_fn) (struct dm_target *ti);

/*
 * Returns:
 *  < 0 : error
 * >= 0 : the number of bytes accessible at the address
 */
typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn);
typedef size_t (*dm_dax_copy_iter_fn)(struct dm_target *ti, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i);
#define PAGE_SECTORS (PAGE_SIZE / 512)

void dm_error(const char *message);

struct dm_dev {
	struct block_device *bdev;
	struct dax_device *dax_dev;
	fmode_t mode;
	char name[16];
};

dev_t dm_get_dev_t(const char *path);

/*
 * Constructors should call these functions to ensure destination devices
 * are opened/closed correctly.
 */
int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
		  struct dm_dev **result);
void dm_put_device(struct dm_target *ti, struct dm_dev *d);
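
/*
 * Illustrative sketch only (hypothetical "example" target): a constructor
 * typically validates its arguments, opens the device it maps to with
 * dm_get_device() and stashes its state in ti->private; the destructor
 * undoes this with dm_put_device().  dm_table_get_mode() is declared
 * later in this header.
 *
 *	struct example_ctx {
 *		struct dm_dev *dev;
 *	};
 *
 *	static int example_ctr(struct dm_target *ti, unsigned int argc,
 *			       char **argv)
 *	{
 *		struct example_ctx *ec;
 *
 *		if (argc != 1) {
 *			ti->error = "Invalid argument count";
 *			return -EINVAL;
 *		}
 *
 *		ec = kzalloc(sizeof(*ec), GFP_KERNEL);
 *		if (!ec) {
 *			ti->error = "Cannot allocate example context";
 *			return -ENOMEM;
 *		}
 *
 *		if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
 *				  &ec->dev)) {
 *			ti->error = "Device lookup failed";
 *			kfree(ec);
 *			return -EINVAL;
 *		}
 *
 *		ti->private = ec;
 *		return 0;
 *	}
 *
 *	static void example_dtr(struct dm_target *ti)
 *	{
 *		struct example_ctx *ec = ti->private;
 *
 *		dm_put_device(ti, ec->dev);
 *		kfree(ec);
 *	}
 */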

/*
 * Information about a target type
 */

struct target_type {
	uint64_t features;
	const char *name;
	struct module *module;
	unsigned version[3];
	dm_ctr_fn ctr;
	dm_dtr_fn dtr;
	dm_map_fn map;
	dm_clone_and_map_request_fn clone_and_map_rq;
	dm_release_clone_request_fn release_clone_rq;
	dm_endio_fn end_io;
	dm_request_endio_fn rq_end_io;
	dm_presuspend_fn presuspend;
	dm_presuspend_undo_fn presuspend_undo;
	dm_postsuspend_fn postsuspend;
	dm_preresume_fn preresume;
	dm_resume_fn resume;
	dm_status_fn status;
	dm_message_fn message;
	dm_prepare_ioctl_fn prepare_ioctl;
#ifdef CONFIG_BLK_DEV_ZONED
	dm_report_zones_fn report_zones;
#endif
	dm_busy_fn busy;
	dm_iterate_devices_fn iterate_devices;
	dm_io_hints_fn io_hints;
	dm_dax_direct_access_fn direct_access;
	dm_dax_copy_iter_fn dax_copy_from_iter;
	dm_dax_copy_iter_fn dax_copy_to_iter;

	/* For internal device-mapper use. */
	struct list_head list;
};

/*
 * Target features
 */

/*
 * Any table that contains an instance of this target must have only one.
 */
#define DM_TARGET_SINGLETON		0x00000001
#define dm_target_needs_singleton(type) ((type)->features & DM_TARGET_SINGLETON)

/*
 * Indicates that a target does not support read-only devices.
 */
#define DM_TARGET_ALWAYS_WRITEABLE	0x00000002
#define dm_target_always_writeable(type) \
		((type)->features & DM_TARGET_ALWAYS_WRITEABLE)

/*
 * Any device that contains a table with an instance of this target may never
 * have tables containing any different target type.
 */
#define DM_TARGET_IMMUTABLE		0x00000004
#define dm_target_is_immutable(type)	((type)->features & DM_TARGET_IMMUTABLE)

/*
 * Indicates that a target may replace any target; even immutable targets.
 * .map, .map_rq, .clone_and_map_rq and .release_clone_rq are all defined.
 */
#define DM_TARGET_WILDCARD		0x00000008
#define dm_target_is_wildcard(type)	((type)->features & DM_TARGET_WILDCARD)

/*
 * A target implements its own bio data integrity.
 */
#define DM_TARGET_INTEGRITY		0x00000010
#define dm_target_has_integrity(type)	((type)->features & DM_TARGET_INTEGRITY)

/*
 * A target passes integrity data to the lower device.
 */
#define DM_TARGET_PASSES_INTEGRITY	0x00000020
#define dm_target_passes_integrity(type) ((type)->features & DM_TARGET_PASSES_INTEGRITY)

/*
 * Indicates that a target supports host-managed zoned block devices.
 */
#define DM_TARGET_ZONED_HM		0x00000040
#define dm_target_supports_zoned_hm(type) ((type)->features & DM_TARGET_ZONED_HM)

struct dm_target {
	struct dm_table *table;
	struct target_type *type;

	/* target limits */
	sector_t begin;
	sector_t len;

	/* If non-zero, maximum size of I/O submitted to a target. */
	uint32_t max_io_len;

	/*
	 * A number of zero-length barrier bios that will be submitted
	 * to the target for the purpose of flushing cache.
	 *
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 * It is the responsibility of the target driver to remap these bios
	 * to the real underlying devices.
	 */
	unsigned num_flush_bios;

	/*
	 * The number of discard bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_discard_bios;

	/*
	 * The number of secure erase bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_secure_erase_bios;

	/*
	 * The number of WRITE SAME bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_write_same_bios;

	/*
	 * The number of WRITE ZEROES bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_write_zeroes_bios;

	/*
	 * The minimum number of extra bytes allocated in each io for the
	 * target to use.
	 */
	unsigned per_io_data_size;

	/* target specific data */
	void *private;

	/* Used to provide an error string from the ctr */
	char *error;

	/*
	 * Set if this target needs to receive flushes regardless of
	 * whether or not its underlying devices have support.
	 */
	bool flush_supported:1;

	/*
	 * Set if this target needs to receive discards regardless of
	 * whether or not its underlying devices have support.
	 */
	bool discards_supported:1;
};
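
/*
 * Illustrative sketch only (continuing the hypothetical "example" target
 * above): a simple remapping map function points the bio at the underlying
 * device, rebases the sector relative to the start of the target with
 * dm_target_offset() (defined at the end of this header) and returns
 * DM_MAPIO_REMAPPED.
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		struct example_ctx *ec = ti->private;
 *
 *		bio_set_dev(bio, ec->dev->bdev);
 *		if (bio_sectors(bio))
 *			bio->bi_iter.bi_sector =
 *				dm_target_offset(ti, bio->bi_iter.bi_sector);
 *
 *		return DM_MAPIO_REMAPPED;
 *	}
 */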

/* Each target can link one of these into the table */
struct dm_target_callbacks {
	struct list_head list;
	int (*congested_fn) (struct dm_target_callbacks *, int);
};

void *dm_per_bio_data(struct bio *bio, size_t data_size);
struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size);
unsigned dm_bio_get_target_bio_nr(const struct bio *bio);

int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);

/*
 * Target argument parsing.
 */
struct dm_arg_set {
	unsigned argc;
	char **argv;
};

/*
 * The minimum and maximum value of a numeric argument, together with
 * the error message to use if the number is found to be outside that range.
 */
struct dm_arg {
	unsigned min;
	unsigned max;
	char *error;
};

/*
 * Validate the next argument, either returning it as *value or, if invalid,
 * returning -EINVAL and setting *error.
 */
int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		unsigned *value, char **error);

/*
 * Process the next argument as the start of a group containing between
 * arg->min and arg->max further arguments. Either return the size as
 * *num_args or, if invalid, return -EINVAL and set *error.
 */
int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		      unsigned *num_args, char **error);

/*
 * Return the current argument and shift to the next.
 */
const char *dm_shift_arg(struct dm_arg_set *as);

/*
 * Move through num_args arguments.
 */
void dm_consume_args(struct dm_arg_set *as, unsigned num_args);
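
/*
 * Illustrative sketch only: validating one numeric argument with
 * dm_read_arg().  On failure it returns -EINVAL and sets the error
 * string, which a constructor can route straight into ti->error.
 * The bounds and message shown are hypothetical.
 *
 *	static int example_parse_args(struct dm_target *ti, unsigned int argc,
 *				      char **argv)
 *	{
 *		static const struct dm_arg _args[] = {
 *			{0, 1024, "example: invalid queue depth"},
 *		};
 *		struct dm_arg_set as = { .argc = argc, .argv = argv };
 *		unsigned int queue_depth;
 *		int r;
 *
 *		r = dm_read_arg(_args, &as, &queue_depth, &ti->error);
 *		if (r)
 *			return r;
 *
 *		return 0;
 *	}
 */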

/*-----------------------------------------------------------------
 * Functions for creating and manipulating mapped devices.
 * Drop the reference with dm_put when you finish with the object.
 *---------------------------------------------------------------*/

/*
 * DM_ANY_MINOR chooses the next available minor number.
 */
#define DM_ANY_MINOR (-1)
int dm_create(int minor, struct mapped_device **md);

/*
 * Reference counting for md.
 */
struct mapped_device *dm_get_md(dev_t dev);
void dm_get(struct mapped_device *md);
int dm_hold(struct mapped_device *md);
void dm_put(struct mapped_device *md);

/*
 * An arbitrary pointer may be stored alongside a mapped device.
 */
void dm_set_mdptr(struct mapped_device *md, void *ptr);
void *dm_get_mdptr(struct mapped_device *md);

/*
 * A device can still be used while suspended, but I/O is deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags);
int dm_resume(struct mapped_device *md);

/*
 * Event functions.
 */
uint32_t dm_get_event_nr(struct mapped_device *md);
int dm_wait_event(struct mapped_device *md, int event_nr);
uint32_t dm_next_uevent_seq(struct mapped_device *md);
void dm_uevent_add(struct mapped_device *md, struct list_head *elist);

/*
 * Info functions.
 */
const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
void dm_remap_zone_report(struct dm_target *ti, sector_t start,
			  struct blk_zone *zones, unsigned int *nr_zones);
union map_info *dm_get_rq_mapinfo(struct request *rq);

/*
 * Device mapper functions to parse and create devices specified by the
 * parameter "dm-mod.create="
 */
int __init dm_early_create(struct dm_ioctl *dmi,
			   struct dm_target_spec **spec_array,
			   char **target_params_array);

struct queue_limits *dm_get_queue_limits(struct mapped_device *md);

/*
 * Geometry functions.
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);

/*-----------------------------------------------------------------
 * Functions for manipulating device-mapper tables.
 *---------------------------------------------------------------*/

/*
 * First create an empty table.
 */
int dm_table_create(struct dm_table **result, fmode_t mode,
		    unsigned num_targets, struct mapped_device *md);

/*
 * Then call this once for each target.
 */
int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params);

/*
 * Target constructors should call this if they need to add any callbacks.
 */
void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb);

/*
 * Target can use this to set the table's type.
 * Can only ever be called from a target's ctr.
 * Useful for "hybrid" target (supports both bio-based
 * and request-based).
 */
void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type);

/*
 * Finally call this to make the table ready for use.
 */
int dm_table_complete(struct dm_table *t);

/*
 * Destroy the table when finished.
 */
void dm_table_destroy(struct dm_table *t);

/*
 * Target may require that it is never sent I/O larger than len.
 */
int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len);

/*
 * Table reference counting.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx);
void dm_put_live_table(struct mapped_device *md, int srcu_idx);
void dm_sync_table(struct mapped_device *md);

/*
 * Queries
 */
sector_t dm_table_get_size(struct dm_table *t);
unsigned int dm_table_get_num_targets(struct dm_table *t);
fmode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);
const char *dm_table_device_name(struct dm_table *t);

/*
 * Trigger an event.
 */
void dm_table_event(struct dm_table *t);

/*
 * Run the queue for request-based targets.
 */
void dm_table_run_md_queue_async(struct dm_table *t);

/*
 * The device must be suspended before calling this method.
 * Returns the previous table, which the caller must destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md,
			       struct dm_table *t);
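
/*
 * Illustrative sketch only: the documented order of table operations,
 * assuming a mapped_device *md and a length in sectors are already in
 * hand.  The target type, mode and linear params string ("8:16 0",
 * i.e. underlying dev_t and offset) are hypothetical.
 *
 *	struct dm_table *t;
 *	char params[] = "8:16 0";
 *	int r;
 *
 *	r = dm_table_create(&t, FMODE_READ | FMODE_WRITE, 1, md);
 *	if (r)
 *		return r;
 *
 *	r = dm_table_add_target(t, "linear", 0, len, params);
 *	if (!r)
 *		r = dm_table_complete(t);
 *	if (r)
 *		dm_table_destroy(t);
 *	return r;
 */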

/*
 * A wrapper around vmalloc.
 */
void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);

/*-----------------------------------------------------------------
 * Macros.
 *---------------------------------------------------------------*/
#define DM_NAME "device-mapper"

#define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n"

#define DMCRIT(fmt, ...) pr_crit(DM_FMT(fmt), ##__VA_ARGS__)

#define DMERR(fmt, ...) pr_err(DM_FMT(fmt), ##__VA_ARGS__)
#define DMERR_LIMIT(fmt, ...) pr_err_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
#define DMWARN(fmt, ...) pr_warn(DM_FMT(fmt), ##__VA_ARGS__)
#define DMWARN_LIMIT(fmt, ...) pr_warn_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
#define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__)
#define DMINFO_LIMIT(fmt, ...) pr_info_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)

#ifdef CONFIG_DM_DEBUG
#define DMDEBUG(fmt, ...) printk(KERN_DEBUG DM_FMT(fmt), ##__VA_ARGS__)
#define DMDEBUG_LIMIT(fmt, ...) pr_debug_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
#else
#define DMDEBUG(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#define DMDEBUG_LIMIT(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#endif

#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
			  0 : scnprintf(result + sz, maxlen - sz, x))

/*
 * Definitions of return values from target end_io function.
 */
#define DM_ENDIO_DONE		0
#define DM_ENDIO_INCOMPLETE	1
#define DM_ENDIO_REQUEUE	2
#define DM_ENDIO_DELAY_REQUEUE	3

/*
 * Definitions of return values from target map function.
 */
#define DM_MAPIO_SUBMITTED	0
#define DM_MAPIO_REMAPPED	1
#define DM_MAPIO_REQUEUE	DM_ENDIO_REQUEUE
#define DM_MAPIO_DELAY_REQUEUE	DM_ENDIO_DELAY_REQUEUE
#define DM_MAPIO_KILL		4

#define dm_sector_div64(x, y)( \
{ \
	u64 _res; \
	(x) = div64_u64_rem(x, y, &_res); \
	_res; \
} \
)

/*
 * Ceiling(n / sz)
 */
#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))

#define dm_sector_div_up(n, sz) ( \
{ \
	sector_t _r = ((n) + (sz) - 1); \
	sector_div(_r, (sz)); \
	_r; \
} \
)

/*
 * ceiling(n / size) * size
 */
#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))

#define dm_array_too_big(fixed, obj, num) \
	((num) > (UINT_MAX - (fixed)) / (obj))

/*
 * Sector offset taken relative to the start of the target instead of
 * relative to the start of the device.
 */
#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)

static inline sector_t to_sector(unsigned long long n)
{
	return (n >> SECTOR_SHIFT);
}

static inline unsigned long to_bytes(sector_t n)
{
	return (n << SECTOR_SHIFT);
}

#endif	/* _LINUX_DEVICE_MAPPER_H */