/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef _LINUX_DEVICE_MAPPER_H
#define _LINUX_DEVICE_MAPPER_H

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/dm-ioctl.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>

struct dm_dev;
struct dm_target;
struct dm_table;
struct dm_report_zones_args;
struct mapped_device;
struct bio_vec;

/*
 * Type of table, mapped_device's mempool and request_queue
 */
enum dm_queue_mode {
	DM_TYPE_NONE		 = 0,
	DM_TYPE_BIO_BASED	 = 1,
	DM_TYPE_REQUEST_BASED	 = 2,
	DM_TYPE_DAX_BIO_BASED	 = 3,
	DM_TYPE_NVME_BIO_BASED	 = 4,
};

typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;

union map_info {
	void *ptr;
};

/*
 * In the constructor the target parameter will already have the
 * table, type, begin and len fields filled in.
 */
typedef int (*dm_ctr_fn) (struct dm_target *target,
			  unsigned int argc, char **argv);

/*
 * The destructor doesn't need to free the dm_target, just
 * anything hidden in ti->private.
 */
typedef void (*dm_dtr_fn) (struct dm_target *ti);

/*
 * The map function must return:
 * < 0: error
 * = 0: The target will handle the io by resubmitting it later
 * = 1: simple remap complete
 * = 2: The target wants to push back the io
 */
typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti,
					    struct request *rq,
					    union map_info *map_context,
					    struct request **clone);
typedef void (*dm_release_clone_request_fn) (struct request *clone,
					     union map_info *map_context);

/*
 * Returns:
 * < 0 : error (currently ignored)
 *   0 : ended successfully
 *   1 : for some reason the io has still not completed (eg,
 *       multipath target might want to requeue a failed io).
 *   2 : The target wants to push back the io
 */
typedef int (*dm_endio_fn) (struct dm_target *ti,
			    struct bio *bio, blk_status_t *error);
typedef int (*dm_request_endio_fn) (struct dm_target *ti,
				    struct request *clone, blk_status_t error,
				    union map_info *map_context);

typedef void (*dm_presuspend_fn) (struct dm_target *ti);
typedef void (*dm_presuspend_undo_fn) (struct dm_target *ti);
typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);

typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
			      unsigned status_flags, char *result, unsigned maxlen);

typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv,
			      char *result, unsigned maxlen);

typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev);

typedef int (*dm_report_zones_fn) (struct dm_target *ti,
				   struct dm_report_zones_args *args,
				   unsigned int nr_zones);

/*
 * These iteration functions are typically used to check (and combine)
 * properties of underlying devices.
 * E.g. Does at least one underlying device support flush?
 *      Does any underlying device not support WRITE_SAME?
 *
 * The callout function is called once for each contiguous section of
 * an underlying device.  State can be maintained in *data.
 * Return non-zero to stop iterating through any further devices.
 */
typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
					   struct dm_dev *dev,
					   sector_t start, sector_t len,
					   void *data);
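
/*
 * Example (an illustrative sketch, not part of this header): a callout of
 * this type, as code in the style of dm-table.c might define it, reporting
 * whether one contiguous section of an underlying device sits on a
 * non-rotational queue.  The function name is hypothetical.
 *
 *	static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
 *				    sector_t start, sector_t len, void *data)
 *	{
 *		struct request_queue *q = bdev_get_queue(dev->bdev);
 *
 *		return q && blk_queue_nonrot(q);
 *	}
 *
 * dm core combines the per-section answers by invoking the target's
 * iterate_devices method (declared below) with such a callout, e.g.:
 *
 *	ti->type->iterate_devices(ti, device_is_nonrot, NULL);
 */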

/*
 * This function must iterate through each section of the device used by the
 * target until it encounters a non-zero return code, which it then returns.
 * Returns zero if no callout returned non-zero.
 */
typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
				      iterate_devices_callout_fn fn,
				      void *data);

typedef void (*dm_io_hints_fn) (struct dm_target *ti,
				struct queue_limits *limits);

/*
 * Returns:
 *    0: The target can handle the next I/O immediately.
 *    1: The target can't handle the next I/O immediately.
 */
typedef int (*dm_busy_fn) (struct dm_target *ti);

/*
 * Returns:
 *  < 0 : error
 * >= 0 : the number of bytes accessible at the address
 */
typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn);
typedef size_t (*dm_dax_copy_iter_fn)(struct dm_target *ti, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i);
#define PAGE_SECTORS (PAGE_SIZE / 512)

void dm_error(const char *message);

struct dm_dev {
	struct block_device *bdev;
	struct dax_device *dax_dev;
	fmode_t mode;
	char name[16];
};

dev_t dm_get_dev_t(const char *path);

/*
 * Constructors should call these functions to ensure destination devices
 * are opened/closed correctly.
 */
int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
		  struct dm_dev **result);
void dm_put_device(struct dm_target *ti, struct dm_dev *d);

/*
 * Information about a target type
 */

struct target_type {
	uint64_t features;
	const char *name;
	struct module *module;
	unsigned version[3];
	dm_ctr_fn ctr;
	dm_dtr_fn dtr;
	dm_map_fn map;
	dm_clone_and_map_request_fn clone_and_map_rq;
	dm_release_clone_request_fn release_clone_rq;
	dm_endio_fn end_io;
	dm_request_endio_fn rq_end_io;
	dm_presuspend_fn presuspend;
	dm_presuspend_undo_fn presuspend_undo;
	dm_postsuspend_fn postsuspend;
	dm_preresume_fn preresume;
	dm_resume_fn resume;
	dm_status_fn status;
	dm_message_fn message;
	dm_prepare_ioctl_fn prepare_ioctl;
#ifdef CONFIG_BLK_DEV_ZONED
	dm_report_zones_fn report_zones;
#endif
	dm_busy_fn busy;
	dm_iterate_devices_fn iterate_devices;
	dm_io_hints_fn io_hints;
	dm_dax_direct_access_fn direct_access;
	dm_dax_copy_iter_fn dax_copy_from_iter;
	dm_dax_copy_iter_fn dax_copy_to_iter;

	/* For internal device-mapper use. */
	struct list_head list;
};
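
/*
 * Example (an illustrative sketch, not part of this header): the usual
 * pattern for wiring a target type into dm from a module, using
 * dm_register_target/dm_unregister_target (declared below).  The "example"
 * name and the example_* functions are hypothetical.
 *
 *	static struct target_type example_target = {
 *		.name    = "example",
 *		.version = {1, 0, 0},
 *		.module  = THIS_MODULE,
 *		.ctr     = example_ctr,
 *		.dtr     = example_dtr,
 *		.map     = example_map,
 *	};
 *
 *	static int __init dm_example_init(void)
 *	{
 *		return dm_register_target(&example_target);
 *	}
 *
 *	static void __exit dm_example_exit(void)
 *	{
 *		dm_unregister_target(&example_target);
 *	}
 *
 *	module_init(dm_example_init);
 *	module_exit(dm_example_exit);
 */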

/*
 * Target features
 */

/*
 * Any table that contains an instance of this target must have only one.
 */
#define DM_TARGET_SINGLETON		0x00000001
#define dm_target_needs_singleton(type)	((type)->features & DM_TARGET_SINGLETON)

/*
 * Indicates that a target does not support read-only devices.
 */
#define DM_TARGET_ALWAYS_WRITEABLE	0x00000002
#define dm_target_always_writeable(type) \
		((type)->features & DM_TARGET_ALWAYS_WRITEABLE)

/*
 * Any device that contains a table with an instance of this target may never
 * have tables containing any different target type.
 */
#define DM_TARGET_IMMUTABLE		0x00000004
#define dm_target_is_immutable(type)	((type)->features & DM_TARGET_IMMUTABLE)

/*
 * Indicates that a target may replace any target, even immutable ones.
 * .map, .map_rq, .clone_and_map_rq and .release_clone_rq are all defined.
 */
#define DM_TARGET_WILDCARD		0x00000008
#define dm_target_is_wildcard(type)	((type)->features & DM_TARGET_WILDCARD)

/*
 * A target implements its own bio data integrity.
 */
#define DM_TARGET_INTEGRITY		0x00000010
#define dm_target_has_integrity(type)	((type)->features & DM_TARGET_INTEGRITY)

/*
 * A target passes integrity data to the lower device.
 */
#define DM_TARGET_PASSES_INTEGRITY	0x00000020
#define dm_target_passes_integrity(type) ((type)->features & DM_TARGET_PASSES_INTEGRITY)

/*
 * Indicates that a target supports host-managed zoned block devices.
 */
#define DM_TARGET_ZONED_HM		0x00000040
#define dm_target_supports_zoned_hm(type) ((type)->features & DM_TARGET_ZONED_HM)

struct dm_target {
	struct dm_table *table;
	struct target_type *type;

	/* target limits */
	sector_t begin;
	sector_t len;

	/* If non-zero, maximum size of I/O submitted to a target. */
	uint32_t max_io_len;

	/*
	 * The number of zero-length barrier bios that will be submitted
	 * to the target for the purpose of flushing cache.
	 *
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 * It is the responsibility of the target driver to remap these bios
	 * to the real underlying devices.
	 */
	unsigned num_flush_bios;

	/*
	 * The number of discard bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_discard_bios;

	/*
	 * The number of secure erase bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_secure_erase_bios;

	/*
	 * The number of WRITE SAME bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_write_same_bios;

	/*
	 * The number of WRITE ZEROES bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_write_zeroes_bios;

	/*
	 * The minimum number of extra bytes allocated in each io for the
	 * target to use.
	 */
	unsigned per_io_data_size;

	/* target specific data */
	void *private;

	/* Used to provide an error string from the ctr */
	char *error;

	/*
	 * Set if this target needs to receive flushes regardless of
	 * whether or not its underlying devices have support.
	 */
	bool flush_supported:1;

	/*
	 * Set if this target needs to receive discards regardless of
	 * whether or not its underlying devices have support.
	 */
	bool discards_supported:1;
};
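
/*
 * Example (an illustrative sketch, not part of this header): a minimal
 * linear-style ctr/dtr/map trio, modeled on dm-linear, showing how
 * ti->private, ti->error, dm_get_device/dm_put_device and
 * dm_target_offset fit together.  The example_c type and the
 * "<dev_path> <offset>" argument layout are hypothetical.
 *
 *	struct example_c {
 *		struct dm_dev *dev;
 *		sector_t start;
 *	};
 *
 *	static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 *	{
 *		struct example_c *ec;
 *		unsigned long long start;
 *
 *		if (argc != 2) {
 *			ti->error = "Invalid argument count";
 *			return -EINVAL;
 *		}
 *
 *		ec = kmalloc(sizeof(*ec), GFP_KERNEL);
 *		if (!ec) {
 *			ti->error = "Cannot allocate context";
 *			return -ENOMEM;
 *		}
 *
 *		if (sscanf(argv[1], "%llu", &start) != 1 ||
 *		    dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
 *				  &ec->dev)) {
 *			ti->error = "Device lookup failed";
 *			kfree(ec);
 *			return -EINVAL;
 *		}
 *
 *		ec->start = start;
 *		ti->num_flush_bios = 1;
 *		ti->private = ec;
 *		return 0;
 *	}
 *
 *	static void example_dtr(struct dm_target *ti)
 *	{
 *		struct example_c *ec = ti->private;
 *
 *		dm_put_device(ti, ec->dev);
 *		kfree(ec);
 *	}
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		struct example_c *ec = ti->private;
 *
 *		bio_set_dev(bio, ec->dev->bdev);
 *		if (bio_sectors(bio))
 *			bio->bi_iter.bi_sector = ec->start +
 *				dm_target_offset(ti, bio->bi_iter.bi_sector);
 *		return DM_MAPIO_REMAPPED;
 *	}
 */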

/* Each target can link one of these into the table */
struct dm_target_callbacks {
	struct list_head list;
	int (*congested_fn) (struct dm_target_callbacks *, int);
};

void *dm_per_bio_data(struct bio *bio, size_t data_size);
struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size);
unsigned dm_bio_get_target_bio_nr(const struct bio *bio);

int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);

/*
 * Target argument parsing.
 */
struct dm_arg_set {
	unsigned argc;
	char **argv;
};

/*
 * The minimum and maximum value of a numeric argument, together with
 * the error message to use if the number is found to be outside that range.
 */
struct dm_arg {
	unsigned min;
	unsigned max;
	char *error;
};

/*
 * Validate the next argument, either returning it as *value or, if invalid,
 * returning -EINVAL and setting *error.
 */
int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		unsigned *value, char **error);

/*
 * Process the next argument as the start of a group containing between
 * arg->min and arg->max further arguments. Either return the size as
 * *num_args or, if invalid, return -EINVAL and set *error.
 */
int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		      unsigned *num_args, char **error);

/*
 * Return the current argument and shift to the next.
 */
const char *dm_shift_arg(struct dm_arg_set *as);

/*
 * Move through num_args arguments.
 */
void dm_consume_args(struct dm_arg_set *as, unsigned num_args);
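
/*
 * Example (an illustrative sketch, not part of this header): how a target
 * ctr might consume a device path followed by an optional feature-argument
 * group using dm_shift_arg and dm_read_arg_group.  The argument layout and
 * bounds are hypothetical.
 *
 *	static const struct dm_arg _args[] = {
 *		{ 0, 4, "Invalid number of feature arguments" },
 *	};
 *
 *	struct dm_arg_set as;
 *	const char *devname;
 *	unsigned num_features;
 *	int r;
 *
 *	as.argc = argc;
 *	as.argv = argv;
 *
 *	devname = dm_shift_arg(&as);
 *	if (!devname) {
 *		ti->error = "Missing device path";
 *		return -EINVAL;
 *	}
 *
 *	r = dm_read_arg_group(_args, &as, &num_features, &ti->error);
 *	if (r)
 *		return r;
 *
 *	while (num_features--) {
 *		const char *feature = dm_shift_arg(&as);
 *		... match each feature keyword here ...
 *	}
 */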

/*-----------------------------------------------------------------
 * Functions for creating and manipulating mapped devices.
 * Drop the reference with dm_put when you finish with the object.
 *---------------------------------------------------------------*/

/*
 * DM_ANY_MINOR chooses the next available minor number.
 */
#define DM_ANY_MINOR (-1)
int dm_create(int minor, struct mapped_device **md);

/*
 * Reference counting for md.
 */
struct mapped_device *dm_get_md(dev_t dev);
void dm_get(struct mapped_device *md);
int dm_hold(struct mapped_device *md);
void dm_put(struct mapped_device *md);

/*
 * An arbitrary pointer may be stored alongside a mapped device.
 */
void dm_set_mdptr(struct mapped_device *md, void *ptr);
void *dm_get_mdptr(struct mapped_device *md);

/*
 * A device can still be used while suspended, but I/O is deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags);
int dm_resume(struct mapped_device *md);

/*
 * Event functions.
 */
uint32_t dm_get_event_nr(struct mapped_device *md);
int dm_wait_event(struct mapped_device *md, int event_nr);
uint32_t dm_next_uevent_seq(struct mapped_device *md);
void dm_uevent_add(struct mapped_device *md, struct list_head *elist);

/*
 * Info functions.
 */
const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
union map_info *dm_get_rq_mapinfo(struct request *rq);

#ifdef CONFIG_BLK_DEV_ZONED
struct dm_report_zones_args {
	struct dm_target *tgt;
	sector_t next_sector;

	void *orig_data;
	report_zones_cb orig_cb;
	unsigned int zone_idx;

	/* must be filled by ->report_zones before calling dm_report_zones_cb */
	sector_t start;
};
int dm_report_zones_cb(struct blk_zone *zone, unsigned int idx, void *data);
#endif /* CONFIG_BLK_DEV_ZONED */

/*
 * Device mapper functions to parse and create devices specified by the
 * parameter "dm-mod.create="
 */
int __init dm_early_create(struct dm_ioctl *dmi,
			   struct dm_target_spec **spec_array,
			   char **target_params_array);

struct queue_limits *dm_get_queue_limits(struct mapped_device *md);

/*
 * Geometry functions.
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);

/*-----------------------------------------------------------------
 * Functions for manipulating device-mapper tables.
 *---------------------------------------------------------------*/

/*
 * First create an empty table.
 */
int dm_table_create(struct dm_table **result, fmode_t mode,
		    unsigned num_targets, struct mapped_device *md);

/*
 * Then call this once for each target.
 */
int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params);

/*
 * A target's ctr should call this if it needs to add any callbacks.
 */
void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb);

/*
 * Target can use this to set the table's type.
 * Can only ever be called from a target's ctr.
 * Useful for "hybrid" targets (those that support both bio-based
 * and request-based I/O).
 */
void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type);

/*
 * Finally call this to make the table ready for use.
 */
int dm_table_complete(struct dm_table *t);

/*
 * Destroy the table when finished.
 */
void dm_table_destroy(struct dm_table *t);

/*
 * Target may require that it is never sent I/O larger than len.
 */
int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len);

/*
 * Table reference counting.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx);
void dm_put_live_table(struct mapped_device *md, int srcu_idx);
void dm_sync_table(struct mapped_device *md);

/*
 * Queries
 */
sector_t dm_table_get_size(struct dm_table *t);
unsigned int dm_table_get_num_targets(struct dm_table *t);
fmode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);
const char *dm_table_device_name(struct dm_table *t);

/*
 * Trigger an event.
 */
void dm_table_event(struct dm_table *t);
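
/*
 * Example (an illustrative sketch, not part of this header): the in-kernel
 * sequence for building a table and binding it to an existing mapped
 * device, roughly mirroring the ioctl path.  Error handling is elided and
 * the "linear" target with "8:0 0" parameters is only an example; md is
 * assumed to be a mapped_device obtained earlier (e.g. via dm_create).
 *
 *	struct dm_table *t, *old;
 *	char params[] = "8:0 0";
 *
 *	if (dm_table_create(&t, FMODE_READ | FMODE_WRITE, 1, md))
 *		goto bad;
 *	if (dm_table_add_target(t, "linear", 0, 1024, params))
 *		goto bad;
 *	if (dm_table_complete(t))
 *		goto bad;
 *
 *	dm_suspend(md, 0);
 *	old = dm_swap_table(md, t);
 *	if (!IS_ERR_OR_NULL(old))
 *		dm_table_destroy(old);
 *	dm_resume(md);
 *
 * dm_swap_table (declared below) requires the device to be suspended and
 * hands back the previous table for the caller to destroy.
 */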

/*
 * Run the queue for request-based targets.
 */
void dm_table_run_md_queue_async(struct dm_table *t);

/*
 * The device must be suspended before calling this method.
 * Returns the previous table, which the caller must destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md,
			       struct dm_table *t);

/*
 * A wrapper around vmalloc.
 */
void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);

/*-----------------------------------------------------------------
 * Macros.
 *---------------------------------------------------------------*/
#define DM_NAME "device-mapper"

#define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n"

#define DMCRIT(fmt, ...) pr_crit(DM_FMT(fmt), ##__VA_ARGS__)

#define DMERR(fmt, ...) pr_err(DM_FMT(fmt), ##__VA_ARGS__)
#define DMERR_LIMIT(fmt, ...) pr_err_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
#define DMWARN(fmt, ...) pr_warn(DM_FMT(fmt), ##__VA_ARGS__)
#define DMWARN_LIMIT(fmt, ...) pr_warn_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
#define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__)
#define DMINFO_LIMIT(fmt, ...) pr_info_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)

#ifdef CONFIG_DM_DEBUG
#define DMDEBUG(fmt, ...) printk(KERN_DEBUG DM_FMT(fmt), ##__VA_ARGS__)
#define DMDEBUG_LIMIT(fmt, ...) pr_debug_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
#else
#define DMDEBUG(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#define DMDEBUG_LIMIT(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#endif

#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
			  0 : scnprintf(result + sz, maxlen - sz, x))

/*
 * Definitions of return values from target end_io function.
 */
#define DM_ENDIO_DONE		0
#define DM_ENDIO_INCOMPLETE	1
#define DM_ENDIO_REQUEUE	2
#define DM_ENDIO_DELAY_REQUEUE	3

/*
 * Definitions of return values from target map function.
 */
#define DM_MAPIO_SUBMITTED	0
#define DM_MAPIO_REMAPPED	1
#define DM_MAPIO_REQUEUE	DM_ENDIO_REQUEUE
#define DM_MAPIO_DELAY_REQUEUE	DM_ENDIO_DELAY_REQUEUE
#define DM_MAPIO_KILL		4

#define dm_sector_div64(x, y)( \
{ \
	u64 _res; \
	(x) = div64_u64_rem(x, y, &_res); \
	_res; \
} \
)

/*
 * ceiling(n / sz)
 */
#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))

#define dm_sector_div_up(n, sz) ( \
{ \
	sector_t _r = ((n) + (sz) - 1); \
	sector_div(_r, (sz)); \
	_r; \
} \
)

/*
 * ceiling(n / sz) * sz
 */
#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))

/*
 * Sector offset taken relative to the start of the target instead of
 * relative to the start of the device.
 */
#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)

static inline sector_t to_sector(unsigned long long n)
{
	return (n >> SECTOR_SHIFT);
}

static inline unsigned long to_bytes(sector_t n)
{
	return (n << SECTOR_SHIFT);
}

#endif /* _LINUX_DEVICE_MAPPER_H */
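
/*
 * Example (an illustrative sketch, not part of this header): a status
 * callback built on DMEMIT, which appends to the result buffer through the
 * sz and maxlen variables the macro expects to find in scope.  The
 * example_status function and example_c context are hypothetical,
 * following the pattern of dm-linear's status method.
 *
 *	static void example_status(struct dm_target *ti, status_type_t type,
 *				   unsigned status_flags, char *result,
 *				   unsigned maxlen)
 *	{
 *		struct example_c *ec = ti->private;
 *		unsigned sz = 0;
 *
 *		switch (type) {
 *		case STATUSTYPE_INFO:
 *			result[0] = '\0';
 *			break;
 *		case STATUSTYPE_TABLE:
 *			DMEMIT("%s %llu", ec->dev->name,
 *			       (unsigned long long)ec->start);
 *			break;
 *		}
 *	}
 */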