/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef _LINUX_DEVICE_MAPPER_H
#define _LINUX_DEVICE_MAPPER_H

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/dm-ioctl.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>

struct dm_dev;
struct dm_target;
struct dm_table;
struct dm_report_zones_args;
struct mapped_device;
struct bio_vec;
enum dax_access_mode;

/*
 * Type of table, mapped_device's mempool and request_queue
 */
enum dm_queue_mode {
	DM_TYPE_NONE		 = 0,
	DM_TYPE_BIO_BASED	 = 1,
	DM_TYPE_REQUEST_BASED	 = 2,
	DM_TYPE_DAX_BIO_BASED	 = 3,
};

typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE, STATUSTYPE_IMA } status_type_t;

union map_info {
	void *ptr;
};

/*
 * In the constructor the target parameter will already have the
 * table, type, begin and len fields filled in.
 */
typedef int (*dm_ctr_fn) (struct dm_target *target,
			  unsigned int argc, char **argv);

/*
 * The destructor doesn't need to free the dm_target, just
 * anything hidden in ti->private.
 */
typedef void (*dm_dtr_fn) (struct dm_target *ti);

/*
 * The map function must return:
 * < 0: error
 * = 0: The target will handle the io by resubmitting it later
 * = 1: simple remap complete
 * = 2: The target wants to push back the io
 */
typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti,
					    struct request *rq,
					    union map_info *map_context,
					    struct request **clone);
typedef void (*dm_release_clone_request_fn) (struct request *clone,
					     union map_info *map_context);
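
/*
 * Illustrative sketch (hypothetical names, not part of this interface):
 * a minimal bio-based map function for a pass-through target that remaps
 * each bio onto one underlying device, in the style of dm-linear. It
 * assumes ti->private holds a target-defined context set up by the
 * constructor; dm_target_offset() and DM_MAPIO_REMAPPED are defined
 * later in this header.
 *
 *	struct example_ctx {
 *		struct dm_dev *dev;
 *		sector_t start;
 *	};
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		struct example_ctx *ec = ti->private;
 *
 *		bio_set_dev(bio, ec->dev->bdev);
 *		if (bio_sectors(bio))
 *			bio->bi_iter.bi_sector = ec->start +
 *				dm_target_offset(ti, bio->bi_iter.bi_sector);
 *
 *		return DM_MAPIO_REMAPPED;	(simple remap complete)
 *	}
 */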

/*
 * Returns:
 * < 0 : error (currently ignored)
 *   0 : ended successfully
 *   1 : for some reason the io has still not completed (eg,
 *       multipath target might want to requeue a failed io).
 *   2 : The target wants to push back the io
 */
typedef int (*dm_endio_fn) (struct dm_target *ti,
			    struct bio *bio, blk_status_t *error);
typedef int (*dm_request_endio_fn) (struct dm_target *ti,
				    struct request *clone, blk_status_t error,
				    union map_info *map_context);

typedef void (*dm_presuspend_fn) (struct dm_target *ti);
typedef void (*dm_presuspend_undo_fn) (struct dm_target *ti);
typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);

typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
			      unsigned int status_flags, char *result, unsigned int maxlen);

typedef int (*dm_message_fn) (struct dm_target *ti, unsigned int argc, char **argv,
			      char *result, unsigned int maxlen);

typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev);

#ifdef CONFIG_BLK_DEV_ZONED
typedef int (*dm_report_zones_fn) (struct dm_target *ti,
				   struct dm_report_zones_args *args,
				   unsigned int nr_zones);
#else
/*
 * Define dm_report_zones_fn so that targets can assign to NULL if
 * CONFIG_BLK_DEV_ZONED is disabled. Otherwise each target needs to do
 * awkward #ifdefs in their target_type, etc.
 */
typedef int (*dm_report_zones_fn) (struct dm_target *dummy);
#endif

/*
 * These iteration functions are typically used to check (and combine)
 * properties of underlying devices.
 * E.g. Does at least one underlying device support flush?
 *      Does any underlying device not support WRITE_SAME?
 *
 * The callout function is called once for each contiguous section of
 * an underlying device. State can be maintained in *data.
 * Return non-zero to stop iterating through any further devices.
 */
typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
					   struct dm_dev *dev,
					   sector_t start, sector_t len,
					   void *data);

/*
 * This function must iterate through each section of device used by the
 * target until it encounters a non-zero return code, which it then returns.
 * Returns zero if no callout returned non-zero.
 */
typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
				      iterate_devices_callout_fn fn,
				      void *data);
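
/*
 * Illustrative sketch (an assumption, not part of this interface): a
 * callout in the style of the dm-table "supports" checks, reporting
 * whether an underlying device lacks discard support. A target's
 * dm_iterate_devices_fn invokes it once per contiguous section; the
 * first non-zero return stops the iteration.
 *
 *	static int device_not_discard_capable(struct dm_target *ti,
 *					      struct dm_dev *dev,
 *					      sector_t start, sector_t len,
 *					      void *data)
 *	{
 *		return !bdev_max_discard_sectors(dev->bdev);
 *	}
 */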

typedef void (*dm_io_hints_fn) (struct dm_target *ti,
				struct queue_limits *limits);

/*
 * Returns:
 *    0: The target can handle the next I/O immediately.
 *    1: The target can't handle the next I/O immediately.
 */
typedef int (*dm_busy_fn) (struct dm_target *ti);

/*
 * Returns:
 *  < 0 : error
 * >= 0 : the number of bytes accessible at the address
 */
typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
		long nr_pages, enum dax_access_mode mode, void **kaddr,
		pfn_t *pfn);
typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *ti, pgoff_t pgoff,
		size_t nr_pages);

/*
 * Returns:
 * != 0 : number of bytes transferred
 *    0 : recovery write failed
 */
typedef size_t (*dm_dax_recovery_write_fn)(struct dm_target *ti, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i);

void dm_error(const char *message);

struct dm_dev {
	struct block_device *bdev;
	struct dax_device *dax_dev;
	fmode_t mode;
	char name[16];
};

dev_t dm_get_dev_t(const char *path);

/*
 * Constructors should call these functions to ensure destination devices
 * are opened/closed correctly.
 */
int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
		  struct dm_dev **result);
void dm_put_device(struct dm_target *ti, struct dm_dev *d);

/*
 * Information about a target type
 */

struct target_type {
	uint64_t features;
	const char *name;
	struct module *module;
	unsigned int version[3];
	dm_ctr_fn ctr;
	dm_dtr_fn dtr;
	dm_map_fn map;
	dm_clone_and_map_request_fn clone_and_map_rq;
	dm_release_clone_request_fn release_clone_rq;
	dm_endio_fn end_io;
	dm_request_endio_fn rq_end_io;
	dm_presuspend_fn presuspend;
	dm_presuspend_undo_fn presuspend_undo;
	dm_postsuspend_fn postsuspend;
	dm_preresume_fn preresume;
	dm_resume_fn resume;
	dm_status_fn status;
	dm_message_fn message;
	dm_prepare_ioctl_fn prepare_ioctl;
	dm_report_zones_fn report_zones;
	dm_busy_fn busy;
	dm_iterate_devices_fn iterate_devices;
	dm_io_hints_fn io_hints;
	dm_dax_direct_access_fn direct_access;
	dm_dax_zero_page_range_fn dax_zero_page_range;
	dm_dax_recovery_write_fn dax_recovery_write;

	/* For internal device-mapper use. */
	struct list_head list;
};
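
/*
 * Illustrative sketch (hypothetical names, not part of this interface):
 * a minimal target_type definition and module registration for a
 * bio-based "example" target, following the pattern used by in-tree
 * targets:
 *
 *	static struct target_type example_target = {
 *		.name	 = "example",
 *		.version = {1, 0, 0},
 *		.module	 = THIS_MODULE,
 *		.ctr	 = example_ctr,
 *		.dtr	 = example_dtr,
 *		.map	 = example_map,
 *	};
 *
 *	static int __init dm_example_init(void)
 *	{
 *		return dm_register_target(&example_target);
 *	}
 *
 *	static void __exit dm_example_exit(void)
 *	{
 *		dm_unregister_target(&example_target);
 *	}
 *
 *	module_init(dm_example_init);
 *	module_exit(dm_example_exit);
 */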

/*
 * Target features
 */

/*
 * Any table that contains an instance of this target must have only one.
 */
#define DM_TARGET_SINGLETON		0x00000001
#define dm_target_needs_singleton(type)	((type)->features & DM_TARGET_SINGLETON)

/*
 * Indicates that a target does not support read-only devices.
 */
#define DM_TARGET_ALWAYS_WRITEABLE	0x00000002
#define dm_target_always_writeable(type) \
		((type)->features & DM_TARGET_ALWAYS_WRITEABLE)

/*
 * Any device that contains a table with an instance of this target may never
 * have tables containing any different target type.
 */
#define DM_TARGET_IMMUTABLE		0x00000004
#define dm_target_is_immutable(type)	((type)->features & DM_TARGET_IMMUTABLE)

/*
 * Indicates that a target may replace any target; even immutable targets.
 * .map, .map_rq, .clone_and_map_rq and .release_clone_rq are all defined.
 */
#define DM_TARGET_WILDCARD		0x00000008
#define dm_target_is_wildcard(type)	((type)->features & DM_TARGET_WILDCARD)

/*
 * A target implements its own bio data integrity.
 */
#define DM_TARGET_INTEGRITY		0x00000010
#define dm_target_has_integrity(type)	((type)->features & DM_TARGET_INTEGRITY)

/*
 * A target passes integrity data to the lower device.
 */
#define DM_TARGET_PASSES_INTEGRITY	0x00000020
#define dm_target_passes_integrity(type) ((type)->features & DM_TARGET_PASSES_INTEGRITY)

/*
 * Indicates support for zoned block devices:
 * - DM_TARGET_ZONED_HM: the target also supports host-managed zoned
 *   block devices but does not support combining different zoned models.
 * - DM_TARGET_MIXED_ZONED_MODEL: the target supports combining multiple
 *   devices with different zoned models.
 */
#ifdef CONFIG_BLK_DEV_ZONED
#define DM_TARGET_ZONED_HM		0x00000040
#define dm_target_supports_zoned_hm(type) ((type)->features & DM_TARGET_ZONED_HM)
#else
#define DM_TARGET_ZONED_HM		0x00000000
#define dm_target_supports_zoned_hm(type) (false)
#endif

/*
 * A target handles REQ_NOWAIT
 */
#define DM_TARGET_NOWAIT		0x00000080
#define dm_target_supports_nowait(type) ((type)->features & DM_TARGET_NOWAIT)

/*
 * A target supports passing through inline crypto support.
 */
#define DM_TARGET_PASSES_CRYPTO		0x00000100
#define dm_target_passes_crypto(type) ((type)->features & DM_TARGET_PASSES_CRYPTO)

#ifdef CONFIG_BLK_DEV_ZONED
#define DM_TARGET_MIXED_ZONED_MODEL	0x00000200
#define dm_target_supports_mixed_zoned_model(type) \
	((type)->features & DM_TARGET_MIXED_ZONED_MODEL)
#else
#define DM_TARGET_MIXED_ZONED_MODEL	0x00000000
#define dm_target_supports_mixed_zoned_model(type) (false)
#endif

struct dm_target {
	struct dm_table *table;
	struct target_type *type;

	/* target limits */
	sector_t begin;
	sector_t len;

	/* If non-zero, maximum size of I/O submitted to a target. */
	uint32_t max_io_len;

	/*
	 * A number of zero-length barrier bios that will be submitted
	 * to the target for the purpose of flushing cache.
	 *
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 * It is the responsibility of the target driver to remap these bios
	 * to the real underlying devices.
	 */
	unsigned int num_flush_bios;

	/*
	 * The number of discard bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned int num_discard_bios;

	/*
	 * The number of secure erase bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned int num_secure_erase_bios;

	/*
	 * The number of WRITE ZEROES bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned int num_write_zeroes_bios;

	/*
	 * The minimum number of extra bytes allocated in each io for the
	 * target to use.
	 */
	unsigned int per_io_data_size;

	/* target specific data */
	void *private;

	/* Used to provide an error string from the ctr */
	char *error;

	/*
	 * Set if this target needs to receive flushes regardless of
	 * whether or not its underlying devices have support.
	 */
	bool flush_supported:1;

	/*
	 * Set if this target needs to receive discards regardless of
	 * whether or not its underlying devices have support.
	 */
	bool discards_supported:1;

	/*
	 * Set if we need to limit the number of in-flight bios when swapping.
	 */
	bool limit_swap_bios:1;

	/*
	 * Set if this target implements a zoned device and needs emulation of
	 * zone append operations using regular writes.
	 */
	bool emulate_zone_append:1;

	/*
	 * Set if the target will submit IO using dm_submit_bio_remap()
	 * after returning DM_MAPIO_SUBMITTED from its map function.
	 */
	bool accounts_remapped_io:1;

	/*
	 * Set if the target will submit the DM bio without first calling
	 * bio_set_dev(). NOTE: ideally a target should _not_ need this.
	 */
	bool needs_bio_set_dev:1;
};
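
/*
 * Illustrative sketch (hypothetical names, not part of this interface):
 * a constructor for the pass-through target sketched earlier in this
 * file's comments, showing the expected use of ti->error, ti->private
 * and dm_get_device()/dm_put_device():
 *
 *	static int example_ctr(struct dm_target *ti, unsigned int argc,
 *			       char **argv)
 *	{
 *		struct example_ctx *ec;
 *		int ret;
 *
 *		if (argc != 2) {
 *			ti->error = "Invalid argument count";
 *			return -EINVAL;
 *		}
 *
 *		ec = kmalloc(sizeof(*ec), GFP_KERNEL);
 *		if (!ec) {
 *			ti->error = "Cannot allocate context";
 *			return -ENOMEM;
 *		}
 *
 *		ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
 *				    &ec->dev);
 *		if (ret) {
 *			ti->error = "Device lookup failed";
 *			kfree(ec);
 *			return ret;
 *		}
 *
 *		(argv[1] would be parsed into ec->start here)
 *		ti->num_flush_bios = 1;
 *		ti->private = ec;
 *		return 0;
 *	}
 *
 * The matching destructor would call dm_put_device(ti, ec->dev) and
 * kfree(ec); it must not free the dm_target itself.
 */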

void *dm_per_bio_data(struct bio *bio, size_t data_size);
struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size);
unsigned int dm_bio_get_target_bio_nr(const struct bio *bio);

u64 dm_start_time_ns_from_clone(struct bio *bio);

int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);

/*
 * Target argument parsing.
 */
struct dm_arg_set {
	unsigned int argc;
	char **argv;
};

/*
 * The minimum and maximum value of a numeric argument, together with
 * the error message to use if the number is found to be outside that range.
 */
struct dm_arg {
	unsigned int min;
	unsigned int max;
	char *error;
};

/*
 * Validate the next argument, either returning it as *value or, if invalid,
 * returning -EINVAL and setting *error.
 */
int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		unsigned int *value, char **error);

/*
 * Process the next argument as the start of a group containing between
 * arg->min and arg->max further arguments. Either return the size as
 * *num_args or, if invalid, return -EINVAL and set *error.
 */
int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		      unsigned int *num_args, char **error);

/*
 * Return the current argument and shift to the next.
 */
const char *dm_shift_arg(struct dm_arg_set *as);

/*
 * Move through num_args arguments.
 */
void dm_consume_args(struct dm_arg_set *as, unsigned int num_args);
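
/*
 * Illustrative sketch (an assumption, not part of this interface):
 * parsing an optional-feature argument group from within a constructor,
 * in the style used by existing targets. "writable" is a hypothetical
 * feature name:
 *
 *	static int parse_features(struct dm_arg_set *as, struct dm_target *ti)
 *	{
 *		static const struct dm_arg _args[] = {
 *			{0, 1, "Invalid number of feature arguments"},
 *		};
 *		unsigned int num_features;
 *		const char *arg;
 *		int ret;
 *
 *		ret = dm_read_arg_group(_args, as, &num_features, &ti->error);
 *		if (ret)
 *			return ret;
 *
 *		while (num_features--) {
 *			arg = dm_shift_arg(as);
 *			if (!strcasecmp(arg, "writable"))
 *				continue;	(handle the feature here)
 *			ti->error = "Unrecognised feature requested";
 *			return -EINVAL;
 *		}
 *		return 0;
 *	}
 */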

/*
 *----------------------------------------------------------------
 * Functions for creating and manipulating mapped devices.
 * Drop the reference with dm_put when you finish with the object.
 *----------------------------------------------------------------
 */

/*
 * DM_ANY_MINOR chooses the next available minor number.
 */
#define DM_ANY_MINOR (-1)
int dm_create(int minor, struct mapped_device **md);

/*
 * Reference counting for md.
 */
struct mapped_device *dm_get_md(dev_t dev);
void dm_get(struct mapped_device *md);
int dm_hold(struct mapped_device *md);
void dm_put(struct mapped_device *md);

/*
 * An arbitrary pointer may be stored alongside a mapped device.
 */
void dm_set_mdptr(struct mapped_device *md, void *ptr);
void *dm_get_mdptr(struct mapped_device *md);

/*
 * A device can still be used while suspended, but I/O is deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned int suspend_flags);
int dm_resume(struct mapped_device *md);

/*
 * Event functions.
 */
uint32_t dm_get_event_nr(struct mapped_device *md);
int dm_wait_event(struct mapped_device *md, int event_nr);
uint32_t dm_next_uevent_seq(struct mapped_device *md);
void dm_uevent_add(struct mapped_device *md, struct list_head *elist);

/*
 * Info functions.
 */
const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_post_suspending(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors);
void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone);
union map_info *dm_get_rq_mapinfo(struct request *rq);

#ifdef CONFIG_BLK_DEV_ZONED
struct dm_report_zones_args {
	struct dm_target *tgt;
	sector_t next_sector;

	void *orig_data;
	report_zones_cb orig_cb;
	unsigned int zone_idx;

	/* must be filled by ->report_zones before calling dm_report_zones_cb */
	sector_t start;
};
int dm_report_zones(struct block_device *bdev, sector_t start, sector_t sector,
		    struct dm_report_zones_args *args, unsigned int nr_zones);
#endif /* CONFIG_BLK_DEV_ZONED */

/*
 * Device mapper functions to parse and create devices specified by the
 * parameter "dm-mod.create="
 */
int __init dm_early_create(struct dm_ioctl *dmi,
			   struct dm_target_spec **spec_array,
			   char **target_params_array);

struct queue_limits *dm_get_queue_limits(struct mapped_device *md);

/*
 * Geometry functions.
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);

/*
 *---------------------------------------------------------------
 * Functions for manipulating device-mapper tables.
 *---------------------------------------------------------------
 */

/*
 * First create an empty table.
 */
int dm_table_create(struct dm_table **result, fmode_t mode,
		    unsigned int num_targets, struct mapped_device *md);

/*
 * Then call this once for each target.
 */
int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params);

/*
 * Target can use this to set the table's type.
 * Can only ever be called from a target's ctr.
 * Useful for "hybrid" targets (supporting both bio-based
 * and request-based operation).
 */
void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type);

/*
 * Finally call this to make the table ready for use.
 */
int dm_table_complete(struct dm_table *t);

/*
 * Destroy the table when finished.
 */
void dm_table_destroy(struct dm_table *t);
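
/*
 * Illustrative sketch (an assumption, not part of this interface): the
 * calling sequence described above, for in-kernel table construction
 * against an existing mapped_device *md. The backing device and length
 * are hypothetical:
 *
 *	struct dm_table *t;
 *	char params[] = "/dev/sda 0";
 *	int ret;
 *
 *	ret = dm_table_create(&t, FMODE_READ | FMODE_WRITE, 1, md);
 *	if (ret)
 *		return ret;
 *
 *	ret = dm_table_add_target(t, "linear", 0, len, params);
 *	if (!ret)
 *		ret = dm_table_complete(t);
 *	if (ret)
 *		dm_table_destroy(t);
 */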

/*
 * Target may require that it is never sent I/O larger than len.
 */
int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len);

/*
 * Table reference counting.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx);
void dm_put_live_table(struct mapped_device *md, int srcu_idx);
void dm_sync_table(struct mapped_device *md);

/*
 * Queries
 */
sector_t dm_table_get_size(struct dm_table *t);
fmode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);
const char *dm_table_device_name(struct dm_table *t);

/*
 * Trigger an event.
 */
void dm_table_event(struct dm_table *t);

/*
 * Run the queue for request-based targets.
 */
void dm_table_run_md_queue_async(struct dm_table *t);

/*
 * The device must be suspended before calling this method.
 * Returns the previous table, which the caller must destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md,
			       struct dm_table *t);

/*
 * Table blk_crypto_profile functions
 */
void dm_destroy_crypto_profile(struct blk_crypto_profile *profile);

/*
 *---------------------------------------------------------------
 * Macros.
 *---------------------------------------------------------------
 */
#define DM_NAME "device-mapper"

#define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n"

#define DMCRIT(fmt, ...) pr_crit(DM_FMT(fmt), ##__VA_ARGS__)

#define DMERR(fmt, ...) pr_err(DM_FMT(fmt), ##__VA_ARGS__)
#define DMERR_LIMIT(fmt, ...) pr_err_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
#define DMWARN(fmt, ...) pr_warn(DM_FMT(fmt), ##__VA_ARGS__)
#define DMWARN_LIMIT(fmt, ...) pr_warn_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
#define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__)
#define DMINFO_LIMIT(fmt, ...) pr_info_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)

#define DMDEBUG(fmt, ...) pr_debug(DM_FMT(fmt), ##__VA_ARGS__)
#define DMDEBUG_LIMIT(fmt, ...) pr_debug_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)

#define DMEMIT(x...) (sz += ((sz >= maxlen) ? 0 : scnprintf(result + sz, maxlen - sz, x)))

#define DMEMIT_TARGET_NAME_VERSION(y) \
		DMEMIT("target_name=%s,target_version=%u.%u.%u", \
		       (y)->name, (y)->version[0], (y)->version[1], (y)->version[2])

/*
 * Definitions of return values from target end_io function.
 */
#define DM_ENDIO_DONE		0
#define DM_ENDIO_INCOMPLETE	1
#define DM_ENDIO_REQUEUE	2
#define DM_ENDIO_DELAY_REQUEUE	3

/*
 * Definitions of return values from target map function.
 */
#define DM_MAPIO_SUBMITTED	0
#define DM_MAPIO_REMAPPED	1
#define DM_MAPIO_REQUEUE	DM_ENDIO_REQUEUE
#define DM_MAPIO_DELAY_REQUEUE	DM_ENDIO_DELAY_REQUEUE
#define DM_MAPIO_KILL		4

#define dm_sector_div64(x, y)( \
{ \
	u64 _res; \
	(x) = div64_u64_rem(x, y, &_res); \
	_res; \
} \
)

/*
 * Ceiling(n / sz)
 */
#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))

#define dm_sector_div_up(n, sz) ( \
{ \
	sector_t _r = ((n) + (sz) - 1); \
	sector_div(_r, (sz)); \
	_r; \
} \
)

/*
 * ceiling(n / size) * size
 */
#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))

/*
 * Sector offset taken relative to the start of the target instead of
 * relative to the start of the device.
 */
#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)

static inline sector_t to_sector(unsigned long long n)
{
	return (n >> SECTOR_SHIFT);
}

static inline unsigned long to_bytes(sector_t n)
{
	return (n << SECTOR_SHIFT);
}

#endif	/* _LINUX_DEVICE_MAPPER_H */