/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef _LINUX_DEVICE_MAPPER_H
#define _LINUX_DEVICE_MAPPER_H

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/dm-ioctl.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>

struct dm_dev;
struct dm_target;
struct dm_table;
struct dm_report_zones_args;
struct mapped_device;
struct bio_vec;
enum dax_access_mode;

/*
 * Type of table, mapped_device's mempool and request_queue
 */
enum dm_queue_mode {
	DM_TYPE_NONE		 = 0,
	DM_TYPE_BIO_BASED	 = 1,
	DM_TYPE_REQUEST_BASED	 = 2,
	DM_TYPE_DAX_BIO_BASED	 = 3,
};

typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE, STATUSTYPE_IMA } status_type_t;

union map_info {
	void *ptr;
};

/*
 * In the constructor the target parameter will already have the
 * table, type, begin and len fields filled in.
 */
typedef int (*dm_ctr_fn) (struct dm_target *target,
			  unsigned int argc, char **argv);

/*
 * The destructor doesn't need to free the dm_target, just
 * anything hidden in ti->private.
 */
typedef void (*dm_dtr_fn) (struct dm_target *ti);
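
/*
 * Illustrative sketch only (not part of the kernel API): the "example"
 * target and all example_* names below are hypothetical. A minimal
 * ctr/dtr pair allocates private state and parks it in ti->private:
 *
 *	struct example_ctx {
 *		struct dm_dev *dev;
 *		sector_t start;
 *	};
 *
 *	static int example_ctr(struct dm_target *ti, unsigned int argc,
 *			       char **argv)
 *	{
 *		struct example_ctx *ec;
 *
 *		if (argc != 2) {
 *			ti->error = "Invalid argument count";
 *			return -EINVAL;
 *		}
 *		ec = kzalloc(sizeof(*ec), GFP_KERNEL);
 *		if (!ec) {
 *			ti->error = "Cannot allocate context";
 *			return -ENOMEM;
 *		}
 *		ti->private = ec;
 *		return 0;
 *	}
 *
 *	static void example_dtr(struct dm_target *ti)
 *	{
 *		kfree(ti->private);
 *	}
 */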

/*
 * The map function must return:
 * < 0: error
 * = 0: The target will handle the io by resubmitting it later
 * = 1: simple remap complete
 * = 2: The target wants to push back the io
 */
typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti,
					    struct request *rq,
					    union map_info *map_context,
					    struct request **clone);
typedef void (*dm_release_clone_request_fn) (struct request *clone,
					     union map_info *map_context);
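
/*
 * Illustrative sketch only, continuing the hypothetical "example"
 * target above: a bio-based map function that remaps to a single
 * underlying device, in the style of dm-linear, and returns
 * DM_MAPIO_REMAPPED (defined later in this header):
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		struct example_ctx *ec = ti->private;
 *
 *		bio_set_dev(bio, ec->dev->bdev);
 *		if (bio_sectors(bio))
 *			bio->bi_iter.bi_sector = ec->start +
 *				dm_target_offset(ti, bio->bi_iter.bi_sector);
 *		return DM_MAPIO_REMAPPED;
 *	}
 */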

/*
 * Returns:
 * < 0 : error (currently ignored)
 * 0   : ended successfully
 * 1   : for some reason the io has still not completed (eg,
 *       multipath target might want to requeue a failed io).
 * 2   : The target wants to push back the io
 */
typedef int (*dm_endio_fn) (struct dm_target *ti,
			    struct bio *bio, blk_status_t *error);
typedef int (*dm_request_endio_fn) (struct dm_target *ti,
				    struct request *clone, blk_status_t error,
				    union map_info *map_context);
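
/*
 * Illustrative sketch only: a trivial bio end_io hook that leaves
 * *error untouched and tells core dm the io is finished by returning
 * DM_ENDIO_DONE (defined later in this header):
 *
 *	static int example_end_io(struct dm_target *ti, struct bio *bio,
 *				  blk_status_t *error)
 *	{
 *		return DM_ENDIO_DONE;
 *	}
 */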

typedef void (*dm_presuspend_fn) (struct dm_target *ti);
typedef void (*dm_presuspend_undo_fn) (struct dm_target *ti);
typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);

typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
			      unsigned int status_flags, char *result, unsigned int maxlen);

typedef int (*dm_message_fn) (struct dm_target *ti, unsigned int argc, char **argv,
			      char *result, unsigned int maxlen);

typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev);

#ifdef CONFIG_BLK_DEV_ZONED
typedef int (*dm_report_zones_fn) (struct dm_target *ti,
				   struct dm_report_zones_args *args,
				   unsigned int nr_zones);
#else
/*
 * Define dm_report_zones_fn so that targets can assign to NULL if
 * CONFIG_BLK_DEV_ZONED is disabled. Otherwise each target needs to do
 * awkward #ifdefs in their target_type, etc.
 */
typedef int (*dm_report_zones_fn) (struct dm_target *dummy);
#endif

/*
 * These iteration functions are typically used to check (and combine)
 * properties of underlying devices.
 * E.g. Does at least one underlying device support flush?
 *      Does any underlying device not support WRITE_SAME?
 *
 * The callout function is called once for each contiguous section of
 * an underlying device.  State can be maintained in *data.
 * Return non-zero to stop iterating through any further devices.
 */
typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
					   struct dm_dev *dev,
					   sector_t start, sector_t len,
					   void *data);

/*
 * This function must iterate through each section of device used by the
 * target until it encounters a non-zero return code, which it then returns.
 * Returns zero if no callout returned non-zero.
 */
typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
				      iterate_devices_callout_fn fn,
				      void *data);
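
/*
 * Illustrative sketch only (device_is_rotational is hypothetical): a
 * callout answering "is any underlying device rotational?". Returning
 * non-zero stops the iteration and is propagated back, so a non-zero
 * result from ti->type->iterate_devices(ti, device_is_rotational, NULL)
 * means at least one rotational device was found:
 *
 *	static int device_is_rotational(struct dm_target *ti,
 *					struct dm_dev *dev,
 *					sector_t start, sector_t len,
 *					void *data)
 *	{
 *		return !bdev_nonrot(dev->bdev);
 *	}
 */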

typedef void (*dm_io_hints_fn) (struct dm_target *ti,
				struct queue_limits *limits);

/*
 * Returns:
 *    0: The target can handle the next I/O immediately.
 *    1: The target can't handle the next I/O immediately.
 */
typedef int (*dm_busy_fn) (struct dm_target *ti);

/*
 * Returns:
 *  < 0 : error
 * >= 0 : the number of bytes accessible at the address
 */
typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
		long nr_pages, enum dax_access_mode mode, void **kaddr,
		pfn_t *pfn);
typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *ti, pgoff_t pgoff,
		size_t nr_pages);

/*
 * Returns:
 * != 0 : number of bytes transferred
 * 0    : recovery write failed
 */
typedef size_t (*dm_dax_recovery_write_fn)(struct dm_target *ti, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i);

void dm_error(const char *message);

struct dm_dev {
	struct block_device *bdev;
	struct dax_device *dax_dev;
	blk_mode_t mode;
	char name[16];
};

/*
 * Constructors should call these functions to ensure destination devices
 * are opened/closed correctly.
 */
int dm_get_device(struct dm_target *ti, const char *path, blk_mode_t mode,
		  struct dm_dev **result);
void dm_put_device(struct dm_target *ti, struct dm_dev *d);
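
/*
 * Illustrative sketch only, continuing the hypothetical "example"
 * target: the usual ctr pattern opens the backing device with the
 * table's mode and releases it again in the dtr:
 *
 *	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
 *			  &ec->dev)) {
 *		ti->error = "Device lookup failed";
 *		return -EINVAL;
 *	}
 *
 * and in the dtr:
 *
 *	dm_put_device(ti, ec->dev);
 */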

/*
 * Information about a target type
 */

struct target_type {
	uint64_t features;
	const char *name;
	struct module *module;
	unsigned int version[3];
	dm_ctr_fn ctr;
	dm_dtr_fn dtr;
	dm_map_fn map;
	dm_clone_and_map_request_fn clone_and_map_rq;
	dm_release_clone_request_fn release_clone_rq;
	dm_endio_fn end_io;
	dm_request_endio_fn rq_end_io;
	dm_presuspend_fn presuspend;
	dm_presuspend_undo_fn presuspend_undo;
	dm_postsuspend_fn postsuspend;
	dm_preresume_fn preresume;
	dm_resume_fn resume;
	dm_status_fn status;
	dm_message_fn message;
	dm_prepare_ioctl_fn prepare_ioctl;
	dm_report_zones_fn report_zones;
	dm_busy_fn busy;
	dm_iterate_devices_fn iterate_devices;
	dm_io_hints_fn io_hints;
	dm_dax_direct_access_fn direct_access;
	dm_dax_zero_page_range_fn dax_zero_page_range;
	dm_dax_recovery_write_fn dax_recovery_write;

	/* For internal device-mapper use. */
	struct list_head list;
};
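
/*
 * Illustrative sketch only, continuing the hypothetical "example"
 * target: hooks that are left out stay NULL and the corresponding
 * operation is treated as unsupported:
 *
 *	static struct target_type example_target = {
 *		.name    = "example",
 *		.version = {1, 0, 0},
 *		.module  = THIS_MODULE,
 *		.ctr     = example_ctr,
 *		.dtr     = example_dtr,
 *		.map     = example_map,
 *		.end_io  = example_end_io,
 *	};
 */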

/*
 * Target features
 */

/*
 * Any table that contains an instance of this target must have only one.
 */
#define DM_TARGET_SINGLETON		0x00000001
#define dm_target_needs_singleton(type)	((type)->features & DM_TARGET_SINGLETON)

/*
 * Indicates that a target does not support read-only devices.
 */
#define DM_TARGET_ALWAYS_WRITEABLE	0x00000002
#define dm_target_always_writeable(type) \
		((type)->features & DM_TARGET_ALWAYS_WRITEABLE)

/*
 * Any device that contains a table with an instance of this target may never
 * have tables containing any different target type.
 */
#define DM_TARGET_IMMUTABLE		0x00000004
#define dm_target_is_immutable(type)	((type)->features & DM_TARGET_IMMUTABLE)

/*
 * Indicates that a target may replace any target, even immutable targets.
 * .map, .clone_and_map_rq and .release_clone_rq are all defined.
 */
#define DM_TARGET_WILDCARD		0x00000008
#define dm_target_is_wildcard(type)	((type)->features & DM_TARGET_WILDCARD)

/*
 * A target implements its own bio data integrity.
 */
#define DM_TARGET_INTEGRITY		0x00000010
#define dm_target_has_integrity(type)	((type)->features & DM_TARGET_INTEGRITY)

/*
 * A target passes integrity data to the lower device.
 */
#define DM_TARGET_PASSES_INTEGRITY	0x00000020
#define dm_target_passes_integrity(type) ((type)->features & DM_TARGET_PASSES_INTEGRITY)

/*
 * Indicates support for zoned block devices:
 * - DM_TARGET_ZONED_HM: the target also supports host-managed zoned
 *   block devices but does not support combining different zoned models.
 * - DM_TARGET_MIXED_ZONED_MODEL: the target supports combining multiple
 *   devices with different zoned models.
 */
#ifdef CONFIG_BLK_DEV_ZONED
#define DM_TARGET_ZONED_HM		0x00000040
#define dm_target_supports_zoned_hm(type) ((type)->features & DM_TARGET_ZONED_HM)
#else
#define DM_TARGET_ZONED_HM		0x00000000
#define dm_target_supports_zoned_hm(type) (false)
#endif

/*
 * A target handles REQ_NOWAIT
 */
#define DM_TARGET_NOWAIT		0x00000080
#define dm_target_supports_nowait(type) ((type)->features & DM_TARGET_NOWAIT)

/*
 * A target supports passing through inline crypto support.
 */
#define DM_TARGET_PASSES_CRYPTO		0x00000100
#define dm_target_passes_crypto(type) ((type)->features & DM_TARGET_PASSES_CRYPTO)

#ifdef CONFIG_BLK_DEV_ZONED
#define DM_TARGET_MIXED_ZONED_MODEL	0x00000200
#define dm_target_supports_mixed_zoned_model(type) \
	((type)->features & DM_TARGET_MIXED_ZONED_MODEL)
#else
#define DM_TARGET_MIXED_ZONED_MODEL	0x00000000
#define dm_target_supports_mixed_zoned_model(type) (false)
#endif

struct dm_target {
	struct dm_table *table;
	struct target_type *type;

	/* target limits */
	sector_t begin;
	sector_t len;

	/* If non-zero, maximum size of I/O submitted to a target. */
	uint32_t max_io_len;

	/*
	 * A number of zero-length barrier bios that will be submitted
	 * to the target for the purpose of flushing cache.
	 *
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 * It is the responsibility of the target driver to remap these bios
	 * to the real underlying devices.
	 */
	unsigned int num_flush_bios;

	/*
	 * The number of discard bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned int num_discard_bios;

	/*
	 * The number of secure erase bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned int num_secure_erase_bios;

	/*
	 * The number of WRITE ZEROES bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned int num_write_zeroes_bios;

	/*
	 * The minimum number of extra bytes allocated in each io for the
	 * target to use.
	 */
	unsigned int per_io_data_size;

	/* target specific data */
	void *private;

	/* Used to provide an error string from the ctr */
	char *error;

	/*
	 * Set if this target needs to receive flushes regardless of
	 * whether or not its underlying devices have support.
	 */
	bool flush_supported:1;

	/*
	 * Set if this target needs to receive discards regardless of
	 * whether or not its underlying devices have support.
	 */
	bool discards_supported:1;

	/*
	 * Set if this target requires that discards be split on
	 * 'max_discard_sectors' boundaries.
	 */
	bool max_discard_granularity:1;

	/*
	 * Set if this target requires that secure_erases be split on
	 * 'max_secure_erase_sectors' boundaries.
	 */
	bool max_secure_erase_granularity:1;

	/*
	 * Set if this target requires that write_zeroes be split on
	 * 'max_write_zeroes_sectors' boundaries.
	 */
	bool max_write_zeroes_granularity:1;

	/*
	 * Set if we need to limit the number of in-flight bios when swapping.
	 */
	bool limit_swap_bios:1;

	/*
	 * Set if this target implements a zoned device and needs emulation of
	 * zone append operations using regular writes.
	 */
	bool emulate_zone_append:1;

	/*
	 * Set if the target will submit IO using dm_submit_bio_remap()
	 * after returning DM_MAPIO_SUBMITTED from its map function.
	 */
	bool accounts_remapped_io:1;

	/*
	 * Set if the target will submit the DM bio without first calling
	 * bio_set_dev(). NOTE: ideally a target should _not_ need this.
	 */
	bool needs_bio_set_dev:1;
};

void *dm_per_bio_data(struct bio *bio, size_t data_size);
struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size);
unsigned int dm_bio_get_target_bio_nr(const struct bio *bio);
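
/*
 * Illustrative sketch only (struct example_pb is hypothetical): a
 * target that sets ti->per_io_data_size in its ctr gets that many
 * bytes of per-bio storage, reachable from both map and end_io:
 *
 *	struct example_pb {
 *		unsigned long start_jiffies;
 *	};
 *
 * In the ctr:
 *
 *	ti->per_io_data_size = sizeof(struct example_pb);
 *
 * In map or end_io:
 *
 *	struct example_pb *pb =
 *		dm_per_bio_data(bio, sizeof(struct example_pb));
 */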

u64 dm_start_time_ns_from_clone(struct bio *bio);

int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);

/*
 * Target argument parsing.
 */
struct dm_arg_set {
	unsigned int argc;
	char **argv;
};

/*
 * The minimum and maximum value of a numeric argument, together with
 * the error message to use if the number is found to be outside that range.
 */
struct dm_arg {
	unsigned int min;
	unsigned int max;
	char *error;
};

/*
 * Validate the next argument, either returning it as *value or, if invalid,
 * returning -EINVAL and setting *error.
 */
int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		unsigned int *value, char **error);

/*
 * Process the next argument as the start of a group containing between
 * arg->min and arg->max further arguments. Either return the size as
 * *num_args or, if invalid, return -EINVAL and set *error.
 */
int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		      unsigned int *num_args, char **error);

/*
 * Return the current argument and shift to the next.
 */
const char *dm_shift_arg(struct dm_arg_set *as);

/*
 * Move through num_args arguments.
 */
void dm_consume_args(struct dm_arg_set *as, unsigned int num_args);
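
/*
 * Illustrative sketch only ("feature_x" and the surrounding ctr are
 * hypothetical): parsing a mandatory feature count followed by that
 * many feature words, in the usual dm ctr style:
 *
 *	static const struct dm_arg _args[] = {
 *		{0, 1, "Invalid number of feature arguments"},
 *	};
 *	struct dm_arg_set as = { .argc = argc, .argv = argv };
 *	unsigned int num_features;
 *	const char *arg;
 *	int r;
 *
 *	r = dm_read_arg_group(_args, &as, &num_features, &ti->error);
 *	if (r)
 *		return r;
 *	while (num_features--) {
 *		arg = dm_shift_arg(&as);
 *		if (!strcasecmp(arg, "feature_x"))
 *			ec->feature_x = true;
 *	}
 */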

/*
 *----------------------------------------------------------------
 * Functions for creating and manipulating mapped devices.
 * Drop the reference with dm_put when you finish with the object.
 *----------------------------------------------------------------
 */

/*
 * DM_ANY_MINOR chooses the next available minor number.
 */
#define DM_ANY_MINOR (-1)
int dm_create(int minor, struct mapped_device **md);

/*
 * Reference counting for md.
 */
struct mapped_device *dm_get_md(dev_t dev);
void dm_get(struct mapped_device *md);
int dm_hold(struct mapped_device *md);
void dm_put(struct mapped_device *md);

/*
 * An arbitrary pointer may be stored alongside a mapped device.
 */
void dm_set_mdptr(struct mapped_device *md, void *ptr);
void *dm_get_mdptr(struct mapped_device *md);

/*
 * A device can still be used while suspended, but I/O is deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned int suspend_flags);
int dm_resume(struct mapped_device *md);

/*
 * Event functions.
 */
uint32_t dm_get_event_nr(struct mapped_device *md);
int dm_wait_event(struct mapped_device *md, int event_nr);
uint32_t dm_next_uevent_seq(struct mapped_device *md);
void dm_uevent_add(struct mapped_device *md, struct list_head *elist);

/*
 * Info functions.
 */
const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_post_suspending(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors);
void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone);
union map_info *dm_get_rq_mapinfo(struct request *rq);

#ifdef CONFIG_BLK_DEV_ZONED
struct dm_report_zones_args {
	struct dm_target *tgt;
	sector_t next_sector;

	void *orig_data;
	report_zones_cb orig_cb;
	unsigned int zone_idx;

	/* must be filled by ->report_zones before calling dm_report_zones_cb */
	sector_t start;
};
int dm_report_zones(struct block_device *bdev, sector_t start, sector_t sector,
		    struct dm_report_zones_args *args, unsigned int nr_zones);
#endif /* CONFIG_BLK_DEV_ZONED */

/*
 * Device mapper functions to parse and create devices specified by the
 * parameter "dm-mod.create="
 */
int __init dm_early_create(struct dm_ioctl *dmi,
			   struct dm_target_spec **spec_array,
			   char **target_params_array);

/*
 * Geometry functions.
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);

/*
 *---------------------------------------------------------------
 * Functions for manipulating device-mapper tables.
 *---------------------------------------------------------------
 */

/*
 * First create an empty table.
 */
int dm_table_create(struct dm_table **result, blk_mode_t mode,
		    unsigned int num_targets, struct mapped_device *md);

/*
 * Then call this once for each target.
 */
int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params);

/*
 * Target can use this to set the table's type.
 * Can only ever be called from a target's ctr.
 * Useful for "hybrid" targets (those that support both bio-based
 * and request-based operation).
 */
void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type);

/*
 * Finally call this to make the table ready for use.
 */
int dm_table_complete(struct dm_table *t);

/*
 * Destroy the table when finished.
 */
void dm_table_destroy(struct dm_table *t);
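
/*
 * Illustrative sketch only; tables are normally assembled via the
 * ioctl interface, and "8:16 0" is just an example linear mapping
 * (backing device major:minor and start sector) for an md obtained
 * from dm_create():
 *
 *	struct dm_table *t;
 *	int r;
 *
 *	r = dm_table_create(&t, BLK_OPEN_READ | BLK_OPEN_WRITE, 1, md);
 *	if (r)
 *		return r;
 *	r = dm_table_add_target(t, "linear", 0, len, "8:16 0");
 *	if (!r)
 *		r = dm_table_complete(t);
 *	if (r)
 *		dm_table_destroy(t);
 */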

/*
 * Target may require that it is never sent I/O larger than len.
 */
int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len);

/*
 * Table reference counting.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx);
void dm_put_live_table(struct mapped_device *md, int srcu_idx);
void dm_sync_table(struct mapped_device *md);
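
/*
 * Illustrative sketch only: the usual access pattern brackets any use
 * of the live table with the SRCU index handed back by
 * dm_get_live_table(), even when no table is currently loaded:
 *
 *	int srcu_idx;
 *	sector_t nr_sectors = 0;
 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *	if (map)
 *		nr_sectors = dm_table_get_size(map);
 *	dm_put_live_table(md, srcu_idx);
 */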

/*
 * Queries
 */
sector_t dm_table_get_size(struct dm_table *t);
blk_mode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);
const char *dm_table_device_name(struct dm_table *t);

/*
 * Trigger an event.
 */
void dm_table_event(struct dm_table *t);

/*
 * Run the queue for request-based targets.
 */
void dm_table_run_md_queue_async(struct dm_table *t);

/*
 * The device must be suspended before calling this method.
 * Returns the previous table, which the caller must destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md,
			       struct dm_table *t);

/*
 * Table blk_crypto_profile functions
 */
void dm_destroy_crypto_profile(struct blk_crypto_profile *profile);

/*
 *---------------------------------------------------------------
 * Macros.
 *---------------------------------------------------------------
 */
#define DM_NAME "device-mapper"

#define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n"

#define DMCRIT(fmt, ...) pr_crit(DM_FMT(fmt), ##__VA_ARGS__)

#define DMERR(fmt, ...) pr_err(DM_FMT(fmt), ##__VA_ARGS__)
#define DMERR_LIMIT(fmt, ...) pr_err_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
#define DMWARN(fmt, ...) pr_warn(DM_FMT(fmt), ##__VA_ARGS__)
#define DMWARN_LIMIT(fmt, ...) pr_warn_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
#define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__)
#define DMINFO_LIMIT(fmt, ...) pr_info_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)

#define DMDEBUG(fmt, ...) pr_debug(DM_FMT(fmt), ##__VA_ARGS__)
#define DMDEBUG_LIMIT(fmt, ...) pr_debug_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)

#define DMEMIT(x...) (sz += ((sz >= maxlen) ? 0 : scnprintf(result + sz, maxlen - sz, x)))

#define DMEMIT_TARGET_NAME_VERSION(y) \
		DMEMIT("target_name=%s,target_version=%u.%u.%u", \
		       (y)->name, (y)->version[0], (y)->version[1], (y)->version[2])
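
/*
 * Illustrative sketch only, continuing the hypothetical "example"
 * target: DMEMIT expects locals named sz, result and maxlen in scope,
 * which is why status functions follow this shape:
 *
 *	static void example_status(struct dm_target *ti,
 *				   status_type_t type,
 *				   unsigned int status_flags,
 *				   char *result, unsigned int maxlen)
 *	{
 *		struct example_ctx *ec = ti->private;
 *		unsigned int sz = 0;
 *
 *		switch (type) {
 *		case STATUSTYPE_INFO:
 *		case STATUSTYPE_IMA:
 *			result[0] = '\0';
 *			break;
 *		case STATUSTYPE_TABLE:
 *			DMEMIT("%s %llu", ec->dev->name,
 *			       (unsigned long long)ec->start);
 *			break;
 *		}
 *	}
 */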

/**
 * module_dm() - Helper macro for DM targets that don't do anything
 * special in their module_init and module_exit.
 * Each module may only use this macro once, and calling it replaces
 * module_init() and module_exit().
 *
 * @name: DM target's name
 */
#define module_dm(name) \
static int __init dm_##name##_init(void) \
{ \
	return dm_register_target(&(name##_target)); \
} \
module_init(dm_##name##_init) \
static void __exit dm_##name##_exit(void) \
{ \
	dm_unregister_target(&(name##_target)); \
} \
module_exit(dm_##name##_exit)
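
/*
 * Illustrative sketch only: with the hypothetical example_target
 * above, the whole module boilerplate reduces to:
 *
 *	module_dm(example);
 *
 * which expands to init/exit functions that register and unregister
 * example_target.
 */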

/*
 * Definitions of return values from target end_io function.
 */
#define DM_ENDIO_DONE		0
#define DM_ENDIO_INCOMPLETE	1
#define DM_ENDIO_REQUEUE	2
#define DM_ENDIO_DELAY_REQUEUE	3

/*
 * Definitions of return values from target map function.
 */
#define DM_MAPIO_SUBMITTED	0
#define DM_MAPIO_REMAPPED	1
#define DM_MAPIO_REQUEUE	DM_ENDIO_REQUEUE
#define DM_MAPIO_DELAY_REQUEUE	DM_ENDIO_DELAY_REQUEUE
#define DM_MAPIO_KILL		4

#define dm_sector_div64(x, y)( \
{ \
	u64 _res; \
	(x) = div64_u64_rem(x, y, &_res); \
	_res; \
} \
)

/*
 * Ceiling(n / sz)
 */
#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))

#define dm_sector_div_up(n, sz) ( \
{ \
	sector_t _r = ((n) + (sz) - 1); \
	sector_div(_r, (sz)); \
	_r; \
} \
)

/*
 * ceiling(n / size) * size
 */
#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))

/*
 * Sector offset taken relative to the start of the target instead of
 * relative to the start of the device.
 */
#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)

static inline sector_t to_sector(unsigned long long n)
{
	return (n >> SECTOR_SHIFT);
}

static inline unsigned long to_bytes(sector_t n)
{
	return (n << SECTOR_SHIFT);
}

#endif	/* _LINUX_DEVICE_MAPPER_H */