xref: /openbmc/linux/drivers/md/dm.h (revision 1ac731c529cd4d6adbce134754b51ff7d822b145)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * Internal header file for device mapper
31da177e4SLinus Torvalds  *
41da177e4SLinus Torvalds  * Copyright (C) 2001, 2002 Sistina Software
52b06cfffSAlasdair G Kergon  * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
61da177e4SLinus Torvalds  *
71da177e4SLinus Torvalds  * This file is released under the LGPL.
81da177e4SLinus Torvalds  */
91da177e4SLinus Torvalds 
101da177e4SLinus Torvalds #ifndef DM_INTERNAL_H
111da177e4SLinus Torvalds #define DM_INTERNAL_H
121da177e4SLinus Torvalds 
131da177e4SLinus Torvalds #include <linux/fs.h>
141da177e4SLinus Torvalds #include <linux/device-mapper.h>
151da177e4SLinus Torvalds #include <linux/list.h>
164cc96131SMike Snitzer #include <linux/moduleparam.h>
171da177e4SLinus Torvalds #include <linux/blkdev.h>
1866114cadSTejun Heo #include <linux/backing-dev.h>
193ac51e74SDarrick J. Wong #include <linux/hdreg.h>
20be35f486SMikulas Patocka #include <linux/completion.h>
212995fa78SMikulas Patocka #include <linux/kobject.h>
222a0b4682SElena Reshetova #include <linux/refcount.h>
230bac3f2fSMike Snitzer #include <linux/log2.h>
241da177e4SLinus Torvalds 
25fd2ed4d2SMikulas Patocka #include "dm-stats.h"
26fd2ed4d2SMikulas Patocka 
2745cbcd79SKiyoshi Ueda /*
28a3d77d35SKiyoshi Ueda  * Suspend feature flags
29a3d77d35SKiyoshi Ueda  */
30a3d77d35SKiyoshi Ueda #define DM_SUSPEND_LOCKFS_FLAG		(1 << 0)
3181fdb096SKiyoshi Ueda #define DM_SUSPEND_NOFLUSH_FLAG		(1 << 1)
32a3d77d35SKiyoshi Ueda 
33a3d77d35SKiyoshi Ueda /*
341f4e0ff0SAlasdair G Kergon  * Status feature flags
351f4e0ff0SAlasdair G Kergon  */
361f4e0ff0SAlasdair G Kergon #define DM_STATUS_NOFLUSH_FLAG		(1 << 0)
371f4e0ff0SAlasdair G Kergon 
381f4e0ff0SAlasdair G Kergon /*
391da177e4SLinus Torvalds  * List of devices that a metadevice uses and should open/close.
401da177e4SLinus Torvalds  */
struct dm_dev_internal {
	struct list_head list;	/* entry on the owning table's device list */
	refcount_t count;	/* number of table references to this device */
	struct dm_dev *dm_dev;	/* the underlying opened device */
};
461da177e4SLinus Torvalds 
471da177e4SLinus Torvalds struct dm_table;
48e6ee8c0bSKiyoshi Ueda struct dm_md_mempools;
49bb37d772SDamien Le Moal struct dm_target_io;
50bb37d772SDamien Le Moal struct dm_io;
511da177e4SLinus Torvalds 
52a4a82ce3SHeinz Mauelshagen /*
53a4a82ce3SHeinz Mauelshagen  *---------------------------------------------------------------
5417b2f66fSAlasdair G Kergon  * Internal table functions.
55a4a82ce3SHeinz Mauelshagen  *---------------------------------------------------------------
56a4a82ce3SHeinz Mauelshagen  */
571da177e4SLinus Torvalds void dm_table_event_callback(struct dm_table *t,
581da177e4SLinus Torvalds 			     void (*fn)(void *), void *context);
591da177e4SLinus Torvalds struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector);
603ae70656SMike Snitzer bool dm_table_has_no_data_devices(struct dm_table *table);
61754c5fc7SMike Snitzer int dm_calculate_queue_limits(struct dm_table *table,
62754c5fc7SMike Snitzer 			      struct queue_limits *limits);
63bb37d772SDamien Le Moal int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
64754c5fc7SMike Snitzer 			      struct queue_limits *limits);
651da177e4SLinus Torvalds struct list_head *dm_table_get_devices(struct dm_table *t);
661da177e4SLinus Torvalds void dm_table_presuspend_targets(struct dm_table *t);
67d67ee213SMike Snitzer void dm_table_presuspend_undo_targets(struct dm_table *t);
681da177e4SLinus Torvalds void dm_table_postsuspend_targets(struct dm_table *t);
698757b776SMilan Broz int dm_table_resume_targets(struct dm_table *t);
707e0d574fSBart Van Assche enum dm_queue_mode dm_table_get_type(struct dm_table *t);
7136a0456fSAlasdair G Kergon struct target_type *dm_table_get_immutable_target_type(struct dm_table *t);
7216f12266SMike Snitzer struct dm_target *dm_table_get_immutable_target(struct dm_table *t);
73f083b09bSMike Snitzer struct dm_target *dm_table_get_wildcard_target(struct dm_table *t);
74545ed20eSToshi Kani bool dm_table_bio_based(struct dm_table *t);
75e6ee8c0bSKiyoshi Ueda bool dm_table_request_based(struct dm_table *t);
761da177e4SLinus Torvalds 
77a5664dadSMike Snitzer void dm_lock_md_type(struct mapped_device *md);
78a5664dadSMike Snitzer void dm_unlock_md_type(struct mapped_device *md);
797e0d574fSBart Van Assche void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type);
807e0d574fSBart Van Assche enum dm_queue_mode dm_get_md_type(struct mapped_device *md);
8136a0456fSAlasdair G Kergon struct target_type *dm_get_immutable_target_type(struct mapped_device *md);
82a5664dadSMike Snitzer 
83591ddcfcSMike Snitzer int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t);
844a0b4ddfSMike Snitzer 
85512875bdSJun'ichi Nomura /*
86169e2cc2SMike Snitzer  * To check whether the target type is bio-based or not (request-based).
87169e2cc2SMike Snitzer  */
88169e2cc2SMike Snitzer #define dm_target_bio_based(t) ((t)->type->map != NULL)
89169e2cc2SMike Snitzer 
90169e2cc2SMike Snitzer /*
91e6ee8c0bSKiyoshi Ueda  * To check whether the target type is request-based or not (bio-based).
92e6ee8c0bSKiyoshi Ueda  */
93eb8db831SChristoph Hellwig #define dm_target_request_based(t) ((t)->type->clone_and_map_rq != NULL)
94e6ee8c0bSKiyoshi Ueda 
95169e2cc2SMike Snitzer /*
96169e2cc2SMike Snitzer  * To check whether the target type is a hybrid (capable of being
97169e2cc2SMike Snitzer  * either request-based or bio-based).
98169e2cc2SMike Snitzer  */
99169e2cc2SMike Snitzer #define dm_target_hybrid(t) (dm_target_bio_based(t) && dm_target_request_based(t))
100169e2cc2SMike Snitzer 
1017fc18728SDamien Le Moal /*
1027fc18728SDamien Le Moal  * Zoned targets related functions.
1037fc18728SDamien Le Moal  */
104bb37d772SDamien Le Moal int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q);
105bb37d772SDamien Le Moal void dm_zone_endio(struct dm_io *io, struct bio *clone);
1067fc18728SDamien Le Moal #ifdef CONFIG_BLK_DEV_ZONED
107bb37d772SDamien Le Moal void dm_cleanup_zoned_dev(struct mapped_device *md);
1087fc18728SDamien Le Moal int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
1097fc18728SDamien Le Moal 			unsigned int nr_zones, report_zones_cb cb, void *data);
110bf14e2b2SDamien Le Moal bool dm_is_zone_write(struct mapped_device *md, struct bio *bio);
111bb37d772SDamien Le Moal int dm_zone_map_bio(struct dm_target_io *io);
1127fc18728SDamien Le Moal #else
/* Without CONFIG_BLK_DEV_ZONED there is no zone state to tear down. */
static inline void dm_cleanup_zoned_dev(struct mapped_device *md)
{
}
1147fc18728SDamien Le Moal #define dm_blk_report_zones	NULL
/*
 * Without CONFIG_BLK_DEV_ZONED no device is zoned, so no bio can ever
 * be a zone write.
 */
static inline bool dm_is_zone_write(struct mapped_device *md, struct bio *bio)
{
	return false;
}
dm_zone_map_bio(struct dm_target_io * tio)119bb37d772SDamien Le Moal static inline int dm_zone_map_bio(struct dm_target_io *tio)
120bb37d772SDamien Le Moal {
121bb37d772SDamien Le Moal 	return DM_MAPIO_KILL;
122bb37d772SDamien Le Moal }
1237fc18728SDamien Le Moal #endif
1247fc18728SDamien Le Moal 
125a4a82ce3SHeinz Mauelshagen /*
126a4a82ce3SHeinz Mauelshagen  *---------------------------------------------------------------
1271da177e4SLinus Torvalds  * A registry of target types.
128a4a82ce3SHeinz Mauelshagen  *---------------------------------------------------------------
129a4a82ce3SHeinz Mauelshagen  */
1301da177e4SLinus Torvalds int dm_target_init(void);
1311da177e4SLinus Torvalds void dm_target_exit(void);
1321da177e4SLinus Torvalds struct target_type *dm_get_target_type(const char *name);
13345194e4fSCheng Renquan void dm_put_target_type(struct target_type *tt);
1341da177e4SLinus Torvalds int dm_target_iterate(void (*iter_func)(struct target_type *tt,
1351da177e4SLinus Torvalds 					void *param), void *param);
1361da177e4SLinus Torvalds 
1371da177e4SLinus Torvalds int dm_split_args(int *argc, char ***argvp, char *input);
1381da177e4SLinus Torvalds 
1391da177e4SLinus Torvalds /*
140432a212cSMike Anderson  * Is this mapped_device being deleted?
141432a212cSMike Anderson  */
142432a212cSMike Anderson int dm_deleting_md(struct mapped_device *md);
143432a212cSMike Anderson 
144432a212cSMike Anderson /*
1454f186f8bSKiyoshi Ueda  * Is this mapped_device suspended?
1464f186f8bSKiyoshi Ueda  */
1474f186f8bSKiyoshi Ueda int dm_suspended_md(struct mapped_device *md);
1484f186f8bSKiyoshi Ueda 
1494f186f8bSKiyoshi Ueda /*
150ffcc3936SMike Snitzer  * Internal suspend and resume methods.
151ffcc3936SMike Snitzer  */
152ffcc3936SMike Snitzer int dm_suspended_internally_md(struct mapped_device *md);
153ffcc3936SMike Snitzer void dm_internal_suspend_fast(struct mapped_device *md);
154ffcc3936SMike Snitzer void dm_internal_resume_fast(struct mapped_device *md);
155ffcc3936SMike Snitzer void dm_internal_suspend_noflush(struct mapped_device *md);
156ffcc3936SMike Snitzer void dm_internal_resume(struct mapped_device *md);
157ffcc3936SMike Snitzer 
158ffcc3936SMike Snitzer /*
1592c140a24SMikulas Patocka  * Test if the device is scheduled for deferred remove.
1602c140a24SMikulas Patocka  */
1612c140a24SMikulas Patocka int dm_test_deferred_remove_flag(struct mapped_device *md);
1622c140a24SMikulas Patocka 
1632c140a24SMikulas Patocka /*
1642c140a24SMikulas Patocka  * Try to remove devices marked for deferred removal.
1652c140a24SMikulas Patocka  */
1662c140a24SMikulas Patocka void dm_deferred_remove(void);
1672c140a24SMikulas Patocka 
1682c140a24SMikulas Patocka /*
1691da177e4SLinus Torvalds  * The device-mapper can be driven through one of two interfaces;
1701da177e4SLinus Torvalds  * ioctl or filesystem, depending which patch you have applied.
1711da177e4SLinus Torvalds  */
1721da177e4SLinus Torvalds int dm_interface_init(void);
1731da177e4SLinus Torvalds void dm_interface_exit(void);
1741da177e4SLinus Torvalds 
1751da177e4SLinus Torvalds /*
176784aae73SMilan Broz  * sysfs interface
177784aae73SMilan Broz  */
178784aae73SMilan Broz int dm_sysfs_init(struct mapped_device *md);
179784aae73SMilan Broz void dm_sysfs_exit(struct mapped_device *md);
180784aae73SMilan Broz struct kobject *dm_kobject(struct mapped_device *md);
181784aae73SMilan Broz struct mapped_device *dm_get_from_kobject(struct kobject *kobj);
1822995fa78SMikulas Patocka 
1832995fa78SMikulas Patocka /*
1842995fa78SMikulas Patocka  * The kobject helper
1852995fa78SMikulas Patocka  */
1862995fa78SMikulas Patocka void dm_kobject_release(struct kobject *kobj);
187784aae73SMilan Broz 
188784aae73SMilan Broz /*
1891da177e4SLinus Torvalds  * Targets for linear and striped mappings
1901da177e4SLinus Torvalds  */
1911da177e4SLinus Torvalds int dm_linear_init(void);
1921da177e4SLinus Torvalds void dm_linear_exit(void);
1931da177e4SLinus Torvalds 
1941da177e4SLinus Torvalds int dm_stripe_init(void);
1951da177e4SLinus Torvalds void dm_stripe_exit(void);
1961da177e4SLinus Torvalds 
1973f77316dSKiyoshi Ueda /*
1983f77316dSKiyoshi Ueda  * mapped_device operations
1993f77316dSKiyoshi Ueda  */
2003f77316dSKiyoshi Ueda void dm_destroy(struct mapped_device *md);
2013f77316dSKiyoshi Ueda void dm_destroy_immediate(struct mapped_device *md);
2025c6bd75dSAlasdair G Kergon int dm_open_count(struct mapped_device *md);
2032c140a24SMikulas Patocka int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred);
2042c140a24SMikulas Patocka int dm_cancel_deferred_remove(struct mapped_device *md);
205fd2ed4d2SMikulas Patocka int dm_request_based(struct mapped_device *md);
20686f1152bSBenjamin Marzinski int dm_get_table_device(struct mapped_device *md, dev_t dev, blk_mode_t mode,
20786f1152bSBenjamin Marzinski 			struct dm_dev **result);
20886f1152bSBenjamin Marzinski void dm_put_table_device(struct mapped_device *md, struct dm_dev *d);
2091da177e4SLinus Torvalds 
2103abf85b5SPeter Rajnoha int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
21186a3238cSHeinz Mauelshagen 		      unsigned int cookie, bool need_resize_uevent);
21269267a30SAlasdair G Kergon 
213fd2ed4d2SMikulas Patocka int dm_io_init(void);
214fd2ed4d2SMikulas Patocka void dm_io_exit(void);
215fd2ed4d2SMikulas Patocka 
216952b3557SMikulas Patocka int dm_kcopyd_init(void);
217952b3557SMikulas Patocka void dm_kcopyd_exit(void);
218952b3557SMikulas Patocka 
219945fa4d2SMikulas Patocka /*
220945fa4d2SMikulas Patocka  * Mempool operations
221945fa4d2SMikulas Patocka  */
222e6ee8c0bSKiyoshi Ueda void dm_free_md_mempools(struct dm_md_mempools *pools);
223e6ee8c0bSKiyoshi Ueda 
224e6ee8c0bSKiyoshi Ueda /*
225e6ee8c0bSKiyoshi Ueda  * Various helpers
226e6ee8c0bSKiyoshi Ueda  */
227fd2ed4d2SMikulas Patocka unsigned int dm_get_reserved_bio_based_ios(void);
2284cc96131SMike Snitzer 
229fd2ed4d2SMikulas Patocka #define DM_HASH_LOCKS_MAX 64
23086a3238cSHeinz Mauelshagen 
dm_num_hash_locks(void)2310ce65797SMike Snitzer static inline unsigned int dm_num_hash_locks(void)
2320bac3f2fSMike Snitzer {
2330bac3f2fSMike Snitzer 	unsigned int num_locks = roundup_pow_of_two(num_online_cpus()) << 1;
2340bac3f2fSMike Snitzer 
2350bac3f2fSMike Snitzer 	return min_t(unsigned int, num_locks, DM_HASH_LOCKS_MAX);
236*363b7fd7SJoe Thornber }
2370bac3f2fSMike Snitzer 
2380bac3f2fSMike Snitzer #define DM_HASH_LOCKS_MULT  4294967291ULL
2390bac3f2fSMike Snitzer #define DM_HASH_LOCKS_SHIFT 6
2400bac3f2fSMike Snitzer 
/*
 * Map a block number to a lock index in [0, num_locks).  Multiplies by
 * a large prime (DM_HASH_LOCKS_MULT) and mixes two shifted halves so
 * that nearby block numbers spread across the locks; num_locks must be
 * a power of two (see dm_num_hash_locks()) for the mask to be valid.
 */
static inline unsigned int dm_hash_locks_index(sector_t block,
					       unsigned int num_locks)
{
	sector_t scaled = block * DM_HASH_LOCKS_MULT;
	sector_t hi = scaled >> DM_HASH_LOCKS_SHIFT;
	sector_t lo = hi >> DM_HASH_LOCKS_SHIFT;

	return (hi ^ lo) & (num_locks - 1);
}
249*363b7fd7SJoe Thornber 
250*363b7fd7SJoe Thornber #endif
251*363b7fd7SJoe Thornber