xref: /openbmc/linux/drivers/md/dm-core.h (revision 9d20653fe84ebd772c3af71808e6a727603e0b71)
14cc96131SMike Snitzer /*
24cc96131SMike Snitzer  * Internal header file _only_ for device mapper core
34cc96131SMike Snitzer  *
44cc96131SMike Snitzer  * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
54cc96131SMike Snitzer  *
64cc96131SMike Snitzer  * This file is released under the LGPL.
74cc96131SMike Snitzer  */
84cc96131SMike Snitzer 
94cc96131SMike Snitzer #ifndef DM_CORE_INTERNAL_H
104cc96131SMike Snitzer #define DM_CORE_INTERNAL_H
114cc96131SMike Snitzer 
124cc96131SMike Snitzer #include <linux/kthread.h>
134cc96131SMike Snitzer #include <linux/ktime.h>
144cc96131SMike Snitzer #include <linux/blk-mq.h>
151e8d44bdSEric Biggers #include <linux/blk-crypto-profile.h>
16442761fdSMike Snitzer #include <linux/jump_label.h>
174cc96131SMike Snitzer 
184cc96131SMike Snitzer #include <trace/events/block.h>
194cc96131SMike Snitzer 
204cc96131SMike Snitzer #include "dm.h"
2191ccbbacSTushar Sugandhi #include "dm-ima.h"
224cc96131SMike Snitzer 
234cc96131SMike Snitzer #define DM_RESERVED_MAX_IOS		1024
244cc96131SMike Snitzer 
/*
 * Pairs the mapped_device's sysfs kobject with a completion; the
 * completion is retrieved from the embedded kobject via
 * dm_get_completion_from_kobject() (defined later in this header).
 */
struct dm_kobject_holder {
	struct kobject kobj;		/* embedded sysfs object */
	struct completion completion;	/* see dm_get_completion_from_kobject() */
};
294cc96131SMike Snitzer 
304cc96131SMike Snitzer /*
3133bd6f06SMike Snitzer  * DM core internal structures used directly by dm.c, dm-rq.c and dm-table.c.
3233bd6f06SMike Snitzer  * DM targets must _not_ deference a mapped_device or dm_table to directly
3333bd6f06SMike Snitzer  * access their members!
344cc96131SMike Snitzer  */
3533bd6f06SMike Snitzer 
struct mapped_device {
	/* Serializes suspend/resume against table access via 'map'. */
	struct mutex suspend_lock;

	/* NOTE(review): presumably the open devices backing tables — confirm in dm.c */
	struct mutex table_devices_lock;
	struct list_head table_devices;

	/*
	 * The current mapping (struct dm_table *).
	 * Use dm_get_live_table{_fast} or take suspend_lock for
	 * dereference.
	 */
	void __rcu *map;

	/* DMF_* bit numbers (defined below), accessed with test_bit()/set_bit() */
	unsigned long flags;

	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;
	enum dm_queue_mode type;

	int numa_node_id;
	struct request_queue *queue;

	atomic_t holders;	/* reference count on this md */
	atomic_t open_count;	/* block-device open count */

	/* Cached when the live table has a single immutable target. */
	struct dm_target *immutable_target;
	struct target_type *immutable_target_type;

	char name[16];	/* device name, e.g. "dm-3" */
	struct gendisk *disk;
	struct dax_device *dax_dev;

	wait_queue_head_t wait;
	/* per-cpu count of bios in flight */
	unsigned long __percpu *pending_io;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/*
	 * Processing queue (flush)
	 */
	struct workqueue_struct *wq;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	struct work_struct work;
	spinlock_t deferred_lock;	/* protects 'deferred' */
	struct bio_list deferred;

	void *interface_ptr;

	/*
	 * Event handling.
	 */
	wait_queue_head_t eventq;
	atomic_t event_nr;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/* for blk-mq request-based DM support */
	bool init_tio_pdu:1;
	struct blk_mq_tag_set *tag_set;

	struct dm_stats stats;

	/* the number of internal suspends */
	unsigned internal_suspend_count;

	/* throttling of swap bios - see swap_bios_lock/semaphore users in dm.c */
	int swap_bios;
	struct semaphore swap_bios_semaphore;
	struct mutex swap_bios_lock;

	/*
	 * io objects are allocated from here.
	 */
	struct bio_set io_bs;
	struct bio_set bs;

	/* kobject and completion */
	struct dm_kobject_holder kobj_holder;

	/* SRCU domain guarding the live table pointer 'map' above */
	struct srcu_struct io_barrier;

#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int nr_zones;
	/* NOTE(review): looks like per-zone write-pointer offsets — confirm in dm-zone.c */
	unsigned int *zwp_offset;
#endif

#ifdef CONFIG_IMA
	struct dm_ima_measurements ima;
#endif
};
1304cc96131SMike Snitzer 
/*
 * Bits for the flags field of struct mapped_device.
 * These are bit *numbers*, used with test_bit()/set_bit() on md->flags
 * (see e.g. dm_emulate_zone_append() below).
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7
#define DMF_POST_SUSPENDING 8
#define DMF_EMULATE_ZONE_APPEND 9

/* Implemented in dm.c; flip off the corresponding queue capability for md. */
void disable_discard(struct mapped_device *md);
void disable_write_zeroes(struct mapped_device *md);
1474cc96131SMike Snitzer 
14833bd6f06SMike Snitzer static inline sector_t dm_get_size(struct mapped_device *md)
14933bd6f06SMike Snitzer {
15033bd6f06SMike Snitzer 	return get_capacity(md->disk);
15133bd6f06SMike Snitzer }
15233bd6f06SMike Snitzer 
15333bd6f06SMike Snitzer static inline struct dm_stats *dm_get_stats(struct mapped_device *md)
15433bd6f06SMike Snitzer {
15533bd6f06SMike Snitzer 	return &md->stats;
15633bd6f06SMike Snitzer }
15733bd6f06SMike Snitzer 
158442761fdSMike Snitzer DECLARE_STATIC_KEY_FALSE(stats_enabled);
159442761fdSMike Snitzer DECLARE_STATIC_KEY_FALSE(swap_bios_enabled);
160442761fdSMike Snitzer DECLARE_STATIC_KEY_FALSE(zoned_enabled);
161442761fdSMike Snitzer 
162bb37d772SDamien Le Moal static inline bool dm_emulate_zone_append(struct mapped_device *md)
163bb37d772SDamien Le Moal {
164bb37d772SDamien Le Moal 	if (blk_queue_is_zoned(md->queue))
165bb37d772SDamien Le Moal 		return test_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
166bb37d772SDamien Le Moal 	return false;
167bb37d772SDamien Le Moal }
168bb37d772SDamien Le Moal 
/* Maximum depth of the target-lookup btree below. */
#define DM_TABLE_MAX_DEPTH 16

struct dm_table {
	struct mapped_device *md;	/* owning device */
	enum dm_queue_mode type;

	/* btree table */
	unsigned int depth;
	unsigned int counts[DM_TABLE_MAX_DEPTH]; /* in nodes */
	sector_t *index[DM_TABLE_MAX_DEPTH];

	unsigned int num_targets;	/* targets in use */
	unsigned int num_allocated;	/* capacity of 'highs'/'targets' */
	sector_t *highs;		/* highest sector of each target */
	struct dm_target *targets;

	/* Set when the table consists of a single immutable target. */
	struct target_type *immutable_target_type;

	bool integrity_supported:1;
	bool singleton:1;
	unsigned integrity_added:1;

	/*
	 * Indicates the rw permissions for the new logical
	 * device.  This should be a combination of FMODE_READ
	 * and FMODE_WRITE.
	 */
	fmode_t mode;

	/* a list of devices used by this table */
	struct list_head devices;

	/* events get handed up using this callback */
	void (*event_fn)(void *);
	void *event_context;

	struct dm_md_mempools *mempools;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct blk_crypto_profile *crypto_profile;
#endif
};
21133bd6f06SMike Snitzer 
/*
 * One of these is allocated per clone bio.
 */
#define DM_TIO_MAGIC 28714
struct dm_target_io {
	unsigned short magic;	/* DM_TIO_MAGIC when valid */
	blk_short_t flags;	/* DM_TIO_* bits, see enum below */
	unsigned int target_bio_nr;
	struct dm_io *io;	/* parent dm_io for the original bio */
	struct dm_target *ti;
	unsigned int *len_ptr;
	sector_t old_sector;
	/* must stay last: embedded clone bio (see note in struct dm_io) */
	struct bio clone;
};
226e2118b3cSDamien Le Moal 
/*
 * dm_target_io flags: bit numbers stored in dm_target_io::flags,
 * manipulated via dm_tio_flagged()/dm_tio_set_flag() below.
 */
enum {
	DM_TIO_INSIDE_DM_IO,	/* tio is the one embedded in struct dm_io (::tio) */
	DM_TIO_IS_DUPLICATE_BIO
};
234655f3aadSMike Snitzer 
235655f3aadSMike Snitzer static inline bool dm_tio_flagged(struct dm_target_io *tio, unsigned int bit)
236655f3aadSMike Snitzer {
237655f3aadSMike Snitzer 	return (tio->flags & (1U << bit)) != 0;
238655f3aadSMike Snitzer }
239655f3aadSMike Snitzer 
240655f3aadSMike Snitzer static inline void dm_tio_set_flag(struct dm_target_io *tio, unsigned int bit)
241655f3aadSMike Snitzer {
242655f3aadSMike Snitzer 	tio->flags |= (1U << bit);
243655f3aadSMike Snitzer }
244655f3aadSMike Snitzer 
2453b03f7c1SMike Snitzer static inline bool dm_tio_is_normal(struct dm_target_io *tio)
2463b03f7c1SMike Snitzer {
2473b03f7c1SMike Snitzer 	return (dm_tio_flagged(tio, DM_TIO_INSIDE_DM_IO) &&
2483b03f7c1SMike Snitzer 		!dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO));
2493b03f7c1SMike Snitzer }
2503b03f7c1SMike Snitzer 
/*
 * One of these is allocated per original bio.
 * It contains the first clone used for that original.
 */
#define DM_IO_MAGIC 19577
struct dm_io {
	unsigned short magic;	/* DM_IO_MAGIC when valid */
	blk_short_t flags;	/* DM_IO_* bits, see enum below */
	spinlock_t lock;
	unsigned long start_time;
	void *data;
	struct dm_io *next;	/* singly-linked list of dm_io */
	struct dm_stats_aux stats_aux;
	blk_status_t status;
	atomic_t io_count;	/* outstanding clone bios */
	struct mapped_device *md;

	/* The three fields represent mapped part of original bio */
	struct bio *orig_bio;
	unsigned int sector_offset; /* offset to end of orig_bio */
	unsigned int sectors;

	/* last member of dm_target_io is 'struct bio' */
	struct dm_target_io tio;
};
276e2118b3cSDamien Le Moal 
/*
 * dm_io flags: bit numbers stored in dm_io::flags, manipulated via
 * dm_io_flagged()/dm_io_set_flag() below.
 */
enum {
	DM_IO_ACCOUNTED,	/* start_time accounting has been done */
	DM_IO_WAS_SPLIT		/* orig_bio was split; sector_offset/sectors valid */
};
28482f6cdccSMike Snitzer 
28582f6cdccSMike Snitzer static inline bool dm_io_flagged(struct dm_io *io, unsigned int bit)
28682f6cdccSMike Snitzer {
28782f6cdccSMike Snitzer 	return (io->flags & (1U << bit)) != 0;
28882f6cdccSMike Snitzer }
28982f6cdccSMike Snitzer 
29082f6cdccSMike Snitzer static inline void dm_io_set_flag(struct dm_io *io, unsigned int bit)
29182f6cdccSMike Snitzer {
29282f6cdccSMike Snitzer 	io->flags |= (1U << bit);
29382f6cdccSMike Snitzer }
29482f6cdccSMike Snitzer 
2954cc96131SMike Snitzer static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
2964cc96131SMike Snitzer {
2974cc96131SMike Snitzer 	return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
2984cc96131SMike Snitzer }
2994cc96131SMike Snitzer 
3004cc96131SMike Snitzer unsigned __dm_get_module_param(unsigned *module_param, unsigned def, unsigned max);
3014cc96131SMike Snitzer 
/*
 * True if a NUL-terminated message in 'result' no longer fits in a
 * buffer of 'maxlen' bytes (strlen(result) + 1 >= maxlen), or if
 * maxlen is zero.
 */
static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
{
	if (!maxlen)
		return true;

	return strlen(result) + 1 >= maxlen;
}
3064cc96131SMike Snitzer 
30793e6442cSMikulas Patocka extern atomic_t dm_global_event_nr;
30893e6442cSMikulas Patocka extern wait_queue_head_t dm_global_eventq;
30962e08243SMikulas Patocka void dm_issue_global_event(void);
31093e6442cSMikulas Patocka 
3114cc96131SMike Snitzer #endif
312