xref: /openbmc/linux/drivers/md/dm-core.h (revision bd4a6dd241ae0a0bf36274d61e1a1fbf80b99ecb)
14cc96131SMike Snitzer /*
24cc96131SMike Snitzer  * Internal header file _only_ for device mapper core
34cc96131SMike Snitzer  *
44cc96131SMike Snitzer  * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
54cc96131SMike Snitzer  *
64cc96131SMike Snitzer  * This file is released under the LGPL.
74cc96131SMike Snitzer  */
84cc96131SMike Snitzer 
94cc96131SMike Snitzer #ifndef DM_CORE_INTERNAL_H
104cc96131SMike Snitzer #define DM_CORE_INTERNAL_H
114cc96131SMike Snitzer 
124cc96131SMike Snitzer #include <linux/kthread.h>
134cc96131SMike Snitzer #include <linux/ktime.h>
144cc96131SMike Snitzer #include <linux/blk-mq.h>
151e8d44bdSEric Biggers #include <linux/blk-crypto-profile.h>
164cc96131SMike Snitzer 
174cc96131SMike Snitzer #include <trace/events/block.h>
184cc96131SMike Snitzer 
194cc96131SMike Snitzer #include "dm.h"
2091ccbbacSTushar Sugandhi #include "dm-ima.h"
214cc96131SMike Snitzer 
224cc96131SMike Snitzer #define DM_RESERVED_MAX_IOS		1024
234cc96131SMike Snitzer 
/*
 * Embeds a completion alongside a kobject;
 * dm_get_completion_from_kobject() maps the kobject back to the completion.
 */
struct dm_kobject_holder {
	struct kobject kobj;
	struct completion completion;
};
284cc96131SMike Snitzer 
/*
 * DM core internal structures used directly by dm.c, dm-rq.c and dm-table.c.
 * DM targets must _not_ dereference a mapped_device or dm_table to directly
 * access their members!
 */
3433bd6f06SMike Snitzer 
struct mapped_device {
	struct mutex suspend_lock;

	/* Protects the table_devices list */
	struct mutex table_devices_lock;
	struct list_head table_devices;

	/*
	 * The current mapping (struct dm_table *).
	 * Use dm_get_live_table{_fast} or take suspend_lock for
	 * dereference.
	 */
	void __rcu *map;

	/* DMF_* bit numbers (defined below), accessed with test_bit() etc. */
	unsigned long flags;

	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;
	enum dm_queue_mode type;

	int numa_node_id;
	struct request_queue *queue;

	atomic_t holders;
	atomic_t open_count;

	struct dm_target *immutable_target;
	struct target_type *immutable_target_type;

	char name[16];
	struct gendisk *disk;
	struct dax_device *dax_dev;

	wait_queue_head_t wait;
	/* per-CPU count of in-flight I/O — NOTE(review): exact accounting is in dm.c */
	unsigned long __percpu *pending_io;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/*
	 * Processing queue (flush)
	 */
	struct workqueue_struct *wq;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	struct work_struct work;
	spinlock_t deferred_lock;
	struct bio_list deferred;

	void *interface_ptr;

	/*
	 * Event handling.
	 */
	wait_queue_head_t eventq;
	atomic_t event_nr;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/* for blk-mq request-based DM support */
	bool init_tio_pdu:1;
	struct blk_mq_tag_set *tag_set;

	struct dm_stats stats;

	/* the number of internal suspends */
	unsigned internal_suspend_count;

	/* swap-bio throttling state — policy lives outside this header (dm.c) */
	int swap_bios;
	struct semaphore swap_bios_semaphore;
	struct mutex swap_bios_lock;

	/*
	 * io objects are allocated from here.
	 */
	struct bio_set io_bs;
	struct bio_set bs;

	/* kobject and completion */
	struct dm_kobject_holder kobj_holder;

	/* sleepable-RCU domain — presumably protects 'map' readers; confirm in dm.c */
	struct srcu_struct io_barrier;

#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int nr_zones;
	/* per-zone array — NOTE(review): zone write-pointer offsets, see dm-zone.c */
	unsigned int *zwp_offset;
#endif

#ifdef CONFIG_IMA
	struct dm_ima_measurements ima;
#endif
};
1294cc96131SMike Snitzer 
/*
 * Bits for the flags field of struct mapped_device.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7
#define DMF_POST_SUSPENDING 8
/* only meaningful on zoned queues; tested via dm_emulate_zone_append() */
#define DMF_EMULATE_ZONE_APPEND 9
/*
 * Turn off a queue capability for @md after the underlying device fails an
 * operation of that type — NOTE(review): implementations are outside this header.
 */
void disable_discard(struct mapped_device *md);
void disable_write_same(struct mapped_device *md);
void disable_write_zeroes(struct mapped_device *md);
14833bd6f06SMike Snitzer static inline sector_t dm_get_size(struct mapped_device *md)
14933bd6f06SMike Snitzer {
15033bd6f06SMike Snitzer 	return get_capacity(md->disk);
15133bd6f06SMike Snitzer }
15233bd6f06SMike Snitzer 
15333bd6f06SMike Snitzer static inline struct dm_stats *dm_get_stats(struct mapped_device *md)
15433bd6f06SMike Snitzer {
15533bd6f06SMike Snitzer 	return &md->stats;
15633bd6f06SMike Snitzer }
15733bd6f06SMike Snitzer 
158bb37d772SDamien Le Moal static inline bool dm_emulate_zone_append(struct mapped_device *md)
159bb37d772SDamien Le Moal {
160bb37d772SDamien Le Moal 	if (blk_queue_is_zoned(md->queue))
161bb37d772SDamien Le Moal 		return test_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
162bb37d772SDamien Le Moal 	return false;
163bb37d772SDamien Le Moal }
164bb37d772SDamien Le Moal 
16533bd6f06SMike Snitzer #define DM_TABLE_MAX_DEPTH 16
16633bd6f06SMike Snitzer 
16733bd6f06SMike Snitzer struct dm_table {
16833bd6f06SMike Snitzer 	struct mapped_device *md;
16933bd6f06SMike Snitzer 	enum dm_queue_mode type;
17033bd6f06SMike Snitzer 
17133bd6f06SMike Snitzer 	/* btree table */
17233bd6f06SMike Snitzer 	unsigned int depth;
17333bd6f06SMike Snitzer 	unsigned int counts[DM_TABLE_MAX_DEPTH]; /* in nodes */
17433bd6f06SMike Snitzer 	sector_t *index[DM_TABLE_MAX_DEPTH];
17533bd6f06SMike Snitzer 
17633bd6f06SMike Snitzer 	unsigned int num_targets;
17733bd6f06SMike Snitzer 	unsigned int num_allocated;
17833bd6f06SMike Snitzer 	sector_t *highs;
17933bd6f06SMike Snitzer 	struct dm_target *targets;
18033bd6f06SMike Snitzer 
18133bd6f06SMike Snitzer 	struct target_type *immutable_target_type;
18233bd6f06SMike Snitzer 
18333bd6f06SMike Snitzer 	bool integrity_supported:1;
18433bd6f06SMike Snitzer 	bool singleton:1;
18533bd6f06SMike Snitzer 	unsigned integrity_added:1;
18633bd6f06SMike Snitzer 
18733bd6f06SMike Snitzer 	/*
18833bd6f06SMike Snitzer 	 * Indicates the rw permissions for the new logical
18933bd6f06SMike Snitzer 	 * device.  This should be a combination of FMODE_READ
19033bd6f06SMike Snitzer 	 * and FMODE_WRITE.
19133bd6f06SMike Snitzer 	 */
19233bd6f06SMike Snitzer 	fmode_t mode;
19333bd6f06SMike Snitzer 
19433bd6f06SMike Snitzer 	/* a list of devices used by this table */
19533bd6f06SMike Snitzer 	struct list_head devices;
19633bd6f06SMike Snitzer 
19733bd6f06SMike Snitzer 	/* events get handed up using this callback */
19833bd6f06SMike Snitzer 	void (*event_fn)(void *);
19933bd6f06SMike Snitzer 	void *event_context;
20033bd6f06SMike Snitzer 
20133bd6f06SMike Snitzer 	struct dm_md_mempools *mempools;
202aa6ce87aSSatya Tangirala 
203aa6ce87aSSatya Tangirala #ifdef CONFIG_BLK_INLINE_ENCRYPTION
204cb77cb5aSEric Biggers 	struct blk_crypto_profile *crypto_profile;
205aa6ce87aSSatya Tangirala #endif
20633bd6f06SMike Snitzer };
20733bd6f06SMike Snitzer 
/*
 * One of these is allocated per clone bio.
 */
#define DM_TIO_MAGIC 28714
struct dm_target_io {
	unsigned short magic;		/* DM_TIO_MAGIC when valid */
	unsigned short flags;		/* DM_TIO_* bits, via dm_tio_{flagged,set_flag}() */
	unsigned int target_bio_nr;
	struct dm_io *io;		/* parent per-original-bio state */
	struct dm_target *ti;
	unsigned int *len_ptr;
	sector_t old_sector;
	struct bio clone;		/* must remain the last member */
};
222e2118b3cSDamien Le Moal 
/*
 * dm_target_io flags (bit numbers for dm_target_io.flags)
 */
enum {
	DM_TIO_INSIDE_DM_IO,	/* presumably: tio is the one embedded in struct dm_io */
	DM_TIO_IS_DUPLICATE_BIO
};
230655f3aadSMike Snitzer 
231655f3aadSMike Snitzer static inline bool dm_tio_flagged(struct dm_target_io *tio, unsigned int bit)
232655f3aadSMike Snitzer {
233655f3aadSMike Snitzer 	return (tio->flags & (1U << bit)) != 0;
234655f3aadSMike Snitzer }
235655f3aadSMike Snitzer 
236655f3aadSMike Snitzer static inline void dm_tio_set_flag(struct dm_target_io *tio, unsigned int bit)
237655f3aadSMike Snitzer {
238655f3aadSMike Snitzer 	tio->flags |= (1U << bit);
239655f3aadSMike Snitzer }
240655f3aadSMike Snitzer 
/*
 * One of these is allocated per original bio.
 * It contains the first clone used for that original.
 */
#define DM_IO_MAGIC 19577
struct dm_io {
	unsigned short magic;		/* DM_IO_MAGIC when valid */
	unsigned short flags;		/* DM_IO_* bits, via dm_io_{flagged,set_flag}() */
	atomic_t io_count;		/* in-flight refs; see dm_io_{inc,dec}_pending() */
	struct mapped_device *md;
	struct bio *orig_bio;
	blk_status_t status;
	unsigned long start_time;
	void *data;
	struct hlist_node node;
	struct task_struct *map_task;
	spinlock_t startio_lock;
	spinlock_t endio_lock;
	struct dm_stats_aux stats_aux;
	/* last member of dm_target_io is 'struct bio' */
	struct dm_target_io tio;
};
263e2118b3cSDamien Le Moal 
/*
 * dm_io flags (bit numbers for dm_io.flags)
 */
enum {
	DM_IO_START_ACCT,	/* NOTE(review): accounting semantics live in dm.c */
	DM_IO_ACCOUNTED
};
27182f6cdccSMike Snitzer 
27282f6cdccSMike Snitzer static inline bool dm_io_flagged(struct dm_io *io, unsigned int bit)
27382f6cdccSMike Snitzer {
27482f6cdccSMike Snitzer 	return (io->flags & (1U << bit)) != 0;
27582f6cdccSMike Snitzer }
27682f6cdccSMike Snitzer 
27782f6cdccSMike Snitzer static inline void dm_io_set_flag(struct dm_io *io, unsigned int bit)
27882f6cdccSMike Snitzer {
27982f6cdccSMike Snitzer 	io->flags |= (1U << bit);
28082f6cdccSMike Snitzer }
28182f6cdccSMike Snitzer 
/*
 * Take an extra reference on @io's in-flight count; must be balanced
 * by a dm_io_dec_pending() call.
 */
static inline void dm_io_inc_pending(struct dm_io *io)
{
	atomic_inc(&io->io_count);
}

/* Drop a reference — presumably completes @io when the count reaches zero. */
void dm_io_dec_pending(struct dm_io *io, blk_status_t error);
288e2118b3cSDamien Le Moal 
2894cc96131SMike Snitzer static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
2904cc96131SMike Snitzer {
2914cc96131SMike Snitzer 	return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
2924cc96131SMike Snitzer }
2934cc96131SMike Snitzer 
2944cc96131SMike Snitzer unsigned __dm_get_module_param(unsigned *module_param, unsigned def, unsigned max);
2954cc96131SMike Snitzer 
/*
 * Return true when @result has (over)filled a message buffer of size
 * @maxlen: either there is no room at all, or the string plus its NUL
 * terminator no longer fits.
 */
static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
{
	if (!maxlen)
		return true;

	return strlen(result) + 1 >= maxlen;
}
3004cc96131SMike Snitzer 
30193e6442cSMikulas Patocka extern atomic_t dm_global_event_nr;
30293e6442cSMikulas Patocka extern wait_queue_head_t dm_global_eventq;
30362e08243SMikulas Patocka void dm_issue_global_event(void);
30493e6442cSMikulas Patocka 
3054cc96131SMike Snitzer #endif
306