/*
 * Internal header file _only_ for device mapper core
 *
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef DM_CORE_INTERNAL_H
#define DM_CORE_INTERNAL_H

#include <linux/kthread.h>
#include <linux/ktime.h>
#include <linux/blk-mq.h>

#include <trace/events/block.h>

#include "dm.h"

#define DM_RESERVED_MAX_IOS		1024

struct dm_kobject_holder {
	struct kobject kobj;
	struct completion completion;
};

/*
 * DM core internal structure that is used directly by dm.c and dm-rq.c.
 * DM targets must _not_ dereference a mapped_device to directly access its members!
 */
struct mapped_device {
	struct srcu_struct io_barrier;
	struct mutex suspend_lock;

	/*
	 * The current mapping (struct dm_table *).
	 * Use dm_get_live_table{_fast} or take suspend_lock for
	 * dereference.  See the illustrative usage sketch after this struct.
	 */
	void __rcu *map;

	struct list_head table_devices;
	struct mutex table_devices_lock;

	unsigned long flags;

	struct request_queue *queue;
	int numa_node_id;

	unsigned type;
	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;

	atomic_t holders;
	atomic_t open_count;

	struct dm_target *immutable_target;
	struct target_type *immutable_target_type;

	struct gendisk *disk;
	char name[16];

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending[2];
	wait_queue_head_t wait;
	struct work_struct work;
	spinlock_t deferred_lock;
	struct bio_list deferred;

	/*
	 * Event handling.
	 */
	wait_queue_head_t eventq;
	atomic_t event_nr;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/* the number of internal suspends */
	unsigned internal_suspend_count;

	/*
	 * Processing queue (flush)
	 */
	struct workqueue_struct *wq;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
	mempool_t *rq_pool;

	struct bio_set *bs;

	/*
	 * freeze/thaw support requires holding onto a super block
	 */
	struct super_block *frozen_sb;

	/* forced geometry settings */
	struct hd_geometry geometry;

	struct block_device *bdev;

	/* kobject and completion */
	struct dm_kobject_holder kobj_holder;

	/* zero-length flush that will be cloned and submitted to targets */
	struct bio flush_bio;

	struct dm_stats stats;

	struct kthread_worker kworker;
	struct task_struct *kworker_task;

	/* for request-based merge heuristic in dm_request_fn() */
	unsigned seq_rq_merge_deadline_usecs;
	int last_rq_rw;
	sector_t last_rq_pos;
	ktime_t last_rq_start_time;

	/* for blk-mq request-based DM support */
	struct blk_mq_tag_set *tag_set;
	bool use_blk_mq:1;
	bool init_tio_pdu:1;
};
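
/*
 * Illustrative sketch, not part of this header: md->map above is
 * RCU-protected, so core code is expected to bracket any dereference of the
 * live table with dm_get_live_table()/dm_put_live_table() (or hold
 * suspend_lock).  Roughly:
 *
 *	int srcu_idx;
 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *	if (map) {
 *		... look up targets, query limits, etc. ...
 *	}
 *	dm_put_live_table(md, srcu_idx);
 *
 * The real callers live in dm.c and dm-rq.c; this is only a usage sketch.
 */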

void dm_init_md_queue(struct mapped_device *md);
void dm_init_normal_md_queue(struct mapped_device *md);
int md_in_flight(struct mapped_device *md);
void disable_write_same(struct mapped_device *md);

static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
{
	return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
}
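
/*
 * Illustrative sketch, not part of this header: dm_kobject_holder pairs a
 * kobject with a completion so that teardown can wait until the last sysfs
 * reference is gone.  The kobject release callback is expected to signal the
 * completion via the helper above, roughly:
 *
 *	static void dm_kobject_release(struct kobject *kobj)
 *	{
 *		complete(dm_get_completion_from_kobject(kobj));
 *	}
 *
 * The callback name mirrors the one in dm-sysfs.c, but it is shown here only
 * as an example of how this helper is meant to be used.
 */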

unsigned __dm_get_module_param(unsigned *module_param, unsigned def, unsigned max);
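
/*
 * Illustrative sketch, not part of this header: callers typically wrap this
 * helper to read a module parameter while clamping it to a sane range, with
 * DM_RESERVED_MAX_IOS as the upper bound for the reserved-I/O counts.
 * Something along these lines:
 *
 *	unsigned dm_get_reserved_bio_based_ios(void)
 *	{
 *		return __dm_get_module_param(&reserved_bio_based_ios,
 *					     RESERVED_BIO_BASED_IOS,
 *					     DM_RESERVED_MAX_IOS);
 *	}
 *
 * reserved_bio_based_ios and RESERVED_BIO_BASED_IOS are dm.c-local names,
 * shown here only to illustrate the expected usage.
 */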

static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
{
	return !maxlen || strlen(result) + 1 >= maxlen;
}
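
/*
 * Illustrative sketch, not part of this header: a message handler that
 * prints into 'result' a chunk at a time can use this check to stop once
 * the buffer is exhausted, e.g. roughly:
 *
 *	sz += scnprintf(result + sz, maxlen - sz, "%u ", value);
 *	if (dm_message_test_buffer_overflow(result, maxlen))
 *		return 1;
 *
 * 'sz' and 'value' are placeholders used only for illustration.
 */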

#endif