/*
 * Internal header file for device mapper
 *
 * Copyright (C) 2001, 2002 Sistina Software
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef DM_INTERNAL_H
#define DM_INTERNAL_H

#include <linux/fs.h>
#include <linux/device-mapper.h>
#include <linux/list.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>

#include "dm-stats.h"

/*
 * Suspend feature flags
 */
#define DM_SUSPEND_LOCKFS_FLAG		(1 << 0)
#define DM_SUSPEND_NOFLUSH_FLAG		(1 << 1)

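/*
 * Illustrative sketch (not part of this header): the suspend flags are
 * intended to be OR'd together and passed as the suspend_flags argument
 * of dm_suspend(), declared in <linux/device-mapper.h>.
 *
 *	r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG | DM_SUSPEND_NOFLUSH_FLAG);
 *	if (r)
 *		DMERR("suspend failed: %d", r);
 */
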
/*
 * Status feature flags
 */
#define DM_STATUS_NOFLUSH_FLAG		(1 << 0)

/*
 * Type of table and mapped_device's mempool
 */
#define DM_TYPE_NONE		0
#define DM_TYPE_BIO_BASED	1
#define DM_TYPE_REQUEST_BASED	2

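/*
 * Illustrative sketch (not part of this header): the table type selects
 * which mempools a mapped_device allocates and how I/O is mapped; code
 * that needs to branch on it might look like:
 *
 *	unsigned type = dm_table_get_type(t);
 *
 *	if (type == DM_TYPE_REQUEST_BASED)
 *		pr_debug("table is request-based\n");
 *	else if (type == DM_TYPE_BIO_BASED)
 *		pr_debug("table is bio-based\n");
 */
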
/*
 * List of devices that a metadevice uses and should open/close.
 */
struct dm_dev_internal {
	struct list_head list;
	atomic_t count;
	struct dm_dev dm_dev;
};

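/*
 * Illustrative sketch (not part of this header): entries of this type are
 * chained on the list returned by dm_table_get_devices(), so walking a
 * table's underlying devices might look like:
 *
 *	struct dm_dev_internal *dd;
 *
 *	list_for_each_entry(dd, dm_table_get_devices(t), list)
 *		pr_debug("uses device %s\n", dd->dm_dev.name);
 */
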
struct dm_table;
struct dm_md_mempools;

/*-----------------------------------------------------------------
 * Internal table functions.
 *---------------------------------------------------------------*/
void dm_table_destroy(struct dm_table *t);
void dm_table_event_callback(struct dm_table *t,
			     void (*fn)(void *), void *context);
struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index);
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector);
bool dm_table_has_no_data_devices(struct dm_table *table);
int dm_calculate_queue_limits(struct dm_table *table,
			      struct queue_limits *limits);
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
			       struct queue_limits *limits);
struct list_head *dm_table_get_devices(struct dm_table *t);
void dm_table_presuspend_targets(struct dm_table *t);
void dm_table_postsuspend_targets(struct dm_table *t);
int dm_table_resume_targets(struct dm_table *t);
int dm_table_any_congested(struct dm_table *t, int bdi_bits);
int dm_table_any_busy_target(struct dm_table *t);
unsigned dm_table_get_type(struct dm_table *t);
struct target_type *dm_table_get_immutable_target_type(struct dm_table *t);
bool dm_table_request_based(struct dm_table *t);
bool dm_table_supports_discards(struct dm_table *t);
int dm_table_alloc_md_mempools(struct dm_table *t);
void dm_table_free_md_mempools(struct dm_table *t);
struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);

int dm_queue_merge_is_compulsory(struct request_queue *q);

void dm_lock_md_type(struct mapped_device *md);
void dm_unlock_md_type(struct mapped_device *md);
void dm_set_md_type(struct mapped_device *md, unsigned type);
unsigned dm_get_md_type(struct mapped_device *md);
struct target_type *dm_get_immutable_target_type(struct mapped_device *md);

int dm_setup_md_queue(struct mapped_device *md);

/*
 * To check the return value from dm_table_find_target().
 */
#define dm_target_is_valid(t) ((t)->table)

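/*
 * Illustrative sketch (not part of this header): dm_table_find_target()
 * does not return NULL for an out-of-range sector, so DM core validates
 * the returned pointer with this macro rather than comparing against NULL:
 *
 *	struct dm_target *ti = dm_table_find_target(map, sector);
 *
 *	if (!dm_target_is_valid(ti))
 *		return -EIO;
 */
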
/*
 * To check whether the target type is bio-based or not (request-based).
 */
#define dm_target_bio_based(t) ((t)->type->map != NULL)

/*
 * To check whether the target type is request-based or not (bio-based).
 */
#define dm_target_request_based(t) ((t)->type->map_rq != NULL)

/*
 * To check whether the target type is a hybrid (capable of being
 * either request-based or bio-based).
 */
#define dm_target_hybrid(t) (dm_target_bio_based(t) && dm_target_request_based(t))

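/*
 * Illustrative sketch (not part of this header): these macros classify a
 * target by which map hooks its target_type provides, so deciding how a
 * table must be driven might look like:
 *
 *	if (dm_target_hybrid(tgt))
 *		pr_debug("%s can be bio- or request-based\n", tgt->type->name);
 *	else if (dm_target_request_based(tgt))
 *		pr_debug("%s is request-based\n", tgt->type->name);
 *	else
 *		pr_debug("%s is bio-based\n", tgt->type->name);
 */
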
/*-----------------------------------------------------------------
 * A registry of target types.
 *---------------------------------------------------------------*/
int dm_target_init(void);
void dm_target_exit(void);
struct target_type *dm_get_target_type(const char *name);
void dm_put_target_type(struct target_type *tt);
int dm_target_iterate(void (*iter_func)(struct target_type *tt,
					void *param), void *param);

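/*
 * Illustrative sketch (not part of this header): dm_target_iterate() calls
 * iter_func once per registered target type, passing param through.
 * A hypothetical callback that merely counts the registered types:
 *
 *	static void count_target_type(struct target_type *tt, void *param)
 *	{
 *		(*(unsigned *)param)++;
 *	}
 *
 *	unsigned count = 0;
 *	dm_target_iterate(count_target_type, &count);
 */
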
int dm_split_args(int *argc, char ***argvp, char *input);

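/*
 * Illustrative sketch (not part of this header): dm_split_args() splits a
 * writable string into an argv-style vector.  The vector is allocated for
 * the caller, who frees it with kfree() when done; the strings themselves
 * point into the modified input buffer:
 *
 *	int argc, r;
 *	char **argv;
 *
 *	r = dm_split_args(&argc, &argv, input);
 *	if (r)
 *		return r;
 *	// ... parse argv[0..argc-1] ...
 *	kfree(argv);
 */
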
/*
 * Is this mapped_device being deleted?
 */
int dm_deleting_md(struct mapped_device *md);

/*
 * Is this mapped_device suspended?
 */
int dm_suspended_md(struct mapped_device *md);

/*
 * Test if the device is scheduled for deferred remove.
 */
int dm_test_deferred_remove_flag(struct mapped_device *md);

/*
 * Try to remove devices marked for deferred removal.
 */
void dm_deferred_remove(void);

/*
 * The device-mapper can be driven through one of two interfaces: ioctl or
 * filesystem, depending on which patch you have applied.
 */
int dm_interface_init(void);
void dm_interface_exit(void);

/*
 * sysfs interface
 */
int dm_sysfs_init(struct mapped_device *md);
void dm_sysfs_exit(struct mapped_device *md);
struct kobject *dm_kobject(struct mapped_device *md);
struct mapped_device *dm_get_from_kobject(struct kobject *kobj);

/*
 * Targets for linear and striped mappings
 */
int dm_linear_init(void);
void dm_linear_exit(void);

int dm_stripe_init(void);
void dm_stripe_exit(void);

/*
 * mapped_device operations
 */
void dm_destroy(struct mapped_device *md);
void dm_destroy_immediate(struct mapped_device *md);
int dm_open_count(struct mapped_device *md);
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred);
int dm_cancel_deferred_remove(struct mapped_device *md);
int dm_request_based(struct mapped_device *md);
sector_t dm_get_size(struct mapped_device *md);
struct dm_stats *dm_get_stats(struct mapped_device *md);

int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
		      unsigned cookie);

void dm_internal_suspend(struct mapped_device *md);
void dm_internal_resume(struct mapped_device *md);

int dm_io_init(void);
void dm_io_exit(void);

int dm_kcopyd_init(void);
void dm_kcopyd_exit(void);

/*
 * Mempool operations
 */
struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size);
void dm_free_md_mempools(struct dm_md_mempools *pools);

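/*
 * Illustrative sketch (not part of this header): the mempools are allocated
 * to match the table type and freed as a unit; the local names below are
 * hypothetical:
 *
 *	struct dm_md_mempools *pools;
 *
 *	pools = dm_alloc_md_mempools(DM_TYPE_BIO_BASED, integrity_supported,
 *				     per_bio_data_size);
 *	if (!pools)
 *		return -ENOMEM;
 *	// ... hand pools over to the mapped_device ...
 *	dm_free_md_mempools(pools);
 */
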
/*
 * Helpers that are used by DM core
 */
unsigned dm_get_reserved_bio_based_ios(void);
unsigned dm_get_reserved_rq_based_ios(void);

static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
{
	return !maxlen || strlen(result) + 1 >= maxlen;
}

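/*
 * Illustrative sketch (not part of this header): a message handler that
 * writes its reply into the result buffer can use the helper above to
 * detect that the reply did not fit:
 *
 *	scnprintf(result, maxlen, "%llu", (unsigned long long)value);
 *	if (dm_message_test_buffer_overflow(result, maxlen))
 *		DMWARN("message reply was truncated");
 */
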
#endif