xref: /openbmc/linux/drivers/md/dm-target.c (revision b362c733)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2001 Sistina Software (UK) Limited
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/bio.h>
#include <linux/dax.h>

#define DM_MSG_PREFIX "target"

static LIST_HEAD(_targets);
static DECLARE_RWSEM(_lock);

static inline struct target_type *__find_target_type(const char *name)
{
	struct target_type *tt;

	list_for_each_entry(tt, &_targets, list)
		if (!strcmp(name, tt->name))
			return tt;

	return NULL;
}

/*
 * Look up a registered target type by name and take a reference on the
 * module that provides it.
 */
static struct target_type *get_target_type(const char *name)
{
	struct target_type *tt;

	down_read(&_lock);

	tt = __find_target_type(name);
	if (tt && !try_module_get(tt->module))
		tt = NULL;

	up_read(&_lock);
	return tt;
}

static void load_module(const char *name)
{
	request_module("dm-%s", name);
}

struct target_type *dm_get_target_type(const char *name)
{
	struct target_type *tt = get_target_type(name);

	if (!tt) {
		load_module(name);
		tt = get_target_type(name);
	}

	return tt;
}

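/*
 * Hypothetical usage sketch: callers such as the table-loading code resolve
 * a target type by name and later drop the module reference again.  If the
 * type is not yet registered, dm_get_target_type() tries to autoload a
 * module named "dm-<name>" and retries the lookup.  Function and variable
 * names below are illustrative, not taken from the kernel tree.
 */
#if 0
static int example_resolve_target(const char *name)
{
	struct target_type *tt = dm_get_target_type(name);

	if (!tt)
		return -EINVAL;	/* not built in and no "dm-<name>" module */

	/* ... use tt, e.g. attach it to a struct dm_target ... */

	dm_put_target_type(tt);
	return 0;
}
#endif
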
/*
 * Drop the module reference taken by dm_get_target_type().
 */
void dm_put_target_type(struct target_type *tt)
{
	down_read(&_lock);
	module_put(tt->module);
	up_read(&_lock);
}

int dm_target_iterate(void (*iter_func)(struct target_type *tt,
					void *param), void *param)
{
	struct target_type *tt;

	down_read(&_lock);
	list_for_each_entry(tt, &_targets, list)
		iter_func(tt, param);
	up_read(&_lock);

	return 0;
}

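/*
 * Hypothetical usage sketch: dm_target_iterate() walks every registered
 * target type under the registry lock, e.g. so callers can report names
 * and versions.  The callback and counter below are illustrative only.
 */
#if 0
static void example_show_target(struct target_type *tt, void *param)
{
	unsigned int *count = param;

	DMINFO("target %s v%u.%u.%u", tt->name,
	       tt->version[0], tt->version[1], tt->version[2]);
	(*count)++;
}

static void example_list_targets(void)
{
	unsigned int count = 0;

	dm_target_iterate(example_show_target, &count);
	DMINFO("%u target types registered", count);
}
#endif
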
int dm_register_target(struct target_type *tt)
{
	int rv = 0;

	down_write(&_lock);
	if (__find_target_type(tt->name)) {
		DMERR("%s: '%s' target already registered",
		      __func__, tt->name);
		rv = -EEXIST;
	} else {
		list_add(&tt->list, &_targets);
	}
	up_write(&_lock);

	return rv;
}
EXPORT_SYMBOL(dm_register_target);

void dm_unregister_target(struct target_type *tt)
{
	down_write(&_lock);
	if (!__find_target_type(tt->name)) {
		DMCRIT("Unregistering unrecognised target: %s", tt->name);
		BUG();
	}

	list_del(&tt->list);

	up_write(&_lock);
}
EXPORT_SYMBOL(dm_unregister_target);

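/*
 * Hypothetical registration sketch: a target module typically fills in a
 * struct target_type with .module = THIS_MODULE and registers it from its
 * init hook, mirroring the built-in "error" target below.  All names here
 * are illustrative; a real target would implement a useful ->map().
 */
#if 0
static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	return 0;
}

static int example_map(struct dm_target *ti, struct bio *bio)
{
	return DM_MAPIO_KILL;	/* fail all I/O, like dm-error */
}

static struct target_type example_target = {
	.name    = "example",
	.version = {1, 0, 0},
	.module  = THIS_MODULE,
	.ctr     = example_ctr,
	.map     = example_map,
};

static int __init example_init(void)
{
	return dm_register_target(&example_target);
}

static void __exit example_exit(void)
{
	dm_unregister_target(&example_target);
}

module_init(example_init);
module_exit(example_exit);
#endif
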
/*
 * io-err: always fails an io, useful for bringing
 * up LVs that have holes in them.
 */
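/*
 * For illustration (tooling syntax may vary): a table line such as
 * "0 2048 error" maps the first 2048 sectors of a device to this target,
 * and can be loaded with something like "dmsetup create <name>".
 */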
static int io_err_ctr(struct dm_target *tt, unsigned int argc, char **args)
{
	/*
	 * Return error for discards instead of -EOPNOTSUPP
	 */
	tt->num_discard_bios = 1;
	tt->discards_supported = true;

	return 0;
}

static void io_err_dtr(struct dm_target *tt)
{
	/* empty */
}

static int io_err_map(struct dm_target *tt, struct bio *bio)
{
	return DM_MAPIO_KILL;
}

static int io_err_clone_and_map_rq(struct dm_target *ti, struct request *rq,
				   union map_info *map_context,
				   struct request **clone)
{
	return DM_MAPIO_KILL;
}

static void io_err_release_clone_rq(struct request *clone,
				    union map_info *map_context)
{
}

/*
 * Advertise discard support so that discard bios reach the target and are
 * failed by it, rather than being rejected with -EOPNOTSUPP higher up.
 */
static void io_err_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	limits->max_discard_sectors = UINT_MAX;
	limits->max_hw_discard_sectors = UINT_MAX;
	limits->discard_granularity = 512;
}

static long io_err_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
		long nr_pages, enum dax_access_mode mode, void **kaddr,
		pfn_t *pfn)
{
	return -EIO;
}

static struct target_type error_target = {
	.name = "error",
	.version = {1, 6, 0},
	.features = DM_TARGET_WILDCARD,
	.ctr  = io_err_ctr,
	.dtr  = io_err_dtr,
	.map  = io_err_map,
	.clone_and_map_rq = io_err_clone_and_map_rq,
	.release_clone_rq = io_err_release_clone_rq,
	.io_hints = io_err_io_hints,
	.direct_access = io_err_dax_direct_access,
};

int __init dm_target_init(void)
{
	return dm_register_target(&error_target);
}

void dm_target_exit(void)
{
	dm_unregister_target(&error_target);
}