// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Device Mapper Uevent Support (dm-uevent)
 *
 * Copyright IBM Corporation, 2007
 *	Author: Mike Anderson <andmike@linux.vnet.ibm.com>
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/kobject.h>
#include <linux/dm-ioctl.h>
#include <linux/export.h>

#include "dm.h"
#include "dm-uevent.h"

#define DM_MSG_PREFIX "uevent"

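/*
 * Table mapping a dm_uevent_type to the kobject action and the
 * DM_ACTION string sent to user space.  dm_path_uevent() indexes this
 * table directly by enum value, so the order of the entries must match
 * enum dm_uevent_type in dm-uevent.h.
 */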
static const struct {
	enum dm_uevent_type type;
	enum kobject_action action;
	char *name;
} _dm_uevent_type_names[] = {
	{DM_UEVENT_PATH_FAILED, KOBJ_CHANGE, "PATH_FAILED"},
	{DM_UEVENT_PATH_REINSTATED, KOBJ_CHANGE, "PATH_REINSTATED"},
};

static struct kmem_cache *_dm_event_cache;

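/*
 * One queued event.  @ku_env accumulates the uevent environment
 * strings, @elist links the event on the mapped device's uevent list,
 * and @name/@uuid are filled in at send time by dm_send_uevents().
 */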
struct dm_uevent {
	struct mapped_device *md;
	enum kobject_action action;
	struct kobj_uevent_env ku_env;
	struct list_head elist;
	char name[DM_NAME_LEN];
	char uuid[DM_UUID_LEN];
};

static void dm_uevent_free(struct dm_uevent *event)
{
	kmem_cache_free(_dm_event_cache, event);
}

static struct dm_uevent *dm_uevent_alloc(struct mapped_device *md)
{
	struct dm_uevent *event;

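	/*
	 * GFP_ATOMIC: path events can be built from contexts that must
	 * not sleep, e.g. while a target holds a spinlock.
	 */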
	event = kmem_cache_zalloc(_dm_event_cache, GFP_ATOMIC);
	if (!event)
		return NULL;

	INIT_LIST_HEAD(&event->elist);
	event->md = md;

	return event;
}

static struct dm_uevent *dm_build_path_uevent(struct mapped_device *md,
					      struct dm_target *ti,
					      enum kobject_action action,
					      const char *dm_action,
					      const char *path,
					      unsigned int nr_valid_paths)
{
	struct dm_uevent *event;

	event = dm_uevent_alloc(md);
	if (!event) {
		DMERR("%s: dm_uevent_alloc() failed", __func__);
		goto err_nomem;
	}

	event->action = action;

	if (add_uevent_var(&event->ku_env, "DM_TARGET=%s", ti->type->name)) {
		DMERR("%s: add_uevent_var() for DM_TARGET failed",
		      __func__);
		goto err_add;
	}

	if (add_uevent_var(&event->ku_env, "DM_ACTION=%s", dm_action)) {
		DMERR("%s: add_uevent_var() for DM_ACTION failed",
		      __func__);
		goto err_add;
	}

	if (add_uevent_var(&event->ku_env, "DM_SEQNUM=%u",
			   dm_next_uevent_seq(md))) {
		DMERR("%s: add_uevent_var() for DM_SEQNUM failed",
		      __func__);
		goto err_add;
	}

	if (add_uevent_var(&event->ku_env, "DM_PATH=%s", path)) {
		DMERR("%s: add_uevent_var() for DM_PATH failed", __func__);
		goto err_add;
	}

	if (add_uevent_var(&event->ku_env, "DM_NR_VALID_PATHS=%u",
			   nr_valid_paths)) {
		DMERR("%s: add_uevent_var() for DM_NR_VALID_PATHS failed",
		      __func__);
		goto err_add;
	}

	return event;

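/*
 * All failures below collapse to -ENOMEM.  For the err_add path this is
 * a simplification, but exhausting the env buffer is the usual way
 * add_uevent_var() fails.
 */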
err_add:
	dm_uevent_free(event);
err_nomem:
	return ERR_PTR(-ENOMEM);
}

/**
 * dm_send_uevents - send uevents for given list
 *
 * @events:	list of events to send
 * @kobj:	kobject generating event
 *
 */
void dm_send_uevents(struct list_head *events, struct kobject *kobj)
{
	int r;
	struct dm_uevent *event, *next;

	list_for_each_entry_safe(event, next, events, elist) {
		list_del_init(&event->elist);

		/*
		 * When a device is being removed this copy fails and we
		 * discard these unsent events.
		 */
		if (dm_copy_name_and_uuid(event->md, event->name,
					  event->uuid)) {
			DMINFO("%s: skipping sending uevent for lost device",
			       __func__);
			goto uevent_free;
		}

		if (add_uevent_var(&event->ku_env, "DM_NAME=%s", event->name)) {
			DMERR("%s: add_uevent_var() for DM_NAME failed",
			      __func__);
			goto uevent_free;
		}

		if (add_uevent_var(&event->ku_env, "DM_UUID=%s", event->uuid)) {
			DMERR("%s: add_uevent_var() for DM_UUID failed",
			      __func__);
			goto uevent_free;
		}

		r = kobject_uevent_env(kobj, event->action, event->ku_env.envp);
		if (r)
			DMERR("%s: kobject_uevent_env failed", __func__);
uevent_free:
		dm_uevent_free(event);
	}
}
EXPORT_SYMBOL_GPL(dm_send_uevents);
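
/*
 * Sketch (for reference, not part of this file): the dm core drains a
 * device's queued events along these lines from its event callback.
 * Field and function names follow drivers/md/dm.c and are illustrative
 * here, not guaranteed to match every kernel version:
 *
 *	LIST_HEAD(uevents);
 *
 *	spin_lock_irqsave(&md->uevent_lock, flags);
 *	list_splice_init(&md->uevent_list, &uevents);
 *	spin_unlock_irqrestore(&md->uevent_lock, flags);
 *
 *	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
 */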

/**
 * dm_path_uevent - called to create a new path event and queue it
 *
 * @event_type:	path event type enum
 * @ti:			pointer to a dm_target
 * @path:		string containing pathname
 * @nr_valid_paths:	number of valid paths remaining
 *
 */
void dm_path_uevent(enum dm_uevent_type event_type, struct dm_target *ti,
		   const char *path, unsigned int nr_valid_paths)
{
	struct mapped_device *md = dm_table_get_md(ti->table);
	struct dm_uevent *event;

	if (event_type >= ARRAY_SIZE(_dm_uevent_type_names)) {
		DMERR("%s: Invalid event_type %d", __func__, event_type);
		return;
	}

	event = dm_build_path_uevent(md, ti,
				     _dm_uevent_type_names[event_type].action,
				     _dm_uevent_type_names[event_type].name,
				     path, nr_valid_paths);
	if (IS_ERR(event))
		return;

	dm_uevent_add(md, &event->elist);
}
EXPORT_SYMBOL_GPL(dm_path_uevent);
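
/*
 * Sketch: how a multipath-style target might report a failed path.
 * 'ti', 'path_name' and 'valid_paths' are illustrative names, not
 * taken from a real target:
 *
 *	dm_path_uevent(DM_UEVENT_PATH_FAILED, ti, path_name, valid_paths);
 *
 * The event is only queued here; it reaches user space when the dm
 * core later calls dm_send_uevents() for the mapped device.
 */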

int dm_uevent_init(void)
{
	_dm_event_cache = KMEM_CACHE(dm_uevent, 0);
	if (!_dm_event_cache)
		return -ENOMEM;

	DMINFO("version 1.0.3");

	return 0;
}
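
/*
 * dm_uevent_init()/dm_uevent_exit() are expected to be called exactly
 * once from the dm core's module init and exit paths (see
 * drivers/md/dm.c).
 */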

void dm_uevent_exit(void)
{
	kmem_cache_destroy(_dm_event_cache);
}