// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 */

#include <net/genetlink.h>
#define CREATE_TRACE_POINTS
#include <trace/events/devlink.h>

#include "devl_internal.h"

EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_hwmsg);
EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_hwerr);
EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_trap_report);

DEFINE_XARRAY_FLAGS(devlinks, XA_FLAGS_ALLOC);

void *devlink_priv(struct devlink *devlink)
{
	return &devlink->priv;
}
EXPORT_SYMBOL_GPL(devlink_priv);

struct devlink *priv_to_devlink(void *priv)
{
	return container_of(priv, struct devlink, priv);
}
EXPORT_SYMBOL_GPL(priv_to_devlink);

struct device *devlink_to_dev(const struct devlink *devlink)
{
	return devlink->dev;
}
EXPORT_SYMBOL_GPL(devlink_to_dev);

struct net *devlink_net(const struct devlink *devlink)
{
	return read_pnet(&devlink->_net);
}
EXPORT_SYMBOL_GPL(devlink_net);

void devl_assert_locked(struct devlink *devlink)
{
	lockdep_assert_held(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devl_assert_locked);

#ifdef CONFIG_LOCKDEP
/* For use in conjunction with LOCKDEP only e.g. rcu_dereference_protected() */
bool devl_lock_is_held(struct devlink *devlink)
{
	return lockdep_is_held(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devl_lock_is_held);
#endif

void devl_lock(struct devlink *devlink)
{
	mutex_lock(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devl_lock);

int devl_trylock(struct devlink *devlink)
{
	return mutex_trylock(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devl_trylock);

void devl_unlock(struct devlink *devlink)
{
	mutex_unlock(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devl_unlock);
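
/*
 * Usage sketch (hypothetical driver code, not part of this file): the devl_*
 * lock helpers above let a driver hold the instance lock across a group of
 * locked devl_ API calls, for example when registering a port:
 *
 *	devl_lock(devlink);
 *	err = devl_port_register(devlink, &priv->devlink_port, port_index);
 *	devl_unlock(devlink);
 *
 * "priv" and "port_index" are illustrative driver-local names.
 */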

/**
 * devlink_try_get() - try to obtain a reference on a devlink instance
 * @devlink: instance to reference
 *
 * Obtain a reference on a devlink instance. A reference on a devlink instance
 * only implies that it's safe to take the instance lock. It does not imply
 * that the instance is registered; use devl_is_registered() after taking
 * the instance lock to check registration status.
 */
struct devlink *__must_check devlink_try_get(struct devlink *devlink)
{
	if (refcount_inc_not_zero(&devlink->refcount))
		return devlink;
	return NULL;
}

static void devlink_release(struct work_struct *work)
{
	struct devlink *devlink;

	devlink = container_of(to_rcu_work(work), struct devlink, rwork);

	mutex_destroy(&devlink->lock);
	lockdep_unregister_key(&devlink->lock_key);
	kfree(devlink);
}

void devlink_put(struct devlink *devlink)
{
	if (refcount_dec_and_test(&devlink->refcount))
		queue_rcu_work(system_wq, &devlink->rwork);
}
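
/*
 * Reference-counting sketch (illustrative, not part of this file): a caller
 * that looked up an instance under RCU pins it with devlink_try_get() before
 * dropping the RCU read lock, and releases it with devlink_put() when done:
 *
 *	rcu_read_lock();
 *	devlink = ...lookup...;
 *	if (devlink && !devlink_try_get(devlink))
 *		devlink = NULL;
 *	rcu_read_unlock();
 *	if (devlink) {
 *		devl_lock(devlink);
 *		if (devl_is_registered(devlink))
 *			...use the instance...;
 *		devl_unlock(devlink);
 *		devlink_put(devlink);
 *	}
 *
 * The final devlink_put() defers the actual kfree() to an RCU work item via
 * devlink_release() above.
 */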

struct devlink *devlinks_xa_find_get(struct net *net, unsigned long *indexp)
{
	struct devlink *devlink = NULL;

	rcu_read_lock();
retry:
	devlink = xa_find(&devlinks, indexp, ULONG_MAX, DEVLINK_REGISTERED);
	if (!devlink)
		goto unlock;

	if (!devlink_try_get(devlink))
		goto next;
	if (!net_eq(devlink_net(devlink), net)) {
		devlink_put(devlink);
		goto next;
	}
unlock:
	rcu_read_unlock();
	return devlink;

next:
	(*indexp)++;
	goto retry;
}
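
/*
 * devlinks_xa_find_get() is the building block for the iteration helper used
 * by devlink_pernet_pre_exit() below, devlinks_xa_for_each_registered_get(),
 * which walks all registered instances of a namespace while holding a
 * reference on each. Illustrative shape of such a walk:
 *
 *	unsigned long index;
 *	struct devlink *devlink;
 *
 *	devlinks_xa_for_each_registered_get(net, index, devlink) {
 *		...use devlink...;
 *		devlink_put(devlink);
 *	}
 */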

/**
 * devl_register - Register devlink instance
 * @devlink: devlink
 */
int devl_register(struct devlink *devlink)
{
	ASSERT_DEVLINK_NOT_REGISTERED(devlink);
	devl_assert_locked(devlink);

	xa_set_mark(&devlinks, devlink->index, DEVLINK_REGISTERED);
	devlink_notify_register(devlink);

	return 0;
}
EXPORT_SYMBOL_GPL(devl_register);

void devlink_register(struct devlink *devlink)
{
	devl_lock(devlink);
	devl_register(devlink);
	devl_unlock(devlink);
}
EXPORT_SYMBOL_GPL(devlink_register);
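
/*
 * Note the naming convention used throughout devlink: devl_register() expects
 * the caller to already hold the instance lock, while devlink_register() is
 * the convenience wrapper that takes and drops the lock itself. A driver that
 * has no other locked operations to batch can simply do (sketch):
 *
 *	devlink_register(devlink);	// typically at the end of probe
 */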

/**
 * devl_unregister - Unregister devlink instance
 * @devlink: devlink
 */
void devl_unregister(struct devlink *devlink)
{
	ASSERT_DEVLINK_REGISTERED(devlink);
	devl_assert_locked(devlink);

	devlink_notify_unregister(devlink);
	xa_clear_mark(&devlinks, devlink->index, DEVLINK_REGISTERED);
}
EXPORT_SYMBOL_GPL(devl_unregister);

void devlink_unregister(struct devlink *devlink)
{
	devl_lock(devlink);
	devl_unregister(devlink);
	devl_unlock(devlink);
}
EXPORT_SYMBOL_GPL(devlink_unregister);

/**
 * devlink_alloc_ns - Allocate new devlink instance resources
 * in specific namespace
 *
 * @ops: ops
 * @priv_size: size of user private data
 * @net: net namespace
 * @dev: parent device
 *
 * Allocate new devlink instance resources, including devlink index
 * and name.
 */
struct devlink *devlink_alloc_ns(const struct devlink_ops *ops,
				 size_t priv_size, struct net *net,
				 struct device *dev)
{
	struct devlink *devlink;
	static u32 last_id;
	int ret;

	WARN_ON(!ops || !dev);
	if (!devlink_reload_actions_valid(ops))
		return NULL;

	devlink = kzalloc(sizeof(*devlink) + priv_size, GFP_KERNEL);
	if (!devlink)
		return NULL;

	ret = xa_alloc_cyclic(&devlinks, &devlink->index, devlink, xa_limit_31b,
			      &last_id, GFP_KERNEL);
	if (ret < 0)
		goto err_xa_alloc;

	devlink->dev = dev;
	devlink->ops = ops;
	xa_init_flags(&devlink->ports, XA_FLAGS_ALLOC);
	xa_init_flags(&devlink->params, XA_FLAGS_ALLOC);
	xa_init_flags(&devlink->snapshot_ids, XA_FLAGS_ALLOC);
	write_pnet(&devlink->_net, net);
	INIT_LIST_HEAD(&devlink->rate_list);
	INIT_LIST_HEAD(&devlink->linecard_list);
	INIT_LIST_HEAD(&devlink->sb_list);
	INIT_LIST_HEAD_RCU(&devlink->dpipe_table_list);
	INIT_LIST_HEAD(&devlink->resource_list);
	INIT_LIST_HEAD(&devlink->region_list);
	INIT_LIST_HEAD(&devlink->reporter_list);
	INIT_LIST_HEAD(&devlink->trap_list);
	INIT_LIST_HEAD(&devlink->trap_group_list);
	INIT_LIST_HEAD(&devlink->trap_policer_list);
	INIT_RCU_WORK(&devlink->rwork, devlink_release);
	lockdep_register_key(&devlink->lock_key);
	mutex_init(&devlink->lock);
	lockdep_set_class(&devlink->lock, &devlink->lock_key);
	refcount_set(&devlink->refcount, 1);

	return devlink;

err_xa_alloc:
	kfree(devlink);
	return NULL;
}
EXPORT_SYMBOL_GPL(devlink_alloc_ns);
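
/*
 * Allocation sketch (hypothetical driver code, not part of this file): most
 * drivers go through the devlink_alloc() wrapper from <net/devlink.h>, which
 * passes the initial network namespace to devlink_alloc_ns(), and then reach
 * their private area with devlink_priv():
 *
 *	devlink = devlink_alloc(&my_devlink_ops, sizeof(struct my_priv), dev);
 *	if (!devlink)
 *		return -ENOMEM;
 *	priv = devlink_priv(devlink);
 *
 * "my_devlink_ops" and "struct my_priv" are illustrative names.
 */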

/**
 * devlink_free - Free devlink instance resources
 *
 * @devlink: devlink
 */
void devlink_free(struct devlink *devlink)
{
	ASSERT_DEVLINK_NOT_REGISTERED(devlink);

	WARN_ON(!list_empty(&devlink->trap_policer_list));
	WARN_ON(!list_empty(&devlink->trap_group_list));
	WARN_ON(!list_empty(&devlink->trap_list));
	WARN_ON(!list_empty(&devlink->reporter_list));
	WARN_ON(!list_empty(&devlink->region_list));
	WARN_ON(!list_empty(&devlink->resource_list));
	WARN_ON(!list_empty(&devlink->dpipe_table_list));
	WARN_ON(!list_empty(&devlink->sb_list));
	WARN_ON(!list_empty(&devlink->rate_list));
	WARN_ON(!list_empty(&devlink->linecard_list));
	WARN_ON(!xa_empty(&devlink->ports));

	xa_destroy(&devlink->snapshot_ids);
	xa_destroy(&devlink->params);
	xa_destroy(&devlink->ports);

	xa_erase(&devlinks, devlink->index);

	devlink_put(devlink);
}
EXPORT_SYMBOL_GPL(devlink_free);
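
/*
 * Teardown sketch (hypothetical driver code, not part of this file): the
 * ASSERT_DEVLINK_NOT_REGISTERED() and WARN_ON() checks above mean the
 * instance must be unregistered and its sub-objects removed before it is
 * freed:
 *
 *	devlink_unregister(devlink);
 *	...unregister ports, traps, regions, etc....;
 *	devlink_free(devlink);
 */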

static void __net_exit devlink_pernet_pre_exit(struct net *net)
{
	struct devlink *devlink;
	u32 actions_performed;
	unsigned long index;
	int err;

	/* In case the network namespace is being destroyed, reload
	 * all devlink instances from this namespace into init_net.
	 */
	devlinks_xa_for_each_registered_get(net, index, devlink) {
		devl_lock(devlink);
		err = 0;
		if (devl_is_registered(devlink))
			err = devlink_reload(devlink, &init_net,
					     DEVLINK_RELOAD_ACTION_DRIVER_REINIT,
					     DEVLINK_RELOAD_LIMIT_UNSPEC,
					     &actions_performed, NULL);
		devl_unlock(devlink);
		devlink_put(devlink);
		if (err && err != -EOPNOTSUPP)
			pr_warn("Failed to reload devlink instance into init_net\n");
	}
}

static struct pernet_operations devlink_pernet_ops __net_initdata = {
	.pre_exit = devlink_pernet_pre_exit,
};

static struct notifier_block devlink_port_netdevice_nb = {
	.notifier_call = devlink_port_netdevice_event,
};

static int __init devlink_init(void)
{
	int err;

	err = register_pernet_subsys(&devlink_pernet_ops);
	if (err)
		goto out;
	err = genl_register_family(&devlink_nl_family);
	if (err)
		goto out_unreg_pernet_subsys;
	err = register_netdevice_notifier(&devlink_port_netdevice_nb);
	if (!err)
		return 0;

	genl_unregister_family(&devlink_nl_family);

out_unreg_pernet_subsys:
	unregister_pernet_subsys(&devlink_pernet_ops);
out:
	WARN_ON(err);
	return err;
}

subsys_initcall(devlink_init);