xref: /openbmc/linux/net/mctp/device.c (revision 6f6249a599e52e1a5f0b632f8edff733cfa76450)
// SPDX-License-Identifier: GPL-2.0
/*
 * Management Component Transport Protocol (MCTP) - device implementation.
 *
 * Copyright (c) 2021 Code Construct
 * Copyright (c) 2021 Google
 */

#include <linux/if_arp.h>
#include <linux/if_link.h>
#include <linux/mctp.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>

#include <net/addrconf.h>
#include <net/netlink.h>
#include <net/mctp.h>
#include <net/mctpdevice.h>
#include <net/sock.h>

struct mctp_dump_cb {
	unsigned long ifindex;
	size_t a_idx;
};

/* Unlocked: caller must hold rcu_read_lock.
 * Returns the mctp_dev with its refcount incremented, or NULL if no
 * mctp_dev is set for the device.
 */
struct mctp_dev *__mctp_dev_get(const struct net_device *dev)
{
	struct mctp_dev *mdev = rcu_dereference(dev->mctp_ptr);

	/* RCU guarantees that any mdev is still live.
	 * Zero refcount implies a pending free, return NULL.
	 */
	if (mdev)
		if (!refcount_inc_not_zero(&mdev->refs))
			return NULL;
	return mdev;
}

/* Returned mctp_dev does not have its refcount incremented. The returned
 * pointer remains live while rtnl_lock is held, as that prevents
 * mctp_unregister() from running.
 */
struct mctp_dev *mctp_dev_get_rtnl(const struct net_device *dev)
{
	return rtnl_dereference(dev->mctp_ptr);
}

static int mctp_addrinfo_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
		+ nla_total_size(1) // IFA_LOCAL
		+ nla_total_size(1) // IFA_ADDRESS
		;
}

/* flag should be NLM_F_MULTI for dump calls */
static int mctp_fill_addrinfo(struct sk_buff *skb,
			      struct mctp_dev *mdev, mctp_eid_t eid,
			      int msg_type, u32 portid, u32 seq, int flag)
{
	struct ifaddrmsg *hdr;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, portid, seq,
			msg_type, sizeof(*hdr), flag);
	if (!nlh)
		return -EMSGSIZE;

	hdr = nlmsg_data(nlh);
	hdr->ifa_family = AF_MCTP;
	hdr->ifa_prefixlen = 0;
	hdr->ifa_flags = 0;
	hdr->ifa_scope = 0;
	hdr->ifa_index = mdev->dev->ifindex;

	if (nla_put_u8(skb, IFA_LOCAL, eid))
		goto cancel;

	if (nla_put_u8(skb, IFA_ADDRESS, eid))
		goto cancel;

	nlmsg_end(skb, nlh);

	return 0;

cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

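/* Dump all local EIDs for a single device, resuming from the address index
 * saved in the dump callback context.
 */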
static int mctp_dump_dev_addrinfo(struct mctp_dev *mdev, struct sk_buff *skb,
				  struct netlink_callback *cb)
{
	struct mctp_dump_cb *mcb = (void *)cb->ctx;
	u32 portid, seq;
	int rc = 0;

	portid = NETLINK_CB(cb->skb).portid;
	seq = cb->nlh->nlmsg_seq;
	for (; mcb->a_idx < mdev->num_addrs; mcb->a_idx++) {
		rc = mctp_fill_addrinfo(skb, mdev, mdev->addrs[mcb->a_idx],
					RTM_NEWADDR, portid, seq, NLM_F_MULTI);
		if (rc < 0)
			break;
	}

	return rc;
}

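/* RTM_GETADDR dump handler: walk all netdevs (optionally filtered by
 * ifindex) and emit their local EIDs.
 */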
static int mctp_dump_addrinfo(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct mctp_dump_cb *mcb = (void *)cb->ctx;
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	struct ifaddrmsg *hdr;
	struct mctp_dev *mdev;
	int ifindex = 0, rc;

	/* Filter by ifindex if a header is provided */
	if (cb->nlh->nlmsg_len >= nlmsg_msg_size(sizeof(*hdr))) {
		hdr = nlmsg_data(cb->nlh);
		ifindex = hdr->ifa_index;
	} else {
		if (cb->strict_check) {
			NL_SET_ERR_MSG(cb->extack, "mctp: Invalid header for addr dump request");
			return -EINVAL;
		}
	}

	rcu_read_lock();
	for_each_netdev_dump(net, dev, mcb->ifindex) {
		if (ifindex && ifindex != dev->ifindex)
			continue;
		mdev = __mctp_dev_get(dev);
		if (!mdev)
			continue;
		rc = mctp_dump_dev_addrinfo(mdev, skb, cb);
		mctp_dev_put(mdev);
		if (rc < 0)
			break;
		mcb->a_idx = 0;
	}
	rcu_read_unlock();

	return skb->len;
}

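/* Build an RTM_NEWADDR/RTM_DELADDR message for @eid and notify
 * RTNLGRP_MCTP_IFADDR listeners, reporting an error on the group if the
 * message cannot be built.
 */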
static void mctp_addr_notify(struct mctp_dev *mdev, mctp_eid_t eid, int msg_type,
			     struct sk_buff *req_skb, struct nlmsghdr *req_nlh)
{
	u32 portid = NETLINK_CB(req_skb).portid;
	struct net *net = dev_net(mdev->dev);
	struct sk_buff *skb;
	int rc = -ENOBUFS;

	skb = nlmsg_new(mctp_addrinfo_size(), GFP_KERNEL);
	if (!skb)
		goto out;

	rc = mctp_fill_addrinfo(skb, mdev, eid, msg_type,
				portid, req_nlh->nlmsg_seq, 0);
	if (rc < 0) {
		WARN_ON_ONCE(rc == -EMSGSIZE);
		goto out;
	}

	rtnl_notify(skb, net, portid, RTNLGRP_MCTP_IFADDR, req_nlh, GFP_KERNEL);
	return;
out:
	kfree_skb(skb);
	rtnl_set_sk_err(net, RTNLGRP_MCTP_IFADDR, rc);
}

static const struct nla_policy ifa_mctp_policy[IFA_MAX + 1] = {
	[IFA_ADDRESS]		= { .type = NLA_U8 },
	[IFA_LOCAL]		= { .type = NLA_U8 },
};

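/* RTM_NEWADDR handler: append a local EID to the device's address array and
 * install the corresponding local route.
 */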
static int mctp_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[IFA_MAX + 1];
	struct net_device *dev;
	struct mctp_addr *addr;
	struct mctp_dev *mdev;
	struct ifaddrmsg *ifm;
	unsigned long flags;
	u8 *tmp_addrs;
	int rc;

	rc = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_mctp_policy,
			 extack);
	if (rc < 0)
		return rc;

	ifm = nlmsg_data(nlh);

	if (tb[IFA_LOCAL])
		addr = nla_data(tb[IFA_LOCAL]);
	else if (tb[IFA_ADDRESS])
		addr = nla_data(tb[IFA_ADDRESS]);
	else
		return -EINVAL;

	/* find device */
	dev = __dev_get_by_index(net, ifm->ifa_index);
	if (!dev)
		return -ENODEV;

	mdev = mctp_dev_get_rtnl(dev);
	if (!mdev)
		return -ENODEV;

	if (!mctp_address_unicast(addr->s_addr))
		return -EINVAL;

	/* Prevent duplicates. Under RTNL so don't need to lock for reading */
	if (memchr(mdev->addrs, addr->s_addr, mdev->num_addrs))
		return -EEXIST;

	tmp_addrs = kmalloc(mdev->num_addrs + 1, GFP_KERNEL);
	if (!tmp_addrs)
		return -ENOMEM;
	memcpy(tmp_addrs, mdev->addrs, mdev->num_addrs);
	tmp_addrs[mdev->num_addrs] = addr->s_addr;

	/* Lock to write */
	spin_lock_irqsave(&mdev->addrs_lock, flags);
	mdev->num_addrs++;
	swap(mdev->addrs, tmp_addrs);
	spin_unlock_irqrestore(&mdev->addrs_lock, flags);

	kfree(tmp_addrs);

	mctp_addr_notify(mdev, addr->s_addr, RTM_NEWADDR, skb, nlh);
	mctp_route_add_local(mdev, addr->s_addr);

	return 0;
}

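/* RTM_DELADDR handler: remove a local EID and its local route from the
 * device.
 */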
static int mctp_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[IFA_MAX + 1];
	struct net_device *dev;
	struct mctp_addr *addr;
	struct mctp_dev *mdev;
	struct ifaddrmsg *ifm;
	unsigned long flags;
	u8 *pos;
	int rc;

	rc = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_mctp_policy,
			 extack);
	if (rc < 0)
		return rc;

	ifm = nlmsg_data(nlh);

	if (tb[IFA_LOCAL])
		addr = nla_data(tb[IFA_LOCAL]);
	else if (tb[IFA_ADDRESS])
		addr = nla_data(tb[IFA_ADDRESS]);
	else
		return -EINVAL;

	/* find device */
	dev = __dev_get_by_index(net, ifm->ifa_index);
	if (!dev)
		return -ENODEV;

	mdev = mctp_dev_get_rtnl(dev);
	if (!mdev)
		return -ENODEV;

	pos = memchr(mdev->addrs, addr->s_addr, mdev->num_addrs);
	if (!pos)
		return -ENOENT;

	rc = mctp_route_remove_local(mdev, addr->s_addr);
	// we can ignore -ENOENT, in case the route was already removed
	if (rc < 0 && rc != -ENOENT)
		return rc;

	spin_lock_irqsave(&mdev->addrs_lock, flags);
	memmove(pos, pos + 1, mdev->num_addrs - 1 - (pos - mdev->addrs));
	mdev->num_addrs--;
	spin_unlock_irqrestore(&mdev->addrs_lock, flags);

	mctp_addr_notify(mdev, addr->s_addr, RTM_DELADDR, skb, nlh);

	return 0;
}

void mctp_dev_hold(struct mctp_dev *mdev)
{
	refcount_inc(&mdev->refs);
}

void mctp_dev_put(struct mctp_dev *mdev)
{
	if (mdev && refcount_dec_and_test(&mdev->refs)) {
		kfree(mdev->addrs);
		dev_put(mdev->dev);
		kfree_rcu(mdev, rcu);
	}
}

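/* Drop a key's reference to its device, giving the driver a chance to
 * release any flow state via its release_flow() op.
 */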
void mctp_dev_release_key(struct mctp_dev *dev, struct mctp_sk_key *key)
	__must_hold(&key->lock)
{
	if (!dev)
		return;
	if (dev->ops && dev->ops->release_flow)
		dev->ops->release_flow(dev, key);
	key->dev = NULL;
	mctp_dev_put(dev);
}

void mctp_dev_set_key(struct mctp_dev *dev, struct mctp_sk_key *key)
	__must_hold(&key->lock)
{
	mctp_dev_hold(dev);
	key->dev = dev;
}

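/* Allocate an mctp_dev and attach it to @dev; called under RTNL. The
 * initial reference is released when the interface unregisters.
 */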
static struct mctp_dev *mctp_add_dev(struct net_device *dev)
{
	struct mctp_dev *mdev;

	ASSERT_RTNL();

	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	if (!mdev)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&mdev->addrs_lock);

	mdev->net = mctp_default_net(dev_net(dev));

	/* associate to net_device */
	refcount_set(&mdev->refs, 1);
	rcu_assign_pointer(dev->mctp_ptr, mdev);

	dev_hold(dev);
	mdev->dev = dev;

	return mdev;
}

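/* rtnl_af_ops handlers: report MCTP link attributes (network ID and
 * physical binding) in RTM_NEWLINK messages, and size them for allocation.
 */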
static int mctp_fill_link_af(struct sk_buff *skb,
			     const struct net_device *dev, u32 ext_filter_mask)
{
	struct mctp_dev *mdev;

	mdev = mctp_dev_get_rtnl(dev);
	if (!mdev)
		return -ENODATA;
	if (nla_put_u32(skb, IFLA_MCTP_NET, mdev->net))
		return -EMSGSIZE;
	if (nla_put_u8(skb, IFLA_MCTP_PHYS_BINDING, mdev->binding))
		return -EMSGSIZE;
	return 0;
}

static size_t mctp_get_link_af_size(const struct net_device *dev,
				    u32 ext_filter_mask)
{
	struct mctp_dev *mdev;
	unsigned int ret;

	/* caller holds RCU */
	mdev = __mctp_dev_get(dev);
	if (!mdev)
		return 0;
	ret = nla_total_size(4); /* IFLA_MCTP_NET */
	ret += nla_total_size(1); /* IFLA_MCTP_PHYS_BINDING */
	mctp_dev_put(mdev);
	return ret;
}

static const struct nla_policy ifla_af_mctp_policy[IFLA_MCTP_MAX + 1] = {
	[IFLA_MCTP_NET]		= { .type = NLA_U32 },
};

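/* IFLA_AF_SPEC handler: currently the only writable per-link attribute is
 * the MCTP network ID (IFLA_MCTP_NET).
 */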
static int mctp_set_link_af(struct net_device *dev, const struct nlattr *attr,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *tb[IFLA_MCTP_MAX + 1];
	struct mctp_dev *mdev;
	int rc;

	rc = nla_parse_nested(tb, IFLA_MCTP_MAX, attr, ifla_af_mctp_policy,
			      NULL);
	if (rc)
		return rc;

	mdev = mctp_dev_get_rtnl(dev);
	if (!mdev)
		return 0;

	if (tb[IFLA_MCTP_NET])
		WRITE_ONCE(mdev->net, nla_get_u32(tb[IFLA_MCTP_NET]));

	return 0;
}

/* Matches netdev types that should have MCTP handling */
static bool mctp_known(struct net_device *dev)
{
	/* only register specific types (inc. NONE for TUN devices) */
	return dev->type == ARPHRD_MCTP ||
		   dev->type == ARPHRD_LOOPBACK ||
		   dev->type == ARPHRD_NONE;
}

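/* Detach MCTP state from a departing netdev: clear mctp_ptr, flush routes
 * and neighbours, and drop the reference taken at registration.
 */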
static void mctp_unregister(struct net_device *dev)
{
	struct mctp_dev *mdev;

	mdev = mctp_dev_get_rtnl(dev);
	if (!mdev)
		return;

	RCU_INIT_POINTER(mdev->dev->mctp_ptr, NULL);

	mctp_route_remove_dev(mdev);
	mctp_neigh_remove_dev(mdev);

	mctp_dev_put(mdev);
}

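/* Attach MCTP state to a newly registered netdev, if it is of a supported
 * type and not already associated.
 */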
static int mctp_register(struct net_device *dev)
{
	struct mctp_dev *mdev;

	/* Already registered? */
	if (rtnl_dereference(dev->mctp_ptr))
		return 0;

	/* only register specific types */
	if (!mctp_known(dev))
		return 0;

	mdev = mctp_add_dev(dev);
	if (IS_ERR(mdev))
		return PTR_ERR(mdev);

	return 0;
}

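/* netdevice notifier: create or tear down MCTP state as interfaces come
 * and go.
 */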
static int mctp_dev_notify(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int rc;

	switch (event) {
	case NETDEV_REGISTER:
		rc = mctp_register(dev);
		if (rc)
			return notifier_from_errno(rc);
		break;
	case NETDEV_UNREGISTER:
		mctp_unregister(dev);
		break;
	}

	return NOTIFY_OK;
}

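/* Register a netdev that provides MCTP-specific ops, recording the driver's
 * ops and physical binding before the usual register_netdevice() call.
 * Caller holds RTNL.
 */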
static int mctp_register_netdevice(struct net_device *dev,
				   const struct mctp_netdev_ops *ops,
				   enum mctp_phys_binding binding)
{
	struct mctp_dev *mdev;

	mdev = mctp_add_dev(dev);
	if (IS_ERR(mdev))
		return PTR_ERR(mdev);

	mdev->ops = ops;
	mdev->binding = binding;

	return register_netdevice(dev);
}

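/* Driver-facing registration entry point; takes RTNL around
 * mctp_register_netdevice().
 *
 * Illustrative usage only -- the names below are placeholders, and the
 * binding constant is assumed from enum mctp_phys_binding in
 * <net/mctpdevice.h>:
 *
 *	static const struct mctp_netdev_ops foo_mctp_ops = {
 *		.release_flow	= foo_release_flow,
 *	};
 *
 *	rc = mctp_register_netdev(ndev, &foo_mctp_ops, MCTP_PHYS_BINDING_SMBUS);
 *	if (rc)
 *		free_netdev(ndev);
 */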
int mctp_register_netdev(struct net_device *dev,
			 const struct mctp_netdev_ops *ops,
			 enum mctp_phys_binding binding)
{
	int rc;

	rtnl_lock();
	rc = mctp_register_netdevice(dev, ops, binding);
	rtnl_unlock();

	return rc;
}
EXPORT_SYMBOL_GPL(mctp_register_netdev);

void mctp_unregister_netdev(struct net_device *dev)
{
	unregister_netdev(dev);
}
EXPORT_SYMBOL_GPL(mctp_unregister_netdev);

static struct rtnl_af_ops mctp_af_ops = {
	.family = AF_MCTP,
	.fill_link_af = mctp_fill_link_af,
	.get_link_af_size = mctp_get_link_af_size,
	.set_link_af = mctp_set_link_af,
};

static struct notifier_block mctp_dev_nb = {
	.notifier_call = mctp_dev_notify,
	.priority = ADDRCONF_NOTIFY_PRIORITY,
};

static const struct rtnl_msg_handler mctp_device_rtnl_msg_handlers[] = {
	{.owner = THIS_MODULE, .protocol = PF_MCTP, .msgtype = RTM_NEWADDR,
	 .doit = mctp_rtm_newaddr},
	{.owner = THIS_MODULE, .protocol = PF_MCTP, .msgtype = RTM_DELADDR,
	 .doit = mctp_rtm_deladdr},
	{.owner = THIS_MODULE, .protocol = PF_MCTP, .msgtype = RTM_GETADDR,
	 .dumpit = mctp_dump_addrinfo},
};

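/* Subsystem init: hook netdev notifications, register the AF_MCTP
 * address-family ops and the RTM_*ADDR handlers, unwinding on failure.
 */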
int __init mctp_device_init(void)
{
	int err;

	register_netdevice_notifier(&mctp_dev_nb);
	rtnl_af_register(&mctp_af_ops);

	err = rtnl_register_many(mctp_device_rtnl_msg_handlers);
	if (err) {
		rtnl_af_unregister(&mctp_af_ops);
		unregister_netdevice_notifier(&mctp_dev_nb);
	}

	return err;
}

void __exit mctp_device_exit(void)
{
	rtnl_unregister_many(mctp_device_rtnl_msg_handlers);
	rtnl_af_unregister(&mctp_af_ops);
	unregister_netdevice_notifier(&mctp_dev_nb);
}
556