xref: /openbmc/linux/drivers/infiniband/core/addr.c (revision b7bf17f4)
1 /*
2  * Copyright (c) 2005 Voltaire Inc.  All rights reserved.
3  * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
4  * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
5  * Copyright (c) 2005 Intel Corporation.  All rights reserved.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * OpenIB.org BSD license below:
12  *
13  *     Redistribution and use in source and binary forms, with or
14  *     without modification, are permitted provided that the following
15  *     conditions are met:
16  *
17  *      - Redistributions of source code must retain the above
18  *        copyright notice, this list of conditions and the following
19  *        disclaimer.
20  *
21  *      - Redistributions in binary form must reproduce the above
22  *        copyright notice, this list of conditions and the following
23  *        disclaimer in the documentation and/or other materials
24  *        provided with the distribution.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33  * SOFTWARE.
34  */
35 
36 #include <linux/mutex.h>
37 #include <linux/inetdevice.h>
38 #include <linux/slab.h>
39 #include <linux/workqueue.h>
40 #include <net/arp.h>
41 #include <net/neighbour.h>
42 #include <net/route.h>
43 #include <net/netevent.h>
44 #include <net/ipv6_stubs.h>
45 #include <net/ip6_route.h>
46 #include <rdma/ib_addr.h>
47 #include <rdma/ib_cache.h>
48 #include <rdma/ib_sa.h>
49 #include <rdma/ib.h>
50 #include <rdma/rdma_netlink.h>
51 #include <net/netlink.h>
52 
53 #include "core_priv.h"
54 
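/*
 * One outstanding address resolution request. Requests sit on req_list
 * (protected by 'lock') and are driven by the delayed work until they
 * complete, time out, or are canceled via rdma_addr_cancel().
 */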
55 struct addr_req {
56 	struct list_head list;
57 	struct sockaddr_storage src_addr;
58 	struct sockaddr_storage dst_addr;
59 	struct rdma_dev_addr *addr;
60 	void *context;
61 	void (*callback)(int status, struct sockaddr *src_addr,
62 			 struct rdma_dev_addr *addr, void *context);
63 	unsigned long timeout;
64 	struct delayed_work work;
65 	bool resolve_by_gid_attr;	/* Consider gid attr in resolve phase */
66 	int status;
67 	u32 seq;
68 };
69 
70 static atomic_t ib_nl_addr_request_seq = ATOMIC_INIT(0);
71 
72 static DEFINE_SPINLOCK(lock);
73 static LIST_HEAD(req_list);
74 static struct workqueue_struct *addr_wq;
75 
76 static const struct nla_policy ib_nl_addr_policy[LS_NLA_TYPE_MAX] = {
77 	[LS_NLA_TYPE_DGID] = {.type = NLA_BINARY,
78 		.len = sizeof(struct rdma_nla_ls_gid),
79 		.validation_type = NLA_VALIDATE_MIN,
80 		.min = sizeof(struct rdma_nla_ls_gid)},
81 };
82 
83 static inline bool ib_nl_is_good_ip_resp(const struct nlmsghdr *nlh)
84 {
85 	struct nlattr *tb[LS_NLA_TYPE_MAX] = {};
86 	int ret;
87 
88 	if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
89 		return false;
90 
91 	ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
92 				   nlmsg_len(nlh), ib_nl_addr_policy, NULL);
93 	if (ret)
94 		return false;
95 
96 	return true;
97 }
98 
99 static void ib_nl_process_good_ip_rsep(const struct nlmsghdr *nlh)
100 {
101 	const struct nlattr *head, *curr;
102 	union ib_gid gid;
103 	struct addr_req *req;
104 	int len, rem;
105 	int found = 0;
106 
107 	head = (const struct nlattr *)nlmsg_data(nlh);
108 	len = nlmsg_len(nlh);
109 
110 	nla_for_each_attr(curr, head, len, rem) {
111 		if (curr->nla_type == LS_NLA_TYPE_DGID)
112 			memcpy(&gid, nla_data(curr), nla_len(curr));
113 	}
114 
115 	spin_lock_bh(&lock);
116 	list_for_each_entry(req, &req_list, list) {
117 		if (nlh->nlmsg_seq != req->seq)
118 			continue;
119 		/* We set the DGID part, the rest was set earlier */
120 		rdma_addr_set_dgid(req->addr, &gid);
121 		req->status = 0;
122 		found = 1;
123 		break;
124 	}
125 	spin_unlock_bh(&lock);
126 
127 	if (!found)
128 		pr_info("Couldn't find request waiting for DGID: %pI6\n",
129 			&gid);
130 }
131 
132 int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
133 			     struct nlmsghdr *nlh,
134 			     struct netlink_ext_ack *extack)
135 {
136 	if ((nlh->nlmsg_flags & NLM_F_REQUEST) ||
137 	    !(NETLINK_CB(skb).sk))
138 		return -EPERM;
139 
140 	if (ib_nl_is_good_ip_resp(nlh))
141 		ib_nl_process_good_ip_rsep(nlh);
142 
143 	return 0;
144 }
145 
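/*
 * Ask a userspace resolver, via the RDMA_NL_LS netlink multicast group, for
 * the GID matching @daddr. The reply is delivered asynchronously through
 * ib_nl_handle_ip_res_resp(), so this always returns -ENODATA to keep the
 * request queued for retry.
 */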
146 static int ib_nl_ip_send_msg(struct rdma_dev_addr *dev_addr,
147 			     const void *daddr,
148 			     u32 seq, u16 family)
149 {
150 	struct sk_buff *skb = NULL;
151 	struct nlmsghdr *nlh;
152 	struct rdma_ls_ip_resolve_header *header;
153 	void *data;
154 	size_t size;
155 	int attrtype;
156 	int len;
157 
158 	if (family == AF_INET) {
159 		size = sizeof(struct in_addr);
160 		attrtype = RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_IPV4;
161 	} else {
162 		size = sizeof(struct in6_addr);
163 		attrtype = RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_IPV6;
164 	}
165 
166 	len = nla_total_size(sizeof(size));
167 	len += NLMSG_ALIGN(sizeof(*header));
168 
169 	skb = nlmsg_new(len, GFP_KERNEL);
170 	if (!skb)
171 		return -ENOMEM;
172 
173 	data = ibnl_put_msg(skb, &nlh, seq, 0, RDMA_NL_LS,
174 			    RDMA_NL_LS_OP_IP_RESOLVE, NLM_F_REQUEST);
175 	if (!data) {
176 		nlmsg_free(skb);
177 		return -ENODATA;
178 	}
179 
180 	/* Construct the family header first */
181 	header = skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
182 	header->ifindex = dev_addr->bound_dev_if;
183 	nla_put(skb, attrtype, size, daddr);
184 
185 	/* Repair the nlmsg header length */
186 	nlmsg_end(skb, nlh);
187 	rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, GFP_KERNEL);
188 
189 	/* Return -ENODATA so the request stays queued for retry; when the
190 	 * response arrives from userspace, the retry will pick up the result.
191 	 */
192 	return -ENODATA;
193 }
194 
195 int rdma_addr_size(const struct sockaddr *addr)
196 {
197 	switch (addr->sa_family) {
198 	case AF_INET:
199 		return sizeof(struct sockaddr_in);
200 	case AF_INET6:
201 		return sizeof(struct sockaddr_in6);
202 	case AF_IB:
203 		return sizeof(struct sockaddr_ib);
204 	default:
205 		return 0;
206 	}
207 }
208 EXPORT_SYMBOL(rdma_addr_size);
209 
210 int rdma_addr_size_in6(struct sockaddr_in6 *addr)
211 {
212 	int ret = rdma_addr_size((struct sockaddr *) addr);
213 
214 	return ret <= sizeof(*addr) ? ret : 0;
215 }
216 EXPORT_SYMBOL(rdma_addr_size_in6);
217 
218 int rdma_addr_size_kss(struct __kernel_sockaddr_storage *addr)
219 {
220 	int ret = rdma_addr_size((struct sockaddr *) addr);
221 
222 	return ret <= sizeof(*addr) ? ret : 0;
223 }
224 EXPORT_SYMBOL(rdma_addr_size_kss);
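/*
 * A minimal usage sketch (hypothetical caller): the size helpers report how
 * many bytes of a sockaddr are meaningful for its family, or 0 for an
 * unsupported family, so callers can validate and bound their copies.
 *
 *	if (!rdma_addr_size_in6(&cmd.addr))
 *		return -EINVAL;
 *	memcpy(&dst_addr, &cmd.addr, rdma_addr_size_in6(&cmd.addr));
 */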
225 
226 /**
227  * rdma_copy_src_l2_addr - Copy netdevice source addresses
228  * @dev_addr:	Destination rdma_dev_addr to copy the addresses into
229  * @dev:	Netdevice whose source addresses are copied
230  *
231  * rdma_copy_src_l2_addr() copies source addresses from the specified netdevice.
232  * This includes unicast address, broadcast address, device type and
233  * interface index.
234  */
235 void rdma_copy_src_l2_addr(struct rdma_dev_addr *dev_addr,
236 			   const struct net_device *dev)
237 {
238 	dev_addr->dev_type = dev->type;
239 	memcpy(dev_addr->src_dev_addr, dev->dev_addr, MAX_ADDR_LEN);
240 	memcpy(dev_addr->broadcast, dev->broadcast, MAX_ADDR_LEN);
241 	dev_addr->bound_dev_if = dev->ifindex;
242 }
243 EXPORT_SYMBOL(rdma_copy_src_l2_addr);
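/*
 * A minimal sketch (hypothetical caller holding a valid reference to ndev):
 *
 *	struct rdma_dev_addr dev_addr = { .net = dev_net(ndev) };
 *
 *	rdma_copy_src_l2_addr(&dev_addr, ndev);
 *
 * Afterwards dev_addr.src_dev_addr, dev_addr.broadcast, dev_addr.dev_type
 * and dev_addr.bound_dev_if mirror the netdevice.
 */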
244 
245 static struct net_device *
246 rdma_find_ndev_for_src_ip_rcu(struct net *net, const struct sockaddr *src_in)
247 {
248 	struct net_device *dev = NULL;
249 	int ret = -EADDRNOTAVAIL;
250 
251 	switch (src_in->sa_family) {
252 	case AF_INET:
253 		dev = __ip_dev_find(net,
254 				    ((const struct sockaddr_in *)src_in)->sin_addr.s_addr,
255 				    false);
256 		if (dev)
257 			ret = 0;
258 		break;
259 #if IS_ENABLED(CONFIG_IPV6)
260 	case AF_INET6:
261 		for_each_netdev_rcu(net, dev) {
262 			if (ipv6_chk_addr(net,
263 					  &((const struct sockaddr_in6 *)src_in)->sin6_addr,
264 					  dev, 1)) {
265 				ret = 0;
266 				break;
267 			}
268 		}
269 		break;
270 #endif
271 	}
272 	if (!ret && dev && is_vlan_dev(dev))
273 		dev = vlan_dev_real_dev(dev);
274 	return ret ? ERR_PTR(ret) : dev;
275 }
276 
277 int rdma_translate_ip(const struct sockaddr *addr,
278 		      struct rdma_dev_addr *dev_addr)
279 {
280 	struct net_device *dev;
281 
282 	if (dev_addr->bound_dev_if) {
283 		dev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
284 		if (!dev)
285 			return -ENODEV;
286 		rdma_copy_src_l2_addr(dev_addr, dev);
287 		dev_put(dev);
288 		return 0;
289 	}
290 
291 	rcu_read_lock();
292 	dev = rdma_find_ndev_for_src_ip_rcu(dev_addr->net, addr);
293 	if (!IS_ERR(dev))
294 		rdma_copy_src_l2_addr(dev_addr, dev);
295 	rcu_read_unlock();
296 	return PTR_ERR_OR_ZERO(dev);
297 }
298 EXPORT_SYMBOL(rdma_translate_ip);
299 
300 static void set_timeout(struct addr_req *req, unsigned long time)
301 {
302 	unsigned long delay;
303 
304 	delay = time - jiffies;
305 	if ((long)delay < 0)
306 		delay = 0;
307 
308 	mod_delayed_work(addr_wq, &req->work, delay);
309 }
310 
311 static void queue_req(struct addr_req *req)
312 {
313 	spin_lock_bh(&lock);
314 	list_add_tail(&req->list, &req_list);
315 	set_timeout(req, req->timeout);
316 	spin_unlock_bh(&lock);
317 }
318 
319 static int ib_nl_fetch_ha(struct rdma_dev_addr *dev_addr,
320 			  const void *daddr, u32 seq, u16 family)
321 {
322 	if (!rdma_nl_chk_listeners(RDMA_NL_GROUP_LS))
323 		return -EADDRNOTAVAIL;
324 
325 	return ib_nl_ip_send_msg(dev_addr, daddr, seq, family);
326 }
327 
328 static int dst_fetch_ha(const struct dst_entry *dst,
329 			struct rdma_dev_addr *dev_addr,
330 			const void *daddr)
331 {
332 	struct neighbour *n;
333 	int ret = 0;
334 
335 	n = dst_neigh_lookup(dst, daddr);
336 	if (!n)
337 		return -ENODATA;
338 
339 	if (!(n->nud_state & NUD_VALID)) {
340 		neigh_event_send(n, NULL);
341 		ret = -ENODATA;
342 	} else {
343 		neigh_ha_snapshot(dev_addr->dst_dev_addr, n, dst->dev);
344 	}
345 
346 	neigh_release(n);
347 
348 	return ret;
349 }
350 
351 static bool has_gateway(const struct dst_entry *dst, sa_family_t family)
352 {
353 	struct rtable *rt;
354 	struct rt6_info *rt6;
355 
356 	if (family == AF_INET) {
357 		rt = container_of(dst, struct rtable, dst);
358 		return rt->rt_uses_gateway;
359 	}
360 
361 	rt6 = container_of(dst, struct rt6_info, dst);
362 	return rt6->rt6i_flags & RTF_GATEWAY;
363 }
364 
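/*
 * Pick the hardware-address resolution method: routes behind a gateway on an
 * IB network are resolved through the netlink helper, everything else goes
 * through the kernel neighbour table.
 */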
365 static int fetch_ha(const struct dst_entry *dst, struct rdma_dev_addr *dev_addr,
366 		    const struct sockaddr *dst_in, u32 seq)
367 {
368 	const struct sockaddr_in *dst_in4 =
369 		(const struct sockaddr_in *)dst_in;
370 	const struct sockaddr_in6 *dst_in6 =
371 		(const struct sockaddr_in6 *)dst_in;
372 	const void *daddr = (dst_in->sa_family == AF_INET) ?
373 		(const void *)&dst_in4->sin_addr.s_addr :
374 		(const void *)&dst_in6->sin6_addr;
375 	sa_family_t family = dst_in->sa_family;
376 
377 	might_sleep();
378 
379 	/* If we have a gateway in IB mode then it must be an IB network */
380 	if (has_gateway(dst, family) && dev_addr->network == RDMA_NETWORK_IB)
381 		return ib_nl_fetch_ha(dev_addr, daddr, seq, family);
382 	else
383 		return dst_fetch_ha(dst, dev_addr, daddr);
384 }
385 
386 static int addr4_resolve(struct sockaddr *src_sock,
387 			 const struct sockaddr *dst_sock,
388 			 struct rdma_dev_addr *addr,
389 			 struct rtable **prt)
390 {
391 	struct sockaddr_in *src_in = (struct sockaddr_in *)src_sock;
392 	const struct sockaddr_in *dst_in =
393 			(const struct sockaddr_in *)dst_sock;
394 
395 	__be32 src_ip = src_in->sin_addr.s_addr;
396 	__be32 dst_ip = dst_in->sin_addr.s_addr;
397 	struct rtable *rt;
398 	struct flowi4 fl4;
399 	int ret;
400 
401 	memset(&fl4, 0, sizeof(fl4));
402 	fl4.daddr = dst_ip;
403 	fl4.saddr = src_ip;
404 	fl4.flowi4_oif = addr->bound_dev_if;
405 	rt = ip_route_output_key(addr->net, &fl4);
406 	ret = PTR_ERR_OR_ZERO(rt);
407 	if (ret)
408 		return ret;
409 
410 	src_in->sin_addr.s_addr = fl4.saddr;
411 
412 	addr->hoplimit = ip4_dst_hoplimit(&rt->dst);
413 
414 	*prt = rt;
415 	return 0;
416 }
417 
418 #if IS_ENABLED(CONFIG_IPV6)
419 static int addr6_resolve(struct sockaddr *src_sock,
420 			 const struct sockaddr *dst_sock,
421 			 struct rdma_dev_addr *addr,
422 			 struct dst_entry **pdst)
423 {
424 	struct sockaddr_in6 *src_in = (struct sockaddr_in6 *)src_sock;
425 	const struct sockaddr_in6 *dst_in =
426 				(const struct sockaddr_in6 *)dst_sock;
427 	struct flowi6 fl6;
428 	struct dst_entry *dst;
429 
430 	memset(&fl6, 0, sizeof fl6);
431 	fl6.daddr = dst_in->sin6_addr;
432 	fl6.saddr = src_in->sin6_addr;
433 	fl6.flowi6_oif = addr->bound_dev_if;
434 
435 	dst = ipv6_stub->ipv6_dst_lookup_flow(addr->net, NULL, &fl6, NULL);
436 	if (IS_ERR(dst))
437 		return PTR_ERR(dst);
438 
439 	if (ipv6_addr_any(&src_in->sin6_addr))
440 		src_in->sin6_addr = fl6.saddr;
441 
442 	addr->hoplimit = ip6_dst_hoplimit(dst);
443 
444 	*pdst = dst;
445 	return 0;
446 }
447 #else
448 static int addr6_resolve(struct sockaddr *src_sock,
449 			 const struct sockaddr *dst_sock,
450 			 struct rdma_dev_addr *addr,
451 			 struct dst_entry **pdst)
452 {
453 	return -EADDRNOTAVAIL;
454 }
455 #endif
456 
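/*
 * Fill in the destination L2 address: loopback traffic reuses the source
 * address, devices that need ARP/ND go through fetch_ha(), and IFF_NOARP
 * devices are left as-is.
 */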
457 static int addr_resolve_neigh(const struct dst_entry *dst,
458 			      const struct sockaddr *dst_in,
459 			      struct rdma_dev_addr *addr,
460 			      unsigned int ndev_flags,
461 			      u32 seq)
462 {
463 	int ret = 0;
464 
465 	if (ndev_flags & IFF_LOOPBACK) {
466 		memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
467 	} else {
468 		if (!(ndev_flags & IFF_NOARP)) {
469 			/* The device needs ARP/ND, so resolve the hardware address here */
470 			ret = fetch_ha(dst, addr, dst_in, seq);
471 		}
472 	}
473 	return ret;
474 }
475 
476 static int copy_src_l2_addr(struct rdma_dev_addr *dev_addr,
477 			    const struct sockaddr *dst_in,
478 			    const struct dst_entry *dst,
479 			    const struct net_device *ndev)
480 {
481 	int ret = 0;
482 
483 	if (dst->dev->flags & IFF_LOOPBACK)
484 		ret = rdma_translate_ip(dst_in, dev_addr);
485 	else
486 		rdma_copy_src_l2_addr(dev_addr, dst->dev);
487 
488 	/*
489 	 * If there's a gateway and the device type is not ARPHRD_INFINIBAND,
490 	 * we're definitely in RoCE v2 (as RoCE v1 isn't routable), so set
491 	 * the network type accordingly.
492 	 */
493 	if (has_gateway(dst, dst_in->sa_family) &&
494 	    ndev->type != ARPHRD_INFINIBAND)
495 		dev_addr->network = dst_in->sa_family == AF_INET ?
496 						RDMA_NETWORK_IPV4 :
497 						RDMA_NETWORK_IPV6;
498 	else
499 		dev_addr->network = RDMA_NETWORK_IB;
500 
501 	return ret;
502 }
503 
504 static int rdma_set_src_addr_rcu(struct rdma_dev_addr *dev_addr,
505 				 unsigned int *ndev_flags,
506 				 const struct sockaddr *dst_in,
507 				 const struct dst_entry *dst)
508 {
509 	struct net_device *ndev = READ_ONCE(dst->dev);
510 
511 	*ndev_flags = ndev->flags;
512 	/* A physical device must be the RDMA device to use */
513 	if (ndev->flags & IFF_LOOPBACK) {
514 		/*
515 		 * RDMA (IB/RoCE, iWarp) doesn't run on the lo interface or a
516 		 * loopback IP address. So if the route resolves to the loopback
517 		 * interface, translate that to a real ndev based on the
518 		 * non-loopback IP address.
519 		 */
520 		ndev = rdma_find_ndev_for_src_ip_rcu(dev_net(ndev), dst_in);
521 		if (IS_ERR(ndev))
522 			return -ENODEV;
523 	}
524 
525 	return copy_src_l2_addr(dev_addr, dst_in, dst, ndev);
526 }
527 
528 static int set_addr_netns_by_gid_rcu(struct rdma_dev_addr *addr)
529 {
530 	struct net_device *ndev;
531 
532 	ndev = rdma_read_gid_attr_ndev_rcu(addr->sgid_attr);
533 	if (IS_ERR(ndev))
534 		return PTR_ERR(ndev);
535 
536 	/*
537 	 * Since we are holding the RCU read lock, reading net and ifindex
538 	 * is safe without any additional reference, because
539 	 * change_net_namespace() in net/core/dev.c does an RCU sync
540 	 * after it changes the state to IFF_DOWN and before
541 	 * updating the netdev fields {net, ifindex}.
542 	 */
543 	addr->net = dev_net(ndev);
544 	addr->bound_dev_if = ndev->ifindex;
545 	return 0;
546 }
547 
548 static void rdma_addr_set_net_defaults(struct rdma_dev_addr *addr)
549 {
550 	addr->net = &init_net;
551 	addr->bound_dev_if = 0;
552 }
553 
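/*
 * Core resolution step: look up the route to the destination (IPv4 or IPv6),
 * copy the source L2 address from the chosen netdevice under RCU, and, if
 * requested, resolve the destination L2 address as well. When
 * resolve_by_gid_attr is set, the namespace and bound ifindex are taken from
 * the GID attribute's netdevice and reset to defaults before returning.
 */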
554 static int addr_resolve(struct sockaddr *src_in,
555 			const struct sockaddr *dst_in,
556 			struct rdma_dev_addr *addr,
557 			bool resolve_neigh,
558 			bool resolve_by_gid_attr,
559 			u32 seq)
560 {
561 	struct dst_entry *dst = NULL;
562 	unsigned int ndev_flags = 0;
563 	struct rtable *rt = NULL;
564 	int ret;
565 
566 	if (!addr->net) {
567 		pr_warn_ratelimited("%s: missing namespace\n", __func__);
568 		return -EINVAL;
569 	}
570 
571 	rcu_read_lock();
572 	if (resolve_by_gid_attr) {
573 		if (!addr->sgid_attr) {
574 			rcu_read_unlock();
575 			pr_warn_ratelimited("%s: missing gid_attr\n", __func__);
576 			return -EINVAL;
577 		}
578 		/*
579 		 * If the request is for a specific gid attribute of the
580 		 * rdma_dev_addr, derive net from the netdevice of the
581 		 * GID attribute.
582 		 */
583 		ret = set_addr_netns_by_gid_rcu(addr);
584 		if (ret) {
585 			rcu_read_unlock();
586 			return ret;
587 		}
588 	}
589 	if (src_in->sa_family == AF_INET) {
590 		ret = addr4_resolve(src_in, dst_in, addr, &rt);
591 		dst = &rt->dst;
592 	} else {
593 		ret = addr6_resolve(src_in, dst_in, addr, &dst);
594 	}
595 	if (ret) {
596 		rcu_read_unlock();
597 		goto done;
598 	}
599 	ret = rdma_set_src_addr_rcu(addr, &ndev_flags, dst_in, dst);
600 	rcu_read_unlock();
601 
602 	/*
603 	 * Resolve the destination's neighbour (L2) address if requested,
604 	 * and only if source address translation didn't fail.
605 	 */
606 	if (!ret && resolve_neigh)
607 		ret = addr_resolve_neigh(dst, dst_in, addr, ndev_flags, seq);
608 
609 	if (src_in->sa_family == AF_INET)
610 		ip_rt_put(rt);
611 	else
612 		dst_release(dst);
613 done:
614 	/*
615 	 * Reset the addr net and bound ifindex to their defaults, but only if
616 	 * they were derived from the GID attribute in this context.
617 	 */
618 	if (resolve_by_gid_attr)
619 		rdma_addr_set_net_defaults(addr);
620 	return ret;
621 }
622 
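/*
 * Delayed-work handler for a single request: retry resolution while it
 * returns -ENODATA, fail with -ETIMEDOUT once the deadline passes, then run
 * the callback and free the request unless rdma_addr_cancel() already took
 * ownership of it.
 */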
623 static void process_one_req(struct work_struct *_work)
624 {
625 	struct addr_req *req;
626 	struct sockaddr *src_in, *dst_in;
627 
628 	req = container_of(_work, struct addr_req, work.work);
629 
630 	if (req->status == -ENODATA) {
631 		src_in = (struct sockaddr *)&req->src_addr;
632 		dst_in = (struct sockaddr *)&req->dst_addr;
633 		req->status = addr_resolve(src_in, dst_in, req->addr,
634 					   true, req->resolve_by_gid_attr,
635 					   req->seq);
636 		if (req->status && time_after_eq(jiffies, req->timeout)) {
637 			req->status = -ETIMEDOUT;
638 		} else if (req->status == -ENODATA) {
639 			/* requeue the work for retrying again */
640 			spin_lock_bh(&lock);
641 			if (!list_empty(&req->list))
642 				set_timeout(req, req->timeout);
643 			spin_unlock_bh(&lock);
644 			return;
645 		}
646 	}
647 
648 	req->callback(req->status, (struct sockaddr *)&req->src_addr,
649 		req->addr, req->context);
650 	req->callback = NULL;
651 
652 	spin_lock_bh(&lock);
653 	/*
654 	 * Although the work will normally have been canceled by the workqueue,
655 	 * it can still be requeued as long as it is on the req_list.
656 	 */
657 	cancel_delayed_work(&req->work);
658 	if (!list_empty(&req->list)) {
659 		list_del_init(&req->list);
660 		kfree(req);
661 	}
662 	spin_unlock_bh(&lock);
663 }
664 
665 int rdma_resolve_ip(struct sockaddr *src_addr, const struct sockaddr *dst_addr,
666 		    struct rdma_dev_addr *addr, unsigned long timeout_ms,
667 		    void (*callback)(int status, struct sockaddr *src_addr,
668 				     struct rdma_dev_addr *addr, void *context),
669 		    bool resolve_by_gid_attr, void *context)
670 {
671 	struct sockaddr *src_in, *dst_in;
672 	struct addr_req *req;
673 	int ret = 0;
674 
675 	req = kzalloc(sizeof *req, GFP_KERNEL);
676 	if (!req)
677 		return -ENOMEM;
678 
679 	src_in = (struct sockaddr *) &req->src_addr;
680 	dst_in = (struct sockaddr *) &req->dst_addr;
681 
682 	if (src_addr) {
683 		if (src_addr->sa_family != dst_addr->sa_family) {
684 			ret = -EINVAL;
685 			goto err;
686 		}
687 
688 		memcpy(src_in, src_addr, rdma_addr_size(src_addr));
689 	} else {
690 		src_in->sa_family = dst_addr->sa_family;
691 	}
692 
693 	memcpy(dst_in, dst_addr, rdma_addr_size(dst_addr));
694 	req->addr = addr;
695 	req->callback = callback;
696 	req->context = context;
697 	req->resolve_by_gid_attr = resolve_by_gid_attr;
698 	INIT_DELAYED_WORK(&req->work, process_one_req);
699 	req->seq = (u32)atomic_inc_return(&ib_nl_addr_request_seq);
700 
701 	req->status = addr_resolve(src_in, dst_in, addr, true,
702 				   req->resolve_by_gid_attr, req->seq);
703 	switch (req->status) {
704 	case 0:
705 		req->timeout = jiffies;
706 		queue_req(req);
707 		break;
708 	case -ENODATA:
709 		req->timeout = msecs_to_jiffies(timeout_ms) + jiffies;
710 		queue_req(req);
711 		break;
712 	default:
713 		ret = req->status;
714 		goto err;
715 	}
716 	return ret;
717 err:
718 	kfree(req);
719 	return ret;
720 }
721 EXPORT_SYMBOL(rdma_resolve_ip);
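/*
 * A minimal usage sketch (hypothetical caller, error handling elided). The
 * callback runs from the ib_addr workqueue once resolution succeeds, fails,
 * or times out; on success addr->dst_dev_addr holds the destination MAC:
 *
 *	static void my_resolve_done(int status, struct sockaddr *src,
 *				    struct rdma_dev_addr *addr, void *ctx)
 *	{
 *		...
 *	}
 *
 *	ret = rdma_resolve_ip(NULL, dst_addr, &dev_addr, 2000,
 *			      my_resolve_done, false, my_ctx);
 */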
722 
723 int roce_resolve_route_from_path(struct sa_path_rec *rec,
724 				 const struct ib_gid_attr *attr)
725 {
726 	union {
727 		struct sockaddr     _sockaddr;
728 		struct sockaddr_in  _sockaddr_in;
729 		struct sockaddr_in6 _sockaddr_in6;
730 	} sgid, dgid;
731 	struct rdma_dev_addr dev_addr = {};
732 	int ret;
733 
734 	might_sleep();
735 
736 	if (rec->roce.route_resolved)
737 		return 0;
738 
739 	rdma_gid2ip((struct sockaddr *)&sgid, &rec->sgid);
740 	rdma_gid2ip((struct sockaddr *)&dgid, &rec->dgid);
741 
742 	if (sgid._sockaddr.sa_family != dgid._sockaddr.sa_family)
743 		return -EINVAL;
744 
745 	if (!attr || !attr->ndev)
746 		return -EINVAL;
747 
748 	dev_addr.net = &init_net;
749 	dev_addr.sgid_attr = attr;
750 
751 	ret = addr_resolve((struct sockaddr *)&sgid, (struct sockaddr *)&dgid,
752 			   &dev_addr, false, true, 0);
753 	if (ret)
754 		return ret;
755 
756 	if ((dev_addr.network == RDMA_NETWORK_IPV4 ||
757 	     dev_addr.network == RDMA_NETWORK_IPV6) &&
758 	    rec->rec_type != SA_PATH_REC_TYPE_ROCE_V2)
759 		return -EINVAL;
760 
761 	rec->roce.route_resolved = true;
762 	return 0;
763 }
764 
765 /**
766  * rdma_addr_cancel - Cancel resolve ip request
767  * @addr:	Pointer to address structure given previously
768  *		during rdma_resolve_ip().
769  * rdma_addr_cancel() is a synchronous function that cancels a pending
770  * request, if there is one.
771  */
772 void rdma_addr_cancel(struct rdma_dev_addr *addr)
773 {
774 	struct addr_req *req, *temp_req;
775 	struct addr_req *found = NULL;
776 
777 	spin_lock_bh(&lock);
778 	list_for_each_entry_safe(req, temp_req, &req_list, list) {
779 		if (req->addr == addr) {
780 			/*
781 			 * Removing from the list means we take ownership of
782 			 * the req
783 			 */
784 			list_del_init(&req->list);
785 			found = req;
786 			break;
787 		}
788 	}
789 	spin_unlock_bh(&lock);
790 
791 	if (!found)
792 		return;
793 
794 	/*
795 	 * Synchronously canceling the work after removing it from the req_list
796 	 * guarantees that no work is running and none will be started.
797 	 */
798 	cancel_delayed_work_sync(&found->work);
799 	kfree(found);
800 }
801 EXPORT_SYMBOL(rdma_addr_cancel);
802 
803 struct resolve_cb_context {
804 	struct completion comp;
805 	int status;
806 };
807 
808 static void resolve_cb(int status, struct sockaddr *src_addr,
809 	     struct rdma_dev_addr *addr, void *context)
810 {
811 	((struct resolve_cb_context *)context)->status = status;
812 	complete(&((struct resolve_cb_context *)context)->comp);
813 }
814 
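/*
 * Synchronous wrapper around rdma_resolve_ip(): converts the GIDs to IP
 * addresses, waits on a completion for the callback, and returns the
 * destination MAC and hop limit.
 */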
815 int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
816 				 const union ib_gid *dgid,
817 				 u8 *dmac, const struct ib_gid_attr *sgid_attr,
818 				 int *hoplimit)
819 {
820 	struct rdma_dev_addr dev_addr;
821 	struct resolve_cb_context ctx;
822 	union {
823 		struct sockaddr_in  _sockaddr_in;
824 		struct sockaddr_in6 _sockaddr_in6;
825 	} sgid_addr, dgid_addr;
826 	int ret;
827 
828 	rdma_gid2ip((struct sockaddr *)&sgid_addr, sgid);
829 	rdma_gid2ip((struct sockaddr *)&dgid_addr, dgid);
830 
831 	memset(&dev_addr, 0, sizeof(dev_addr));
832 	dev_addr.net = &init_net;
833 	dev_addr.sgid_attr = sgid_attr;
834 
835 	init_completion(&ctx.comp);
836 	ret = rdma_resolve_ip((struct sockaddr *)&sgid_addr,
837 			      (struct sockaddr *)&dgid_addr, &dev_addr, 1000,
838 			      resolve_cb, true, &ctx);
839 	if (ret)
840 		return ret;
841 
842 	wait_for_completion(&ctx.comp);
843 
844 	ret = ctx.status;
845 	if (ret)
846 		return ret;
847 
848 	memcpy(dmac, dev_addr.dst_dev_addr, ETH_ALEN);
849 	*hoplimit = dev_addr.hoplimit;
850 	return 0;
851 }
852 
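/*
 * Netevent notifier: when a neighbour entry becomes valid, reschedule every
 * pending request so it retries immediately instead of waiting for its
 * timeout.
 */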
853 static int netevent_callback(struct notifier_block *self, unsigned long event,
854 	void *ctx)
855 {
856 	struct addr_req *req;
857 
858 	if (event == NETEVENT_NEIGH_UPDATE) {
859 		struct neighbour *neigh = ctx;
860 
861 		if (neigh->nud_state & NUD_VALID) {
862 			spin_lock_bh(&lock);
863 			list_for_each_entry(req, &req_list, list)
864 				set_timeout(req, jiffies);
865 			spin_unlock_bh(&lock);
866 		}
867 	}
868 	return 0;
869 }
870 
871 static struct notifier_block nb = {
872 	.notifier_call = netevent_callback
873 };
874 
875 int addr_init(void)
876 {
877 	addr_wq = alloc_ordered_workqueue("ib_addr", 0);
878 	if (!addr_wq)
879 		return -ENOMEM;
880 
881 	register_netevent_notifier(&nb);
882 
883 	return 0;
884 }
885 
886 void addr_cleanup(void)
887 {
888 	unregister_netevent_notifier(&nb);
889 	destroy_workqueue(addr_wq);
890 	WARN_ON(!list_empty(&req_list));
891 }
892