/*
 * Copyright (c) 2005 Voltaire Inc.  All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mutex.h>
#include <linux/inetdevice.h>
#include <linux/workqueue.h>
#include <net/arp.h>
#include <net/neighbour.h>
#include <net/route.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <rdma/ib_addr.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("IB Address Translation");
MODULE_LICENSE("Dual BSD/GPL");

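/*
 * A pending address resolution request.  Requests sit on req_list,
 * ordered by timeout, until they complete, time out, or are canceled.
 */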
struct addr_req {
	struct list_head list;
	struct sockaddr_storage src_addr;
	struct sockaddr_storage dst_addr;
	struct rdma_dev_addr *addr;
	struct rdma_addr_client *client;
	void *context;
	void (*callback)(int status, struct sockaddr *src_addr,
			 struct rdma_dev_addr *addr, void *context);
	unsigned long timeout;
	int status;
};

static void process_req(struct work_struct *work);

static DEFINE_MUTEX(lock);
static LIST_HEAD(req_list);
static DECLARE_DELAYED_WORK(work, process_req);
static struct workqueue_struct *addr_wq;

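/*
 * Register a client with the address resolution service.  The refcount
 * and completion let rdma_addr_unregister_client() block until all
 * outstanding requests issued for the client have finished.
 */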
void rdma_addr_register_client(struct rdma_addr_client *client)
{
	atomic_set(&client->refcount, 1);
	init_completion(&client->comp);
}
EXPORT_SYMBOL(rdma_addr_register_client);

static inline void put_client(struct rdma_addr_client *client)
{
	if (atomic_dec_and_test(&client->refcount))
		complete(&client->comp);
}

void rdma_addr_unregister_client(struct rdma_addr_client *client)
{
	put_client(client);
	wait_for_completion(&client->comp);
}
EXPORT_SYMBOL(rdma_addr_unregister_client);

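/*
 * Copy the link-layer parameters of @dev (type, hardware address,
 * broadcast address, ifindex) into @dev_addr.  @dst_dev_addr, when
 * non-NULL, supplies the destination hardware address.
 */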
int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
		     const unsigned char *dst_dev_addr)
{
	dev_addr->dev_type = dev->type;
	memcpy(dev_addr->src_dev_addr, dev->dev_addr, MAX_ADDR_LEN);
	memcpy(dev_addr->broadcast, dev->broadcast, MAX_ADDR_LEN);
	if (dst_dev_addr)
		memcpy(dev_addr->dst_dev_addr, dst_dev_addr, MAX_ADDR_LEN);
	dev_addr->bound_dev_if = dev->ifindex;
	return 0;
}
EXPORT_SYMBOL(rdma_copy_addr);

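/*
 * Translate a local IP address into the device address of the netdevice
 * that owns it.  If @dev_addr is already bound to an interface, that
 * interface is used directly; only devices in init_net are considered.
 */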
int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
{
	struct net_device *dev;
	int ret = -EADDRNOTAVAIL;

	if (dev_addr->bound_dev_if) {
		dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
		if (!dev)
			return -ENODEV;
		ret = rdma_copy_addr(dev_addr, dev, NULL);
		dev_put(dev);
		return ret;
	}

	switch (addr->sa_family) {
	case AF_INET:
		dev = ip_dev_find(&init_net,
			((struct sockaddr_in *) addr)->sin_addr.s_addr);

		if (!dev)
			return ret;

		ret = rdma_copy_addr(dev_addr, dev, NULL);
		dev_put(dev);
		break;

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	case AF_INET6:
		read_lock(&dev_base_lock);
		for_each_netdev(&init_net, dev) {
			if (ipv6_chk_addr(&init_net,
					  &((struct sockaddr_in6 *) addr)->sin6_addr,
					  dev, 1)) {
				ret = rdma_copy_addr(dev_addr, dev, NULL);
				break;
			}
		}
		read_unlock(&dev_base_lock);
		break;
#endif
	}
	return ret;
}
EXPORT_SYMBOL(rdma_translate_ip);

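/*
 * (Re)arm the delayed work to fire at @time (in jiffies), clamping the
 * delay to at least one jiffy if @time is already in the past.
 */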
static void set_timeout(unsigned long time)
{
	unsigned long delay;

	cancel_delayed_work(&work);

	delay = time - jiffies;
	if ((long)delay <= 0)
		delay = 1;

	queue_delayed_work(addr_wq, &work, delay);
}

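/*
 * Insert @req into req_list, which is kept ordered by increasing timeout.
 * If the new request ends up at the head of the list, reschedule the work
 * to run at its timeout.
 */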
static void queue_req(struct addr_req *req)
{
	struct addr_req *temp_req;

	mutex_lock(&lock);
	list_for_each_entry_reverse(temp_req, &req_list, list) {
		if (time_after_eq(req->timeout, temp_req->timeout))
			break;
	}

	list_add(&req->list, &temp_req->list);

	if (req_list.next == &req->list)
		set_timeout(req->timeout);
	mutex_unlock(&lock);
}

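/*
 * Resolve an IPv4 destination to a hardware address: route the
 * destination, fill in the chosen source address, and consult the ARP
 * table.  Returns -ENODATA if the neighbour entry is not yet valid, in
 * which case resolution is retried later from process_req().
 */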
static int addr4_resolve(struct sockaddr_in *src_in,
			 struct sockaddr_in *dst_in,
			 struct rdma_dev_addr *addr)
{
	__be32 src_ip = src_in->sin_addr.s_addr;
	__be32 dst_ip = dst_in->sin_addr.s_addr;
	struct flowi fl;
	struct rtable *rt;
	struct neighbour *neigh;
	int ret;

	memset(&fl, 0, sizeof fl);
	fl.nl_u.ip4_u.daddr = dst_ip;
	fl.nl_u.ip4_u.saddr = src_ip;
	fl.oif = addr->bound_dev_if;

	ret = ip_route_output_key(&init_net, &rt, &fl);
	if (ret)
		goto out;

	src_in->sin_family = AF_INET;
	src_in->sin_addr.s_addr = rt->rt_src;

	if (rt->idev->dev->flags & IFF_LOOPBACK) {
		ret = rdma_translate_ip((struct sockaddr *) dst_in, addr);
		if (!ret)
			memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
		goto put;
	}

	/* If the device does ARP internally, return 'done' */
	if (rt->idev->dev->flags & IFF_NOARP) {
		rdma_copy_addr(addr, rt->idev->dev, NULL);
		goto put;
	}

	neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->idev->dev);
	if (!neigh || !(neigh->nud_state & NUD_VALID)) {
		neigh_event_send(rt->u.dst.neighbour, NULL);
		ret = -ENODATA;
		if (neigh)
			goto release;
		goto put;
	}

	ret = rdma_copy_addr(addr, neigh->dev, neigh->ha);
release:
	neigh_release(neigh);
put:
	ip_rt_put(rt);
out:
	return ret;
}

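/*
 * IPv6 counterpart of addr4_resolve(): route the destination, pick a
 * source address if none was given, and use the neighbour attached to
 * the route.  When IPv6 support is not built in, the stub below simply
 * returns -EADDRNOTAVAIL.
 */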
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
static int addr6_resolve(struct sockaddr_in6 *src_in,
			 struct sockaddr_in6 *dst_in,
			 struct rdma_dev_addr *addr)
{
	struct flowi fl;
	struct neighbour *neigh;
	struct dst_entry *dst;
	int ret;

	memset(&fl, 0, sizeof fl);
	ipv6_addr_copy(&fl.fl6_dst, &dst_in->sin6_addr);
	ipv6_addr_copy(&fl.fl6_src, &src_in->sin6_addr);
	fl.oif = addr->bound_dev_if;

	dst = ip6_route_output(&init_net, NULL, &fl);
	if ((ret = dst->error))
		goto put;

	if (ipv6_addr_any(&fl.fl6_src)) {
		ret = ipv6_dev_get_saddr(&init_net, ip6_dst_idev(dst)->dev,
					 &fl.fl6_dst, 0, &fl.fl6_src);
		if (ret)
			goto put;

		src_in->sin6_family = AF_INET6;
		ipv6_addr_copy(&src_in->sin6_addr, &fl.fl6_src);
	}

	if (dst->dev->flags & IFF_LOOPBACK) {
		ret = rdma_translate_ip((struct sockaddr *) dst_in, addr);
		if (!ret)
			memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
		goto put;
	}

	/* If the device does ARP internally, return 'done' */
	if (dst->dev->flags & IFF_NOARP) {
		ret = rdma_copy_addr(addr, dst->dev, NULL);
		goto put;
	}

	neigh = dst->neighbour;
	if (!neigh || !(neigh->nud_state & NUD_VALID)) {
		neigh_event_send(dst->neighbour, NULL);
		ret = -ENODATA;
		goto put;
	}

	ret = rdma_copy_addr(addr, dst->dev, neigh->ha);
put:
	dst_release(dst);
	return ret;
}
#else
static int addr6_resolve(struct sockaddr_in6 *src_in,
			 struct sockaddr_in6 *dst_in,
			 struct rdma_dev_addr *addr)
{
	return -EADDRNOTAVAIL;
}
#endif

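/* Dispatch to the IPv4 or IPv6 resolver based on the source address family. */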
static int addr_resolve(struct sockaddr *src_in,
			struct sockaddr *dst_in,
			struct rdma_dev_addr *addr)
{
	if (src_in->sa_family == AF_INET) {
		return addr4_resolve((struct sockaddr_in *) src_in,
			(struct sockaddr_in *) dst_in, addr);
	} else
		return addr6_resolve((struct sockaddr_in6 *) src_in,
			(struct sockaddr_in6 *) dst_in, addr);
}

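/*
 * Delayed-work handler: retry every request still waiting on neighbour
 * discovery (-ENODATA), time out those past their deadline, and run the
 * callbacks for finished requests after dropping the lock.
 */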
static void process_req(struct work_struct *work)
{
	struct addr_req *req, *temp_req;
	struct sockaddr *src_in, *dst_in;
	struct list_head done_list;

	INIT_LIST_HEAD(&done_list);

	mutex_lock(&lock);
	list_for_each_entry_safe(req, temp_req, &req_list, list) {
		if (req->status == -ENODATA) {
			src_in = (struct sockaddr *) &req->src_addr;
			dst_in = (struct sockaddr *) &req->dst_addr;
			req->status = addr_resolve(src_in, dst_in, req->addr);
			if (req->status && time_after_eq(jiffies, req->timeout))
				req->status = -ETIMEDOUT;
			else if (req->status == -ENODATA)
				continue;
		}
		list_move_tail(&req->list, &done_list);
	}

	if (!list_empty(&req_list)) {
		req = list_entry(req_list.next, struct addr_req, list);
		set_timeout(req->timeout);
	}
	mutex_unlock(&lock);

	list_for_each_entry_safe(req, temp_req, &done_list, list) {
		list_del(&req->list);
		req->callback(req->status, (struct sockaddr *) &req->src_addr,
			req->addr, req->context);
		put_client(req->client);
		kfree(req);
	}
}

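/*
 * Resolve @dst_addr to a hardware address on behalf of @client.  The
 * result is always delivered through @callback from the workqueue:
 * right away if the route and neighbour are already known, otherwise
 * once ARP/ND completes or @timeout_ms expires.
 */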
int rdma_resolve_ip(struct rdma_addr_client *client,
		    struct sockaddr *src_addr, struct sockaddr *dst_addr,
		    struct rdma_dev_addr *addr, int timeout_ms,
		    void (*callback)(int status, struct sockaddr *src_addr,
				     struct rdma_dev_addr *addr, void *context),
		    void *context)
{
	struct sockaddr *src_in, *dst_in;
	struct addr_req *req;
	int ret = 0;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	src_in = (struct sockaddr *) &req->src_addr;
	dst_in = (struct sockaddr *) &req->dst_addr;

	if (src_addr) {
		if (src_addr->sa_family != dst_addr->sa_family) {
			ret = -EINVAL;
			goto err;
		}

		memcpy(src_in, src_addr, ip_addr_size(src_addr));
	} else {
		src_in->sa_family = dst_addr->sa_family;
	}

	memcpy(dst_in, dst_addr, ip_addr_size(dst_addr));
	req->addr = addr;
	req->callback = callback;
	req->context = context;
	req->client = client;
	atomic_inc(&client->refcount);

	req->status = addr_resolve(src_in, dst_in, addr);
	switch (req->status) {
	case 0:
		req->timeout = jiffies;
		queue_req(req);
		break;
	case -ENODATA:
		req->timeout = msecs_to_jiffies(timeout_ms) + jiffies;
		queue_req(req);
		break;
	default:
		ret = req->status;
		atomic_dec(&client->refcount);
		goto err;
	}
	return ret;
err:
	kfree(req);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_ip);

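/*
 * Cancel an outstanding resolution request.  The request is marked
 * -ECANCELED and moved to the head of the list so the work handler
 * completes it promptly; the client's callback still runs.
 */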
void rdma_addr_cancel(struct rdma_dev_addr *addr)
{
	struct addr_req *req, *temp_req;

	mutex_lock(&lock);
	list_for_each_entry_safe(req, temp_req, &req_list, list) {
		if (req->addr == addr) {
			req->status = -ECANCELED;
			req->timeout = jiffies;
			list_move(&req->list, &req_list);
			set_timeout(req->timeout);
			break;
		}
	}
	mutex_unlock(&lock);
}
EXPORT_SYMBOL(rdma_addr_cancel);

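/*
 * Netevent notifier: when a neighbour entry becomes valid, kick the
 * workqueue so pending requests can be retried immediately.
 */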
static int netevent_callback(struct notifier_block *self, unsigned long event,
	void *ctx)
{
	if (event == NETEVENT_NEIGH_UPDATE) {
		struct neighbour *neigh = ctx;

		if (neigh->nud_state & NUD_VALID) {
			set_timeout(jiffies);
		}
	}
	return 0;
}

static struct notifier_block nb = {
	.notifier_call = netevent_callback
};

static int __init addr_init(void)
{
	addr_wq = create_singlethread_workqueue("ib_addr");
	if (!addr_wq)
		return -ENOMEM;

	register_netevent_notifier(&nb);
	return 0;
}

static void __exit addr_cleanup(void)
{
	unregister_netevent_notifier(&nb);
	destroy_workqueue(addr_wq);
}

module_init(addr_init);
module_exit(addr_cleanup);