/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <rdma/rdma_netlink.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include "core_priv.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("core kernel InfiniBand API");
MODULE_LICENSE("Dual BSD/GPL");

struct ib_client_data {
        struct list_head  list;
        struct ib_client *client;
        void *            data;
        /* The device or client is going down. Do not call client or device
         * callbacks other than remove(). */
        bool              going_down;
};

struct workqueue_struct *ib_comp_wq;
struct workqueue_struct *ib_wq;
EXPORT_SYMBOL_GPL(ib_wq);

/* The device_list and client_list contain devices and clients after their
 * registration has completed, and the devices and clients are removed
 * during unregistration. */
static LIST_HEAD(device_list);
static LIST_HEAD(client_list);

/*
 * device_mutex and lists_rwsem protect access to both device_list and
 * client_list.  device_mutex protects writer access by device and client
 * registration / de-registration.  lists_rwsem protects reader access to
 * these lists.  Code iterating these lists must hold lists_rwsem for read,
 * while updates to the lists must be made while holding it for write. A
 * special case is when device_mutex is held: locking the lists for read
 * access is then unnecessary, as holding device_mutex implies it.
 *
 * lists_rwsem also protects access to the client data list.
 */
static DEFINE_MUTEX(device_mutex);
static DECLARE_RWSEM(lists_rwsem);

static int ib_security_change(struct notifier_block *nb, unsigned long event,
                              void *lsm_data);
static void ib_policy_change_task(struct work_struct *work);
static DECLARE_WORK(ib_policy_change_work, ib_policy_change_task);

static struct notifier_block ibdev_lsm_nb = {
        .notifier_call = ib_security_change,
};

static int ib_device_check_mandatory(struct ib_device *device)
{
#define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device, x), #x }
        static const struct {
                size_t offset;
                char  *name;
        } mandatory_table[] = {
                IB_MANDATORY_FUNC(query_device),
                IB_MANDATORY_FUNC(query_port),
                IB_MANDATORY_FUNC(query_pkey),
                IB_MANDATORY_FUNC(query_gid),
                IB_MANDATORY_FUNC(alloc_pd),
                IB_MANDATORY_FUNC(dealloc_pd),
                IB_MANDATORY_FUNC(create_ah),
                IB_MANDATORY_FUNC(destroy_ah),
                IB_MANDATORY_FUNC(create_qp),
                IB_MANDATORY_FUNC(modify_qp),
                IB_MANDATORY_FUNC(destroy_qp),
                IB_MANDATORY_FUNC(post_send),
                IB_MANDATORY_FUNC(post_recv),
                IB_MANDATORY_FUNC(create_cq),
                IB_MANDATORY_FUNC(destroy_cq),
                IB_MANDATORY_FUNC(poll_cq),
                IB_MANDATORY_FUNC(req_notify_cq),
                IB_MANDATORY_FUNC(get_dma_mr),
                IB_MANDATORY_FUNC(dereg_mr),
                IB_MANDATORY_FUNC(get_port_immutable)
        };
        int i;

        for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
                if (!*(void **) ((void *) device + mandatory_table[i].offset)) {
                        pr_warn("Device %s is missing mandatory function %s\n",
                                device->name, mandatory_table[i].name);
                        return -EINVAL;
                }
        }

        return 0;
}

static struct ib_device *__ib_device_get_by_index(u32 index)
{
        struct ib_device *device;

        list_for_each_entry(device, &device_list, core_list)
                if (device->index == index)
                        return device;

        return NULL;
}

/*
 * The caller must release the reference acquired here by calling put_device().
 */
struct ib_device *ib_device_get_by_index(u32 index)
{
        struct ib_device *device;

        down_read(&lists_rwsem);
        device = __ib_device_get_by_index(index);
        if (device)
                get_device(&device->dev);

        up_read(&lists_rwsem);
        return device;
}
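
/*
 * Example (illustrative only): a caller of ib_device_get_by_index() must
 * drop the reference with put_device() once it is done with the device:
 *
 *      struct ib_device *dev = ib_device_get_by_index(index);
 *
 *      if (dev) {
 *              // ... use dev ...
 *              put_device(&dev->dev);
 *      }
 */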

static struct ib_device *__ib_device_get_by_name(const char *name)
{
        struct ib_device *device;

        list_for_each_entry(device, &device_list, core_list)
                if (!strncmp(name, device->name, IB_DEVICE_NAME_MAX))
                        return device;

        return NULL;
}

static int alloc_name(char *name)
{
        unsigned long *inuse;
        char buf[IB_DEVICE_NAME_MAX];
        struct ib_device *device;
        int i;

        inuse = (unsigned long *) get_zeroed_page(GFP_KERNEL);
        if (!inuse)
                return -ENOMEM;

        list_for_each_entry(device, &device_list, core_list) {
                if (!sscanf(device->name, name, &i))
                        continue;
                if (i < 0 || i >= PAGE_SIZE * 8)
                        continue;
                snprintf(buf, sizeof buf, name, i);
                if (!strncmp(buf, device->name, IB_DEVICE_NAME_MAX))
                        set_bit(i, inuse);
        }

        i = find_first_zero_bit(inuse, PAGE_SIZE * 8);
        free_page((unsigned long) inuse);
        snprintf(buf, sizeof buf, name, i);

        if (__ib_device_get_by_name(buf))
                return -ENFILE;

        strlcpy(name, buf, IB_DEVICE_NAME_MAX);
        return 0;
}
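
/*
 * Example (illustrative only): a driver that registers with a template
 * name such as "mlx4_%d" has the "%d" filled in here with the lowest
 * index not already taken by a registered device, e.g. "mlx4_0", then
 * "mlx4_1", and so on. The page-sized bitmap above tracks which indices
 * are already in use.
 */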

static void ib_device_release(struct device *device)
{
        struct ib_device *dev = container_of(device, struct ib_device, dev);

        WARN_ON(dev->reg_state == IB_DEV_REGISTERED);
        if (dev->reg_state == IB_DEV_UNREGISTERED) {
                /*
                 * In the IB_DEV_UNINITIALIZED state, the cache and port
                 * table have not even been created yet, so free them only
                 * once the device has reached the UNREGISTERED state.
                 */
                ib_cache_release_one(dev);
                kfree(dev->port_immutable);
        }
        kfree(dev);
}

static int ib_device_uevent(struct device *device,
                            struct kobj_uevent_env *env)
{
        struct ib_device *dev = container_of(device, struct ib_device, dev);

        if (add_uevent_var(env, "NAME=%s", dev->name))
                return -ENOMEM;

        /*
         * It would be nice to pass the node GUID with the event...
         */

        return 0;
}

static struct class ib_class = {
        .name    = "infiniband",
        .dev_release = ib_device_release,
        .dev_uevent = ib_device_uevent,
};

/**
 * ib_alloc_device - allocate an IB device struct
 * @size:size of structure to allocate
 *
 * Low-level drivers should use ib_alloc_device() to allocate &struct
 * ib_device.  @size is the size of the structure to be allocated,
 * including any private data used by the low-level driver.
 * ib_dealloc_device() must be used to free structures allocated with
 * ib_alloc_device().
 */
struct ib_device *ib_alloc_device(size_t size)
{
        struct ib_device *device;

        if (WARN_ON(size < sizeof(struct ib_device)))
                return NULL;

        device = kzalloc(size, GFP_KERNEL);
        if (!device)
                return NULL;

        rdma_restrack_init(&device->res);

        device->dev.class = &ib_class;
        device_initialize(&device->dev);

        dev_set_drvdata(&device->dev, device);

        INIT_LIST_HEAD(&device->event_handler_list);
        spin_lock_init(&device->event_handler_lock);
        spin_lock_init(&device->client_data_lock);
        INIT_LIST_HEAD(&device->client_data_list);
        INIT_LIST_HEAD(&device->port_list);

        return device;
}
EXPORT_SYMBOL(ib_alloc_device);
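
/*
 * Example (illustrative only; "struct hypo_dev" is a made-up driver
 * structure): a low-level driver typically embeds struct ib_device in
 * its own private structure and allocates both with a single call, then
 * recovers its private state with container_of():
 *
 *      struct hypo_dev {
 *              struct ib_device ibdev;
 *              void __iomem *regs;     // driver-private state
 *      };
 *
 *      struct hypo_dev *priv;
 *      struct ib_device *ibdev = ib_alloc_device(sizeof(*priv));
 *
 *      if (!ibdev)
 *              return -ENOMEM;
 *      priv = container_of(ibdev, struct hypo_dev, ibdev);
 */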

/**
 * ib_dealloc_device - free an IB device struct
 * @device:structure to free
 *
 * Free a structure allocated with ib_alloc_device().
 */
void ib_dealloc_device(struct ib_device *device)
{
        WARN_ON(device->reg_state != IB_DEV_UNREGISTERED &&
                device->reg_state != IB_DEV_UNINITIALIZED);
        rdma_restrack_clean(&device->res);
        put_device(&device->dev);
}
EXPORT_SYMBOL(ib_dealloc_device);

static int add_client_context(struct ib_device *device, struct ib_client *client)
{
        struct ib_client_data *context;
        unsigned long flags;

        context = kmalloc(sizeof *context, GFP_KERNEL);
        if (!context)
                return -ENOMEM;

        context->client = client;
        context->data   = NULL;
        context->going_down = false;

        down_write(&lists_rwsem);
        spin_lock_irqsave(&device->client_data_lock, flags);
        list_add(&context->list, &device->client_data_list);
        spin_unlock_irqrestore(&device->client_data_lock, flags);
        up_write(&lists_rwsem);

        return 0;
}

static int verify_immutable(const struct ib_device *dev, u8 port)
{
        return WARN_ON(!rdma_cap_ib_mad(dev, port) &&
                            rdma_max_mad_size(dev, port) != 0);
}

static int read_port_immutable(struct ib_device *device)
{
        int ret;
        u8 start_port = rdma_start_port(device);
        u8 end_port = rdma_end_port(device);
        u8 port;

        /*
         * device->port_immutable is indexed directly by the port number to
         * make access to this data as efficient as possible.
         *
         * Therefore port_immutable is declared as a one-based array with
         * potential empty slots at the beginning.
         */
        device->port_immutable = kzalloc(sizeof(*device->port_immutable)
                                         * (end_port + 1),
                                         GFP_KERNEL);
        if (!device->port_immutable)
                return -ENOMEM;

        for (port = start_port; port <= end_port; ++port) {
                ret = device->get_port_immutable(device, port,
                                                 &device->port_immutable[port]);
                if (ret)
                        return ret;

                if (verify_immutable(device, port))
                        return -EINVAL;
        }
        return 0;
}

void ib_get_device_fw_str(struct ib_device *dev, char *str)
{
        if (dev->get_dev_fw_str)
                dev->get_dev_fw_str(dev, str);
        else
                str[0] = '\0';
}
EXPORT_SYMBOL(ib_get_device_fw_str);

static int setup_port_pkey_list(struct ib_device *device)
{
        int i;

        /*
         * device->port_pkey_list is indexed directly by the port number.
         * Therefore it is declared as a one-based array with potential
         * empty slots at the beginning.
         */
        device->port_pkey_list = kcalloc(rdma_end_port(device) + 1,
                                         sizeof(*device->port_pkey_list),
                                         GFP_KERNEL);

        if (!device->port_pkey_list)
                return -ENOMEM;

        for (i = 0; i < (rdma_end_port(device) + 1); i++) {
                spin_lock_init(&device->port_pkey_list[i].list_lock);
                INIT_LIST_HEAD(&device->port_pkey_list[i].pkey_list);
        }

        return 0;
}

static void ib_policy_change_task(struct work_struct *work)
{
        struct ib_device *dev;

        down_read(&lists_rwsem);
        list_for_each_entry(dev, &device_list, core_list) {
                int i;

                for (i = rdma_start_port(dev); i <= rdma_end_port(dev); i++) {
                        u64 sp;
                        int ret = ib_get_cached_subnet_prefix(dev,
                                                              i,
                                                              &sp);

                        WARN_ONCE(ret,
                                  "ib_get_cached_subnet_prefix err: %d, this should never happen here\n",
                                  ret);
                        if (!ret)
                                ib_security_cache_change(dev, i, sp);
                }
        }
        up_read(&lists_rwsem);
}

static int ib_security_change(struct notifier_block *nb, unsigned long event,
                              void *lsm_data)
{
        if (event != LSM_POLICY_CHANGE)
                return NOTIFY_DONE;

        schedule_work(&ib_policy_change_work);

        return NOTIFY_OK;
}

/**
 *      __dev_new_index -       allocate a device index
 *
 *      Returns a suitable unique value for a new device interface
 *      number.  It assumes that fewer than 2^32-1 IB devices will be
 *      present in the system.
 */
static u32 __dev_new_index(void)
{
        /*
         * The device index allows stable naming.
         * Similar to struct net -> ifindex.
         */
        static u32 index;

        for (;;) {
                if (!(++index))
                        index = 1;

                if (!__ib_device_get_by_index(index))
                        return index;
        }
}

/**
 * ib_register_device - Register an IB device with IB core
 * @device:Device to register
 *
 * Low-level drivers use ib_register_device() to register their
 * devices with the IB core.  All registered clients will receive a
 * callback for each device that is added. @device must be allocated
 * with ib_alloc_device().
 */
int ib_register_device(struct ib_device *device,
                       int (*port_callback)(struct ib_device *,
                                            u8, struct kobject *))
{
        int ret;
        struct ib_client *client;
        struct ib_udata uhw = {.outlen = 0, .inlen = 0};
        struct device *parent = device->dev.parent;

        WARN_ON_ONCE(device->dma_device);
        if (device->dev.dma_ops) {
                /*
                 * The caller provided custom DMA operations. Copy the
                 * DMA-related fields that are used by e.g. dma_alloc_coherent()
                 * into device->dev.
                 */
                device->dma_device = &device->dev;
                if (!device->dev.dma_mask) {
                        if (parent)
                                device->dev.dma_mask = parent->dma_mask;
                        else
                                WARN_ON_ONCE(true);
                }
                if (!device->dev.coherent_dma_mask) {
                        if (parent)
                                device->dev.coherent_dma_mask =
                                        parent->coherent_dma_mask;
                        else
                                WARN_ON_ONCE(true);
                }
        } else {
                /*
                 * The caller did not provide custom DMA operations. Use the
                 * DMA mapping operations of the parent device.
                 */
                WARN_ON_ONCE(!parent);
                device->dma_device = parent;
        }

        mutex_lock(&device_mutex);

        if (strchr(device->name, '%')) {
                ret = alloc_name(device->name);
                if (ret)
                        goto out;
        }

        if (ib_device_check_mandatory(device)) {
                ret = -EINVAL;
                goto out;
        }

        ret = read_port_immutable(device);
        if (ret) {
                pr_warn("Couldn't create per-port immutable data for %s\n",
                        device->name);
                goto out;
        }

        ret = setup_port_pkey_list(device);
        if (ret) {
                pr_warn("Couldn't create per-port pkey list\n");
                goto out;
        }

        ret = ib_cache_setup_one(device);
        if (ret) {
                pr_warn("Couldn't set up InfiniBand P_Key/GID cache\n");
                goto port_cleanup;
        }

        ret = ib_device_register_rdmacg(device);
        if (ret) {
                pr_warn("Couldn't register device with rdma cgroup\n");
                goto cache_cleanup;
        }

        memset(&device->attrs, 0, sizeof(device->attrs));
        ret = device->query_device(device, &device->attrs, &uhw);
        if (ret) {
                pr_warn("Couldn't query the device attributes\n");
                goto cg_cleanup;
        }

        ret = ib_device_register_sysfs(device, port_callback);
        if (ret) {
                pr_warn("Couldn't register device %s with driver model\n",
                        device->name);
                goto cg_cleanup;
        }

        device->reg_state = IB_DEV_REGISTERED;

        list_for_each_entry(client, &client_list, list)
                if (!add_client_context(device, client) && client->add)
                        client->add(device);

        device->index = __dev_new_index();
        down_write(&lists_rwsem);
        list_add_tail(&device->core_list, &device_list);
        up_write(&lists_rwsem);
        mutex_unlock(&device_mutex);
        return 0;

cg_cleanup:
        ib_device_unregister_rdmacg(device);
cache_cleanup:
        ib_cache_cleanup_one(device);
        ib_cache_release_one(device);
port_cleanup:
        kfree(device->port_immutable);
out:
        mutex_unlock(&device_mutex);
        return ret;
}
EXPORT_SYMBOL(ib_register_device);
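
/*
 * Example (illustrative only; the "hypo" names are made up): a minimal
 * registration sequence built on the helpers above. The device name may
 * contain a "%d" template, which alloc_name() resolves during
 * registration, and port_callback may be NULL when no extra per-port
 * sysfs attributes are needed:
 *
 *      struct ib_device *ibdev = ib_alloc_device(sizeof(struct hypo_dev));
 *
 *      strlcpy(ibdev->name, "hypo_%d", IB_DEVICE_NAME_MAX);
 *      // ... fill in the mandatory verbs checked by
 *      // ib_device_check_mandatory(), e.g. query_device, query_port ...
 *      if (ib_register_device(ibdev, NULL))
 *              ib_dealloc_device(ibdev);
 */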

/**
 * ib_unregister_device - Unregister an IB device
 * @device:Device to unregister
 *
 * Unregister an IB device.  All clients will receive a remove callback.
 */
void ib_unregister_device(struct ib_device *device)
{
        struct ib_client_data *context, *tmp;
        unsigned long flags;

        mutex_lock(&device_mutex);

        down_write(&lists_rwsem);
        list_del(&device->core_list);
        spin_lock_irqsave(&device->client_data_lock, flags);
        list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
                context->going_down = true;
        spin_unlock_irqrestore(&device->client_data_lock, flags);
        downgrade_write(&lists_rwsem);

        list_for_each_entry_safe(context, tmp, &device->client_data_list,
                                 list) {
                if (context->client->remove)
                        context->client->remove(device, context->data);
        }
        up_read(&lists_rwsem);

        ib_device_unregister_rdmacg(device);
        ib_device_unregister_sysfs(device);

        mutex_unlock(&device_mutex);

        ib_cache_cleanup_one(device);

        ib_security_destroy_port_pkey_list(device);
        kfree(device->port_pkey_list);

        down_write(&lists_rwsem);
        spin_lock_irqsave(&device->client_data_lock, flags);
        list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
                kfree(context);
        spin_unlock_irqrestore(&device->client_data_lock, flags);
        up_write(&lists_rwsem);

        device->reg_state = IB_DEV_UNREGISTERED;
}
EXPORT_SYMBOL(ib_unregister_device);

/**
 * ib_register_client - Register an IB client
 * @client:Client to register
 *
 * Upper level users of the IB drivers can use ib_register_client() to
 * register callbacks for IB device addition and removal.  When an IB
 * device is added, each registered client's add method will be called
 * (in the order the clients were registered), and when a device is
 * removed, each client's remove method will be called (in the reverse
 * order that clients were registered).  In addition, when
 * ib_register_client() is called, the client will receive an add
 * callback for all devices already registered.
 */
int ib_register_client(struct ib_client *client)
{
        struct ib_device *device;

        mutex_lock(&device_mutex);

        list_for_each_entry(device, &device_list, core_list)
                if (!add_client_context(device, client) && client->add)
                        client->add(device);

        down_write(&lists_rwsem);
        list_add_tail(&client->list, &client_list);
        up_write(&lists_rwsem);

        mutex_unlock(&device_mutex);

        return 0;
}
EXPORT_SYMBOL(ib_register_client);
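
/*
 * Example (illustrative only; "hypo_client" is a made-up name): a minimal
 * client that is notified of every IB device in the system:
 *
 *      static void hypo_add_one(struct ib_device *device)
 *      {
 *              // allocate per-device state, then stash it with
 *              // ib_set_client_data(device, &hypo_client, state);
 *      }
 *
 *      static void hypo_remove_one(struct ib_device *device, void *client_data)
 *      {
 *              // client_data is what hypo_add_one() stashed; free it here
 *      }
 *
 *      static struct ib_client hypo_client = {
 *              .name   = "hypo",
 *              .add    = hypo_add_one,
 *              .remove = hypo_remove_one,
 *      };
 *
 *      // in module init: ib_register_client(&hypo_client);
 *      // in module exit: ib_unregister_client(&hypo_client);
 */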

/**
 * ib_unregister_client - Unregister an IB client
 * @client:Client to unregister
 *
 * Upper level users use ib_unregister_client() to remove their client
 * registration.  When ib_unregister_client() is called, the client
 * will receive a remove callback for each IB device still registered.
 */
void ib_unregister_client(struct ib_client *client)
{
        struct ib_client_data *context, *tmp;
        struct ib_device *device;
        unsigned long flags;

        mutex_lock(&device_mutex);

        down_write(&lists_rwsem);
        list_del(&client->list);
        up_write(&lists_rwsem);

        list_for_each_entry(device, &device_list, core_list) {
                struct ib_client_data *found_context = NULL;

                down_write(&lists_rwsem);
                spin_lock_irqsave(&device->client_data_lock, flags);
                list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
                        if (context->client == client) {
                                context->going_down = true;
                                found_context = context;
                                break;
                        }
                spin_unlock_irqrestore(&device->client_data_lock, flags);
                up_write(&lists_rwsem);

                if (client->remove)
                        client->remove(device, found_context ?
                                               found_context->data : NULL);

                if (!found_context) {
                        pr_warn("No client context found for %s/%s\n",
                                device->name, client->name);
                        continue;
                }

                down_write(&lists_rwsem);
                spin_lock_irqsave(&device->client_data_lock, flags);
                list_del(&found_context->list);
                kfree(found_context);
                spin_unlock_irqrestore(&device->client_data_lock, flags);
                up_write(&lists_rwsem);
        }

        mutex_unlock(&device_mutex);
}
EXPORT_SYMBOL(ib_unregister_client);

/**
 * ib_get_client_data - Get IB client context
 * @device:Device to get context for
 * @client:Client to get context for
 *
 * ib_get_client_data() returns client context set with
 * ib_set_client_data().
 */
void *ib_get_client_data(struct ib_device *device, struct ib_client *client)
{
        struct ib_client_data *context;
        void *ret = NULL;
        unsigned long flags;

        spin_lock_irqsave(&device->client_data_lock, flags);
        list_for_each_entry(context, &device->client_data_list, list)
                if (context->client == client) {
                        ret = context->data;
                        break;
                }
        spin_unlock_irqrestore(&device->client_data_lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_client_data);

/**
 * ib_set_client_data - Set IB client context
 * @device:Device to set context for
 * @client:Client to set context for
 * @data:Context to set
 *
 * ib_set_client_data() sets client context that can be retrieved with
 * ib_get_client_data().
 */
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
                        void *data)
{
        struct ib_client_data *context;
        unsigned long flags;

        spin_lock_irqsave(&device->client_data_lock, flags);
        list_for_each_entry(context, &device->client_data_list, list)
                if (context->client == client) {
                        context->data = data;
                        goto out;
                }

        pr_warn("No client context found for %s/%s\n",
                device->name, client->name);

out:
        spin_unlock_irqrestore(&device->client_data_lock, flags);
}
EXPORT_SYMBOL(ib_set_client_data);
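
/*
 * Example (illustrative only, continuing the hypothetical client sketched
 * after ib_register_client() above): a client's add callback typically
 * stores its per-device state, and later code retrieves it:
 *
 *      ib_set_client_data(device, &hypo_client, state);
 *      // ...
 *      state = ib_get_client_data(device, &hypo_client);
 */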

/**
 * ib_register_event_handler - Register an IB event handler
 * @event_handler:Handler to register
 *
 * ib_register_event_handler() registers an event handler that will be
 * called back when asynchronous IB events occur (as defined in
 * chapter 11 of the InfiniBand Architecture Specification).  This
 * callback may occur in interrupt context.
 */
void ib_register_event_handler(struct ib_event_handler *event_handler)
{
        unsigned long flags;

        spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
        list_add_tail(&event_handler->list,
                      &event_handler->device->event_handler_list);
        spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
}
EXPORT_SYMBOL(ib_register_event_handler);
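
/*
 * Example (illustrative only): handlers are bound to a device with the
 * INIT_IB_EVENT_HANDLER() macro from <rdma/ib_verbs.h> before being
 * registered. Since the callback may run in interrupt context, it must
 * not sleep:
 *
 *      static void hypo_event_handler(struct ib_event_handler *handler,
 *                                     struct ib_event *event)
 *      {
 *              if (event->event == IB_EVENT_PORT_ACTIVE)
 *                      ;       // e.g. schedule deferred work here
 *      }
 *
 *      struct ib_event_handler handler;        // must outlive the registration
 *
 *      INIT_IB_EVENT_HANDLER(&handler, device, hypo_event_handler);
 *      ib_register_event_handler(&handler);
 */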

/**
 * ib_unregister_event_handler - Unregister an event handler
 * @event_handler:Handler to unregister
 *
 * Unregister an event handler registered with
 * ib_register_event_handler().
 */
void ib_unregister_event_handler(struct ib_event_handler *event_handler)
{
        unsigned long flags;

        spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
        list_del(&event_handler->list);
        spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
}
EXPORT_SYMBOL(ib_unregister_event_handler);

/**
 * ib_dispatch_event - Dispatch an asynchronous event
 * @event:Event to dispatch
 *
 * Low-level drivers must call ib_dispatch_event() to dispatch the
 * event to all registered event handlers when an asynchronous event
 * occurs.
 */
void ib_dispatch_event(struct ib_event *event)
{
        unsigned long flags;
        struct ib_event_handler *handler;

        spin_lock_irqsave(&event->device->event_handler_lock, flags);

        list_for_each_entry(handler, &event->device->event_handler_list, list)
                handler->handler(handler, event);

        spin_unlock_irqrestore(&event->device->event_handler_lock, flags);
}
EXPORT_SYMBOL(ib_dispatch_event);

/**
 * ib_query_port - Query IB port attributes
 * @device:Device to query
 * @port_num:Port number to query
 * @port_attr:Port attributes
 *
 * ib_query_port() returns the attributes of a port through the
 * @port_attr pointer.
 */
int ib_query_port(struct ib_device *device,
                  u8 port_num,
                  struct ib_port_attr *port_attr)
{
        union ib_gid gid;
        int err;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        memset(port_attr, 0, sizeof(*port_attr));
        err = device->query_port(device, port_num, port_attr);
        if (err || port_attr->subnet_prefix)
                return err;

        if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND)
                return 0;

        err = ib_query_gid(device, port_num, 0, &gid, NULL);
        if (err)
                return err;

        port_attr->subnet_prefix = be64_to_cpu(gid.global.subnet_prefix);
        return 0;
}
EXPORT_SYMBOL(ib_query_port);

/**
 * ib_query_gid - Get GID table entry
 * @device:Device to query
 * @port_num:Port number to query
 * @index:GID table index to query
 * @gid:Returned GID
 * @attr: Returned GID attributes related to this GID index (only in RoCE).
 *   NULL means ignore.
 *
 * ib_query_gid() fetches the specified GID table entry.
 */
int ib_query_gid(struct ib_device *device,
                 u8 port_num, int index, union ib_gid *gid,
                 struct ib_gid_attr *attr)
{
        if (rdma_cap_roce_gid_table(device, port_num))
                return ib_get_cached_gid(device, port_num, index, gid, attr);

        if (attr)
                return -EINVAL;

        return device->query_gid(device, port_num, index, gid);
}
EXPORT_SYMBOL(ib_query_gid);

/**
 * ib_enum_roce_netdev - enumerate all RoCE ports
 * @ib_dev : IB device we want to query
 * @filter: Should we call the callback?
 * @filter_cookie: Cookie passed to filter
 * @cb: Callback to call for each RoCE port found
 * @cookie: Cookie passed back to the callback
 *
 * Enumerates all of the physical RoCE ports of ib_dev that are
 * related to a netdevice, and calls the callback on each port for
 * which the filter function returns non-zero.
 */
void ib_enum_roce_netdev(struct ib_device *ib_dev,
                         roce_netdev_filter filter,
                         void *filter_cookie,
                         roce_netdev_callback cb,
                         void *cookie)
{
        u8 port;

        for (port = rdma_start_port(ib_dev); port <= rdma_end_port(ib_dev);
             port++)
                if (rdma_protocol_roce(ib_dev, port)) {
                        struct net_device *idev = NULL;

                        if (ib_dev->get_netdev)
                                idev = ib_dev->get_netdev(ib_dev, port);

                        if (idev &&
                            idev->reg_state >= NETREG_UNREGISTERED) {
                                dev_put(idev);
                                idev = NULL;
                        }

                        if (filter(ib_dev, port, idev, filter_cookie))
                                cb(ib_dev, port, idev, cookie);

                        if (idev)
                                dev_put(idev);
                }
}
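
/*
 * Example (illustrative only; the "hypo" names are made up): a
 * filter/callback pair matching the call sites above, where the filter
 * selects ports whose netdevice matches the one passed in through the
 * filter cookie:
 *
 *      static int hypo_filter(struct ib_device *device, u8 port,
 *                             struct net_device *idev, void *cookie)
 *      {
 *              return idev == cookie;
 *      }
 *
 *      static void hypo_cb(struct ib_device *device, u8 port,
 *                          struct net_device *idev, void *cookie)
 *      {
 *              // record (device, port) for the matched netdevice
 *      }
 */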

/**
 * ib_enum_all_roce_netdevs - enumerate all RoCE devices
 * @filter: Should we call the callback?
 * @filter_cookie: Cookie passed to filter
 * @cb: Callback to call for each RoCE port found
 * @cookie: Cookie passed back to the callback
 *
 * Enumerates the physical ports of all RoCE devices that are related
 * to netdevices, and calls the callback on each port for which the
 * filter function returns non-zero.
 */
void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
                              void *filter_cookie,
                              roce_netdev_callback cb,
                              void *cookie)
{
        struct ib_device *dev;

        down_read(&lists_rwsem);
        list_for_each_entry(dev, &device_list, core_list)
                ib_enum_roce_netdev(dev, filter, filter_cookie, cb, cookie);
        up_read(&lists_rwsem);
}

/**
 * ib_enum_all_devs - enumerate all ib_devices
 * @cb: Callback to call for each ib_device found
 *
 * Enumerates all ib_devices and calls the callback on each device.
 */
int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb,
                     struct netlink_callback *cb)
{
        struct ib_device *dev;
        unsigned int idx = 0;
        int ret = 0;

        down_read(&lists_rwsem);
        list_for_each_entry(dev, &device_list, core_list) {
                ret = nldev_cb(dev, skb, cb, idx);
                if (ret)
                        break;
                idx++;
        }

        up_read(&lists_rwsem);
        return ret;
}

/**
 * ib_query_pkey - Get P_Key table entry
 * @device:Device to query
 * @port_num:Port number to query
 * @index:P_Key table index to query
 * @pkey:Returned P_Key
 *
 * ib_query_pkey() fetches the specified P_Key table entry.
 */
int ib_query_pkey(struct ib_device *device,
                  u8 port_num, u16 index, u16 *pkey)
{
        return device->query_pkey(device, port_num, index, pkey);
}
EXPORT_SYMBOL(ib_query_pkey);

/**
 * ib_modify_device - Change IB device attributes
 * @device:Device to modify
 * @device_modify_mask:Mask of attributes to change
 * @device_modify:New attribute values
 *
 * ib_modify_device() changes a device's attributes as specified by
 * the @device_modify_mask and @device_modify structure.
 */
int ib_modify_device(struct ib_device *device,
                     int device_modify_mask,
                     struct ib_device_modify *device_modify)
{
        if (!device->modify_device)
                return -ENOSYS;

        return device->modify_device(device, device_modify_mask,
                                     device_modify);
}
EXPORT_SYMBOL(ib_modify_device);

/**
 * ib_modify_port - Modifies the attributes for the specified port.
 * @device: The device to modify.
 * @port_num: The number of the port to modify.
 * @port_modify_mask: Mask used to specify which attributes of the port
 *   to change.
 * @port_modify: New attribute values for the port.
 *
 * ib_modify_port() changes a port's attributes as specified by the
 * @port_modify_mask and @port_modify structure.
 */
int ib_modify_port(struct ib_device *device,
                   u8 port_num, int port_modify_mask,
                   struct ib_port_modify *port_modify)
{
        int rc;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        if (device->modify_port)
                rc = device->modify_port(device, port_num, port_modify_mask,
                                           port_modify);
        else
                rc = rdma_protocol_roce(device, port_num) ? 0 : -ENOSYS;
        return rc;
}
EXPORT_SYMBOL(ib_modify_port);

/**
 * ib_find_gid - Returns the port number and GID table index where
 *   a specified GID value occurs. It searches only ports whose link
 *   layer is InfiniBand.
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @ndev: The ndev related to the GID to search for.
 * @port_num: The port number of the device where the GID value was found.
 * @index: The index into the GID table where the GID was found.  This
 *   parameter may be NULL.
 */
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
                struct net_device *ndev, u8 *port_num, u16 *index)
{
        union ib_gid tmp_gid;
        int ret, port, i;

        for (port = rdma_start_port(device); port <= rdma_end_port(device); ++port) {
                if (rdma_cap_roce_gid_table(device, port))
                        continue;

                for (i = 0; i < device->port_immutable[port].gid_tbl_len; ++i) {
                        ret = ib_query_gid(device, port, i, &tmp_gid, NULL);
                        if (ret)
                                return ret;
                        if (!memcmp(&tmp_gid, gid, sizeof *gid)) {
                                *port_num = port;
                                if (index)
                                        *index = i;
                                return 0;
                        }
                }
        }

        return -ENOENT;
}
EXPORT_SYMBOL(ib_find_gid);

/**
 * ib_find_pkey - Returns the PKey table index where a specified
 *   PKey value occurs.
 * @device: The device to query.
 * @port_num: The port number of the device to search for the PKey.
 * @pkey: The PKey value to search for.
 * @index: The index into the PKey table where the PKey was found.
 */
int ib_find_pkey(struct ib_device *device,
                 u8 port_num, u16 pkey, u16 *index)
{
        int ret, i;
        u16 tmp_pkey;
        int partial_ix = -1;

        for (i = 0; i < device->port_immutable[port_num].pkey_tbl_len; ++i) {
                ret = ib_query_pkey(device, port_num, i, &tmp_pkey);
                if (ret)
                        return ret;
                if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) {
                        /* If there is a full-member pkey, take it. */
                        if (tmp_pkey & 0x8000) {
                                *index = i;
                                return 0;
                        }
                        if (partial_ix < 0)
                                partial_ix = i;
                }
        }

        /* No full-member pkey found; if a limited-member one exists, take it. */
        if (partial_ix >= 0) {
                *index = partial_ix;
                return 0;
        }
        return -ENOENT;
}
EXPORT_SYMBOL(ib_find_pkey);
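
/*
 * Example (illustrative only): the top bit of a P_Key encodes membership,
 * so 0xFFFF is the full-member form and 0x7FFF the limited-member form of
 * the same partition key. Searching for either value prefers the
 * full-member entry when both are present in the table:
 *
 *      u16 index;
 *
 *      if (!ib_find_pkey(device, port_num, 0xFFFF, &index))
 *              ;       // index now refers to 0xFFFF if present,
 *                      // otherwise to the limited-member 0x7FFF entry
 */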

/**
 * ib_get_net_dev_by_params() - Return the appropriate net_dev
 * for a received CM request
 * @dev:        An RDMA device on which the request has been received.
 * @port:       Port number on the RDMA device.
 * @pkey:       The Pkey the request came on.
 * @gid:        A GID that the net_dev uses to communicate.
 * @addr:       Contains the IP address that the request specified as its
 *              destination.
 */
struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
                                            u8 port,
                                            u16 pkey,
                                            const union ib_gid *gid,
                                            const struct sockaddr *addr)
{
        struct net_device *net_dev = NULL;
        struct ib_client_data *context;

        if (!rdma_protocol_ib(dev, port))
                return NULL;

        down_read(&lists_rwsem);

        list_for_each_entry(context, &dev->client_data_list, list) {
                struct ib_client *client = context->client;

                if (context->going_down)
                        continue;

                if (client->get_net_dev_by_params) {
                        net_dev = client->get_net_dev_by_params(dev, port, pkey,
                                                                gid, addr,
                                                                context->data);
                        if (net_dev)
                                break;
                }
        }

        up_read(&lists_rwsem);

        return net_dev;
}
EXPORT_SYMBOL(ib_get_net_dev_by_params);

static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = {
        [RDMA_NL_LS_OP_RESOLVE] = {
                .doit = ib_nl_handle_resolve_resp,
                .flags = RDMA_NL_ADMIN_PERM,
        },
        [RDMA_NL_LS_OP_SET_TIMEOUT] = {
                .doit = ib_nl_handle_set_timeout,
                .flags = RDMA_NL_ADMIN_PERM,
        },
        [RDMA_NL_LS_OP_IP_RESOLVE] = {
                .doit = ib_nl_handle_ip_res_resp,
                .flags = RDMA_NL_ADMIN_PERM,
        },
};

static int __init ib_core_init(void)
{
        int ret;

        ib_wq = alloc_workqueue("infiniband", 0, 0);
        if (!ib_wq)
                return -ENOMEM;

        ib_comp_wq = alloc_workqueue("ib-comp-wq",
                        WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
        if (!ib_comp_wq) {
                ret = -ENOMEM;
                goto err;
        }

        ret = class_register(&ib_class);
        if (ret) {
                pr_warn("Couldn't create InfiniBand device class\n");
                goto err_comp;
        }

        ret = rdma_nl_init();
        if (ret) {
                pr_warn("Couldn't init IB netlink interface: err %d\n", ret);
                goto err_sysfs;
        }

        ret = addr_init();
        if (ret) {
                pr_warn("Couldn't init IB address resolution\n");
                goto err_ibnl;
        }

        ret = ib_mad_init();
        if (ret) {
                pr_warn("Couldn't init IB MAD\n");
                goto err_addr;
        }

        ret = ib_sa_init();
        if (ret) {
                pr_warn("Couldn't init SA\n");
                goto err_mad;
        }

        ret = register_lsm_notifier(&ibdev_lsm_nb);
        if (ret) {
                pr_warn("Couldn't register LSM notifier. ret %d\n", ret);
                goto err_sa;
        }

        nldev_init();
        rdma_nl_register(RDMA_NL_LS, ibnl_ls_cb_table);
        ib_cache_setup();

        return 0;

err_sa:
        ib_sa_cleanup();
err_mad:
        ib_mad_cleanup();
err_addr:
        addr_cleanup();
err_ibnl:
        rdma_nl_exit();
err_sysfs:
        class_unregister(&ib_class);
err_comp:
        destroy_workqueue(ib_comp_wq);
err:
        destroy_workqueue(ib_wq);
        return ret;
}

static void __exit ib_core_cleanup(void)
{
        ib_cache_cleanup();
        nldev_exit();
        rdma_nl_unregister(RDMA_NL_LS);
        unregister_lsm_notifier(&ibdev_lsm_nb);
        ib_sa_cleanup();
        ib_mad_cleanup();
        addr_cleanup();
        rdma_nl_exit();
        class_unregister(&ib_class);
        destroy_workqueue(ib_comp_wq);
        /* Make sure that any pending umem accounting work is done. */
        destroy_workqueue(ib_wq);
}

MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_LS, 4);

subsys_initcall(ib_core_init);
module_exit(ib_core_cleanup);