xref: /openbmc/linux/drivers/infiniband/core/device.c (revision e3d786a3)
1 /*
2  * Copyright (c) 2004 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33 
34 #include <linux/module.h>
35 #include <linux/string.h>
36 #include <linux/errno.h>
37 #include <linux/kernel.h>
38 #include <linux/slab.h>
39 #include <linux/init.h>
40 #include <linux/mutex.h>
41 #include <linux/netdevice.h>
42 #include <linux/security.h>
43 #include <linux/notifier.h>
44 #include <rdma/rdma_netlink.h>
45 #include <rdma/ib_addr.h>
46 #include <rdma/ib_cache.h>
47 
48 #include "core_priv.h"
49 
50 MODULE_AUTHOR("Roland Dreier");
51 MODULE_DESCRIPTION("core kernel InfiniBand API");
52 MODULE_LICENSE("Dual BSD/GPL");
53 
54 struct ib_client_data {
55 	struct list_head  list;
56 	struct ib_client *client;
57 	void *            data;
58 	/* The device or client is going down. Do not call client or device
59 	 * callbacks other than remove(). */
60 	bool		  going_down;
61 };
62 
63 struct workqueue_struct *ib_comp_wq;
64 struct workqueue_struct *ib_comp_unbound_wq;
65 struct workqueue_struct *ib_wq;
66 EXPORT_SYMBOL_GPL(ib_wq);
67 
68 /* The device_list and client_list contain devices and clients whose
69  * registration has completed; devices and clients are removed from them
70  * during unregistration. */
71 static LIST_HEAD(device_list);
72 static LIST_HEAD(client_list);
73 
74 /*
75  * device_mutex and lists_rwsem protect access to both device_list and
76  * client_list.  device_mutex protects writer access by device and client
77  * registration / de-registration.  lists_rwsem protects reader access to
78  * these lists.  Iterators of these lists must hold lists_rwsem for read, while
79  * updates to the lists must be done holding it for write. A special case is when the
80  * device_mutex is locked. In this case locking the lists for read access is
81  * not necessary as the device_mutex implies it.
82  *
83  * lists_rwsem also protects access to the client data list.
84  */
85 static DEFINE_MUTEX(device_mutex);
86 static DECLARE_RWSEM(lists_rwsem);
87 
88 static int ib_security_change(struct notifier_block *nb, unsigned long event,
89 			      void *lsm_data);
90 static void ib_policy_change_task(struct work_struct *work);
91 static DECLARE_WORK(ib_policy_change_work, ib_policy_change_task);
92 
93 static struct notifier_block ibdev_lsm_nb = {
94 	.notifier_call = ib_security_change,
95 };
96 
97 static int ib_device_check_mandatory(struct ib_device *device)
98 {
99 #define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device, x), #x }
100 	static const struct {
101 		size_t offset;
102 		char  *name;
103 	} mandatory_table[] = {
104 		IB_MANDATORY_FUNC(query_device),
105 		IB_MANDATORY_FUNC(query_port),
106 		IB_MANDATORY_FUNC(query_pkey),
107 		IB_MANDATORY_FUNC(alloc_pd),
108 		IB_MANDATORY_FUNC(dealloc_pd),
109 		IB_MANDATORY_FUNC(create_qp),
110 		IB_MANDATORY_FUNC(modify_qp),
111 		IB_MANDATORY_FUNC(destroy_qp),
112 		IB_MANDATORY_FUNC(post_send),
113 		IB_MANDATORY_FUNC(post_recv),
114 		IB_MANDATORY_FUNC(create_cq),
115 		IB_MANDATORY_FUNC(destroy_cq),
116 		IB_MANDATORY_FUNC(poll_cq),
117 		IB_MANDATORY_FUNC(req_notify_cq),
118 		IB_MANDATORY_FUNC(get_dma_mr),
119 		IB_MANDATORY_FUNC(dereg_mr),
120 		IB_MANDATORY_FUNC(get_port_immutable)
121 	};
122 	int i;
123 
124 	for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
125 		if (!*(void **) ((void *) device + mandatory_table[i].offset)) {
126 			dev_warn(&device->dev,
127 				 "Device is missing mandatory function %s\n",
128 				 mandatory_table[i].name);
129 			return -EINVAL;
130 		}
131 	}
132 
133 	return 0;
134 }
135 
136 static struct ib_device *__ib_device_get_by_index(u32 index)
137 {
138 	struct ib_device *device;
139 
140 	list_for_each_entry(device, &device_list, core_list)
141 		if (device->index == index)
142 			return device;
143 
144 	return NULL;
145 }
146 
147 /*
148  * The caller is responsible for dropping the reference by calling put_device().
149  */
150 struct ib_device *ib_device_get_by_index(u32 index)
151 {
152 	struct ib_device *device;
153 
154 	down_read(&lists_rwsem);
155 	device = __ib_device_get_by_index(index);
156 	if (device)
157 		get_device(&device->dev);
158 
159 	up_read(&lists_rwsem);
160 	return device;
161 }
162 
163 static struct ib_device *__ib_device_get_by_name(const char *name)
164 {
165 	struct ib_device *device;
166 
167 	list_for_each_entry(device, &device_list, core_list)
168 		if (!strcmp(name, dev_name(&device->dev)))
169 			return device;
170 
171 	return NULL;
172 }
173 
174 int ib_device_rename(struct ib_device *ibdev, const char *name)
175 {
176 	struct ib_device *device;
177 	int ret = 0;
178 
179 	if (!strcmp(name, dev_name(&ibdev->dev)))
180 		return ret;
181 
182 	mutex_lock(&device_mutex);
183 	list_for_each_entry(device, &device_list, core_list) {
184 		if (!strcmp(name, dev_name(&device->dev))) {
185 			ret = -EEXIST;
186 			goto out;
187 		}
188 	}
189 
190 	ret = device_rename(&ibdev->dev, name);
191 	if (ret)
192 		goto out;
193 	strlcpy(ibdev->name, name, IB_DEVICE_NAME_MAX);
194 out:
195 	mutex_unlock(&device_mutex);
196 	return ret;
197 }
198 
199 static int alloc_name(struct ib_device *ibdev, const char *name)
200 {
201 	unsigned long *inuse;
202 	struct ib_device *device;
203 	int i;
204 
205 	inuse = (unsigned long *) get_zeroed_page(GFP_KERNEL);
206 	if (!inuse)
207 		return -ENOMEM;
208 
209 	list_for_each_entry(device, &device_list, core_list) {
210 		char buf[IB_DEVICE_NAME_MAX];
211 
212 		if (sscanf(dev_name(&device->dev), name, &i) != 1)
213 			continue;
214 		if (i < 0 || i >= PAGE_SIZE * 8)
215 			continue;
216 		snprintf(buf, sizeof buf, name, i);
217 		if (!strcmp(buf, dev_name(&device->dev)))
218 			set_bit(i, inuse);
219 	}
220 
221 	i = find_first_zero_bit(inuse, PAGE_SIZE * 8);
222 	free_page((unsigned long) inuse);
223 
224 	return dev_set_name(&ibdev->dev, name, i);
225 }
226 
227 static void ib_device_release(struct device *device)
228 {
229 	struct ib_device *dev = container_of(device, struct ib_device, dev);
230 
231 	WARN_ON(dev->reg_state == IB_DEV_REGISTERED);
232 	if (dev->reg_state == IB_DEV_UNREGISTERED) {
233 		/*
234 		 * In the IB_DEV_UNINITIALIZED state, neither the cache nor the
235 		 * port table has been created yet. Free them only once the
236 		 * device has reached the UNREGISTERED state.
237 		 */
238 		ib_cache_release_one(dev);
239 		kfree(dev->port_immutable);
240 	}
241 	kfree(dev);
242 }
243 
244 static int ib_device_uevent(struct device *device,
245 			    struct kobj_uevent_env *env)
246 {
247 	if (add_uevent_var(env, "NAME=%s", dev_name(device)))
248 		return -ENOMEM;
249 
250 	/*
251 	 * It would be nice to pass the node GUID with the event...
252 	 */
253 
254 	return 0;
255 }
256 
257 static struct class ib_class = {
258 	.name    = "infiniband",
259 	.dev_release = ib_device_release,
260 	.dev_uevent = ib_device_uevent,
261 };
262 
263 /**
264  * ib_alloc_device - allocate an IB device struct
265  * @size:size of structure to allocate
266  *
267  * Low-level drivers should use ib_alloc_device() to allocate &struct
268  * ib_device.  @size is the size of the structure to be allocated,
269  * including any private data used by the low-level driver.
270  * ib_dealloc_device() must be used to free structures allocated with
271  * ib_alloc_device().
272  */
273 struct ib_device *ib_alloc_device(size_t size)
274 {
275 	struct ib_device *device;
276 
277 	if (WARN_ON(size < sizeof(struct ib_device)))
278 		return NULL;
279 
280 	device = kzalloc(size, GFP_KERNEL);
281 	if (!device)
282 		return NULL;
283 
284 	rdma_restrack_init(&device->res);
285 
286 	device->dev.class = &ib_class;
287 	device_initialize(&device->dev);
288 
289 	dev_set_drvdata(&device->dev, device);
290 
291 	INIT_LIST_HEAD(&device->event_handler_list);
292 	spin_lock_init(&device->event_handler_lock);
293 	rwlock_init(&device->client_data_lock);
294 	INIT_LIST_HEAD(&device->client_data_list);
295 	INIT_LIST_HEAD(&device->port_list);
296 
297 	return device;
298 }
299 EXPORT_SYMBOL(ib_alloc_device);
300 
301 /**
302  * ib_dealloc_device - free an IB device struct
303  * @device:structure to free
304  *
305  * Free a structure allocated with ib_alloc_device().
306  */
307 void ib_dealloc_device(struct ib_device *device)
308 {
309 	WARN_ON(!list_empty(&device->client_data_list));
310 	WARN_ON(device->reg_state != IB_DEV_UNREGISTERED &&
311 		device->reg_state != IB_DEV_UNINITIALIZED);
312 	rdma_restrack_clean(&device->res);
313 	put_device(&device->dev);
314 }
315 EXPORT_SYMBOL(ib_dealloc_device);
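/*
 * Illustrative sketch (not part of the original file): low-level drivers
 * typically embed struct ib_device at the start of a private structure and
 * size the ib_alloc_device() call accordingly.  "struct my_hca" and
 * my_hca_alloc() are hypothetical names used only for this example; every
 * successful ib_alloc_device() must be balanced by ib_dealloc_device().
 */
struct my_hca {
	struct ib_device ibdev;		/* must be the first member */
	void __iomem *regs;		/* driver-private state follows */
};

static struct my_hca *my_hca_alloc(void)
{
	struct ib_device *ibdev;

	ibdev = ib_alloc_device(sizeof(struct my_hca));
	if (!ibdev)
		return NULL;

	return container_of(ibdev, struct my_hca, ibdev);
}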
316 
317 static int add_client_context(struct ib_device *device, struct ib_client *client)
318 {
319 	struct ib_client_data *context;
320 
321 	context = kmalloc(sizeof(*context), GFP_KERNEL);
322 	if (!context)
323 		return -ENOMEM;
324 
325 	context->client = client;
326 	context->data   = NULL;
327 	context->going_down = false;
328 
329 	down_write(&lists_rwsem);
330 	write_lock_irq(&device->client_data_lock);
331 	list_add(&context->list, &device->client_data_list);
332 	write_unlock_irq(&device->client_data_lock);
333 	up_write(&lists_rwsem);
334 
335 	return 0;
336 }
337 
338 static int verify_immutable(const struct ib_device *dev, u8 port)
339 {
340 	return WARN_ON(!rdma_cap_ib_mad(dev, port) &&
341 			    rdma_max_mad_size(dev, port) != 0);
342 }
343 
344 static int read_port_immutable(struct ib_device *device)
345 {
346 	int ret;
347 	u8 start_port = rdma_start_port(device);
348 	u8 end_port = rdma_end_port(device);
349 	u8 port;
350 
351 	/*
352 	 * device->port_immutable is indexed directly by the port number to make
353 	 * access to this data as efficient as possible.
354 	 *
355 	 * Therefore port_immutable is declared as a 1-based array with
356 	 * potential empty slots at the beginning.
357 	 */
358 	device->port_immutable = kcalloc(end_port + 1,
359 					 sizeof(*device->port_immutable),
360 					 GFP_KERNEL);
361 	if (!device->port_immutable)
362 		return -ENOMEM;
363 
364 	for (port = start_port; port <= end_port; ++port) {
365 		ret = device->get_port_immutable(device, port,
366 						 &device->port_immutable[port]);
367 		if (ret)
368 			return ret;
369 
370 		if (verify_immutable(device, port))
371 			return -EINVAL;
372 	}
373 	return 0;
374 }
375 
376 void ib_get_device_fw_str(struct ib_device *dev, char *str)
377 {
378 	if (dev->get_dev_fw_str)
379 		dev->get_dev_fw_str(dev, str);
380 	else
381 		str[0] = '\0';
382 }
383 EXPORT_SYMBOL(ib_get_device_fw_str);
384 
385 static int setup_port_pkey_list(struct ib_device *device)
386 {
387 	int i;
388 
389 	/*
390 	 * device->port_pkey_list is indexed directly by the port number.
391 	 * Therefore it is declared as a 1-based array with potential empty
392 	 * slots at the beginning.
393 	 */
394 	device->port_pkey_list = kcalloc(rdma_end_port(device) + 1,
395 					 sizeof(*device->port_pkey_list),
396 					 GFP_KERNEL);
397 
398 	if (!device->port_pkey_list)
399 		return -ENOMEM;
400 
401 	for (i = 0; i < (rdma_end_port(device) + 1); i++) {
402 		spin_lock_init(&device->port_pkey_list[i].list_lock);
403 		INIT_LIST_HEAD(&device->port_pkey_list[i].pkey_list);
404 	}
405 
406 	return 0;
407 }
408 
409 static void ib_policy_change_task(struct work_struct *work)
410 {
411 	struct ib_device *dev;
412 
413 	down_read(&lists_rwsem);
414 	list_for_each_entry(dev, &device_list, core_list) {
415 		int i;
416 
417 		for (i = rdma_start_port(dev); i <= rdma_end_port(dev); i++) {
418 			u64 sp;
419 			int ret = ib_get_cached_subnet_prefix(dev,
420 							      i,
421 							      &sp);
422 
423 			WARN_ONCE(ret,
424 				  "ib_get_cached_subnet_prefix err: %d, this should never happen here\n",
425 				  ret);
426 			if (!ret)
427 				ib_security_cache_change(dev, i, sp);
428 		}
429 	}
430 	up_read(&lists_rwsem);
431 }
432 
433 static int ib_security_change(struct notifier_block *nb, unsigned long event,
434 			      void *lsm_data)
435 {
436 	if (event != LSM_POLICY_CHANGE)
437 		return NOTIFY_DONE;
438 
439 	schedule_work(&ib_policy_change_work);
440 
441 	return NOTIFY_OK;
442 }
443 
444 /**
445  *	__dev_new_index	-	allocate a device index
446  *
447  *	Returns a suitable unique value for a new device interface
448  *	number.  It assumes that fewer than 2^32 - 1 IB devices will be
449  *	present in the system.
450  */
451 static u32 __dev_new_index(void)
452 {
453 	/*
454 	 * The device index to allow stable naming.
455 	 * Similar to struct net -> ifindex.
456 	 */
457 	static u32 index;
458 
459 	for (;;) {
460 		if (!(++index))
461 			index = 1;
462 
463 		if (!__ib_device_get_by_index(index))
464 			return index;
465 	}
466 }
467 
468 static void setup_dma_device(struct ib_device *device)
469 {
470 	struct device *parent = device->dev.parent;
471 
472 	WARN_ON_ONCE(device->dma_device);
473 	if (device->dev.dma_ops) {
474 		/*
475 		 * The caller provided custom DMA operations. Copy the
476 		 * DMA-related fields that are used by e.g. dma_alloc_coherent()
477 		 * into device->dev.
478 		 */
479 		device->dma_device = &device->dev;
480 		if (!device->dev.dma_mask) {
481 			if (parent)
482 				device->dev.dma_mask = parent->dma_mask;
483 			else
484 				WARN_ON_ONCE(true);
485 		}
486 		if (!device->dev.coherent_dma_mask) {
487 			if (parent)
488 				device->dev.coherent_dma_mask =
489 					parent->coherent_dma_mask;
490 			else
491 				WARN_ON_ONCE(true);
492 		}
493 	} else {
494 		/*
495 		 * The caller did not provide custom DMA operations. Use the
496 		 * DMA mapping operations of the parent device.
497 		 */
498 		WARN_ON_ONCE(!parent);
499 		device->dma_device = parent;
500 	}
501 }
502 
503 static void cleanup_device(struct ib_device *device)
504 {
505 	ib_cache_cleanup_one(device);
506 	ib_cache_release_one(device);
507 	kfree(device->port_pkey_list);
508 	kfree(device->port_immutable);
509 }
510 
511 static int setup_device(struct ib_device *device)
512 {
513 	struct ib_udata uhw = {.outlen = 0, .inlen = 0};
514 	int ret;
515 
516 	ret = ib_device_check_mandatory(device);
517 	if (ret)
518 		return ret;
519 
520 	ret = read_port_immutable(device);
521 	if (ret) {
522 		dev_warn(&device->dev,
523 			 "Couldn't create per port immutable data\n");
524 			 "Couldn't create per-port immutable data\n");
525 	}
526 
527 	memset(&device->attrs, 0, sizeof(device->attrs));
528 	ret = device->query_device(device, &device->attrs, &uhw);
529 	if (ret) {
530 		dev_warn(&device->dev,
531 			 "Couldn't query the device attributes\n");
532 		goto port_cleanup;
533 	}
534 
535 	ret = setup_port_pkey_list(device);
536 	if (ret) {
537 		dev_warn(&device->dev, "Couldn't create the per-port pkey list\n");
538 		goto port_cleanup;
539 	}
540 
541 	ret = ib_cache_setup_one(device);
542 	if (ret) {
543 		dev_warn(&device->dev,
544 			 "Couldn't set up InfiniBand P_Key/GID cache\n");
545 		goto pkey_cleanup;
546 	}
547 	return 0;
548 
549 pkey_cleanup:
550 	kfree(device->port_pkey_list);
551 port_cleanup:
552 	kfree(device->port_immutable);
553 	return ret;
554 }
555 
556 /**
557  * ib_register_device - Register an IB device with IB core
558  * @device:Device to register
559  * @name:Device name; a '%' (e.g. "foo%d") makes the core pick a free index
560  * @port_callback:Optional callback invoked while setting up each port's sysfs
559  *
560  * Low-level drivers use ib_register_device() to register their
561  * devices with the IB core.  All registered clients will receive a
562  * callback for each device that is added. @device must be allocated
563  * with ib_alloc_device().
564  */
565 int ib_register_device(struct ib_device *device, const char *name,
566 		       int (*port_callback)(struct ib_device *, u8,
567 					    struct kobject *))
568 {
569 	int ret;
570 	struct ib_client *client;
571 
572 	setup_dma_device(device);
573 
574 	mutex_lock(&device_mutex);
575 
576 	if (strchr(name, '%')) {
577 		ret = alloc_name(device, name);
578 		if (ret)
579 			goto out;
580 	} else {
581 		ret = dev_set_name(&device->dev, name);
582 		if (ret)
583 			goto out;
584 	}
585 	if (__ib_device_get_by_name(dev_name(&device->dev))) {
586 		ret = -ENFILE;
587 		goto out;
588 	}
589 	strlcpy(device->name, dev_name(&device->dev), IB_DEVICE_NAME_MAX);
590 
591 	ret = setup_device(device);
592 	if (ret)
593 		goto out;
594 
595 	device->index = __dev_new_index();
596 
597 	ret = ib_device_register_rdmacg(device);
598 	if (ret) {
599 		dev_warn(&device->dev,
600 			 "Couldn't register device with rdma cgroup\n");
601 		goto dev_cleanup;
602 	}
603 
604 	ret = ib_device_register_sysfs(device, port_callback);
605 	if (ret) {
606 		dev_warn(&device->dev,
607 			 "Couldn't register device with driver model\n");
608 		goto cg_cleanup;
609 	}
610 
611 	device->reg_state = IB_DEV_REGISTERED;
612 
613 	list_for_each_entry(client, &client_list, list)
614 		if (!add_client_context(device, client) && client->add)
615 			client->add(device);
616 
617 	down_write(&lists_rwsem);
618 	list_add_tail(&device->core_list, &device_list);
619 	up_write(&lists_rwsem);
620 	mutex_unlock(&device_mutex);
621 	return 0;
622 
623 cg_cleanup:
624 	ib_device_unregister_rdmacg(device);
625 dev_cleanup:
626 	cleanup_device(device);
627 out:
628 	mutex_unlock(&device_mutex);
629 	return ret;
630 }
631 EXPORT_SYMBOL(ib_register_device);
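/*
 * Illustrative sketch (not part of the original file): registering the
 * device allocated above under a templated name.  A '%' in the name
 * (e.g. "myhca%d") makes the core pick the first free index via
 * alloc_name(); a NULL port_callback skips the optional per-port sysfs
 * extension.  my_hca_register() is a hypothetical helper.
 */
static int my_hca_register(struct my_hca *hca)
{
	int ret;

	ret = ib_register_device(&hca->ibdev, "myhca%d", NULL);
	if (ret)
		return ret;

	pr_info("registered %s\n", dev_name(&hca->ibdev.dev));
	return 0;
}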
632 
633 /**
634  * ib_unregister_device - Unregister an IB device
635  * @device:Device to unregister
636  *
637  * Unregister an IB device.  All clients will receive a remove callback.
638  */
639 void ib_unregister_device(struct ib_device *device)
640 {
641 	struct ib_client_data *context, *tmp;
642 	unsigned long flags;
643 
644 	mutex_lock(&device_mutex);
645 
646 	down_write(&lists_rwsem);
647 	list_del(&device->core_list);
648 	write_lock_irq(&device->client_data_lock);
649 	list_for_each_entry(context, &device->client_data_list, list)
650 		context->going_down = true;
651 	write_unlock_irq(&device->client_data_lock);
652 	downgrade_write(&lists_rwsem);
653 
654 	list_for_each_entry(context, &device->client_data_list, list) {
655 		if (context->client->remove)
656 			context->client->remove(device, context->data);
657 	}
658 	up_read(&lists_rwsem);
659 
660 	ib_device_unregister_sysfs(device);
661 	ib_device_unregister_rdmacg(device);
662 
663 	mutex_unlock(&device_mutex);
664 
665 	ib_cache_cleanup_one(device);
666 
667 	ib_security_destroy_port_pkey_list(device);
668 	kfree(device->port_pkey_list);
669 
670 	down_write(&lists_rwsem);
671 	write_lock_irqsave(&device->client_data_lock, flags);
672 	list_for_each_entry_safe(context, tmp, &device->client_data_list,
673 				 list) {
674 		list_del(&context->list);
675 		kfree(context);
676 	}
677 	write_unlock_irqrestore(&device->client_data_lock, flags);
678 	up_write(&lists_rwsem);
679 
680 	device->reg_state = IB_DEV_UNREGISTERED;
681 }
682 EXPORT_SYMBOL(ib_unregister_device);
683 
684 /**
685  * ib_register_client - Register an IB client
686  * @client:Client to register
687  *
688  * Upper level users of the IB drivers can use ib_register_client() to
689  * register callbacks for IB device addition and removal.  When an IB
690  * device is added, each registered client's add method will be called
691  * (in the order the clients were registered), and when a device is
692  * removed, each client's remove method will be called (in the reverse
693  * order that clients were registered).  In addition, when
694  * ib_register_client() is called, the client will receive an add
695  * callback for all devices already registered.
696  */
697 int ib_register_client(struct ib_client *client)
698 {
699 	struct ib_device *device;
700 
701 	mutex_lock(&device_mutex);
702 
703 	list_for_each_entry(device, &device_list, core_list)
704 		if (!add_client_context(device, client) && client->add)
705 			client->add(device);
706 
707 	down_write(&lists_rwsem);
708 	list_add_tail(&client->list, &client_list);
709 	up_write(&lists_rwsem);
710 
711 	mutex_unlock(&device_mutex);
712 
713 	return 0;
714 }
715 EXPORT_SYMBOL(ib_register_client);
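/*
 * Illustrative sketch (not part of the original file): a minimal client.
 * add() is invoked for every device already registered and for each one
 * registered afterwards; remove() is invoked when either side
 * unregisters.  All names below are hypothetical.
 */
static void my_client_add(struct ib_device *device)
{
	pr_info("my_client: %s added\n", dev_name(&device->dev));
}

static void my_client_remove(struct ib_device *device, void *client_data)
{
	pr_info("my_client: %s removed\n", dev_name(&device->dev));
}

static struct ib_client my_client = {
	.name   = "my_client",
	.add    = my_client_add,
	.remove = my_client_remove,
};

/* Module init/exit would call ib_register_client(&my_client) and
 * ib_unregister_client(&my_client) respectively. */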
716 
717 /**
718  * ib_unregister_client - Unregister an IB client
719  * @client:Client to unregister
720  *
721  * Upper level users use ib_unregister_client() to remove their client
722  * registration.  When ib_unregister_client() is called, the client
723  * will receive a remove callback for each IB device still registered.
724  */
725 void ib_unregister_client(struct ib_client *client)
726 {
727 	struct ib_client_data *context;
728 	struct ib_device *device;
729 
730 	mutex_lock(&device_mutex);
731 
732 	down_write(&lists_rwsem);
733 	list_del(&client->list);
734 	up_write(&lists_rwsem);
735 
736 	list_for_each_entry(device, &device_list, core_list) {
737 		struct ib_client_data *found_context = NULL;
738 
739 		down_write(&lists_rwsem);
740 		write_lock_irq(&device->client_data_lock);
741 		list_for_each_entry(context, &device->client_data_list, list)
742 			if (context->client == client) {
743 				context->going_down = true;
744 				found_context = context;
745 				break;
746 			}
747 		write_unlock_irq(&device->client_data_lock);
748 		up_write(&lists_rwsem);
749 
750 		if (client->remove)
751 			client->remove(device, found_context ?
752 					       found_context->data : NULL);
753 
754 		if (!found_context) {
755 			dev_warn(&device->dev,
756 				 "No client context found for %s\n",
757 				 client->name);
758 			continue;
759 		}
760 
761 		down_write(&lists_rwsem);
762 		write_lock_irq(&device->client_data_lock);
763 		list_del(&found_context->list);
764 		write_unlock_irq(&device->client_data_lock);
765 		up_write(&lists_rwsem);
766 		kfree(found_context);
767 	}
768 
769 	mutex_unlock(&device_mutex);
770 }
771 EXPORT_SYMBOL(ib_unregister_client);
772 
773 /**
774  * ib_get_client_data - Get IB client context
775  * @device:Device to get context for
776  * @client:Client to get context for
777  *
778  * ib_get_client_data() returns client context set with
779  * ib_set_client_data().
780  */
781 void *ib_get_client_data(struct ib_device *device, struct ib_client *client)
782 {
783 	struct ib_client_data *context;
784 	void *ret = NULL;
785 	unsigned long flags;
786 
787 	read_lock_irqsave(&device->client_data_lock, flags);
788 	list_for_each_entry(context, &device->client_data_list, list)
789 		if (context->client == client) {
790 			ret = context->data;
791 			break;
792 		}
793 	read_unlock_irqrestore(&device->client_data_lock, flags);
794 
795 	return ret;
796 }
797 EXPORT_SYMBOL(ib_get_client_data);
798 
799 /**
800  * ib_set_client_data - Set IB client context
801  * @device:Device to set context for
802  * @client:Client to set context for
803  * @data:Context to set
804  *
805  * ib_set_client_data() sets client context that can be retrieved with
806  * ib_get_client_data().
807  */
808 void ib_set_client_data(struct ib_device *device, struct ib_client *client,
809 			void *data)
810 {
811 	struct ib_client_data *context;
812 	unsigned long flags;
813 
814 	write_lock_irqsave(&device->client_data_lock, flags);
815 	list_for_each_entry(context, &device->client_data_list, list)
816 		if (context->client == client) {
817 			context->data = data;
818 			goto out;
819 		}
820 
821 	dev_warn(&device->dev, "No client context found for %s\n",
822 		 client->name);
823 
824 out:
825 	write_unlock_irqrestore(&device->client_data_lock, flags);
826 }
827 EXPORT_SYMBOL(ib_set_client_data);
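/*
 * Illustrative sketch (not part of the original file): a client usually
 * allocates per-device state in its add() callback, publishes it with
 * ib_set_client_data(), and gets it back either via ib_get_client_data()
 * or as the client_data argument of remove().  "struct my_dev_ctx" and
 * these alternate callbacks are hypothetical.
 */
struct my_dev_ctx {
	struct ib_device *device;
};

static void my_client_add_ctx(struct ib_device *device)
{
	struct my_dev_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return;

	ctx->device = device;
	ib_set_client_data(device, &my_client, ctx);
}

static void my_client_remove_ctx(struct ib_device *device, void *client_data)
{
	kfree(client_data);	/* the pointer stored by my_client_add_ctx() */
}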
828 
829 /**
830  * ib_register_event_handler - Register an IB event handler
831  * @event_handler:Handler to register
832  *
833  * ib_register_event_handler() registers an event handler that will be
834  * called back when asynchronous IB events occur (as defined in
835  * chapter 11 of the InfiniBand Architecture Specification).  This
836  * callback may occur in interrupt context.
837  */
838 void ib_register_event_handler(struct ib_event_handler *event_handler)
839 {
840 	unsigned long flags;
841 
842 	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
843 	list_add_tail(&event_handler->list,
844 		      &event_handler->device->event_handler_list);
845 	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
846 }
847 EXPORT_SYMBOL(ib_register_event_handler);
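/*
 * Illustrative sketch (not part of the original file): wiring up a handler
 * with the INIT_IB_EVENT_HANDLER() helper from <rdma/ib_verbs.h>.  Because
 * dispatch may happen in interrupt context, the handler must not sleep.
 */
static void my_event_handler(struct ib_event_handler *handler,
			     struct ib_event *event)
{
	pr_debug("async event %d on %s\n", event->event,
		 dev_name(&event->device->dev));
}

static void my_watch_device(struct ib_device *device,
			    struct ib_event_handler *handler)
{
	INIT_IB_EVENT_HANDLER(handler, device, my_event_handler);
	ib_register_event_handler(handler);
}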
848 
849 /**
850  * ib_unregister_event_handler - Unregister an event handler
851  * @event_handler:Handler to unregister
852  *
853  * Unregister an event handler registered with
854  * ib_register_event_handler().
855  */
856 void ib_unregister_event_handler(struct ib_event_handler *event_handler)
857 {
858 	unsigned long flags;
859 
860 	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
861 	list_del(&event_handler->list);
862 	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
863 }
864 EXPORT_SYMBOL(ib_unregister_event_handler);
865 
866 /**
867  * ib_dispatch_event - Dispatch an asynchronous event
868  * @event:Event to dispatch
869  *
870  * Low-level drivers must call ib_dispatch_event() to dispatch the
871  * event to all registered event handlers when an asynchronous event
872  * occurs.
873  */
874 void ib_dispatch_event(struct ib_event *event)
875 {
876 	unsigned long flags;
877 	struct ib_event_handler *handler;
878 
879 	spin_lock_irqsave(&event->device->event_handler_lock, flags);
880 
881 	list_for_each_entry(handler, &event->device->event_handler_list, list)
882 		handler->handler(handler, event);
883 
884 	spin_unlock_irqrestore(&event->device->event_handler_lock, flags);
885 }
886 EXPORT_SYMBOL(ib_dispatch_event);
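/*
 * Illustrative sketch (not part of the original file): how a low-level
 * driver might report a link-up condition.  Handlers run under
 * event_handler_lock with interrupts disabled, so dispatching is safe
 * from any context.
 */
static void my_report_port_active(struct ib_device *device, u8 port_num)
{
	struct ib_event event = {
		.device		  = device,
		.event		  = IB_EVENT_PORT_ACTIVE,
		.element.port_num = port_num,
	};

	ib_dispatch_event(&event);
}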
887 
888 /**
889  * ib_query_port - Query IB port attributes
890  * @device:Device to query
891  * @port_num:Port number to query
892  * @port_attr:Port attributes
893  *
894  * ib_query_port() returns the attributes of a port through the
895  * @port_attr pointer.
896  */
897 int ib_query_port(struct ib_device *device,
898 		  u8 port_num,
899 		  struct ib_port_attr *port_attr)
900 {
901 	union ib_gid gid;
902 	int err;
903 
904 	if (!rdma_is_port_valid(device, port_num))
905 		return -EINVAL;
906 
907 	memset(port_attr, 0, sizeof(*port_attr));
908 	err = device->query_port(device, port_num, port_attr);
909 	if (err || port_attr->subnet_prefix)
910 		return err;
911 
912 	if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND)
913 		return 0;
914 
915 	err = device->query_gid(device, port_num, 0, &gid);
916 	if (err)
917 		return err;
918 
919 	port_attr->subnet_prefix = be64_to_cpu(gid.global.subnet_prefix);
920 	return 0;
921 }
922 EXPORT_SYMBOL(ib_query_port);
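/*
 * Illustrative sketch (not part of the original file): checking whether a
 * port's logical link is up.  The attribute struct can live on the stack;
 * ib_query_port() zeroes it before calling into the driver.
 */
static bool my_port_is_active(struct ib_device *device, u8 port_num)
{
	struct ib_port_attr attr;

	if (ib_query_port(device, port_num, &attr))
		return false;

	return attr.state == IB_PORT_ACTIVE;
}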
923 
924 /**
925  * ib_enum_roce_netdev - enumerate all RoCE ports
926  * @ib_dev : IB device we want to query
927  * @filter: Should we call the callback?
928  * @filter_cookie: Cookie passed to filter
929  * @cb: Callback to call for each found RoCE ports
930  * @cookie: Cookie passed back to the callback
931  *
932  * Enumerates all of the physical RoCE ports of ib_dev
933  * which are related to netdevice and calls callback() on each
934  * device for which filter() function returns non zero.
935  */
936 void ib_enum_roce_netdev(struct ib_device *ib_dev,
937 			 roce_netdev_filter filter,
938 			 void *filter_cookie,
939 			 roce_netdev_callback cb,
940 			 void *cookie)
941 {
942 	u8 port;
943 
944 	for (port = rdma_start_port(ib_dev); port <= rdma_end_port(ib_dev);
945 	     port++)
946 		if (rdma_protocol_roce(ib_dev, port)) {
947 			struct net_device *idev = NULL;
948 
949 			if (ib_dev->get_netdev)
950 				idev = ib_dev->get_netdev(ib_dev, port);
951 
952 			if (idev &&
953 			    idev->reg_state >= NETREG_UNREGISTERED) {
954 				dev_put(idev);
955 				idev = NULL;
956 			}
957 
958 			if (filter(ib_dev, port, idev, filter_cookie))
959 				cb(ib_dev, port, idev, cookie);
960 
961 			if (idev)
962 				dev_put(idev);
963 		}
964 }
965 
966 /**
967  * ib_enum_all_roce_netdevs - enumerate all RoCE devices
968  * @filter: Should we call the callback?
969  * @filter_cookie: Cookie passed to filter
970  * @cb: Callback to call for each found RoCE ports
971  * @cookie: Cookie passed back to the callback
972  *
973  * Enumerates all RoCE devices' physical ports which are related
974  * to netdevices and calls callback() on each device for which
975  * filter() function returns non zero.
976  */
977 void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
978 			      void *filter_cookie,
979 			      roce_netdev_callback cb,
980 			      void *cookie)
981 {
982 	struct ib_device *dev;
983 
984 	down_read(&lists_rwsem);
985 	list_for_each_entry(dev, &device_list, core_list)
986 		ib_enum_roce_netdev(dev, filter, filter_cookie, cb, cookie);
987 	up_read(&lists_rwsem);
988 }
989 
990 /**
991  * ib_enum_all_devs - enumerate all ib_devices
992  * @nldev_cb: Callback to call for each ib_device found
993  * @skb: Netlink buffer; @cb: netlink callback state (passed to @nldev_cb)
994  *
995  * Enumerates all ib_devices and calls nldev_cb() on each device.
995  */
996 int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb,
997 		     struct netlink_callback *cb)
998 {
999 	struct ib_device *dev;
1000 	unsigned int idx = 0;
1001 	int ret = 0;
1002 
1003 	down_read(&lists_rwsem);
1004 	list_for_each_entry(dev, &device_list, core_list) {
1005 		ret = nldev_cb(dev, skb, cb, idx);
1006 		if (ret)
1007 			break;
1008 		idx++;
1009 	}
1010 
1011 	up_read(&lists_rwsem);
1012 	return ret;
1013 }
1014 
1015 /**
1016  * ib_query_pkey - Get P_Key table entry
1017  * @device:Device to query
1018  * @port_num:Port number to query
1019  * @index:P_Key table index to query
1020  * @pkey:Returned P_Key
1021  *
1022  * ib_query_pkey() fetches the specified P_Key table entry.
1023  */
1024 int ib_query_pkey(struct ib_device *device,
1025 		  u8 port_num, u16 index, u16 *pkey)
1026 {
1027 	return device->query_pkey(device, port_num, index, pkey);
1028 }
1029 EXPORT_SYMBOL(ib_query_pkey);
1030 
1031 /**
1032  * ib_modify_device - Change IB device attributes
1033  * @device:Device to modify
1034  * @device_modify_mask:Mask of attributes to change
1035  * @device_modify:New attribute values
1036  *
1037  * ib_modify_device() changes a device's attributes as specified by
1038  * the @device_modify_mask and @device_modify structure.
1039  */
1040 int ib_modify_device(struct ib_device *device,
1041 		     int device_modify_mask,
1042 		     struct ib_device_modify *device_modify)
1043 {
1044 	if (!device->modify_device)
1045 		return -ENOSYS;
1046 
1047 	return device->modify_device(device, device_modify_mask,
1048 				     device_modify);
1049 }
1050 EXPORT_SYMBOL(ib_modify_device);
1051 
1052 /**
1053  * ib_modify_port - Modifies the attributes for the specified port.
1054  * @device: The device to modify.
1055  * @port_num: The number of the port to modify.
1056  * @port_modify_mask: Mask used to specify which attributes of the port
1057  *   to change.
1058  * @port_modify: New attribute values for the port.
1059  *
1060  * ib_modify_port() changes a port's attributes as specified by the
1061  * @port_modify_mask and @port_modify structure.
1062  */
1063 int ib_modify_port(struct ib_device *device,
1064 		   u8 port_num, int port_modify_mask,
1065 		   struct ib_port_modify *port_modify)
1066 {
1067 	int rc;
1068 
1069 	if (!rdma_is_port_valid(device, port_num))
1070 		return -EINVAL;
1071 
1072 	if (device->modify_port)
1073 		rc = device->modify_port(device, port_num, port_modify_mask,
1074 					   port_modify);
1075 	else
1076 		rc = rdma_protocol_roce(device, port_num) ? 0 : -ENOSYS;
1077 	return rc;
1078 }
1079 EXPORT_SYMBOL(ib_modify_port);
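/*
 * Illustrative sketch (not part of the original file): setting a capability
 * bit on a port, as a subsystem such as ib_srpt does for device management.
 * On a RoCE port of a device without a modify_port method this is a
 * successful no-op.
 */
static int my_enable_dev_mgmt(struct ib_device *device, u8 port_num)
{
	struct ib_port_modify port_modify = {
		.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP,
	};

	return ib_modify_port(device, port_num, 0, &port_modify);
}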
1080 
1081 /**
1082  * ib_find_gid - Returns the port number and GID table index where
1083  *   a specified GID value occurs. Its searches only for IB link layer.
1084  * @device: The device to query.
1085  * @gid: The GID value to search for.
1086  * @port_num: The port number of the device where the GID value was found.
1087  * @index: The index into the GID table where the GID was found.  This
1088  *   parameter may be NULL.
1089  */
1090 int ib_find_gid(struct ib_device *device, union ib_gid *gid,
1091 		u8 *port_num, u16 *index)
1092 {
1093 	union ib_gid tmp_gid;
1094 	int ret, port, i;
1095 
1096 	for (port = rdma_start_port(device); port <= rdma_end_port(device); ++port) {
1097 		if (!rdma_protocol_ib(device, port))
1098 			continue;
1099 
1100 		for (i = 0; i < device->port_immutable[port].gid_tbl_len; ++i) {
1101 			ret = rdma_query_gid(device, port, i, &tmp_gid);
1102 			if (ret)
1103 				return ret;
1104 			if (!memcmp(&tmp_gid, gid, sizeof *gid)) {
1105 				*port_num = port;
1106 				if (index)
1107 					*index = i;
1108 				return 0;
1109 			}
1110 		}
1111 	}
1112 
1113 	return -ENOENT;
1114 }
1115 EXPORT_SYMBOL(ib_find_gid);
1116 
1117 /**
1118  * ib_find_pkey - Returns the PKey table index where a specified
1119  *   PKey value occurs.
1120  * @device: The device to query.
1121  * @port_num: The port number of the device to search for the PKey.
1122  * @pkey: The PKey value to search for.
1123  * @index: The index into the PKey table where the PKey was found.
1124  */
1125 int ib_find_pkey(struct ib_device *device,
1126 		 u8 port_num, u16 pkey, u16 *index)
1127 {
1128 	int ret, i;
1129 	u16 tmp_pkey;
1130 	int partial_ix = -1;
1131 
1132 	for (i = 0; i < device->port_immutable[port_num].pkey_tbl_len; ++i) {
1133 		ret = ib_query_pkey(device, port_num, i, &tmp_pkey);
1134 		if (ret)
1135 			return ret;
1136 		if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) {
1137 			/* If there is a full-member pkey, take it. */
1138 			if (tmp_pkey & 0x8000) {
1139 				*index = i;
1140 				return 0;
1141 			}
1142 			if (partial_ix < 0)
1143 				partial_ix = i;
1144 		}
1145 	}
1146 
1147 	/* No full-member pkey; if a limited-member one exists, take it. */
1148 	if (partial_ix >= 0) {
1149 		*index = partial_ix;
1150 		return 0;
1151 	}
1152 	return -ENOENT;
1153 }
1154 EXPORT_SYMBOL(ib_find_pkey);
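/*
 * Illustrative sketch (not part of the original file): locating the default
 * P_Key.  The membership bit (0x8000) is masked off during the comparison,
 * so the limited-member entry 0x7fff also matches 0xffff when no
 * full-member entry exists.  my_find_default_pkey() is hypothetical.
 */
static int my_find_default_pkey(struct ib_device *device, u8 port_num,
				u16 *index)
{
	/* 0xffff is the full-member default P_Key. */
	return ib_find_pkey(device, port_num, 0xffff, index);
}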
1155 
1156 /**
1157  * ib_get_net_dev_by_params() - Return the appropriate net_dev
1158  * for a received CM request
1159  * @dev:	An RDMA device on which the request has been received.
1160  * @port:	Port number on the RDMA device.
1161  * @pkey:	The Pkey the request came on.
1162  * @gid:	A GID that the net_dev uses to communicate.
1163  * @addr:	Contains the IP address that the request specified as its
1164  *		destination.
1165  */
1166 struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
1167 					    u8 port,
1168 					    u16 pkey,
1169 					    const union ib_gid *gid,
1170 					    const struct sockaddr *addr)
1171 {
1172 	struct net_device *net_dev = NULL;
1173 	struct ib_client_data *context;
1174 
1175 	if (!rdma_protocol_ib(dev, port))
1176 		return NULL;
1177 
1178 	down_read(&lists_rwsem);
1179 
1180 	list_for_each_entry(context, &dev->client_data_list, list) {
1181 		struct ib_client *client = context->client;
1182 
1183 		if (context->going_down)
1184 			continue;
1185 
1186 		if (client->get_net_dev_by_params) {
1187 			net_dev = client->get_net_dev_by_params(dev, port, pkey,
1188 								gid, addr,
1189 								context->data);
1190 			if (net_dev)
1191 				break;
1192 		}
1193 	}
1194 
1195 	up_read(&lists_rwsem);
1196 
1197 	return net_dev;
1198 }
1199 EXPORT_SYMBOL(ib_get_net_dev_by_params);
1200 
1201 static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = {
1202 	[RDMA_NL_LS_OP_RESOLVE] = {
1203 		.doit = ib_nl_handle_resolve_resp,
1204 		.flags = RDMA_NL_ADMIN_PERM,
1205 	},
1206 	[RDMA_NL_LS_OP_SET_TIMEOUT] = {
1207 		.doit = ib_nl_handle_set_timeout,
1208 		.flags = RDMA_NL_ADMIN_PERM,
1209 	},
1210 	[RDMA_NL_LS_OP_IP_RESOLVE] = {
1211 		.doit = ib_nl_handle_ip_res_resp,
1212 		.flags = RDMA_NL_ADMIN_PERM,
1213 	},
1214 };
1215 
1216 static int __init ib_core_init(void)
1217 {
1218 	int ret;
1219 
1220 	ib_wq = alloc_workqueue("infiniband", 0, 0);
1221 	if (!ib_wq)
1222 		return -ENOMEM;
1223 
1224 	ib_comp_wq = alloc_workqueue("ib-comp-wq",
1225 			WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
1226 	if (!ib_comp_wq) {
1227 		ret = -ENOMEM;
1228 		goto err;
1229 	}
1230 
1231 	ib_comp_unbound_wq =
1232 		alloc_workqueue("ib-comp-unb-wq",
1233 				WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM |
1234 				WQ_SYSFS, WQ_UNBOUND_MAX_ACTIVE);
1235 	if (!ib_comp_unbound_wq) {
1236 		ret = -ENOMEM;
1237 		goto err_comp;
1238 	}
1239 
1240 	ret = class_register(&ib_class);
1241 	if (ret) {
1242 		pr_warn("Couldn't create InfiniBand device class\n");
1243 		goto err_comp_unbound;
1244 	}
1245 
1246 	ret = rdma_nl_init();
1247 	if (ret) {
1248 		pr_warn("Couldn't init IB netlink interface: err %d\n", ret);
1249 		goto err_sysfs;
1250 	}
1251 
1252 	ret = addr_init();
1253 	if (ret) {
1254 		pr_warn("Couldn't init IB address resolution\n");
1255 		goto err_ibnl;
1256 	}
1257 
1258 	ret = ib_mad_init();
1259 	if (ret) {
1260 		pr_warn("Couldn't init IB MAD\n");
1261 		goto err_addr;
1262 	}
1263 
1264 	ret = ib_sa_init();
1265 	if (ret) {
1266 		pr_warn("Couldn't init SA\n");
1267 		goto err_mad;
1268 	}
1269 
1270 	ret = register_lsm_notifier(&ibdev_lsm_nb);
1271 	if (ret) {
1272 		pr_warn("Couldn't register LSM notifier. ret %d\n", ret);
1273 		goto err_sa;
1274 	}
1275 
1276 	nldev_init();
1277 	rdma_nl_register(RDMA_NL_LS, ibnl_ls_cb_table);
1278 	roce_gid_mgmt_init();
1279 
1280 	return 0;
1281 
1282 err_sa:
1283 	ib_sa_cleanup();
1284 err_mad:
1285 	ib_mad_cleanup();
1286 err_addr:
1287 	addr_cleanup();
1288 err_ibnl:
1289 	rdma_nl_exit();
1290 err_sysfs:
1291 	class_unregister(&ib_class);
1292 err_comp_unbound:
1293 	destroy_workqueue(ib_comp_unbound_wq);
1294 err_comp:
1295 	destroy_workqueue(ib_comp_wq);
1296 err:
1297 	destroy_workqueue(ib_wq);
1298 	return ret;
1299 }
1300 
1301 static void __exit ib_core_cleanup(void)
1302 {
1303 	roce_gid_mgmt_cleanup();
1304 	nldev_exit();
1305 	rdma_nl_unregister(RDMA_NL_LS);
1306 	unregister_lsm_notifier(&ibdev_lsm_nb);
1307 	ib_sa_cleanup();
1308 	ib_mad_cleanup();
1309 	addr_cleanup();
1310 	rdma_nl_exit();
1311 	class_unregister(&ib_class);
1312 	destroy_workqueue(ib_comp_unbound_wq);
1313 	destroy_workqueue(ib_comp_wq);
1314 	/* Make sure that any pending umem accounting work is done. */
1315 	destroy_workqueue(ib_wq);
1316 }
1317 
1318 MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_LS, 4);
1319 
1320 subsys_initcall(ib_core_init);
1321 module_exit(ib_core_cleanup);
1322