xref: /openbmc/linux/drivers/infiniband/core/cache.c (revision 6d99a79c)
1 /*
2  * Copyright (c) 2004 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2005 Intel Corporation. All rights reserved.
4  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
5  * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * OpenIB.org BSD license below:
12  *
13  *     Redistribution and use in source and binary forms, with or
14  *     without modification, are permitted provided that the following
15  *     conditions are met:
16  *
17  *      - Redistributions of source code must retain the above
18  *        copyright notice, this list of conditions and the following
19  *        disclaimer.
20  *
21  *      - Redistributions in binary form must reproduce the above
22  *        copyright notice, this list of conditions and the following
23  *        disclaimer in the documentation and/or other materials
24  *        provided with the distribution.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33  * SOFTWARE.
34  */
35 
36 #include <linux/module.h>
37 #include <linux/errno.h>
38 #include <linux/slab.h>
39 #include <linux/workqueue.h>
40 #include <linux/netdevice.h>
41 #include <net/addrconf.h>
42 
43 #include <rdma/ib_cache.h>
44 
45 #include "core_priv.h"
46 
47 struct ib_pkey_cache {
48 	int             table_len;
49 	u16             table[];
50 };
51 
52 struct ib_update_work {
53 	struct work_struct work;
54 	struct ib_device  *device;
55 	u8                 port_num;
56 	bool		   enforce_security;
57 };
58 
59 union ib_gid zgid;
60 EXPORT_SYMBOL(zgid);
61 
62 enum gid_attr_find_mask {
63 	GID_ATTR_FIND_MASK_GID          = 1UL << 0,
64 	GID_ATTR_FIND_MASK_NETDEV	= 1UL << 1,
65 	GID_ATTR_FIND_MASK_DEFAULT	= 1UL << 2,
66 	GID_ATTR_FIND_MASK_GID_TYPE	= 1UL << 3,
67 };
68 
69 enum gid_table_entry_state {
70 	GID_TABLE_ENTRY_INVALID		= 1,
71 	GID_TABLE_ENTRY_VALID		= 2,
72 	/*
73 	 * Indicates that the entry is pending removal; there may still
74 	 * be active users of this GID entry.
75 	 * When the last user releases its reference, the GID entry is
76 	 * detached from the table.
77 	 */
78 	GID_TABLE_ENTRY_PENDING_DEL	= 3,
79 };
80 
81 struct ib_gid_table_entry {
82 	struct kref			kref;
83 	struct work_struct		del_work;
84 	struct ib_gid_attr		attr;
85 	void				*context;
86 	enum gid_table_entry_state	state;
87 };
88 
89 struct ib_gid_table {
90 	int				sz;
91 	/* In RoCE, adding a GID to the table requires:
92 	 * (a) Find whether this GID already exists.
93 	 * (b) Find a free slot.
94 	 * (c) Write the new GID.
95 	 *
96 	 * Deletion requires a different set of operations:
97 	 * (a) Find the GID.
98 	 * (b) Delete it.
99 	 *
100 	 */
101 	/* Any writer to data_vec must hold this lock and the write side of
102 	 * rwlock. Readers must hold only rwlock. All writers must be in a
103 	 * sleepable context.
104 	 */
105 	struct mutex			lock;
106 	/* rwlock protects data_vec[ix]->state and the entry pointer.
107 	 */
108 	rwlock_t			rwlock;
109 	struct ib_gid_table_entry	**data_vec;
110 	/* bit field; each set bit marks an index reserved for a default GID */
111 	u32				default_gid_indices;
112 };
113 
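/*
 * A minimal sketch of the locking contract documented above (illustrative
 * only; example_clear_slot() is a hypothetical helper, not used anywhere in
 * this file): writers hold both the mutex and the write side of rwlock,
 * while readers hold only the read side of rwlock.
 */
static inline void example_clear_slot(struct ib_gid_table *table, int ix)
{
	mutex_lock(&table->lock);	/* serialize against other writers */
	write_lock_irq(&table->rwlock);	/* exclude readers of data_vec[ix] */
	table->data_vec[ix] = NULL;
	write_unlock_irq(&table->rwlock);
	mutex_unlock(&table->lock);
}
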
114 static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port)
115 {
116 	struct ib_event event;
117 
118 	event.device		= ib_dev;
119 	event.element.port_num	= port;
120 	event.event		= IB_EVENT_GID_CHANGE;
121 
122 	ib_dispatch_event(&event);
123 }
124 
125 static const char * const gid_type_str[] = {
126 	[IB_GID_TYPE_IB]	= "IB/RoCE v1",
127 	[IB_GID_TYPE_ROCE_UDP_ENCAP]	= "RoCE v2",
128 };
129 
130 const char *ib_cache_gid_type_str(enum ib_gid_type gid_type)
131 {
132 	if (gid_type < ARRAY_SIZE(gid_type_str) && gid_type_str[gid_type])
133 		return gid_type_str[gid_type];
134 
135 	return "Invalid GID type";
136 }
137 EXPORT_SYMBOL(ib_cache_gid_type_str);
138 
139 /** rdma_is_zero_gid - Check whether the given GID is zero.
140  * @gid:	GID to check
141  * Returns true if the given GID is zero, false otherwise.
142  */
143 bool rdma_is_zero_gid(const union ib_gid *gid)
144 {
145 	return !memcmp(gid, &zgid, sizeof(*gid));
146 }
147 EXPORT_SYMBOL(rdma_is_zero_gid);
148 
149 /** is_gid_index_default - Check whether a given index belongs to the
150  * reserved default GIDs.
151  * @table:	GID table pointer
152  * @index:	Index to check in GID table
153  * Returns true if the index is one of the reserved default GID indices,
154  * otherwise returns false.
155  */
156 static bool is_gid_index_default(const struct ib_gid_table *table,
157 				 unsigned int index)
158 {
159 	return index < 32 && (BIT(index) & table->default_gid_indices);
160 }
161 
162 int ib_cache_gid_parse_type_str(const char *buf)
163 {
164 	unsigned int i;
165 	size_t len;
166 	int err = -EINVAL;
167 
168 	len = strlen(buf);
169 	if (len == 0)
170 		return -EINVAL;
171 
172 	if (buf[len - 1] == '\n')
173 		len--;
174 
175 	for (i = 0; i < ARRAY_SIZE(gid_type_str); ++i)
176 		if (gid_type_str[i] && !strncmp(buf, gid_type_str[i], len) &&
177 		    len == strlen(gid_type_str[i])) {
178 			err = i;
179 			break;
180 		}
181 
182 	return err;
183 }
184 EXPORT_SYMBOL(ib_cache_gid_parse_type_str);
185 
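/*
 * Illustrative sketch (hypothetical helper, not part of the original flow):
 * parsing a sysfs-style GID type string. A trailing newline, as written by
 * "echo", is tolerated by the parser above.
 */
static inline void example_parse_gid_type(void)
{
	int t = ib_cache_gid_parse_type_str("RoCE v2\n");

	if (t >= 0)
		pr_info("parsed GID type %d (%s)\n", t,
			ib_cache_gid_type_str(t));
}
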
186 static struct ib_gid_table *rdma_gid_table(struct ib_device *device, u8 port)
187 {
188 	return device->cache.ports[port - rdma_start_port(device)].gid;
189 }
190 
191 static bool is_gid_entry_free(const struct ib_gid_table_entry *entry)
192 {
193 	return !entry;
194 }
195 
196 static bool is_gid_entry_valid(const struct ib_gid_table_entry *entry)
197 {
198 	return entry && entry->state == GID_TABLE_ENTRY_VALID;
199 }
200 
201 static void schedule_free_gid(struct kref *kref)
202 {
203 	struct ib_gid_table_entry *entry =
204 			container_of(kref, struct ib_gid_table_entry, kref);
205 
206 	queue_work(ib_wq, &entry->del_work);
207 }
208 
209 static void free_gid_entry_locked(struct ib_gid_table_entry *entry)
210 {
211 	struct ib_device *device = entry->attr.device;
212 	u8 port_num = entry->attr.port_num;
213 	struct ib_gid_table *table = rdma_gid_table(device, port_num);
214 
215 	dev_dbg(&device->dev, "%s port=%d index=%d gid %pI6\n", __func__,
216 		port_num, entry->attr.index, entry->attr.gid.raw);
217 
218 	if (rdma_cap_roce_gid_table(device, port_num) &&
219 	    entry->state != GID_TABLE_ENTRY_INVALID)
220 		device->del_gid(&entry->attr, &entry->context);
221 
222 	write_lock_irq(&table->rwlock);
223 
224 	/*
225 	 * Only clear the table slot if it still holds this same entry;
226 	 * a new entry may have been stored at this index by the time we
227 	 * get here to free, and in that case the new entry must not be
228 	 * overwritten.
229 	 */
230 	if (entry == table->data_vec[entry->attr.index])
231 		table->data_vec[entry->attr.index] = NULL;
232 	/* Now this index is ready to be allocated */
233 	write_unlock_irq(&table->rwlock);
234 
235 	if (entry->attr.ndev)
236 		dev_put(entry->attr.ndev);
237 	kfree(entry);
238 }
239 
240 static void free_gid_entry(struct kref *kref)
241 {
242 	struct ib_gid_table_entry *entry =
243 			container_of(kref, struct ib_gid_table_entry, kref);
244 
245 	free_gid_entry_locked(entry);
246 }
247 
248 /**
249  * free_gid_work - Release reference to the GID entry
250  * @work: Work structure referring to the GID entry which needs to be
251  * deleted.
252  *
253  * free_gid_work() frees the entry from the HCA's hardware table if the
254  * provider supports it, and releases the reference to the netdevice.
255  */
256 static void free_gid_work(struct work_struct *work)
257 {
258 	struct ib_gid_table_entry *entry =
259 		container_of(work, struct ib_gid_table_entry, del_work);
260 	struct ib_device *device = entry->attr.device;
261 	u8 port_num = entry->attr.port_num;
262 	struct ib_gid_table *table = rdma_gid_table(device, port_num);
263 
264 	mutex_lock(&table->lock);
265 	free_gid_entry_locked(entry);
266 	mutex_unlock(&table->lock);
267 }
268 
269 static struct ib_gid_table_entry *
270 alloc_gid_entry(const struct ib_gid_attr *attr)
271 {
272 	struct ib_gid_table_entry *entry;
273 
274 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
275 	if (!entry)
276 		return NULL;
277 	kref_init(&entry->kref);
278 	memcpy(&entry->attr, attr, sizeof(*attr));
279 	if (entry->attr.ndev)
280 		dev_hold(entry->attr.ndev);
281 	INIT_WORK(&entry->del_work, free_gid_work);
282 	entry->state = GID_TABLE_ENTRY_INVALID;
283 	return entry;
284 }
285 
286 static void store_gid_entry(struct ib_gid_table *table,
287 			    struct ib_gid_table_entry *entry)
288 {
289 	entry->state = GID_TABLE_ENTRY_VALID;
290 
291 	dev_dbg(&entry->attr.device->dev, "%s port=%d index=%d gid %pI6\n",
292 		__func__, entry->attr.port_num, entry->attr.index,
293 		entry->attr.gid.raw);
294 
295 	lockdep_assert_held(&table->lock);
296 	write_lock_irq(&table->rwlock);
297 	table->data_vec[entry->attr.index] = entry;
298 	write_unlock_irq(&table->rwlock);
299 }
300 
301 static void get_gid_entry(struct ib_gid_table_entry *entry)
302 {
303 	kref_get(&entry->kref);
304 }
305 
306 static void put_gid_entry(struct ib_gid_table_entry *entry)
307 {
308 	kref_put(&entry->kref, schedule_free_gid);
309 }
310 
311 static void put_gid_entry_locked(struct ib_gid_table_entry *entry)
312 {
313 	kref_put(&entry->kref, free_gid_entry);
314 }
315 
316 static int add_roce_gid(struct ib_gid_table_entry *entry)
317 {
318 	const struct ib_gid_attr *attr = &entry->attr;
319 	int ret;
320 
321 	if (!attr->ndev) {
322 		dev_err(&attr->device->dev, "%s NULL netdev port=%d index=%d\n",
323 			__func__, attr->port_num, attr->index);
324 		return -EINVAL;
325 	}
326 	if (rdma_cap_roce_gid_table(attr->device, attr->port_num)) {
327 		ret = attr->device->add_gid(attr, &entry->context);
328 		if (ret) {
329 			dev_err(&attr->device->dev,
330 				"%s GID add failed port=%d index=%d\n",
331 				__func__, attr->port_num, attr->index);
332 			return ret;
333 		}
334 	}
335 	return 0;
336 }
337 
338 /**
339  * del_gid - Delete GID table entry
340  *
341  * @ib_dev:	IB device whose GID entry is to be deleted
342  * @port:	Port number of the IB device
343  * @table:	GID table of the IB device for a port
344  * @ix:		GID entry index to delete
345  *
346  */
347 static void del_gid(struct ib_device *ib_dev, u8 port,
348 		    struct ib_gid_table *table, int ix)
349 {
350 	struct ib_gid_table_entry *entry;
351 
352 	lockdep_assert_held(&table->lock);
353 
354 	dev_dbg(&ib_dev->dev, "%s port=%d index=%d gid %pI6\n", __func__, port,
355 		ix, table->data_vec[ix]->attr.gid.raw);
356 
357 	write_lock_irq(&table->rwlock);
358 	entry = table->data_vec[ix];
359 	entry->state = GID_TABLE_ENTRY_PENDING_DEL;
360 	/*
361 	 * For non-RoCE protocols, the GID entry slot is ready for reuse.
362 	 */
363 	if (!rdma_protocol_roce(ib_dev, port))
364 		table->data_vec[ix] = NULL;
365 	write_unlock_irq(&table->rwlock);
366 
367 	put_gid_entry_locked(entry);
368 }
369 
370 /**
371  * add_modify_gid - Add or modify GID table entry
372  *
373  * @table:	GID table in which GID to be added or modified
374  * @attr:	Attributes of the GID
375  *
376  * Returns 0 on success or an appropriate error code. Zero GID addition
377  * is accepted for non-RoCE ports on HCAs that report such entries as
378  * valid GIDs; however, those zero GIDs are not added to the cache.
379  */
380 static int add_modify_gid(struct ib_gid_table *table,
381 			  const struct ib_gid_attr *attr)
382 {
383 	struct ib_gid_table_entry *entry;
384 	int ret = 0;
385 
386 	/*
387 	 * Invalidate any old entry in the table to make it safe to write to
388 	 * this index.
389 	 */
390 	if (is_gid_entry_valid(table->data_vec[attr->index]))
391 		del_gid(attr->device, attr->port_num, table, attr->index);
392 
393 	/*
394 	 * Some HCAs report multiple GID entries with only one valid GID, and
395 	 * leave other unused entries as the zero GID. Convert zero GIDs to
396 	 * empty table entries instead of storing them.
397 	 */
398 	if (rdma_is_zero_gid(&attr->gid))
399 		return 0;
400 
401 	entry = alloc_gid_entry(attr);
402 	if (!entry)
403 		return -ENOMEM;
404 
405 	if (rdma_protocol_roce(attr->device, attr->port_num)) {
406 		ret = add_roce_gid(entry);
407 		if (ret)
408 			goto done;
409 	}
410 
411 	store_gid_entry(table, entry);
412 	return 0;
413 
414 done:
415 	put_gid_entry(entry);
416 	return ret;
417 }
418 
419 /* rwlock should be read locked, or lock should be held */
420 static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
421 		    const struct ib_gid_attr *val, bool default_gid,
422 		    unsigned long mask, int *pempty)
423 {
424 	int i = 0;
425 	int found = -1;
426 	int empty = pempty ? -1 : 0;
427 
428 	while (i < table->sz && (found < 0 || empty < 0)) {
429 		struct ib_gid_table_entry *data = table->data_vec[i];
430 		struct ib_gid_attr *attr;
431 		int curr_index = i;
432 
433 		i++;
434 
435 		/* find_gid() is used during GID addition, where it is expected
436 		 * to return a free entry slot that is not a duplicate.
437 		 * A free slot is requested and returned only when pempty is set,
438 		 * so look up a free slot only if requested.
439 		 */
440 		if (pempty && empty < 0) {
441 			if (is_gid_entry_free(data) &&
442 			    default_gid ==
443 				is_gid_index_default(table, curr_index)) {
444 				/*
445 				 * Found an invalid (free) entry; allocate it.
446 				 * If default GID is requested, then our
447 				 * found slot must be one of the DEFAULT
448 				 * reserved slots or we fail.
449 				 * This ensures that only DEFAULT reserved
450 				 * slots are used for default property GIDs.
451 				 */
452 				empty = curr_index;
453 			}
454 		}
455 
456 		/*
457 		 * Additionally, find_gid() is used to find a valid entry during
458 		 * lookup operations, so ignore entries that are marked as
459 		 * pending removal and entries that are marked as
460 		 * invalid.
461 		 */
462 		if (!is_gid_entry_valid(data))
463 			continue;
464 
465 		if (found >= 0)
466 			continue;
467 
468 		attr = &data->attr;
469 		if (mask & GID_ATTR_FIND_MASK_GID_TYPE &&
470 		    attr->gid_type != val->gid_type)
471 			continue;
472 
473 		if (mask & GID_ATTR_FIND_MASK_GID &&
474 		    memcmp(gid, &data->attr.gid, sizeof(*gid)))
475 			continue;
476 
477 		if (mask & GID_ATTR_FIND_MASK_NETDEV &&
478 		    attr->ndev != val->ndev)
479 			continue;
480 
481 		if (mask & GID_ATTR_FIND_MASK_DEFAULT &&
482 		    is_gid_index_default(table, curr_index) != default_gid)
483 			continue;
484 
485 		found = curr_index;
486 	}
487 
488 	if (pempty)
489 		*pempty = empty;
490 
491 	return found;
492 }
493 
494 static void make_default_gid(struct  net_device *dev, union ib_gid *gid)
495 {
496 	gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
497 	addrconf_ifid_eui48(&gid->raw[8], dev);
498 }
499 
500 static int __ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
501 			      union ib_gid *gid, struct ib_gid_attr *attr,
502 			      unsigned long mask, bool default_gid)
503 {
504 	struct ib_gid_table *table;
505 	int ret = 0;
506 	int empty;
507 	int ix;
508 
509 	/* Do not allow adding the zero GID, per IB spec version 1.3
510 	 * section 4.1.1 point (6), section 12.7.10 and
511 	 * section 12.7.20.
512 	 */
513 	if (rdma_is_zero_gid(gid))
514 		return -EINVAL;
515 
516 	table = rdma_gid_table(ib_dev, port);
517 
518 	mutex_lock(&table->lock);
519 
520 	ix = find_gid(table, gid, attr, default_gid, mask, &empty);
521 	if (ix >= 0)
522 		goto out_unlock;
523 
524 	if (empty < 0) {
525 		ret = -ENOSPC;
526 		goto out_unlock;
527 	}
528 	attr->device = ib_dev;
529 	attr->index = empty;
530 	attr->port_num = port;
531 	attr->gid = *gid;
532 	ret = add_modify_gid(table, attr);
533 	if (!ret)
534 		dispatch_gid_change_event(ib_dev, port);
535 
536 out_unlock:
537 	mutex_unlock(&table->lock);
538 	if (ret)
539 		pr_warn("%s: unable to add gid %pI6 error=%d\n",
540 			__func__, gid->raw, ret);
541 	return ret;
542 }
543 
544 int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
545 		     union ib_gid *gid, struct ib_gid_attr *attr)
546 {
547 	struct net_device *idev;
548 	unsigned long mask;
549 	int ret;
550 
551 	if (ib_dev->get_netdev) {
552 		idev = ib_dev->get_netdev(ib_dev, port);
553 		if (idev && attr->ndev != idev) {
554 			union ib_gid default_gid;
555 
556 			/* Adding default GIDs is not permitted */
557 			make_default_gid(idev, &default_gid);
558 			if (!memcmp(gid, &default_gid, sizeof(*gid))) {
559 				dev_put(idev);
560 				return -EPERM;
561 			}
562 		}
563 		if (idev)
564 			dev_put(idev);
565 	}
566 
567 	mask = GID_ATTR_FIND_MASK_GID |
568 	       GID_ATTR_FIND_MASK_GID_TYPE |
569 	       GID_ATTR_FIND_MASK_NETDEV;
570 
571 	ret = __ib_cache_gid_add(ib_dev, port, gid, attr, mask, false);
572 	return ret;
573 }
574 
575 static int
576 _ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
577 		  union ib_gid *gid, struct ib_gid_attr *attr,
578 		  unsigned long mask, bool default_gid)
579 {
580 	struct ib_gid_table *table;
581 	int ret = 0;
582 	int ix;
583 
584 	table = rdma_gid_table(ib_dev, port);
585 
586 	mutex_lock(&table->lock);
587 
588 	ix = find_gid(table, gid, attr, default_gid, mask, NULL);
589 	if (ix < 0) {
590 		ret = -EINVAL;
591 		goto out_unlock;
592 	}
593 
594 	del_gid(ib_dev, port, table, ix);
595 	dispatch_gid_change_event(ib_dev, port);
596 
597 out_unlock:
598 	mutex_unlock(&table->lock);
599 	if (ret)
600 		pr_debug("%s: can't delete gid %pI6 error=%d\n",
601 			 __func__, gid->raw, ret);
602 	return ret;
603 }
604 
605 int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
606 		     union ib_gid *gid, struct ib_gid_attr *attr)
607 {
608 	unsigned long mask = GID_ATTR_FIND_MASK_GID	  |
609 			     GID_ATTR_FIND_MASK_GID_TYPE |
610 			     GID_ATTR_FIND_MASK_DEFAULT  |
611 			     GID_ATTR_FIND_MASK_NETDEV;
612 
613 	return _ib_cache_gid_del(ib_dev, port, gid, attr, mask, false);
614 }
615 
616 int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
617 				     struct net_device *ndev)
618 {
619 	struct ib_gid_table *table;
620 	int ix;
621 	bool deleted = false;
622 
623 	table = rdma_gid_table(ib_dev, port);
624 
625 	mutex_lock(&table->lock);
626 
627 	for (ix = 0; ix < table->sz; ix++) {
628 		if (is_gid_entry_valid(table->data_vec[ix]) &&
629 		    table->data_vec[ix]->attr.ndev == ndev) {
630 			del_gid(ib_dev, port, table, ix);
631 			deleted = true;
632 		}
633 	}
634 
635 	mutex_unlock(&table->lock);
636 
637 	if (deleted)
638 		dispatch_gid_change_event(ib_dev, port);
639 
640 	return 0;
641 }
642 
643 /**
644  * rdma_find_gid_by_port - Returns the GID entry attributes when it finds
645  * a valid GID entry for the given search parameters. It searches for the
646  * specified GID value in the local software cache.
647  * @ib_dev: The device to query.
648  * @gid: The GID value to search for.
649  * @gid_type: The GID type to search for.
650  * @port: The port number of the device where the GID value should be
651  *   searched.
652  * @ndev: In RoCE, the net device of the device. NULL means ignore.
653  *
654  * Returns the SGID attributes with a reference held if the GID is found,
655  * or an ERR_PTR on error.
656  * The caller must invoke rdma_put_gid_attr() to release the reference.
657  */
658 const struct ib_gid_attr *
659 rdma_find_gid_by_port(struct ib_device *ib_dev,
660 		      const union ib_gid *gid,
661 		      enum ib_gid_type gid_type,
662 		      u8 port, struct net_device *ndev)
663 {
664 	int local_index;
665 	struct ib_gid_table *table;
666 	unsigned long mask = GID_ATTR_FIND_MASK_GID |
667 			     GID_ATTR_FIND_MASK_GID_TYPE;
668 	struct ib_gid_attr val = {.ndev = ndev, .gid_type = gid_type};
669 	const struct ib_gid_attr *attr;
670 	unsigned long flags;
671 
672 	if (!rdma_is_port_valid(ib_dev, port))
673 		return ERR_PTR(-ENOENT);
674 
675 	table = rdma_gid_table(ib_dev, port);
676 
677 	if (ndev)
678 		mask |= GID_ATTR_FIND_MASK_NETDEV;
679 
680 	read_lock_irqsave(&table->rwlock, flags);
681 	local_index = find_gid(table, gid, &val, false, mask, NULL);
682 	if (local_index >= 0) {
683 		get_gid_entry(table->data_vec[local_index]);
684 		attr = &table->data_vec[local_index]->attr;
685 		read_unlock_irqrestore(&table->rwlock, flags);
686 		return attr;
687 	}
688 
689 	read_unlock_irqrestore(&table->rwlock, flags);
690 	return ERR_PTR(-ENOENT);
691 }
692 EXPORT_SYMBOL(rdma_find_gid_by_port);
693 
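/*
 * Illustrative sketch, assuming a valid "dev" pointer supplied by the caller
 * (example_lookup_on_port() is a hypothetical helper): look up a RoCE v2 GID
 * on port 1 and drop the reference that the lookup takes.
 */
static inline void example_lookup_on_port(struct ib_device *dev,
					  const union ib_gid *gid)
{
	const struct ib_gid_attr *attr;

	attr = rdma_find_gid_by_port(dev, gid, IB_GID_TYPE_ROCE_UDP_ENCAP,
				     1, NULL);
	if (IS_ERR(attr))
		return;

	pr_info("found gid %pI6 at index %d\n", attr->gid.raw, attr->index);
	rdma_put_gid_attr(attr);	/* release the reference taken above */
}
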
694 /**
695  * rdma_find_gid_by_filter - Returns the GID table attribute where a
696  * specified GID value occurs
697  * @ib_dev: The device to query.
698  * @gid: The GID value to search for.
699  * @port: The port number of the device where the GID value is to be
700  *   searched.
701  * @filter: The filter function is executed on any matching GID in the table.
702  *   If the filter function returns true, the corresponding attribute is
703  *   returned, otherwise the search continues. It's guaranteed that while
704  *   the filter is executed, the ndev field is valid and the structure won't
705  *   change. The filter is executed in an atomic context and must not be NULL.
706  * @context: Opaque caller context passed to the filter function.
707  *
708  * rdma_find_gid_by_filter() searches the port's GID table for the specified
709  * GID value for which the filter function returns true.
710  */
711 const struct ib_gid_attr *rdma_find_gid_by_filter(
712 	struct ib_device *ib_dev, const union ib_gid *gid, u8 port,
713 	bool (*filter)(const union ib_gid *gid, const struct ib_gid_attr *,
714 		       void *),
715 	void *context)
716 {
717 	const struct ib_gid_attr *res = ERR_PTR(-ENOENT);
718 	struct ib_gid_table *table;
719 	unsigned long flags;
720 	unsigned int i;
721 
722 	if (!rdma_is_port_valid(ib_dev, port))
723 		return ERR_PTR(-EINVAL);
724 
725 	table = rdma_gid_table(ib_dev, port);
726 
727 	read_lock_irqsave(&table->rwlock, flags);
728 	for (i = 0; i < table->sz; i++) {
729 		struct ib_gid_table_entry *entry = table->data_vec[i];
730 
731 		if (!is_gid_entry_valid(entry))
732 			continue;
733 
734 		if (memcmp(gid, &entry->attr.gid, sizeof(*gid)))
735 			continue;
736 
737 		if (filter(gid, &entry->attr, context)) {
738 			get_gid_entry(entry);
739 			res = &entry->attr;
740 			break;
741 		}
742 	}
743 	read_unlock_irqrestore(&table->rwlock, flags);
744 	return res;
745 }
746 
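/*
 * Illustrative sketch of a filter callback for rdma_find_gid_by_filter()
 * (hypothetical names, assuming the caller passes the netdevice of interest
 * as the context pointer): match only GID entries bound to that netdev.
 */
static inline bool example_gid_on_ndev(const union ib_gid *gid,
				       const struct ib_gid_attr *attr,
				       void *context)
{
	return attr->ndev == context;
}

static inline const struct ib_gid_attr *
example_find_gid_on_ndev(struct ib_device *dev, const union ib_gid *gid,
			 u8 port, struct net_device *ndev)
{
	/* On success the returned attribute holds a reference; the caller
	 * must release it with rdma_put_gid_attr().
	 */
	return rdma_find_gid_by_filter(dev, gid, port, example_gid_on_ndev,
				       ndev);
}
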
747 static struct ib_gid_table *alloc_gid_table(int sz)
748 {
749 	struct ib_gid_table *table = kzalloc(sizeof(*table), GFP_KERNEL);
750 
751 	if (!table)
752 		return NULL;
753 
754 	table->data_vec = kcalloc(sz, sizeof(*table->data_vec), GFP_KERNEL);
755 	if (!table->data_vec)
756 		goto err_free_table;
757 
758 	mutex_init(&table->lock);
759 
760 	table->sz = sz;
761 	rwlock_init(&table->rwlock);
762 	return table;
763 
764 err_free_table:
765 	kfree(table);
766 	return NULL;
767 }
768 
769 static void release_gid_table(struct ib_device *device, u8 port,
770 			      struct ib_gid_table *table)
771 {
772 	bool leak = false;
773 	int i;
774 
775 	if (!table)
776 		return;
777 
778 	for (i = 0; i < table->sz; i++) {
779 		if (is_gid_entry_free(table->data_vec[i]))
780 			continue;
781 		if (kref_read(&table->data_vec[i]->kref) > 1) {
782 			dev_err(&device->dev,
783 				"GID entry ref leak for index %d ref=%d\n", i,
784 				kref_read(&table->data_vec[i]->kref));
785 			leak = true;
786 		}
787 	}
788 	if (leak)
789 		return;
790 
791 	kfree(table->data_vec);
792 	kfree(table);
793 }
794 
795 static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
796 				   struct ib_gid_table *table)
797 {
798 	int i;
799 	bool deleted = false;
800 
801 	if (!table)
802 		return;
803 
804 	mutex_lock(&table->lock);
805 	for (i = 0; i < table->sz; ++i) {
806 		if (is_gid_entry_valid(table->data_vec[i])) {
807 			del_gid(ib_dev, port, table, i);
808 			deleted = true;
809 		}
810 	}
811 	mutex_unlock(&table->lock);
812 
813 	if (deleted)
814 		dispatch_gid_change_event(ib_dev, port);
815 }
816 
817 void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
818 				  struct net_device *ndev,
819 				  unsigned long gid_type_mask,
820 				  enum ib_cache_gid_default_mode mode)
821 {
822 	union ib_gid gid = { };
823 	struct ib_gid_attr gid_attr;
824 	unsigned int gid_type;
825 	unsigned long mask;
826 
827 	mask = GID_ATTR_FIND_MASK_GID_TYPE |
828 	       GID_ATTR_FIND_MASK_DEFAULT |
829 	       GID_ATTR_FIND_MASK_NETDEV;
830 	memset(&gid_attr, 0, sizeof(gid_attr));
831 	gid_attr.ndev = ndev;
832 
833 	for (gid_type = 0; gid_type < IB_GID_TYPE_SIZE; ++gid_type) {
834 		if (1UL << gid_type & ~gid_type_mask)
835 			continue;
836 
837 		gid_attr.gid_type = gid_type;
838 
839 		if (mode == IB_CACHE_GID_DEFAULT_MODE_SET) {
840 			make_default_gid(ndev, &gid);
841 			__ib_cache_gid_add(ib_dev, port, &gid,
842 					   &gid_attr, mask, true);
843 		} else if (mode == IB_CACHE_GID_DEFAULT_MODE_DELETE) {
844 			_ib_cache_gid_del(ib_dev, port, &gid,
845 					  &gid_attr, mask, true);
846 		}
847 	}
848 }
849 
850 static void gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
851 				      struct ib_gid_table *table)
852 {
853 	unsigned int i;
854 	unsigned long roce_gid_type_mask;
855 	unsigned int num_default_gids;
856 
857 	roce_gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
858 	num_default_gids = hweight_long(roce_gid_type_mask);
859 	/* Reserve starting indices for default GIDs */
860 	for (i = 0; i < num_default_gids && i < table->sz; i++)
861 		table->default_gid_indices |= BIT(i);
862 }
863 
864 
865 static void gid_table_release_one(struct ib_device *ib_dev)
866 {
867 	struct ib_gid_table *table;
868 	u8 port;
869 
870 	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
871 		table = ib_dev->cache.ports[port].gid;
872 		release_gid_table(ib_dev, port, table);
873 		ib_dev->cache.ports[port].gid = NULL;
874 	}
875 }
876 
877 static int _gid_table_setup_one(struct ib_device *ib_dev)
878 {
879 	u8 port;
880 	struct ib_gid_table *table;
881 
882 	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
883 		u8 rdma_port = port + rdma_start_port(ib_dev);
884 
885 		table =	alloc_gid_table(
886 				ib_dev->port_immutable[rdma_port].gid_tbl_len);
887 		if (!table)
888 			goto rollback_table_setup;
889 
890 		gid_table_reserve_default(ib_dev, rdma_port, table);
891 		ib_dev->cache.ports[port].gid = table;
892 	}
893 	return 0;
894 
895 rollback_table_setup:
896 	gid_table_release_one(ib_dev);
897 	return -ENOMEM;
898 }
899 
900 static void gid_table_cleanup_one(struct ib_device *ib_dev)
901 {
902 	struct ib_gid_table *table;
903 	u8 port;
904 
905 	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
906 		table = ib_dev->cache.ports[port].gid;
907 		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
908 				       table);
909 	}
910 }
911 
912 static int gid_table_setup_one(struct ib_device *ib_dev)
913 {
914 	int err;
915 
916 	err = _gid_table_setup_one(ib_dev);
917 
918 	if (err)
919 		return err;
920 
921 	rdma_roce_rescan_device(ib_dev);
922 
923 	return err;
924 }
925 
926 /**
927  * rdma_query_gid - Read the GID content from the GID software cache
928  * @device:		Device to query the GID
929  * @port_num:		Port number of the device
930  * @index:		Index of the GID table entry to read
931  * @gid:		Pointer to GID where to store the entry's GID
932  *
933  * rdma_query_gid() only reads the GID entry content for requested device,
934  * port and index. It reads for IB, RoCE and iWarp link layers.  It doesn't
935  * hold any reference to the GID table entry in the HCA or software cache.
936  *
937  * Returns 0 on success or appropriate error code.
938  *
939  */
940 int rdma_query_gid(struct ib_device *device, u8 port_num,
941 		   int index, union ib_gid *gid)
942 {
943 	struct ib_gid_table *table;
944 	unsigned long flags;
945 	int res = -EINVAL;
946 
947 	if (!rdma_is_port_valid(device, port_num))
948 		return -EINVAL;
949 
950 	table = rdma_gid_table(device, port_num);
951 	read_lock_irqsave(&table->rwlock, flags);
952 
953 	if (index < 0 || index >= table->sz ||
954 	    !is_gid_entry_valid(table->data_vec[index]))
955 		goto done;
956 
957 	memcpy(gid, &table->data_vec[index]->attr.gid, sizeof(*gid));
958 	res = 0;
959 
960 done:
961 	read_unlock_irqrestore(&table->rwlock, flags);
962 	return res;
963 }
964 EXPORT_SYMBOL(rdma_query_gid);
965 
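/*
 * Illustrative sketch, assuming a valid "dev" pointer from the caller
 * (example_read_gid0() is a hypothetical helper): read the cached GID at
 * index 0 of the device's first port. No reference is held on the entry;
 * only the GID value is copied out.
 */
static inline void example_read_gid0(struct ib_device *dev)
{
	union ib_gid gid;

	if (!rdma_query_gid(dev, rdma_start_port(dev), 0, &gid))
		pr_info("port %d gid0 %pI6\n", rdma_start_port(dev), gid.raw);
}
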
966 /**
967  * rdma_find_gid - Returns SGID attributes if the matching GID is found.
968  * @device: The device to query.
969  * @gid: The GID value to search for.
970  * @gid_type: The GID type to search for.
971  * @ndev: In RoCE, the net device of the device. NULL means ignore.
972  *
973  * rdma_find_gid() searches for the specified GID value in the software cache.
974  *
975  * Returns GID attributes if a valid GID is found or returns ERR_PTR for the
976  * error. The caller must invoke rdma_put_gid_attr() to release the reference.
977  *
978  */
979 const struct ib_gid_attr *rdma_find_gid(struct ib_device *device,
980 					const union ib_gid *gid,
981 					enum ib_gid_type gid_type,
982 					struct net_device *ndev)
983 {
984 	unsigned long mask = GID_ATTR_FIND_MASK_GID |
985 			     GID_ATTR_FIND_MASK_GID_TYPE;
986 	struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};
987 	u8 p;
988 
989 	if (ndev)
990 		mask |= GID_ATTR_FIND_MASK_NETDEV;
991 
992 	for (p = 0; p < device->phys_port_cnt; p++) {
993 		struct ib_gid_table *table;
994 		unsigned long flags;
995 		int index;
996 
997 		table = device->cache.ports[p].gid;
998 		read_lock_irqsave(&table->rwlock, flags);
999 		index = find_gid(table, gid, &gid_attr_val, false, mask, NULL);
1000 		if (index >= 0) {
1001 			const struct ib_gid_attr *attr;
1002 
1003 			get_gid_entry(table->data_vec[index]);
1004 			attr = &table->data_vec[index]->attr;
1005 			read_unlock_irqrestore(&table->rwlock, flags);
1006 			return attr;
1007 		}
1008 		read_unlock_irqrestore(&table->rwlock, flags);
1009 	}
1010 
1011 	return ERR_PTR(-ENOENT);
1012 }
1013 EXPORT_SYMBOL(rdma_find_gid);
1014 
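/*
 * Illustrative sketch, assuming "dev" and "gid" come from the caller
 * (example_gid_exists() is a hypothetical helper): search every port for an
 * IB-type GID and release the reference the cache lookup takes on a hit.
 */
static inline bool example_gid_exists(struct ib_device *dev,
				      const union ib_gid *gid)
{
	const struct ib_gid_attr *attr;

	attr = rdma_find_gid(dev, gid, IB_GID_TYPE_IB, NULL);
	if (IS_ERR(attr))
		return false;

	rdma_put_gid_attr(attr);
	return true;
}
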
1015 int ib_get_cached_pkey(struct ib_device *device,
1016 		       u8                port_num,
1017 		       int               index,
1018 		       u16              *pkey)
1019 {
1020 	struct ib_pkey_cache *cache;
1021 	unsigned long flags;
1022 	int ret = 0;
1023 
1024 	if (!rdma_is_port_valid(device, port_num))
1025 		return -EINVAL;
1026 
1027 	read_lock_irqsave(&device->cache.lock, flags);
1028 
1029 	cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;
1030 
1031 	if (index < 0 || index >= cache->table_len)
1032 		ret = -EINVAL;
1033 	else
1034 		*pkey = cache->table[index];
1035 
1036 	read_unlock_irqrestore(&device->cache.lock, flags);
1037 
1038 	return ret;
1039 }
1040 EXPORT_SYMBOL(ib_get_cached_pkey);
1041 
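/*
 * Illustrative sketch, assuming a valid "dev" pointer (hypothetical helper):
 * read the P_Key at index 0 of a port from the software cache. Index 0
 * conventionally holds the default P_Key 0xffff on IB fabrics.
 */
static inline void example_read_default_pkey(struct ib_device *dev, u8 port)
{
	u16 pkey;

	if (!ib_get_cached_pkey(dev, port, 0, &pkey))
		pr_info("port %d pkey0 0x%04x\n", port, pkey);
}
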
1042 int ib_get_cached_subnet_prefix(struct ib_device *device,
1043 				u8                port_num,
1044 				u64              *sn_pfx)
1045 {
1046 	unsigned long flags;
1047 	int p;
1048 
1049 	if (!rdma_is_port_valid(device, port_num))
1050 		return -EINVAL;
1051 
1052 	p = port_num - rdma_start_port(device);
1053 	read_lock_irqsave(&device->cache.lock, flags);
1054 	*sn_pfx = device->cache.ports[p].subnet_prefix;
1055 	read_unlock_irqrestore(&device->cache.lock, flags);
1056 
1057 	return 0;
1058 }
1059 EXPORT_SYMBOL(ib_get_cached_subnet_prefix);
1060 
1061 int ib_find_cached_pkey(struct ib_device *device,
1062 			u8                port_num,
1063 			u16               pkey,
1064 			u16              *index)
1065 {
1066 	struct ib_pkey_cache *cache;
1067 	unsigned long flags;
1068 	int i;
1069 	int ret = -ENOENT;
1070 	int partial_ix = -1;
1071 
1072 	if (!rdma_is_port_valid(device, port_num))
1073 		return -EINVAL;
1074 
1075 	read_lock_irqsave(&device->cache.lock, flags);
1076 
1077 	cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;
1078 
1079 	*index = -1;
1080 
1081 	for (i = 0; i < cache->table_len; ++i)
1082 		if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
1083 			if (cache->table[i] & 0x8000) {
1084 				*index = i;
1085 				ret = 0;
1086 				break;
1087 			} else
1088 				partial_ix = i;
1089 		}
1090 
1091 	if (ret && partial_ix >= 0) {
1092 		*index = partial_ix;
1093 		ret = 0;
1094 	}
1095 
1096 	read_unlock_irqrestore(&device->cache.lock, flags);
1097 
1098 	return ret;
1099 }
1100 EXPORT_SYMBOL(ib_find_cached_pkey);
1101 
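/*
 * Illustrative sketch, assuming a valid "dev" pointer (hypothetical helper):
 * locate the table index of a P_Key. As implemented above, a full-membership
 * entry (bit 15 set) is preferred and a limited-membership match is returned
 * only when no full-membership entry exists.
 */
static inline void example_find_pkey_index(struct ib_device *dev, u8 port)
{
	u16 index;

	if (!ib_find_cached_pkey(dev, port, 0xffff, &index))
		pr_info("default pkey found at index %d\n", index);
}
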
1102 int ib_find_exact_cached_pkey(struct ib_device *device,
1103 			      u8                port_num,
1104 			      u16               pkey,
1105 			      u16              *index)
1106 {
1107 	struct ib_pkey_cache *cache;
1108 	unsigned long flags;
1109 	int i;
1110 	int ret = -ENOENT;
1111 
1112 	if (!rdma_is_port_valid(device, port_num))
1113 		return -EINVAL;
1114 
1115 	read_lock_irqsave(&device->cache.lock, flags);
1116 
1117 	cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;
1118 
1119 	*index = -1;
1120 
1121 	for (i = 0; i < cache->table_len; ++i)
1122 		if (cache->table[i] == pkey) {
1123 			*index = i;
1124 			ret = 0;
1125 			break;
1126 		}
1127 
1128 	read_unlock_irqrestore(&device->cache.lock, flags);
1129 
1130 	return ret;
1131 }
1132 EXPORT_SYMBOL(ib_find_exact_cached_pkey);
1133 
1134 int ib_get_cached_lmc(struct ib_device *device,
1135 		      u8                port_num,
1136 		      u8                *lmc)
1137 {
1138 	unsigned long flags;
1139 	int ret = 0;
1140 
1141 	if (!rdma_is_port_valid(device, port_num))
1142 		return -EINVAL;
1143 
1144 	read_lock_irqsave(&device->cache.lock, flags);
1145 	*lmc = device->cache.ports[port_num - rdma_start_port(device)].lmc;
1146 	read_unlock_irqrestore(&device->cache.lock, flags);
1147 
1148 	return ret;
1149 }
1150 EXPORT_SYMBOL(ib_get_cached_lmc);
1151 
1152 int ib_get_cached_port_state(struct ib_device   *device,
1153 			     u8                  port_num,
1154 			     enum ib_port_state *port_state)
1155 {
1156 	unsigned long flags;
1157 	int ret = 0;
1158 
1159 	if (!rdma_is_port_valid(device, port_num))
1160 		return -EINVAL;
1161 
1162 	read_lock_irqsave(&device->cache.lock, flags);
1163 	*port_state = device->cache.ports[port_num
1164 		- rdma_start_port(device)].port_state;
1165 	read_unlock_irqrestore(&device->cache.lock, flags);
1166 
1167 	return ret;
1168 }
1169 EXPORT_SYMBOL(ib_get_cached_port_state);
1170 
1171 /**
1172  * rdma_get_gid_attr - Returns GID attributes for a port of a device
1173  * at a requested gid_index, if a valid GID entry exists.
1174  * @device:		The device to query.
1175  * @port_num:		The port number on the device where the GID value
1176  *			is to be queried.
1177  * @index:		Index of the GID table entry whose attributes are to
1178  *                      be queried.
1179  *
1180  * rdma_get_gid_attr() acquires a reference on the GID attribute from the
1181  * cached GID table. The caller must invoke rdma_put_gid_attr() to release
1182  * the reference to the GID attribute regardless of link layer.
1183  *
1184  * Returns pointer to valid gid attribute or ERR_PTR for the appropriate error
1185  * code.
1186  */
1187 const struct ib_gid_attr *
1188 rdma_get_gid_attr(struct ib_device *device, u8 port_num, int index)
1189 {
1190 	const struct ib_gid_attr *attr = ERR_PTR(-EINVAL);
1191 	struct ib_gid_table *table;
1192 	unsigned long flags;
1193 
1194 	if (!rdma_is_port_valid(device, port_num))
1195 		return ERR_PTR(-EINVAL);
1196 
1197 	table = rdma_gid_table(device, port_num);
1198 	if (index < 0 || index >= table->sz)
1199 		return ERR_PTR(-EINVAL);
1200 
1201 	read_lock_irqsave(&table->rwlock, flags);
1202 	if (!is_gid_entry_valid(table->data_vec[index]))
1203 		goto done;
1204 
1205 	get_gid_entry(table->data_vec[index]);
1206 	attr = &table->data_vec[index]->attr;
1207 done:
1208 	read_unlock_irqrestore(&table->rwlock, flags);
1209 	return attr;
1210 }
1211 EXPORT_SYMBOL(rdma_get_gid_attr);
1212 
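/*
 * Illustrative sketch, assuming a valid "dev" pointer (hypothetical helper):
 * inspect the GID attribute at a given table index and always pair the get
 * with rdma_put_gid_attr() to drop the reference.
 */
static inline void example_inspect_gid_attr(struct ib_device *dev,
					    u8 port, int index)
{
	const struct ib_gid_attr *attr;

	attr = rdma_get_gid_attr(dev, port, index);
	if (IS_ERR(attr))
		return;

	pr_info("port %d index %d gid %pI6 type %d\n", port, index,
		attr->gid.raw, attr->gid_type);
	rdma_put_gid_attr(attr);
}
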
1213 /**
1214  * rdma_put_gid_attr - Release reference to the GID attribute
1215  * @attr:		Pointer to the GID attribute whose reference
1216  *			needs to be released.
1217  *
1218  * rdma_put_gid_attr() must be used to release a reference that was
1219  * acquired using rdma_get_gid_attr() or any API which returns a
1220  * pointer to the ib_gid_attr, regardless of whether the link layer
1221  * is IB or RoCE.
1222  *
1223  */
1224 void rdma_put_gid_attr(const struct ib_gid_attr *attr)
1225 {
1226 	struct ib_gid_table_entry *entry =
1227 		container_of(attr, struct ib_gid_table_entry, attr);
1228 
1229 	put_gid_entry(entry);
1230 }
1231 EXPORT_SYMBOL(rdma_put_gid_attr);
1232 
1233 /**
1234  * rdma_hold_gid_attr - Get reference to existing GID attribute
1235  *
1236  * @attr:		Pointer to the GID attribute whose reference
1237  *			needs to be taken.
1238  *
1239  * Increase the reference count to a GID attribute to keep it from being
1240  * freed. Callers are required to already be holding a reference to the attribute.
1241  *
1242  */
1243 void rdma_hold_gid_attr(const struct ib_gid_attr *attr)
1244 {
1245 	struct ib_gid_table_entry *entry =
1246 		container_of(attr, struct ib_gid_table_entry, attr);
1247 
1248 	get_gid_entry(entry);
1249 }
1250 EXPORT_SYMBOL(rdma_hold_gid_attr);
1251 
1252 /**
1253  * rdma_read_gid_attr_ndev_rcu - Read GID attribute netdevice
1254  * which must be in UP state.
1255  *
1256  * @attr:Pointer to the GID attribute
1257  *
1258  * Returns a pointer to the netdevice if it is attached to the GID and
1259  * is in the UP state. The caller must hold the RCU read lock, as this API
1260  * reads the netdev flags, which can change while the netdevice migrates to
1261  * a different net namespace. Returns ERR_PTR with an error code otherwise.
1262  *
1263  */
1264 struct net_device *rdma_read_gid_attr_ndev_rcu(const struct ib_gid_attr *attr)
1265 {
1266 	struct ib_gid_table_entry *entry =
1267 			container_of(attr, struct ib_gid_table_entry, attr);
1268 	struct ib_device *device = entry->attr.device;
1269 	struct net_device *ndev = ERR_PTR(-ENODEV);
1270 	u8 port_num = entry->attr.port_num;
1271 	struct ib_gid_table *table;
1272 	unsigned long flags;
1273 	bool valid;
1274 
1275 	table = rdma_gid_table(device, port_num);
1276 
1277 	read_lock_irqsave(&table->rwlock, flags);
1278 	valid = is_gid_entry_valid(table->data_vec[attr->index]);
1279 	if (valid && attr->ndev && (READ_ONCE(attr->ndev->flags) & IFF_UP))
1280 		ndev = attr->ndev;
1281 	read_unlock_irqrestore(&table->rwlock, flags);
1282 	return ndev;
1283 }
1284 
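/*
 * Illustrative sketch (hypothetical helper): read the netdevice bound to a
 * GID attribute under the RCU read lock, as required by
 * rdma_read_gid_attr_ndev_rcu(). The "attr" pointer is assumed to be held
 * (referenced) by the caller.
 */
static inline void example_log_gid_ndev(const struct ib_gid_attr *attr)
{
	struct net_device *ndev;

	rcu_read_lock();
	ndev = rdma_read_gid_attr_ndev_rcu(attr);
	if (!IS_ERR(ndev))
		pr_info("gid %pI6 bound to %s\n", attr->gid.raw, ndev->name);
	rcu_read_unlock();
}
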
1285 static int config_non_roce_gid_cache(struct ib_device *device,
1286 				     u8 port, int gid_tbl_len)
1287 {
1288 	struct ib_gid_attr gid_attr = {};
1289 	struct ib_gid_table *table;
1290 	int ret = 0;
1291 	int i;
1292 
1293 	gid_attr.device = device;
1294 	gid_attr.port_num = port;
1295 	table = rdma_gid_table(device, port);
1296 
1297 	mutex_lock(&table->lock);
1298 	for (i = 0; i < gid_tbl_len; ++i) {
1299 		if (!device->query_gid)
1300 			continue;
1301 		ret = device->query_gid(device, port, i, &gid_attr.gid);
1302 		if (ret) {
1303 			dev_warn(&device->dev,
1304 				 "query_gid failed (%d) for index %d\n", ret,
1305 				 i);
1306 			goto err;
1307 		}
1308 		gid_attr.index = i;
1309 		add_modify_gid(table, &gid_attr);
1310 	}
1311 err:
1312 	mutex_unlock(&table->lock);
1313 	return ret;
1314 }
1315 
1316 static void ib_cache_update(struct ib_device *device,
1317 			    u8                port,
1318 			    bool	      enforce_security)
1319 {
1320 	struct ib_port_attr       *tprops = NULL;
1321 	struct ib_pkey_cache      *pkey_cache = NULL, *old_pkey_cache;
1322 	int                        i;
1323 	int                        ret;
1324 
1325 	if (!rdma_is_port_valid(device, port))
1326 		return;
1327 
1328 	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
1329 	if (!tprops)
1330 		return;
1331 
1332 	ret = ib_query_port(device, port, tprops);
1333 	if (ret) {
1334 		dev_warn(&device->dev, "ib_query_port failed (%d)\n", ret);
1335 		goto err;
1336 	}
1337 
1338 	if (!rdma_protocol_roce(device, port)) {
1339 		ret = config_non_roce_gid_cache(device, port,
1340 						tprops->gid_tbl_len);
1341 		if (ret)
1342 			goto err;
1343 	}
1344 
1345 	pkey_cache = kmalloc(struct_size(pkey_cache, table,
1346 					 tprops->pkey_tbl_len),
1347 			     GFP_KERNEL);
1348 	if (!pkey_cache)
1349 		goto err;
1350 
1351 	pkey_cache->table_len = tprops->pkey_tbl_len;
1352 
1353 	for (i = 0; i < pkey_cache->table_len; ++i) {
1354 		ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
1355 		if (ret) {
1356 			dev_warn(&device->dev,
1357 				 "ib_query_pkey failed (%d) for index %d\n",
1358 				 ret, i);
1359 			goto err;
1360 		}
1361 	}
1362 
1363 	write_lock_irq(&device->cache.lock);
1364 
1365 	old_pkey_cache = device->cache.ports[port -
1366 		rdma_start_port(device)].pkey;
1367 
1368 	device->cache.ports[port - rdma_start_port(device)].pkey = pkey_cache;
1369 	device->cache.ports[port - rdma_start_port(device)].lmc = tprops->lmc;
1370 	device->cache.ports[port - rdma_start_port(device)].port_state =
1371 		tprops->state;
1372 
1373 	device->cache.ports[port - rdma_start_port(device)].subnet_prefix =
1374 							tprops->subnet_prefix;
1375 	write_unlock_irq(&device->cache.lock);
1376 
1377 	if (enforce_security)
1378 		ib_security_cache_change(device,
1379 					 port,
1380 					 tprops->subnet_prefix);
1381 
1382 	kfree(old_pkey_cache);
1383 	kfree(tprops);
1384 	return;
1385 
1386 err:
1387 	kfree(pkey_cache);
1388 	kfree(tprops);
1389 }
1390 
1391 static void ib_cache_task(struct work_struct *_work)
1392 {
1393 	struct ib_update_work *work =
1394 		container_of(_work, struct ib_update_work, work);
1395 
1396 	ib_cache_update(work->device,
1397 			work->port_num,
1398 			work->enforce_security);
1399 	kfree(work);
1400 }
1401 
1402 static void ib_cache_event(struct ib_event_handler *handler,
1403 			   struct ib_event *event)
1404 {
1405 	struct ib_update_work *work;
1406 
1407 	if (event->event == IB_EVENT_PORT_ERR    ||
1408 	    event->event == IB_EVENT_PORT_ACTIVE ||
1409 	    event->event == IB_EVENT_LID_CHANGE  ||
1410 	    event->event == IB_EVENT_PKEY_CHANGE ||
1411 	    event->event == IB_EVENT_SM_CHANGE   ||
1412 	    event->event == IB_EVENT_CLIENT_REREGISTER ||
1413 	    event->event == IB_EVENT_GID_CHANGE) {
1414 		work = kmalloc(sizeof *work, GFP_ATOMIC);
1415 		if (work) {
1416 			INIT_WORK(&work->work, ib_cache_task);
1417 			work->device   = event->device;
1418 			work->port_num = event->element.port_num;
1419 			if (event->event == IB_EVENT_PKEY_CHANGE ||
1420 			    event->event == IB_EVENT_GID_CHANGE)
1421 				work->enforce_security = true;
1422 			else
1423 				work->enforce_security = false;
1424 
1425 			queue_work(ib_wq, &work->work);
1426 		}
1427 	}
1428 }
1429 
1430 int ib_cache_setup_one(struct ib_device *device)
1431 {
1432 	int p;
1433 	int err;
1434 
1435 	rwlock_init(&device->cache.lock);
1436 
1437 	device->cache.ports =
1438 		kcalloc(rdma_end_port(device) - rdma_start_port(device) + 1,
1439 			sizeof(*device->cache.ports),
1440 			GFP_KERNEL);
1441 	if (!device->cache.ports)
1442 		return -ENOMEM;
1443 
1444 	err = gid_table_setup_one(device);
1445 	if (err) {
1446 		kfree(device->cache.ports);
1447 		device->cache.ports = NULL;
1448 		return err;
1449 	}
1450 
1451 	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
1452 		ib_cache_update(device, p + rdma_start_port(device), true);
1453 
1454 	INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
1455 			      device, ib_cache_event);
1456 	ib_register_event_handler(&device->cache.event_handler);
1457 	return 0;
1458 }
1459 
1460 void ib_cache_release_one(struct ib_device *device)
1461 {
1462 	int p;
1463 
1464 	/*
1465 	 * The release function frees all the cache elements.
1466 	 * This function should be called as part of freeing
1467  * all the device's resources when the cache can no
1468 	 * longer be accessed.
1469 	 */
1470 	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
1471 		kfree(device->cache.ports[p].pkey);
1472 
1473 	gid_table_release_one(device);
1474 	kfree(device->cache.ports);
1475 }
1476 
1477 void ib_cache_cleanup_one(struct ib_device *device)
1478 {
1479 	/* The cleanup function unregisters the event handler,
1480 	 * waits for all in-progress workqueue elements and cleans
1481 	 * up the GID cache. This function should be called after
1482 	 * the device was removed from the devices list and all
1483 	 * clients were removed, so the cache exists but is
1484 	 * non-functional and shouldn't be updated anymore.
1485 	 */
1486 	ib_unregister_event_handler(&device->cache.event_handler);
1487 	flush_workqueue(ib_wq);
1488 	gid_table_cleanup_one(device);
1489 
1490 	/*
1491 	 * Flush the workqueue a second time for any pending GID deletion work.
1492 	 */
1493 	flush_workqueue(ib_wq);
1494 }
1495