xref: /openbmc/linux/drivers/infiniband/core/cache.c (revision d08754be)
/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/if_vlan.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

#include <rdma/ib_cache.h>

#include "core_priv.h"

struct ib_pkey_cache {
	int             table_len;
	u16             table[];
};

struct ib_update_work {
	struct work_struct work;
	struct ib_event event;
	bool enforce_security;
};

union ib_gid zgid;
EXPORT_SYMBOL(zgid);

enum gid_attr_find_mask {
	GID_ATTR_FIND_MASK_GID          = 1UL << 0,
	GID_ATTR_FIND_MASK_NETDEV	= 1UL << 1,
	GID_ATTR_FIND_MASK_DEFAULT	= 1UL << 2,
	GID_ATTR_FIND_MASK_GID_TYPE	= 1UL << 3,
};
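
/*
 * These bits are OR'ed together to tell find_gid() which fields of a
 * candidate entry must match. For example, a lookup that must match the
 * GID value, the GID type and the netdev would pass
 *
 *	unsigned long mask = GID_ATTR_FIND_MASK_GID |
 *			     GID_ATTR_FIND_MASK_GID_TYPE |
 *			     GID_ATTR_FIND_MASK_NETDEV;
 *
 * which is exactly the combination ib_cache_gid_add() uses below.
 */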

enum gid_table_entry_state {
	GID_TABLE_ENTRY_INVALID		= 1,
	GID_TABLE_ENTRY_VALID		= 2,
	/*
	 * Indicates that the entry is pending removal; there may still
	 * be active users of this GID entry. When the last user releases
	 * its reference, the GID entry is detached from the table.
	 */
	GID_TABLE_ENTRY_PENDING_DEL	= 3,
};

struct roce_gid_ndev_storage {
	struct rcu_head rcu_head;
	struct net_device *ndev;
};

struct ib_gid_table_entry {
	struct kref			kref;
	struct work_struct		del_work;
	struct ib_gid_attr		attr;
	void				*context;
	/* Store the ndev pointer so that the reference can be released
	 * later, from call_rcu context, because by that time the
	 * gid_table_entry and attr may already have been freed. So keep
	 * a copy of it. ndev_storage is freed by the RCU callback.
	 */
	struct roce_gid_ndev_storage	*ndev_storage;
	enum gid_table_entry_state	state;
};

struct ib_gid_table {
	int				sz;
	/* In RoCE, adding a GID to the table requires:
	 * (a) Check if this GID already exists.
	 * (b) Find free space.
	 * (c) Write the new GID.
	 *
	 * Deleting requires a different set of operations:
	 * (a) Find the GID.
	 * (b) Delete it.
	 */
	/* Any writer to data_vec must hold this lock and the write side of
	 * rwlock. Readers must hold only rwlock. All writers must be in a
	 * sleepable context.
	 */
	struct mutex			lock;
	/* rwlock protects data_vec[ix]->state and entry pointer.
	 */
	rwlock_t			rwlock;
	struct ib_gid_table_entry	**data_vec;
	/* bit field, each bit indicates the index of default GID */
	u32				default_gid_indices;
};
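
/*
 * A minimal sketch of the write-side locking protocol described above
 * (illustrative only; this mirrors what store_gid_entry() and del_gid()
 * do later in this file):
 *
 *	mutex_lock(&table->lock);          // serialize writers (sleepable)
 *	write_lock_irq(&table->rwlock);    // exclude readers briefly
 *	table->data_vec[ix] = entry;       // publish or clear the slot
 *	write_unlock_irq(&table->rwlock);
 *	mutex_unlock(&table->lock);
 *
 * Readers only take read_lock_irqsave(&table->rwlock, flags).
 */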

static void dispatch_gid_change_event(struct ib_device *ib_dev, u32 port)
{
	struct ib_event event;

	event.device		= ib_dev;
	event.element.port_num	= port;
	event.event		= IB_EVENT_GID_CHANGE;

	ib_dispatch_event_clients(&event);
}

static const char * const gid_type_str[] = {
	/* IB/RoCE v1 value is set for IB_GID_TYPE_IB and IB_GID_TYPE_ROCE for
	 * user space compatibility reasons.
	 */
	[IB_GID_TYPE_IB]	= "IB/RoCE v1",
	[IB_GID_TYPE_ROCE]	= "IB/RoCE v1",
	[IB_GID_TYPE_ROCE_UDP_ENCAP]	= "RoCE v2",
};

const char *ib_cache_gid_type_str(enum ib_gid_type gid_type)
{
	if (gid_type < ARRAY_SIZE(gid_type_str) && gid_type_str[gid_type])
		return gid_type_str[gid_type];

	return "Invalid GID type";
}
EXPORT_SYMBOL(ib_cache_gid_type_str);

/** rdma_is_zero_gid - Check if given GID is zero or not.
 * @gid:	GID to check
 * Returns true if given GID is zero, returns false otherwise.
 */
bool rdma_is_zero_gid(const union ib_gid *gid)
{
	return !memcmp(gid, &zgid, sizeof(*gid));
}
EXPORT_SYMBOL(rdma_is_zero_gid);

/** is_gid_index_default - Check if a given index belongs to
 * reserved default GIDs or not.
 * @table:	GID table pointer
 * @index:	Index to check in GID table
 * Returns true if index is one of the reserved default GID indices,
 * otherwise returns false.
 */
static bool is_gid_index_default(const struct ib_gid_table *table,
				 unsigned int index)
{
	return index < 32 && (BIT(index) & table->default_gid_indices);
}

int ib_cache_gid_parse_type_str(const char *buf)
{
	unsigned int i;
	size_t len;
	int err = -EINVAL;

	len = strlen(buf);
	if (len == 0)
		return -EINVAL;

	if (buf[len - 1] == '\n')
		len--;

	for (i = 0; i < ARRAY_SIZE(gid_type_str); ++i)
		if (gid_type_str[i] && !strncmp(buf, gid_type_str[i], len) &&
		    len == strlen(gid_type_str[i])) {
			err = i;
			break;
		}

	return err;
}
EXPORT_SYMBOL(ib_cache_gid_parse_type_str);
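
/*
 * Example (illustrative, based on gid_type_str[] above): parsing the
 * sysfs-style string "RoCE v2\n" returns IB_GID_TYPE_ROCE_UDP_ENCAP
 * (the trailing newline is stripped), while an unknown string yields
 * -EINVAL:
 *
 *	int type = ib_cache_gid_parse_type_str("RoCE v2\n");
 *	if (type < 0)
 *		return type;	// -EINVAL for unrecognized type strings
 */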

static struct ib_gid_table *rdma_gid_table(struct ib_device *device, u32 port)
{
	return device->port_data[port].cache.gid;
}

static bool is_gid_entry_free(const struct ib_gid_table_entry *entry)
{
	return !entry;
}

static bool is_gid_entry_valid(const struct ib_gid_table_entry *entry)
{
	return entry && entry->state == GID_TABLE_ENTRY_VALID;
}

static void schedule_free_gid(struct kref *kref)
{
	struct ib_gid_table_entry *entry =
			container_of(kref, struct ib_gid_table_entry, kref);

	queue_work(ib_wq, &entry->del_work);
}

static void put_gid_ndev(struct rcu_head *head)
{
	struct roce_gid_ndev_storage *storage =
		container_of(head, struct roce_gid_ndev_storage, rcu_head);

	WARN_ON(!storage->ndev);
	/* At this point it's safe to release the netdev reference, as all
	 * callers working on gid_attr->ndev are done using this netdev.
	 */
	dev_put(storage->ndev);
	kfree(storage);
}

static void free_gid_entry_locked(struct ib_gid_table_entry *entry)
{
	struct ib_device *device = entry->attr.device;
	u32 port_num = entry->attr.port_num;
	struct ib_gid_table *table = rdma_gid_table(device, port_num);

	dev_dbg(&device->dev, "%s port=%u index=%u gid %pI6\n", __func__,
		port_num, entry->attr.index, entry->attr.gid.raw);

	write_lock_irq(&table->rwlock);

	/*
	 * Only clear the slot if it still refers to this same entry; if a
	 * new entry was stored at this index by the time we free here,
	 * don't overwrite the table entry.
	 */
	if (entry == table->data_vec[entry->attr.index])
		table->data_vec[entry->attr.index] = NULL;
	/* Now this index is ready to be allocated */
	write_unlock_irq(&table->rwlock);

	if (entry->ndev_storage)
		call_rcu(&entry->ndev_storage->rcu_head, put_gid_ndev);
	kfree(entry);
}

static void free_gid_entry(struct kref *kref)
{
	struct ib_gid_table_entry *entry =
			container_of(kref, struct ib_gid_table_entry, kref);

	free_gid_entry_locked(entry);
}

/**
 * free_gid_work - Release reference to the GID entry
 * @work: Work structure to refer to GID entry which needs to be
 * deleted.
 *
 * free_gid_work() frees the entry from the HCA's hardware table
 * if the provider supports it. It releases the reference to the netdevice.
 */
static void free_gid_work(struct work_struct *work)
{
	struct ib_gid_table_entry *entry =
		container_of(work, struct ib_gid_table_entry, del_work);
	struct ib_device *device = entry->attr.device;
	u32 port_num = entry->attr.port_num;
	struct ib_gid_table *table = rdma_gid_table(device, port_num);

	mutex_lock(&table->lock);
	free_gid_entry_locked(entry);
	mutex_unlock(&table->lock);
}

static struct ib_gid_table_entry *
alloc_gid_entry(const struct ib_gid_attr *attr)
{
	struct ib_gid_table_entry *entry;
	struct net_device *ndev;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	ndev = rcu_dereference_protected(attr->ndev, 1);
	if (ndev) {
		entry->ndev_storage = kzalloc(sizeof(*entry->ndev_storage),
					      GFP_KERNEL);
		if (!entry->ndev_storage) {
			kfree(entry);
			return NULL;
		}
		dev_hold(ndev);
		entry->ndev_storage->ndev = ndev;
	}
	kref_init(&entry->kref);
	memcpy(&entry->attr, attr, sizeof(*attr));
	INIT_WORK(&entry->del_work, free_gid_work);
	entry->state = GID_TABLE_ENTRY_INVALID;
	return entry;
}

static void store_gid_entry(struct ib_gid_table *table,
			    struct ib_gid_table_entry *entry)
{
	entry->state = GID_TABLE_ENTRY_VALID;

	dev_dbg(&entry->attr.device->dev, "%s port=%u index=%u gid %pI6\n",
		__func__, entry->attr.port_num, entry->attr.index,
		entry->attr.gid.raw);

	lockdep_assert_held(&table->lock);
	write_lock_irq(&table->rwlock);
	table->data_vec[entry->attr.index] = entry;
	write_unlock_irq(&table->rwlock);
}

static void get_gid_entry(struct ib_gid_table_entry *entry)
{
	kref_get(&entry->kref);
}

static void put_gid_entry(struct ib_gid_table_entry *entry)
{
	kref_put(&entry->kref, schedule_free_gid);
}

static void put_gid_entry_locked(struct ib_gid_table_entry *entry)
{
	kref_put(&entry->kref, free_gid_entry);
}
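
/*
 * Note the two release helpers above: put_gid_entry() defers the final
 * free to the ib_wq workqueue via schedule_free_gid(), so it can be used
 * by callers that do not hold table->lock; put_gid_entry_locked() frees
 * synchronously through free_gid_entry() and must therefore only be
 * called with table->lock already held (as del_gid() does).
 */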

static int add_roce_gid(struct ib_gid_table_entry *entry)
{
	const struct ib_gid_attr *attr = &entry->attr;
	int ret;

	if (!attr->ndev) {
		dev_err(&attr->device->dev, "%s NULL netdev port=%u index=%u\n",
			__func__, attr->port_num, attr->index);
		return -EINVAL;
	}
	if (rdma_cap_roce_gid_table(attr->device, attr->port_num)) {
		ret = attr->device->ops.add_gid(attr, &entry->context);
		if (ret) {
			dev_err(&attr->device->dev,
				"%s GID add failed port=%u index=%u\n",
				__func__, attr->port_num, attr->index);
			return ret;
		}
	}
	return 0;
}

/**
 * del_gid - Delete GID table entry
 *
 * @ib_dev:	IB device whose GID entry is to be deleted
 * @port:	Port number of the IB device
 * @table:	GID table of the IB device for a port
 * @ix:		GID entry index to delete
 *
 */
static void del_gid(struct ib_device *ib_dev, u32 port,
		    struct ib_gid_table *table, int ix)
{
	struct roce_gid_ndev_storage *ndev_storage;
	struct ib_gid_table_entry *entry;

	lockdep_assert_held(&table->lock);

	dev_dbg(&ib_dev->dev, "%s port=%u index=%d gid %pI6\n", __func__, port,
		ix, table->data_vec[ix]->attr.gid.raw);

	write_lock_irq(&table->rwlock);
	entry = table->data_vec[ix];
	entry->state = GID_TABLE_ENTRY_PENDING_DEL;
	/*
	 * For non-RoCE protocols, the GID entry slot is immediately ready
	 * for reuse.
	 */
	if (!rdma_protocol_roce(ib_dev, port))
		table->data_vec[ix] = NULL;
	write_unlock_irq(&table->rwlock);

	if (rdma_cap_roce_gid_table(ib_dev, port))
		ib_dev->ops.del_gid(&entry->attr, &entry->context);

	ndev_storage = entry->ndev_storage;
	if (ndev_storage) {
		entry->ndev_storage = NULL;
		rcu_assign_pointer(entry->attr.ndev, NULL);
		call_rcu(&ndev_storage->rcu_head, put_gid_ndev);
	}

	put_gid_entry_locked(entry);
}

/**
 * add_modify_gid - Add or modify GID table entry
 *
 * @table:	GID table in which GID to be added or modified
 * @attr:	Attributes of the GID
 *
 * Returns 0 on success or appropriate error code. It accepts zero
 * GID addition for non-RoCE ports for HCAs that report them as valid
 * GIDs. However, such zero GIDs are not added to the cache.
 */
static int add_modify_gid(struct ib_gid_table *table,
			  const struct ib_gid_attr *attr)
{
	struct ib_gid_table_entry *entry;
	int ret = 0;

	/*
	 * Invalidate any old entry in the table to make it safe to write to
	 * this index.
	 */
	if (is_gid_entry_valid(table->data_vec[attr->index]))
		del_gid(attr->device, attr->port_num, table, attr->index);

	/*
	 * Some HCAs report multiple GID entries with only one valid GID, and
	 * leave other unused entries as the zero GID. Convert zero GIDs to
	 * empty table entries instead of storing them.
	 */
	if (rdma_is_zero_gid(&attr->gid))
		return 0;

	entry = alloc_gid_entry(attr);
	if (!entry)
		return -ENOMEM;

	if (rdma_protocol_roce(attr->device, attr->port_num)) {
		ret = add_roce_gid(entry);
		if (ret)
			goto done;
	}

	store_gid_entry(table, entry);
	return 0;

done:
	put_gid_entry(entry);
	return ret;
}

/* rwlock should be read locked, or lock should be held */
static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
		    const struct ib_gid_attr *val, bool default_gid,
		    unsigned long mask, int *pempty)
{
	int i = 0;
	int found = -1;
	int empty = pempty ? -1 : 0;

	while (i < table->sz && (found < 0 || empty < 0)) {
		struct ib_gid_table_entry *data = table->data_vec[i];
		struct ib_gid_attr *attr;
		int curr_index = i;

		i++;

		/* find_gid() is used during GID addition, where it is
		 * expected to return a free entry slot that is not a
		 * duplicate. A free entry slot is requested and returned
		 * only if pempty is set, so look up a free slot only when
		 * requested.
		 */
		if (pempty && empty < 0) {
			if (is_gid_entry_free(data) &&
			    default_gid ==
				is_gid_index_default(table, curr_index)) {
				/*
				 * Found an invalid (free) entry; allocate it.
				 * If default GID is requested, then our
				 * found slot must be one of the DEFAULT
				 * reserved slots or we fail.
				 * This ensures that only DEFAULT reserved
				 * slots are used for default property GIDs.
				 */
				empty = curr_index;
			}
		}

		/*
		 * Additionally, find_gid() is used to find a valid entry
		 * during lookup operations; so ignore the entries which are
		 * marked as pending for removal and the entries which are
		 * marked as invalid.
		 */
		if (!is_gid_entry_valid(data))
			continue;

		if (found >= 0)
			continue;

		attr = &data->attr;
		if (mask & GID_ATTR_FIND_MASK_GID_TYPE &&
		    attr->gid_type != val->gid_type)
			continue;

		if (mask & GID_ATTR_FIND_MASK_GID &&
		    memcmp(gid, &data->attr.gid, sizeof(*gid)))
			continue;

		if (mask & GID_ATTR_FIND_MASK_NETDEV &&
		    attr->ndev != val->ndev)
			continue;

		if (mask & GID_ATTR_FIND_MASK_DEFAULT &&
		    is_gid_index_default(table, curr_index) != default_gid)
			continue;

		found = curr_index;
	}

	if (pempty)
		*pempty = empty;

	return found;
}

static void make_default_gid(struct net_device *dev, union ib_gid *gid)
{
	gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	addrconf_ifid_eui48(&gid->raw[8], dev);
}
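
/*
 * Worked example (illustrative): for a netdev with MAC 02:00:00:11:22:33,
 * addrconf_ifid_eui48() produces the modified EUI-64 interface ID
 * 00:00:00:ff:fe:11:22:33 (ff:fe inserted in the middle, universal/local
 * bit flipped), so the default GID becomes the link-local style address
 * fe80::ff:fe11:2233.
 */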

static int __ib_cache_gid_add(struct ib_device *ib_dev, u32 port,
			      union ib_gid *gid, struct ib_gid_attr *attr,
			      unsigned long mask, bool default_gid)
{
	struct ib_gid_table *table;
	int ret = 0;
	int empty;
	int ix;

	/* Do not allow adding a zero GID, in accordance with IB spec
	 * version 1.3, section 4.1.1 point (6), section 12.7.10 and
	 * section 12.7.20.
	 */
	if (rdma_is_zero_gid(gid))
		return -EINVAL;

	table = rdma_gid_table(ib_dev, port);

	mutex_lock(&table->lock);

	ix = find_gid(table, gid, attr, default_gid, mask, &empty);
	if (ix >= 0)
		goto out_unlock;

	if (empty < 0) {
		ret = -ENOSPC;
		goto out_unlock;
	}
	attr->device = ib_dev;
	attr->index = empty;
	attr->port_num = port;
	attr->gid = *gid;
	ret = add_modify_gid(table, attr);
	if (!ret)
		dispatch_gid_change_event(ib_dev, port);

out_unlock:
	mutex_unlock(&table->lock);
	if (ret)
		pr_warn("%s: unable to add gid %pI6 error=%d\n",
			__func__, gid->raw, ret);
	return ret;
}

int ib_cache_gid_add(struct ib_device *ib_dev, u32 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
{
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE |
			     GID_ATTR_FIND_MASK_NETDEV;

	return __ib_cache_gid_add(ib_dev, port, gid, attr, mask, false);
}

static int
_ib_cache_gid_del(struct ib_device *ib_dev, u32 port,
		  union ib_gid *gid, struct ib_gid_attr *attr,
		  unsigned long mask, bool default_gid)
{
	struct ib_gid_table *table;
	int ret = 0;
	int ix;

	table = rdma_gid_table(ib_dev, port);

	mutex_lock(&table->lock);

	ix = find_gid(table, gid, attr, default_gid, mask, NULL);
	if (ix < 0) {
		ret = -EINVAL;
		goto out_unlock;
	}

	del_gid(ib_dev, port, table, ix);
	dispatch_gid_change_event(ib_dev, port);

out_unlock:
	mutex_unlock(&table->lock);
	if (ret)
		pr_debug("%s: can't delete gid %pI6 error=%d\n",
			 __func__, gid->raw, ret);
	return ret;
}

int ib_cache_gid_del(struct ib_device *ib_dev, u32 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
{
	unsigned long mask = GID_ATTR_FIND_MASK_GID	  |
			     GID_ATTR_FIND_MASK_GID_TYPE |
			     GID_ATTR_FIND_MASK_DEFAULT  |
			     GID_ATTR_FIND_MASK_NETDEV;

	return _ib_cache_gid_del(ib_dev, port, gid, attr, mask, false);
}

int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u32 port,
				     struct net_device *ndev)
{
	struct ib_gid_table *table;
	int ix;
	bool deleted = false;

	table = rdma_gid_table(ib_dev, port);

	mutex_lock(&table->lock);

	for (ix = 0; ix < table->sz; ix++) {
		if (is_gid_entry_valid(table->data_vec[ix]) &&
		    table->data_vec[ix]->attr.ndev == ndev) {
			del_gid(ib_dev, port, table, ix);
			deleted = true;
		}
	}

	mutex_unlock(&table->lock);

	if (deleted)
		dispatch_gid_change_event(ib_dev, port);

	return 0;
}

/**
 * rdma_find_gid_by_port - Returns the GID entry attributes when it finds
 * a valid GID entry for given search parameters. It searches for the
 * specified GID value in the local software cache.
 * @ib_dev: The device to query.
 * @gid: The GID value to search for.
 * @gid_type: The GID type to search for.
 * @port: The port number of the device where the GID value should be searched.
 * @ndev: In RoCE, the net device of the device. NULL means ignore.
 *
 * Returns sgid attributes with a valid reference if the GID is found, or
 * an ERR_PTR on error.
 * The caller must invoke rdma_put_gid_attr() to release the reference.
 */
const struct ib_gid_attr *
rdma_find_gid_by_port(struct ib_device *ib_dev,
		      const union ib_gid *gid,
		      enum ib_gid_type gid_type,
		      u32 port, struct net_device *ndev)
{
	int local_index;
	struct ib_gid_table *table;
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE;
	struct ib_gid_attr val = {.ndev = ndev, .gid_type = gid_type};
	const struct ib_gid_attr *attr;
	unsigned long flags;

	if (!rdma_is_port_valid(ib_dev, port))
		return ERR_PTR(-ENOENT);

	table = rdma_gid_table(ib_dev, port);

	if (ndev)
		mask |= GID_ATTR_FIND_MASK_NETDEV;

	read_lock_irqsave(&table->rwlock, flags);
	local_index = find_gid(table, gid, &val, false, mask, NULL);
	if (local_index >= 0) {
		get_gid_entry(table->data_vec[local_index]);
		attr = &table->data_vec[local_index]->attr;
		read_unlock_irqrestore(&table->rwlock, flags);
		return attr;
	}

	read_unlock_irqrestore(&table->rwlock, flags);
	return ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL(rdma_find_gid_by_port);
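
/*
 * Hedged usage sketch (not from the original source); the get/put pairing
 * is the part mandated by the kernel-doc above:
 *
 *	const struct ib_gid_attr *attr;
 *
 *	attr = rdma_find_gid_by_port(ib_dev, &gid,
 *				     IB_GID_TYPE_ROCE_UDP_ENCAP, 1, NULL);
 *	if (IS_ERR(attr))
 *		return PTR_ERR(attr);
 *	// ... use attr->index, attr->gid_type ...
 *	rdma_put_gid_attr(attr);
 */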

/**
 * rdma_find_gid_by_filter - Returns the GID table attribute where a
 * specified GID value occurs
 * @ib_dev: The device to query.
 * @gid: The GID value to search for.
 * @port: The port number of the device where the GID value could be
 *   searched.
 * @filter: The filter function is executed on any matching GID in the table.
 *   If the filter function returns true, the corresponding index is returned,
 *   otherwise, we continue searching the GID table. It's guaranteed that
 *   while filter is executed, ndev field is valid and the structure won't
 *   change. filter is executed in an atomic context. filter must not be NULL.
 * @context: Private data to pass into the call-back.
 *
 * rdma_find_gid_by_filter() searches the port's GID table for the specified
 * GID value and returns the entry for which the filter function returns
 * true.
 */
const struct ib_gid_attr *rdma_find_gid_by_filter(
	struct ib_device *ib_dev, const union ib_gid *gid, u32 port,
	bool (*filter)(const union ib_gid *gid, const struct ib_gid_attr *,
		       void *),
	void *context)
{
	const struct ib_gid_attr *res = ERR_PTR(-ENOENT);
	struct ib_gid_table *table;
	unsigned long flags;
	unsigned int i;

	if (!rdma_is_port_valid(ib_dev, port))
		return ERR_PTR(-EINVAL);

	table = rdma_gid_table(ib_dev, port);

	read_lock_irqsave(&table->rwlock, flags);
	for (i = 0; i < table->sz; i++) {
		struct ib_gid_table_entry *entry = table->data_vec[i];

		if (!is_gid_entry_valid(entry))
			continue;

		if (memcmp(gid, &entry->attr.gid, sizeof(*gid)))
			continue;

		if (filter(gid, &entry->attr, context)) {
			get_gid_entry(entry);
			res = &entry->attr;
			break;
		}
	}
	read_unlock_irqrestore(&table->rwlock, flags);
	return res;
}
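
/*
 * Hedged example of a filter callback (illustrative only): match entries
 * whose GID type equals the value passed through @context. The callback
 * runs under the table's read lock, so it must not sleep.
 *
 *	static bool match_gid_type(const union ib_gid *gid,
 *				   const struct ib_gid_attr *attr,
 *				   void *context)
 *	{
 *		return attr->gid_type == *(enum ib_gid_type *)context;
 *	}
 */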

static struct ib_gid_table *alloc_gid_table(int sz)
{
	struct ib_gid_table *table = kzalloc(sizeof(*table), GFP_KERNEL);

	if (!table)
		return NULL;

	table->data_vec = kcalloc(sz, sizeof(*table->data_vec), GFP_KERNEL);
	if (!table->data_vec)
		goto err_free_table;

	mutex_init(&table->lock);

	table->sz = sz;
	rwlock_init(&table->rwlock);
	return table;

err_free_table:
	kfree(table);
	return NULL;
}

static void release_gid_table(struct ib_device *device,
			      struct ib_gid_table *table)
{
	int i;

	if (!table)
		return;

	for (i = 0; i < table->sz; i++) {
		if (is_gid_entry_free(table->data_vec[i]))
			continue;

		WARN_ONCE(true,
			  "GID entry ref leak for dev %s index %d ref=%u\n",
			  dev_name(&device->dev), i,
			  kref_read(&table->data_vec[i]->kref));
	}

	mutex_destroy(&table->lock);
	kfree(table->data_vec);
	kfree(table);
}

static void cleanup_gid_table_port(struct ib_device *ib_dev, u32 port,
				   struct ib_gid_table *table)
{
	int i;

	if (!table)
		return;

	mutex_lock(&table->lock);
	for (i = 0; i < table->sz; ++i) {
		if (is_gid_entry_valid(table->data_vec[i]))
			del_gid(ib_dev, port, table, i);
	}
	mutex_unlock(&table->lock);
}

void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u32 port,
				  struct net_device *ndev,
				  unsigned long gid_type_mask,
				  enum ib_cache_gid_default_mode mode)
{
	union ib_gid gid = { };
	struct ib_gid_attr gid_attr;
	unsigned int gid_type;
	unsigned long mask;

	mask = GID_ATTR_FIND_MASK_GID_TYPE |
	       GID_ATTR_FIND_MASK_DEFAULT |
	       GID_ATTR_FIND_MASK_NETDEV;
	memset(&gid_attr, 0, sizeof(gid_attr));
	gid_attr.ndev = ndev;

	for (gid_type = 0; gid_type < IB_GID_TYPE_SIZE; ++gid_type) {
		if (1UL << gid_type & ~gid_type_mask)
			continue;

		gid_attr.gid_type = gid_type;

		if (mode == IB_CACHE_GID_DEFAULT_MODE_SET) {
			make_default_gid(ndev, &gid);
			__ib_cache_gid_add(ib_dev, port, &gid,
					   &gid_attr, mask, true);
		} else if (mode == IB_CACHE_GID_DEFAULT_MODE_DELETE) {
			_ib_cache_gid_del(ib_dev, port, &gid,
					  &gid_attr, mask, true);
		}
	}
}

static void gid_table_reserve_default(struct ib_device *ib_dev, u32 port,
				      struct ib_gid_table *table)
{
	unsigned int i;
	unsigned long roce_gid_type_mask;
	unsigned int num_default_gids;

	roce_gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
	num_default_gids = hweight_long(roce_gid_type_mask);
	/* Reserve starting indices for default GIDs */
	for (i = 0; i < num_default_gids && i < table->sz; i++)
		table->default_gid_indices |= BIT(i);
}


static void gid_table_release_one(struct ib_device *ib_dev)
{
	u32 p;

	rdma_for_each_port (ib_dev, p) {
		release_gid_table(ib_dev, ib_dev->port_data[p].cache.gid);
		ib_dev->port_data[p].cache.gid = NULL;
	}
}

static int _gid_table_setup_one(struct ib_device *ib_dev)
{
	struct ib_gid_table *table;
	u32 rdma_port;

	rdma_for_each_port (ib_dev, rdma_port) {
		table = alloc_gid_table(
			ib_dev->port_data[rdma_port].immutable.gid_tbl_len);
		if (!table)
			goto rollback_table_setup;

		gid_table_reserve_default(ib_dev, rdma_port, table);
		ib_dev->port_data[rdma_port].cache.gid = table;
	}
	return 0;

rollback_table_setup:
	gid_table_release_one(ib_dev);
	return -ENOMEM;
}

static void gid_table_cleanup_one(struct ib_device *ib_dev)
{
	u32 p;

	rdma_for_each_port (ib_dev, p)
		cleanup_gid_table_port(ib_dev, p,
				       ib_dev->port_data[p].cache.gid);
}

static int gid_table_setup_one(struct ib_device *ib_dev)
{
	int err;

	err = _gid_table_setup_one(ib_dev);

	if (err)
		return err;

	rdma_roce_rescan_device(ib_dev);

	return err;
}

/**
 * rdma_query_gid - Read the GID content from the GID software cache
 * @device:		Device to query the GID
 * @port_num:		Port number of the device
 * @index:		Index of the GID table entry to read
 * @gid:		Pointer to GID where to store the entry's GID
 *
 * rdma_query_gid() only reads the GID entry content for the requested
 * device, port and index. It reads for IB, RoCE and iWarp link layers.
 * It doesn't hold any reference to the GID table entry in the HCA or
 * software cache.
 *
 * Returns 0 on success or appropriate error code.
 *
 */
int rdma_query_gid(struct ib_device *device, u32 port_num,
		   int index, union ib_gid *gid)
{
	struct ib_gid_table *table;
	unsigned long flags;
	int res;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	table = rdma_gid_table(device, port_num);
	read_lock_irqsave(&table->rwlock, flags);

	if (index < 0 || index >= table->sz) {
		res = -EINVAL;
		goto done;
	}

	if (!is_gid_entry_valid(table->data_vec[index])) {
		res = -ENOENT;
		goto done;
	}

	memcpy(gid, &table->data_vec[index]->attr.gid, sizeof(*gid));
	res = 0;

done:
	read_unlock_irqrestore(&table->rwlock, flags);
	return res;
}
EXPORT_SYMBOL(rdma_query_gid);
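
/*
 * Hedged usage sketch (illustrative, not from the original source):
 * read the GID at index 0 of port 1; no reference is held, so there is
 * nothing to release afterwards:
 *
 *	union ib_gid gid;
 *	int ret;
 *
 *	ret = rdma_query_gid(device, 1, 0, &gid);
 *	if (ret)
 *		return ret;
 *	pr_info("port 1 gid[0] = %pI6\n", gid.raw);
 */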

/**
 * rdma_read_gid_hw_context - Read the HW GID context from GID attribute
 * @attr:		Pointer to the GID attribute
 *
 * rdma_read_gid_hw_context() reads the driver's GID HW context corresponding
 * to the SGID attr. Callers are required to already be holding the reference
 * to an existing GID entry.
 *
 * Returns the HW GID context
 *
 */
void *rdma_read_gid_hw_context(const struct ib_gid_attr *attr)
{
	return container_of(attr, struct ib_gid_table_entry, attr)->context;
}
EXPORT_SYMBOL(rdma_read_gid_hw_context);

/**
 * rdma_find_gid - Returns SGID attributes if the matching GID is found.
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @gid_type: The GID type to search for.
 * @ndev: In RoCE, the net device of the device. NULL means ignore.
 *
 * rdma_find_gid() searches for the specified GID value in the software cache.
 *
 * Returns GID attributes if a valid GID is found or returns ERR_PTR for the
 * error. The caller must invoke rdma_put_gid_attr() to release the reference.
 *
 */
const struct ib_gid_attr *rdma_find_gid(struct ib_device *device,
					const union ib_gid *gid,
					enum ib_gid_type gid_type,
					struct net_device *ndev)
{
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE;
	struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};
	u32 p;

	if (ndev)
		mask |= GID_ATTR_FIND_MASK_NETDEV;

	rdma_for_each_port(device, p) {
		struct ib_gid_table *table;
		unsigned long flags;
		int index;

		table = device->port_data[p].cache.gid;
		read_lock_irqsave(&table->rwlock, flags);
		index = find_gid(table, gid, &gid_attr_val, false, mask, NULL);
		if (index >= 0) {
			const struct ib_gid_attr *attr;

			get_gid_entry(table->data_vec[index]);
			attr = &table->data_vec[index]->attr;
			read_unlock_irqrestore(&table->rwlock, flags);
			return attr;
		}
		read_unlock_irqrestore(&table->rwlock, flags);
	}

	return ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL(rdma_find_gid);

int ib_get_cached_pkey(struct ib_device *device,
		       u32               port_num,
		       int               index,
		       u16              *pkey)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int ret = 0;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache_lock, flags);

	cache = device->port_data[port_num].cache.pkey;

	if (!cache || index < 0 || index >= cache->table_len)
		ret = -EINVAL;
	else
		*pkey = cache->table[index];

	read_unlock_irqrestore(&device->cache_lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_pkey);

void ib_get_cached_subnet_prefix(struct ib_device *device, u32 port_num,
				u64 *sn_pfx)
{
	unsigned long flags;

	read_lock_irqsave(&device->cache_lock, flags);
	*sn_pfx = device->port_data[port_num].cache.subnet_prefix;
	read_unlock_irqrestore(&device->cache_lock, flags);
}
EXPORT_SYMBOL(ib_get_cached_subnet_prefix);

int ib_find_cached_pkey(struct ib_device *device, u32 port_num,
			u16 pkey, u16 *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;
	int partial_ix = -1;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache_lock, flags);

	cache = device->port_data[port_num].cache.pkey;
	if (!cache) {
		ret = -EINVAL;
		goto err;
	}

	*index = -1;

	for (i = 0; i < cache->table_len; ++i)
		if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
			if (cache->table[i] & 0x8000) {
				*index = i;
				ret = 0;
				break;
			} else {
				partial_ix = i;
			}
		}

	if (ret && partial_ix >= 0) {
		*index = partial_ix;
		ret = 0;
	}

err:
	read_unlock_irqrestore(&device->cache_lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);
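
/*
 * The masking above implements P_Key matching semantics: bit 15 of a
 * P_Key is the membership bit (1 = full member, 0 = partial member) and
 * the low 15 bits are the key value. For example, searching for 0xFFFF
 * in a table containing { 0x7FFF, 0xFFFF } matches both entries on the
 * low 15 bits, but the full-member entry 0xFFFF is preferred; 0x7FFF is
 * only returned as a partial-member fallback when no full member exists.
 */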

int ib_find_exact_cached_pkey(struct ib_device *device, u32 port_num,
			      u16 pkey, u16 *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache_lock, flags);

	cache = device->port_data[port_num].cache.pkey;
	if (!cache) {
		ret = -EINVAL;
		goto err;
	}

	*index = -1;

	for (i = 0; i < cache->table_len; ++i)
		if (cache->table[i] == pkey) {
			*index = i;
			ret = 0;
			break;
		}

err:
	read_unlock_irqrestore(&device->cache_lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_exact_cached_pkey);

int ib_get_cached_lmc(struct ib_device *device, u32 port_num, u8 *lmc)
{
	unsigned long flags;
	int ret = 0;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache_lock, flags);
	*lmc = device->port_data[port_num].cache.lmc;
	read_unlock_irqrestore(&device->cache_lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_lmc);

int ib_get_cached_port_state(struct ib_device *device, u32 port_num,
			     enum ib_port_state *port_state)
{
	unsigned long flags;
	int ret = 0;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache_lock, flags);
	*port_state = device->port_data[port_num].cache.port_state;
	read_unlock_irqrestore(&device->cache_lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_port_state);

/**
 * rdma_get_gid_attr - Returns GID attributes for a port of a device
 * at a requested gid_index, if a valid GID entry exists.
 * @device:		The device to query.
 * @port_num:		The port number on the device where the GID value
 *			is to be queried.
 * @index:		Index of the GID table entry whose attributes are to
 *			be queried.
 *
 * rdma_get_gid_attr() acquires a reference to the gid attributes from the
 * cached GID table. The caller must invoke rdma_put_gid_attr() to release
 * the reference to the gid attribute regardless of link layer.
 *
 * Returns pointer to valid gid attribute or ERR_PTR for the appropriate error
 * code.
 */
const struct ib_gid_attr *
rdma_get_gid_attr(struct ib_device *device, u32 port_num, int index)
{
	const struct ib_gid_attr *attr = ERR_PTR(-ENODATA);
	struct ib_gid_table *table;
	unsigned long flags;

	if (!rdma_is_port_valid(device, port_num))
		return ERR_PTR(-EINVAL);

	table = rdma_gid_table(device, port_num);
	if (index < 0 || index >= table->sz)
		return ERR_PTR(-EINVAL);

	read_lock_irqsave(&table->rwlock, flags);
	if (!is_gid_entry_valid(table->data_vec[index]))
		goto done;

	get_gid_entry(table->data_vec[index]);
	attr = &table->data_vec[index]->attr;
done:
	read_unlock_irqrestore(&table->rwlock, flags);
	return attr;
}
EXPORT_SYMBOL(rdma_get_gid_attr);

/**
 * rdma_query_gid_table - Reads GID table entries of all the ports of a device up to max_entries.
 * @device: The device to query.
 * @entries: Entries where GID entries are returned.
 * @max_entries: Maximum number of entries that can be returned.
 * Entries array must be allocated to hold max_entries number of entries.
 *
 * Returns number of entries on success or appropriate error code.
 */
ssize_t rdma_query_gid_table(struct ib_device *device,
			     struct ib_uverbs_gid_entry *entries,
			     size_t max_entries)
{
	const struct ib_gid_attr *gid_attr;
	ssize_t num_entries = 0, ret;
	struct ib_gid_table *table;
	u32 port_num, i;
	struct net_device *ndev;
	unsigned long flags;

	rdma_for_each_port(device, port_num) {
		table = rdma_gid_table(device, port_num);
		read_lock_irqsave(&table->rwlock, flags);
		for (i = 0; i < table->sz; i++) {
			if (!is_gid_entry_valid(table->data_vec[i]))
				continue;
			if (num_entries >= max_entries) {
				ret = -EINVAL;
				goto err;
			}

			gid_attr = &table->data_vec[i]->attr;

			memcpy(&entries->gid, &gid_attr->gid,
			       sizeof(gid_attr->gid));
			entries->gid_index = gid_attr->index;
			entries->port_num = gid_attr->port_num;
			entries->gid_type = gid_attr->gid_type;
			ndev = rcu_dereference_protected(
				gid_attr->ndev,
				lockdep_is_held(&table->rwlock));
			if (ndev)
				entries->netdev_ifindex = ndev->ifindex;

			num_entries++;
			entries++;
		}
		read_unlock_irqrestore(&table->rwlock, flags);
	}

	return num_entries;
err:
	read_unlock_irqrestore(&table->rwlock, flags);
	return ret;
}
EXPORT_SYMBOL(rdma_query_gid_table);

/**
 * rdma_put_gid_attr - Release reference to the GID attribute
 * @attr:		Pointer to the GID attribute whose reference
 *			needs to be released.
 *
 * rdma_put_gid_attr() must be used to release a reference that was
 * acquired using rdma_get_gid_attr() or any API which returns a pointer
 * to an ib_gid_attr, regardless of an IB or RoCE link layer.
 *
 */
void rdma_put_gid_attr(const struct ib_gid_attr *attr)
{
	struct ib_gid_table_entry *entry =
		container_of(attr, struct ib_gid_table_entry, attr);

	put_gid_entry(entry);
}
EXPORT_SYMBOL(rdma_put_gid_attr);

/**
 * rdma_hold_gid_attr - Get reference to existing GID attribute
 *
 * @attr:		Pointer to the GID attribute whose reference
 *			needs to be taken.
 *
 * Increase the reference count to a GID attribute to keep it from being
 * freed. Callers are required to already be holding a reference to the
 * attribute.
 *
 */
void rdma_hold_gid_attr(const struct ib_gid_attr *attr)
{
	struct ib_gid_table_entry *entry =
		container_of(attr, struct ib_gid_table_entry, attr);

	get_gid_entry(entry);
}
EXPORT_SYMBOL(rdma_hold_gid_attr);
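
/*
 * Hedged sketch of the reference discipline above (illustrative only):
 *
 *	const struct ib_gid_attr *attr;
 *
 *	attr = rdma_get_gid_attr(device, port, 0);	// +1 reference
 *	if (IS_ERR(attr))
 *		return PTR_ERR(attr);
 *	rdma_hold_gid_attr(attr);			// +1, now 2 held
 *	// ...
 *	rdma_put_gid_attr(attr);			// drop the extra hold
 *	rdma_put_gid_attr(attr);			// drop the original
 */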

/**
 * rdma_read_gid_attr_ndev_rcu - Read GID attribute netdevice
 * which must be in UP state.
 *
 * @attr:Pointer to the GID attribute
 *
 * Returns pointer to netdevice if the netdevice was attached to GID and
 * netdevice is in UP state. Caller must hold RCU lock as this API
 * reads the netdev flags which can change while netdevice migrates to
 * different net namespace. Returns ERR_PTR with error code otherwise.
 *
 */
struct net_device *rdma_read_gid_attr_ndev_rcu(const struct ib_gid_attr *attr)
{
	struct ib_gid_table_entry *entry =
			container_of(attr, struct ib_gid_table_entry, attr);
	struct ib_device *device = entry->attr.device;
	struct net_device *ndev = ERR_PTR(-EINVAL);
	u32 port_num = entry->attr.port_num;
	struct ib_gid_table *table;
	unsigned long flags;
	bool valid;

	table = rdma_gid_table(device, port_num);

	read_lock_irqsave(&table->rwlock, flags);
	valid = is_gid_entry_valid(table->data_vec[attr->index]);
	if (valid) {
		ndev = rcu_dereference(attr->ndev);
		if (!ndev)
			ndev = ERR_PTR(-ENODEV);
	}
	read_unlock_irqrestore(&table->rwlock, flags);
	return ndev;
}
EXPORT_SYMBOL(rdma_read_gid_attr_ndev_rcu);
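
/*
 * Hedged usage sketch (illustrative only): per the kernel-doc above, the
 * RCU read lock must be held around both the call and any use of the
 * returned netdev:
 *
 *	struct net_device *ndev;
 *
 *	rcu_read_lock();
 *	ndev = rdma_read_gid_attr_ndev_rcu(attr);
 *	if (!IS_ERR(ndev))
 *		pr_info("gid ndev: %s\n", ndev->name);
 *	rcu_read_unlock();
 */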

static int get_lower_dev_vlan(struct net_device *lower_dev,
			      struct netdev_nested_priv *priv)
{
	u16 *vlan_id = (u16 *)priv->data;

	if (is_vlan_dev(lower_dev))
		*vlan_id = vlan_dev_vlan_id(lower_dev);

	/* We are interested only in the first-level vlan device, so
	 * always return 1 to stop iterating over next-level devices.
	 */
	return 1;
}

/**
 * rdma_read_gid_l2_fields - Read the vlan ID and source MAC address
 *			     of a GID entry.
 *
 * @attr:	GID attribute pointer whose L2 fields to be read
 * @vlan_id:	Pointer to vlan id to fill up if the GID entry has
 *		vlan id. It is optional.
 * @smac:	Pointer to smac to fill up for a GID entry. It is optional.
 *
 * rdma_read_gid_l2_fields() returns 0 on success and returns vlan id
 * (if gid entry has vlan) and source MAC, or returns error.
 */
int rdma_read_gid_l2_fields(const struct ib_gid_attr *attr,
			    u16 *vlan_id, u8 *smac)
{
	struct netdev_nested_priv priv = {
		.data = (void *)vlan_id,
	};
	struct net_device *ndev;

	rcu_read_lock();
	ndev = rcu_dereference(attr->ndev);
	if (!ndev) {
		rcu_read_unlock();
		return -ENODEV;
	}
	if (smac)
		ether_addr_copy(smac, ndev->dev_addr);
	if (vlan_id) {
		*vlan_id = 0xffff;
		if (is_vlan_dev(ndev)) {
			*vlan_id = vlan_dev_vlan_id(ndev);
		} else {
			/* If the netdev is an upper device and its lower
			 * device is a vlan device, consider the vlan id of
			 * the lower vlan device for this gid entry.
			 */
			netdev_walk_all_lower_dev_rcu(attr->ndev,
					get_lower_dev_vlan, &priv);
		}
	}
	rcu_read_unlock();
	return 0;
}
EXPORT_SYMBOL(rdma_read_gid_l2_fields);
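
/*
 * Hedged usage sketch (illustrative only); a vlan_id of 0xffff means the
 * GID entry carries no vlan:
 *
 *	u16 vlan_id;
 *	u8 smac[ETH_ALEN];
 *
 *	if (!rdma_read_gid_l2_fields(attr, &vlan_id, smac) &&
 *	    vlan_id != 0xffff)
 *		pr_info("gid vlan=%u smac=%pM\n", vlan_id, smac);
 */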

static int config_non_roce_gid_cache(struct ib_device *device,
				     u32 port, struct ib_port_attr *tprops)
{
	struct ib_gid_attr gid_attr = {};
	struct ib_gid_table *table;
	int ret = 0;
	int i;

	gid_attr.device = device;
	gid_attr.port_num = port;
	table = rdma_gid_table(device, port);

	mutex_lock(&table->lock);
	for (i = 0; i < tprops->gid_tbl_len; ++i) {
		if (!device->ops.query_gid)
			continue;
		ret = device->ops.query_gid(device, port, i, &gid_attr.gid);
		if (ret) {
			dev_warn(&device->dev,
				 "query_gid failed (%d) for index %d\n", ret,
				 i);
			goto err;
		}

		if (rdma_protocol_iwarp(device, port)) {
			struct net_device *ndev;

			ndev = ib_device_get_netdev(device, port);
			if (!ndev)
				continue;
			RCU_INIT_POINTER(gid_attr.ndev, ndev);
			dev_put(ndev);
		}

		gid_attr.index = i;
		tprops->subnet_prefix =
			be64_to_cpu(gid_attr.gid.global.subnet_prefix);
		add_modify_gid(table, &gid_attr);
	}
err:
	mutex_unlock(&table->lock);
	return ret;
}

static int
ib_cache_update(struct ib_device *device, u32 port, bool update_gids,
		bool update_pkeys, bool enforce_security)
{
	struct ib_port_attr       *tprops = NULL;
	struct ib_pkey_cache      *pkey_cache = NULL;
	struct ib_pkey_cache      *old_pkey_cache = NULL;
	int                        i;
	int                        ret;

	if (!rdma_is_port_valid(device, port))
		return -EINVAL;

	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
	if (!tprops)
		return -ENOMEM;

	ret = ib_query_port(device, port, tprops);
	if (ret) {
		dev_warn(&device->dev, "ib_query_port failed (%d)\n", ret);
		goto err;
	}

	if (!rdma_protocol_roce(device, port) && update_gids) {
		ret = config_non_roce_gid_cache(device, port,
						tprops);
		if (ret)
			goto err;
	}

	update_pkeys &= !!tprops->pkey_tbl_len;

	if (update_pkeys) {
		pkey_cache = kmalloc(struct_size(pkey_cache, table,
						 tprops->pkey_tbl_len),
				     GFP_KERNEL);
		if (!pkey_cache) {
			ret = -ENOMEM;
			goto err;
		}

		pkey_cache->table_len = tprops->pkey_tbl_len;

		for (i = 0; i < pkey_cache->table_len; ++i) {
			ret = ib_query_pkey(device, port, i,
					    pkey_cache->table + i);
			if (ret) {
				dev_warn(&device->dev,
					 "ib_query_pkey failed (%d) for index %d\n",
					 ret, i);
				goto err;
			}
		}
	}

	write_lock_irq(&device->cache_lock);

	if (update_pkeys) {
		old_pkey_cache = device->port_data[port].cache.pkey;
		device->port_data[port].cache.pkey = pkey_cache;
	}
	device->port_data[port].cache.lmc = tprops->lmc;
	device->port_data[port].cache.port_state = tprops->state;

	device->port_data[port].cache.subnet_prefix = tprops->subnet_prefix;
	write_unlock_irq(&device->cache_lock);

	if (enforce_security)
		ib_security_cache_change(device,
					 port,
					 tprops->subnet_prefix);

	kfree(old_pkey_cache);
	kfree(tprops);
	return 0;

err:
	kfree(pkey_cache);
	kfree(tprops);
	return ret;
}

static void ib_cache_event_task(struct work_struct *_work)
{
	struct ib_update_work *work =
		container_of(_work, struct ib_update_work, work);
	int ret;

	/* Before distributing the cache update event, first sync
	 * the cache.
	 */
	ret = ib_cache_update(work->event.device, work->event.element.port_num,
			      work->event.event == IB_EVENT_GID_CHANGE,
			      work->event.event == IB_EVENT_PKEY_CHANGE,
			      work->enforce_security);

	/* The GID event is already notified for individual GID entries by
	 * dispatch_gid_change_event(). Hence, notify for the rest of the
	 * events.
	 */
	if (!ret && work->event.event != IB_EVENT_GID_CHANGE)
		ib_dispatch_event_clients(&work->event);

	kfree(work);
}

static void ib_generic_event_task(struct work_struct *_work)
{
	struct ib_update_work *work =
		container_of(_work, struct ib_update_work, work);

	ib_dispatch_event_clients(&work->event);
	kfree(work);
}

static bool is_cache_update_event(const struct ib_event *event)
{
	return (event->event == IB_EVENT_PORT_ERR    ||
		event->event == IB_EVENT_PORT_ACTIVE ||
		event->event == IB_EVENT_LID_CHANGE  ||
		event->event == IB_EVENT_PKEY_CHANGE ||
		event->event == IB_EVENT_CLIENT_REREGISTER ||
		event->event == IB_EVENT_GID_CHANGE);
}

/**
 * ib_dispatch_event - Dispatch an asynchronous event
 * @event:Event to dispatch
 *
 * Low-level drivers must call ib_dispatch_event() to dispatch the
 * event to all registered event handlers when an asynchronous event
 * occurs.
 */
void ib_dispatch_event(const struct ib_event *event)
{
	struct ib_update_work *work;

	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return;

	if (is_cache_update_event(event))
		INIT_WORK(&work->work, ib_cache_event_task);
	else
		INIT_WORK(&work->work, ib_generic_event_task);

	work->event = *event;
	if (event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_GID_CHANGE)
		work->enforce_security = true;

	queue_work(ib_wq, &work->work);
}
EXPORT_SYMBOL(ib_dispatch_event);
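
/*
 * Hedged driver-side sketch (illustrative only): a low-level driver
 * reporting that a port came up. The work item is allocated with
 * GFP_ATOMIC and queued on ib_wq, so this is safe from atomic context:
 *
 *	struct ib_event event = {};
 *
 *	event.device = ibdev;
 *	event.element.port_num = port;
 *	event.event = IB_EVENT_PORT_ACTIVE;
 *	ib_dispatch_event(&event);
 */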

int ib_cache_setup_one(struct ib_device *device)
{
	u32 p;
	int err;

	err = gid_table_setup_one(device);
	if (err)
		return err;

	rdma_for_each_port (device, p) {
		err = ib_cache_update(device, p, true, true, true);
		if (err) {
			gid_table_cleanup_one(device);
			return err;
		}
	}

	return 0;
}

void ib_cache_release_one(struct ib_device *device)
{
	u32 p;

	/*
	 * The release function frees all the cache elements.
	 * This function should be called as part of freeing
	 * all the device's resources when the cache could no
	 * longer be accessed.
	 */
	rdma_for_each_port (device, p)
		kfree(device->port_data[p].cache.pkey);

	gid_table_release_one(device);
}

void ib_cache_cleanup_one(struct ib_device *device)
{
	/* The cleanup function waits for all in-progress workqueue
	 * elements and cleans up the GID cache. This function should be
	 * called after the device was removed from the devices list and
	 * all clients were removed, so the cache exists but is
	 * non-functional and shouldn't be updated anymore.
	 */
	flush_workqueue(ib_wq);
	gid_table_cleanup_one(device);

	/*
	 * Flush the wq a second time for any pending GID delete work.
	 */
	flush_workqueue(ib_wq);
}