xref: /openbmc/linux/drivers/infiniband/core/cache.c (revision 0edbfea5)
/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

#include <rdma/ib_cache.h>

#include "core_priv.h"

struct ib_pkey_cache {
	int             table_len;
	u16             table[];
};

struct ib_update_work {
	struct work_struct work;
	struct ib_device  *device;
	u8                 port_num;
};

union ib_gid zgid;
EXPORT_SYMBOL(zgid);

static const struct ib_gid_attr zattr;

enum gid_attr_find_mask {
	GID_ATTR_FIND_MASK_GID          = 1UL << 0,
	GID_ATTR_FIND_MASK_NETDEV	= 1UL << 1,
	GID_ATTR_FIND_MASK_DEFAULT	= 1UL << 2,
	GID_ATTR_FIND_MASK_GID_TYPE	= 1UL << 3,
};

enum gid_table_entry_props {
	GID_TABLE_ENTRY_INVALID		= 1UL << 0,
	GID_TABLE_ENTRY_DEFAULT		= 1UL << 1,
};

enum gid_table_write_action {
	GID_TABLE_WRITE_ACTION_ADD,
	GID_TABLE_WRITE_ACTION_DEL,
	/* MODIFY only updates the GID table. Currently only used by
	 * ib_cache_update.
	 */
	GID_TABLE_WRITE_ACTION_MODIFY
};

struct ib_gid_table_entry {
	unsigned long	    props;
	union ib_gid        gid;
	struct ib_gid_attr  attr;
	void		   *context;
};

struct ib_gid_table {
	int                  sz;
	/* In RoCE, adding a GID to the table requires:
	 * (a) Find out if this GID already exists.
	 * (b) Find a free slot.
	 * (c) Write the new GID.
	 *
	 * Deletion requires a different set of operations:
	 * (a) Find the GID.
	 * (b) Delete it.
	 *
	 * Each add/delete must be carried out atomically.
	 * This is achieved by taking this mutex in all
	 * writers. We don't need this lock for IB, as the MAD
	 * layer replaces all entries. All data_vec entries
	 * are protected by this lock.
	 */
	struct mutex         lock;
	/* This lock protects the table entries from being
	 * read and written simultaneously.
	 */
	rwlock_t	     rwlock;
	struct ib_gid_table_entry *data_vec;
};

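/*
 * Minimal sketch of the write-side lock ordering described above (an
 * illustrative, unused helper, not part of the original file): take the
 * sleepable mutex first to serialize writers, then the irq-safe rwlock to
 * exclude readers, exactly as ib_cache_gid_add() does further down.
 */
static void __maybe_unused example_gid_table_write_lock(struct ib_gid_table *table)
{
	mutex_lock(&table->lock);	/* serializes RoCE add/del writers */
	write_lock_irq(&table->rwlock);	/* keeps readers away from data_vec */
	/* ... inspect or modify table->data_vec[] entries here ... */
	write_unlock_irq(&table->rwlock);
	mutex_unlock(&table->lock);
}
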
static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port)
{
	if (rdma_cap_roce_gid_table(ib_dev, port)) {
		struct ib_event event;

		event.device		= ib_dev;
		event.element.port_num	= port;
		event.event		= IB_EVENT_GID_CHANGE;

		ib_dispatch_event(&event);
	}
}

static const char * const gid_type_str[] = {
	[IB_GID_TYPE_IB]	= "IB/RoCE v1",
	[IB_GID_TYPE_ROCE_UDP_ENCAP]	= "RoCE v2",
};

const char *ib_cache_gid_type_str(enum ib_gid_type gid_type)
{
	if (gid_type < ARRAY_SIZE(gid_type_str) && gid_type_str[gid_type])
		return gid_type_str[gid_type];

	return "Invalid GID type";
}
EXPORT_SYMBOL(ib_cache_gid_type_str);

int ib_cache_gid_parse_type_str(const char *buf)
{
	unsigned int i;
	size_t len;
	int err = -EINVAL;

	len = strlen(buf);
	if (len == 0)
		return -EINVAL;

	if (buf[len - 1] == '\n')
		len--;

	for (i = 0; i < ARRAY_SIZE(gid_type_str); ++i)
		if (gid_type_str[i] && !strncmp(buf, gid_type_str[i], len) &&
		    len == strlen(gid_type_str[i])) {
			err = i;
			break;
		}

	return err;
}
EXPORT_SYMBOL(ib_cache_gid_parse_type_str);

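/*
 * Usage sketch (an illustrative, unused helper): the parser accepts the
 * exact strings from gid_type_str[], with an optional trailing newline as
 * delivered by sysfs, e.g. "RoCE v2\n" maps to IB_GID_TYPE_ROCE_UDP_ENCAP.
 */
static int __maybe_unused example_parse_gid_type(const char *buf,
						 enum ib_gid_type *gid_type)
{
	int ret = ib_cache_gid_parse_type_str(buf);

	if (ret < 0)
		return ret;	/* -EINVAL: string matched no GID type */

	*gid_type = ret;
	return 0;
}
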
/* This function expects that the rwlock is write locked in all
 * scenarios and that the mutex is held in sleepable (RoCE)
 * scenarios.
 */
static int write_gid(struct ib_device *ib_dev, u8 port,
		     struct ib_gid_table *table, int ix,
		     const union ib_gid *gid,
		     const struct ib_gid_attr *attr,
		     enum gid_table_write_action action,
		     bool  default_gid)
	__releases(&table->rwlock) __acquires(&table->rwlock)
{
	int ret = 0;
	struct net_device *old_net_dev;

	/* When rdma_cap_roce_gid_table() is true, this function must be
	 * protected by a sleepable lock.
	 */

	if (rdma_cap_roce_gid_table(ib_dev, port)) {
		table->data_vec[ix].props |= GID_TABLE_ENTRY_INVALID;
		write_unlock_irq(&table->rwlock);
		/* GID_TABLE_WRITE_ACTION_MODIFY currently isn't supported by
		 * RoCE providers and thus only updates the cache.
		 */
		if (action == GID_TABLE_WRITE_ACTION_ADD)
			ret = ib_dev->add_gid(ib_dev, port, ix, gid, attr,
					      &table->data_vec[ix].context);
		else if (action == GID_TABLE_WRITE_ACTION_DEL)
			ret = ib_dev->del_gid(ib_dev, port, ix,
					      &table->data_vec[ix].context);
		write_lock_irq(&table->rwlock);
	}

	old_net_dev = table->data_vec[ix].attr.ndev;
	if (old_net_dev && old_net_dev != attr->ndev)
		dev_put(old_net_dev);
	/* If the provider write failed, or this is a delete, zero out the
	 * entry instead.
	 */
	if (ret || action == GID_TABLE_WRITE_ACTION_DEL) {
		gid = &zgid;
		attr = &zattr;
		table->data_vec[ix].context = NULL;
	}
	if (default_gid)
		table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
	memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid));
	memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr));
	if (table->data_vec[ix].attr.ndev &&
	    table->data_vec[ix].attr.ndev != old_net_dev)
		dev_hold(table->data_vec[ix].attr.ndev);

	table->data_vec[ix].props &= ~GID_TABLE_ENTRY_INVALID;

	return ret;
}

static int add_gid(struct ib_device *ib_dev, u8 port,
		   struct ib_gid_table *table, int ix,
		   const union ib_gid *gid,
		   const struct ib_gid_attr *attr,
		   bool  default_gid)
{
	return write_gid(ib_dev, port, table, ix, gid, attr,
			 GID_TABLE_WRITE_ACTION_ADD, default_gid);
}

static int modify_gid(struct ib_device *ib_dev, u8 port,
		      struct ib_gid_table *table, int ix,
		      const union ib_gid *gid,
		      const struct ib_gid_attr *attr,
		      bool  default_gid)
{
	return write_gid(ib_dev, port, table, ix, gid, attr,
			 GID_TABLE_WRITE_ACTION_MODIFY, default_gid);
}

static int del_gid(struct ib_device *ib_dev, u8 port,
		   struct ib_gid_table *table, int ix,
		   bool  default_gid)
{
	return write_gid(ib_dev, port, table, ix, &zgid, &zattr,
			 GID_TABLE_WRITE_ACTION_DEL, default_gid);
}

/* rwlock should be read locked */
static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
		    const struct ib_gid_attr *val, bool default_gid,
		    unsigned long mask, int *pempty)
{
	int i = 0;
	int found = -1;
	int empty = pempty ? -1 : 0;

	while (i < table->sz && (found < 0 || empty < 0)) {
		struct ib_gid_table_entry *data = &table->data_vec[i];
		struct ib_gid_attr *attr = &data->attr;
		int curr_index = i;

		i++;

		if (data->props & GID_TABLE_ENTRY_INVALID)
			continue;

		if (empty < 0)
			if (!memcmp(&data->gid, &zgid, sizeof(*gid)) &&
			    !memcmp(attr, &zattr, sizeof(*attr)) &&
			    !data->props)
				empty = curr_index;

		if (found >= 0)
			continue;

		if (mask & GID_ATTR_FIND_MASK_GID_TYPE &&
		    attr->gid_type != val->gid_type)
			continue;

		if (mask & GID_ATTR_FIND_MASK_GID &&
		    memcmp(gid, &data->gid, sizeof(*gid)))
			continue;

		if (mask & GID_ATTR_FIND_MASK_NETDEV &&
		    attr->ndev != val->ndev)
			continue;

		if (mask & GID_ATTR_FIND_MASK_DEFAULT &&
		    !!(data->props & GID_TABLE_ENTRY_DEFAULT) !=
		    default_gid)
			continue;

		found = curr_index;
	}

	if (pempty)
		*pempty = empty;

	return found;
}

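/*
 * Sketch of a typical find_gid() call (illustrative, unused): match on GID
 * value, GID type and netdev at once, the same mask combination that
 * ib_cache_gid_add() uses below. The caller must already hold the table's
 * rwlock.
 */
static int __maybe_unused example_find_gid(struct ib_gid_table *table,
					   const union ib_gid *gid,
					   struct net_device *ndev)
{
	struct ib_gid_attr val = {.ndev = ndev, .gid_type = IB_GID_TYPE_IB};

	return find_gid(table, gid, &val, false,
			GID_ATTR_FIND_MASK_GID |
			GID_ATTR_FIND_MASK_GID_TYPE |
			GID_ATTR_FIND_MASK_NETDEV, NULL);
}
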
static void make_default_gid(struct net_device *dev, union ib_gid *gid)
{
	gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	addrconf_ifid_eui48(&gid->raw[8], dev);
}

int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	int ix;
	int ret = 0;
	struct net_device *idev;
	int empty;

	table = ports_table[port - rdma_start_port(ib_dev)];

	if (!memcmp(gid, &zgid, sizeof(*gid)))
		return -EINVAL;

	if (ib_dev->get_netdev) {
		idev = ib_dev->get_netdev(ib_dev, port);
		if (idev && attr->ndev != idev) {
			union ib_gid default_gid;

			/* Adding default GIDs is not permitted */
			make_default_gid(idev, &default_gid);
			if (!memcmp(gid, &default_gid, sizeof(*gid))) {
				dev_put(idev);
				return -EPERM;
			}
		}
		if (idev)
			dev_put(idev);
	}

	mutex_lock(&table->lock);
	write_lock_irq(&table->rwlock);

	ix = find_gid(table, gid, attr, false, GID_ATTR_FIND_MASK_GID |
		      GID_ATTR_FIND_MASK_GID_TYPE |
		      GID_ATTR_FIND_MASK_NETDEV, &empty);
	if (ix >= 0)
		goto out_unlock;

	if (empty < 0) {
		ret = -ENOSPC;
		goto out_unlock;
	}

	ret = add_gid(ib_dev, port, table, empty, gid, attr, false);
	if (!ret)
		dispatch_gid_change_event(ib_dev, port);

out_unlock:
	write_unlock_irq(&table->rwlock);
	mutex_unlock(&table->lock);
	return ret;
}

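/*
 * Usage sketch (illustrative, unused): add a RoCE v2 GID for "ndev" to a
 * port's cache. The IPv4-mapped GID below uses the documentation address
 * 192.0.2.1 and is purely an assumption for the example.
 */
static int __maybe_unused example_gid_add(struct ib_device *ib_dev, u8 port,
					  struct net_device *ndev)
{
	static const u8 v4mapped_gid[16] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 192, 0, 2, 1
	};
	union ib_gid gid;
	struct ib_gid_attr attr = {
		.ndev		= ndev,
		.gid_type	= IB_GID_TYPE_ROCE_UDP_ENCAP,
	};

	memcpy(gid.raw, v4mapped_gid, sizeof(gid.raw));
	return ib_cache_gid_add(ib_dev, port, &gid, &attr);
}
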
int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	int ix;

	table = ports_table[port - rdma_start_port(ib_dev)];

	mutex_lock(&table->lock);
	write_lock_irq(&table->rwlock);

	ix = find_gid(table, gid, attr, false,
		      GID_ATTR_FIND_MASK_GID	  |
		      GID_ATTR_FIND_MASK_GID_TYPE |
		      GID_ATTR_FIND_MASK_NETDEV	  |
		      GID_ATTR_FIND_MASK_DEFAULT,
		      NULL);
	if (ix < 0)
		goto out_unlock;

	if (!del_gid(ib_dev, port, table, ix, false))
		dispatch_gid_change_event(ib_dev, port);

out_unlock:
	write_unlock_irq(&table->rwlock);
	mutex_unlock(&table->lock);
	return 0;
}

int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
				     struct net_device *ndev)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	int ix;
	bool deleted = false;

	table = ports_table[port - rdma_start_port(ib_dev)];

	mutex_lock(&table->lock);
	write_lock_irq(&table->rwlock);

	for (ix = 0; ix < table->sz; ix++)
		if (table->data_vec[ix].attr.ndev == ndev)
			if (!del_gid(ib_dev, port, table, ix, false))
				deleted = true;

	write_unlock_irq(&table->rwlock);
	mutex_unlock(&table->lock);

	if (deleted)
		dispatch_gid_change_event(ib_dev, port);

	return 0;
}

static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
			      union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;

	table = ports_table[port - rdma_start_port(ib_dev)];

	if (index < 0 || index >= table->sz)
		return -EINVAL;

	if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID)
		return -EAGAIN;

	memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
	if (attr) {
		memcpy(attr, &table->data_vec[index].attr, sizeof(*attr));
		if (attr->ndev)
			dev_hold(attr->ndev);
	}

	return 0;
}

static int _ib_cache_gid_table_find(struct ib_device *ib_dev,
				    const union ib_gid *gid,
				    const struct ib_gid_attr *val,
				    unsigned long mask,
				    u8 *port, u16 *index)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	u8 p;
	int local_index;
	unsigned long flags;

	for (p = 0; p < ib_dev->phys_port_cnt; p++) {
		table = ports_table[p];
		read_lock_irqsave(&table->rwlock, flags);
		local_index = find_gid(table, gid, val, false, mask, NULL);
		if (local_index >= 0) {
			if (index)
				*index = local_index;
			if (port)
				*port = p + rdma_start_port(ib_dev);
			read_unlock_irqrestore(&table->rwlock, flags);
			return 0;
		}
		read_unlock_irqrestore(&table->rwlock, flags);
	}

	return -ENOENT;
}

static int ib_cache_gid_find(struct ib_device *ib_dev,
			     const union ib_gid *gid,
			     enum ib_gid_type gid_type,
			     struct net_device *ndev, u8 *port,
			     u16 *index)
{
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE;
	struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};

	if (ndev)
		mask |= GID_ATTR_FIND_MASK_NETDEV;

	return _ib_cache_gid_table_find(ib_dev, gid, &gid_attr_val,
					mask, port, index);
}

int ib_find_cached_gid_by_port(struct ib_device *ib_dev,
			       const union ib_gid *gid,
			       enum ib_gid_type gid_type,
			       u8 port, struct net_device *ndev,
			       u16 *index)
{
	int local_index;
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE;
	struct ib_gid_attr val = {.ndev = ndev, .gid_type = gid_type};
	unsigned long flags;

	if (port < rdma_start_port(ib_dev) ||
	    port > rdma_end_port(ib_dev))
		return -ENOENT;

	table = ports_table[port - rdma_start_port(ib_dev)];

	if (ndev)
		mask |= GID_ATTR_FIND_MASK_NETDEV;

	read_lock_irqsave(&table->rwlock, flags);
	local_index = find_gid(table, gid, &val, false, mask, NULL);
	if (local_index >= 0) {
		if (index)
			*index = local_index;
		read_unlock_irqrestore(&table->rwlock, flags);
		return 0;
	}

	read_unlock_irqrestore(&table->rwlock, flags);
	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_cached_gid_by_port);

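/*
 * Usage sketch (illustrative, unused): resolve the table index of a known
 * GID on one specific port, restricting the match to entries bound to
 * "ndev". -ENOENT means no matching entry (or an out-of-range port).
 */
static int __maybe_unused example_find_by_port(struct ib_device *ib_dev,
					       const union ib_gid *gid,
					       u8 port,
					       struct net_device *ndev)
{
	u16 index;
	int ret;

	ret = ib_find_cached_gid_by_port(ib_dev, gid, IB_GID_TYPE_IB,
					 port, ndev, &index);
	return ret ? ret : index;
}
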
/**
 * ib_cache_gid_find_by_filter - Returns the GID table index where a specified
 * GID value occurs
 * @ib_dev: The device to query.
 * @gid: The GID value to search for.
 * @port: The port number of the device where the GID value should be
 *   searched.
 * @filter: The filter function is executed on any matching GID in the table.
 *   If the filter function returns true, the corresponding index is returned,
 *   otherwise, we continue searching the GID table. It's guaranteed that
 *   while filter is executed, ndev field is valid and the structure won't
 *   change. filter is executed in an atomic context. filter must not be NULL.
 * @context: Opaque pointer passed to each invocation of @filter.
 * @index: The index into the cached GID table where the GID was found.  This
 *   parameter may be NULL.
 *
 * ib_cache_gid_find_by_filter() searches for the specified GID value
 * for which the filter function returns true in the port's GID table.
 * This function is only supported on RoCE ports.
 */
static int ib_cache_gid_find_by_filter(struct ib_device *ib_dev,
				       const union ib_gid *gid,
				       u8 port,
				       bool (*filter)(const union ib_gid *,
						      const struct ib_gid_attr *,
						      void *),
				       void *context,
				       u16 *index)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	unsigned int i;
	unsigned long flags;
	bool found = false;

	if (!ports_table)
		return -EOPNOTSUPP;

	if (port < rdma_start_port(ib_dev) ||
	    port > rdma_end_port(ib_dev) ||
	    !rdma_protocol_roce(ib_dev, port))
		return -EPROTONOSUPPORT;

	table = ports_table[port - rdma_start_port(ib_dev)];

	read_lock_irqsave(&table->rwlock, flags);
	for (i = 0; i < table->sz; i++) {
		struct ib_gid_attr attr;

		if (table->data_vec[i].props & GID_TABLE_ENTRY_INVALID)
			goto next;

		if (memcmp(gid, &table->data_vec[i].gid, sizeof(*gid)))
			goto next;

		memcpy(&attr, &table->data_vec[i].attr, sizeof(attr));

		if (filter(gid, &attr, context))
			found = true;

next:
		if (found)
			break;
	}
	read_unlock_irqrestore(&table->rwlock, flags);

	if (!found)
		return -ENOENT;

	if (index)
		*index = i;
	return 0;
}

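/*
 * Sketch of a filter callback (illustrative, unused; the name is
 * hypothetical): accept only entries bound to the netdev passed through
 * "context". Filters run under the table rwlock in atomic context, so they
 * must not sleep.
 */
static bool __maybe_unused example_ndev_filter(const union ib_gid *gid,
					       const struct ib_gid_attr *attr,
					       void *context)
{
	return attr->ndev == context;
}
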
static struct ib_gid_table *alloc_gid_table(int sz)
{
	struct ib_gid_table *table =
		kzalloc(sizeof(struct ib_gid_table), GFP_KERNEL);

	if (!table)
		return NULL;

	table->data_vec = kcalloc(sz, sizeof(*table->data_vec), GFP_KERNEL);
	if (!table->data_vec)
		goto err_free_table;

	mutex_init(&table->lock);

	table->sz = sz;
	rwlock_init(&table->rwlock);

	return table;

err_free_table:
	kfree(table);
	return NULL;
}

static void release_gid_table(struct ib_gid_table *table)
{
	if (table) {
		kfree(table->data_vec);
		kfree(table);
	}
}

static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
				   struct ib_gid_table *table)
{
	int i;
	bool deleted = false;

	if (!table)
		return;

	write_lock_irq(&table->rwlock);
	for (i = 0; i < table->sz; ++i) {
		if (memcmp(&table->data_vec[i].gid, &zgid,
			   sizeof(table->data_vec[i].gid)))
			if (!del_gid(ib_dev, port, table, i,
				     table->data_vec[i].props &
				     GID_TABLE_ENTRY_DEFAULT))
				deleted = true;
	}
	write_unlock_irq(&table->rwlock);

	if (deleted)
		dispatch_gid_change_event(ib_dev, port);
}

void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
				  struct net_device *ndev,
				  unsigned long gid_type_mask,
				  enum ib_cache_gid_default_mode mode)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	union ib_gid gid;
	struct ib_gid_attr gid_attr;
	struct ib_gid_attr zattr_type = zattr;
	struct ib_gid_table *table;
	unsigned int gid_type;

	table = ports_table[port - rdma_start_port(ib_dev)];

	make_default_gid(ndev, &gid);
	memset(&gid_attr, 0, sizeof(gid_attr));
	gid_attr.ndev = ndev;

	for (gid_type = 0; gid_type < IB_GID_TYPE_SIZE; ++gid_type) {
		int ix;
		union ib_gid current_gid;
		struct ib_gid_attr current_gid_attr = {};

		if ((1UL << gid_type) & ~gid_type_mask)
			continue;

		gid_attr.gid_type = gid_type;

		mutex_lock(&table->lock);
		write_lock_irq(&table->rwlock);
		ix = find_gid(table, NULL, &gid_attr, true,
			      GID_ATTR_FIND_MASK_GID_TYPE |
			      GID_ATTR_FIND_MASK_DEFAULT,
			      NULL);

		/* Couldn't find the default GID location */
		if (WARN_ON(ix < 0))
			goto release;

		zattr_type.gid_type = gid_type;

		if (!__ib_cache_gid_get(ib_dev, port, ix,
					&current_gid, &current_gid_attr) &&
		    mode == IB_CACHE_GID_DEFAULT_MODE_SET &&
		    !memcmp(&gid, &current_gid, sizeof(gid)) &&
		    !memcmp(&gid_attr, &current_gid_attr, sizeof(gid_attr)))
			goto release;

		if (memcmp(&current_gid, &zgid, sizeof(current_gid)) ||
		    memcmp(&current_gid_attr, &zattr_type,
			   sizeof(current_gid_attr))) {
			if (del_gid(ib_dev, port, table, ix, true)) {
				pr_warn("ib_cache_gid: can't delete index %d for default gid %pI6\n",
					ix, gid.raw);
				goto release;
			} else {
				dispatch_gid_change_event(ib_dev, port);
			}
		}

		if (mode == IB_CACHE_GID_DEFAULT_MODE_SET) {
			if (add_gid(ib_dev, port, table, ix, &gid, &gid_attr, true))
				pr_warn("ib_cache_gid: unable to add default gid %pI6\n",
					gid.raw);
			else
				dispatch_gid_change_event(ib_dev, port);
		}

release:
		if (current_gid_attr.ndev)
			dev_put(current_gid_attr.ndev);
		write_unlock_irq(&table->rwlock);
		mutex_unlock(&table->lock);
	}
}

static int gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
				     struct ib_gid_table *table)
{
	unsigned int i;
	unsigned long roce_gid_type_mask;
	unsigned int num_default_gids;
	unsigned int current_gid = 0;

	roce_gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
	num_default_gids = hweight_long(roce_gid_type_mask);
	for (i = 0; i < num_default_gids && i < table->sz; i++) {
		struct ib_gid_table_entry *entry =
			&table->data_vec[i];

		entry->props |= GID_TABLE_ENTRY_DEFAULT;
		current_gid = find_next_bit(&roce_gid_type_mask,
					    BITS_PER_LONG,
					    current_gid);
		entry->attr.gid_type = current_gid++;
	}

	return 0;
}

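/*
 * Worked example (illustrative): with roce_gid_type_mask == 0x3, i.e. both
 * IB_GID_TYPE_IB (bit 0) and IB_GID_TYPE_ROCE_UDP_ENCAP (bit 1) supported,
 * hweight_long() yields 2, so entries 0 and 1 are marked
 * GID_TABLE_ENTRY_DEFAULT and find_next_bit() hands them gid_type 0
 * ("IB/RoCE v1") and 1 ("RoCE v2") respectively.
 */
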
static int _gid_table_setup_one(struct ib_device *ib_dev)
{
	u8 port;
	struct ib_gid_table **table;
	int err = 0;

	table = kcalloc(ib_dev->phys_port_cnt, sizeof(*table), GFP_KERNEL);

	if (!table) {
		pr_warn("failed to allocate ib gid cache for %s\n",
			ib_dev->name);
		return -ENOMEM;
	}

	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		u8 rdma_port = port + rdma_start_port(ib_dev);

		table[port] =
			alloc_gid_table(
				ib_dev->port_immutable[rdma_port].gid_tbl_len);
		if (!table[port]) {
			err = -ENOMEM;
			goto rollback_table_setup;
		}

		err = gid_table_reserve_default(ib_dev,
						port + rdma_start_port(ib_dev),
						table[port]);
		if (err)
			goto rollback_table_setup;
	}

	ib_dev->cache.gid_cache = table;
	return 0;

rollback_table_setup:
	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
				       table[port]);
		release_gid_table(table[port]);
	}

	kfree(table);
	return err;
}

static void gid_table_release_one(struct ib_device *ib_dev)
{
	struct ib_gid_table **table = ib_dev->cache.gid_cache;
	u8 port;

	if (!table)
		return;

	for (port = 0; port < ib_dev->phys_port_cnt; port++)
		release_gid_table(table[port]);

	kfree(table);
	ib_dev->cache.gid_cache = NULL;
}

static void gid_table_cleanup_one(struct ib_device *ib_dev)
{
	struct ib_gid_table **table = ib_dev->cache.gid_cache;
	u8 port;

	if (!table)
		return;

	for (port = 0; port < ib_dev->phys_port_cnt; port++)
		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
				       table[port]);
}

static int gid_table_setup_one(struct ib_device *ib_dev)
{
	int err;

	err = _gid_table_setup_one(ib_dev);

	if (err)
		return err;

	err = roce_rescan_device(ib_dev);

	if (err) {
		gid_table_cleanup_one(ib_dev);
		gid_table_release_one(ib_dev);
	}

	return err;
}

int ib_get_cached_gid(struct ib_device *device,
		      u8                port_num,
		      int               index,
		      union ib_gid     *gid,
		      struct ib_gid_attr *gid_attr)
{
	int res;
	unsigned long flags;
	struct ib_gid_table **ports_table = device->cache.gid_cache;
	struct ib_gid_table *table;

	/* Validate the port before using it to index the per-port tables */
	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	table = ports_table[port_num - rdma_start_port(device)];

	read_lock_irqsave(&table->rwlock, flags);
	res = __ib_cache_gid_get(device, port_num, index, gid, gid_attr);
	read_unlock_irqrestore(&table->rwlock, flags);

	return res;
}
EXPORT_SYMBOL(ib_get_cached_gid);

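/*
 * Usage sketch (illustrative, unused): read the GID at index 0 of a
 * device's first port. On success with a populated attr.ndev,
 * __ib_cache_gid_get() took a netdev reference that the caller must drop.
 */
static int __maybe_unused example_get_first_gid(struct ib_device *device)
{
	union ib_gid gid;
	struct ib_gid_attr attr;
	int ret;

	ret = ib_get_cached_gid(device, rdma_start_port(device), 0,
				&gid, &attr);
	if (!ret && attr.ndev)
		dev_put(attr.ndev);	/* drop the reference taken above */
	return ret;
}
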
int ib_find_cached_gid(struct ib_device *device,
		       const union ib_gid *gid,
		       enum ib_gid_type gid_type,
		       struct net_device *ndev,
		       u8               *port_num,
		       u16              *index)
{
	return ib_cache_gid_find(device, gid, gid_type, ndev, port_num, index);
}
EXPORT_SYMBOL(ib_find_cached_gid);

int ib_find_gid_by_filter(struct ib_device *device,
			  const union ib_gid *gid,
			  u8 port_num,
			  bool (*filter)(const union ib_gid *gid,
					 const struct ib_gid_attr *,
					 void *),
			  void *context, u16 *index)
{
	/* Only the RoCE GID table supports a filter function */
	if (!rdma_cap_roce_gid_table(device, port_num) && filter)
		return -EPROTONOSUPPORT;

	return ib_cache_gid_find_by_filter(device, gid,
					   port_num, filter,
					   context, index);
}
EXPORT_SYMBOL(ib_find_gid_by_filter);

int ib_get_cached_pkey(struct ib_device *device,
		       u8                port_num,
		       int               index,
		       u16              *pkey)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int ret = 0;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

	if (index < 0 || index >= cache->table_len)
		ret = -EINVAL;
	else
		*pkey = cache->table[index];

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_pkey);

int ib_find_cached_pkey(struct ib_device *device,
			u8                port_num,
			u16               pkey,
			u16              *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;
	int partial_ix = -1;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

	*index = -1;

	/* Bit 15 of a pkey is the membership bit; prefer a full-membership
	 * match, but fall back to a limited-membership (partial) one.
	 */
	for (i = 0; i < cache->table_len; ++i)
		if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
			if (cache->table[i] & 0x8000) {
				*index = i;
				ret = 0;
				break;
			} else {
				partial_ix = i;
			}
		}

	if (ret && partial_ix >= 0) {
		*index = partial_ix;
		ret = 0;
	}

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);

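/*
 * Usage sketch (illustrative, unused): look up the index of the default
 * partition key 0xffff. Because only bit 15 (membership) differs, a
 * limited-membership 0x7fff entry would also satisfy the search when no
 * full-membership entry exists.
 */
static int __maybe_unused example_find_default_pkey(struct ib_device *device,
						    u8 port_num)
{
	u16 index;
	int ret = ib_find_cached_pkey(device, port_num, 0xffff, &index);

	return ret ? ret : index;
}
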
int ib_find_exact_cached_pkey(struct ib_device *device,
			      u8                port_num,
			      u16               pkey,
			      u16              *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

	*index = -1;

	for (i = 0; i < cache->table_len; ++i)
		if (cache->table[i] == pkey) {
			*index = i;
			ret = 0;
			break;
		}

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_exact_cached_pkey);

int ib_get_cached_lmc(struct ib_device *device,
		      u8                port_num,
		      u8                *lmc)
{
	unsigned long flags;
	int ret = 0;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);
	*lmc = device->cache.lmc_cache[port_num - rdma_start_port(device)];
	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_lmc);

static void ib_cache_update(struct ib_device *device,
			    u8                port)
{
	struct ib_port_attr       *tprops = NULL;
	struct ib_pkey_cache      *pkey_cache = NULL, *old_pkey_cache;
	struct ib_gid_cache {
		int             table_len;
		union ib_gid    table[];
	}			  *gid_cache = NULL;
	int                        i;
	int                        ret;
	struct ib_gid_table	  *table;
	struct ib_gid_table	 **ports_table = device->cache.gid_cache;
	bool			   use_roce_gid_table =
					rdma_cap_roce_gid_table(device, port);

	if (port < rdma_start_port(device) || port > rdma_end_port(device))
		return;

	table = ports_table[port - rdma_start_port(device)];

	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
	if (!tprops)
		return;

	ret = ib_query_port(device, port, tprops);
	if (ret) {
		pr_warn("ib_query_port failed (%d) for %s\n",
			ret, device->name);
		goto err;
	}

	pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
			     sizeof *pkey_cache->table, GFP_KERNEL);
	if (!pkey_cache)
		goto err;

	pkey_cache->table_len = tprops->pkey_tbl_len;

	if (!use_roce_gid_table) {
		gid_cache = kmalloc(sizeof(*gid_cache) + tprops->gid_tbl_len *
			    sizeof(*gid_cache->table), GFP_KERNEL);
		if (!gid_cache)
			goto err;

		gid_cache->table_len = tprops->gid_tbl_len;
	}

	for (i = 0; i < pkey_cache->table_len; ++i) {
		ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
		if (ret) {
			pr_warn("ib_query_pkey failed (%d) for %s (index %d)\n",
				ret, device->name, i);
			goto err;
		}
	}

	if (!use_roce_gid_table) {
		for (i = 0; i < gid_cache->table_len; ++i) {
			ret = ib_query_gid(device, port, i,
					   gid_cache->table + i, NULL);
			if (ret) {
				pr_warn("ib_query_gid failed (%d) for %s (index %d)\n",
					ret, device->name, i);
				goto err;
			}
		}
	}

	write_lock_irq(&device->cache.lock);

	old_pkey_cache = device->cache.pkey_cache[port - rdma_start_port(device)];

	device->cache.pkey_cache[port - rdma_start_port(device)] = pkey_cache;
	if (!use_roce_gid_table) {
		write_lock(&table->rwlock);
		for (i = 0; i < gid_cache->table_len; i++) {
			modify_gid(device, port, table, i, gid_cache->table + i,
				   &zattr, false);
		}
		write_unlock(&table->rwlock);
	}

	device->cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc;

	write_unlock_irq(&device->cache.lock);

	kfree(gid_cache);
	kfree(old_pkey_cache);
	kfree(tprops);
	return;

err:
	kfree(pkey_cache);
	kfree(gid_cache);
	kfree(tprops);
}

static void ib_cache_task(struct work_struct *_work)
{
	struct ib_update_work *work =
		container_of(_work, struct ib_update_work, work);

	ib_cache_update(work->device, work->port_num);
	kfree(work);
}

static void ib_cache_event(struct ib_event_handler *handler,
			   struct ib_event *event)
{
	struct ib_update_work *work;

	if (event->event == IB_EVENT_PORT_ERR    ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE  ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE   ||
	    event->event == IB_EVENT_CLIENT_REREGISTER ||
	    event->event == IB_EVENT_GID_CHANGE) {
		work = kmalloc(sizeof *work, GFP_ATOMIC);
		if (work) {
			INIT_WORK(&work->work, ib_cache_task);
			work->device   = event->device;
			work->port_num = event->element.port_num;
			queue_work(ib_wq, &work->work);
		}
	}
}

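/*
 * Sketch of how a provider nudges this cache (illustrative, unused):
 * dispatching IB_EVENT_PKEY_CHANGE makes ib_cache_event() above queue
 * ib_cache_task(), which re-reads the port's tables via ib_cache_update().
 */
static void __maybe_unused example_notify_pkey_change(struct ib_device *dev,
						      u8 port_num)
{
	struct ib_event event = {
		.device			= dev,
		.element.port_num	= port_num,
		.event			= IB_EVENT_PKEY_CHANGE,
	};

	ib_dispatch_event(&event);
}
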
int ib_cache_setup_one(struct ib_device *device)
{
	int p;
	int err;

	rwlock_init(&device->cache.lock);

	device->cache.pkey_cache =
		kzalloc(sizeof *device->cache.pkey_cache *
			(rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
	device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache *
					  (rdma_end_port(device) -
					   rdma_start_port(device) + 1),
					  GFP_KERNEL);
	if (!device->cache.pkey_cache ||
	    !device->cache.lmc_cache) {
		pr_warn("Couldn't allocate cache for %s\n", device->name);
		return -ENOMEM;
	}

	err = gid_table_setup_one(device);
	if (err)
		/* Allocated memory will be cleaned in the release function */
		return err;

	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
		ib_cache_update(device, p + rdma_start_port(device));

	INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
			      device, ib_cache_event);
	err = ib_register_event_handler(&device->cache.event_handler);
	if (err)
		goto err;

	return 0;

err:
	gid_table_cleanup_one(device);
	return err;
}

void ib_cache_release_one(struct ib_device *device)
{
	int p;

	/*
	 * The release function frees all the cache elements.
	 * This function should be called as part of freeing
	 * all the device's resources when the cache could no
	 * longer be accessed.
	 */
	if (device->cache.pkey_cache)
		for (p = 0;
		     p <= rdma_end_port(device) - rdma_start_port(device); ++p)
			kfree(device->cache.pkey_cache[p]);

	gid_table_release_one(device);
	kfree(device->cache.pkey_cache);
	kfree(device->cache.lmc_cache);
}

void ib_cache_cleanup_one(struct ib_device *device)
{
	/* The cleanup function unregisters the event handler,
	 * waits for all in-progress workqueue elements and cleans
	 * up the GID cache. This function should be called after
	 * the device was removed from the devices list and all
	 * clients were removed, so the cache exists but is
	 * non-functional and shouldn't be updated anymore.
	 */
	ib_unregister_event_handler(&device->cache.event_handler);
	flush_workqueue(ib_wq);
	gid_table_cleanup_one(device);
}

void __init ib_cache_setup(void)
{
	roce_gid_mgmt_init();
}

void __exit ib_cache_cleanup(void)
{
	roce_gid_mgmt_cleanup();
}