/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/if_vlan.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

#include <rdma/ib_cache.h>

#include "core_priv.h"

struct ib_pkey_cache {
	int		table_len;
	u16		table[];
};

struct ib_update_work {
	struct work_struct work;
	struct ib_event event;
	bool enforce_security;
};

union ib_gid zgid;
EXPORT_SYMBOL(zgid);

enum gid_attr_find_mask {
	GID_ATTR_FIND_MASK_GID		= 1UL << 0,
	GID_ATTR_FIND_MASK_NETDEV	= 1UL << 1,
	GID_ATTR_FIND_MASK_DEFAULT	= 1UL << 2,
	GID_ATTR_FIND_MASK_GID_TYPE	= 1UL << 3,
};

enum gid_table_entry_state {
	GID_TABLE_ENTRY_INVALID		= 1,
	GID_TABLE_ENTRY_VALID		= 2,
	/*
	 * Indicates that entry is pending to be removed, there may
	 * be active users of this GID entry.
	 * When last user of the GID entry releases reference to it,
	 * GID entry is detached from the table.
	 */
	GID_TABLE_ENTRY_PENDING_DEL	= 3,
};

struct roce_gid_ndev_storage {
	struct rcu_head rcu_head;
	struct net_device *ndev;
};

struct ib_gid_table_entry {
	struct kref			kref;
	struct work_struct		del_work;
	struct ib_gid_attr		attr;
	void				*context;
	/* Store the ndev pointer to release reference later on in
	 * call_rcu context because by that time gid_table_entry
	 * and attr might be already freed. So keep a copy of it.
	 * ndev_storage is freed by rcu callback.
	 */
	struct roce_gid_ndev_storage	*ndev_storage;
	enum gid_table_entry_state	state;
};
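/*
 * Entry lifecycle summary (descriptive note): an entry is created by
 * alloc_gid_entry() with a kref of 1 that is owned by the table.
 * put_gid_entry() drops a reference; when the last one goes away,
 * schedule_free_gid() queues free_gid_work() on ib_wq, which detaches
 * the slot under table->lock and releases the netdev reference via
 * call_rcu() on ndev_storage.
 */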
struct ib_gid_table {
	int				sz;
	/* In RoCE, adding a GID to the table requires:
	 * (a) Find whether this GID already exists.
	 * (b) Find a free slot.
	 * (c) Write the new GID.
	 *
	 * Delete requires a different set of operations:
	 * (a) Find the GID.
	 * (b) Delete it.
	 */
	/* Any writer to data_vec must hold this lock and the write side of
	 * rwlock. Readers must hold only rwlock. All writers must be in a
	 * sleepable context.
	 */
	struct mutex			lock;
	/* rwlock protects data_vec[ix]->state and entry pointer.
	 */
	rwlock_t			rwlock;
	struct ib_gid_table_entry	**data_vec;
	/* bit field, each bit indicates the index of default GID */
	u32				default_gid_indices;
};

static void dispatch_gid_change_event(struct ib_device *ib_dev, u32 port)
{
	struct ib_event event;

	event.device		= ib_dev;
	event.element.port_num	= port;
	event.event		= IB_EVENT_GID_CHANGE;

	ib_dispatch_event_clients(&event);
}

static const char * const gid_type_str[] = {
	/* IB/RoCE v1 value is set for IB_GID_TYPE_IB and IB_GID_TYPE_ROCE for
	 * user space compatibility reasons.
	 */
	[IB_GID_TYPE_IB]		= "IB/RoCE v1",
	[IB_GID_TYPE_ROCE]		= "IB/RoCE v1",
	[IB_GID_TYPE_ROCE_UDP_ENCAP]	= "RoCE v2",
};

const char *ib_cache_gid_type_str(enum ib_gid_type gid_type)
{
	if (gid_type < ARRAY_SIZE(gid_type_str) && gid_type_str[gid_type])
		return gid_type_str[gid_type];

	return "Invalid GID type";
}
EXPORT_SYMBOL(ib_cache_gid_type_str);

/** rdma_is_zero_gid - Check if given GID is zero or not.
 * @gid:	GID to check
 * Returns true if given GID is zero, returns false otherwise.
 */
bool rdma_is_zero_gid(const union ib_gid *gid)
{
	return !memcmp(gid, &zgid, sizeof(*gid));
}
EXPORT_SYMBOL(rdma_is_zero_gid);

/** is_gid_index_default - Check if a given index belongs to
 * reserved default GIDs or not.
 * @table:	GID table pointer
 * @index:	Index to check in GID table
 * Returns true if index is one of the reserved default GID indices,
 * otherwise returns false.
 */
static bool is_gid_index_default(const struct ib_gid_table *table,
				 unsigned int index)
{
	return index < 32 && (BIT(index) & table->default_gid_indices);
}

int ib_cache_gid_parse_type_str(const char *buf)
{
	unsigned int i;
	size_t len;
	int err = -EINVAL;

	len = strlen(buf);
	if (len == 0)
		return -EINVAL;

	if (buf[len - 1] == '\n')
		len--;

	for (i = 0; i < ARRAY_SIZE(gid_type_str); ++i)
		if (gid_type_str[i] && !strncmp(buf, gid_type_str[i], len) &&
		    len == strlen(gid_type_str[i])) {
			err = i;
			break;
		}

	return err;
}
EXPORT_SYMBOL(ib_cache_gid_parse_type_str);

static struct ib_gid_table *rdma_gid_table(struct ib_device *device, u32 port)
{
	return device->port_data[port].cache.gid;
}

static bool is_gid_entry_free(const struct ib_gid_table_entry *entry)
{
	return !entry;
}

static bool is_gid_entry_valid(const struct ib_gid_table_entry *entry)
{
	return entry && entry->state == GID_TABLE_ENTRY_VALID;
}

static void schedule_free_gid(struct kref *kref)
{
	struct ib_gid_table_entry *entry =
		container_of(kref, struct ib_gid_table_entry, kref);

	queue_work(ib_wq, &entry->del_work);
}

static void put_gid_ndev(struct rcu_head *head)
{
	struct roce_gid_ndev_storage *storage =
		container_of(head, struct roce_gid_ndev_storage, rcu_head);

	WARN_ON(!storage->ndev);
	/* At this point it's safe to release the netdev reference,
	 * as all callers working on gid_attr->ndev are done
	 * using this netdev.
	 */
	dev_put(storage->ndev);
	kfree(storage);
}

static void free_gid_entry_locked(struct ib_gid_table_entry *entry)
{
	struct ib_device *device = entry->attr.device;
	u32 port_num = entry->attr.port_num;
	struct ib_gid_table *table = rdma_gid_table(device, port_num);

	dev_dbg(&device->dev, "%s port=%u index=%u gid %pI6\n", __func__,
		port_num, entry->attr.index, entry->attr.gid.raw);

	write_lock_irq(&table->rwlock);

	/*
	 * The only way to avoid overwriting NULL in the table is
	 * by checking whether it is still the same entry in the table.
	 * If a new entry was added at this index by the time we free here,
	 * don't overwrite the table entry.
	 */
	if (entry == table->data_vec[entry->attr.index])
		table->data_vec[entry->attr.index] = NULL;
	/* Now this index is ready to be allocated */
	write_unlock_irq(&table->rwlock);

	if (entry->ndev_storage)
		call_rcu(&entry->ndev_storage->rcu_head, put_gid_ndev);
	kfree(entry);
}

static void free_gid_entry(struct kref *kref)
{
	struct ib_gid_table_entry *entry =
		container_of(kref, struct ib_gid_table_entry, kref);

	free_gid_entry_locked(entry);
}

/**
 * free_gid_work - Release reference to the GID entry
 * @work: Work structure to refer to GID entry which needs to be
 * deleted.
 *
 * free_gid_work() frees the entry from the HCA's hardware table
 * if the provider supports it. It releases the reference to the netdevice.
 */
static void free_gid_work(struct work_struct *work)
{
	struct ib_gid_table_entry *entry =
		container_of(work, struct ib_gid_table_entry, del_work);
	struct ib_device *device = entry->attr.device;
	u32 port_num = entry->attr.port_num;
	struct ib_gid_table *table = rdma_gid_table(device, port_num);

	mutex_lock(&table->lock);
	free_gid_entry_locked(entry);
	mutex_unlock(&table->lock);
}

static struct ib_gid_table_entry *
alloc_gid_entry(const struct ib_gid_attr *attr)
{
	struct ib_gid_table_entry *entry;
	struct net_device *ndev;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	ndev = rcu_dereference_protected(attr->ndev, 1);
	if (ndev) {
		entry->ndev_storage = kzalloc(sizeof(*entry->ndev_storage),
					      GFP_KERNEL);
		if (!entry->ndev_storage) {
			kfree(entry);
			return NULL;
		}
		dev_hold(ndev);
		entry->ndev_storage->ndev = ndev;
	}
	kref_init(&entry->kref);
	memcpy(&entry->attr, attr, sizeof(*attr));
	INIT_WORK(&entry->del_work, free_gid_work);
	entry->state = GID_TABLE_ENTRY_INVALID;
	return entry;
}

static void store_gid_entry(struct ib_gid_table *table,
			    struct ib_gid_table_entry *entry)
{
	entry->state = GID_TABLE_ENTRY_VALID;

	dev_dbg(&entry->attr.device->dev, "%s port=%u index=%u gid %pI6\n",
		__func__, entry->attr.port_num, entry->attr.index,
		entry->attr.gid.raw);

	lockdep_assert_held(&table->lock);
	write_lock_irq(&table->rwlock);
	table->data_vec[entry->attr.index] = entry;
	write_unlock_irq(&table->rwlock);
}

static void get_gid_entry(struct ib_gid_table_entry *entry)
{
	kref_get(&entry->kref);
}

static void put_gid_entry(struct ib_gid_table_entry *entry)
{
	kref_put(&entry->kref, schedule_free_gid);
}

static void put_gid_entry_locked(struct ib_gid_table_entry *entry)
{
	kref_put(&entry->kref, free_gid_entry);
}
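/*
 * Descriptive note on the two put variants above: put_gid_entry() defers
 * the final free to ib_wq via schedule_free_gid(), so it may be called
 * without holding table->lock. put_gid_entry_locked() frees inline through
 * free_gid_entry() and is therefore only called with table->lock already
 * held, as del_gid() does.
 */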
static int add_roce_gid(struct ib_gid_table_entry *entry)
{
	const struct ib_gid_attr *attr = &entry->attr;
	int ret;

	if (!attr->ndev) {
		dev_err(&attr->device->dev, "%s NULL netdev port=%u index=%u\n",
			__func__, attr->port_num, attr->index);
		return -EINVAL;
	}
	if (rdma_cap_roce_gid_table(attr->device, attr->port_num)) {
		ret = attr->device->ops.add_gid(attr, &entry->context);
		if (ret) {
			dev_err(&attr->device->dev,
				"%s GID add failed port=%u index=%u\n",
				__func__, attr->port_num, attr->index);
			return ret;
		}
	}
	return 0;
}

/**
 * del_gid - Delete GID table entry
 *
 * @ib_dev:	IB device whose GID entry is to be deleted
 * @port:	Port number of the IB device
 * @table:	GID table of the IB device for a port
 * @ix:		GID entry index to delete
 *
 */
static void del_gid(struct ib_device *ib_dev, u32 port,
		    struct ib_gid_table *table, int ix)
{
	struct roce_gid_ndev_storage *ndev_storage;
	struct ib_gid_table_entry *entry;

	lockdep_assert_held(&table->lock);

	dev_dbg(&ib_dev->dev, "%s port=%u index=%d gid %pI6\n", __func__, port,
		ix, table->data_vec[ix]->attr.gid.raw);

	write_lock_irq(&table->rwlock);
	entry = table->data_vec[ix];
	entry->state = GID_TABLE_ENTRY_PENDING_DEL;
	/*
	 * For non RoCE protocol, GID entry slot is ready to use.
	 */
	if (!rdma_protocol_roce(ib_dev, port))
		table->data_vec[ix] = NULL;
	write_unlock_irq(&table->rwlock);

	ndev_storage = entry->ndev_storage;
	if (ndev_storage) {
		entry->ndev_storage = NULL;
		rcu_assign_pointer(entry->attr.ndev, NULL);
		call_rcu(&ndev_storage->rcu_head, put_gid_ndev);
	}

	if (rdma_cap_roce_gid_table(ib_dev, port))
		ib_dev->ops.del_gid(&entry->attr, &entry->context);

	put_gid_entry_locked(entry);
}

/**
 * add_modify_gid - Add or modify GID table entry
 *
 * @table:	GID table in which the GID is to be added or modified
 * @attr:	Attributes of the GID
 *
 * Returns 0 on success or an appropriate error code. It accepts a zero
 * GID addition for non-RoCE ports from HCAs that report them as valid
 * GIDs. However, such zero GIDs are not added to the cache.
 */
static int add_modify_gid(struct ib_gid_table *table,
			  const struct ib_gid_attr *attr)
{
	struct ib_gid_table_entry *entry;
	int ret = 0;

	/*
	 * Invalidate any old entry in the table to make it safe to write to
	 * this index.
	 */
	if (is_gid_entry_valid(table->data_vec[attr->index]))
		del_gid(attr->device, attr->port_num, table, attr->index);

	/*
	 * Some HCAs report multiple GID entries with only one valid GID, and
	 * leave other unused entries as the zero GID. Convert zero GIDs to
	 * empty table entries instead of storing them.
	 */
	if (rdma_is_zero_gid(&attr->gid))
		return 0;

	entry = alloc_gid_entry(attr);
	if (!entry)
		return -ENOMEM;

	if (rdma_protocol_roce(attr->device, attr->port_num)) {
		ret = add_roce_gid(entry);
		if (ret)
			goto done;
	}

	store_gid_entry(table, entry);
	return 0;

done:
	put_gid_entry(entry);
	return ret;
}

/* rwlock should be read locked, or lock should be held */
static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
		    const struct ib_gid_attr *val, bool default_gid,
		    unsigned long mask, int *pempty)
{
	int i = 0;
	int found = -1;
	int empty = pempty ? -1 : 0;

	while (i < table->sz && (found < 0 || empty < 0)) {
		struct ib_gid_table_entry *data = table->data_vec[i];
		struct ib_gid_attr *attr;
		int curr_index = i;

		i++;

		/* find_gid() is used during GID addition, where it is expected
		 * to return a free entry slot that is not a duplicate.
		 * A free entry slot is requested and returned if pempty is
		 * set, so look up a free slot only when requested.
		 */
		if (pempty && empty < 0) {
			if (is_gid_entry_free(data) &&
			    default_gid ==
				is_gid_index_default(table, curr_index)) {
				/*
				 * Found an invalid (free) entry; allocate it.
				 * If default GID is requested, then our
				 * found slot must be one of the DEFAULT
				 * reserved slots or we fail.
				 * This ensures that only DEFAULT reserved
				 * slots are used for default property GIDs.
				 */
				empty = curr_index;
			}
		}

		/*
		 * Additionally find_gid() is used to find a valid entry during
		 * lookup operation; so ignore the entries which are marked as
		 * pending for removal and the entries which are marked as
		 * invalid.
		 */
		if (!is_gid_entry_valid(data))
			continue;

		if (found >= 0)
			continue;

		attr = &data->attr;
		if (mask & GID_ATTR_FIND_MASK_GID_TYPE &&
		    attr->gid_type != val->gid_type)
			continue;

		if (mask & GID_ATTR_FIND_MASK_GID &&
		    memcmp(gid, &data->attr.gid, sizeof(*gid)))
			continue;

		if (mask & GID_ATTR_FIND_MASK_NETDEV &&
		    attr->ndev != val->ndev)
			continue;

		if (mask & GID_ATTR_FIND_MASK_DEFAULT &&
		    is_gid_index_default(table, curr_index) != default_gid)
			continue;

		found = curr_index;
	}

	if (pempty)
		*pempty = empty;

	return found;
}

static void make_default_gid(struct net_device *dev, union ib_gid *gid)
{
	gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	addrconf_ifid_eui48(&gid->raw[8], dev);
}

static int __ib_cache_gid_add(struct ib_device *ib_dev, u32 port,
			      union ib_gid *gid, struct ib_gid_attr *attr,
			      unsigned long mask, bool default_gid)
{
	struct ib_gid_table *table;
	int ret = 0;
	int empty;
	int ix;

	/* Do not allow adding zero GID in support of
	 * IB spec version 1.3 section 4.1.1 point (6) and
	 * section 12.7.10 and section 12.7.20
	 */
	if (rdma_is_zero_gid(gid))
		return -EINVAL;

	table = rdma_gid_table(ib_dev, port);

	mutex_lock(&table->lock);

	ix = find_gid(table, gid, attr, default_gid, mask, &empty);
	if (ix >= 0)
		goto out_unlock;

	if (empty < 0) {
		ret = -ENOSPC;
		goto out_unlock;
	}
	attr->device = ib_dev;
	attr->index = empty;
	attr->port_num = port;
	attr->gid = *gid;
	ret = add_modify_gid(table, attr);
	if (!ret)
		dispatch_gid_change_event(ib_dev, port);

out_unlock:
	mutex_unlock(&table->lock);
	if (ret)
		pr_warn("%s: unable to add gid %pI6 error=%d\n",
			__func__, gid->raw, ret);
	return ret;
}

int ib_cache_gid_add(struct ib_device *ib_dev, u32 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
{
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE |
			     GID_ATTR_FIND_MASK_NETDEV;

	return __ib_cache_gid_add(ib_dev, port, gid, attr, mask, false);
}
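/*
 * Note on search masks (illustrative summary, no new behavior): the
 * GID_ATTR_FIND_MASK_* bits select which fields of the attribute
 * find_gid() compares. ib_cache_gid_add() above matches on GID value,
 * GID type and netdev, so e.g. the same GID value with RoCE v1 and
 * RoCE v2 types occupies two separate table slots.
 */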
static int
_ib_cache_gid_del(struct ib_device *ib_dev, u32 port,
		  union ib_gid *gid, struct ib_gid_attr *attr,
		  unsigned long mask, bool default_gid)
{
	struct ib_gid_table *table;
	int ret = 0;
	int ix;

	table = rdma_gid_table(ib_dev, port);

	mutex_lock(&table->lock);

	ix = find_gid(table, gid, attr, default_gid, mask, NULL);
	if (ix < 0) {
		ret = -EINVAL;
		goto out_unlock;
	}

	del_gid(ib_dev, port, table, ix);
	dispatch_gid_change_event(ib_dev, port);

out_unlock:
	mutex_unlock(&table->lock);
	if (ret)
		pr_debug("%s: can't delete gid %pI6 error=%d\n",
			 __func__, gid->raw, ret);
	return ret;
}

int ib_cache_gid_del(struct ib_device *ib_dev, u32 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
{
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE |
			     GID_ATTR_FIND_MASK_DEFAULT |
			     GID_ATTR_FIND_MASK_NETDEV;

	return _ib_cache_gid_del(ib_dev, port, gid, attr, mask, false);
}

int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u32 port,
				     struct net_device *ndev)
{
	struct ib_gid_table *table;
	int ix;
	bool deleted = false;

	table = rdma_gid_table(ib_dev, port);

	mutex_lock(&table->lock);

	for (ix = 0; ix < table->sz; ix++) {
		if (is_gid_entry_valid(table->data_vec[ix]) &&
		    table->data_vec[ix]->attr.ndev == ndev) {
			del_gid(ib_dev, port, table, ix);
			deleted = true;
		}
	}

	mutex_unlock(&table->lock);

	if (deleted)
		dispatch_gid_change_event(ib_dev, port);

	return 0;
}

/**
 * rdma_find_gid_by_port - Returns the GID entry attributes when it finds
 * a valid GID entry for given search parameters. It searches for the specified
 * GID value in the local software cache.
 * @ib_dev: The device to query.
 * @gid: The GID value to search for.
 * @gid_type: The GID type to search for.
 * @port: The port number of the device where the GID value should be searched.
 * @ndev: In RoCE, the net device of the device. NULL means ignore.
 *
 * Returns sgid attributes if the GID is found with valid reference or
 * returns ERR_PTR for the error.
 * The caller must invoke rdma_put_gid_attr() to release the reference.
 */
const struct ib_gid_attr *
rdma_find_gid_by_port(struct ib_device *ib_dev,
		      const union ib_gid *gid,
		      enum ib_gid_type gid_type,
		      u32 port, struct net_device *ndev)
{
	int local_index;
	struct ib_gid_table *table;
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE;
	struct ib_gid_attr val = {.ndev = ndev, .gid_type = gid_type};
	const struct ib_gid_attr *attr;
	unsigned long flags;

	if (!rdma_is_port_valid(ib_dev, port))
		return ERR_PTR(-ENOENT);

	table = rdma_gid_table(ib_dev, port);

	if (ndev)
		mask |= GID_ATTR_FIND_MASK_NETDEV;

	read_lock_irqsave(&table->rwlock, flags);
	local_index = find_gid(table, gid, &val, false, mask, NULL);
	if (local_index >= 0) {
		get_gid_entry(table->data_vec[local_index]);
		attr = &table->data_vec[local_index]->attr;
		read_unlock_irqrestore(&table->rwlock, flags);
		return attr;
	}

	read_unlock_irqrestore(&table->rwlock, flags);
	return ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL(rdma_find_gid_by_port);
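/*
 * A minimal usage sketch for the lookup above (hypothetical caller, not
 * part of this file), showing the required rdma_put_gid_attr() pairing:
 *
 *	const struct ib_gid_attr *attr;
 *
 *	attr = rdma_find_gid_by_port(ib_dev, &gid,
 *				     IB_GID_TYPE_ROCE_UDP_ENCAP, port, NULL);
 *	if (IS_ERR(attr))
 *		return PTR_ERR(attr);
 *	... use attr->index, attr->gid_type ...
 *	rdma_put_gid_attr(attr);
 */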
/**
 * rdma_find_gid_by_filter - Returns the GID table attribute where a
 * specified GID value occurs
 * @ib_dev: The device to query.
 * @gid: The GID value to search for.
 * @port: The port number of the device where the GID value could be
 *   searched.
 * @filter: The filter function is executed on any matching GID in the table.
 *   If the filter function returns true, the corresponding index is returned,
 *   otherwise, we continue searching the GID table. It's guaranteed that
 *   while filter is executed, ndev field is valid and the structure won't
 *   change. filter is executed in an atomic context. filter must not be NULL.
 * @context: Private data to pass into the call-back.
 *
 * rdma_find_gid_by_filter() searches for the specified GID value
 * for which the filter function returns true in the port's GID table.
 */
const struct ib_gid_attr *rdma_find_gid_by_filter(
	struct ib_device *ib_dev, const union ib_gid *gid, u32 port,
	bool (*filter)(const union ib_gid *gid, const struct ib_gid_attr *,
		       void *),
	void *context)
{
	const struct ib_gid_attr *res = ERR_PTR(-ENOENT);
	struct ib_gid_table *table;
	unsigned long flags;
	unsigned int i;

	if (!rdma_is_port_valid(ib_dev, port))
		return ERR_PTR(-EINVAL);

	table = rdma_gid_table(ib_dev, port);

	read_lock_irqsave(&table->rwlock, flags);
	for (i = 0; i < table->sz; i++) {
		struct ib_gid_table_entry *entry = table->data_vec[i];

		if (!is_gid_entry_valid(entry))
			continue;

		if (memcmp(gid, &entry->attr.gid, sizeof(*gid)))
			continue;

		if (filter(gid, &entry->attr, context)) {
			get_gid_entry(entry);
			res = &entry->attr;
			break;
		}
	}
	read_unlock_irqrestore(&table->rwlock, flags);
	return res;
}

static struct ib_gid_table *alloc_gid_table(int sz)
{
	struct ib_gid_table *table = kzalloc(sizeof(*table), GFP_KERNEL);

	if (!table)
		return NULL;

	table->data_vec = kcalloc(sz, sizeof(*table->data_vec), GFP_KERNEL);
	if (!table->data_vec)
		goto err_free_table;

	mutex_init(&table->lock);

	table->sz = sz;
	rwlock_init(&table->rwlock);
	return table;

err_free_table:
	kfree(table);
	return NULL;
}

static void release_gid_table(struct ib_device *device,
			      struct ib_gid_table *table)
{
	bool leak = false;
	int i;

	if (!table)
		return;

	for (i = 0; i < table->sz; i++) {
		if (is_gid_entry_free(table->data_vec[i]))
			continue;
		if (kref_read(&table->data_vec[i]->kref) > 1) {
			dev_err(&device->dev,
				"GID entry ref leak for index %d ref=%u\n", i,
				kref_read(&table->data_vec[i]->kref));
			leak = true;
		}
	}
	if (leak)
		return;

	mutex_destroy(&table->lock);
	kfree(table->data_vec);
	kfree(table);
}

static void cleanup_gid_table_port(struct ib_device *ib_dev, u32 port,
				   struct ib_gid_table *table)
{
	int i;

	if (!table)
		return;

	mutex_lock(&table->lock);
	for (i = 0; i < table->sz; ++i) {
		if (is_gid_entry_valid(table->data_vec[i]))
			del_gid(ib_dev, port, table, i);
	}
	mutex_unlock(&table->lock);
}

void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u32 port,
				  struct net_device *ndev,
				  unsigned long gid_type_mask,
				  enum ib_cache_gid_default_mode mode)
{
	union ib_gid gid = { };
	struct ib_gid_attr gid_attr;
	unsigned int gid_type;
	unsigned long mask;

	mask = GID_ATTR_FIND_MASK_GID_TYPE |
	       GID_ATTR_FIND_MASK_DEFAULT |
	       GID_ATTR_FIND_MASK_NETDEV;
	memset(&gid_attr, 0, sizeof(gid_attr));
	gid_attr.ndev = ndev;

	for (gid_type = 0; gid_type < IB_GID_TYPE_SIZE; ++gid_type) {
		if (1UL << gid_type & ~gid_type_mask)
			continue;

		gid_attr.gid_type = gid_type;

		if (mode == IB_CACHE_GID_DEFAULT_MODE_SET) {
			make_default_gid(ndev, &gid);
			__ib_cache_gid_add(ib_dev, port, &gid,
					   &gid_attr, mask, true);
		} else if (mode == IB_CACHE_GID_DEFAULT_MODE_DELETE) {
			_ib_cache_gid_del(ib_dev, port, &gid,
					  &gid_attr, mask, true);
		}
	}
}

static void gid_table_reserve_default(struct ib_device *ib_dev, u32 port,
				      struct ib_gid_table *table)
{
	unsigned int i;
	unsigned long roce_gid_type_mask;
	unsigned int num_default_gids;

	roce_gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
	num_default_gids = hweight_long(roce_gid_type_mask);
	/* Reserve starting indices for default GIDs */
	for (i = 0; i < num_default_gids && i < table->sz; i++)
		table->default_gid_indices |= BIT(i);
}

static void gid_table_release_one(struct ib_device *ib_dev)
{
	u32 p;

	rdma_for_each_port (ib_dev, p) {
		release_gid_table(ib_dev, ib_dev->port_data[p].cache.gid);
		ib_dev->port_data[p].cache.gid = NULL;
	}
}

static int _gid_table_setup_one(struct ib_device *ib_dev)
{
	struct ib_gid_table *table;
	u32 rdma_port;

	rdma_for_each_port (ib_dev, rdma_port) {
		table = alloc_gid_table(
			ib_dev->port_data[rdma_port].immutable.gid_tbl_len);
		if (!table)
			goto rollback_table_setup;

		gid_table_reserve_default(ib_dev, rdma_port, table);
		ib_dev->port_data[rdma_port].cache.gid = table;
	}
	return 0;

rollback_table_setup:
	gid_table_release_one(ib_dev);
	return -ENOMEM;
}

static void gid_table_cleanup_one(struct ib_device *ib_dev)
{
	u32 p;

	rdma_for_each_port (ib_dev, p)
		cleanup_gid_table_port(ib_dev, p,
				       ib_dev->port_data[p].cache.gid);
}

static int gid_table_setup_one(struct ib_device *ib_dev)
{
	int err;

	err = _gid_table_setup_one(ib_dev);

	if (err)
		return err;

	rdma_roce_rescan_device(ib_dev);

	return err;
}

/**
 * rdma_query_gid - Read the GID content from the GID software cache
 * @device:	Device to query the GID
 * @port_num:	Port number of the device
 * @index:	Index of the GID table entry to read
 * @gid:	Pointer to GID where to store the entry's GID
 *
 * rdma_query_gid() only reads the GID entry content for the requested device,
 * port and index. It reads for IB, RoCE and iWarp link layers. It doesn't
 * hold any reference to the GID table entry in the HCA or software cache.
 *
 * Returns 0 on success or appropriate error code.
 */
int rdma_query_gid(struct ib_device *device, u32 port_num,
		   int index, union ib_gid *gid)
{
	struct ib_gid_table *table;
	unsigned long flags;
	int res;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	table = rdma_gid_table(device, port_num);
	read_lock_irqsave(&table->rwlock, flags);

	if (index < 0 || index >= table->sz) {
		res = -EINVAL;
		goto done;
	}

	if (!is_gid_entry_valid(table->data_vec[index])) {
		res = -ENOENT;
		goto done;
	}

	memcpy(gid, &table->data_vec[index]->attr.gid, sizeof(*gid));
	res = 0;

done:
	read_unlock_irqrestore(&table->rwlock, flags);
	return res;
}
EXPORT_SYMBOL(rdma_query_gid);
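/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * rdma_query_gid() copies the GID out by value, so unlike the *_gid_attr
 * APIs no reference needs to be released afterwards:
 *
 *	union ib_gid gid;
 *	int ret;
 *
 *	ret = rdma_query_gid(device, port_num, 0, &gid);
 *	if (ret)
 *		return ret;
 *	pr_info("port %u index 0 gid %pI6\n", port_num, gid.raw);
 */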
/**
 * rdma_read_gid_hw_context - Read the HW GID context from GID attribute
 * @attr:	Pointer to the GID attribute
 *
 * rdma_read_gid_hw_context() reads the driver's GID HW context corresponding
 * to the SGID attr. Callers are required to already be holding the reference
 * to an existing GID entry.
 *
 * Returns the HW GID context
 */
void *rdma_read_gid_hw_context(const struct ib_gid_attr *attr)
{
	return container_of(attr, struct ib_gid_table_entry, attr)->context;
}
EXPORT_SYMBOL(rdma_read_gid_hw_context);

/**
 * rdma_find_gid - Returns SGID attributes if the matching GID is found.
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @gid_type: The GID type to search for.
 * @ndev: In RoCE, the net device of the device. NULL means ignore.
 *
 * rdma_find_gid() searches for the specified GID value in the software cache.
 *
 * Returns GID attributes if a valid GID is found or returns ERR_PTR for the
 * error. The caller must invoke rdma_put_gid_attr() to release the reference.
 */
const struct ib_gid_attr *rdma_find_gid(struct ib_device *device,
					const union ib_gid *gid,
					enum ib_gid_type gid_type,
					struct net_device *ndev)
{
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE;
	struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};
	u32 p;

	if (ndev)
		mask |= GID_ATTR_FIND_MASK_NETDEV;

	rdma_for_each_port(device, p) {
		struct ib_gid_table *table;
		unsigned long flags;
		int index;

		table = device->port_data[p].cache.gid;
		read_lock_irqsave(&table->rwlock, flags);
		index = find_gid(table, gid, &gid_attr_val, false, mask, NULL);
		if (index >= 0) {
			const struct ib_gid_attr *attr;

			get_gid_entry(table->data_vec[index]);
			attr = &table->data_vec[index]->attr;
			read_unlock_irqrestore(&table->rwlock, flags);
			return attr;
		}
		read_unlock_irqrestore(&table->rwlock, flags);
	}

	return ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL(rdma_find_gid);

int ib_get_cached_pkey(struct ib_device *device,
		       u32 port_num,
		       int index,
		       u16 *pkey)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int ret = 0;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache_lock, flags);

	cache = device->port_data[port_num].cache.pkey;

	if (!cache || index < 0 || index >= cache->table_len)
		ret = -EINVAL;
	else
		*pkey = cache->table[index];

	read_unlock_irqrestore(&device->cache_lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_pkey);

void ib_get_cached_subnet_prefix(struct ib_device *device, u32 port_num,
				 u64 *sn_pfx)
{
	unsigned long flags;

	read_lock_irqsave(&device->cache_lock, flags);
	*sn_pfx = device->port_data[port_num].cache.subnet_prefix;
	read_unlock_irqrestore(&device->cache_lock, flags);
}
EXPORT_SYMBOL(ib_get_cached_subnet_prefix);

int ib_find_cached_pkey(struct ib_device *device, u32 port_num,
			u16 pkey, u16 *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;
	int partial_ix = -1;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache_lock, flags);

	cache = device->port_data[port_num].cache.pkey;
	if (!cache) {
		ret = -EINVAL;
		goto err;
	}

	*index = -1;

	for (i = 0; i < cache->table_len; ++i)
		if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
			if (cache->table[i] & 0x8000) {
				*index = i;
				ret = 0;
				break;
			} else {
				partial_ix = i;
			}
		}

	if (ret && partial_ix >= 0) {
		*index = partial_ix;
		ret = 0;
	}

err:
	read_unlock_irqrestore(&device->cache_lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);
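/*
 * Clarifying note on the loop above (grounded in the IB spec): bits 0-14
 * of a P_Key carry the partition number and bit 15 the membership type
 * (1 = full, 0 = limited). ib_find_cached_pkey() therefore compares only
 * the low 15 bits and prefers a full-membership match; e.g. searching for
 * 0x8001 returns the index of a cached 0x8001 entry if present, otherwise
 * the index of a limited-member 0x0001 entry.
 */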
int ib_find_exact_cached_pkey(struct ib_device *device, u32 port_num,
			      u16 pkey, u16 *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache_lock, flags);

	cache = device->port_data[port_num].cache.pkey;
	if (!cache) {
		ret = -EINVAL;
		goto err;
	}

	*index = -1;

	for (i = 0; i < cache->table_len; ++i)
		if (cache->table[i] == pkey) {
			*index = i;
			ret = 0;
			break;
		}

err:
	read_unlock_irqrestore(&device->cache_lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_exact_cached_pkey);

int ib_get_cached_lmc(struct ib_device *device, u32 port_num, u8 *lmc)
{
	unsigned long flags;
	int ret = 0;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache_lock, flags);
	*lmc = device->port_data[port_num].cache.lmc;
	read_unlock_irqrestore(&device->cache_lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_lmc);

int ib_get_cached_port_state(struct ib_device *device, u32 port_num,
			     enum ib_port_state *port_state)
{
	unsigned long flags;
	int ret = 0;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache_lock, flags);
	*port_state = device->port_data[port_num].cache.port_state;
	read_unlock_irqrestore(&device->cache_lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_port_state);

/**
 * rdma_get_gid_attr - Returns GID attributes for a port of a device
 * at a requested gid_index, if a valid GID entry exists.
 * @device:	The device to query.
 * @port_num:	The port number on the device where the GID value
 *		is to be queried.
 * @index:	Index of the GID table entry whose attributes are to
 *		be queried.
 *
 * rdma_get_gid_attr() acquires a reference to the gid attribute from the
 * cached GID table. The caller must invoke rdma_put_gid_attr() to release
 * the reference to the gid attribute regardless of link layer.
 *
 * Returns pointer to valid gid attribute or ERR_PTR for the appropriate error
 * code.
 */
const struct ib_gid_attr *
rdma_get_gid_attr(struct ib_device *device, u32 port_num, int index)
{
	const struct ib_gid_attr *attr = ERR_PTR(-ENODATA);
	struct ib_gid_table *table;
	unsigned long flags;

	if (!rdma_is_port_valid(device, port_num))
		return ERR_PTR(-EINVAL);

	table = rdma_gid_table(device, port_num);
	if (index < 0 || index >= table->sz)
		return ERR_PTR(-EINVAL);

	read_lock_irqsave(&table->rwlock, flags);
	if (!is_gid_entry_valid(table->data_vec[index]))
		goto done;

	get_gid_entry(table->data_vec[index]);
	attr = &table->data_vec[index]->attr;
done:
	read_unlock_irqrestore(&table->rwlock, flags);
	return attr;
}
EXPORT_SYMBOL(rdma_get_gid_attr);
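/*
 * A minimal usage sketch (hypothetical caller, not part of this file)
 * showing the get/put pairing required by rdma_get_gid_attr():
 *
 *	const struct ib_gid_attr *attr;
 *
 *	attr = rdma_get_gid_attr(device, port_num, index);
 *	if (IS_ERR(attr))
 *		return PTR_ERR(attr);
 *	... read attr->gid, attr->gid_type ...
 *	rdma_put_gid_attr(attr);
 */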
/**
 * rdma_query_gid_table - Reads GID table entries of all the ports of a
 * device up to max_entries.
 * @device: The device to query.
 * @entries: Entries where GID entries are returned.
 * @max_entries: Maximum number of entries that can be returned.
 * The entries array must be allocated to hold max_entries number of entries.
 *
 * Returns number of entries on success or appropriate error code.
 */
ssize_t rdma_query_gid_table(struct ib_device *device,
			     struct ib_uverbs_gid_entry *entries,
			     size_t max_entries)
{
	const struct ib_gid_attr *gid_attr;
	ssize_t num_entries = 0, ret;
	struct ib_gid_table *table;
	u32 port_num, i;
	struct net_device *ndev;
	unsigned long flags;

	rdma_for_each_port(device, port_num) {
		table = rdma_gid_table(device, port_num);
		read_lock_irqsave(&table->rwlock, flags);
		for (i = 0; i < table->sz; i++) {
			if (!is_gid_entry_valid(table->data_vec[i]))
				continue;
			if (num_entries >= max_entries) {
				ret = -EINVAL;
				goto err;
			}

			gid_attr = &table->data_vec[i]->attr;

			memcpy(&entries->gid, &gid_attr->gid,
			       sizeof(gid_attr->gid));
			entries->gid_index = gid_attr->index;
			entries->port_num = gid_attr->port_num;
			entries->gid_type = gid_attr->gid_type;
			ndev = rcu_dereference_protected(
				gid_attr->ndev,
				lockdep_is_held(&table->rwlock));
			if (ndev)
				entries->netdev_ifindex = ndev->ifindex;

			num_entries++;
			entries++;
		}
		read_unlock_irqrestore(&table->rwlock, flags);
	}

	return num_entries;
err:
	read_unlock_irqrestore(&table->rwlock, flags);
	return ret;
}
EXPORT_SYMBOL(rdma_query_gid_table);

/**
 * rdma_put_gid_attr - Release reference to the GID attribute
 * @attr:	Pointer to the GID attribute whose reference
 *		needs to be released.
 *
 * rdma_put_gid_attr() must be used to release a reference that was acquired
 * using rdma_get_gid_attr() or any API that returns a pointer to an
 * ib_gid_attr, regardless of IB or RoCE link layer.
 */
void rdma_put_gid_attr(const struct ib_gid_attr *attr)
{
	struct ib_gid_table_entry *entry =
		container_of(attr, struct ib_gid_table_entry, attr);

	put_gid_entry(entry);
}
EXPORT_SYMBOL(rdma_put_gid_attr);

/**
 * rdma_hold_gid_attr - Get reference to existing GID attribute
 *
 * @attr:	Pointer to the GID attribute whose reference
 *		needs to be taken.
 *
 * Increase the reference count to a GID attribute to keep it from being
 * freed. Callers are required to already be holding a reference to the
 * attribute.
 */
void rdma_hold_gid_attr(const struct ib_gid_attr *attr)
{
	struct ib_gid_table_entry *entry =
		container_of(attr, struct ib_gid_table_entry, attr);

	get_gid_entry(entry);
}
EXPORT_SYMBOL(rdma_hold_gid_attr);
/**
 * rdma_read_gid_attr_ndev_rcu - Read GID attribute netdevice
 * which must be in UP state.
 *
 * @attr: Pointer to the GID attribute
 *
 * Returns pointer to netdevice if the netdevice was attached to GID and
 * netdevice is in UP state. Caller must hold RCU lock as this API
 * reads the netdev flags which can change while netdevice migrates to
 * different net namespace. Returns ERR_PTR with error code otherwise.
 */
struct net_device *rdma_read_gid_attr_ndev_rcu(const struct ib_gid_attr *attr)
{
	struct ib_gid_table_entry *entry =
		container_of(attr, struct ib_gid_table_entry, attr);
	struct ib_device *device = entry->attr.device;
	struct net_device *ndev = ERR_PTR(-EINVAL);
	u32 port_num = entry->attr.port_num;
	struct ib_gid_table *table;
	unsigned long flags;
	bool valid;

	table = rdma_gid_table(device, port_num);

	read_lock_irqsave(&table->rwlock, flags);
	valid = is_gid_entry_valid(table->data_vec[attr->index]);
	if (valid) {
		ndev = rcu_dereference(attr->ndev);
		if (!ndev)
			ndev = ERR_PTR(-ENODEV);
	}
	read_unlock_irqrestore(&table->rwlock, flags);
	return ndev;
}
EXPORT_SYMBOL(rdma_read_gid_attr_ndev_rcu);
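/*
 * A minimal usage sketch (hypothetical caller, not part of this file);
 * as documented above, the RCU read lock must be held around the call and
 * for as long as the returned netdev pointer is used:
 *
 *	struct net_device *ndev;
 *
 *	rcu_read_lock();
 *	ndev = rdma_read_gid_attr_ndev_rcu(attr);
 *	if (!IS_ERR(ndev))
 *		... use ndev, e.g. read ndev->ifindex ...
 *	rcu_read_unlock();
 */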
static int get_lower_dev_vlan(struct net_device *lower_dev,
			      struct netdev_nested_priv *priv)
{
	u16 *vlan_id = (u16 *)priv->data;

	if (is_vlan_dev(lower_dev))
		*vlan_id = vlan_dev_vlan_id(lower_dev);

	/* We are interested only in the first level vlan device, so
	 * always return 1 to stop iterating over next level devices.
	 */
	return 1;
}

/**
 * rdma_read_gid_l2_fields - Read the vlan ID and source MAC address
 *			     of a GID entry.
 *
 * @attr:	GID attribute pointer whose L2 fields to be read
 * @vlan_id:	Pointer to vlan id to fill up if the GID entry has
 *		vlan id. It is optional.
 * @smac:	Pointer to smac to fill up for a GID entry. It is optional.
 *
 * rdma_read_gid_l2_fields() returns 0 on success and returns vlan id
 * (if gid entry has vlan) and source MAC, or returns error.
 */
int rdma_read_gid_l2_fields(const struct ib_gid_attr *attr,
			    u16 *vlan_id, u8 *smac)
{
	struct netdev_nested_priv priv = {
		.data = (void *)vlan_id,
	};
	struct net_device *ndev;

	rcu_read_lock();
	ndev = rcu_dereference(attr->ndev);
	if (!ndev) {
		rcu_read_unlock();
		return -ENODEV;
	}
	if (smac)
		ether_addr_copy(smac, ndev->dev_addr);
	if (vlan_id) {
		*vlan_id = 0xffff;
		if (is_vlan_dev(ndev)) {
			*vlan_id = vlan_dev_vlan_id(ndev);
		} else {
			/* If the netdev is an upper device and its lower
			 * device is a vlan device, consider the vlan id of
			 * the lower vlan device for this gid entry.
			 */
			netdev_walk_all_lower_dev_rcu(attr->ndev,
						      get_lower_dev_vlan,
						      &priv);
		}
	}
	rcu_read_unlock();
	return 0;
}
EXPORT_SYMBOL(rdma_read_gid_l2_fields);

static int config_non_roce_gid_cache(struct ib_device *device,
				     u32 port, struct ib_port_attr *tprops)
{
	struct ib_gid_attr gid_attr = {};
	struct ib_gid_table *table;
	int ret = 0;
	int i;

	gid_attr.device = device;
	gid_attr.port_num = port;
	table = rdma_gid_table(device, port);

	mutex_lock(&table->lock);
	for (i = 0; i < tprops->gid_tbl_len; ++i) {
		if (!device->ops.query_gid)
			continue;
		ret = device->ops.query_gid(device, port, i, &gid_attr.gid);
		if (ret) {
			dev_warn(&device->dev,
				 "query_gid failed (%d) for index %d\n", ret,
				 i);
			goto err;
		}
		gid_attr.index = i;
		tprops->subnet_prefix =
			be64_to_cpu(gid_attr.gid.global.subnet_prefix);
		add_modify_gid(table, &gid_attr);
	}
err:
	mutex_unlock(&table->lock);
	return ret;
}

static int
ib_cache_update(struct ib_device *device, u32 port, bool update_gids,
		bool update_pkeys, bool enforce_security)
{
	struct ib_port_attr	*tprops = NULL;
	struct ib_pkey_cache	*pkey_cache = NULL;
	struct ib_pkey_cache	*old_pkey_cache = NULL;
	int			i;
	int			ret;

	if (!rdma_is_port_valid(device, port))
		return -EINVAL;

	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
	if (!tprops)
		return -ENOMEM;

	ret = ib_query_port(device, port, tprops);
	if (ret) {
		dev_warn(&device->dev, "ib_query_port failed (%d)\n", ret);
		goto err;
	}

	if (!rdma_protocol_roce(device, port) && update_gids) {
		ret = config_non_roce_gid_cache(device, port,
						tprops);
		if (ret)
			goto err;
	}

	update_pkeys &= !!tprops->pkey_tbl_len;

	if (update_pkeys) {
		pkey_cache = kmalloc(struct_size(pkey_cache, table,
						 tprops->pkey_tbl_len),
				     GFP_KERNEL);
		if (!pkey_cache) {
			ret = -ENOMEM;
			goto err;
		}

		pkey_cache->table_len = tprops->pkey_tbl_len;

		for (i = 0; i < pkey_cache->table_len; ++i) {
			ret = ib_query_pkey(device, port, i,
					    pkey_cache->table + i);
			if (ret) {
				dev_warn(&device->dev,
					 "ib_query_pkey failed (%d) for index %d\n",
					 ret, i);
				goto err;
			}
		}
	}

	write_lock_irq(&device->cache_lock);

	if (update_pkeys) {
		old_pkey_cache = device->port_data[port].cache.pkey;
		device->port_data[port].cache.pkey = pkey_cache;
	}
	device->port_data[port].cache.lmc = tprops->lmc;
	device->port_data[port].cache.port_state = tprops->state;

	device->port_data[port].cache.subnet_prefix = tprops->subnet_prefix;
	write_unlock_irq(&device->cache_lock);

	if (enforce_security)
		ib_security_cache_change(device,
					 port,
					 tprops->subnet_prefix);

	kfree(old_pkey_cache);
	kfree(tprops);
	return 0;

err:
	kfree(pkey_cache);
	kfree(tprops);
	return ret;
}
static void ib_cache_event_task(struct work_struct *_work)
{
	struct ib_update_work *work =
		container_of(_work, struct ib_update_work, work);
	int ret;

	/* Before distributing the cache update event, first sync
	 * the cache.
	 */
	ret = ib_cache_update(work->event.device, work->event.element.port_num,
			      work->event.event == IB_EVENT_GID_CHANGE,
			      work->event.event == IB_EVENT_PKEY_CHANGE,
			      work->enforce_security);

	/* GID event is notified already for individual GID entries by
	 * dispatch_gid_change_event(). Hence, notify for the rest of the
	 * events.
	 */
	if (!ret && work->event.event != IB_EVENT_GID_CHANGE)
		ib_dispatch_event_clients(&work->event);

	kfree(work);
}

static void ib_generic_event_task(struct work_struct *_work)
{
	struct ib_update_work *work =
		container_of(_work, struct ib_update_work, work);

	ib_dispatch_event_clients(&work->event);
	kfree(work);
}

static bool is_cache_update_event(const struct ib_event *event)
{
	return (event->event == IB_EVENT_PORT_ERR    ||
		event->event == IB_EVENT_PORT_ACTIVE ||
		event->event == IB_EVENT_LID_CHANGE  ||
		event->event == IB_EVENT_PKEY_CHANGE ||
		event->event == IB_EVENT_CLIENT_REREGISTER ||
		event->event == IB_EVENT_GID_CHANGE);
}

/**
 * ib_dispatch_event - Dispatch an asynchronous event
 * @event:	Event to dispatch
 *
 * Low-level drivers must call ib_dispatch_event() to dispatch the
 * event to all registered event handlers when an asynchronous event
 * occurs.
 */
void ib_dispatch_event(const struct ib_event *event)
{
	struct ib_update_work *work;

	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return;

	if (is_cache_update_event(event))
		INIT_WORK(&work->work, ib_cache_event_task);
	else
		INIT_WORK(&work->work, ib_generic_event_task);

	work->event = *event;
	if (event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_GID_CHANGE)
		work->enforce_security = true;

	queue_work(ib_wq, &work->work);
}
EXPORT_SYMBOL(ib_dispatch_event);

int ib_cache_setup_one(struct ib_device *device)
{
	u32 p;
	int err;

	err = gid_table_setup_one(device);
	if (err)
		return err;

	rdma_for_each_port (device, p) {
		err = ib_cache_update(device, p, true, true, true);
		if (err)
			return err;
	}

	return 0;
}

void ib_cache_release_one(struct ib_device *device)
{
	u32 p;

	/*
	 * The release function frees all the cache elements.
	 * This function should be called as part of freeing
	 * all the device's resources when the cache could no
	 * longer be accessed.
	 */
	rdma_for_each_port (device, p)
		kfree(device->port_data[p].cache.pkey);

	gid_table_release_one(device);
}

void ib_cache_cleanup_one(struct ib_device *device)
{
	/* The cleanup function waits for all in-progress workqueue
	 * elements and cleans up the GID cache. This function should be
	 * called after the device was removed from the devices list and
	 * all clients were removed, so the cache exists but is
	 * non-functional and shouldn't be updated anymore.
	 */
	flush_workqueue(ib_wq);
	gid_table_cleanup_one(device);

	/*
	 * Flush the wq a second time for any pending GID delete work.
	 */
	flush_workqueue(ib_wq);
}