/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_cache.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"

static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u32 port,
                            const u8 *addr)
{
        u8 phy_port;
        u32 i;

        if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
                return 0;

        if (!memcmp(hr_dev->dev_addr[port], addr, ETH_ALEN))
                return 0;

        for (i = 0; i < ETH_ALEN; i++)
                hr_dev->dev_addr[port][i] = addr[i];

        phy_port = hr_dev->iboe.phy_port[port];
        return hr_dev->hw->set_mac(hr_dev, phy_port, addr);
}

static int hns_roce_add_gid(const struct ib_gid_attr *attr, void **context)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
        u32 port = attr->port_num - 1;
        int ret;

        if (port >= hr_dev->caps.num_ports)
                return -EINVAL;

        ret = hr_dev->hw->set_gid(hr_dev, attr->index, &attr->gid, attr);

        return ret;
}

static int hns_roce_del_gid(const struct ib_gid_attr *attr, void **context)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
        u32 port = attr->port_num - 1;
        int ret;

        if (port >= hr_dev->caps.num_ports)
                return -EINVAL;

        ret = hr_dev->hw->set_gid(hr_dev, attr->index, NULL, NULL);

        return ret;
}
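/*
 * Keep the RoCE port's MAC in sync with its backing netdev. Only events
 * that can change the netdev's address (or its initial registration) need
 * to reprogram the hardware; a single port going down is intentionally a
 * no-op here.
 */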
static int handle_en_event(struct hns_roce_dev *hr_dev, u32 port,
                           unsigned long event)
{
        struct device *dev = hr_dev->dev;
        struct net_device *netdev;
        int ret = 0;

        netdev = hr_dev->iboe.netdevs[port];
        if (!netdev) {
                dev_err(dev, "can't find netdev on port(%u)!\n", port);
                return -ENODEV;
        }

        switch (event) {
        case NETDEV_UP:
        case NETDEV_CHANGE:
        case NETDEV_REGISTER:
        case NETDEV_CHANGEADDR:
                ret = hns_roce_set_mac(hr_dev, port, netdev->dev_addr);
                break;
        case NETDEV_DOWN:
                /*
                 * The v1 engine only supports closing all ports together,
                 * so nothing is done when a single port goes down.
                 */
                break;
        default:
                dev_dbg(dev, "NETDEV event = 0x%x!\n", (u32)(event));
                break;
        }

        return ret;
}

static int hns_roce_netdev_event(struct notifier_block *self,
                                 unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct hns_roce_ib_iboe *iboe = NULL;
        struct hns_roce_dev *hr_dev = NULL;
        int ret;
        u32 port;

        hr_dev = container_of(self, struct hns_roce_dev, iboe.nb);
        iboe = &hr_dev->iboe;

        for (port = 0; port < hr_dev->caps.num_ports; port++) {
                if (dev == iboe->netdevs[port]) {
                        ret = handle_en_event(hr_dev, port, event);
                        if (ret)
                                return NOTIFY_DONE;
                        break;
                }
        }

        return NOTIFY_DONE;
}

static int hns_roce_setup_mtu_mac(struct hns_roce_dev *hr_dev)
{
        int ret;
        u8 i;

        for (i = 0; i < hr_dev->caps.num_ports; i++) {
                ret = hns_roce_set_mac(hr_dev, i,
                                       hr_dev->iboe.netdevs[i]->dev_addr);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hns_roce_query_device(struct ib_device *ib_dev,
                                 struct ib_device_attr *props,
                                 struct ib_udata *uhw)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);

        memset(props, 0, sizeof(*props));

        props->fw_ver = hr_dev->caps.fw_ver;
        props->sys_image_guid = cpu_to_be64(hr_dev->sys_image_guid);
        props->max_mr_size = (u64)(~(0ULL));
        props->page_size_cap = hr_dev->caps.page_size_cap;
        props->vendor_id = hr_dev->vendor_id;
        props->vendor_part_id = hr_dev->vendor_part_id;
        props->hw_ver = hr_dev->hw_rev;
        props->max_qp = hr_dev->caps.num_qps;
        props->max_qp_wr = hr_dev->caps.max_wqes;
        props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
                                  IB_DEVICE_RC_RNR_NAK_GEN;
        props->max_send_sge = hr_dev->caps.max_sq_sg;
        props->max_recv_sge = hr_dev->caps.max_rq_sg;
        props->max_sge_rd = 1;
        props->max_cq = hr_dev->caps.num_cqs;
        props->max_cqe = hr_dev->caps.max_cqes;
        props->max_mr = hr_dev->caps.num_mtpts;
        props->max_pd = hr_dev->caps.num_pds;
        props->max_qp_rd_atom = hr_dev->caps.max_qp_dest_rdma;
        props->max_qp_init_rd_atom = hr_dev->caps.max_qp_init_rdma;
        props->atomic_cap = hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_ATOMIC ?
                            IB_ATOMIC_HCA : IB_ATOMIC_NONE;
        props->max_pkeys = 1;
        props->local_ca_ack_delay = hr_dev->caps.local_ca_ack_delay;
        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
                props->max_srq = hr_dev->caps.num_srqs;
                props->max_srq_wr = hr_dev->caps.max_srq_wrs;
                props->max_srq_sge = hr_dev->caps.max_srq_sges;
        }

        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR &&
            hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
                props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
                props->max_fast_reg_page_list_len = HNS_ROCE_FRMR_MAX_PA;
        }

        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
                props->device_cap_flags |= IB_DEVICE_XRC;

        return 0;
}
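/*
 * The active MTU reported below is derived from the bound netdev: its MTU
 * is converted to the nearest IB MTU enum and clamped to the device
 * maximum. For example (illustrative values), a netdev MTU of 9000 with
 * max_mtu == IB_MTU_4096 is reported as IB_MTU_4096.
 */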
static int hns_roce_query_port(struct ib_device *ib_dev, u32 port_num,
                               struct ib_port_attr *props)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
        struct device *dev = hr_dev->dev;
        struct net_device *net_dev;
        unsigned long flags;
        enum ib_mtu mtu;
        u32 port;
        int ret;

        port = port_num - 1;

        /* props being zeroed by the caller, avoid zeroing it here */

        props->max_mtu = hr_dev->caps.max_mtu;
        props->gid_tbl_len = hr_dev->caps.gid_table_len[port];
        props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
                                IB_PORT_VENDOR_CLASS_SUP |
                                IB_PORT_BOOT_MGMT_SUP;
        props->max_msg_sz = HNS_ROCE_MAX_MSG_LEN;
        props->pkey_tbl_len = 1;
        ret = ib_get_eth_speed(ib_dev, port_num, &props->active_speed,
                               &props->active_width);
        if (ret)
                ibdev_warn(ib_dev, "failed to get speed, ret = %d.\n", ret);

        spin_lock_irqsave(&hr_dev->iboe.lock, flags);

        net_dev = hr_dev->iboe.netdevs[port];
        if (!net_dev) {
                spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
                dev_err(dev, "can't find netdev on port(%u)!\n", port);
                return -EINVAL;
        }

        mtu = iboe_get_mtu(net_dev->mtu);
        props->active_mtu = mtu ? min(props->max_mtu, mtu) : IB_MTU_256;
        props->state = netif_running(net_dev) && netif_carrier_ok(net_dev) ?
                       IB_PORT_ACTIVE :
                       IB_PORT_DOWN;
        props->phys_state = props->state == IB_PORT_ACTIVE ?
                            IB_PORT_PHYS_STATE_LINK_UP :
                            IB_PORT_PHYS_STATE_DISABLED;

        spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);

        return 0;
}

static enum rdma_link_layer hns_roce_get_link_layer(struct ib_device *device,
                                                    u32 port_num)
{
        return IB_LINK_LAYER_ETHERNET;
}

static int hns_roce_query_pkey(struct ib_device *ib_dev, u32 port, u16 index,
                               u16 *pkey)
{
        if (index > 0)
                return -EINVAL;

        *pkey = PKEY_ID;

        return 0;
}

static int hns_roce_modify_device(struct ib_device *ib_dev, int mask,
                                  struct ib_device_modify *props)
{
        unsigned long flags;

        if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
                return -EOPNOTSUPP;

        if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
                spin_lock_irqsave(&to_hr_dev(ib_dev)->sm_lock, flags);
                memcpy(ib_dev->node_desc, props->node_desc, NODE_DESC_SIZE);
                spin_unlock_irqrestore(&to_hr_dev(ib_dev)->sm_lock, flags);
        }

        return 0;
}

struct hns_user_mmap_entry *
hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
                                size_t length,
                                enum hns_roce_mmap_type mmap_type)
{
        struct hns_user_mmap_entry *entry;
        int ret;

        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return NULL;

        entry->address = address;
        entry->mmap_type = mmap_type;

        switch (mmap_type) {
        /* pgoff 0 must be used by DB for compatibility */
        case HNS_ROCE_MMAP_TYPE_DB:
                ret = rdma_user_mmap_entry_insert_exact(
                                ucontext, &entry->rdma_entry, length, 0);
                break;
        case HNS_ROCE_MMAP_TYPE_DWQE:
                ret = rdma_user_mmap_entry_insert_range(
                                ucontext, &entry->rdma_entry, length, 1,
                                U32_MAX);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        if (ret) {
                kfree(entry);
                return NULL;
        }

        return entry;
}
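/*
 * Illustrative sketch (not part of the driver): userspace would obtain the
 * pgoff-based offset of an entry created above via
 * rdma_user_mmap_get_offset(&entry->rdma_entry), returned through a uverbs
 * response, and then map it with something like:
 *
 *      void *db = mmap(NULL, length, PROT_READ | PROT_WRITE,
 *                      MAP_SHARED, cmd_fd, offset);
 *
 * The names "length", "cmd_fd" and "offset" here are placeholders, not
 * identifiers from this driver.
 */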
static void hns_roce_dealloc_uar_entry(struct hns_roce_ucontext *context)
{
        if (context->db_mmap_entry)
                rdma_user_mmap_entry_remove(
                        &context->db_mmap_entry->rdma_entry);
}

static int hns_roce_alloc_uar_entry(struct ib_ucontext *uctx)
{
        struct hns_roce_ucontext *context = to_hr_ucontext(uctx);
        u64 address;

        address = context->uar.pfn << PAGE_SHIFT;
        context->db_mmap_entry = hns_roce_user_mmap_entry_insert(
                uctx, address, PAGE_SIZE, HNS_ROCE_MMAP_TYPE_DB);
        if (!context->db_mmap_entry)
                return -ENOMEM;

        return 0;
}

static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
                                   struct ib_udata *udata)
{
        struct hns_roce_ucontext *context = to_hr_ucontext(uctx);
        struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device);
        struct hns_roce_ib_alloc_ucontext_resp resp = {};
        struct hns_roce_ib_alloc_ucontext ucmd = {};
        int ret;

        if (!hr_dev->active)
                return -EAGAIN;

        resp.qp_tab_size = hr_dev->caps.num_qps;
        resp.srq_tab_size = hr_dev->caps.num_srqs;

        ret = ib_copy_from_udata(&ucmd, udata,
                                 min(udata->inlen, sizeof(ucmd)));
        if (ret)
                return ret;

        if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
                context->config = ucmd.config & HNS_ROCE_EXSGE_FLAGS;

        if (context->config & HNS_ROCE_EXSGE_FLAGS) {
                resp.config |= HNS_ROCE_RSP_EXSGE_FLAGS;
                resp.max_inline_data = hr_dev->caps.max_sq_inline;
        }

        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
                context->config |= ucmd.config & HNS_ROCE_RQ_INLINE_FLAGS;
                if (context->config & HNS_ROCE_RQ_INLINE_FLAGS)
                        resp.config |= HNS_ROCE_RSP_RQ_INLINE_FLAGS;
        }

        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQE_INLINE) {
                context->config |= ucmd.config & HNS_ROCE_CQE_INLINE_FLAGS;
                if (context->config & HNS_ROCE_CQE_INLINE_FLAGS)
                        resp.config |= HNS_ROCE_RSP_CQE_INLINE_FLAGS;
        }

        ret = hns_roce_uar_alloc(hr_dev, &context->uar);
        if (ret)
                goto error_fail_uar_alloc;

        ret = hns_roce_alloc_uar_entry(uctx);
        if (ret)
                goto error_fail_uar_entry;

        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
            hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) {
                INIT_LIST_HEAD(&context->page_list);
                mutex_init(&context->page_mutex);
        }

        resp.cqe_size = hr_dev->caps.cqe_sz;

        ret = ib_copy_to_udata(udata, &resp,
                               min(udata->outlen, sizeof(resp)));
        if (ret)
                goto error_fail_copy_to_udata;

        return 0;

error_fail_copy_to_udata:
        hns_roce_dealloc_uar_entry(context);

error_fail_uar_entry:
        ida_free(&hr_dev->uar_ida.ida, (int)context->uar.logic_idx);

error_fail_uar_alloc:
        return ret;
}
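/*
 * The ucontext allocation above doubles as a feature handshake: userspace
 * requests optional features through ucmd.config, and the kernel echoes
 * back only the granted subset in resp.config (extended SGE, RQ inline,
 * CQE inline), so both sides agree on the ABI before any QP or CQ exists.
 */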
static void hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
        struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);
        struct hns_roce_dev *hr_dev = to_hr_dev(ibcontext->device);

        hns_roce_dealloc_uar_entry(context);

        ida_free(&hr_dev->uar_ida.ida, (int)context->uar.logic_idx);
}

static int hns_roce_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma)
{
        struct rdma_user_mmap_entry *rdma_entry;
        struct hns_user_mmap_entry *entry;
        phys_addr_t pfn;
        pgprot_t prot;
        int ret;

        rdma_entry = rdma_user_mmap_entry_get_pgoff(uctx, vma->vm_pgoff);
        if (!rdma_entry)
                return -EINVAL;

        entry = to_hns_mmap(rdma_entry);
        pfn = entry->address >> PAGE_SHIFT;

        switch (entry->mmap_type) {
        case HNS_ROCE_MMAP_TYPE_DB:
        case HNS_ROCE_MMAP_TYPE_DWQE:
                prot = pgprot_device(vma->vm_page_prot);
                break;
        default:
                ret = -EINVAL;
                goto out;
        }

        ret = rdma_user_mmap_io(uctx, vma, pfn, rdma_entry->npages * PAGE_SIZE,
                                prot, rdma_entry);

out:
        rdma_user_mmap_entry_put(rdma_entry);
        return ret;
}

static void hns_roce_free_mmap(struct rdma_user_mmap_entry *rdma_entry)
{
        struct hns_user_mmap_entry *entry = to_hns_mmap(rdma_entry);

        kfree(entry);
}

static int hns_roce_port_immutable(struct ib_device *ib_dev, u32 port_num,
                                   struct ib_port_immutable *immutable)
{
        struct ib_port_attr attr;
        int ret;

        ret = ib_query_port(ib_dev, port_num, &attr);
        if (ret)
                return ret;

        immutable->pkey_tbl_len = attr.pkey_tbl_len;
        immutable->gid_tbl_len = attr.gid_tbl_len;

        immutable->max_mad_size = IB_MGMT_MAD_SIZE;
        immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
        if (to_hr_dev(ib_dev)->caps.flags & HNS_ROCE_CAP_FLAG_ROCE_V1_V2)
                immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

        return 0;
}

static void hns_roce_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
}

static void hns_roce_get_fw_ver(struct ib_device *device, char *str)
{
        u64 fw_ver = to_hr_dev(device)->caps.fw_ver;
        unsigned int major, minor, sub_minor;

        major = upper_32_bits(fw_ver);
        minor = high_16_bits(lower_32_bits(fw_ver));
        sub_minor = low_16_bits(fw_ver);

        snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u.%04u", major, minor,
                 sub_minor);
}
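/*
 * For example (illustrative values): a packed caps.fw_ver of
 * 0x0000000100020003 decodes as major 1 (bits 63:32), minor 2 (bits 31:16)
 * and sub_minor 3 (bits 15:0), and is printed as "1.2.0003".
 */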
static void hns_roce_unregister_device(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_ib_iboe *iboe = &hr_dev->iboe;

        hr_dev->active = false;
        unregister_netdevice_notifier(&iboe->nb);
        ib_unregister_device(&hr_dev->ib_dev);
}

static const struct ib_device_ops hns_roce_dev_ops = {
        .owner = THIS_MODULE,
        .driver_id = RDMA_DRIVER_HNS,
        .uverbs_abi_ver = 1,
        .uverbs_no_driver_id_binding = 1,

        .get_dev_fw_str = hns_roce_get_fw_ver,
        .add_gid = hns_roce_add_gid,
        .alloc_pd = hns_roce_alloc_pd,
        .alloc_ucontext = hns_roce_alloc_ucontext,
        .create_ah = hns_roce_create_ah,
        .create_user_ah = hns_roce_create_ah,
        .create_cq = hns_roce_create_cq,
        .create_qp = hns_roce_create_qp,
        .dealloc_pd = hns_roce_dealloc_pd,
        .dealloc_ucontext = hns_roce_dealloc_ucontext,
        .del_gid = hns_roce_del_gid,
        .dereg_mr = hns_roce_dereg_mr,
        .destroy_ah = hns_roce_destroy_ah,
        .destroy_cq = hns_roce_destroy_cq,
        .disassociate_ucontext = hns_roce_disassociate_ucontext,
        .get_dma_mr = hns_roce_get_dma_mr,
        .get_link_layer = hns_roce_get_link_layer,
        .get_port_immutable = hns_roce_port_immutable,
        .mmap = hns_roce_mmap,
        .mmap_free = hns_roce_free_mmap,
        .modify_device = hns_roce_modify_device,
        .modify_qp = hns_roce_modify_qp,
        .query_ah = hns_roce_query_ah,
        .query_device = hns_roce_query_device,
        .query_pkey = hns_roce_query_pkey,
        .query_port = hns_roce_query_port,
        .reg_user_mr = hns_roce_reg_user_mr,

        INIT_RDMA_OBJ_SIZE(ib_ah, hns_roce_ah, ibah),
        INIT_RDMA_OBJ_SIZE(ib_cq, hns_roce_cq, ib_cq),
        INIT_RDMA_OBJ_SIZE(ib_pd, hns_roce_pd, ibpd),
        INIT_RDMA_OBJ_SIZE(ib_qp, hns_roce_qp, ibqp),
        INIT_RDMA_OBJ_SIZE(ib_ucontext, hns_roce_ucontext, ibucontext),
};

static const struct ib_device_ops hns_roce_dev_mr_ops = {
        .rereg_user_mr = hns_roce_rereg_user_mr,
};

static const struct ib_device_ops hns_roce_dev_mw_ops = {
        .alloc_mw = hns_roce_alloc_mw,
        .dealloc_mw = hns_roce_dealloc_mw,

        INIT_RDMA_OBJ_SIZE(ib_mw, hns_roce_mw, ibmw),
};

static const struct ib_device_ops hns_roce_dev_frmr_ops = {
        .alloc_mr = hns_roce_alloc_mr,
        .map_mr_sg = hns_roce_map_mr_sg,
};

static const struct ib_device_ops hns_roce_dev_srq_ops = {
        .create_srq = hns_roce_create_srq,
        .destroy_srq = hns_roce_destroy_srq,

        INIT_RDMA_OBJ_SIZE(ib_srq, hns_roce_srq, ibsrq),
};

static const struct ib_device_ops hns_roce_dev_xrcd_ops = {
        .alloc_xrcd = hns_roce_alloc_xrcd,
        .dealloc_xrcd = hns_roce_dealloc_xrcd,

        INIT_RDMA_OBJ_SIZE(ib_xrcd, hns_roce_xrcd, ibxrcd),
};

static const struct ib_device_ops hns_roce_dev_restrack_ops = {
        .fill_res_cq_entry = hns_roce_fill_res_cq_entry,
        .fill_res_cq_entry_raw = hns_roce_fill_res_cq_entry_raw,
        .fill_res_qp_entry = hns_roce_fill_res_qp_entry,
        .fill_res_qp_entry_raw = hns_roce_fill_res_qp_entry_raw,
        .fill_res_mr_entry = hns_roce_fill_res_mr_entry,
        .fill_res_mr_entry_raw = hns_roce_fill_res_mr_entry_raw,
};

static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
{
        int ret;
        struct hns_roce_ib_iboe *iboe = NULL;
        struct ib_device *ib_dev = NULL;
        struct device *dev = hr_dev->dev;
        unsigned int i;

        iboe = &hr_dev->iboe;
        spin_lock_init(&iboe->lock);

        ib_dev = &hr_dev->ib_dev;

        ib_dev->node_type = RDMA_NODE_IB_CA;
        ib_dev->dev.parent = dev;

        ib_dev->phys_port_cnt = hr_dev->caps.num_ports;
        ib_dev->local_dma_lkey = hr_dev->caps.reserved_lkey;
        ib_dev->num_comp_vectors = hr_dev->caps.num_comp_vectors;

        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_REREG_MR)
                ib_set_device_ops(ib_dev, &hns_roce_dev_mr_ops);

        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_MW)
                ib_set_device_ops(ib_dev, &hns_roce_dev_mw_ops);

        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR)
                ib_set_device_ops(ib_dev, &hns_roce_dev_frmr_ops);

        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
                ib_set_device_ops(ib_dev, &hns_roce_dev_srq_ops);
                ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_srq_ops);
        }

        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
                ib_set_device_ops(ib_dev, &hns_roce_dev_xrcd_ops);

        ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_ops);
        ib_set_device_ops(ib_dev, &hns_roce_dev_ops);
        ib_set_device_ops(ib_dev, &hns_roce_dev_restrack_ops);
        for (i = 0; i < hr_dev->caps.num_ports; i++) {
                if (!hr_dev->iboe.netdevs[i])
                        continue;

                ret = ib_device_set_netdev(ib_dev, hr_dev->iboe.netdevs[i],
                                           i + 1);
                if (ret)
                        return ret;
        }
        dma_set_max_seg_size(dev, UINT_MAX);
        ret = ib_register_device(ib_dev, "hns_%d", dev);
        if (ret) {
                dev_err(dev, "ib_register_device failed!\n");
                return ret;
        }

        ret = hns_roce_setup_mtu_mac(hr_dev);
        if (ret) {
                dev_err(dev, "setup_mtu_mac failed!\n");
                goto error_failed_setup_mtu_mac;
        }

        iboe->nb.notifier_call = hns_roce_netdev_event;
        ret = register_netdevice_notifier(&iboe->nb);
        if (ret) {
                dev_err(dev, "register_netdevice_notifier failed!\n");
                goto error_failed_setup_mtu_mac;
        }

        hr_dev->active = true;
        return 0;

error_failed_setup_mtu_mac:
        ib_unregister_device(ib_dev);

        return ret;
}
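/*
 * HEM (Hardware Entry Memory) tables back the hardware context entries
 * (MTPT, QPC, IRRL, CQC, ...) with host memory. Each table below is only
 * created when the corresponding capability is advertised, and the error
 * path tears the tables down in exact reverse order of creation.
 */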
static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
{
        struct device *dev = hr_dev->dev;
        int ret;

        ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table,
                                      HEM_TYPE_MTPT, hr_dev->caps.mtpt_entry_sz,
                                      hr_dev->caps.num_mtpts);
        if (ret) {
                dev_err(dev, "failed to init MTPT context memory, aborting.\n");
                return ret;
        }

        ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.qp_table,
                                      HEM_TYPE_QPC, hr_dev->caps.qpc_sz,
                                      hr_dev->caps.num_qps);
        if (ret) {
                dev_err(dev, "failed to init QP context memory, aborting.\n");
                goto err_unmap_dmpt;
        }

        ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.irrl_table,
                                      HEM_TYPE_IRRL,
                                      hr_dev->caps.irrl_entry_sz *
                                      hr_dev->caps.max_qp_init_rdma,
                                      hr_dev->caps.num_qps);
        if (ret) {
                dev_err(dev, "failed to init irrl_table memory, aborting.\n");
                goto err_unmap_qp;
        }

        if (hr_dev->caps.trrl_entry_sz) {
                ret = hns_roce_init_hem_table(hr_dev,
                                              &hr_dev->qp_table.trrl_table,
                                              HEM_TYPE_TRRL,
                                              hr_dev->caps.trrl_entry_sz *
                                              hr_dev->caps.max_qp_dest_rdma,
                                              hr_dev->caps.num_qps);
                if (ret) {
                        dev_err(dev,
                                "failed to init trrl_table memory, aborting.\n");
                        goto err_unmap_irrl;
                }
        }

        ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cq_table.table,
                                      HEM_TYPE_CQC, hr_dev->caps.cqc_entry_sz,
                                      hr_dev->caps.num_cqs);
        if (ret) {
                dev_err(dev, "failed to init CQ context memory, aborting.\n");
                goto err_unmap_trrl;
        }

        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
                ret = hns_roce_init_hem_table(hr_dev, &hr_dev->srq_table.table,
                                              HEM_TYPE_SRQC,
                                              hr_dev->caps.srqc_entry_sz,
                                              hr_dev->caps.num_srqs);
                if (ret) {
                        dev_err(dev,
                                "failed to init SRQ context memory, aborting.\n");
                        goto err_unmap_cq;
                }
        }

        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
                ret = hns_roce_init_hem_table(hr_dev,
                                              &hr_dev->qp_table.sccc_table,
                                              HEM_TYPE_SCCC,
                                              hr_dev->caps.sccc_sz,
                                              hr_dev->caps.num_qps);
                if (ret) {
                        dev_err(dev,
                                "failed to init SCC context memory, aborting.\n");
                        goto err_unmap_srq;
                }
        }

        if (hr_dev->caps.qpc_timer_entry_sz) {
                ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qpc_timer_table,
                                              HEM_TYPE_QPC_TIMER,
                                              hr_dev->caps.qpc_timer_entry_sz,
                                              hr_dev->caps.qpc_timer_bt_num);
                if (ret) {
                        dev_err(dev,
                                "failed to init QPC timer memory, aborting.\n");
                        goto err_unmap_ctx;
                }
        }

        if (hr_dev->caps.cqc_timer_entry_sz) {
                ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cqc_timer_table,
                                              HEM_TYPE_CQC_TIMER,
                                              hr_dev->caps.cqc_timer_entry_sz,
                                              hr_dev->caps.cqc_timer_bt_num);
                if (ret) {
                        dev_err(dev,
                                "failed to init CQC timer memory, aborting.\n");
                        goto err_unmap_qpc_timer;
                }
        }

        if (hr_dev->caps.gmv_entry_sz) {
                ret = hns_roce_init_hem_table(hr_dev, &hr_dev->gmv_table,
                                              HEM_TYPE_GMV,
                                              hr_dev->caps.gmv_entry_sz,
                                              hr_dev->caps.gmv_entry_num);
                if (ret) {
                        dev_err(dev,
                                "failed to init gmv table memory, ret = %d\n",
                                ret);
                        goto err_unmap_cqc_timer;
                }
        }

        return 0;

err_unmap_cqc_timer:
        if (hr_dev->caps.cqc_timer_entry_sz)
                hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cqc_timer_table);

err_unmap_qpc_timer:
        if (hr_dev->caps.qpc_timer_entry_sz)
                hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qpc_timer_table);

err_unmap_ctx:
        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL)
                hns_roce_cleanup_hem_table(hr_dev,
                                           &hr_dev->qp_table.sccc_table);
err_unmap_srq:
        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
                hns_roce_cleanup_hem_table(hr_dev, &hr_dev->srq_table.table);

err_unmap_cq:
        hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);

err_unmap_trrl:
        if (hr_dev->caps.trrl_entry_sz)
                hns_roce_cleanup_hem_table(hr_dev,
                                           &hr_dev->qp_table.trrl_table);

err_unmap_irrl:
        hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);

err_unmap_qp:
        hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);

err_unmap_dmpt:
        hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);

        return ret;
}
/**
 * hns_roce_setup_hca - setup host channel adapter
 * @hr_dev: pointer to hns roce device
 * Return: 0 on success, a negative error code otherwise.
 */
static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
{
        struct device *dev = hr_dev->dev;
        int ret;

        spin_lock_init(&hr_dev->sm_lock);

        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
            hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) {
                INIT_LIST_HEAD(&hr_dev->pgdir_list);
                mutex_init(&hr_dev->pgdir_mutex);
        }

        hns_roce_init_uar_table(hr_dev);

        ret = hns_roce_uar_alloc(hr_dev, &hr_dev->priv_uar);
        if (ret) {
                dev_err(dev, "failed to allocate priv_uar.\n");
                goto err_uar_table_free;
        }

        ret = hns_roce_init_qp_table(hr_dev);
        if (ret) {
                dev_err(dev, "failed to init qp_table.\n");
                goto err_uar_table_free;
        }

        hns_roce_init_pd_table(hr_dev);

        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
                hns_roce_init_xrcd_table(hr_dev);

        hns_roce_init_mr_table(hr_dev);

        hns_roce_init_cq_table(hr_dev);

        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
                hns_roce_init_srq_table(hr_dev);

        return 0;

err_uar_table_free:
        ida_destroy(&hr_dev->uar_ida.ida);
        return ret;
}
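/*
 * On a device error, every CQ that has a completion handler and serves a
 * QP with outstanding work is collected once (the is_armed flag prevents
 * duplicates) and then signalled, so consumers wake up and can observe
 * the flushed state.
 */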
static void check_and_get_armed_cq(struct list_head *cq_list, struct ib_cq *cq)
{
        struct hns_roce_cq *hr_cq = to_hr_cq(cq);
        unsigned long flags;

        spin_lock_irqsave(&hr_cq->lock, flags);
        if (cq->comp_handler && !hr_cq->is_armed) {
                hr_cq->is_armed = 1;
                list_add_tail(&hr_cq->node, cq_list);
        }
        spin_unlock_irqrestore(&hr_cq->lock, flags);
}

void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_qp *hr_qp;
        struct hns_roce_cq *hr_cq;
        struct list_head cq_list;
        unsigned long flags_qp;
        unsigned long flags;

        INIT_LIST_HEAD(&cq_list);

        spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
        list_for_each_entry(hr_qp, &hr_dev->qp_list, node) {
                spin_lock_irqsave(&hr_qp->sq.lock, flags_qp);
                if (hr_qp->sq.tail != hr_qp->sq.head)
                        check_and_get_armed_cq(&cq_list, hr_qp->ibqp.send_cq);
                spin_unlock_irqrestore(&hr_qp->sq.lock, flags_qp);

                spin_lock_irqsave(&hr_qp->rq.lock, flags_qp);
                if ((!hr_qp->ibqp.srq) && (hr_qp->rq.tail != hr_qp->rq.head))
                        check_and_get_armed_cq(&cq_list, hr_qp->ibqp.recv_cq);
                spin_unlock_irqrestore(&hr_qp->rq.lock, flags_qp);
        }

        list_for_each_entry(hr_cq, &cq_list, node)
                hns_roce_cq_completion(hr_dev, hr_cq->cqn);

        spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
}

int hns_roce_init(struct hns_roce_dev *hr_dev)
{
        struct device *dev = hr_dev->dev;
        int ret;

        hr_dev->is_reset = false;

        if (hr_dev->hw->cmq_init) {
                ret = hr_dev->hw->cmq_init(hr_dev);
                if (ret) {
                        dev_err(dev, "init RoCE Command Queue failed!\n");
                        return ret;
                }
        }

        ret = hr_dev->hw->hw_profile(hr_dev);
        if (ret) {
                dev_err(dev, "get RoCE engine profile failed!\n");
                goto error_failed_cmd_init;
        }

        ret = hns_roce_cmd_init(hr_dev);
        if (ret) {
                dev_err(dev, "cmd init failed!\n");
                goto error_failed_cmd_init;
        }

        /*
         * The EQs must be set up while the command path is still in poll
         * mode; command event mode in turn depends on the EQs.
         */
        ret = hr_dev->hw->init_eq(hr_dev);
        if (ret) {
                dev_err(dev, "eq init failed!\n");
                goto error_failed_eq_table;
        }

        if (hr_dev->cmd_mod) {
                ret = hns_roce_cmd_use_events(hr_dev);
                if (ret)
                        dev_warn(dev,
                                 "Cmd event mode failed, set back to poll!\n");
        }

        ret = hns_roce_init_hem(hr_dev);
        if (ret) {
                dev_err(dev, "init HEM(Hardware Entry Memory) failed!\n");
                goto error_failed_init_hem;
        }

        ret = hns_roce_setup_hca(hr_dev);
        if (ret) {
                dev_err(dev, "setup hca failed!\n");
                goto error_failed_setup_hca;
        }

        if (hr_dev->hw->hw_init) {
                ret = hr_dev->hw->hw_init(hr_dev);
                if (ret) {
                        dev_err(dev, "hw_init failed!\n");
                        goto error_failed_engine_init;
                }
        }

        INIT_LIST_HEAD(&hr_dev->qp_list);
        spin_lock_init(&hr_dev->qp_list_lock);
        INIT_LIST_HEAD(&hr_dev->dip_list);
        spin_lock_init(&hr_dev->dip_list_lock);

        ret = hns_roce_register_device(hr_dev);
        if (ret)
                goto error_failed_register_device;

        return 0;

error_failed_register_device:
        if (hr_dev->hw->hw_exit)
                hr_dev->hw->hw_exit(hr_dev);

error_failed_engine_init:
        hns_roce_cleanup_bitmap(hr_dev);

error_failed_setup_hca:
        hns_roce_cleanup_hem(hr_dev);

error_failed_init_hem:
        if (hr_dev->cmd_mod)
                hns_roce_cmd_use_polling(hr_dev);
        hr_dev->hw->cleanup_eq(hr_dev);

error_failed_eq_table:
        hns_roce_cmd_cleanup(hr_dev);

error_failed_cmd_init:
        if (hr_dev->hw->cmq_exit)
                hr_dev->hw->cmq_exit(hr_dev);

        return ret;
}

void hns_roce_exit(struct hns_roce_dev *hr_dev)
{
        hns_roce_unregister_device(hr_dev);

        if (hr_dev->hw->hw_exit)
                hr_dev->hw->hw_exit(hr_dev);
        hns_roce_cleanup_bitmap(hr_dev);
        hns_roce_cleanup_hem(hr_dev);

        if (hr_dev->cmd_mod)
                hns_roce_cmd_use_polling(hr_dev);

        hr_dev->hw->cleanup_eq(hr_dev);
        hns_roce_cmd_cleanup(hr_dev);
        if (hr_dev->hw->cmq_exit)
                hr_dev->hw->cmq_exit(hr_dev);
}

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
MODULE_AUTHOR("Nenglong Zhao <zhaonenglong@hisilicon.com>");
MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
MODULE_DESCRIPTION("HNS RoCE Driver");