Lines matching refs:dev (each entry: source line number, matching code, enclosing function, and whether dev is an argument or a local)
56 static void qedr_ib_dispatch_event(struct qedr_dev *dev, u32 port_num, in qedr_ib_dispatch_event() argument
61 ibev.device = &dev->ibdev; in qedr_ib_dispatch_event()
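
qedr_ib_dispatch_event() is a thin wrapper over the core ib_dispatch_event() call; a minimal sketch of the whole helper, reconstructed around the two matches above (the element.port_num assignment is an assumption based on the standard struct ib_event layout):

    static void qedr_ib_dispatch_event(struct qedr_dev *dev, u32 port_num,
                                       enum ib_event_type type)
    {
            struct ib_event ibev;

            ibev.device = &dev->ibdev;            /* line 61 above */
            ibev.element.port_num = port_num;     /* assumed: standard ib_event field */
            ibev.event = type;

            ib_dispatch_event(&ibev);             /* notify registered IB clients */
    }
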
124 struct qedr_dev *dev = in hw_rev_show() local
127 return sysfs_emit(buf, "0x%x\n", dev->attr.hw_ver); in hw_rev_show()
134 struct qedr_dev *dev = in hca_type_show() local
137 return sysfs_emit(buf, "FastLinQ QL%x %s\n", dev->pdev->device, in hca_type_show()
138 rdma_protocol_iwarp(&dev->ibdev, 1) ? "iWARP" : in hca_type_show()
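
Both sysfs show routines recover the qedr_dev from the generic struct device and format a single line with sysfs_emit(). A sketch of hw_rev_show(), assuming the lookup goes through the usual rdma_device_to_drv_device() helper:

    static ssize_t hw_rev_show(struct device *device,
                               struct device_attribute *attr, char *buf)
    {
            struct qedr_dev *dev =
                    rdma_device_to_drv_device(device, struct qedr_dev, ibdev);

            return sysfs_emit(buf, "0x%x\n", dev->attr.hw_ver);
    }
    static DEVICE_ATTR_RO(hw_rev);
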
166 static int qedr_iw_register_device(struct qedr_dev *dev) in qedr_iw_register_device() argument
168 dev->ibdev.node_type = RDMA_NODE_RNIC; in qedr_iw_register_device()
170 ib_set_device_ops(&dev->ibdev, &qedr_iw_dev_ops); in qedr_iw_register_device()
172 memcpy(dev->ibdev.iw_ifname, in qedr_iw_register_device()
173 dev->ndev->name, sizeof(dev->ibdev.iw_ifname)); in qedr_iw_register_device()
185 static void qedr_roce_register_device(struct qedr_dev *dev) in qedr_roce_register_device() argument
187 dev->ibdev.node_type = RDMA_NODE_IB_CA; in qedr_roce_register_device()
189 ib_set_device_ops(&dev->ibdev, &qedr_roce_dev_ops); in qedr_roce_register_device()
241 static int qedr_register_device(struct qedr_dev *dev) in qedr_register_device() argument
245 dev->ibdev.node_guid = dev->attr.node_guid; in qedr_register_device()
246 memcpy(dev->ibdev.node_desc, QEDR_NODE_DESC, sizeof(QEDR_NODE_DESC)); in qedr_register_device()
248 if (IS_IWARP(dev)) { in qedr_register_device()
249 rc = qedr_iw_register_device(dev); in qedr_register_device()
253 qedr_roce_register_device(dev); in qedr_register_device()
256 dev->ibdev.phys_port_cnt = 1; in qedr_register_device()
257 dev->ibdev.num_comp_vectors = dev->num_cnq; in qedr_register_device()
258 dev->ibdev.dev.parent = &dev->pdev->dev; in qedr_register_device()
260 ib_set_device_ops(&dev->ibdev, &qedr_dev_ops); in qedr_register_device()
262 rc = ib_device_set_netdev(&dev->ibdev, dev->ndev, 1); in qedr_register_device()
266 dma_set_max_seg_size(&dev->pdev->dev, UINT_MAX); in qedr_register_device()
267 return ib_register_device(&dev->ibdev, "qedr%d", &dev->pdev->dev); in qedr_register_device()
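
Taken together, the registration fragments show the bring-up: the iWARP/RoCE split only changes the node type and the per-protocol ops table, then the common path fills the ib_device fields, binds the netdev, and registers with the IB core. A condensed sketch reconstructed from the matches above (the ops tables and the error label are elided):

    static int qedr_register_device(struct qedr_dev *dev)
    {
            int rc;

            dev->ibdev.node_guid = dev->attr.node_guid;
            memcpy(dev->ibdev.node_desc, QEDR_NODE_DESC, sizeof(QEDR_NODE_DESC));

            if (IS_IWARP(dev)) {
                    rc = qedr_iw_register_device(dev);  /* RNIC node type + iw ops */
                    if (rc)
                            return rc;
            } else {
                    qedr_roce_register_device(dev);     /* IB CA node type + roce ops */
            }

            dev->ibdev.phys_port_cnt = 1;
            dev->ibdev.num_comp_vectors = dev->num_cnq;
            dev->ibdev.dev.parent = &dev->pdev->dev;

            ib_set_device_ops(&dev->ibdev, &qedr_dev_ops);

            rc = ib_device_set_netdev(&dev->ibdev, dev->ndev, 1);
            if (rc)
                    return rc;

            dma_set_max_seg_size(&dev->pdev->dev, UINT_MAX);
            return ib_register_device(&dev->ibdev, "qedr%d", &dev->pdev->dev);
    }
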
271 static int qedr_alloc_mem_sb(struct qedr_dev *dev, in qedr_alloc_mem_sb() argument
278 sb_virt = dma_alloc_coherent(&dev->pdev->dev, in qedr_alloc_mem_sb()
283 rc = dev->ops->common->sb_init(dev->cdev, sb_info, in qedr_alloc_mem_sb()
288 dma_free_coherent(&dev->pdev->dev, sizeof(*sb_virt), in qedr_alloc_mem_sb()
296 static void qedr_free_mem_sb(struct qedr_dev *dev, in qedr_free_mem_sb() argument
300 dev->ops->common->sb_release(dev->cdev, sb_info, sb_id, in qedr_free_mem_sb()
302 dma_free_coherent(&dev->pdev->dev, sizeof(*sb_info->sb_virt), in qedr_free_mem_sb()
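
Each CNQ's status block lives in DMA-coherent memory and is handed to the qed core through the common sb_init()/sb_release() callbacks. A sketch of the allocation side; struct status_block, the trailing sb_init() arguments, and QED_SB_TYPE_CNQ are assumptions based on the qed common ops:

    static int qedr_alloc_mem_sb(struct qedr_dev *dev,
                                 struct qed_sb_info *sb_info, u16 sb_id)
    {
            struct status_block *sb_virt;
            dma_addr_t sb_phys;
            int rc;

            sb_virt = dma_alloc_coherent(&dev->pdev->dev, sizeof(*sb_virt),
                                         &sb_phys, GFP_KERNEL);
            if (!sb_virt)
                    return -ENOMEM;

            rc = dev->ops->common->sb_init(dev->cdev, sb_info, sb_virt, sb_phys,
                                           sb_id, QED_SB_TYPE_CNQ);
            if (rc) {
                    dma_free_coherent(&dev->pdev->dev, sizeof(*sb_virt),
                                      sb_virt, sb_phys);
                    return rc;
            }

            return 0;
    }
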
307 static void qedr_free_resources(struct qedr_dev *dev) in qedr_free_resources() argument
311 if (IS_IWARP(dev)) in qedr_free_resources()
312 destroy_workqueue(dev->iwarp_wq); in qedr_free_resources()
314 for (i = 0; i < dev->num_cnq; i++) { in qedr_free_resources()
315 qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i); in qedr_free_resources()
316 dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl); in qedr_free_resources()
319 kfree(dev->cnq_array); in qedr_free_resources()
320 kfree(dev->sb_array); in qedr_free_resources()
321 kfree(dev->sgid_tbl); in qedr_free_resources()
324 static int qedr_alloc_resources(struct qedr_dev *dev) in qedr_alloc_resources() argument
336 dev->sgid_tbl = kcalloc(QEDR_MAX_SGID, sizeof(union ib_gid), in qedr_alloc_resources()
338 if (!dev->sgid_tbl) in qedr_alloc_resources()
341 spin_lock_init(&dev->sgid_lock); in qedr_alloc_resources()
342 xa_init_flags(&dev->srqs, XA_FLAGS_LOCK_IRQ); in qedr_alloc_resources()
344 if (IS_IWARP(dev)) { in qedr_alloc_resources()
345 xa_init(&dev->qps); in qedr_alloc_resources()
346 dev->iwarp_wq = create_singlethread_workqueue("qedr_iwarpq"); in qedr_alloc_resources()
347 if (!dev->iwarp_wq) { in qedr_alloc_resources()
354 dev->sb_array = kcalloc(dev->num_cnq, sizeof(*dev->sb_array), in qedr_alloc_resources()
356 if (!dev->sb_array) { in qedr_alloc_resources()
361 dev->cnq_array = kcalloc(dev->num_cnq, in qedr_alloc_resources()
362 sizeof(*dev->cnq_array), GFP_KERNEL); in qedr_alloc_resources()
363 if (!dev->cnq_array) { in qedr_alloc_resources()
368 dev->sb_start = dev->ops->rdma_get_start_sb(dev->cdev); in qedr_alloc_resources()
374 for (i = 0; i < dev->num_cnq; i++) { in qedr_alloc_resources()
375 cnq = &dev->cnq_array[i]; in qedr_alloc_resources()
377 rc = qedr_alloc_mem_sb(dev, &dev->sb_array[i], in qedr_alloc_resources()
378 dev->sb_start + i); in qedr_alloc_resources()
382 rc = dev->ops->common->chain_alloc(dev->cdev, &cnq->pbl, in qedr_alloc_resources()
387 cnq->dev = dev; in qedr_alloc_resources()
388 cnq->sb = &dev->sb_array[i]; in qedr_alloc_resources()
389 cons_pi = dev->sb_array[i].sb_virt->pi_array; in qedr_alloc_resources()
392 sprintf(cnq->name, "qedr%d@pci:%s", i, pci_name(dev->pdev)); in qedr_alloc_resources()
394 DP_DEBUG(dev, QEDR_MSG_INIT, "cnq[%d].cons=%d\n", in qedr_alloc_resources()
400 qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i); in qedr_alloc_resources()
403 dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl); in qedr_alloc_resources()
404 qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i); in qedr_alloc_resources()
406 kfree(dev->cnq_array); in qedr_alloc_resources()
408 kfree(dev->sb_array); in qedr_alloc_resources()
410 if (IS_IWARP(dev)) in qedr_alloc_resources()
411 destroy_workqueue(dev->iwarp_wq); in qedr_alloc_resources()
413 kfree(dev->sgid_tbl); in qedr_alloc_resources()
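
The error paths at lines 400-413 unwind qedr_alloc_resources() in reverse allocation order: drop the status block of the CNQ that failed, walk back over the CNQs already set up, then free the arrays, the iWARP workqueue, and the SGID table. A sketch of that unwind with illustrative label names:

    /* unwind mirrors allocation order; label names are illustrative */
    err4:                                   /* chain_alloc failed for CNQ i */
            qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
    err3:                                   /* alloc_mem_sb failed for CNQ i */
            for (--i; i >= 0; i--) {
                    dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
                    qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
            }
            kfree(dev->cnq_array);
    err2:
            kfree(dev->sb_array);
    err1:
            if (IS_IWARP(dev))
                    destroy_workqueue(dev->iwarp_wq);
            kfree(dev->sgid_tbl);
            return rc;
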
417 static void qedr_pci_set_atomic(struct qedr_dev *dev, struct pci_dev *pdev) in qedr_pci_set_atomic() argument
423 dev->atomic_cap = IB_ATOMIC_NONE; in qedr_pci_set_atomic()
424 DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability disabled\n"); in qedr_pci_set_atomic()
426 dev->atomic_cap = IB_ATOMIC_GLOB; in qedr_pci_set_atomic()
427 DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability enabled\n"); in qedr_pci_set_atomic()
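
The two branches above set dev->atomic_cap according to whether PCIe AtomicOps can be routed to the root port. A sketch of the probe, assuming it is built on pci_enable_atomic_ops_to_root() and the 64-bit completer capability:

    static void qedr_pci_set_atomic(struct qedr_dev *dev, struct pci_dev *pdev)
    {
            int rc = pci_enable_atomic_ops_to_root(pdev,
                                                   PCI_EXP_DEVCAP2_ATOMIC_COMP64);

            if (rc) {
                    dev->atomic_cap = IB_ATOMIC_NONE;
                    DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability disabled\n");
            } else {
                    dev->atomic_cap = IB_ATOMIC_GLOB;
                    DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability enabled\n");
            }
    }
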
458 DP_ERR(cnq->dev, in qedr_irq_handler()
467 DP_ERR(cnq->dev, in qedr_irq_handler()
492 qed_ops->rdma_cnq_prod_update(cnq->dev->rdma_ctx, cnq->index, in qedr_irq_handler()
500 static void qedr_sync_free_irqs(struct qedr_dev *dev) in qedr_sync_free_irqs() argument
506 for (i = 0; i < dev->int_info.used_cnt; i++) { in qedr_sync_free_irqs()
507 if (dev->int_info.msix_cnt) { in qedr_sync_free_irqs()
508 idx = i * dev->num_hwfns + dev->affin_hwfn_idx; in qedr_sync_free_irqs()
509 vector = dev->int_info.msix[idx].vector; in qedr_sync_free_irqs()
510 free_irq(vector, &dev->cnq_array[i]); in qedr_sync_free_irqs()
514 dev->int_info.used_cnt = 0; in qedr_sync_free_irqs()
517 static int qedr_req_msix_irqs(struct qedr_dev *dev) in qedr_req_msix_irqs() argument
522 if (dev->num_cnq > dev->int_info.msix_cnt) { in qedr_req_msix_irqs()
523 DP_ERR(dev, in qedr_req_msix_irqs()
525 dev->num_cnq, dev->int_info.msix_cnt); in qedr_req_msix_irqs()
529 for (i = 0; i < dev->num_cnq; i++) { in qedr_req_msix_irqs()
530 idx = i * dev->num_hwfns + dev->affin_hwfn_idx; in qedr_req_msix_irqs()
531 rc = request_irq(dev->int_info.msix[idx].vector, in qedr_req_msix_irqs()
532 qedr_irq_handler, 0, dev->cnq_array[i].name, in qedr_req_msix_irqs()
533 &dev->cnq_array[i]); in qedr_req_msix_irqs()
535 DP_ERR(dev, "Request cnq %d irq failed\n", i); in qedr_req_msix_irqs()
536 qedr_sync_free_irqs(dev); in qedr_req_msix_irqs()
538 DP_DEBUG(dev, QEDR_MSG_INIT, in qedr_req_msix_irqs()
540 dev->cnq_array[i].name, i, in qedr_req_msix_irqs()
541 &dev->cnq_array[i]); in qedr_req_msix_irqs()
542 dev->int_info.used_cnt++; in qedr_req_msix_irqs()
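
Request and free use the same idx = i * num_hwfns + affin_hwfn_idx mapping, so each CNQ is tied to the MSI-X vector of its affined engine and qedr_sync_free_irqs() releases exactly the vectors that were taken. A sketch of the request loop (the early return on failure is illustrative):

    for (i = 0; i < dev->num_cnq; i++) {
            idx = i * dev->num_hwfns + dev->affin_hwfn_idx;
            rc = request_irq(dev->int_info.msix[idx].vector,
                             qedr_irq_handler, 0, dev->cnq_array[i].name,
                             &dev->cnq_array[i]);
            if (rc) {
                    DP_ERR(dev, "Request cnq %d irq failed\n", i);
                    qedr_sync_free_irqs(dev);  /* frees the vectors taken so far */
                    return rc;
            }
            dev->int_info.used_cnt++;          /* the free path walks this count */
    }
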
549 static int qedr_setup_irqs(struct qedr_dev *dev) in qedr_setup_irqs() argument
553 DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs\n"); in qedr_setup_irqs()
556 rc = dev->ops->rdma_set_rdma_int(dev->cdev, dev->num_cnq); in qedr_setup_irqs()
560 rc = dev->ops->rdma_get_rdma_int(dev->cdev, &dev->int_info); in qedr_setup_irqs()
562 DP_DEBUG(dev, QEDR_MSG_INIT, "get_rdma_int failed\n"); in qedr_setup_irqs()
566 if (dev->int_info.msix_cnt) { in qedr_setup_irqs()
567 DP_DEBUG(dev, QEDR_MSG_INIT, "rdma msix_cnt = %d\n", in qedr_setup_irqs()
568 dev->int_info.msix_cnt); in qedr_setup_irqs()
569 rc = qedr_req_msix_irqs(dev); in qedr_setup_irqs()
574 DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs succeeded\n"); in qedr_setup_irqs()
579 static int qedr_set_device_attr(struct qedr_dev *dev) in qedr_set_device_attr() argument
586 qed_attr = dev->ops->rdma_query_device(dev->rdma_ctx); in qedr_set_device_attr()
591 DP_ERR(dev, in qedr_set_device_attr()
598 attr = &dev->attr; in qedr_set_device_attr()
648 struct qedr_dev *dev = (struct qedr_dev *)context; in qedr_affiliated_event() local
662 if (IS_ROCE(dev)) { in qedr_affiliated_event()
705 DP_ERR(dev, "unsupported event %d on handle=%llx\n", in qedr_affiliated_event()
719 DP_ERR(dev, "unsupported event %d on handle=%llx\n", e_code, in qedr_affiliated_event()
738 DP_ERR(dev, "CQ event %d on handle %p\n", e_code, cq); in qedr_affiliated_event()
754 DP_ERR(dev, "QP event %d on handle %p\n", e_code, qp); in qedr_affiliated_event()
758 xa_lock_irqsave(&dev->srqs, flags); in qedr_affiliated_event()
759 srq = xa_load(&dev->srqs, srq_id); in qedr_affiliated_event()
769 DP_NOTICE(dev, in qedr_affiliated_event()
773 xa_unlock_irqrestore(&dev->srqs, flags); in qedr_affiliated_event()
774 DP_NOTICE(dev, "SRQ event %d on handle %p\n", e_code, srq); in qedr_affiliated_event()
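
For SRQ-affiliated events the handle is looked up in dev->srqs under the xarray's IRQ-safe lock (the table was initialised with XA_FLAGS_LOCK_IRQ at line 342). A sketch of that lookup as a standalone helper; qedr_srq_event() is a hypothetical name, the handler call is elided, and the log text is illustrative:

    static void qedr_srq_event(struct qedr_dev *dev, u16 srq_id, int e_code)
    {
            struct qedr_srq *srq;
            unsigned long flags;

            xa_lock_irqsave(&dev->srqs, flags);
            srq = xa_load(&dev->srqs, srq_id);
            if (srq) {
                    /* build an ib_event and call srq->ibsrq.event_handler (elided) */
            } else {
                    DP_NOTICE(dev, "SRQ event with no matching handle\n");
            }
            xa_unlock_irqrestore(&dev->srqs, flags);

            DP_NOTICE(dev, "SRQ event %d on handle %p\n", e_code, srq);
    }
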
781 static int qedr_init_hw(struct qedr_dev *dev) in qedr_init_hw() argument
798 in_params->desired_cnq = dev->num_cnq; in qedr_init_hw()
799 for (i = 0; i < dev->num_cnq; i++) { in qedr_init_hw()
802 page_cnt = qed_chain_get_page_cnt(&dev->cnq_array[i].pbl); in qedr_init_hw()
805 p_phys_table = qed_chain_get_pbl_phys(&dev->cnq_array[i].pbl); in qedr_init_hw()
811 events.context = dev; in qedr_init_hw()
815 in_params->max_mtu = dev->ndev->mtu; in qedr_init_hw()
816 dev->iwarp_max_mtu = dev->ndev->mtu; in qedr_init_hw()
817 ether_addr_copy(&in_params->mac_addr[0], dev->ndev->dev_addr); in qedr_init_hw()
819 rc = dev->ops->rdma_init(dev->cdev, in_params); in qedr_init_hw()
823 rc = dev->ops->rdma_add_user(dev->rdma_ctx, &out_params); in qedr_init_hw()
827 dev->db_addr = out_params.dpi_addr; in qedr_init_hw()
828 dev->db_phys_addr = out_params.dpi_phys_addr; in qedr_init_hw()
829 dev->db_size = out_params.dpi_size; in qedr_init_hw()
830 dev->dpi = out_params.dpi; in qedr_init_hw()
832 rc = qedr_set_device_attr(dev); in qedr_init_hw()
836 DP_ERR(dev, "Init HW Failed rc = %d\n", rc); in qedr_init_hw()
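
qedr_init_hw() packages the CNQ PBLs, the event callbacks, the MTU and the MAC into the qed start parameters, then registers a kernel user to obtain a doorbell page (dpi_addr/dpi_phys_addr/dpi_size/dpi at lines 827-830). A sketch of the start-parameter setup; cnq_pbl_list, num_pbl_pages, pbl_ptr and the qed_rdma_events field names are assumptions:

    in_params->desired_cnq = dev->num_cnq;
    for (i = 0; i < dev->num_cnq; i++) {
            /* per-CNQ PBL: page count plus physical address of the page table */
            in_params->cnq_pbl_list[i].num_pbl_pages =
                    qed_chain_get_page_cnt(&dev->cnq_array[i].pbl);
            in_params->cnq_pbl_list[i].pbl_ptr =
                    qed_chain_get_pbl_phys(&dev->cnq_array[i].pbl);
    }

    events.affiliated_event = qedr_affiliated_event;
    events.unaffiliated_event = qedr_unaffiliated_event;
    events.context = dev;
    in_params->events = events;

    in_params->max_mtu = dev->ndev->mtu;
    dev->iwarp_max_mtu = dev->ndev->mtu;
    ether_addr_copy(&in_params->mac_addr[0], dev->ndev->dev_addr);

    rc = dev->ops->rdma_init(dev->cdev, in_params);
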
841 static void qedr_stop_hw(struct qedr_dev *dev) in qedr_stop_hw() argument
843 dev->ops->rdma_remove_user(dev->rdma_ctx, dev->dpi); in qedr_stop_hw()
844 dev->ops->rdma_stop(dev->rdma_ctx); in qedr_stop_hw()
851 struct qedr_dev *dev; in qedr_add() local
854 dev = ib_alloc_device(qedr_dev, ibdev); in qedr_add()
855 if (!dev) { in qedr_add()
860 DP_DEBUG(dev, QEDR_MSG_INIT, "qedr add device called\n"); in qedr_add()
862 dev->pdev = pdev; in qedr_add()
863 dev->ndev = ndev; in qedr_add()
864 dev->cdev = cdev; in qedr_add()
868 DP_ERR(dev, "Failed to get qed roce operations\n"); in qedr_add()
872 dev->ops = qed_ops; in qedr_add()
877 dev->user_dpm_enabled = dev_info.user_dpm_enabled; in qedr_add()
878 dev->rdma_type = dev_info.rdma_type; in qedr_add()
879 dev->num_hwfns = dev_info.common.num_hwfns; in qedr_add()
881 if (IS_IWARP(dev) && QEDR_IS_CMT(dev)) { in qedr_add()
882 rc = dev->ops->iwarp_set_engine_affin(cdev, false); in qedr_add()
884 …DP_ERR(dev, "iWARP is disabled over a 100g device Enabling it may impact L2 performance. To enable… in qedr_add()
888 dev->affin_hwfn_idx = dev->ops->common->get_affin_hwfn_idx(cdev); in qedr_add()
890 dev->rdma_ctx = dev->ops->rdma_get_rdma_ctx(cdev); in qedr_add()
892 dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev); in qedr_add()
893 if (!dev->num_cnq) { in qedr_add()
894 DP_ERR(dev, "Failed. At least one CNQ is required.\n"); in qedr_add()
899 dev->wq_multiplier = QEDR_WQ_MULTIPLIER_DFT; in qedr_add()
901 qedr_pci_set_atomic(dev, pdev); in qedr_add()
903 rc = qedr_alloc_resources(dev); in qedr_add()
907 rc = qedr_init_hw(dev); in qedr_add()
911 rc = qedr_setup_irqs(dev); in qedr_add()
915 rc = qedr_register_device(dev); in qedr_add()
917 DP_ERR(dev, "Unable to allocate register device\n"); in qedr_add()
921 if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state)) in qedr_add()
922 qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE); in qedr_add()
924 DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n"); in qedr_add()
925 return dev; in qedr_add()
928 qedr_sync_free_irqs(dev); in qedr_add()
930 qedr_stop_hw(dev); in qedr_add()
932 qedr_free_resources(dev); in qedr_add()
934 ib_dealloc_device(&dev->ibdev); in qedr_add()
935 DP_ERR(dev, "qedr driver load failed rc=%d\n", rc); in qedr_add()
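
qedr_add() brings the device up in a fixed order: allocate resources, init the HW, request IRQs, register with the IB core; the error labels at lines 928-935 tear that down in reverse. A condensed sketch of the ordering with illustrative label names:

    rc = qedr_alloc_resources(dev);
    if (rc)
            goto init_err;

    rc = qedr_init_hw(dev);
    if (rc)
            goto alloc_err;

    rc = qedr_setup_irqs(dev);
    if (rc)
            goto irq_err;

    rc = qedr_register_device(dev);
    if (rc) {
            DP_ERR(dev, "Unable to allocate register device\n");
            goto reg_err;
    }

    /* success: report the port active (lines 921-922) and hand dev back */
    return dev;

    reg_err:
            qedr_sync_free_irqs(dev);
    irq_err:
            qedr_stop_hw(dev);
    alloc_err:
            qedr_free_resources(dev);
    init_err:
            ib_dealloc_device(&dev->ibdev);
            DP_ERR(dev, "qedr driver load failed rc=%d\n", rc);
            return NULL;
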
940 static void qedr_remove(struct qedr_dev *dev) in qedr_remove() argument
945 ib_unregister_device(&dev->ibdev); in qedr_remove()
947 qedr_stop_hw(dev); in qedr_remove()
948 qedr_sync_free_irqs(dev); in qedr_remove()
949 qedr_free_resources(dev); in qedr_remove()
951 if (IS_IWARP(dev) && QEDR_IS_CMT(dev)) in qedr_remove()
952 dev->ops->iwarp_set_engine_affin(dev->cdev, true); in qedr_remove()
954 ib_dealloc_device(&dev->ibdev); in qedr_remove()
957 static void qedr_close(struct qedr_dev *dev) in qedr_close() argument
959 if (test_and_clear_bit(QEDR_ENET_STATE_BIT, &dev->enet_state)) in qedr_close()
960 qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ERR); in qedr_close()
963 static void qedr_shutdown(struct qedr_dev *dev) in qedr_shutdown() argument
965 qedr_close(dev); in qedr_shutdown()
966 qedr_remove(dev); in qedr_shutdown()
969 static void qedr_open(struct qedr_dev *dev) in qedr_open() argument
971 if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state)) in qedr_open()
972 qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE); in qedr_open()
975 static void qedr_mac_address_change(struct qedr_dev *dev) in qedr_mac_address_change() argument
977 union ib_gid *sgid = &dev->sgid_tbl[0]; in qedr_mac_address_change()
982 ether_addr_copy(&mac_addr[0], dev->ndev->dev_addr); in qedr_mac_address_change()
995 rc = dev->ops->ll2_set_mac_filter(dev->cdev, in qedr_mac_address_change()
996 dev->gsi_ll2_mac_address, in qedr_mac_address_change()
997 dev->ndev->dev_addr); in qedr_mac_address_change()
999 ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr); in qedr_mac_address_change()
1001 qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_GID_CHANGE); in qedr_mac_address_change()
1004 DP_ERR(dev, "Error updating mac filter\n"); in qedr_mac_address_change()
1011 static void qedr_notify(struct qedr_dev *dev, enum qede_rdma_event event) in qedr_notify() argument
1015 qedr_open(dev); in qedr_notify()
1018 qedr_close(dev); in qedr_notify()
1021 qedr_shutdown(dev); in qedr_notify()
1024 qedr_mac_address_change(dev); in qedr_notify()
1027 if (rdma_protocol_iwarp(&dev->ibdev, 1)) in qedr_notify()
1028 if (dev->ndev->mtu != dev->iwarp_max_mtu) in qedr_notify()
1029 DP_NOTICE(dev, in qedr_notify()
1031 dev->iwarp_max_mtu, dev->ndev->mtu); in qedr_notify()
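
qedr_notify() is the entry point qede uses to forward link and configuration events; each qede_rdma_event maps onto one of the helpers above. A sketch of the dispatch; the enum values are taken from qede_rdma.h and assumed to match this kernel, and the MTU notice text is illustrative:

    static void qedr_notify(struct qedr_dev *dev, enum qede_rdma_event event)
    {
            switch (event) {
            case QEDE_UP:
                    qedr_open(dev);                /* report IB_EVENT_PORT_ACTIVE */
                    break;
            case QEDE_DOWN:
                    qedr_close(dev);               /* report IB_EVENT_PORT_ERR */
                    break;
            case QEDE_CLOSE:
                    qedr_shutdown(dev);            /* close, then remove the device */
                    break;
            case QEDE_CHANGE_ADDR:
                    qedr_mac_address_change(dev);  /* refresh GSI MAC filter, GID event */
                    break;
            case QEDE_CHANGE_MTU:
                    if (rdma_protocol_iwarp(&dev->ibdev, 1) &&
                        dev->ndev->mtu != dev->iwarp_max_mtu)
                            DP_NOTICE(dev,
                                      "MTU change will not affect iWARP until qedr is reloaded\n");
                    break;
            default:
                    break;
            }
    }
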