Lines matching refs: dd

85 static int hfi1_create_kctxt(struct hfi1_devdata *dd,  in hfi1_create_kctxt()  argument
94 ret = hfi1_create_ctxtdata(ppd, dd->node, &rcd); in hfi1_create_kctxt()
96 dd_dev_err(dd, "Kernel receive context allocation failed\n"); in hfi1_create_kctxt()
119 rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node); in hfi1_create_kctxt()
121 dd_dev_err(dd, "Kernel send context allocation failed\n"); in hfi1_create_kctxt()
132 int hfi1_create_kctxts(struct hfi1_devdata *dd) in hfi1_create_kctxts() argument
137 dd->rcd = kcalloc_node(dd->num_rcv_contexts, sizeof(*dd->rcd), in hfi1_create_kctxts()
138 GFP_KERNEL, dd->node); in hfi1_create_kctxts()
139 if (!dd->rcd) in hfi1_create_kctxts()
142 for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) { in hfi1_create_kctxts()
143 ret = hfi1_create_kctxt(dd, dd->pport); in hfi1_create_kctxts()
150 for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i) in hfi1_create_kctxts()
151 hfi1_free_ctxt(dd->rcd[i]); in hfi1_create_kctxts()
154 kfree(dd->rcd); in hfi1_create_kctxts()
155 dd->rcd = NULL; in hfi1_create_kctxts()
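
The hfi1_create_kctxts() lines above show a common allocate-then-unwind shape: a NUMA-local pointer table is allocated with kcalloc_node(), kernel contexts are created one by one, and anything already created is freed if a later step fails. A minimal sketch of that pattern follows; struct my_dev, struct my_ctxt, create_one_ctxt() and free_one_ctxt() are hypothetical placeholders, not hfi1 identifiers.

	/*
	 * Sketch of the allocate-then-unwind pattern visible in
	 * hfi1_create_kctxts(); types and helpers are hypothetical.
	 */
	struct my_ctxt;

	struct my_dev {
		int node;			/* NUMA node of the device */
		u16 num_ctxts;
		struct my_ctxt **ctxts;		/* per-context pointer table */
	};

	static int create_all_kctxts(struct my_dev *dev)
	{
		u16 i;
		int ret;

		/* Pointer table allocated on the device's NUMA node. */
		dev->ctxts = kcalloc_node(dev->num_ctxts, sizeof(*dev->ctxts),
					  GFP_KERNEL, dev->node);
		if (!dev->ctxts)
			return -ENOMEM;

		for (i = 0; i < dev->num_ctxts; i++) {
			/* Hypothetical helper that fills dev->ctxts[i]. */
			ret = create_one_ctxt(dev, i);
			if (ret)
				goto bail;
		}
		return 0;

	bail:
		/* Free whatever was created, then the table itself. */
		for (i = 0; i < dev->num_ctxts; i++)
			if (dev->ctxts[i])
				free_one_ctxt(dev->ctxts[i]);	/* hypothetical */
		kfree(dev->ctxts);
		dev->ctxts = NULL;
		return ret;
	}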
178 spin_lock_irqsave(&rcd->dd->uctxt_lock, flags); in hfi1_rcd_free()
179 rcd->dd->rcd[rcd->ctxt] = NULL; in hfi1_rcd_free()
180 spin_unlock_irqrestore(&rcd->dd->uctxt_lock, flags); in hfi1_rcd_free()
182 hfi1_free_ctxtdata(rcd->dd, rcd); in hfi1_rcd_free()
225 static int allocate_rcd_index(struct hfi1_devdata *dd, in allocate_rcd_index() argument
231 spin_lock_irqsave(&dd->uctxt_lock, flags); in allocate_rcd_index()
232 for (ctxt = 0; ctxt < dd->num_rcv_contexts; ctxt++) in allocate_rcd_index()
233 if (!dd->rcd[ctxt]) in allocate_rcd_index()
236 if (ctxt < dd->num_rcv_contexts) { in allocate_rcd_index()
238 dd->rcd[ctxt] = rcd; in allocate_rcd_index()
241 spin_unlock_irqrestore(&dd->uctxt_lock, flags); in allocate_rcd_index()
243 if (ctxt >= dd->num_rcv_contexts) in allocate_rcd_index()
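
allocate_rcd_index() above claims a free table slot under a spinlock so two callers cannot pick the same index. A simplified sketch of that scan-and-claim step, reusing the hypothetical struct my_dev from the previous example and assuming it also carries a spinlock_t slot_lock:

	static int alloc_slot(struct my_dev *dev, struct my_ctxt *new, u16 *index)
	{
		unsigned long flags;
		u16 i;

		spin_lock_irqsave(&dev->slot_lock, flags);
		for (i = 0; i < dev->num_ctxts; i++)
			if (!dev->ctxts[i])
				break;
		if (i < dev->num_ctxts)
			dev->ctxts[i] = new;	/* claim the slot while locked */
		spin_unlock_irqrestore(&dev->slot_lock, flags);

		if (i >= dev->num_ctxts)
			return -EBUSY;		/* table is full */

		*index = i;
		return 0;
	}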
263 struct hfi1_ctxtdata *hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd, in hfi1_rcd_get_by_index_safe() argument
266 if (ctxt < dd->num_rcv_contexts) in hfi1_rcd_get_by_index_safe()
267 return hfi1_rcd_get_by_index(dd, ctxt); in hfi1_rcd_get_by_index_safe()
284 struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt) in hfi1_rcd_get_by_index() argument
289 spin_lock_irqsave(&dd->uctxt_lock, flags); in hfi1_rcd_get_by_index()
290 if (dd->rcd[ctxt]) { in hfi1_rcd_get_by_index()
291 rcd = dd->rcd[ctxt]; in hfi1_rcd_get_by_index()
295 spin_unlock_irqrestore(&dd->uctxt_lock, flags); in hfi1_rcd_get_by_index()
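
hfi1_rcd_get_by_index() pairs with that allocator: the same lock guards the read of the slot, and the _safe variant at line 263 just bounds-checks the index first. A lookup like this would normally also take a reference on the object before the lock is dropped; that detail is omitted from the sketch below, which again uses the hypothetical my_dev/slot_lock names.

	static struct my_ctxt *get_ctxt_by_index(struct my_dev *dev, u16 index)
	{
		struct my_ctxt *c = NULL;
		unsigned long flags;

		/* Caller guarantees index is in range (cf. the _safe variant). */
		spin_lock_irqsave(&dev->slot_lock, flags);
		if (dev->ctxts[index])
			c = dev->ctxts[index];
		spin_unlock_irqrestore(&dev->slot_lock, flags);

		return c;
	}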
307 struct hfi1_devdata *dd = ppd->dd; in hfi1_create_ctxtdata() local
312 if (dd->rcv_entries.nctxt_extra > in hfi1_create_ctxtdata()
313 dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt) in hfi1_create_ctxtdata()
314 kctxt_ngroups = (dd->rcv_entries.nctxt_extra - in hfi1_create_ctxtdata()
315 (dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt)); in hfi1_create_ctxtdata()
322 ret = allocate_rcd_index(dd, rcd, &ctxt); in hfi1_create_ctxtdata()
332 rcd->dd = dd; in hfi1_create_ctxtdata()
334 rcd->rcv_array_groups = dd->rcv_entries.ngroups; in hfi1_create_ctxtdata()
354 if (ctxt < dd->first_dyn_alloc_ctxt) { in hfi1_create_ctxtdata()
356 base = ctxt * (dd->rcv_entries.ngroups + 1); in hfi1_create_ctxtdata()
360 (ctxt * dd->rcv_entries.ngroups); in hfi1_create_ctxtdata()
363 u16 ct = ctxt - dd->first_dyn_alloc_ctxt; in hfi1_create_ctxtdata()
365 base = ((dd->n_krcv_queues * dd->rcv_entries.ngroups) + in hfi1_create_ctxtdata()
367 if (ct < dd->rcv_entries.nctxt_extra) { in hfi1_create_ctxtdata()
368 base += ct * (dd->rcv_entries.ngroups + 1); in hfi1_create_ctxtdata()
371 base += dd->rcv_entries.nctxt_extra + in hfi1_create_ctxtdata()
372 (ct * dd->rcv_entries.ngroups); in hfi1_create_ctxtdata()
375 rcd->eager_base = base * dd->rcv_entries.group_size; in hfi1_create_ctxtdata()
393 dd->rcv_entries.group_size; in hfi1_create_ctxtdata()
396 dd->rcv_entries.group_size); in hfi1_create_ctxtdata()
398 dd_dev_err(dd, "ctxt%u: requested too many RcvArray entries.\n", in hfi1_create_ctxtdata()
441 if (ctxt < dd->first_dyn_alloc_ctxt) { in hfi1_create_ctxtdata()
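
The hfi1_create_ctxtdata() lines at 354-375 carve the RcvArray into per-context regions. A hedged worked example using only the expressions visible above, with illustrative numbers: if ngroups = 8 and group_size = 2, a kernel context at index 3 that is granted one of the extra groups starts at base = 3 * (8 + 1) = 27 groups (line 356), so its first RcvArray entry is eager_base = 27 * 2 = 54 (line 375); contexts without an extra group advance by plain multiples of ngroups instead.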
485 struct hfi1_devdata *dd = ppd->dd; in set_link_ipg() local
532 write_csr(dd, SEND_STATIC_RATE_CONTROL, src); in set_link_ipg()
590 struct hfi1_devdata *dd, u8 hw_pidx, u32 port) in hfi1_init_pportdata() argument
596 ppd->dd = dd; in hfi1_init_pportdata()
657 dd_dev_err(dd, "Congestion Control Agent disabled for port %d\n", port); in hfi1_init_pportdata()
664 static int loadtime_init(struct hfi1_devdata *dd) in loadtime_init() argument
677 static int init_after_reset(struct hfi1_devdata *dd) in init_after_reset() argument
686 for (i = 0; i < dd->num_rcv_contexts; i++) { in init_after_reset()
687 rcd = hfi1_rcd_get_by_index(dd, i); in init_after_reset()
688 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS | in init_after_reset()
693 pio_send_control(dd, PSC_GLOBAL_DISABLE); in init_after_reset()
694 for (i = 0; i < dd->num_send_contexts; i++) in init_after_reset()
695 sc_disable(dd->send_contexts[i].sc); in init_after_reset()
700 static void enable_chip(struct hfi1_devdata *dd) in enable_chip() argument
707 pio_send_control(dd, PSC_GLOBAL_ENABLE); in enable_chip()
713 for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) { in enable_chip()
714 rcd = hfi1_rcd_get_by_index(dd, i); in enable_chip()
728 hfi1_rcvctrl(dd, rcvmask, rcd); in enable_chip()
738 static int create_workqueues(struct hfi1_devdata *dd) in create_workqueues() argument
743 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in create_workqueues()
744 ppd = dd->pport + pidx; in create_workqueues()
752 dd->unit, pidx); in create_workqueues()
766 dd->unit, pidx); in create_workqueues()
774 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in create_workqueues()
775 ppd = dd->pport + pidx; in create_workqueues()
792 static void destroy_workqueues(struct hfi1_devdata *dd) in destroy_workqueues() argument
797 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in destroy_workqueues()
798 ppd = dd->pport + pidx; in destroy_workqueues()
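
create_workqueues() and destroy_workqueues() above walk dd->pport and give each port its own workqueue, tearing down the queues already created when a later allocation fails. A rough sketch of that per-port pattern; struct my_port and the num_ports/ports/unit fields on my_dev are assumptions, and the workqueue flags are illustrative only:

	struct my_port {
		struct my_dev *dev;
		struct workqueue_struct *wq;	/* per-port work queue */
	};

	static int create_port_workqueues(struct my_dev *dev)
	{
		int pidx;

		for (pidx = 0; pidx < dev->num_ports; ++pidx) {
			struct my_port *port = &dev->ports[pidx];

			port->wq = alloc_workqueue("my_wq%d_%d", WQ_HIGHPRI, 0,
						   dev->unit, pidx);
			if (!port->wq)
				goto wq_error;
		}
		return 0;

	wq_error:
		/* Destroy only the queues created before the failure. */
		while (--pidx >= 0) {
			struct my_port *port = &dev->ports[pidx];

			destroy_workqueue(port->wq);
			port->wq = NULL;
		}
		return -ENOMEM;
	}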
817 static void enable_general_intr(struct hfi1_devdata *dd) in enable_general_intr() argument
819 set_intr_bits(dd, CCE_ERR_INT, MISC_ERR_INT, true); in enable_general_intr()
820 set_intr_bits(dd, PIO_ERR_INT, TXE_ERR_INT, true); in enable_general_intr()
821 set_intr_bits(dd, IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END, true); in enable_general_intr()
822 set_intr_bits(dd, PBC_INT, GPIO_ASSERT_INT, true); in enable_general_intr()
823 set_intr_bits(dd, TCRIT_INT, TCRIT_INT, true); in enable_general_intr()
824 set_intr_bits(dd, IS_DC_START, IS_DC_END, true); in enable_general_intr()
825 set_intr_bits(dd, IS_SENDCREDIT_START, IS_SENDCREDIT_END, true); in enable_general_intr()
843 int hfi1_init(struct hfi1_devdata *dd, int reinit) in hfi1_init() argument
852 dd->process_pio_send = hfi1_verbs_send_pio; in hfi1_init()
853 dd->process_dma_send = hfi1_verbs_send_dma; in hfi1_init()
854 dd->pio_inline_send = pio_copy; in hfi1_init()
855 dd->process_vnic_dma_send = hfi1_vnic_send_dma; in hfi1_init()
857 if (is_ax(dd)) { in hfi1_init()
858 atomic_set(&dd->drop_packet, DROP_PACKET_ON); in hfi1_init()
859 dd->do_drop = true; in hfi1_init()
861 atomic_set(&dd->drop_packet, DROP_PACKET_OFF); in hfi1_init()
862 dd->do_drop = false; in hfi1_init()
866 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in hfi1_init()
867 ppd = dd->pport + pidx; in hfi1_init()
872 ret = init_after_reset(dd); in hfi1_init()
874 ret = loadtime_init(dd); in hfi1_init()
879 for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i) { in hfi1_init()
886 rcd = hfi1_rcd_get_by_index(dd, i); in hfi1_init()
890 lastfail = hfi1_create_rcvhdrq(dd, rcd); in hfi1_init()
896 dd_dev_err(dd, in hfi1_init()
905 len = PAGE_ALIGN(chip_rcv_contexts(dd) * HFI1_MAX_SHARED_CTXTS * in hfi1_init()
906 sizeof(*dd->events)); in hfi1_init()
907 dd->events = vmalloc_user(len); in hfi1_init()
908 if (!dd->events) in hfi1_init()
909 dd_dev_err(dd, "Failed to allocate user events page\n"); in hfi1_init()
914 dd->status = vmalloc_user(PAGE_SIZE); in hfi1_init()
915 if (!dd->status) in hfi1_init()
916 dd_dev_err(dd, "Failed to allocate dev status page\n"); in hfi1_init()
917 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in hfi1_init()
918 ppd = dd->pport + pidx; in hfi1_init()
919 if (dd->status) in hfi1_init()
921 ppd->statusp = &dd->status->port; in hfi1_init()
927 enable_chip(dd); in hfi1_init()
934 if (dd->status) in hfi1_init()
935 dd->status->dev |= HFI1_STATUS_CHIP_PRESENT | in hfi1_init()
939 enable_general_intr(dd); in hfi1_init()
940 init_qsfp_int(dd); in hfi1_init()
943 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in hfi1_init()
944 ppd = dd->pport + pidx; in hfi1_init()
952 dd_dev_info(dd, in hfi1_init()
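
Inside hfi1_init() (lines 905-916) the user event flags and the device status page are allocated with vmalloc_user(), which returns zeroed, page-aligned memory suitable for later mmap() into user space; the listing suggests a failure here is only logged, not treated as fatal. A minimal hedged sketch of that allocation:

	/* Allocate a zeroed buffer that can later be mapped to user space. */
	static void *alloc_user_page(size_t bytes)
	{
		void *p = vmalloc_user(PAGE_ALIGN(bytes));

		if (!p)
			pr_err("failed to allocate user-visible page\n");
		/* Caller decides whether a missing page is fatal. */
		return p;
	}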
981 static void stop_timers(struct hfi1_devdata *dd) in stop_timers() argument
986 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in stop_timers()
987 ppd = dd->pport + pidx; in stop_timers()
1004 static void shutdown_device(struct hfi1_devdata *dd) in shutdown_device() argument
1011 if (dd->flags & HFI1_SHUTDOWN) in shutdown_device()
1013 dd->flags |= HFI1_SHUTDOWN; in shutdown_device()
1015 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in shutdown_device()
1016 ppd = dd->pport + pidx; in shutdown_device()
1023 dd->flags &= ~HFI1_INITTED; in shutdown_device()
1026 set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false); in shutdown_device()
1027 msix_clean_up_interrupts(dd); in shutdown_device()
1029 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in shutdown_device()
1030 ppd = dd->pport + pidx; in shutdown_device()
1031 for (i = 0; i < dd->num_rcv_contexts; i++) { in shutdown_device()
1032 rcd = hfi1_rcd_get_by_index(dd, i); in shutdown_device()
1033 hfi1_rcvctrl(dd, HFI1_RCVCTRL_TAILUPD_DIS | in shutdown_device()
1044 for (i = 0; i < dd->num_send_contexts; i++) in shutdown_device()
1045 sc_flush(dd->send_contexts[i].sc); in shutdown_device()
1054 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in shutdown_device()
1055 ppd = dd->pport + pidx; in shutdown_device()
1058 for (i = 0; i < dd->num_send_contexts; i++) in shutdown_device()
1059 sc_disable(dd->send_contexts[i].sc); in shutdown_device()
1061 pio_send_control(dd, PSC_GLOBAL_DISABLE); in shutdown_device()
1075 sdma_exit(dd); in shutdown_device()
1086 void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) in hfi1_free_ctxtdata() argument
1094 dma_free_coherent(&dd->pcidev->dev, rcvhdrq_size(rcd), in hfi1_free_ctxtdata()
1098 dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE, in hfi1_free_ctxtdata()
1111 dma_free_coherent(&dd->pcidev->dev, in hfi1_free_ctxtdata()
1139 static struct hfi1_asic_data *release_asic_data(struct hfi1_devdata *dd) in release_asic_data() argument
1144 if (!dd->asic_data) in release_asic_data()
1146 dd->asic_data->dds[dd->hfi1_id] = NULL; in release_asic_data()
1147 other = dd->hfi1_id ? 0 : 1; in release_asic_data()
1148 ad = dd->asic_data; in release_asic_data()
1149 dd->asic_data = NULL; in release_asic_data()
1154 static void finalize_asic_data(struct hfi1_devdata *dd, in finalize_asic_data() argument
1157 clean_up_i2c(dd, ad); in finalize_asic_data()
1168 void hfi1_free_devdata(struct hfi1_devdata *dd) in hfi1_free_devdata() argument
1174 __xa_erase(&hfi1_dev_table, dd->unit); in hfi1_free_devdata()
1175 ad = release_asic_data(dd); in hfi1_free_devdata()
1178 finalize_asic_data(dd, ad); in hfi1_free_devdata()
1179 free_platform_config(dd); in hfi1_free_devdata()
1181 free_percpu(dd->int_counter); in hfi1_free_devdata()
1182 free_percpu(dd->rcv_limit); in hfi1_free_devdata()
1183 free_percpu(dd->send_schedule); in hfi1_free_devdata()
1184 free_percpu(dd->tx_opstats); in hfi1_free_devdata()
1185 dd->int_counter = NULL; in hfi1_free_devdata()
1186 dd->rcv_limit = NULL; in hfi1_free_devdata()
1187 dd->send_schedule = NULL; in hfi1_free_devdata()
1188 dd->tx_opstats = NULL; in hfi1_free_devdata()
1189 kfree(dd->comp_vect); in hfi1_free_devdata()
1190 dd->comp_vect = NULL; in hfi1_free_devdata()
1191 if (dd->rcvhdrtail_dummy_kvaddr) in hfi1_free_devdata()
1192 dma_free_coherent(&dd->pcidev->dev, sizeof(u64), in hfi1_free_devdata()
1193 (void *)dd->rcvhdrtail_dummy_kvaddr, in hfi1_free_devdata()
1194 dd->rcvhdrtail_dummy_dma); in hfi1_free_devdata()
1195 dd->rcvhdrtail_dummy_kvaddr = NULL; in hfi1_free_devdata()
1196 sdma_clean(dd, dd->num_sdma); in hfi1_free_devdata()
1197 rvt_dealloc_device(&dd->verbs_dev.rdi); in hfi1_free_devdata()
1212 struct hfi1_devdata *dd; in hfi1_alloc_devdata() local
1218 dd = (struct hfi1_devdata *)rvt_alloc_device(sizeof(*dd) + extra, in hfi1_alloc_devdata()
1220 if (!dd) in hfi1_alloc_devdata()
1222 dd->num_pports = nports; in hfi1_alloc_devdata()
1223 dd->pport = (struct hfi1_pportdata *)(dd + 1); in hfi1_alloc_devdata()
1224 dd->pcidev = pdev; in hfi1_alloc_devdata()
1225 pci_set_drvdata(pdev, dd); in hfi1_alloc_devdata()
1227 ret = xa_alloc_irq(&hfi1_dev_table, &dd->unit, dd, xa_limit_32b, in hfi1_alloc_devdata()
1234 rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s_%d", class_name(), dd->unit); in hfi1_alloc_devdata()
1239 dd->node = pcibus_to_node(pdev->bus); in hfi1_alloc_devdata()
1240 if (dd->node == NUMA_NO_NODE) { in hfi1_alloc_devdata()
1241 dd_dev_err(dd, "Invalid PCI NUMA node. Performance may be affected\n"); in hfi1_alloc_devdata()
1242 dd->node = 0; in hfi1_alloc_devdata()
1249 spin_lock_init(&dd->sc_lock); in hfi1_alloc_devdata()
1250 spin_lock_init(&dd->sendctrl_lock); in hfi1_alloc_devdata()
1251 spin_lock_init(&dd->rcvctrl_lock); in hfi1_alloc_devdata()
1252 spin_lock_init(&dd->uctxt_lock); in hfi1_alloc_devdata()
1253 spin_lock_init(&dd->hfi1_diag_trans_lock); in hfi1_alloc_devdata()
1254 spin_lock_init(&dd->sc_init_lock); in hfi1_alloc_devdata()
1255 spin_lock_init(&dd->dc8051_memlock); in hfi1_alloc_devdata()
1256 seqlock_init(&dd->sc2vl_lock); in hfi1_alloc_devdata()
1257 spin_lock_init(&dd->sde_map_lock); in hfi1_alloc_devdata()
1258 spin_lock_init(&dd->pio_map_lock); in hfi1_alloc_devdata()
1259 mutex_init(&dd->dc8051_lock); in hfi1_alloc_devdata()
1260 init_waitqueue_head(&dd->event_queue); in hfi1_alloc_devdata()
1261 spin_lock_init(&dd->irq_src_lock); in hfi1_alloc_devdata()
1263 dd->int_counter = alloc_percpu(u64); in hfi1_alloc_devdata()
1264 if (!dd->int_counter) { in hfi1_alloc_devdata()
1269 dd->rcv_limit = alloc_percpu(u64); in hfi1_alloc_devdata()
1270 if (!dd->rcv_limit) { in hfi1_alloc_devdata()
1275 dd->send_schedule = alloc_percpu(u64); in hfi1_alloc_devdata()
1276 if (!dd->send_schedule) { in hfi1_alloc_devdata()
1281 dd->tx_opstats = alloc_percpu(struct hfi1_opcode_stats_perctx); in hfi1_alloc_devdata()
1282 if (!dd->tx_opstats) { in hfi1_alloc_devdata()
1287 dd->comp_vect = kzalloc(sizeof(*dd->comp_vect), GFP_KERNEL); in hfi1_alloc_devdata()
1288 if (!dd->comp_vect) { in hfi1_alloc_devdata()
1294 dd->rcvhdrtail_dummy_kvaddr = in hfi1_alloc_devdata()
1295 dma_alloc_coherent(&dd->pcidev->dev, sizeof(u64), in hfi1_alloc_devdata()
1296 &dd->rcvhdrtail_dummy_dma, GFP_KERNEL); in hfi1_alloc_devdata()
1297 if (!dd->rcvhdrtail_dummy_kvaddr) { in hfi1_alloc_devdata()
1302 atomic_set(&dd->ipoib_rsm_usr_num, 0); in hfi1_alloc_devdata()
1303 return dd; in hfi1_alloc_devdata()
1306 hfi1_free_devdata(dd); in hfi1_alloc_devdata()
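
hfi1_alloc_devdata() allocates several per-CPU counters (int_counter, rcv_limit, send_schedule, tx_opstats) and on any failure jumps to a common bail path that ends in hfi1_free_devdata() at line 1306, which tolerates the half-initialized state by freeing and NULLing each pointer. A condensed sketch of that idiom with hypothetical counter names:

	static struct my_dev *alloc_my_dev(void)
	{
		struct my_dev *dev;

		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
		if (!dev)
			return ERR_PTR(-ENOMEM);

		spin_lock_init(&dev->slot_lock);

		dev->int_counter = alloc_percpu(u64);
		if (!dev->int_counter)
			goto bail;
		dev->rcv_limit = alloc_percpu(u64);
		if (!dev->rcv_limit)
			goto bail;

		return dev;

	bail:
		/* Safe on partial init: free_percpu(NULL) is a no-op. */
		free_percpu(dev->int_counter);
		free_percpu(dev->rcv_limit);
		kfree(dev);
		return ERR_PTR(-ENOMEM);
	}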
1315 void hfi1_disable_after_error(struct hfi1_devdata *dd) in hfi1_disable_after_error() argument
1317 if (dd->flags & HFI1_INITTED) { in hfi1_disable_after_error()
1320 dd->flags &= ~HFI1_INITTED; in hfi1_disable_after_error()
1321 if (dd->pport) in hfi1_disable_after_error()
1322 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in hfi1_disable_after_error()
1325 ppd = dd->pport + pidx; in hfi1_disable_after_error()
1326 if (dd->flags & HFI1_PRESENT) in hfi1_disable_after_error()
1339 if (dd->status) in hfi1_disable_after_error()
1340 dd->status->dev |= HFI1_STATUS_HWERROR; in hfi1_disable_after_error()
1481 static void cleanup_device_data(struct hfi1_devdata *dd) in cleanup_device_data() argument
1487 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in cleanup_device_data()
1488 struct hfi1_pportdata *ppd = &dd->pport[pidx]; in cleanup_device_data()
1507 free_credit_return(dd); in cleanup_device_data()
1513 for (ctxt = 0; dd->rcd && ctxt < dd->num_rcv_contexts; ctxt++) { in cleanup_device_data()
1514 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt]; in cleanup_device_data()
1522 kfree(dd->rcd); in cleanup_device_data()
1523 dd->rcd = NULL; in cleanup_device_data()
1525 free_pio_map(dd); in cleanup_device_data()
1527 for (ctxt = 0; ctxt < dd->num_send_contexts; ctxt++) in cleanup_device_data()
1528 sc_free(dd->send_contexts[ctxt].sc); in cleanup_device_data()
1529 dd->num_send_contexts = 0; in cleanup_device_data()
1530 kfree(dd->send_contexts); in cleanup_device_data()
1531 dd->send_contexts = NULL; in cleanup_device_data()
1532 kfree(dd->hw_to_sw); in cleanup_device_data()
1533 dd->hw_to_sw = NULL; in cleanup_device_data()
1534 kfree(dd->boardname); in cleanup_device_data()
1535 vfree(dd->events); in cleanup_device_data()
1536 vfree(dd->status); in cleanup_device_data()
1543 static void postinit_cleanup(struct hfi1_devdata *dd) in postinit_cleanup() argument
1545 hfi1_start_cleanup(dd); in postinit_cleanup()
1546 hfi1_comp_vectors_clean_up(dd); in postinit_cleanup()
1547 hfi1_dev_affinity_clean_up(dd); in postinit_cleanup()
1549 hfi1_pcie_ddcleanup(dd); in postinit_cleanup()
1550 hfi1_pcie_cleanup(dd->pcidev); in postinit_cleanup()
1552 cleanup_device_data(dd); in postinit_cleanup()
1554 hfi1_free_devdata(dd); in postinit_cleanup()
1560 struct hfi1_devdata *dd; in init_one() local
1576 dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS * in init_one()
1578 if (IS_ERR(dd)) { in init_one()
1579 ret = PTR_ERR(dd); in init_one()
1584 ret = hfi1_validate_rcvhdrcnt(dd, rcvhdrcnt); in init_one()
1590 dd_dev_err(dd, "Invalid HdrQ Entry size %u\n", in init_one()
1613 dd_dev_info(dd, "Eager buffer size %u\n", in init_one()
1616 dd_dev_err(dd, "Invalid Eager buffer size of 0\n"); in init_one()
1624 ret = hfi1_pcie_init(dd); in init_one()
1632 ret = hfi1_init_dd(dd); in init_one()
1636 ret = create_workqueues(dd); in init_one()
1641 initfail = hfi1_init(dd, 0); in init_one()
1643 ret = hfi1_register_ib_device(dd); in init_one()
1652 dd->flags |= HFI1_INITTED; in init_one()
1654 hfi1_dbg_ibdev_init(&dd->verbs_dev); in init_one()
1657 j = hfi1_device_create(dd); in init_one()
1659 dd_dev_err(dd, "Failed to create /dev devices: %d\n", -j); in init_one()
1662 msix_clean_up_interrupts(dd); in init_one()
1663 stop_timers(dd); in init_one()
1665 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in init_one()
1666 hfi1_quiet_serdes(dd->pport + pidx); in init_one()
1667 ppd = dd->pport + pidx; in init_one()
1678 hfi1_device_remove(dd); in init_one()
1680 hfi1_unregister_ib_device(dd); in init_one()
1681 postinit_cleanup(dd); in init_one()
1687 sdma_start(dd); in init_one()
1697 static void wait_for_clients(struct hfi1_devdata *dd) in wait_for_clients() argument
1703 if (refcount_dec_and_test(&dd->user_refcount)) in wait_for_clients()
1704 complete(&dd->user_comp); in wait_for_clients()
1706 wait_for_completion(&dd->user_comp); in wait_for_clients()
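
wait_for_clients() is the usual "drop my own reference, then wait for the last user" idiom built from a refcount_t and a completion: remove_one() cannot tear the device down until every open client has gone away. A generic sketch of the same idiom (struct and field names are assumptions; each client release is expected to do the same dec-and-complete step):

	struct my_dev_refs {
		refcount_t users;		/* 1 device ref + 1 per client */
		struct completion all_gone;
	};

	static void wait_for_clients_sketch(struct my_dev_refs *r)
	{
		/* Drop the reference the device itself holds ... */
		if (refcount_dec_and_test(&r->users))
			complete(&r->all_gone);

		/* ... and sleep until the last client drops theirs. */
		wait_for_completion(&r->all_gone);
	}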
1711 struct hfi1_devdata *dd = pci_get_drvdata(pdev); in remove_one() local
1714 hfi1_dbg_ibdev_exit(&dd->verbs_dev); in remove_one()
1717 hfi1_device_remove(dd); in remove_one()
1720 wait_for_clients(dd); in remove_one()
1723 hfi1_unregister_ib_device(dd); in remove_one()
1726 hfi1_free_rx(dd); in remove_one()
1732 shutdown_device(dd); in remove_one()
1733 destroy_workqueues(dd); in remove_one()
1735 stop_timers(dd); in remove_one()
1740 postinit_cleanup(dd); in remove_one()
1745 struct hfi1_devdata *dd = pci_get_drvdata(pdev); in shutdown_one() local
1747 shutdown_device(dd); in shutdown_one()
1759 int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) in hfi1_create_rcvhdrq() argument
1766 rcd->rcvhdrq = dma_alloc_coherent(&dd->pcidev->dev, amt, in hfi1_create_rcvhdrq()
1771 dd_dev_err(dd, in hfi1_create_rcvhdrq()
1779 rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(&dd->pcidev->dev, in hfi1_create_rcvhdrq()
1788 set_hdrq_regs(rcd->dd, rcd->ctxt, rcd->rcvhdrqentsize, in hfi1_create_rcvhdrq()
1794 dd_dev_err(dd, in hfi1_create_rcvhdrq()
1797 dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq, in hfi1_create_rcvhdrq()
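
hfi1_create_rcvhdrq() allocates the receive header queue, and at line 1779 a separate page for the hardware-updated tail pointer, with dma_alloc_coherent(), freeing the queue again if the second allocation fails. A stripped-down sketch of that shape; dma_dev, hdrq, hdrq_dma, hdrq_tail and hdrq_tail_dma are assumed fields on the hypothetical device struct:

	static int create_hdrq_sketch(struct my_dev *dev, size_t amt)
	{
		dev->hdrq = dma_alloc_coherent(dev->dma_dev, amt,
					       &dev->hdrq_dma, GFP_KERNEL);
		if (!dev->hdrq) {
			pr_err("header queue allocation failed\n");
			return -ENOMEM;
		}

		dev->hdrq_tail = dma_alloc_coherent(dev->dma_dev, PAGE_SIZE,
						    &dev->hdrq_tail_dma,
						    GFP_KERNEL);
		if (!dev->hdrq_tail) {
			/* Undo the first allocation before failing. */
			dma_free_coherent(dev->dma_dev, amt, dev->hdrq,
					  dev->hdrq_dma);
			dev->hdrq = NULL;
			return -ENOMEM;
		}
		return 0;
	}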
1816 struct hfi1_devdata *dd = rcd->dd; in hfi1_setup_eagerbufs() local
1829 if (rcd->egrbufs.size < (round_mtu * dd->rcv_entries.group_size)) in hfi1_setup_eagerbufs()
1830 rcd->egrbufs.size = round_mtu * dd->rcv_entries.group_size; in hfi1_setup_eagerbufs()
1849 dma_alloc_coherent(&dd->pcidev->dev, in hfi1_setup_eagerbufs()
1875 dd_dev_err(dd, "ctxt%u: Failed to allocate eager buffers\n", in hfi1_setup_eagerbufs()
1939 max_entries = rcd->rcv_array_groups * dd->rcv_entries.group_size; in hfi1_setup_eagerbufs()
1940 egrtop = roundup(rcd->egrbufs.alloced, dd->rcv_entries.group_size); in hfi1_setup_eagerbufs()
1959 hfi1_put_tid(dd, rcd->eager_base + idx, PT_EAGER, in hfi1_setup_eagerbufs()
1970 dma_free_coherent(&dd->pcidev->dev, in hfi1_setup_eagerbufs()
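
hfi1_setup_eagerbufs() keeps the eager-buffer footprint aligned to whole RcvArray groups: lines 1829-1830 bump the requested size to at least one group's worth of MTU-sized buffers, and line 1940 rounds the count of buffers actually allocated up to a group boundary with roundup() before they are programmed with hfi1_put_tid(). A hedged worked example with illustrative numbers: if group_size = 8 and 21 eager buffers were allocated, egrtop = roundup(21, 8) = 24, so the context's eager region is still tracked in whole groups even though the last group is only partially populated.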