Lines matching refs: mdev

136 static int mthca_tune_pci(struct mthca_dev *mdev)  in mthca_tune_pci()  argument
142 if (pci_find_capability(mdev->pdev, PCI_CAP_ID_PCIX)) { in mthca_tune_pci()
143 if (pcix_set_mmrbc(mdev->pdev, pcix_get_max_mmrbc(mdev->pdev))) { in mthca_tune_pci()
144 mthca_err(mdev, "Couldn't set PCI-X max read count, " in mthca_tune_pci()
148 } else if (!(mdev->mthca_flags & MTHCA_FLAG_PCIE)) in mthca_tune_pci()
149 mthca_info(mdev, "No PCI-X capability, not setting RBC.\n"); in mthca_tune_pci()
151 if (pci_is_pcie(mdev->pdev)) { in mthca_tune_pci()
152 if (pcie_set_readrq(mdev->pdev, 4096)) { in mthca_tune_pci()
153 mthca_err(mdev, "Couldn't write PCI Express read request, " in mthca_tune_pci()
157 } else if (mdev->mthca_flags & MTHCA_FLAG_PCIE) in mthca_tune_pci()
158 mthca_info(mdev, "No PCI Express capability, " in mthca_tune_pci()
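The matches above come from the driver's PCI tuning helper: it raises the PCI-X maximum memory read byte count to whatever the device supports and bumps the PCIe read request size to 4096 bytes. A minimal standalone sketch of that pattern, with the log text reduced to comments and the return values simplified (the real helper's error codes are not fully visible in the listing):

#include <linux/pci.h>

/* Sketch of the tuning pattern in mthca_tune_pci() above: raise the
 * PCI-X MMRBC to the device maximum, then the PCIe read request size
 * to 4096 bytes.  Error codes and messages are simplified. */
static int tune_pci_sketch(struct pci_dev *pdev)
{
	int err;

	if (pci_find_capability(pdev, PCI_CAP_ID_PCIX)) {
		err = pcix_set_mmrbc(pdev, pcix_get_max_mmrbc(pdev));
		if (err)
			return err;	/* "Couldn't set PCI-X max read count" */
	}

	if (pci_is_pcie(pdev)) {
		err = pcie_set_readrq(pdev, 4096);
		if (err)
			return err;	/* "Couldn't write PCI Express read request" */
	}

	return 0;
}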
164 static int mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim) in mthca_dev_lim() argument
168 mdev->limits.mtt_seg_size = (1 << log_mtts_per_seg) * 8; in mthca_dev_lim()
169 err = mthca_QUERY_DEV_LIM(mdev, dev_lim); in mthca_dev_lim()
171 mthca_err(mdev, "QUERY_DEV_LIM command returned %d" in mthca_dev_lim()
176 mthca_err(mdev, "HCA minimum page size of %d bigger than " in mthca_dev_lim()
182 mthca_err(mdev, "HCA has %d ports, but we only support %d, " in mthca_dev_lim()
188 if (dev_lim->uar_size > pci_resource_len(mdev->pdev, 2)) { in mthca_dev_lim()
189 mthca_err(mdev, "HCA reported UAR size of 0x%x bigger than " in mthca_dev_lim()
192 (unsigned long long)pci_resource_len(mdev->pdev, 2)); in mthca_dev_lim()
196 mdev->limits.num_ports = dev_lim->num_ports; in mthca_dev_lim()
197 mdev->limits.vl_cap = dev_lim->max_vl; in mthca_dev_lim()
198 mdev->limits.mtu_cap = dev_lim->max_mtu; in mthca_dev_lim()
199 mdev->limits.gid_table_len = dev_lim->max_gids; in mthca_dev_lim()
200 mdev->limits.pkey_table_len = dev_lim->max_pkeys; in mthca_dev_lim()
201 mdev->limits.local_ca_ack_delay = dev_lim->local_ca_ack_delay; in mthca_dev_lim()
207 mdev->limits.max_sg = min_t(int, dev_lim->max_sg, in mthca_dev_lim()
210 (mthca_is_memfree(mdev) ? in mthca_dev_lim()
214 mdev->limits.max_wqes = dev_lim->max_qp_sz; in mthca_dev_lim()
215 mdev->limits.max_qp_init_rdma = dev_lim->max_requester_per_qp; in mthca_dev_lim()
216 mdev->limits.reserved_qps = dev_lim->reserved_qps; in mthca_dev_lim()
217 mdev->limits.max_srq_wqes = dev_lim->max_srq_sz; in mthca_dev_lim()
218 mdev->limits.reserved_srqs = dev_lim->reserved_srqs; in mthca_dev_lim()
219 mdev->limits.reserved_eecs = dev_lim->reserved_eecs; in mthca_dev_lim()
220 mdev->limits.max_desc_sz = dev_lim->max_desc_sz; in mthca_dev_lim()
221 mdev->limits.max_srq_sge = mthca_max_srq_sge(mdev); in mthca_dev_lim()
227 mdev->limits.max_cqes = dev_lim->max_cq_sz - 1; in mthca_dev_lim()
228 mdev->limits.reserved_cqs = dev_lim->reserved_cqs; in mthca_dev_lim()
229 mdev->limits.reserved_eqs = dev_lim->reserved_eqs; in mthca_dev_lim()
230 mdev->limits.reserved_mtts = dev_lim->reserved_mtts; in mthca_dev_lim()
231 mdev->limits.reserved_mrws = dev_lim->reserved_mrws; in mthca_dev_lim()
232 mdev->limits.reserved_uars = dev_lim->reserved_uars; in mthca_dev_lim()
233 mdev->limits.reserved_pds = dev_lim->reserved_pds; in mthca_dev_lim()
234 mdev->limits.port_width_cap = dev_lim->max_port_width; in mthca_dev_lim()
235 mdev->limits.page_size_cap = ~(u32) (dev_lim->min_page_sz - 1); in mthca_dev_lim()
236 mdev->limits.flags = dev_lim->flags; in mthca_dev_lim()
244 mdev->limits.stat_rate_support = dev_lim->stat_rate_support; in mthca_dev_lim()
245 else if (mdev->mthca_flags & MTHCA_FLAG_SINAI_OPT) in mthca_dev_lim()
246 mdev->limits.stat_rate_support = 0xf; in mthca_dev_lim()
248 mdev->limits.stat_rate_support = 0x3; in mthca_dev_lim()
257 mdev->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT | in mthca_dev_lim()
263 mdev->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR; in mthca_dev_lim()
266 mdev->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR; in mthca_dev_lim()
269 mdev->device_cap_flags |= IB_DEVICE_RAW_MULTI; in mthca_dev_lim()
272 mdev->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG; in mthca_dev_lim()
275 mdev->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE; in mthca_dev_lim()
278 mdev->mthca_flags |= MTHCA_FLAG_SRQ; in mthca_dev_lim()
280 if (mthca_is_memfree(mdev)) in mthca_dev_lim()
282 mdev->device_cap_flags |= IB_DEVICE_UD_IP_CSUM; in mthca_dev_lim()
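Most matches in mthca_dev_lim() copy firmware-reported limits straight into mdev->limits and translate capability bits into IB_DEVICE_* flags. The one check that touches the PCI device directly is the UAR-size test against BAR 2; in isolation (helper name hypothetical) it amounts to:

#include <linux/pci.h>

/* Hypothetical standalone form of the sanity check in mthca_dev_lim():
 * refuse the HCA if its reported UAR size exceeds what BAR 2 exposes. */
static int check_uar_fits_bar2(struct pci_dev *pdev, u32 uar_size)
{
	if (uar_size > pci_resource_len(pdev, 2))
		return -ENODEV;	/* "HCA reported UAR size ... bigger than ..." */
	return 0;
}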
287 static int mthca_init_tavor(struct mthca_dev *mdev) in mthca_init_tavor() argument
295 err = mthca_SYS_EN(mdev); in mthca_init_tavor()
297 mthca_err(mdev, "SYS_EN command returned %d, aborting.\n", err); in mthca_init_tavor()
301 err = mthca_QUERY_FW(mdev); in mthca_init_tavor()
303 mthca_err(mdev, "QUERY_FW command returned %d," in mthca_init_tavor()
307 err = mthca_QUERY_DDR(mdev); in mthca_init_tavor()
309 mthca_err(mdev, "QUERY_DDR command returned %d, aborting.\n", err); in mthca_init_tavor()
313 err = mthca_dev_lim(mdev, &dev_lim); in mthca_init_tavor()
315 mthca_err(mdev, "QUERY_DEV_LIM command returned %d, aborting.\n", err); in mthca_init_tavor()
322 if (mdev->mthca_flags & MTHCA_FLAG_SRQ) in mthca_init_tavor()
325 size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca); in mthca_init_tavor()
331 err = mthca_INIT_HCA(mdev, &init_hca); in mthca_init_tavor()
333 mthca_err(mdev, "INIT_HCA command returned %d, aborting.\n", err); in mthca_init_tavor()
340 mthca_SYS_DIS(mdev); in mthca_init_tavor()
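mthca_init_tavor() brackets the whole bring-up between SYS_EN and, on failure, SYS_DIS: query the firmware and attached DDR, read the device limits, turn them into a resource profile, and commit it with INIT_HCA. A condensed sketch, assuming the struct names that are not visible in the listing and dropping the log text:

/* Condensed Tavor-mode bring-up from mthca_init_tavor() above.  The
 * profile/init_hca type names are assumptions; the real profile is
 * filled from module parameters, which is omitted here. */
static int init_tavor_sketch(struct mthca_dev *mdev)
{
	struct mthca_dev_lim dev_lim;
	struct mthca_profile profile = {};		/* type name assumed */
	struct mthca_init_hca_param init_hca = {};	/* type name assumed */
	s64 size;
	int err;

	err = mthca_SYS_EN(mdev);		/* power the HCA on */
	if (err)
		return err;

	err = mthca_QUERY_FW(mdev);
	if (!err)
		err = mthca_QUERY_DDR(mdev);
	if (!err)
		err = mthca_dev_lim(mdev, &dev_lim);
	if (err)
		goto err_disable;

	size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
	if (size < 0) {
		err = size;
		goto err_disable;
	}

	err = mthca_INIT_HCA(mdev, &init_hca);
	if (err)
		goto err_disable;

	return 0;

err_disable:
	mthca_SYS_DIS(mdev);			/* undo SYS_EN on any failure */
	return err;
}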
345 static int mthca_load_fw(struct mthca_dev *mdev) in mthca_load_fw() argument
351 mdev->fw.arbel.fw_icm = in mthca_load_fw()
352 mthca_alloc_icm(mdev, mdev->fw.arbel.fw_pages, in mthca_load_fw()
354 if (!mdev->fw.arbel.fw_icm) { in mthca_load_fw()
355 mthca_err(mdev, "Couldn't allocate FW area, aborting.\n"); in mthca_load_fw()
359 err = mthca_MAP_FA(mdev, mdev->fw.arbel.fw_icm); in mthca_load_fw()
361 mthca_err(mdev, "MAP_FA command returned %d, aborting.\n", err); in mthca_load_fw()
364 err = mthca_RUN_FW(mdev); in mthca_load_fw()
366 mthca_err(mdev, "RUN_FW command returned %d, aborting.\n", err); in mthca_load_fw()
373 mthca_UNMAP_FA(mdev); in mthca_load_fw()
376 mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0); in mthca_load_fw()
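mthca_load_fw() is the MemFree-only step that gives the firmware its working memory: allocate ICM pages for the firmware area, hand them over with MAP_FA, then start the firmware with RUN_FW, unwinding in reverse on failure. A condensed sketch; the trailing arguments to mthca_alloc_icm() are cut off in the listing and assumed here:

/* Condensed firmware-load sequence from mthca_load_fw() above.  The
 * GFP flags and the final mthca_alloc_icm() argument are not visible
 * in the listing and are assumptions. */
static int load_fw_sketch(struct mthca_dev *mdev)
{
	int err;

	mdev->fw.arbel.fw_icm = mthca_alloc_icm(mdev, mdev->fw.arbel.fw_pages,
						GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!mdev->fw.arbel.fw_icm)
		return -ENOMEM;		/* "Couldn't allocate FW area" */

	err = mthca_MAP_FA(mdev, mdev->fw.arbel.fw_icm);	/* hand the pages to FW */
	if (err)
		goto err_free;

	err = mthca_RUN_FW(mdev);				/* start the firmware */
	if (err)
		goto err_unmap_fa;

	return 0;

err_unmap_fa:
	mthca_UNMAP_FA(mdev);
err_free:
	mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);
	return err;
}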
380 static int mthca_init_icm(struct mthca_dev *mdev, in mthca_init_icm() argument
388 err = mthca_SET_ICM_SIZE(mdev, icm_size, &aux_pages); in mthca_init_icm()
390 mthca_err(mdev, "SET_ICM_SIZE command returned %d, aborting.\n", err); in mthca_init_icm()
394 mthca_dbg(mdev, "%lld KB of HCA context requires %lld KB aux memory.\n", in mthca_init_icm()
398 mdev->fw.arbel.aux_icm = mthca_alloc_icm(mdev, aux_pages, in mthca_init_icm()
400 if (!mdev->fw.arbel.aux_icm) { in mthca_init_icm()
401 mthca_err(mdev, "Couldn't allocate aux memory, aborting.\n"); in mthca_init_icm()
405 err = mthca_MAP_ICM_AUX(mdev, mdev->fw.arbel.aux_icm); in mthca_init_icm()
407 mthca_err(mdev, "MAP_ICM_AUX returned %d, aborting.\n", err); in mthca_init_icm()
411 err = mthca_map_eq_icm(mdev, init_hca->eqc_base); in mthca_init_icm()
413 mthca_err(mdev, "Failed to map EQ context memory, aborting.\n"); in mthca_init_icm()
418 mdev->limits.reserved_mtts = ALIGN(mdev->limits.reserved_mtts * mdev->limits.mtt_seg_size, in mthca_init_icm()
419 dma_get_cache_alignment()) / mdev->limits.mtt_seg_size; in mthca_init_icm()
421 mdev->mr_table.mtt_table = mthca_alloc_icm_table(mdev, init_hca->mtt_base, in mthca_init_icm()
422 mdev->limits.mtt_seg_size, in mthca_init_icm()
423 mdev->limits.num_mtt_segs, in mthca_init_icm()
424 mdev->limits.reserved_mtts, in mthca_init_icm()
426 if (!mdev->mr_table.mtt_table) { in mthca_init_icm()
427 mthca_err(mdev, "Failed to map MTT context memory, aborting.\n"); in mthca_init_icm()
432 mdev->mr_table.mpt_table = mthca_alloc_icm_table(mdev, init_hca->mpt_base, in mthca_init_icm()
434 mdev->limits.num_mpts, in mthca_init_icm()
435 mdev->limits.reserved_mrws, in mthca_init_icm()
437 if (!mdev->mr_table.mpt_table) { in mthca_init_icm()
438 mthca_err(mdev, "Failed to map MPT context memory, aborting.\n"); in mthca_init_icm()
443 mdev->qp_table.qp_table = mthca_alloc_icm_table(mdev, init_hca->qpc_base, in mthca_init_icm()
445 mdev->limits.num_qps, in mthca_init_icm()
446 mdev->limits.reserved_qps, in mthca_init_icm()
448 if (!mdev->qp_table.qp_table) { in mthca_init_icm()
449 mthca_err(mdev, "Failed to map QP context memory, aborting.\n"); in mthca_init_icm()
454 mdev->qp_table.eqp_table = mthca_alloc_icm_table(mdev, init_hca->eqpc_base, in mthca_init_icm()
456 mdev->limits.num_qps, in mthca_init_icm()
457 mdev->limits.reserved_qps, in mthca_init_icm()
459 if (!mdev->qp_table.eqp_table) { in mthca_init_icm()
460 mthca_err(mdev, "Failed to map EQP context memory, aborting.\n"); in mthca_init_icm()
465 mdev->qp_table.rdb_table = mthca_alloc_icm_table(mdev, init_hca->rdb_base, in mthca_init_icm()
467 mdev->limits.num_qps << in mthca_init_icm()
468 mdev->qp_table.rdb_shift, 0, in mthca_init_icm()
470 if (!mdev->qp_table.rdb_table) { in mthca_init_icm()
471 mthca_err(mdev, "Failed to map RDB context memory, aborting\n"); in mthca_init_icm()
476 mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base, in mthca_init_icm()
478 mdev->limits.num_cqs, in mthca_init_icm()
479 mdev->limits.reserved_cqs, in mthca_init_icm()
481 if (!mdev->cq_table.table) { in mthca_init_icm()
482 mthca_err(mdev, "Failed to map CQ context memory, aborting.\n"); in mthca_init_icm()
487 if (mdev->mthca_flags & MTHCA_FLAG_SRQ) { in mthca_init_icm()
488 mdev->srq_table.table = in mthca_init_icm()
489 mthca_alloc_icm_table(mdev, init_hca->srqc_base, in mthca_init_icm()
491 mdev->limits.num_srqs, in mthca_init_icm()
492 mdev->limits.reserved_srqs, in mthca_init_icm()
494 if (!mdev->srq_table.table) { in mthca_init_icm()
495 mthca_err(mdev, "Failed to map SRQ context memory, " in mthca_init_icm()
507 mdev->mcg_table.table = mthca_alloc_icm_table(mdev, init_hca->mc_base, in mthca_init_icm()
509 mdev->limits.num_mgms + in mthca_init_icm()
510 mdev->limits.num_amgms, in mthca_init_icm()
511 mdev->limits.num_mgms + in mthca_init_icm()
512 mdev->limits.num_amgms, in mthca_init_icm()
514 if (!mdev->mcg_table.table) { in mthca_init_icm()
515 mthca_err(mdev, "Failed to map MCG context memory, aborting.\n"); in mthca_init_icm()
523 if (mdev->mthca_flags & MTHCA_FLAG_SRQ) in mthca_init_icm()
524 mthca_free_icm_table(mdev, mdev->srq_table.table); in mthca_init_icm()
527 mthca_free_icm_table(mdev, mdev->cq_table.table); in mthca_init_icm()
530 mthca_free_icm_table(mdev, mdev->qp_table.rdb_table); in mthca_init_icm()
533 mthca_free_icm_table(mdev, mdev->qp_table.eqp_table); in mthca_init_icm()
536 mthca_free_icm_table(mdev, mdev->qp_table.qp_table); in mthca_init_icm()
539 mthca_free_icm_table(mdev, mdev->mr_table.mpt_table); in mthca_init_icm()
542 mthca_free_icm_table(mdev, mdev->mr_table.mtt_table); in mthca_init_icm()
545 mthca_unmap_eq_icm(mdev); in mthca_init_icm()
548 mthca_UNMAP_ICM_AUX(mdev); in mthca_init_icm()
551 mthca_free_icm(mdev, mdev->fw.arbel.aux_icm, 0); in mthca_init_icm()
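Beyond the long run of mthca_alloc_icm_table() calls and their mirror-image error unwinding, mthca_init_icm() contains one small computation worth spelling out: the reserved MTT segment count is rounded up so that the first non-reserved MTT entry starts on a DMA cache-line boundary. As a standalone helper (name hypothetical):

#include <linux/dma-mapping.h>
#include <linux/kernel.h>

/* Round the reserved MTT segment count up to a whole number of DMA
 * cache lines; standalone form of the ALIGN() expression shown in
 * mthca_init_icm() above. */
static int round_reserved_mtts(int reserved_mtts, int mtt_seg_size)
{
	return ALIGN(reserved_mtts * mtt_seg_size,
		     dma_get_cache_alignment()) / mtt_seg_size;
}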
556 static void mthca_free_icms(struct mthca_dev *mdev) in mthca_free_icms() argument
559 mthca_free_icm_table(mdev, mdev->mcg_table.table); in mthca_free_icms()
560 if (mdev->mthca_flags & MTHCA_FLAG_SRQ) in mthca_free_icms()
561 mthca_free_icm_table(mdev, mdev->srq_table.table); in mthca_free_icms()
562 mthca_free_icm_table(mdev, mdev->cq_table.table); in mthca_free_icms()
563 mthca_free_icm_table(mdev, mdev->qp_table.rdb_table); in mthca_free_icms()
564 mthca_free_icm_table(mdev, mdev->qp_table.eqp_table); in mthca_free_icms()
565 mthca_free_icm_table(mdev, mdev->qp_table.qp_table); in mthca_free_icms()
566 mthca_free_icm_table(mdev, mdev->mr_table.mpt_table); in mthca_free_icms()
567 mthca_free_icm_table(mdev, mdev->mr_table.mtt_table); in mthca_free_icms()
568 mthca_unmap_eq_icm(mdev); in mthca_free_icms()
570 mthca_UNMAP_ICM_AUX(mdev); in mthca_free_icms()
571 mthca_free_icm(mdev, mdev->fw.arbel.aux_icm, 0); in mthca_free_icms()
574 static int mthca_init_arbel(struct mthca_dev *mdev) in mthca_init_arbel() argument
582 err = mthca_QUERY_FW(mdev); in mthca_init_arbel()
584 mthca_err(mdev, "QUERY_FW command failed %d, aborting.\n", err); in mthca_init_arbel()
588 err = mthca_ENABLE_LAM(mdev); in mthca_init_arbel()
590 mthca_dbg(mdev, "No HCA-attached memory (running in MemFree mode)\n"); in mthca_init_arbel()
591 mdev->mthca_flags |= MTHCA_FLAG_NO_LAM; in mthca_init_arbel()
593 mthca_err(mdev, "ENABLE_LAM returned %d, aborting.\n", err); in mthca_init_arbel()
597 err = mthca_load_fw(mdev); in mthca_init_arbel()
599 mthca_err(mdev, "Loading FW returned %d, aborting.\n", err); in mthca_init_arbel()
603 err = mthca_dev_lim(mdev, &dev_lim); in mthca_init_arbel()
605 mthca_err(mdev, "QUERY_DEV_LIM returned %d, aborting.\n", err); in mthca_init_arbel()
612 if (mdev->mthca_flags & MTHCA_FLAG_SRQ) in mthca_init_arbel()
615 icm_size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca); in mthca_init_arbel()
621 err = mthca_init_icm(mdev, &dev_lim, &init_hca, icm_size); in mthca_init_arbel()
625 err = mthca_INIT_HCA(mdev, &init_hca); in mthca_init_arbel()
627 mthca_err(mdev, "INIT_HCA command returned %d, aborting.\n", err); in mthca_init_arbel()
634 mthca_free_icms(mdev); in mthca_init_arbel()
637 mthca_UNMAP_FA(mdev); in mthca_init_arbel()
638 mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0); in mthca_init_arbel()
641 if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM)) in mthca_init_arbel()
642 mthca_DISABLE_LAM(mdev); in mthca_init_arbel()
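mthca_init_arbel() decides between DDR-attached and MemFree operation from the ENABLE_LAM result: one specific return value means "no HCA-attached memory" and sets MTHCA_FLAG_NO_LAM, while any other failure aborts. The comparison itself is not among the matches above, so the -EAGAIN value in this sketch is an assumption:

/* Sketch of the ENABLE_LAM handling in mthca_init_arbel().  That the
 * MemFree case is signalled by -EAGAIN is an assumption; the listing
 * only shows the log messages on either side of the test. */
static int enable_lam_sketch(struct mthca_dev *mdev)
{
	int err = mthca_ENABLE_LAM(mdev);

	if (err == -EAGAIN) {
		/* "No HCA-attached memory (running in MemFree mode)" */
		mdev->mthca_flags |= MTHCA_FLAG_NO_LAM;
		return 0;
	}
	return err;	/* 0 on success, otherwise "ENABLE_LAM returned %d, aborting" */
}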
647 static void mthca_close_hca(struct mthca_dev *mdev) in mthca_close_hca() argument
649 mthca_CLOSE_HCA(mdev, 0); in mthca_close_hca()
651 if (mthca_is_memfree(mdev)) { in mthca_close_hca()
652 mthca_free_icms(mdev); in mthca_close_hca()
654 mthca_UNMAP_FA(mdev); in mthca_close_hca()
655 mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0); in mthca_close_hca()
657 if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM)) in mthca_close_hca()
658 mthca_DISABLE_LAM(mdev); in mthca_close_hca()
660 mthca_SYS_DIS(mdev); in mthca_close_hca()
663 static int mthca_init_hca(struct mthca_dev *mdev) in mthca_init_hca() argument
668 if (mthca_is_memfree(mdev)) in mthca_init_hca()
669 err = mthca_init_arbel(mdev); in mthca_init_hca()
671 err = mthca_init_tavor(mdev); in mthca_init_hca()
676 err = mthca_QUERY_ADAPTER(mdev, &adapter); in mthca_init_hca()
678 mthca_err(mdev, "QUERY_ADAPTER command returned %d, aborting.\n", err); in mthca_init_hca()
682 mdev->eq_table.inta_pin = adapter.inta_pin; in mthca_init_hca()
683 if (!mthca_is_memfree(mdev)) in mthca_init_hca()
684 mdev->rev_id = adapter.revision_id; in mthca_init_hca()
685 memcpy(mdev->board_id, adapter.board_id, sizeof mdev->board_id); in mthca_init_hca()
690 mthca_close_hca(mdev); in mthca_init_hca()
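mthca_init_hca() itself is a thin dispatcher: MemFree (Arbel-mode) devices take the ICM path, Tavor-mode devices the DDR path, and both finish with QUERY_ADAPTER to pick up the interrupt pin, revision and board ID. Condensed, with the local struct type name inferred from the adapter.* fields above:

/* Condensed dispatch from mthca_init_hca(); the struct mthca_adapter
 * type name is inferred from the adapter.* fields in the listing. */
static int init_hca_sketch(struct mthca_dev *mdev)
{
	struct mthca_adapter adapter;
	int err;

	err = mthca_is_memfree(mdev) ? mthca_init_arbel(mdev)
				     : mthca_init_tavor(mdev);
	if (err)
		return err;

	err = mthca_QUERY_ADAPTER(mdev, &adapter);
	if (err)
		goto err_close;

	mdev->eq_table.inta_pin = adapter.inta_pin;
	if (!mthca_is_memfree(mdev))
		mdev->rev_id = adapter.revision_id;
	memcpy(mdev->board_id, adapter.board_id, sizeof(mdev->board_id));
	return 0;

err_close:
	mthca_close_hca(mdev);
	return err;
}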
851 static int mthca_enable_msi_x(struct mthca_dev *mdev) in mthca_enable_msi_x() argument
855 err = pci_alloc_irq_vectors(mdev->pdev, 3, 3, PCI_IRQ_MSIX); in mthca_enable_msi_x()
859 mdev->eq_table.eq[MTHCA_EQ_COMP ].msi_x_vector = in mthca_enable_msi_x()
860 pci_irq_vector(mdev->pdev, 0); in mthca_enable_msi_x()
861 mdev->eq_table.eq[MTHCA_EQ_ASYNC].msi_x_vector = in mthca_enable_msi_x()
862 pci_irq_vector(mdev->pdev, 1); in mthca_enable_msi_x()
863 mdev->eq_table.eq[MTHCA_EQ_CMD ].msi_x_vector = in mthca_enable_msi_x()
864 pci_irq_vector(mdev->pdev, 2); in mthca_enable_msi_x()
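mthca_enable_msi_x() asks the PCI core for exactly three MSI-X vectors, one per event queue (completion, async events, command completions), and records the Linux IRQ number of each. The same pattern in isolation:

#include <linux/pci.h>

/* Request exactly three MSI-X vectors and return their IRQ numbers,
 * mirroring mthca_enable_msi_x() above (min == max == 3, so the call
 * fails outright if three vectors cannot be granted). */
static int enable_msi_x_sketch(struct pci_dev *pdev, int irqs[3])
{
	int i, err;

	err = pci_alloc_irq_vectors(pdev, 3, 3, PCI_IRQ_MSIX);
	if (err < 0)
		return err;

	for (i = 0; i < 3; i++)
		irqs[i] = pci_irq_vector(pdev, i);

	return 0;
}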
901 struct mthca_dev *mdev; in __mthca_init_one() local
949 mdev = ib_alloc_device(mthca_dev, ib_dev); in __mthca_init_one()
950 if (!mdev) { in __mthca_init_one()
957 mdev->pdev = pdev; in __mthca_init_one()
959 mdev->mthca_flags = mthca_hca_table[hca_type].flags; in __mthca_init_one()
961 mdev->mthca_flags |= MTHCA_FLAG_DDR_HIDDEN; in __mthca_init_one()
968 err = mthca_reset(mdev); in __mthca_init_one()
970 mthca_err(mdev, "Failed to reset HCA, aborting.\n"); in __mthca_init_one()
974 err = mthca_cmd_init(mdev); in __mthca_init_one()
976 mthca_err(mdev, "Failed to init command interface, aborting.\n"); in __mthca_init_one()
980 err = mthca_tune_pci(mdev); in __mthca_init_one()
984 err = mthca_init_hca(mdev); in __mthca_init_one()
988 if (mdev->fw_ver < mthca_hca_table[hca_type].latest_fw) { in __mthca_init_one()
989 mthca_warn(mdev, "HCA FW version %d.%d.%03d is old (%d.%d.%03d is current).\n", in __mthca_init_one()
990 (int) (mdev->fw_ver >> 32), (int) (mdev->fw_ver >> 16) & 0xffff, in __mthca_init_one()
991 (int) (mdev->fw_ver & 0xffff), in __mthca_init_one()
995 mthca_warn(mdev, "If you have problems, try updating your HCA FW.\n"); in __mthca_init_one()
998 if (msi_x && !mthca_enable_msi_x(mdev)) in __mthca_init_one()
999 mdev->mthca_flags |= MTHCA_FLAG_MSI_X; in __mthca_init_one()
1001 err = mthca_setup_hca(mdev); in __mthca_init_one()
1002 if (err == -EBUSY && (mdev->mthca_flags & MTHCA_FLAG_MSI_X)) { in __mthca_init_one()
1004 mdev->mthca_flags &= ~MTHCA_FLAG_MSI_X; in __mthca_init_one()
1006 err = mthca_setup_hca(mdev); in __mthca_init_one()
1012 err = mthca_register_device(mdev); in __mthca_init_one()
1016 err = mthca_create_agents(mdev); in __mthca_init_one()
1020 pci_set_drvdata(pdev, mdev); in __mthca_init_one()
1021 mdev->hca_type = hca_type; in __mthca_init_one()
1023 mdev->active = true; in __mthca_init_one()
1028 mthca_unregister_device(mdev); in __mthca_init_one()
1031 mthca_cleanup_mcg_table(mdev); in __mthca_init_one()
1032 mthca_cleanup_av_table(mdev); in __mthca_init_one()
1033 mthca_cleanup_qp_table(mdev); in __mthca_init_one()
1034 mthca_cleanup_srq_table(mdev); in __mthca_init_one()
1035 mthca_cleanup_cq_table(mdev); in __mthca_init_one()
1036 mthca_cmd_use_polling(mdev); in __mthca_init_one()
1037 mthca_cleanup_eq_table(mdev); in __mthca_init_one()
1039 mthca_pd_free(mdev, &mdev->driver_pd); in __mthca_init_one()
1041 mthca_cleanup_mr_table(mdev); in __mthca_init_one()
1042 mthca_cleanup_pd_table(mdev); in __mthca_init_one()
1043 mthca_cleanup_uar_table(mdev); in __mthca_init_one()
1046 if (mdev->mthca_flags & MTHCA_FLAG_MSI_X) in __mthca_init_one()
1049 mthca_close_hca(mdev); in __mthca_init_one()
1052 mthca_cmd_cleanup(mdev); in __mthca_init_one()
1055 ib_dealloc_device(&mdev->ib_dev); in __mthca_init_one()
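The probe path (__mthca_init_one()) contains one non-obvious retry: if mthca_setup_hca() fails with -EBUSY while MSI-X is enabled, the driver clears MTHCA_FLAG_MSI_X and tries again with legacy interrupts. The line that releases the vectors before the retry is not among the matches, so the pci_free_irq_vectors() call below is an assumption:

/* Sketch of the MSI-X fallback in __mthca_init_one(): on -EBUSY,
 * give up the MSI-X vectors and retry with legacy interrupts.  The
 * vector-freeing call is assumed; it is not visible in the listing. */
static int setup_hca_with_fallback(struct mthca_dev *mdev)
{
	int err = mthca_setup_hca(mdev);

	if (err == -EBUSY && (mdev->mthca_flags & MTHCA_FLAG_MSI_X)) {
		pci_free_irq_vectors(mdev->pdev);	/* assumed cleanup */
		mdev->mthca_flags &= ~MTHCA_FLAG_MSI_X;
		err = mthca_setup_hca(mdev);
	}
	return err;
}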
1068 struct mthca_dev *mdev = pci_get_drvdata(pdev); in __mthca_remove_one() local
1071 if (mdev) { in __mthca_remove_one()
1072 mthca_free_agents(mdev); in __mthca_remove_one()
1073 mthca_unregister_device(mdev); in __mthca_remove_one()
1075 for (p = 1; p <= mdev->limits.num_ports; ++p) in __mthca_remove_one()
1076 mthca_CLOSE_IB(mdev, p); in __mthca_remove_one()
1078 mthca_cleanup_mcg_table(mdev); in __mthca_remove_one()
1079 mthca_cleanup_av_table(mdev); in __mthca_remove_one()
1080 mthca_cleanup_qp_table(mdev); in __mthca_remove_one()
1081 mthca_cleanup_srq_table(mdev); in __mthca_remove_one()
1082 mthca_cleanup_cq_table(mdev); in __mthca_remove_one()
1083 mthca_cmd_use_polling(mdev); in __mthca_remove_one()
1084 mthca_cleanup_eq_table(mdev); in __mthca_remove_one()
1086 mthca_pd_free(mdev, &mdev->driver_pd); in __mthca_remove_one()
1088 mthca_cleanup_mr_table(mdev); in __mthca_remove_one()
1089 mthca_cleanup_pd_table(mdev); in __mthca_remove_one()
1091 iounmap(mdev->kar); in __mthca_remove_one()
1092 mthca_uar_free(mdev, &mdev->driver_uar); in __mthca_remove_one()
1093 mthca_cleanup_uar_table(mdev); in __mthca_remove_one()
1094 mthca_close_hca(mdev); in __mthca_remove_one()
1095 mthca_cmd_cleanup(mdev); in __mthca_remove_one()
1097 if (mdev->mthca_flags & MTHCA_FLAG_MSI_X) in __mthca_remove_one()
1100 ib_dealloc_device(&mdev->ib_dev); in __mthca_remove_one()
1109 struct mthca_dev *mdev; in __mthca_restart_one() local
1112 mdev = pci_get_drvdata(pdev); in __mthca_restart_one()
1113 if (!mdev) in __mthca_restart_one()
1115 hca_type = mdev->hca_type; in __mthca_restart_one()