/*
 * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/printk.h>
#ifdef CONFIG_INFINIBAND_QIB_DCA
#include <linux/dca.h>
#endif

#include "qib.h"
#include "qib_common.h"
#include "qib_mad.h"
#ifdef CONFIG_DEBUG_FS
#include "qib_debugfs.h"
#include "qib_verbs.h"
#endif

#undef pr_fmt
#define pr_fmt(fmt) QIB_DRV_NAME ": " fmt

/*
 * min buffers we want to have per context, after driver
 */
#define QIB_MIN_USER_CTXT_BUFCNT 7

#define QLOGIC_IB_R_SOFTWARE_MASK 0xFF
#define QLOGIC_IB_R_SOFTWARE_SHIFT 24
#define QLOGIC_IB_R_EMULATOR_MASK (1ULL<<62)

/*
 * Number of ctxts we are configured to use (to allow for more pio
 * buffers per ctxt, etc.)  Zero means use chip value.
 */
ushort qib_cfgctxts;
module_param_named(cfgctxts, qib_cfgctxts, ushort, S_IRUGO);
MODULE_PARM_DESC(cfgctxts, "Set max number of contexts to use");

unsigned qib_numa_aware;
module_param_named(numa_aware, qib_numa_aware, uint, S_IRUGO);
MODULE_PARM_DESC(numa_aware,
	"0 -> PSM allocation close to HCA, 1 -> PSM allocation local to process");

/*
 * If set, do not write to any regs if avoidable, hack to allow
 * check for deranged default register values.
 */
ushort qib_mini_init;
module_param_named(mini_init, qib_mini_init, ushort, S_IRUGO);
MODULE_PARM_DESC(mini_init, "If set, do minimal diag init");

unsigned qib_n_krcv_queues;
module_param_named(krcvqs, qib_n_krcv_queues, uint, S_IRUGO);
MODULE_PARM_DESC(krcvqs, "number of kernel receive queues per IB port");

unsigned qib_cc_table_size;
module_param_named(cc_table_size, qib_cc_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(cc_table_size, "Congestion control table entries 0 (CCA disabled - default), min = 128, max = 1984");

/*
 * qib_wc_pat parameter:
 *      0 is WC via MTRR
 *      1 is WC via PAT
 *      If PAT initialization fails, code reverts back to MTRR
 */
unsigned qib_wc_pat = 1; /* default (1) is to use PAT, not MTRR */
module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO);
MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism");

static void verify_interrupt(unsigned long);

static struct idr qib_unit_table;
u32 qib_cpulist_count;
unsigned long *qib_cpulist;

/* set number of contexts we'll actually use */
void qib_set_ctxtcnt(struct qib_devdata *dd)
{
	if (!qib_cfgctxts) {
		dd->cfgctxts = dd->first_user_ctxt + num_online_cpus();
		if (dd->cfgctxts > dd->ctxtcnt)
			dd->cfgctxts = dd->ctxtcnt;
	} else if (qib_cfgctxts < dd->num_pports)
		dd->cfgctxts = dd->ctxtcnt;
	else if (qib_cfgctxts <= dd->ctxtcnt)
		dd->cfgctxts = qib_cfgctxts;
	else
		dd->cfgctxts = dd->ctxtcnt;
	dd->freectxts = (dd->first_user_ctxt > dd->cfgctxts) ? 0 :
		dd->cfgctxts - dd->first_user_ctxt;
}

/*
 * Common code for creating the receive context array.
 */
int qib_create_ctxts(struct qib_devdata *dd)
{
	unsigned i;
	int local_node_id = pcibus_to_node(dd->pcidev->bus);

	if (local_node_id < 0)
		local_node_id = numa_node_id();
	dd->assigned_node_id = local_node_id;

	/*
	 * Allocate full ctxtcnt array, rather than just cfgctxts, because
	 * cleanup iterates across all possible ctxts.
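	 * If any kernel ctxt allocation below fails, the whole array is
	 * freed again and dd->rcd is reset to NULL.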
	 */
	dd->rcd = kzalloc(sizeof(*dd->rcd) * dd->ctxtcnt, GFP_KERNEL);
	if (!dd->rcd) {
		qib_dev_err(dd,
			"Unable to allocate ctxtdata array, failing\n");
		return -ENOMEM;
	}

	/* create (one or more) kctxt */
	for (i = 0; i < dd->first_user_ctxt; ++i) {
		struct qib_pportdata *ppd;
		struct qib_ctxtdata *rcd;

		if (dd->skip_kctxt_mask & (1 << i))
			continue;

		ppd = dd->pport + (i % dd->num_pports);

		rcd = qib_create_ctxtdata(ppd, i, dd->assigned_node_id);
		if (!rcd) {
			qib_dev_err(dd,
				"Unable to allocate ctxtdata for Kernel ctxt, failing\n");
			kfree(dd->rcd);
			dd->rcd = NULL;
			return -ENOMEM;
		}
		rcd->pkeys[0] = QIB_DEFAULT_P_KEY;
		rcd->seq_cnt = 1;
	}
	return 0;
}

/*
 * Common code for user and kernel context setup.
 */
struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *ppd, u32 ctxt,
	int node_id)
{
	struct qib_devdata *dd = ppd->dd;
	struct qib_ctxtdata *rcd;

	rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, node_id);
	if (rcd) {
		INIT_LIST_HEAD(&rcd->qp_wait_list);
		rcd->node_id = node_id;
		rcd->ppd = ppd;
		rcd->dd = dd;
		rcd->cnt = 1;
		rcd->ctxt = ctxt;
		dd->rcd[ctxt] = rcd;
#ifdef CONFIG_DEBUG_FS
		if (ctxt < dd->first_user_ctxt) { /* N/A for PSM contexts */
			rcd->opstats = kzalloc_node(sizeof(*rcd->opstats),
				GFP_KERNEL, node_id);
			if (!rcd->opstats) {
				kfree(rcd);
				qib_dev_err(dd,
					"Unable to allocate per ctxt stats buffer\n");
				return NULL;
			}
		}
#endif
		dd->f_init_ctxt(rcd);

		/*
		 * To avoid wasting a lot of memory, we allocate 32KB chunks
		 * of physically contiguous memory, advance through it until
		 * used up and then allocate more.  Of course, we need
		 * memory to store those extra pointers, now.
		 * 32KB seems to be the most that is "safe" under memory
		 * pressure (creating large files and then copying them over
		 * NFS while doing lots of MPI jobs).  The OOM killer can
		 * get invoked, even though we say we can sleep and this can
		 * cause significant system problems....
		 */
		rcd->rcvegrbuf_size = 0x8000;
		rcd->rcvegrbufs_perchunk =
			rcd->rcvegrbuf_size / dd->rcvegrbufsize;
		rcd->rcvegrbuf_chunks = (rcd->rcvegrcnt +
			rcd->rcvegrbufs_perchunk - 1) /
			rcd->rcvegrbufs_perchunk;
		BUG_ON(!is_power_of_2(rcd->rcvegrbufs_perchunk));
		rcd->rcvegrbufs_perchunk_shift =
			ilog2(rcd->rcvegrbufs_perchunk);
	}
	return rcd;
}

/*
 * Common code for initializing the physical port structure.
 */
int qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
			u8 hw_pidx, u8 port)
{
	int size;

	ppd->dd = dd;
	ppd->hw_pidx = hw_pidx;
	ppd->port = port; /* IB port number, not index */

	spin_lock_init(&ppd->sdma_lock);
	spin_lock_init(&ppd->lflags_lock);
	spin_lock_init(&ppd->cc_shadow_lock);
	init_waitqueue_head(&ppd->state_wait);

	init_timer(&ppd->symerr_clear_timer);
	ppd->symerr_clear_timer.function = qib_clear_symerror_on_linkup;
	ppd->symerr_clear_timer.data = (unsigned long)ppd;

	ppd->qib_wq = NULL;
	ppd->ibport_data.pmastats =
		alloc_percpu(struct qib_pma_counters);
	if (!ppd->ibport_data.pmastats)
		return -ENOMEM;

	if (qib_cc_table_size < IB_CCT_MIN_ENTRIES)
		goto bail;

	ppd->cc_supported_table_entries = min(max_t(int, qib_cc_table_size,
		IB_CCT_MIN_ENTRIES), IB_CCT_ENTRIES*IB_CC_TABLE_CAP_DEFAULT);

	ppd->cc_max_table_entries =
		ppd->cc_supported_table_entries/IB_CCT_ENTRIES;

	size = IB_CC_TABLE_CAP_DEFAULT * sizeof(struct ib_cc_table_entry)
		* IB_CCT_ENTRIES;
	ppd->ccti_entries = kzalloc(size, GFP_KERNEL);
	if (!ppd->ccti_entries) {
		qib_dev_err(dd,
			"failed to allocate congestion control table for port %d!\n",
			port);
		goto bail;
	}

	size = IB_CC_CCS_ENTRIES * sizeof(struct ib_cc_congestion_entry);
	ppd->congestion_entries = kzalloc(size, GFP_KERNEL);
	if (!ppd->congestion_entries) {
		qib_dev_err(dd,
			"failed to allocate congestion setting list for port %d!\n",
			port);
		goto bail_1;
	}

	size = sizeof(struct cc_table_shadow);
	ppd->ccti_entries_shadow = kzalloc(size, GFP_KERNEL);
	if (!ppd->ccti_entries_shadow) {
		qib_dev_err(dd,
			"failed to allocate shadow ccti list for port %d!\n",
			port);
		goto bail_2;
	}

	size = sizeof(struct ib_cc_congestion_setting_attr);
	ppd->congestion_entries_shadow = kzalloc(size, GFP_KERNEL);
	if (!ppd->congestion_entries_shadow) {
		qib_dev_err(dd,
			"failed to allocate shadow congestion setting list for port %d!\n",
			port);
		goto bail_3;
	}

	return 0;

bail_3:
	kfree(ppd->ccti_entries_shadow);
	ppd->ccti_entries_shadow = NULL;
bail_2:
	kfree(ppd->congestion_entries);
	ppd->congestion_entries = NULL;
bail_1:
	kfree(ppd->ccti_entries);
	ppd->ccti_entries = NULL;
bail:
	/* User is intentionally disabling the congestion control agent */
	if (!qib_cc_table_size)
		return 0;

	if (qib_cc_table_size < IB_CCT_MIN_ENTRIES) {
		qib_dev_err(dd,
			"Congestion Control table size %d less than minimum %d for port %d\n",
			qib_cc_table_size, IB_CCT_MIN_ENTRIES, port);
		qib_cc_table_size = 0;
	}

	qib_dev_err(dd, "Congestion Control Agent disabled for port %d\n",
		port);
	return 0;
}

static int init_pioavailregs(struct qib_devdata *dd)
{
	int ret, pidx;
	u64 *status_page;

	dd->pioavailregs_dma = dma_alloc_coherent(
		&dd->pcidev->dev, PAGE_SIZE, &dd->pioavailregs_phys,
		GFP_KERNEL);
	if (!dd->pioavailregs_dma) {
		qib_dev_err(dd,
			"failed to allocate PIOavail reg area in memory\n");
		ret = -ENOMEM;
		goto done;
	}

	/*
	 * We really want L2 cache aligned, but for current CPUs of
	 * interest, they are the same.
	 */
	status_page = (u64 *)
		((char *) dd->pioavailregs_dma +
		 ((2 * L1_CACHE_BYTES +
		   dd->pioavregs * sizeof(u64)) & ~L1_CACHE_BYTES));
	/* device status comes first, for backwards compatibility */
	dd->devstatusp = status_page;
	*status_page++ = 0;
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		dd->pport[pidx].statusp = status_page;
		*status_page++ = 0;
	}

	/*
	 * Setup buffer to hold freeze and other messages, accessible to
	 * apps, following statusp.  This is per-unit, not per port.
	 */
	dd->freezemsg = (char *) status_page;
	*dd->freezemsg = 0;
	/* length of msg buffer is "whatever is left" */
	ret = (char *) status_page - (char *) dd->pioavailregs_dma;
	dd->freezelen = PAGE_SIZE - ret;

	ret = 0;

done:
	return ret;
}

/**
 * init_shadow_tids - allocate the shadow TID array
 * @dd: the qlogic_ib device
 *
 * allocate the shadow TID array, so we can qib_munlock previous
 * entries.  It may make more sense to move the pageshadow to the
 * ctxt data structure, so we only allocate memory for ctxts actually
 * in use, since we're at 8k per ctxt now.
 * We don't want failures here to prevent use of the driver/chip,
 * so no return value.
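 * Both shadow arrays are dd->cfgctxts * dd->rcvtidcnt entries, large
 * enough that they are vzalloc'ed rather than kmalloc'ed.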
 */
static void init_shadow_tids(struct qib_devdata *dd)
{
	struct page **pages;
	dma_addr_t *addrs;

	pages = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
	if (!pages) {
		qib_dev_err(dd,
			"failed to allocate shadow page * array, no expected sends!\n");
		goto bail;
	}

	addrs = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
	if (!addrs) {
		qib_dev_err(dd,
			"failed to allocate shadow dma handle array, no expected sends!\n");
		goto bail_free;
	}

	dd->pageshadow = pages;
	dd->physshadow = addrs;
	return;

bail_free:
	vfree(pages);
bail:
	dd->pageshadow = NULL;
}

/*
 * Do initialization for device that is only needed on
 * first detect, not on resets.
 */
static int loadtime_init(struct qib_devdata *dd)
{
	int ret = 0;

	if (((dd->revision >> QLOGIC_IB_R_SOFTWARE_SHIFT) &
	     QLOGIC_IB_R_SOFTWARE_MASK) != QIB_CHIP_SWVERSION) {
		qib_dev_err(dd,
			"Driver only handles version %d, chip swversion is %d (%llx), failing\n",
			QIB_CHIP_SWVERSION,
			(int)(dd->revision >>
				QLOGIC_IB_R_SOFTWARE_SHIFT) &
				QLOGIC_IB_R_SOFTWARE_MASK,
			(unsigned long long) dd->revision);
		ret = -ENOSYS;
		goto done;
	}

	if (dd->revision & QLOGIC_IB_R_EMULATOR_MASK)
		qib_devinfo(dd->pcidev, "%s", dd->boardversion);

	spin_lock_init(&dd->pioavail_lock);
	spin_lock_init(&dd->sendctrl_lock);
	spin_lock_init(&dd->uctxt_lock);
	spin_lock_init(&dd->qib_diag_trans_lock);
	spin_lock_init(&dd->eep_st_lock);
	mutex_init(&dd->eep_lock);

	if (qib_mini_init)
		goto done;

	ret = init_pioavailregs(dd);
	init_shadow_tids(dd);

	qib_get_eeprom_info(dd);

	/* set up timer (don't start yet) to verify we got an interrupt */
	init_timer(&dd->intrchk_timer);
	dd->intrchk_timer.function = verify_interrupt;
	dd->intrchk_timer.data = (unsigned long) dd;

	ret = qib_cq_init(dd);
done:
	return ret;
}

/**
 * init_after_reset - re-initialize after a reset
 * @dd: the qlogic_ib device
 *
 * sanity check at least some of the values after reset, and
 * ensure no receive or transmit (explicitly, in case reset
 * failed)
 */
static int init_after_reset(struct qib_devdata *dd)
{
	int i;

	/*
	 * Ensure chip does no sends or receives, tail updates, or
	 * pioavail updates while we re-initialize.  This is mostly
	 * for the driver data structures, not chip registers.
	 */
	for (i = 0; i < dd->num_pports; ++i) {
		/*
		 * ctxt == -1 means "all contexts". Only really safe for
		 * _dis_abling things, as here.
		 */
		dd->f_rcvctrl(dd->pport + i, QIB_RCVCTRL_CTXT_DIS |
			      QIB_RCVCTRL_INTRAVAIL_DIS |
			      QIB_RCVCTRL_TAILUPD_DIS, -1);
		/* Redundant across ports for some, but no big deal.  */
		dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_DIS |
			QIB_SENDCTRL_AVAIL_DIS);
	}

	return 0;
}

static void enable_chip(struct qib_devdata *dd)
{
	u64 rcvmask;
	int i;

	/*
	 * Enable PIO send, and update of PIOavail regs to memory.
	 */
	for (i = 0; i < dd->num_pports; ++i)
		dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_ENB |
			QIB_SENDCTRL_AVAIL_ENB);
	/*
	 * Enable kernel ctxts' receive and receive interrupt.
	 * Other ctxts done as user opens and inits them.
	 */
	rcvmask = QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_INTRAVAIL_ENB;
	rcvmask |= (dd->flags & QIB_NODMA_RTAIL) ?
		  QIB_RCVCTRL_TAILUPD_DIS : QIB_RCVCTRL_TAILUPD_ENB;
	for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
		struct qib_ctxtdata *rcd = dd->rcd[i];

		if (rcd)
			dd->f_rcvctrl(rcd->ppd, rcvmask, i);
	}
}

static void verify_interrupt(unsigned long opaque)
{
	struct qib_devdata *dd = (struct qib_devdata *) opaque;
	u64 int_counter;

	if (!dd)
		return; /* being torn down */

	/*
	 * If we don't have a lid or any interrupts, let the user know and
	 * don't bother checking again.
	 */
	int_counter = qib_int_counter(dd) - dd->z_int_counter;
	if (int_counter == 0) {
		if (!dd->f_intr_fallback(dd))
			dev_err(&dd->pcidev->dev,
				"No interrupts detected, not usable.\n");
		else /* re-arm the timer to see if fallback works */
			mod_timer(&dd->intrchk_timer, jiffies + HZ/2);
	}
}

static void init_piobuf_state(struct qib_devdata *dd)
{
	int i, pidx;
	u32 uctxts;

	/*
	 * Ensure all buffers are free, and fifos empty.  Buffers
	 * are common, so only do once for port 0.
	 *
	 * After enable and qib_chg_pioavailkernel so we can safely
	 * enable pioavail updates and PIOENABLE.  After this, packets
	 * are ready and able to go out.
	 */
	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_ALL);
	for (pidx = 0; pidx < dd->num_pports; ++pidx)
		dd->f_sendctrl(dd->pport + pidx, QIB_SENDCTRL_FLUSH);

	/*
	 * If not all sendbufs are used, add the one to each of the lower
	 * numbered contexts.  pbufsctxt and lastctxt_piobuf are
	 * calculated in chip-specific code because it may cause some
	 * chip-specific adjustments to be made.
	 */
	uctxts = dd->cfgctxts - dd->first_user_ctxt;
	dd->ctxts_extrabuf = dd->pbufsctxt ?
		dd->lastctxt_piobuf - (dd->pbufsctxt * uctxts) : 0;

	/*
	 * Set up the shadow copies of the piobufavail registers,
	 * which we compare against the chip registers for now, and
	 * the in memory DMA'ed copies of the registers.
	 * By now pioavail updates to memory should have occurred, so
	 * copy them into our working/shadow registers; this is in
	 * case something went wrong with abort, but mostly to get the
	 * initial values of the generation bit correct.
	 */
	for (i = 0; i < dd->pioavregs; i++) {
		__le64 tmp;

		tmp = dd->pioavailregs_dma[i];
		/*
		 * Don't need to worry about pioavailkernel here
		 * because we will call qib_chg_pioavailkernel() later
		 * in initialization, to busy out buffers as needed.
		 */
		dd->pioavailshadow[i] = le64_to_cpu(tmp);
	}
	while (i < ARRAY_SIZE(dd->pioavailshadow))
		dd->pioavailshadow[i++] = 0; /* for debugging sanity */

	/* after pioavailshadow is setup */
	qib_chg_pioavailkernel(dd, 0, dd->piobcnt2k + dd->piobcnt4k,
			       TXCHK_CHG_TYPE_KERN, NULL);
	dd->f_initvl15_bufs(dd);
}

/**
 * qib_create_workqueues - create per port workqueues
 * @dd: the qlogic_ib device
 */
static int qib_create_workqueues(struct qib_devdata *dd)
{
	int pidx;
	struct qib_pportdata *ppd;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (!ppd->qib_wq) {
			char wq_name[8]; /* 3 + 2 + 1 + 1 + 1 */
			snprintf(wq_name, sizeof(wq_name), "qib%d_%d",
				dd->unit, pidx);
			ppd->qib_wq =
				create_singlethread_workqueue(wq_name);
			if (!ppd->qib_wq)
				goto wq_error;
		}
	}
	return 0;
wq_error:
	pr_err("create_singlethread_workqueue failed for port %d\n",
		pidx + 1);
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->qib_wq) {
			destroy_workqueue(ppd->qib_wq);
			ppd->qib_wq = NULL;
		}
	}
	return -ENOMEM;
}

static void qib_free_pportdata(struct qib_pportdata *ppd)
{
	free_percpu(ppd->ibport_data.pmastats);
	ppd->ibport_data.pmastats = NULL;
}

/**
 * qib_init - do the actual initialization sequence on the chip
 * @dd: the qlogic_ib device
 * @reinit: reinitializing, so don't allocate new memory
 *
 * Do the actual initialization sequence on the chip.  This is done
 * both from the init routine called from the PCI infrastructure, and
 * when we reset the chip, or detect that it was reset internally,
 * or it's administratively re-enabled.
 *
 * Memory allocation here and in called routines is only done in
 * the first case (reinit == 0).  We have to be careful, because even
 * without memory allocation, we need to re-write all the chip registers,
 * TIDs, etc. after the reset or enable has completed.
 */
int qib_init(struct qib_devdata *dd, int reinit)
{
	int ret = 0, pidx, lastfail = 0;
	u32 portok = 0;
	unsigned i;
	struct qib_ctxtdata *rcd;
	struct qib_pportdata *ppd;
	unsigned long flags;

	/* Set linkstate to unknown, so we can watch for a transition. */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~(QIBL_LINKACTIVE | QIBL_LINKARMED |
				 QIBL_LINKDOWN | QIBL_LINKINIT |
				 QIBL_LINKV);
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	}

	if (reinit)
		ret = init_after_reset(dd);
	else
		ret = loadtime_init(dd);
	if (ret)
		goto done;

	/* Bypass most chip-init, to get to device creation */
	if (qib_mini_init)
		return 0;

	ret = dd->f_late_initreg(dd);
	if (ret)
		goto done;

	/* dd->rcd can be NULL if early init failed */
	for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
		/*
		 * Set up the (kernel) rcvhdr queue and egr TIDs.  If doing
		 * re-init, the simplest way to handle this is to free
		 * existing, and re-allocate.
		 * Need to re-create rest of ctxt 0 ctxtdata as well.
		 */
		rcd = dd->rcd[i];
		if (!rcd)
			continue;

		lastfail = qib_create_rcvhdrq(dd, rcd);
		if (!lastfail)
			lastfail = qib_setup_eagerbufs(rcd);
		if (lastfail) {
			qib_dev_err(dd,
				"failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
			continue;
		}
	}

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		int mtu;
		if (lastfail)
			ret = lastfail;
		ppd = dd->pport + pidx;
		mtu = ib_mtu_enum_to_int(qib_ibmtu);
		if (mtu == -1) {
			mtu = QIB_DEFAULT_MTU;
			qib_ibmtu = 0; /* don't leave invalid value */
		}
		/* set max we can ever have for this driver load */
		ppd->init_ibmaxlen = min(mtu > 2048 ?
					 dd->piosize4k : dd->piosize2k,
					 dd->rcvegrbufsize +
					 (dd->rcvhdrentsize << 2));
		/*
		 * Have to initialize ibmaxlen, but this will normally
		 * change immediately in qib_set_mtu().
		 */
		ppd->ibmaxlen = ppd->init_ibmaxlen;
		qib_set_mtu(ppd, mtu);

		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags |= QIBL_IB_LINK_DISABLED;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);

		lastfail = dd->f_bringup_serdes(ppd);
		if (lastfail) {
			qib_devinfo(dd->pcidev,
				 "Failed to bringup IB port %u\n", ppd->port);
			lastfail = -ENETDOWN;
			continue;
		}

		portok++;
	}

	if (!portok) {
		/* none of the ports initialized */
		if (!ret && lastfail)
			ret = lastfail;
		else if (!ret)
			ret = -ENETDOWN;
		/* but continue on, so we can debug cause */
	}

	enable_chip(dd);

	init_piobuf_state(dd);

done:
	if (!ret) {
		/* chip is OK for user apps; mark it as initialized */
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;
			/*
			 * Set status even if port serdes is not initialized
			 * so that diags will work.
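			 * (*ppd->statusp points into the pioavailregs page
			 * set up in init_pioavailregs().)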
			 */
			*ppd->statusp |= QIB_STATUS_CHIP_PRESENT |
				QIB_STATUS_INITTED;
			if (!ppd->link_speed_enabled)
				continue;
			if (dd->flags & QIB_HAS_SEND_DMA)
				ret = qib_setup_sdma(ppd);
			init_timer(&ppd->hol_timer);
			ppd->hol_timer.function = qib_hol_event;
			ppd->hol_timer.data = (unsigned long)ppd;
			ppd->hol_state = QIB_HOL_UP;
		}

		/* now we can enable all interrupts from the chip */
		dd->f_set_intr_state(dd, 1);

		/*
		 * Setup to verify we get an interrupt, and fallback
		 * to an alternate if necessary and possible.
		 */
		mod_timer(&dd->intrchk_timer, jiffies + HZ/2);
		/* start stats retrieval timer */
		mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
	}

	/* if ret is non-zero, we probably should do some cleanup here... */
	return ret;
}

/*
 * These next two routines are placeholders in case we don't have per-arch
 * code for controlling write combining.  If explicit control of write
 * combining is not available, performance will probably be awful.
 */

int __attribute__((weak)) qib_enable_wc(struct qib_devdata *dd)
{
	return -EOPNOTSUPP;
}

void __attribute__((weak)) qib_disable_wc(struct qib_devdata *dd)
{
}

static inline struct qib_devdata *__qib_lookup(int unit)
{
	return idr_find(&qib_unit_table, unit);
}

struct qib_devdata *qib_lookup(int unit)
{
	struct qib_devdata *dd;
	unsigned long flags;

	spin_lock_irqsave(&qib_devs_lock, flags);
	dd = __qib_lookup(unit);
	spin_unlock_irqrestore(&qib_devs_lock, flags);

	return dd;
}

/*
 * Stop the timers during unit shutdown, or after an error late
 * in initialization.
 */
static void qib_stop_timers(struct qib_devdata *dd)
{
	struct qib_pportdata *ppd;
	int pidx;

	if (dd->stats_timer.data) {
		del_timer_sync(&dd->stats_timer);
		dd->stats_timer.data = 0;
	}
	if (dd->intrchk_timer.data) {
		del_timer_sync(&dd->intrchk_timer);
		dd->intrchk_timer.data = 0;
	}
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->hol_timer.data)
			del_timer_sync(&ppd->hol_timer);
		if (ppd->led_override_timer.data) {
			del_timer_sync(&ppd->led_override_timer);
			atomic_set(&ppd->led_override_timer_active, 0);
		}
		if (ppd->symerr_clear_timer.data)
			del_timer_sync(&ppd->symerr_clear_timer);
	}
}

/**
 * qib_shutdown_device - shut down a device
 * @dd: the qlogic_ib device
 *
 * This is called to make the device quiet when we are about to
 * unload the driver, and also when the device is administratively
 * disabled.  It does not free any data structures.
 * Everything it does has to be set up again by qib_init(dd, 1)
 */
static void qib_shutdown_device(struct qib_devdata *dd)
{
	struct qib_pportdata *ppd;
	unsigned pidx;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		spin_lock_irq(&ppd->lflags_lock);
		ppd->lflags &= ~(QIBL_LINKDOWN | QIBL_LINKINIT |
				 QIBL_LINKARMED | QIBL_LINKACTIVE |
				 QIBL_LINKV);
		spin_unlock_irq(&ppd->lflags_lock);
		*ppd->statusp &= ~(QIB_STATUS_IB_CONF | QIB_STATUS_IB_READY);
	}
	dd->flags &= ~QIB_INITTED;

	/* mask interrupts, but not errors */
	dd->f_set_intr_state(dd, 0);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		dd->f_rcvctrl(ppd, QIB_RCVCTRL_TAILUPD_DIS |
				   QIB_RCVCTRL_CTXT_DIS |
				   QIB_RCVCTRL_INTRAVAIL_DIS |
				   QIB_RCVCTRL_PKEY_ENB, -1);
		/*
		 * Gracefully stop all sends allowing any in progress to
		 * trickle out first.
		 */
		dd->f_sendctrl(ppd, QIB_SENDCTRL_CLEAR);
	}

	/*
	 * Enough for anything that's going to trickle out to have actually
	 * done so.
	 */
	udelay(20);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		dd->f_setextled(ppd, 0); /* make sure LEDs are off */

		if (dd->flags & QIB_HAS_SEND_DMA)
			qib_teardown_sdma(ppd);

		dd->f_sendctrl(ppd, QIB_SENDCTRL_AVAIL_DIS |
				    QIB_SENDCTRL_SEND_DIS);
		/*
		 * Clear SerdesEnable.
		 * We can't count on interrupts since we are stopping.
		 */
		dd->f_quiet_serdes(ppd);

		if (ppd->qib_wq) {
			destroy_workqueue(ppd->qib_wq);
			ppd->qib_wq = NULL;
		}
		qib_free_pportdata(ppd);
	}

	qib_update_eeprom_log(dd);
}

/**
 * qib_free_ctxtdata - free a context's allocated data
 * @dd: the qlogic_ib device
 * @rcd: the ctxtdata structure
 *
 * free up any allocated data for a context
 * This should not touch anything that would affect a simultaneous
 * re-allocation of context data, because it is called after qib_mutex
 * is released (and can be called from reinit as well).
 * It should never change any chip state, or global driver state.
 */
void qib_free_ctxtdata(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
{
	if (!rcd)
		return;

	if (rcd->rcvhdrq) {
		dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size,
				  rcd->rcvhdrq, rcd->rcvhdrq_phys);
		rcd->rcvhdrq = NULL;
		if (rcd->rcvhdrtail_kvaddr) {
			dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
					  rcd->rcvhdrtail_kvaddr,
					  rcd->rcvhdrqtailaddr_phys);
			rcd->rcvhdrtail_kvaddr = NULL;
		}
	}
	if (rcd->rcvegrbuf) {
		unsigned e;

		for (e = 0; e < rcd->rcvegrbuf_chunks; e++) {
			void *base = rcd->rcvegrbuf[e];
			size_t size = rcd->rcvegrbuf_size;

			dma_free_coherent(&dd->pcidev->dev, size,
					  base, rcd->rcvegrbuf_phys[e]);
		}
		kfree(rcd->rcvegrbuf);
		rcd->rcvegrbuf = NULL;
		kfree(rcd->rcvegrbuf_phys);
		rcd->rcvegrbuf_phys = NULL;
		rcd->rcvegrbuf_chunks = 0;
	}

	kfree(rcd->tid_pg_list);
	vfree(rcd->user_event_mask);
	vfree(rcd->subctxt_uregbase);
	vfree(rcd->subctxt_rcvegrbuf);
	vfree(rcd->subctxt_rcvhdr_base);
#ifdef CONFIG_DEBUG_FS
	kfree(rcd->opstats);
	rcd->opstats = NULL;
#endif
	kfree(rcd);
}

/*
 * Perform a PIO buffer bandwidth write test, to verify proper system
 * configuration.  Even when all the setup calls work, occasionally
 * BIOS or other issues can prevent write combining from working, or
 * can cause other bandwidth problems to the chip.
 *
 * This test simply writes the same buffer over and over again, and
 * measures close to the peak bandwidth to the chip (not testing
 * data bandwidth to the wire).  On chips that use an address-based
 * trigger to send packets to the wire, this is easy.  On chips that
 * use a count to trigger, we want to make sure that the packet doesn't
 * go out on the wire, or trigger flow control checks.
 */
static void qib_verify_pioperf(struct qib_devdata *dd)
{
	u32 pbnum, cnt, lcnt;
	u32 __iomem *piobuf;
	u32 *addr;
	u64 msecs, emsecs;

	piobuf = dd->f_getsendbuf(dd->pport, 0ULL, &pbnum);
	if (!piobuf) {
		qib_devinfo(dd->pcidev,
			"No PIObufs for checking perf, skipping\n");
		return;
	}

	/*
	 * Enough to give us a reasonable test, less than piobuf size, and
	 * likely multiple of store buffer length.
	 */
	cnt = 1024;

	addr = vmalloc(cnt);
	if (!addr) {
		qib_devinfo(dd->pcidev,
			"Couldn't get memory for checking PIO perf, skipping\n");
		goto done;
	}

	preempt_disable();  /* we want reasonably accurate elapsed time */
	msecs = 1 + jiffies_to_msecs(jiffies);
	for (lcnt = 0; lcnt < 10000U; lcnt++) {
		/* wait until we cross msec boundary */
		if (jiffies_to_msecs(jiffies) >= msecs)
			break;
		udelay(1);
	}

	dd->f_set_armlaunch(dd, 0);

	/*
	 * length 0, no dwords actually sent
	 */
	writeq(0, piobuf);
	qib_flush_wc();

	/*
	 * This is only roughly accurate, since even with preempt we
	 * still take interrupts that could take a while.  Running for
	 * >= 5 msec seems to get us "close enough" to accurate values.
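	 * Each pass of the loop below copies cnt (1024) bytes, so lcnt
	 * passes in emsecs milliseconds is roughly lcnt KiB/ms; the check
	 * afterwards flags anything below 1024 KiB/ms (about 1 GiB/sec).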
	 */
	msecs = jiffies_to_msecs(jiffies);
	for (emsecs = lcnt = 0; emsecs <= 5UL; lcnt++) {
		qib_pio_copy(piobuf + 64, addr, cnt >> 2);
		emsecs = jiffies_to_msecs(jiffies) - msecs;
	}

	/* 1 GiB/sec, slightly over IB SDR line rate */
	if (lcnt < (emsecs * 1024U))
		qib_dev_err(dd,
			"Performance problem: bandwidth to PIO buffers is only %u MiB/sec\n",
			lcnt / (u32) emsecs);

	preempt_enable();

	vfree(addr);

done:
	/* disarm piobuf, so it's available again */
	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(pbnum));
	qib_sendbuf_done(dd, pbnum);
	dd->f_set_armlaunch(dd, 1);
}

void qib_free_devdata(struct qib_devdata *dd)
{
	unsigned long flags;

	spin_lock_irqsave(&qib_devs_lock, flags);
	idr_remove(&qib_unit_table, dd->unit);
	list_del(&dd->list);
	spin_unlock_irqrestore(&qib_devs_lock, flags);

#ifdef CONFIG_DEBUG_FS
	qib_dbg_ibdev_exit(&dd->verbs_dev);
#endif
	free_percpu(dd->int_counter);
	ib_dealloc_device(&dd->verbs_dev.ibdev);
}

u64 qib_int_counter(struct qib_devdata *dd)
{
	int cpu;
	u64 int_counter = 0;

	for_each_possible_cpu(cpu)
		int_counter += *per_cpu_ptr(dd->int_counter, cpu);
	return int_counter;
}

u64 qib_sps_ints(void)
{
	unsigned long flags;
	struct qib_devdata *dd;
	u64 sps_ints = 0;

	spin_lock_irqsave(&qib_devs_lock, flags);
	list_for_each_entry(dd, &qib_dev_list, list) {
		sps_ints += qib_int_counter(dd);
	}
	spin_unlock_irqrestore(&qib_devs_lock, flags);
	return sps_ints;
}

/*
 * Allocate our primary per-unit data structure.  Must be done via verbs
 * allocator, because the verbs cleanup process both does cleanup and
 * free of the data structure.
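 *
 * The unit number comes from idr_alloc() under qib_devs_lock;
 * idr_preload(GFP_KERNEL) is done outside the lock so that idr_alloc()
 * can use GFP_NOWAIT while the spinlock is held.
 *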
1123f931551bSRalph Campbell * "extra" is for chip-specific data. 1124f931551bSRalph Campbell * 1125f931551bSRalph Campbell * Use the idr mechanism to get a unit number for this unit. 1126f931551bSRalph Campbell */ 1127f931551bSRalph Campbell struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra) 1128f931551bSRalph Campbell { 1129f931551bSRalph Campbell unsigned long flags; 1130f931551bSRalph Campbell struct qib_devdata *dd; 1131f931551bSRalph Campbell int ret; 1132f931551bSRalph Campbell 1133f931551bSRalph Campbell dd = (struct qib_devdata *) ib_alloc_device(sizeof(*dd) + extra); 1134f8b6c47aSMike Marciniszyn if (!dd) 1135f8b6c47aSMike Marciniszyn return ERR_PTR(-ENOMEM); 1136f931551bSRalph Campbell 1137f8b6c47aSMike Marciniszyn INIT_LIST_HEAD(&dd->list); 1138ddb88765SMike Marciniszyn 113980f22b44STejun Heo idr_preload(GFP_KERNEL); 1140f931551bSRalph Campbell spin_lock_irqsave(&qib_devs_lock, flags); 114180f22b44STejun Heo 114280f22b44STejun Heo ret = idr_alloc(&qib_unit_table, dd, 0, 0, GFP_NOWAIT); 114380f22b44STejun Heo if (ret >= 0) { 114480f22b44STejun Heo dd->unit = ret; 1145f931551bSRalph Campbell list_add(&dd->list, &qib_dev_list); 114680f22b44STejun Heo } 114780f22b44STejun Heo 1148f931551bSRalph Campbell spin_unlock_irqrestore(&qib_devs_lock, flags); 114980f22b44STejun Heo idr_preload_end(); 1150f931551bSRalph Campbell 1151f931551bSRalph Campbell if (ret < 0) { 1152f931551bSRalph Campbell qib_early_err(&pdev->dev, 1153f931551bSRalph Campbell "Could not allocate unit ID: error %d\n", -ret); 1154f931551bSRalph Campbell goto bail; 1155f931551bSRalph Campbell } 11561ed88dd7SMike Marciniszyn dd->int_counter = alloc_percpu(u64); 11571ed88dd7SMike Marciniszyn if (!dd->int_counter) { 11581ed88dd7SMike Marciniszyn ret = -ENOMEM; 11591ed88dd7SMike Marciniszyn qib_early_err(&pdev->dev, 11601ed88dd7SMike Marciniszyn "Could not allocate per-cpu int_counter\n"); 11611ed88dd7SMike Marciniszyn goto bail; 11621ed88dd7SMike Marciniszyn } 1163f931551bSRalph Campbell 1164f931551bSRalph Campbell if (!qib_cpulist_count) { 1165f931551bSRalph Campbell u32 count = num_online_cpus(); 1166f931551bSRalph Campbell qib_cpulist = kzalloc(BITS_TO_LONGS(count) * 1167f931551bSRalph Campbell sizeof(long), GFP_KERNEL); 1168f931551bSRalph Campbell if (qib_cpulist) 1169f931551bSRalph Campbell qib_cpulist_count = count; 1170f931551bSRalph Campbell else 11717fac3301SMike Marciniszyn qib_early_err(&pdev->dev, 11727fac3301SMike Marciniszyn "Could not alloc cpulist info, cpu affinity might be wrong\n"); 1173f931551bSRalph Campbell } 1174f8b6c47aSMike Marciniszyn #ifdef CONFIG_DEBUG_FS 1175f8b6c47aSMike Marciniszyn qib_dbg_ibdev_init(&dd->verbs_dev); 1176f8b6c47aSMike Marciniszyn #endif 1177f931551bSRalph Campbell return dd; 1178f8b6c47aSMike Marciniszyn bail: 1179f8b6c47aSMike Marciniszyn if (!list_empty(&dd->list)) 1180f8b6c47aSMike Marciniszyn list_del_init(&dd->list); 1181f8b6c47aSMike Marciniszyn ib_dealloc_device(&dd->verbs_dev.ibdev); 1182f8b6c47aSMike Marciniszyn return ERR_PTR(ret); 1183f931551bSRalph Campbell } 1184f931551bSRalph Campbell 1185f931551bSRalph Campbell /* 1186f931551bSRalph Campbell * Called from freeze mode handlers, and from PCI error 1187f931551bSRalph Campbell * reporting code. Should be paranoid about state of 1188f931551bSRalph Campbell * system and data structures.
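 * On entry with QIB_INITTED set, this clears that flag, forces each
 * present port's link to LINKDOWN_DISABLE, turns off the external LEDs,
 * and clears QIB_STATUS_IB_READY in the per-port status words; the unit
 * is then marked QIB_STATUS_HWERROR so user programs see it as unusable
 * until a reset.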
1189f931551bSRalph Campbell */ 1190f931551bSRalph Campbell void qib_disable_after_error(struct qib_devdata *dd) 1191f931551bSRalph Campbell { 1192f931551bSRalph Campbell if (dd->flags & QIB_INITTED) { 1193f931551bSRalph Campbell u32 pidx; 1194f931551bSRalph Campbell 1195f931551bSRalph Campbell dd->flags &= ~QIB_INITTED; 1196f931551bSRalph Campbell if (dd->pport) 1197f931551bSRalph Campbell for (pidx = 0; pidx < dd->num_pports; ++pidx) { 1198f931551bSRalph Campbell struct qib_pportdata *ppd; 1199f931551bSRalph Campbell 1200f931551bSRalph Campbell ppd = dd->pport + pidx; 1201f931551bSRalph Campbell if (dd->flags & QIB_PRESENT) { 1202f931551bSRalph Campbell qib_set_linkstate(ppd, 1203f931551bSRalph Campbell QIB_IB_LINKDOWN_DISABLE); 1204f931551bSRalph Campbell dd->f_setextled(ppd, 0); 1205f931551bSRalph Campbell } 1206f931551bSRalph Campbell *ppd->statusp &= ~QIB_STATUS_IB_READY; 1207f931551bSRalph Campbell } 1208f931551bSRalph Campbell } 1209f931551bSRalph Campbell 1210f931551bSRalph Campbell /* 1211f931551bSRalph Campbell * Mark as having had an error for driver, and also 1212f931551bSRalph Campbell * for /sys and status word mapped to user programs. 1213f931551bSRalph Campbell * This marks unit as not usable, until reset. 1214f931551bSRalph Campbell */ 1215f931551bSRalph Campbell if (dd->devstatusp) 1216f931551bSRalph Campbell *dd->devstatusp |= QIB_STATUS_HWERROR; 1217f931551bSRalph Campbell } 1218f931551bSRalph Campbell 12191e6d9abeSGreg Kroah-Hartman static void qib_remove_one(struct pci_dev *); 12201e6d9abeSGreg Kroah-Hartman static int qib_init_one(struct pci_dev *, const struct pci_device_id *); 1221f931551bSRalph Campbell 1222e2eed58bSVinit Agnihotri #define DRIVER_LOAD_MSG "Intel " QIB_DRV_NAME " loaded: " 1223f931551bSRalph Campbell #define PFX QIB_DRV_NAME ": " 1224f931551bSRalph Campbell 1225865b64beSMike Marciniszyn static DEFINE_PCI_DEVICE_TABLE(qib_pci_tbl) = { 1226f931551bSRalph Campbell { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_QLOGIC_IB_6120) }, 1227f931551bSRalph Campbell { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7220) }, 1228f931551bSRalph Campbell { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7322) }, 1229f931551bSRalph Campbell { 0, } 1230f931551bSRalph Campbell }; 1231f931551bSRalph Campbell 1232f931551bSRalph Campbell MODULE_DEVICE_TABLE(pci, qib_pci_tbl); 1233f931551bSRalph Campbell 1234bea25e82SPaul Bolle static struct pci_driver qib_driver = { 1235f931551bSRalph Campbell .name = QIB_DRV_NAME, 1236f931551bSRalph Campbell .probe = qib_init_one, 12371e6d9abeSGreg Kroah-Hartman .remove = qib_remove_one, 1238f931551bSRalph Campbell .id_table = qib_pci_tbl, 1239f931551bSRalph Campbell .err_handler = &qib_pci_err_handler, 1240f931551bSRalph Campbell }; 1241f931551bSRalph Campbell 12428469ba39SMike Marciniszyn #ifdef CONFIG_INFINIBAND_QIB_DCA 12438469ba39SMike Marciniszyn 12448469ba39SMike Marciniszyn static int qib_notify_dca(struct notifier_block *, unsigned long, void *); 12458469ba39SMike Marciniszyn static struct notifier_block dca_notifier = { 12468469ba39SMike Marciniszyn .notifier_call = qib_notify_dca, 12478469ba39SMike Marciniszyn .next = NULL, 12488469ba39SMike Marciniszyn .priority = 0 12498469ba39SMike Marciniszyn }; 12508469ba39SMike Marciniszyn 12518469ba39SMike Marciniszyn static int qib_notify_dca_device(struct device *device, void *data) 12528469ba39SMike Marciniszyn { 12538469ba39SMike Marciniszyn struct qib_devdata *dd = dev_get_drvdata(device); 12548469ba39SMike Marciniszyn unsigned long event = *(unsigned long 
*)data; 12558469ba39SMike Marciniszyn 12568469ba39SMike Marciniszyn return dd->f_notify_dca(dd, event); 12578469ba39SMike Marciniszyn } 12588469ba39SMike Marciniszyn 12598469ba39SMike Marciniszyn static int qib_notify_dca(struct notifier_block *nb, unsigned long event, 12608469ba39SMike Marciniszyn void *p) 12618469ba39SMike Marciniszyn { 12628469ba39SMike Marciniszyn int rval; 12638469ba39SMike Marciniszyn 12648469ba39SMike Marciniszyn rval = driver_for_each_device(&qib_driver.driver, NULL, 12658469ba39SMike Marciniszyn &event, qib_notify_dca_device); 12668469ba39SMike Marciniszyn return rval ? NOTIFY_BAD : NOTIFY_DONE; 12678469ba39SMike Marciniszyn } 12688469ba39SMike Marciniszyn 12698469ba39SMike Marciniszyn #endif 12708469ba39SMike Marciniszyn 1271f931551bSRalph Campbell /* 1272f931551bSRalph Campbell * Do all the generic driver unit- and chip-independent memory 1273f931551bSRalph Campbell * allocation and initialization. 1274f931551bSRalph Campbell */ 12750a66d2bdSVinit Agnihotri static int __init qib_ib_init(void) 1276f931551bSRalph Campbell { 1277f931551bSRalph Campbell int ret; 1278f931551bSRalph Campbell 1279f931551bSRalph Campbell ret = qib_dev_init(); 1280f931551bSRalph Campbell if (ret) 1281f931551bSRalph Campbell goto bail; 1282f931551bSRalph Campbell 1283f931551bSRalph Campbell /* 1284f931551bSRalph Campbell * These must be called before the driver is registered with 1285f931551bSRalph Campbell * the PCI subsystem. 1286f931551bSRalph Campbell */ 1287f931551bSRalph Campbell idr_init(&qib_unit_table); 1288f931551bSRalph Campbell 12898469ba39SMike Marciniszyn #ifdef CONFIG_INFINIBAND_QIB_DCA 12908469ba39SMike Marciniszyn dca_register_notify(&dca_notifier); 12918469ba39SMike Marciniszyn #endif 1292ddb88765SMike Marciniszyn #ifdef CONFIG_DEBUG_FS 1293ddb88765SMike Marciniszyn qib_dbg_init(); 1294ddb88765SMike Marciniszyn #endif 1295f931551bSRalph Campbell ret = pci_register_driver(&qib_driver); 1296f931551bSRalph Campbell if (ret < 0) { 12977fac3301SMike Marciniszyn pr_err("Unable to register driver: error %d\n", -ret); 129885caafe3SMike Marciniszyn goto bail_dev; 1299f931551bSRalph Campbell } 1300f931551bSRalph Campbell 1301f931551bSRalph Campbell /* not fatal if it doesn't work */ 1302f931551bSRalph Campbell if (qib_init_qibfs()) 13037fac3301SMike Marciniszyn pr_err("Unable to register ipathfs\n"); 1304f931551bSRalph Campbell goto bail; /* all OK */ 1305f931551bSRalph Campbell 130685caafe3SMike Marciniszyn bail_dev: 13078469ba39SMike Marciniszyn #ifdef CONFIG_INFINIBAND_QIB_DCA 13088469ba39SMike Marciniszyn dca_unregister_notify(&dca_notifier); 13098469ba39SMike Marciniszyn #endif 1310ddb88765SMike Marciniszyn #ifdef CONFIG_DEBUG_FS 1311ddb88765SMike Marciniszyn qib_dbg_exit(); 1312ddb88765SMike Marciniszyn #endif 1313f931551bSRalph Campbell idr_destroy(&qib_unit_table); 1314f931551bSRalph Campbell qib_dev_cleanup(); 1315f931551bSRalph Campbell bail: 1316f931551bSRalph Campbell return ret; 1317f931551bSRalph Campbell } 1318f931551bSRalph Campbell 13190a66d2bdSVinit Agnihotri module_init(qib_ib_init); 1320f931551bSRalph Campbell 1321f931551bSRalph Campbell /* 1322f931551bSRalph Campbell * Do the non-unit driver cleanup, memory free, etc. at unload. 
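 * Teardown mirrors qib_ib_init() in reverse: the qibfs counter filesystem
 * is removed first, then the DCA notifier (when configured), then the PCI
 * driver is unregistered before debugfs, the cpulist, the unit idr table
 * and the lower-level driver state are cleaned up.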
1323f931551bSRalph Campbell */ 13240a66d2bdSVinit Agnihotri static void __exit qib_ib_cleanup(void) 1325f931551bSRalph Campbell { 1326f931551bSRalph Campbell int ret; 1327f931551bSRalph Campbell 1328f931551bSRalph Campbell ret = qib_exit_qibfs(); 1329f931551bSRalph Campbell if (ret) 13307fac3301SMike Marciniszyn pr_err( 13317fac3301SMike Marciniszyn "Unable to cleanup counter filesystem: error %d\n", 13327fac3301SMike Marciniszyn -ret); 1333f931551bSRalph Campbell 13348469ba39SMike Marciniszyn #ifdef CONFIG_INFINIBAND_QIB_DCA 13358469ba39SMike Marciniszyn dca_unregister_notify(&dca_notifier); 13368469ba39SMike Marciniszyn #endif 1337f931551bSRalph Campbell pci_unregister_driver(&qib_driver); 1338ddb88765SMike Marciniszyn #ifdef CONFIG_DEBUG_FS 1339ddb88765SMike Marciniszyn qib_dbg_exit(); 1340ddb88765SMike Marciniszyn #endif 1341f931551bSRalph Campbell 1342f931551bSRalph Campbell qib_cpulist_count = 0; 1343f931551bSRalph Campbell kfree(qib_cpulist); 1344f931551bSRalph Campbell 1345f931551bSRalph Campbell idr_destroy(&qib_unit_table); 1346f931551bSRalph Campbell qib_dev_cleanup(); 1347f931551bSRalph Campbell } 1348f931551bSRalph Campbell 13490a66d2bdSVinit Agnihotri module_exit(qib_ib_cleanup); 1350f931551bSRalph Campbell 1351f931551bSRalph Campbell /* this can only be called after a successful initialization */ 1352f931551bSRalph Campbell static void cleanup_device_data(struct qib_devdata *dd) 1353f931551bSRalph Campbell { 1354f931551bSRalph Campbell int ctxt; 1355f931551bSRalph Campbell int pidx; 1356f931551bSRalph Campbell struct qib_ctxtdata **tmp; 1357f931551bSRalph Campbell unsigned long flags; 1358f931551bSRalph Campbell 1359f931551bSRalph Campbell /* users can't do anything more with chip */ 136036a8f01cSMike Marciniszyn for (pidx = 0; pidx < dd->num_pports; ++pidx) { 1361f931551bSRalph Campbell if (dd->pport[pidx].statusp) 1362f931551bSRalph Campbell *dd->pport[pidx].statusp &= ~QIB_STATUS_CHIP_PRESENT; 1363f931551bSRalph Campbell 136436a8f01cSMike Marciniszyn spin_lock(&dd->pport[pidx].cc_shadow_lock); 136536a8f01cSMike Marciniszyn 136636a8f01cSMike Marciniszyn kfree(dd->pport[pidx].congestion_entries); 136736a8f01cSMike Marciniszyn dd->pport[pidx].congestion_entries = NULL; 136836a8f01cSMike Marciniszyn kfree(dd->pport[pidx].ccti_entries); 136936a8f01cSMike Marciniszyn dd->pport[pidx].ccti_entries = NULL; 137036a8f01cSMike Marciniszyn kfree(dd->pport[pidx].ccti_entries_shadow); 137136a8f01cSMike Marciniszyn dd->pport[pidx].ccti_entries_shadow = NULL; 137236a8f01cSMike Marciniszyn kfree(dd->pport[pidx].congestion_entries_shadow); 137336a8f01cSMike Marciniszyn dd->pport[pidx].congestion_entries_shadow = NULL; 137436a8f01cSMike Marciniszyn 137536a8f01cSMike Marciniszyn spin_unlock(&dd->pport[pidx].cc_shadow_lock); 137636a8f01cSMike Marciniszyn } 137736a8f01cSMike Marciniszyn 1378f931551bSRalph Campbell if (!qib_wc_pat) 1379f931551bSRalph Campbell qib_disable_wc(dd); 1380f931551bSRalph Campbell 1381f931551bSRalph Campbell if (dd->pioavailregs_dma) { 1382f931551bSRalph Campbell dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE, 1383f931551bSRalph Campbell (void *) dd->pioavailregs_dma, 1384f931551bSRalph Campbell dd->pioavailregs_phys); 1385f931551bSRalph Campbell dd->pioavailregs_dma = NULL; 1386f931551bSRalph Campbell } 1387f931551bSRalph Campbell 1388f931551bSRalph Campbell if (dd->pageshadow) { 1389f931551bSRalph Campbell struct page **tmpp = dd->pageshadow; 1390f931551bSRalph Campbell dma_addr_t *tmpd = dd->physshadow; 1391308c813bSMike Marciniszyn int i; 1392f931551bSRalph Campbell 
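/*
 * Walk every configured context's expected-TID range: any page still
 * mapped is DMA-unmapped and released via qib_release_user_pages(),
 * then the pageshadow/physshadow arrays themselves are freed below.
 */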
1393f931551bSRalph Campbell for (ctxt = 0; ctxt < dd->cfgctxts; ctxt++) { 1394f931551bSRalph Campbell int ctxt_tidbase = ctxt * dd->rcvtidcnt; 1395f931551bSRalph Campbell int maxtid = ctxt_tidbase + dd->rcvtidcnt; 1396f931551bSRalph Campbell 1397f931551bSRalph Campbell for (i = ctxt_tidbase; i < maxtid; i++) { 1398f931551bSRalph Campbell if (!tmpp[i]) 1399f931551bSRalph Campbell continue; 1400f931551bSRalph Campbell pci_unmap_page(dd->pcidev, tmpd[i], 1401f931551bSRalph Campbell PAGE_SIZE, PCI_DMA_FROMDEVICE); 1402f931551bSRalph Campbell qib_release_user_pages(&tmpp[i], 1); 1403f931551bSRalph Campbell tmpp[i] = NULL; 1404f931551bSRalph Campbell } 1405f931551bSRalph Campbell } 1406f931551bSRalph Campbell 1407f931551bSRalph Campbell dd->pageshadow = NULL; 1408f931551bSRalph Campbell vfree(tmpp); 1409308c813bSMike Marciniszyn dd->physshadow = NULL; 1410308c813bSMike Marciniszyn vfree(tmpd); 1411f931551bSRalph Campbell } 1412f931551bSRalph Campbell 1413f931551bSRalph Campbell /* 1414f931551bSRalph Campbell * Free any resources still in use (usually just kernel contexts) 1415f931551bSRalph Campbell * at unload; we do for ctxtcnt, because that's what we allocate. 1416f931551bSRalph Campbell * We acquire lock to be really paranoid that rcd isn't being 1417f931551bSRalph Campbell * accessed from some interrupt-related code (that should not happen, 1418f931551bSRalph Campbell * but best to be sure). 1419f931551bSRalph Campbell */ 1420f931551bSRalph Campbell spin_lock_irqsave(&dd->uctxt_lock, flags); 1421f931551bSRalph Campbell tmp = dd->rcd; 1422f931551bSRalph Campbell dd->rcd = NULL; 1423f931551bSRalph Campbell spin_unlock_irqrestore(&dd->uctxt_lock, flags); 1424f931551bSRalph Campbell for (ctxt = 0; tmp && ctxt < dd->ctxtcnt; ctxt++) { 1425f931551bSRalph Campbell struct qib_ctxtdata *rcd = tmp[ctxt]; 1426f931551bSRalph Campbell 1427f931551bSRalph Campbell tmp[ctxt] = NULL; /* debugging paranoia */ 1428f931551bSRalph Campbell qib_free_ctxtdata(dd, rcd); 1429f931551bSRalph Campbell } 1430f931551bSRalph Campbell kfree(tmp); 1431f931551bSRalph Campbell kfree(dd->boardname); 143285caafe3SMike Marciniszyn qib_cq_exit(dd); 1433f931551bSRalph Campbell } 1434f931551bSRalph Campbell 1435f931551bSRalph Campbell /* 1436f931551bSRalph Campbell * Clean up on unit shutdown, or error during unit load after 1437f931551bSRalph Campbell * successful initialization. 1438f931551bSRalph Campbell */ 1439f931551bSRalph Campbell static void qib_postinit_cleanup(struct qib_devdata *dd) 1440f931551bSRalph Campbell { 1441f931551bSRalph Campbell /* 1442f931551bSRalph Campbell * Clean up chip-specific stuff. 1443f931551bSRalph Campbell * We check for NULL here, because it's outside 1444f931551bSRalph Campbell * the kregbase check, and we need to call it 1445f931551bSRalph Campbell * after the free_irq. Thus it's possible that 1446f931551bSRalph Campbell * the function pointers were never initialized. 
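 * The order is: chip-specific cleanup (f_cleanup), then PCIe/devdata
 * teardown via qib_pcie_ddcleanup(), then the generic per-unit state in
 * cleanup_device_data(), and finally qib_free_devdata() releases the
 * devdata itself.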
1447f931551bSRalph Campbell */ 1448f931551bSRalph Campbell if (dd->f_cleanup) 1449f931551bSRalph Campbell dd->f_cleanup(dd); 1450f931551bSRalph Campbell 1451f931551bSRalph Campbell qib_pcie_ddcleanup(dd); 1452f931551bSRalph Campbell 1453f931551bSRalph Campbell cleanup_device_data(dd); 1454f931551bSRalph Campbell 1455f931551bSRalph Campbell qib_free_devdata(dd); 1456f931551bSRalph Campbell } 1457f931551bSRalph Campbell 14581e6d9abeSGreg Kroah-Hartman static int qib_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 1459f931551bSRalph Campbell { 1460f931551bSRalph Campbell int ret, j, pidx, initfail; 1461f931551bSRalph Campbell struct qib_devdata *dd = NULL; 1462f931551bSRalph Campbell 1463f931551bSRalph Campbell ret = qib_pcie_init(pdev, ent); 1464f931551bSRalph Campbell if (ret) 1465f931551bSRalph Campbell goto bail; 1466f931551bSRalph Campbell 1467f931551bSRalph Campbell /* 1468f931551bSRalph Campbell * Do device-specific initialization, function table setup, dd 1469f931551bSRalph Campbell * allocation, etc. 1470f931551bSRalph Campbell */ 1471f931551bSRalph Campbell switch (ent->device) { 1472f931551bSRalph Campbell case PCI_DEVICE_ID_QLOGIC_IB_6120: 14737e3a1f4aSRalph Campbell #ifdef CONFIG_PCI_MSI 1474f931551bSRalph Campbell dd = qib_init_iba6120_funcs(pdev, ent); 14757e3a1f4aSRalph Campbell #else 14767fac3301SMike Marciniszyn qib_early_err(&pdev->dev, 1477e2eed58bSVinit Agnihotri "Intel PCIE device 0x%x cannot work if CONFIG_PCI_MSI is not enabled\n", 14787e3a1f4aSRalph Campbell ent->device); 14799e43e010SRalph Campbell dd = ERR_PTR(-ENODEV); 14807e3a1f4aSRalph Campbell #endif 1481f931551bSRalph Campbell break; 1482f931551bSRalph Campbell 1483f931551bSRalph Campbell case PCI_DEVICE_ID_QLOGIC_IB_7220: 1484f931551bSRalph Campbell dd = qib_init_iba7220_funcs(pdev, ent); 1485f931551bSRalph Campbell break; 1486f931551bSRalph Campbell 1487f931551bSRalph Campbell case PCI_DEVICE_ID_QLOGIC_IB_7322: 1488f931551bSRalph Campbell dd = qib_init_iba7322_funcs(pdev, ent); 1489f931551bSRalph Campbell break; 1490f931551bSRalph Campbell 1491f931551bSRalph Campbell default: 14927fac3301SMike Marciniszyn qib_early_err(&pdev->dev, 1493e2eed58bSVinit Agnihotri "Failing on unknown Intel deviceid 0x%x\n", 14947fac3301SMike Marciniszyn ent->device); 1495f931551bSRalph Campbell ret = -ENODEV; 1496f931551bSRalph Campbell } 1497f931551bSRalph Campbell 1498f931551bSRalph Campbell if (IS_ERR(dd)) 1499f931551bSRalph Campbell ret = PTR_ERR(dd); 1500f931551bSRalph Campbell if (ret) 1501f931551bSRalph Campbell goto bail; /* error already printed */ 1502f931551bSRalph Campbell 1503551ace12SMike Marciniszyn ret = qib_create_workqueues(dd); 1504551ace12SMike Marciniszyn if (ret) 1505551ace12SMike Marciniszyn goto bail; 1506551ace12SMike Marciniszyn 1507f931551bSRalph Campbell /* do the generic initialization */ 1508f931551bSRalph Campbell initfail = qib_init(dd, 0); 1509f931551bSRalph Campbell 1510f931551bSRalph Campbell ret = qib_register_ib_device(dd); 1511f931551bSRalph Campbell 1512f931551bSRalph Campbell /* 1513f931551bSRalph Campbell * Now ready for use. This should be cleared whenever we 1514f931551bSRalph Campbell * detect a reset, or initiate one. If earlier failure, 1515f931551bSRalph Campbell * we still create devices, so diags, etc. can be used 1516f931551bSRalph Campbell * to determine cause of problem.
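 * QIB_INITTED is only set when qib_mini_init is not requested and both
 * qib_init() and qib_register_ib_device() succeeded; otherwise the
 * partially set up unit is torn down again further below.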
1517f931551bSRalph Campbell */ 1518f931551bSRalph Campbell if (!qib_mini_init && !initfail && !ret) 1519f931551bSRalph Campbell dd->flags |= QIB_INITTED; 1520f931551bSRalph Campbell 1521f931551bSRalph Campbell j = qib_device_create(dd); 1522f931551bSRalph Campbell if (j) 1523f931551bSRalph Campbell qib_dev_err(dd, "Failed to create /dev devices: %d\n", -j); 1524f931551bSRalph Campbell j = qibfs_add(dd); 1525f931551bSRalph Campbell if (j) 1526f931551bSRalph Campbell qib_dev_err(dd, "Failed filesystem setup for counters: %d\n", 1527f931551bSRalph Campbell -j); 1528f931551bSRalph Campbell 1529f931551bSRalph Campbell if (qib_mini_init || initfail || ret) { 1530f931551bSRalph Campbell qib_stop_timers(dd); 1531f0626710STejun Heo flush_workqueue(ib_wq); 1532f931551bSRalph Campbell for (pidx = 0; pidx < dd->num_pports; ++pidx) 1533f931551bSRalph Campbell dd->f_quiet_serdes(dd->pport + pidx); 1534756a33b8SRalph Campbell if (qib_mini_init) 1535756a33b8SRalph Campbell goto bail; 1536756a33b8SRalph Campbell if (!j) { 1537756a33b8SRalph Campbell (void) qibfs_remove(dd); 1538756a33b8SRalph Campbell qib_device_remove(dd); 1539756a33b8SRalph Campbell } 1540756a33b8SRalph Campbell if (!ret) 1541756a33b8SRalph Campbell qib_unregister_ib_device(dd); 1542756a33b8SRalph Campbell qib_postinit_cleanup(dd); 1543f931551bSRalph Campbell if (initfail) 1544f931551bSRalph Campbell ret = initfail; 1545f931551bSRalph Campbell goto bail; 1546f931551bSRalph Campbell } 1547f931551bSRalph Campbell 1548f931551bSRalph Campbell if (!qib_wc_pat) { 1549f931551bSRalph Campbell ret = qib_enable_wc(dd); 1550f931551bSRalph Campbell if (ret) { 15517fac3301SMike Marciniszyn qib_dev_err(dd, 15527fac3301SMike Marciniszyn "Write combining not enabled (err %d): performance may be poor\n", 1553f931551bSRalph Campbell -ret); 1554f931551bSRalph Campbell ret = 0; 1555f931551bSRalph Campbell } 1556f931551bSRalph Campbell } 1557f931551bSRalph Campbell 1558f931551bSRalph Campbell qib_verify_pioperf(dd); 1559f931551bSRalph Campbell bail: 1560f931551bSRalph Campbell return ret; 1561f931551bSRalph Campbell } 1562f931551bSRalph Campbell 15631e6d9abeSGreg Kroah-Hartman static void qib_remove_one(struct pci_dev *pdev) 1564f931551bSRalph Campbell { 1565f931551bSRalph Campbell struct qib_devdata *dd = pci_get_drvdata(pdev); 1566f931551bSRalph Campbell int ret; 1567f931551bSRalph Campbell 1568f931551bSRalph Campbell /* unregister from IB core */ 1569f931551bSRalph Campbell qib_unregister_ib_device(dd); 1570f931551bSRalph Campbell 1571f931551bSRalph Campbell /* 1572f931551bSRalph Campbell * Disable the IB link, disable interrupts on the device, 1573f931551bSRalph Campbell * clear dma engines, etc. 
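 * (Skipped for qib_mini_init loads; timers are still stopped and the
 * shared ib_wq is flushed before the per-unit state is torn down.)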
1574f931551bSRalph Campbell */ 1575f931551bSRalph Campbell if (!qib_mini_init) 1576f931551bSRalph Campbell qib_shutdown_device(dd); 1577f931551bSRalph Campbell 1578f931551bSRalph Campbell qib_stop_timers(dd); 1579f931551bSRalph Campbell 1580f0626710STejun Heo /* wait until all of our (qsfp) queue_work() calls complete */ 1581f0626710STejun Heo flush_workqueue(ib_wq); 1582f931551bSRalph Campbell 1583f931551bSRalph Campbell ret = qibfs_remove(dd); 1584f931551bSRalph Campbell if (ret) 1585f931551bSRalph Campbell qib_dev_err(dd, "Failed counters filesystem cleanup: %d\n", 1586f931551bSRalph Campbell -ret); 1587f931551bSRalph Campbell 1588f931551bSRalph Campbell qib_device_remove(dd); 1589f931551bSRalph Campbell 1590f931551bSRalph Campbell qib_postinit_cleanup(dd); 1591f931551bSRalph Campbell } 1592f931551bSRalph Campbell 1593f931551bSRalph Campbell /** 1594f931551bSRalph Campbell * qib_create_rcvhdrq - create a receive header queue 1595f931551bSRalph Campbell * @dd: the qlogic_ib device 1596f931551bSRalph Campbell * @rcd: the context data 1597f931551bSRalph Campbell * 1598f931551bSRalph Campbell * This must be contiguous memory (from an i/o perspective), and must be 1599f931551bSRalph Campbell * DMA'able (which means for some systems, it will go through an IOMMU, 1600f931551bSRalph Campbell * or be forced into a low address range). 1601f931551bSRalph Campbell */ 1602f931551bSRalph Campbell int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd) 1603f931551bSRalph Campbell { 1604f931551bSRalph Campbell unsigned amt; 1605e0f30bacSRamkrishna Vepa int old_node_id; 1606f931551bSRalph Campbell 1607f931551bSRalph Campbell if (!rcd->rcvhdrq) { 1608f931551bSRalph Campbell dma_addr_t phys_hdrqtail; 1609f931551bSRalph Campbell gfp_t gfp_flags; 1610f931551bSRalph Campbell 1611f931551bSRalph Campbell amt = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize * 1612f931551bSRalph Campbell sizeof(u32), PAGE_SIZE); 1613f931551bSRalph Campbell gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ? 
1614f931551bSRalph Campbell GFP_USER : GFP_KERNEL; 1615e0f30bacSRamkrishna Vepa 1616e0f30bacSRamkrishna Vepa old_node_id = dev_to_node(&dd->pcidev->dev); 1617e0f30bacSRamkrishna Vepa set_dev_node(&dd->pcidev->dev, rcd->node_id); 1618f931551bSRalph Campbell rcd->rcvhdrq = dma_alloc_coherent( 1619f931551bSRalph Campbell &dd->pcidev->dev, amt, &rcd->rcvhdrq_phys, 1620f931551bSRalph Campbell gfp_flags | __GFP_COMP); 1621e0f30bacSRamkrishna Vepa set_dev_node(&dd->pcidev->dev, old_node_id); 1622f931551bSRalph Campbell 1623f931551bSRalph Campbell if (!rcd->rcvhdrq) { 16247fac3301SMike Marciniszyn qib_dev_err(dd, 16257fac3301SMike Marciniszyn "attempt to allocate %d bytes for ctxt %u rcvhdrq failed\n", 1626f931551bSRalph Campbell amt, rcd->ctxt); 1627f931551bSRalph Campbell goto bail; 1628f931551bSRalph Campbell } 1629f931551bSRalph Campbell 1630f931551bSRalph Campbell if (rcd->ctxt >= dd->first_user_ctxt) { 1631f931551bSRalph Campbell rcd->user_event_mask = vmalloc_user(PAGE_SIZE); 1632f931551bSRalph Campbell if (!rcd->user_event_mask) 1633f931551bSRalph Campbell goto bail_free_hdrq; 1634f931551bSRalph Campbell } 1635f931551bSRalph Campbell 1636f931551bSRalph Campbell if (!(dd->flags & QIB_NODMA_RTAIL)) { 1637e0f30bacSRamkrishna Vepa set_dev_node(&dd->pcidev->dev, rcd->node_id); 1638f931551bSRalph Campbell rcd->rcvhdrtail_kvaddr = dma_alloc_coherent( 1639f931551bSRalph Campbell &dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail, 1640f931551bSRalph Campbell gfp_flags); 1641e0f30bacSRamkrishna Vepa set_dev_node(&dd->pcidev->dev, old_node_id); 1642f931551bSRalph Campbell if (!rcd->rcvhdrtail_kvaddr) 1643f931551bSRalph Campbell goto bail_free; 1644f931551bSRalph Campbell rcd->rcvhdrqtailaddr_phys = phys_hdrqtail; 1645f931551bSRalph Campbell } 1646f931551bSRalph Campbell 1647f931551bSRalph Campbell rcd->rcvhdrq_size = amt; 1648f931551bSRalph Campbell } 1649f931551bSRalph Campbell 1650f931551bSRalph Campbell /* clear for security and sanity on each use */ 1651f931551bSRalph Campbell memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size); 1652f931551bSRalph Campbell if (rcd->rcvhdrtail_kvaddr) 1653f931551bSRalph Campbell memset(rcd->rcvhdrtail_kvaddr, 0, PAGE_SIZE); 1654f931551bSRalph Campbell return 0; 1655f931551bSRalph Campbell 1656f931551bSRalph Campbell bail_free: 16577fac3301SMike Marciniszyn qib_dev_err(dd, 16587fac3301SMike Marciniszyn "attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n", 16597fac3301SMike Marciniszyn rcd->ctxt); 1660f931551bSRalph Campbell vfree(rcd->user_event_mask); 1661f931551bSRalph Campbell rcd->user_event_mask = NULL; 1662f931551bSRalph Campbell bail_free_hdrq: 1663f931551bSRalph Campbell dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq, 1664f931551bSRalph Campbell rcd->rcvhdrq_phys); 1665f931551bSRalph Campbell rcd->rcvhdrq = NULL; 1666f931551bSRalph Campbell bail: 1667f931551bSRalph Campbell return -ENOMEM; 1668f931551bSRalph Campbell } 1669f931551bSRalph Campbell 1670f931551bSRalph Campbell /** 1671f931551bSRalph Campbell * qib_setup_eagerbufs - allocate eager buffers, both kernel and user contexts. 1672f931551bSRalph Campbell * @rcd: the context we are setting up. 1673f931551bSRalph Campbell * 1674f931551bSRalph Campbell * Allocate the eager TID buffers and program them into the chip. 1675f931551bSRalph Campbell * They are no longer completely contiguous; we do multiple allocation 1676f931551bSRalph Campbell * calls. Otherwise we get the OOM code involved, by asking for too 1677f931551bSRalph Campbell * much per call, with disastrous results on some kernels.
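 *
 * Each chunk is a single dma_alloc_coherent() allocation of
 * rcd->rcvegrbuf_size bytes holding rcvegrbufs_perchunk eager buffers;
 * the bus address of each buffer is then written into the chip's eager
 * TID array through dd->f_put_tid().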
1678f931551bSRalph Campbell */ 1679f931551bSRalph Campbell int qib_setup_eagerbufs(struct qib_ctxtdata *rcd) 1680f931551bSRalph Campbell { 1681f931551bSRalph Campbell struct qib_devdata *dd = rcd->dd; 1682f931551bSRalph Campbell unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff; 1683f931551bSRalph Campbell size_t size; 1684f931551bSRalph Campbell gfp_t gfp_flags; 1685e0f30bacSRamkrishna Vepa int old_node_id; 1686f931551bSRalph Campbell 1687f931551bSRalph Campbell /* 1688f931551bSRalph Campbell * GFP_USER, but without GFP_FS, so buffer cache can be 1689f931551bSRalph Campbell * coalesced (we hope); otherwise, even at order 4, 1690f931551bSRalph Campbell * heavy filesystem activity makes these fail, and we can 1691f931551bSRalph Campbell * use compound pages. 1692f931551bSRalph Campbell */ 1693f931551bSRalph Campbell gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP; 1694f931551bSRalph Campbell 1695f931551bSRalph Campbell egrcnt = rcd->rcvegrcnt; 1696f931551bSRalph Campbell egroff = rcd->rcvegr_tid_base; 1697f931551bSRalph Campbell egrsize = dd->rcvegrbufsize; 1698f931551bSRalph Campbell 1699f931551bSRalph Campbell chunk = rcd->rcvegrbuf_chunks; 1700f931551bSRalph Campbell egrperchunk = rcd->rcvegrbufs_perchunk; 1701f931551bSRalph Campbell size = rcd->rcvegrbuf_size; 1702f931551bSRalph Campbell if (!rcd->rcvegrbuf) { 1703f931551bSRalph Campbell rcd->rcvegrbuf = 1704e0f30bacSRamkrishna Vepa kzalloc_node(chunk * sizeof(rcd->rcvegrbuf[0]), 1705e0f30bacSRamkrishna Vepa GFP_KERNEL, rcd->node_id); 1706f931551bSRalph Campbell if (!rcd->rcvegrbuf) 1707f931551bSRalph Campbell goto bail; 1708f931551bSRalph Campbell } 1709f931551bSRalph Campbell if (!rcd->rcvegrbuf_phys) { 1710f931551bSRalph Campbell rcd->rcvegrbuf_phys = 1711e0f30bacSRamkrishna Vepa kmalloc_node(chunk * sizeof(rcd->rcvegrbuf_phys[0]), 1712e0f30bacSRamkrishna Vepa GFP_KERNEL, rcd->node_id); 1713f931551bSRalph Campbell if (!rcd->rcvegrbuf_phys) 1714f931551bSRalph Campbell goto bail_rcvegrbuf; 1715f931551bSRalph Campbell } 1716f931551bSRalph Campbell for (e = 0; e < rcd->rcvegrbuf_chunks; e++) { 1717f931551bSRalph Campbell if (rcd->rcvegrbuf[e]) 1718f931551bSRalph Campbell continue; 1719e0f30bacSRamkrishna Vepa 1720e0f30bacSRamkrishna Vepa old_node_id = dev_to_node(&dd->pcidev->dev); 1721e0f30bacSRamkrishna Vepa set_dev_node(&dd->pcidev->dev, rcd->node_id); 1722f931551bSRalph Campbell rcd->rcvegrbuf[e] = 1723f931551bSRalph Campbell dma_alloc_coherent(&dd->pcidev->dev, size, 1724f931551bSRalph Campbell &rcd->rcvegrbuf_phys[e], 1725f931551bSRalph Campbell gfp_flags); 1726e0f30bacSRamkrishna Vepa set_dev_node(&dd->pcidev->dev, old_node_id); 1727f931551bSRalph Campbell if (!rcd->rcvegrbuf[e]) 1728f931551bSRalph Campbell goto bail_rcvegrbuf_phys; 1729f931551bSRalph Campbell } 1730f931551bSRalph Campbell 1731f931551bSRalph Campbell rcd->rcvegr_phys = rcd->rcvegrbuf_phys[0]; 1732f931551bSRalph Campbell 1733f931551bSRalph Campbell for (e = chunk = 0; chunk < rcd->rcvegrbuf_chunks; chunk++) { 1734f931551bSRalph Campbell dma_addr_t pa = rcd->rcvegrbuf_phys[chunk]; 1735f931551bSRalph Campbell unsigned i; 1736f931551bSRalph Campbell 17375df4223aSRalph Campbell /* clear for security and sanity on each use */ 17385df4223aSRalph Campbell memset(rcd->rcvegrbuf[chunk], 0, size); 17395df4223aSRalph Campbell 1740f931551bSRalph Campbell for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) { 1741f931551bSRalph Campbell dd->f_put_tid(dd, e + egroff + 1742f931551bSRalph Campbell (u64 __iomem *) 1743f931551bSRalph Campbell ((char __iomem *) 
1744f931551bSRalph Campbell dd->kregbase + 1745f931551bSRalph Campbell dd->rcvegrbase), 1746f931551bSRalph Campbell RCVHQ_RCV_TYPE_EAGER, pa); 1747f931551bSRalph Campbell pa += egrsize; 1748f931551bSRalph Campbell } 1749f931551bSRalph Campbell cond_resched(); /* don't hog the cpu */ 1750f931551bSRalph Campbell } 1751f931551bSRalph Campbell 1752f931551bSRalph Campbell return 0; 1753f931551bSRalph Campbell 1754f931551bSRalph Campbell bail_rcvegrbuf_phys: 1755f931551bSRalph Campbell for (e = 0; e < rcd->rcvegrbuf_chunks && rcd->rcvegrbuf[e]; e++) 1756f931551bSRalph Campbell dma_free_coherent(&dd->pcidev->dev, size, 1757f931551bSRalph Campbell rcd->rcvegrbuf[e], rcd->rcvegrbuf_phys[e]); 1758f931551bSRalph Campbell kfree(rcd->rcvegrbuf_phys); 1759f931551bSRalph Campbell rcd->rcvegrbuf_phys = NULL; 1760f931551bSRalph Campbell bail_rcvegrbuf: 1761f931551bSRalph Campbell kfree(rcd->rcvegrbuf); 1762f931551bSRalph Campbell rcd->rcvegrbuf = NULL; 1763f931551bSRalph Campbell bail: 1764f931551bSRalph Campbell return -ENOMEM; 1765f931551bSRalph Campbell } 1766f931551bSRalph Campbell 1767fce24a9dSDave Olson /* 1768fce24a9dSDave Olson * Note: Changes to this routine should be mirrored 1769fce24a9dSDave Olson * for the diagnostics routine qib_remap_ioaddr32(). 1770fce24a9dSDave Olson * There is also related code for VL15 buffers in qib_init_7322_variables(). 1771fce24a9dSDave Olson * The teardown code that unmaps is in qib_pcie_ddcleanup() 1772fce24a9dSDave Olson */ 1773f931551bSRalph Campbell int init_chip_wc_pat(struct qib_devdata *dd, u32 vl15buflen) 1774f931551bSRalph Campbell { 1775f931551bSRalph Campbell u64 __iomem *qib_kregbase = NULL; 1776f931551bSRalph Campbell void __iomem *qib_piobase = NULL; 1777f931551bSRalph Campbell u64 __iomem *qib_userbase = NULL; 1778f931551bSRalph Campbell u64 qib_kreglen; 1779f931551bSRalph Campbell u64 qib_pio2koffset = dd->piobufbase & 0xffffffff; 1780f931551bSRalph Campbell u64 qib_pio4koffset = dd->piobufbase >> 32; 1781f931551bSRalph Campbell u64 qib_pio2klen = dd->piobcnt2k * dd->palign; 1782f931551bSRalph Campbell u64 qib_pio4klen = dd->piobcnt4k * dd->align4k; 1783f931551bSRalph Campbell u64 qib_physaddr = dd->physaddr; 1784f931551bSRalph Campbell u64 qib_piolen; 1785f931551bSRalph Campbell u64 qib_userlen = 0; 1786f931551bSRalph Campbell 1787f931551bSRalph Campbell /* 1788f931551bSRalph Campbell * Free the old mapping because the kernel will try to reuse the 1789f931551bSRalph Campbell * old mapping and not create a new mapping with the 1790f931551bSRalph Campbell * write combining attribute. 
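 * The registers are then remapped uncached with ioremap_nocache() while
 * the PIO buffer range gets a separate ioremap_wc() mapping, so write
 * combining is applied only to the send buffers; a user-register region
 * that lies above the PIO buffers is mapped uncached as well.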
1791f931551bSRalph Campbell */ 1792f931551bSRalph Campbell iounmap(dd->kregbase); 1793f931551bSRalph Campbell dd->kregbase = NULL; 1794f931551bSRalph Campbell 1795f931551bSRalph Campbell /* 1796f931551bSRalph Campbell * Assumes chip address space looks like: 1797f931551bSRalph Campbell * - kregs + sregs + cregs + uregs (in any order) 1798f931551bSRalph Campbell * - piobufs (2K and 4K bufs in either order) 1799f931551bSRalph Campbell * or: 1800f931551bSRalph Campbell * - kregs + sregs + cregs (in any order) 1801f931551bSRalph Campbell * - piobufs (2K and 4K bufs in either order) 1802f931551bSRalph Campbell * - uregs 1803f931551bSRalph Campbell */ 1804f931551bSRalph Campbell if (dd->piobcnt4k == 0) { 1805f931551bSRalph Campbell qib_kreglen = qib_pio2koffset; 1806f931551bSRalph Campbell qib_piolen = qib_pio2klen; 1807f931551bSRalph Campbell } else if (qib_pio2koffset < qib_pio4koffset) { 1808f931551bSRalph Campbell qib_kreglen = qib_pio2koffset; 1809f931551bSRalph Campbell qib_piolen = qib_pio4koffset + qib_pio4klen - qib_kreglen; 1810f931551bSRalph Campbell } else { 1811f931551bSRalph Campbell qib_kreglen = qib_pio4koffset; 1812f931551bSRalph Campbell qib_piolen = qib_pio2koffset + qib_pio2klen - qib_kreglen; 1813f931551bSRalph Campbell } 1814f931551bSRalph Campbell qib_piolen += vl15buflen; 1815f931551bSRalph Campbell /* Map just the configured ports (not all hw ports) */ 1816f931551bSRalph Campbell if (dd->uregbase > qib_kreglen) 1817f931551bSRalph Campbell qib_userlen = dd->ureg_align * dd->cfgctxts; 1818f931551bSRalph Campbell 1819f931551bSRalph Campbell /* Sanity checks passed, now create the new mappings */ 1820f931551bSRalph Campbell qib_kregbase = ioremap_nocache(qib_physaddr, qib_kreglen); 1821f931551bSRalph Campbell if (!qib_kregbase) 1822f931551bSRalph Campbell goto bail; 1823f931551bSRalph Campbell 1824f931551bSRalph Campbell qib_piobase = ioremap_wc(qib_physaddr + qib_kreglen, qib_piolen); 1825f931551bSRalph Campbell if (!qib_piobase) 1826f931551bSRalph Campbell goto bail_kregbase; 1827f931551bSRalph Campbell 1828f931551bSRalph Campbell if (qib_userlen) { 1829f931551bSRalph Campbell qib_userbase = ioremap_nocache(qib_physaddr + dd->uregbase, 1830f931551bSRalph Campbell qib_userlen); 1831f931551bSRalph Campbell if (!qib_userbase) 1832f931551bSRalph Campbell goto bail_piobase; 1833f931551bSRalph Campbell } 1834f931551bSRalph Campbell 1835f931551bSRalph Campbell dd->kregbase = qib_kregbase; 1836f931551bSRalph Campbell dd->kregend = (u64 __iomem *) 1837f931551bSRalph Campbell ((char __iomem *) qib_kregbase + qib_kreglen); 1838f931551bSRalph Campbell dd->piobase = qib_piobase; 1839f931551bSRalph Campbell dd->pio2kbase = (void __iomem *) 1840f931551bSRalph Campbell (((char __iomem *) dd->piobase) + 1841f931551bSRalph Campbell qib_pio2koffset - qib_kreglen); 1842f931551bSRalph Campbell if (dd->piobcnt4k) 1843f931551bSRalph Campbell dd->pio4kbase = (void __iomem *) 1844f931551bSRalph Campbell (((char __iomem *) dd->piobase) + 1845f931551bSRalph Campbell qib_pio4koffset - qib_kreglen); 1846f931551bSRalph Campbell if (qib_userlen) 1847f931551bSRalph Campbell /* ureg will now be accessed relative to dd->userbase */ 1848f931551bSRalph Campbell dd->userbase = qib_userbase; 1849f931551bSRalph Campbell return 0; 1850f931551bSRalph Campbell 1851f931551bSRalph Campbell bail_piobase: 1852f931551bSRalph Campbell iounmap(qib_piobase); 1853f931551bSRalph Campbell bail_kregbase: 1854f931551bSRalph Campbell iounmap(qib_kregbase); 1855f931551bSRalph Campbell bail: 1856f931551bSRalph Campbell return 
-ENOMEM; 1857f931551bSRalph Campbell } 1858