/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>
#include <rdma/rdma_vt.h>

#include "hfi.h"
#include "device.h"
#include "common.h"
#include "trace.h"
#include "mad.h"
#include "sdma.h"
#include "debugfs.h"
#include "verbs.h"
#include "aspm.h"

#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

/*
 * min buffers we want to have per context, after driver
 */
#define HFI1_MIN_USER_CTXT_BUFCNT 7

#define HFI1_MIN_HDRQ_EGRBUF_CNT 2
#define HFI1_MAX_HDRQ_EGRBUF_CNT 16352
#define HFI1_MIN_EAGER_BUFFER_SIZE (4 * 1024) /* 4KB */
#define HFI1_MAX_EAGER_BUFFER_SIZE (256 * 1024) /* 256KB */

/*
 * Number of user receive contexts we are configured to use (to allow for more
 * pio buffers per ctxt, etc.)  Zero means use one user context per CPU.
 */
int num_user_contexts = -1;
module_param_named(num_user_contexts, num_user_contexts, int, S_IRUGO);
MODULE_PARM_DESC(
	num_user_contexts, "Set max number of user contexts to use");

uint krcvqs[RXE_NUM_DATA_VL];
int krcvqsset;
module_param_array(krcvqs, uint, &krcvqsset, S_IRUGO);
MODULE_PARM_DESC(krcvqs, "Array of the number of non-control kernel receive queues by VL");

/* computed based on above array */
unsigned n_krcvqs;

static unsigned hfi1_rcvarr_split = 25;
module_param_named(rcvarr_split, hfi1_rcvarr_split, uint, S_IRUGO);
MODULE_PARM_DESC(rcvarr_split, "Percent of context's RcvArray entries used for Eager buffers");

static uint eager_buffer_size = (2 << 20); /* 2MB */
module_param(eager_buffer_size, uint, S_IRUGO);
MODULE_PARM_DESC(eager_buffer_size, "Size of the eager buffers, default: 2MB");

static uint rcvhdrcnt = 2048; /* 2x the max eager buffer count */
module_param_named(rcvhdrcnt, rcvhdrcnt, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrcnt, "Receive header queue count (default 2048)");

static uint hfi1_hdrq_entsize = 32;
module_param_named(hdrq_entsize, hfi1_hdrq_entsize, uint, S_IRUGO);
MODULE_PARM_DESC(hdrq_entsize, "Size of header queue entries: 2 - 8B, 16 - 64B (default), 32 - 128B");

unsigned int user_credit_return_threshold = 33; /* default is 33% */
module_param(user_credit_return_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(user_credit_return_threshold, "Credit return threshold for user send contexts, return when unreturned credits pass this many blocks (in percent of allocated blocks, 0 is off)");

static inline u64 encode_rcv_header_entry_size(u16);

static struct idr hfi1_unit_table;
u32 hfi1_cpulist_count;
unsigned long *hfi1_cpulist;
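
/*
 * Usage illustration (values hypothetical): loading the driver with
 *
 *	modprobe hfi1 krcvqs=4,2 rcvarr_split=50
 *
 * requests 4 non-control kernel receive queues on VL0 and 2 on VL1,
 * and devotes 50% of each context's RcvArray entries to eager buffers
 * instead of the default 25%.
 */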

/*
 * Common code for creating the receive context array.
 */
int hfi1_create_ctxts(struct hfi1_devdata *dd)
{
	unsigned i;
	int ret;

	/* Control context has to be always 0 */
	BUILD_BUG_ON(HFI1_CTRL_CTXT != 0);

	dd->rcd = kzalloc_node(dd->num_rcv_contexts * sizeof(*dd->rcd),
			       GFP_KERNEL, dd->node);
	if (!dd->rcd)
		goto nomem;

	/* create one or more kernel contexts */
	for (i = 0; i < dd->first_user_ctxt; ++i) {
		struct hfi1_pportdata *ppd;
		struct hfi1_ctxtdata *rcd;

		ppd = dd->pport + (i % dd->num_pports);
		rcd = hfi1_create_ctxtdata(ppd, i, dd->node);
		if (!rcd) {
			dd_dev_err(dd,
				   "Unable to allocate kernel receive context, failing\n");
			goto nomem;
		}
		/*
		 * Set up the kernel context flags here and now because they
		 * use default values for all receive side memories.  User
		 * contexts will be handled as they are created.
		 */
		rcd->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
			HFI1_CAP_KGET(NODROP_RHQ_FULL) |
			HFI1_CAP_KGET(NODROP_EGR_FULL) |
			HFI1_CAP_KGET(DMA_RTAIL);

		/* Control context must use DMA_RTAIL */
		if (rcd->ctxt == HFI1_CTRL_CTXT)
			rcd->flags |= HFI1_CAP_DMA_RTAIL;
		rcd->seq_cnt = 1;

		rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node);
		if (!rcd->sc) {
			dd_dev_err(dd,
				   "Unable to allocate kernel send context, failing\n");
			dd->rcd[rcd->ctxt] = NULL;
			hfi1_free_ctxtdata(dd, rcd);
			goto nomem;
		}

		ret = hfi1_init_ctxt(rcd->sc);
		if (ret < 0) {
			dd_dev_err(dd,
				   "Failed to setup kernel receive context, failing\n");
			sc_free(rcd->sc);
			dd->rcd[rcd->ctxt] = NULL;
			hfi1_free_ctxtdata(dd, rcd);
			ret = -EFAULT;
			goto bail;
		}
	}

	/*
	 * Initialize aspm, to be done after gen3 transition and setting up
	 * contexts and before enabling interrupts
	 */
	aspm_init(dd);

	return 0;
nomem:
	ret = -ENOMEM;
bail:
	kfree(dd->rcd);
	dd->rcd = NULL;
	return ret;
}
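
/*
 * Note on hfi1_create_ctxts() above: kernel contexts are spread across
 * ports round-robin via (i % dd->num_pports), so on a hypothetical
 * two-port device contexts 0, 2, 4, ... map to port index 0 and
 * contexts 1, 3, 5, ... to port index 1.
 */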

/*
 * Common code for user and kernel context setup.
 */
struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt,
					   int numa)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct hfi1_ctxtdata *rcd;
	unsigned kctxt_ngroups = 0;
	u32 base;

	if (dd->rcv_entries.nctxt_extra >
	    dd->num_rcv_contexts - dd->first_user_ctxt)
		kctxt_ngroups = (dd->rcv_entries.nctxt_extra -
				 (dd->num_rcv_contexts - dd->first_user_ctxt));
	rcd = kzalloc(sizeof(*rcd), GFP_KERNEL);
	if (rcd) {
		u32 rcvtids, max_entries;

		hfi1_cdbg(PROC, "setting up context %u\n", ctxt);

		INIT_LIST_HEAD(&rcd->qp_wait_list);
		rcd->ppd = ppd;
		rcd->dd = dd;
		rcd->cnt = 1;
		rcd->ctxt = ctxt;
		dd->rcd[ctxt] = rcd;
		rcd->numa_id = numa;
		rcd->rcv_array_groups = dd->rcv_entries.ngroups;

		mutex_init(&rcd->exp_lock);

		/*
		 * Calculate the context's RcvArray entry starting point.
		 * We do this here because we have to take into account all
		 * the RcvArray entries that previous contexts would have
		 * taken, and we have to account for any extra groups
		 * assigned to the kernel or user contexts.
		 */
		if (ctxt < dd->first_user_ctxt) {
			if (ctxt < kctxt_ngroups) {
				base = ctxt * (dd->rcv_entries.ngroups + 1);
				rcd->rcv_array_groups++;
			} else {
				base = kctxt_ngroups +
					(ctxt * dd->rcv_entries.ngroups);
			}
		} else {
			u16 ct = ctxt - dd->first_user_ctxt;

			base = ((dd->n_krcv_queues * dd->rcv_entries.ngroups) +
				kctxt_ngroups);
			if (ct < dd->rcv_entries.nctxt_extra) {
				base += ct * (dd->rcv_entries.ngroups + 1);
				rcd->rcv_array_groups++;
			} else {
				base += dd->rcv_entries.nctxt_extra +
					(ct * dd->rcv_entries.ngroups);
			}
		}
		rcd->eager_base = base * dd->rcv_entries.group_size;
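
		/*
		 * Worked example for the calculation above (numbers
		 * illustrative): with ngroups = 8, group_size = 8 and no
		 * extra kernel groups, kernel context 2 starts at
		 * base = 2 * 8 = 16 groups, so eager_base = 16 * 8 = 128
		 * RcvArray entries.
		 */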

		/* Validate and initialize Rcv Hdr Q variables */
		if (rcvhdrcnt % HDRQ_INCREMENT) {
			dd_dev_err(dd,
				   "ctxt%u: header queue count %d must be divisible by %lu\n",
				   rcd->ctxt, rcvhdrcnt, HDRQ_INCREMENT);
			goto bail;
		}
		rcd->rcvhdrq_cnt = rcvhdrcnt;
		rcd->rcvhdrqentsize = hfi1_hdrq_entsize;
		/*
		 * Simple Eager buffer allocation: we have already
		 * pre-allocated the number of RcvArray entry groups.
		 * Each ctxtdata structure holds the number of groups for
		 * that context.
		 *
		 * To follow CSR requirements and maintain cacheline
		 * alignment, make sure all sizes and bases are multiples
		 * of group_size.
		 *
		 * The expected entry count is what is left after assigning
		 * eager.
		 */
		max_entries = rcd->rcv_array_groups *
			dd->rcv_entries.group_size;
		rcvtids = ((max_entries * hfi1_rcvarr_split) / 100);
		rcd->egrbufs.count = round_down(rcvtids,
						dd->rcv_entries.group_size);
		if (rcd->egrbufs.count > MAX_EAGER_ENTRIES) {
			dd_dev_err(dd, "ctxt%u: requested too many RcvArray entries.\n",
				   rcd->ctxt);
			rcd->egrbufs.count = MAX_EAGER_ENTRIES;
		}
		hfi1_cdbg(PROC,
			  "ctxt%u: max Eager buffer RcvArray entries: %u\n",
			  rcd->ctxt, rcd->egrbufs.count);
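
		/*
		 * Illustration with the defaults: rcvarr_split = 25 and a
		 * hypothetical max_entries = 2048 give rcvtids = 512;
		 * round_down() to a multiple of group_size preserves the
		 * CSR grouping requirement noted above.
		 */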

		/*
		 * Allocate array that will hold the eager buffer accounting
		 * data.
		 * This will allocate the maximum possible buffer count based
		 * on the value of the RcvArray split parameter.
		 * The resulting value will be rounded down to the closest
		 * multiple of dd->rcv_entries.group_size.
		 */
		rcd->egrbufs.buffers = kcalloc(rcd->egrbufs.count,
					       sizeof(*rcd->egrbufs.buffers),
					       GFP_KERNEL);
		if (!rcd->egrbufs.buffers)
			goto bail;
		rcd->egrbufs.rcvtids = kcalloc(rcd->egrbufs.count,
					       sizeof(*rcd->egrbufs.rcvtids),
					       GFP_KERNEL);
		if (!rcd->egrbufs.rcvtids)
			goto bail;
		rcd->egrbufs.size = eager_buffer_size;
		/*
		 * The size of the buffers programmed into the RcvArray
		 * entries needs to be big enough to handle the highest
		 * MTU supported.
		 */
		if (rcd->egrbufs.size < hfi1_max_mtu) {
			rcd->egrbufs.size = __roundup_pow_of_two(hfi1_max_mtu);
			hfi1_cdbg(PROC,
				  "ctxt%u: eager bufs size too small. Adjusting to %zu\n",
				  rcd->ctxt, rcd->egrbufs.size);
		}
		rcd->egrbufs.rcvtid_size = HFI1_MAX_EAGER_BUFFER_SIZE;

		if (ctxt < dd->first_user_ctxt) { /* N/A for PSM contexts */
			rcd->opstats = kzalloc(sizeof(*rcd->opstats),
					       GFP_KERNEL);
			if (!rcd->opstats)
				goto bail;
		}
	}
	return rcd;
bail:
	kfree(rcd->egrbufs.rcvtids);
	kfree(rcd->egrbufs.buffers);
	kfree(rcd);
	return NULL;
}

/*
 * Convert a receive header entry size to the encoding used in the CSR.
 *
 * Return zero if the given size is invalid.
 */
static inline u64 encode_rcv_header_entry_size(u16 size)
{
	/* there are only 3 valid receive header entry sizes */
	if (size == 2)
		return 1;
	else if (size == 16)
		return 2;
	else if (size == 32)
		return 4;
	return 0; /* invalid */
}
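
/*
 * The entry size is given in DWORDs, so the valid values map to
 * 2 -> 8B (encoding 1), 16 -> 64B (encoding 2) and 32 -> 128B
 * (encoding 4), matching the hdrq_entsize module parameter description
 * above.
 */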

/*
 * Select the largest ccti value over all SLs to determine the intra-
 * packet gap for the link.
 *
 * called with cca_timer_lock held (to protect access to cca_timer
 * array), and rcu_read_lock() (to protect access to cc_state).
 */
void set_link_ipg(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct cc_state *cc_state;
	int i;
	u16 cce, ccti_limit, max_ccti = 0;
	u16 shift, mult;
	u64 src;
	u32 current_egress_rate; /* Mbits /sec */
	u32 max_pkt_time;
	/*
	 * max_pkt_time is the maximum packet egress time in units
	 * of the fabric clock period 1/(805 MHz).
	 */

	cc_state = get_cc_state(ppd);

	if (!cc_state)
		/*
		 * This should _never_ happen - rcu_read_lock() is held,
		 * and set_link_ipg() should not be called if cc_state
		 * is NULL.
		 */
		return;

	for (i = 0; i < OPA_MAX_SLS; i++) {
		u16 ccti = ppd->cca_timer[i].ccti;

		if (ccti > max_ccti)
			max_ccti = ccti;
	}

	ccti_limit = cc_state->cct.ccti_limit;
	if (max_ccti > ccti_limit)
		max_ccti = ccti_limit;

	cce = cc_state->cct.entries[max_ccti].entry;
	shift = (cce & 0xc000) >> 14;
	mult = (cce & 0x3fff);

	current_egress_rate = active_egress_rate(ppd);

	max_pkt_time = egress_cycles(ppd->ibmaxlen, current_egress_rate);

	src = (max_pkt_time >> shift) * mult;

	src &= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SMASK;
	src <<= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SHIFT;

	write_csr(dd, SEND_STATIC_RATE_CONTROL, src);
}
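
/*
 * CCT entry decode illustration for set_link_ipg() above (value
 * hypothetical): cce = 0x8064 gives shift = (0x8064 & 0xc000) >> 14 = 2
 * and mult = 0x8064 & 0x3fff = 100, so the static rate control reload
 * becomes (max_pkt_time >> 2) * 100 fabric-clock cycles.
 */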

static enum hrtimer_restart cca_timer_fn(struct hrtimer *t)
{
	struct cca_timer *cca_timer;
	struct hfi1_pportdata *ppd;
	int sl;
	u16 ccti_timer, ccti_min;
	struct cc_state *cc_state;
	unsigned long flags;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	cca_timer = container_of(t, struct cca_timer, hrtimer);
	ppd = cca_timer->ppd;
	sl = cca_timer->sl;

	rcu_read_lock();

	cc_state = get_cc_state(ppd);

	if (!cc_state) {
		rcu_read_unlock();
		return HRTIMER_NORESTART;
	}

	/*
	 * 1) decrement ccti for SL
	 * 2) calculate IPG for link (set_link_ipg())
	 * 3) restart timer, unless ccti is at min value
	 */

	ccti_min = cc_state->cong_setting.entries[sl].ccti_min;
	ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;

	spin_lock_irqsave(&ppd->cca_timer_lock, flags);

	if (cca_timer->ccti > ccti_min) {
		cca_timer->ccti--;
		set_link_ipg(ppd);
	}

	if (cca_timer->ccti > ccti_min) {
		unsigned long nsec = 1024 * ccti_timer;
		/* ccti_timer is in units of 1.024 usec */
		hrtimer_forward_now(t, ns_to_ktime(nsec));
		ret = HRTIMER_RESTART;
	}

	spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);
	rcu_read_unlock();
	return ret;
}
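
/*
 * Timer arithmetic above, worked with a sample value: ccti_timer is in
 * units of 1.024 usec, so ccti_timer = 100 yields nsec = 1024 * 100 =
 * 102400 ns, i.e. the timer refires every 102.4 usec until ccti drains
 * down to ccti_min.
 */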

/*
 * Common code for initializing the physical port structure.
 */
void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
			 struct hfi1_devdata *dd, u8 hw_pidx, u8 port)
{
	int i, size;
	uint default_pkey_idx;

	ppd->dd = dd;
	ppd->hw_pidx = hw_pidx;
	ppd->port = port; /* IB port number, not index */

	default_pkey_idx = 1;

	ppd->pkeys[default_pkey_idx] = DEFAULT_P_KEY;
	if (loopback) {
		hfi1_early_err(&pdev->dev,
			       "Faking data partition 0x8001 in idx %u\n",
			       !default_pkey_idx);
		ppd->pkeys[!default_pkey_idx] = 0x8001;
	}

	INIT_WORK(&ppd->link_vc_work, handle_verify_cap);
	INIT_WORK(&ppd->link_up_work, handle_link_up);
	INIT_WORK(&ppd->link_down_work, handle_link_down);
	INIT_WORK(&ppd->freeze_work, handle_freeze);
	INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade);
	INIT_WORK(&ppd->sma_message_work, handle_sma_message);
	INIT_WORK(&ppd->link_bounce_work, handle_link_bounce);
	INIT_WORK(&ppd->linkstate_active_work, receive_interrupt_work);
	INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event);

	mutex_init(&ppd->hls_lock);
	spin_lock_init(&ppd->sdma_alllock);
	spin_lock_init(&ppd->qsfp_info.qsfp_lock);

	ppd->qsfp_info.ppd = ppd;
	ppd->sm_trap_qp = 0x0;
	ppd->sa_qp = 0x1;

	ppd->hfi1_wq = NULL;

	spin_lock_init(&ppd->cca_timer_lock);

	for (i = 0; i < OPA_MAX_SLS; i++) {
		hrtimer_init(&ppd->cca_timer[i].hrtimer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		ppd->cca_timer[i].ppd = ppd;
		ppd->cca_timer[i].sl = i;
		ppd->cca_timer[i].ccti = 0;
		ppd->cca_timer[i].hrtimer.function = cca_timer_fn;
	}

	ppd->cc_max_table_entries = IB_CC_TABLE_CAP_DEFAULT;

	spin_lock_init(&ppd->cc_state_lock);
	spin_lock_init(&ppd->cc_log_lock);
	size = sizeof(struct cc_state);
	RCU_INIT_POINTER(ppd->cc_state, kzalloc(size, GFP_KERNEL));
	if (!rcu_dereference(ppd->cc_state))
		goto bail;
	return;

bail:
	hfi1_early_err(&pdev->dev,
		       "Congestion Control Agent disabled for port %d\n", port);
}

/*
 * Do initialization for device that is only needed on
 * first detect, not on resets.
 */
static int loadtime_init(struct hfi1_devdata *dd)
{
	return 0;
}

/**
 * init_after_reset - re-initialize after a reset
 * @dd: the hfi1_ib device
 *
 * Sanity check at least some of the values after reset, and
 * ensure no receive or transmit (explicitly, in case the reset
 * failed).
 */
static int init_after_reset(struct hfi1_devdata *dd)
{
	int i;

	/*
	 * Ensure chip does no sends or receives, tail updates, or
	 * pioavail updates while we re-initialize.  This is mostly
	 * for the driver data structures, not chip registers.
	 */
	for (i = 0; i < dd->num_rcv_contexts; i++)
		hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
			     HFI1_RCVCTRL_INTRAVAIL_DIS |
			     HFI1_RCVCTRL_TAILUPD_DIS, i);
	pio_send_control(dd, PSC_GLOBAL_DISABLE);
	for (i = 0; i < dd->num_send_contexts; i++)
		sc_disable(dd->send_contexts[i].sc);

	return 0;
}

static void enable_chip(struct hfi1_devdata *dd)
{
	u32 rcvmask;
	u32 i;

	/* enable PIO send */
	pio_send_control(dd, PSC_GLOBAL_ENABLE);

	/*
	 * Enable kernel ctxts' receive and receive interrupt.
	 * Other ctxts done as user opens and initializes them.
	 */
	for (i = 0; i < dd->first_user_ctxt; ++i) {
		rcvmask = HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB;
		rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
			HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
		if (!HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, MULTI_PKT_EGR))
			rcvmask |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
		if (HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, NODROP_RHQ_FULL))
			rcvmask |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
		if (HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, NODROP_EGR_FULL))
			rcvmask |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
		hfi1_rcvctrl(dd, rcvmask, i);
		sc_enable(dd->rcd[i]->sc);
	}
}

/**
 * create_workqueues - create per port workqueues
 * @dd: the hfi1_ib device
 */
static int create_workqueues(struct hfi1_devdata *dd)
{
	int pidx;
	struct hfi1_pportdata *ppd;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (!ppd->hfi1_wq) {
			ppd->hfi1_wq =
				alloc_workqueue(
				    "hfi%d_%d",
				    WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE,
				    dd->num_sdma,
				    dd->unit, pidx);
			if (!ppd->hfi1_wq)
				goto wq_error;
		}
	}
	return 0;
wq_error:
pr_err("alloc_workqueue failed for port %d\n", pidx + 1); 630f48ad614SDennis Dalessandro for (pidx = 0; pidx < dd->num_pports; ++pidx) { 631f48ad614SDennis Dalessandro ppd = dd->pport + pidx; 632f48ad614SDennis Dalessandro if (ppd->hfi1_wq) { 633f48ad614SDennis Dalessandro destroy_workqueue(ppd->hfi1_wq); 634f48ad614SDennis Dalessandro ppd->hfi1_wq = NULL; 635f48ad614SDennis Dalessandro } 636f48ad614SDennis Dalessandro } 637f48ad614SDennis Dalessandro return -ENOMEM; 638f48ad614SDennis Dalessandro } 639f48ad614SDennis Dalessandro 640f48ad614SDennis Dalessandro /** 641f48ad614SDennis Dalessandro * hfi1_init - do the actual initialization sequence on the chip 642f48ad614SDennis Dalessandro * @dd: the hfi1_ib device 643f48ad614SDennis Dalessandro * @reinit: re-initializing, so don't allocate new memory 644f48ad614SDennis Dalessandro * 645f48ad614SDennis Dalessandro * Do the actual initialization sequence on the chip. This is done 646f48ad614SDennis Dalessandro * both from the init routine called from the PCI infrastructure, and 647f48ad614SDennis Dalessandro * when we reset the chip, or detect that it was reset internally, 648f48ad614SDennis Dalessandro * or it's administratively re-enabled. 649f48ad614SDennis Dalessandro * 650f48ad614SDennis Dalessandro * Memory allocation here and in called routines is only done in 651f48ad614SDennis Dalessandro * the first case (reinit == 0). We have to be careful, because even 652f48ad614SDennis Dalessandro * without memory allocation, we need to re-write all the chip registers 653f48ad614SDennis Dalessandro * TIDs, etc. after the reset or enable has completed. 654f48ad614SDennis Dalessandro */ 655f48ad614SDennis Dalessandro int hfi1_init(struct hfi1_devdata *dd, int reinit) 656f48ad614SDennis Dalessandro { 657f48ad614SDennis Dalessandro int ret = 0, pidx, lastfail = 0; 658f48ad614SDennis Dalessandro unsigned i, len; 659f48ad614SDennis Dalessandro struct hfi1_ctxtdata *rcd; 660f48ad614SDennis Dalessandro struct hfi1_pportdata *ppd; 661f48ad614SDennis Dalessandro 662f48ad614SDennis Dalessandro /* Set up recv low level handlers */ 663f48ad614SDennis Dalessandro dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_EXPECTED] = 664f48ad614SDennis Dalessandro kdeth_process_expected; 665f48ad614SDennis Dalessandro dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_EAGER] = 666f48ad614SDennis Dalessandro kdeth_process_eager; 667f48ad614SDennis Dalessandro dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_IB] = process_receive_ib; 668f48ad614SDennis Dalessandro dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_ERROR] = 669f48ad614SDennis Dalessandro process_receive_error; 670f48ad614SDennis Dalessandro dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_BYPASS] = 671f48ad614SDennis Dalessandro process_receive_bypass; 672f48ad614SDennis Dalessandro dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID5] = 673f48ad614SDennis Dalessandro process_receive_invalid; 674f48ad614SDennis Dalessandro dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID6] = 675f48ad614SDennis Dalessandro process_receive_invalid; 676f48ad614SDennis Dalessandro dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID7] = 677f48ad614SDennis Dalessandro process_receive_invalid; 678f48ad614SDennis Dalessandro dd->rhf_rcv_function_map = dd->normal_rhf_rcv_functions; 679f48ad614SDennis Dalessandro 680f48ad614SDennis Dalessandro /* Set up send low level handlers */ 681f48ad614SDennis Dalessandro dd->process_pio_send = hfi1_verbs_send_pio; 682f48ad614SDennis Dalessandro dd->process_dma_send = hfi1_verbs_send_dma; 683f48ad614SDennis Dalessandro 

	/* Set up send low level handlers */
	dd->process_pio_send = hfi1_verbs_send_pio;
	dd->process_dma_send = hfi1_verbs_send_dma;
	dd->pio_inline_send = pio_copy;

	if (is_ax(dd)) {
		atomic_set(&dd->drop_packet, DROP_PACKET_ON);
		dd->do_drop = 1;
	} else {
		atomic_set(&dd->drop_packet, DROP_PACKET_OFF);
		dd->do_drop = 0;
	}

	/* make sure the link is not "up" */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		ppd->linkup = 0;
	}

	if (reinit)
		ret = init_after_reset(dd);
	else
		ret = loadtime_init(dd);
	if (ret)
		goto done;

	/* allocate dummy tail memory for all receive contexts */
	dd->rcvhdrtail_dummy_kvaddr = dma_zalloc_coherent(
		&dd->pcidev->dev, sizeof(u64),
		&dd->rcvhdrtail_dummy_physaddr,
		GFP_KERNEL);

	if (!dd->rcvhdrtail_dummy_kvaddr) {
		dd_dev_err(dd, "cannot allocate dummy tail memory\n");
		ret = -ENOMEM;
		goto done;
	}

	/* dd->rcd can be NULL if early initialization failed */
	for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
		/*
		 * Set up the (kernel) rcvhdr queue and egr TIDs.  If doing
		 * re-init, the simplest way to handle this is to free
		 * existing, and re-allocate.
		 * Need to re-create rest of ctxt 0 ctxtdata as well.
		 */
		rcd = dd->rcd[i];
		if (!rcd)
			continue;

		rcd->do_interrupt = &handle_receive_interrupt;

		lastfail = hfi1_create_rcvhdrq(dd, rcd);
		if (!lastfail)
			lastfail = hfi1_setup_eagerbufs(rcd);
		if (lastfail) {
			dd_dev_err(dd,
				   "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
			ret = lastfail;
		}
	}
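
	/*
	 * Sizing note for the allocation below (numbers illustrative): with
	 * 160 chip receive contexts, HFI1_MAX_SHARED_CTXTS sub-contexts per
	 * context and an 8-byte event word, len comes out to a handful of
	 * pages, rounded up by PAGE_ALIGN().
	 */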
	/* Allocate enough memory for user event notification. */
	len = PAGE_ALIGN(dd->chip_rcv_contexts * HFI1_MAX_SHARED_CTXTS *
			 sizeof(*dd->events));
	dd->events = vmalloc_user(len);
	if (!dd->events)
		dd_dev_err(dd, "Failed to allocate user events page\n");
	/*
	 * Allocate a page for device and port status.
	 * Page will be shared amongst all user processes.
	 */
	dd->status = vmalloc_user(PAGE_SIZE);
	if (!dd->status)
		dd_dev_err(dd, "Failed to allocate dev status page\n");
	else
		dd->freezelen = PAGE_SIZE - (sizeof(*dd->status) -
					     sizeof(dd->status->freezemsg));
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (dd->status)
			/* Currently, we only have one port */
			ppd->statusp = &dd->status->port;

		set_mtu(ppd);
	}

	/* enable chip even if we have an error, so we can debug cause */
	enable_chip(dd);

done:
	/*
	 * Set status even if port serdes is not initialized
	 * so that diags will work.
	 */
	if (dd->status)
		dd->status->dev |= HFI1_STATUS_CHIP_PRESENT |
			HFI1_STATUS_INITTED;
	if (!ret) {
		/* enable all interrupts from the chip */
		set_intr_state(dd, 1);

		/* chip is OK for user apps; mark it as initialized */
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;

			/*
			 * start the serdes - must be after interrupts are
			 * enabled so we are notified when the link goes up
			 */
			lastfail = bringup_serdes(ppd);
			if (lastfail)
				dd_dev_info(dd,
					    "Failed to bring up port %u\n",
					    ppd->port);

			/*
			 * Set status even if port serdes is not initialized
			 * so that diags will work.
			 */
			if (ppd->statusp)
				*ppd->statusp |= HFI1_STATUS_CHIP_PRESENT |
					HFI1_STATUS_INITTED;
			if (!ppd->link_speed_enabled)
				continue;
		}
	}

	/* if ret is non-zero, we probably should do some cleanup here... */
	return ret;
}

static inline struct hfi1_devdata *__hfi1_lookup(int unit)
{
	return idr_find(&hfi1_unit_table, unit);
}

struct hfi1_devdata *hfi1_lookup(int unit)
{
	struct hfi1_devdata *dd;
	unsigned long flags;

	spin_lock_irqsave(&hfi1_devs_lock, flags);
	dd = __hfi1_lookup(unit);
	spin_unlock_irqrestore(&hfi1_devs_lock, flags);

	return dd;
}

/*
 * Stop the timers during unit shutdown, or after an error late
 * in initialization.
 */
static void stop_timers(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	int pidx;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->led_override_timer.data) {
			del_timer_sync(&ppd->led_override_timer);
			atomic_set(&ppd->led_override_timer_active, 0);
		}
	}
}

/**
 * shutdown_device - shut down a device
 * @dd: the hfi1_ib device
 *
 * This is called to make the device quiet when we are about to
 * unload the driver, and also when the device is administratively
 * disabled.  It does not free any data structures.
 * Everything it does has to be setup again by hfi1_init(dd, 1).
 */
static void shutdown_device(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	unsigned pidx;
	int i;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		ppd->linkup = 0;
		if (ppd->statusp)
			*ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
					   HFI1_STATUS_IB_READY);
	}
	dd->flags &= ~HFI1_INITTED;

	/* mask interrupts, but not errors */
	set_intr_state(dd, 0);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		for (i = 0; i < dd->num_rcv_contexts; i++)
			hfi1_rcvctrl(dd, HFI1_RCVCTRL_TAILUPD_DIS |
				     HFI1_RCVCTRL_CTXT_DIS |
				     HFI1_RCVCTRL_INTRAVAIL_DIS |
				     HFI1_RCVCTRL_PKEY_DIS |
				     HFI1_RCVCTRL_ONE_PKT_EGR_DIS, i);
		/*
		 * Gracefully stop all sends allowing any in progress to
		 * trickle out first.
		 */
		for (i = 0; i < dd->num_send_contexts; i++)
			sc_flush(dd->send_contexts[i].sc);
	}

	/*
	 * Enough for anything that's going to trickle out to have actually
	 * done so.
	 */
	udelay(20);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		/* disable all contexts */
		for (i = 0; i < dd->num_send_contexts; i++)
			sc_disable(dd->send_contexts[i].sc);
		/* disable the send device */
		pio_send_control(dd, PSC_GLOBAL_DISABLE);

		shutdown_led_override(ppd);

		/*
		 * Clear SerdesEnable.
		 * We can't count on interrupts since we are stopping.
		 */
		hfi1_quiet_serdes(ppd);

		if (ppd->hfi1_wq) {
			destroy_workqueue(ppd->hfi1_wq);
			ppd->hfi1_wq = NULL;
		}
	}
	sdma_exit(dd);
}

/**
 * hfi1_free_ctxtdata - free a context's allocated data
 * @dd: the hfi1_ib device
 * @rcd: the ctxtdata structure
 *
 * free up any allocated data for a context
 * This should not touch anything that would affect a simultaneous
 * re-allocation of context data, because it is called after hfi1_mutex
 * is released (and can be called from reinit as well).
 * It should never change any chip state, or global driver state.
 */
void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
	unsigned e;

	if (!rcd)
		return;

	if (rcd->rcvhdrq) {
		dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size,
				  rcd->rcvhdrq, rcd->rcvhdrq_phys);
		rcd->rcvhdrq = NULL;
		if (rcd->rcvhdrtail_kvaddr) {
			dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
					  (void *)rcd->rcvhdrtail_kvaddr,
					  rcd->rcvhdrqtailaddr_phys);
			rcd->rcvhdrtail_kvaddr = NULL;
		}
	}

	/* all the RcvArray entries should have been cleared by now */
	kfree(rcd->egrbufs.rcvtids);

	for (e = 0; e < rcd->egrbufs.alloced; e++) {
		if (rcd->egrbufs.buffers[e].phys)
			dma_free_coherent(&dd->pcidev->dev,
					  rcd->egrbufs.buffers[e].len,
					  rcd->egrbufs.buffers[e].addr,
					  rcd->egrbufs.buffers[e].phys);
	}
	kfree(rcd->egrbufs.buffers);

	sc_free(rcd->sc);
	vfree(rcd->user_event_mask);
	vfree(rcd->subctxt_uregbase);
	vfree(rcd->subctxt_rcvegrbuf);
	vfree(rcd->subctxt_rcvhdr_base);
	kfree(rcd->opstats);
	kfree(rcd);
}
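
/*
 * Note on the asic data handling below: the asic_data block is shared
 * by the (up to) two HFIs on one ASIC.  dds[0] and dds[1] point back at
 * the owning hfi1_devdata structures, so the last device to clear its
 * slot is the one that frees the block.
 */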

/*
 * Release our hold on the shared asic data.  If we are the last one,
 * free the structure.  Must be holding hfi1_devs_lock.
 */
static void release_asic_data(struct hfi1_devdata *dd)
{
	int other;

	if (!dd->asic_data)
		return;
	dd->asic_data->dds[dd->hfi1_id] = NULL;
	other = dd->hfi1_id ? 0 : 1;
	if (!dd->asic_data->dds[other]) {
		/* we are the last holder, free it */
		kfree(dd->asic_data);
	}
	dd->asic_data = NULL;
}

static void __hfi1_free_devdata(struct kobject *kobj)
{
	struct hfi1_devdata *dd =
		container_of(kobj, struct hfi1_devdata, kobj);
	unsigned long flags;

	spin_lock_irqsave(&hfi1_devs_lock, flags);
	idr_remove(&hfi1_unit_table, dd->unit);
	list_del(&dd->list);
	release_asic_data(dd);
	spin_unlock_irqrestore(&hfi1_devs_lock, flags);
	free_platform_config(dd);
	rcu_barrier(); /* wait for rcu callbacks to complete */
	free_percpu(dd->int_counter);
	free_percpu(dd->rcv_limit);
	hfi1_dev_affinity_free(dd);
	free_percpu(dd->send_schedule);
	rvt_dealloc_device(&dd->verbs_dev.rdi);
}

static struct kobj_type hfi1_devdata_type = {
	.release = __hfi1_free_devdata,
};

void hfi1_free_devdata(struct hfi1_devdata *dd)
{
	kobject_put(&dd->kobj);
}
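
/*
 * Lifetime note: hfi1_free_devdata() above only drops the reference
 * taken by kobject_init(); the actual teardown happens in
 * __hfi1_free_devdata() once the kobject's refcount reaches zero.
 */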
/*
 * Allocate our primary per-unit data structure.  Must be done via verbs
 * allocator, because the verbs cleanup process both does cleanup and
 * free of the data structure.
 * "extra" is for chip-specific data.
 *
 * Use the idr mechanism to get a unit number for this unit.
 */
struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
{
	unsigned long flags;
	struct hfi1_devdata *dd;
	int ret, nports;

	/* "extra" is sizeof(struct hfi1_pportdata) * number of ports */
	nports = extra / sizeof(struct hfi1_pportdata);

	dd = (struct hfi1_devdata *)rvt_alloc_device(sizeof(*dd) + extra,
						     nports);
	if (!dd)
		return ERR_PTR(-ENOMEM);
	dd->num_pports = nports;
	dd->pport = (struct hfi1_pportdata *)(dd + 1);

	INIT_LIST_HEAD(&dd->list);
	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&hfi1_devs_lock, flags);

	ret = idr_alloc(&hfi1_unit_table, dd, 0, 0, GFP_NOWAIT);
	if (ret >= 0) {
		dd->unit = ret;
		list_add(&dd->list, &hfi1_dev_list);
	}

	spin_unlock_irqrestore(&hfi1_devs_lock, flags);
	idr_preload_end();

	if (ret < 0) {
		hfi1_early_err(&pdev->dev,
			       "Could not allocate unit ID: error %d\n", -ret);
		goto bail;
	}
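
	/*
	 * On the idr usage above: idr_preload(GFP_KERNEL) pre-allocates
	 * while sleeping is still allowed, so the GFP_NOWAIT idr_alloc()
	 * under hfi1_devs_lock never has to; idr_preload_end() must follow
	 * on the same CPU, since the preload disables preemption.
	 */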
	/*
	 * Initialize all locks for the device. This needs to be as early as
	 * possible so locks are usable.
	 */
	spin_lock_init(&dd->sc_lock);
	spin_lock_init(&dd->sendctrl_lock);
	spin_lock_init(&dd->rcvctrl_lock);
	spin_lock_init(&dd->uctxt_lock);
	spin_lock_init(&dd->hfi1_diag_trans_lock);
	spin_lock_init(&dd->sc_init_lock);
	spin_lock_init(&dd->dc8051_lock);
	spin_lock_init(&dd->dc8051_memlock);
	seqlock_init(&dd->sc2vl_lock);
	spin_lock_init(&dd->sde_map_lock);
	spin_lock_init(&dd->pio_map_lock);
	init_waitqueue_head(&dd->event_queue);

	dd->int_counter = alloc_percpu(u64);
	if (!dd->int_counter) {
		ret = -ENOMEM;
		hfi1_early_err(&pdev->dev,
			       "Could not allocate per-cpu int_counter\n");
		goto bail;
	}

	dd->rcv_limit = alloc_percpu(u64);
	if (!dd->rcv_limit) {
		ret = -ENOMEM;
		hfi1_early_err(&pdev->dev,
			       "Could not allocate per-cpu rcv_limit\n");
		goto bail;
	}

	dd->send_schedule = alloc_percpu(u64);
	if (!dd->send_schedule) {
		ret = -ENOMEM;
		hfi1_early_err(&pdev->dev,
			       "Could not allocate per-cpu send_schedule\n");
		goto bail;
	}

	if (!hfi1_cpulist_count) {
		u32 count = num_online_cpus();

		hfi1_cpulist = kcalloc(BITS_TO_LONGS(count), sizeof(long),
				       GFP_KERNEL);
		if (hfi1_cpulist)
			hfi1_cpulist_count = count;
		else
			hfi1_early_err(
				&pdev->dev,
				"Could not alloc cpulist info, cpu affinity might be wrong\n");
	}
	kobject_init(&dd->kobj, &hfi1_devdata_type);
	return dd;

bail:
	if (!list_empty(&dd->list))
		list_del_init(&dd->list);
	rvt_dealloc_device(&dd->verbs_dev.rdi);
	return ERR_PTR(ret);
}
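
/*
 * Illustrative only: a per-cpu counter such as dd->int_counter is read
 * by summing each CPU's slot, e.g.
 *
 *	u64 total = 0;
 *	int cpu;
 *
 *	for_each_possible_cpu(cpu)
 *		total += *per_cpu_ptr(dd->int_counter, cpu);
 */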
/*
 * Called from freeze mode handlers, and from PCI error
 * reporting code.  Should be paranoid about state of
 * system and data structures.
 */
void hfi1_disable_after_error(struct hfi1_devdata *dd)
{
	if (dd->flags & HFI1_INITTED) {
		u32 pidx;

		dd->flags &= ~HFI1_INITTED;
		if (dd->pport)
			for (pidx = 0; pidx < dd->num_pports; ++pidx) {
				struct hfi1_pportdata *ppd;

				ppd = dd->pport + pidx;
				if (dd->flags & HFI1_PRESENT)
					set_link_state(ppd, HLS_DN_DISABLE);

				if (ppd->statusp)
					*ppd->statusp &= ~HFI1_STATUS_IB_READY;
			}
	}

	/*
	 * Mark as having had an error for driver, and also
	 * for /sys and status word mapped to user programs.
	 * This marks unit as not usable, until reset.
	 */
	if (dd->status)
		dd->status->dev |= HFI1_STATUS_HWERROR;
}

static void remove_one(struct pci_dev *);
static int init_one(struct pci_dev *, const struct pci_device_id *);

#define DRIVER_LOAD_MSG "Intel " DRIVER_NAME " loaded: "
#define PFX DRIVER_NAME ": "

static const struct pci_device_id hfi1_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL0) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL1) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, hfi1_pci_tbl);

static struct pci_driver hfi1_pci_driver = {
	.name = DRIVER_NAME,
	.probe = init_one,
	.remove = remove_one,
	.id_table = hfi1_pci_tbl,
	.err_handler = &hfi1_pci_err_handler,
};

static void __init compute_krcvqs(void)
{
	int i;

	for (i = 0; i < krcvqsset; i++)
		n_krcvqs += krcvqs[i];
}
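
/*
 * Example with hypothetical parameters: loading the module with
 * krcvqs=2,2,2 yields krcvqsset == 3 and n_krcvqs == 6, i.e. the
 * per-VL entries are simply summed.
 */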
/*
 * Do all the generic driver unit- and chip-independent memory
 * allocation and initialization.
 */
static int __init hfi1_mod_init(void)
{
	int ret;

	ret = dev_init();
	if (ret)
		goto bail;

	/* validate max MTU before any devices start */
	if (!valid_opa_max_mtu(hfi1_max_mtu)) {
		pr_err("Invalid max_mtu 0x%x, using 0x%x instead\n",
		       hfi1_max_mtu, HFI1_DEFAULT_MAX_MTU);
		hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
	}
	/* valid CUs run from 1-128 in powers of 2 */
	if (hfi1_cu > 128 || !is_power_of_2(hfi1_cu))
		hfi1_cu = 1;
	/* valid credit return threshold is 0-100, variable is unsigned */
	if (user_credit_return_threshold > 100)
		user_credit_return_threshold = 100;

	compute_krcvqs();
	/*
	 * Sanitize the receive interrupt count here; the timeout must
	 * wait until after the hardware type is known.
	 */
	if (rcv_intr_count > RCV_HDR_HEAD_COUNTER_MASK)
		rcv_intr_count = RCV_HDR_HEAD_COUNTER_MASK;
	/* reject invalid combinations */
	if (rcv_intr_count == 0 && rcv_intr_timeout == 0) {
		pr_err("Invalid mode: both receive interrupt count and available timeout are zero - setting interrupt count to 1\n");
		rcv_intr_count = 1;
	}
	if (rcv_intr_count > 1 && rcv_intr_timeout == 0) {
		/*
		 * Avoid indefinite packet delivery by requiring a timeout
		 * if count is > 1.
		 */
		pr_err("Invalid mode: receive interrupt count greater than 1 and available timeout is zero - setting available timeout to 1\n");
		rcv_intr_timeout = 1;
	}
	if (rcv_intr_dynamic && !(rcv_intr_count > 1 && rcv_intr_timeout > 0)) {
		/*
		 * The dynamic algorithm expects a non-zero timeout
		 * and a count > 1.
		 */
		pr_err("Invalid mode: dynamic receive interrupt mitigation with invalid count and timeout - turning dynamic off\n");
		rcv_intr_dynamic = 0;
	}

	/* sanitize link CRC options */
	link_crc_mask &= SUPPORTED_CRCS;
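
	/*
	 * Summary of the interrupt mitigation rules enforced above
	 * (derived from the checks themselves):
	 *
	 *	count == 0 && timeout == 0              -> count = 1
	 *	count  > 1 && timeout == 0              -> timeout = 1
	 *	dynamic && !(count > 1 && timeout > 0)  -> dynamic = 0
	 */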
	/*
	 * These must be called before the driver is registered with
	 * the PCI subsystem.
	 */
	idr_init(&hfi1_unit_table);

	hfi1_dbg_init();
	ret = hfi1_wss_init();
	if (ret < 0)
		goto bail_wss;
	ret = pci_register_driver(&hfi1_pci_driver);
	if (ret < 0) {
		pr_err("Unable to register driver: error %d\n", -ret);
		goto bail_dev;
	}
	goto bail; /* all OK */

bail_dev:
	hfi1_wss_exit();
bail_wss:
	hfi1_dbg_exit();
	idr_destroy(&hfi1_unit_table);
	dev_cleanup();
bail:
	return ret;
}

module_init(hfi1_mod_init);

/*
 * Do the non-unit driver cleanup, memory free, etc. at unload.
 */
static void __exit hfi1_mod_cleanup(void)
{
	pci_unregister_driver(&hfi1_pci_driver);
	hfi1_wss_exit();
	hfi1_dbg_exit();
	hfi1_cpulist_count = 0;
	kfree(hfi1_cpulist);

	idr_destroy(&hfi1_unit_table);
	dispose_firmware(); /* asymmetric with obtain_firmware() */
	dev_cleanup();
}

module_exit(hfi1_mod_cleanup);

/* this can only be called after a successful initialization */
static void cleanup_device_data(struct hfi1_devdata *dd)
{
	int ctxt;
	int pidx;
	struct hfi1_ctxtdata **tmp;
	unsigned long flags;

	/* users can't do anything more with chip */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		struct hfi1_pportdata *ppd = &dd->pport[pidx];
		struct cc_state *cc_state;
		int i;

		if (ppd->statusp)
			*ppd->statusp &= ~HFI1_STATUS_CHIP_PRESENT;

		for (i = 0; i < OPA_MAX_SLS; i++)
			hrtimer_cancel(&ppd->cca_timer[i].hrtimer);
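
		/*
		 * Congestion-control teardown below: readers access
		 * ppd->cc_state under RCU, so NULL is published while
		 * holding cc_state_lock and the old state is reclaimed
		 * only after a grace period via call_rcu().
		 */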
		spin_lock(&ppd->cc_state_lock);
		cc_state = get_cc_state(ppd);
		RCU_INIT_POINTER(ppd->cc_state, NULL);
		spin_unlock(&ppd->cc_state_lock);

		if (cc_state)
			call_rcu(&cc_state->rcu, cc_state_reclaim);
	}

	free_credit_return(dd);

	/*
	 * Free any resources still in use (usually just kernel contexts)
	 * at unload; we loop over ctxtcnt, because that's what we allocate.
	 * We acquire the lock to be really paranoid that rcd isn't being
	 * accessed from some interrupt-related code (that should not happen,
	 * but best to be sure).
	 */
	spin_lock_irqsave(&dd->uctxt_lock, flags);
	tmp = dd->rcd;
	dd->rcd = NULL;
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	if (dd->rcvhdrtail_dummy_kvaddr) {
		dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
				  (void *)dd->rcvhdrtail_dummy_kvaddr,
				  dd->rcvhdrtail_dummy_physaddr);
		dd->rcvhdrtail_dummy_kvaddr = NULL;
	}

	for (ctxt = 0; tmp && ctxt < dd->num_rcv_contexts; ctxt++) {
		struct hfi1_ctxtdata *rcd = tmp[ctxt];

		tmp[ctxt] = NULL; /* debugging paranoia */
		if (rcd) {
			hfi1_clear_tids(rcd);
			hfi1_free_ctxtdata(dd, rcd);
		}
	}
	kfree(tmp);
	free_pio_map(dd);
	/* must follow rcv context free - need to remove rcv's hooks */
	for (ctxt = 0; ctxt < dd->num_send_contexts; ctxt++)
		sc_free(dd->send_contexts[ctxt].sc);
	dd->num_send_contexts = 0;
	kfree(dd->send_contexts);
	dd->send_contexts = NULL;
	kfree(dd->hw_to_sw);
	dd->hw_to_sw = NULL;
	kfree(dd->boardname);
	vfree(dd->events);
	vfree(dd->status);
}
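
/*
 * Sketch of the teardown ordering, as wired up in postinit_cleanup()
 * below (derived from this file):
 *
 *	remove_one() / failed init_one()
 *	  -> postinit_cleanup()
 *	       -> hfi1_start_cleanup(), PCIe cleanup,
 *	          cleanup_device_data(), hfi1_free_devdata()
 */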
/*
 * Clean up on unit shutdown, or error during unit load after
 * successful initialization.
 */
static void postinit_cleanup(struct hfi1_devdata *dd)
{
	hfi1_start_cleanup(dd);

	hfi1_pcie_ddcleanup(dd);
	hfi1_pcie_cleanup(dd->pcidev);

	cleanup_device_data(dd);

	hfi1_free_devdata(dd);
}

static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int ret = 0, j, pidx, initfail;
	struct hfi1_devdata *dd = ERR_PTR(-EINVAL);
	struct hfi1_pportdata *ppd;

	/* First, lock the non-writable module parameters */
	HFI1_CAP_LOCK();

	/* Validate some global module parameters */
	if (rcvhdrcnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) {
		hfi1_early_err(&pdev->dev, "Header queue count too small\n");
		ret = -EINVAL;
		goto bail;
	}
	if (rcvhdrcnt > HFI1_MAX_HDRQ_EGRBUF_CNT) {
		hfi1_early_err(&pdev->dev,
			       "Receive header queue count cannot be greater than %u\n",
			       HFI1_MAX_HDRQ_EGRBUF_CNT);
		ret = -EINVAL;
		goto bail;
	}
	/* use the encoding function as a sanitization check */
	if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) {
		hfi1_early_err(&pdev->dev, "Invalid HdrQ Entry size %u\n",
			       hfi1_hdrq_entsize);
		ret = -EINVAL;
		goto bail;
	}
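
	/*
	 * Example of the validation below, with illustrative numbers
	 * only: eager_buffer_size=3000000 on the module command line is
	 * first rounded up to the next power of two (4 MB) and then
	 * clamped to [MIN_EAGER_BUFFER * 8, MAX_EAGER_BUFFER_TOTAL].
	 */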
	/* The receive eager buffer size must be set before the receive
	 * contexts are created.
	 *
	 * Set the eager buffer size.  Validate that it falls in a range
	 * allowed by the hardware - all powers of 2 between the min and
	 * max.  The maximum valid MTU is within the eager buffer range
	 * so we do not need to cap the max_mtu by an eager buffer size
	 * setting.
	 */
	if (eager_buffer_size) {
		if (!is_power_of_2(eager_buffer_size))
			eager_buffer_size =
				roundup_pow_of_two(eager_buffer_size);
		eager_buffer_size =
			clamp_val(eager_buffer_size,
				  MIN_EAGER_BUFFER * 8,
				  MAX_EAGER_BUFFER_TOTAL);
		hfi1_early_info(&pdev->dev, "Eager buffer size %u\n",
				eager_buffer_size);
	} else {
		hfi1_early_err(&pdev->dev, "Invalid Eager buffer size of 0\n");
		ret = -EINVAL;
		goto bail;
	}

	/* restrict value of hfi1_rcvarr_split */
	hfi1_rcvarr_split = clamp_val(hfi1_rcvarr_split, 0, 100);

	ret = hfi1_pcie_init(pdev, ent);
	if (ret)
		goto bail;

	/*
	 * Do device-specific initialization, function table setup, dd
	 * allocation, etc.
	 */
	switch (ent->device) {
	case PCI_DEVICE_ID_INTEL0:
	case PCI_DEVICE_ID_INTEL1:
		dd = hfi1_init_dd(pdev, ent);
		break;
	default:
		hfi1_early_err(&pdev->dev,
			       "Failing on unknown Intel deviceid 0x%x\n",
			       ent->device);
		ret = -ENODEV;
	}

	if (IS_ERR(dd))
		ret = PTR_ERR(dd);
	if (ret)
		goto clean_bail; /* error already printed */

	ret = create_workqueues(dd);
	if (ret)
		goto clean_bail;

	/* do the generic initialization */
	initfail = hfi1_init(dd, 0);

	ret = hfi1_register_ib_device(dd);
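
	/*
	 * From here on two error indicators are live: "initfail" from
	 * hfi1_init() and "ret" from the IB registration.  Both are
	 * checked below so that a failure in either still leaves the
	 * diagnostic /dev devices behind where possible.
	 */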
	/*
	 * Now ready for use.  This should be cleared whenever we
	 * detect a reset, or initiate one.  If earlier failure,
	 * we still create devices, so diags, etc. can be used
	 * to determine cause of problem.
	 */
	if (!initfail && !ret) {
		dd->flags |= HFI1_INITTED;
		/* create debugfs files after init and ib register */
		hfi1_dbg_ibdev_init(&dd->verbs_dev);
	}

	j = hfi1_device_create(dd);
	if (j)
		dd_dev_err(dd, "Failed to create /dev devices: %d\n", -j);

	if (initfail || ret) {
		stop_timers(dd);
		flush_workqueue(ib_wq);
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			hfi1_quiet_serdes(dd->pport + pidx);
			ppd = dd->pport + pidx;
			if (ppd->hfi1_wq) {
				destroy_workqueue(ppd->hfi1_wq);
				ppd->hfi1_wq = NULL;
			}
		}
		if (!j)
			hfi1_device_remove(dd);
		if (!ret)
			hfi1_unregister_ib_device(dd);
		postinit_cleanup(dd);
		if (initfail)
			ret = initfail;
		goto bail; /* everything already cleaned */
	}

	sdma_start(dd);

	return 0;

clean_bail:
	hfi1_pcie_cleanup(pdev);
bail:
	return ret;
}

static void remove_one(struct pci_dev *pdev)
{
	struct hfi1_devdata *dd = pci_get_drvdata(pdev);

	/* close debugfs files before ib unregister */
	hfi1_dbg_ibdev_exit(&dd->verbs_dev);
	/* unregister from IB core */
	hfi1_unregister_ib_device(dd);
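
	/*
	 * The remove path below mirrors init_one() in reverse: quiesce
	 * the hardware, stop timers, drain ib_wq, remove the /dev nodes,
	 * then run the common postinit_cleanup().
	 */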
	/*
	 * Disable the IB link, disable interrupts on the device,
	 * clear dma engines, etc.
	 */
	shutdown_device(dd);

	stop_timers(dd);

	/* wait until all of our (qsfp) queue_work() calls complete */
	flush_workqueue(ib_wq);

	hfi1_device_remove(dd);

	postinit_cleanup(dd);
}

/**
 * hfi1_create_rcvhdrq - create a receive header queue
 * @dd: the hfi1_ib device
 * @rcd: the context data
 *
 * This must be contiguous memory (from an i/o perspective), and must be
 * DMA'able (which means for some systems, it will go through an IOMMU,
 * or be forced into a low address range).
 */
int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
	unsigned amt;
	u64 reg;

	if (!rcd->rcvhdrq) {
		dma_addr_t phys_hdrqtail;
		gfp_t gfp_flags;

		/*
		 * rcvhdrqentsize is in DWs, so we have to convert to bytes
		 * (* sizeof(u32)).
		 */
		amt = PAGE_ALIGN(rcd->rcvhdrq_cnt * rcd->rcvhdrqentsize *
				 sizeof(u32));
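
		/*
		 * Worked example with illustrative values only: for
		 * rcvhdrq_cnt == 8192 and rcvhdrqentsize == 32 DWs,
		 * amt = PAGE_ALIGN(8192 * 32 * 4) = 1 MB.
		 */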
		gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ?
			GFP_USER : GFP_KERNEL;
		rcd->rcvhdrq = dma_zalloc_coherent(
			&dd->pcidev->dev, amt, &rcd->rcvhdrq_phys,
			gfp_flags | __GFP_COMP);

		if (!rcd->rcvhdrq) {
			dd_dev_err(dd,
				   "attempt to allocate %u bytes for ctxt %u rcvhdrq failed\n",
				   amt, rcd->ctxt);
			goto bail;
		}

		if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
			rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent(
				&dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
				gfp_flags);
			if (!rcd->rcvhdrtail_kvaddr)
				goto bail_free;
			rcd->rcvhdrqtailaddr_phys = phys_hdrqtail;
		}

		rcd->rcvhdrq_size = amt;
	}
	/*
	 * These values are per-context:
	 *	RcvHdrCnt
	 *	RcvHdrEntSize
	 *	RcvHdrSize
	 */
	reg = ((u64)(rcd->rcvhdrq_cnt >> HDRQ_SIZE_SHIFT)
			& RCV_HDR_CNT_CNT_MASK)
		<< RCV_HDR_CNT_CNT_SHIFT;
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_CNT, reg);
	reg = (encode_rcv_header_entry_size(rcd->rcvhdrqentsize)
			& RCV_HDR_ENT_SIZE_ENT_SIZE_MASK)
		<< RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT;
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_ENT_SIZE, reg);
	reg = (dd->rcvhdrsize & RCV_HDR_SIZE_HDR_SIZE_MASK)
		<< RCV_HDR_SIZE_HDR_SIZE_SHIFT;
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_SIZE, reg);

	/*
	 * Program dummy tail address for every receive context
	 * before enabling any receive context
	 */
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_TAIL_ADDR,
			dd->rcvhdrtail_dummy_physaddr);

	return 0;

bail_free:
	dd_dev_err(dd,
		   "attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n",
		   rcd->ctxt);
	vfree(rcd->user_event_mask);
	rcd->user_event_mask = NULL;
	dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
			  rcd->rcvhdrq_phys);
	rcd->rcvhdrq = NULL;
bail:
	return -ENOMEM;
}
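
/*
 * Notes on the CSR programming above (inferred from this file): the
 * header queue count is written in HDRQ_SIZE_SHIFT-sized units, and the
 * entry size goes through encode_rcv_header_entry_size(), the same
 * routine init_one() uses to reject unsupported hfi1_hdrq_entsize
 * values.
 */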
/**
 * hfi1_setup_eagerbufs - allocate eager buffers, both kernel and user contexts
 * @rcd: the context we are setting up.
 *
 * Allocate the eager TID buffers and program them into the chip.
 * They are no longer completely contiguous, we do multiple allocation
 * calls.  Otherwise we get the OOM code involved, by asking for too
 * much per call, with disastrous results on some kernels.
 */
int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
{
	struct hfi1_devdata *dd = rcd->dd;
	u32 max_entries, egrtop, alloced_bytes = 0, idx = 0;
	gfp_t gfp_flags;
	u16 order;
	int ret = 0;
	u16 round_mtu = roundup_pow_of_two(hfi1_max_mtu);

	/*
	 * GFP_USER, but without GFP_FS, so buffer cache can be
	 * coalesced (we hope); otherwise, even at order 4,
	 * heavy filesystem activity makes these fail, and we can
	 * use compound pages.
	 */
	gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;

	/*
	 * The minimum size of the eager buffers is a group of MTU-sized
	 * buffers.
	 * The global eager_buffer_size parameter is checked against the
	 * theoretical lower limit of the value.  Here, we check against the
	 * MTU.
	 */
	if (rcd->egrbufs.size < (round_mtu * dd->rcv_entries.group_size))
		rcd->egrbufs.size = round_mtu * dd->rcv_entries.group_size;
	/*
	 * If using one-pkt-per-egr-buffer, lower the eager buffer
	 * size to the max MTU (page-aligned).
	 */
	if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
		rcd->egrbufs.rcvtid_size = round_mtu;
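
	/*
	 * Example, assuming the default 10240-byte OPA MTU: round_mtu is
	 * roundup_pow_of_two(10240) == 16384, so one-pkt-per-egr-buffer
	 * contexts use 16 KB eager buffers.
	 */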
	/*
	 * Eager buffer sizes of 1MB or less require smaller TID sizes
	 * to satisfy the "multiple of 8 RcvArray entries" requirement.
	 */
	if (rcd->egrbufs.size <= (1 << 20))
		rcd->egrbufs.rcvtid_size = max((unsigned long)round_mtu,
			rounddown_pow_of_two(rcd->egrbufs.size / 8));

	while (alloced_bytes < rcd->egrbufs.size &&
	       rcd->egrbufs.alloced < rcd->egrbufs.count) {
		rcd->egrbufs.buffers[idx].addr =
			dma_zalloc_coherent(&dd->pcidev->dev,
					    rcd->egrbufs.rcvtid_size,
					    &rcd->egrbufs.buffers[idx].phys,
					    gfp_flags);
		if (rcd->egrbufs.buffers[idx].addr) {
			rcd->egrbufs.buffers[idx].len =
				rcd->egrbufs.rcvtid_size;
			rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].addr =
				rcd->egrbufs.buffers[idx].addr;
			rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].phys =
				rcd->egrbufs.buffers[idx].phys;
			rcd->egrbufs.alloced++;
			alloced_bytes += rcd->egrbufs.rcvtid_size;
			idx++;
		} else {
			u32 new_size, i, j;
			u64 offset = 0;

			/*
			 * Fail the eager buffer allocation if:
			 *   - we are already using the lowest acceptable size
			 *   - we are using one-pkt-per-egr-buffer (this implies
			 *     that we are accepting only one size)
			 */
			if (rcd->egrbufs.rcvtid_size == round_mtu ||
			    !HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) {
				dd_dev_err(dd, "ctxt%u: Failed to allocate eager buffers\n",
					   rcd->ctxt);
				goto bail_rcvegrbuf_phys;
			}

			new_size = rcd->egrbufs.rcvtid_size / 2;

			/*
			 * If the first attempt to allocate memory failed, don't
			 * fail everything but continue with the next lower
			 * size.
			 */
			if (idx == 0) {
				rcd->egrbufs.rcvtid_size = new_size;
				continue;
			}
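
			/*
			 * Illustrative fallback sequence: a 64 KB
			 * rcvtid_size that fails allocation drops to
			 * 32 KB, then 16 KB, and so on, never going
			 * below round_mtu.
			 */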
			/*
			 * Re-partition already allocated buffers to a smaller
			 * size.
			 */
			rcd->egrbufs.alloced = 0;
			for (i = 0, j = 0, offset = 0; j < idx; i++) {
				if (i >= rcd->egrbufs.count)
					break;
				rcd->egrbufs.rcvtids[i].phys =
					rcd->egrbufs.buffers[j].phys + offset;
				rcd->egrbufs.rcvtids[i].addr =
					rcd->egrbufs.buffers[j].addr + offset;
				rcd->egrbufs.alloced++;
				if ((rcd->egrbufs.buffers[j].phys + offset +
				     new_size) ==
				    (rcd->egrbufs.buffers[j].phys +
				     rcd->egrbufs.buffers[j].len)) {
					j++;
					offset = 0;
				} else {
					offset += new_size;
				}
			}
			rcd->egrbufs.rcvtid_size = new_size;
		}
	}
	rcd->egrbufs.numbufs = idx;
	rcd->egrbufs.size = alloced_bytes;

	hfi1_cdbg(PROC,
		  "ctxt%u: Alloced %u rcv tid entries @ %uKB, total %zuKB\n",
		  rcd->ctxt, rcd->egrbufs.alloced, rcd->egrbufs.rcvtid_size,
		  rcd->egrbufs.size);

	/*
	 * Set the context's rcv array head update threshold to the closest
	 * power of 2 (so we can use a mask instead of modulo) below half
	 * the allocated entries.
	 */
	rcd->egrbufs.threshold =
		rounddown_pow_of_two(rcd->egrbufs.alloced / 2);
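
	/*
	 * Example with assumed counts: 2048 allocated entries give a
	 * threshold of rounddown_pow_of_two(1024) == 1024, while 1500
	 * entries give rounddown_pow_of_two(750) == 512.
	 */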
	/*
	 * Compute the expected RcvArray entry base.  This is done after
	 * allocating the eager buffers in order to maximize the
	 * expected RcvArray entries for the context.
	 */
	max_entries = rcd->rcv_array_groups * dd->rcv_entries.group_size;
	egrtop = roundup(rcd->egrbufs.alloced, dd->rcv_entries.group_size);
	rcd->expected_count = max_entries - egrtop;
	if (rcd->expected_count > MAX_TID_PAIR_ENTRIES * 2)
		rcd->expected_count = MAX_TID_PAIR_ENTRIES * 2;

	rcd->expected_base = rcd->eager_base + egrtop;
	hfi1_cdbg(PROC, "ctxt%u: eager:%u, exp:%u, egrbase:%u, expbase:%u\n",
		  rcd->ctxt, rcd->egrbufs.alloced, rcd->expected_count,
		  rcd->eager_base, rcd->expected_base);

	if (!hfi1_rcvbuf_validate(rcd->egrbufs.rcvtid_size, PT_EAGER, &order)) {
		hfi1_cdbg(PROC,
			  "ctxt%u: current Eager buffer size is invalid %u\n",
			  rcd->ctxt, rcd->egrbufs.rcvtid_size);
		ret = -EINVAL;
		goto bail;
	}

	for (idx = 0; idx < rcd->egrbufs.alloced; idx++) {
		hfi1_put_tid(dd, rcd->eager_base + idx, PT_EAGER,
			     rcd->egrbufs.rcvtids[idx].phys, order);
		cond_resched();
	}
	goto bail;

bail_rcvegrbuf_phys:
	for (idx = 0; idx < rcd->egrbufs.alloced &&
	     rcd->egrbufs.buffers[idx].addr;
	     idx++) {
		dma_free_coherent(&dd->pcidev->dev,
				  rcd->egrbufs.buffers[idx].len,
				  rcd->egrbufs.buffers[idx].addr,
				  rcd->egrbufs.buffers[idx].phys);
		rcd->egrbufs.buffers[idx].addr = NULL;
		rcd->egrbufs.buffers[idx].phys = 0;
		rcd->egrbufs.buffers[idx].len = 0;
	}
bail:
	return ret;
}