/*
 * Copyright(c) 2015 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>
#include <linux/bitmap.h>
#include <rdma/rdma_vt.h>

#include "hfi.h"
#include "device.h"
#include "common.h"
#include "trace.h"
#include "mad.h"
#include "sdma.h"
#include "debugfs.h"
#include "verbs.h"
#include "aspm.h"
#include "affinity.h"
#include "vnic.h"
#include "exp_rcv.h"

#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

/*
 * min buffers we want to have per context, after driver
 */
#define HFI1_MIN_USER_CTXT_BUFCNT 7

#define HFI1_MIN_HDRQ_EGRBUF_CNT 2
#define HFI1_MAX_HDRQ_EGRBUF_CNT 16352
#define HFI1_MIN_EAGER_BUFFER_SIZE (4 * 1024) /* 4KB */
#define HFI1_MAX_EAGER_BUFFER_SIZE (256 * 1024) /* 256KB */

#define NUM_IB_PORTS 1

/*
 * Number of user receive contexts we are configured to use (to allow for more
 * pio buffers per ctxt, etc.)  Zero means use one user context per CPU.
 */
int num_user_contexts = -1;
module_param_named(num_user_contexts, num_user_contexts, int, 0444);
MODULE_PARM_DESC(
	num_user_contexts, "Set max number of user contexts to use (default: -1 will use the real (non-HT) CPU count)");

uint krcvqs[RXE_NUM_DATA_VL];
int krcvqsset;
module_param_array(krcvqs, uint, &krcvqsset, S_IRUGO);
MODULE_PARM_DESC(krcvqs, "Array of the number of non-control kernel receive queues by VL");

/* computed based on above array */
unsigned long n_krcvqs;

static unsigned hfi1_rcvarr_split = 25;
module_param_named(rcvarr_split, hfi1_rcvarr_split, uint, S_IRUGO);
MODULE_PARM_DESC(rcvarr_split, "Percent of context's RcvArray entries used for Eager buffers");

static uint eager_buffer_size = (8 << 20); /* 8MB */
module_param(eager_buffer_size, uint, S_IRUGO);
MODULE_PARM_DESC(eager_buffer_size, "Size of the eager buffers, default: 8MB");

static uint rcvhdrcnt = 2048; /* 2x the max eager buffer count */
module_param_named(rcvhdrcnt, rcvhdrcnt, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrcnt, "Receive header queue count (default 2048)");

static uint hfi1_hdrq_entsize = 32;
module_param_named(hdrq_entsize, hfi1_hdrq_entsize, uint, 0444);
MODULE_PARM_DESC(hdrq_entsize, "Size of header queue entries: 2 - 8B, 16 - 64B, 32 - 128B (default)");

unsigned int user_credit_return_threshold = 33;	/* default is 33% */
module_param(user_credit_return_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(user_credit_return_threshold, "Credit return threshold for user send contexts, return when unreturned credits passes this many blocks (in percent of allocated blocks, 0 is off)");

static inline u64 encode_rcv_header_entry_size(u16 size);

static struct idr hfi1_unit_table;

static int hfi1_create_kctxt(struct hfi1_devdata *dd,
			     struct hfi1_pportdata *ppd)
{
	struct hfi1_ctxtdata *rcd;
	int ret;

	/* Control context has to be always 0 */
	BUILD_BUG_ON(HFI1_CTRL_CTXT != 0);

	ret = hfi1_create_ctxtdata(ppd, dd->node, &rcd);
	if (ret < 0) {
		dd_dev_err(dd, "Kernel receive context allocation failed\n");
		return ret;
	}

	/*
	 * Set up the kernel context flags here and now because they use
	 * default values for all receive side memories.  User contexts will
	 * be handled as they are created.
	 */
	rcd->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
		HFI1_CAP_KGET(NODROP_RHQ_FULL) |
		HFI1_CAP_KGET(NODROP_EGR_FULL) |
		HFI1_CAP_KGET(DMA_RTAIL);

	/* Control context must use DMA_RTAIL */
	if (rcd->ctxt == HFI1_CTRL_CTXT)
		rcd->flags |= HFI1_CAP_DMA_RTAIL;
	rcd->seq_cnt = 1;

	rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node);
	if (!rcd->sc) {
		dd_dev_err(dd, "Kernel send context allocation failed\n");
		return -ENOMEM;
	}
	hfi1_init_ctxt(rcd->sc);

	return 0;
}

/*
 * Create the receive context array and one or more kernel contexts
 */
int hfi1_create_kctxts(struct hfi1_devdata *dd)
{
	u16 i;
	int ret;

	dd->rcd = kcalloc_node(dd->num_rcv_contexts, sizeof(*dd->rcd),
			       GFP_KERNEL, dd->node);
	if (!dd->rcd)
		return -ENOMEM;

	for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) {
		ret = hfi1_create_kctxt(dd, dd->pport);
		if (ret)
			goto bail;
	}

	return 0;
bail:
	for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i)
		hfi1_free_ctxt(dd->rcd[i]);

	/* All the contexts should be freed, free the array */
	kfree(dd->rcd);
	dd->rcd = NULL;
	return ret;
}

/*
 * Helper routines for the receive context reference count (rcd and uctxt).
 */
static void hfi1_rcd_init(struct hfi1_ctxtdata *rcd)
{
	kref_init(&rcd->kref);
}

/**
 * hfi1_rcd_free - When reference is zero clean up.
 * @kref: pointer to an initialized rcd data structure
 *
 */
static void hfi1_rcd_free(struct kref *kref)
{
	unsigned long flags;
	struct hfi1_ctxtdata *rcd =
		container_of(kref, struct hfi1_ctxtdata, kref);

	hfi1_free_ctxtdata(rcd->dd, rcd);

	spin_lock_irqsave(&rcd->dd->uctxt_lock, flags);
	rcd->dd->rcd[rcd->ctxt] = NULL;
	spin_unlock_irqrestore(&rcd->dd->uctxt_lock, flags);

	kfree(rcd);
}

/**
 * hfi1_rcd_put - decrement reference for rcd
 * @rcd: pointer to an initialized rcd data structure
 *
 * Use this to put a reference after the init.
 */
int hfi1_rcd_put(struct hfi1_ctxtdata *rcd)
{
	if (rcd)
		return kref_put(&rcd->kref, hfi1_rcd_free);

	return 0;
}

/**
 * hfi1_rcd_get - increment reference for rcd
 * @rcd: pointer to an initialized rcd data structure
 *
 * Use this to get a reference after the init.
 */
void hfi1_rcd_get(struct hfi1_ctxtdata *rcd)
{
	kref_get(&rcd->kref);
}

/**
 * allocate_rcd_index - allocate an rcd index from the rcd array
 * @dd: pointer to a valid devdata structure
 * @rcd: rcd data structure to assign
 * @index: pointer to index that is allocated
 *
 * Find an empty index in the rcd array, and assign the given rcd to it.
 * If the array is full, we are EBUSY.
 *
 */
static int allocate_rcd_index(struct hfi1_devdata *dd,
			      struct hfi1_ctxtdata *rcd, u16 *index)
{
	unsigned long flags;
	u16 ctxt;

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	for (ctxt = 0; ctxt < dd->num_rcv_contexts; ctxt++)
		if (!dd->rcd[ctxt])
			break;

	if (ctxt < dd->num_rcv_contexts) {
		rcd->ctxt = ctxt;
		dd->rcd[ctxt] = rcd;
		hfi1_rcd_init(rcd);
	}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	if (ctxt >= dd->num_rcv_contexts)
		return -EBUSY;

	*index = ctxt;

	return 0;
}

/**
 * hfi1_rcd_get_by_index_safe - validate the ctxt index before accessing the
 * array
 * @dd: pointer to a valid devdata structure
 * @ctxt: the index of a possible rcd
 *
 * This is a wrapper for hfi1_rcd_get_by_index() to validate that the given
 * ctxt index is valid.
 *
 * The caller is responsible for making the _put().
 *
 */
struct hfi1_ctxtdata *hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd,
						 u16 ctxt)
{
	if (ctxt < dd->num_rcv_contexts)
		return hfi1_rcd_get_by_index(dd, ctxt);

	return NULL;
}

/**
 * hfi1_rcd_get_by_index
 * @dd: pointer to a valid devdata structure
 * @ctxt: the index of a possible rcd
 *
 * We need to protect access to the rcd array.  If access is needed to
 * one or more indices, get the protecting spinlock and then increment the
 * kref.
 *
 * The caller is responsible for making the _put().
 *
 */
struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt)
{
	unsigned long flags;
	struct hfi1_ctxtdata *rcd = NULL;

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	if (dd->rcd[ctxt]) {
		rcd = dd->rcd[ctxt];
		hfi1_rcd_get(rcd);
	}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	return rcd;
}

/*
 * Common code for user and kernel context create and setup.
 * NOTE: the initial kref is done here (hfi1_rcd_init()).
 */
int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
			 struct hfi1_ctxtdata **context)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct hfi1_ctxtdata *rcd;
	unsigned kctxt_ngroups = 0;
	u32 base;

	if (dd->rcv_entries.nctxt_extra >
	    dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt)
		kctxt_ngroups = (dd->rcv_entries.nctxt_extra -
				 (dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt));
	rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, numa);
	if (rcd) {
		u32 rcvtids, max_entries;
		u16 ctxt;
		int ret;

		ret = allocate_rcd_index(dd, rcd, &ctxt);
		if (ret) {
			*context = NULL;
			kfree(rcd);
			return ret;
		}

		INIT_LIST_HEAD(&rcd->qp_wait_list);
		hfi1_exp_tid_group_init(rcd);
		rcd->ppd = ppd;
		rcd->dd = dd;
		rcd->numa_id = numa;
		rcd->rcv_array_groups = dd->rcv_entries.ngroups;
		rcd->rhf_rcv_function_map = normal_rhf_rcv_functions;

		mutex_init(&rcd->exp_mutex);
		spin_lock_init(&rcd->exp_lock);
		INIT_LIST_HEAD(&rcd->flow_queue.queue_head);
		INIT_LIST_HEAD(&rcd->rarr_queue.queue_head);

		hfi1_cdbg(PROC, "setting up context %u\n", rcd->ctxt);

		/*
		 * Calculate the context's RcvArray entry starting point.
		 * We do this here because we have to take into account all
		 * the RcvArray entries that previous context would have
		 * taken and we have to account for any extra groups assigned
		 * to the static (kernel) or dynamic (vnic/user) contexts.
		 */
		if (ctxt < dd->first_dyn_alloc_ctxt) {
			if (ctxt < kctxt_ngroups) {
				base = ctxt * (dd->rcv_entries.ngroups + 1);
				rcd->rcv_array_groups++;
			} else {
				base = kctxt_ngroups +
					(ctxt * dd->rcv_entries.ngroups);
			}
		} else {
			u16 ct = ctxt - dd->first_dyn_alloc_ctxt;

			base = ((dd->n_krcv_queues * dd->rcv_entries.ngroups) +
				kctxt_ngroups);
			if (ct < dd->rcv_entries.nctxt_extra) {
				base += ct * (dd->rcv_entries.ngroups + 1);
				rcd->rcv_array_groups++;
			} else {
				base += dd->rcv_entries.nctxt_extra +
					(ct * dd->rcv_entries.ngroups);
			}
		}
		rcd->eager_base = base * dd->rcv_entries.group_size;

		rcd->rcvhdrq_cnt = rcvhdrcnt;
		rcd->rcvhdrqentsize = hfi1_hdrq_entsize;
		rcd->rhf_offset =
			rcd->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
		/*
		 * Simple Eager buffer allocation: we have already pre-allocated
		 * the number of RcvArray entry groups.  Each ctxtdata structure
		 * holds the number of groups for that context.
		 *
		 * To follow CSR requirements and maintain cacheline alignment,
		 * make sure all sizes and bases are multiples of group_size.
		 *
		 * The expected entry count is what is left after assigning
		 * eager.
		 */
		max_entries = rcd->rcv_array_groups *
			dd->rcv_entries.group_size;
		rcvtids = ((max_entries * hfi1_rcvarr_split) / 100);
		rcd->egrbufs.count = round_down(rcvtids,
						dd->rcv_entries.group_size);
		if (rcd->egrbufs.count > MAX_EAGER_ENTRIES) {
			dd_dev_err(dd, "ctxt%u: requested too many RcvArray entries.\n",
				   rcd->ctxt);
			rcd->egrbufs.count = MAX_EAGER_ENTRIES;
		}
		hfi1_cdbg(PROC,
			  "ctxt%u: max Eager buffer RcvArray entries: %u\n",
			  rcd->ctxt, rcd->egrbufs.count);

		/*
		 * Allocate array that will hold the eager buffer accounting
		 * data.
		 * This will allocate the maximum possible buffer count based
		 * on the value of the RcvArray split parameter.
		 * The resulting value will be rounded down to the closest
		 * multiple of dd->rcv_entries.group_size.
		 */
		rcd->egrbufs.buffers =
			kcalloc_node(rcd->egrbufs.count,
				     sizeof(*rcd->egrbufs.buffers),
				     GFP_KERNEL, numa);
		if (!rcd->egrbufs.buffers)
			goto bail;
		rcd->egrbufs.rcvtids =
			kcalloc_node(rcd->egrbufs.count,
				     sizeof(*rcd->egrbufs.rcvtids),
				     GFP_KERNEL, numa);
		if (!rcd->egrbufs.rcvtids)
			goto bail;
		rcd->egrbufs.size = eager_buffer_size;
		/*
		 * The size of the buffers programmed into the RcvArray
		 * entries needs to be big enough to handle the highest
		 * MTU supported.
		 */
		if (rcd->egrbufs.size < hfi1_max_mtu) {
			rcd->egrbufs.size = __roundup_pow_of_two(hfi1_max_mtu);
			hfi1_cdbg(PROC,
				  "ctxt%u: eager bufs size too small. Adjusting to %zu\n",
				  rcd->ctxt, rcd->egrbufs.size);
		}
		rcd->egrbufs.rcvtid_size = HFI1_MAX_EAGER_BUFFER_SIZE;

		/* Applicable only for statically created kernel contexts */
		if (ctxt < dd->first_dyn_alloc_ctxt) {
			rcd->opstats = kzalloc_node(sizeof(*rcd->opstats),
						    GFP_KERNEL, numa);
			if (!rcd->opstats)
				goto bail;

			/* Initialize TID flow generations for the context */
			hfi1_kern_init_ctxt_generations(rcd);
		}

		*context = rcd;
		return 0;
	}

bail:
	*context = NULL;
	hfi1_free_ctxt(rcd);
	return -ENOMEM;
}

/**
 * hfi1_free_ctxt
 * @rcd: pointer to an initialized rcd data structure
 *
 * This wrapper is the free function that matches hfi1_create_ctxtdata().
 * When a context is done being used (kernel or user), this function is called
 * for the "final" put to match the kref init from hfi1_create_ctxtdata().
 * Other users of the context do a get/put sequence to make sure that the
 * structure isn't removed while in use.
 */
void hfi1_free_ctxt(struct hfi1_ctxtdata *rcd)
{
	hfi1_rcd_put(rcd);
}

/*
 * Convert a receive header entry size to the encoding used in the CSR.
 *
 * Return a zero if the given size is invalid.
 */
static inline u64 encode_rcv_header_entry_size(u16 size)
{
	/* there are only 3 valid receive header entry sizes */
	if (size == 2)
		return 1;
	if (size == 16)
		return 2;
	else if (size == 32)
		return 4;
	return 0; /* invalid */
}

/*
 * Select the largest ccti value over all SLs to determine the intra-
 * packet gap for the link.
 *
 * called with cca_timer_lock held (to protect access to cca_timer
 * array), and rcu_read_lock() (to protect access to cc_state).
 */
void set_link_ipg(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct cc_state *cc_state;
	int i;
	u16 cce, ccti_limit, max_ccti = 0;
	u16 shift, mult;
	u64 src;
	u32 current_egress_rate; /* Mbits /sec */
	u32 max_pkt_time;
	/*
	 * max_pkt_time is the maximum packet egress time in units
	 * of the fabric clock period 1/(805 MHz).
	 */

	cc_state = get_cc_state(ppd);

	if (!cc_state)
		/*
		 * This should _never_ happen - rcu_read_lock() is held,
		 * and set_link_ipg() should not be called if cc_state
		 * is NULL.
		 */
		return;

	for (i = 0; i < OPA_MAX_SLS; i++) {
		u16 ccti = ppd->cca_timer[i].ccti;

		if (ccti > max_ccti)
			max_ccti = ccti;
	}

	ccti_limit = cc_state->cct.ccti_limit;
	if (max_ccti > ccti_limit)
		max_ccti = ccti_limit;

	cce = cc_state->cct.entries[max_ccti].entry;
	shift = (cce & 0xc000) >> 14;
	mult = (cce & 0x3fff);

	current_egress_rate = active_egress_rate(ppd);

	max_pkt_time = egress_cycles(ppd->ibmaxlen, current_egress_rate);

	src = (max_pkt_time >> shift) * mult;

	src &= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SMASK;
	src <<= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SHIFT;

	write_csr(dd, SEND_STATIC_RATE_CONTROL, src);
}

static enum hrtimer_restart cca_timer_fn(struct hrtimer *t)
{
	struct cca_timer *cca_timer;
	struct hfi1_pportdata *ppd;
	int sl;
	u16 ccti_timer, ccti_min;
	struct cc_state *cc_state;
	unsigned long flags;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	cca_timer = container_of(t, struct cca_timer, hrtimer);
	ppd = cca_timer->ppd;
	sl = cca_timer->sl;

	rcu_read_lock();

	cc_state = get_cc_state(ppd);

	if (!cc_state) {
		rcu_read_unlock();
		return HRTIMER_NORESTART;
	}

	/*
	 * 1) decrement ccti for SL
	 * 2) calculate IPG for link (set_link_ipg())
	 * 3) restart timer, unless ccti is at min value
	 */

	ccti_min = cc_state->cong_setting.entries[sl].ccti_min;
	ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;

	spin_lock_irqsave(&ppd->cca_timer_lock, flags);

	if (cca_timer->ccti > ccti_min) {
		cca_timer->ccti--;
		set_link_ipg(ppd);
	}

	if (cca_timer->ccti > ccti_min) {
		unsigned long nsec = 1024 * ccti_timer;
		/* ccti_timer is in units of 1.024 usec */
		hrtimer_forward_now(t, ns_to_ktime(nsec));
		ret = HRTIMER_RESTART;
	}

	spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);
	rcu_read_unlock();
	return ret;
}

/*
 * Common code for initializing the physical port structure.
 */
void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
			 struct hfi1_devdata *dd, u8 hw_pidx, u8 port)
{
	int i;
	uint default_pkey_idx;
	struct cc_state *cc_state;

	ppd->dd = dd;
	ppd->hw_pidx = hw_pidx;
	ppd->port = port; /* IB port number, not index */
	ppd->prev_link_width = LINK_WIDTH_DEFAULT;
	/*
	 * There are C_VL_COUNT number of PortVLXmitWait counters.
	 * Adding 1 to C_VL_COUNT to include the PortXmitWait counter.
	 */
	for (i = 0; i < C_VL_COUNT + 1; i++) {
		ppd->port_vl_xmit_wait_last[i] = 0;
		ppd->vl_xmit_flit_cnt[i] = 0;
	}

	default_pkey_idx = 1;

	ppd->pkeys[default_pkey_idx] = DEFAULT_P_KEY;
	ppd->part_enforce |= HFI1_PART_ENFORCE_IN;

	if (loopback) {
		dd_dev_err(dd, "Faking data partition 0x8001 in idx %u\n",
			   !default_pkey_idx);
		ppd->pkeys[!default_pkey_idx] = 0x8001;
	}

	INIT_WORK(&ppd->link_vc_work, handle_verify_cap);
	INIT_WORK(&ppd->link_up_work, handle_link_up);
	INIT_WORK(&ppd->link_down_work, handle_link_down);
	INIT_WORK(&ppd->freeze_work, handle_freeze);
	INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade);
	INIT_WORK(&ppd->sma_message_work, handle_sma_message);
	INIT_WORK(&ppd->link_bounce_work, handle_link_bounce);
	INIT_DELAYED_WORK(&ppd->start_link_work, handle_start_link);
	INIT_WORK(&ppd->linkstate_active_work, receive_interrupt_work);
	INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event);

	mutex_init(&ppd->hls_lock);
	spin_lock_init(&ppd->qsfp_info.qsfp_lock);

	ppd->qsfp_info.ppd = ppd;
	ppd->sm_trap_qp = 0x0;
	ppd->sa_qp = 0x1;

	ppd->hfi1_wq = NULL;

	spin_lock_init(&ppd->cca_timer_lock);

	for (i = 0; i < OPA_MAX_SLS; i++) {
		hrtimer_init(&ppd->cca_timer[i].hrtimer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		ppd->cca_timer[i].ppd = ppd;
		ppd->cca_timer[i].sl = i;
		ppd->cca_timer[i].ccti = 0;
		ppd->cca_timer[i].hrtimer.function = cca_timer_fn;
	}

	ppd->cc_max_table_entries = IB_CC_TABLE_CAP_DEFAULT;

	spin_lock_init(&ppd->cc_state_lock);
	spin_lock_init(&ppd->cc_log_lock);
	cc_state = kzalloc(sizeof(*cc_state), GFP_KERNEL);
	RCU_INIT_POINTER(ppd->cc_state, cc_state);
	if (!cc_state)
		goto bail;
	return;

bail:
	dd_dev_err(dd, "Congestion Control Agent disabled for port %d\n", port);
}

/*
 * Do initialization for device that is only needed on
 * first detect, not on resets.
 */
static int loadtime_init(struct hfi1_devdata *dd)
{
	return 0;
}

/**
 * init_after_reset - re-initialize after a reset
 * @dd: the hfi1_ib device
 *
 * sanity check at least some of the values after reset, and
 * ensure no receive or transmit (explicitly, in case reset
 * failed)
 */
static int init_after_reset(struct hfi1_devdata *dd)
{
	int i;
	struct hfi1_ctxtdata *rcd;
	/*
	 * Ensure chip does no sends or receives, tail updates, or
	 * pioavail updates while we re-initialize.  This is mostly
	 * for the driver data structures, not chip registers.
	 */
	for (i = 0; i < dd->num_rcv_contexts; i++) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
			     HFI1_RCVCTRL_INTRAVAIL_DIS |
			     HFI1_RCVCTRL_TAILUPD_DIS, rcd);
		hfi1_rcd_put(rcd);
	}
	pio_send_control(dd, PSC_GLOBAL_DISABLE);
	for (i = 0; i < dd->num_send_contexts; i++)
		sc_disable(dd->send_contexts[i].sc);

	return 0;
}

static void enable_chip(struct hfi1_devdata *dd)
{
	struct hfi1_ctxtdata *rcd;
	u32 rcvmask;
	u16 i;

	/* enable PIO send */
	pio_send_control(dd, PSC_GLOBAL_ENABLE);

	/*
	 * Enable kernel ctxts' receive and receive interrupt.
	 * Other ctxts done as user opens and initializes them.
	 */
	for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (!rcd)
			continue;
		rcvmask = HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB;
		rcvmask |= HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ?
			HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
		if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
			rcvmask |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
		if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_RHQ_FULL))
			rcvmask |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
		if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_EGR_FULL))
			rcvmask |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
		if (HFI1_CAP_IS_KSET(TID_RDMA))
			rcvmask |= HFI1_RCVCTRL_TIDFLOW_ENB;
		hfi1_rcvctrl(dd, rcvmask, rcd);
		sc_enable(rcd->sc);
		hfi1_rcd_put(rcd);
	}
}

/**
 * create_workqueues - create per port workqueues
 * @dd: the hfi1_ib device
 */
static int create_workqueues(struct hfi1_devdata *dd)
{
	int pidx;
	struct hfi1_pportdata *ppd;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (!ppd->hfi1_wq) {
			ppd->hfi1_wq =
				alloc_workqueue(
					"hfi%d_%d",
					WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE,
					HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES,
					dd->unit, pidx);
			if (!ppd->hfi1_wq)
				goto wq_error;
		}
		if (!ppd->link_wq) {
			/*
			 * Make the link workqueue single-threaded to enforce
			 * serialization.
			 */
			ppd->link_wq =
				alloc_workqueue(
					"hfi_link_%d_%d",
					WQ_SYSFS | WQ_MEM_RECLAIM | WQ_UNBOUND,
					1, /* max_active */
					dd->unit, pidx);
			if (!ppd->link_wq)
				goto wq_error;
		}
	}
	return 0;
wq_error:
	pr_err("alloc_workqueue failed for port %d\n", pidx + 1);
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->hfi1_wq) {
			destroy_workqueue(ppd->hfi1_wq);
			ppd->hfi1_wq = NULL;
		}
		if (ppd->link_wq) {
			destroy_workqueue(ppd->link_wq);
			ppd->link_wq = NULL;
		}
	}
	return -ENOMEM;
}

/**
 * enable_general_intr() - Enable the IRQs that will be handled by the
 * general interrupt handler.
 * @dd: valid devdata
 *
 */
static void enable_general_intr(struct hfi1_devdata *dd)
{
	set_intr_bits(dd, CCE_ERR_INT, MISC_ERR_INT, true);
	set_intr_bits(dd, PIO_ERR_INT, TXE_ERR_INT, true);
	set_intr_bits(dd, IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END, true);
	set_intr_bits(dd, PBC_INT, GPIO_ASSERT_INT, true);
	set_intr_bits(dd, TCRIT_INT, TCRIT_INT, true);
	set_intr_bits(dd, IS_DC_START, IS_DC_END, true);
	set_intr_bits(dd, IS_SENDCREDIT_START, IS_SENDCREDIT_END, true);
}

/**
 * hfi1_init - do the actual initialization sequence on the chip
 * @dd: the hfi1_ib device
 * @reinit: re-initializing, so don't allocate new memory
 *
 * Do the actual initialization sequence on the chip.  This is done
 * both from the init routine called from the PCI infrastructure, and
 * when we reset the chip, or detect that it was reset internally,
 * or it's administratively re-enabled.
 *
 * Memory allocation here and in called routines is only done in
 * the first case (reinit == 0).  We have to be careful, because even
 * without memory allocation, we need to re-write all the chip registers
 * TIDs, etc. after the reset or enable has completed.
 */
int hfi1_init(struct hfi1_devdata *dd, int reinit)
{
	int ret = 0, pidx, lastfail = 0;
	unsigned long len;
	u16 i;
	struct hfi1_ctxtdata *rcd;
	struct hfi1_pportdata *ppd;

	/* Set up send low level handlers */
	dd->process_pio_send = hfi1_verbs_send_pio;
	dd->process_dma_send = hfi1_verbs_send_dma;
	dd->pio_inline_send = pio_copy;
	dd->process_vnic_dma_send = hfi1_vnic_send_dma;

	if (is_ax(dd)) {
		atomic_set(&dd->drop_packet, DROP_PACKET_ON);
		dd->do_drop = 1;
	} else {
		atomic_set(&dd->drop_packet, DROP_PACKET_OFF);
		dd->do_drop = 0;
	}

	/* make sure the link is not "up" */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		ppd->linkup = 0;
	}

	if (reinit)
		ret = init_after_reset(dd);
	else
		ret = loadtime_init(dd);
	if (ret)
		goto done;

	/* allocate dummy tail memory for all receive contexts */
	dd->rcvhdrtail_dummy_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
							 sizeof(u64),
							 &dd->rcvhdrtail_dummy_dma,
							 GFP_KERNEL);

	if (!dd->rcvhdrtail_dummy_kvaddr) {
		dd_dev_err(dd, "cannot allocate dummy tail memory\n");
		ret = -ENOMEM;
		goto done;
	}

	/* dd->rcd can be NULL if early initialization failed */
	for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i) {
		/*
		 * Set up the (kernel) rcvhdr queue and egr TIDs.  If doing
		 * re-init, the simplest way to handle this is to free
		 * existing, and re-allocate.
		 * Need to re-create rest of ctxt 0 ctxtdata as well.
		 */
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (!rcd)
			continue;

		rcd->do_interrupt = &handle_receive_interrupt;

		lastfail = hfi1_create_rcvhdrq(dd, rcd);
		if (!lastfail)
			lastfail = hfi1_setup_eagerbufs(rcd);
		if (!lastfail)
			lastfail = hfi1_kern_exp_rcv_init(rcd, reinit);
		if (lastfail) {
			dd_dev_err(dd,
				   "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
			ret = lastfail;
		}
		/* enable IRQ */
		hfi1_rcd_put(rcd);
	}

	/* Allocate enough memory for user event notification. */
	len = PAGE_ALIGN(chip_rcv_contexts(dd) * HFI1_MAX_SHARED_CTXTS *
			 sizeof(*dd->events));
	dd->events = vmalloc_user(len);
	if (!dd->events)
		dd_dev_err(dd, "Failed to allocate user events page\n");
	/*
	 * Allocate a page for device and port status.
	 * Page will be shared amongst all user processes.
	 */
	dd->status = vmalloc_user(PAGE_SIZE);
	if (!dd->status)
		dd_dev_err(dd, "Failed to allocate dev status page\n");
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (dd->status)
			/* Currently, we only have one port */
			ppd->statusp = &dd->status->port;

		set_mtu(ppd);
	}

	/* enable chip even if we have an error, so we can debug cause */
	enable_chip(dd);

done:
	/*
	 * Set status even if port serdes is not initialized
	 * so that diags will work.
	 */
	if (dd->status)
		dd->status->dev |= HFI1_STATUS_CHIP_PRESENT |
			HFI1_STATUS_INITTED;
	if (!ret) {
		/* enable all interrupts from the chip */
		enable_general_intr(dd);
		init_qsfp_int(dd);

		/* chip is OK for user apps; mark it as initialized */
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;

			/*
			 * start the serdes - must be after interrupts are
			 * enabled so we are notified when the link goes up
			 */
			lastfail = bringup_serdes(ppd);
			if (lastfail)
				dd_dev_info(dd,
					    "Failed to bring up port %u\n",
					    ppd->port);

			/*
			 * Set status even if port serdes is not initialized
			 * so that diags will work.
			 */
			if (ppd->statusp)
				*ppd->statusp |= HFI1_STATUS_CHIP_PRESENT |
					HFI1_STATUS_INITTED;
			if (!ppd->link_speed_enabled)
				continue;
		}
	}

	/* if ret is non-zero, we probably should do some cleanup here... */
	return ret;
}

static inline struct hfi1_devdata *__hfi1_lookup(int unit)
{
	return idr_find(&hfi1_unit_table, unit);
}

struct hfi1_devdata *hfi1_lookup(int unit)
{
	struct hfi1_devdata *dd;
	unsigned long flags;

	spin_lock_irqsave(&hfi1_devs_lock, flags);
	dd = __hfi1_lookup(unit);
	spin_unlock_irqrestore(&hfi1_devs_lock, flags);

	return dd;
}

/*
 * Stop the timers during unit shutdown, or after an error late
 * in initialization.
1036f48ad614SDennis Dalessandro */ 1037f48ad614SDennis Dalessandro static void stop_timers(struct hfi1_devdata *dd) 1038f48ad614SDennis Dalessandro { 1039f48ad614SDennis Dalessandro struct hfi1_pportdata *ppd; 1040f48ad614SDennis Dalessandro int pidx; 1041f48ad614SDennis Dalessandro 1042f48ad614SDennis Dalessandro for (pidx = 0; pidx < dd->num_pports; ++pidx) { 1043f48ad614SDennis Dalessandro ppd = dd->pport + pidx; 10448064135eSKees Cook if (ppd->led_override_timer.function) { 1045f48ad614SDennis Dalessandro del_timer_sync(&ppd->led_override_timer); 1046f48ad614SDennis Dalessandro atomic_set(&ppd->led_override_timer_active, 0); 1047f48ad614SDennis Dalessandro } 1048f48ad614SDennis Dalessandro } 1049f48ad614SDennis Dalessandro } 1050f48ad614SDennis Dalessandro 1051f48ad614SDennis Dalessandro /** 1052f48ad614SDennis Dalessandro * shutdown_device - shut down a device 1053f48ad614SDennis Dalessandro * @dd: the hfi1_ib device 1054f48ad614SDennis Dalessandro * 1055f48ad614SDennis Dalessandro * This is called to make the device quiet when we are about to 1056f48ad614SDennis Dalessandro * unload the driver, and also when the device is administratively 1057f48ad614SDennis Dalessandro * disabled. It does not free any data structures. 1058f48ad614SDennis Dalessandro * Everything it does has to be setup again by hfi1_init(dd, 1) 1059f48ad614SDennis Dalessandro */ 1060f48ad614SDennis Dalessandro static void shutdown_device(struct hfi1_devdata *dd) 1061f48ad614SDennis Dalessandro { 1062f48ad614SDennis Dalessandro struct hfi1_pportdata *ppd; 1063d295dbebSMichael J. Ruhl struct hfi1_ctxtdata *rcd; 1064f48ad614SDennis Dalessandro unsigned pidx; 1065f48ad614SDennis Dalessandro int i; 1066f48ad614SDennis Dalessandro 10678d3e7113SAlex Estrin if (dd->flags & HFI1_SHUTDOWN) 10688d3e7113SAlex Estrin return; 10698d3e7113SAlex Estrin dd->flags |= HFI1_SHUTDOWN; 10708d3e7113SAlex Estrin 1071f48ad614SDennis Dalessandro for (pidx = 0; pidx < dd->num_pports; ++pidx) { 1072f48ad614SDennis Dalessandro ppd = dd->pport + pidx; 1073f48ad614SDennis Dalessandro 1074f48ad614SDennis Dalessandro ppd->linkup = 0; 1075f48ad614SDennis Dalessandro if (ppd->statusp) 1076f48ad614SDennis Dalessandro *ppd->statusp &= ~(HFI1_STATUS_IB_CONF | 1077f48ad614SDennis Dalessandro HFI1_STATUS_IB_READY); 1078f48ad614SDennis Dalessandro } 1079f48ad614SDennis Dalessandro dd->flags &= ~HFI1_INITTED; 1080f48ad614SDennis Dalessandro 1081a2f7bbdcSMichael J. Ruhl /* mask and clean up interrupts */ 1082a2f7bbdcSMichael J. Ruhl set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false); 10836eb4eb10SMichael J. Ruhl msix_clean_up_interrupts(dd); 1084f48ad614SDennis Dalessandro 1085f48ad614SDennis Dalessandro for (pidx = 0; pidx < dd->num_pports; ++pidx) { 1086f48ad614SDennis Dalessandro ppd = dd->pport + pidx; 1087d295dbebSMichael J. Ruhl for (i = 0; i < dd->num_rcv_contexts; i++) { 1088d295dbebSMichael J. Ruhl rcd = hfi1_rcd_get_by_index(dd, i); 1089f48ad614SDennis Dalessandro hfi1_rcvctrl(dd, HFI1_RCVCTRL_TAILUPD_DIS | 1090f48ad614SDennis Dalessandro HFI1_RCVCTRL_CTXT_DIS | 1091f48ad614SDennis Dalessandro HFI1_RCVCTRL_INTRAVAIL_DIS | 1092f48ad614SDennis Dalessandro HFI1_RCVCTRL_PKEY_DIS | 1093d295dbebSMichael J. Ruhl HFI1_RCVCTRL_ONE_PKT_EGR_DIS, rcd); 1094d295dbebSMichael J. Ruhl hfi1_rcd_put(rcd); 1095d295dbebSMichael J. Ruhl } 1096f48ad614SDennis Dalessandro /* 1097f48ad614SDennis Dalessandro * Gracefully stop all sends allowing any in progress to 1098f48ad614SDennis Dalessandro * trickle out first. 
1099f48ad614SDennis Dalessandro */ 1100f48ad614SDennis Dalessandro for (i = 0; i < dd->num_send_contexts; i++) 1101f48ad614SDennis Dalessandro sc_flush(dd->send_contexts[i].sc); 1102f48ad614SDennis Dalessandro } 1103f48ad614SDennis Dalessandro 1104f48ad614SDennis Dalessandro /* 1105f48ad614SDennis Dalessandro * Enough for anything that's going to trickle out to have actually 1106f48ad614SDennis Dalessandro * done so. 1107f48ad614SDennis Dalessandro */ 1108f48ad614SDennis Dalessandro udelay(20); 1109f48ad614SDennis Dalessandro 1110f48ad614SDennis Dalessandro for (pidx = 0; pidx < dd->num_pports; ++pidx) { 1111f48ad614SDennis Dalessandro ppd = dd->pport + pidx; 1112f48ad614SDennis Dalessandro 1113f48ad614SDennis Dalessandro /* disable all contexts */ 1114f48ad614SDennis Dalessandro for (i = 0; i < dd->num_send_contexts; i++) 1115f48ad614SDennis Dalessandro sc_disable(dd->send_contexts[i].sc); 1116f48ad614SDennis Dalessandro /* disable the send device */ 1117f48ad614SDennis Dalessandro pio_send_control(dd, PSC_GLOBAL_DISABLE); 1118f48ad614SDennis Dalessandro 1119f48ad614SDennis Dalessandro shutdown_led_override(ppd); 1120f48ad614SDennis Dalessandro 1121f48ad614SDennis Dalessandro /* 1122f48ad614SDennis Dalessandro * Clear SerdesEnable. 1123f48ad614SDennis Dalessandro * We can't count on interrupts since we are stopping. 1124f48ad614SDennis Dalessandro */ 1125f48ad614SDennis Dalessandro hfi1_quiet_serdes(ppd); 1126f48ad614SDennis Dalessandro 1127f48ad614SDennis Dalessandro if (ppd->hfi1_wq) { 1128f48ad614SDennis Dalessandro destroy_workqueue(ppd->hfi1_wq); 1129f48ad614SDennis Dalessandro ppd->hfi1_wq = NULL; 1130f48ad614SDennis Dalessandro } 113171d47008SSebastian Sanchez if (ppd->link_wq) { 113271d47008SSebastian Sanchez destroy_workqueue(ppd->link_wq); 113371d47008SSebastian Sanchez ppd->link_wq = NULL; 113471d47008SSebastian Sanchez } 1135f48ad614SDennis Dalessandro } 1136f48ad614SDennis Dalessandro sdma_exit(dd); 1137f48ad614SDennis Dalessandro } 1138f48ad614SDennis Dalessandro 1139f48ad614SDennis Dalessandro /** 1140f48ad614SDennis Dalessandro * hfi1_free_ctxtdata - free a context's allocated data 1141f48ad614SDennis Dalessandro * @dd: the hfi1_ib device 1142f48ad614SDennis Dalessandro * @rcd: the ctxtdata structure 1143f48ad614SDennis Dalessandro * 1144f48ad614SDennis Dalessandro * free up any allocated data for a context 1145f48ad614SDennis Dalessandro * It should never change any chip state, or global driver state. 1146f48ad614SDennis Dalessandro */ 1147f48ad614SDennis Dalessandro void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) 1148f48ad614SDennis Dalessandro { 1149f683c80cSMichael J. 
Ruhl u32 e; 1150f48ad614SDennis Dalessandro 1151f48ad614SDennis Dalessandro if (!rcd) 1152f48ad614SDennis Dalessandro return; 1153f48ad614SDennis Dalessandro 1154f48ad614SDennis Dalessandro if (rcd->rcvhdrq) { 1155b2578431SMike Marciniszyn dma_free_coherent(&dd->pcidev->dev, rcvhdrq_size(rcd), 115660368186STymoteusz Kielan rcd->rcvhdrq, rcd->rcvhdrq_dma); 1157f48ad614SDennis Dalessandro rcd->rcvhdrq = NULL; 1158f48ad614SDennis Dalessandro if (rcd->rcvhdrtail_kvaddr) { 1159f48ad614SDennis Dalessandro dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE, 1160f48ad614SDennis Dalessandro (void *)rcd->rcvhdrtail_kvaddr, 116160368186STymoteusz Kielan rcd->rcvhdrqtailaddr_dma); 1162f48ad614SDennis Dalessandro rcd->rcvhdrtail_kvaddr = NULL; 1163f48ad614SDennis Dalessandro } 1164f48ad614SDennis Dalessandro } 1165f48ad614SDennis Dalessandro 1166f48ad614SDennis Dalessandro /* all the RcvArray entries should have been cleared by now */ 1167f48ad614SDennis Dalessandro kfree(rcd->egrbufs.rcvtids); 1168f683c80cSMichael J. Ruhl rcd->egrbufs.rcvtids = NULL; 1169f48ad614SDennis Dalessandro 1170f48ad614SDennis Dalessandro for (e = 0; e < rcd->egrbufs.alloced; e++) { 117160368186STymoteusz Kielan if (rcd->egrbufs.buffers[e].dma) 1172f48ad614SDennis Dalessandro dma_free_coherent(&dd->pcidev->dev, 1173f48ad614SDennis Dalessandro rcd->egrbufs.buffers[e].len, 1174f48ad614SDennis Dalessandro rcd->egrbufs.buffers[e].addr, 117560368186STymoteusz Kielan rcd->egrbufs.buffers[e].dma); 1176f48ad614SDennis Dalessandro } 1177f48ad614SDennis Dalessandro kfree(rcd->egrbufs.buffers); 1178f683c80cSMichael J. Ruhl rcd->egrbufs.alloced = 0; 1179f683c80cSMichael J. Ruhl rcd->egrbufs.buffers = NULL; 1180f48ad614SDennis Dalessandro 1181f48ad614SDennis Dalessandro sc_free(rcd->sc); 1182f683c80cSMichael J. Ruhl rcd->sc = NULL; 1183f683c80cSMichael J. Ruhl 1184f48ad614SDennis Dalessandro vfree(rcd->subctxt_uregbase); 1185f48ad614SDennis Dalessandro vfree(rcd->subctxt_rcvegrbuf); 1186f48ad614SDennis Dalessandro vfree(rcd->subctxt_rcvhdr_base); 1187f48ad614SDennis Dalessandro kfree(rcd->opstats); 1188f683c80cSMichael J. Ruhl 1189f683c80cSMichael J. Ruhl rcd->subctxt_uregbase = NULL; 1190f683c80cSMichael J. Ruhl rcd->subctxt_rcvegrbuf = NULL; 1191f683c80cSMichael J. Ruhl rcd->subctxt_rcvhdr_base = NULL; 1192f683c80cSMichael J. Ruhl rcd->opstats = NULL; 1193f48ad614SDennis Dalessandro } 1194f48ad614SDennis Dalessandro 1195f48ad614SDennis Dalessandro /* 1196f48ad614SDennis Dalessandro * Release our hold on the shared asic data. If we are the last one, 1197dba715f0SDean Luick * return the structure to be finalized outside the lock. Must be 1198dba715f0SDean Luick * holding hfi1_devs_lock. 1199f48ad614SDennis Dalessandro */ 1200dba715f0SDean Luick static struct hfi1_asic_data *release_asic_data(struct hfi1_devdata *dd) 1201f48ad614SDennis Dalessandro { 1202dba715f0SDean Luick struct hfi1_asic_data *ad; 1203f48ad614SDennis Dalessandro int other; 1204f48ad614SDennis Dalessandro 1205f48ad614SDennis Dalessandro if (!dd->asic_data) 1206dba715f0SDean Luick return NULL; 1207f48ad614SDennis Dalessandro dd->asic_data->dds[dd->hfi1_id] = NULL; 1208f48ad614SDennis Dalessandro other = dd->hfi1_id ? 0 : 1; 1209dba715f0SDean Luick ad = dd->asic_data; 1210f48ad614SDennis Dalessandro dd->asic_data = NULL; 1211dba715f0SDean Luick /* return NULL if the other dd still has a link */ 1212dba715f0SDean Luick return ad->dds[other] ? 
NULL : ad; 1213dba715f0SDean Luick } 1214dba715f0SDean Luick 1215dba715f0SDean Luick static void finalize_asic_data(struct hfi1_devdata *dd, 1216dba715f0SDean Luick struct hfi1_asic_data *ad) 1217dba715f0SDean Luick { 1218dba715f0SDean Luick clean_up_i2c(dd, ad); 1219dba715f0SDean Luick kfree(ad); 1220f48ad614SDennis Dalessandro } 1221f48ad614SDennis Dalessandro 1222e9777ad4SSebastian Sanchez /** 1223e9777ad4SSebastian Sanchez * hfi1_clean_devdata - cleans up per-unit data structure 1224e9777ad4SSebastian Sanchez * @dd: pointer to a valid devdata structure 1225e9777ad4SSebastian Sanchez * 1226e9777ad4SSebastian Sanchez * It cleans up all data structures set up by 1227e9777ad4SSebastian Sanchez * by hfi1_alloc_devdata(). 1228e9777ad4SSebastian Sanchez */ 1229e9777ad4SSebastian Sanchez static void hfi1_clean_devdata(struct hfi1_devdata *dd) 1230f48ad614SDennis Dalessandro { 1231dba715f0SDean Luick struct hfi1_asic_data *ad; 1232f48ad614SDennis Dalessandro unsigned long flags; 1233f48ad614SDennis Dalessandro 1234f48ad614SDennis Dalessandro spin_lock_irqsave(&hfi1_devs_lock, flags); 1235e9777ad4SSebastian Sanchez if (!list_empty(&dd->list)) { 1236f48ad614SDennis Dalessandro idr_remove(&hfi1_unit_table, dd->unit); 1237e9777ad4SSebastian Sanchez list_del_init(&dd->list); 1238e9777ad4SSebastian Sanchez } 1239dba715f0SDean Luick ad = release_asic_data(dd); 1240f48ad614SDennis Dalessandro spin_unlock_irqrestore(&hfi1_devs_lock, flags); 1241e9777ad4SSebastian Sanchez 1242dba715f0SDean Luick finalize_asic_data(dd, ad); 1243f48ad614SDennis Dalessandro free_platform_config(dd); 1244f48ad614SDennis Dalessandro rcu_barrier(); /* wait for rcu callbacks to complete */ 1245f48ad614SDennis Dalessandro free_percpu(dd->int_counter); 1246f48ad614SDennis Dalessandro free_percpu(dd->rcv_limit); 1247f48ad614SDennis Dalessandro free_percpu(dd->send_schedule); 12481b311f89SMike Marciniszyn free_percpu(dd->tx_opstats); 1249e9777ad4SSebastian Sanchez dd->int_counter = NULL; 1250e9777ad4SSebastian Sanchez dd->rcv_limit = NULL; 1251e9777ad4SSebastian Sanchez dd->send_schedule = NULL; 1252e9777ad4SSebastian Sanchez dd->tx_opstats = NULL; 12535d18ee67SSebastian Sanchez kfree(dd->comp_vect); 12545d18ee67SSebastian Sanchez dd->comp_vect = NULL; 1255473291b3SAlex Estrin sdma_clean(dd, dd->num_sdma); 1256f48ad614SDennis Dalessandro rvt_dealloc_device(&dd->verbs_dev.rdi); 1257f48ad614SDennis Dalessandro } 1258f48ad614SDennis Dalessandro 1259e9777ad4SSebastian Sanchez static void __hfi1_free_devdata(struct kobject *kobj) 1260e9777ad4SSebastian Sanchez { 1261e9777ad4SSebastian Sanchez struct hfi1_devdata *dd = 1262e9777ad4SSebastian Sanchez container_of(kobj, struct hfi1_devdata, kobj); 1263e9777ad4SSebastian Sanchez 1264e9777ad4SSebastian Sanchez hfi1_clean_devdata(dd); 1265e9777ad4SSebastian Sanchez } 1266e9777ad4SSebastian Sanchez 1267f48ad614SDennis Dalessandro static struct kobj_type hfi1_devdata_type = { 1268f48ad614SDennis Dalessandro .release = __hfi1_free_devdata, 1269f48ad614SDennis Dalessandro }; 1270f48ad614SDennis Dalessandro 1271f48ad614SDennis Dalessandro void hfi1_free_devdata(struct hfi1_devdata *dd) 1272f48ad614SDennis Dalessandro { 1273f48ad614SDennis Dalessandro kobject_put(&dd->kobj); 1274f48ad614SDennis Dalessandro } 1275f48ad614SDennis Dalessandro 127657f97e96SMichael J. Ruhl /** 127757f97e96SMichael J. Ruhl * hfi1_alloc_devdata - Allocate our primary per-unit data structure. 127857f97e96SMichael J. Ruhl * @pdev: Valid PCI device 127957f97e96SMichael J. 
Ruhl * @extra: How many bytes to alloc past the default 128057f97e96SMichael J. Ruhl * 128157f97e96SMichael J. Ruhl * Must be done via verbs allocator, because the verbs cleanup process 128257f97e96SMichael J. Ruhl * both does cleanup and free of the data structure. 1283f48ad614SDennis Dalessandro * "extra" is for chip-specific data. 1284f48ad614SDennis Dalessandro * 1285f48ad614SDennis Dalessandro * Use the idr mechanism to get a unit number for this unit. 1286f48ad614SDennis Dalessandro */ 128757f97e96SMichael J. Ruhl static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, 128857f97e96SMichael J. Ruhl size_t extra) 1289f48ad614SDennis Dalessandro { 1290f48ad614SDennis Dalessandro unsigned long flags; 1291f48ad614SDennis Dalessandro struct hfi1_devdata *dd; 1292f48ad614SDennis Dalessandro int ret, nports; 1293f48ad614SDennis Dalessandro 1294f48ad614SDennis Dalessandro /* extra is * number of ports */ 1295f48ad614SDennis Dalessandro nports = extra / sizeof(struct hfi1_pportdata); 1296f48ad614SDennis Dalessandro 1297f48ad614SDennis Dalessandro dd = (struct hfi1_devdata *)rvt_alloc_device(sizeof(*dd) + extra, 1298f48ad614SDennis Dalessandro nports); 1299f48ad614SDennis Dalessandro if (!dd) 1300f48ad614SDennis Dalessandro return ERR_PTR(-ENOMEM); 1301f48ad614SDennis Dalessandro dd->num_pports = nports; 1302f48ad614SDennis Dalessandro dd->pport = (struct hfi1_pportdata *)(dd + 1); 130345d92457SSebastian Sanchez dd->pcidev = pdev; 130445d92457SSebastian Sanchez pci_set_drvdata(pdev, dd); 1305f48ad614SDennis Dalessandro 1306f48ad614SDennis Dalessandro INIT_LIST_HEAD(&dd->list); 1307f48ad614SDennis Dalessandro idr_preload(GFP_KERNEL); 1308f48ad614SDennis Dalessandro spin_lock_irqsave(&hfi1_devs_lock, flags); 1309f48ad614SDennis Dalessandro 1310f48ad614SDennis Dalessandro ret = idr_alloc(&hfi1_unit_table, dd, 0, 0, GFP_NOWAIT); 1311f48ad614SDennis Dalessandro if (ret >= 0) { 1312f48ad614SDennis Dalessandro dd->unit = ret; 1313f48ad614SDennis Dalessandro list_add(&dd->list, &hfi1_dev_list); 1314f48ad614SDennis Dalessandro } 13155d18ee67SSebastian Sanchez dd->node = -1; 1316f48ad614SDennis Dalessandro 1317f48ad614SDennis Dalessandro spin_unlock_irqrestore(&hfi1_devs_lock, flags); 1318f48ad614SDennis Dalessandro idr_preload_end(); 1319f48ad614SDennis Dalessandro 1320f48ad614SDennis Dalessandro if (ret < 0) { 132157f97e96SMichael J. Ruhl dev_err(&pdev->dev, 1322f48ad614SDennis Dalessandro "Could not allocate unit ID: error %d\n", -ret); 1323f48ad614SDennis Dalessandro goto bail; 1324f48ad614SDennis Dalessandro } 13255084c8ffSMichael J. Ruhl rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s_%d", class_name(), dd->unit); 13265084c8ffSMichael J. Ruhl 1327f48ad614SDennis Dalessandro /* 1328f48ad614SDennis Dalessandro * Initialize all locks for the device. This needs to be as early as 1329f48ad614SDennis Dalessandro * possible so locks are usable. 
1330f48ad614SDennis Dalessandro */ 1331f48ad614SDennis Dalessandro spin_lock_init(&dd->sc_lock); 1332f48ad614SDennis Dalessandro spin_lock_init(&dd->sendctrl_lock); 1333f48ad614SDennis Dalessandro spin_lock_init(&dd->rcvctrl_lock); 1334f48ad614SDennis Dalessandro spin_lock_init(&dd->uctxt_lock); 1335f48ad614SDennis Dalessandro spin_lock_init(&dd->hfi1_diag_trans_lock); 1336f48ad614SDennis Dalessandro spin_lock_init(&dd->sc_init_lock); 1337f48ad614SDennis Dalessandro spin_lock_init(&dd->dc8051_memlock); 1338f48ad614SDennis Dalessandro seqlock_init(&dd->sc2vl_lock); 1339f48ad614SDennis Dalessandro spin_lock_init(&dd->sde_map_lock); 1340f48ad614SDennis Dalessandro spin_lock_init(&dd->pio_map_lock); 134122546b74STadeusz Struk mutex_init(&dd->dc8051_lock); 1342f48ad614SDennis Dalessandro init_waitqueue_head(&dd->event_queue); 1343a2f7bbdcSMichael J. Ruhl spin_lock_init(&dd->irq_src_lock); 1344f48ad614SDennis Dalessandro 1345f48ad614SDennis Dalessandro dd->int_counter = alloc_percpu(u64); 1346f48ad614SDennis Dalessandro if (!dd->int_counter) { 1347f48ad614SDennis Dalessandro ret = -ENOMEM; 1348f48ad614SDennis Dalessandro goto bail; 1349f48ad614SDennis Dalessandro } 1350f48ad614SDennis Dalessandro 1351f48ad614SDennis Dalessandro dd->rcv_limit = alloc_percpu(u64); 1352f48ad614SDennis Dalessandro if (!dd->rcv_limit) { 1353f48ad614SDennis Dalessandro ret = -ENOMEM; 1354f48ad614SDennis Dalessandro goto bail; 1355f48ad614SDennis Dalessandro } 1356f48ad614SDennis Dalessandro 1357f48ad614SDennis Dalessandro dd->send_schedule = alloc_percpu(u64); 1358f48ad614SDennis Dalessandro if (!dd->send_schedule) { 1359f48ad614SDennis Dalessandro ret = -ENOMEM; 1360f48ad614SDennis Dalessandro goto bail; 1361f48ad614SDennis Dalessandro } 1362f48ad614SDennis Dalessandro 13631b311f89SMike Marciniszyn dd->tx_opstats = alloc_percpu(struct hfi1_opcode_stats_perctx); 13641b311f89SMike Marciniszyn if (!dd->tx_opstats) { 13651b311f89SMike Marciniszyn ret = -ENOMEM; 13661b311f89SMike Marciniszyn goto bail; 13671b311f89SMike Marciniszyn } 13681b311f89SMike Marciniszyn 13695d18ee67SSebastian Sanchez dd->comp_vect = kzalloc(sizeof(*dd->comp_vect), GFP_KERNEL); 13705d18ee67SSebastian Sanchez if (!dd->comp_vect) { 13715d18ee67SSebastian Sanchez ret = -ENOMEM; 13725d18ee67SSebastian Sanchez goto bail; 13735d18ee67SSebastian Sanchez } 13745d18ee67SSebastian Sanchez 1375f48ad614SDennis Dalessandro kobject_init(&dd->kobj, &hfi1_devdata_type); 1376f48ad614SDennis Dalessandro return dd; 1377f48ad614SDennis Dalessandro 1378f48ad614SDennis Dalessandro bail: 1379e9777ad4SSebastian Sanchez hfi1_clean_devdata(dd); 1380f48ad614SDennis Dalessandro return ERR_PTR(ret); 1381f48ad614SDennis Dalessandro } 1382f48ad614SDennis Dalessandro 1383f48ad614SDennis Dalessandro /* 1384f48ad614SDennis Dalessandro * Called from freeze mode handlers, and from PCI error 1385f48ad614SDennis Dalessandro * reporting code. Should be paranoid about state of 1386f48ad614SDennis Dalessandro * system and data structures. 
1387f48ad614SDennis Dalessandro */ 1388f48ad614SDennis Dalessandro void hfi1_disable_after_error(struct hfi1_devdata *dd) 1389f48ad614SDennis Dalessandro { 1390f48ad614SDennis Dalessandro if (dd->flags & HFI1_INITTED) { 1391f48ad614SDennis Dalessandro u32 pidx; 1392f48ad614SDennis Dalessandro 1393f48ad614SDennis Dalessandro dd->flags &= ~HFI1_INITTED; 1394f48ad614SDennis Dalessandro if (dd->pport) 1395f48ad614SDennis Dalessandro for (pidx = 0; pidx < dd->num_pports; ++pidx) { 1396f48ad614SDennis Dalessandro struct hfi1_pportdata *ppd; 1397f48ad614SDennis Dalessandro 1398f48ad614SDennis Dalessandro ppd = dd->pport + pidx; 1399f48ad614SDennis Dalessandro if (dd->flags & HFI1_PRESENT) 1400f48ad614SDennis Dalessandro set_link_state(ppd, HLS_DN_DISABLE); 1401f48ad614SDennis Dalessandro 1402f48ad614SDennis Dalessandro if (ppd->statusp) 1403f48ad614SDennis Dalessandro *ppd->statusp &= ~HFI1_STATUS_IB_READY; 1404f48ad614SDennis Dalessandro } 1405f48ad614SDennis Dalessandro } 1406f48ad614SDennis Dalessandro 1407f48ad614SDennis Dalessandro /* 1408f48ad614SDennis Dalessandro * Mark as having had an error for driver, and also 1409f48ad614SDennis Dalessandro * for /sys and status word mapped to user programs. 1410f48ad614SDennis Dalessandro * This marks unit as not usable, until reset. 1411f48ad614SDennis Dalessandro */ 1412f48ad614SDennis Dalessandro if (dd->status) 1413f48ad614SDennis Dalessandro dd->status->dev |= HFI1_STATUS_HWERROR; 1414f48ad614SDennis Dalessandro } 1415f48ad614SDennis Dalessandro 1416f48ad614SDennis Dalessandro static void remove_one(struct pci_dev *); 1417f48ad614SDennis Dalessandro static int init_one(struct pci_dev *, const struct pci_device_id *); 14188d3e7113SAlex Estrin static void shutdown_one(struct pci_dev *); 1419f48ad614SDennis Dalessandro 1420f48ad614SDennis Dalessandro #define DRIVER_LOAD_MSG "Intel " DRIVER_NAME " loaded: " 1421f48ad614SDennis Dalessandro #define PFX DRIVER_NAME ": " 1422f48ad614SDennis Dalessandro 1423d6373019SSebastian Sanchez const struct pci_device_id hfi1_pci_tbl[] = { 1424f48ad614SDennis Dalessandro { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL0) }, 1425f48ad614SDennis Dalessandro { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL1) }, 1426f48ad614SDennis Dalessandro { 0, } 1427f48ad614SDennis Dalessandro }; 1428f48ad614SDennis Dalessandro 1429f48ad614SDennis Dalessandro MODULE_DEVICE_TABLE(pci, hfi1_pci_tbl); 1430f48ad614SDennis Dalessandro 1431f48ad614SDennis Dalessandro static struct pci_driver hfi1_pci_driver = { 1432f48ad614SDennis Dalessandro .name = DRIVER_NAME, 1433f48ad614SDennis Dalessandro .probe = init_one, 1434f48ad614SDennis Dalessandro .remove = remove_one, 14358d3e7113SAlex Estrin .shutdown = shutdown_one, 1436f48ad614SDennis Dalessandro .id_table = hfi1_pci_tbl, 1437f48ad614SDennis Dalessandro .err_handler = &hfi1_pci_err_handler, 1438f48ad614SDennis Dalessandro }; 1439f48ad614SDennis Dalessandro 1440f48ad614SDennis Dalessandro static void __init compute_krcvqs(void) 1441f48ad614SDennis Dalessandro { 1442f48ad614SDennis Dalessandro int i; 1443f48ad614SDennis Dalessandro 1444f48ad614SDennis Dalessandro for (i = 0; i < krcvqsset; i++) 1445f48ad614SDennis Dalessandro n_krcvqs += krcvqs[i]; 1446f48ad614SDennis Dalessandro } 1447f48ad614SDennis Dalessandro 1448f48ad614SDennis Dalessandro /* 1449f48ad614SDennis Dalessandro * Do all the generic driver unit- and chip-independent memory 1450f48ad614SDennis Dalessandro * allocation and initialization. 
1451f48ad614SDennis Dalessandro */ 1452f48ad614SDennis Dalessandro static int __init hfi1_mod_init(void) 1453f48ad614SDennis Dalessandro { 1454f48ad614SDennis Dalessandro int ret; 1455f48ad614SDennis Dalessandro 1456f48ad614SDennis Dalessandro ret = dev_init(); 1457f48ad614SDennis Dalessandro if (ret) 1458f48ad614SDennis Dalessandro goto bail; 1459f48ad614SDennis Dalessandro 1460d6373019SSebastian Sanchez ret = node_affinity_init(); 1461d6373019SSebastian Sanchez if (ret) 1462d6373019SSebastian Sanchez goto bail; 14634197344bSDennis Dalessandro 1464f48ad614SDennis Dalessandro /* validate max MTU before any devices start */ 1465f48ad614SDennis Dalessandro if (!valid_opa_max_mtu(hfi1_max_mtu)) { 1466f48ad614SDennis Dalessandro pr_err("Invalid max_mtu 0x%x, using 0x%x instead\n", 1467f48ad614SDennis Dalessandro hfi1_max_mtu, HFI1_DEFAULT_MAX_MTU); 1468f48ad614SDennis Dalessandro hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU; 1469f48ad614SDennis Dalessandro } 1470f48ad614SDennis Dalessandro /* valid CUs run from 1-128 in powers of 2 */ 1471f48ad614SDennis Dalessandro if (hfi1_cu > 128 || !is_power_of_2(hfi1_cu)) 1472f48ad614SDennis Dalessandro hfi1_cu = 1; 1473f48ad614SDennis Dalessandro /* valid credit return threshold is 0-100, variable is unsigned */ 1474f48ad614SDennis Dalessandro if (user_credit_return_threshold > 100) 1475f48ad614SDennis Dalessandro user_credit_return_threshold = 100; 1476f48ad614SDennis Dalessandro 1477f48ad614SDennis Dalessandro compute_krcvqs(); 1478f48ad614SDennis Dalessandro /* 1479f48ad614SDennis Dalessandro * sanitize receive interrupt count, time must wait until after 1480f48ad614SDennis Dalessandro * the hardware type is known 1481f48ad614SDennis Dalessandro */ 1482f48ad614SDennis Dalessandro if (rcv_intr_count > RCV_HDR_HEAD_COUNTER_MASK) 1483f48ad614SDennis Dalessandro rcv_intr_count = RCV_HDR_HEAD_COUNTER_MASK; 1484f48ad614SDennis Dalessandro /* reject invalid combinations */ 1485f48ad614SDennis Dalessandro if (rcv_intr_count == 0 && rcv_intr_timeout == 0) { 1486f48ad614SDennis Dalessandro pr_err("Invalid mode: both receive interrupt count and available timeout are zero - setting interrupt count to 1\n"); 1487f48ad614SDennis Dalessandro rcv_intr_count = 1; 1488f48ad614SDennis Dalessandro } 1489f48ad614SDennis Dalessandro if (rcv_intr_count > 1 && rcv_intr_timeout == 0) { 1490f48ad614SDennis Dalessandro /* 1491f48ad614SDennis Dalessandro * Avoid indefinite packet delivery by requiring a timeout 1492f48ad614SDennis Dalessandro * if count is > 1. 1493f48ad614SDennis Dalessandro */ 1494f48ad614SDennis Dalessandro pr_err("Invalid mode: receive interrupt count greater than 1 and available timeout is zero - setting available timeout to 1\n"); 1495f48ad614SDennis Dalessandro rcv_intr_timeout = 1; 1496f48ad614SDennis Dalessandro } 1497f48ad614SDennis Dalessandro if (rcv_intr_dynamic && !(rcv_intr_count > 1 && rcv_intr_timeout > 0)) { 1498f48ad614SDennis Dalessandro /* 1499f48ad614SDennis Dalessandro * The dynamic algorithm expects a non-zero timeout 1500f48ad614SDennis Dalessandro * and a count > 1. 
1501f48ad614SDennis Dalessandro */ 1502f48ad614SDennis Dalessandro pr_err("Invalid mode: dynamic receive interrupt mitigation with invalid count and timeout - turning dynamic off\n"); 1503f48ad614SDennis Dalessandro rcv_intr_dynamic = 0; 1504f48ad614SDennis Dalessandro } 1505f48ad614SDennis Dalessandro 1506f48ad614SDennis Dalessandro /* sanitize link CRC options */ 1507f48ad614SDennis Dalessandro link_crc_mask &= SUPPORTED_CRCS; 1508f48ad614SDennis Dalessandro 150948a615dcSKaike Wan ret = opfn_init(); 151048a615dcSKaike Wan if (ret < 0) { 151148a615dcSKaike Wan pr_err("Failed to allocate opfn_wq"); 151248a615dcSKaike Wan goto bail_dev; 151348a615dcSKaike Wan } 151448a615dcSKaike Wan 1515f48ad614SDennis Dalessandro /* 1516f48ad614SDennis Dalessandro * These must be called before the driver is registered with 1517f48ad614SDennis Dalessandro * the PCI subsystem. 1518f48ad614SDennis Dalessandro */ 1519f48ad614SDennis Dalessandro idr_init(&hfi1_unit_table); 1520f48ad614SDennis Dalessandro 1521f48ad614SDennis Dalessandro hfi1_dbg_init(); 1522f48ad614SDennis Dalessandro ret = pci_register_driver(&hfi1_pci_driver); 1523f48ad614SDennis Dalessandro if (ret < 0) { 1524f48ad614SDennis Dalessandro pr_err("Unable to register driver: error %d\n", -ret); 1525f48ad614SDennis Dalessandro goto bail_dev; 1526f48ad614SDennis Dalessandro } 1527f48ad614SDennis Dalessandro goto bail; /* all OK */ 1528f48ad614SDennis Dalessandro 1529f48ad614SDennis Dalessandro bail_dev: 1530f48ad614SDennis Dalessandro hfi1_dbg_exit(); 1531f48ad614SDennis Dalessandro idr_destroy(&hfi1_unit_table); 1532f48ad614SDennis Dalessandro dev_cleanup(); 1533f48ad614SDennis Dalessandro bail: 1534f48ad614SDennis Dalessandro return ret; 1535f48ad614SDennis Dalessandro } 1536f48ad614SDennis Dalessandro 1537f48ad614SDennis Dalessandro module_init(hfi1_mod_init); 1538f48ad614SDennis Dalessandro 1539f48ad614SDennis Dalessandro /* 1540f48ad614SDennis Dalessandro * Do the non-unit driver cleanup, memory free, etc. at unload. 
1541f48ad614SDennis Dalessandro */ 1542f48ad614SDennis Dalessandro static void __exit hfi1_mod_cleanup(void) 1543f48ad614SDennis Dalessandro { 1544f48ad614SDennis Dalessandro pci_unregister_driver(&hfi1_pci_driver); 154548a615dcSKaike Wan opfn_exit(); 15465d18ee67SSebastian Sanchez node_affinity_destroy_all(); 1547f48ad614SDennis Dalessandro hfi1_dbg_exit(); 1548f48ad614SDennis Dalessandro 1549f48ad614SDennis Dalessandro idr_destroy(&hfi1_unit_table); 1550f48ad614SDennis Dalessandro dispose_firmware(); /* asymmetric with obtain_firmware() */ 1551f48ad614SDennis Dalessandro dev_cleanup(); 1552f48ad614SDennis Dalessandro } 1553f48ad614SDennis Dalessandro 1554f48ad614SDennis Dalessandro module_exit(hfi1_mod_cleanup); 1555f48ad614SDennis Dalessandro 1556f48ad614SDennis Dalessandro /* this can only be called after a successful initialization */ 1557f48ad614SDennis Dalessandro static void cleanup_device_data(struct hfi1_devdata *dd) 1558f48ad614SDennis Dalessandro { 1559f48ad614SDennis Dalessandro int ctxt; 1560f48ad614SDennis Dalessandro int pidx; 1561f48ad614SDennis Dalessandro 1562f48ad614SDennis Dalessandro /* users can't do anything more with chip */ 1563f48ad614SDennis Dalessandro for (pidx = 0; pidx < dd->num_pports; ++pidx) { 1564f48ad614SDennis Dalessandro struct hfi1_pportdata *ppd = &dd->pport[pidx]; 1565f48ad614SDennis Dalessandro struct cc_state *cc_state; 1566f48ad614SDennis Dalessandro int i; 1567f48ad614SDennis Dalessandro 1568f48ad614SDennis Dalessandro if (ppd->statusp) 1569f48ad614SDennis Dalessandro *ppd->statusp &= ~HFI1_STATUS_CHIP_PRESENT; 1570f48ad614SDennis Dalessandro 1571f48ad614SDennis Dalessandro for (i = 0; i < OPA_MAX_SLS; i++) 1572f48ad614SDennis Dalessandro hrtimer_cancel(&ppd->cca_timer[i].hrtimer); 1573f48ad614SDennis Dalessandro 1574f48ad614SDennis Dalessandro spin_lock(&ppd->cc_state_lock); 15758adf71faSJianxin Xiong cc_state = get_cc_state_protected(ppd); 1576f48ad614SDennis Dalessandro RCU_INIT_POINTER(ppd->cc_state, NULL); 1577f48ad614SDennis Dalessandro spin_unlock(&ppd->cc_state_lock); 1578f48ad614SDennis Dalessandro 1579f48ad614SDennis Dalessandro if (cc_state) 1580476d95bdSWei Yongjun kfree_rcu(cc_state, rcu); 1581f48ad614SDennis Dalessandro } 1582f48ad614SDennis Dalessandro 1583f48ad614SDennis Dalessandro free_credit_return(dd); 1584f48ad614SDennis Dalessandro 1585f48ad614SDennis Dalessandro if (dd->rcvhdrtail_dummy_kvaddr) { 1586f48ad614SDennis Dalessandro dma_free_coherent(&dd->pcidev->dev, sizeof(u64), 1587f48ad614SDennis Dalessandro (void *)dd->rcvhdrtail_dummy_kvaddr, 158860368186STymoteusz Kielan dd->rcvhdrtail_dummy_dma); 1589f48ad614SDennis Dalessandro dd->rcvhdrtail_dummy_kvaddr = NULL; 1590f48ad614SDennis Dalessandro } 1591f48ad614SDennis Dalessandro 1592d295dbebSMichael J. Ruhl /* 1593d295dbebSMichael J. Ruhl * Free any resources still in use (usually just kernel contexts) 1594d295dbebSMichael J. Ruhl * at unload; we do for ctxtcnt, because that's what we allocate. 1595d295dbebSMichael J. Ruhl */ 1596d295dbebSMichael J. Ruhl for (ctxt = 0; dd->rcd && ctxt < dd->num_rcv_contexts; ctxt++) { 1597d295dbebSMichael J. Ruhl struct hfi1_ctxtdata *rcd = dd->rcd[ctxt]; 1598f48ad614SDennis Dalessandro 1599f48ad614SDennis Dalessandro if (rcd) { 1600838b6fd2SKaike Wan hfi1_free_ctxt_rcv_groups(rcd); 1601d295dbebSMichael J. Ruhl hfi1_free_ctxt(rcd); 1602f48ad614SDennis Dalessandro } 1603f48ad614SDennis Dalessandro } 1604d295dbebSMichael J. Ruhl 1605d295dbebSMichael J. Ruhl kfree(dd->rcd); 1606d295dbebSMichael J. 
Ruhl dd->rcd = NULL; 1607d295dbebSMichael J. Ruhl 1608f48ad614SDennis Dalessandro free_pio_map(dd); 1609f48ad614SDennis Dalessandro /* must follow rcv context free - need to remove rcv's hooks */ 1610f48ad614SDennis Dalessandro for (ctxt = 0; ctxt < dd->num_send_contexts; ctxt++) 1611f48ad614SDennis Dalessandro sc_free(dd->send_contexts[ctxt].sc); 1612f48ad614SDennis Dalessandro dd->num_send_contexts = 0; 1613f48ad614SDennis Dalessandro kfree(dd->send_contexts); 1614f48ad614SDennis Dalessandro dd->send_contexts = NULL; 1615f48ad614SDennis Dalessandro kfree(dd->hw_to_sw); 1616f48ad614SDennis Dalessandro dd->hw_to_sw = NULL; 1617f48ad614SDennis Dalessandro kfree(dd->boardname); 1618f48ad614SDennis Dalessandro vfree(dd->events); 1619f48ad614SDennis Dalessandro vfree(dd->status); 1620f48ad614SDennis Dalessandro } 1621f48ad614SDennis Dalessandro 1622f48ad614SDennis Dalessandro /* 1623f48ad614SDennis Dalessandro * Clean up on unit shutdown, or error during unit load after 1624f48ad614SDennis Dalessandro * successful initialization. 1625f48ad614SDennis Dalessandro */ 1626f48ad614SDennis Dalessandro static void postinit_cleanup(struct hfi1_devdata *dd) 1627f48ad614SDennis Dalessandro { 1628f48ad614SDennis Dalessandro hfi1_start_cleanup(dd); 16295d18ee67SSebastian Sanchez hfi1_comp_vectors_clean_up(dd); 16305d18ee67SSebastian Sanchez hfi1_dev_affinity_clean_up(dd); 1631f48ad614SDennis Dalessandro 1632f48ad614SDennis Dalessandro hfi1_pcie_ddcleanup(dd); 1633f48ad614SDennis Dalessandro hfi1_pcie_cleanup(dd->pcidev); 1634f48ad614SDennis Dalessandro 1635f48ad614SDennis Dalessandro cleanup_device_data(dd); 1636f48ad614SDennis Dalessandro 1637f48ad614SDennis Dalessandro hfi1_free_devdata(dd); 1638f48ad614SDennis Dalessandro } 1639f48ad614SDennis Dalessandro 164057f97e96SMichael J. Ruhl static int init_validate_rcvhdrcnt(struct hfi1_devdata *dd, uint thecnt) 164111501ab9SKrzysztof Blaszkowski { 164211501ab9SKrzysztof Blaszkowski if (thecnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) { 164357f97e96SMichael J. Ruhl dd_dev_err(dd, "Receive header queue count too small\n"); 164411501ab9SKrzysztof Blaszkowski return -EINVAL; 164511501ab9SKrzysztof Blaszkowski } 164611501ab9SKrzysztof Blaszkowski 164711501ab9SKrzysztof Blaszkowski if (thecnt > HFI1_MAX_HDRQ_EGRBUF_CNT) { 164857f97e96SMichael J. Ruhl dd_dev_err(dd, 164911501ab9SKrzysztof Blaszkowski "Receive header queue count cannot be greater than %u\n", 165011501ab9SKrzysztof Blaszkowski HFI1_MAX_HDRQ_EGRBUF_CNT); 165111501ab9SKrzysztof Blaszkowski return -EINVAL; 165211501ab9SKrzysztof Blaszkowski } 165311501ab9SKrzysztof Blaszkowski 165411501ab9SKrzysztof Blaszkowski if (thecnt % HDRQ_INCREMENT) { 165557f97e96SMichael J. 
Ruhl dd_dev_err(dd, "Receive header queue count %d must be divisible by %lu\n", 165611501ab9SKrzysztof Blaszkowski thecnt, HDRQ_INCREMENT); 165711501ab9SKrzysztof Blaszkowski return -EINVAL; 165811501ab9SKrzysztof Blaszkowski } 165911501ab9SKrzysztof Blaszkowski 166011501ab9SKrzysztof Blaszkowski return 0; 166111501ab9SKrzysztof Blaszkowski } 166211501ab9SKrzysztof Blaszkowski 1663f48ad614SDennis Dalessandro static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 1664f48ad614SDennis Dalessandro { 1665f48ad614SDennis Dalessandro int ret = 0, j, pidx, initfail; 166683fb4af6SKrzysztof Blaszkowski struct hfi1_devdata *dd; 1667f48ad614SDennis Dalessandro struct hfi1_pportdata *ppd; 1668f48ad614SDennis Dalessandro 1669f48ad614SDennis Dalessandro /* First, lock the non-writable module parameters */ 1670f48ad614SDennis Dalessandro HFI1_CAP_LOCK(); 1671f48ad614SDennis Dalessandro 16725d6f08afSTadeusz Struk /* Validate dev ids */ 16735d6f08afSTadeusz Struk if (!(ent->device == PCI_DEVICE_ID_INTEL0 || 16745d6f08afSTadeusz Struk ent->device == PCI_DEVICE_ID_INTEL1)) { 167557f97e96SMichael J. Ruhl dev_err(&pdev->dev, "Failing on unknown Intel deviceid 0x%x\n", 16765d6f08afSTadeusz Struk ent->device); 16775d6f08afSTadeusz Struk ret = -ENODEV; 16785d6f08afSTadeusz Struk goto bail; 16795d6f08afSTadeusz Struk } 16805d6f08afSTadeusz Struk 168157f97e96SMichael J. Ruhl /* Allocate the dd so we can get to work */ 168257f97e96SMichael J. Ruhl dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS * 168357f97e96SMichael J. Ruhl sizeof(struct hfi1_pportdata)); 168457f97e96SMichael J. Ruhl if (IS_ERR(dd)) { 168557f97e96SMichael J. Ruhl ret = PTR_ERR(dd); 168657f97e96SMichael J. Ruhl goto bail; 168757f97e96SMichael J. Ruhl } 168857f97e96SMichael J. Ruhl 1689f48ad614SDennis Dalessandro /* Validate some global module parameters */ 169057f97e96SMichael J. Ruhl ret = init_validate_rcvhdrcnt(dd, rcvhdrcnt); 169111501ab9SKrzysztof Blaszkowski if (ret) 1692f48ad614SDennis Dalessandro goto bail; 169311501ab9SKrzysztof Blaszkowski 1694f48ad614SDennis Dalessandro /* use the encoding function as a sanitization check */ 1695f48ad614SDennis Dalessandro if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) { 169657f97e96SMichael J. Ruhl dd_dev_err(dd, "Invalid HdrQ Entry size %u\n", 1697f48ad614SDennis Dalessandro hfi1_hdrq_entsize); 1698f48ad614SDennis Dalessandro ret = -EINVAL; 1699f48ad614SDennis Dalessandro goto bail; 1700f48ad614SDennis Dalessandro } 1701f48ad614SDennis Dalessandro 1702f48ad614SDennis Dalessandro /* The receive eager buffer size must be set before the receive 1703f48ad614SDennis Dalessandro * contexts are created. 1704f48ad614SDennis Dalessandro * 1705f48ad614SDennis Dalessandro * Set the eager buffer size. Validate that it falls in a range 1706f48ad614SDennis Dalessandro * allowed by the hardware - all powers of 2 between the min and 1707f48ad614SDennis Dalessandro * max. The maximum valid MTU is within the eager buffer range 1708f48ad614SDennis Dalessandro * so we do not need to cap the max_mtu by an eager buffer size 1709f48ad614SDennis Dalessandro * setting. 
1710f48ad614SDennis Dalessandro */ 1711f48ad614SDennis Dalessandro if (eager_buffer_size) { 1712f48ad614SDennis Dalessandro if (!is_power_of_2(eager_buffer_size)) 1713f48ad614SDennis Dalessandro eager_buffer_size = 1714f48ad614SDennis Dalessandro roundup_pow_of_two(eager_buffer_size); 1715f48ad614SDennis Dalessandro eager_buffer_size = 1716f48ad614SDennis Dalessandro clamp_val(eager_buffer_size, 1717f48ad614SDennis Dalessandro MIN_EAGER_BUFFER * 8, 1718f48ad614SDennis Dalessandro MAX_EAGER_BUFFER_TOTAL); 171957f97e96SMichael J. Ruhl dd_dev_info(dd, "Eager buffer size %u\n", 1720f48ad614SDennis Dalessandro eager_buffer_size); 1721f48ad614SDennis Dalessandro } else { 172257f97e96SMichael J. Ruhl dd_dev_err(dd, "Invalid Eager buffer size of 0\n"); 1723f48ad614SDennis Dalessandro ret = -EINVAL; 1724f48ad614SDennis Dalessandro goto bail; 1725f48ad614SDennis Dalessandro } 1726f48ad614SDennis Dalessandro 1727f48ad614SDennis Dalessandro /* restrict value of hfi1_rcvarr_split */ 1728f48ad614SDennis Dalessandro hfi1_rcvarr_split = clamp_val(hfi1_rcvarr_split, 0, 100); 1729f48ad614SDennis Dalessandro 173057f97e96SMichael J. Ruhl ret = hfi1_pcie_init(dd); 1731f48ad614SDennis Dalessandro if (ret) 1732f48ad614SDennis Dalessandro goto bail; 1733f48ad614SDennis Dalessandro 173483fb4af6SKrzysztof Blaszkowski /* 173583fb4af6SKrzysztof Blaszkowski * Do device-specific initialization, function table setup, dd 173683fb4af6SKrzysztof Blaszkowski * allocation, etc. 173783fb4af6SKrzysztof Blaszkowski */ 173857f97e96SMichael J. Ruhl ret = hfi1_init_dd(dd); 173957f97e96SMichael J. Ruhl if (ret) 1740f48ad614SDennis Dalessandro goto clean_bail; /* error already printed */ 1741f48ad614SDennis Dalessandro 1742f48ad614SDennis Dalessandro ret = create_workqueues(dd); 1743f48ad614SDennis Dalessandro if (ret) 1744f48ad614SDennis Dalessandro goto clean_bail; 1745f48ad614SDennis Dalessandro 1746f48ad614SDennis Dalessandro /* do the generic initialization */ 1747f48ad614SDennis Dalessandro initfail = hfi1_init(dd, 0); 1748f48ad614SDennis Dalessandro 1749d4829ea6SVishwanathapura, Niranjana /* setup vnic */ 1750d4829ea6SVishwanathapura, Niranjana hfi1_vnic_setup(dd); 1751d4829ea6SVishwanathapura, Niranjana 1752f48ad614SDennis Dalessandro ret = hfi1_register_ib_device(dd); 1753f48ad614SDennis Dalessandro 1754f48ad614SDennis Dalessandro /* 1755f48ad614SDennis Dalessandro * Now ready for use. this should be cleared whenever we 1756f48ad614SDennis Dalessandro * detect a reset, or initiate one. If earlier failure, 1757f48ad614SDennis Dalessandro * we still create devices, so diags, etc. can be used 1758f48ad614SDennis Dalessandro * to determine cause of problem. 1759f48ad614SDennis Dalessandro */ 1760f48ad614SDennis Dalessandro if (!initfail && !ret) { 1761f48ad614SDennis Dalessandro dd->flags |= HFI1_INITTED; 1762f48ad614SDennis Dalessandro /* create debufs files after init and ib register */ 1763f48ad614SDennis Dalessandro hfi1_dbg_ibdev_init(&dd->verbs_dev); 1764f48ad614SDennis Dalessandro } 1765f48ad614SDennis Dalessandro 1766f48ad614SDennis Dalessandro j = hfi1_device_create(dd); 1767f48ad614SDennis Dalessandro if (j) 1768f48ad614SDennis Dalessandro dd_dev_err(dd, "Failed to create /dev devices: %d\n", -j); 1769f48ad614SDennis Dalessandro 1770f48ad614SDennis Dalessandro if (initfail || ret) { 17716eb4eb10SMichael J. 
Ruhl msix_clean_up_interrupts(dd); 1772f48ad614SDennis Dalessandro stop_timers(dd); 1773f48ad614SDennis Dalessandro flush_workqueue(ib_wq); 1774f48ad614SDennis Dalessandro for (pidx = 0; pidx < dd->num_pports; ++pidx) { 1775f48ad614SDennis Dalessandro hfi1_quiet_serdes(dd->pport + pidx); 1776f48ad614SDennis Dalessandro ppd = dd->pport + pidx; 1777f48ad614SDennis Dalessandro if (ppd->hfi1_wq) { 1778f48ad614SDennis Dalessandro destroy_workqueue(ppd->hfi1_wq); 1779f48ad614SDennis Dalessandro ppd->hfi1_wq = NULL; 1780f48ad614SDennis Dalessandro } 178171d47008SSebastian Sanchez if (ppd->link_wq) { 178271d47008SSebastian Sanchez destroy_workqueue(ppd->link_wq); 178371d47008SSebastian Sanchez ppd->link_wq = NULL; 178471d47008SSebastian Sanchez } 1785f48ad614SDennis Dalessandro } 1786f48ad614SDennis Dalessandro if (!j) 1787f48ad614SDennis Dalessandro hfi1_device_remove(dd); 1788f48ad614SDennis Dalessandro if (!ret) 1789f48ad614SDennis Dalessandro hfi1_unregister_ib_device(dd); 17902280740fSVishwanathapura, Niranjana hfi1_vnic_cleanup(dd); 1791f48ad614SDennis Dalessandro postinit_cleanup(dd); 1792f48ad614SDennis Dalessandro if (initfail) 1793f48ad614SDennis Dalessandro ret = initfail; 1794f48ad614SDennis Dalessandro goto bail; /* everything already cleaned */ 1795f48ad614SDennis Dalessandro } 1796f48ad614SDennis Dalessandro 1797f48ad614SDennis Dalessandro sdma_start(dd); 1798f48ad614SDennis Dalessandro 1799f48ad614SDennis Dalessandro return 0; 1800f48ad614SDennis Dalessandro 1801f48ad614SDennis Dalessandro clean_bail: 1802f48ad614SDennis Dalessandro hfi1_pcie_cleanup(pdev); 1803f48ad614SDennis Dalessandro bail: 1804f48ad614SDennis Dalessandro return ret; 1805f48ad614SDennis Dalessandro } 1806f48ad614SDennis Dalessandro 1807acd7c8feSTadeusz Struk static void wait_for_clients(struct hfi1_devdata *dd) 1808acd7c8feSTadeusz Struk { 1809acd7c8feSTadeusz Struk /* 1810acd7c8feSTadeusz Struk * Remove the device init value and complete the device if there is 1811acd7c8feSTadeusz Struk * no clients or wait for active clients to finish. 1812acd7c8feSTadeusz Struk */ 1813acd7c8feSTadeusz Struk if (atomic_dec_and_test(&dd->user_refcount)) 1814acd7c8feSTadeusz Struk complete(&dd->user_comp); 1815acd7c8feSTadeusz Struk 1816acd7c8feSTadeusz Struk wait_for_completion(&dd->user_comp); 1817acd7c8feSTadeusz Struk } 1818acd7c8feSTadeusz Struk 1819f48ad614SDennis Dalessandro static void remove_one(struct pci_dev *pdev) 1820f48ad614SDennis Dalessandro { 1821f48ad614SDennis Dalessandro struct hfi1_devdata *dd = pci_get_drvdata(pdev); 1822f48ad614SDennis Dalessandro 1823f48ad614SDennis Dalessandro /* close debugfs files before ib unregister */ 1824f48ad614SDennis Dalessandro hfi1_dbg_ibdev_exit(&dd->verbs_dev); 1825acd7c8feSTadeusz Struk 1826acd7c8feSTadeusz Struk /* remove the /dev hfi1 interface */ 1827acd7c8feSTadeusz Struk hfi1_device_remove(dd); 1828acd7c8feSTadeusz Struk 1829acd7c8feSTadeusz Struk /* wait for existing user space clients to finish */ 1830acd7c8feSTadeusz Struk wait_for_clients(dd); 1831acd7c8feSTadeusz Struk 1832f48ad614SDennis Dalessandro /* unregister from IB core */ 1833f48ad614SDennis Dalessandro hfi1_unregister_ib_device(dd); 1834f48ad614SDennis Dalessandro 1835d4829ea6SVishwanathapura, Niranjana /* cleanup vnic */ 1836d4829ea6SVishwanathapura, Niranjana hfi1_vnic_cleanup(dd); 1837d4829ea6SVishwanathapura, Niranjana 1838f48ad614SDennis Dalessandro /* 1839f48ad614SDennis Dalessandro * Disable the IB link, disable interrupts on the device, 1840f48ad614SDennis Dalessandro * clear dma engines, etc. 
1841f48ad614SDennis Dalessandro */ 1842f48ad614SDennis Dalessandro shutdown_device(dd); 1843f48ad614SDennis Dalessandro 1844f48ad614SDennis Dalessandro stop_timers(dd); 1845f48ad614SDennis Dalessandro 1846f48ad614SDennis Dalessandro /* wait until all of our (qsfp) queue_work() calls complete */ 1847f48ad614SDennis Dalessandro flush_workqueue(ib_wq); 1848f48ad614SDennis Dalessandro 1849f48ad614SDennis Dalessandro postinit_cleanup(dd); 1850f48ad614SDennis Dalessandro } 1851f48ad614SDennis Dalessandro 18528d3e7113SAlex Estrin static void shutdown_one(struct pci_dev *pdev) 18538d3e7113SAlex Estrin { 18548d3e7113SAlex Estrin struct hfi1_devdata *dd = pci_get_drvdata(pdev); 18558d3e7113SAlex Estrin 18568d3e7113SAlex Estrin shutdown_device(dd); 18578d3e7113SAlex Estrin } 18588d3e7113SAlex Estrin 1859f48ad614SDennis Dalessandro /** 1860f48ad614SDennis Dalessandro * hfi1_create_rcvhdrq - create a receive header queue 1861f48ad614SDennis Dalessandro * @dd: the hfi1_ib device 1862f48ad614SDennis Dalessandro * @rcd: the context data 1863f48ad614SDennis Dalessandro * 1864f48ad614SDennis Dalessandro * This must be contiguous memory (from an i/o perspective), and must be 1865f48ad614SDennis Dalessandro * DMA'able (which means for some systems, it will go through an IOMMU, 1866f48ad614SDennis Dalessandro * or be forced into a low address range). 1867f48ad614SDennis Dalessandro */ 1868f48ad614SDennis Dalessandro int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) 1869f48ad614SDennis Dalessandro { 1870f48ad614SDennis Dalessandro unsigned amt; 1871f48ad614SDennis Dalessandro u64 reg; 1872f48ad614SDennis Dalessandro 1873f48ad614SDennis Dalessandro if (!rcd->rcvhdrq) { 1874f48ad614SDennis Dalessandro gfp_t gfp_flags; 1875f48ad614SDennis Dalessandro 1876b2578431SMike Marciniszyn amt = rcvhdrq_size(rcd); 1877f48ad614SDennis Dalessandro 1878cc9a97eaSNiranjana Vishwanathapura if (rcd->ctxt < dd->first_dyn_alloc_ctxt || rcd->is_vnic) 18792280740fSVishwanathapura, Niranjana gfp_flags = GFP_KERNEL; 18802280740fSVishwanathapura, Niranjana else 18812280740fSVishwanathapura, Niranjana gfp_flags = GFP_USER; 1882750afb08SLuis Chamberlain rcd->rcvhdrq = dma_alloc_coherent(&dd->pcidev->dev, amt, 1883750afb08SLuis Chamberlain &rcd->rcvhdrq_dma, 1884f48ad614SDennis Dalessandro gfp_flags | __GFP_COMP); 1885f48ad614SDennis Dalessandro 1886f48ad614SDennis Dalessandro if (!rcd->rcvhdrq) { 1887f48ad614SDennis Dalessandro dd_dev_err(dd, 1888f48ad614SDennis Dalessandro "attempt to allocate %d bytes for ctxt %u rcvhdrq failed\n", 1889f48ad614SDennis Dalessandro amt, rcd->ctxt); 1890f48ad614SDennis Dalessandro goto bail; 1891f48ad614SDennis Dalessandro } 1892f48ad614SDennis Dalessandro 18931bc0299dSMike Marciniszyn if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) || 18941bc0299dSMike Marciniszyn HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) { 1895750afb08SLuis Chamberlain rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(&dd->pcidev->dev, 1896750afb08SLuis Chamberlain PAGE_SIZE, 1897750afb08SLuis Chamberlain &rcd->rcvhdrqtailaddr_dma, 1898750afb08SLuis Chamberlain gfp_flags); 1899f48ad614SDennis Dalessandro if (!rcd->rcvhdrtail_kvaddr) 1900f48ad614SDennis Dalessandro goto bail_free; 1901f48ad614SDennis Dalessandro } 1902f48ad614SDennis Dalessandro } 1903f48ad614SDennis Dalessandro /* 1904f48ad614SDennis Dalessandro * These values are per-context: 1905f48ad614SDennis Dalessandro * RcvHdrCnt 1906f48ad614SDennis Dalessandro * RcvHdrEntSize 1907f48ad614SDennis Dalessandro * RcvHdrSize 1908f48ad614SDennis Dalessandro */ 
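/*
 * Illustrative sketch, not part of the driver: every CSR write below
 * follows the same "scale, mask, then shift into position" pattern.
 * Assuming a hypothetical rcvhdrq_cnt of 2048 and an HDRQ_SIZE_SHIFT
 * of 5 (both values are examples only), the first write would reduce
 * to roughly:
 *
 *	u64 val = ((u64)(2048 >> 5) & RCV_HDR_CNT_CNT_MASK)
 *			<< RCV_HDR_CNT_CNT_SHIFT;
 *
 *	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_CNT, val);
 *
 * The real values come from rcd->rcvhdrq_cnt, rcd->rcvhdrqentsize and
 * DEFAULT_RCVHDRSIZE, with the chip-defined mask/shift constants used
 * in the statements that follow.
 */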
1909f48ad614SDennis Dalessandro reg = ((u64)(rcd->rcvhdrq_cnt >> HDRQ_SIZE_SHIFT) 1910f48ad614SDennis Dalessandro & RCV_HDR_CNT_CNT_MASK) 1911f48ad614SDennis Dalessandro << RCV_HDR_CNT_CNT_SHIFT; 1912f48ad614SDennis Dalessandro write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_CNT, reg); 1913f48ad614SDennis Dalessandro reg = (encode_rcv_header_entry_size(rcd->rcvhdrqentsize) 1914f48ad614SDennis Dalessandro & RCV_HDR_ENT_SIZE_ENT_SIZE_MASK) 1915f48ad614SDennis Dalessandro << RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT; 1916f48ad614SDennis Dalessandro write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_ENT_SIZE, reg); 191732e3d970SMike Marciniszyn reg = ((u64)DEFAULT_RCVHDRSIZE & RCV_HDR_SIZE_HDR_SIZE_MASK) 1918f48ad614SDennis Dalessandro << RCV_HDR_SIZE_HDR_SIZE_SHIFT; 1919f48ad614SDennis Dalessandro write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_SIZE, reg); 1920f48ad614SDennis Dalessandro 1921f48ad614SDennis Dalessandro /* 1922f48ad614SDennis Dalessandro * Program dummy tail address for every receive context 1923f48ad614SDennis Dalessandro * before enabling any receive context 1924f48ad614SDennis Dalessandro */ 1925f48ad614SDennis Dalessandro write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_TAIL_ADDR, 192660368186STymoteusz Kielan dd->rcvhdrtail_dummy_dma); 1927f48ad614SDennis Dalessandro 1928f48ad614SDennis Dalessandro return 0; 1929f48ad614SDennis Dalessandro 1930f48ad614SDennis Dalessandro bail_free: 1931f48ad614SDennis Dalessandro dd_dev_err(dd, 1932f48ad614SDennis Dalessandro "attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n", 1933f48ad614SDennis Dalessandro rcd->ctxt); 1934f48ad614SDennis Dalessandro dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq, 193560368186STymoteusz Kielan rcd->rcvhdrq_dma); 1936f48ad614SDennis Dalessandro rcd->rcvhdrq = NULL; 1937f48ad614SDennis Dalessandro bail: 1938f48ad614SDennis Dalessandro return -ENOMEM; 1939f48ad614SDennis Dalessandro } 1940f48ad614SDennis Dalessandro 1941f48ad614SDennis Dalessandro /** 1942f48ad614SDennis Dalessandro * allocate eager buffers, both kernel and user contexts. 1943f48ad614SDennis Dalessandro * @rcd: the context we are setting up. 1944f48ad614SDennis Dalessandro * 1945f48ad614SDennis Dalessandro * Allocate the eager TID buffers and program them into hip. 1946f48ad614SDennis Dalessandro * They are no longer completely contiguous, we do multiple allocation 1947f48ad614SDennis Dalessandro * calls. Otherwise we get the OOM code involved, by asking for too 1948f48ad614SDennis Dalessandro * much per call, with disastrous results on some kernels. 1949f48ad614SDennis Dalessandro */ 1950f48ad614SDennis Dalessandro int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd) 1951f48ad614SDennis Dalessandro { 1952f48ad614SDennis Dalessandro struct hfi1_devdata *dd = rcd->dd; 1953071e4fecSMike Marciniszyn u32 max_entries, egrtop, alloced_bytes = 0; 1954f48ad614SDennis Dalessandro gfp_t gfp_flags; 1955071e4fecSMike Marciniszyn u16 order, idx = 0; 1956f48ad614SDennis Dalessandro int ret = 0; 1957f48ad614SDennis Dalessandro u16 round_mtu = roundup_pow_of_two(hfi1_max_mtu); 1958f48ad614SDennis Dalessandro 1959f48ad614SDennis Dalessandro /* 1960f48ad614SDennis Dalessandro * GFP_USER, but without GFP_FS, so buffer cache can be 1961f48ad614SDennis Dalessandro * coalesced (we hope); otherwise, even at order 4, 1962f48ad614SDennis Dalessandro * heavy filesystem activity makes these fail, and we can 1963f48ad614SDennis Dalessandro * use compound pages. 
1964f48ad614SDennis Dalessandro */ 1965f48ad614SDennis Dalessandro gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP; 1966f48ad614SDennis Dalessandro 1967f48ad614SDennis Dalessandro /* 1968f48ad614SDennis Dalessandro * The minimum size of the eager buffers is a groups of MTU-sized 1969f48ad614SDennis Dalessandro * buffers. 1970f48ad614SDennis Dalessandro * The global eager_buffer_size parameter is checked against the 1971f48ad614SDennis Dalessandro * theoretical lower limit of the value. Here, we check against the 1972f48ad614SDennis Dalessandro * MTU. 1973f48ad614SDennis Dalessandro */ 1974f48ad614SDennis Dalessandro if (rcd->egrbufs.size < (round_mtu * dd->rcv_entries.group_size)) 1975f48ad614SDennis Dalessandro rcd->egrbufs.size = round_mtu * dd->rcv_entries.group_size; 1976f48ad614SDennis Dalessandro /* 1977f48ad614SDennis Dalessandro * If using one-pkt-per-egr-buffer, lower the eager buffer 1978f48ad614SDennis Dalessandro * size to the max MTU (page-aligned). 1979f48ad614SDennis Dalessandro */ 1980f48ad614SDennis Dalessandro if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) 1981f48ad614SDennis Dalessandro rcd->egrbufs.rcvtid_size = round_mtu; 1982f48ad614SDennis Dalessandro 1983f48ad614SDennis Dalessandro /* 1984f48ad614SDennis Dalessandro * Eager buffers sizes of 1MB or less require smaller TID sizes 1985f48ad614SDennis Dalessandro * to satisfy the "multiple of 8 RcvArray entries" requirement. 1986f48ad614SDennis Dalessandro */ 1987f48ad614SDennis Dalessandro if (rcd->egrbufs.size <= (1 << 20)) 1988f48ad614SDennis Dalessandro rcd->egrbufs.rcvtid_size = max((unsigned long)round_mtu, 1989f48ad614SDennis Dalessandro rounddown_pow_of_two(rcd->egrbufs.size / 8)); 1990f48ad614SDennis Dalessandro 1991f48ad614SDennis Dalessandro while (alloced_bytes < rcd->egrbufs.size && 1992f48ad614SDennis Dalessandro rcd->egrbufs.alloced < rcd->egrbufs.count) { 1993f48ad614SDennis Dalessandro rcd->egrbufs.buffers[idx].addr = 1994750afb08SLuis Chamberlain dma_alloc_coherent(&dd->pcidev->dev, 1995f48ad614SDennis Dalessandro rcd->egrbufs.rcvtid_size, 199660368186STymoteusz Kielan &rcd->egrbufs.buffers[idx].dma, 1997f48ad614SDennis Dalessandro gfp_flags); 1998f48ad614SDennis Dalessandro if (rcd->egrbufs.buffers[idx].addr) { 1999f48ad614SDennis Dalessandro rcd->egrbufs.buffers[idx].len = 2000f48ad614SDennis Dalessandro rcd->egrbufs.rcvtid_size; 2001f48ad614SDennis Dalessandro rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].addr = 2002f48ad614SDennis Dalessandro rcd->egrbufs.buffers[idx].addr; 200360368186STymoteusz Kielan rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].dma = 200460368186STymoteusz Kielan rcd->egrbufs.buffers[idx].dma; 2005f48ad614SDennis Dalessandro rcd->egrbufs.alloced++; 2006f48ad614SDennis Dalessandro alloced_bytes += rcd->egrbufs.rcvtid_size; 2007f48ad614SDennis Dalessandro idx++; 2008f48ad614SDennis Dalessandro } else { 2009f48ad614SDennis Dalessandro u32 new_size, i, j; 2010f48ad614SDennis Dalessandro u64 offset = 0; 2011f48ad614SDennis Dalessandro 2012f48ad614SDennis Dalessandro /* 2013f48ad614SDennis Dalessandro * Fail the eager buffer allocation if: 2014f48ad614SDennis Dalessandro * - we are already using the lowest acceptable size 2015f48ad614SDennis Dalessandro * - we are using one-pkt-per-egr-buffer (this implies 2016f48ad614SDennis Dalessandro * that we are accepting only one size) 2017f48ad614SDennis Dalessandro */ 2018f48ad614SDennis Dalessandro if (rcd->egrbufs.rcvtid_size == round_mtu || 2019f48ad614SDennis Dalessandro !HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) { 
2020f48ad614SDennis Dalessandro dd_dev_err(dd, "ctxt%u: Failed to allocate eager buffers\n", 2021f48ad614SDennis Dalessandro rcd->ctxt); 202294679061SMichael J. Ruhl ret = -ENOMEM; 2023f48ad614SDennis Dalessandro goto bail_rcvegrbuf_phys; 2024f48ad614SDennis Dalessandro } 2025f48ad614SDennis Dalessandro 2026f48ad614SDennis Dalessandro new_size = rcd->egrbufs.rcvtid_size / 2; 2027f48ad614SDennis Dalessandro 2028f48ad614SDennis Dalessandro /* 2029f48ad614SDennis Dalessandro * If the first attempt to allocate memory failed, don't 2030f48ad614SDennis Dalessandro * fail everything but continue with the next lower 2031f48ad614SDennis Dalessandro * size. 2032f48ad614SDennis Dalessandro */ 2033f48ad614SDennis Dalessandro if (idx == 0) { 2034f48ad614SDennis Dalessandro rcd->egrbufs.rcvtid_size = new_size; 2035f48ad614SDennis Dalessandro continue; 2036f48ad614SDennis Dalessandro } 2037f48ad614SDennis Dalessandro 2038f48ad614SDennis Dalessandro /* 2039f48ad614SDennis Dalessandro * Re-partition already allocated buffers to a smaller 2040f48ad614SDennis Dalessandro * size. 2041f48ad614SDennis Dalessandro */ 2042f48ad614SDennis Dalessandro rcd->egrbufs.alloced = 0; 2043f48ad614SDennis Dalessandro for (i = 0, j = 0, offset = 0; j < idx; i++) { 2044f48ad614SDennis Dalessandro if (i >= rcd->egrbufs.count) 2045f48ad614SDennis Dalessandro break; 204660368186STymoteusz Kielan rcd->egrbufs.rcvtids[i].dma = 204760368186STymoteusz Kielan rcd->egrbufs.buffers[j].dma + offset; 2048f48ad614SDennis Dalessandro rcd->egrbufs.rcvtids[i].addr = 2049f48ad614SDennis Dalessandro rcd->egrbufs.buffers[j].addr + offset; 2050f48ad614SDennis Dalessandro rcd->egrbufs.alloced++; 205160368186STymoteusz Kielan if ((rcd->egrbufs.buffers[j].dma + offset + 2052f48ad614SDennis Dalessandro new_size) == 205360368186STymoteusz Kielan (rcd->egrbufs.buffers[j].dma + 2054f48ad614SDennis Dalessandro rcd->egrbufs.buffers[j].len)) { 2055f48ad614SDennis Dalessandro j++; 2056f48ad614SDennis Dalessandro offset = 0; 2057f48ad614SDennis Dalessandro } else { 2058f48ad614SDennis Dalessandro offset += new_size; 2059f48ad614SDennis Dalessandro } 2060f48ad614SDennis Dalessandro } 2061f48ad614SDennis Dalessandro rcd->egrbufs.rcvtid_size = new_size; 2062f48ad614SDennis Dalessandro } 2063f48ad614SDennis Dalessandro } 2064f48ad614SDennis Dalessandro rcd->egrbufs.numbufs = idx; 2065f48ad614SDennis Dalessandro rcd->egrbufs.size = alloced_bytes; 2066f48ad614SDennis Dalessandro 2067f48ad614SDennis Dalessandro hfi1_cdbg(PROC, 2068f48ad614SDennis Dalessandro "ctxt%u: Alloced %u rcv tid entries @ %uKB, total %zuKB\n", 206923002d5bSGrzegorz Heldt rcd->ctxt, rcd->egrbufs.alloced, 207023002d5bSGrzegorz Heldt rcd->egrbufs.rcvtid_size / 1024, rcd->egrbufs.size / 1024); 2071f48ad614SDennis Dalessandro 2072f48ad614SDennis Dalessandro /* 2073f48ad614SDennis Dalessandro * Set the contexts rcv array head update threshold to the closest 2074f48ad614SDennis Dalessandro * power of 2 (so we can use a mask instead of modulo) below half 2075f48ad614SDennis Dalessandro * the allocated entries. 2076f48ad614SDennis Dalessandro */ 2077f48ad614SDennis Dalessandro rcd->egrbufs.threshold = 2078f48ad614SDennis Dalessandro rounddown_pow_of_two(rcd->egrbufs.alloced / 2); 2079f48ad614SDennis Dalessandro /* 2080f48ad614SDennis Dalessandro * Compute the expected RcvArray entry base. This is done after 2081f48ad614SDennis Dalessandro * allocating the eager buffers in order to maximize the 2082f48ad614SDennis Dalessandro * expected RcvArray entries for the context. 
2083f48ad614SDennis Dalessandro */ 2084f48ad614SDennis Dalessandro max_entries = rcd->rcv_array_groups * dd->rcv_entries.group_size; 2085f48ad614SDennis Dalessandro egrtop = roundup(rcd->egrbufs.alloced, dd->rcv_entries.group_size); 2086f48ad614SDennis Dalessandro rcd->expected_count = max_entries - egrtop; 2087f48ad614SDennis Dalessandro if (rcd->expected_count > MAX_TID_PAIR_ENTRIES * 2) 2088f48ad614SDennis Dalessandro rcd->expected_count = MAX_TID_PAIR_ENTRIES * 2; 2089f48ad614SDennis Dalessandro 2090f48ad614SDennis Dalessandro rcd->expected_base = rcd->eager_base + egrtop; 2091f48ad614SDennis Dalessandro hfi1_cdbg(PROC, "ctxt%u: eager:%u, exp:%u, egrbase:%u, expbase:%u\n", 2092f48ad614SDennis Dalessandro rcd->ctxt, rcd->egrbufs.alloced, rcd->expected_count, 2093f48ad614SDennis Dalessandro rcd->eager_base, rcd->expected_base); 2094f48ad614SDennis Dalessandro 2095f48ad614SDennis Dalessandro if (!hfi1_rcvbuf_validate(rcd->egrbufs.rcvtid_size, PT_EAGER, &order)) { 2096f48ad614SDennis Dalessandro hfi1_cdbg(PROC, 2097f48ad614SDennis Dalessandro "ctxt%u: current Eager buffer size is invalid %u\n", 2098f48ad614SDennis Dalessandro rcd->ctxt, rcd->egrbufs.rcvtid_size); 2099f48ad614SDennis Dalessandro ret = -EINVAL; 210062239fc6SMichael J. Ruhl goto bail_rcvegrbuf_phys; 2101f48ad614SDennis Dalessandro } 2102f48ad614SDennis Dalessandro 2103f48ad614SDennis Dalessandro for (idx = 0; idx < rcd->egrbufs.alloced; idx++) { 2104f48ad614SDennis Dalessandro hfi1_put_tid(dd, rcd->eager_base + idx, PT_EAGER, 210560368186STymoteusz Kielan rcd->egrbufs.rcvtids[idx].dma, order); 2106f48ad614SDennis Dalessandro cond_resched(); 2107f48ad614SDennis Dalessandro } 210862239fc6SMichael J. Ruhl 210962239fc6SMichael J. Ruhl return 0; 2110f48ad614SDennis Dalessandro 2111f48ad614SDennis Dalessandro bail_rcvegrbuf_phys: 2112f48ad614SDennis Dalessandro for (idx = 0; idx < rcd->egrbufs.alloced && 2113f48ad614SDennis Dalessandro rcd->egrbufs.buffers[idx].addr; 2114f48ad614SDennis Dalessandro idx++) { 2115f48ad614SDennis Dalessandro dma_free_coherent(&dd->pcidev->dev, 2116f48ad614SDennis Dalessandro rcd->egrbufs.buffers[idx].len, 2117f48ad614SDennis Dalessandro rcd->egrbufs.buffers[idx].addr, 211860368186STymoteusz Kielan rcd->egrbufs.buffers[idx].dma); 2119f48ad614SDennis Dalessandro rcd->egrbufs.buffers[idx].addr = NULL; 212060368186STymoteusz Kielan rcd->egrbufs.buffers[idx].dma = 0; 2121f48ad614SDennis Dalessandro rcd->egrbufs.buffers[idx].len = 0; 2122f48ad614SDennis Dalessandro } 212362239fc6SMichael J. Ruhl 2124f48ad614SDennis Dalessandro return ret; 2125f48ad614SDennis Dalessandro } 2126
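/*
 * Illustrative sketch, not driver code: hfi1_setup_eagerbufs() above is
 * essentially a "halve the chunk size on allocation failure" loop.  A
 * simplified rendering of that idea, using the hypothetical helpers
 * try_alloc(), record_buffer() and carve_existing() in place of
 * dma_alloc_coherent() and the re-partitioning loop, looks like:
 *
 *	size_t chunk = rcvtid_size;
 *
 *	while (total < wanted_bytes && nbufs < max_bufs) {
 *		void *buf = try_alloc(chunk);
 *
 *		if (buf) {
 *			record_buffer(buf, chunk);
 *			total += chunk;
 *			continue;
 *		}
 *		if (chunk == round_mtu || one_pkt_per_egr_buffer)
 *			return -ENOMEM;
 *		chunk /= 2;
 *		carve_existing(chunk);
 *	}
 *
 * After the loop, the head-update threshold is set with
 * rounddown_pow_of_two(alloced / 2) so the hot path can test it with a
 * mask rather than a modulo, as noted in the comment above.
 */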