/*
 * Copyright(c) 2015 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>
#include <linux/bitmap.h>
#include <rdma/rdma_vt.h>

#include "hfi.h"
#include "device.h"
#include "common.h"
#include "trace.h"
#include "mad.h"
#include "sdma.h"
#include "debugfs.h"
#include "verbs.h"
#include "aspm.h"
#include "affinity.h"
#include "vnic.h"
#include "exp_rcv.h"

#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

#define HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES 5
/*
 * min buffers we want to have per context, after those used by the driver
 */
#define HFI1_MIN_USER_CTXT_BUFCNT 7

#define HFI1_MIN_HDRQ_EGRBUF_CNT 2
#define HFI1_MAX_HDRQ_EGRBUF_CNT 16352
#define HFI1_MIN_EAGER_BUFFER_SIZE (4 * 1024) /* 4KB */
#define HFI1_MAX_EAGER_BUFFER_SIZE (256 * 1024) /* 256KB */

/*
 * Number of user receive contexts we are configured to use (to allow for more
 * pio buffers per ctxt, etc.)  Zero means use one user context per CPU.
 */
int num_user_contexts = -1;
module_param_named(num_user_contexts, num_user_contexts, int, 0444);
MODULE_PARM_DESC(
	num_user_contexts, "Set max number of user contexts to use (default: -1 will use the real (non-HT) CPU count)");

uint krcvqs[RXE_NUM_DATA_VL];
int krcvqsset;
module_param_array(krcvqs, uint, &krcvqsset, S_IRUGO);
MODULE_PARM_DESC(krcvqs, "Array of the number of non-control kernel receive queues by VL");

/* computed based on above array */
unsigned long n_krcvqs;

static unsigned hfi1_rcvarr_split = 25;
module_param_named(rcvarr_split, hfi1_rcvarr_split, uint, S_IRUGO);
MODULE_PARM_DESC(rcvarr_split, "Percent of context's RcvArray entries used for Eager buffers");

static uint eager_buffer_size = (8 << 20); /* 8MB */
module_param(eager_buffer_size, uint, S_IRUGO);
MODULE_PARM_DESC(eager_buffer_size, "Size of the eager buffers, default: 8MB");

static uint rcvhdrcnt = 2048; /* 2x the max eager buffer count */
module_param_named(rcvhdrcnt, rcvhdrcnt, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrcnt, "Receive header queue count (default 2048)");

static uint hfi1_hdrq_entsize = 32;
module_param_named(hdrq_entsize, hfi1_hdrq_entsize, uint, 0444);
MODULE_PARM_DESC(hdrq_entsize, "Size of header queue entries: 2 - 8B, 16 - 64B, 32 - 128B (default)");

unsigned int user_credit_return_threshold = 33;	/* default is 33% */
module_param(user_credit_return_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(user_credit_return_threshold, "Credit return threshold for user send contexts, return when unreturned credits passes this many blocks (in percent of allocated blocks, 0 is off)");
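
/*
 * Illustrative (not prescriptive) example of tuning the parameters above
 * at load time; the values are hypothetical, not recommendations:
 *
 *	modprobe hfi1 krcvqs=2,2 rcvarr_split=25 eager_buffer_size=8388608
 *
 * gives the first two data VLs two kernel receive queues each, keeps the
 * default 25% Eager share of the RcvArray, and keeps 8MB eager buffers.
 */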

static inline u64 encode_rcv_header_entry_size(u16 size);

static struct idr hfi1_unit_table;

static int hfi1_create_kctxt(struct hfi1_devdata *dd,
			     struct hfi1_pportdata *ppd)
{
	struct hfi1_ctxtdata *rcd;
	int ret;

	/* Control context has to be always 0 */
	BUILD_BUG_ON(HFI1_CTRL_CTXT != 0);

	ret = hfi1_create_ctxtdata(ppd, dd->node, &rcd);
	if (ret < 0) {
		dd_dev_err(dd, "Kernel receive context allocation failed\n");
		return ret;
	}

	/*
	 * Set up the kernel context flags here and now because they use
	 * default values for all receive side memories.  User contexts will
	 * be handled as they are created.
	 */
	rcd->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
		HFI1_CAP_KGET(NODROP_RHQ_FULL) |
		HFI1_CAP_KGET(NODROP_EGR_FULL) |
		HFI1_CAP_KGET(DMA_RTAIL);

	/* Control context must use DMA_RTAIL */
	if (rcd->ctxt == HFI1_CTRL_CTXT)
		rcd->flags |= HFI1_CAP_DMA_RTAIL;
	rcd->seq_cnt = 1;

	rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node);
	if (!rcd->sc) {
		dd_dev_err(dd, "Kernel send context allocation failed\n");
		return -ENOMEM;
	}
	hfi1_init_ctxt(rcd->sc);

	return 0;
}

/*
 * Create the receive context array and one or more kernel contexts
 */
int hfi1_create_kctxts(struct hfi1_devdata *dd)
{
	u16 i;
	int ret;

	dd->rcd = kcalloc_node(dd->num_rcv_contexts, sizeof(*dd->rcd),
			       GFP_KERNEL, dd->node);
	if (!dd->rcd)
		return -ENOMEM;

	for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) {
		ret = hfi1_create_kctxt(dd, dd->pport);
		if (ret)
			goto bail;
	}

	return 0;
bail:
	for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i)
		hfi1_free_ctxt(dd->rcd[i]);

	/* All the contexts should be freed, free the array */
	kfree(dd->rcd);
	dd->rcd = NULL;
	return ret;
}

/*
 * Helper routines for the receive context reference count (rcd and uctxt).
 */
static void hfi1_rcd_init(struct hfi1_ctxtdata *rcd)
{
	kref_init(&rcd->kref);
}

/**
 * hfi1_rcd_free - When reference is zero clean up.
 * @kref: pointer to an initialized rcd data structure
 *
 */
static void hfi1_rcd_free(struct kref *kref)
{
	unsigned long flags;
	struct hfi1_ctxtdata *rcd =
		container_of(kref, struct hfi1_ctxtdata, kref);

	hfi1_free_ctxtdata(rcd->dd, rcd);

	spin_lock_irqsave(&rcd->dd->uctxt_lock, flags);
	rcd->dd->rcd[rcd->ctxt] = NULL;
	spin_unlock_irqrestore(&rcd->dd->uctxt_lock, flags);

	kfree(rcd);
}

/**
 * hfi1_rcd_put - decrement reference for rcd
 * @rcd: pointer to an initialized rcd data structure
 *
 * Use this to put a reference after the init.
 */
int hfi1_rcd_put(struct hfi1_ctxtdata *rcd)
{
	if (rcd)
		return kref_put(&rcd->kref, hfi1_rcd_free);

	return 0;
}

/**
 * hfi1_rcd_get - increment reference for rcd
 * @rcd: pointer to an initialized rcd data structure
 *
 * Use this to get a reference after the init.
 */
void hfi1_rcd_get(struct hfi1_ctxtdata *rcd)
{
	kref_get(&rcd->kref);
}

/**
 * allocate_rcd_index - allocate an rcd index from the rcd array
 * @dd: pointer to a valid devdata structure
 * @rcd: rcd data structure to assign
 * @index: pointer to index that is allocated
 *
 * Find an empty index in the rcd array, and assign the given rcd to it.
 * If the array is full, we are EBUSY.
 *
 */
static int allocate_rcd_index(struct hfi1_devdata *dd,
			      struct hfi1_ctxtdata *rcd, u16 *index)
{
	unsigned long flags;
	u16 ctxt;

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	for (ctxt = 0; ctxt < dd->num_rcv_contexts; ctxt++)
		if (!dd->rcd[ctxt])
			break;

	if (ctxt < dd->num_rcv_contexts) {
		rcd->ctxt = ctxt;
		dd->rcd[ctxt] = rcd;
		hfi1_rcd_init(rcd);
	}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	if (ctxt >= dd->num_rcv_contexts)
		return -EBUSY;

	*index = ctxt;

	return 0;
}

/**
 * hfi1_rcd_get_by_index_safe - validate the ctxt index before accessing the
 * array
 * @dd: pointer to a valid devdata structure
 * @ctxt: the index of a possible rcd
 *
 * This is a wrapper for hfi1_rcd_get_by_index() to validate that the given
 * ctxt index is valid.
 *
 * The caller is responsible for making the _put().
 *
 */
struct hfi1_ctxtdata *hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd,
						 u16 ctxt)
{
	if (ctxt < dd->num_rcv_contexts)
		return hfi1_rcd_get_by_index(dd, ctxt);

	return NULL;
}

/**
 * hfi1_rcd_get_by_index
 * @dd: pointer to a valid devdata structure
 * @ctxt: the index of a possible rcd
 *
 * We need to protect access to the rcd array.  If access is needed to
 * one or more indices, get the protecting spinlock and then increment the
 * kref.
 *
 * The caller is responsible for making the _put().
 *
 */
struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt)
{
	unsigned long flags;
	struct hfi1_ctxtdata *rcd = NULL;

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	if (dd->rcd[ctxt]) {
		rcd = dd->rcd[ctxt];
		hfi1_rcd_get(rcd);
	}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	return rcd;
}
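
/*
 * Typical lookup pattern for callers of the two getters above (a sketch,
 * not taken from any specific call site):
 *
 *	rcd = hfi1_rcd_get_by_index_safe(dd, ctxt);
 *	if (rcd) {
 *		... use rcd ...
 *		hfi1_rcd_put(rcd);
 *	}
 *
 * The get/put pair keeps the context from being freed while it is in use.
 */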

/*
 * Common code for user and kernel context create and setup.
 * NOTE: the initial kref is done here (hfi1_rcd_init()).
 */
int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
			 struct hfi1_ctxtdata **context)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct hfi1_ctxtdata *rcd;
	unsigned kctxt_ngroups = 0;
	u32 base;

	if (dd->rcv_entries.nctxt_extra >
	    dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt)
		kctxt_ngroups = (dd->rcv_entries.nctxt_extra -
				 (dd->num_rcv_contexts -
				  dd->first_dyn_alloc_ctxt));
	rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, numa);
	if (rcd) {
		u32 rcvtids, max_entries;
		u16 ctxt;
		int ret;

		ret = allocate_rcd_index(dd, rcd, &ctxt);
		if (ret) {
			*context = NULL;
			kfree(rcd);
			return ret;
		}

		INIT_LIST_HEAD(&rcd->qp_wait_list);
		hfi1_exp_tid_group_init(rcd);
		rcd->ppd = ppd;
		rcd->dd = dd;
		__set_bit(0, rcd->in_use_ctxts);
		rcd->numa_id = numa;
		rcd->rcv_array_groups = dd->rcv_entries.ngroups;
		rcd->rhf_rcv_function_map = normal_rhf_rcv_functions;

		mutex_init(&rcd->exp_mutex);

		hfi1_cdbg(PROC, "setting up context %u\n", rcd->ctxt);

		/*
		 * Calculate the context's RcvArray entry starting point.
		 * We do this here because we have to take into account all
		 * the RcvArray entries that previous contexts would have
		 * taken and we have to account for any extra groups assigned
		 * to the static (kernel) or dynamic (vnic/user) contexts.
		 */
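		/*
		 * Worked example with hypothetical numbers (not from real
		 * hardware): with ngroups = 8 and kctxt_ngroups = 2, kernel
		 * context ctxt = 1 (< kctxt_ngroups) gets an extra group:
		 * base = 1 * (8 + 1) = 9 and rcv_array_groups becomes 9,
		 * while kernel context ctxt = 3 gets base = 2 + 3 * 8 = 26.
		 */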
		if (ctxt < dd->first_dyn_alloc_ctxt) {
			if (ctxt < kctxt_ngroups) {
				base = ctxt * (dd->rcv_entries.ngroups + 1);
				rcd->rcv_array_groups++;
			} else {
				base = kctxt_ngroups +
					(ctxt * dd->rcv_entries.ngroups);
			}
		} else {
			u16 ct = ctxt - dd->first_dyn_alloc_ctxt;

			base = ((dd->n_krcv_queues * dd->rcv_entries.ngroups) +
				kctxt_ngroups);
			if (ct < dd->rcv_entries.nctxt_extra) {
				base += ct * (dd->rcv_entries.ngroups + 1);
				rcd->rcv_array_groups++;
			} else {
				base += dd->rcv_entries.nctxt_extra +
					(ct * dd->rcv_entries.ngroups);
			}
		}
		rcd->eager_base = base * dd->rcv_entries.group_size;

		rcd->rcvhdrq_cnt = rcvhdrcnt;
		rcd->rcvhdrqentsize = hfi1_hdrq_entsize;
		rcd->rhf_offset =
			rcd->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
		/*
		 * Simple Eager buffer allocation: we have already
		 * pre-allocated the number of RcvArray entry groups.
		 * Each ctxtdata structure holds the number of groups
		 * for that context.
		 *
		 * To follow CSR requirements and maintain cacheline
		 * alignment, make sure all sizes and bases are multiples
		 * of group_size.
		 *
		 * The expected entry count is what is left after assigning
		 * eager.
		 */
		max_entries = rcd->rcv_array_groups *
			dd->rcv_entries.group_size;
		rcvtids = ((max_entries * hfi1_rcvarr_split) / 100);
		rcd->egrbufs.count = round_down(rcvtids,
						dd->rcv_entries.group_size);
		if (rcd->egrbufs.count > MAX_EAGER_ENTRIES) {
			dd_dev_err(dd, "ctxt%u: requested too many RcvArray entries.\n",
				   rcd->ctxt);
			rcd->egrbufs.count = MAX_EAGER_ENTRIES;
		}
		hfi1_cdbg(PROC,
			  "ctxt%u: max Eager buffer RcvArray entries: %u\n",
			  rcd->ctxt, rcd->egrbufs.count);
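
		/*
		 * Example of the split arithmetic above, with hypothetical
		 * values: 9 groups of group_size 8 give max_entries = 72;
		 * with rcvarr_split = 25, rcvtids = 72 * 25 / 100 = 18,
		 * which round_down() trims to 16 eager entries, leaving
		 * the remaining entries for expected (TID) receives.
		 */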

		/*
		 * Allocate array that will hold the eager buffer accounting
		 * data.
		 * This will allocate the maximum possible buffer count based
		 * on the value of the RcvArray split parameter.
		 * The resulting value will be rounded down to the closest
		 * multiple of dd->rcv_entries.group_size.
		 */
		rcd->egrbufs.buffers =
			kcalloc_node(rcd->egrbufs.count,
				     sizeof(*rcd->egrbufs.buffers),
				     GFP_KERNEL, numa);
		if (!rcd->egrbufs.buffers)
			goto bail;
		rcd->egrbufs.rcvtids =
			kcalloc_node(rcd->egrbufs.count,
				     sizeof(*rcd->egrbufs.rcvtids),
				     GFP_KERNEL, numa);
		if (!rcd->egrbufs.rcvtids)
			goto bail;
		rcd->egrbufs.size = eager_buffer_size;
		/*
		 * The size of the buffers programmed into the RcvArray
		 * entries needs to be big enough to handle the highest
		 * MTU supported.
		 */
		if (rcd->egrbufs.size < hfi1_max_mtu) {
			rcd->egrbufs.size = __roundup_pow_of_two(hfi1_max_mtu);
			hfi1_cdbg(PROC,
				  "ctxt%u: eager bufs size too small. Adjusting to %zu\n",
				  rcd->ctxt, rcd->egrbufs.size);
		}
		rcd->egrbufs.rcvtid_size = HFI1_MAX_EAGER_BUFFER_SIZE;

		/* Applicable only for statically created kernel contexts */
		if (ctxt < dd->first_dyn_alloc_ctxt) {
			rcd->opstats = kzalloc_node(sizeof(*rcd->opstats),
						    GFP_KERNEL, numa);
			if (!rcd->opstats)
				goto bail;
		}

		*context = rcd;
		return 0;
	}

bail:
	*context = NULL;
	hfi1_free_ctxt(rcd);
	return -ENOMEM;
}

/**
 * hfi1_free_ctxt
 * @rcd: pointer to an initialized rcd data structure
 *
 * This wrapper is the free function that matches hfi1_create_ctxtdata().
 * When a context is done being used (kernel or user), this function is called
 * for the "final" put to match the kref init from hfi1_create_ctxtdata().
 * Other users of the context do a get/put sequence to make sure that the
 * structure isn't removed while in use.
 */
void hfi1_free_ctxt(struct hfi1_ctxtdata *rcd)
{
	hfi1_rcd_put(rcd);
}

/*
 * Convert a receive header entry size to the encoding used in the CSR.
 *
 * Return zero if the given size is invalid.
 */
static inline u64 encode_rcv_header_entry_size(u16 size)
{
	/* there are only 3 valid receive header entry sizes */
	if (size == 2)
		return 1;
	if (size == 16)
		return 2;
	if (size == 32)
		return 4;
	return 0; /* invalid */
}
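
/*
 * Note: the entry sizes here are in DWORDs, matching the hdrq_entsize
 * module parameter above (2 -> 8B, 16 -> 64B, 32 -> 128B); the returned
 * value is the CSR encoding of that size, and 0 flags an unsupported size.
 */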

/*
 * Select the largest ccti value over all SLs to determine the intra-
 * packet gap for the link.
 *
 * called with cca_timer_lock held (to protect access to cca_timer
 * array), and rcu_read_lock() (to protect access to cc_state).
 */
void set_link_ipg(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct cc_state *cc_state;
	int i;
	u16 cce, ccti_limit, max_ccti = 0;
	u16 shift, mult;
	u64 src;
	u32 current_egress_rate; /* Mbits/sec */
	u32 max_pkt_time;
	/*
	 * max_pkt_time is the maximum packet egress time in units
	 * of the fabric clock period 1/(805 MHz).
	 */

	cc_state = get_cc_state(ppd);

	if (!cc_state)
		/*
		 * This should _never_ happen - rcu_read_lock() is held,
		 * and set_link_ipg() should not be called if cc_state
		 * is NULL.
		 */
		return;

	for (i = 0; i < OPA_MAX_SLS; i++) {
		u16 ccti = ppd->cca_timer[i].ccti;

		if (ccti > max_ccti)
			max_ccti = ccti;
	}

	ccti_limit = cc_state->cct.ccti_limit;
	if (max_ccti > ccti_limit)
		max_ccti = ccti_limit;

	cce = cc_state->cct.entries[max_ccti].entry;
	shift = (cce & 0xc000) >> 14;
	mult = (cce & 0x3fff);

	current_egress_rate = active_egress_rate(ppd);

	max_pkt_time = egress_cycles(ppd->ibmaxlen, current_egress_rate);

	src = (max_pkt_time >> shift) * mult;

	src &= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SMASK;
	src <<= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SHIFT;

	write_csr(dd, SEND_STATIC_RATE_CONTROL, src);
}

static enum hrtimer_restart cca_timer_fn(struct hrtimer *t)
{
	struct cca_timer *cca_timer;
	struct hfi1_pportdata *ppd;
	int sl;
	u16 ccti_timer, ccti_min;
	struct cc_state *cc_state;
	unsigned long flags;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	cca_timer = container_of(t, struct cca_timer, hrtimer);
	ppd = cca_timer->ppd;
	sl = cca_timer->sl;

	rcu_read_lock();

	cc_state = get_cc_state(ppd);

	if (!cc_state) {
		rcu_read_unlock();
		return HRTIMER_NORESTART;
	}

	/*
	 * 1) decrement ccti for SL
	 * 2) calculate IPG for link (set_link_ipg())
	 * 3) restart timer, unless ccti is at min value
	 */

	ccti_min = cc_state->cong_setting.entries[sl].ccti_min;
	ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;

	spin_lock_irqsave(&ppd->cca_timer_lock, flags);

	if (cca_timer->ccti > ccti_min) {
		cca_timer->ccti--;
		set_link_ipg(ppd);
	}
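
	/*
	 * If ccti is still above the min after the decrement, re-arm the
	 * timer.  The interval arithmetic below uses ccti_timer's 1.024
	 * usec units; e.g. a (hypothetical) ccti_timer of 1000 re-arms
	 * the timer for 1000 * 1024 ns = 1.024 ms.
	 */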
	if (cca_timer->ccti > ccti_min) {
		unsigned long nsec = 1024 * ccti_timer;
		/* ccti_timer is in units of 1.024 usec */
		hrtimer_forward_now(t, ns_to_ktime(nsec));
		ret = HRTIMER_RESTART;
	}

	spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);
	rcu_read_unlock();
	return ret;
}

/*
 * Common code for initializing the physical port structure.
 */
void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
			 struct hfi1_devdata *dd, u8 hw_pidx, u8 port)
{
	int i;
	uint default_pkey_idx;
	struct cc_state *cc_state;

	ppd->dd = dd;
	ppd->hw_pidx = hw_pidx;
	ppd->port = port; /* IB port number, not index */
	ppd->prev_link_width = LINK_WIDTH_DEFAULT;
	/*
	 * There are C_VL_COUNT number of PortVLXmitWait counters.
	 * Adding 1 to C_VL_COUNT to include the PortXmitWait counter.
	 */
	for (i = 0; i < C_VL_COUNT + 1; i++) {
		ppd->port_vl_xmit_wait_last[i] = 0;
		ppd->vl_xmit_flit_cnt[i] = 0;
	}

	default_pkey_idx = 1;

	ppd->pkeys[default_pkey_idx] = DEFAULT_P_KEY;
	ppd->part_enforce |= HFI1_PART_ENFORCE_IN;

	if (loopback) {
		hfi1_early_err(&pdev->dev,
			       "Faking data partition 0x8001 in idx %u\n",
			       !default_pkey_idx);
		ppd->pkeys[!default_pkey_idx] = 0x8001;
	}

	INIT_WORK(&ppd->link_vc_work, handle_verify_cap);
	INIT_WORK(&ppd->link_up_work, handle_link_up);
	INIT_WORK(&ppd->link_down_work, handle_link_down);
	INIT_WORK(&ppd->freeze_work, handle_freeze);
	INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade);
	INIT_WORK(&ppd->sma_message_work, handle_sma_message);
	INIT_WORK(&ppd->link_bounce_work, handle_link_bounce);
	INIT_DELAYED_WORK(&ppd->start_link_work, handle_start_link);
	INIT_WORK(&ppd->linkstate_active_work, receive_interrupt_work);
	INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event);

	mutex_init(&ppd->hls_lock);
	spin_lock_init(&ppd->qsfp_info.qsfp_lock);

	ppd->qsfp_info.ppd = ppd;
	ppd->sm_trap_qp = 0x0;
	ppd->sa_qp = 0x1;

	ppd->hfi1_wq = NULL;

	spin_lock_init(&ppd->cca_timer_lock);

	for (i = 0; i < OPA_MAX_SLS; i++) {
		hrtimer_init(&ppd->cca_timer[i].hrtimer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		ppd->cca_timer[i].ppd = ppd;
		ppd->cca_timer[i].sl = i;
		ppd->cca_timer[i].ccti = 0;
		ppd->cca_timer[i].hrtimer.function = cca_timer_fn;
	}

	ppd->cc_max_table_entries = IB_CC_TABLE_CAP_DEFAULT;

	spin_lock_init(&ppd->cc_state_lock);
	spin_lock_init(&ppd->cc_log_lock);
	cc_state = kzalloc(sizeof(*cc_state), GFP_KERNEL);
	RCU_INIT_POINTER(ppd->cc_state, cc_state);
	if (!cc_state)
		goto bail;
	return;

bail:
	hfi1_early_err(&pdev->dev,
		       "Congestion Control Agent disabled for port %d\n", port);
}

/*
 * Do initialization for device that is only needed on
 * first detect, not on resets.
 */
static int loadtime_init(struct hfi1_devdata *dd)
{
	return 0;
}

/**
 * init_after_reset - re-initialize after a reset
 * @dd: the hfi1_ib device
 *
 * sanity check at least some of the values after reset, and
 * ensure no receive or transmit (explicitly, in case reset
 * failed)
 */
static int init_after_reset(struct hfi1_devdata *dd)
{
	int i;
	struct hfi1_ctxtdata *rcd;
	/*
	 * Ensure chip does no sends or receives, tail updates, or
	 * pioavail updates while we re-initialize.  This is mostly
	 * for the driver data structures, not chip registers.
	 */
	for (i = 0; i < dd->num_rcv_contexts; i++) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
			     HFI1_RCVCTRL_INTRAVAIL_DIS |
			     HFI1_RCVCTRL_TAILUPD_DIS, rcd);
		hfi1_rcd_put(rcd);
	}
	pio_send_control(dd, PSC_GLOBAL_DISABLE);
	for (i = 0; i < dd->num_send_contexts; i++)
		sc_disable(dd->send_contexts[i].sc);

	return 0;
}

static void enable_chip(struct hfi1_devdata *dd)
{
	struct hfi1_ctxtdata *rcd;
	u32 rcvmask;
	u16 i;

	/* enable PIO send */
	pio_send_control(dd, PSC_GLOBAL_ENABLE);

	/*
	 * Enable kernel ctxts' receive and receive interrupt.
	 * Other ctxts done as user opens and initializes them.
	 */
	for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (!rcd)
			continue;
		rcvmask = HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB;
		rcvmask |= HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ?
			HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
		if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
			rcvmask |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
		if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_RHQ_FULL))
			rcvmask |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
		if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_EGR_FULL))
			rcvmask |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
		hfi1_rcvctrl(dd, rcvmask, rcd);
		sc_enable(rcd->sc);
		hfi1_rcd_put(rcd);
	}
}

/**
 * create_workqueues - create per port workqueues
 * @dd: the hfi1_ib device
 */
static int create_workqueues(struct hfi1_devdata *dd)
{
	int pidx;
	struct hfi1_pportdata *ppd;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (!ppd->hfi1_wq) {
			ppd->hfi1_wq =
				alloc_workqueue(
					"hfi%d_%d",
					WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE,
					HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES,
					dd->unit, pidx);
			if (!ppd->hfi1_wq)
				goto wq_error;
		}
		if (!ppd->link_wq) {
			/*
			 * Make the link workqueue single-threaded to enforce
			 * serialization.
			 */
			ppd->link_wq =
				alloc_workqueue(
					"hfi_link_%d_%d",
					WQ_SYSFS | WQ_MEM_RECLAIM | WQ_UNBOUND,
					1, /* max_active */
					dd->unit, pidx);
			if (!ppd->link_wq)
				goto wq_error;
		}
	}
	return 0;
wq_error:
	pr_err("alloc_workqueue failed for port %d\n", pidx + 1);
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->hfi1_wq) {
			destroy_workqueue(ppd->hfi1_wq);
			ppd->hfi1_wq = NULL;
		}
		if (ppd->link_wq) {
			destroy_workqueue(ppd->link_wq);
			ppd->link_wq = NULL;
		}
	}
	return -ENOMEM;
}

/**
 * hfi1_init - do the actual initialization sequence on the chip
 * @dd: the hfi1_ib device
 * @reinit: re-initializing, so don't allocate new memory
 *
 * Do the actual initialization sequence on the chip.  This is done
 * both from the init routine called from the PCI infrastructure, and
 * when we reset the chip, or detect that it was reset internally,
 * or it's administratively re-enabled.
 *
 * Memory allocation here and in called routines is only done in
 * the first case (reinit == 0).  We have to be careful, because even
 * without memory allocation, we need to re-write all the chip registers
 * TIDs, etc. after the reset or enable has completed.
 */
int hfi1_init(struct hfi1_devdata *dd, int reinit)
{
	int ret = 0, pidx, lastfail = 0;
	unsigned long len;
	u16 i;
	struct hfi1_ctxtdata *rcd;
	struct hfi1_pportdata *ppd;

	/* Set up send low level handlers */
	dd->process_pio_send = hfi1_verbs_send_pio;
	dd->process_dma_send = hfi1_verbs_send_dma;
	dd->pio_inline_send = pio_copy;
	dd->process_vnic_dma_send = hfi1_vnic_send_dma;

	if (is_ax(dd)) {
		atomic_set(&dd->drop_packet, DROP_PACKET_ON);
		dd->do_drop = 1;
	} else {
		atomic_set(&dd->drop_packet, DROP_PACKET_OFF);
		dd->do_drop = 0;
	}

	/* make sure the link is not "up" */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		ppd->linkup = 0;
	}

	if (reinit)
		ret = init_after_reset(dd);
	else
		ret = loadtime_init(dd);
	if (ret)
		goto done;

	/* allocate dummy tail memory for all receive contexts */
	dd->rcvhdrtail_dummy_kvaddr = dma_zalloc_coherent(
		&dd->pcidev->dev, sizeof(u64),
		&dd->rcvhdrtail_dummy_dma,
		GFP_KERNEL);

	if (!dd->rcvhdrtail_dummy_kvaddr) {
		dd_dev_err(dd, "cannot allocate dummy tail memory\n");
		ret = -ENOMEM;
		goto done;
	}

	/* dd->rcd can be NULL if early initialization failed */
	for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i) {
		/*
		 * Set up the (kernel) rcvhdr queue and egr TIDs.  If doing
		 * re-init, the simplest way to handle this is to free
		 * existing, and re-allocate.
		 * Need to re-create rest of ctxt 0 ctxtdata as well.
		 */
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (!rcd)
			continue;

		rcd->do_interrupt = &handle_receive_interrupt;

		lastfail = hfi1_create_rcvhdrq(dd, rcd);
		if (!lastfail)
			lastfail = hfi1_setup_eagerbufs(rcd);
		if (lastfail) {
			dd_dev_err(dd,
				   "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
			ret = lastfail;
		}
		hfi1_rcd_put(rcd);
	}

	/* Allocate enough memory for user event notification. */
	len = PAGE_ALIGN(chip_rcv_contexts(dd) * HFI1_MAX_SHARED_CTXTS *
			 sizeof(*dd->events));
	dd->events = vmalloc_user(len);
	if (!dd->events)
		dd_dev_err(dd, "Failed to allocate user events page\n");
	/*
	 * Allocate a page for device and port status.
	 * Page will be shared amongst all user processes.
	 */
	dd->status = vmalloc_user(PAGE_SIZE);
	if (!dd->status)
		dd_dev_err(dd, "Failed to allocate dev status page\n");
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (dd->status)
			/* Currently, we only have one port */
			ppd->statusp = &dd->status->port;

		set_mtu(ppd);
	}

	/* enable chip even if we have an error, so we can debug cause */
	enable_chip(dd);

done:
	/*
	 * Set status even if port serdes is not initialized
	 * so that diags will work.
	 */
	if (dd->status)
		dd->status->dev |= HFI1_STATUS_CHIP_PRESENT |
			HFI1_STATUS_INITTED;
	if (!ret) {
		/* enable all interrupts from the chip */
		set_intr_state(dd, 1);

		/* chip is OK for user apps; mark it as initialized */
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;

			/*
			 * start the serdes - must be after interrupts are
			 * enabled so we are notified when the link goes up
			 */
			lastfail = bringup_serdes(ppd);
			if (lastfail)
				dd_dev_info(dd,
					    "Failed to bring up port %u\n",
					    ppd->port);

			/*
			 * Set status even if port serdes is not initialized
			 * so that diags will work.
			 */
			if (ppd->statusp)
				*ppd->statusp |= HFI1_STATUS_CHIP_PRESENT |
					HFI1_STATUS_INITTED;
			if (!ppd->link_speed_enabled)
				continue;
		}
	}

	/* if ret is non-zero, we probably should do some cleanup here... */
	return ret;
}

static inline struct hfi1_devdata *__hfi1_lookup(int unit)
{
	return idr_find(&hfi1_unit_table, unit);
}

struct hfi1_devdata *hfi1_lookup(int unit)
{
	struct hfi1_devdata *dd;
	unsigned long flags;

	spin_lock_irqsave(&hfi1_devs_lock, flags);
	dd = __hfi1_lookup(unit);
	spin_unlock_irqrestore(&hfi1_devs_lock, flags);

	return dd;
}

/*
 * Stop the timers during unit shutdown, or after an error late
 * in initialization.
 */
static void stop_timers(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	int pidx;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->led_override_timer.function) {
			del_timer_sync(&ppd->led_override_timer);
			atomic_set(&ppd->led_override_timer_active, 0);
		}
	}
}

/**
 * shutdown_device - shut down a device
 * @dd: the hfi1_ib device
 *
 * This is called to make the device quiet when we are about to
 * unload the driver, and also when the device is administratively
 * disabled.  It does not free any data structures.
 * Everything it does has to be setup again by hfi1_init(dd, 1)
 */
static void shutdown_device(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	struct hfi1_ctxtdata *rcd;
	unsigned pidx;
	int i;

	if (dd->flags & HFI1_SHUTDOWN)
		return;
	dd->flags |= HFI1_SHUTDOWN;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		ppd->linkup = 0;
		if (ppd->statusp)
			*ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
					   HFI1_STATUS_IB_READY);
	}
	dd->flags &= ~HFI1_INITTED;

	/* mask and clean up interrupts, but not errors */
	set_intr_state(dd, 0);
	hfi1_clean_up_interrupts(dd);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		for (i = 0; i < dd->num_rcv_contexts; i++) {
			rcd = hfi1_rcd_get_by_index(dd, i);
			hfi1_rcvctrl(dd, HFI1_RCVCTRL_TAILUPD_DIS |
				     HFI1_RCVCTRL_CTXT_DIS |
				     HFI1_RCVCTRL_INTRAVAIL_DIS |
				     HFI1_RCVCTRL_PKEY_DIS |
				     HFI1_RCVCTRL_ONE_PKT_EGR_DIS, rcd);
			hfi1_rcd_put(rcd);
		}
		/*
		 * Gracefully stop all sends allowing any in progress to
		 * trickle out first.
		 */
1073f48ad614SDennis Dalessandro */ 1074f48ad614SDennis Dalessandro for (i = 0; i < dd->num_send_contexts; i++) 1075f48ad614SDennis Dalessandro sc_flush(dd->send_contexts[i].sc); 1076f48ad614SDennis Dalessandro } 1077f48ad614SDennis Dalessandro 1078f48ad614SDennis Dalessandro /* 1079f48ad614SDennis Dalessandro * Enough for anything that's going to trickle out to have actually 1080f48ad614SDennis Dalessandro * done so. 1081f48ad614SDennis Dalessandro */ 1082f48ad614SDennis Dalessandro udelay(20); 1083f48ad614SDennis Dalessandro 1084f48ad614SDennis Dalessandro for (pidx = 0; pidx < dd->num_pports; ++pidx) { 1085f48ad614SDennis Dalessandro ppd = dd->pport + pidx; 1086f48ad614SDennis Dalessandro 1087f48ad614SDennis Dalessandro /* disable all contexts */ 1088f48ad614SDennis Dalessandro for (i = 0; i < dd->num_send_contexts; i++) 1089f48ad614SDennis Dalessandro sc_disable(dd->send_contexts[i].sc); 1090f48ad614SDennis Dalessandro /* disable the send device */ 1091f48ad614SDennis Dalessandro pio_send_control(dd, PSC_GLOBAL_DISABLE); 1092f48ad614SDennis Dalessandro 1093f48ad614SDennis Dalessandro shutdown_led_override(ppd); 1094f48ad614SDennis Dalessandro 1095f48ad614SDennis Dalessandro /* 1096f48ad614SDennis Dalessandro * Clear SerdesEnable. 1097f48ad614SDennis Dalessandro * We can't count on interrupts since we are stopping. 1098f48ad614SDennis Dalessandro */ 1099f48ad614SDennis Dalessandro hfi1_quiet_serdes(ppd); 1100f48ad614SDennis Dalessandro 1101f48ad614SDennis Dalessandro if (ppd->hfi1_wq) { 1102f48ad614SDennis Dalessandro destroy_workqueue(ppd->hfi1_wq); 1103f48ad614SDennis Dalessandro ppd->hfi1_wq = NULL; 1104f48ad614SDennis Dalessandro } 110571d47008SSebastian Sanchez if (ppd->link_wq) { 110671d47008SSebastian Sanchez destroy_workqueue(ppd->link_wq); 110771d47008SSebastian Sanchez ppd->link_wq = NULL; 110871d47008SSebastian Sanchez } 1109f48ad614SDennis Dalessandro } 1110f48ad614SDennis Dalessandro sdma_exit(dd); 1111f48ad614SDennis Dalessandro } 1112f48ad614SDennis Dalessandro 1113f48ad614SDennis Dalessandro /** 1114f48ad614SDennis Dalessandro * hfi1_free_ctxtdata - free a context's allocated data 1115f48ad614SDennis Dalessandro * @dd: the hfi1_ib device 1116f48ad614SDennis Dalessandro * @rcd: the ctxtdata structure 1117f48ad614SDennis Dalessandro * 1118f48ad614SDennis Dalessandro * free up any allocated data for a context 1119f48ad614SDennis Dalessandro * It should never change any chip state, or global driver state. 1120f48ad614SDennis Dalessandro */ 1121f48ad614SDennis Dalessandro void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) 1122f48ad614SDennis Dalessandro { 1123f683c80cSMichael J. 
Ruhl u32 e; 1124f48ad614SDennis Dalessandro 1125f48ad614SDennis Dalessandro if (!rcd) 1126f48ad614SDennis Dalessandro return; 1127f48ad614SDennis Dalessandro 1128f48ad614SDennis Dalessandro if (rcd->rcvhdrq) { 1129b2578431SMike Marciniszyn dma_free_coherent(&dd->pcidev->dev, rcvhdrq_size(rcd), 113060368186STymoteusz Kielan rcd->rcvhdrq, rcd->rcvhdrq_dma); 1131f48ad614SDennis Dalessandro rcd->rcvhdrq = NULL; 1132f48ad614SDennis Dalessandro if (rcd->rcvhdrtail_kvaddr) { 1133f48ad614SDennis Dalessandro dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE, 1134f48ad614SDennis Dalessandro (void *)rcd->rcvhdrtail_kvaddr, 113560368186STymoteusz Kielan rcd->rcvhdrqtailaddr_dma); 1136f48ad614SDennis Dalessandro rcd->rcvhdrtail_kvaddr = NULL; 1137f48ad614SDennis Dalessandro } 1138f48ad614SDennis Dalessandro } 1139f48ad614SDennis Dalessandro 1140f48ad614SDennis Dalessandro /* all the RcvArray entries should have been cleared by now */ 1141f48ad614SDennis Dalessandro kfree(rcd->egrbufs.rcvtids); 1142f683c80cSMichael J. Ruhl rcd->egrbufs.rcvtids = NULL; 1143f48ad614SDennis Dalessandro 1144f48ad614SDennis Dalessandro for (e = 0; e < rcd->egrbufs.alloced; e++) { 114560368186STymoteusz Kielan if (rcd->egrbufs.buffers[e].dma) 1146f48ad614SDennis Dalessandro dma_free_coherent(&dd->pcidev->dev, 1147f48ad614SDennis Dalessandro rcd->egrbufs.buffers[e].len, 1148f48ad614SDennis Dalessandro rcd->egrbufs.buffers[e].addr, 114960368186STymoteusz Kielan rcd->egrbufs.buffers[e].dma); 1150f48ad614SDennis Dalessandro } 1151f48ad614SDennis Dalessandro kfree(rcd->egrbufs.buffers); 1152f683c80cSMichael J. Ruhl rcd->egrbufs.alloced = 0; 1153f683c80cSMichael J. Ruhl rcd->egrbufs.buffers = NULL; 1154f48ad614SDennis Dalessandro 1155f48ad614SDennis Dalessandro sc_free(rcd->sc); 1156f683c80cSMichael J. Ruhl rcd->sc = NULL; 1157f683c80cSMichael J. Ruhl 1158f48ad614SDennis Dalessandro vfree(rcd->subctxt_uregbase); 1159f48ad614SDennis Dalessandro vfree(rcd->subctxt_rcvegrbuf); 1160f48ad614SDennis Dalessandro vfree(rcd->subctxt_rcvhdr_base); 1161f48ad614SDennis Dalessandro kfree(rcd->opstats); 1162f683c80cSMichael J. Ruhl 1163f683c80cSMichael J. Ruhl rcd->subctxt_uregbase = NULL; 1164f683c80cSMichael J. Ruhl rcd->subctxt_rcvegrbuf = NULL; 1165f683c80cSMichael J. Ruhl rcd->subctxt_rcvhdr_base = NULL; 1166f683c80cSMichael J. Ruhl rcd->opstats = NULL; 1167f48ad614SDennis Dalessandro } 1168f48ad614SDennis Dalessandro 1169f48ad614SDennis Dalessandro /* 1170f48ad614SDennis Dalessandro * Release our hold on the shared asic data. If we are the last one, 1171dba715f0SDean Luick * return the structure to be finalized outside the lock. Must be 1172dba715f0SDean Luick * holding hfi1_devs_lock. 1173f48ad614SDennis Dalessandro */ 1174dba715f0SDean Luick static struct hfi1_asic_data *release_asic_data(struct hfi1_devdata *dd) 1175f48ad614SDennis Dalessandro { 1176dba715f0SDean Luick struct hfi1_asic_data *ad; 1177f48ad614SDennis Dalessandro int other; 1178f48ad614SDennis Dalessandro 1179f48ad614SDennis Dalessandro if (!dd->asic_data) 1180dba715f0SDean Luick return NULL; 1181f48ad614SDennis Dalessandro dd->asic_data->dds[dd->hfi1_id] = NULL; 1182f48ad614SDennis Dalessandro other = dd->hfi1_id ? 0 : 1; 1183dba715f0SDean Luick ad = dd->asic_data; 1184f48ad614SDennis Dalessandro dd->asic_data = NULL; 1185dba715f0SDean Luick /* return NULL if the other dd still has a link */ 1186dba715f0SDean Luick return ad->dds[other] ? 
NULL : ad; 1187dba715f0SDean Luick } 1188dba715f0SDean Luick 1189dba715f0SDean Luick static void finalize_asic_data(struct hfi1_devdata *dd, 1190dba715f0SDean Luick struct hfi1_asic_data *ad) 1191dba715f0SDean Luick { 1192dba715f0SDean Luick clean_up_i2c(dd, ad); 1193dba715f0SDean Luick kfree(ad); 1194f48ad614SDennis Dalessandro } 1195f48ad614SDennis Dalessandro 1196e9777ad4SSebastian Sanchez /** 1197e9777ad4SSebastian Sanchez * hfi1_clean_devdata - cleans up per-unit data structure 1198e9777ad4SSebastian Sanchez * @dd: pointer to a valid devdata structure 1199e9777ad4SSebastian Sanchez * 1200e9777ad4SSebastian Sanchez * It cleans up all data structures set up by 1201e9777ad4SSebastian Sanchez * hfi1_alloc_devdata(). 1202e9777ad4SSebastian Sanchez */ 1203e9777ad4SSebastian Sanchez static void hfi1_clean_devdata(struct hfi1_devdata *dd) 1204f48ad614SDennis Dalessandro { 1205dba715f0SDean Luick struct hfi1_asic_data *ad; 1206f48ad614SDennis Dalessandro unsigned long flags; 1207f48ad614SDennis Dalessandro 1208f48ad614SDennis Dalessandro spin_lock_irqsave(&hfi1_devs_lock, flags); 1209e9777ad4SSebastian Sanchez if (!list_empty(&dd->list)) { 1210f48ad614SDennis Dalessandro idr_remove(&hfi1_unit_table, dd->unit); 1211e9777ad4SSebastian Sanchez list_del_init(&dd->list); 1212e9777ad4SSebastian Sanchez } 1213dba715f0SDean Luick ad = release_asic_data(dd); 1214f48ad614SDennis Dalessandro spin_unlock_irqrestore(&hfi1_devs_lock, flags); 1215e9777ad4SSebastian Sanchez 1216dba715f0SDean Luick finalize_asic_data(dd, ad); 1217f48ad614SDennis Dalessandro free_platform_config(dd); 1218f48ad614SDennis Dalessandro rcu_barrier(); /* wait for rcu callbacks to complete */ 1219f48ad614SDennis Dalessandro free_percpu(dd->int_counter); 1220f48ad614SDennis Dalessandro free_percpu(dd->rcv_limit); 1221f48ad614SDennis Dalessandro free_percpu(dd->send_schedule); 12221b311f89SMike Marciniszyn free_percpu(dd->tx_opstats); 1223e9777ad4SSebastian Sanchez dd->int_counter = NULL; 1224e9777ad4SSebastian Sanchez dd->rcv_limit = NULL; 1225e9777ad4SSebastian Sanchez dd->send_schedule = NULL; 1226e9777ad4SSebastian Sanchez dd->tx_opstats = NULL; 12275d18ee67SSebastian Sanchez kfree(dd->comp_vect); 12285d18ee67SSebastian Sanchez dd->comp_vect = NULL; 1229473291b3SAlex Estrin sdma_clean(dd, dd->num_sdma); 1230f48ad614SDennis Dalessandro rvt_dealloc_device(&dd->verbs_dev.rdi); 1231f48ad614SDennis Dalessandro } 1232f48ad614SDennis Dalessandro 1233e9777ad4SSebastian Sanchez static void __hfi1_free_devdata(struct kobject *kobj) 1234e9777ad4SSebastian Sanchez { 1235e9777ad4SSebastian Sanchez struct hfi1_devdata *dd = 1236e9777ad4SSebastian Sanchez container_of(kobj, struct hfi1_devdata, kobj); 1237e9777ad4SSebastian Sanchez 1238e9777ad4SSebastian Sanchez hfi1_clean_devdata(dd); 1239e9777ad4SSebastian Sanchez } 1240e9777ad4SSebastian Sanchez 1241f48ad614SDennis Dalessandro static struct kobj_type hfi1_devdata_type = { 1242f48ad614SDennis Dalessandro .release = __hfi1_free_devdata, 1243f48ad614SDennis Dalessandro }; 1244f48ad614SDennis Dalessandro 1245f48ad614SDennis Dalessandro void hfi1_free_devdata(struct hfi1_devdata *dd) 1246f48ad614SDennis Dalessandro { 1247f48ad614SDennis Dalessandro kobject_put(&dd->kobj); 1248f48ad614SDennis Dalessandro } 1249f48ad614SDennis Dalessandro 1250f48ad614SDennis Dalessandro /* 1251f48ad614SDennis Dalessandro * Allocate our primary per-unit data structure.
Must be done via verbs 1252f48ad614SDennis Dalessandro * allocator, because the verbs cleanup process both does cleanup and 1253f48ad614SDennis Dalessandro * free of the data structure. 1254f48ad614SDennis Dalessandro * "extra" is for chip-specific data. 1255f48ad614SDennis Dalessandro * 1256f48ad614SDennis Dalessandro * Use the idr mechanism to get a unit number for this unit. 1257f48ad614SDennis Dalessandro */ 1258f48ad614SDennis Dalessandro struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra) 1259f48ad614SDennis Dalessandro { 1260f48ad614SDennis Dalessandro unsigned long flags; 1261f48ad614SDennis Dalessandro struct hfi1_devdata *dd; 1262f48ad614SDennis Dalessandro int ret, nports; 1263f48ad614SDennis Dalessandro 1264f48ad614SDennis Dalessandro /* extra is sizeof(struct hfi1_pportdata) * number of ports */ 1265f48ad614SDennis Dalessandro nports = extra / sizeof(struct hfi1_pportdata); 1266f48ad614SDennis Dalessandro 1267f48ad614SDennis Dalessandro dd = (struct hfi1_devdata *)rvt_alloc_device(sizeof(*dd) + extra, 1268f48ad614SDennis Dalessandro nports); 1269f48ad614SDennis Dalessandro if (!dd) 1270f48ad614SDennis Dalessandro return ERR_PTR(-ENOMEM); 1271f48ad614SDennis Dalessandro dd->num_pports = nports; 1272f48ad614SDennis Dalessandro dd->pport = (struct hfi1_pportdata *)(dd + 1); 127345d92457SSebastian Sanchez dd->pcidev = pdev; 127445d92457SSebastian Sanchez pci_set_drvdata(pdev, dd); 1275f48ad614SDennis Dalessandro 1276f48ad614SDennis Dalessandro INIT_LIST_HEAD(&dd->list); 1277f48ad614SDennis Dalessandro idr_preload(GFP_KERNEL); 1278f48ad614SDennis Dalessandro spin_lock_irqsave(&hfi1_devs_lock, flags); 1279f48ad614SDennis Dalessandro 1280f48ad614SDennis Dalessandro ret = idr_alloc(&hfi1_unit_table, dd, 0, 0, GFP_NOWAIT); 1281f48ad614SDennis Dalessandro if (ret >= 0) { 1282f48ad614SDennis Dalessandro dd->unit = ret; 1283f48ad614SDennis Dalessandro list_add(&dd->list, &hfi1_dev_list); 1284f48ad614SDennis Dalessandro } 12855d18ee67SSebastian Sanchez dd->node = -1; 1286f48ad614SDennis Dalessandro 1287f48ad614SDennis Dalessandro spin_unlock_irqrestore(&hfi1_devs_lock, flags); 1288f48ad614SDennis Dalessandro idr_preload_end(); 1289f48ad614SDennis Dalessandro 1290f48ad614SDennis Dalessandro if (ret < 0) { 1291f48ad614SDennis Dalessandro hfi1_early_err(&pdev->dev, 1292f48ad614SDennis Dalessandro "Could not allocate unit ID: error %d\n", -ret); 1293f48ad614SDennis Dalessandro goto bail; 1294f48ad614SDennis Dalessandro } 12955084c8ffSMichael J. Ruhl rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s_%d", class_name(), dd->unit); 12965084c8ffSMichael J. Ruhl 1297f48ad614SDennis Dalessandro /* 1298f48ad614SDennis Dalessandro * Initialize all locks for the device. This needs to be as early as 1299f48ad614SDennis Dalessandro * possible so locks are usable.
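 * Everything allocated after this point (per-cpu counters, comp_vect)
 * is released by hfi1_clean_devdata(), which the bail: path below
 * relies on being safe to call on a partially constructed devdata.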
1300f48ad614SDennis Dalessandro */ 1301f48ad614SDennis Dalessandro spin_lock_init(&dd->sc_lock); 1302f48ad614SDennis Dalessandro spin_lock_init(&dd->sendctrl_lock); 1303f48ad614SDennis Dalessandro spin_lock_init(&dd->rcvctrl_lock); 1304f48ad614SDennis Dalessandro spin_lock_init(&dd->uctxt_lock); 1305f48ad614SDennis Dalessandro spin_lock_init(&dd->hfi1_diag_trans_lock); 1306f48ad614SDennis Dalessandro spin_lock_init(&dd->sc_init_lock); 1307f48ad614SDennis Dalessandro spin_lock_init(&dd->dc8051_memlock); 1308f48ad614SDennis Dalessandro seqlock_init(&dd->sc2vl_lock); 1309f48ad614SDennis Dalessandro spin_lock_init(&dd->sde_map_lock); 1310f48ad614SDennis Dalessandro spin_lock_init(&dd->pio_map_lock); 131122546b74STadeusz Struk mutex_init(&dd->dc8051_lock); 1312f48ad614SDennis Dalessandro init_waitqueue_head(&dd->event_queue); 1313f48ad614SDennis Dalessandro 1314f48ad614SDennis Dalessandro dd->int_counter = alloc_percpu(u64); 1315f48ad614SDennis Dalessandro if (!dd->int_counter) { 1316f48ad614SDennis Dalessandro ret = -ENOMEM; 1317f48ad614SDennis Dalessandro goto bail; 1318f48ad614SDennis Dalessandro } 1319f48ad614SDennis Dalessandro 1320f48ad614SDennis Dalessandro dd->rcv_limit = alloc_percpu(u64); 1321f48ad614SDennis Dalessandro if (!dd->rcv_limit) { 1322f48ad614SDennis Dalessandro ret = -ENOMEM; 1323f48ad614SDennis Dalessandro goto bail; 1324f48ad614SDennis Dalessandro } 1325f48ad614SDennis Dalessandro 1326f48ad614SDennis Dalessandro dd->send_schedule = alloc_percpu(u64); 1327f48ad614SDennis Dalessandro if (!dd->send_schedule) { 1328f48ad614SDennis Dalessandro ret = -ENOMEM; 1329f48ad614SDennis Dalessandro goto bail; 1330f48ad614SDennis Dalessandro } 1331f48ad614SDennis Dalessandro 13321b311f89SMike Marciniszyn dd->tx_opstats = alloc_percpu(struct hfi1_opcode_stats_perctx); 13331b311f89SMike Marciniszyn if (!dd->tx_opstats) { 13341b311f89SMike Marciniszyn ret = -ENOMEM; 13351b311f89SMike Marciniszyn goto bail; 13361b311f89SMike Marciniszyn } 13371b311f89SMike Marciniszyn 13385d18ee67SSebastian Sanchez dd->comp_vect = kzalloc(sizeof(*dd->comp_vect), GFP_KERNEL); 13395d18ee67SSebastian Sanchez if (!dd->comp_vect) { 13405d18ee67SSebastian Sanchez ret = -ENOMEM; 13415d18ee67SSebastian Sanchez goto bail; 13425d18ee67SSebastian Sanchez } 13435d18ee67SSebastian Sanchez 1344f48ad614SDennis Dalessandro kobject_init(&dd->kobj, &hfi1_devdata_type); 1345f48ad614SDennis Dalessandro return dd; 1346f48ad614SDennis Dalessandro 1347f48ad614SDennis Dalessandro bail: 1348e9777ad4SSebastian Sanchez hfi1_clean_devdata(dd); 1349f48ad614SDennis Dalessandro return ERR_PTR(ret); 1350f48ad614SDennis Dalessandro } 1351f48ad614SDennis Dalessandro 1352f48ad614SDennis Dalessandro /* 1353f48ad614SDennis Dalessandro * Called from freeze mode handlers, and from PCI error 1354f48ad614SDennis Dalessandro * reporting code. Should be paranoid about state of 1355f48ad614SDennis Dalessandro * system and data structures. 
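 * Concretely: clear HFI1_INITTED so no new work is accepted, force
 * each port link down if the chip is still present, and latch
 * HFI1_STATUS_HWERROR in the status page so user programs see the
 * unit as unusable until it is reset.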
1356f48ad614SDennis Dalessandro */ 1357f48ad614SDennis Dalessandro void hfi1_disable_after_error(struct hfi1_devdata *dd) 1358f48ad614SDennis Dalessandro { 1359f48ad614SDennis Dalessandro if (dd->flags & HFI1_INITTED) { 1360f48ad614SDennis Dalessandro u32 pidx; 1361f48ad614SDennis Dalessandro 1362f48ad614SDennis Dalessandro dd->flags &= ~HFI1_INITTED; 1363f48ad614SDennis Dalessandro if (dd->pport) 1364f48ad614SDennis Dalessandro for (pidx = 0; pidx < dd->num_pports; ++pidx) { 1365f48ad614SDennis Dalessandro struct hfi1_pportdata *ppd; 1366f48ad614SDennis Dalessandro 1367f48ad614SDennis Dalessandro ppd = dd->pport + pidx; 1368f48ad614SDennis Dalessandro if (dd->flags & HFI1_PRESENT) 1369f48ad614SDennis Dalessandro set_link_state(ppd, HLS_DN_DISABLE); 1370f48ad614SDennis Dalessandro 1371f48ad614SDennis Dalessandro if (ppd->statusp) 1372f48ad614SDennis Dalessandro *ppd->statusp &= ~HFI1_STATUS_IB_READY; 1373f48ad614SDennis Dalessandro } 1374f48ad614SDennis Dalessandro } 1375f48ad614SDennis Dalessandro 1376f48ad614SDennis Dalessandro /* 1377f48ad614SDennis Dalessandro * Mark as having had an error for driver, and also 1378f48ad614SDennis Dalessandro * for /sys and status word mapped to user programs. 1379f48ad614SDennis Dalessandro * This marks unit as not usable, until reset. 1380f48ad614SDennis Dalessandro */ 1381f48ad614SDennis Dalessandro if (dd->status) 1382f48ad614SDennis Dalessandro dd->status->dev |= HFI1_STATUS_HWERROR; 1383f48ad614SDennis Dalessandro } 1384f48ad614SDennis Dalessandro 1385f48ad614SDennis Dalessandro static void remove_one(struct pci_dev *); 1386f48ad614SDennis Dalessandro static int init_one(struct pci_dev *, const struct pci_device_id *); 13878d3e7113SAlex Estrin static void shutdown_one(struct pci_dev *); 1388f48ad614SDennis Dalessandro 1389f48ad614SDennis Dalessandro #define DRIVER_LOAD_MSG "Intel " DRIVER_NAME " loaded: " 1390f48ad614SDennis Dalessandro #define PFX DRIVER_NAME ": " 1391f48ad614SDennis Dalessandro 1392d6373019SSebastian Sanchez const struct pci_device_id hfi1_pci_tbl[] = { 1393f48ad614SDennis Dalessandro { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL0) }, 1394f48ad614SDennis Dalessandro { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL1) }, 1395f48ad614SDennis Dalessandro { 0, } 1396f48ad614SDennis Dalessandro }; 1397f48ad614SDennis Dalessandro 1398f48ad614SDennis Dalessandro MODULE_DEVICE_TABLE(pci, hfi1_pci_tbl); 1399f48ad614SDennis Dalessandro 1400f48ad614SDennis Dalessandro static struct pci_driver hfi1_pci_driver = { 1401f48ad614SDennis Dalessandro .name = DRIVER_NAME, 1402f48ad614SDennis Dalessandro .probe = init_one, 1403f48ad614SDennis Dalessandro .remove = remove_one, 14048d3e7113SAlex Estrin .shutdown = shutdown_one, 1405f48ad614SDennis Dalessandro .id_table = hfi1_pci_tbl, 1406f48ad614SDennis Dalessandro .err_handler = &hfi1_pci_err_handler, 1407f48ad614SDennis Dalessandro }; 1408f48ad614SDennis Dalessandro 1409f48ad614SDennis Dalessandro static void __init compute_krcvqs(void) 1410f48ad614SDennis Dalessandro { 1411f48ad614SDennis Dalessandro int i; 1412f48ad614SDennis Dalessandro 1413f48ad614SDennis Dalessandro for (i = 0; i < krcvqsset; i++) 1414f48ad614SDennis Dalessandro n_krcvqs += krcvqs[i]; 1415f48ad614SDennis Dalessandro } 1416f48ad614SDennis Dalessandro 1417f48ad614SDennis Dalessandro /* 1418f48ad614SDennis Dalessandro * Do all the generic driver unit- and chip-independent memory 1419f48ad614SDennis Dalessandro * allocation and initialization. 
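 * Module parameter validation also happens here, before
 * pci_register_driver() can bind any device, so every probe sees
 * already-sanitized values.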
1420f48ad614SDennis Dalessandro */ 1421f48ad614SDennis Dalessandro static int __init hfi1_mod_init(void) 1422f48ad614SDennis Dalessandro { 1423f48ad614SDennis Dalessandro int ret; 1424f48ad614SDennis Dalessandro 1425f48ad614SDennis Dalessandro ret = dev_init(); 1426f48ad614SDennis Dalessandro if (ret) 1427f48ad614SDennis Dalessandro goto bail; 1428f48ad614SDennis Dalessandro 1429d6373019SSebastian Sanchez ret = node_affinity_init(); 1430d6373019SSebastian Sanchez if (ret) 1431d6373019SSebastian Sanchez goto bail; 14324197344bSDennis Dalessandro 1433f48ad614SDennis Dalessandro /* validate max MTU before any devices start */ 1434f48ad614SDennis Dalessandro if (!valid_opa_max_mtu(hfi1_max_mtu)) { 1435f48ad614SDennis Dalessandro pr_err("Invalid max_mtu 0x%x, using 0x%x instead\n", 1436f48ad614SDennis Dalessandro hfi1_max_mtu, HFI1_DEFAULT_MAX_MTU); 1437f48ad614SDennis Dalessandro hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU; 1438f48ad614SDennis Dalessandro } 1439f48ad614SDennis Dalessandro /* valid CUs run from 1-128 in powers of 2 */ 1440f48ad614SDennis Dalessandro if (hfi1_cu > 128 || !is_power_of_2(hfi1_cu)) 1441f48ad614SDennis Dalessandro hfi1_cu = 1; 1442f48ad614SDennis Dalessandro /* valid credit return threshold is 0-100, variable is unsigned */ 1443f48ad614SDennis Dalessandro if (user_credit_return_threshold > 100) 1444f48ad614SDennis Dalessandro user_credit_return_threshold = 100; 1445f48ad614SDennis Dalessandro 1446f48ad614SDennis Dalessandro compute_krcvqs(); 1447f48ad614SDennis Dalessandro /* 1448f48ad614SDennis Dalessandro * sanitize the receive interrupt count; the timeout must wait until after 1449f48ad614SDennis Dalessandro * the hardware type is known 1450f48ad614SDennis Dalessandro */ 1451f48ad614SDennis Dalessandro if (rcv_intr_count > RCV_HDR_HEAD_COUNTER_MASK) 1452f48ad614SDennis Dalessandro rcv_intr_count = RCV_HDR_HEAD_COUNTER_MASK; 1453f48ad614SDennis Dalessandro /* reject invalid combinations */ 1454f48ad614SDennis Dalessandro if (rcv_intr_count == 0 && rcv_intr_timeout == 0) { 1455f48ad614SDennis Dalessandro pr_err("Invalid mode: both receive interrupt count and available timeout are zero - setting interrupt count to 1\n"); 1456f48ad614SDennis Dalessandro rcv_intr_count = 1; 1457f48ad614SDennis Dalessandro } 1458f48ad614SDennis Dalessandro if (rcv_intr_count > 1 && rcv_intr_timeout == 0) { 1459f48ad614SDennis Dalessandro /* 1460f48ad614SDennis Dalessandro * Avoid indefinite packet delivery by requiring a timeout 1461f48ad614SDennis Dalessandro * if count is > 1. 1462f48ad614SDennis Dalessandro */ 1463f48ad614SDennis Dalessandro pr_err("Invalid mode: receive interrupt count greater than 1 and available timeout is zero - setting available timeout to 1\n"); 1464f48ad614SDennis Dalessandro rcv_intr_timeout = 1; 1465f48ad614SDennis Dalessandro } 1466f48ad614SDennis Dalessandro if (rcv_intr_dynamic && !(rcv_intr_count > 1 && rcv_intr_timeout > 0)) { 1467f48ad614SDennis Dalessandro /* 1468f48ad614SDennis Dalessandro * The dynamic algorithm expects a non-zero timeout 1469f48ad614SDennis Dalessandro * and a count > 1.
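 * Summarizing the checks above: count == 0 needs a non-zero timeout,
 * count == 1 accepts any timeout, count > 1 requires a non-zero
 * timeout, and dynamic mitigation additionally requires count > 1.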
1470f48ad614SDennis Dalessandro */ 1471f48ad614SDennis Dalessandro pr_err("Invalid mode: dynamic receive interrupt mitigation with invalid count and timeout - turning dynamic off\n"); 1472f48ad614SDennis Dalessandro rcv_intr_dynamic = 0; 1473f48ad614SDennis Dalessandro } 1474f48ad614SDennis Dalessandro 1475f48ad614SDennis Dalessandro /* sanitize link CRC options */ 1476f48ad614SDennis Dalessandro link_crc_mask &= SUPPORTED_CRCS; 1477f48ad614SDennis Dalessandro 1478f48ad614SDennis Dalessandro /* 1479f48ad614SDennis Dalessandro * These must be called before the driver is registered with 1480f48ad614SDennis Dalessandro * the PCI subsystem. 1481f48ad614SDennis Dalessandro */ 1482f48ad614SDennis Dalessandro idr_init(&hfi1_unit_table); 1483f48ad614SDennis Dalessandro 1484f48ad614SDennis Dalessandro hfi1_dbg_init(); 1485f48ad614SDennis Dalessandro ret = hfi1_wss_init(); 1486f48ad614SDennis Dalessandro if (ret < 0) 1487f48ad614SDennis Dalessandro goto bail_wss; 1488f48ad614SDennis Dalessandro ret = pci_register_driver(&hfi1_pci_driver); 1489f48ad614SDennis Dalessandro if (ret < 0) { 1490f48ad614SDennis Dalessandro pr_err("Unable to register driver: error %d\n", -ret); 1491f48ad614SDennis Dalessandro goto bail_dev; 1492f48ad614SDennis Dalessandro } 1493f48ad614SDennis Dalessandro goto bail; /* all OK */ 1494f48ad614SDennis Dalessandro 1495f48ad614SDennis Dalessandro bail_dev: 1496f48ad614SDennis Dalessandro hfi1_wss_exit(); 1497f48ad614SDennis Dalessandro bail_wss: 1498f48ad614SDennis Dalessandro hfi1_dbg_exit(); 1499f48ad614SDennis Dalessandro idr_destroy(&hfi1_unit_table); 1500f48ad614SDennis Dalessandro dev_cleanup(); 1501f48ad614SDennis Dalessandro bail: 1502f48ad614SDennis Dalessandro return ret; 1503f48ad614SDennis Dalessandro } 1504f48ad614SDennis Dalessandro 1505f48ad614SDennis Dalessandro module_init(hfi1_mod_init); 1506f48ad614SDennis Dalessandro 1507f48ad614SDennis Dalessandro /* 1508f48ad614SDennis Dalessandro * Do the non-unit driver cleanup, memory free, etc. at unload. 
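 * Teardown runs in roughly the reverse order of hfi1_mod_init():
 * unregister from the PCI core first so no new probes arrive, then
 * drop the module-wide state (wss, debugfs, idr table, firmware,
 * device nodes).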
1509f48ad614SDennis Dalessandro */ 1510f48ad614SDennis Dalessandro static void __exit hfi1_mod_cleanup(void) 1511f48ad614SDennis Dalessandro { 1512f48ad614SDennis Dalessandro pci_unregister_driver(&hfi1_pci_driver); 15135d18ee67SSebastian Sanchez node_affinity_destroy_all(); 1514f48ad614SDennis Dalessandro hfi1_wss_exit(); 1515f48ad614SDennis Dalessandro hfi1_dbg_exit(); 1516f48ad614SDennis Dalessandro 1517f48ad614SDennis Dalessandro idr_destroy(&hfi1_unit_table); 1518f48ad614SDennis Dalessandro dispose_firmware(); /* asymmetric with obtain_firmware() */ 1519f48ad614SDennis Dalessandro dev_cleanup(); 1520f48ad614SDennis Dalessandro } 1521f48ad614SDennis Dalessandro 1522f48ad614SDennis Dalessandro module_exit(hfi1_mod_cleanup); 1523f48ad614SDennis Dalessandro 1524f48ad614SDennis Dalessandro /* this can only be called after a successful initialization */ 1525f48ad614SDennis Dalessandro static void cleanup_device_data(struct hfi1_devdata *dd) 1526f48ad614SDennis Dalessandro { 1527f48ad614SDennis Dalessandro int ctxt; 1528f48ad614SDennis Dalessandro int pidx; 1529f48ad614SDennis Dalessandro 1530f48ad614SDennis Dalessandro /* users can't do anything more with chip */ 1531f48ad614SDennis Dalessandro for (pidx = 0; pidx < dd->num_pports; ++pidx) { 1532f48ad614SDennis Dalessandro struct hfi1_pportdata *ppd = &dd->pport[pidx]; 1533f48ad614SDennis Dalessandro struct cc_state *cc_state; 1534f48ad614SDennis Dalessandro int i; 1535f48ad614SDennis Dalessandro 1536f48ad614SDennis Dalessandro if (ppd->statusp) 1537f48ad614SDennis Dalessandro *ppd->statusp &= ~HFI1_STATUS_CHIP_PRESENT; 1538f48ad614SDennis Dalessandro 1539f48ad614SDennis Dalessandro for (i = 0; i < OPA_MAX_SLS; i++) 1540f48ad614SDennis Dalessandro hrtimer_cancel(&ppd->cca_timer[i].hrtimer); 1541f48ad614SDennis Dalessandro 1542f48ad614SDennis Dalessandro spin_lock(&ppd->cc_state_lock); 15438adf71faSJianxin Xiong cc_state = get_cc_state_protected(ppd); 1544f48ad614SDennis Dalessandro RCU_INIT_POINTER(ppd->cc_state, NULL); 1545f48ad614SDennis Dalessandro spin_unlock(&ppd->cc_state_lock); 1546f48ad614SDennis Dalessandro 1547f48ad614SDennis Dalessandro if (cc_state) 1548476d95bdSWei Yongjun kfree_rcu(cc_state, rcu); 1549f48ad614SDennis Dalessandro } 1550f48ad614SDennis Dalessandro 1551f48ad614SDennis Dalessandro free_credit_return(dd); 1552f48ad614SDennis Dalessandro 1553f48ad614SDennis Dalessandro if (dd->rcvhdrtail_dummy_kvaddr) { 1554f48ad614SDennis Dalessandro dma_free_coherent(&dd->pcidev->dev, sizeof(u64), 1555f48ad614SDennis Dalessandro (void *)dd->rcvhdrtail_dummy_kvaddr, 155660368186STymoteusz Kielan dd->rcvhdrtail_dummy_dma); 1557f48ad614SDennis Dalessandro dd->rcvhdrtail_dummy_kvaddr = NULL; 1558f48ad614SDennis Dalessandro } 1559f48ad614SDennis Dalessandro 1560d295dbebSMichael J. Ruhl /* 1561d295dbebSMichael J. Ruhl * Free any resources still in use (usually just kernel contexts) 1562d295dbebSMichael J. Ruhl * at unload; we do for ctxtcnt, because that's what we allocate. 1563d295dbebSMichael J. Ruhl */ 1564d295dbebSMichael J. Ruhl for (ctxt = 0; dd->rcd && ctxt < dd->num_rcv_contexts; ctxt++) { 1565d295dbebSMichael J. Ruhl struct hfi1_ctxtdata *rcd = dd->rcd[ctxt]; 1566f48ad614SDennis Dalessandro 1567f48ad614SDennis Dalessandro if (rcd) { 1568f48ad614SDennis Dalessandro hfi1_clear_tids(rcd); 1569d295dbebSMichael J. Ruhl hfi1_free_ctxt(rcd); 1570f48ad614SDennis Dalessandro } 1571f48ad614SDennis Dalessandro } 1572d295dbebSMichael J. Ruhl 1573d295dbebSMichael J. Ruhl kfree(dd->rcd); 1574d295dbebSMichael J. 
Ruhl dd->rcd = NULL; 1575d295dbebSMichael J. Ruhl 1576f48ad614SDennis Dalessandro free_pio_map(dd); 1577f48ad614SDennis Dalessandro /* must follow rcv context free - need to remove rcv's hooks */ 1578f48ad614SDennis Dalessandro for (ctxt = 0; ctxt < dd->num_send_contexts; ctxt++) 1579f48ad614SDennis Dalessandro sc_free(dd->send_contexts[ctxt].sc); 1580f48ad614SDennis Dalessandro dd->num_send_contexts = 0; 1581f48ad614SDennis Dalessandro kfree(dd->send_contexts); 1582f48ad614SDennis Dalessandro dd->send_contexts = NULL; 1583f48ad614SDennis Dalessandro kfree(dd->hw_to_sw); 1584f48ad614SDennis Dalessandro dd->hw_to_sw = NULL; 1585f48ad614SDennis Dalessandro kfree(dd->boardname); 1586f48ad614SDennis Dalessandro vfree(dd->events); 1587f48ad614SDennis Dalessandro vfree(dd->status); 1588f48ad614SDennis Dalessandro } 1589f48ad614SDennis Dalessandro 1590f48ad614SDennis Dalessandro /* 1591f48ad614SDennis Dalessandro * Clean up on unit shutdown, or error during unit load after 1592f48ad614SDennis Dalessandro * successful initialization. 1593f48ad614SDennis Dalessandro */ 1594f48ad614SDennis Dalessandro static void postinit_cleanup(struct hfi1_devdata *dd) 1595f48ad614SDennis Dalessandro { 1596f48ad614SDennis Dalessandro hfi1_start_cleanup(dd); 15975d18ee67SSebastian Sanchez hfi1_comp_vectors_clean_up(dd); 15985d18ee67SSebastian Sanchez hfi1_dev_affinity_clean_up(dd); 1599f48ad614SDennis Dalessandro 1600f48ad614SDennis Dalessandro hfi1_pcie_ddcleanup(dd); 1601f48ad614SDennis Dalessandro hfi1_pcie_cleanup(dd->pcidev); 1602f48ad614SDennis Dalessandro 1603f48ad614SDennis Dalessandro cleanup_device_data(dd); 1604f48ad614SDennis Dalessandro 1605f48ad614SDennis Dalessandro hfi1_free_devdata(dd); 1606f48ad614SDennis Dalessandro } 1607f48ad614SDennis Dalessandro 160811501ab9SKrzysztof Blaszkowski static int init_validate_rcvhdrcnt(struct device *dev, uint thecnt) 160911501ab9SKrzysztof Blaszkowski { 161011501ab9SKrzysztof Blaszkowski if (thecnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) { 161111501ab9SKrzysztof Blaszkowski hfi1_early_err(dev, "Receive header queue count too small\n"); 161211501ab9SKrzysztof Blaszkowski return -EINVAL; 161311501ab9SKrzysztof Blaszkowski } 161411501ab9SKrzysztof Blaszkowski 161511501ab9SKrzysztof Blaszkowski if (thecnt > HFI1_MAX_HDRQ_EGRBUF_CNT) { 161611501ab9SKrzysztof Blaszkowski hfi1_early_err(dev, 161711501ab9SKrzysztof Blaszkowski "Receive header queue count cannot be greater than %u\n", 161811501ab9SKrzysztof Blaszkowski HFI1_MAX_HDRQ_EGRBUF_CNT); 161911501ab9SKrzysztof Blaszkowski return -EINVAL; 162011501ab9SKrzysztof Blaszkowski } 162111501ab9SKrzysztof Blaszkowski 162211501ab9SKrzysztof Blaszkowski if (thecnt % HDRQ_INCREMENT) { 162311501ab9SKrzysztof Blaszkowski hfi1_early_err(dev, "Receive header queue count %d must be divisible by %lu\n", 162411501ab9SKrzysztof Blaszkowski thecnt, HDRQ_INCREMENT); 162511501ab9SKrzysztof Blaszkowski return -EINVAL; 162611501ab9SKrzysztof Blaszkowski } 162711501ab9SKrzysztof Blaszkowski 162811501ab9SKrzysztof Blaszkowski return 0; 162911501ab9SKrzysztof Blaszkowski } 163011501ab9SKrzysztof Blaszkowski 1631f48ad614SDennis Dalessandro static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 1632f48ad614SDennis Dalessandro { 1633f48ad614SDennis Dalessandro int ret = 0, j, pidx, initfail; 163483fb4af6SKrzysztof Blaszkowski struct hfi1_devdata *dd; 1635f48ad614SDennis Dalessandro struct hfi1_pportdata *ppd; 1636f48ad614SDennis Dalessandro 1637f48ad614SDennis Dalessandro /* First, lock the non-writable module parameters */ 
1638f48ad614SDennis Dalessandro HFI1_CAP_LOCK(); 1639f48ad614SDennis Dalessandro 16405d6f08afSTadeusz Struk /* Validate dev ids */ 16415d6f08afSTadeusz Struk if (!(ent->device == PCI_DEVICE_ID_INTEL0 || 16425d6f08afSTadeusz Struk ent->device == PCI_DEVICE_ID_INTEL1)) { 16435d6f08afSTadeusz Struk hfi1_early_err(&pdev->dev, 16445d6f08afSTadeusz Struk "Failing on unknown Intel deviceid 0x%x\n", 16455d6f08afSTadeusz Struk ent->device); 16465d6f08afSTadeusz Struk ret = -ENODEV; 16475d6f08afSTadeusz Struk goto bail; 16485d6f08afSTadeusz Struk } 16495d6f08afSTadeusz Struk 1650f48ad614SDennis Dalessandro /* Validate some global module parameters */ 165111501ab9SKrzysztof Blaszkowski ret = init_validate_rcvhdrcnt(&pdev->dev, rcvhdrcnt); 165211501ab9SKrzysztof Blaszkowski if (ret) 1653f48ad614SDennis Dalessandro goto bail; 165411501ab9SKrzysztof Blaszkowski 1655f48ad614SDennis Dalessandro /* use the encoding function as a sanitization check */ 1656f48ad614SDennis Dalessandro if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) { 1657f48ad614SDennis Dalessandro hfi1_early_err(&pdev->dev, "Invalid HdrQ Entry size %u\n", 1658f48ad614SDennis Dalessandro hfi1_hdrq_entsize); 1659f48ad614SDennis Dalessandro ret = -EINVAL; 1660f48ad614SDennis Dalessandro goto bail; 1661f48ad614SDennis Dalessandro } 1662f48ad614SDennis Dalessandro 1663f48ad614SDennis Dalessandro /* The receive eager buffer size must be set before the receive 1664f48ad614SDennis Dalessandro * contexts are created. 1665f48ad614SDennis Dalessandro * 1666f48ad614SDennis Dalessandro * Set the eager buffer size. Validate that it falls in a range 1667f48ad614SDennis Dalessandro * allowed by the hardware - all powers of 2 between the min and 1668f48ad614SDennis Dalessandro * max. The maximum valid MTU is within the eager buffer range 1669f48ad614SDennis Dalessandro * so we do not need to cap the max_mtu by an eager buffer size 1670f48ad614SDennis Dalessandro * setting. 1671f48ad614SDennis Dalessandro */ 1672f48ad614SDennis Dalessandro if (eager_buffer_size) { 1673f48ad614SDennis Dalessandro if (!is_power_of_2(eager_buffer_size)) 1674f48ad614SDennis Dalessandro eager_buffer_size = 1675f48ad614SDennis Dalessandro roundup_pow_of_two(eager_buffer_size); 1676f48ad614SDennis Dalessandro eager_buffer_size = 1677f48ad614SDennis Dalessandro clamp_val(eager_buffer_size, 1678f48ad614SDennis Dalessandro MIN_EAGER_BUFFER * 8, 1679f48ad614SDennis Dalessandro MAX_EAGER_BUFFER_TOTAL); 1680f48ad614SDennis Dalessandro hfi1_early_info(&pdev->dev, "Eager buffer size %u\n", 1681f48ad614SDennis Dalessandro eager_buffer_size); 1682f48ad614SDennis Dalessandro } else { 1683f48ad614SDennis Dalessandro hfi1_early_err(&pdev->dev, "Invalid Eager buffer size of 0\n"); 1684f48ad614SDennis Dalessandro ret = -EINVAL; 1685f48ad614SDennis Dalessandro goto bail; 1686f48ad614SDennis Dalessandro } 1687f48ad614SDennis Dalessandro 1688f48ad614SDennis Dalessandro /* restrict value of hfi1_rcvarr_split */ 1689f48ad614SDennis Dalessandro hfi1_rcvarr_split = clamp_val(hfi1_rcvarr_split, 0, 100); 1690f48ad614SDennis Dalessandro 1691f48ad614SDennis Dalessandro ret = hfi1_pcie_init(pdev, ent); 1692f48ad614SDennis Dalessandro if (ret) 1693f48ad614SDennis Dalessandro goto bail; 1694f48ad614SDennis Dalessandro 169583fb4af6SKrzysztof Blaszkowski /* 169683fb4af6SKrzysztof Blaszkowski * Do device-specific initialization, function table setup, dd 169783fb4af6SKrzysztof Blaszkowski * allocation, etc. 
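 * Note that hfi1_init_dd() reports failure with an ERR_PTR() encoded
 * pointer rather than NULL, hence the IS_ERR()/PTR_ERR() pair below.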
169883fb4af6SKrzysztof Blaszkowski */ 169983fb4af6SKrzysztof Blaszkowski dd = hfi1_init_dd(pdev, ent); 170083fb4af6SKrzysztof Blaszkowski 170183fb4af6SKrzysztof Blaszkowski if (IS_ERR(dd)) { 1702f48ad614SDennis Dalessandro ret = PTR_ERR(dd); 1703f48ad614SDennis Dalessandro goto clean_bail; /* error already printed */ 170483fb4af6SKrzysztof Blaszkowski } 1705f48ad614SDennis Dalessandro 1706f48ad614SDennis Dalessandro ret = create_workqueues(dd); 1707f48ad614SDennis Dalessandro if (ret) 1708f48ad614SDennis Dalessandro goto clean_bail; 1709f48ad614SDennis Dalessandro 1710f48ad614SDennis Dalessandro /* do the generic initialization */ 1711f48ad614SDennis Dalessandro initfail = hfi1_init(dd, 0); 1712f48ad614SDennis Dalessandro 1713d4829ea6SVishwanathapura, Niranjana /* setup vnic */ 1714d4829ea6SVishwanathapura, Niranjana hfi1_vnic_setup(dd); 1715d4829ea6SVishwanathapura, Niranjana 1716f48ad614SDennis Dalessandro ret = hfi1_register_ib_device(dd); 1717f48ad614SDennis Dalessandro 1718f48ad614SDennis Dalessandro /* 1719f48ad614SDennis Dalessandro * Now ready for use. This should be cleared whenever we 1720f48ad614SDennis Dalessandro * detect a reset, or initiate one. If earlier failure, 1721f48ad614SDennis Dalessandro * we still create devices, so diags, etc. can be used 1722f48ad614SDennis Dalessandro * to determine cause of problem. 1723f48ad614SDennis Dalessandro */ 1724f48ad614SDennis Dalessandro if (!initfail && !ret) { 1725f48ad614SDennis Dalessandro dd->flags |= HFI1_INITTED; 1726f48ad614SDennis Dalessandro /* create debugfs files after init and ib register */ 1727f48ad614SDennis Dalessandro hfi1_dbg_ibdev_init(&dd->verbs_dev); 1728f48ad614SDennis Dalessandro } 1729f48ad614SDennis Dalessandro 1730f48ad614SDennis Dalessandro j = hfi1_device_create(dd); 1731f48ad614SDennis Dalessandro if (j) 1732f48ad614SDennis Dalessandro dd_dev_err(dd, "Failed to create /dev devices: %d\n", -j); 1733f48ad614SDennis Dalessandro 1734f48ad614SDennis Dalessandro if (initfail || ret) { 173582a97926SMichael J.
Ruhl hfi1_clean_up_interrupts(dd); 1736f48ad614SDennis Dalessandro stop_timers(dd); 1737f48ad614SDennis Dalessandro flush_workqueue(ib_wq); 1738f48ad614SDennis Dalessandro for (pidx = 0; pidx < dd->num_pports; ++pidx) { 1739f48ad614SDennis Dalessandro hfi1_quiet_serdes(dd->pport + pidx); 1740f48ad614SDennis Dalessandro ppd = dd->pport + pidx; 1741f48ad614SDennis Dalessandro if (ppd->hfi1_wq) { 1742f48ad614SDennis Dalessandro destroy_workqueue(ppd->hfi1_wq); 1743f48ad614SDennis Dalessandro ppd->hfi1_wq = NULL; 1744f48ad614SDennis Dalessandro } 174571d47008SSebastian Sanchez if (ppd->link_wq) { 174671d47008SSebastian Sanchez destroy_workqueue(ppd->link_wq); 174771d47008SSebastian Sanchez ppd->link_wq = NULL; 174871d47008SSebastian Sanchez } 1749f48ad614SDennis Dalessandro } 1750f48ad614SDennis Dalessandro if (!j) 1751f48ad614SDennis Dalessandro hfi1_device_remove(dd); 1752f48ad614SDennis Dalessandro if (!ret) 1753f48ad614SDennis Dalessandro hfi1_unregister_ib_device(dd); 17542280740fSVishwanathapura, Niranjana hfi1_vnic_cleanup(dd); 1755f48ad614SDennis Dalessandro postinit_cleanup(dd); 1756f48ad614SDennis Dalessandro if (initfail) 1757f48ad614SDennis Dalessandro ret = initfail; 1758f48ad614SDennis Dalessandro goto bail; /* everything already cleaned */ 1759f48ad614SDennis Dalessandro } 1760f48ad614SDennis Dalessandro 1761f48ad614SDennis Dalessandro sdma_start(dd); 1762f48ad614SDennis Dalessandro 1763f48ad614SDennis Dalessandro return 0; 1764f48ad614SDennis Dalessandro 1765f48ad614SDennis Dalessandro clean_bail: 1766f48ad614SDennis Dalessandro hfi1_pcie_cleanup(pdev); 1767f48ad614SDennis Dalessandro bail: 1768f48ad614SDennis Dalessandro return ret; 1769f48ad614SDennis Dalessandro } 1770f48ad614SDennis Dalessandro 1771acd7c8feSTadeusz Struk static void wait_for_clients(struct hfi1_devdata *dd) 1772acd7c8feSTadeusz Struk { 1773acd7c8feSTadeusz Struk /* 1774acd7c8feSTadeusz Struk * Remove the device init value and complete the device if there is 1775acd7c8feSTadeusz Struk * no clients or wait for active clients to finish. 1776acd7c8feSTadeusz Struk */ 1777acd7c8feSTadeusz Struk if (atomic_dec_and_test(&dd->user_refcount)) 1778acd7c8feSTadeusz Struk complete(&dd->user_comp); 1779acd7c8feSTadeusz Struk 1780acd7c8feSTadeusz Struk wait_for_completion(&dd->user_comp); 1781acd7c8feSTadeusz Struk } 1782acd7c8feSTadeusz Struk 1783f48ad614SDennis Dalessandro static void remove_one(struct pci_dev *pdev) 1784f48ad614SDennis Dalessandro { 1785f48ad614SDennis Dalessandro struct hfi1_devdata *dd = pci_get_drvdata(pdev); 1786f48ad614SDennis Dalessandro 1787f48ad614SDennis Dalessandro /* close debugfs files before ib unregister */ 1788f48ad614SDennis Dalessandro hfi1_dbg_ibdev_exit(&dd->verbs_dev); 1789acd7c8feSTadeusz Struk 1790acd7c8feSTadeusz Struk /* remove the /dev hfi1 interface */ 1791acd7c8feSTadeusz Struk hfi1_device_remove(dd); 1792acd7c8feSTadeusz Struk 1793acd7c8feSTadeusz Struk /* wait for existing user space clients to finish */ 1794acd7c8feSTadeusz Struk wait_for_clients(dd); 1795acd7c8feSTadeusz Struk 1796f48ad614SDennis Dalessandro /* unregister from IB core */ 1797f48ad614SDennis Dalessandro hfi1_unregister_ib_device(dd); 1798f48ad614SDennis Dalessandro 1799d4829ea6SVishwanathapura, Niranjana /* cleanup vnic */ 1800d4829ea6SVishwanathapura, Niranjana hfi1_vnic_cleanup(dd); 1801d4829ea6SVishwanathapura, Niranjana 1802f48ad614SDennis Dalessandro /* 1803f48ad614SDennis Dalessandro * Disable the IB link, disable interrupts on the device, 1804f48ad614SDennis Dalessandro * clear dma engines, etc. 
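 * shutdown_device() is shared with the PCI ->shutdown hook; the
 * HFI1_SHUTDOWN flag it sets on entry makes any second call a no-op,
 * so the remove and shutdown paths cannot run the quiesce sequence
 * twice.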
1805f48ad614SDennis Dalessandro */ 1806f48ad614SDennis Dalessandro shutdown_device(dd); 1807f48ad614SDennis Dalessandro 1808f48ad614SDennis Dalessandro stop_timers(dd); 1809f48ad614SDennis Dalessandro 1810f48ad614SDennis Dalessandro /* wait until all of our (qsfp) queue_work() calls complete */ 1811f48ad614SDennis Dalessandro flush_workqueue(ib_wq); 1812f48ad614SDennis Dalessandro 1813f48ad614SDennis Dalessandro postinit_cleanup(dd); 1814f48ad614SDennis Dalessandro } 1815f48ad614SDennis Dalessandro 18168d3e7113SAlex Estrin static void shutdown_one(struct pci_dev *pdev) 18178d3e7113SAlex Estrin { 18188d3e7113SAlex Estrin struct hfi1_devdata *dd = pci_get_drvdata(pdev); 18198d3e7113SAlex Estrin 18208d3e7113SAlex Estrin shutdown_device(dd); 18218d3e7113SAlex Estrin } 18228d3e7113SAlex Estrin 1823f48ad614SDennis Dalessandro /** 1824f48ad614SDennis Dalessandro * hfi1_create_rcvhdrq - create a receive header queue 1825f48ad614SDennis Dalessandro * @dd: the hfi1_ib device 1826f48ad614SDennis Dalessandro * @rcd: the context data 1827f48ad614SDennis Dalessandro * 1828f48ad614SDennis Dalessandro * This must be contiguous memory (from an i/o perspective), and must be 1829f48ad614SDennis Dalessandro * DMA'able (which means for some systems, it will go through an IOMMU, 1830f48ad614SDennis Dalessandro * or be forced into a low address range). 1831f48ad614SDennis Dalessandro */ 1832f48ad614SDennis Dalessandro int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) 1833f48ad614SDennis Dalessandro { 1834f48ad614SDennis Dalessandro unsigned amt; 1835f48ad614SDennis Dalessandro u64 reg; 1836f48ad614SDennis Dalessandro 1837f48ad614SDennis Dalessandro if (!rcd->rcvhdrq) { 1838f48ad614SDennis Dalessandro gfp_t gfp_flags; 1839f48ad614SDennis Dalessandro 1840b2578431SMike Marciniszyn amt = rcvhdrq_size(rcd); 1841f48ad614SDennis Dalessandro 1842cc9a97eaSNiranjana Vishwanathapura if (rcd->ctxt < dd->first_dyn_alloc_ctxt || rcd->is_vnic) 18432280740fSVishwanathapura, Niranjana gfp_flags = GFP_KERNEL; 18442280740fSVishwanathapura, Niranjana else 18452280740fSVishwanathapura, Niranjana gfp_flags = GFP_USER; 1846f48ad614SDennis Dalessandro rcd->rcvhdrq = dma_zalloc_coherent( 184760368186STymoteusz Kielan &dd->pcidev->dev, amt, &rcd->rcvhdrq_dma, 1848f48ad614SDennis Dalessandro gfp_flags | __GFP_COMP); 1849f48ad614SDennis Dalessandro 1850f48ad614SDennis Dalessandro if (!rcd->rcvhdrq) { 1851f48ad614SDennis Dalessandro dd_dev_err(dd, 1852f48ad614SDennis Dalessandro "attempt to allocate %d bytes for ctxt %u rcvhdrq failed\n", 1853f48ad614SDennis Dalessandro amt, rcd->ctxt); 1854f48ad614SDennis Dalessandro goto bail; 1855f48ad614SDennis Dalessandro } 1856f48ad614SDennis Dalessandro 18571bc0299dSMike Marciniszyn if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) || 18581bc0299dSMike Marciniszyn HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) { 1859f48ad614SDennis Dalessandro rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent( 18601bc0299dSMike Marciniszyn &dd->pcidev->dev, PAGE_SIZE, 18611bc0299dSMike Marciniszyn &rcd->rcvhdrqtailaddr_dma, gfp_flags); 1862f48ad614SDennis Dalessandro if (!rcd->rcvhdrtail_kvaddr) 1863f48ad614SDennis Dalessandro goto bail_free; 1864f48ad614SDennis Dalessandro } 1865f48ad614SDennis Dalessandro } 1866f48ad614SDennis Dalessandro /* 1867f48ad614SDennis Dalessandro * These values are per-context: 1868f48ad614SDennis Dalessandro * RcvHdrCnt 1869f48ad614SDennis Dalessandro * RcvHdrEntSize 1870f48ad614SDennis Dalessandro * RcvHdrSize 1871f48ad614SDennis Dalessandro */ 1872f48ad614SDennis 
Dalessandro reg = ((u64)(rcd->rcvhdrq_cnt >> HDRQ_SIZE_SHIFT) 1873f48ad614SDennis Dalessandro & RCV_HDR_CNT_CNT_MASK) 1874f48ad614SDennis Dalessandro << RCV_HDR_CNT_CNT_SHIFT; 1875f48ad614SDennis Dalessandro write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_CNT, reg); 1876f48ad614SDennis Dalessandro reg = (encode_rcv_header_entry_size(rcd->rcvhdrqentsize) 1877f48ad614SDennis Dalessandro & RCV_HDR_ENT_SIZE_ENT_SIZE_MASK) 1878f48ad614SDennis Dalessandro << RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT; 1879f48ad614SDennis Dalessandro write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_ENT_SIZE, reg); 188032e3d970SMike Marciniszyn reg = ((u64)DEFAULT_RCVHDRSIZE & RCV_HDR_SIZE_HDR_SIZE_MASK) 1881f48ad614SDennis Dalessandro << RCV_HDR_SIZE_HDR_SIZE_SHIFT; 1882f48ad614SDennis Dalessandro write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_SIZE, reg); 1883f48ad614SDennis Dalessandro 1884f48ad614SDennis Dalessandro /* 1885f48ad614SDennis Dalessandro * Program dummy tail address for every receive context 1886f48ad614SDennis Dalessandro * before enabling any receive context 1887f48ad614SDennis Dalessandro */ 1888f48ad614SDennis Dalessandro write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_TAIL_ADDR, 188960368186STymoteusz Kielan dd->rcvhdrtail_dummy_dma); 1890f48ad614SDennis Dalessandro 1891f48ad614SDennis Dalessandro return 0; 1892f48ad614SDennis Dalessandro 1893f48ad614SDennis Dalessandro bail_free: 1894f48ad614SDennis Dalessandro dd_dev_err(dd, 1895f48ad614SDennis Dalessandro "attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n", 1896f48ad614SDennis Dalessandro rcd->ctxt); 1897f48ad614SDennis Dalessandro dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq, 189860368186STymoteusz Kielan rcd->rcvhdrq_dma); 1899f48ad614SDennis Dalessandro rcd->rcvhdrq = NULL; 1900f48ad614SDennis Dalessandro bail: 1901f48ad614SDennis Dalessandro return -ENOMEM; 1902f48ad614SDennis Dalessandro } 1903f48ad614SDennis Dalessandro 1904f48ad614SDennis Dalessandro /** 1905f48ad614SDennis Dalessandro * hfi1_setup_eagerbufs - allocate eager buffers, both kernel and user contexts. 1906f48ad614SDennis Dalessandro * @rcd: the context we are setting up. 1907f48ad614SDennis Dalessandro * 1908f48ad614SDennis Dalessandro * Allocate the eager TID buffers and program them into the chip. 1909f48ad614SDennis Dalessandro * They are no longer completely contiguous; we do multiple allocation 1910f48ad614SDennis Dalessandro * calls. Otherwise we get the OOM code involved, by asking for too 1911f48ad614SDennis Dalessandro * much per call, with disastrous results on some kernels. 1912f48ad614SDennis Dalessandro */ 1913f48ad614SDennis Dalessandro int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd) 1914f48ad614SDennis Dalessandro { 1915f48ad614SDennis Dalessandro struct hfi1_devdata *dd = rcd->dd; 1916071e4fecSMike Marciniszyn u32 max_entries, egrtop, alloced_bytes = 0; 1917f48ad614SDennis Dalessandro gfp_t gfp_flags; 1918071e4fecSMike Marciniszyn u16 order, idx = 0; 1919f48ad614SDennis Dalessandro int ret = 0; 1920f48ad614SDennis Dalessandro u16 round_mtu = roundup_pow_of_two(hfi1_max_mtu); 1921f48ad614SDennis Dalessandro 1922f48ad614SDennis Dalessandro /* 1923f48ad614SDennis Dalessandro * GFP_USER, but without GFP_FS, so buffer cache can be 1924f48ad614SDennis Dalessandro * coalesced (we hope); otherwise, even at order 4, 1925f48ad614SDennis Dalessandro * heavy filesystem activity makes these fail, and we can 1926f48ad614SDennis Dalessandro * use compound pages.
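 * (__GFP_RECLAIM | __GFP_IO is essentially GFP_USER stripped of
 * __GFP_FS and the cpuset hardwall bit; __GFP_COMP requests compound
 * pages.)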
1927f48ad614SDennis Dalessandro */ 1928f48ad614SDennis Dalessandro gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP; 1929f48ad614SDennis Dalessandro 1930f48ad614SDennis Dalessandro /* 1931f48ad614SDennis Dalessandro * The minimum size of the eager buffers is a group of MTU-sized 1932f48ad614SDennis Dalessandro * buffers. 1933f48ad614SDennis Dalessandro * The global eager_buffer_size parameter is checked against the 1934f48ad614SDennis Dalessandro * theoretical lower limit of the value. Here, we check against the 1935f48ad614SDennis Dalessandro * MTU. 1936f48ad614SDennis Dalessandro */ 1937f48ad614SDennis Dalessandro if (rcd->egrbufs.size < (round_mtu * dd->rcv_entries.group_size)) 1938f48ad614SDennis Dalessandro rcd->egrbufs.size = round_mtu * dd->rcv_entries.group_size; 1939f48ad614SDennis Dalessandro /* 1940f48ad614SDennis Dalessandro * If using one-pkt-per-egr-buffer, lower the eager buffer 1941f48ad614SDennis Dalessandro * size to the max MTU (page-aligned). 1942f48ad614SDennis Dalessandro */ 1943f48ad614SDennis Dalessandro if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) 1944f48ad614SDennis Dalessandro rcd->egrbufs.rcvtid_size = round_mtu; 1945f48ad614SDennis Dalessandro 1946f48ad614SDennis Dalessandro /* 1947f48ad614SDennis Dalessandro * Eager buffer sizes of 1MB or less require smaller TID sizes 1948f48ad614SDennis Dalessandro * to satisfy the "multiple of 8 RcvArray entries" requirement. 1949f48ad614SDennis Dalessandro */ 1950f48ad614SDennis Dalessandro if (rcd->egrbufs.size <= (1 << 20)) 1951f48ad614SDennis Dalessandro rcd->egrbufs.rcvtid_size = max((unsigned long)round_mtu, 1952f48ad614SDennis Dalessandro rounddown_pow_of_two(rcd->egrbufs.size / 8)); 1953f48ad614SDennis Dalessandro 1954f48ad614SDennis Dalessandro while (alloced_bytes < rcd->egrbufs.size && 1955f48ad614SDennis Dalessandro rcd->egrbufs.alloced < rcd->egrbufs.count) { 1956f48ad614SDennis Dalessandro rcd->egrbufs.buffers[idx].addr = 1957f48ad614SDennis Dalessandro dma_zalloc_coherent(&dd->pcidev->dev, 1958f48ad614SDennis Dalessandro rcd->egrbufs.rcvtid_size, 195960368186STymoteusz Kielan &rcd->egrbufs.buffers[idx].dma, 1960f48ad614SDennis Dalessandro gfp_flags); 1961f48ad614SDennis Dalessandro if (rcd->egrbufs.buffers[idx].addr) { 1962f48ad614SDennis Dalessandro rcd->egrbufs.buffers[idx].len = 1963f48ad614SDennis Dalessandro rcd->egrbufs.rcvtid_size; 1964f48ad614SDennis Dalessandro rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].addr = 1965f48ad614SDennis Dalessandro rcd->egrbufs.buffers[idx].addr; 196660368186STymoteusz Kielan rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].dma = 196760368186STymoteusz Kielan rcd->egrbufs.buffers[idx].dma; 1968f48ad614SDennis Dalessandro rcd->egrbufs.alloced++; 1969f48ad614SDennis Dalessandro alloced_bytes += rcd->egrbufs.rcvtid_size; 1970f48ad614SDennis Dalessandro idx++; 1971f48ad614SDennis Dalessandro } else { 1972f48ad614SDennis Dalessandro u32 new_size, i, j; 1973f48ad614SDennis Dalessandro u64 offset = 0; 1974f48ad614SDennis Dalessandro 1975f48ad614SDennis Dalessandro /* 1976f48ad614SDennis Dalessandro * Fail the eager buffer allocation if: 1977f48ad614SDennis Dalessandro * - we are already using the lowest acceptable size 1978f48ad614SDennis Dalessandro * - we are using one-pkt-per-egr-buffer (this implies 1979f48ad614SDennis Dalessandro * that we are accepting only one size) 1980f48ad614SDennis Dalessandro */ 1981f48ad614SDennis Dalessandro if (rcd->egrbufs.rcvtid_size == round_mtu || 1982f48ad614SDennis Dalessandro !HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) {
1983f48ad614SDennis Dalessandro dd_dev_err(dd, "ctxt%u: Failed to allocate eager buffers\n", 1984f48ad614SDennis Dalessandro rcd->ctxt); 198594679061SMichael J. Ruhl ret = -ENOMEM; 1986f48ad614SDennis Dalessandro goto bail_rcvegrbuf_phys; 1987f48ad614SDennis Dalessandro } 1988f48ad614SDennis Dalessandro 1989f48ad614SDennis Dalessandro new_size = rcd->egrbufs.rcvtid_size / 2; 1990f48ad614SDennis Dalessandro 1991f48ad614SDennis Dalessandro /* 1992f48ad614SDennis Dalessandro * If the first attempt to allocate memory failed, don't 1993f48ad614SDennis Dalessandro * fail everything but continue with the next lower 1994f48ad614SDennis Dalessandro * size. 1995f48ad614SDennis Dalessandro */ 1996f48ad614SDennis Dalessandro if (idx == 0) { 1997f48ad614SDennis Dalessandro rcd->egrbufs.rcvtid_size = new_size; 1998f48ad614SDennis Dalessandro continue; 1999f48ad614SDennis Dalessandro } 2000f48ad614SDennis Dalessandro 2001f48ad614SDennis Dalessandro /* 2002f48ad614SDennis Dalessandro * Re-partition already allocated buffers to a smaller 2003f48ad614SDennis Dalessandro * size. 2004f48ad614SDennis Dalessandro */ 2005f48ad614SDennis Dalessandro rcd->egrbufs.alloced = 0; 2006f48ad614SDennis Dalessandro for (i = 0, j = 0, offset = 0; j < idx; i++) { 2007f48ad614SDennis Dalessandro if (i >= rcd->egrbufs.count) 2008f48ad614SDennis Dalessandro break; 200960368186STymoteusz Kielan rcd->egrbufs.rcvtids[i].dma = 201060368186STymoteusz Kielan rcd->egrbufs.buffers[j].dma + offset; 2011f48ad614SDennis Dalessandro rcd->egrbufs.rcvtids[i].addr = 2012f48ad614SDennis Dalessandro rcd->egrbufs.buffers[j].addr + offset; 2013f48ad614SDennis Dalessandro rcd->egrbufs.alloced++; 201460368186STymoteusz Kielan if ((rcd->egrbufs.buffers[j].dma + offset + 2015f48ad614SDennis Dalessandro new_size) == 201660368186STymoteusz Kielan (rcd->egrbufs.buffers[j].dma + 2017f48ad614SDennis Dalessandro rcd->egrbufs.buffers[j].len)) { 2018f48ad614SDennis Dalessandro j++; 2019f48ad614SDennis Dalessandro offset = 0; 2020f48ad614SDennis Dalessandro } else { 2021f48ad614SDennis Dalessandro offset += new_size; 2022f48ad614SDennis Dalessandro } 2023f48ad614SDennis Dalessandro } 2024f48ad614SDennis Dalessandro rcd->egrbufs.rcvtid_size = new_size; 2025f48ad614SDennis Dalessandro } 2026f48ad614SDennis Dalessandro } 2027f48ad614SDennis Dalessandro rcd->egrbufs.numbufs = idx; 2028f48ad614SDennis Dalessandro rcd->egrbufs.size = alloced_bytes; 2029f48ad614SDennis Dalessandro 2030f48ad614SDennis Dalessandro hfi1_cdbg(PROC, 2031f48ad614SDennis Dalessandro "ctxt%u: Alloced %u rcv tid entries @ %uKB, total %zuKB\n", 203223002d5bSGrzegorz Heldt rcd->ctxt, rcd->egrbufs.alloced, 203323002d5bSGrzegorz Heldt rcd->egrbufs.rcvtid_size / 1024, rcd->egrbufs.size / 1024); 2034f48ad614SDennis Dalessandro 2035f48ad614SDennis Dalessandro /* 2036f48ad614SDennis Dalessandro * Set the contexts rcv array head update threshold to the closest 2037f48ad614SDennis Dalessandro * power of 2 (so we can use a mask instead of modulo) below half 2038f48ad614SDennis Dalessandro * the allocated entries. 2039f48ad614SDennis Dalessandro */ 2040f48ad614SDennis Dalessandro rcd->egrbufs.threshold = 2041f48ad614SDennis Dalessandro rounddown_pow_of_two(rcd->egrbufs.alloced / 2); 2042f48ad614SDennis Dalessandro /* 2043f48ad614SDennis Dalessandro * Compute the expected RcvArray entry base. This is done after 2044f48ad614SDennis Dalessandro * allocating the eager buffers in order to maximize the 2045f48ad614SDennis Dalessandro * expected RcvArray entries for the context. 
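 * Worked example (illustrative numbers only): with group_size 8,
 * rcv_array_groups 16 and 40 eager buffers allocated, max_entries is
 * 16 * 8 = 128, egrtop = roundup(40, 8) = 40, and expected_count is
 * 128 - 40 = 88 before the MAX_TID_PAIR_ENTRIES * 2 clamp is applied.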
2046f48ad614SDennis Dalessandro */ 2047f48ad614SDennis Dalessandro max_entries = rcd->rcv_array_groups * dd->rcv_entries.group_size; 2048f48ad614SDennis Dalessandro egrtop = roundup(rcd->egrbufs.alloced, dd->rcv_entries.group_size); 2049f48ad614SDennis Dalessandro rcd->expected_count = max_entries - egrtop; 2050f48ad614SDennis Dalessandro if (rcd->expected_count > MAX_TID_PAIR_ENTRIES * 2) 2051f48ad614SDennis Dalessandro rcd->expected_count = MAX_TID_PAIR_ENTRIES * 2; 2052f48ad614SDennis Dalessandro 2053f48ad614SDennis Dalessandro rcd->expected_base = rcd->eager_base + egrtop; 2054f48ad614SDennis Dalessandro hfi1_cdbg(PROC, "ctxt%u: eager:%u, exp:%u, egrbase:%u, expbase:%u\n", 2055f48ad614SDennis Dalessandro rcd->ctxt, rcd->egrbufs.alloced, rcd->expected_count, 2056f48ad614SDennis Dalessandro rcd->eager_base, rcd->expected_base); 2057f48ad614SDennis Dalessandro 2058f48ad614SDennis Dalessandro if (!hfi1_rcvbuf_validate(rcd->egrbufs.rcvtid_size, PT_EAGER, &order)) { 2059f48ad614SDennis Dalessandro hfi1_cdbg(PROC, 2060f48ad614SDennis Dalessandro "ctxt%u: current Eager buffer size is invalid %u\n", 2061f48ad614SDennis Dalessandro rcd->ctxt, rcd->egrbufs.rcvtid_size); 2062f48ad614SDennis Dalessandro ret = -EINVAL; 206362239fc6SMichael J. Ruhl goto bail_rcvegrbuf_phys; 2064f48ad614SDennis Dalessandro } 2065f48ad614SDennis Dalessandro 2066f48ad614SDennis Dalessandro for (idx = 0; idx < rcd->egrbufs.alloced; idx++) { 2067f48ad614SDennis Dalessandro hfi1_put_tid(dd, rcd->eager_base + idx, PT_EAGER, 206860368186STymoteusz Kielan rcd->egrbufs.rcvtids[idx].dma, order); 2069f48ad614SDennis Dalessandro cond_resched(); 2070f48ad614SDennis Dalessandro } 207162239fc6SMichael J. Ruhl 207262239fc6SMichael J. Ruhl return 0; 2073f48ad614SDennis Dalessandro 2074f48ad614SDennis Dalessandro bail_rcvegrbuf_phys: 2075f48ad614SDennis Dalessandro for (idx = 0; idx < rcd->egrbufs.alloced && 2076f48ad614SDennis Dalessandro rcd->egrbufs.buffers[idx].addr; 2077f48ad614SDennis Dalessandro idx++) { 2078f48ad614SDennis Dalessandro dma_free_coherent(&dd->pcidev->dev, 2079f48ad614SDennis Dalessandro rcd->egrbufs.buffers[idx].len, 2080f48ad614SDennis Dalessandro rcd->egrbufs.buffers[idx].addr, 208160368186STymoteusz Kielan rcd->egrbufs.buffers[idx].dma); 2082f48ad614SDennis Dalessandro rcd->egrbufs.buffers[idx].addr = NULL; 208360368186STymoteusz Kielan rcd->egrbufs.buffers[idx].dma = 0; 2084f48ad614SDennis Dalessandro rcd->egrbufs.buffers[idx].len = 0; 2085f48ad614SDennis Dalessandro } 208662239fc6SMichael J. Ruhl 2087f48ad614SDennis Dalessandro return ret; 2088f48ad614SDennis Dalessandro } 2089
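/*
 * Illustrative sketch, not driver code: the eager allocation strategy
 * above, reduced to its core loop.  try_alloc(), target and max_bufs
 * are hypothetical stand-ins; the point is the halve-on-failure
 * progression used by hfi1_setup_eagerbufs().  On failure at the floor
 * (size == round_mtu) the whole allocation fails with -ENOMEM;
 * otherwise the size is halved and the loop retries:
 *
 *	u32 size = rcvtid_size;
 *	while (total < target && nbufs < max_bufs) {
 *		void *buf = try_alloc(size);
 *		if (buf) {
 *			total += size;
 *			nbufs++;
 *		} else if (size == round_mtu) {
 *			return -ENOMEM;
 *		} else {
 *			size /= 2;
 *		}
 *	}
 */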