/*
 * Copyright (c) 2012, 2013 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_mad.h>
#include <rdma/ib_user_verbs.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/utsname.h>
#include <linux/rculist.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
#include <rdma/rdma_vt.h>

#include "qib.h"
#include "qib_common.h"

static unsigned int ib_qib_qp_table_size = 256;
module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");

static unsigned int qib_lkey_table_size = 16;
module_param_named(lkey_table_size, qib_lkey_table_size, uint,
		   S_IRUGO);
MODULE_PARM_DESC(lkey_table_size,
		 "LKEY table size in bits (2^n, 1 <= n <= 23)");

static unsigned int ib_qib_max_pds = 0xFFFF;
module_param_named(max_pds, ib_qib_max_pds, uint, S_IRUGO);
MODULE_PARM_DESC(max_pds,
		 "Maximum number of protection domains to support");

static unsigned int ib_qib_max_ahs = 0xFFFF;
module_param_named(max_ahs, ib_qib_max_ahs, uint, S_IRUGO);
MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");

unsigned int ib_qib_max_cqes = 0x2FFFF;
module_param_named(max_cqes, ib_qib_max_cqes, uint, S_IRUGO);
MODULE_PARM_DESC(max_cqes,
		 "Maximum number of completion queue entries to support");

unsigned int ib_qib_max_cqs = 0x1FFFF;
module_param_named(max_cqs, ib_qib_max_cqs, uint, S_IRUGO);
MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");

unsigned int ib_qib_max_qp_wrs = 0x3FFF;
module_param_named(max_qp_wrs, ib_qib_max_qp_wrs, uint, S_IRUGO);
MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");

unsigned int ib_qib_max_qps = 16384;
module_param_named(max_qps, ib_qib_max_qps, uint, S_IRUGO);
MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");

unsigned int ib_qib_max_sges = 0x60;
module_param_named(max_sges, ib_qib_max_sges, uint, S_IRUGO);
MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");

unsigned int ib_qib_max_mcast_grps = 16384;
module_param_named(max_mcast_grps, ib_qib_max_mcast_grps, uint, S_IRUGO);
MODULE_PARM_DESC(max_mcast_grps,
		 "Maximum number of multicast groups to support");

unsigned int ib_qib_max_mcast_qp_attached = 16;
module_param_named(max_mcast_qp_attached, ib_qib_max_mcast_qp_attached,
		   uint, S_IRUGO);
MODULE_PARM_DESC(max_mcast_qp_attached,
		 "Maximum number of attached QPs to support");

unsigned int ib_qib_max_srqs = 1024;
module_param_named(max_srqs, ib_qib_max_srqs, uint, S_IRUGO);
MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");

unsigned int ib_qib_max_srq_sges = 128;
module_param_named(max_srq_sges, ib_qib_max_srq_sges, uint, S_IRUGO);
MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");

unsigned int ib_qib_max_srq_wrs = 0x1FFFF;
module_param_named(max_srq_wrs, ib_qib_max_srq_wrs, uint, S_IRUGO);
MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");

static unsigned int ib_qib_disable_sma;
module_param_named(disable_sma, ib_qib_disable_sma, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(disable_sma, "Disable the SMA");
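
/*
 * Example (hypothetical values): the read-only (S_IRUGO) limits above
 * can only be set at module load time, e.g.:
 *
 *   modprobe ib_qib qp_table_size=512 max_qps=32768
 *
 * disable_sma, which is also S_IWUSR, can additionally be toggled at
 * runtime via /sys/module/ib_qib/parameters/disable_sma.
 */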

/*
 * Note that it is OK to post send work requests in the SQE and ERR
 * states; qib_do_send() will process them and generate error
 * completions as per IB 1.2 C10-96.
 */
const int ib_qib_state_ops[IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = 0,
	[IB_QPS_INIT] = QIB_POST_RECV_OK,
	[IB_QPS_RTR] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK,
	[IB_QPS_RTS] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
	    QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK |
	    QIB_PROCESS_NEXT_SEND_OK,
	[IB_QPS_SQD] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
	    QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK,
	[IB_QPS_SQE] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
	    QIB_POST_SEND_OK | QIB_FLUSH_SEND,
	[IB_QPS_ERR] = QIB_POST_RECV_OK | QIB_FLUSH_RECV |
	    QIB_POST_SEND_OK | QIB_FLUSH_SEND,
};

struct qib_ucontext {
	struct ib_ucontext ibucontext;
};

static inline struct qib_ucontext *to_iucontext(struct ib_ucontext
						*ibucontext)
{
	return container_of(ibucontext, struct qib_ucontext, ibucontext);
}

/*
 * Translate ib_wr_opcode into ib_wc_opcode.
 */
const enum ib_wc_opcode ib_qib_wc_opcode[] = {
	[IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
	[IB_WR_SEND] = IB_WC_SEND,
	[IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
	[IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
	[IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
};

/*
 * System image GUID.
 */
__be64 ib_qib_sys_image_guid;

/**
 * qib_copy_sge - copy data to SGE memory
 * @ss: the SGE state
 * @data: the data to copy
 * @length: the length of the data
 * @release: boolean to release MR
 */
void qib_copy_sge(struct rvt_sge_state *ss, void *data, u32 length, int release)
{
	struct rvt_sge *sge = &ss->sge;

	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		memcpy(sge->vaddr, data, len);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (release)
				rvt_put_mr(sge->mr);
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		data += len;
		length -= len;
	}
}

/**
 * qib_skip_sge - skip over SGE memory - XXX almost dup of prev func
 * @ss: the SGE state
 * @length: the number of bytes to skip
 * @release: boolean to release MR
 */
void qib_skip_sge(struct rvt_sge_state *ss, u32 length, int release)
{
	struct rvt_sge *sge = &ss->sge;

	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (release)
				rvt_put_mr(sge->mr);
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		length -= len;
	}
}

/*
 * Count the number of DMA descriptors needed to send length bytes of data.
 * Don't modify the rvt_sge_state to get the count.
 * Return zero if any of the segments is not aligned.
 */
static u32 qib_count_sge(struct rvt_sge_state *ss, u32 length)
{
	struct rvt_sge *sg_list = ss->sg_list;
	struct rvt_sge sge = ss->sge;
	u8 num_sge = ss->num_sge;
	u32 ndesc = 1;	/* count the header */

	while (length) {
		u32 len = sge.length;

		if (len > length)
			len = length;
		if (len > sge.sge_length)
			len = sge.sge_length;
		BUG_ON(len == 0);
		if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
		    (len != length && (len & (sizeof(u32) - 1)))) {
			ndesc = 0;
			break;
		}
		ndesc++;
		sge.vaddr += len;
		sge.length -= len;
		sge.sge_length -= len;
		if (sge.sge_length == 0) {
			if (--num_sge)
				sge = *sg_list++;
		} else if (sge.length == 0 && sge.mr->lkey) {
			if (++sge.n >= RVT_SEGSZ) {
				if (++sge.m >= sge.mr->mapsz)
					break;
				sge.n = 0;
			}
			sge.vaddr =
				sge.mr->map[sge.m]->segs[sge.n].vaddr;
			sge.length =
				sge.mr->map[sge.m]->segs[sge.n].length;
		}
		length -= len;
	}
	return ndesc;
}

/*
 * Copy from the SGEs to the data buffer.
 */
static void qib_copy_from_sge(void *data, struct rvt_sge_state *ss, u32 length)
{
	struct rvt_sge *sge = &ss->sge;

	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		memcpy(data, sge->vaddr, len);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		data += len;
		length -= len;
	}
}

/**
 * qib_post_one_send - post one RC, UC, or UD send work request
 * @qp: the QP to post on
 * @wr: the work request to send
 * @scheduled: set to 1 if qib_schedule_send() was called for the QP
 */
static int qib_post_one_send(struct rvt_qp *qp, struct ib_send_wr *wr,
			     int *scheduled)
{
	struct rvt_swqe *wqe;
	u32 next;
	int i;
	int j;
	int acc;
	int ret;
	unsigned long flags;
	struct rvt_lkey_table *rkt;
	struct rvt_pd *pd;
	int avoid_schedule = 0;

	spin_lock_irqsave(&qp->s_lock, flags);

	/* Check that state is OK to post send. */
	if (unlikely(!(ib_qib_state_ops[qp->state] & QIB_POST_SEND_OK)))
		goto bail_inval;

	/* IB spec says that num_sge == 0 is OK. */
	if (wr->num_sge > qp->s_max_sge)
		goto bail_inval;

	/*
	 * Don't allow RDMA reads or atomic operations on UC or
	 * undefined operations.
	 * Make sure buffer is large enough to hold the result for atomics.
	 */
	if (qp->ibqp.qp_type == IB_QPT_UC) {
		if ((unsigned) wr->opcode >= IB_WR_RDMA_READ)
			goto bail_inval;
	} else if (qp->ibqp.qp_type != IB_QPT_RC) {
		/* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */
		if (wr->opcode != IB_WR_SEND &&
		    wr->opcode != IB_WR_SEND_WITH_IMM)
			goto bail_inval;
		/* Check UD destination address PD */
		if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
			goto bail_inval;
	} else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD)
		goto bail_inval;
	else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
		 (wr->num_sge == 0 ||
		  wr->sg_list[0].length < sizeof(u64) ||
		  wr->sg_list[0].addr & (sizeof(u64) - 1)))
		goto bail_inval;
	else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic)
		goto bail_inval;

	next = qp->s_head + 1;
	if (next >= qp->s_size)
		next = 0;
	if (next == qp->s_last) {
		ret = -ENOMEM;
		goto bail;
	}

	rkt = &to_idev(qp->ibqp.device)->rdi.lkey_table;
	pd = ibpd_to_rvtpd(qp->ibqp.pd);
	wqe = get_swqe_ptr(qp, qp->s_head);

	if (qp->ibqp.qp_type != IB_QPT_UC &&
	    qp->ibqp.qp_type != IB_QPT_RC)
		memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr));
	else if (wr->opcode == IB_WR_REG_MR)
		memcpy(&wqe->reg_wr, reg_wr(wr),
			sizeof(wqe->reg_wr));
	else if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
		 wr->opcode == IB_WR_RDMA_WRITE ||
		 wr->opcode == IB_WR_RDMA_READ)
		memcpy(&wqe->rdma_wr, rdma_wr(wr), sizeof(wqe->rdma_wr));
	else if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		 wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
		memcpy(&wqe->atomic_wr, atomic_wr(wr), sizeof(wqe->atomic_wr));
	else
		memcpy(&wqe->wr, wr, sizeof(wqe->wr));

	wqe->length = 0;
	j = 0;
	if (wr->num_sge) {
		acc = wr->opcode >= IB_WR_RDMA_READ ?
			IB_ACCESS_LOCAL_WRITE : 0;
		for (i = 0; i < wr->num_sge; i++) {
			u32 length = wr->sg_list[i].length;
			int ok;

			if (length == 0)
				continue;
			ok = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j],
					 &wr->sg_list[i], acc);
			if (!ok)
				goto bail_inval_free;
			wqe->length += length;
			j++;
		}
		wqe->wr.num_sge = j;
	}
	if (qp->ibqp.qp_type == IB_QPT_UC ||
	    qp->ibqp.qp_type == IB_QPT_RC) {
		if (wqe->length > 0x80000000U)
			goto bail_inval_free;
		if (wqe->length <= qp->pmtu)
			avoid_schedule = 1;
	} else if (wqe->length > (dd_from_ibdev(qp->ibqp.device)->pport +
				  qp->port_num - 1)->ibmtu) {
		goto bail_inval_free;
	} else {
		atomic_inc(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount);
		avoid_schedule = 1;
	}
	wqe->ssn = qp->s_ssn++;
	qp->s_head = next;

	ret = 0;
	goto bail;

bail_inval_free:
	while (j) {
		struct rvt_sge *sge = &wqe->sg_list[--j];

		rvt_put_mr(sge->mr);
	}
bail_inval:
	ret = -EINVAL;
bail:
	if (!ret && !wr->next && !avoid_schedule &&
	    !qib_sdma_empty(
	       dd_from_ibdev(qp->ibqp.device)->pport + qp->port_num - 1)) {
		qib_schedule_send(qp);
		*scheduled = 1;
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return ret;
}

/**
 * qib_post_send - post a send on a QP
 * @ibqp: the QP to post the send on
 * @wr: the list of work requests to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 */
static int qib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			 struct ib_send_wr **bad_wr)
{
	struct rvt_qp *qp = to_iqp(ibqp);
	struct qib_qp_priv *priv = qp->priv;
	int err = 0;
	int scheduled = 0;

	for (; wr; wr = wr->next) {
		err = qib_post_one_send(qp, wr, &scheduled);
		if (err) {
			*bad_wr = wr;
			goto bail;
		}
	}

	/* Try to do the send work in the caller's context. */
	if (!scheduled)
		qib_do_send(&priv->s_work);

bail:
	return err;
}

/**
 * qib_post_receive - post a receive on a QP
 * @ibqp: the QP to post the receive on
 * @wr: the WR to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 */
static int qib_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			    struct ib_recv_wr **bad_wr)
{
	struct rvt_qp *qp = to_iqp(ibqp);
	struct rvt_rwq *wq = qp->r_rq.wq;
	unsigned long flags;
	int ret;

	/* Check that state is OK to post receive. */
	if (!(ib_qib_state_ops[qp->state] & QIB_POST_RECV_OK) || !wq) {
		*bad_wr = wr;
		ret = -EINVAL;
		goto bail;
	}

	for (; wr; wr = wr->next) {
		struct rvt_rwqe *wqe;
		u32 next;
		int i;

		if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {
			*bad_wr = wr;
			ret = -EINVAL;
			goto bail;
		}

		spin_lock_irqsave(&qp->r_rq.lock, flags);
		next = wq->head + 1;
		if (next >= qp->r_rq.size)
			next = 0;
		if (next == wq->tail) {
			spin_unlock_irqrestore(&qp->r_rq.lock, flags);
			*bad_wr = wr;
			ret = -ENOMEM;
			goto bail;
		}

		wqe = get_rwqe_ptr(&qp->r_rq, wq->head);
		wqe->wr_id = wr->wr_id;
		wqe->num_sge = wr->num_sge;
		for (i = 0; i < wr->num_sge; i++)
			wqe->sg_list[i] = wr->sg_list[i];
		/* Make sure queue entry is written before the head index. */
		smp_wmb();
		wq->head = next;
		spin_unlock_irqrestore(&qp->r_rq.lock, flags);
	}
	ret = 0;

bail:
	return ret;
}

/**
 * qib_qp_rcv - processing an incoming packet on a QP
 * @rcd: the context pointer
 * @hdr: the packet header
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP the packet came on
 *
 * This is called from qib_ib_rcv() to process an incoming packet
 * for the given QP.
 * Called at interrupt level.
 */
static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
		       int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
{
	struct qib_ibport *ibp = &rcd->ppd->ibport_data;

	spin_lock(&qp->r_lock);

	/* Check for valid receive state. */
	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
		ibp->rvp.n_pkt_drops++;
		goto unlock;
	}

	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (ib_qib_disable_sma)
			break;
		/* FALLTHROUGH */
	case IB_QPT_UD:
		qib_ud_rcv(ibp, hdr, has_grh, data, tlen, qp);
		break;

	case IB_QPT_RC:
		qib_rc_rcv(rcd, hdr, has_grh, data, tlen, qp);
		break;

	case IB_QPT_UC:
		qib_uc_rcv(ibp, hdr, has_grh, data, tlen, qp);
		break;

	default:
		break;
	}

unlock:
	spin_unlock(&qp->r_lock);
}

/**
 * qib_ib_rcv - process an incoming packet
 * @rcd: the context pointer
 * @rhdr: the header of the packet
 * @data: the packet payload
 * @tlen: the packet length
 *
 * This is called from qib_kreceive() to process an incoming packet at
 * interrupt level. Tlen is the length of the header + data + CRC in bytes.
 */
void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
{
	struct qib_pportdata *ppd = rcd->ppd;
	struct qib_ibport *ibp = &ppd->ibport_data;
	struct qib_ib_header *hdr = rhdr;
	struct qib_other_headers *ohdr;
	struct rvt_qp *qp;
	u32 qp_num;
	int lnh;
	u8 opcode;
	u16 lid;

	/* 24 == LRH+BTH+CRC */
	if (unlikely(tlen < 24))
		goto drop;

	/* Check for a valid destination LID (see ch. 7.11.1). */
	lid = be16_to_cpu(hdr->lrh[1]);
	if (lid < be16_to_cpu(IB_MULTICAST_LID_BASE)) {
		lid &= ~((1 << ppd->lmc) - 1);
		if (unlikely(lid != ppd->lid))
			goto drop;
	}

	/* Check for GRH */
	lnh = be16_to_cpu(hdr->lrh[0]) & 3;
	if (lnh == QIB_LRH_BTH)
		ohdr = &hdr->u.oth;
	else if (lnh == QIB_LRH_GRH) {
		u32 vtf;

		ohdr = &hdr->u.l.oth;
		if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
			goto drop;
		vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
		if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
			goto drop;
	} else
		goto drop;

	opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0x7f;
#ifdef CONFIG_DEBUG_FS
	rcd->opstats->stats[opcode].n_bytes += tlen;
	rcd->opstats->stats[opcode].n_packets++;
#endif

	/* Get the destination QP number. */
	qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
	if (qp_num == QIB_MULTICAST_QPN) {
		struct qib_mcast *mcast;
		struct qib_mcast_qp *p;

		if (lnh != QIB_LRH_GRH)
			goto drop;
		mcast = qib_mcast_find(ibp, &hdr->u.l.grh.dgid);
		if (mcast == NULL)
			goto drop;
		this_cpu_inc(ibp->pmastats->n_multicast_rcv);
		list_for_each_entry_rcu(p, &mcast->qp_list, list)
			qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp);
		/*
		 * Notify qib_multicast_detach() if it is waiting for us
		 * to finish.
		 */
		if (atomic_dec_return(&mcast->refcount) <= 1)
			wake_up(&mcast->wait);
	} else {
		if (rcd->lookaside_qp) {
			if (rcd->lookaside_qpn != qp_num) {
				if (atomic_dec_and_test(
					&rcd->lookaside_qp->refcount))
					wake_up(
					 &rcd->lookaside_qp->wait);
				rcd->lookaside_qp = NULL;
			}
		}
		if (!rcd->lookaside_qp) {
			qp = qib_lookup_qpn(ibp, qp_num);
			if (!qp)
				goto drop;
			rcd->lookaside_qp = qp;
			rcd->lookaside_qpn = qp_num;
		} else
			qp = rcd->lookaside_qp;
		this_cpu_inc(ibp->pmastats->n_unicast_rcv);
		qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp);
	}
	return;

drop:
	ibp->rvp.n_pkt_drops++;
}

/*
 * This is called from a timer to check for QPs
 * which need kernel memory in order to send a packet.
 */
static void mem_timer(unsigned long data)
{
	struct qib_ibdev *dev = (struct qib_ibdev *) data;
	struct list_head *list = &dev->memwait;
	struct rvt_qp *qp = NULL;
	struct qib_qp_priv *priv = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev->pending_lock, flags);
	if (!list_empty(list)) {
		priv = list_entry(list->next, struct qib_qp_priv, iowait);
		qp = priv->owner;
		list_del_init(&priv->iowait);
		atomic_inc(&qp->refcount);
		if (!list_empty(list))
			mod_timer(&dev->mem_timer, jiffies + 1);
	}
	spin_unlock_irqrestore(&dev->pending_lock, flags);

	if (qp) {
		spin_lock_irqsave(&qp->s_lock, flags);
		if (qp->s_flags & QIB_S_WAIT_KMEM) {
			qp->s_flags &= ~QIB_S_WAIT_KMEM;
			qib_schedule_send(qp);
		}
		spin_unlock_irqrestore(&qp->s_lock, flags);
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}
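
/*
 * Advance the SGE state past length bytes that have already been
 * copied, moving on to the next SGE or the next MR segment as each
 * one is consumed.
 */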
static void update_sge(struct rvt_sge_state *ss, u32 length)
{
	struct rvt_sge *sge = &ss->sge;

	sge->vaddr += length;
	sge->length -= length;
	sge->sge_length -= length;
	if (sge->sge_length == 0) {
		if (--ss->num_sge)
			*sge = *ss->sg_list++;
	} else if (sge->length == 0 && sge->mr->lkey) {
		if (++sge->n >= RVT_SEGSZ) {
			if (++sge->m >= sge->mr->mapsz)
				return;
			sge->n = 0;
		}
		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
	}
}

#ifdef __LITTLE_ENDIAN
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#else
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#endif
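
/*
 * Copy length payload bytes from the SGE list to the memory-mapped PIO
 * buffer, one dword at a time, re-aligning data from unaligned source
 * addresses as needed.  The final dword is held back and written last,
 * with write-combining flushes when flush_wc is set, since it is the
 * trigger word that starts the send.
 */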
static void copy_io(u32 __iomem *piobuf, struct rvt_sge_state *ss,
		    u32 length, unsigned flush_wc)
{
	u32 extra = 0;
	u32 data = 0;
	u32 last;

	while (1) {
		u32 len = ss->sge.length;
		u32 off;

		if (len > length)
			len = length;
		if (len > ss->sge.sge_length)
			len = ss->sge.sge_length;
		BUG_ON(len == 0);
		/* If the source address is not aligned, try to align it. */
		off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
		if (off) {
			u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
					    ~(sizeof(u32) - 1));
			u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
			u32 y;

			y = sizeof(u32) - off;
			if (len > y)
				len = y;
			if (len + extra >= sizeof(u32)) {
				data |= set_upper_bits(v, extra *
						       BITS_PER_BYTE);
				len = sizeof(u32) - extra;
				if (len == length) {
					last = data;
					break;
				}
				__raw_writel(data, piobuf);
				piobuf++;
				extra = 0;
				data = 0;
			} else {
				/* Clear unused upper bytes */
				data |= clear_upper_bytes(v, len, extra);
				if (len == length) {
					last = data;
					break;
				}
				extra += len;
			}
		} else if (extra) {
			/* Source address is aligned. */
			u32 *addr = (u32 *) ss->sge.vaddr;
			int shift = extra * BITS_PER_BYTE;
			int ushift = 32 - shift;
			u32 l = len;

			while (l >= sizeof(u32)) {
				u32 v = *addr;

				data |= set_upper_bits(v, shift);
				__raw_writel(data, piobuf);
				data = get_upper_bits(v, ushift);
				piobuf++;
				addr++;
				l -= sizeof(u32);
			}
			/*
			 * We still have 'extra' number of bytes leftover.
			 */
			if (l) {
				u32 v = *addr;

				if (l + extra >= sizeof(u32)) {
					data |= set_upper_bits(v, shift);
					len -= l + extra - sizeof(u32);
					if (len == length) {
						last = data;
						break;
					}
					__raw_writel(data, piobuf);
					piobuf++;
					extra = 0;
					data = 0;
				} else {
					/* Clear unused upper bytes */
					data |= clear_upper_bytes(v, l, extra);
					if (len == length) {
						last = data;
						break;
					}
					extra += l;
				}
			} else if (len == length) {
				last = data;
				break;
			}
		} else if (len == length) {
			u32 w;

			/*
			 * Need to round up for the last dword in the
			 * packet.
			 */
			w = (len + 3) >> 2;
			qib_pio_copy(piobuf, ss->sge.vaddr, w - 1);
			piobuf += w - 1;
			last = ((u32 *) ss->sge.vaddr)[w - 1];
			break;
		} else {
			u32 w = len >> 2;

			qib_pio_copy(piobuf, ss->sge.vaddr, w);
			piobuf += w;

			extra = len & (sizeof(u32) - 1);
			if (extra) {
				u32 v = ((u32 *) ss->sge.vaddr)[w];

				/* Clear unused upper bytes */
				data = clear_upper_bytes(v, extra, 0);
			}
		}
		update_sge(ss, len);
		length -= len;
	}
	/* Update address before sending packet. */
	update_sge(ss, length);
	if (flush_wc) {
		/* must flush early everything before trigger word */
		qib_flush_wc();
		__raw_writel(last, piobuf);
		/* be sure trigger word is written */
		qib_flush_wc();
	} else
		__raw_writel(last, piobuf);
}
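
/*
 * Slow path for get_txreq(): retry the free list with qp->s_lock held
 * as well, and if it is still empty, queue the QP on the txwait list
 * so it is rescheduled when a txreq is freed.  Returns ERR_PTR(-EBUSY)
 * in that case.
 */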
static noinline struct qib_verbs_txreq *__get_txreq(struct qib_ibdev *dev,
					   struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_verbs_txreq *tx;
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	spin_lock(&dev->pending_lock);

	if (!list_empty(&dev->txreq_free)) {
		struct list_head *l = dev->txreq_free.next;

		list_del(l);
		spin_unlock(&dev->pending_lock);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
	} else {
		if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK &&
		    list_empty(&priv->iowait)) {
			dev->n_txwait++;
			qp->s_flags |= QIB_S_WAIT_TX;
			list_add_tail(&priv->iowait, &dev->txwait);
		}
		qp->s_flags &= ~QIB_S_BUSY;
		spin_unlock(&dev->pending_lock);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		tx = ERR_PTR(-EBUSY);
	}
	return tx;
}

static inline struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
					 struct rvt_qp *qp)
{
	struct qib_verbs_txreq *tx;
	unsigned long flags;

	spin_lock_irqsave(&dev->pending_lock, flags);
	/* assume the list non empty */
	if (likely(!list_empty(&dev->txreq_free))) {
		struct list_head *l = dev->txreq_free.next;

		list_del(l);
		spin_unlock_irqrestore(&dev->pending_lock, flags);
		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
	} else {
		/* call slow path to get the extra lock */
		spin_unlock_irqrestore(&dev->pending_lock, flags);
		tx = __get_txreq(dev, qp);
	}
	return tx;
}
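
/*
 * Release a txreq: drop the QP and MR references, unmap any DMA-mapped
 * bounce buffer, return the struct to the free list, and wake the first
 * QP waiting on the txwait list, if any.
 */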
void qib_put_txreq(struct qib_verbs_txreq *tx)
{
	struct qib_ibdev *dev;
	struct rvt_qp *qp;
	struct qib_qp_priv *priv;
	unsigned long flags;

	qp = tx->qp;
	dev = to_idev(qp->ibqp.device);

	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
	if (tx->mr) {
		rvt_put_mr(tx->mr);
		tx->mr = NULL;
	}
	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) {
		tx->txreq.flags &= ~QIB_SDMA_TXREQ_F_FREEBUF;
		dma_unmap_single(&dd_from_dev(dev)->pcidev->dev,
				 tx->txreq.addr, tx->hdr_dwords << 2,
				 DMA_TO_DEVICE);
		kfree(tx->align_buf);
	}

	spin_lock_irqsave(&dev->pending_lock, flags);

	/* Put struct back on free list */
	list_add(&tx->txreq.list, &dev->txreq_free);

	if (!list_empty(&dev->txwait)) {
		/* Wake up first QP wanting a free struct */
		priv = list_entry(dev->txwait.next, struct qib_qp_priv,
				  iowait);
		qp = priv->owner;
		list_del_init(&priv->iowait);
		atomic_inc(&qp->refcount);
		spin_unlock_irqrestore(&dev->pending_lock, flags);

		spin_lock_irqsave(&qp->s_lock, flags);
		if (qp->s_flags & QIB_S_WAIT_TX) {
			qp->s_flags &= ~QIB_S_WAIT_TX;
			qib_schedule_send(qp);
		}
		spin_unlock_irqrestore(&qp->s_lock, flags);

		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	} else
		spin_unlock_irqrestore(&dev->pending_lock, flags);
}

/*
 * This is called when there are send DMA descriptors that might be
 * available.
 *
 * This is called with ppd->sdma_lock held.
 */
void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail)
{
	struct rvt_qp *qp, *nqp;
	struct qib_qp_priv *qpp, *nqpp;
	struct rvt_qp *qps[20];
	struct qib_ibdev *dev;
	unsigned i, n;

	n = 0;
	dev = &ppd->dd->verbs_dev;
	spin_lock(&dev->pending_lock);

	/* Search wait list for first QP wanting DMA descriptors. */
	list_for_each_entry_safe(qpp, nqpp, &dev->dmawait, iowait) {
		qp = qpp->owner;
		nqp = nqpp->owner;
		if (qp->port_num != ppd->port)
			continue;
		if (n == ARRAY_SIZE(qps))
			break;
		if (qpp->s_tx->txreq.sg_count > avail)
			break;
		avail -= qpp->s_tx->txreq.sg_count;
		list_del_init(&qpp->iowait);
		atomic_inc(&qp->refcount);
		qps[n++] = qp;
	}

	spin_unlock(&dev->pending_lock);

	for (i = 0; i < n; i++) {
		qp = qps[i];
		spin_lock(&qp->s_lock);
		if (qp->s_flags & QIB_S_WAIT_DMA_DESC) {
			qp->s_flags &= ~QIB_S_WAIT_DMA_DESC;
			qib_schedule_send(qp);
		}
		spin_unlock(&qp->s_lock);
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}

/*
 * This is called with ppd->sdma_lock held.
 */
static void sdma_complete(struct qib_sdma_txreq *cookie, int status)
{
	struct qib_verbs_txreq *tx =
		container_of(cookie, struct qib_verbs_txreq, txreq);
	struct rvt_qp *qp = tx->qp;
	struct qib_qp_priv *priv = qp->priv;

	spin_lock(&qp->s_lock);
	if (tx->wqe)
		qib_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
	else if (qp->ibqp.qp_type == IB_QPT_RC) {
		struct qib_ib_header *hdr;

		if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF)
			hdr = &tx->align_buf->hdr;
		else {
			struct qib_ibdev *dev = to_idev(qp->ibqp.device);

			hdr = &dev->pio_hdrs[tx->hdr_inx].hdr;
		}
		qib_rc_send_complete(qp, hdr);
	}
	if (atomic_dec_and_test(&priv->s_dma_busy)) {
		if (qp->state == IB_QPS_RESET)
			wake_up(&priv->wait_dma);
		else if (qp->s_flags & QIB_S_WAIT_DMA) {
			qp->s_flags &= ~QIB_S_WAIT_DMA;
			qib_schedule_send(qp);
		}
	}
	spin_unlock(&qp->s_lock);

	qib_put_txreq(tx);
}
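
/*
 * Queue the QP on the memwait list (arming mem_timer if the list was
 * empty) and return -EBUSY so the caller retries the send later, once
 * kernel memory may be available again.
 */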
static int wait_kmem(struct qib_ibdev *dev, struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
		spin_lock(&dev->pending_lock);
		if (list_empty(&priv->iowait)) {
			if (list_empty(&dev->memwait))
				mod_timer(&dev->mem_timer, jiffies + 1);
			qp->s_flags |= QIB_S_WAIT_KMEM;
			list_add_tail(&priv->iowait, &dev->memwait);
		}
		spin_unlock(&dev->pending_lock);
		qp->s_flags &= ~QIB_S_BUSY;
		ret = -EBUSY;
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);

	return ret;
}

static int qib_verbs_send_dma(struct rvt_qp *qp, struct qib_ib_header *hdr,
			      u32 hdrwords, struct rvt_sge_state *ss, u32 len,
			      u32 plen, u32 dwords)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
	struct qib_devdata *dd = dd_from_dev(dev);
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_verbs_txreq *tx;
	struct qib_pio_header *phdr;
	u32 control;
	u32 ndesc;
	int ret;

	tx = priv->s_tx;
	if (tx) {
		priv->s_tx = NULL;
		/* resend previously constructed packet */
		ret = qib_sdma_verbs_send(ppd, tx->ss, tx->dwords, tx);
		goto bail;
	}

	tx = get_txreq(dev, qp);
	if (IS_ERR(tx))
		goto bail_tx;

	control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
				       be16_to_cpu(hdr->lrh[0]) >> 12);
	tx->qp = qp;
	atomic_inc(&qp->refcount);
	tx->wqe = qp->s_wqe;
	tx->mr = qp->s_rdma_mr;
	if (qp->s_rdma_mr)
		qp->s_rdma_mr = NULL;
	tx->txreq.callback = sdma_complete;
	if (dd->flags & QIB_HAS_SDMA_TIMEOUT)
		tx->txreq.flags = QIB_SDMA_TXREQ_F_HEADTOHOST;
	else
		tx->txreq.flags = QIB_SDMA_TXREQ_F_INTREQ;
	if (plen + 1 > dd->piosize2kmax_dwords)
		tx->txreq.flags |= QIB_SDMA_TXREQ_F_USELARGEBUF;

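	/*
	 * Two transmit strategies follow: if the payload fits within the
	 * SDMA descriptor queue, DMA it directly from the SGE list;
	 * otherwise fall through and copy header plus payload into a
	 * single DMA-mapped bounce buffer.
	 */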
	if (len) {
		/*
		 * Don't try to DMA if it takes more descriptors than
		 * the queue holds.
		 */
		ndesc = qib_count_sge(ss, len);
		if (ndesc >= ppd->sdma_descq_cnt)
			ndesc = 0;
	} else
		ndesc = 1;
	if (ndesc) {
		phdr = &dev->pio_hdrs[tx->hdr_inx];
		phdr->pbc[0] = cpu_to_le32(plen);
		phdr->pbc[1] = cpu_to_le32(control);
		memcpy(&phdr->hdr, hdr, hdrwords << 2);
		tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEDESC;
		tx->txreq.sg_count = ndesc;
		tx->txreq.addr = dev->pio_hdrs_phys +
			tx->hdr_inx * sizeof(struct qib_pio_header);
		tx->hdr_dwords = hdrwords + 2; /* add PBC length */
		ret = qib_sdma_verbs_send(ppd, ss, dwords, tx);
		goto bail;
	}

	/* Allocate a buffer and copy the header and payload to it. */
	tx->hdr_dwords = plen + 1;
	phdr = kmalloc(tx->hdr_dwords << 2, GFP_ATOMIC);
	if (!phdr)
		goto err_tx;
	phdr->pbc[0] = cpu_to_le32(plen);
	phdr->pbc[1] = cpu_to_le32(control);
	memcpy(&phdr->hdr, hdr, hdrwords << 2);
	qib_copy_from_sge((u32 *) &phdr->hdr + hdrwords, ss, len);

	tx->txreq.addr = dma_map_single(&dd->pcidev->dev, phdr,
					tx->hdr_dwords << 2, DMA_TO_DEVICE);
	if (dma_mapping_error(&dd->pcidev->dev, tx->txreq.addr))
		goto map_err;
	tx->align_buf = phdr;
	tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEBUF;
	tx->txreq.sg_count = 1;
	ret = qib_sdma_verbs_send(ppd, NULL, 0, tx);
	goto unaligned;

map_err:
	kfree(phdr);
err_tx:
	qib_put_txreq(tx);
	ret = wait_kmem(dev, qp);
unaligned:
	ibp->rvp.n_unaligned++;
bail:
	return ret;
bail_tx:
	ret = PTR_ERR(tx);
	goto bail;
}

/*
 * If we are now in the error state, return zero to flush the
 * send work request.
 */
static int no_bufs_available(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
	struct qib_devdata *dd;
	unsigned long flags;
	int ret = 0;

	/*
	 * Note that as soon as want_buffer() is called and
	 * possibly before it returns, qib_ib_piobufavail()
	 * could be called. Therefore, put QP on the I/O wait list before
	 * enabling the PIO avail interrupt.
	 */
	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
		spin_lock(&dev->pending_lock);
		if (list_empty(&priv->iowait)) {
			dev->n_piowait++;
			qp->s_flags |= QIB_S_WAIT_PIO;
			list_add_tail(&priv->iowait, &dev->piowait);
			dd = dd_from_dev(dev);
			dd->f_wantpiobuf_intr(dd, 1);
		}
		spin_unlock(&dev->pending_lock);
		qp->s_flags &= ~QIB_S_BUSY;
		ret = -EBUSY;
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return ret;
}
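
/*
 * Send a packet by programmed I/O.  The PBC (per-buffer control) is a
 * 64-bit word written ahead of the packet data: the low 32 bits carry
 * the packet length in dwords and the high 32 bits carry the control
 * word from f_setpbc_control(), i.e. pbc = ((u64) control << 32) | plen.
 */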
static int qib_verbs_send_pio(struct rvt_qp *qp, struct qib_ib_header *ibhdr,
			      u32 hdrwords, struct rvt_sge_state *ss, u32 len,
			      u32 plen, u32 dwords)
{
	struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	struct qib_pportdata *ppd = dd->pport + qp->port_num - 1;
	u32 *hdr = (u32 *) ibhdr;
	u32 __iomem *piobuf_orig;
	u32 __iomem *piobuf;
	u64 pbc;
	unsigned long flags;
	unsigned flush_wc;
	u32 control;
	u32 pbufn;

	control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
				       be16_to_cpu(ibhdr->lrh[0]) >> 12);
	pbc = ((u64) control << 32) | plen;
	piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
	if (unlikely(piobuf == NULL))
		return no_bufs_available(qp);

	/*
	 * Write the pbc.
	 * We have to flush after the PBC for correctness on some cpus
	 * or WC buffer can be written out of order.
	 */
	writeq(pbc, piobuf);
	piobuf_orig = piobuf;
	piobuf += 2;

	flush_wc = dd->flags & QIB_PIO_FLUSH_WC;
	if (len == 0) {
		/*
		 * If there is just the header portion, must flush before
		 * writing last word of header for correctness, and after
		 * the last header word (trigger word).
		 */
		if (flush_wc) {
			qib_flush_wc();
			qib_pio_copy(piobuf, hdr, hdrwords - 1);
			qib_flush_wc();
			__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
			qib_flush_wc();
		} else
			qib_pio_copy(piobuf, hdr, hdrwords);
		goto done;
	}

	if (flush_wc)
		qib_flush_wc();
	qib_pio_copy(piobuf, hdr, hdrwords);
	piobuf += hdrwords;

	/* The common case is aligned and contained in one segment. */
	if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
		   !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
		u32 *addr = (u32 *) ss->sge.vaddr;

		/* Update address before sending packet. */
		update_sge(ss, len);
		if (flush_wc) {
			qib_pio_copy(piobuf, addr, dwords - 1);
			/* must flush early everything before trigger word */
			qib_flush_wc();
			__raw_writel(addr[dwords - 1], piobuf + dwords - 1);
			/* be sure trigger word is written */
			qib_flush_wc();
		} else
			qib_pio_copy(piobuf, addr, dwords);
		goto done;
	}
	copy_io(piobuf, ss, len, flush_wc);
done:
	if (dd->flags & QIB_USE_SPCL_TRIG) {
		u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
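
		/*
		 * Chips that use a special trigger launch the packet when
		 * a magic value is written at a fixed offset into the
		 * buffer: 2047 dwords for the large buffers that follow
		 * the 2K ones, 1023 dwords otherwise.
		 */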
		qib_flush_wc();
		__raw_writel(0xaebecede, piobuf_orig + spcl_off);
	}
	qib_sendbuf_done(dd, pbufn);
	if (qp->s_rdma_mr) {
		rvt_put_mr(qp->s_rdma_mr);
		qp->s_rdma_mr = NULL;
	}
	if (qp->s_wqe) {
		spin_lock_irqsave(&qp->s_lock, flags);
		qib_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	} else if (qp->ibqp.qp_type == IB_QPT_RC) {
		spin_lock_irqsave(&qp->s_lock, flags);
		qib_rc_send_complete(qp, ibhdr);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	}
	return 0;
}

/**
 * qib_verbs_send - send a packet
 * @qp: the QP to send on
 * @hdr: the packet header
 * @hdrwords: the number of 32-bit words in the header
 * @ss: the SGE to send
 * @len: the length of the packet in bytes
 *
 * Return zero if packet is sent or queued OK.
 * Return non-zero and clear qp->s_flags QIB_S_BUSY otherwise.
 */
int qib_verbs_send(struct rvt_qp *qp, struct qib_ib_header *hdr,
		   u32 hdrwords, struct rvt_sge_state *ss, u32 len)
{
	struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	u32 plen;
	int ret;
	u32 dwords = (len + 3) >> 2;

	/*
	 * Calculate the send buffer trigger address.
	 * The +1 counts for the pbc control dword following the pbc length.
	 */
	plen = hdrwords + dwords + 1;

	/*
	 * VL15 packets (IB_QPT_SMI) will always use PIO, so we
	 * can defer SDMA restart until link goes ACTIVE without
	 * worrying about just how we got there.
	 */
	if (qp->ibqp.qp_type == IB_QPT_SMI ||
	    !(dd->flags & QIB_HAS_SEND_DMA))
		ret = qib_verbs_send_pio(qp, hdr, hdrwords, ss, len,
					 plen, dwords);
	else
		ret = qib_verbs_send_dma(qp, hdr, hdrwords, ss, len,
					 plen, dwords);

	return ret;
}

int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
			  u64 *rwords, u64 *spkts, u64 *rpkts,
			  u64 *xmit_wait)
{
	int ret;
	struct qib_devdata *dd = ppd->dd;

	if (!(dd->flags & QIB_PRESENT)) {
		/* no hardware, freeze, etc. */
		ret = -EINVAL;
		goto bail;
	}
	*swords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDSEND);
	*rwords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDRCV);
	*spkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTSEND);
	*rpkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTRCV);
	*xmit_wait = dd->f_portcntr(ppd, QIBPORTCNTR_SENDSTALL);

	ret = 0;

bail:
	return ret;
}

/**
 * qib_get_counters - get various chip counters
 * @ppd: the qlogic_ib port
 * @cntrs: counters are placed here
 *
 * Return the counters needed by recv_pma_get_portcounters().
 */
int qib_get_counters(struct qib_pportdata *ppd,
		     struct qib_verbs_counters *cntrs)
{
	int ret;

	if (!(ppd->dd->flags & QIB_PRESENT)) {
		/* no hardware, freeze, etc. */
		ret = -EINVAL;
		goto bail;
	}
	cntrs->symbol_error_counter =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);
	cntrs->link_error_recovery_counter =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKERRRECOV);
	/*
	 * The link downed counter counts when the other side downs the
	 * connection. We add in the number of times we downed the link
	 * due to local link integrity errors to compensate.
	 */
	cntrs->link_downed_counter =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKDOWN);
	cntrs->port_rcv_errors =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXDROPPKT) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVOVFL) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERR_RLEN) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_INVALIDRLEN) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLINK) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRICRC) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRVCRC) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLPCRC) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_BADFORMAT);
	cntrs->port_rcv_errors +=
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXLOCALPHYERR);
	cntrs->port_rcv_errors +=
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXVLERR);
	cntrs->port_rcv_remphys_errors =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVEBP);
	cntrs->port_xmit_discards =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_UNSUPVL);
	cntrs->port_xmit_data = ppd->dd->f_portcntr(ppd,
						    QIBPORTCNTR_WORDSEND);
	cntrs->port_rcv_data = ppd->dd->f_portcntr(ppd,
						   QIBPORTCNTR_WORDRCV);
	cntrs->port_xmit_packets = ppd->dd->f_portcntr(ppd,
						       QIBPORTCNTR_PKTSEND);
	cntrs->port_rcv_packets = ppd->dd->f_portcntr(ppd,
						      QIBPORTCNTR_PKTRCV);
	cntrs->local_link_integrity_errors =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_LLI);
	cntrs->excessive_buffer_overrun_errors =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_EXCESSBUFOVFL);
	cntrs->vl15_dropped =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_VL15PKTDROP);

	ret = 0;

bail:
	return ret;
}

/**
 * qib_ib_piobufavail - callback when a PIO buffer is available
 * @dd: the device pointer
 *
 * This is called from qib_intr() at interrupt level when a PIO buffer is
 * available after qib_verbs_send() returned an error that no buffers were
 * available. Disable the interrupt if there are no more QPs waiting.
 */
void qib_ib_piobufavail(struct qib_devdata *dd)
{
	struct qib_ibdev *dev = &dd->verbs_dev;
	struct list_head *list;
	struct rvt_qp *qps[5];
	struct rvt_qp *qp;
	unsigned long flags;
	unsigned i, n;
	struct qib_qp_priv *priv;

	list = &dev->piowait;
	n = 0;

	/*
	 * Note: checking that the piowait list is empty and clearing
	 * the buffer available interrupt needs to be atomic or we
	 * could end up with QPs on the wait list with the interrupt
	 * disabled.
	 */
	spin_lock_irqsave(&dev->pending_lock, flags);
	while (!list_empty(list)) {
		if (n == ARRAY_SIZE(qps))
			goto full;
		priv = list_entry(list->next, struct qib_qp_priv, iowait);
		qp = priv->owner;
		list_del_init(&priv->iowait);
		atomic_inc(&qp->refcount);
		qps[n++] = qp;
	}
	dd->f_wantpiobuf_intr(dd, 0);
full:
	spin_unlock_irqrestore(&dev->pending_lock, flags);
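
	/*
	 * Wake the QPs after pending_lock is dropped: qib_schedule_send()
	 * takes s_lock, and the reference taken above keeps each QP from
	 * being destroyed underneath us.
	 */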
	for (i = 0; i < n; i++) {
		qp = qps[i];

		spin_lock_irqsave(&qp->s_lock, flags);
		if (qp->s_flags & QIB_S_WAIT_PIO) {
			qp->s_flags &= ~QIB_S_WAIT_PIO;
			qib_schedule_send(qp);
		}
		spin_unlock_irqrestore(&qp->s_lock, flags);

		/* Notify qib_destroy_qp() if it is waiting. */
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}

static int qib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
			    struct ib_udata *uhw)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	struct qib_ibdev *dev = to_idev(ibdev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;
	memset(props, 0, sizeof(*props));

	props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
		IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
		IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
		IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
	props->page_size_cap = PAGE_SIZE;
	props->vendor_id =
		QIB_SRC_OUI_1 << 16 | QIB_SRC_OUI_2 << 8 | QIB_SRC_OUI_3;
	props->vendor_part_id = dd->deviceid;
	props->hw_ver = dd->minrev;
	props->sys_image_guid = ib_qib_sys_image_guid;
	props->max_mr_size = ~0ULL;
	props->max_qp = ib_qib_max_qps;
	props->max_qp_wr = ib_qib_max_qp_wrs;
	props->max_sge = ib_qib_max_sges;
	props->max_sge_rd = ib_qib_max_sges;
	props->max_cq = ib_qib_max_cqs;
	props->max_ah = ib_qib_max_ahs;
	props->max_cqe = ib_qib_max_cqes;
	props->max_mr = dev->rdi.lkey_table.max;
	props->max_fmr = dev->rdi.lkey_table.max;
	props->max_map_per_fmr = 32767;
	props->max_pd = dev->rdi.dparms.props.max_pd;
	props->max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC;
	props->max_qp_init_rd_atom = 255;
	/* props->max_res_rd_atom */
	props->max_srq = ib_qib_max_srqs;
	props->max_srq_wr = ib_qib_max_srq_wrs;
	props->max_srq_sge = ib_qib_max_srq_sges;
	/* props->local_ca_ack_delay */
	props->atomic_cap = IB_ATOMIC_GLOB;
	props->max_pkeys = qib_get_npkeys(dd);
	props->max_mcast_grp = ib_qib_max_mcast_grps;
	props->max_mcast_qp_attach = ib_qib_max_mcast_qp_attached;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
		props->max_mcast_grp;

	return 0;
}

static int qib_query_port(struct ib_device *ibdev, u8 port,
			  struct ib_port_attr *props)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	enum ib_mtu mtu;
	u16 lid = ppd->lid;

	memset(props, 0, sizeof(*props));
	props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE);
	props->lmc = ppd->lmc;
	props->sm_lid = ibp->rvp.sm_lid;
	props->sm_sl = ibp->rvp.sm_sl;
	props->state = dd->f_iblink_state(ppd->lastibcstat);
	props->phys_state = dd->f_ibphys_portstate(ppd->lastibcstat);
	props->port_cap_flags = ibp->rvp.port_cap_flags;
	props->gid_tbl_len = QIB_GUIDS_PER_PORT;
	props->max_msg_sz = 0x80000000;
	props->pkey_tbl_len = qib_get_npkeys(dd);
	props->bad_pkey_cntr = ibp->rvp.pkey_violations;
	props->qkey_viol_cntr = ibp->rvp.qkey_violations;
	props->active_width = ppd->link_width_active;
	/* See rate_show() */
	props->active_speed = ppd->link_speed_active;
	props->max_vl_num = qib_num_vls(ppd->vls_supported);
	props->init_type_reply = 0;

	props->max_mtu = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
	switch (ppd->ibmtu) {
	case 4096:
		mtu = IB_MTU_4096;
		break;
	case 2048:
		mtu = IB_MTU_2048;
		break;
	case 1024:
		mtu = IB_MTU_1024;
		break;
	case 512:
		mtu = IB_MTU_512;
		break;
	case 256:
		mtu = IB_MTU_256;
		break;
	default:
		mtu = IB_MTU_2048;
	}
	props->active_mtu = mtu;
	props->subnet_timeout = ibp->rvp.subnet_timeout;

	return 0;
}

static int qib_modify_device(struct ib_device *device,
			     int device_modify_mask,
			     struct ib_device_modify *device_modify)
{
	struct qib_devdata *dd = dd_from_ibdev(device);
	unsigned i;
	int ret;

	if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
				   IB_DEVICE_MODIFY_NODE_DESC)) {
		ret = -EOPNOTSUPP;
		goto bail;
	}

	if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(device->node_desc, device_modify->node_desc, 64);
		for (i = 0; i < dd->num_pports; i++) {
			struct qib_ibport *ibp = &dd->pport[i].ibport_data;

			qib_node_desc_chg(ibp);
		}
	}

	if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
		ib_qib_sys_image_guid =
			cpu_to_be64(device_modify->sys_image_guid);
		for (i = 0; i < dd->num_pports; i++) {
			struct qib_ibport *ibp = &dd->pport[i].ibport_data;

			qib_sys_guid_chg(ibp);
		}
	}

	ret = 0;

bail:
	return ret;
}

static int qib_modify_port(struct ib_device *ibdev, u8 port,
			   int port_modify_mask, struct ib_port_modify *props)
{
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);

	ibp->rvp.port_cap_flags |= props->set_port_cap_mask;
	ibp->rvp.port_cap_flags &= ~props->clr_port_cap_mask;
	if (props->set_port_cap_mask || props->clr_port_cap_mask)
		qib_cap_mask_chg(ibp);
	if (port_modify_mask & IB_PORT_SHUTDOWN)
		qib_set_linkstate(ppd, QIB_IB_LINKDOWN);
	if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
		ibp->rvp.qkey_violations = 0;
	return 0;
}

static int qib_query_gid(struct ib_device *ibdev, u8 port,
			 int index, union ib_gid *gid)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	int ret = 0;

	if (!port || port > dd->num_pports)
		ret = -EINVAL;
	else {
		struct qib_ibport *ibp = to_iport(ibdev, port);
		struct qib_pportdata *ppd = ppd_from_ibp(ibp);

		gid->global.subnet_prefix = ibp->rvp.gid_prefix;
		if (index == 0)
			gid->global.interface_id = ppd->guid;
		else if (index < QIB_GUIDS_PER_PORT)
			gid->global.interface_id = ibp->guids[index - 1];
		else
			ret = -EINVAL;
	}

	return ret;
}

int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr)
{
	if (ah_attr->sl > 15)
		return -EINVAL;

	return 0;
}
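
/*
 * Called by rdmavt while a new address handle is being set up: fill in
 * the driver-specific fields (VL and log of the path MTU) that depend
 * on port state.  The attributes were already validated by check_ah().
 */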
static void qib_notify_new_ah(struct ib_device *ibdev,
			      struct ib_ah_attr *ah_attr,
			      struct rvt_ah *ah)
{
	struct qib_ibport *ibp;
	struct qib_pportdata *ppd;

	/*
	 * Do not trust reading anything from rvt_ah at this point as it is not
	 * done being setup. We can however modify things which we need to set.
	 */

	ibp = to_iport(ibdev, ah_attr->port_num);
	ppd = ppd_from_ibp(ibp);
	ah->vl = ibp->sl_to_vl[ah->attr.sl];
	ah->log_pmtu = ilog2(ppd->ibmtu);
}

struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid)
{
	struct ib_ah_attr attr;
	struct ib_ah *ah = ERR_PTR(-EINVAL);
	struct rvt_qp *qp0;

	memset(&attr, 0, sizeof(attr));
	attr.dlid = dlid;
	attr.port_num = ppd_from_ibp(ibp)->port;
	rcu_read_lock();
	qp0 = rcu_dereference(ibp->rvp.qp[0]);
	if (qp0)
		ah = ib_create_ah(qp0->ibqp.pd, &attr);
	rcu_read_unlock();
	return ah;
}

/**
 * qib_get_npkeys - return the size of the PKEY table for context 0
 * @dd: the qlogic_ib device
 */
unsigned qib_get_npkeys(struct qib_devdata *dd)
{
	return ARRAY_SIZE(dd->rcd[0]->pkeys);
}

/*
 * Return the indexed PKEY from the port PKEY table.
 * No need to validate rcd[ctxt]; the port is setup if we are here.
 */
unsigned qib_get_pkey(struct qib_ibport *ibp, unsigned index)
{
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = ppd->dd;
	unsigned ctxt = ppd->hw_pidx;
	unsigned ret;

	/* dd->rcd null if mini_init or some init failures */
	if (!dd->rcd || index >= ARRAY_SIZE(dd->rcd[ctxt]->pkeys))
		ret = 0;
	else
		ret = dd->rcd[ctxt]->pkeys[index];

	return ret;
}

static int qib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			  u16 *pkey)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	int ret;

	if (index >= qib_get_npkeys(dd)) {
		ret = -EINVAL;
		goto bail;
	}

	*pkey = qib_get_pkey(to_iport(ibdev, port), index);
	ret = 0;

bail:
	return ret;
}

/**
 * qib_alloc_ucontext - allocate a ucontext
 * @ibdev: the infiniband device
 * @udata: not used by the QLogic_IB driver
 */
static struct ib_ucontext *qib_alloc_ucontext(struct ib_device *ibdev,
					      struct ib_udata *udata)
{
	struct qib_ucontext *context;
	struct ib_ucontext *ret;

	context = kmalloc(sizeof(*context), GFP_KERNEL);
	if (!context) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	ret = &context->ibucontext;

bail:
	return ret;
}

static int qib_dealloc_ucontext(struct ib_ucontext *context)
{
	kfree(to_iucontext(context));
	return 0;
}
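
/*
 * One-time initialization of the per-port IB state: default GID prefix,
 * port capability mask, PMA counter selects, and a snapshot of the
 * hardware counters so later reads are reported against a zero baseline.
 */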
static void init_ibport(struct qib_pportdata *ppd)
{
	struct qib_verbs_counters cntrs;
	struct qib_ibport *ibp = &ppd->ibport_data;

	spin_lock_init(&ibp->rvp.lock);
	/* Set the prefix to the default value (see ch. 4.1.1) */
	ibp->rvp.gid_prefix = IB_DEFAULT_GID_PREFIX;
	ibp->rvp.sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
	ibp->rvp.port_cap_flags = IB_PORT_SYS_IMAGE_GUID_SUP |
		IB_PORT_CLIENT_REG_SUP | IB_PORT_SL_MAP_SUP |
		IB_PORT_TRAP_SUP | IB_PORT_AUTO_MIGR_SUP |
		IB_PORT_DR_NOTICE_SUP | IB_PORT_CAP_MASK_NOTICE_SUP |
		IB_PORT_OTHER_LOCAL_CHANGES_SUP;
	if (ppd->dd->flags & QIB_HAS_LINK_LATENCY)
		ibp->rvp.port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
	ibp->rvp.pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
	ibp->rvp.pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
	ibp->rvp.pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
	ibp->rvp.pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
	ibp->rvp.pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;

	/* Snapshot current HW counters to "clear" them. */
	qib_get_counters(ppd, &cntrs);
	ibp->z_symbol_error_counter = cntrs.symbol_error_counter;
	ibp->z_link_error_recovery_counter =
		cntrs.link_error_recovery_counter;
	ibp->z_link_downed_counter = cntrs.link_downed_counter;
	ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
	ibp->z_port_rcv_remphys_errors = cntrs.port_rcv_remphys_errors;
	ibp->z_port_xmit_discards = cntrs.port_xmit_discards;
	ibp->z_port_xmit_data = cntrs.port_xmit_data;
	ibp->z_port_rcv_data = cntrs.port_rcv_data;
	ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
	ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
	ibp->z_local_link_integrity_errors =
		cntrs.local_link_integrity_errors;
	ibp->z_excessive_buffer_overrun_errors =
		cntrs.excessive_buffer_overrun_errors;
	ibp->z_vl15_dropped = cntrs.vl15_dropped;
	RCU_INIT_POINTER(ibp->rvp.qp[0], NULL);
	RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
}

static int qib_port_immutable(struct ib_device *ibdev, u8 port_num,
			      struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = qib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

/**
 * qib_register_ib_device - register our device with the infiniband core
 * @dd: the device data structure
 *
 * Return 0 on success, a negative errno on failure.
 */
int qib_register_ib_device(struct qib_devdata *dd)
{
	struct qib_ibdev *dev = &dd->verbs_dev;
	struct ib_device *ibdev = &dev->rdi.ibdev;
	struct qib_pportdata *ppd = dd->pport;
	unsigned i;
	int ret;

	dev->qp_table_size = ib_qib_qp_table_size;
	get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd));
	dev->qp_table = kmalloc_array(
				dev->qp_table_size,
				sizeof(*dev->qp_table),
				GFP_KERNEL);
	if (!dev->qp_table) {
		ret = -ENOMEM;
		goto err_qpt;
	}
	for (i = 0; i < dev->qp_table_size; i++)
		RCU_INIT_POINTER(dev->qp_table[i], NULL);

	for (i = 0; i < dd->num_pports; i++)
		init_ibport(ppd + i);

	/* Only need to initialize non-zero fields. */
	spin_lock_init(&dev->qpt_lock);
	spin_lock_init(&dev->n_cqs_lock);
	spin_lock_init(&dev->n_qps_lock);
	spin_lock_init(&dev->n_srqs_lock);
	spin_lock_init(&dev->n_mcast_grps_lock);
	init_timer(&dev->mem_timer);
	dev->mem_timer.function = mem_timer;
	dev->mem_timer.data = (unsigned long) dev;

	qib_init_qpn_table(dd, &dev->qpn_table);

	INIT_LIST_HEAD(&dev->pending_mmaps);
	spin_lock_init(&dev->pending_lock);
	dev->mmap_offset = PAGE_SIZE;
	spin_lock_init(&dev->mmap_offset_lock);
	INIT_LIST_HEAD(&dev->piowait);
	INIT_LIST_HEAD(&dev->dmawait);
	INIT_LIST_HEAD(&dev->txwait);
	INIT_LIST_HEAD(&dev->memwait);
	INIT_LIST_HEAD(&dev->txreq_free);

	if (ppd->sdma_descq_cnt) {
		dev->pio_hdrs = dma_alloc_coherent(&dd->pcidev->dev,
						   ppd->sdma_descq_cnt *
						   sizeof(struct qib_pio_header),
						   &dev->pio_hdrs_phys,
						   GFP_KERNEL);
		if (!dev->pio_hdrs) {
			ret = -ENOMEM;
			goto err_hdrs;
		}
	}

	for (i = 0; i < ppd->sdma_descq_cnt; i++) {
		struct qib_verbs_txreq *tx;

		tx = kzalloc(sizeof(*tx), GFP_KERNEL);
		if (!tx) {
			ret = -ENOMEM;
			goto err_tx;
		}
		tx->hdr_inx = i;
		list_add(&tx->txreq.list, &dev->txreq_free);
	}

	/*
	 * The system image GUID is supposed to be the same for all
	 * IB HCAs in a single system but since there can be other
	 * device types in the system, we can't be sure this is unique.
	 */
	if (!ib_qib_sys_image_guid)
		ib_qib_sys_image_guid = ppd->guid;

	strlcpy(ibdev->name, "qib%d", IB_DEVICE_NAME_MAX);
	ibdev->owner = THIS_MODULE;
	ibdev->node_guid = ppd->guid;
	ibdev->uverbs_abi_ver = QIB_UVERBS_ABI_VERSION;
	ibdev->uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_CREATE_AH) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_AH) |
		(1ull << IB_USER_VERBS_CMD_QUERY_AH) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_POLL_CQ) |
		(1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_POST_SEND) |
		(1ull << IB_USER_VERBS_CMD_POST_RECV) |
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
	ibdev->node_type = RDMA_NODE_IB_CA;
	ibdev->phys_port_cnt = dd->num_pports;
	ibdev->num_comp_vectors = 1;
	ibdev->dma_device = &dd->pcidev->dev;
	ibdev->query_device = qib_query_device;
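	/*
	 * Entry points assigned NULL below are not unimplemented: they
	 * are expected to be serviced by rdmavt once the device is
	 * registered with rvt_register_device() further down.
	 */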
	ibdev->modify_device = qib_modify_device;
	ibdev->query_port = qib_query_port;
	ibdev->modify_port = qib_modify_port;
	ibdev->query_pkey = qib_query_pkey;
	ibdev->query_gid = qib_query_gid;
	ibdev->alloc_ucontext = qib_alloc_ucontext;
	ibdev->dealloc_ucontext = qib_dealloc_ucontext;
	ibdev->alloc_pd = NULL;
	ibdev->dealloc_pd = NULL;
	ibdev->create_ah = NULL;
	ibdev->destroy_ah = NULL;
	ibdev->modify_ah = NULL;
	ibdev->query_ah = NULL;
	ibdev->create_srq = qib_create_srq;
	ibdev->modify_srq = qib_modify_srq;
	ibdev->query_srq = qib_query_srq;
	ibdev->destroy_srq = qib_destroy_srq;
	ibdev->create_qp = qib_create_qp;
	ibdev->modify_qp = qib_modify_qp;
	ibdev->query_qp = qib_query_qp;
	ibdev->destroy_qp = qib_destroy_qp;
	ibdev->post_send = qib_post_send;
	ibdev->post_recv = qib_post_receive;
	ibdev->post_srq_recv = qib_post_srq_receive;
	ibdev->create_cq = qib_create_cq;
	ibdev->destroy_cq = qib_destroy_cq;
	ibdev->resize_cq = qib_resize_cq;
	ibdev->poll_cq = qib_poll_cq;
	ibdev->req_notify_cq = qib_req_notify_cq;
	ibdev->get_dma_mr = NULL;
	ibdev->reg_user_mr = NULL;
	ibdev->dereg_mr = NULL;
	ibdev->alloc_mr = NULL;
	ibdev->map_mr_sg = NULL;
	ibdev->alloc_fmr = NULL;
	ibdev->map_phys_fmr = NULL;
	ibdev->unmap_fmr = NULL;
	ibdev->dealloc_fmr = NULL;
	ibdev->attach_mcast = qib_multicast_attach;
	ibdev->detach_mcast = qib_multicast_detach;
	ibdev->process_mad = qib_process_mad;
	ibdev->mmap = qib_mmap;
	ibdev->dma_ops = NULL;
	ibdev->get_port_immutable = qib_port_immutable;

	snprintf(ibdev->node_desc, sizeof(ibdev->node_desc),
		 "Intel Infiniband HCA %s", init_utsname()->nodename);

	/*
	 * Fill in rvt info object.
	 */
	dd->verbs_dev.rdi.driver_f.port_callback = qib_create_port_files;
	dd->verbs_dev.rdi.driver_f.get_card_name = qib_get_card_name;
	dd->verbs_dev.rdi.driver_f.get_pci_dev = qib_get_pci_dev;
	dd->verbs_dev.rdi.driver_f.check_ah = qib_check_ah;
	dd->verbs_dev.rdi.driver_f.notify_new_ah = qib_notify_new_ah;
	dd->verbs_dev.rdi.dparms.props.max_pd = ib_qib_max_pds;
	dd->verbs_dev.rdi.dparms.props.max_ah = ib_qib_max_ahs;
	dd->verbs_dev.rdi.flags = (RVT_FLAG_QP_INIT_DRIVER |
				   RVT_FLAG_CQ_INIT_DRIVER);
	dd->verbs_dev.rdi.dparms.lkey_table_size = qib_lkey_table_size;

	ret = rvt_register_device(&dd->verbs_dev.rdi);
	if (ret)
		goto err_reg;

	ret = qib_create_agents(dev);
	if (ret)
		goto err_agents;

	ret = qib_verbs_register_sysfs(dd);
	if (ret)
		goto err_class;

	goto bail;

err_class:
	qib_free_agents(dev);
err_agents:
	rvt_unregister_device(&dd->verbs_dev.rdi);
err_reg:
err_tx:
	while (!list_empty(&dev->txreq_free)) {
		struct list_head *l = dev->txreq_free.next;
		struct qib_verbs_txreq *tx;

		list_del(l);
		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
		kfree(tx);
	}
	if (ppd->sdma_descq_cnt)
		dma_free_coherent(&dd->pcidev->dev,
				  ppd->sdma_descq_cnt *
				  sizeof(struct qib_pio_header),
				  dev->pio_hdrs, dev->pio_hdrs_phys);
err_hdrs:
	kfree(dev->qp_table);
err_qpt:
	qib_dev_err(dd, "cannot register verbs: %d!\n", -ret);
bail:
	return ret;
}

void qib_unregister_ib_device(struct qib_devdata *dd)
{
	struct qib_ibdev *dev = &dd->verbs_dev;
	u32 qps_inuse;

	qib_verbs_unregister_sysfs(dd);

	qib_free_agents(dev);

	rvt_unregister_device(&dd->verbs_dev.rdi);

	if (!list_empty(&dev->piowait))
		qib_dev_err(dd, "piowait list not empty!\n");
	if (!list_empty(&dev->dmawait))
		qib_dev_err(dd, "dmawait list not empty!\n");
	if (!list_empty(&dev->txwait))
		qib_dev_err(dd, "txwait list not empty!\n");
	if (!list_empty(&dev->memwait))
		qib_dev_err(dd, "memwait list not empty!\n");

	qps_inuse = qib_free_all_qps(dd);
	if (qps_inuse)
		qib_dev_err(dd, "QP memory leak! %u still in use\n",
			    qps_inuse);

	del_timer_sync(&dev->mem_timer);
	qib_free_qpn_table(&dev->qpn_table);
	while (!list_empty(&dev->txreq_free)) {
		struct list_head *l = dev->txreq_free.next;
		struct qib_verbs_txreq *tx;

		list_del(l);
		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
		kfree(tx);
	}
	if (dd->pport->sdma_descq_cnt)
		dma_free_coherent(&dd->pcidev->dev,
				  dd->pport->sdma_descq_cnt *
				  sizeof(struct qib_pio_header),
				  dev->pio_hdrs, dev->pio_hdrs_phys);
	kfree(dev->qp_table);
}

/*
 * This must be called with s_lock held.
 */
void qib_schedule_send(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	if (qib_send_ok(qp)) {
		struct qib_ibport *ibp =
			to_iport(qp->ibqp.device, qp->port_num);
		struct qib_pportdata *ppd = ppd_from_ibp(ibp);

		queue_work(ppd->qib_wq, &priv->s_work);
	}
}
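
/*
 * A minimal sketch of the expected calling pattern (the surrounding
 * logic is illustrative, not taken from this file); callers clear the
 * relevant wait flag and reschedule while holding s_lock, e.g.:
 *
 *	spin_lock_irqsave(&qp->s_lock, flags);
 *	qp->s_flags &= ~QIB_S_WAIT_TX;
 *	qib_schedule_send(qp);
 *	spin_unlock_irqrestore(&qp->s_lock, flags);
 */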