/*
 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
 * All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_mad.h>
#include <rdma/ib_user_verbs.h>
#include <linux/io.h>
#include <linux/utsname.h>
#include <linux/rculist.h>
#include <linux/mm.h>

#include "qib.h"
#include "qib_common.h"

static unsigned int ib_qib_qp_table_size = 251;
module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");

unsigned int ib_qib_lkey_table_size = 16;
module_param_named(lkey_table_size, ib_qib_lkey_table_size, uint,
		   S_IRUGO);
MODULE_PARM_DESC(lkey_table_size,
		 "LKEY table size in bits (2^n, 1 <= n <= 23)");

static unsigned int ib_qib_max_pds = 0xFFFF;
module_param_named(max_pds, ib_qib_max_pds, uint, S_IRUGO);
MODULE_PARM_DESC(max_pds,
		 "Maximum number of protection domains to support");

static unsigned int ib_qib_max_ahs = 0xFFFF;
module_param_named(max_ahs, ib_qib_max_ahs, uint, S_IRUGO);
MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");

unsigned int ib_qib_max_cqes = 0x2FFFF;
module_param_named(max_cqes, ib_qib_max_cqes, uint, S_IRUGO);
MODULE_PARM_DESC(max_cqes,
		 "Maximum number of completion queue entries to support");

unsigned int ib_qib_max_cqs = 0x1FFFF;
module_param_named(max_cqs, ib_qib_max_cqs, uint, S_IRUGO);
MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");

unsigned int ib_qib_max_qp_wrs = 0x3FFF;
module_param_named(max_qp_wrs, ib_qib_max_qp_wrs, uint, S_IRUGO);
MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");

unsigned int ib_qib_max_qps = 16384;
module_param_named(max_qps, ib_qib_max_qps, uint, S_IRUGO);
MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");

unsigned int ib_qib_max_sges = 0x60;
module_param_named(max_sges, ib_qib_max_sges, uint, S_IRUGO);
MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");

unsigned int ib_qib_max_mcast_grps = 16384;
module_param_named(max_mcast_grps, ib_qib_max_mcast_grps, uint, S_IRUGO);
MODULE_PARM_DESC(max_mcast_grps,
		 "Maximum number of multicast groups to support");
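
/*
 * All of the limits above are read-only module parameters (S_IRUGO):
 * they can be overridden when the driver is loaded, e.g.
 *
 *	modprobe ib_qib max_qps=32768 lkey_table_size=17
 *
 * but cannot be changed through sysfs afterwards.  disable_sma below
 * is the one writable exception (S_IWUSR | S_IRUGO).
 */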

unsigned int ib_qib_max_mcast_qp_attached = 16;
module_param_named(max_mcast_qp_attached, ib_qib_max_mcast_qp_attached,
		   uint, S_IRUGO);
MODULE_PARM_DESC(max_mcast_qp_attached,
		 "Maximum number of attached QPs to support");

unsigned int ib_qib_max_srqs = 1024;
module_param_named(max_srqs, ib_qib_max_srqs, uint, S_IRUGO);
MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");

unsigned int ib_qib_max_srq_sges = 128;
module_param_named(max_srq_sges, ib_qib_max_srq_sges, uint, S_IRUGO);
MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");

unsigned int ib_qib_max_srq_wrs = 0x1FFFF;
module_param_named(max_srq_wrs, ib_qib_max_srq_wrs, uint, S_IRUGO);
MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");

static unsigned int ib_qib_disable_sma;
module_param_named(disable_sma, ib_qib_disable_sma, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(disable_sma, "Disable the SMA");

/*
 * Note that it is OK to post send work requests in the SQE and ERR
 * states; qib_do_send() will process them and generate error
 * completions as per IB 1.2 C10-96.
 */
const int ib_qib_state_ops[IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = 0,
	[IB_QPS_INIT] = QIB_POST_RECV_OK,
	[IB_QPS_RTR] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK,
	[IB_QPS_RTS] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
	    QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK |
	    QIB_PROCESS_NEXT_SEND_OK,
	[IB_QPS_SQD] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
	    QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK,
	[IB_QPS_SQE] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
	    QIB_POST_SEND_OK | QIB_FLUSH_SEND,
	[IB_QPS_ERR] = QIB_POST_RECV_OK | QIB_FLUSH_RECV |
	    QIB_POST_SEND_OK | QIB_FLUSH_SEND,
};
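
/*
 * The table above is consulted wherever a QP state check is needed,
 * replacing per-state switch statements with a single flag test, e.g.:
 *
 *	if (!(ib_qib_state_ops[qp->state] & QIB_POST_SEND_OK))
 *		goto bail_inval;
 */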

struct qib_ucontext {
	struct ib_ucontext ibucontext;
};

static inline struct qib_ucontext *to_iucontext(struct ib_ucontext
						*ibucontext)
{
	return container_of(ibucontext, struct qib_ucontext, ibucontext);
}

/*
 * Translate ib_wr_opcode into ib_wc_opcode.
 */
const enum ib_wc_opcode ib_qib_wc_opcode[] = {
	[IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
	[IB_WR_SEND] = IB_WC_SEND,
	[IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
	[IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
	[IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
};

/*
 * System image GUID.
 */
__be64 ib_qib_sys_image_guid;

/**
 * qib_copy_sge - copy data to SGE memory
 * @ss: the SGE state
 * @data: the data to copy
 * @length: the length of the data
 * @release: if true, drop MR references as SGEs are consumed
 */
void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length, int release)
{
	struct qib_sge *sge = &ss->sge;

	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		memcpy(sge->vaddr, data, len);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (release)
				atomic_dec(&sge->mr->refcount);
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= QIB_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		data += len;
		length -= len;
	}
}
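
/*
 * Note on the loop above (and its near-duplicates below): the SGE list
 * is walked lazily.  When the current SGE is used up (sge_length == 0)
 * the next entry is loaded from sg_list; when only the current MR
 * segment is used up (length == 0) vaddr/length are refreshed from the
 * MR's two-level map[m]->segs[n] table, QIB_SEGSZ segments per map
 * entry.  A zero lkey denotes the reserved DMA MR, whose memory is
 * virtually contiguous, so it skips the refresh.
 */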

/**
 * qib_skip_sge - skip over SGE memory - XXX almost dup of prev func
 * @ss: the SGE state
 * @length: the number of bytes to skip
 * @release: if true, drop MR references as SGEs are consumed
 */
void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release)
{
	struct qib_sge *sge = &ss->sge;

	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (release)
				atomic_dec(&sge->mr->refcount);
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= QIB_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		length -= len;
	}
}

/*
 * Count the number of DMA descriptors needed to send length bytes of data.
 * Don't modify the qib_sge_state to get the count.
 * Return zero if any of the segments is not aligned.
 */
static u32 qib_count_sge(struct qib_sge_state *ss, u32 length)
{
	struct qib_sge *sg_list = ss->sg_list;
	struct qib_sge sge = ss->sge;
	u8 num_sge = ss->num_sge;
	u32 ndesc = 1;	/* count the header */

	while (length) {
		u32 len = sge.length;

		if (len > length)
			len = length;
		if (len > sge.sge_length)
			len = sge.sge_length;
		BUG_ON(len == 0);
		if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
		    (len != length && (len & (sizeof(u32) - 1)))) {
			ndesc = 0;
			break;
		}
		ndesc++;
		sge.vaddr += len;
		sge.length -= len;
		sge.sge_length -= len;
		if (sge.sge_length == 0) {
			if (--num_sge)
				sge = *sg_list++;
		} else if (sge.length == 0 && sge.mr->lkey) {
			if (++sge.n >= QIB_SEGSZ) {
				if (++sge.m >= sge.mr->mapsz)
					break;
				sge.n = 0;
			}
			sge.vaddr =
				sge.mr->map[sge.m]->segs[sge.n].vaddr;
			sge.length =
				sge.mr->map[sge.m]->segs[sge.n].length;
		}
		length -= len;
	}
	return ndesc;
}
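
/*
 * The alignment rule in qib_count_sge() tracks what the send DMA
 * engine can gather directly: every segment must start on a dword
 * boundary, and all but the final segment must be a whole number of
 * dwords.  Otherwise zero is returned and the caller falls back to
 * copying the payload into a bounce buffer (see qib_verbs_send_dma()
 * below).
 */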

/*
 * Copy from the SGEs to the data buffer.
 */
static void qib_copy_from_sge(void *data, struct qib_sge_state *ss, u32 length)
{
	struct qib_sge *sge = &ss->sge;

	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		memcpy(data, sge->vaddr, len);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= QIB_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		data += len;
		length -= len;
	}
}

/**
 * qib_post_one_send - post one RC, UC, or UD send work request
 * @qp: the QP to post on
 * @wr: the work request to send
 */
static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr)
{
	struct qib_swqe *wqe;
	u32 next;
	int i;
	int j;
	int acc;
	int ret;
	unsigned long flags;
	struct qib_lkey_table *rkt;
	struct qib_pd *pd;

	spin_lock_irqsave(&qp->s_lock, flags);

	/* Check that state is OK to post send. */
	if (unlikely(!(ib_qib_state_ops[qp->state] & QIB_POST_SEND_OK)))
		goto bail_inval;

	/* IB spec says that num_sge == 0 is OK. */
	if (wr->num_sge > qp->s_max_sge)
		goto bail_inval;

	/*
	 * Don't allow RDMA reads or atomic operations on UC or
	 * undefined operations.
	 * Make sure buffer is large enough to hold the result for atomics.
	 */
	if (wr->opcode == IB_WR_FAST_REG_MR) {
		if (qib_fast_reg_mr(qp, wr))
			goto bail_inval;
	} else if (qp->ibqp.qp_type == IB_QPT_UC) {
		if ((unsigned) wr->opcode >= IB_WR_RDMA_READ)
			goto bail_inval;
	} else if (qp->ibqp.qp_type != IB_QPT_RC) {
		/* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */
		if (wr->opcode != IB_WR_SEND &&
		    wr->opcode != IB_WR_SEND_WITH_IMM)
			goto bail_inval;
		/* Check UD destination address PD */
		if (qp->ibqp.pd != wr->wr.ud.ah->pd)
			goto bail_inval;
	} else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD)
		goto bail_inval;
	else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
		 (wr->num_sge == 0 ||
		  wr->sg_list[0].length < sizeof(u64) ||
		  wr->sg_list[0].addr & (sizeof(u64) - 1)))
		goto bail_inval;
	else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic)
		goto bail_inval;

	next = qp->s_head + 1;
	if (next >= qp->s_size)
		next = 0;
	if (next == qp->s_last) {
		ret = -ENOMEM;
		goto bail;
	}

	rkt = &to_idev(qp->ibqp.device)->lk_table;
	pd = to_ipd(qp->ibqp.pd);
	wqe = get_swqe_ptr(qp, qp->s_head);
	wqe->wr = *wr;
	wqe->length = 0;
	j = 0;
	if (wr->num_sge) {
		acc = wr->opcode >= IB_WR_RDMA_READ ?
			IB_ACCESS_LOCAL_WRITE : 0;
		for (i = 0; i < wr->num_sge; i++) {
			u32 length = wr->sg_list[i].length;
			int ok;

			if (length == 0)
				continue;
			ok = qib_lkey_ok(rkt, pd, &wqe->sg_list[j],
					 &wr->sg_list[i], acc);
			if (!ok)
				goto bail_inval_free;
			wqe->length += length;
			j++;
		}
		wqe->wr.num_sge = j;
	}
	if (qp->ibqp.qp_type == IB_QPT_UC ||
	    qp->ibqp.qp_type == IB_QPT_RC) {
		if (wqe->length > 0x80000000U)
			goto bail_inval_free;
	} else if (wqe->length > (dd_from_ibdev(qp->ibqp.device)->pport +
				  qp->port_num - 1)->ibmtu)
		goto bail_inval_free;
	else
		atomic_inc(&to_iah(wr->wr.ud.ah)->refcount);
	wqe->ssn = qp->s_ssn++;
	qp->s_head = next;

	ret = 0;
	goto bail;

bail_inval_free:
	while (j) {
		struct qib_sge *sge = &wqe->sg_list[--j];

		atomic_dec(&sge->mr->refcount);
	}
bail_inval:
	ret = -EINVAL;
bail:
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return ret;
}
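
/*
 * The send queue is a ring indexed by s_head/s_last; one slot is kept
 * unused so that "next == qp->s_last" unambiguously means full, which
 * is why a full queue reports -ENOMEM above rather than wrapping.
 */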

/**
 * qib_post_send - post a send on a QP
 * @ibqp: the QP to post the send on
 * @wr: the list of work requests to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 */
static int qib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			 struct ib_send_wr **bad_wr)
{
	struct qib_qp *qp = to_iqp(ibqp);
	int err = 0;

	for (; wr; wr = wr->next) {
		err = qib_post_one_send(qp, wr);
		if (err) {
			*bad_wr = wr;
			goto bail;
		}
	}

	/* Try to do the send work in the caller's context. */
	qib_do_send(&qp->s_work);

bail:
	return err;
}
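
/*
 * qib_post_send() and qib_post_receive() below are installed as the
 * device's post_send/post_recv handlers when the verbs device is
 * registered (qib_register_ib_device(), later in this file), so a
 * kernel ULP calling ib_post_send() on a qib QP ends up here.
 */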

/**
 * qib_post_receive - post a receive on a QP
 * @ibqp: the QP to post the receive on
 * @wr: the list of work requests to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 */
static int qib_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			    struct ib_recv_wr **bad_wr)
{
	struct qib_qp *qp = to_iqp(ibqp);
	struct qib_rwq *wq = qp->r_rq.wq;
	unsigned long flags;
	int ret;

	/* Check that state is OK to post receive. */
	if (!(ib_qib_state_ops[qp->state] & QIB_POST_RECV_OK) || !wq) {
		*bad_wr = wr;
		ret = -EINVAL;
		goto bail;
	}

	for (; wr; wr = wr->next) {
		struct qib_rwqe *wqe;
		u32 next;
		int i;

		if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {
			*bad_wr = wr;
			ret = -EINVAL;
			goto bail;
		}

		spin_lock_irqsave(&qp->r_rq.lock, flags);
		next = wq->head + 1;
		if (next >= qp->r_rq.size)
			next = 0;
		if (next == wq->tail) {
			spin_unlock_irqrestore(&qp->r_rq.lock, flags);
			*bad_wr = wr;
			ret = -ENOMEM;
			goto bail;
		}

		wqe = get_rwqe_ptr(&qp->r_rq, wq->head);
		wqe->wr_id = wr->wr_id;
		wqe->num_sge = wr->num_sge;
		for (i = 0; i < wr->num_sge; i++)
			wqe->sg_list[i] = wr->sg_list[i];
		/* Make sure queue entry is written before the head index. */
		smp_wmb();
		wq->head = next;
		spin_unlock_irqrestore(&qp->r_rq.lock, flags);
	}
	ret = 0;

bail:
	return ret;
}
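
/*
 * The receive queue is a producer ring guarded by r_rq.lock: the
 * smp_wmb() above guarantees the WQE contents are globally visible
 * before wq->head advances, so the interrupt-time consumer never sees
 * a head index that runs ahead of the data it publishes.
 */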

/**
 * qib_qp_rcv - process an incoming packet on a QP
 * @rcd: the context pointer
 * @hdr: the packet header
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP the packet came on
 *
 * This is called from qib_ib_rcv() to process an incoming packet
 * for the given QP.
 * Called at interrupt level.
 */
static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
		       int has_grh, void *data, u32 tlen, struct qib_qp *qp)
{
	struct qib_ibport *ibp = &rcd->ppd->ibport_data;

	/* Check for valid receive state. */
	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
		ibp->n_pkt_drops++;
		return;
	}

	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (ib_qib_disable_sma)
			break;
		/* FALLTHROUGH */
	case IB_QPT_UD:
		qib_ud_rcv(ibp, hdr, has_grh, data, tlen, qp);
		break;

	case IB_QPT_RC:
		qib_rc_rcv(rcd, hdr, has_grh, data, tlen, qp);
		break;

	case IB_QPT_UC:
		qib_uc_rcv(ibp, hdr, has_grh, data, tlen, qp);
		break;

	default:
		break;
	}
}
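
/*
 * Per-QP-type dispatch: SMI/GSI packets normally take the same receive
 * path as UD, but are dropped wholesale when the disable_sma module
 * parameter is set (it is S_IWUSR, so this can be toggled at runtime).
 */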

/**
 * qib_ib_rcv - process an incoming packet
 * @rcd: the context pointer
 * @rhdr: the header of the packet
 * @data: the packet payload
 * @tlen: the packet length
 *
 * This is called from qib_kreceive() to process an incoming packet at
 * interrupt level. Tlen is the length of the header + data + CRC in bytes.
 */
void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
{
	struct qib_pportdata *ppd = rcd->ppd;
	struct qib_ibport *ibp = &ppd->ibport_data;
	struct qib_ib_header *hdr = rhdr;
	struct qib_other_headers *ohdr;
	struct qib_qp *qp;
	u32 qp_num;
	int lnh;
	u8 opcode;
	u16 lid;

	/* 24 == LRH+BTH+CRC */
	if (unlikely(tlen < 24))
		goto drop;

	/* Check for a valid destination LID (see ch. 7.11.1). */
	lid = be16_to_cpu(hdr->lrh[1]);
	if (lid < QIB_MULTICAST_LID_BASE) {
		lid &= ~((1 << ppd->lmc) - 1);
		if (unlikely(lid != ppd->lid))
			goto drop;
	}

	/* Check for GRH */
	lnh = be16_to_cpu(hdr->lrh[0]) & 3;
	if (lnh == QIB_LRH_BTH)
		ohdr = &hdr->u.oth;
	else if (lnh == QIB_LRH_GRH) {
		u32 vtf;

		ohdr = &hdr->u.l.oth;
		if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
			goto drop;
		vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
		if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
			goto drop;
	} else
		goto drop;

	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	ibp->opstats[opcode & 0x7f].n_bytes += tlen;
	ibp->opstats[opcode & 0x7f].n_packets++;

	/* Get the destination QP number. */
	qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
	if (qp_num == QIB_MULTICAST_QPN) {
		struct qib_mcast *mcast;
		struct qib_mcast_qp *p;

		if (lnh != QIB_LRH_GRH)
			goto drop;
		mcast = qib_mcast_find(ibp, &hdr->u.l.grh.dgid);
		if (mcast == NULL)
			goto drop;
		ibp->n_multicast_rcv++;
		list_for_each_entry_rcu(p, &mcast->qp_list, list)
			qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp);
		/*
		 * Notify qib_multicast_detach() if it is waiting for us
		 * to finish.
		 */
		if (atomic_dec_return(&mcast->refcount) <= 1)
			wake_up(&mcast->wait);
	} else {
		qp = qib_lookup_qpn(ibp, qp_num);
		if (!qp)
			goto drop;
		ibp->n_unicast_rcv++;
		qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp);
		/*
		 * Notify qib_destroy_qp() if it is waiting
		 * for us to finish.
		 */
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
	return;

drop:
	ibp->n_pkt_drops++;
}
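
/*
 * Header layout assumed above: the 24-byte minimum is LRH (8 bytes) +
 * BTH (12 bytes) + ICRC (4 bytes); the low two bits of lrh[0] select
 * whether a GRH sits between LRH and BTH, and bth[0]/bth[1] carry the
 * opcode (top byte) and the destination QP number respectively.
 */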

/*
 * This is called from a timer to check for QPs
 * which need kernel memory in order to send a packet.
 */
static void mem_timer(unsigned long data)
{
	struct qib_ibdev *dev = (struct qib_ibdev *) data;
	struct list_head *list = &dev->memwait;
	struct qib_qp *qp = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev->pending_lock, flags);
	if (!list_empty(list)) {
		qp = list_entry(list->next, struct qib_qp, iowait);
		list_del_init(&qp->iowait);
		atomic_inc(&qp->refcount);
		if (!list_empty(list))
			mod_timer(&dev->mem_timer, jiffies + 1);
	}
	spin_unlock_irqrestore(&dev->pending_lock, flags);

	if (qp) {
		spin_lock_irqsave(&qp->s_lock, flags);
		if (qp->s_flags & QIB_S_WAIT_KMEM) {
			qp->s_flags &= ~QIB_S_WAIT_KMEM;
			qib_schedule_send(qp);
		}
		spin_unlock_irqrestore(&qp->s_lock, flags);
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}
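
/*
 * mem_timer() drains dev->memwait one QP per tick: it dequeues the
 * head waiter and re-arms itself (jiffies + 1) while more QPs are
 * waiting, then clears QIB_S_WAIT_KMEM and reschedules the send under
 * the QP's s_lock.  QPs get onto this list via wait_kmem() below.
 */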

static void update_sge(struct qib_sge_state *ss, u32 length)
{
	struct qib_sge *sge = &ss->sge;

	sge->vaddr += length;
	sge->length -= length;
	sge->sge_length -= length;
	if (sge->sge_length == 0) {
		if (--ss->num_sge)
			*sge = *ss->sg_list++;
	} else if (sge->length == 0 && sge->mr->lkey) {
		if (++sge->n >= QIB_SEGSZ) {
			if (++sge->m >= sge->mr->mapsz)
				return;
			sge->n = 0;
		}
		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
	}
}

#ifdef __LITTLE_ENDIAN
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#else
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#endif
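
/*
 * Worked example for the helpers above on little-endian: with
 * data = 0xaabbccdd, get_upper_bits(data, 8) == 0x00aabbcc drops the
 * byte already consumed, set_upper_bits(data, 24) == 0xdd000000 parks
 * the low byte at the top of the next output word, and
 * clear_upper_bytes(data, 1, 1) == 0x0000dd00 keeps exactly one byte,
 * positioned at byte offset 1.  The big-endian variants mirror the
 * shifts so the same byte arithmetic works in copy_io() below.
 */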

static void copy_io(u32 __iomem *piobuf, struct qib_sge_state *ss,
		    u32 length, unsigned flush_wc)
{
	u32 extra = 0;
	u32 data = 0;
	u32 last;

	while (1) {
		u32 len = ss->sge.length;
		u32 off;

		if (len > length)
			len = length;
		if (len > ss->sge.sge_length)
			len = ss->sge.sge_length;
		BUG_ON(len == 0);
		/* If the source address is not aligned, try to align it. */
		off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
		if (off) {
			u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
					    ~(sizeof(u32) - 1));
			u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
			u32 y;

			y = sizeof(u32) - off;
			if (len > y)
				len = y;
			if (len + extra >= sizeof(u32)) {
				data |= set_upper_bits(v, extra *
						       BITS_PER_BYTE);
				len = sizeof(u32) - extra;
				if (len == length) {
					last = data;
					break;
				}
				__raw_writel(data, piobuf);
				piobuf++;
				extra = 0;
				data = 0;
			} else {
				/* Clear unused upper bytes */
				data |= clear_upper_bytes(v, len, extra);
				if (len == length) {
					last = data;
					break;
				}
				extra += len;
			}
		} else if (extra) {
			/* Source address is aligned. */
			u32 *addr = (u32 *) ss->sge.vaddr;
			int shift = extra * BITS_PER_BYTE;
			int ushift = 32 - shift;
			u32 l = len;

			while (l >= sizeof(u32)) {
				u32 v = *addr;

				data |= set_upper_bits(v, shift);
				__raw_writel(data, piobuf);
				data = get_upper_bits(v, ushift);
				piobuf++;
				addr++;
				l -= sizeof(u32);
			}
			/*
			 * We still have 'extra' number of bytes leftover.
			 */
			if (l) {
				u32 v = *addr;

				if (l + extra >= sizeof(u32)) {
					data |= set_upper_bits(v, shift);
					len -= l + extra - sizeof(u32);
					if (len == length) {
						last = data;
						break;
					}
					__raw_writel(data, piobuf);
					piobuf++;
					extra = 0;
					data = 0;
				} else {
					/* Clear unused upper bytes */
					data |= clear_upper_bytes(v, l, extra);
					if (len == length) {
						last = data;
						break;
					}
					extra += l;
				}
			} else if (len == length) {
				last = data;
				break;
			}
		} else if (len == length) {
			u32 w;

			/*
			 * Need to round up for the last dword in the
			 * packet.
			 */
			w = (len + 3) >> 2;
			qib_pio_copy(piobuf, ss->sge.vaddr, w - 1);
			piobuf += w - 1;
			last = ((u32 *) ss->sge.vaddr)[w - 1];
			break;
		} else {
			u32 w = len >> 2;

			qib_pio_copy(piobuf, ss->sge.vaddr, w);
			piobuf += w;

			extra = len & (sizeof(u32) - 1);
			if (extra) {
				u32 v = ((u32 *) ss->sge.vaddr)[w];

				/* Clear unused upper bytes */
				data = clear_upper_bytes(v, extra, 0);
			}
		}
		update_sge(ss, len);
		length -= len;
	}
	/* Update address before sending packet. */
	update_sge(ss, length);
	if (flush_wc) {
		/* must flush early everything before trigger word */
		qib_flush_wc();
		__raw_writel(last, piobuf);
		/* be sure trigger word is written */
		qib_flush_wc();
	} else
		__raw_writel(last, piobuf);
}

static struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
					 struct qib_qp *qp, int *retp)
{
	struct qib_verbs_txreq *tx;
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	spin_lock(&dev->pending_lock);

	if (!list_empty(&dev->txreq_free)) {
		struct list_head *l = dev->txreq_free.next;

		list_del(l);
		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
		*retp = 0;
	} else {
		if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK &&
		    list_empty(&qp->iowait)) {
			dev->n_txwait++;
			qp->s_flags |= QIB_S_WAIT_TX;
			list_add_tail(&qp->iowait, &dev->txwait);
		}
		tx = NULL;
		qp->s_flags &= ~QIB_S_BUSY;
		*retp = -EBUSY;
	}

	spin_unlock(&dev->pending_lock);
	spin_unlock_irqrestore(&qp->s_lock, flags);

	return tx;
}
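
/*
 * get_txreq() draws from a fixed pool (dev->txreq_free).  When the
 * pool is empty the QP is parked on dev->txwait with QIB_S_WAIT_TX
 * set and -EBUSY returned through *retp; qib_put_txreq() below wakes
 * the first waiter as soon as a request is returned to the pool.
 */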

void qib_put_txreq(struct qib_verbs_txreq *tx)
{
	struct qib_ibdev *dev;
	struct qib_qp *qp;
	unsigned long flags;

	qp = tx->qp;
	dev = to_idev(qp->ibqp.device);

	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
	if (tx->mr) {
		atomic_dec(&tx->mr->refcount);
		tx->mr = NULL;
	}
	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) {
		tx->txreq.flags &= ~QIB_SDMA_TXREQ_F_FREEBUF;
		dma_unmap_single(&dd_from_dev(dev)->pcidev->dev,
				 tx->txreq.addr, tx->hdr_dwords << 2,
				 DMA_TO_DEVICE);
		kfree(tx->align_buf);
	}

	spin_lock_irqsave(&dev->pending_lock, flags);

	/* Put struct back on free list */
	list_add(&tx->txreq.list, &dev->txreq_free);

	if (!list_empty(&dev->txwait)) {
		/* Wake up first QP wanting a free struct */
		qp = list_entry(dev->txwait.next, struct qib_qp, iowait);
		list_del_init(&qp->iowait);
		atomic_inc(&qp->refcount);
		spin_unlock_irqrestore(&dev->pending_lock, flags);

		spin_lock_irqsave(&qp->s_lock, flags);
		if (qp->s_flags & QIB_S_WAIT_TX) {
			qp->s_flags &= ~QIB_S_WAIT_TX;
			qib_schedule_send(qp);
		}
		spin_unlock_irqrestore(&qp->s_lock, flags);

		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	} else
		spin_unlock_irqrestore(&dev->pending_lock, flags);
}

/*
 * This is called when there are send DMA descriptors that might be
 * available.
 *
 * This is called with ppd->sdma_lock held.
 */
void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail)
{
	struct qib_qp *qp, *nqp;
	struct qib_qp *qps[20];
	struct qib_ibdev *dev;
	unsigned i, n;

	n = 0;
	dev = &ppd->dd->verbs_dev;
	spin_lock(&dev->pending_lock);

	/* Search wait list for first QP wanting DMA descriptors. */
	list_for_each_entry_safe(qp, nqp, &dev->dmawait, iowait) {
		if (qp->port_num != ppd->port)
			continue;
		if (n == ARRAY_SIZE(qps))
			break;
		if (qp->s_tx->txreq.sg_count > avail)
			break;
		avail -= qp->s_tx->txreq.sg_count;
		list_del_init(&qp->iowait);
		atomic_inc(&qp->refcount);
		qps[n++] = qp;
	}

	spin_unlock(&dev->pending_lock);

	for (i = 0; i < n; i++) {
		qp = qps[i];
		spin_lock(&qp->s_lock);
		if (qp->s_flags & QIB_S_WAIT_DMA_DESC) {
			qp->s_flags &= ~QIB_S_WAIT_DMA_DESC;
			qib_schedule_send(qp);
		}
		spin_unlock(&qp->s_lock);
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}
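
/*
 * qib_verbs_sdma_desc_avail() deliberately gathers at most 20 QPs (the
 * qps[] array) under dev->pending_lock and defers the actual
 * qib_schedule_send() calls until after the lock is dropped, keeping
 * the lock hold time bounded.  The scan stops early once the next
 * waiter needs more descriptors than remain available.
 */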

/*
 * This is called with ppd->sdma_lock held.
 */
static void sdma_complete(struct qib_sdma_txreq *cookie, int status)
{
	struct qib_verbs_txreq *tx =
		container_of(cookie, struct qib_verbs_txreq, txreq);
	struct qib_qp *qp = tx->qp;

	spin_lock(&qp->s_lock);
	if (tx->wqe)
		qib_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
	else if (qp->ibqp.qp_type == IB_QPT_RC) {
		struct qib_ib_header *hdr;

		if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF)
			hdr = &tx->align_buf->hdr;
		else {
			struct qib_ibdev *dev = to_idev(qp->ibqp.device);

			hdr = &dev->pio_hdrs[tx->hdr_inx].hdr;
		}
		qib_rc_send_complete(qp, hdr);
	}
	if (atomic_dec_and_test(&qp->s_dma_busy)) {
		if (qp->state == IB_QPS_RESET)
			wake_up(&qp->wait_dma);
		else if (qp->s_flags & QIB_S_WAIT_DMA) {
			qp->s_flags &= ~QIB_S_WAIT_DMA;
			qib_schedule_send(qp);
		}
	}
	spin_unlock(&qp->s_lock);

	qib_put_txreq(tx);
}

static int wait_kmem(struct qib_ibdev *dev, struct qib_qp *qp)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
		spin_lock(&dev->pending_lock);
		if (list_empty(&qp->iowait)) {
			if (list_empty(&dev->memwait))
				mod_timer(&dev->mem_timer, jiffies + 1);
			qp->s_flags |= QIB_S_WAIT_KMEM;
			list_add_tail(&qp->iowait, &dev->memwait);
		}
		spin_unlock(&dev->pending_lock);
		qp->s_flags &= ~QIB_S_BUSY;
		ret = -EBUSY;
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);

	return ret;
}
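
/*
 * wait_kmem() is the memory-pressure analogue of get_txreq()'s TX
 * wait: the QP is queued on dev->memwait, mem_timer() above is armed
 * when this is the first waiter, and -EBUSY tells the caller to leave
 * the packet queued for a later retry.
 */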

static int qib_verbs_send_dma(struct qib_qp *qp, struct qib_ib_header *hdr,
			      u32 hdrwords, struct qib_sge_state *ss, u32 len,
			      u32 plen, u32 dwords)
{
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
	struct qib_devdata *dd = dd_from_dev(dev);
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_verbs_txreq *tx;
	struct qib_pio_header *phdr;
	u32 control;
	u32 ndesc;
	int ret;

	tx = qp->s_tx;
	if (tx) {
		qp->s_tx = NULL;
		/* resend previously constructed packet */
		ret = qib_sdma_verbs_send(ppd, tx->ss, tx->dwords, tx);
		goto bail;
	}

	tx = get_txreq(dev, qp, &ret);
	if (!tx)
		goto bail;

	control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
				       be16_to_cpu(hdr->lrh[0]) >> 12);
	tx->qp = qp;
	atomic_inc(&qp->refcount);
	tx->wqe = qp->s_wqe;
	tx->mr = qp->s_rdma_mr;
	if (qp->s_rdma_mr)
		qp->s_rdma_mr = NULL;
	tx->txreq.callback = sdma_complete;
	if (dd->flags & QIB_HAS_SDMA_TIMEOUT)
		tx->txreq.flags = QIB_SDMA_TXREQ_F_HEADTOHOST;
	else
		tx->txreq.flags = QIB_SDMA_TXREQ_F_INTREQ;
	if (plen + 1 > dd->piosize2kmax_dwords)
		tx->txreq.flags |= QIB_SDMA_TXREQ_F_USELARGEBUF;

	if (len) {
		/*
		 * Don't try to DMA if it takes more descriptors than
		 * the queue holds.
		 */
		ndesc = qib_count_sge(ss, len);
		if (ndesc >= ppd->sdma_descq_cnt)
			ndesc = 0;
	} else
		ndesc = 1;
	if (ndesc) {
		phdr = &dev->pio_hdrs[tx->hdr_inx];
		phdr->pbc[0] = cpu_to_le32(plen);
		phdr->pbc[1] = cpu_to_le32(control);
		memcpy(&phdr->hdr, hdr, hdrwords << 2);
		tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEDESC;
		tx->txreq.sg_count = ndesc;
		tx->txreq.addr = dev->pio_hdrs_phys +
			tx->hdr_inx * sizeof(struct qib_pio_header);
		tx->hdr_dwords = hdrwords + 2; /* add PBC length */
		ret = qib_sdma_verbs_send(ppd, ss, dwords, tx);
		goto bail;
	}

	/* Allocate a buffer and copy the header and payload to it. */
	tx->hdr_dwords = plen + 1;
	phdr = kmalloc(tx->hdr_dwords << 2, GFP_ATOMIC);
	if (!phdr)
		goto err_tx;
	phdr->pbc[0] = cpu_to_le32(plen);
	phdr->pbc[1] = cpu_to_le32(control);
	memcpy(&phdr->hdr, hdr, hdrwords << 2);
	qib_copy_from_sge((u32 *) &phdr->hdr + hdrwords, ss, len);

	tx->txreq.addr = dma_map_single(&dd->pcidev->dev, phdr,
					tx->hdr_dwords << 2, DMA_TO_DEVICE);
	if (dma_mapping_error(&dd->pcidev->dev, tx->txreq.addr))
		goto map_err;
	tx->align_buf = phdr;
	tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEBUF;
	tx->txreq.sg_count = 1;
	ret = qib_sdma_verbs_send(ppd, NULL, 0, tx);
	goto unaligned;

map_err:
	kfree(phdr);
err_tx:
	qib_put_txreq(tx);
	ret = wait_kmem(dev, qp);
unaligned:
	ibp->n_unaligned++;
bail:
	return ret;
}
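
/*
 * qib_verbs_send_dma() thus has two paths: if qib_count_sge() says the
 * payload is dword-aligned and fits the descriptor queue, the hardware
 * gathers it in place (header from the pre-mapped pio_hdrs array);
 * otherwise header and payload are copied into a single kmalloc'd
 * bounce buffer, mapped with dma_map_single(), and counted in
 * ibp->n_unaligned.
 */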
	tx->hdr_dwords = plen + 1;
	phdr = kmalloc(tx->hdr_dwords << 2, GFP_ATOMIC);
	if (!phdr)
		goto err_tx;
	phdr->pbc[0] = cpu_to_le32(plen);
	phdr->pbc[1] = cpu_to_le32(control);
	memcpy(&phdr->hdr, hdr, hdrwords << 2);
	qib_copy_from_sge((u32 *) &phdr->hdr + hdrwords, ss, len);

	tx->txreq.addr = dma_map_single(&dd->pcidev->dev, phdr,
					tx->hdr_dwords << 2, DMA_TO_DEVICE);
	if (dma_mapping_error(&dd->pcidev->dev, tx->txreq.addr))
		goto map_err;
	tx->align_buf = phdr;
	tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEBUF;
	tx->txreq.sg_count = 1;
	ret = qib_sdma_verbs_send(ppd, NULL, 0, tx);
	goto unaligned;

map_err:
	kfree(phdr);
err_tx:
	qib_put_txreq(tx);
	ret = wait_kmem(dev, qp);
unaligned:
	ibp->n_unaligned++;
bail:
	return ret;
}

/*
 * If we are now in the error state, return zero to flush the
 * send work request.
 */
static int no_bufs_available(struct qib_qp *qp)
{
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
	struct qib_devdata *dd;
	unsigned long flags;
	int ret = 0;

	/*
	 * Note that as soon as want_buffer() is called and
	 * possibly before it returns, qib_ib_piobufavail()
	 * could be called.  Therefore, put QP on the I/O wait list before
	 * enabling the PIO avail interrupt.
	 */
	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
		spin_lock(&dev->pending_lock);
		if (list_empty(&qp->iowait)) {
			dev->n_piowait++;
			qp->s_flags |= QIB_S_WAIT_PIO;
			list_add_tail(&qp->iowait, &dev->piowait);
			dd = dd_from_dev(dev);
			dd->f_wantpiobuf_intr(dd, 1);
		}
		spin_unlock(&dev->pending_lock);
		qp->s_flags &= ~QIB_S_BUSY;
		ret = -EBUSY;
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return ret;
}
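
/*
 * qib_verbs_send_pio - send a packet by programmed I/O
 *
 * Copies the PBC, header, and payload directly into a chip send
 * buffer, with write-combining flushes where the hardware requires
 * ordering, and generates the send completion inline since the copy
 * itself completes the send.
 */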
static int qib_verbs_send_pio(struct qib_qp *qp, struct qib_ib_header *ibhdr,
			      u32 hdrwords, struct qib_sge_state *ss, u32 len,
			      u32 plen, u32 dwords)
{
	struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	struct qib_pportdata *ppd = dd->pport + qp->port_num - 1;
	u32 *hdr = (u32 *) ibhdr;
	u32 __iomem *piobuf_orig;
	u32 __iomem *piobuf;
	u64 pbc;
	unsigned long flags;
	unsigned flush_wc;
	u32 control;
	u32 pbufn;

	control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
				       be16_to_cpu(ibhdr->lrh[0]) >> 12);
	pbc = ((u64) control << 32) | plen;
	piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
	if (unlikely(piobuf == NULL))
		return no_bufs_available(qp);

	/*
	 * Write the pbc.
	 * We have to flush after the PBC for correctness on some cpus
	 * or WC buffer can be written out of order.
	 */
	writeq(pbc, piobuf);
	piobuf_orig = piobuf;
	piobuf += 2;

	flush_wc = dd->flags & QIB_PIO_FLUSH_WC;
	if (len == 0) {
		/*
		 * If there is just the header portion, must flush before
		 * writing last word of header for correctness, and after
		 * the last header word (trigger word).
		 */
		if (flush_wc) {
			qib_flush_wc();
			qib_pio_copy(piobuf, hdr, hdrwords - 1);
			qib_flush_wc();
			__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
			qib_flush_wc();
		} else
			qib_pio_copy(piobuf, hdr, hdrwords);
		goto done;
	}

	if (flush_wc)
		qib_flush_wc();
	qib_pio_copy(piobuf, hdr, hdrwords);
	piobuf += hdrwords;

	/* The common case is aligned and contained in one segment. */
	if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
		   !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
		u32 *addr = (u32 *) ss->sge.vaddr;

		/* Update address before sending packet. */
		update_sge(ss, len);
		if (flush_wc) {
			qib_pio_copy(piobuf, addr, dwords - 1);
			/* must flush early everything before trigger word */
			qib_flush_wc();
			__raw_writel(addr[dwords - 1], piobuf + dwords - 1);
			/* be sure trigger word is written */
			qib_flush_wc();
		} else
			qib_pio_copy(piobuf, addr, dwords);
		goto done;
	}
	copy_io(piobuf, ss, len, flush_wc);
done:
	if (dd->flags & QIB_USE_SPCL_TRIG) {
		u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
		qib_flush_wc();
		__raw_writel(0xaebecede, piobuf_orig + spcl_off);
	}
	qib_sendbuf_done(dd, pbufn);
	if (qp->s_rdma_mr) {
		atomic_dec(&qp->s_rdma_mr->refcount);
		qp->s_rdma_mr = NULL;
	}
	if (qp->s_wqe) {
		spin_lock_irqsave(&qp->s_lock, flags);
		qib_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	} else if (qp->ibqp.qp_type == IB_QPT_RC) {
		spin_lock_irqsave(&qp->s_lock, flags);
		qib_rc_send_complete(qp, ibhdr);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	}
	return 0;
}

/**
 * qib_verbs_send - send a packet
 * @qp: the QP to send on
 * @hdr: the packet header
 * @hdrwords: the number of 32-bit words in the header
 * @ss: the SGE to send
 * @len: the length of the packet in bytes
 *
 * Return zero if the packet is sent or queued OK.
 * Return non-zero with the QIB_S_BUSY flag cleared in qp->s_flags otherwise.
 */
int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr,
		   u32 hdrwords, struct qib_sge_state *ss, u32 len)
{
	struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	u32 plen;
	int ret;
	u32 dwords = (len + 3) >> 2;

	/*
	 * Calculate the send buffer trigger address.
	 * The +1 counts for the pbc control dword following the pbc length.
	 */
	plen = hdrwords + dwords + 1;

	/*
	 * VL15 packets (IB_QPT_SMI) will always use PIO, so we
	 * can defer SDMA restart until link goes ACTIVE without
	 * worrying about just how we got there.
	 */
	if (qp->ibqp.qp_type == IB_QPT_SMI ||
	    !(dd->flags & QIB_HAS_SEND_DMA))
		ret = qib_verbs_send_pio(qp, hdr, hdrwords, ss, len,
					 plen, dwords);
	else
		ret = qib_verbs_send_dma(qp, hdr, hdrwords, ss, len,
					 plen, dwords);

	return ret;
}
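
/**
 * qib_snapshot_counters - snapshot the current counter values
 * @ppd: the physical port data
 * @swords: where to put the "sent words" count
 * @rwords: where to put the "received words" count
 * @spkts: where to put the "sent packets" count
 * @rpkts: where to put the "received packets" count
 * @xmit_wait: where to put the "transmit wait" count
 *
 * Return 0 on success, -EINVAL if the hardware is not present.
 */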
int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
			  u64 *rwords, u64 *spkts, u64 *rpkts,
			  u64 *xmit_wait)
{
	int ret;
	struct qib_devdata *dd = ppd->dd;

	if (!(dd->flags & QIB_PRESENT)) {
		/* no hardware, freeze, etc. */
		ret = -EINVAL;
		goto bail;
	}
	*swords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDSEND);
	*rwords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDRCV);
	*spkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTSEND);
	*rpkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTRCV);
	*xmit_wait = dd->f_portcntr(ppd, QIBPORTCNTR_SENDSTALL);

	ret = 0;

bail:
	return ret;
}

/**
 * qib_get_counters - get various chip counters
 * @ppd: the physical port data
 * @cntrs: counters are placed here
 *
 * Return the counters needed by recv_pma_get_portcounters().
 */
int qib_get_counters(struct qib_pportdata *ppd,
		     struct qib_verbs_counters *cntrs)
{
	int ret;

	if (!(ppd->dd->flags & QIB_PRESENT)) {
		/* no hardware, freeze, etc. */
		ret = -EINVAL;
		goto bail;
	}
	cntrs->symbol_error_counter =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);
	cntrs->link_error_recovery_counter =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKERRRECOV);
	/*
	 * The link downed counter counts when the other side downs the
	 * connection.  We add in the number of times we downed the link
	 * due to local link integrity errors to compensate.
	 */
	cntrs->link_downed_counter =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKDOWN);
	cntrs->port_rcv_errors =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXDROPPKT) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVOVFL) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERR_RLEN) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_INVALIDRLEN) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLINK) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRICRC) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRVCRC) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLPCRC) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_BADFORMAT);
	cntrs->port_rcv_errors +=
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXLOCALPHYERR);
	cntrs->port_rcv_errors +=
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXVLERR);
	cntrs->port_rcv_remphys_errors =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVEBP);
	cntrs->port_xmit_discards =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_UNSUPVL);
	cntrs->port_xmit_data = ppd->dd->f_portcntr(ppd,
						    QIBPORTCNTR_WORDSEND);
	cntrs->port_rcv_data = ppd->dd->f_portcntr(ppd,
						   QIBPORTCNTR_WORDRCV);
	cntrs->port_xmit_packets = ppd->dd->f_portcntr(ppd,
						       QIBPORTCNTR_PKTSEND);
	cntrs->port_rcv_packets = ppd->dd->f_portcntr(ppd,
						      QIBPORTCNTR_PKTRCV);
	cntrs->local_link_integrity_errors =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_LLI);
	cntrs->excessive_buffer_overrun_errors =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_EXCESSBUFOVFL);
	cntrs->vl15_dropped =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_VL15PKTDROP);

	ret = 0;

bail:
	return ret;
}

/**
 * qib_ib_piobufavail - callback when a PIO buffer is available
 * @dd: the device pointer
 *
 * This is called from qib_intr() at interrupt level when a PIO buffer is
 * available after qib_verbs_send() returned an error that no buffers were
 * available.  Disable the interrupt if there are no more QPs waiting.
 */
void qib_ib_piobufavail(struct qib_devdata *dd)
{
	struct qib_ibdev *dev = &dd->verbs_dev;
	struct list_head *list;
	struct qib_qp *qps[5];
	struct qib_qp *qp;
	unsigned long flags;
	unsigned i, n;

	list = &dev->piowait;
	n = 0;

	/*
	 * Note: checking that the piowait list is empty and clearing
	 * the buffer available interrupt needs to be atomic or we
	 * could end up with QPs on the wait list with the interrupt
	 * disabled.
	 */
	spin_lock_irqsave(&dev->pending_lock, flags);
	while (!list_empty(list)) {
		if (n == ARRAY_SIZE(qps))
			goto full;
		qp = list_entry(list->next, struct qib_qp, iowait);
		list_del_init(&qp->iowait);
		atomic_inc(&qp->refcount);
		qps[n++] = qp;
	}
	dd->f_wantpiobuf_intr(dd, 0);
full:
	spin_unlock_irqrestore(&dev->pending_lock, flags);

	for (i = 0; i < n; i++) {
		qp = qps[i];

		spin_lock_irqsave(&qp->s_lock, flags);
		if (qp->s_flags & QIB_S_WAIT_PIO) {
			qp->s_flags &= ~QIB_S_WAIT_PIO;
			qib_schedule_send(qp);
		}
		spin_unlock_irqrestore(&qp->s_lock, flags);

		/* Notify qib_destroy_qp() if it is waiting. */
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}
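
/*
 * qib_query_device - fill in the device attributes reported to the IB
 * core, mostly from the driver-wide limits set by the module parameters.
 */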
static int qib_query_device(struct ib_device *ibdev,
			    struct ib_device_attr *props)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	struct qib_ibdev *dev = to_idev(ibdev);

	memset(props, 0, sizeof(*props));

	props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
		IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
		IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
		IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
	props->page_size_cap = PAGE_SIZE;
	props->vendor_id =
		QIB_SRC_OUI_1 << 16 | QIB_SRC_OUI_2 << 8 | QIB_SRC_OUI_3;
	props->vendor_part_id = dd->deviceid;
	props->hw_ver = dd->minrev;
	props->sys_image_guid = ib_qib_sys_image_guid;
	props->max_mr_size = ~0ULL;
	props->max_qp = ib_qib_max_qps;
	props->max_qp_wr = ib_qib_max_qp_wrs;
	props->max_sge = ib_qib_max_sges;
	props->max_cq = ib_qib_max_cqs;
	props->max_ah = ib_qib_max_ahs;
	props->max_cqe = ib_qib_max_cqes;
	props->max_mr = dev->lk_table.max;
	props->max_fmr = dev->lk_table.max;
	props->max_map_per_fmr = 32767;
	props->max_pd = ib_qib_max_pds;
	props->max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC;
	props->max_qp_init_rd_atom = 255;
	/* props->max_res_rd_atom */
	props->max_srq = ib_qib_max_srqs;
	props->max_srq_wr = ib_qib_max_srq_wrs;
	props->max_srq_sge = ib_qib_max_srq_sges;
	/* props->local_ca_ack_delay */
	props->atomic_cap = IB_ATOMIC_GLOB;
	props->max_pkeys = qib_get_npkeys(dd);
	props->max_mcast_grp = ib_qib_max_mcast_grps;
	props->max_mcast_qp_attach = ib_qib_max_mcast_qp_attached;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
		props->max_mcast_grp;

	return 0;
}
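
/*
 * qib_query_port - report the link state, LIDs, MTU, and capability
 * flags currently in effect for one port.
 */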
static int qib_query_port(struct ib_device *ibdev, u8 port,
			  struct ib_port_attr *props)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	enum ib_mtu mtu;
	u16 lid = ppd->lid;

	memset(props, 0, sizeof(*props));
	props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE);
	props->lmc = ppd->lmc;
	props->sm_lid = ibp->sm_lid;
	props->sm_sl = ibp->sm_sl;
	props->state = dd->f_iblink_state(ppd->lastibcstat);
	props->phys_state = dd->f_ibphys_portstate(ppd->lastibcstat);
	props->port_cap_flags = ibp->port_cap_flags;
	props->gid_tbl_len = QIB_GUIDS_PER_PORT;
	props->max_msg_sz = 0x80000000;
	props->pkey_tbl_len = qib_get_npkeys(dd);
	props->bad_pkey_cntr = ibp->pkey_violations;
	props->qkey_viol_cntr = ibp->qkey_violations;
	props->active_width = ppd->link_width_active;
	/* See rate_show() */
	props->active_speed = ppd->link_speed_active;
	props->max_vl_num = qib_num_vls(ppd->vls_supported);
	props->init_type_reply = 0;

	props->max_mtu = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
	switch (ppd->ibmtu) {
	case 4096:
		mtu = IB_MTU_4096;
		break;
	case 2048:
		mtu = IB_MTU_2048;
		break;
	case 1024:
		mtu = IB_MTU_1024;
		break;
	case 512:
		mtu = IB_MTU_512;
		break;
	case 256:
		mtu = IB_MTU_256;
		break;
	default:
		mtu = IB_MTU_2048;
	}
	props->active_mtu = mtu;
	props->subnet_timeout = ibp->subnet_timeout;

	return 0;
}
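
/*
 * qib_modify_device - change the node description or the system image
 * GUID; any other modification request fails with -EOPNOTSUPP.
 */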
static int qib_modify_device(struct ib_device *device,
			     int device_modify_mask,
			     struct ib_device_modify *device_modify)
{
	struct qib_devdata *dd = dd_from_ibdev(device);
	unsigned i;
	int ret;

	if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
				   IB_DEVICE_MODIFY_NODE_DESC)) {
		ret = -EOPNOTSUPP;
		goto bail;
	}

	if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(device->node_desc, device_modify->node_desc, 64);
		for (i = 0; i < dd->num_pports; i++) {
			struct qib_ibport *ibp = &dd->pport[i].ibport_data;

			qib_node_desc_chg(ibp);
		}
	}

	if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
		ib_qib_sys_image_guid =
			cpu_to_be64(device_modify->sys_image_guid);
		for (i = 0; i < dd->num_pports; i++) {
			struct qib_ibport *ibp = &dd->pport[i].ibport_data;

			qib_sys_guid_chg(ibp);
		}
	}

	ret = 0;

bail:
	return ret;
}

static int qib_modify_port(struct ib_device *ibdev, u8 port,
			   int port_modify_mask, struct ib_port_modify *props)
{
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);

	ibp->port_cap_flags |= props->set_port_cap_mask;
	ibp->port_cap_flags &= ~props->clr_port_cap_mask;
	if (props->set_port_cap_mask || props->clr_port_cap_mask)
		qib_cap_mask_chg(ibp);
	if (port_modify_mask & IB_PORT_SHUTDOWN)
		qib_set_linkstate(ppd, QIB_IB_LINKDOWN);
	if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
		ibp->qkey_violations = 0;
	return 0;
}

static int qib_query_gid(struct ib_device *ibdev, u8 port,
			 int index, union ib_gid *gid)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	int ret = 0;

	if (!port || port > dd->num_pports)
		ret = -EINVAL;
	else {
		struct qib_ibport *ibp = to_iport(ibdev, port);
		struct qib_pportdata *ppd = ppd_from_ibp(ibp);

		gid->global.subnet_prefix = ibp->gid_prefix;
		if (index == 0)
			gid->global.interface_id = ppd->guid;
		else if (index < QIB_GUIDS_PER_PORT)
			gid->global.interface_id = ibp->guids[index - 1];
		else
			ret = -EINVAL;
	}

	return ret;
}
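
/*
 * qib_alloc_pd - allocate a protection domain, enforcing the
 * (arbitrary) ib_qib_max_pds limit described in the comment below.
 */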
static struct ib_pd *qib_alloc_pd(struct ib_device *ibdev,
				  struct ib_ucontext *context,
				  struct ib_udata *udata)
{
	struct qib_ibdev *dev = to_idev(ibdev);
	struct qib_pd *pd;
	struct ib_pd *ret;

	/*
	 * This is actually totally arbitrary.  Some correctness tests
	 * assume there's a maximum number of PDs that can be allocated.
	 * We don't actually have this limit, but we fail the test if
	 * we allow allocations of more than we report for this value.
	 */

	pd = kmalloc(sizeof *pd, GFP_KERNEL);
	if (!pd) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	spin_lock(&dev->n_pds_lock);
	if (dev->n_pds_allocated == ib_qib_max_pds) {
		spin_unlock(&dev->n_pds_lock);
		kfree(pd);
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	dev->n_pds_allocated++;
	spin_unlock(&dev->n_pds_lock);

	/* ib_alloc_pd() will initialize pd->ibpd. */
	pd->user = udata != NULL;

	ret = &pd->ibpd;

bail:
	return ret;
}

static int qib_dealloc_pd(struct ib_pd *ibpd)
{
	struct qib_pd *pd = to_ipd(ibpd);
	struct qib_ibdev *dev = to_idev(ibpd->device);

	spin_lock(&dev->n_pds_lock);
	dev->n_pds_allocated--;
	spin_unlock(&dev->n_pds_lock);

	kfree(pd);

	return 0;
}
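
/*
 * qib_check_ah - validate address handle attributes
 *
 * Return 0 if the DLID, GRH, port number, static rate, and SL are all
 * in range, -EINVAL otherwise.
 */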
int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr)
{
	/* A multicast address requires a GRH (see ch. 8.4.1). */
	if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE &&
	    ah_attr->dlid != QIB_PERMISSIVE_LID &&
	    !(ah_attr->ah_flags & IB_AH_GRH))
		goto bail;
	if ((ah_attr->ah_flags & IB_AH_GRH) &&
	    ah_attr->grh.sgid_index >= QIB_GUIDS_PER_PORT)
		goto bail;
	if (ah_attr->dlid == 0)
		goto bail;
	if (ah_attr->port_num < 1 ||
	    ah_attr->port_num > ibdev->phys_port_cnt)
		goto bail;
	if (ah_attr->static_rate != IB_RATE_PORT_CURRENT &&
	    ib_rate_to_mult(ah_attr->static_rate) < 0)
		goto bail;
	if (ah_attr->sl > 15)
		goto bail;
	return 0;
bail:
	return -EINVAL;
}

/**
 * qib_create_ah - create an address handle
 * @pd: the protection domain
 * @ah_attr: the attributes of the AH
 *
 * This may be called from interrupt context.
 */
static struct ib_ah *qib_create_ah(struct ib_pd *pd,
				   struct ib_ah_attr *ah_attr)
{
	struct qib_ah *ah;
	struct ib_ah *ret;
	struct qib_ibdev *dev = to_idev(pd->device);
	unsigned long flags;

	if (qib_check_ah(pd->device, ah_attr)) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	ah = kmalloc(sizeof *ah, GFP_ATOMIC);
	if (!ah) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	spin_lock_irqsave(&dev->n_ahs_lock, flags);
	if (dev->n_ahs_allocated == ib_qib_max_ahs) {
		spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
		kfree(ah);
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	dev->n_ahs_allocated++;
	spin_unlock_irqrestore(&dev->n_ahs_lock, flags);

	/* ib_create_ah() will initialize ah->ibah. */
	ah->attr = *ah_attr;
	atomic_set(&ah->refcount, 0);

	ret = &ah->ibah;

bail:
	return ret;
}

/**
 * qib_destroy_ah - destroy an address handle
 * @ibah: the AH to destroy
 *
 * This may be called from interrupt context.
 */
static int qib_destroy_ah(struct ib_ah *ibah)
{
	struct qib_ibdev *dev = to_idev(ibah->device);
	struct qib_ah *ah = to_iah(ibah);
	unsigned long flags;

	if (atomic_read(&ah->refcount) != 0)
		return -EBUSY;

	spin_lock_irqsave(&dev->n_ahs_lock, flags);
	dev->n_ahs_allocated--;
	spin_unlock_irqrestore(&dev->n_ahs_lock, flags);

	kfree(ah);

	return 0;
}

static int qib_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
{
	struct qib_ah *ah = to_iah(ibah);

	if (qib_check_ah(ibah->device, ah_attr))
		return -EINVAL;

	ah->attr = *ah_attr;

	return 0;
}

static int qib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
{
	struct qib_ah *ah = to_iah(ibah);

	*ah_attr = ah->attr;

	return 0;
}

/**
 * qib_get_npkeys - return the size of the PKEY table for context 0
 * @dd: the qlogic_ib device
 */
unsigned qib_get_npkeys(struct qib_devdata *dd)
{
	return ARRAY_SIZE(dd->rcd[0]->pkeys);
}

/*
 * Return the indexed PKEY from the port PKEY table.
 * No need to validate rcd[ctxt]; the port is setup if we are here.
 */
unsigned qib_get_pkey(struct qib_ibport *ibp, unsigned index)
{
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = ppd->dd;
	unsigned ctxt = ppd->hw_pidx;
	unsigned ret;

	/* dd->rcd null if mini_init or some init failures */
	if (!dd->rcd || index >= ARRAY_SIZE(dd->rcd[ctxt]->pkeys))
		ret = 0;
	else
		ret = dd->rcd[ctxt]->pkeys[index];

	return ret;
}

static int qib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			  u16 *pkey)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	int ret;

	if (index >= qib_get_npkeys(dd)) {
		ret = -EINVAL;
		goto bail;
	}

	*pkey = qib_get_pkey(to_iport(ibdev, port), index);
	ret = 0;

bail:
	return ret;
}

/**
 * qib_alloc_ucontext - allocate a ucontext
 * @ibdev: the infiniband device
 * @udata: not used by the QLogic_IB driver
 */
static struct ib_ucontext *qib_alloc_ucontext(struct ib_device *ibdev,
					      struct ib_udata *udata)
{
	struct qib_ucontext *context;
	struct ib_ucontext *ret;

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	ret = &context->ibucontext;

bail:
	return ret;
}

static int qib_dealloc_ucontext(struct ib_ucontext *context)
{
	kfree(to_iucontext(context));
	return 0;
}
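
/*
 * init_ibport - initialize the verbs state of one port
 *
 * Sets the default GID prefix and port capability flags, and snapshots
 * the hardware counters so later PMA queries report deltas from driver
 * load rather than raw chip values.
 */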
static void init_ibport(struct qib_pportdata *ppd)
{
	struct qib_verbs_counters cntrs;
	struct qib_ibport *ibp = &ppd->ibport_data;

	spin_lock_init(&ibp->lock);
	/* Set the prefix to the default value (see ch. 4.1.1) */
	ibp->gid_prefix = IB_DEFAULT_GID_PREFIX;
	ibp->sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
	ibp->port_cap_flags = IB_PORT_SYS_IMAGE_GUID_SUP |
		IB_PORT_CLIENT_REG_SUP | IB_PORT_SL_MAP_SUP |
		IB_PORT_TRAP_SUP | IB_PORT_AUTO_MIGR_SUP |
		IB_PORT_DR_NOTICE_SUP | IB_PORT_CAP_MASK_NOTICE_SUP |
		IB_PORT_OTHER_LOCAL_CHANGES_SUP;
	if (ppd->dd->flags & QIB_HAS_LINK_LATENCY)
		ibp->port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
	ibp->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
	ibp->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
	ibp->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
	ibp->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
	ibp->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;

	/* Snapshot current HW counters to "clear" them. */
	qib_get_counters(ppd, &cntrs);
	ibp->z_symbol_error_counter = cntrs.symbol_error_counter;
	ibp->z_link_error_recovery_counter =
		cntrs.link_error_recovery_counter;
	ibp->z_link_downed_counter = cntrs.link_downed_counter;
	ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
	ibp->z_port_rcv_remphys_errors = cntrs.port_rcv_remphys_errors;
	ibp->z_port_xmit_discards = cntrs.port_xmit_discards;
	ibp->z_port_xmit_data = cntrs.port_xmit_data;
	ibp->z_port_rcv_data = cntrs.port_rcv_data;
	ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
	ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
	ibp->z_local_link_integrity_errors =
		cntrs.local_link_integrity_errors;
	ibp->z_excessive_buffer_overrun_errors =
		cntrs.excessive_buffer_overrun_errors;
	ibp->z_vl15_dropped = cntrs.vl15_dropped;
}

/**
 * qib_register_ib_device - register our device with the infiniband core
 * @dd: the device data structure
 *
 * Return 0 if successful, a negative errno otherwise.
 */
int qib_register_ib_device(struct qib_devdata *dd)
{
	struct qib_ibdev *dev = &dd->verbs_dev;
	struct ib_device *ibdev = &dev->ibdev;
	struct qib_pportdata *ppd = dd->pport;
	unsigned i, lk_tab_size;
	int ret;

	dev->qp_table_size = ib_qib_qp_table_size;
	dev->qp_table = kzalloc(dev->qp_table_size * sizeof *dev->qp_table,
				GFP_KERNEL);
	if (!dev->qp_table) {
		ret = -ENOMEM;
		goto err_qpt;
	}

	for (i = 0; i < dd->num_pports; i++)
		init_ibport(ppd + i);

	/* Only need to initialize non-zero fields. */
	spin_lock_init(&dev->qpt_lock);
	spin_lock_init(&dev->n_pds_lock);
	spin_lock_init(&dev->n_ahs_lock);
	spin_lock_init(&dev->n_cqs_lock);
	spin_lock_init(&dev->n_qps_lock);
	spin_lock_init(&dev->n_srqs_lock);
	spin_lock_init(&dev->n_mcast_grps_lock);
	init_timer(&dev->mem_timer);
	dev->mem_timer.function = mem_timer;
	dev->mem_timer.data = (unsigned long) dev;

	qib_init_qpn_table(dd, &dev->qpn_table);

	/*
	 * The top ib_qib_lkey_table_size bits are used to index the
	 * table.  The lower 8 bits can be owned by the user (copied from
	 * the LKEY).  The remaining bits act as a generation number or tag.
	 */
	spin_lock_init(&dev->lk_table.lock);
	dev->lk_table.max = 1 << ib_qib_lkey_table_size;
	lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
	dev->lk_table.table = (struct qib_mregion **)
		__get_free_pages(GFP_KERNEL, get_order(lk_tab_size));
	if (dev->lk_table.table == NULL) {
		ret = -ENOMEM;
		goto err_lk;
	}
	memset(dev->lk_table.table, 0, lk_tab_size);
	INIT_LIST_HEAD(&dev->pending_mmaps);
	spin_lock_init(&dev->pending_lock);
	dev->mmap_offset = PAGE_SIZE;
	spin_lock_init(&dev->mmap_offset_lock);
	INIT_LIST_HEAD(&dev->piowait);
	INIT_LIST_HEAD(&dev->dmawait);
	INIT_LIST_HEAD(&dev->txwait);
	INIT_LIST_HEAD(&dev->memwait);
	INIT_LIST_HEAD(&dev->txreq_free);

	if (ppd->sdma_descq_cnt) {
		dev->pio_hdrs = dma_alloc_coherent(&dd->pcidev->dev,
						   ppd->sdma_descq_cnt *
						   sizeof(struct qib_pio_header),
						   &dev->pio_hdrs_phys,
						   GFP_KERNEL);
		if (!dev->pio_hdrs) {
			ret = -ENOMEM;
			goto err_hdrs;
		}
	}

	for (i = 0; i < ppd->sdma_descq_cnt; i++) {
		struct qib_verbs_txreq *tx;

		tx = kzalloc(sizeof *tx, GFP_KERNEL);
		if (!tx) {
			ret = -ENOMEM;
			goto err_tx;
		}
		tx->hdr_inx = i;
		list_add(&tx->txreq.list, &dev->txreq_free);
	}

	/*
	 * The system image GUID is supposed to be the same for all
	 * IB HCAs in a single system but since there can be other
	 * device types in the system, we can't be sure this is unique.
	 */
	if (!ib_qib_sys_image_guid)
		ib_qib_sys_image_guid = ppd->guid;

	strlcpy(ibdev->name, "qib%d", IB_DEVICE_NAME_MAX);
	ibdev->owner = THIS_MODULE;
	ibdev->node_guid = ppd->guid;
	ibdev->uverbs_abi_ver = QIB_UVERBS_ABI_VERSION;
	ibdev->uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)         |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)        |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)          |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)            |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)          |
		(1ull << IB_USER_VERBS_CMD_CREATE_AH)           |
		(1ull << IB_USER_VERBS_CMD_MODIFY_AH)           |
		(1ull << IB_USER_VERBS_CMD_QUERY_AH)            |
		(1ull << IB_USER_VERBS_CMD_DESTROY_AH)          |
		(1ull << IB_USER_VERBS_CMD_REG_MR)              |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR)            |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)           |
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)           |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)          |
		(1ull << IB_USER_VERBS_CMD_POLL_CQ)             |
		(1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)       |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP)           |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP)            |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)           |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)          |
		(1ull << IB_USER_VERBS_CMD_POST_SEND)           |
		(1ull << IB_USER_VERBS_CMD_POST_RECV)           |
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)        |
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST)        |
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)          |
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)          |
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)           |
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)         |
		(1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
	ibdev->node_type = RDMA_NODE_IB_CA;
	ibdev->phys_port_cnt = dd->num_pports;
	ibdev->num_comp_vectors = 1;
	ibdev->dma_device = &dd->pcidev->dev;
	ibdev->query_device = qib_query_device;
	ibdev->modify_device = qib_modify_device;
	ibdev->query_port = qib_query_port;
	ibdev->modify_port = qib_modify_port;
	ibdev->query_pkey = qib_query_pkey;
	ibdev->query_gid = qib_query_gid;
	ibdev->alloc_ucontext = qib_alloc_ucontext;
	ibdev->dealloc_ucontext = qib_dealloc_ucontext;
	ibdev->alloc_pd = qib_alloc_pd;
	ibdev->dealloc_pd = qib_dealloc_pd;
	ibdev->create_ah = qib_create_ah;
	ibdev->destroy_ah = qib_destroy_ah;
	ibdev->modify_ah = qib_modify_ah;
	ibdev->query_ah = qib_query_ah;
	ibdev->create_srq = qib_create_srq;
	ibdev->modify_srq = qib_modify_srq;
	ibdev->query_srq = qib_query_srq;
	ibdev->destroy_srq = qib_destroy_srq;
	ibdev->create_qp = qib_create_qp;
	ibdev->modify_qp = qib_modify_qp;
	ibdev->query_qp = qib_query_qp;
	ibdev->destroy_qp = qib_destroy_qp;
	ibdev->post_send = qib_post_send;
	ibdev->post_recv = qib_post_receive;
	ibdev->post_srq_recv = qib_post_srq_receive;
	ibdev->create_cq = qib_create_cq;
	ibdev->destroy_cq = qib_destroy_cq;
	ibdev->resize_cq = qib_resize_cq;
	ibdev->poll_cq = qib_poll_cq;
	ibdev->req_notify_cq = qib_req_notify_cq;
	ibdev->get_dma_mr = qib_get_dma_mr;
	ibdev->reg_phys_mr = qib_reg_phys_mr;
	ibdev->reg_user_mr = qib_reg_user_mr;
	ibdev->dereg_mr = qib_dereg_mr;
	ibdev->alloc_fast_reg_mr = qib_alloc_fast_reg_mr;
	ibdev->alloc_fast_reg_page_list = qib_alloc_fast_reg_page_list;
	ibdev->free_fast_reg_page_list = qib_free_fast_reg_page_list;
	ibdev->alloc_fmr = qib_alloc_fmr;
	ibdev->map_phys_fmr = qib_map_phys_fmr;
	ibdev->unmap_fmr = qib_unmap_fmr;
	ibdev->dealloc_fmr = qib_dealloc_fmr;
	ibdev->attach_mcast = qib_multicast_attach;
	ibdev->detach_mcast = qib_multicast_detach;
	ibdev->process_mad = qib_process_mad;
	ibdev->mmap = qib_mmap;
	ibdev->dma_ops = &qib_dma_mapping_ops;

	snprintf(ibdev->node_desc, sizeof(ibdev->node_desc),
		 QIB_IDSTR " %s", init_utsname()->nodename);

	ret = ib_register_device(ibdev, qib_create_port_files);
	if (ret)
		goto err_reg;

	ret = qib_create_agents(dev);
	if (ret)
		goto err_agents;

	if (qib_verbs_register_sysfs(dd))
		goto err_class;

	goto bail;

err_class:
	qib_free_agents(dev);
err_agents:
	ib_unregister_device(ibdev);
err_reg:
err_tx:
	while (!list_empty(&dev->txreq_free)) {
		struct list_head *l = dev->txreq_free.next;
		struct qib_verbs_txreq *tx;

		list_del(l);
		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
		kfree(tx);
	}
	if (ppd->sdma_descq_cnt)
		dma_free_coherent(&dd->pcidev->dev,
				  ppd->sdma_descq_cnt *
				  sizeof(struct qib_pio_header),
				  dev->pio_hdrs, dev->pio_hdrs_phys);
err_hdrs:
	free_pages((unsigned long) dev->lk_table.table, get_order(lk_tab_size));
err_lk:
	kfree(dev->qp_table);
err_qpt:
	qib_dev_err(dd, "cannot register verbs: %d!\n", -ret);
bail:
	return ret;
}
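
/*
 * qib_unregister_ib_device - unregister from the IB core
 *
 * Frees the verbs resources allocated by qib_register_ib_device(),
 * warning about any wait-list entries or QPs still outstanding.
 */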
void qib_unregister_ib_device(struct qib_devdata *dd)
{
	struct qib_ibdev *dev = &dd->verbs_dev;
	struct ib_device *ibdev = &dev->ibdev;
	u32 qps_inuse;
	unsigned lk_tab_size;

	qib_verbs_unregister_sysfs(dd);

	qib_free_agents(dev);

	ib_unregister_device(ibdev);

	if (!list_empty(&dev->piowait))
		qib_dev_err(dd, "piowait list not empty!\n");
	if (!list_empty(&dev->dmawait))
		qib_dev_err(dd, "dmawait list not empty!\n");
	if (!list_empty(&dev->txwait))
		qib_dev_err(dd, "txwait list not empty!\n");
	if (!list_empty(&dev->memwait))
		qib_dev_err(dd, "memwait list not empty!\n");
	if (dev->dma_mr)
		qib_dev_err(dd, "DMA MR not NULL!\n");

	qps_inuse = qib_free_all_qps(dd);
	if (qps_inuse)
		qib_dev_err(dd, "QP memory leak! %u still in use\n",
			    qps_inuse);

	del_timer_sync(&dev->mem_timer);
	qib_free_qpn_table(&dev->qpn_table);
	while (!list_empty(&dev->txreq_free)) {
		struct list_head *l = dev->txreq_free.next;
		struct qib_verbs_txreq *tx;

		list_del(l);
		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
		kfree(tx);
	}
	if (dd->pport->sdma_descq_cnt)
		dma_free_coherent(&dd->pcidev->dev,
				  dd->pport->sdma_descq_cnt *
				  sizeof(struct qib_pio_header),
				  dev->pio_hdrs, dev->pio_hdrs_phys);
	lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
	free_pages((unsigned long) dev->lk_table.table,
		   get_order(lk_tab_size));
	kfree(dev->qp_table);
}