1f931551bSRalph Campbell /* 2*e2eed58bSVinit Agnihotri * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved. 31fb9fed6SMike Marciniszyn * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. 4f931551bSRalph Campbell * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. 5f931551bSRalph Campbell * 6f931551bSRalph Campbell * This software is available to you under a choice of one of two 7f931551bSRalph Campbell * licenses. You may choose to be licensed under the terms of the GNU 8f931551bSRalph Campbell * General Public License (GPL) Version 2, available from the file 9f931551bSRalph Campbell * COPYING in the main directory of this source tree, or the 10f931551bSRalph Campbell * OpenIB.org BSD license below: 11f931551bSRalph Campbell * 12f931551bSRalph Campbell * Redistribution and use in source and binary forms, with or 13f931551bSRalph Campbell * without modification, are permitted provided that the following 14f931551bSRalph Campbell * conditions are met: 15f931551bSRalph Campbell * 16f931551bSRalph Campbell * - Redistributions of source code must retain the above 17f931551bSRalph Campbell * copyright notice, this list of conditions and the following 18f931551bSRalph Campbell * disclaimer. 19f931551bSRalph Campbell * 20f931551bSRalph Campbell * - Redistributions in binary form must reproduce the above 21f931551bSRalph Campbell * copyright notice, this list of conditions and the following 22f931551bSRalph Campbell * disclaimer in the documentation and/or other materials 23f931551bSRalph Campbell * provided with the distribution. 24f931551bSRalph Campbell * 25f931551bSRalph Campbell * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 26f931551bSRalph Campbell * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 27f931551bSRalph Campbell * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 28f931551bSRalph Campbell * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 29f931551bSRalph Campbell * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 30f931551bSRalph Campbell * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 31f931551bSRalph Campbell * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 32f931551bSRalph Campbell * SOFTWARE. 33f931551bSRalph Campbell */ 34f931551bSRalph Campbell 35f931551bSRalph Campbell #include <rdma/ib_mad.h> 36f931551bSRalph Campbell #include <rdma/ib_user_verbs.h> 37f931551bSRalph Campbell #include <linux/io.h> 38e4dd23d7SPaul Gortmaker #include <linux/module.h> 39f931551bSRalph Campbell #include <linux/utsname.h> 40f931551bSRalph Campbell #include <linux/rculist.h> 41f931551bSRalph Campbell #include <linux/mm.h> 42af061a64SMike Marciniszyn #include <linux/random.h> 43f931551bSRalph Campbell 44f931551bSRalph Campbell #include "qib.h" 45f931551bSRalph Campbell #include "qib_common.h" 46f931551bSRalph Campbell 47af061a64SMike Marciniszyn static unsigned int ib_qib_qp_table_size = 256; 48f931551bSRalph Campbell module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO); 49f931551bSRalph Campbell MODULE_PARM_DESC(qp_table_size, "QP table size"); 50f931551bSRalph Campbell 51f931551bSRalph Campbell unsigned int ib_qib_lkey_table_size = 16; 52f931551bSRalph Campbell module_param_named(lkey_table_size, ib_qib_lkey_table_size, uint, 53f931551bSRalph Campbell S_IRUGO); 54f931551bSRalph Campbell MODULE_PARM_DESC(lkey_table_size, 55f931551bSRalph Campbell "LKEY table size in bits (2^n, 1 <= n <= 23)"); 56f931551bSRalph Campbell 57f931551bSRalph Campbell static unsigned int ib_qib_max_pds = 0xFFFF; 58f931551bSRalph Campbell module_param_named(max_pds, ib_qib_max_pds, uint, S_IRUGO); 59f931551bSRalph Campbell MODULE_PARM_DESC(max_pds, 60f931551bSRalph Campbell "Maximum number of protection domains to support"); 61f931551bSRalph Campbell 62f931551bSRalph Campbell static unsigned int ib_qib_max_ahs = 0xFFFF; 63f931551bSRalph Campbell module_param_named(max_ahs, ib_qib_max_ahs, uint, S_IRUGO); 64f931551bSRalph Campbell MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support"); 65f931551bSRalph Campbell 66f931551bSRalph Campbell unsigned int ib_qib_max_cqes = 0x2FFFF; 67f931551bSRalph Campbell module_param_named(max_cqes, ib_qib_max_cqes, uint, S_IRUGO); 68f931551bSRalph Campbell MODULE_PARM_DESC(max_cqes, 69f931551bSRalph Campbell "Maximum number of completion queue entries to support"); 70f931551bSRalph Campbell 71f931551bSRalph Campbell unsigned int ib_qib_max_cqs = 0x1FFFF; 72f931551bSRalph Campbell module_param_named(max_cqs, ib_qib_max_cqs, uint, S_IRUGO); 73f931551bSRalph Campbell MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support"); 74f931551bSRalph Campbell 75f931551bSRalph Campbell unsigned int ib_qib_max_qp_wrs = 0x3FFF; 76f931551bSRalph Campbell module_param_named(max_qp_wrs, ib_qib_max_qp_wrs, uint, S_IRUGO); 77f931551bSRalph Campbell MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support"); 78f931551bSRalph Campbell 79f931551bSRalph Campbell unsigned int ib_qib_max_qps = 16384; 80f931551bSRalph Campbell module_param_named(max_qps, ib_qib_max_qps, uint, S_IRUGO); 81f931551bSRalph Campbell MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support"); 82f931551bSRalph Campbell 83f931551bSRalph Campbell unsigned int ib_qib_max_sges = 0x60; 84f931551bSRalph Campbell module_param_named(max_sges, ib_qib_max_sges, uint, S_IRUGO); 85f931551bSRalph Campbell MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support"); 86f931551bSRalph Campbell 87f931551bSRalph Campbell unsigned int ib_qib_max_mcast_grps = 16384; 88f931551bSRalph Campbell module_param_named(max_mcast_grps, ib_qib_max_mcast_grps, uint, S_IRUGO); 89f931551bSRalph Campbell MODULE_PARM_DESC(max_mcast_grps, 90f931551bSRalph Campbell "Maximum number of multicast groups to support"); 91f931551bSRalph Campbell 92f931551bSRalph Campbell unsigned int ib_qib_max_mcast_qp_attached = 16; 93f931551bSRalph Campbell module_param_named(max_mcast_qp_attached, ib_qib_max_mcast_qp_attached, 94f931551bSRalph Campbell uint, S_IRUGO); 95f931551bSRalph Campbell MODULE_PARM_DESC(max_mcast_qp_attached, 96f931551bSRalph Campbell "Maximum number of attached QPs to support"); 97f931551bSRalph Campbell 98f931551bSRalph Campbell unsigned int ib_qib_max_srqs = 1024; 99f931551bSRalph Campbell module_param_named(max_srqs, ib_qib_max_srqs, uint, S_IRUGO); 100f931551bSRalph Campbell MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support"); 101f931551bSRalph Campbell 102f931551bSRalph Campbell unsigned int ib_qib_max_srq_sges = 128; 103f931551bSRalph Campbell module_param_named(max_srq_sges, ib_qib_max_srq_sges, uint, S_IRUGO); 104f931551bSRalph Campbell MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support"); 105f931551bSRalph Campbell 106f931551bSRalph Campbell unsigned int ib_qib_max_srq_wrs = 0x1FFFF; 107f931551bSRalph Campbell module_param_named(max_srq_wrs, ib_qib_max_srq_wrs, uint, S_IRUGO); 108f931551bSRalph Campbell MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs support"); 109f931551bSRalph Campbell 110f931551bSRalph Campbell static unsigned int ib_qib_disable_sma; 111f931551bSRalph Campbell module_param_named(disable_sma, ib_qib_disable_sma, uint, S_IWUSR | S_IRUGO); 112f931551bSRalph Campbell MODULE_PARM_DESC(disable_sma, "Disable the SMA"); 113f931551bSRalph Campbell 114f931551bSRalph Campbell /* 115f931551bSRalph Campbell * Note that it is OK to post send work requests in the SQE and ERR 116f931551bSRalph Campbell * states; qib_do_send() will process them and generate error 117f931551bSRalph Campbell * completions as per IB 1.2 C10-96. 118f931551bSRalph Campbell */ 119f931551bSRalph Campbell const int ib_qib_state_ops[IB_QPS_ERR + 1] = { 120f931551bSRalph Campbell [IB_QPS_RESET] = 0, 121f931551bSRalph Campbell [IB_QPS_INIT] = QIB_POST_RECV_OK, 122f931551bSRalph Campbell [IB_QPS_RTR] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK, 123f931551bSRalph Campbell [IB_QPS_RTS] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK | 124f931551bSRalph Campbell QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK | 125f931551bSRalph Campbell QIB_PROCESS_NEXT_SEND_OK, 126f931551bSRalph Campbell [IB_QPS_SQD] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK | 127f931551bSRalph Campbell QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK, 128f931551bSRalph Campbell [IB_QPS_SQE] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK | 129f931551bSRalph Campbell QIB_POST_SEND_OK | QIB_FLUSH_SEND, 130f931551bSRalph Campbell [IB_QPS_ERR] = QIB_POST_RECV_OK | QIB_FLUSH_RECV | 131f931551bSRalph Campbell QIB_POST_SEND_OK | QIB_FLUSH_SEND, 132f931551bSRalph Campbell }; 133f931551bSRalph Campbell 134f931551bSRalph Campbell struct qib_ucontext { 135f931551bSRalph Campbell struct ib_ucontext ibucontext; 136f931551bSRalph Campbell }; 137f931551bSRalph Campbell 138f931551bSRalph Campbell static inline struct qib_ucontext *to_iucontext(struct ib_ucontext 139f931551bSRalph Campbell *ibucontext) 140f931551bSRalph Campbell { 141f931551bSRalph Campbell return container_of(ibucontext, struct qib_ucontext, ibucontext); 142f931551bSRalph Campbell } 143f931551bSRalph Campbell 144f931551bSRalph Campbell /* 145f931551bSRalph Campbell * Translate ib_wr_opcode into ib_wc_opcode. 146f931551bSRalph Campbell */ 147f931551bSRalph Campbell const enum ib_wc_opcode ib_qib_wc_opcode[] = { 148f931551bSRalph Campbell [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE, 149f931551bSRalph Campbell [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE, 150f931551bSRalph Campbell [IB_WR_SEND] = IB_WC_SEND, 151f931551bSRalph Campbell [IB_WR_SEND_WITH_IMM] = IB_WC_SEND, 152f931551bSRalph Campbell [IB_WR_RDMA_READ] = IB_WC_RDMA_READ, 153f931551bSRalph Campbell [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP, 154f931551bSRalph Campbell [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD 155f931551bSRalph Campbell }; 156f931551bSRalph Campbell 157f931551bSRalph Campbell /* 158f931551bSRalph Campbell * System image GUID. 159f931551bSRalph Campbell */ 160f931551bSRalph Campbell __be64 ib_qib_sys_image_guid; 161f931551bSRalph Campbell 162f931551bSRalph Campbell /** 163f931551bSRalph Campbell * qib_copy_sge - copy data to SGE memory 164f931551bSRalph Campbell * @ss: the SGE state 165f931551bSRalph Campbell * @data: the data to copy 166f931551bSRalph Campbell * @length: the length of the data 167f931551bSRalph Campbell */ 168f931551bSRalph Campbell void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length, int release) 169f931551bSRalph Campbell { 170f931551bSRalph Campbell struct qib_sge *sge = &ss->sge; 171f931551bSRalph Campbell 172f931551bSRalph Campbell while (length) { 173f931551bSRalph Campbell u32 len = sge->length; 174f931551bSRalph Campbell 175f931551bSRalph Campbell if (len > length) 176f931551bSRalph Campbell len = length; 177f931551bSRalph Campbell if (len > sge->sge_length) 178f931551bSRalph Campbell len = sge->sge_length; 179f931551bSRalph Campbell BUG_ON(len == 0); 180f931551bSRalph Campbell memcpy(sge->vaddr, data, len); 181f931551bSRalph Campbell sge->vaddr += len; 182f931551bSRalph Campbell sge->length -= len; 183f931551bSRalph Campbell sge->sge_length -= len; 184f931551bSRalph Campbell if (sge->sge_length == 0) { 185f931551bSRalph Campbell if (release) 1866a82649fSMike Marciniszyn qib_put_mr(sge->mr); 187f931551bSRalph Campbell if (--ss->num_sge) 188f931551bSRalph Campbell *sge = *ss->sg_list++; 189f931551bSRalph Campbell } else if (sge->length == 0 && sge->mr->lkey) { 190f931551bSRalph Campbell if (++sge->n >= QIB_SEGSZ) { 191f931551bSRalph Campbell if (++sge->m >= sge->mr->mapsz) 192f931551bSRalph Campbell break; 193f931551bSRalph Campbell sge->n = 0; 194f931551bSRalph Campbell } 195f931551bSRalph Campbell sge->vaddr = 196f931551bSRalph Campbell sge->mr->map[sge->m]->segs[sge->n].vaddr; 197f931551bSRalph Campbell sge->length = 198f931551bSRalph Campbell sge->mr->map[sge->m]->segs[sge->n].length; 199f931551bSRalph Campbell } 200f931551bSRalph Campbell data += len; 201f931551bSRalph Campbell length -= len; 202f931551bSRalph Campbell } 203f931551bSRalph Campbell } 204f931551bSRalph Campbell 205f931551bSRalph Campbell /** 206f931551bSRalph Campbell * qib_skip_sge - skip over SGE memory - XXX almost dup of prev func 207f931551bSRalph Campbell * @ss: the SGE state 208f931551bSRalph Campbell * @length: the number of bytes to skip 209f931551bSRalph Campbell */ 210f931551bSRalph Campbell void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release) 211f931551bSRalph Campbell { 212f931551bSRalph Campbell struct qib_sge *sge = &ss->sge; 213f931551bSRalph Campbell 214f931551bSRalph Campbell while (length) { 215f931551bSRalph Campbell u32 len = sge->length; 216f931551bSRalph Campbell 217f931551bSRalph Campbell if (len > length) 218f931551bSRalph Campbell len = length; 219f931551bSRalph Campbell if (len > sge->sge_length) 220f931551bSRalph Campbell len = sge->sge_length; 221f931551bSRalph Campbell BUG_ON(len == 0); 222f931551bSRalph Campbell sge->vaddr += len; 223f931551bSRalph Campbell sge->length -= len; 224f931551bSRalph Campbell sge->sge_length -= len; 225f931551bSRalph Campbell if (sge->sge_length == 0) { 226f931551bSRalph Campbell if (release) 2276a82649fSMike Marciniszyn qib_put_mr(sge->mr); 228f931551bSRalph Campbell if (--ss->num_sge) 229f931551bSRalph Campbell *sge = *ss->sg_list++; 230f931551bSRalph Campbell } else if (sge->length == 0 && sge->mr->lkey) { 231f931551bSRalph Campbell if (++sge->n >= QIB_SEGSZ) { 232f931551bSRalph Campbell if (++sge->m >= sge->mr->mapsz) 233f931551bSRalph Campbell break; 234f931551bSRalph Campbell sge->n = 0; 235f931551bSRalph Campbell } 236f931551bSRalph Campbell sge->vaddr = 237f931551bSRalph Campbell sge->mr->map[sge->m]->segs[sge->n].vaddr; 238f931551bSRalph Campbell sge->length = 239f931551bSRalph Campbell sge->mr->map[sge->m]->segs[sge->n].length; 240f931551bSRalph Campbell } 241f931551bSRalph Campbell length -= len; 242f931551bSRalph Campbell } 243f931551bSRalph Campbell } 244f931551bSRalph Campbell 245f931551bSRalph Campbell /* 246f931551bSRalph Campbell * Count the number of DMA descriptors needed to send length bytes of data. 247f931551bSRalph Campbell * Don't modify the qib_sge_state to get the count. 248f931551bSRalph Campbell * Return zero if any of the segments is not aligned. 249f931551bSRalph Campbell */ 250f931551bSRalph Campbell static u32 qib_count_sge(struct qib_sge_state *ss, u32 length) 251f931551bSRalph Campbell { 252f931551bSRalph Campbell struct qib_sge *sg_list = ss->sg_list; 253f931551bSRalph Campbell struct qib_sge sge = ss->sge; 254f931551bSRalph Campbell u8 num_sge = ss->num_sge; 255f931551bSRalph Campbell u32 ndesc = 1; /* count the header */ 256f931551bSRalph Campbell 257f931551bSRalph Campbell while (length) { 258f931551bSRalph Campbell u32 len = sge.length; 259f931551bSRalph Campbell 260f931551bSRalph Campbell if (len > length) 261f931551bSRalph Campbell len = length; 262f931551bSRalph Campbell if (len > sge.sge_length) 263f931551bSRalph Campbell len = sge.sge_length; 264f931551bSRalph Campbell BUG_ON(len == 0); 265f931551bSRalph Campbell if (((long) sge.vaddr & (sizeof(u32) - 1)) || 266f931551bSRalph Campbell (len != length && (len & (sizeof(u32) - 1)))) { 267f931551bSRalph Campbell ndesc = 0; 268f931551bSRalph Campbell break; 269f931551bSRalph Campbell } 270f931551bSRalph Campbell ndesc++; 271f931551bSRalph Campbell sge.vaddr += len; 272f931551bSRalph Campbell sge.length -= len; 273f931551bSRalph Campbell sge.sge_length -= len; 274f931551bSRalph Campbell if (sge.sge_length == 0) { 275f931551bSRalph Campbell if (--num_sge) 276f931551bSRalph Campbell sge = *sg_list++; 277f931551bSRalph Campbell } else if (sge.length == 0 && sge.mr->lkey) { 278f931551bSRalph Campbell if (++sge.n >= QIB_SEGSZ) { 279f931551bSRalph Campbell if (++sge.m >= sge.mr->mapsz) 280f931551bSRalph Campbell break; 281f931551bSRalph Campbell sge.n = 0; 282f931551bSRalph Campbell } 283f931551bSRalph Campbell sge.vaddr = 284f931551bSRalph Campbell sge.mr->map[sge.m]->segs[sge.n].vaddr; 285f931551bSRalph Campbell sge.length = 286f931551bSRalph Campbell sge.mr->map[sge.m]->segs[sge.n].length; 287f931551bSRalph Campbell } 288f931551bSRalph Campbell length -= len; 289f931551bSRalph Campbell } 290f931551bSRalph Campbell return ndesc; 291f931551bSRalph Campbell } 292f931551bSRalph Campbell 293f931551bSRalph Campbell /* 294f931551bSRalph Campbell * Copy from the SGEs to the data buffer. 295f931551bSRalph Campbell */ 296f931551bSRalph Campbell static void qib_copy_from_sge(void *data, struct qib_sge_state *ss, u32 length) 297f931551bSRalph Campbell { 298f931551bSRalph Campbell struct qib_sge *sge = &ss->sge; 299f931551bSRalph Campbell 300f931551bSRalph Campbell while (length) { 301f931551bSRalph Campbell u32 len = sge->length; 302f931551bSRalph Campbell 303f931551bSRalph Campbell if (len > length) 304f931551bSRalph Campbell len = length; 305f931551bSRalph Campbell if (len > sge->sge_length) 306f931551bSRalph Campbell len = sge->sge_length; 307f931551bSRalph Campbell BUG_ON(len == 0); 308f931551bSRalph Campbell memcpy(data, sge->vaddr, len); 309f931551bSRalph Campbell sge->vaddr += len; 310f931551bSRalph Campbell sge->length -= len; 311f931551bSRalph Campbell sge->sge_length -= len; 312f931551bSRalph Campbell if (sge->sge_length == 0) { 313f931551bSRalph Campbell if (--ss->num_sge) 314f931551bSRalph Campbell *sge = *ss->sg_list++; 315f931551bSRalph Campbell } else if (sge->length == 0 && sge->mr->lkey) { 316f931551bSRalph Campbell if (++sge->n >= QIB_SEGSZ) { 317f931551bSRalph Campbell if (++sge->m >= sge->mr->mapsz) 318f931551bSRalph Campbell break; 319f931551bSRalph Campbell sge->n = 0; 320f931551bSRalph Campbell } 321f931551bSRalph Campbell sge->vaddr = 322f931551bSRalph Campbell sge->mr->map[sge->m]->segs[sge->n].vaddr; 323f931551bSRalph Campbell sge->length = 324f931551bSRalph Campbell sge->mr->map[sge->m]->segs[sge->n].length; 325f931551bSRalph Campbell } 326f931551bSRalph Campbell data += len; 327f931551bSRalph Campbell length -= len; 328f931551bSRalph Campbell } 329f931551bSRalph Campbell } 330f931551bSRalph Campbell 331f931551bSRalph Campbell /** 332f931551bSRalph Campbell * qib_post_one_send - post one RC, UC, or UD send work request 333f931551bSRalph Campbell * @qp: the QP to post on 334f931551bSRalph Campbell * @wr: the work request to send 335f931551bSRalph Campbell */ 336551ace12SMike Marciniszyn static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr, 337551ace12SMike Marciniszyn int *scheduled) 338f931551bSRalph Campbell { 339f931551bSRalph Campbell struct qib_swqe *wqe; 340f931551bSRalph Campbell u32 next; 341f931551bSRalph Campbell int i; 342f931551bSRalph Campbell int j; 343f931551bSRalph Campbell int acc; 344f931551bSRalph Campbell int ret; 345f931551bSRalph Campbell unsigned long flags; 346f931551bSRalph Campbell struct qib_lkey_table *rkt; 347f931551bSRalph Campbell struct qib_pd *pd; 348f931551bSRalph Campbell 349f931551bSRalph Campbell spin_lock_irqsave(&qp->s_lock, flags); 350f931551bSRalph Campbell 351f931551bSRalph Campbell /* Check that state is OK to post send. */ 352f931551bSRalph Campbell if (unlikely(!(ib_qib_state_ops[qp->state] & QIB_POST_SEND_OK))) 353f931551bSRalph Campbell goto bail_inval; 354f931551bSRalph Campbell 355f931551bSRalph Campbell /* IB spec says that num_sge == 0 is OK. */ 356f931551bSRalph Campbell if (wr->num_sge > qp->s_max_sge) 357f931551bSRalph Campbell goto bail_inval; 358f931551bSRalph Campbell 359f931551bSRalph Campbell /* 360f931551bSRalph Campbell * Don't allow RDMA reads or atomic operations on UC or 361f931551bSRalph Campbell * undefined operations. 362f931551bSRalph Campbell * Make sure buffer is large enough to hold the result for atomics. 363f931551bSRalph Campbell */ 364f931551bSRalph Campbell if (wr->opcode == IB_WR_FAST_REG_MR) { 365f931551bSRalph Campbell if (qib_fast_reg_mr(qp, wr)) 366f931551bSRalph Campbell goto bail_inval; 367f931551bSRalph Campbell } else if (qp->ibqp.qp_type == IB_QPT_UC) { 368f931551bSRalph Campbell if ((unsigned) wr->opcode >= IB_WR_RDMA_READ) 369f931551bSRalph Campbell goto bail_inval; 370f931551bSRalph Campbell } else if (qp->ibqp.qp_type != IB_QPT_RC) { 371f931551bSRalph Campbell /* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */ 372f931551bSRalph Campbell if (wr->opcode != IB_WR_SEND && 373f931551bSRalph Campbell wr->opcode != IB_WR_SEND_WITH_IMM) 374f931551bSRalph Campbell goto bail_inval; 375f931551bSRalph Campbell /* Check UD destination address PD */ 376f931551bSRalph Campbell if (qp->ibqp.pd != wr->wr.ud.ah->pd) 377f931551bSRalph Campbell goto bail_inval; 378f931551bSRalph Campbell } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) 379f931551bSRalph Campbell goto bail_inval; 380f931551bSRalph Campbell else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP && 381f931551bSRalph Campbell (wr->num_sge == 0 || 382f931551bSRalph Campbell wr->sg_list[0].length < sizeof(u64) || 383f931551bSRalph Campbell wr->sg_list[0].addr & (sizeof(u64) - 1))) 384f931551bSRalph Campbell goto bail_inval; 385f931551bSRalph Campbell else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic) 386f931551bSRalph Campbell goto bail_inval; 387f931551bSRalph Campbell 388f931551bSRalph Campbell next = qp->s_head + 1; 389f931551bSRalph Campbell if (next >= qp->s_size) 390f931551bSRalph Campbell next = 0; 391f931551bSRalph Campbell if (next == qp->s_last) { 392f931551bSRalph Campbell ret = -ENOMEM; 393f931551bSRalph Campbell goto bail; 394f931551bSRalph Campbell } 395f931551bSRalph Campbell 396f931551bSRalph Campbell rkt = &to_idev(qp->ibqp.device)->lk_table; 397f931551bSRalph Campbell pd = to_ipd(qp->ibqp.pd); 398f931551bSRalph Campbell wqe = get_swqe_ptr(qp, qp->s_head); 399f931551bSRalph Campbell wqe->wr = *wr; 400f931551bSRalph Campbell wqe->length = 0; 401f931551bSRalph Campbell j = 0; 402f931551bSRalph Campbell if (wr->num_sge) { 403f931551bSRalph Campbell acc = wr->opcode >= IB_WR_RDMA_READ ? 404f931551bSRalph Campbell IB_ACCESS_LOCAL_WRITE : 0; 405f931551bSRalph Campbell for (i = 0; i < wr->num_sge; i++) { 406f931551bSRalph Campbell u32 length = wr->sg_list[i].length; 407f931551bSRalph Campbell int ok; 408f931551bSRalph Campbell 409f931551bSRalph Campbell if (length == 0) 410f931551bSRalph Campbell continue; 411f931551bSRalph Campbell ok = qib_lkey_ok(rkt, pd, &wqe->sg_list[j], 412f931551bSRalph Campbell &wr->sg_list[i], acc); 413f931551bSRalph Campbell if (!ok) 414f931551bSRalph Campbell goto bail_inval_free; 415f931551bSRalph Campbell wqe->length += length; 416f931551bSRalph Campbell j++; 417f931551bSRalph Campbell } 418f931551bSRalph Campbell wqe->wr.num_sge = j; 419f931551bSRalph Campbell } 420f931551bSRalph Campbell if (qp->ibqp.qp_type == IB_QPT_UC || 421f931551bSRalph Campbell qp->ibqp.qp_type == IB_QPT_RC) { 422f931551bSRalph Campbell if (wqe->length > 0x80000000U) 423f931551bSRalph Campbell goto bail_inval_free; 424f931551bSRalph Campbell } else if (wqe->length > (dd_from_ibdev(qp->ibqp.device)->pport + 425f931551bSRalph Campbell qp->port_num - 1)->ibmtu) 426f931551bSRalph Campbell goto bail_inval_free; 427f931551bSRalph Campbell else 428f931551bSRalph Campbell atomic_inc(&to_iah(wr->wr.ud.ah)->refcount); 429f931551bSRalph Campbell wqe->ssn = qp->s_ssn++; 430f931551bSRalph Campbell qp->s_head = next; 431f931551bSRalph Campbell 432f931551bSRalph Campbell ret = 0; 433f931551bSRalph Campbell goto bail; 434f931551bSRalph Campbell 435f931551bSRalph Campbell bail_inval_free: 436f931551bSRalph Campbell while (j) { 437f931551bSRalph Campbell struct qib_sge *sge = &wqe->sg_list[--j]; 438f931551bSRalph Campbell 4396a82649fSMike Marciniszyn qib_put_mr(sge->mr); 440f931551bSRalph Campbell } 441f931551bSRalph Campbell bail_inval: 442f931551bSRalph Campbell ret = -EINVAL; 443f931551bSRalph Campbell bail: 444551ace12SMike Marciniszyn if (!ret && !wr->next && 445551ace12SMike Marciniszyn !qib_sdma_empty( 446551ace12SMike Marciniszyn dd_from_ibdev(qp->ibqp.device)->pport + qp->port_num - 1)) { 447551ace12SMike Marciniszyn qib_schedule_send(qp); 448551ace12SMike Marciniszyn *scheduled = 1; 449551ace12SMike Marciniszyn } 450f931551bSRalph Campbell spin_unlock_irqrestore(&qp->s_lock, flags); 451f931551bSRalph Campbell return ret; 452f931551bSRalph Campbell } 453f931551bSRalph Campbell 454f931551bSRalph Campbell /** 455f931551bSRalph Campbell * qib_post_send - post a send on a QP 456f931551bSRalph Campbell * @ibqp: the QP to post the send on 457f931551bSRalph Campbell * @wr: the list of work requests to post 458f931551bSRalph Campbell * @bad_wr: the first bad WR is put here 459f931551bSRalph Campbell * 460f931551bSRalph Campbell * This may be called from interrupt context. 461f931551bSRalph Campbell */ 462f931551bSRalph Campbell static int qib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, 463f931551bSRalph Campbell struct ib_send_wr **bad_wr) 464f931551bSRalph Campbell { 465f931551bSRalph Campbell struct qib_qp *qp = to_iqp(ibqp); 466f931551bSRalph Campbell int err = 0; 467551ace12SMike Marciniszyn int scheduled = 0; 468f931551bSRalph Campbell 469f931551bSRalph Campbell for (; wr; wr = wr->next) { 470551ace12SMike Marciniszyn err = qib_post_one_send(qp, wr, &scheduled); 471f931551bSRalph Campbell if (err) { 472f931551bSRalph Campbell *bad_wr = wr; 473f931551bSRalph Campbell goto bail; 474f931551bSRalph Campbell } 475f931551bSRalph Campbell } 476f931551bSRalph Campbell 477f931551bSRalph Campbell /* Try to do the send work in the caller's context. */ 478551ace12SMike Marciniszyn if (!scheduled) 479f931551bSRalph Campbell qib_do_send(&qp->s_work); 480f931551bSRalph Campbell 481f931551bSRalph Campbell bail: 482f931551bSRalph Campbell return err; 483f931551bSRalph Campbell } 484f931551bSRalph Campbell 485f931551bSRalph Campbell /** 486f931551bSRalph Campbell * qib_post_receive - post a receive on a QP 487f931551bSRalph Campbell * @ibqp: the QP to post the receive on 488f931551bSRalph Campbell * @wr: the WR to post 489f931551bSRalph Campbell * @bad_wr: the first bad WR is put here 490f931551bSRalph Campbell * 491f931551bSRalph Campbell * This may be called from interrupt context. 492f931551bSRalph Campbell */ 493f931551bSRalph Campbell static int qib_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, 494f931551bSRalph Campbell struct ib_recv_wr **bad_wr) 495f931551bSRalph Campbell { 496f931551bSRalph Campbell struct qib_qp *qp = to_iqp(ibqp); 497f931551bSRalph Campbell struct qib_rwq *wq = qp->r_rq.wq; 498f931551bSRalph Campbell unsigned long flags; 499f931551bSRalph Campbell int ret; 500f931551bSRalph Campbell 501f931551bSRalph Campbell /* Check that state is OK to post receive. */ 502f931551bSRalph Campbell if (!(ib_qib_state_ops[qp->state] & QIB_POST_RECV_OK) || !wq) { 503f931551bSRalph Campbell *bad_wr = wr; 504f931551bSRalph Campbell ret = -EINVAL; 505f931551bSRalph Campbell goto bail; 506f931551bSRalph Campbell } 507f931551bSRalph Campbell 508f931551bSRalph Campbell for (; wr; wr = wr->next) { 509f931551bSRalph Campbell struct qib_rwqe *wqe; 510f931551bSRalph Campbell u32 next; 511f931551bSRalph Campbell int i; 512f931551bSRalph Campbell 513f931551bSRalph Campbell if ((unsigned) wr->num_sge > qp->r_rq.max_sge) { 514f931551bSRalph Campbell *bad_wr = wr; 515f931551bSRalph Campbell ret = -EINVAL; 516f931551bSRalph Campbell goto bail; 517f931551bSRalph Campbell } 518f931551bSRalph Campbell 519f931551bSRalph Campbell spin_lock_irqsave(&qp->r_rq.lock, flags); 520f931551bSRalph Campbell next = wq->head + 1; 521f931551bSRalph Campbell if (next >= qp->r_rq.size) 522f931551bSRalph Campbell next = 0; 523f931551bSRalph Campbell if (next == wq->tail) { 524f931551bSRalph Campbell spin_unlock_irqrestore(&qp->r_rq.lock, flags); 525f931551bSRalph Campbell *bad_wr = wr; 526f931551bSRalph Campbell ret = -ENOMEM; 527f931551bSRalph Campbell goto bail; 528f931551bSRalph Campbell } 529f931551bSRalph Campbell 530f931551bSRalph Campbell wqe = get_rwqe_ptr(&qp->r_rq, wq->head); 531f931551bSRalph Campbell wqe->wr_id = wr->wr_id; 532f931551bSRalph Campbell wqe->num_sge = wr->num_sge; 533f931551bSRalph Campbell for (i = 0; i < wr->num_sge; i++) 534f931551bSRalph Campbell wqe->sg_list[i] = wr->sg_list[i]; 535f931551bSRalph Campbell /* Make sure queue entry is written before the head index. */ 536f931551bSRalph Campbell smp_wmb(); 537f931551bSRalph Campbell wq->head = next; 538f931551bSRalph Campbell spin_unlock_irqrestore(&qp->r_rq.lock, flags); 539f931551bSRalph Campbell } 540f931551bSRalph Campbell ret = 0; 541f931551bSRalph Campbell 542f931551bSRalph Campbell bail: 543f931551bSRalph Campbell return ret; 544f931551bSRalph Campbell } 545f931551bSRalph Campbell 546f931551bSRalph Campbell /** 547f931551bSRalph Campbell * qib_qp_rcv - processing an incoming packet on a QP 548f931551bSRalph Campbell * @rcd: the context pointer 549f931551bSRalph Campbell * @hdr: the packet header 550f931551bSRalph Campbell * @has_grh: true if the packet has a GRH 551f931551bSRalph Campbell * @data: the packet data 552f931551bSRalph Campbell * @tlen: the packet length 553f931551bSRalph Campbell * @qp: the QP the packet came on 554f931551bSRalph Campbell * 555f931551bSRalph Campbell * This is called from qib_ib_rcv() to process an incoming packet 556f931551bSRalph Campbell * for the given QP. 557f931551bSRalph Campbell * Called at interrupt level. 558f931551bSRalph Campbell */ 559f931551bSRalph Campbell static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, 560f931551bSRalph Campbell int has_grh, void *data, u32 tlen, struct qib_qp *qp) 561f931551bSRalph Campbell { 562f931551bSRalph Campbell struct qib_ibport *ibp = &rcd->ppd->ibport_data; 563f931551bSRalph Campbell 564a5210c12SRalph Campbell spin_lock(&qp->r_lock); 565a5210c12SRalph Campbell 566f931551bSRalph Campbell /* Check for valid receive state. */ 567f931551bSRalph Campbell if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) { 568f931551bSRalph Campbell ibp->n_pkt_drops++; 569a5210c12SRalph Campbell goto unlock; 570f931551bSRalph Campbell } 571f931551bSRalph Campbell 572f931551bSRalph Campbell switch (qp->ibqp.qp_type) { 573f931551bSRalph Campbell case IB_QPT_SMI: 574f931551bSRalph Campbell case IB_QPT_GSI: 575f931551bSRalph Campbell if (ib_qib_disable_sma) 576f931551bSRalph Campbell break; 577f931551bSRalph Campbell /* FALLTHROUGH */ 578f931551bSRalph Campbell case IB_QPT_UD: 579f931551bSRalph Campbell qib_ud_rcv(ibp, hdr, has_grh, data, tlen, qp); 580f931551bSRalph Campbell break; 581f931551bSRalph Campbell 582f931551bSRalph Campbell case IB_QPT_RC: 583f931551bSRalph Campbell qib_rc_rcv(rcd, hdr, has_grh, data, tlen, qp); 584f931551bSRalph Campbell break; 585f931551bSRalph Campbell 586f931551bSRalph Campbell case IB_QPT_UC: 587f931551bSRalph Campbell qib_uc_rcv(ibp, hdr, has_grh, data, tlen, qp); 588f931551bSRalph Campbell break; 589f931551bSRalph Campbell 590f931551bSRalph Campbell default: 591f931551bSRalph Campbell break; 592f931551bSRalph Campbell } 593a5210c12SRalph Campbell 594a5210c12SRalph Campbell unlock: 595a5210c12SRalph Campbell spin_unlock(&qp->r_lock); 596f931551bSRalph Campbell } 597f931551bSRalph Campbell 598f931551bSRalph Campbell /** 599f931551bSRalph Campbell * qib_ib_rcv - process an incoming packet 600f931551bSRalph Campbell * @rcd: the context pointer 601f931551bSRalph Campbell * @rhdr: the header of the packet 602f931551bSRalph Campbell * @data: the packet payload 603f931551bSRalph Campbell * @tlen: the packet length 604f931551bSRalph Campbell * 605f931551bSRalph Campbell * This is called from qib_kreceive() to process an incoming packet at 606f931551bSRalph Campbell * interrupt level. Tlen is the length of the header + data + CRC in bytes. 607f931551bSRalph Campbell */ 608f931551bSRalph Campbell void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen) 609f931551bSRalph Campbell { 610f931551bSRalph Campbell struct qib_pportdata *ppd = rcd->ppd; 611f931551bSRalph Campbell struct qib_ibport *ibp = &ppd->ibport_data; 612f931551bSRalph Campbell struct qib_ib_header *hdr = rhdr; 613f931551bSRalph Campbell struct qib_other_headers *ohdr; 614f931551bSRalph Campbell struct qib_qp *qp; 615f931551bSRalph Campbell u32 qp_num; 616f931551bSRalph Campbell int lnh; 617f931551bSRalph Campbell u8 opcode; 618f931551bSRalph Campbell u16 lid; 619f931551bSRalph Campbell 620f931551bSRalph Campbell /* 24 == LRH+BTH+CRC */ 621f931551bSRalph Campbell if (unlikely(tlen < 24)) 622f931551bSRalph Campbell goto drop; 623f931551bSRalph Campbell 624f931551bSRalph Campbell /* Check for a valid destination LID (see ch. 7.11.1). */ 625f931551bSRalph Campbell lid = be16_to_cpu(hdr->lrh[1]); 626f931551bSRalph Campbell if (lid < QIB_MULTICAST_LID_BASE) { 627f931551bSRalph Campbell lid &= ~((1 << ppd->lmc) - 1); 628f931551bSRalph Campbell if (unlikely(lid != ppd->lid)) 629f931551bSRalph Campbell goto drop; 630f931551bSRalph Campbell } 631f931551bSRalph Campbell 632f931551bSRalph Campbell /* Check for GRH */ 633f931551bSRalph Campbell lnh = be16_to_cpu(hdr->lrh[0]) & 3; 634f931551bSRalph Campbell if (lnh == QIB_LRH_BTH) 635f931551bSRalph Campbell ohdr = &hdr->u.oth; 636f931551bSRalph Campbell else if (lnh == QIB_LRH_GRH) { 637f931551bSRalph Campbell u32 vtf; 638f931551bSRalph Campbell 639f931551bSRalph Campbell ohdr = &hdr->u.l.oth; 640f931551bSRalph Campbell if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR) 641f931551bSRalph Campbell goto drop; 642f931551bSRalph Campbell vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow); 643f931551bSRalph Campbell if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION) 644f931551bSRalph Campbell goto drop; 645f931551bSRalph Campbell } else 646f931551bSRalph Campbell goto drop; 647f931551bSRalph Campbell 648f931551bSRalph Campbell opcode = be32_to_cpu(ohdr->bth[0]) >> 24; 649f931551bSRalph Campbell ibp->opstats[opcode & 0x7f].n_bytes += tlen; 650f931551bSRalph Campbell ibp->opstats[opcode & 0x7f].n_packets++; 651f931551bSRalph Campbell 652f931551bSRalph Campbell /* Get the destination QP number. */ 653f931551bSRalph Campbell qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK; 654f931551bSRalph Campbell if (qp_num == QIB_MULTICAST_QPN) { 655f931551bSRalph Campbell struct qib_mcast *mcast; 656f931551bSRalph Campbell struct qib_mcast_qp *p; 657f931551bSRalph Campbell 658f931551bSRalph Campbell if (lnh != QIB_LRH_GRH) 659f931551bSRalph Campbell goto drop; 660f931551bSRalph Campbell mcast = qib_mcast_find(ibp, &hdr->u.l.grh.dgid); 661f931551bSRalph Campbell if (mcast == NULL) 662f931551bSRalph Campbell goto drop; 663f931551bSRalph Campbell ibp->n_multicast_rcv++; 664f931551bSRalph Campbell list_for_each_entry_rcu(p, &mcast->qp_list, list) 665f931551bSRalph Campbell qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp); 666f931551bSRalph Campbell /* 667f931551bSRalph Campbell * Notify qib_multicast_detach() if it is waiting for us 668f931551bSRalph Campbell * to finish. 669f931551bSRalph Campbell */ 670f931551bSRalph Campbell if (atomic_dec_return(&mcast->refcount) <= 1) 671f931551bSRalph Campbell wake_up(&mcast->wait); 672f931551bSRalph Campbell } else { 673af061a64SMike Marciniszyn if (rcd->lookaside_qp) { 674af061a64SMike Marciniszyn if (rcd->lookaside_qpn != qp_num) { 675af061a64SMike Marciniszyn if (atomic_dec_and_test( 676af061a64SMike Marciniszyn &rcd->lookaside_qp->refcount)) 677af061a64SMike Marciniszyn wake_up( 678af061a64SMike Marciniszyn &rcd->lookaside_qp->wait); 679af061a64SMike Marciniszyn rcd->lookaside_qp = NULL; 680af061a64SMike Marciniszyn } 681af061a64SMike Marciniszyn } 682af061a64SMike Marciniszyn if (!rcd->lookaside_qp) { 683f931551bSRalph Campbell qp = qib_lookup_qpn(ibp, qp_num); 684f931551bSRalph Campbell if (!qp) 685f931551bSRalph Campbell goto drop; 686af061a64SMike Marciniszyn rcd->lookaside_qp = qp; 687af061a64SMike Marciniszyn rcd->lookaside_qpn = qp_num; 688af061a64SMike Marciniszyn } else 689af061a64SMike Marciniszyn qp = rcd->lookaside_qp; 690f931551bSRalph Campbell ibp->n_unicast_rcv++; 691f931551bSRalph Campbell qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp); 692f931551bSRalph Campbell } 693f931551bSRalph Campbell return; 694f931551bSRalph Campbell 695f931551bSRalph Campbell drop: 696f931551bSRalph Campbell ibp->n_pkt_drops++; 697f931551bSRalph Campbell } 698f931551bSRalph Campbell 699f931551bSRalph Campbell /* 700f931551bSRalph Campbell * This is called from a timer to check for QPs 701f931551bSRalph Campbell * which need kernel memory in order to send a packet. 702f931551bSRalph Campbell */ 703f931551bSRalph Campbell static void mem_timer(unsigned long data) 704f931551bSRalph Campbell { 705f931551bSRalph Campbell struct qib_ibdev *dev = (struct qib_ibdev *) data; 706f931551bSRalph Campbell struct list_head *list = &dev->memwait; 707f931551bSRalph Campbell struct qib_qp *qp = NULL; 708f931551bSRalph Campbell unsigned long flags; 709f931551bSRalph Campbell 710f931551bSRalph Campbell spin_lock_irqsave(&dev->pending_lock, flags); 711f931551bSRalph Campbell if (!list_empty(list)) { 712f931551bSRalph Campbell qp = list_entry(list->next, struct qib_qp, iowait); 713f931551bSRalph Campbell list_del_init(&qp->iowait); 714f931551bSRalph Campbell atomic_inc(&qp->refcount); 715f931551bSRalph Campbell if (!list_empty(list)) 716f931551bSRalph Campbell mod_timer(&dev->mem_timer, jiffies + 1); 717f931551bSRalph Campbell } 718f931551bSRalph Campbell spin_unlock_irqrestore(&dev->pending_lock, flags); 719f931551bSRalph Campbell 720f931551bSRalph Campbell if (qp) { 721f931551bSRalph Campbell spin_lock_irqsave(&qp->s_lock, flags); 722f931551bSRalph Campbell if (qp->s_flags & QIB_S_WAIT_KMEM) { 723f931551bSRalph Campbell qp->s_flags &= ~QIB_S_WAIT_KMEM; 724f931551bSRalph Campbell qib_schedule_send(qp); 725f931551bSRalph Campbell } 726f931551bSRalph Campbell spin_unlock_irqrestore(&qp->s_lock, flags); 727f931551bSRalph Campbell if (atomic_dec_and_test(&qp->refcount)) 728f931551bSRalph Campbell wake_up(&qp->wait); 729f931551bSRalph Campbell } 730f931551bSRalph Campbell } 731f931551bSRalph Campbell 732f931551bSRalph Campbell static void update_sge(struct qib_sge_state *ss, u32 length) 733f931551bSRalph Campbell { 734f931551bSRalph Campbell struct qib_sge *sge = &ss->sge; 735f931551bSRalph Campbell 736f931551bSRalph Campbell sge->vaddr += length; 737f931551bSRalph Campbell sge->length -= length; 738f931551bSRalph Campbell sge->sge_length -= length; 739f931551bSRalph Campbell if (sge->sge_length == 0) { 740f931551bSRalph Campbell if (--ss->num_sge) 741f931551bSRalph Campbell *sge = *ss->sg_list++; 742f931551bSRalph Campbell } else if (sge->length == 0 && sge->mr->lkey) { 743f931551bSRalph Campbell if (++sge->n >= QIB_SEGSZ) { 744f931551bSRalph Campbell if (++sge->m >= sge->mr->mapsz) 745f931551bSRalph Campbell return; 746f931551bSRalph Campbell sge->n = 0; 747f931551bSRalph Campbell } 748f931551bSRalph Campbell sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr; 749f931551bSRalph Campbell sge->length = sge->mr->map[sge->m]->segs[sge->n].length; 750f931551bSRalph Campbell } 751f931551bSRalph Campbell } 752f931551bSRalph Campbell 753f931551bSRalph Campbell #ifdef __LITTLE_ENDIAN 754f931551bSRalph Campbell static inline u32 get_upper_bits(u32 data, u32 shift) 755f931551bSRalph Campbell { 756f931551bSRalph Campbell return data >> shift; 757f931551bSRalph Campbell } 758f931551bSRalph Campbell 759f931551bSRalph Campbell static inline u32 set_upper_bits(u32 data, u32 shift) 760f931551bSRalph Campbell { 761f931551bSRalph Campbell return data << shift; 762f931551bSRalph Campbell } 763f931551bSRalph Campbell 764f931551bSRalph Campbell static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off) 765f931551bSRalph Campbell { 766f931551bSRalph Campbell data <<= ((sizeof(u32) - n) * BITS_PER_BYTE); 767f931551bSRalph Campbell data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE); 768f931551bSRalph Campbell return data; 769f931551bSRalph Campbell } 770f931551bSRalph Campbell #else 771f931551bSRalph Campbell static inline u32 get_upper_bits(u32 data, u32 shift) 772f931551bSRalph Campbell { 773f931551bSRalph Campbell return data << shift; 774f931551bSRalph Campbell } 775f931551bSRalph Campbell 776f931551bSRalph Campbell static inline u32 set_upper_bits(u32 data, u32 shift) 777f931551bSRalph Campbell { 778f931551bSRalph Campbell return data >> shift; 779f931551bSRalph Campbell } 780f931551bSRalph Campbell 781f931551bSRalph Campbell static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off) 782f931551bSRalph Campbell { 783f931551bSRalph Campbell data >>= ((sizeof(u32) - n) * BITS_PER_BYTE); 784f931551bSRalph Campbell data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE); 785f931551bSRalph Campbell return data; 786f931551bSRalph Campbell } 787f931551bSRalph Campbell #endif 788f931551bSRalph Campbell 789f931551bSRalph Campbell static void copy_io(u32 __iomem *piobuf, struct qib_sge_state *ss, 790f931551bSRalph Campbell u32 length, unsigned flush_wc) 791f931551bSRalph Campbell { 792f931551bSRalph Campbell u32 extra = 0; 793f931551bSRalph Campbell u32 data = 0; 794f931551bSRalph Campbell u32 last; 795f931551bSRalph Campbell 796f931551bSRalph Campbell while (1) { 797f931551bSRalph Campbell u32 len = ss->sge.length; 798f931551bSRalph Campbell u32 off; 799f931551bSRalph Campbell 800f931551bSRalph Campbell if (len > length) 801f931551bSRalph Campbell len = length; 802f931551bSRalph Campbell if (len > ss->sge.sge_length) 803f931551bSRalph Campbell len = ss->sge.sge_length; 804f931551bSRalph Campbell BUG_ON(len == 0); 805f931551bSRalph Campbell /* If the source address is not aligned, try to align it. */ 806f931551bSRalph Campbell off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1); 807f931551bSRalph Campbell if (off) { 808f931551bSRalph Campbell u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr & 809f931551bSRalph Campbell ~(sizeof(u32) - 1)); 810f931551bSRalph Campbell u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE); 811f931551bSRalph Campbell u32 y; 812f931551bSRalph Campbell 813f931551bSRalph Campbell y = sizeof(u32) - off; 814f931551bSRalph Campbell if (len > y) 815f931551bSRalph Campbell len = y; 816f931551bSRalph Campbell if (len + extra >= sizeof(u32)) { 817f931551bSRalph Campbell data |= set_upper_bits(v, extra * 818f931551bSRalph Campbell BITS_PER_BYTE); 819f931551bSRalph Campbell len = sizeof(u32) - extra; 820f931551bSRalph Campbell if (len == length) { 821f931551bSRalph Campbell last = data; 822f931551bSRalph Campbell break; 823f931551bSRalph Campbell } 824f931551bSRalph Campbell __raw_writel(data, piobuf); 825f931551bSRalph Campbell piobuf++; 826f931551bSRalph Campbell extra = 0; 827f931551bSRalph Campbell data = 0; 828f931551bSRalph Campbell } else { 829f931551bSRalph Campbell /* Clear unused upper bytes */ 830f931551bSRalph Campbell data |= clear_upper_bytes(v, len, extra); 831f931551bSRalph Campbell if (len == length) { 832f931551bSRalph Campbell last = data; 833f931551bSRalph Campbell break; 834f931551bSRalph Campbell } 835f931551bSRalph Campbell extra += len; 836f931551bSRalph Campbell } 837f931551bSRalph Campbell } else if (extra) { 838f931551bSRalph Campbell /* Source address is aligned. */ 839f931551bSRalph Campbell u32 *addr = (u32 *) ss->sge.vaddr; 840f931551bSRalph Campbell int shift = extra * BITS_PER_BYTE; 841f931551bSRalph Campbell int ushift = 32 - shift; 842f931551bSRalph Campbell u32 l = len; 843f931551bSRalph Campbell 844f931551bSRalph Campbell while (l >= sizeof(u32)) { 845f931551bSRalph Campbell u32 v = *addr; 846f931551bSRalph Campbell 847f931551bSRalph Campbell data |= set_upper_bits(v, shift); 848f931551bSRalph Campbell __raw_writel(data, piobuf); 849f931551bSRalph Campbell data = get_upper_bits(v, ushift); 850f931551bSRalph Campbell piobuf++; 851f931551bSRalph Campbell addr++; 852f931551bSRalph Campbell l -= sizeof(u32); 853f931551bSRalph Campbell } 854f931551bSRalph Campbell /* 855f931551bSRalph Campbell * We still have 'extra' number of bytes leftover. 856f931551bSRalph Campbell */ 857f931551bSRalph Campbell if (l) { 858f931551bSRalph Campbell u32 v = *addr; 859f931551bSRalph Campbell 860f931551bSRalph Campbell if (l + extra >= sizeof(u32)) { 861f931551bSRalph Campbell data |= set_upper_bits(v, shift); 862f931551bSRalph Campbell len -= l + extra - sizeof(u32); 863f931551bSRalph Campbell if (len == length) { 864f931551bSRalph Campbell last = data; 865f931551bSRalph Campbell break; 866f931551bSRalph Campbell } 867f931551bSRalph Campbell __raw_writel(data, piobuf); 868f931551bSRalph Campbell piobuf++; 869f931551bSRalph Campbell extra = 0; 870f931551bSRalph Campbell data = 0; 871f931551bSRalph Campbell } else { 872f931551bSRalph Campbell /* Clear unused upper bytes */ 873f931551bSRalph Campbell data |= clear_upper_bytes(v, l, extra); 874f931551bSRalph Campbell if (len == length) { 875f931551bSRalph Campbell last = data; 876f931551bSRalph Campbell break; 877f931551bSRalph Campbell } 878f931551bSRalph Campbell extra += l; 879f931551bSRalph Campbell } 880f931551bSRalph Campbell } else if (len == length) { 881f931551bSRalph Campbell last = data; 882f931551bSRalph Campbell break; 883f931551bSRalph Campbell } 884f931551bSRalph Campbell } else if (len == length) { 885f931551bSRalph Campbell u32 w; 886f931551bSRalph Campbell 887f931551bSRalph Campbell /* 888f931551bSRalph Campbell * Need to round up for the last dword in the 889f931551bSRalph Campbell * packet. 890f931551bSRalph Campbell */ 891f931551bSRalph Campbell w = (len + 3) >> 2; 892f931551bSRalph Campbell qib_pio_copy(piobuf, ss->sge.vaddr, w - 1); 893f931551bSRalph Campbell piobuf += w - 1; 894f931551bSRalph Campbell last = ((u32 *) ss->sge.vaddr)[w - 1]; 895f931551bSRalph Campbell break; 896f931551bSRalph Campbell } else { 897f931551bSRalph Campbell u32 w = len >> 2; 898f931551bSRalph Campbell 899f931551bSRalph Campbell qib_pio_copy(piobuf, ss->sge.vaddr, w); 900f931551bSRalph Campbell piobuf += w; 901f931551bSRalph Campbell 902f931551bSRalph Campbell extra = len & (sizeof(u32) - 1); 903f931551bSRalph Campbell if (extra) { 904f931551bSRalph Campbell u32 v = ((u32 *) ss->sge.vaddr)[w]; 905f931551bSRalph Campbell 906f931551bSRalph Campbell /* Clear unused upper bytes */ 907f931551bSRalph Campbell data = clear_upper_bytes(v, extra, 0); 908f931551bSRalph Campbell } 909f931551bSRalph Campbell } 910f931551bSRalph Campbell update_sge(ss, len); 911f931551bSRalph Campbell length -= len; 912f931551bSRalph Campbell } 913f931551bSRalph Campbell /* Update address before sending packet. */ 914f931551bSRalph Campbell update_sge(ss, length); 915f931551bSRalph Campbell if (flush_wc) { 916f931551bSRalph Campbell /* must flush early everything before trigger word */ 917f931551bSRalph Campbell qib_flush_wc(); 918f931551bSRalph Campbell __raw_writel(last, piobuf); 919f931551bSRalph Campbell /* be sure trigger word is written */ 920f931551bSRalph Campbell qib_flush_wc(); 921f931551bSRalph Campbell } else 922f931551bSRalph Campbell __raw_writel(last, piobuf); 923f931551bSRalph Campbell } 924f931551bSRalph Campbell 92548947109SMike Marciniszyn static noinline struct qib_verbs_txreq *__get_txreq(struct qib_ibdev *dev, 92648947109SMike Marciniszyn struct qib_qp *qp) 927f931551bSRalph Campbell { 928f931551bSRalph Campbell struct qib_verbs_txreq *tx; 929f931551bSRalph Campbell unsigned long flags; 930f931551bSRalph Campbell 931f931551bSRalph Campbell spin_lock_irqsave(&qp->s_lock, flags); 932f931551bSRalph Campbell spin_lock(&dev->pending_lock); 933f931551bSRalph Campbell 934f931551bSRalph Campbell if (!list_empty(&dev->txreq_free)) { 935f931551bSRalph Campbell struct list_head *l = dev->txreq_free.next; 936f931551bSRalph Campbell 937f931551bSRalph Campbell list_del(l); 93848947109SMike Marciniszyn spin_unlock(&dev->pending_lock); 93948947109SMike Marciniszyn spin_unlock_irqrestore(&qp->s_lock, flags); 940f931551bSRalph Campbell tx = list_entry(l, struct qib_verbs_txreq, txreq.list); 941f931551bSRalph Campbell } else { 942f931551bSRalph Campbell if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK && 943f931551bSRalph Campbell list_empty(&qp->iowait)) { 944f931551bSRalph Campbell dev->n_txwait++; 945f931551bSRalph Campbell qp->s_flags |= QIB_S_WAIT_TX; 946f931551bSRalph Campbell list_add_tail(&qp->iowait, &dev->txwait); 947f931551bSRalph Campbell } 948f931551bSRalph Campbell qp->s_flags &= ~QIB_S_BUSY; 949f931551bSRalph Campbell spin_unlock(&dev->pending_lock); 950f931551bSRalph Campbell spin_unlock_irqrestore(&qp->s_lock, flags); 95148947109SMike Marciniszyn tx = ERR_PTR(-EBUSY); 95248947109SMike Marciniszyn } 95348947109SMike Marciniszyn return tx; 95448947109SMike Marciniszyn } 955f931551bSRalph Campbell 95648947109SMike Marciniszyn static inline struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev, 95748947109SMike Marciniszyn struct qib_qp *qp) 95848947109SMike Marciniszyn { 95948947109SMike Marciniszyn struct qib_verbs_txreq *tx; 96048947109SMike Marciniszyn unsigned long flags; 96148947109SMike Marciniszyn 96248947109SMike Marciniszyn spin_lock_irqsave(&dev->pending_lock, flags); 96348947109SMike Marciniszyn /* assume the list non empty */ 96448947109SMike Marciniszyn if (likely(!list_empty(&dev->txreq_free))) { 96548947109SMike Marciniszyn struct list_head *l = dev->txreq_free.next; 96648947109SMike Marciniszyn 96748947109SMike Marciniszyn list_del(l); 96848947109SMike Marciniszyn spin_unlock_irqrestore(&dev->pending_lock, flags); 96948947109SMike Marciniszyn tx = list_entry(l, struct qib_verbs_txreq, txreq.list); 97048947109SMike Marciniszyn } else { 97148947109SMike Marciniszyn /* call slow path to get the extra lock */ 97248947109SMike Marciniszyn spin_unlock_irqrestore(&dev->pending_lock, flags); 97348947109SMike Marciniszyn tx = __get_txreq(dev, qp); 97448947109SMike Marciniszyn } 975f931551bSRalph Campbell return tx; 976f931551bSRalph Campbell } 977f931551bSRalph Campbell 978f931551bSRalph Campbell void qib_put_txreq(struct qib_verbs_txreq *tx) 979f931551bSRalph Campbell { 980f931551bSRalph Campbell struct qib_ibdev *dev; 981f931551bSRalph Campbell struct qib_qp *qp; 982f931551bSRalph Campbell unsigned long flags; 983f931551bSRalph Campbell 984f931551bSRalph Campbell qp = tx->qp; 985f931551bSRalph Campbell dev = to_idev(qp->ibqp.device); 986f931551bSRalph Campbell 987f931551bSRalph Campbell if (atomic_dec_and_test(&qp->refcount)) 988f931551bSRalph Campbell wake_up(&qp->wait); 989f931551bSRalph Campbell if (tx->mr) { 9906a82649fSMike Marciniszyn qib_put_mr(tx->mr); 991f931551bSRalph Campbell tx->mr = NULL; 992f931551bSRalph Campbell } 993f931551bSRalph Campbell if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) { 994f931551bSRalph Campbell tx->txreq.flags &= ~QIB_SDMA_TXREQ_F_FREEBUF; 995f931551bSRalph Campbell dma_unmap_single(&dd_from_dev(dev)->pcidev->dev, 996f931551bSRalph Campbell tx->txreq.addr, tx->hdr_dwords << 2, 997f931551bSRalph Campbell DMA_TO_DEVICE); 998f931551bSRalph Campbell kfree(tx->align_buf); 999f931551bSRalph Campbell } 1000f931551bSRalph Campbell 1001f931551bSRalph Campbell spin_lock_irqsave(&dev->pending_lock, flags); 1002f931551bSRalph Campbell 1003f931551bSRalph Campbell /* Put struct back on free list */ 1004f931551bSRalph Campbell list_add(&tx->txreq.list, &dev->txreq_free); 1005f931551bSRalph Campbell 1006f931551bSRalph Campbell if (!list_empty(&dev->txwait)) { 1007f931551bSRalph Campbell /* Wake up first QP wanting a free struct */ 1008f931551bSRalph Campbell qp = list_entry(dev->txwait.next, struct qib_qp, iowait); 1009f931551bSRalph Campbell list_del_init(&qp->iowait); 1010f931551bSRalph Campbell atomic_inc(&qp->refcount); 1011f931551bSRalph Campbell spin_unlock_irqrestore(&dev->pending_lock, flags); 1012f931551bSRalph Campbell 1013f931551bSRalph Campbell spin_lock_irqsave(&qp->s_lock, flags); 1014f931551bSRalph Campbell if (qp->s_flags & QIB_S_WAIT_TX) { 1015f931551bSRalph Campbell qp->s_flags &= ~QIB_S_WAIT_TX; 1016f931551bSRalph Campbell qib_schedule_send(qp); 1017f931551bSRalph Campbell } 1018f931551bSRalph Campbell spin_unlock_irqrestore(&qp->s_lock, flags); 1019f931551bSRalph Campbell 1020f931551bSRalph Campbell if (atomic_dec_and_test(&qp->refcount)) 1021f931551bSRalph Campbell wake_up(&qp->wait); 1022f931551bSRalph Campbell } else 1023f931551bSRalph Campbell spin_unlock_irqrestore(&dev->pending_lock, flags); 1024f931551bSRalph Campbell } 1025f931551bSRalph Campbell 1026f931551bSRalph Campbell /* 1027f931551bSRalph Campbell * This is called when there are send DMA descriptors that might be 1028f931551bSRalph Campbell * available. 1029f931551bSRalph Campbell * 1030f931551bSRalph Campbell * This is called with ppd->sdma_lock held. 1031f931551bSRalph Campbell */ 1032f931551bSRalph Campbell void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail) 1033f931551bSRalph Campbell { 1034f931551bSRalph Campbell struct qib_qp *qp, *nqp; 1035f931551bSRalph Campbell struct qib_qp *qps[20]; 1036f931551bSRalph Campbell struct qib_ibdev *dev; 1037f931551bSRalph Campbell unsigned i, n; 1038f931551bSRalph Campbell 1039f931551bSRalph Campbell n = 0; 1040f931551bSRalph Campbell dev = &ppd->dd->verbs_dev; 1041f931551bSRalph Campbell spin_lock(&dev->pending_lock); 1042f931551bSRalph Campbell 1043f931551bSRalph Campbell /* Search wait list for first QP wanting DMA descriptors. */ 1044f931551bSRalph Campbell list_for_each_entry_safe(qp, nqp, &dev->dmawait, iowait) { 1045f931551bSRalph Campbell if (qp->port_num != ppd->port) 1046f931551bSRalph Campbell continue; 1047f931551bSRalph Campbell if (n == ARRAY_SIZE(qps)) 1048f931551bSRalph Campbell break; 1049f931551bSRalph Campbell if (qp->s_tx->txreq.sg_count > avail) 1050f931551bSRalph Campbell break; 1051f931551bSRalph Campbell avail -= qp->s_tx->txreq.sg_count; 1052f931551bSRalph Campbell list_del_init(&qp->iowait); 1053f931551bSRalph Campbell atomic_inc(&qp->refcount); 1054f931551bSRalph Campbell qps[n++] = qp; 1055f931551bSRalph Campbell } 1056f931551bSRalph Campbell 1057f931551bSRalph Campbell spin_unlock(&dev->pending_lock); 1058f931551bSRalph Campbell 1059f931551bSRalph Campbell for (i = 0; i < n; i++) { 1060f931551bSRalph Campbell qp = qps[i]; 1061f931551bSRalph Campbell spin_lock(&qp->s_lock); 1062f931551bSRalph Campbell if (qp->s_flags & QIB_S_WAIT_DMA_DESC) { 1063f931551bSRalph Campbell qp->s_flags &= ~QIB_S_WAIT_DMA_DESC; 1064f931551bSRalph Campbell qib_schedule_send(qp); 1065f931551bSRalph Campbell } 1066f931551bSRalph Campbell spin_unlock(&qp->s_lock); 1067f931551bSRalph Campbell if (atomic_dec_and_test(&qp->refcount)) 1068f931551bSRalph Campbell wake_up(&qp->wait); 1069f931551bSRalph Campbell } 1070f931551bSRalph Campbell } 1071f931551bSRalph Campbell 1072f931551bSRalph Campbell /* 1073f931551bSRalph Campbell * This is called with ppd->sdma_lock held. 1074f931551bSRalph Campbell */ 1075f931551bSRalph Campbell static void sdma_complete(struct qib_sdma_txreq *cookie, int status) 1076f931551bSRalph Campbell { 1077f931551bSRalph Campbell struct qib_verbs_txreq *tx = 1078f931551bSRalph Campbell container_of(cookie, struct qib_verbs_txreq, txreq); 1079f931551bSRalph Campbell struct qib_qp *qp = tx->qp; 1080f931551bSRalph Campbell 1081f931551bSRalph Campbell spin_lock(&qp->s_lock); 1082f931551bSRalph Campbell if (tx->wqe) 1083f931551bSRalph Campbell qib_send_complete(qp, tx->wqe, IB_WC_SUCCESS); 1084f931551bSRalph Campbell else if (qp->ibqp.qp_type == IB_QPT_RC) { 1085f931551bSRalph Campbell struct qib_ib_header *hdr; 1086f931551bSRalph Campbell 1087f931551bSRalph Campbell if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) 1088f931551bSRalph Campbell hdr = &tx->align_buf->hdr; 1089f931551bSRalph Campbell else { 1090f931551bSRalph Campbell struct qib_ibdev *dev = to_idev(qp->ibqp.device); 1091f931551bSRalph Campbell 1092f931551bSRalph Campbell hdr = &dev->pio_hdrs[tx->hdr_inx].hdr; 1093f931551bSRalph Campbell } 1094f931551bSRalph Campbell qib_rc_send_complete(qp, hdr); 1095f931551bSRalph Campbell } 1096f931551bSRalph Campbell if (atomic_dec_and_test(&qp->s_dma_busy)) { 1097f931551bSRalph Campbell if (qp->state == IB_QPS_RESET) 1098f931551bSRalph Campbell wake_up(&qp->wait_dma); 1099f931551bSRalph Campbell else if (qp->s_flags & QIB_S_WAIT_DMA) { 1100f931551bSRalph Campbell qp->s_flags &= ~QIB_S_WAIT_DMA; 1101f931551bSRalph Campbell qib_schedule_send(qp); 1102f931551bSRalph Campbell } 1103f931551bSRalph Campbell } 1104f931551bSRalph Campbell spin_unlock(&qp->s_lock); 1105f931551bSRalph Campbell 1106f931551bSRalph Campbell qib_put_txreq(tx); 1107f931551bSRalph Campbell } 1108f931551bSRalph Campbell 1109f931551bSRalph Campbell static int wait_kmem(struct qib_ibdev *dev, struct qib_qp *qp) 1110f931551bSRalph Campbell { 1111f931551bSRalph Campbell unsigned long flags; 1112f931551bSRalph Campbell int ret = 0; 1113f931551bSRalph Campbell 1114f931551bSRalph Campbell spin_lock_irqsave(&qp->s_lock, flags); 1115f931551bSRalph Campbell if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) { 1116f931551bSRalph Campbell spin_lock(&dev->pending_lock); 1117f931551bSRalph Campbell if (list_empty(&qp->iowait)) { 1118f931551bSRalph Campbell if (list_empty(&dev->memwait)) 1119f931551bSRalph Campbell mod_timer(&dev->mem_timer, jiffies + 1); 1120f931551bSRalph Campbell qp->s_flags |= QIB_S_WAIT_KMEM; 1121f931551bSRalph Campbell list_add_tail(&qp->iowait, &dev->memwait); 1122f931551bSRalph Campbell } 1123f931551bSRalph Campbell spin_unlock(&dev->pending_lock); 1124f931551bSRalph Campbell qp->s_flags &= ~QIB_S_BUSY; 1125f931551bSRalph Campbell ret = -EBUSY; 1126f931551bSRalph Campbell } 1127f931551bSRalph Campbell spin_unlock_irqrestore(&qp->s_lock, flags); 1128f931551bSRalph Campbell 1129f931551bSRalph Campbell return ret; 1130f931551bSRalph Campbell } 1131f931551bSRalph Campbell 1132f931551bSRalph Campbell static int qib_verbs_send_dma(struct qib_qp *qp, struct qib_ib_header *hdr, 1133f931551bSRalph Campbell u32 hdrwords, struct qib_sge_state *ss, u32 len, 1134f931551bSRalph Campbell u32 plen, u32 dwords) 1135f931551bSRalph Campbell { 1136f931551bSRalph Campbell struct qib_ibdev *dev = to_idev(qp->ibqp.device); 1137f931551bSRalph Campbell struct qib_devdata *dd = dd_from_dev(dev); 1138f931551bSRalph Campbell struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); 1139f931551bSRalph Campbell struct qib_pportdata *ppd = ppd_from_ibp(ibp); 1140f931551bSRalph Campbell struct qib_verbs_txreq *tx; 1141f931551bSRalph Campbell struct qib_pio_header *phdr; 1142f931551bSRalph Campbell u32 control; 1143f931551bSRalph Campbell u32 ndesc; 1144f931551bSRalph Campbell int ret; 1145f931551bSRalph Campbell 1146f931551bSRalph Campbell tx = qp->s_tx; 1147f931551bSRalph Campbell if (tx) { 1148f931551bSRalph Campbell qp->s_tx = NULL; 1149f931551bSRalph Campbell /* resend previously constructed packet */ 1150f931551bSRalph Campbell ret = qib_sdma_verbs_send(ppd, tx->ss, tx->dwords, tx); 1151f931551bSRalph Campbell goto bail; 1152f931551bSRalph Campbell } 1153f931551bSRalph Campbell 115448947109SMike Marciniszyn tx = get_txreq(dev, qp); 115548947109SMike Marciniszyn if (IS_ERR(tx)) 115648947109SMike Marciniszyn goto bail_tx; 1157f931551bSRalph Campbell 1158f931551bSRalph Campbell control = dd->f_setpbc_control(ppd, plen, qp->s_srate, 1159f931551bSRalph Campbell be16_to_cpu(hdr->lrh[0]) >> 12); 1160f931551bSRalph Campbell tx->qp = qp; 1161f931551bSRalph Campbell atomic_inc(&qp->refcount); 1162f931551bSRalph Campbell tx->wqe = qp->s_wqe; 1163f931551bSRalph Campbell tx->mr = qp->s_rdma_mr; 1164f931551bSRalph Campbell if (qp->s_rdma_mr) 1165f931551bSRalph Campbell qp->s_rdma_mr = NULL; 1166f931551bSRalph Campbell tx->txreq.callback = sdma_complete; 1167f931551bSRalph Campbell if (dd->flags & QIB_HAS_SDMA_TIMEOUT) 1168f931551bSRalph Campbell tx->txreq.flags = QIB_SDMA_TXREQ_F_HEADTOHOST; 1169f931551bSRalph Campbell else 1170f931551bSRalph Campbell tx->txreq.flags = QIB_SDMA_TXREQ_F_INTREQ; 1171f931551bSRalph Campbell if (plen + 1 > dd->piosize2kmax_dwords) 1172f931551bSRalph Campbell tx->txreq.flags |= QIB_SDMA_TXREQ_F_USELARGEBUF; 1173f931551bSRalph Campbell 1174f931551bSRalph Campbell if (len) { 1175f931551bSRalph Campbell /* 1176f931551bSRalph Campbell * Don't try to DMA if it takes more descriptors than 1177f931551bSRalph Campbell * the queue holds. 1178f931551bSRalph Campbell */ 1179f931551bSRalph Campbell ndesc = qib_count_sge(ss, len); 1180f931551bSRalph Campbell if (ndesc >= ppd->sdma_descq_cnt) 1181f931551bSRalph Campbell ndesc = 0; 1182f931551bSRalph Campbell } else 1183f931551bSRalph Campbell ndesc = 1; 1184f931551bSRalph Campbell if (ndesc) { 1185f931551bSRalph Campbell phdr = &dev->pio_hdrs[tx->hdr_inx]; 1186f931551bSRalph Campbell phdr->pbc[0] = cpu_to_le32(plen); 1187f931551bSRalph Campbell phdr->pbc[1] = cpu_to_le32(control); 1188f931551bSRalph Campbell memcpy(&phdr->hdr, hdr, hdrwords << 2); 1189f931551bSRalph Campbell tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEDESC; 1190f931551bSRalph Campbell tx->txreq.sg_count = ndesc; 1191f931551bSRalph Campbell tx->txreq.addr = dev->pio_hdrs_phys + 1192f931551bSRalph Campbell tx->hdr_inx * sizeof(struct qib_pio_header); 1193f931551bSRalph Campbell tx->hdr_dwords = hdrwords + 2; /* add PBC length */ 1194f931551bSRalph Campbell ret = qib_sdma_verbs_send(ppd, ss, dwords, tx); 1195f931551bSRalph Campbell goto bail; 1196f931551bSRalph Campbell } 1197f931551bSRalph Campbell 1198f931551bSRalph Campbell /* Allocate a buffer and copy the header and payload to it. */ 1199f931551bSRalph Campbell tx->hdr_dwords = plen + 1; 1200f931551bSRalph Campbell phdr = kmalloc(tx->hdr_dwords << 2, GFP_ATOMIC); 1201f931551bSRalph Campbell if (!phdr) 1202f931551bSRalph Campbell goto err_tx; 1203f931551bSRalph Campbell phdr->pbc[0] = cpu_to_le32(plen); 1204f931551bSRalph Campbell phdr->pbc[1] = cpu_to_le32(control); 1205f931551bSRalph Campbell memcpy(&phdr->hdr, hdr, hdrwords << 2); 1206f931551bSRalph Campbell qib_copy_from_sge((u32 *) &phdr->hdr + hdrwords, ss, len); 1207f931551bSRalph Campbell 1208f931551bSRalph Campbell tx->txreq.addr = dma_map_single(&dd->pcidev->dev, phdr, 1209f931551bSRalph Campbell tx->hdr_dwords << 2, DMA_TO_DEVICE); 1210f931551bSRalph Campbell if (dma_mapping_error(&dd->pcidev->dev, tx->txreq.addr)) 1211f931551bSRalph Campbell goto map_err; 1212f931551bSRalph Campbell tx->align_buf = phdr; 1213f931551bSRalph Campbell tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEBUF; 1214f931551bSRalph Campbell tx->txreq.sg_count = 1; 1215f931551bSRalph Campbell ret = qib_sdma_verbs_send(ppd, NULL, 0, tx); 1216f931551bSRalph Campbell goto unaligned; 1217f931551bSRalph Campbell 1218f931551bSRalph Campbell map_err: 1219f931551bSRalph Campbell kfree(phdr); 1220f931551bSRalph Campbell err_tx: 1221f931551bSRalph Campbell qib_put_txreq(tx); 1222f931551bSRalph Campbell ret = wait_kmem(dev, qp); 1223f931551bSRalph Campbell unaligned: 1224f931551bSRalph Campbell ibp->n_unaligned++; 1225f931551bSRalph Campbell bail: 1226f931551bSRalph Campbell return ret; 122748947109SMike Marciniszyn bail_tx: 122848947109SMike Marciniszyn ret = PTR_ERR(tx); 122948947109SMike Marciniszyn goto bail; 1230f931551bSRalph Campbell } 1231f931551bSRalph Campbell 1232f931551bSRalph Campbell /* 1233f931551bSRalph Campbell * If we are now in the error state, return zero to flush the 1234f931551bSRalph Campbell * send work request. 1235f931551bSRalph Campbell */ 1236f931551bSRalph Campbell static int no_bufs_available(struct qib_qp *qp) 1237f931551bSRalph Campbell { 1238f931551bSRalph Campbell struct qib_ibdev *dev = to_idev(qp->ibqp.device); 1239f931551bSRalph Campbell struct qib_devdata *dd; 1240f931551bSRalph Campbell unsigned long flags; 1241f931551bSRalph Campbell int ret = 0; 1242f931551bSRalph Campbell 1243f931551bSRalph Campbell /* 1244f931551bSRalph Campbell * Note that as soon as want_buffer() is called and 1245f931551bSRalph Campbell * possibly before it returns, qib_ib_piobufavail() 1246f931551bSRalph Campbell * could be called. Therefore, put QP on the I/O wait list before 1247f931551bSRalph Campbell * enabling the PIO avail interrupt. 1248f931551bSRalph Campbell */ 1249f931551bSRalph Campbell spin_lock_irqsave(&qp->s_lock, flags); 1250f931551bSRalph Campbell if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) { 1251f931551bSRalph Campbell spin_lock(&dev->pending_lock); 1252f931551bSRalph Campbell if (list_empty(&qp->iowait)) { 1253f931551bSRalph Campbell dev->n_piowait++; 1254f931551bSRalph Campbell qp->s_flags |= QIB_S_WAIT_PIO; 1255f931551bSRalph Campbell list_add_tail(&qp->iowait, &dev->piowait); 1256f931551bSRalph Campbell dd = dd_from_dev(dev); 1257f931551bSRalph Campbell dd->f_wantpiobuf_intr(dd, 1); 1258f931551bSRalph Campbell } 1259f931551bSRalph Campbell spin_unlock(&dev->pending_lock); 1260f931551bSRalph Campbell qp->s_flags &= ~QIB_S_BUSY; 1261f931551bSRalph Campbell ret = -EBUSY; 1262f931551bSRalph Campbell } 1263f931551bSRalph Campbell spin_unlock_irqrestore(&qp->s_lock, flags); 1264f931551bSRalph Campbell return ret; 1265f931551bSRalph Campbell } 1266f931551bSRalph Campbell 1267f931551bSRalph Campbell static int qib_verbs_send_pio(struct qib_qp *qp, struct qib_ib_header *ibhdr, 1268f931551bSRalph Campbell u32 hdrwords, struct qib_sge_state *ss, u32 len, 1269f931551bSRalph Campbell u32 plen, u32 dwords) 1270f931551bSRalph Campbell { 1271f931551bSRalph Campbell struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device); 1272f931551bSRalph Campbell struct qib_pportdata *ppd = dd->pport + qp->port_num - 1; 1273f931551bSRalph Campbell u32 *hdr = (u32 *) ibhdr; 1274f931551bSRalph Campbell u32 __iomem *piobuf_orig; 1275f931551bSRalph Campbell u32 __iomem *piobuf; 1276f931551bSRalph Campbell u64 pbc; 1277f931551bSRalph Campbell unsigned long flags; 1278f931551bSRalph Campbell unsigned flush_wc; 1279f931551bSRalph Campbell u32 control; 1280f931551bSRalph Campbell u32 pbufn; 1281f931551bSRalph Campbell 1282f931551bSRalph Campbell control = dd->f_setpbc_control(ppd, plen, qp->s_srate, 1283f931551bSRalph Campbell be16_to_cpu(ibhdr->lrh[0]) >> 12); 1284f931551bSRalph Campbell pbc = ((u64) control << 32) | plen; 1285f931551bSRalph Campbell piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn); 1286f931551bSRalph Campbell if (unlikely(piobuf == NULL)) 1287f931551bSRalph Campbell return no_bufs_available(qp); 1288f931551bSRalph Campbell 1289f931551bSRalph Campbell /* 1290f931551bSRalph Campbell * Write the pbc. 1291f931551bSRalph Campbell * We have to flush after the PBC for correctness on some cpus 1292f931551bSRalph Campbell * or WC buffer can be written out of order. 1293f931551bSRalph Campbell */ 1294f931551bSRalph Campbell writeq(pbc, piobuf); 1295f931551bSRalph Campbell piobuf_orig = piobuf; 1296f931551bSRalph Campbell piobuf += 2; 1297f931551bSRalph Campbell 1298f931551bSRalph Campbell flush_wc = dd->flags & QIB_PIO_FLUSH_WC; 1299f931551bSRalph Campbell if (len == 0) { 1300f931551bSRalph Campbell /* 1301f931551bSRalph Campbell * If there is just the header portion, must flush before 1302f931551bSRalph Campbell * writing last word of header for correctness, and after 1303f931551bSRalph Campbell * the last header word (trigger word). 1304f931551bSRalph Campbell */ 1305f931551bSRalph Campbell if (flush_wc) { 1306f931551bSRalph Campbell qib_flush_wc(); 1307f931551bSRalph Campbell qib_pio_copy(piobuf, hdr, hdrwords - 1); 1308f931551bSRalph Campbell qib_flush_wc(); 1309f931551bSRalph Campbell __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1); 1310f931551bSRalph Campbell qib_flush_wc(); 1311f931551bSRalph Campbell } else 1312f931551bSRalph Campbell qib_pio_copy(piobuf, hdr, hdrwords); 1313f931551bSRalph Campbell goto done; 1314f931551bSRalph Campbell } 1315f931551bSRalph Campbell 1316f931551bSRalph Campbell if (flush_wc) 1317f931551bSRalph Campbell qib_flush_wc(); 1318f931551bSRalph Campbell qib_pio_copy(piobuf, hdr, hdrwords); 1319f931551bSRalph Campbell piobuf += hdrwords; 1320f931551bSRalph Campbell 1321f931551bSRalph Campbell /* The common case is aligned and contained in one segment. */ 1322f931551bSRalph Campbell if (likely(ss->num_sge == 1 && len <= ss->sge.length && 1323f931551bSRalph Campbell !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) { 1324f931551bSRalph Campbell u32 *addr = (u32 *) ss->sge.vaddr; 1325f931551bSRalph Campbell 1326f931551bSRalph Campbell /* Update address before sending packet. */ 1327f931551bSRalph Campbell update_sge(ss, len); 1328f931551bSRalph Campbell if (flush_wc) { 1329f931551bSRalph Campbell qib_pio_copy(piobuf, addr, dwords - 1); 1330f931551bSRalph Campbell /* must flush early everything before trigger word */ 1331f931551bSRalph Campbell qib_flush_wc(); 1332f931551bSRalph Campbell __raw_writel(addr[dwords - 1], piobuf + dwords - 1); 1333f931551bSRalph Campbell /* be sure trigger word is written */ 1334f931551bSRalph Campbell qib_flush_wc(); 1335f931551bSRalph Campbell } else 1336f931551bSRalph Campbell qib_pio_copy(piobuf, addr, dwords); 1337f931551bSRalph Campbell goto done; 1338f931551bSRalph Campbell } 1339f931551bSRalph Campbell copy_io(piobuf, ss, len, flush_wc); 1340f931551bSRalph Campbell done: 1341f931551bSRalph Campbell if (dd->flags & QIB_USE_SPCL_TRIG) { 1342f931551bSRalph Campbell u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023; 1343f931551bSRalph Campbell qib_flush_wc(); 1344f931551bSRalph Campbell __raw_writel(0xaebecede, piobuf_orig + spcl_off); 1345f931551bSRalph Campbell } 1346f931551bSRalph Campbell qib_sendbuf_done(dd, pbufn); 1347f931551bSRalph Campbell if (qp->s_rdma_mr) { 13486a82649fSMike Marciniszyn qib_put_mr(qp->s_rdma_mr); 1349f931551bSRalph Campbell qp->s_rdma_mr = NULL; 1350f931551bSRalph Campbell } 1351f931551bSRalph Campbell if (qp->s_wqe) { 1352f931551bSRalph Campbell spin_lock_irqsave(&qp->s_lock, flags); 1353f931551bSRalph Campbell qib_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS); 1354f931551bSRalph Campbell spin_unlock_irqrestore(&qp->s_lock, flags); 1355f931551bSRalph Campbell } else if (qp->ibqp.qp_type == IB_QPT_RC) { 1356f931551bSRalph Campbell spin_lock_irqsave(&qp->s_lock, flags); 1357f931551bSRalph Campbell qib_rc_send_complete(qp, ibhdr); 1358f931551bSRalph Campbell spin_unlock_irqrestore(&qp->s_lock, flags); 1359f931551bSRalph Campbell } 1360f931551bSRalph Campbell return 0; 1361f931551bSRalph Campbell } 1362f931551bSRalph Campbell 1363f931551bSRalph Campbell /** 1364f931551bSRalph Campbell * qib_verbs_send - send a packet 1365f931551bSRalph Campbell * @qp: the QP to send on 1366f931551bSRalph Campbell * @hdr: the packet header 1367f931551bSRalph Campbell * @hdrwords: the number of 32-bit words in the header 1368f931551bSRalph Campbell * @ss: the SGE to send 1369f931551bSRalph Campbell * @len: the length of the packet in bytes 1370f931551bSRalph Campbell * 1371f931551bSRalph Campbell * Return zero if packet is sent or queued OK. 1372f931551bSRalph Campbell * Return non-zero and clear qp->s_flags QIB_S_BUSY otherwise. 1373f931551bSRalph Campbell */ 1374f931551bSRalph Campbell int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr, 1375f931551bSRalph Campbell u32 hdrwords, struct qib_sge_state *ss, u32 len) 1376f931551bSRalph Campbell { 1377f931551bSRalph Campbell struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device); 1378f931551bSRalph Campbell u32 plen; 1379f931551bSRalph Campbell int ret; 1380f931551bSRalph Campbell u32 dwords = (len + 3) >> 2; 1381f931551bSRalph Campbell 1382f931551bSRalph Campbell /* 1383f931551bSRalph Campbell * Calculate the send buffer trigger address. 1384f931551bSRalph Campbell * The +1 counts for the pbc control dword following the pbc length. 1385f931551bSRalph Campbell */ 1386f931551bSRalph Campbell plen = hdrwords + dwords + 1; 1387f931551bSRalph Campbell 1388f931551bSRalph Campbell /* 1389f931551bSRalph Campbell * VL15 packets (IB_QPT_SMI) will always use PIO, so we 1390f931551bSRalph Campbell * can defer SDMA restart until link goes ACTIVE without 1391f931551bSRalph Campbell * worrying about just how we got there. 1392f931551bSRalph Campbell */ 1393f931551bSRalph Campbell if (qp->ibqp.qp_type == IB_QPT_SMI || 1394f931551bSRalph Campbell !(dd->flags & QIB_HAS_SEND_DMA)) 1395f931551bSRalph Campbell ret = qib_verbs_send_pio(qp, hdr, hdrwords, ss, len, 1396f931551bSRalph Campbell plen, dwords); 1397f931551bSRalph Campbell else 1398f931551bSRalph Campbell ret = qib_verbs_send_dma(qp, hdr, hdrwords, ss, len, 1399f931551bSRalph Campbell plen, dwords); 1400f931551bSRalph Campbell 1401f931551bSRalph Campbell return ret; 1402f931551bSRalph Campbell } 1403f931551bSRalph Campbell 1404f931551bSRalph Campbell int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords, 1405f931551bSRalph Campbell u64 *rwords, u64 *spkts, u64 *rpkts, 1406f931551bSRalph Campbell u64 *xmit_wait) 1407f931551bSRalph Campbell { 1408f931551bSRalph Campbell int ret; 1409f931551bSRalph Campbell struct qib_devdata *dd = ppd->dd; 1410f931551bSRalph Campbell 1411f931551bSRalph Campbell if (!(dd->flags & QIB_PRESENT)) { 1412f931551bSRalph Campbell /* no hardware, freeze, etc. */ 1413f931551bSRalph Campbell ret = -EINVAL; 1414f931551bSRalph Campbell goto bail; 1415f931551bSRalph Campbell } 1416f931551bSRalph Campbell *swords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDSEND); 1417f931551bSRalph Campbell *rwords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDRCV); 1418f931551bSRalph Campbell *spkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTSEND); 1419f931551bSRalph Campbell *rpkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTRCV); 1420f931551bSRalph Campbell *xmit_wait = dd->f_portcntr(ppd, QIBPORTCNTR_SENDSTALL); 1421f931551bSRalph Campbell 1422f931551bSRalph Campbell ret = 0; 1423f931551bSRalph Campbell 1424f931551bSRalph Campbell bail: 1425f931551bSRalph Campbell return ret; 1426f931551bSRalph Campbell } 1427f931551bSRalph Campbell 1428f931551bSRalph Campbell /** 1429f931551bSRalph Campbell * qib_get_counters - get various chip counters 1430f931551bSRalph Campbell * @dd: the qlogic_ib device 1431f931551bSRalph Campbell * @cntrs: counters are placed here 1432f931551bSRalph Campbell * 1433f931551bSRalph Campbell * Return the counters needed by recv_pma_get_portcounters(). 1434f931551bSRalph Campbell */ 1435f931551bSRalph Campbell int qib_get_counters(struct qib_pportdata *ppd, 1436f931551bSRalph Campbell struct qib_verbs_counters *cntrs) 1437f931551bSRalph Campbell { 1438f931551bSRalph Campbell int ret; 1439f931551bSRalph Campbell 1440f931551bSRalph Campbell if (!(ppd->dd->flags & QIB_PRESENT)) { 1441f931551bSRalph Campbell /* no hardware, freeze, etc. */ 1442f931551bSRalph Campbell ret = -EINVAL; 1443f931551bSRalph Campbell goto bail; 1444f931551bSRalph Campbell } 1445f931551bSRalph Campbell cntrs->symbol_error_counter = 1446f931551bSRalph Campbell ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR); 1447f931551bSRalph Campbell cntrs->link_error_recovery_counter = 1448f931551bSRalph Campbell ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKERRRECOV); 1449f931551bSRalph Campbell /* 1450f931551bSRalph Campbell * The link downed counter counts when the other side downs the 1451f931551bSRalph Campbell * connection. We add in the number of times we downed the link 1452f931551bSRalph Campbell * due to local link integrity errors to compensate. 1453f931551bSRalph Campbell */ 1454f931551bSRalph Campbell cntrs->link_downed_counter = 1455f931551bSRalph Campbell ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKDOWN); 1456f931551bSRalph Campbell cntrs->port_rcv_errors = 1457f931551bSRalph Campbell ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXDROPPKT) + 1458f931551bSRalph Campbell ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVOVFL) + 1459f931551bSRalph Campbell ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERR_RLEN) + 1460f931551bSRalph Campbell ppd->dd->f_portcntr(ppd, QIBPORTCNTR_INVALIDRLEN) + 1461f931551bSRalph Campbell ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLINK) + 1462f931551bSRalph Campbell ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRICRC) + 1463f931551bSRalph Campbell ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRVCRC) + 1464f931551bSRalph Campbell ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLPCRC) + 1465f931551bSRalph Campbell ppd->dd->f_portcntr(ppd, QIBPORTCNTR_BADFORMAT); 1466f931551bSRalph Campbell cntrs->port_rcv_errors += 1467f931551bSRalph Campbell ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXLOCALPHYERR); 1468f931551bSRalph Campbell cntrs->port_rcv_errors += 1469f931551bSRalph Campbell ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXVLERR); 1470f931551bSRalph Campbell cntrs->port_rcv_remphys_errors = 1471f931551bSRalph Campbell ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVEBP); 1472f931551bSRalph Campbell cntrs->port_xmit_discards = 1473f931551bSRalph Campbell ppd->dd->f_portcntr(ppd, QIBPORTCNTR_UNSUPVL); 1474f931551bSRalph Campbell cntrs->port_xmit_data = ppd->dd->f_portcntr(ppd, 1475f931551bSRalph Campbell QIBPORTCNTR_WORDSEND); 1476f931551bSRalph Campbell cntrs->port_rcv_data = ppd->dd->f_portcntr(ppd, 1477f931551bSRalph Campbell QIBPORTCNTR_WORDRCV); 1478f931551bSRalph Campbell cntrs->port_xmit_packets = ppd->dd->f_portcntr(ppd, 1479f931551bSRalph Campbell QIBPORTCNTR_PKTSEND); 1480f931551bSRalph Campbell cntrs->port_rcv_packets = ppd->dd->f_portcntr(ppd, 1481f931551bSRalph Campbell QIBPORTCNTR_PKTRCV); 1482f931551bSRalph Campbell cntrs->local_link_integrity_errors = 1483f931551bSRalph Campbell ppd->dd->f_portcntr(ppd, QIBPORTCNTR_LLI); 1484f931551bSRalph Campbell cntrs->excessive_buffer_overrun_errors = 1485f931551bSRalph Campbell ppd->dd->f_portcntr(ppd, QIBPORTCNTR_EXCESSBUFOVFL); 1486f931551bSRalph Campbell cntrs->vl15_dropped = 1487f931551bSRalph Campbell ppd->dd->f_portcntr(ppd, QIBPORTCNTR_VL15PKTDROP); 1488f931551bSRalph Campbell 1489f931551bSRalph Campbell ret = 0; 1490f931551bSRalph Campbell 1491f931551bSRalph Campbell bail: 1492f931551bSRalph Campbell return ret; 1493f931551bSRalph Campbell } 1494f931551bSRalph Campbell 1495f931551bSRalph Campbell /** 1496f931551bSRalph Campbell * qib_ib_piobufavail - callback when a PIO buffer is available 1497f931551bSRalph Campbell * @dd: the device pointer 1498f931551bSRalph Campbell * 1499f931551bSRalph Campbell * This is called from qib_intr() at interrupt level when a PIO buffer is 1500f931551bSRalph Campbell * available after qib_verbs_send() returned an error that no buffers were 1501f931551bSRalph Campbell * available. Disable the interrupt if there are no more QPs waiting. 1502f931551bSRalph Campbell */ 1503f931551bSRalph Campbell void qib_ib_piobufavail(struct qib_devdata *dd) 1504f931551bSRalph Campbell { 1505f931551bSRalph Campbell struct qib_ibdev *dev = &dd->verbs_dev; 1506f931551bSRalph Campbell struct list_head *list; 1507f931551bSRalph Campbell struct qib_qp *qps[5]; 1508f931551bSRalph Campbell struct qib_qp *qp; 1509f931551bSRalph Campbell unsigned long flags; 1510f931551bSRalph Campbell unsigned i, n; 1511f931551bSRalph Campbell 1512f931551bSRalph Campbell list = &dev->piowait; 1513f931551bSRalph Campbell n = 0; 1514f931551bSRalph Campbell 1515f931551bSRalph Campbell /* 1516f931551bSRalph Campbell * Note: checking that the piowait list is empty and clearing 1517f931551bSRalph Campbell * the buffer available interrupt needs to be atomic or we 1518f931551bSRalph Campbell * could end up with QPs on the wait list with the interrupt 1519f931551bSRalph Campbell * disabled. 1520f931551bSRalph Campbell */ 1521f931551bSRalph Campbell spin_lock_irqsave(&dev->pending_lock, flags); 1522f931551bSRalph Campbell while (!list_empty(list)) { 1523f931551bSRalph Campbell if (n == ARRAY_SIZE(qps)) 1524f931551bSRalph Campbell goto full; 1525f931551bSRalph Campbell qp = list_entry(list->next, struct qib_qp, iowait); 1526f931551bSRalph Campbell list_del_init(&qp->iowait); 1527f931551bSRalph Campbell atomic_inc(&qp->refcount); 1528f931551bSRalph Campbell qps[n++] = qp; 1529f931551bSRalph Campbell } 1530f931551bSRalph Campbell dd->f_wantpiobuf_intr(dd, 0); 1531f931551bSRalph Campbell full: 1532f931551bSRalph Campbell spin_unlock_irqrestore(&dev->pending_lock, flags); 1533f931551bSRalph Campbell 1534f931551bSRalph Campbell for (i = 0; i < n; i++) { 1535f931551bSRalph Campbell qp = qps[i]; 1536f931551bSRalph Campbell 1537f931551bSRalph Campbell spin_lock_irqsave(&qp->s_lock, flags); 1538f931551bSRalph Campbell if (qp->s_flags & QIB_S_WAIT_PIO) { 1539f931551bSRalph Campbell qp->s_flags &= ~QIB_S_WAIT_PIO; 1540f931551bSRalph Campbell qib_schedule_send(qp); 1541f931551bSRalph Campbell } 1542f931551bSRalph Campbell spin_unlock_irqrestore(&qp->s_lock, flags); 1543f931551bSRalph Campbell 1544f931551bSRalph Campbell /* Notify qib_destroy_qp() if it is waiting. */ 1545f931551bSRalph Campbell if (atomic_dec_and_test(&qp->refcount)) 1546f931551bSRalph Campbell wake_up(&qp->wait); 1547f931551bSRalph Campbell } 1548f931551bSRalph Campbell } 1549f931551bSRalph Campbell 1550f931551bSRalph Campbell static int qib_query_device(struct ib_device *ibdev, 1551f931551bSRalph Campbell struct ib_device_attr *props) 1552f931551bSRalph Campbell { 1553f931551bSRalph Campbell struct qib_devdata *dd = dd_from_ibdev(ibdev); 1554f931551bSRalph Campbell struct qib_ibdev *dev = to_idev(ibdev); 1555f931551bSRalph Campbell 1556f931551bSRalph Campbell memset(props, 0, sizeof(*props)); 1557f931551bSRalph Campbell 1558f931551bSRalph Campbell props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR | 1559f931551bSRalph Campbell IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT | 1560f931551bSRalph Campbell IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN | 1561f931551bSRalph Campbell IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE; 1562f931551bSRalph Campbell props->page_size_cap = PAGE_SIZE; 1563f931551bSRalph Campbell props->vendor_id = 1564f931551bSRalph Campbell QIB_SRC_OUI_1 << 16 | QIB_SRC_OUI_2 << 8 | QIB_SRC_OUI_3; 1565f931551bSRalph Campbell props->vendor_part_id = dd->deviceid; 1566f931551bSRalph Campbell props->hw_ver = dd->minrev; 1567f931551bSRalph Campbell props->sys_image_guid = ib_qib_sys_image_guid; 1568f931551bSRalph Campbell props->max_mr_size = ~0ULL; 1569f931551bSRalph Campbell props->max_qp = ib_qib_max_qps; 1570f931551bSRalph Campbell props->max_qp_wr = ib_qib_max_qp_wrs; 1571f931551bSRalph Campbell props->max_sge = ib_qib_max_sges; 1572f931551bSRalph Campbell props->max_cq = ib_qib_max_cqs; 1573f931551bSRalph Campbell props->max_ah = ib_qib_max_ahs; 1574f931551bSRalph Campbell props->max_cqe = ib_qib_max_cqes; 1575f931551bSRalph Campbell props->max_mr = dev->lk_table.max; 1576f931551bSRalph Campbell props->max_fmr = dev->lk_table.max; 1577f931551bSRalph Campbell props->max_map_per_fmr = 32767; 1578f931551bSRalph Campbell props->max_pd = ib_qib_max_pds; 1579f931551bSRalph Campbell props->max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC; 1580f931551bSRalph Campbell props->max_qp_init_rd_atom = 255; 1581f931551bSRalph Campbell /* props->max_res_rd_atom */ 1582f931551bSRalph Campbell props->max_srq = ib_qib_max_srqs; 1583f931551bSRalph Campbell props->max_srq_wr = ib_qib_max_srq_wrs; 1584f931551bSRalph Campbell props->max_srq_sge = ib_qib_max_srq_sges; 1585f931551bSRalph Campbell /* props->local_ca_ack_delay */ 1586f931551bSRalph Campbell props->atomic_cap = IB_ATOMIC_GLOB; 1587f931551bSRalph Campbell props->max_pkeys = qib_get_npkeys(dd); 1588f931551bSRalph Campbell props->max_mcast_grp = ib_qib_max_mcast_grps; 1589f931551bSRalph Campbell props->max_mcast_qp_attach = ib_qib_max_mcast_qp_attached; 1590f931551bSRalph Campbell props->max_total_mcast_qp_attach = props->max_mcast_qp_attach * 1591f931551bSRalph Campbell props->max_mcast_grp; 1592f931551bSRalph Campbell 1593f931551bSRalph Campbell return 0; 1594f931551bSRalph Campbell } 1595f931551bSRalph Campbell 1596f931551bSRalph Campbell static int qib_query_port(struct ib_device *ibdev, u8 port, 1597f931551bSRalph Campbell struct ib_port_attr *props) 1598f931551bSRalph Campbell { 1599f931551bSRalph Campbell struct qib_devdata *dd = dd_from_ibdev(ibdev); 1600f931551bSRalph Campbell struct qib_ibport *ibp = to_iport(ibdev, port); 1601f931551bSRalph Campbell struct qib_pportdata *ppd = ppd_from_ibp(ibp); 1602f931551bSRalph Campbell enum ib_mtu mtu; 1603f931551bSRalph Campbell u16 lid = ppd->lid; 1604f931551bSRalph Campbell 1605f931551bSRalph Campbell memset(props, 0, sizeof(*props)); 1606f931551bSRalph Campbell props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE); 1607f931551bSRalph Campbell props->lmc = ppd->lmc; 1608f931551bSRalph Campbell props->sm_lid = ibp->sm_lid; 1609f931551bSRalph Campbell props->sm_sl = ibp->sm_sl; 1610f931551bSRalph Campbell props->state = dd->f_iblink_state(ppd->lastibcstat); 1611f931551bSRalph Campbell props->phys_state = dd->f_ibphys_portstate(ppd->lastibcstat); 1612f931551bSRalph Campbell props->port_cap_flags = ibp->port_cap_flags; 1613f931551bSRalph Campbell props->gid_tbl_len = QIB_GUIDS_PER_PORT; 1614f931551bSRalph Campbell props->max_msg_sz = 0x80000000; 1615f931551bSRalph Campbell props->pkey_tbl_len = qib_get_npkeys(dd); 1616f931551bSRalph Campbell props->bad_pkey_cntr = ibp->pkey_violations; 1617f931551bSRalph Campbell props->qkey_viol_cntr = ibp->qkey_violations; 1618f931551bSRalph Campbell props->active_width = ppd->link_width_active; 1619f931551bSRalph Campbell /* See rate_show() */ 1620f931551bSRalph Campbell props->active_speed = ppd->link_speed_active; 1621f931551bSRalph Campbell props->max_vl_num = qib_num_vls(ppd->vls_supported); 1622f931551bSRalph Campbell props->init_type_reply = 0; 1623f931551bSRalph Campbell 1624f931551bSRalph Campbell props->max_mtu = qib_ibmtu ? qib_ibmtu : IB_MTU_4096; 1625f931551bSRalph Campbell switch (ppd->ibmtu) { 1626f931551bSRalph Campbell case 4096: 1627f931551bSRalph Campbell mtu = IB_MTU_4096; 1628f931551bSRalph Campbell break; 1629f931551bSRalph Campbell case 2048: 1630f931551bSRalph Campbell mtu = IB_MTU_2048; 1631f931551bSRalph Campbell break; 1632f931551bSRalph Campbell case 1024: 1633f931551bSRalph Campbell mtu = IB_MTU_1024; 1634f931551bSRalph Campbell break; 1635f931551bSRalph Campbell case 512: 1636f931551bSRalph Campbell mtu = IB_MTU_512; 1637f931551bSRalph Campbell break; 1638f931551bSRalph Campbell case 256: 1639f931551bSRalph Campbell mtu = IB_MTU_256; 1640f931551bSRalph Campbell break; 1641f931551bSRalph Campbell default: 1642f931551bSRalph Campbell mtu = IB_MTU_2048; 1643f931551bSRalph Campbell } 1644f931551bSRalph Campbell props->active_mtu = mtu; 1645f931551bSRalph Campbell props->subnet_timeout = ibp->subnet_timeout; 1646f931551bSRalph Campbell 1647f931551bSRalph Campbell return 0; 1648f931551bSRalph Campbell } 1649f931551bSRalph Campbell 1650f931551bSRalph Campbell static int qib_modify_device(struct ib_device *device, 1651f931551bSRalph Campbell int device_modify_mask, 1652f931551bSRalph Campbell struct ib_device_modify *device_modify) 1653f931551bSRalph Campbell { 1654f931551bSRalph Campbell struct qib_devdata *dd = dd_from_ibdev(device); 1655f931551bSRalph Campbell unsigned i; 1656f931551bSRalph Campbell int ret; 1657f931551bSRalph Campbell 1658f931551bSRalph Campbell if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID | 1659f931551bSRalph Campbell IB_DEVICE_MODIFY_NODE_DESC)) { 1660f931551bSRalph Campbell ret = -EOPNOTSUPP; 1661f931551bSRalph Campbell goto bail; 1662f931551bSRalph Campbell } 1663f931551bSRalph Campbell 1664f931551bSRalph Campbell if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) { 1665f931551bSRalph Campbell memcpy(device->node_desc, device_modify->node_desc, 64); 1666f931551bSRalph Campbell for (i = 0; i < dd->num_pports; i++) { 1667f931551bSRalph Campbell struct qib_ibport *ibp = &dd->pport[i].ibport_data; 1668f931551bSRalph Campbell 1669f931551bSRalph Campbell qib_node_desc_chg(ibp); 1670f931551bSRalph Campbell } 1671f931551bSRalph Campbell } 1672f931551bSRalph Campbell 1673f931551bSRalph Campbell if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) { 1674f931551bSRalph Campbell ib_qib_sys_image_guid = 1675f931551bSRalph Campbell cpu_to_be64(device_modify->sys_image_guid); 1676f931551bSRalph Campbell for (i = 0; i < dd->num_pports; i++) { 1677f931551bSRalph Campbell struct qib_ibport *ibp = &dd->pport[i].ibport_data; 1678f931551bSRalph Campbell 1679f931551bSRalph Campbell qib_sys_guid_chg(ibp); 1680f931551bSRalph Campbell } 1681f931551bSRalph Campbell } 1682f931551bSRalph Campbell 1683f931551bSRalph Campbell ret = 0; 1684f931551bSRalph Campbell 1685f931551bSRalph Campbell bail: 1686f931551bSRalph Campbell return ret; 1687f931551bSRalph Campbell } 1688f931551bSRalph Campbell 1689f931551bSRalph Campbell static int qib_modify_port(struct ib_device *ibdev, u8 port, 1690f931551bSRalph Campbell int port_modify_mask, struct ib_port_modify *props) 1691f931551bSRalph Campbell { 1692f931551bSRalph Campbell struct qib_ibport *ibp = to_iport(ibdev, port); 1693f931551bSRalph Campbell struct qib_pportdata *ppd = ppd_from_ibp(ibp); 1694f931551bSRalph Campbell 1695f931551bSRalph Campbell ibp->port_cap_flags |= props->set_port_cap_mask; 1696f931551bSRalph Campbell ibp->port_cap_flags &= ~props->clr_port_cap_mask; 1697f931551bSRalph Campbell if (props->set_port_cap_mask || props->clr_port_cap_mask) 1698f931551bSRalph Campbell qib_cap_mask_chg(ibp); 1699f931551bSRalph Campbell if (port_modify_mask & IB_PORT_SHUTDOWN) 1700f931551bSRalph Campbell qib_set_linkstate(ppd, QIB_IB_LINKDOWN); 1701f931551bSRalph Campbell if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR) 1702f931551bSRalph Campbell ibp->qkey_violations = 0; 1703f931551bSRalph Campbell return 0; 1704f931551bSRalph Campbell } 1705f931551bSRalph Campbell 1706f931551bSRalph Campbell static int qib_query_gid(struct ib_device *ibdev, u8 port, 1707f931551bSRalph Campbell int index, union ib_gid *gid) 1708f931551bSRalph Campbell { 1709f931551bSRalph Campbell struct qib_devdata *dd = dd_from_ibdev(ibdev); 1710f931551bSRalph Campbell int ret = 0; 1711f931551bSRalph Campbell 1712f931551bSRalph Campbell if (!port || port > dd->num_pports) 1713f931551bSRalph Campbell ret = -EINVAL; 1714f931551bSRalph Campbell else { 1715f931551bSRalph Campbell struct qib_ibport *ibp = to_iport(ibdev, port); 1716f931551bSRalph Campbell struct qib_pportdata *ppd = ppd_from_ibp(ibp); 1717f931551bSRalph Campbell 1718f931551bSRalph Campbell gid->global.subnet_prefix = ibp->gid_prefix; 1719f931551bSRalph Campbell if (index == 0) 1720f931551bSRalph Campbell gid->global.interface_id = ppd->guid; 1721f931551bSRalph Campbell else if (index < QIB_GUIDS_PER_PORT) 1722f931551bSRalph Campbell gid->global.interface_id = ibp->guids[index - 1]; 1723f931551bSRalph Campbell else 1724f931551bSRalph Campbell ret = -EINVAL; 1725f931551bSRalph Campbell } 1726f931551bSRalph Campbell 1727f931551bSRalph Campbell return ret; 1728f931551bSRalph Campbell } 1729f931551bSRalph Campbell 1730f931551bSRalph Campbell static struct ib_pd *qib_alloc_pd(struct ib_device *ibdev, 1731f931551bSRalph Campbell struct ib_ucontext *context, 1732f931551bSRalph Campbell struct ib_udata *udata) 1733f931551bSRalph Campbell { 1734f931551bSRalph Campbell struct qib_ibdev *dev = to_idev(ibdev); 1735f931551bSRalph Campbell struct qib_pd *pd; 1736f931551bSRalph Campbell struct ib_pd *ret; 1737f931551bSRalph Campbell 1738f931551bSRalph Campbell /* 1739f931551bSRalph Campbell * This is actually totally arbitrary. Some correctness tests 1740f931551bSRalph Campbell * assume there's a maximum number of PDs that can be allocated. 1741f931551bSRalph Campbell * We don't actually have this limit, but we fail the test if 1742f931551bSRalph Campbell * we allow allocations of more than we report for this value. 1743f931551bSRalph Campbell */ 1744f931551bSRalph Campbell 1745f931551bSRalph Campbell pd = kmalloc(sizeof *pd, GFP_KERNEL); 1746f931551bSRalph Campbell if (!pd) { 1747f931551bSRalph Campbell ret = ERR_PTR(-ENOMEM); 1748f931551bSRalph Campbell goto bail; 1749f931551bSRalph Campbell } 1750f931551bSRalph Campbell 1751f931551bSRalph Campbell spin_lock(&dev->n_pds_lock); 1752f931551bSRalph Campbell if (dev->n_pds_allocated == ib_qib_max_pds) { 1753f931551bSRalph Campbell spin_unlock(&dev->n_pds_lock); 1754f931551bSRalph Campbell kfree(pd); 1755f931551bSRalph Campbell ret = ERR_PTR(-ENOMEM); 1756f931551bSRalph Campbell goto bail; 1757f931551bSRalph Campbell } 1758f931551bSRalph Campbell 1759f931551bSRalph Campbell dev->n_pds_allocated++; 1760f931551bSRalph Campbell spin_unlock(&dev->n_pds_lock); 1761f931551bSRalph Campbell 1762f931551bSRalph Campbell /* ib_alloc_pd() will initialize pd->ibpd. */ 1763f931551bSRalph Campbell pd->user = udata != NULL; 1764f931551bSRalph Campbell 1765f931551bSRalph Campbell ret = &pd->ibpd; 1766f931551bSRalph Campbell 1767f931551bSRalph Campbell bail: 1768f931551bSRalph Campbell return ret; 1769f931551bSRalph Campbell } 1770f931551bSRalph Campbell 1771f931551bSRalph Campbell static int qib_dealloc_pd(struct ib_pd *ibpd) 1772f931551bSRalph Campbell { 1773f931551bSRalph Campbell struct qib_pd *pd = to_ipd(ibpd); 1774f931551bSRalph Campbell struct qib_ibdev *dev = to_idev(ibpd->device); 1775f931551bSRalph Campbell 1776f931551bSRalph Campbell spin_lock(&dev->n_pds_lock); 1777f931551bSRalph Campbell dev->n_pds_allocated--; 1778f931551bSRalph Campbell spin_unlock(&dev->n_pds_lock); 1779f931551bSRalph Campbell 1780f931551bSRalph Campbell kfree(pd); 1781f931551bSRalph Campbell 1782f931551bSRalph Campbell return 0; 1783f931551bSRalph Campbell } 1784f931551bSRalph Campbell 1785f931551bSRalph Campbell int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr) 1786f931551bSRalph Campbell { 1787f931551bSRalph Campbell /* A multicast address requires a GRH (see ch. 8.4.1). */ 1788f931551bSRalph Campbell if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE && 1789f931551bSRalph Campbell ah_attr->dlid != QIB_PERMISSIVE_LID && 1790f931551bSRalph Campbell !(ah_attr->ah_flags & IB_AH_GRH)) 1791f931551bSRalph Campbell goto bail; 1792f931551bSRalph Campbell if ((ah_attr->ah_flags & IB_AH_GRH) && 1793f931551bSRalph Campbell ah_attr->grh.sgid_index >= QIB_GUIDS_PER_PORT) 1794f931551bSRalph Campbell goto bail; 1795f931551bSRalph Campbell if (ah_attr->dlid == 0) 1796f931551bSRalph Campbell goto bail; 1797f931551bSRalph Campbell if (ah_attr->port_num < 1 || 1798f931551bSRalph Campbell ah_attr->port_num > ibdev->phys_port_cnt) 1799f931551bSRalph Campbell goto bail; 1800f931551bSRalph Campbell if (ah_attr->static_rate != IB_RATE_PORT_CURRENT && 1801f931551bSRalph Campbell ib_rate_to_mult(ah_attr->static_rate) < 0) 1802f931551bSRalph Campbell goto bail; 1803f931551bSRalph Campbell if (ah_attr->sl > 15) 1804f931551bSRalph Campbell goto bail; 1805f931551bSRalph Campbell return 0; 1806f931551bSRalph Campbell bail: 1807f931551bSRalph Campbell return -EINVAL; 1808f931551bSRalph Campbell } 1809f931551bSRalph Campbell 1810f931551bSRalph Campbell /** 1811f931551bSRalph Campbell * qib_create_ah - create an address handle 1812f931551bSRalph Campbell * @pd: the protection domain 1813f931551bSRalph Campbell * @ah_attr: the attributes of the AH 1814f931551bSRalph Campbell * 1815f931551bSRalph Campbell * This may be called from interrupt context. 1816f931551bSRalph Campbell */ 1817f931551bSRalph Campbell static struct ib_ah *qib_create_ah(struct ib_pd *pd, 1818f931551bSRalph Campbell struct ib_ah_attr *ah_attr) 1819f931551bSRalph Campbell { 1820f931551bSRalph Campbell struct qib_ah *ah; 1821f931551bSRalph Campbell struct ib_ah *ret; 1822f931551bSRalph Campbell struct qib_ibdev *dev = to_idev(pd->device); 1823f931551bSRalph Campbell unsigned long flags; 1824f931551bSRalph Campbell 1825f931551bSRalph Campbell if (qib_check_ah(pd->device, ah_attr)) { 1826f931551bSRalph Campbell ret = ERR_PTR(-EINVAL); 1827f931551bSRalph Campbell goto bail; 1828f931551bSRalph Campbell } 1829f931551bSRalph Campbell 1830f931551bSRalph Campbell ah = kmalloc(sizeof *ah, GFP_ATOMIC); 1831f931551bSRalph Campbell if (!ah) { 1832f931551bSRalph Campbell ret = ERR_PTR(-ENOMEM); 1833f931551bSRalph Campbell goto bail; 1834f931551bSRalph Campbell } 1835f931551bSRalph Campbell 1836f931551bSRalph Campbell spin_lock_irqsave(&dev->n_ahs_lock, flags); 1837f931551bSRalph Campbell if (dev->n_ahs_allocated == ib_qib_max_ahs) { 1838f931551bSRalph Campbell spin_unlock_irqrestore(&dev->n_ahs_lock, flags); 1839f931551bSRalph Campbell kfree(ah); 1840f931551bSRalph Campbell ret = ERR_PTR(-ENOMEM); 1841f931551bSRalph Campbell goto bail; 1842f931551bSRalph Campbell } 1843f931551bSRalph Campbell 1844f931551bSRalph Campbell dev->n_ahs_allocated++; 1845f931551bSRalph Campbell spin_unlock_irqrestore(&dev->n_ahs_lock, flags); 1846f931551bSRalph Campbell 1847f931551bSRalph Campbell /* ib_create_ah() will initialize ah->ibah. */ 1848f931551bSRalph Campbell ah->attr = *ah_attr; 1849f931551bSRalph Campbell atomic_set(&ah->refcount, 0); 1850f931551bSRalph Campbell 1851f931551bSRalph Campbell ret = &ah->ibah; 1852f931551bSRalph Campbell 1853f931551bSRalph Campbell bail: 1854f931551bSRalph Campbell return ret; 1855f931551bSRalph Campbell } 1856f931551bSRalph Campbell 18571fb9fed6SMike Marciniszyn struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid) 18581fb9fed6SMike Marciniszyn { 18591fb9fed6SMike Marciniszyn struct ib_ah_attr attr; 18601fb9fed6SMike Marciniszyn struct ib_ah *ah = ERR_PTR(-EINVAL); 18611fb9fed6SMike Marciniszyn struct qib_qp *qp0; 18621fb9fed6SMike Marciniszyn 18631fb9fed6SMike Marciniszyn memset(&attr, 0, sizeof attr); 18641fb9fed6SMike Marciniszyn attr.dlid = dlid; 18651fb9fed6SMike Marciniszyn attr.port_num = ppd_from_ibp(ibp)->port; 18661fb9fed6SMike Marciniszyn rcu_read_lock(); 18671fb9fed6SMike Marciniszyn qp0 = rcu_dereference(ibp->qp0); 18681fb9fed6SMike Marciniszyn if (qp0) 18691fb9fed6SMike Marciniszyn ah = ib_create_ah(qp0->ibqp.pd, &attr); 18701fb9fed6SMike Marciniszyn rcu_read_unlock(); 18711fb9fed6SMike Marciniszyn return ah; 18721fb9fed6SMike Marciniszyn } 18731fb9fed6SMike Marciniszyn 1874f931551bSRalph Campbell /** 1875f931551bSRalph Campbell * qib_destroy_ah - destroy an address handle 1876f931551bSRalph Campbell * @ibah: the AH to destroy 1877f931551bSRalph Campbell * 1878f931551bSRalph Campbell * This may be called from interrupt context. 1879f931551bSRalph Campbell */ 1880f931551bSRalph Campbell static int qib_destroy_ah(struct ib_ah *ibah) 1881f931551bSRalph Campbell { 1882f931551bSRalph Campbell struct qib_ibdev *dev = to_idev(ibah->device); 1883f931551bSRalph Campbell struct qib_ah *ah = to_iah(ibah); 1884f931551bSRalph Campbell unsigned long flags; 1885f931551bSRalph Campbell 1886f931551bSRalph Campbell if (atomic_read(&ah->refcount) != 0) 1887f931551bSRalph Campbell return -EBUSY; 1888f931551bSRalph Campbell 1889f931551bSRalph Campbell spin_lock_irqsave(&dev->n_ahs_lock, flags); 1890f931551bSRalph Campbell dev->n_ahs_allocated--; 1891f931551bSRalph Campbell spin_unlock_irqrestore(&dev->n_ahs_lock, flags); 1892f931551bSRalph Campbell 1893f931551bSRalph Campbell kfree(ah); 1894f931551bSRalph Campbell 1895f931551bSRalph Campbell return 0; 1896f931551bSRalph Campbell } 1897f931551bSRalph Campbell 1898f931551bSRalph Campbell static int qib_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr) 1899f931551bSRalph Campbell { 1900f931551bSRalph Campbell struct qib_ah *ah = to_iah(ibah); 1901f931551bSRalph Campbell 1902f931551bSRalph Campbell if (qib_check_ah(ibah->device, ah_attr)) 1903f931551bSRalph Campbell return -EINVAL; 1904f931551bSRalph Campbell 1905f931551bSRalph Campbell ah->attr = *ah_attr; 1906f931551bSRalph Campbell 1907f931551bSRalph Campbell return 0; 1908f931551bSRalph Campbell } 1909f931551bSRalph Campbell 1910f931551bSRalph Campbell static int qib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr) 1911f931551bSRalph Campbell { 1912f931551bSRalph Campbell struct qib_ah *ah = to_iah(ibah); 1913f931551bSRalph Campbell 1914f931551bSRalph Campbell *ah_attr = ah->attr; 1915f931551bSRalph Campbell 1916f931551bSRalph Campbell return 0; 1917f931551bSRalph Campbell } 1918f931551bSRalph Campbell 1919f931551bSRalph Campbell /** 1920f931551bSRalph Campbell * qib_get_npkeys - return the size of the PKEY table for context 0 1921f931551bSRalph Campbell * @dd: the qlogic_ib device 1922f931551bSRalph Campbell */ 1923f931551bSRalph Campbell unsigned qib_get_npkeys(struct qib_devdata *dd) 1924f931551bSRalph Campbell { 1925f931551bSRalph Campbell return ARRAY_SIZE(dd->rcd[0]->pkeys); 1926f931551bSRalph Campbell } 1927f931551bSRalph Campbell 1928f931551bSRalph Campbell /* 1929f931551bSRalph Campbell * Return the indexed PKEY from the port PKEY table. 1930f931551bSRalph Campbell * No need to validate rcd[ctxt]; the port is setup if we are here. 1931f931551bSRalph Campbell */ 1932f931551bSRalph Campbell unsigned qib_get_pkey(struct qib_ibport *ibp, unsigned index) 1933f931551bSRalph Campbell { 1934f931551bSRalph Campbell struct qib_pportdata *ppd = ppd_from_ibp(ibp); 1935f931551bSRalph Campbell struct qib_devdata *dd = ppd->dd; 1936f931551bSRalph Campbell unsigned ctxt = ppd->hw_pidx; 1937f931551bSRalph Campbell unsigned ret; 1938f931551bSRalph Campbell 1939f931551bSRalph Campbell /* dd->rcd null if mini_init or some init failures */ 1940f931551bSRalph Campbell if (!dd->rcd || index >= ARRAY_SIZE(dd->rcd[ctxt]->pkeys)) 1941f931551bSRalph Campbell ret = 0; 1942f931551bSRalph Campbell else 1943f931551bSRalph Campbell ret = dd->rcd[ctxt]->pkeys[index]; 1944f931551bSRalph Campbell 1945f931551bSRalph Campbell return ret; 1946f931551bSRalph Campbell } 1947f931551bSRalph Campbell 1948f931551bSRalph Campbell static int qib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, 1949f931551bSRalph Campbell u16 *pkey) 1950f931551bSRalph Campbell { 1951f931551bSRalph Campbell struct qib_devdata *dd = dd_from_ibdev(ibdev); 1952f931551bSRalph Campbell int ret; 1953f931551bSRalph Campbell 1954f931551bSRalph Campbell if (index >= qib_get_npkeys(dd)) { 1955f931551bSRalph Campbell ret = -EINVAL; 1956f931551bSRalph Campbell goto bail; 1957f931551bSRalph Campbell } 1958f931551bSRalph Campbell 1959f931551bSRalph Campbell *pkey = qib_get_pkey(to_iport(ibdev, port), index); 1960f931551bSRalph Campbell ret = 0; 1961f931551bSRalph Campbell 1962f931551bSRalph Campbell bail: 1963f931551bSRalph Campbell return ret; 1964f931551bSRalph Campbell } 1965f931551bSRalph Campbell 1966f931551bSRalph Campbell /** 1967f931551bSRalph Campbell * qib_alloc_ucontext - allocate a ucontest 1968f931551bSRalph Campbell * @ibdev: the infiniband device 1969f931551bSRalph Campbell * @udata: not used by the QLogic_IB driver 1970f931551bSRalph Campbell */ 1971f931551bSRalph Campbell 1972f931551bSRalph Campbell static struct ib_ucontext *qib_alloc_ucontext(struct ib_device *ibdev, 1973f931551bSRalph Campbell struct ib_udata *udata) 1974f931551bSRalph Campbell { 1975f931551bSRalph Campbell struct qib_ucontext *context; 1976f931551bSRalph Campbell struct ib_ucontext *ret; 1977f931551bSRalph Campbell 1978f931551bSRalph Campbell context = kmalloc(sizeof *context, GFP_KERNEL); 1979f931551bSRalph Campbell if (!context) { 1980f931551bSRalph Campbell ret = ERR_PTR(-ENOMEM); 1981f931551bSRalph Campbell goto bail; 1982f931551bSRalph Campbell } 1983f931551bSRalph Campbell 1984f931551bSRalph Campbell ret = &context->ibucontext; 1985f931551bSRalph Campbell 1986f931551bSRalph Campbell bail: 1987f931551bSRalph Campbell return ret; 1988f931551bSRalph Campbell } 1989f931551bSRalph Campbell 1990f931551bSRalph Campbell static int qib_dealloc_ucontext(struct ib_ucontext *context) 1991f931551bSRalph Campbell { 1992f931551bSRalph Campbell kfree(to_iucontext(context)); 1993f931551bSRalph Campbell return 0; 1994f931551bSRalph Campbell } 1995f931551bSRalph Campbell 1996f931551bSRalph Campbell static void init_ibport(struct qib_pportdata *ppd) 1997f931551bSRalph Campbell { 1998f931551bSRalph Campbell struct qib_verbs_counters cntrs; 1999f931551bSRalph Campbell struct qib_ibport *ibp = &ppd->ibport_data; 2000f931551bSRalph Campbell 2001f931551bSRalph Campbell spin_lock_init(&ibp->lock); 2002f931551bSRalph Campbell /* Set the prefix to the default value (see ch. 4.1.1) */ 2003f931551bSRalph Campbell ibp->gid_prefix = IB_DEFAULT_GID_PREFIX; 2004f931551bSRalph Campbell ibp->sm_lid = be16_to_cpu(IB_LID_PERMISSIVE); 2005f931551bSRalph Campbell ibp->port_cap_flags = IB_PORT_SYS_IMAGE_GUID_SUP | 2006f931551bSRalph Campbell IB_PORT_CLIENT_REG_SUP | IB_PORT_SL_MAP_SUP | 2007f931551bSRalph Campbell IB_PORT_TRAP_SUP | IB_PORT_AUTO_MIGR_SUP | 2008f931551bSRalph Campbell IB_PORT_DR_NOTICE_SUP | IB_PORT_CAP_MASK_NOTICE_SUP | 2009f931551bSRalph Campbell IB_PORT_OTHER_LOCAL_CHANGES_SUP; 2010f931551bSRalph Campbell if (ppd->dd->flags & QIB_HAS_LINK_LATENCY) 2011f931551bSRalph Campbell ibp->port_cap_flags |= IB_PORT_LINK_LATENCY_SUP; 2012f931551bSRalph Campbell ibp->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA; 2013f931551bSRalph Campbell ibp->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA; 2014f931551bSRalph Campbell ibp->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS; 2015f931551bSRalph Campbell ibp->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS; 2016f931551bSRalph Campbell ibp->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT; 2017f931551bSRalph Campbell 2018f931551bSRalph Campbell /* Snapshot current HW counters to "clear" them. */ 2019f931551bSRalph Campbell qib_get_counters(ppd, &cntrs); 2020f931551bSRalph Campbell ibp->z_symbol_error_counter = cntrs.symbol_error_counter; 2021f931551bSRalph Campbell ibp->z_link_error_recovery_counter = 2022f931551bSRalph Campbell cntrs.link_error_recovery_counter; 2023f931551bSRalph Campbell ibp->z_link_downed_counter = cntrs.link_downed_counter; 2024f931551bSRalph Campbell ibp->z_port_rcv_errors = cntrs.port_rcv_errors; 2025f931551bSRalph Campbell ibp->z_port_rcv_remphys_errors = cntrs.port_rcv_remphys_errors; 2026f931551bSRalph Campbell ibp->z_port_xmit_discards = cntrs.port_xmit_discards; 2027f931551bSRalph Campbell ibp->z_port_xmit_data = cntrs.port_xmit_data; 2028f931551bSRalph Campbell ibp->z_port_rcv_data = cntrs.port_rcv_data; 2029f931551bSRalph Campbell ibp->z_port_xmit_packets = cntrs.port_xmit_packets; 2030f931551bSRalph Campbell ibp->z_port_rcv_packets = cntrs.port_rcv_packets; 2031f931551bSRalph Campbell ibp->z_local_link_integrity_errors = 2032f931551bSRalph Campbell cntrs.local_link_integrity_errors; 2033f931551bSRalph Campbell ibp->z_excessive_buffer_overrun_errors = 2034f931551bSRalph Campbell cntrs.excessive_buffer_overrun_errors; 2035f931551bSRalph Campbell ibp->z_vl15_dropped = cntrs.vl15_dropped; 2036af061a64SMike Marciniszyn RCU_INIT_POINTER(ibp->qp0, NULL); 2037af061a64SMike Marciniszyn RCU_INIT_POINTER(ibp->qp1, NULL); 2038f931551bSRalph Campbell } 2039f931551bSRalph Campbell 2040f931551bSRalph Campbell /** 2041f931551bSRalph Campbell * qib_register_ib_device - register our device with the infiniband core 2042f931551bSRalph Campbell * @dd: the device data structure 2043f931551bSRalph Campbell * Return the allocated qib_ibdev pointer or NULL on error. 2044f931551bSRalph Campbell */ 2045f931551bSRalph Campbell int qib_register_ib_device(struct qib_devdata *dd) 2046f931551bSRalph Campbell { 2047f931551bSRalph Campbell struct qib_ibdev *dev = &dd->verbs_dev; 2048f931551bSRalph Campbell struct ib_device *ibdev = &dev->ibdev; 2049f931551bSRalph Campbell struct qib_pportdata *ppd = dd->pport; 2050f931551bSRalph Campbell unsigned i, lk_tab_size; 2051f931551bSRalph Campbell int ret; 2052f931551bSRalph Campbell 2053f931551bSRalph Campbell dev->qp_table_size = ib_qib_qp_table_size; 2054af061a64SMike Marciniszyn get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd)); 2055af061a64SMike Marciniszyn dev->qp_table = kmalloc(dev->qp_table_size * sizeof *dev->qp_table, 2056f931551bSRalph Campbell GFP_KERNEL); 2057f931551bSRalph Campbell if (!dev->qp_table) { 2058f931551bSRalph Campbell ret = -ENOMEM; 2059f931551bSRalph Campbell goto err_qpt; 2060f931551bSRalph Campbell } 2061af061a64SMike Marciniszyn for (i = 0; i < dev->qp_table_size; i++) 2062af061a64SMike Marciniszyn RCU_INIT_POINTER(dev->qp_table[i], NULL); 2063f931551bSRalph Campbell 2064f931551bSRalph Campbell for (i = 0; i < dd->num_pports; i++) 2065f931551bSRalph Campbell init_ibport(ppd + i); 2066f931551bSRalph Campbell 2067f931551bSRalph Campbell /* Only need to initialize non-zero fields. */ 2068f931551bSRalph Campbell spin_lock_init(&dev->qpt_lock); 2069f931551bSRalph Campbell spin_lock_init(&dev->n_pds_lock); 2070f931551bSRalph Campbell spin_lock_init(&dev->n_ahs_lock); 2071f931551bSRalph Campbell spin_lock_init(&dev->n_cqs_lock); 2072f931551bSRalph Campbell spin_lock_init(&dev->n_qps_lock); 2073f931551bSRalph Campbell spin_lock_init(&dev->n_srqs_lock); 2074f931551bSRalph Campbell spin_lock_init(&dev->n_mcast_grps_lock); 2075f931551bSRalph Campbell init_timer(&dev->mem_timer); 2076f931551bSRalph Campbell dev->mem_timer.function = mem_timer; 2077f931551bSRalph Campbell dev->mem_timer.data = (unsigned long) dev; 2078f931551bSRalph Campbell 2079f931551bSRalph Campbell qib_init_qpn_table(dd, &dev->qpn_table); 2080f931551bSRalph Campbell 2081f931551bSRalph Campbell /* 2082f931551bSRalph Campbell * The top ib_qib_lkey_table_size bits are used to index the 2083f931551bSRalph Campbell * table. The lower 8 bits can be owned by the user (copied from 2084f931551bSRalph Campbell * the LKEY). The remaining bits act as a generation number or tag. 2085f931551bSRalph Campbell */ 2086f931551bSRalph Campbell spin_lock_init(&dev->lk_table.lock); 2087f931551bSRalph Campbell dev->lk_table.max = 1 << ib_qib_lkey_table_size; 2088f931551bSRalph Campbell lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table); 20891fb9fed6SMike Marciniszyn dev->lk_table.table = (struct qib_mregion __rcu **) 2090f931551bSRalph Campbell __get_free_pages(GFP_KERNEL, get_order(lk_tab_size)); 2091f931551bSRalph Campbell if (dev->lk_table.table == NULL) { 2092f931551bSRalph Campbell ret = -ENOMEM; 2093f931551bSRalph Campbell goto err_lk; 2094f931551bSRalph Campbell } 20958aac4cc3SMike Marciniszyn RCU_INIT_POINTER(dev->dma_mr, NULL); 20968aac4cc3SMike Marciniszyn for (i = 0; i < dev->lk_table.max; i++) 20978aac4cc3SMike Marciniszyn RCU_INIT_POINTER(dev->lk_table.table[i], NULL); 2098f931551bSRalph Campbell INIT_LIST_HEAD(&dev->pending_mmaps); 2099f931551bSRalph Campbell spin_lock_init(&dev->pending_lock); 2100f931551bSRalph Campbell dev->mmap_offset = PAGE_SIZE; 2101f931551bSRalph Campbell spin_lock_init(&dev->mmap_offset_lock); 2102f931551bSRalph Campbell INIT_LIST_HEAD(&dev->piowait); 2103f931551bSRalph Campbell INIT_LIST_HEAD(&dev->dmawait); 2104f931551bSRalph Campbell INIT_LIST_HEAD(&dev->txwait); 2105f931551bSRalph Campbell INIT_LIST_HEAD(&dev->memwait); 2106f931551bSRalph Campbell INIT_LIST_HEAD(&dev->txreq_free); 2107f931551bSRalph Campbell 2108f931551bSRalph Campbell if (ppd->sdma_descq_cnt) { 2109f931551bSRalph Campbell dev->pio_hdrs = dma_alloc_coherent(&dd->pcidev->dev, 2110f931551bSRalph Campbell ppd->sdma_descq_cnt * 2111f931551bSRalph Campbell sizeof(struct qib_pio_header), 2112f931551bSRalph Campbell &dev->pio_hdrs_phys, 2113f931551bSRalph Campbell GFP_KERNEL); 2114f931551bSRalph Campbell if (!dev->pio_hdrs) { 2115f931551bSRalph Campbell ret = -ENOMEM; 2116f931551bSRalph Campbell goto err_hdrs; 2117f931551bSRalph Campbell } 2118f931551bSRalph Campbell } 2119f931551bSRalph Campbell 2120f931551bSRalph Campbell for (i = 0; i < ppd->sdma_descq_cnt; i++) { 2121f931551bSRalph Campbell struct qib_verbs_txreq *tx; 2122f931551bSRalph Campbell 2123f931551bSRalph Campbell tx = kzalloc(sizeof *tx, GFP_KERNEL); 2124f931551bSRalph Campbell if (!tx) { 2125f931551bSRalph Campbell ret = -ENOMEM; 2126f931551bSRalph Campbell goto err_tx; 2127f931551bSRalph Campbell } 2128f931551bSRalph Campbell tx->hdr_inx = i; 2129f931551bSRalph Campbell list_add(&tx->txreq.list, &dev->txreq_free); 2130f931551bSRalph Campbell } 2131f931551bSRalph Campbell 2132f931551bSRalph Campbell /* 2133f931551bSRalph Campbell * The system image GUID is supposed to be the same for all 2134f931551bSRalph Campbell * IB HCAs in a single system but since there can be other 2135f931551bSRalph Campbell * device types in the system, we can't be sure this is unique. 2136f931551bSRalph Campbell */ 2137f931551bSRalph Campbell if (!ib_qib_sys_image_guid) 2138f931551bSRalph Campbell ib_qib_sys_image_guid = ppd->guid; 2139f931551bSRalph Campbell 2140f931551bSRalph Campbell strlcpy(ibdev->name, "qib%d", IB_DEVICE_NAME_MAX); 2141f931551bSRalph Campbell ibdev->owner = THIS_MODULE; 2142f931551bSRalph Campbell ibdev->node_guid = ppd->guid; 2143f931551bSRalph Campbell ibdev->uverbs_abi_ver = QIB_UVERBS_ABI_VERSION; 2144f931551bSRalph Campbell ibdev->uverbs_cmd_mask = 2145f931551bSRalph Campbell (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | 2146f931551bSRalph Campbell (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) | 2147f931551bSRalph Campbell (1ull << IB_USER_VERBS_CMD_QUERY_PORT) | 2148f931551bSRalph Campbell (1ull << IB_USER_VERBS_CMD_ALLOC_PD) | 2149f931551bSRalph Campbell (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) | 2150f931551bSRalph Campbell (1ull << IB_USER_VERBS_CMD_CREATE_AH) | 2151f931551bSRalph Campbell (1ull << IB_USER_VERBS_CMD_MODIFY_AH) | 2152f931551bSRalph Campbell (1ull << IB_USER_VERBS_CMD_QUERY_AH) | 2153f931551bSRalph Campbell (1ull << IB_USER_VERBS_CMD_DESTROY_AH) | 2154f931551bSRalph Campbell (1ull << IB_USER_VERBS_CMD_REG_MR) | 2155f931551bSRalph Campbell (1ull << IB_USER_VERBS_CMD_DEREG_MR) | 2156f931551bSRalph Campbell (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | 2157f931551bSRalph Campbell (1ull << IB_USER_VERBS_CMD_CREATE_CQ) | 2158f931551bSRalph Campbell (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) | 2159f931551bSRalph Campbell (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) | 2160f931551bSRalph Campbell (1ull << IB_USER_VERBS_CMD_POLL_CQ) | 2161f931551bSRalph Campbell (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) | 2162f931551bSRalph Campbell (1ull << IB_USER_VERBS_CMD_CREATE_QP) | 2163f931551bSRalph Campbell (1ull << IB_USER_VERBS_CMD_QUERY_QP) | 2164f931551bSRalph Campbell (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | 2165f931551bSRalph Campbell (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | 2166f931551bSRalph Campbell (1ull << IB_USER_VERBS_CMD_POST_SEND) | 2167f931551bSRalph Campbell (1ull << IB_USER_VERBS_CMD_POST_RECV) | 2168f931551bSRalph Campbell (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) | 2169f931551bSRalph Campbell (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) | 2170f931551bSRalph Campbell (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) | 2171f931551bSRalph Campbell (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) | 2172f931551bSRalph Campbell (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) | 2173f931551bSRalph Campbell (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) | 2174f931551bSRalph Campbell (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV); 2175f931551bSRalph Campbell ibdev->node_type = RDMA_NODE_IB_CA; 2176f931551bSRalph Campbell ibdev->phys_port_cnt = dd->num_pports; 2177f931551bSRalph Campbell ibdev->num_comp_vectors = 1; 2178f931551bSRalph Campbell ibdev->dma_device = &dd->pcidev->dev; 2179f931551bSRalph Campbell ibdev->query_device = qib_query_device; 2180f931551bSRalph Campbell ibdev->modify_device = qib_modify_device; 2181f931551bSRalph Campbell ibdev->query_port = qib_query_port; 2182f931551bSRalph Campbell ibdev->modify_port = qib_modify_port; 2183f931551bSRalph Campbell ibdev->query_pkey = qib_query_pkey; 2184f931551bSRalph Campbell ibdev->query_gid = qib_query_gid; 2185f931551bSRalph Campbell ibdev->alloc_ucontext = qib_alloc_ucontext; 2186f931551bSRalph Campbell ibdev->dealloc_ucontext = qib_dealloc_ucontext; 2187f931551bSRalph Campbell ibdev->alloc_pd = qib_alloc_pd; 2188f931551bSRalph Campbell ibdev->dealloc_pd = qib_dealloc_pd; 2189f931551bSRalph Campbell ibdev->create_ah = qib_create_ah; 2190f931551bSRalph Campbell ibdev->destroy_ah = qib_destroy_ah; 2191f931551bSRalph Campbell ibdev->modify_ah = qib_modify_ah; 2192f931551bSRalph Campbell ibdev->query_ah = qib_query_ah; 2193f931551bSRalph Campbell ibdev->create_srq = qib_create_srq; 2194f931551bSRalph Campbell ibdev->modify_srq = qib_modify_srq; 2195f931551bSRalph Campbell ibdev->query_srq = qib_query_srq; 2196f931551bSRalph Campbell ibdev->destroy_srq = qib_destroy_srq; 2197f931551bSRalph Campbell ibdev->create_qp = qib_create_qp; 2198f931551bSRalph Campbell ibdev->modify_qp = qib_modify_qp; 2199f931551bSRalph Campbell ibdev->query_qp = qib_query_qp; 2200f931551bSRalph Campbell ibdev->destroy_qp = qib_destroy_qp; 2201f931551bSRalph Campbell ibdev->post_send = qib_post_send; 2202f931551bSRalph Campbell ibdev->post_recv = qib_post_receive; 2203f931551bSRalph Campbell ibdev->post_srq_recv = qib_post_srq_receive; 2204f931551bSRalph Campbell ibdev->create_cq = qib_create_cq; 2205f931551bSRalph Campbell ibdev->destroy_cq = qib_destroy_cq; 2206f931551bSRalph Campbell ibdev->resize_cq = qib_resize_cq; 2207f931551bSRalph Campbell ibdev->poll_cq = qib_poll_cq; 2208f931551bSRalph Campbell ibdev->req_notify_cq = qib_req_notify_cq; 2209f931551bSRalph Campbell ibdev->get_dma_mr = qib_get_dma_mr; 2210f931551bSRalph Campbell ibdev->reg_phys_mr = qib_reg_phys_mr; 2211f931551bSRalph Campbell ibdev->reg_user_mr = qib_reg_user_mr; 2212f931551bSRalph Campbell ibdev->dereg_mr = qib_dereg_mr; 2213f931551bSRalph Campbell ibdev->alloc_fast_reg_mr = qib_alloc_fast_reg_mr; 2214f931551bSRalph Campbell ibdev->alloc_fast_reg_page_list = qib_alloc_fast_reg_page_list; 2215f931551bSRalph Campbell ibdev->free_fast_reg_page_list = qib_free_fast_reg_page_list; 2216f931551bSRalph Campbell ibdev->alloc_fmr = qib_alloc_fmr; 2217f931551bSRalph Campbell ibdev->map_phys_fmr = qib_map_phys_fmr; 2218f931551bSRalph Campbell ibdev->unmap_fmr = qib_unmap_fmr; 2219f931551bSRalph Campbell ibdev->dealloc_fmr = qib_dealloc_fmr; 2220f931551bSRalph Campbell ibdev->attach_mcast = qib_multicast_attach; 2221f931551bSRalph Campbell ibdev->detach_mcast = qib_multicast_detach; 2222f931551bSRalph Campbell ibdev->process_mad = qib_process_mad; 2223f931551bSRalph Campbell ibdev->mmap = qib_mmap; 2224f931551bSRalph Campbell ibdev->dma_ops = &qib_dma_mapping_ops; 2225f931551bSRalph Campbell 2226f931551bSRalph Campbell snprintf(ibdev->node_desc, sizeof(ibdev->node_desc), 2227*e2eed58bSVinit Agnihotri "Intel Infiniband HCA %s", init_utsname()->nodename); 2228f931551bSRalph Campbell 2229f931551bSRalph Campbell ret = ib_register_device(ibdev, qib_create_port_files); 2230f931551bSRalph Campbell if (ret) 2231f931551bSRalph Campbell goto err_reg; 2232f931551bSRalph Campbell 2233f931551bSRalph Campbell ret = qib_create_agents(dev); 2234f931551bSRalph Campbell if (ret) 2235f931551bSRalph Campbell goto err_agents; 2236f931551bSRalph Campbell 2237f931551bSRalph Campbell if (qib_verbs_register_sysfs(dd)) 2238f931551bSRalph Campbell goto err_class; 2239f931551bSRalph Campbell 2240f931551bSRalph Campbell goto bail; 2241f931551bSRalph Campbell 2242f931551bSRalph Campbell err_class: 2243f931551bSRalph Campbell qib_free_agents(dev); 2244f931551bSRalph Campbell err_agents: 2245f931551bSRalph Campbell ib_unregister_device(ibdev); 2246f931551bSRalph Campbell err_reg: 2247f931551bSRalph Campbell err_tx: 2248f931551bSRalph Campbell while (!list_empty(&dev->txreq_free)) { 2249f931551bSRalph Campbell struct list_head *l = dev->txreq_free.next; 2250f931551bSRalph Campbell struct qib_verbs_txreq *tx; 2251f931551bSRalph Campbell 2252f931551bSRalph Campbell list_del(l); 2253f931551bSRalph Campbell tx = list_entry(l, struct qib_verbs_txreq, txreq.list); 2254f931551bSRalph Campbell kfree(tx); 2255f931551bSRalph Campbell } 2256f931551bSRalph Campbell if (ppd->sdma_descq_cnt) 2257f931551bSRalph Campbell dma_free_coherent(&dd->pcidev->dev, 2258f931551bSRalph Campbell ppd->sdma_descq_cnt * 2259f931551bSRalph Campbell sizeof(struct qib_pio_header), 2260f931551bSRalph Campbell dev->pio_hdrs, dev->pio_hdrs_phys); 2261f931551bSRalph Campbell err_hdrs: 2262f931551bSRalph Campbell free_pages((unsigned long) dev->lk_table.table, get_order(lk_tab_size)); 2263f931551bSRalph Campbell err_lk: 2264f931551bSRalph Campbell kfree(dev->qp_table); 2265f931551bSRalph Campbell err_qpt: 2266f931551bSRalph Campbell qib_dev_err(dd, "cannot register verbs: %d!\n", -ret); 2267f931551bSRalph Campbell bail: 2268f931551bSRalph Campbell return ret; 2269f931551bSRalph Campbell } 2270f931551bSRalph Campbell 2271f931551bSRalph Campbell void qib_unregister_ib_device(struct qib_devdata *dd) 2272f931551bSRalph Campbell { 2273f931551bSRalph Campbell struct qib_ibdev *dev = &dd->verbs_dev; 2274f931551bSRalph Campbell struct ib_device *ibdev = &dev->ibdev; 2275f931551bSRalph Campbell u32 qps_inuse; 2276f931551bSRalph Campbell unsigned lk_tab_size; 2277f931551bSRalph Campbell 2278f931551bSRalph Campbell qib_verbs_unregister_sysfs(dd); 2279f931551bSRalph Campbell 2280f931551bSRalph Campbell qib_free_agents(dev); 2281f931551bSRalph Campbell 2282f931551bSRalph Campbell ib_unregister_device(ibdev); 2283f931551bSRalph Campbell 2284f931551bSRalph Campbell if (!list_empty(&dev->piowait)) 2285f931551bSRalph Campbell qib_dev_err(dd, "piowait list not empty!\n"); 2286f931551bSRalph Campbell if (!list_empty(&dev->dmawait)) 2287f931551bSRalph Campbell qib_dev_err(dd, "dmawait list not empty!\n"); 2288f931551bSRalph Campbell if (!list_empty(&dev->txwait)) 2289f931551bSRalph Campbell qib_dev_err(dd, "txwait list not empty!\n"); 2290f931551bSRalph Campbell if (!list_empty(&dev->memwait)) 2291f931551bSRalph Campbell qib_dev_err(dd, "memwait list not empty!\n"); 2292f931551bSRalph Campbell if (dev->dma_mr) 2293f931551bSRalph Campbell qib_dev_err(dd, "DMA MR not NULL!\n"); 2294f931551bSRalph Campbell 2295f931551bSRalph Campbell qps_inuse = qib_free_all_qps(dd); 2296f931551bSRalph Campbell if (qps_inuse) 2297f931551bSRalph Campbell qib_dev_err(dd, "QP memory leak! %u still in use\n", 2298f931551bSRalph Campbell qps_inuse); 2299f931551bSRalph Campbell 2300f931551bSRalph Campbell del_timer_sync(&dev->mem_timer); 2301f931551bSRalph Campbell qib_free_qpn_table(&dev->qpn_table); 2302f931551bSRalph Campbell while (!list_empty(&dev->txreq_free)) { 2303f931551bSRalph Campbell struct list_head *l = dev->txreq_free.next; 2304f931551bSRalph Campbell struct qib_verbs_txreq *tx; 2305f931551bSRalph Campbell 2306f931551bSRalph Campbell list_del(l); 2307f931551bSRalph Campbell tx = list_entry(l, struct qib_verbs_txreq, txreq.list); 2308f931551bSRalph Campbell kfree(tx); 2309f931551bSRalph Campbell } 2310f931551bSRalph Campbell if (dd->pport->sdma_descq_cnt) 2311f931551bSRalph Campbell dma_free_coherent(&dd->pcidev->dev, 2312f931551bSRalph Campbell dd->pport->sdma_descq_cnt * 2313f931551bSRalph Campbell sizeof(struct qib_pio_header), 2314f931551bSRalph Campbell dev->pio_hdrs, dev->pio_hdrs_phys); 2315f931551bSRalph Campbell lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table); 2316f931551bSRalph Campbell free_pages((unsigned long) dev->lk_table.table, 2317f931551bSRalph Campbell get_order(lk_tab_size)); 2318f931551bSRalph Campbell kfree(dev->qp_table); 2319f931551bSRalph Campbell } 2320551ace12SMike Marciniszyn 2321551ace12SMike Marciniszyn /* 2322551ace12SMike Marciniszyn * This must be called with s_lock held. 2323551ace12SMike Marciniszyn */ 2324551ace12SMike Marciniszyn void qib_schedule_send(struct qib_qp *qp) 2325551ace12SMike Marciniszyn { 2326551ace12SMike Marciniszyn if (qib_send_ok(qp)) { 2327551ace12SMike Marciniszyn struct qib_ibport *ibp = 2328551ace12SMike Marciniszyn to_iport(qp->ibqp.device, qp->port_num); 2329551ace12SMike Marciniszyn struct qib_pportdata *ppd = ppd_from_ibp(ibp); 2330551ace12SMike Marciniszyn 2331551ace12SMike Marciniszyn queue_work(ppd->qib_wq, &qp->s_work); 2332551ace12SMike Marciniszyn } 2333551ace12SMike Marciniszyn } 2334