1 /* 2 * Copyright(c) 2015 - 2018 Intel Corporation. 3 * 4 * This file is provided under a dual BSD/GPLv2 license. When using or 5 * redistributing this file, you may do so under either license. 6 * 7 * GPL LICENSE SUMMARY 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of version 2 of the GNU General Public License as 11 * published by the Free Software Foundation. 12 * 13 * This program is distributed in the hope that it will be useful, but 14 * WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16 * General Public License for more details. 17 * 18 * BSD LICENSE 19 * 20 * Redistribution and use in source and binary forms, with or without 21 * modification, are permitted provided that the following conditions 22 * are met: 23 * 24 * - Redistributions of source code must retain the above copyright 25 * notice, this list of conditions and the following disclaimer. 26 * - Redistributions in binary form must reproduce the above copyright 27 * notice, this list of conditions and the following disclaimer in 28 * the documentation and/or other materials provided with the 29 * distribution. 30 * - Neither the name of Intel Corporation nor the names of its 31 * contributors may be used to endorse or promote products derived 32 * from this software without specific prior written permission. 33 * 34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 45 * 46 */ 47 48 #include <rdma/ib_mad.h> 49 #include <rdma/ib_user_verbs.h> 50 #include <linux/io.h> 51 #include <linux/module.h> 52 #include <linux/utsname.h> 53 #include <linux/rculist.h> 54 #include <linux/mm.h> 55 #include <linux/vmalloc.h> 56 #include <rdma/opa_addr.h> 57 #include <linux/nospec.h> 58 59 #include "hfi.h" 60 #include "common.h" 61 #include "device.h" 62 #include "trace.h" 63 #include "qp.h" 64 #include "verbs_txreq.h" 65 #include "debugfs.h" 66 #include "vnic.h" 67 #include "fault.h" 68 #include "affinity.h" 69 70 static unsigned int hfi1_lkey_table_size = 16; 71 module_param_named(lkey_table_size, hfi1_lkey_table_size, uint, 72 S_IRUGO); 73 MODULE_PARM_DESC(lkey_table_size, 74 "LKEY table size in bits (2^n, 1 <= n <= 23)"); 75 76 static unsigned int hfi1_max_pds = 0xFFFF; 77 module_param_named(max_pds, hfi1_max_pds, uint, S_IRUGO); 78 MODULE_PARM_DESC(max_pds, 79 "Maximum number of protection domains to support"); 80 81 static unsigned int hfi1_max_ahs = 0xFFFF; 82 module_param_named(max_ahs, hfi1_max_ahs, uint, S_IRUGO); 83 MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support"); 84 85 unsigned int hfi1_max_cqes = 0x2FFFFF; 86 module_param_named(max_cqes, hfi1_max_cqes, uint, S_IRUGO); 87 MODULE_PARM_DESC(max_cqes, 88 "Maximum number of completion queue entries to support"); 89 90 unsigned int hfi1_max_cqs = 0x1FFFF; 91 module_param_named(max_cqs, hfi1_max_cqs, uint, S_IRUGO); 92 MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support"); 93 94 unsigned int hfi1_max_qp_wrs = 0x3FFF; 95 module_param_named(max_qp_wrs, hfi1_max_qp_wrs, uint, S_IRUGO); 96 MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support"); 97 98 unsigned int hfi1_max_qps = 32768; 99 module_param_named(max_qps, hfi1_max_qps, uint, S_IRUGO); 100 MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support"); 101 102 unsigned int hfi1_max_sges = 0x60; 103 module_param_named(max_sges, hfi1_max_sges, uint, S_IRUGO); 104 MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support"); 105 106 unsigned int hfi1_max_mcast_grps = 16384; 107 module_param_named(max_mcast_grps, hfi1_max_mcast_grps, uint, S_IRUGO); 108 MODULE_PARM_DESC(max_mcast_grps, 109 "Maximum number of multicast groups to support"); 110 111 unsigned int hfi1_max_mcast_qp_attached = 16; 112 module_param_named(max_mcast_qp_attached, hfi1_max_mcast_qp_attached, 113 uint, S_IRUGO); 114 MODULE_PARM_DESC(max_mcast_qp_attached, 115 "Maximum number of attached QPs to support"); 116 117 unsigned int hfi1_max_srqs = 1024; 118 module_param_named(max_srqs, hfi1_max_srqs, uint, S_IRUGO); 119 MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support"); 120 121 unsigned int hfi1_max_srq_sges = 128; 122 module_param_named(max_srq_sges, hfi1_max_srq_sges, uint, S_IRUGO); 123 MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support"); 124 125 unsigned int hfi1_max_srq_wrs = 0x1FFFF; 126 module_param_named(max_srq_wrs, hfi1_max_srq_wrs, uint, S_IRUGO); 127 MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs support"); 128 129 unsigned short piothreshold = 256; 130 module_param(piothreshold, ushort, S_IRUGO); 131 MODULE_PARM_DESC(piothreshold, "size used to determine sdma vs. pio"); 132 133 static unsigned int sge_copy_mode; 134 module_param(sge_copy_mode, uint, S_IRUGO); 135 MODULE_PARM_DESC(sge_copy_mode, 136 "Verbs copy mode: 0 use memcpy, 1 use cacheless copy, 2 adapt based on WSS"); 137 138 static void verbs_sdma_complete( 139 struct sdma_txreq *cookie, 140 int status); 141 142 static int pio_wait(struct rvt_qp *qp, 143 struct send_context *sc, 144 struct hfi1_pkt_state *ps, 145 u32 flag); 146 147 /* Length of buffer to create verbs txreq cache name */ 148 #define TXREQ_NAME_LEN 24 149 150 static uint wss_threshold = 80; 151 module_param(wss_threshold, uint, S_IRUGO); 152 MODULE_PARM_DESC(wss_threshold, "Percentage (1-100) of LLC to use as a threshold for a cacheless copy"); 153 static uint wss_clean_period = 256; 154 module_param(wss_clean_period, uint, S_IRUGO); 155 MODULE_PARM_DESC(wss_clean_period, "Count of verbs copies before an entry in the page copy table is cleaned"); 156 157 /* 158 * Translate ib_wr_opcode into ib_wc_opcode. 159 */ 160 const enum ib_wc_opcode ib_hfi1_wc_opcode[] = { 161 [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE, 162 [IB_WR_TID_RDMA_WRITE] = IB_WC_RDMA_WRITE, 163 [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE, 164 [IB_WR_SEND] = IB_WC_SEND, 165 [IB_WR_SEND_WITH_IMM] = IB_WC_SEND, 166 [IB_WR_RDMA_READ] = IB_WC_RDMA_READ, 167 [IB_WR_TID_RDMA_READ] = IB_WC_RDMA_READ, 168 [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP, 169 [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD, 170 [IB_WR_SEND_WITH_INV] = IB_WC_SEND, 171 [IB_WR_LOCAL_INV] = IB_WC_LOCAL_INV, 172 [IB_WR_REG_MR] = IB_WC_REG_MR 173 }; 174 175 /* 176 * Length of header by opcode, 0 --> not supported 177 */ 178 const u8 hdr_len_by_opcode[256] = { 179 /* RC */ 180 [IB_OPCODE_RC_SEND_FIRST] = 12 + 8, 181 [IB_OPCODE_RC_SEND_MIDDLE] = 12 + 8, 182 [IB_OPCODE_RC_SEND_LAST] = 12 + 8, 183 [IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE] = 12 + 8 + 4, 184 [IB_OPCODE_RC_SEND_ONLY] = 12 + 8, 185 [IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE] = 12 + 8 + 4, 186 [IB_OPCODE_RC_RDMA_WRITE_FIRST] = 12 + 8 + 16, 187 [IB_OPCODE_RC_RDMA_WRITE_MIDDLE] = 12 + 8, 188 [IB_OPCODE_RC_RDMA_WRITE_LAST] = 12 + 8, 189 [IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = 12 + 8 + 4, 190 [IB_OPCODE_RC_RDMA_WRITE_ONLY] = 12 + 8 + 16, 191 [IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = 12 + 8 + 20, 192 [IB_OPCODE_RC_RDMA_READ_REQUEST] = 12 + 8 + 16, 193 [IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST] = 12 + 8 + 4, 194 [IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE] = 12 + 8, 195 [IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST] = 12 + 8 + 4, 196 [IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY] = 12 + 8 + 4, 197 [IB_OPCODE_RC_ACKNOWLEDGE] = 12 + 8 + 4, 198 [IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = 12 + 8 + 4 + 8, 199 [IB_OPCODE_RC_COMPARE_SWAP] = 12 + 8 + 28, 200 [IB_OPCODE_RC_FETCH_ADD] = 12 + 8 + 28, 201 [IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE] = 12 + 8 + 4, 202 [IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE] = 12 + 8 + 4, 203 [IB_OPCODE_TID_RDMA_READ_REQ] = 12 + 8 + 36, 204 [IB_OPCODE_TID_RDMA_READ_RESP] = 12 + 8 + 36, 205 [IB_OPCODE_TID_RDMA_WRITE_REQ] = 12 + 8 + 36, 206 [IB_OPCODE_TID_RDMA_WRITE_RESP] = 12 + 8 + 36, 207 [IB_OPCODE_TID_RDMA_WRITE_DATA] = 12 + 8 + 36, 208 [IB_OPCODE_TID_RDMA_WRITE_DATA_LAST] = 12 + 8 + 36, 209 [IB_OPCODE_TID_RDMA_ACK] = 12 + 8 + 36, 210 [IB_OPCODE_TID_RDMA_RESYNC] = 12 + 8 + 36, 211 /* UC */ 212 [IB_OPCODE_UC_SEND_FIRST] = 12 + 8, 213 [IB_OPCODE_UC_SEND_MIDDLE] = 12 + 8, 214 [IB_OPCODE_UC_SEND_LAST] = 12 + 8, 215 [IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE] = 12 + 8 + 4, 216 [IB_OPCODE_UC_SEND_ONLY] = 12 + 8, 217 [IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE] = 12 + 8 + 4, 218 [IB_OPCODE_UC_RDMA_WRITE_FIRST] = 12 + 8 + 16, 219 [IB_OPCODE_UC_RDMA_WRITE_MIDDLE] = 12 + 8, 220 [IB_OPCODE_UC_RDMA_WRITE_LAST] = 12 + 8, 221 [IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = 12 + 8 + 4, 222 [IB_OPCODE_UC_RDMA_WRITE_ONLY] = 12 + 8 + 16, 223 [IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = 12 + 8 + 20, 224 /* UD */ 225 [IB_OPCODE_UD_SEND_ONLY] = 12 + 8 + 8, 226 [IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE] = 12 + 8 + 12 227 }; 228 229 static const opcode_handler opcode_handler_tbl[256] = { 230 /* RC */ 231 [IB_OPCODE_RC_SEND_FIRST] = &hfi1_rc_rcv, 232 [IB_OPCODE_RC_SEND_MIDDLE] = &hfi1_rc_rcv, 233 [IB_OPCODE_RC_SEND_LAST] = &hfi1_rc_rcv, 234 [IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE] = &hfi1_rc_rcv, 235 [IB_OPCODE_RC_SEND_ONLY] = &hfi1_rc_rcv, 236 [IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE] = &hfi1_rc_rcv, 237 [IB_OPCODE_RC_RDMA_WRITE_FIRST] = &hfi1_rc_rcv, 238 [IB_OPCODE_RC_RDMA_WRITE_MIDDLE] = &hfi1_rc_rcv, 239 [IB_OPCODE_RC_RDMA_WRITE_LAST] = &hfi1_rc_rcv, 240 [IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = &hfi1_rc_rcv, 241 [IB_OPCODE_RC_RDMA_WRITE_ONLY] = &hfi1_rc_rcv, 242 [IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = &hfi1_rc_rcv, 243 [IB_OPCODE_RC_RDMA_READ_REQUEST] = &hfi1_rc_rcv, 244 [IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST] = &hfi1_rc_rcv, 245 [IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE] = &hfi1_rc_rcv, 246 [IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST] = &hfi1_rc_rcv, 247 [IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY] = &hfi1_rc_rcv, 248 [IB_OPCODE_RC_ACKNOWLEDGE] = &hfi1_rc_rcv, 249 [IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = &hfi1_rc_rcv, 250 [IB_OPCODE_RC_COMPARE_SWAP] = &hfi1_rc_rcv, 251 [IB_OPCODE_RC_FETCH_ADD] = &hfi1_rc_rcv, 252 [IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE] = &hfi1_rc_rcv, 253 [IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE] = &hfi1_rc_rcv, 254 255 /* TID RDMA has separate handlers for different opcodes.*/ 256 [IB_OPCODE_TID_RDMA_WRITE_REQ] = &hfi1_rc_rcv_tid_rdma_write_req, 257 [IB_OPCODE_TID_RDMA_WRITE_RESP] = &hfi1_rc_rcv_tid_rdma_write_resp, 258 [IB_OPCODE_TID_RDMA_WRITE_DATA] = &hfi1_rc_rcv_tid_rdma_write_data, 259 [IB_OPCODE_TID_RDMA_WRITE_DATA_LAST] = &hfi1_rc_rcv_tid_rdma_write_data, 260 [IB_OPCODE_TID_RDMA_READ_REQ] = &hfi1_rc_rcv_tid_rdma_read_req, 261 [IB_OPCODE_TID_RDMA_READ_RESP] = &hfi1_rc_rcv_tid_rdma_read_resp, 262 [IB_OPCODE_TID_RDMA_RESYNC] = &hfi1_rc_rcv_tid_rdma_resync, 263 [IB_OPCODE_TID_RDMA_ACK] = &hfi1_rc_rcv_tid_rdma_ack, 264 265 /* UC */ 266 [IB_OPCODE_UC_SEND_FIRST] = &hfi1_uc_rcv, 267 [IB_OPCODE_UC_SEND_MIDDLE] = &hfi1_uc_rcv, 268 [IB_OPCODE_UC_SEND_LAST] = &hfi1_uc_rcv, 269 [IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE] = &hfi1_uc_rcv, 270 [IB_OPCODE_UC_SEND_ONLY] = &hfi1_uc_rcv, 271 [IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE] = &hfi1_uc_rcv, 272 [IB_OPCODE_UC_RDMA_WRITE_FIRST] = &hfi1_uc_rcv, 273 [IB_OPCODE_UC_RDMA_WRITE_MIDDLE] = &hfi1_uc_rcv, 274 [IB_OPCODE_UC_RDMA_WRITE_LAST] = &hfi1_uc_rcv, 275 [IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = &hfi1_uc_rcv, 276 [IB_OPCODE_UC_RDMA_WRITE_ONLY] = &hfi1_uc_rcv, 277 [IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = &hfi1_uc_rcv, 278 /* UD */ 279 [IB_OPCODE_UD_SEND_ONLY] = &hfi1_ud_rcv, 280 [IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE] = &hfi1_ud_rcv, 281 /* CNP */ 282 [IB_OPCODE_CNP] = &hfi1_cnp_rcv 283 }; 284 285 #define OPMASK 0x1f 286 287 static const u32 pio_opmask[BIT(3)] = { 288 /* RC */ 289 [IB_OPCODE_RC >> 5] = 290 BIT(RC_OP(SEND_ONLY) & OPMASK) | 291 BIT(RC_OP(SEND_ONLY_WITH_IMMEDIATE) & OPMASK) | 292 BIT(RC_OP(RDMA_WRITE_ONLY) & OPMASK) | 293 BIT(RC_OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE) & OPMASK) | 294 BIT(RC_OP(RDMA_READ_REQUEST) & OPMASK) | 295 BIT(RC_OP(ACKNOWLEDGE) & OPMASK) | 296 BIT(RC_OP(ATOMIC_ACKNOWLEDGE) & OPMASK) | 297 BIT(RC_OP(COMPARE_SWAP) & OPMASK) | 298 BIT(RC_OP(FETCH_ADD) & OPMASK), 299 /* UC */ 300 [IB_OPCODE_UC >> 5] = 301 BIT(UC_OP(SEND_ONLY) & OPMASK) | 302 BIT(UC_OP(SEND_ONLY_WITH_IMMEDIATE) & OPMASK) | 303 BIT(UC_OP(RDMA_WRITE_ONLY) & OPMASK) | 304 BIT(UC_OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE) & OPMASK), 305 }; 306 307 /* 308 * System image GUID. 309 */ 310 __be64 ib_hfi1_sys_image_guid; 311 312 /* 313 * Make sure the QP is ready and able to accept the given opcode. 314 */ 315 static inline opcode_handler qp_ok(struct hfi1_packet *packet) 316 { 317 if (!(ib_rvt_state_ops[packet->qp->state] & RVT_PROCESS_RECV_OK)) 318 return NULL; 319 if (((packet->opcode & RVT_OPCODE_QP_MASK) == 320 packet->qp->allowed_ops) || 321 (packet->opcode == IB_OPCODE_CNP)) 322 return opcode_handler_tbl[packet->opcode]; 323 324 return NULL; 325 } 326 327 static u64 hfi1_fault_tx(struct rvt_qp *qp, u8 opcode, u64 pbc) 328 { 329 #ifdef CONFIG_FAULT_INJECTION 330 if ((opcode & IB_OPCODE_MSP) == IB_OPCODE_MSP) { 331 /* 332 * In order to drop non-IB traffic we 333 * set PbcInsertHrc to NONE (0x2). 334 * The packet will still be delivered 335 * to the receiving node but a 336 * KHdrHCRCErr (KDETH packet with a bad 337 * HCRC) will be triggered and the 338 * packet will not be delivered to the 339 * correct context. 340 */ 341 pbc &= ~PBC_INSERT_HCRC_SMASK; 342 pbc |= (u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT; 343 } else { 344 /* 345 * In order to drop regular verbs 346 * traffic we set the PbcTestEbp 347 * flag. The packet will still be 348 * delivered to the receiving node but 349 * a 'late ebp error' will be 350 * triggered and will be dropped. 351 */ 352 pbc |= PBC_TEST_EBP; 353 } 354 #endif 355 return pbc; 356 } 357 358 static opcode_handler tid_qp_ok(int opcode, struct hfi1_packet *packet) 359 { 360 if (packet->qp->ibqp.qp_type != IB_QPT_RC || 361 !(ib_rvt_state_ops[packet->qp->state] & RVT_PROCESS_RECV_OK)) 362 return NULL; 363 if ((opcode & RVT_OPCODE_QP_MASK) == IB_OPCODE_TID_RDMA) 364 return opcode_handler_tbl[opcode]; 365 return NULL; 366 } 367 368 void hfi1_kdeth_eager_rcv(struct hfi1_packet *packet) 369 { 370 struct hfi1_ctxtdata *rcd = packet->rcd; 371 struct ib_header *hdr = packet->hdr; 372 u32 tlen = packet->tlen; 373 struct hfi1_pportdata *ppd = rcd->ppd; 374 struct hfi1_ibport *ibp = &ppd->ibport_data; 375 struct rvt_dev_info *rdi = &ppd->dd->verbs_dev.rdi; 376 opcode_handler opcode_handler; 377 unsigned long flags; 378 u32 qp_num; 379 int lnh; 380 u8 opcode; 381 382 /* DW == LRH (2) + BTH (3) + KDETH (9) + CRC (1) */ 383 if (unlikely(tlen < 15 * sizeof(u32))) 384 goto drop; 385 386 lnh = be16_to_cpu(hdr->lrh[0]) & 3; 387 if (lnh != HFI1_LRH_BTH) 388 goto drop; 389 390 packet->ohdr = &hdr->u.oth; 391 trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf))); 392 393 opcode = (be32_to_cpu(packet->ohdr->bth[0]) >> 24); 394 inc_opstats(tlen, &rcd->opstats->stats[opcode]); 395 396 /* verbs_qp can be picked up from any tid_rdma header struct */ 397 qp_num = be32_to_cpu(packet->ohdr->u.tid_rdma.r_req.verbs_qp) & 398 RVT_QPN_MASK; 399 400 rcu_read_lock(); 401 packet->qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num); 402 if (!packet->qp) 403 goto drop_rcu; 404 spin_lock_irqsave(&packet->qp->r_lock, flags); 405 opcode_handler = tid_qp_ok(opcode, packet); 406 if (likely(opcode_handler)) 407 opcode_handler(packet); 408 else 409 goto drop_unlock; 410 spin_unlock_irqrestore(&packet->qp->r_lock, flags); 411 rcu_read_unlock(); 412 413 return; 414 drop_unlock: 415 spin_unlock_irqrestore(&packet->qp->r_lock, flags); 416 drop_rcu: 417 rcu_read_unlock(); 418 drop: 419 ibp->rvp.n_pkt_drops++; 420 } 421 422 void hfi1_kdeth_expected_rcv(struct hfi1_packet *packet) 423 { 424 struct hfi1_ctxtdata *rcd = packet->rcd; 425 struct ib_header *hdr = packet->hdr; 426 u32 tlen = packet->tlen; 427 struct hfi1_pportdata *ppd = rcd->ppd; 428 struct hfi1_ibport *ibp = &ppd->ibport_data; 429 struct rvt_dev_info *rdi = &ppd->dd->verbs_dev.rdi; 430 opcode_handler opcode_handler; 431 unsigned long flags; 432 u32 qp_num; 433 int lnh; 434 u8 opcode; 435 436 /* DW == LRH (2) + BTH (3) + KDETH (9) + CRC (1) */ 437 if (unlikely(tlen < 15 * sizeof(u32))) 438 goto drop; 439 440 lnh = be16_to_cpu(hdr->lrh[0]) & 3; 441 if (lnh != HFI1_LRH_BTH) 442 goto drop; 443 444 packet->ohdr = &hdr->u.oth; 445 trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf))); 446 447 opcode = (be32_to_cpu(packet->ohdr->bth[0]) >> 24); 448 inc_opstats(tlen, &rcd->opstats->stats[opcode]); 449 450 /* verbs_qp can be picked up from any tid_rdma header struct */ 451 qp_num = be32_to_cpu(packet->ohdr->u.tid_rdma.r_rsp.verbs_qp) & 452 RVT_QPN_MASK; 453 454 rcu_read_lock(); 455 packet->qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num); 456 if (!packet->qp) 457 goto drop_rcu; 458 spin_lock_irqsave(&packet->qp->r_lock, flags); 459 opcode_handler = tid_qp_ok(opcode, packet); 460 if (likely(opcode_handler)) 461 opcode_handler(packet); 462 else 463 goto drop_unlock; 464 spin_unlock_irqrestore(&packet->qp->r_lock, flags); 465 rcu_read_unlock(); 466 467 return; 468 drop_unlock: 469 spin_unlock_irqrestore(&packet->qp->r_lock, flags); 470 drop_rcu: 471 rcu_read_unlock(); 472 drop: 473 ibp->rvp.n_pkt_drops++; 474 } 475 476 static int hfi1_do_pkey_check(struct hfi1_packet *packet) 477 { 478 struct hfi1_ctxtdata *rcd = packet->rcd; 479 struct hfi1_pportdata *ppd = rcd->ppd; 480 struct hfi1_16b_header *hdr = packet->hdr; 481 u16 pkey; 482 483 /* Pkey check needed only for bypass packets */ 484 if (packet->etype != RHF_RCV_TYPE_BYPASS) 485 return 0; 486 487 /* Perform pkey check */ 488 pkey = hfi1_16B_get_pkey(hdr); 489 return ingress_pkey_check(ppd, pkey, packet->sc, 490 packet->qp->s_pkey_index, 491 packet->slid, true); 492 } 493 494 static inline void hfi1_handle_packet(struct hfi1_packet *packet, 495 bool is_mcast) 496 { 497 u32 qp_num; 498 struct hfi1_ctxtdata *rcd = packet->rcd; 499 struct hfi1_pportdata *ppd = rcd->ppd; 500 struct hfi1_ibport *ibp = rcd_to_iport(rcd); 501 struct rvt_dev_info *rdi = &ppd->dd->verbs_dev.rdi; 502 opcode_handler packet_handler; 503 unsigned long flags; 504 505 inc_opstats(packet->tlen, &rcd->opstats->stats[packet->opcode]); 506 507 if (unlikely(is_mcast)) { 508 struct rvt_mcast *mcast; 509 struct rvt_mcast_qp *p; 510 511 if (!packet->grh) 512 goto drop; 513 mcast = rvt_mcast_find(&ibp->rvp, 514 &packet->grh->dgid, 515 opa_get_lid(packet->dlid, 9B)); 516 if (!mcast) 517 goto drop; 518 rcu_read_lock(); 519 list_for_each_entry_rcu(p, &mcast->qp_list, list) { 520 packet->qp = p->qp; 521 if (hfi1_do_pkey_check(packet)) 522 goto unlock_drop; 523 spin_lock_irqsave(&packet->qp->r_lock, flags); 524 packet_handler = qp_ok(packet); 525 if (likely(packet_handler)) 526 packet_handler(packet); 527 else 528 ibp->rvp.n_pkt_drops++; 529 spin_unlock_irqrestore(&packet->qp->r_lock, flags); 530 } 531 rcu_read_unlock(); 532 /* 533 * Notify rvt_multicast_detach() if it is waiting for us 534 * to finish. 535 */ 536 if (atomic_dec_return(&mcast->refcount) <= 1) 537 wake_up(&mcast->wait); 538 } else { 539 /* Get the destination QP number. */ 540 if (packet->etype == RHF_RCV_TYPE_BYPASS && 541 hfi1_16B_get_l4(packet->hdr) == OPA_16B_L4_FM) 542 qp_num = hfi1_16B_get_dest_qpn(packet->mgmt); 543 else 544 qp_num = ib_bth_get_qpn(packet->ohdr); 545 546 rcu_read_lock(); 547 packet->qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num); 548 if (!packet->qp) 549 goto unlock_drop; 550 551 if (hfi1_do_pkey_check(packet)) 552 goto unlock_drop; 553 554 spin_lock_irqsave(&packet->qp->r_lock, flags); 555 packet_handler = qp_ok(packet); 556 if (likely(packet_handler)) 557 packet_handler(packet); 558 else 559 ibp->rvp.n_pkt_drops++; 560 spin_unlock_irqrestore(&packet->qp->r_lock, flags); 561 rcu_read_unlock(); 562 } 563 return; 564 unlock_drop: 565 rcu_read_unlock(); 566 drop: 567 ibp->rvp.n_pkt_drops++; 568 } 569 570 /** 571 * hfi1_ib_rcv - process an incoming packet 572 * @packet: data packet information 573 * 574 * This is called to process an incoming packet at interrupt level. 575 */ 576 void hfi1_ib_rcv(struct hfi1_packet *packet) 577 { 578 struct hfi1_ctxtdata *rcd = packet->rcd; 579 580 trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf))); 581 hfi1_handle_packet(packet, hfi1_check_mcast(packet->dlid)); 582 } 583 584 void hfi1_16B_rcv(struct hfi1_packet *packet) 585 { 586 struct hfi1_ctxtdata *rcd = packet->rcd; 587 588 trace_input_ibhdr(rcd->dd, packet, false); 589 hfi1_handle_packet(packet, hfi1_check_mcast(packet->dlid)); 590 } 591 592 /* 593 * This is called from a timer to check for QPs 594 * which need kernel memory in order to send a packet. 595 */ 596 static void mem_timer(struct timer_list *t) 597 { 598 struct hfi1_ibdev *dev = from_timer(dev, t, mem_timer); 599 struct list_head *list = &dev->memwait; 600 struct rvt_qp *qp = NULL; 601 struct iowait *wait; 602 unsigned long flags; 603 struct hfi1_qp_priv *priv; 604 605 write_seqlock_irqsave(&dev->iowait_lock, flags); 606 if (!list_empty(list)) { 607 wait = list_first_entry(list, struct iowait, list); 608 qp = iowait_to_qp(wait); 609 priv = qp->priv; 610 list_del_init(&priv->s_iowait.list); 611 priv->s_iowait.lock = NULL; 612 /* refcount held until actual wake up */ 613 if (!list_empty(list)) 614 mod_timer(&dev->mem_timer, jiffies + 1); 615 } 616 write_sequnlock_irqrestore(&dev->iowait_lock, flags); 617 618 if (qp) 619 hfi1_qp_wakeup(qp, RVT_S_WAIT_KMEM); 620 } 621 622 /* 623 * This is called with progress side lock held. 624 */ 625 /* New API */ 626 static void verbs_sdma_complete( 627 struct sdma_txreq *cookie, 628 int status) 629 { 630 struct verbs_txreq *tx = 631 container_of(cookie, struct verbs_txreq, txreq); 632 struct rvt_qp *qp = tx->qp; 633 634 spin_lock(&qp->s_lock); 635 if (tx->wqe) { 636 rvt_send_complete(qp, tx->wqe, IB_WC_SUCCESS); 637 } else if (qp->ibqp.qp_type == IB_QPT_RC) { 638 struct hfi1_opa_header *hdr; 639 640 hdr = &tx->phdr.hdr; 641 if (unlikely(status == SDMA_TXREQ_S_ABORTED)) 642 hfi1_rc_verbs_aborted(qp, hdr); 643 hfi1_rc_send_complete(qp, hdr); 644 } 645 spin_unlock(&qp->s_lock); 646 647 hfi1_put_txreq(tx); 648 } 649 650 void hfi1_wait_kmem(struct rvt_qp *qp) 651 { 652 struct hfi1_qp_priv *priv = qp->priv; 653 struct ib_qp *ibqp = &qp->ibqp; 654 struct ib_device *ibdev = ibqp->device; 655 struct hfi1_ibdev *dev = to_idev(ibdev); 656 657 if (list_empty(&priv->s_iowait.list)) { 658 if (list_empty(&dev->memwait)) 659 mod_timer(&dev->mem_timer, jiffies + 1); 660 qp->s_flags |= RVT_S_WAIT_KMEM; 661 list_add_tail(&priv->s_iowait.list, &dev->memwait); 662 priv->s_iowait.lock = &dev->iowait_lock; 663 trace_hfi1_qpsleep(qp, RVT_S_WAIT_KMEM); 664 rvt_get_qp(qp); 665 } 666 } 667 668 static int wait_kmem(struct hfi1_ibdev *dev, 669 struct rvt_qp *qp, 670 struct hfi1_pkt_state *ps) 671 { 672 unsigned long flags; 673 int ret = 0; 674 675 spin_lock_irqsave(&qp->s_lock, flags); 676 if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) { 677 write_seqlock(&dev->iowait_lock); 678 list_add_tail(&ps->s_txreq->txreq.list, 679 &ps->wait->tx_head); 680 hfi1_wait_kmem(qp); 681 write_sequnlock(&dev->iowait_lock); 682 hfi1_qp_unbusy(qp, ps->wait); 683 ret = -EBUSY; 684 } 685 spin_unlock_irqrestore(&qp->s_lock, flags); 686 687 return ret; 688 } 689 690 /* 691 * This routine calls txadds for each sg entry. 692 * 693 * Add failures will revert the sge cursor 694 */ 695 static noinline int build_verbs_ulp_payload( 696 struct sdma_engine *sde, 697 u32 length, 698 struct verbs_txreq *tx) 699 { 700 struct rvt_sge_state *ss = tx->ss; 701 struct rvt_sge *sg_list = ss->sg_list; 702 struct rvt_sge sge = ss->sge; 703 u8 num_sge = ss->num_sge; 704 u32 len; 705 int ret = 0; 706 707 while (length) { 708 len = rvt_get_sge_length(&ss->sge, length); 709 WARN_ON_ONCE(len == 0); 710 ret = sdma_txadd_kvaddr( 711 sde->dd, 712 &tx->txreq, 713 ss->sge.vaddr, 714 len); 715 if (ret) 716 goto bail_txadd; 717 rvt_update_sge(ss, len, false); 718 length -= len; 719 } 720 return ret; 721 bail_txadd: 722 /* unwind cursor */ 723 ss->sge = sge; 724 ss->num_sge = num_sge; 725 ss->sg_list = sg_list; 726 return ret; 727 } 728 729 /** 730 * update_tx_opstats - record stats by opcode 731 * @qp; the qp 732 * @ps: transmit packet state 733 * @plen: the plen in dwords 734 * 735 * This is a routine to record the tx opstats after a 736 * packet has been presented to the egress mechanism. 737 */ 738 static void update_tx_opstats(struct rvt_qp *qp, struct hfi1_pkt_state *ps, 739 u32 plen) 740 { 741 #ifdef CONFIG_DEBUG_FS 742 struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); 743 struct hfi1_opcode_stats_perctx *s = get_cpu_ptr(dd->tx_opstats); 744 745 inc_opstats(plen * 4, &s->stats[ps->opcode]); 746 put_cpu_ptr(s); 747 #endif 748 } 749 750 /* 751 * Build the number of DMA descriptors needed to send length bytes of data. 752 * 753 * NOTE: DMA mapping is held in the tx until completed in the ring or 754 * the tx desc is freed without having been submitted to the ring 755 * 756 * This routine ensures all the helper routine calls succeed. 757 */ 758 /* New API */ 759 static int build_verbs_tx_desc( 760 struct sdma_engine *sde, 761 u32 length, 762 struct verbs_txreq *tx, 763 struct hfi1_ahg_info *ahg_info, 764 u64 pbc) 765 { 766 int ret = 0; 767 struct hfi1_sdma_header *phdr = &tx->phdr; 768 u16 hdrbytes = (tx->hdr_dwords + sizeof(pbc) / 4) << 2; 769 u8 extra_bytes = 0; 770 771 if (tx->phdr.hdr.hdr_type) { 772 /* 773 * hdrbytes accounts for PBC. Need to subtract 8 bytes 774 * before calculating padding. 775 */ 776 extra_bytes = hfi1_get_16b_padding(hdrbytes - 8, length) + 777 (SIZE_OF_CRC << 2) + SIZE_OF_LT; 778 } 779 if (!ahg_info->ahgcount) { 780 ret = sdma_txinit_ahg( 781 &tx->txreq, 782 ahg_info->tx_flags, 783 hdrbytes + length + 784 extra_bytes, 785 ahg_info->ahgidx, 786 0, 787 NULL, 788 0, 789 verbs_sdma_complete); 790 if (ret) 791 goto bail_txadd; 792 phdr->pbc = cpu_to_le64(pbc); 793 ret = sdma_txadd_kvaddr( 794 sde->dd, 795 &tx->txreq, 796 phdr, 797 hdrbytes); 798 if (ret) 799 goto bail_txadd; 800 } else { 801 ret = sdma_txinit_ahg( 802 &tx->txreq, 803 ahg_info->tx_flags, 804 length, 805 ahg_info->ahgidx, 806 ahg_info->ahgcount, 807 ahg_info->ahgdesc, 808 hdrbytes, 809 verbs_sdma_complete); 810 if (ret) 811 goto bail_txadd; 812 } 813 /* add the ulp payload - if any. tx->ss can be NULL for acks */ 814 if (tx->ss) { 815 ret = build_verbs_ulp_payload(sde, length, tx); 816 if (ret) 817 goto bail_txadd; 818 } 819 820 /* add icrc, lt byte, and padding to flit */ 821 if (extra_bytes) 822 ret = sdma_txadd_daddr(sde->dd, &tx->txreq, 823 sde->dd->sdma_pad_phys, extra_bytes); 824 825 bail_txadd: 826 return ret; 827 } 828 829 static u64 update_hcrc(u8 opcode, u64 pbc) 830 { 831 if ((opcode & IB_OPCODE_TID_RDMA) == IB_OPCODE_TID_RDMA) { 832 pbc &= ~PBC_INSERT_HCRC_SMASK; 833 pbc |= (u64)PBC_IHCRC_LKDETH << PBC_INSERT_HCRC_SHIFT; 834 } 835 return pbc; 836 } 837 838 int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps, 839 u64 pbc) 840 { 841 struct hfi1_qp_priv *priv = qp->priv; 842 struct hfi1_ahg_info *ahg_info = priv->s_ahg; 843 u32 hdrwords = ps->s_txreq->hdr_dwords; 844 u32 len = ps->s_txreq->s_cur_size; 845 u32 plen; 846 struct hfi1_ibdev *dev = ps->dev; 847 struct hfi1_pportdata *ppd = ps->ppd; 848 struct verbs_txreq *tx; 849 u8 sc5 = priv->s_sc; 850 int ret; 851 u32 dwords; 852 853 if (ps->s_txreq->phdr.hdr.hdr_type) { 854 u8 extra_bytes = hfi1_get_16b_padding((hdrwords << 2), len); 855 856 dwords = (len + extra_bytes + (SIZE_OF_CRC << 2) + 857 SIZE_OF_LT) >> 2; 858 } else { 859 dwords = (len + 3) >> 2; 860 } 861 plen = hdrwords + dwords + sizeof(pbc) / 4; 862 863 tx = ps->s_txreq; 864 if (!sdma_txreq_built(&tx->txreq)) { 865 if (likely(pbc == 0)) { 866 u32 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5); 867 868 /* No vl15 here */ 869 /* set PBC_DC_INFO bit (aka SC[4]) in pbc */ 870 if (ps->s_txreq->phdr.hdr.hdr_type) 871 pbc |= PBC_PACKET_BYPASS | 872 PBC_INSERT_BYPASS_ICRC; 873 else 874 pbc |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT); 875 876 pbc = create_pbc(ppd, 877 pbc, 878 qp->srate_mbps, 879 vl, 880 plen); 881 882 if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode))) 883 pbc = hfi1_fault_tx(qp, ps->opcode, pbc); 884 else 885 /* Update HCRC based on packet opcode */ 886 pbc = update_hcrc(ps->opcode, pbc); 887 } 888 tx->wqe = qp->s_wqe; 889 ret = build_verbs_tx_desc(tx->sde, len, tx, ahg_info, pbc); 890 if (unlikely(ret)) 891 goto bail_build; 892 } 893 ret = sdma_send_txreq(tx->sde, ps->wait, &tx->txreq, ps->pkts_sent); 894 if (unlikely(ret < 0)) { 895 if (ret == -ECOMM) 896 goto bail_ecomm; 897 return ret; 898 } 899 900 update_tx_opstats(qp, ps, plen); 901 trace_sdma_output_ibhdr(dd_from_ibdev(qp->ibqp.device), 902 &ps->s_txreq->phdr.hdr, ib_is_sc5(sc5)); 903 return ret; 904 905 bail_ecomm: 906 /* The current one got "sent" */ 907 return 0; 908 bail_build: 909 ret = wait_kmem(dev, qp, ps); 910 if (!ret) { 911 /* free txreq - bad state */ 912 hfi1_put_txreq(ps->s_txreq); 913 ps->s_txreq = NULL; 914 } 915 return ret; 916 } 917 918 /* 919 * If we are now in the error state, return zero to flush the 920 * send work request. 921 */ 922 static int pio_wait(struct rvt_qp *qp, 923 struct send_context *sc, 924 struct hfi1_pkt_state *ps, 925 u32 flag) 926 { 927 struct hfi1_qp_priv *priv = qp->priv; 928 struct hfi1_devdata *dd = sc->dd; 929 unsigned long flags; 930 int ret = 0; 931 932 /* 933 * Note that as soon as want_buffer() is called and 934 * possibly before it returns, sc_piobufavail() 935 * could be called. Therefore, put QP on the I/O wait list before 936 * enabling the PIO avail interrupt. 937 */ 938 spin_lock_irqsave(&qp->s_lock, flags); 939 if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) { 940 write_seqlock(&sc->waitlock); 941 list_add_tail(&ps->s_txreq->txreq.list, 942 &ps->wait->tx_head); 943 if (list_empty(&priv->s_iowait.list)) { 944 struct hfi1_ibdev *dev = &dd->verbs_dev; 945 int was_empty; 946 947 dev->n_piowait += !!(flag & RVT_S_WAIT_PIO); 948 dev->n_piodrain += !!(flag & HFI1_S_WAIT_PIO_DRAIN); 949 qp->s_flags |= flag; 950 was_empty = list_empty(&sc->piowait); 951 iowait_get_priority(&priv->s_iowait); 952 iowait_queue(ps->pkts_sent, &priv->s_iowait, 953 &sc->piowait); 954 priv->s_iowait.lock = &sc->waitlock; 955 trace_hfi1_qpsleep(qp, RVT_S_WAIT_PIO); 956 rvt_get_qp(qp); 957 /* counting: only call wantpiobuf_intr if first user */ 958 if (was_empty) 959 hfi1_sc_wantpiobuf_intr(sc, 1); 960 } 961 write_sequnlock(&sc->waitlock); 962 hfi1_qp_unbusy(qp, ps->wait); 963 ret = -EBUSY; 964 } 965 spin_unlock_irqrestore(&qp->s_lock, flags); 966 return ret; 967 } 968 969 static void verbs_pio_complete(void *arg, int code) 970 { 971 struct rvt_qp *qp = (struct rvt_qp *)arg; 972 struct hfi1_qp_priv *priv = qp->priv; 973 974 if (iowait_pio_dec(&priv->s_iowait)) 975 iowait_drain_wakeup(&priv->s_iowait); 976 } 977 978 int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps, 979 u64 pbc) 980 { 981 struct hfi1_qp_priv *priv = qp->priv; 982 u32 hdrwords = ps->s_txreq->hdr_dwords; 983 struct rvt_sge_state *ss = ps->s_txreq->ss; 984 u32 len = ps->s_txreq->s_cur_size; 985 u32 dwords; 986 u32 plen; 987 struct hfi1_pportdata *ppd = ps->ppd; 988 u32 *hdr; 989 u8 sc5; 990 unsigned long flags = 0; 991 struct send_context *sc; 992 struct pio_buf *pbuf; 993 int wc_status = IB_WC_SUCCESS; 994 int ret = 0; 995 pio_release_cb cb = NULL; 996 u8 extra_bytes = 0; 997 998 if (ps->s_txreq->phdr.hdr.hdr_type) { 999 u8 pad_size = hfi1_get_16b_padding((hdrwords << 2), len); 1000 1001 extra_bytes = pad_size + (SIZE_OF_CRC << 2) + SIZE_OF_LT; 1002 dwords = (len + extra_bytes) >> 2; 1003 hdr = (u32 *)&ps->s_txreq->phdr.hdr.opah; 1004 } else { 1005 dwords = (len + 3) >> 2; 1006 hdr = (u32 *)&ps->s_txreq->phdr.hdr.ibh; 1007 } 1008 plen = hdrwords + dwords + sizeof(pbc) / 4; 1009 1010 /* only RC/UC use complete */ 1011 switch (qp->ibqp.qp_type) { 1012 case IB_QPT_RC: 1013 case IB_QPT_UC: 1014 cb = verbs_pio_complete; 1015 break; 1016 default: 1017 break; 1018 } 1019 1020 /* vl15 special case taken care of in ud.c */ 1021 sc5 = priv->s_sc; 1022 sc = ps->s_txreq->psc; 1023 1024 if (likely(pbc == 0)) { 1025 u8 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5); 1026 1027 /* set PBC_DC_INFO bit (aka SC[4]) in pbc */ 1028 if (ps->s_txreq->phdr.hdr.hdr_type) 1029 pbc |= PBC_PACKET_BYPASS | PBC_INSERT_BYPASS_ICRC; 1030 else 1031 pbc |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT); 1032 1033 pbc = create_pbc(ppd, pbc, qp->srate_mbps, vl, plen); 1034 if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode))) 1035 pbc = hfi1_fault_tx(qp, ps->opcode, pbc); 1036 else 1037 /* Update HCRC based on packet opcode */ 1038 pbc = update_hcrc(ps->opcode, pbc); 1039 } 1040 if (cb) 1041 iowait_pio_inc(&priv->s_iowait); 1042 pbuf = sc_buffer_alloc(sc, plen, cb, qp); 1043 if (IS_ERR_OR_NULL(pbuf)) { 1044 if (cb) 1045 verbs_pio_complete(qp, 0); 1046 if (IS_ERR(pbuf)) { 1047 /* 1048 * If we have filled the PIO buffers to capacity and are 1049 * not in an active state this request is not going to 1050 * go out to so just complete it with an error or else a 1051 * ULP or the core may be stuck waiting. 1052 */ 1053 hfi1_cdbg( 1054 PIO, 1055 "alloc failed. state not active, completing"); 1056 wc_status = IB_WC_GENERAL_ERR; 1057 goto pio_bail; 1058 } else { 1059 /* 1060 * This is a normal occurrence. The PIO buffs are full 1061 * up but we are still happily sending, well we could be 1062 * so lets continue to queue the request. 1063 */ 1064 hfi1_cdbg(PIO, "alloc failed. state active, queuing"); 1065 ret = pio_wait(qp, sc, ps, RVT_S_WAIT_PIO); 1066 if (!ret) 1067 /* txreq not queued - free */ 1068 goto bail; 1069 /* tx consumed in wait */ 1070 return ret; 1071 } 1072 } 1073 1074 if (dwords == 0) { 1075 pio_copy(ppd->dd, pbuf, pbc, hdr, hdrwords); 1076 } else { 1077 seg_pio_copy_start(pbuf, pbc, 1078 hdr, hdrwords * 4); 1079 if (ss) { 1080 while (len) { 1081 void *addr = ss->sge.vaddr; 1082 u32 slen = rvt_get_sge_length(&ss->sge, len); 1083 1084 rvt_update_sge(ss, slen, false); 1085 seg_pio_copy_mid(pbuf, addr, slen); 1086 len -= slen; 1087 } 1088 } 1089 /* add icrc, lt byte, and padding to flit */ 1090 if (extra_bytes) 1091 seg_pio_copy_mid(pbuf, ppd->dd->sdma_pad_dma, 1092 extra_bytes); 1093 1094 seg_pio_copy_end(pbuf); 1095 } 1096 1097 update_tx_opstats(qp, ps, plen); 1098 trace_pio_output_ibhdr(dd_from_ibdev(qp->ibqp.device), 1099 &ps->s_txreq->phdr.hdr, ib_is_sc5(sc5)); 1100 1101 pio_bail: 1102 spin_lock_irqsave(&qp->s_lock, flags); 1103 if (qp->s_wqe) { 1104 rvt_send_complete(qp, qp->s_wqe, wc_status); 1105 } else if (qp->ibqp.qp_type == IB_QPT_RC) { 1106 if (unlikely(wc_status == IB_WC_GENERAL_ERR)) 1107 hfi1_rc_verbs_aborted(qp, &ps->s_txreq->phdr.hdr); 1108 hfi1_rc_send_complete(qp, &ps->s_txreq->phdr.hdr); 1109 } 1110 spin_unlock_irqrestore(&qp->s_lock, flags); 1111 1112 ret = 0; 1113 1114 bail: 1115 hfi1_put_txreq(ps->s_txreq); 1116 return ret; 1117 } 1118 1119 /* 1120 * egress_pkey_matches_entry - return 1 if the pkey matches ent (ent 1121 * being an entry from the partition key table), return 0 1122 * otherwise. Use the matching criteria for egress partition keys 1123 * specified in the OPAv1 spec., section 9.1l.7. 1124 */ 1125 static inline int egress_pkey_matches_entry(u16 pkey, u16 ent) 1126 { 1127 u16 mkey = pkey & PKEY_LOW_15_MASK; 1128 u16 mentry = ent & PKEY_LOW_15_MASK; 1129 1130 if (mkey == mentry) { 1131 /* 1132 * If pkey[15] is set (full partition member), 1133 * is bit 15 in the corresponding table element 1134 * clear (limited member)? 1135 */ 1136 if (pkey & PKEY_MEMBER_MASK) 1137 return !!(ent & PKEY_MEMBER_MASK); 1138 return 1; 1139 } 1140 return 0; 1141 } 1142 1143 /** 1144 * egress_pkey_check - check P_KEY of a packet 1145 * @ppd: Physical IB port data 1146 * @slid: SLID for packet 1147 * @bkey: PKEY for header 1148 * @sc5: SC for packet 1149 * @s_pkey_index: It will be used for look up optimization for kernel contexts 1150 * only. If it is negative value, then it means user contexts is calling this 1151 * function. 1152 * 1153 * It checks if hdr's pkey is valid. 1154 * 1155 * Return: 0 on success, otherwise, 1 1156 */ 1157 int egress_pkey_check(struct hfi1_pportdata *ppd, u32 slid, u16 pkey, 1158 u8 sc5, int8_t s_pkey_index) 1159 { 1160 struct hfi1_devdata *dd; 1161 int i; 1162 int is_user_ctxt_mechanism = (s_pkey_index < 0); 1163 1164 if (!(ppd->part_enforce & HFI1_PART_ENFORCE_OUT)) 1165 return 0; 1166 1167 /* If SC15, pkey[0:14] must be 0x7fff */ 1168 if ((sc5 == 0xf) && ((pkey & PKEY_LOW_15_MASK) != PKEY_LOW_15_MASK)) 1169 goto bad; 1170 1171 /* Is the pkey = 0x0, or 0x8000? */ 1172 if ((pkey & PKEY_LOW_15_MASK) == 0) 1173 goto bad; 1174 1175 /* 1176 * For the kernel contexts only, if a qp is passed into the function, 1177 * the most likely matching pkey has index qp->s_pkey_index 1178 */ 1179 if (!is_user_ctxt_mechanism && 1180 egress_pkey_matches_entry(pkey, ppd->pkeys[s_pkey_index])) { 1181 return 0; 1182 } 1183 1184 for (i = 0; i < MAX_PKEY_VALUES; i++) { 1185 if (egress_pkey_matches_entry(pkey, ppd->pkeys[i])) 1186 return 0; 1187 } 1188 bad: 1189 /* 1190 * For the user-context mechanism, the P_KEY check would only happen 1191 * once per SDMA request, not once per packet. Therefore, there's no 1192 * need to increment the counter for the user-context mechanism. 1193 */ 1194 if (!is_user_ctxt_mechanism) { 1195 incr_cntr64(&ppd->port_xmit_constraint_errors); 1196 dd = ppd->dd; 1197 if (!(dd->err_info_xmit_constraint.status & 1198 OPA_EI_STATUS_SMASK)) { 1199 dd->err_info_xmit_constraint.status |= 1200 OPA_EI_STATUS_SMASK; 1201 dd->err_info_xmit_constraint.slid = slid; 1202 dd->err_info_xmit_constraint.pkey = pkey; 1203 } 1204 } 1205 return 1; 1206 } 1207 1208 /** 1209 * get_send_routine - choose an egress routine 1210 * 1211 * Choose an egress routine based on QP type 1212 * and size 1213 */ 1214 static inline send_routine get_send_routine(struct rvt_qp *qp, 1215 struct hfi1_pkt_state *ps) 1216 { 1217 struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); 1218 struct hfi1_qp_priv *priv = qp->priv; 1219 struct verbs_txreq *tx = ps->s_txreq; 1220 1221 if (unlikely(!(dd->flags & HFI1_HAS_SEND_DMA))) 1222 return dd->process_pio_send; 1223 switch (qp->ibqp.qp_type) { 1224 case IB_QPT_SMI: 1225 return dd->process_pio_send; 1226 case IB_QPT_GSI: 1227 case IB_QPT_UD: 1228 break; 1229 case IB_QPT_UC: 1230 case IB_QPT_RC: 1231 priv->s_running_pkt_size = 1232 (tx->s_cur_size + priv->s_running_pkt_size) / 2; 1233 if (piothreshold && 1234 priv->s_running_pkt_size <= min(piothreshold, qp->pmtu) && 1235 (BIT(ps->opcode & OPMASK) & pio_opmask[ps->opcode >> 5]) && 1236 iowait_sdma_pending(&priv->s_iowait) == 0 && 1237 !sdma_txreq_built(&tx->txreq)) 1238 return dd->process_pio_send; 1239 break; 1240 default: 1241 break; 1242 } 1243 return dd->process_dma_send; 1244 } 1245 1246 /** 1247 * hfi1_verbs_send - send a packet 1248 * @qp: the QP to send on 1249 * @ps: the state of the packet to send 1250 * 1251 * Return zero if packet is sent or queued OK. 1252 * Return non-zero and clear qp->s_flags RVT_S_BUSY otherwise. 1253 */ 1254 int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps) 1255 { 1256 struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); 1257 struct hfi1_qp_priv *priv = qp->priv; 1258 struct ib_other_headers *ohdr = NULL; 1259 send_routine sr; 1260 int ret; 1261 u16 pkey; 1262 u32 slid; 1263 u8 l4 = 0; 1264 1265 /* locate the pkey within the headers */ 1266 if (ps->s_txreq->phdr.hdr.hdr_type) { 1267 struct hfi1_16b_header *hdr = &ps->s_txreq->phdr.hdr.opah; 1268 1269 l4 = hfi1_16B_get_l4(hdr); 1270 if (l4 == OPA_16B_L4_IB_LOCAL) 1271 ohdr = &hdr->u.oth; 1272 else if (l4 == OPA_16B_L4_IB_GLOBAL) 1273 ohdr = &hdr->u.l.oth; 1274 1275 slid = hfi1_16B_get_slid(hdr); 1276 pkey = hfi1_16B_get_pkey(hdr); 1277 } else { 1278 struct ib_header *hdr = &ps->s_txreq->phdr.hdr.ibh; 1279 u8 lnh = ib_get_lnh(hdr); 1280 1281 if (lnh == HFI1_LRH_GRH) 1282 ohdr = &hdr->u.l.oth; 1283 else 1284 ohdr = &hdr->u.oth; 1285 slid = ib_get_slid(hdr); 1286 pkey = ib_bth_get_pkey(ohdr); 1287 } 1288 1289 if (likely(l4 != OPA_16B_L4_FM)) 1290 ps->opcode = ib_bth_get_opcode(ohdr); 1291 else 1292 ps->opcode = IB_OPCODE_UD_SEND_ONLY; 1293 1294 sr = get_send_routine(qp, ps); 1295 ret = egress_pkey_check(dd->pport, slid, pkey, 1296 priv->s_sc, qp->s_pkey_index); 1297 if (unlikely(ret)) { 1298 /* 1299 * The value we are returning here does not get propagated to 1300 * the verbs caller. Thus we need to complete the request with 1301 * error otherwise the caller could be sitting waiting on the 1302 * completion event. Only do this for PIO. SDMA has its own 1303 * mechanism for handling the errors. So for SDMA we can just 1304 * return. 1305 */ 1306 if (sr == dd->process_pio_send) { 1307 unsigned long flags; 1308 1309 hfi1_cdbg(PIO, "%s() Failed. Completing with err", 1310 __func__); 1311 spin_lock_irqsave(&qp->s_lock, flags); 1312 rvt_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR); 1313 spin_unlock_irqrestore(&qp->s_lock, flags); 1314 } 1315 return -EINVAL; 1316 } 1317 if (sr == dd->process_dma_send && iowait_pio_pending(&priv->s_iowait)) 1318 return pio_wait(qp, 1319 ps->s_txreq->psc, 1320 ps, 1321 HFI1_S_WAIT_PIO_DRAIN); 1322 return sr(qp, ps, 0); 1323 } 1324 1325 /** 1326 * hfi1_fill_device_attr - Fill in rvt dev info device attributes. 1327 * @dd: the device data structure 1328 */ 1329 static void hfi1_fill_device_attr(struct hfi1_devdata *dd) 1330 { 1331 struct rvt_dev_info *rdi = &dd->verbs_dev.rdi; 1332 u32 ver = dd->dc8051_ver; 1333 1334 memset(&rdi->dparms.props, 0, sizeof(rdi->dparms.props)); 1335 1336 rdi->dparms.props.fw_ver = ((u64)(dc8051_ver_maj(ver)) << 32) | 1337 ((u64)(dc8051_ver_min(ver)) << 16) | 1338 (u64)dc8051_ver_patch(ver); 1339 1340 rdi->dparms.props.device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR | 1341 IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT | 1342 IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN | 1343 IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE | 1344 IB_DEVICE_MEM_MGT_EXTENSIONS | 1345 IB_DEVICE_RDMA_NETDEV_OPA_VNIC; 1346 rdi->dparms.props.page_size_cap = PAGE_SIZE; 1347 rdi->dparms.props.vendor_id = dd->oui1 << 16 | dd->oui2 << 8 | dd->oui3; 1348 rdi->dparms.props.vendor_part_id = dd->pcidev->device; 1349 rdi->dparms.props.hw_ver = dd->minrev; 1350 rdi->dparms.props.sys_image_guid = ib_hfi1_sys_image_guid; 1351 rdi->dparms.props.max_mr_size = U64_MAX; 1352 rdi->dparms.props.max_fast_reg_page_list_len = UINT_MAX; 1353 rdi->dparms.props.max_qp = hfi1_max_qps; 1354 rdi->dparms.props.max_qp_wr = 1355 (hfi1_max_qp_wrs >= HFI1_QP_WQE_INVALID ? 1356 HFI1_QP_WQE_INVALID - 1 : hfi1_max_qp_wrs); 1357 rdi->dparms.props.max_send_sge = hfi1_max_sges; 1358 rdi->dparms.props.max_recv_sge = hfi1_max_sges; 1359 rdi->dparms.props.max_sge_rd = hfi1_max_sges; 1360 rdi->dparms.props.max_cq = hfi1_max_cqs; 1361 rdi->dparms.props.max_ah = hfi1_max_ahs; 1362 rdi->dparms.props.max_cqe = hfi1_max_cqes; 1363 rdi->dparms.props.max_map_per_fmr = 32767; 1364 rdi->dparms.props.max_pd = hfi1_max_pds; 1365 rdi->dparms.props.max_qp_rd_atom = HFI1_MAX_RDMA_ATOMIC; 1366 rdi->dparms.props.max_qp_init_rd_atom = 255; 1367 rdi->dparms.props.max_srq = hfi1_max_srqs; 1368 rdi->dparms.props.max_srq_wr = hfi1_max_srq_wrs; 1369 rdi->dparms.props.max_srq_sge = hfi1_max_srq_sges; 1370 rdi->dparms.props.atomic_cap = IB_ATOMIC_GLOB; 1371 rdi->dparms.props.max_pkeys = hfi1_get_npkeys(dd); 1372 rdi->dparms.props.max_mcast_grp = hfi1_max_mcast_grps; 1373 rdi->dparms.props.max_mcast_qp_attach = hfi1_max_mcast_qp_attached; 1374 rdi->dparms.props.max_total_mcast_qp_attach = 1375 rdi->dparms.props.max_mcast_qp_attach * 1376 rdi->dparms.props.max_mcast_grp; 1377 } 1378 1379 static inline u16 opa_speed_to_ib(u16 in) 1380 { 1381 u16 out = 0; 1382 1383 if (in & OPA_LINK_SPEED_25G) 1384 out |= IB_SPEED_EDR; 1385 if (in & OPA_LINK_SPEED_12_5G) 1386 out |= IB_SPEED_FDR; 1387 1388 return out; 1389 } 1390 1391 /* 1392 * Convert a single OPA link width (no multiple flags) to an IB value. 1393 * A zero OPA link width means link down, which means the IB width value 1394 * is a don't care. 1395 */ 1396 static inline u16 opa_width_to_ib(u16 in) 1397 { 1398 switch (in) { 1399 case OPA_LINK_WIDTH_1X: 1400 /* map 2x and 3x to 1x as they don't exist in IB */ 1401 case OPA_LINK_WIDTH_2X: 1402 case OPA_LINK_WIDTH_3X: 1403 return IB_WIDTH_1X; 1404 default: /* link down or unknown, return our largest width */ 1405 case OPA_LINK_WIDTH_4X: 1406 return IB_WIDTH_4X; 1407 } 1408 } 1409 1410 static int query_port(struct rvt_dev_info *rdi, u8 port_num, 1411 struct ib_port_attr *props) 1412 { 1413 struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi); 1414 struct hfi1_devdata *dd = dd_from_dev(verbs_dev); 1415 struct hfi1_pportdata *ppd = &dd->pport[port_num - 1]; 1416 u32 lid = ppd->lid; 1417 1418 /* props being zeroed by the caller, avoid zeroing it here */ 1419 props->lid = lid ? lid : 0; 1420 props->lmc = ppd->lmc; 1421 /* OPA logical states match IB logical states */ 1422 props->state = driver_lstate(ppd); 1423 props->phys_state = driver_pstate(ppd); 1424 props->gid_tbl_len = HFI1_GUIDS_PER_PORT; 1425 props->active_width = (u8)opa_width_to_ib(ppd->link_width_active); 1426 /* see rate_show() in ib core/sysfs.c */ 1427 props->active_speed = (u8)opa_speed_to_ib(ppd->link_speed_active); 1428 props->max_vl_num = ppd->vls_supported; 1429 1430 /* Once we are a "first class" citizen and have added the OPA MTUs to 1431 * the core we can advertise the larger MTU enum to the ULPs, for now 1432 * advertise only 4K. 1433 * 1434 * Those applications which are either OPA aware or pass the MTU enum 1435 * from the Path Records to us will get the new 8k MTU. Those that 1436 * attempt to process the MTU enum may fail in various ways. 1437 */ 1438 props->max_mtu = mtu_to_enum((!valid_ib_mtu(hfi1_max_mtu) ? 1439 4096 : hfi1_max_mtu), IB_MTU_4096); 1440 props->active_mtu = !valid_ib_mtu(ppd->ibmtu) ? props->max_mtu : 1441 mtu_to_enum(ppd->ibmtu, IB_MTU_4096); 1442 1443 return 0; 1444 } 1445 1446 static int modify_device(struct ib_device *device, 1447 int device_modify_mask, 1448 struct ib_device_modify *device_modify) 1449 { 1450 struct hfi1_devdata *dd = dd_from_ibdev(device); 1451 unsigned i; 1452 int ret; 1453 1454 if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID | 1455 IB_DEVICE_MODIFY_NODE_DESC)) { 1456 ret = -EOPNOTSUPP; 1457 goto bail; 1458 } 1459 1460 if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) { 1461 memcpy(device->node_desc, device_modify->node_desc, 1462 IB_DEVICE_NODE_DESC_MAX); 1463 for (i = 0; i < dd->num_pports; i++) { 1464 struct hfi1_ibport *ibp = &dd->pport[i].ibport_data; 1465 1466 hfi1_node_desc_chg(ibp); 1467 } 1468 } 1469 1470 if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) { 1471 ib_hfi1_sys_image_guid = 1472 cpu_to_be64(device_modify->sys_image_guid); 1473 for (i = 0; i < dd->num_pports; i++) { 1474 struct hfi1_ibport *ibp = &dd->pport[i].ibport_data; 1475 1476 hfi1_sys_guid_chg(ibp); 1477 } 1478 } 1479 1480 ret = 0; 1481 1482 bail: 1483 return ret; 1484 } 1485 1486 static int shut_down_port(struct rvt_dev_info *rdi, u8 port_num) 1487 { 1488 struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi); 1489 struct hfi1_devdata *dd = dd_from_dev(verbs_dev); 1490 struct hfi1_pportdata *ppd = &dd->pport[port_num - 1]; 1491 int ret; 1492 1493 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_UNKNOWN, 0, 1494 OPA_LINKDOWN_REASON_UNKNOWN); 1495 ret = set_link_state(ppd, HLS_DN_DOWNDEF); 1496 return ret; 1497 } 1498 1499 static int hfi1_get_guid_be(struct rvt_dev_info *rdi, struct rvt_ibport *rvp, 1500 int guid_index, __be64 *guid) 1501 { 1502 struct hfi1_ibport *ibp = container_of(rvp, struct hfi1_ibport, rvp); 1503 1504 if (guid_index >= HFI1_GUIDS_PER_PORT) 1505 return -EINVAL; 1506 1507 *guid = get_sguid(ibp, guid_index); 1508 return 0; 1509 } 1510 1511 /* 1512 * convert ah port,sl to sc 1513 */ 1514 u8 ah_to_sc(struct ib_device *ibdev, struct rdma_ah_attr *ah) 1515 { 1516 struct hfi1_ibport *ibp = to_iport(ibdev, rdma_ah_get_port_num(ah)); 1517 1518 return ibp->sl_to_sc[rdma_ah_get_sl(ah)]; 1519 } 1520 1521 static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr) 1522 { 1523 struct hfi1_ibport *ibp; 1524 struct hfi1_pportdata *ppd; 1525 struct hfi1_devdata *dd; 1526 u8 sc5; 1527 u8 sl; 1528 1529 if (hfi1_check_mcast(rdma_ah_get_dlid(ah_attr)) && 1530 !(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) 1531 return -EINVAL; 1532 1533 /* test the mapping for validity */ 1534 ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr)); 1535 ppd = ppd_from_ibp(ibp); 1536 dd = dd_from_ppd(ppd); 1537 1538 sl = rdma_ah_get_sl(ah_attr); 1539 if (sl >= ARRAY_SIZE(ibp->sl_to_sc)) 1540 return -EINVAL; 1541 sl = array_index_nospec(sl, ARRAY_SIZE(ibp->sl_to_sc)); 1542 1543 sc5 = ibp->sl_to_sc[sl]; 1544 if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf) 1545 return -EINVAL; 1546 return 0; 1547 } 1548 1549 static void hfi1_notify_new_ah(struct ib_device *ibdev, 1550 struct rdma_ah_attr *ah_attr, 1551 struct rvt_ah *ah) 1552 { 1553 struct hfi1_ibport *ibp; 1554 struct hfi1_pportdata *ppd; 1555 struct hfi1_devdata *dd; 1556 u8 sc5; 1557 struct rdma_ah_attr *attr = &ah->attr; 1558 1559 /* 1560 * Do not trust reading anything from rvt_ah at this point as it is not 1561 * done being setup. We can however modify things which we need to set. 1562 */ 1563 1564 ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr)); 1565 ppd = ppd_from_ibp(ibp); 1566 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&ah->attr)]; 1567 hfi1_update_ah_attr(ibdev, attr); 1568 hfi1_make_opa_lid(attr); 1569 dd = dd_from_ppd(ppd); 1570 ah->vl = sc_to_vlt(dd, sc5); 1571 if (ah->vl < num_vls || ah->vl == 15) 1572 ah->log_pmtu = ilog2(dd->vld[ah->vl].mtu); 1573 } 1574 1575 /** 1576 * hfi1_get_npkeys - return the size of the PKEY table for context 0 1577 * @dd: the hfi1_ib device 1578 */ 1579 unsigned hfi1_get_npkeys(struct hfi1_devdata *dd) 1580 { 1581 return ARRAY_SIZE(dd->pport[0].pkeys); 1582 } 1583 1584 static void init_ibport(struct hfi1_pportdata *ppd) 1585 { 1586 struct hfi1_ibport *ibp = &ppd->ibport_data; 1587 size_t sz = ARRAY_SIZE(ibp->sl_to_sc); 1588 int i; 1589 1590 for (i = 0; i < sz; i++) { 1591 ibp->sl_to_sc[i] = i; 1592 ibp->sc_to_sl[i] = i; 1593 } 1594 1595 for (i = 0; i < RVT_MAX_TRAP_LISTS ; i++) 1596 INIT_LIST_HEAD(&ibp->rvp.trap_lists[i].list); 1597 timer_setup(&ibp->rvp.trap_timer, hfi1_handle_trap_timer, 0); 1598 1599 spin_lock_init(&ibp->rvp.lock); 1600 /* Set the prefix to the default value (see ch. 4.1.1) */ 1601 ibp->rvp.gid_prefix = IB_DEFAULT_GID_PREFIX; 1602 ibp->rvp.sm_lid = 0; 1603 /* 1604 * Below should only set bits defined in OPA PortInfo.CapabilityMask 1605 * and PortInfo.CapabilityMask3 1606 */ 1607 ibp->rvp.port_cap_flags = IB_PORT_AUTO_MIGR_SUP | 1608 IB_PORT_CAP_MASK_NOTICE_SUP; 1609 ibp->rvp.port_cap3_flags = OPA_CAP_MASK3_IsSharedSpaceSupported; 1610 ibp->rvp.pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA; 1611 ibp->rvp.pma_counter_select[1] = IB_PMA_PORT_RCV_DATA; 1612 ibp->rvp.pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS; 1613 ibp->rvp.pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS; 1614 ibp->rvp.pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT; 1615 1616 RCU_INIT_POINTER(ibp->rvp.qp[0], NULL); 1617 RCU_INIT_POINTER(ibp->rvp.qp[1], NULL); 1618 } 1619 1620 static void hfi1_get_dev_fw_str(struct ib_device *ibdev, char *str) 1621 { 1622 struct rvt_dev_info *rdi = ib_to_rvt(ibdev); 1623 struct hfi1_ibdev *dev = dev_from_rdi(rdi); 1624 u32 ver = dd_from_dev(dev)->dc8051_ver; 1625 1626 snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u.%u", dc8051_ver_maj(ver), 1627 dc8051_ver_min(ver), dc8051_ver_patch(ver)); 1628 } 1629 1630 static const char * const driver_cntr_names[] = { 1631 /* must be element 0*/ 1632 "DRIVER_KernIntr", 1633 "DRIVER_ErrorIntr", 1634 "DRIVER_Tx_Errs", 1635 "DRIVER_Rcv_Errs", 1636 "DRIVER_HW_Errs", 1637 "DRIVER_NoPIOBufs", 1638 "DRIVER_CtxtsOpen", 1639 "DRIVER_RcvLen_Errs", 1640 "DRIVER_EgrBufFull", 1641 "DRIVER_EgrHdrFull" 1642 }; 1643 1644 static DEFINE_MUTEX(cntr_names_lock); /* protects the *_cntr_names bufers */ 1645 static const char **dev_cntr_names; 1646 static const char **port_cntr_names; 1647 int num_driver_cntrs = ARRAY_SIZE(driver_cntr_names); 1648 static int num_dev_cntrs; 1649 static int num_port_cntrs; 1650 static int cntr_names_initialized; 1651 1652 /* 1653 * Convert a list of names separated by '\n' into an array of NULL terminated 1654 * strings. Optionally some entries can be reserved in the array to hold extra 1655 * external strings. 1656 */ 1657 static int init_cntr_names(const char *names_in, 1658 const size_t names_len, 1659 int num_extra_names, 1660 int *num_cntrs, 1661 const char ***cntr_names) 1662 { 1663 char *names_out, *p, **q; 1664 int i, n; 1665 1666 n = 0; 1667 for (i = 0; i < names_len; i++) 1668 if (names_in[i] == '\n') 1669 n++; 1670 1671 names_out = kmalloc((n + num_extra_names) * sizeof(char *) + names_len, 1672 GFP_KERNEL); 1673 if (!names_out) { 1674 *num_cntrs = 0; 1675 *cntr_names = NULL; 1676 return -ENOMEM; 1677 } 1678 1679 p = names_out + (n + num_extra_names) * sizeof(char *); 1680 memcpy(p, names_in, names_len); 1681 1682 q = (char **)names_out; 1683 for (i = 0; i < n; i++) { 1684 q[i] = p; 1685 p = strchr(p, '\n'); 1686 *p++ = '\0'; 1687 } 1688 1689 *num_cntrs = n; 1690 *cntr_names = (const char **)names_out; 1691 return 0; 1692 } 1693 1694 static struct rdma_hw_stats *alloc_hw_stats(struct ib_device *ibdev, 1695 u8 port_num) 1696 { 1697 int i, err; 1698 1699 mutex_lock(&cntr_names_lock); 1700 if (!cntr_names_initialized) { 1701 struct hfi1_devdata *dd = dd_from_ibdev(ibdev); 1702 1703 err = init_cntr_names(dd->cntrnames, 1704 dd->cntrnameslen, 1705 num_driver_cntrs, 1706 &num_dev_cntrs, 1707 &dev_cntr_names); 1708 if (err) { 1709 mutex_unlock(&cntr_names_lock); 1710 return NULL; 1711 } 1712 1713 for (i = 0; i < num_driver_cntrs; i++) 1714 dev_cntr_names[num_dev_cntrs + i] = 1715 driver_cntr_names[i]; 1716 1717 err = init_cntr_names(dd->portcntrnames, 1718 dd->portcntrnameslen, 1719 0, 1720 &num_port_cntrs, 1721 &port_cntr_names); 1722 if (err) { 1723 kfree(dev_cntr_names); 1724 dev_cntr_names = NULL; 1725 mutex_unlock(&cntr_names_lock); 1726 return NULL; 1727 } 1728 cntr_names_initialized = 1; 1729 } 1730 mutex_unlock(&cntr_names_lock); 1731 1732 if (!port_num) 1733 return rdma_alloc_hw_stats_struct( 1734 dev_cntr_names, 1735 num_dev_cntrs + num_driver_cntrs, 1736 RDMA_HW_STATS_DEFAULT_LIFESPAN); 1737 else 1738 return rdma_alloc_hw_stats_struct( 1739 port_cntr_names, 1740 num_port_cntrs, 1741 RDMA_HW_STATS_DEFAULT_LIFESPAN); 1742 } 1743 1744 static u64 hfi1_sps_ints(void) 1745 { 1746 unsigned long index, flags; 1747 struct hfi1_devdata *dd; 1748 u64 sps_ints = 0; 1749 1750 xa_lock_irqsave(&hfi1_dev_table, flags); 1751 xa_for_each(&hfi1_dev_table, index, dd) { 1752 sps_ints += get_all_cpu_total(dd->int_counter); 1753 } 1754 xa_unlock_irqrestore(&hfi1_dev_table, flags); 1755 return sps_ints; 1756 } 1757 1758 static int get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats, 1759 u8 port, int index) 1760 { 1761 u64 *values; 1762 int count; 1763 1764 if (!port) { 1765 u64 *stats = (u64 *)&hfi1_stats; 1766 int i; 1767 1768 hfi1_read_cntrs(dd_from_ibdev(ibdev), NULL, &values); 1769 values[num_dev_cntrs] = hfi1_sps_ints(); 1770 for (i = 1; i < num_driver_cntrs; i++) 1771 values[num_dev_cntrs + i] = stats[i]; 1772 count = num_dev_cntrs + num_driver_cntrs; 1773 } else { 1774 struct hfi1_ibport *ibp = to_iport(ibdev, port); 1775 1776 hfi1_read_portcntrs(ppd_from_ibp(ibp), NULL, &values); 1777 count = num_port_cntrs; 1778 } 1779 1780 memcpy(stats->value, values, count * sizeof(u64)); 1781 return count; 1782 } 1783 1784 static const struct ib_device_ops hfi1_dev_ops = { 1785 .owner = THIS_MODULE, 1786 .driver_id = RDMA_DRIVER_HFI1, 1787 1788 .alloc_hw_stats = alloc_hw_stats, 1789 .alloc_rdma_netdev = hfi1_vnic_alloc_rn, 1790 .get_dev_fw_str = hfi1_get_dev_fw_str, 1791 .get_hw_stats = get_hw_stats, 1792 .init_port = hfi1_create_port_files, 1793 .modify_device = modify_device, 1794 /* keep process mad in the driver */ 1795 .process_mad = hfi1_process_mad, 1796 }; 1797 1798 /** 1799 * hfi1_register_ib_device - register our device with the infiniband core 1800 * @dd: the device data structure 1801 * Return 0 if successful, errno if unsuccessful. 1802 */ 1803 int hfi1_register_ib_device(struct hfi1_devdata *dd) 1804 { 1805 struct hfi1_ibdev *dev = &dd->verbs_dev; 1806 struct ib_device *ibdev = &dev->rdi.ibdev; 1807 struct hfi1_pportdata *ppd = dd->pport; 1808 struct hfi1_ibport *ibp = &ppd->ibport_data; 1809 unsigned i; 1810 int ret; 1811 1812 for (i = 0; i < dd->num_pports; i++) 1813 init_ibport(ppd + i); 1814 1815 /* Only need to initialize non-zero fields. */ 1816 1817 timer_setup(&dev->mem_timer, mem_timer, 0); 1818 1819 seqlock_init(&dev->iowait_lock); 1820 seqlock_init(&dev->txwait_lock); 1821 INIT_LIST_HEAD(&dev->txwait); 1822 INIT_LIST_HEAD(&dev->memwait); 1823 1824 ret = verbs_txreq_init(dev); 1825 if (ret) 1826 goto err_verbs_txreq; 1827 1828 /* Use first-port GUID as node guid */ 1829 ibdev->node_guid = get_sguid(ibp, HFI1_PORT_GUID_INDEX); 1830 1831 /* 1832 * The system image GUID is supposed to be the same for all 1833 * HFIs in a single system but since there can be other 1834 * device types in the system, we can't be sure this is unique. 1835 */ 1836 if (!ib_hfi1_sys_image_guid) 1837 ib_hfi1_sys_image_guid = ibdev->node_guid; 1838 ibdev->phys_port_cnt = dd->num_pports; 1839 ibdev->dev.parent = &dd->pcidev->dev; 1840 1841 ib_set_device_ops(ibdev, &hfi1_dev_ops); 1842 1843 strlcpy(ibdev->node_desc, init_utsname()->nodename, 1844 sizeof(ibdev->node_desc)); 1845 1846 /* 1847 * Fill in rvt info object. 1848 */ 1849 dd->verbs_dev.rdi.driver_f.get_pci_dev = get_pci_dev; 1850 dd->verbs_dev.rdi.driver_f.check_ah = hfi1_check_ah; 1851 dd->verbs_dev.rdi.driver_f.notify_new_ah = hfi1_notify_new_ah; 1852 dd->verbs_dev.rdi.driver_f.get_guid_be = hfi1_get_guid_be; 1853 dd->verbs_dev.rdi.driver_f.query_port_state = query_port; 1854 dd->verbs_dev.rdi.driver_f.shut_down_port = shut_down_port; 1855 dd->verbs_dev.rdi.driver_f.cap_mask_chg = hfi1_cap_mask_chg; 1856 /* 1857 * Fill in rvt info device attributes. 1858 */ 1859 hfi1_fill_device_attr(dd); 1860 1861 /* queue pair */ 1862 dd->verbs_dev.rdi.dparms.qp_table_size = hfi1_qp_table_size; 1863 dd->verbs_dev.rdi.dparms.qpn_start = 0; 1864 dd->verbs_dev.rdi.dparms.qpn_inc = 1; 1865 dd->verbs_dev.rdi.dparms.qos_shift = dd->qos_shift; 1866 dd->verbs_dev.rdi.dparms.qpn_res_start = kdeth_qp << 16; 1867 dd->verbs_dev.rdi.dparms.qpn_res_end = 1868 dd->verbs_dev.rdi.dparms.qpn_res_start + 65535; 1869 dd->verbs_dev.rdi.dparms.max_rdma_atomic = HFI1_MAX_RDMA_ATOMIC; 1870 dd->verbs_dev.rdi.dparms.psn_mask = PSN_MASK; 1871 dd->verbs_dev.rdi.dparms.psn_shift = PSN_SHIFT; 1872 dd->verbs_dev.rdi.dparms.psn_modify_mask = PSN_MODIFY_MASK; 1873 dd->verbs_dev.rdi.dparms.core_cap_flags = RDMA_CORE_PORT_INTEL_OPA | 1874 RDMA_CORE_CAP_OPA_AH; 1875 dd->verbs_dev.rdi.dparms.max_mad_size = OPA_MGMT_MAD_SIZE; 1876 1877 dd->verbs_dev.rdi.driver_f.qp_priv_alloc = qp_priv_alloc; 1878 dd->verbs_dev.rdi.driver_f.qp_priv_init = hfi1_qp_priv_init; 1879 dd->verbs_dev.rdi.driver_f.qp_priv_free = qp_priv_free; 1880 dd->verbs_dev.rdi.driver_f.free_all_qps = free_all_qps; 1881 dd->verbs_dev.rdi.driver_f.notify_qp_reset = notify_qp_reset; 1882 dd->verbs_dev.rdi.driver_f.do_send = hfi1_do_send_from_rvt; 1883 dd->verbs_dev.rdi.driver_f.schedule_send = hfi1_schedule_send; 1884 dd->verbs_dev.rdi.driver_f.schedule_send_no_lock = _hfi1_schedule_send; 1885 dd->verbs_dev.rdi.driver_f.get_pmtu_from_attr = get_pmtu_from_attr; 1886 dd->verbs_dev.rdi.driver_f.notify_error_qp = notify_error_qp; 1887 dd->verbs_dev.rdi.driver_f.flush_qp_waiters = flush_qp_waiters; 1888 dd->verbs_dev.rdi.driver_f.stop_send_queue = stop_send_queue; 1889 dd->verbs_dev.rdi.driver_f.quiesce_qp = quiesce_qp; 1890 dd->verbs_dev.rdi.driver_f.notify_error_qp = notify_error_qp; 1891 dd->verbs_dev.rdi.driver_f.mtu_from_qp = mtu_from_qp; 1892 dd->verbs_dev.rdi.driver_f.mtu_to_path_mtu = mtu_to_path_mtu; 1893 dd->verbs_dev.rdi.driver_f.check_modify_qp = hfi1_check_modify_qp; 1894 dd->verbs_dev.rdi.driver_f.modify_qp = hfi1_modify_qp; 1895 dd->verbs_dev.rdi.driver_f.notify_restart_rc = hfi1_restart_rc; 1896 dd->verbs_dev.rdi.driver_f.setup_wqe = hfi1_setup_wqe; 1897 dd->verbs_dev.rdi.driver_f.comp_vect_cpu_lookup = 1898 hfi1_comp_vect_mappings_lookup; 1899 1900 /* completeion queue */ 1901 dd->verbs_dev.rdi.ibdev.num_comp_vectors = dd->comp_vect_possible_cpus; 1902 dd->verbs_dev.rdi.dparms.node = dd->node; 1903 1904 /* misc settings */ 1905 dd->verbs_dev.rdi.flags = 0; /* Let rdmavt handle it all */ 1906 dd->verbs_dev.rdi.dparms.lkey_table_size = hfi1_lkey_table_size; 1907 dd->verbs_dev.rdi.dparms.nports = dd->num_pports; 1908 dd->verbs_dev.rdi.dparms.npkeys = hfi1_get_npkeys(dd); 1909 dd->verbs_dev.rdi.dparms.sge_copy_mode = sge_copy_mode; 1910 dd->verbs_dev.rdi.dparms.wss_threshold = wss_threshold; 1911 dd->verbs_dev.rdi.dparms.wss_clean_period = wss_clean_period; 1912 dd->verbs_dev.rdi.dparms.reserved_operations = 1; 1913 dd->verbs_dev.rdi.dparms.extra_rdma_atomic = HFI1_TID_RDMA_WRITE_CNT; 1914 1915 /* post send table */ 1916 dd->verbs_dev.rdi.post_parms = hfi1_post_parms; 1917 1918 /* opcode translation table */ 1919 dd->verbs_dev.rdi.wc_opcode = ib_hfi1_wc_opcode; 1920 1921 ppd = dd->pport; 1922 for (i = 0; i < dd->num_pports; i++, ppd++) 1923 rvt_init_port(&dd->verbs_dev.rdi, 1924 &ppd->ibport_data.rvp, 1925 i, 1926 ppd->pkeys); 1927 1928 rdma_set_device_sysfs_group(&dd->verbs_dev.rdi.ibdev, 1929 &ib_hfi1_attr_group); 1930 1931 ret = rvt_register_device(&dd->verbs_dev.rdi); 1932 if (ret) 1933 goto err_verbs_txreq; 1934 1935 ret = hfi1_verbs_register_sysfs(dd); 1936 if (ret) 1937 goto err_class; 1938 1939 return ret; 1940 1941 err_class: 1942 rvt_unregister_device(&dd->verbs_dev.rdi); 1943 err_verbs_txreq: 1944 verbs_txreq_exit(dev); 1945 dd_dev_err(dd, "cannot register verbs: %d!\n", -ret); 1946 return ret; 1947 } 1948 1949 void hfi1_unregister_ib_device(struct hfi1_devdata *dd) 1950 { 1951 struct hfi1_ibdev *dev = &dd->verbs_dev; 1952 1953 hfi1_verbs_unregister_sysfs(dd); 1954 1955 rvt_unregister_device(&dd->verbs_dev.rdi); 1956 1957 if (!list_empty(&dev->txwait)) 1958 dd_dev_err(dd, "txwait list not empty!\n"); 1959 if (!list_empty(&dev->memwait)) 1960 dd_dev_err(dd, "memwait list not empty!\n"); 1961 1962 del_timer_sync(&dev->mem_timer); 1963 verbs_txreq_exit(dev); 1964 1965 mutex_lock(&cntr_names_lock); 1966 kfree(dev_cntr_names); 1967 kfree(port_cntr_names); 1968 dev_cntr_names = NULL; 1969 port_cntr_names = NULL; 1970 cntr_names_initialized = 0; 1971 mutex_unlock(&cntr_names_lock); 1972 } 1973 1974 void hfi1_cnp_rcv(struct hfi1_packet *packet) 1975 { 1976 struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd); 1977 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); 1978 struct ib_header *hdr = packet->hdr; 1979 struct rvt_qp *qp = packet->qp; 1980 u32 lqpn, rqpn = 0; 1981 u16 rlid = 0; 1982 u8 sl, sc5, svc_type; 1983 1984 switch (packet->qp->ibqp.qp_type) { 1985 case IB_QPT_UC: 1986 rlid = rdma_ah_get_dlid(&qp->remote_ah_attr); 1987 rqpn = qp->remote_qpn; 1988 svc_type = IB_CC_SVCTYPE_UC; 1989 break; 1990 case IB_QPT_RC: 1991 rlid = rdma_ah_get_dlid(&qp->remote_ah_attr); 1992 rqpn = qp->remote_qpn; 1993 svc_type = IB_CC_SVCTYPE_RC; 1994 break; 1995 case IB_QPT_SMI: 1996 case IB_QPT_GSI: 1997 case IB_QPT_UD: 1998 svc_type = IB_CC_SVCTYPE_UD; 1999 break; 2000 default: 2001 ibp->rvp.n_pkt_drops++; 2002 return; 2003 } 2004 2005 sc5 = hfi1_9B_get_sc5(hdr, packet->rhf); 2006 sl = ibp->sc_to_sl[sc5]; 2007 lqpn = qp->ibqp.qp_num; 2008 2009 process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type); 2010 } 2011