1 /* 2 * Copyright(c) 2015, 2016 Intel Corporation. 3 * 4 * This file is provided under a dual BSD/GPLv2 license. When using or 5 * redistributing this file, you may do so under either license. 6 * 7 * GPL LICENSE SUMMARY 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of version 2 of the GNU General Public License as 11 * published by the Free Software Foundation. 12 * 13 * This program is distributed in the hope that it will be useful, but 14 * WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16 * General Public License for more details. 17 * 18 * BSD LICENSE 19 * 20 * Redistribution and use in source and binary forms, with or without 21 * modification, are permitted provided that the following conditions 22 * are met: 23 * 24 * - Redistributions of source code must retain the above copyright 25 * notice, this list of conditions and the following disclaimer. 26 * - Redistributions in binary form must reproduce the above copyright 27 * notice, this list of conditions and the following disclaimer in 28 * the documentation and/or other materials provided with the 29 * distribution. 30 * - Neither the name of Intel Corporation nor the names of its 31 * contributors may be used to endorse or promote products derived 32 * from this software without specific prior written permission. 33 * 34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 45 * 46 */ 47 48 #include <rdma/ib_mad.h> 49 #include <rdma/ib_user_verbs.h> 50 #include <linux/io.h> 51 #include <linux/module.h> 52 #include <linux/utsname.h> 53 #include <linux/rculist.h> 54 #include <linux/mm.h> 55 #include <linux/vmalloc.h> 56 57 #include "hfi.h" 58 #include "common.h" 59 #include "device.h" 60 #include "trace.h" 61 #include "qp.h" 62 #include "verbs_txreq.h" 63 64 static unsigned int hfi1_lkey_table_size = 16; 65 module_param_named(lkey_table_size, hfi1_lkey_table_size, uint, 66 S_IRUGO); 67 MODULE_PARM_DESC(lkey_table_size, 68 "LKEY table size in bits (2^n, 1 <= n <= 23)"); 69 70 static unsigned int hfi1_max_pds = 0xFFFF; 71 module_param_named(max_pds, hfi1_max_pds, uint, S_IRUGO); 72 MODULE_PARM_DESC(max_pds, 73 "Maximum number of protection domains to support"); 74 75 static unsigned int hfi1_max_ahs = 0xFFFF; 76 module_param_named(max_ahs, hfi1_max_ahs, uint, S_IRUGO); 77 MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support"); 78 79 unsigned int hfi1_max_cqes = 0x2FFFFF; 80 module_param_named(max_cqes, hfi1_max_cqes, uint, S_IRUGO); 81 MODULE_PARM_DESC(max_cqes, 82 "Maximum number of completion queue entries to support"); 83 84 unsigned int hfi1_max_cqs = 0x1FFFF; 85 module_param_named(max_cqs, hfi1_max_cqs, uint, S_IRUGO); 86 MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support"); 87 88 unsigned int hfi1_max_qp_wrs = 0x3FFF; 89 module_param_named(max_qp_wrs, hfi1_max_qp_wrs, uint, S_IRUGO); 90 MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support"); 91 92 unsigned int hfi1_max_qps = 32768; 93 module_param_named(max_qps, hfi1_max_qps, uint, S_IRUGO); 94 MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support"); 95 96 unsigned int hfi1_max_sges = 0x60; 97 module_param_named(max_sges, hfi1_max_sges, uint, S_IRUGO); 98 MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support"); 99 100 unsigned int hfi1_max_mcast_grps = 16384; 101 module_param_named(max_mcast_grps, hfi1_max_mcast_grps, uint, S_IRUGO); 102 MODULE_PARM_DESC(max_mcast_grps, 103 "Maximum number of multicast groups to support"); 104 105 unsigned int hfi1_max_mcast_qp_attached = 16; 106 module_param_named(max_mcast_qp_attached, hfi1_max_mcast_qp_attached, 107 uint, S_IRUGO); 108 MODULE_PARM_DESC(max_mcast_qp_attached, 109 "Maximum number of attached QPs to support"); 110 111 unsigned int hfi1_max_srqs = 1024; 112 module_param_named(max_srqs, hfi1_max_srqs, uint, S_IRUGO); 113 MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support"); 114 115 unsigned int hfi1_max_srq_sges = 128; 116 module_param_named(max_srq_sges, hfi1_max_srq_sges, uint, S_IRUGO); 117 MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support"); 118 119 unsigned int hfi1_max_srq_wrs = 0x1FFFF; 120 module_param_named(max_srq_wrs, hfi1_max_srq_wrs, uint, S_IRUGO); 121 MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs support"); 122 123 unsigned short piothreshold = 256; 124 module_param(piothreshold, ushort, S_IRUGO); 125 MODULE_PARM_DESC(piothreshold, "size used to determine sdma vs. pio"); 126 127 #define COPY_CACHELESS 1 128 #define COPY_ADAPTIVE 2 129 static unsigned int sge_copy_mode; 130 module_param(sge_copy_mode, uint, S_IRUGO); 131 MODULE_PARM_DESC(sge_copy_mode, 132 "Verbs copy mode: 0 use memcpy, 1 use cacheless copy, 2 adapt based on WSS"); 133 134 static void verbs_sdma_complete( 135 struct sdma_txreq *cookie, 136 int status); 137 138 static int pio_wait(struct rvt_qp *qp, 139 struct send_context *sc, 140 struct hfi1_pkt_state *ps, 141 u32 flag); 142 143 /* Length of buffer to create verbs txreq cache name */ 144 #define TXREQ_NAME_LEN 24 145 146 static uint wss_threshold; 147 module_param(wss_threshold, uint, S_IRUGO); 148 MODULE_PARM_DESC(wss_threshold, "Percentage (1-100) of LLC to use as a threshold for a cacheless copy"); 149 static uint wss_clean_period = 256; 150 module_param(wss_clean_period, uint, S_IRUGO); 151 MODULE_PARM_DESC(wss_clean_period, "Count of verbs copies before an entry in the page copy table is cleaned"); 152 153 /* memory working set size */ 154 struct hfi1_wss { 155 unsigned long *entries; 156 atomic_t total_count; 157 atomic_t clean_counter; 158 atomic_t clean_entry; 159 160 int threshold; 161 int num_entries; 162 long pages_mask; 163 }; 164 165 static struct hfi1_wss wss; 166 167 int hfi1_wss_init(void) 168 { 169 long llc_size; 170 long llc_bits; 171 long table_size; 172 long table_bits; 173 174 /* check for a valid percent range - default to 80 if none or invalid */ 175 if (wss_threshold < 1 || wss_threshold > 100) 176 wss_threshold = 80; 177 /* reject a wildly large period */ 178 if (wss_clean_period > 1000000) 179 wss_clean_period = 256; 180 /* reject a zero period */ 181 if (wss_clean_period == 0) 182 wss_clean_period = 1; 183 184 /* 185 * Calculate the table size - the next power of 2 larger than the 186 * LLC size. LLC size is in KiB. 187 */ 188 llc_size = wss_llc_size() * 1024; 189 table_size = roundup_pow_of_two(llc_size); 190 191 /* one bit per page in rounded up table */ 192 llc_bits = llc_size / PAGE_SIZE; 193 table_bits = table_size / PAGE_SIZE; 194 wss.pages_mask = table_bits - 1; 195 wss.num_entries = table_bits / BITS_PER_LONG; 196 197 wss.threshold = (llc_bits * wss_threshold) / 100; 198 if (wss.threshold == 0) 199 wss.threshold = 1; 200 201 atomic_set(&wss.clean_counter, wss_clean_period); 202 203 wss.entries = kcalloc(wss.num_entries, sizeof(*wss.entries), 204 GFP_KERNEL); 205 if (!wss.entries) { 206 hfi1_wss_exit(); 207 return -ENOMEM; 208 } 209 210 return 0; 211 } 212 213 void hfi1_wss_exit(void) 214 { 215 /* coded to handle partially initialized and repeat callers */ 216 kfree(wss.entries); 217 wss.entries = NULL; 218 } 219 220 /* 221 * Advance the clean counter. When the clean period has expired, 222 * clean an entry. 223 * 224 * This is implemented in atomics to avoid locking. Because multiple 225 * variables are involved, it can be racy which can lead to slightly 226 * inaccurate information. Since this is only a heuristic, this is 227 * OK. Any innaccuracies will clean themselves out as the counter 228 * advances. That said, it is unlikely the entry clean operation will 229 * race - the next possible racer will not start until the next clean 230 * period. 231 * 232 * The clean counter is implemented as a decrement to zero. When zero 233 * is reached an entry is cleaned. 234 */ 235 static void wss_advance_clean_counter(void) 236 { 237 int entry; 238 int weight; 239 unsigned long bits; 240 241 /* become the cleaner if we decrement the counter to zero */ 242 if (atomic_dec_and_test(&wss.clean_counter)) { 243 /* 244 * Set, not add, the clean period. This avoids an issue 245 * where the counter could decrement below the clean period. 246 * Doing a set can result in lost decrements, slowing the 247 * clean advance. Since this a heuristic, this possible 248 * slowdown is OK. 249 * 250 * An alternative is to loop, advancing the counter by a 251 * clean period until the result is > 0. However, this could 252 * lead to several threads keeping another in the clean loop. 253 * This could be mitigated by limiting the number of times 254 * we stay in the loop. 255 */ 256 atomic_set(&wss.clean_counter, wss_clean_period); 257 258 /* 259 * Uniquely grab the entry to clean and move to next. 260 * The current entry is always the lower bits of 261 * wss.clean_entry. The table size, wss.num_entries, 262 * is always a power-of-2. 263 */ 264 entry = (atomic_inc_return(&wss.clean_entry) - 1) 265 & (wss.num_entries - 1); 266 267 /* clear the entry and count the bits */ 268 bits = xchg(&wss.entries[entry], 0); 269 weight = hweight64((u64)bits); 270 /* only adjust the contended total count if needed */ 271 if (weight) 272 atomic_sub(weight, &wss.total_count); 273 } 274 } 275 276 /* 277 * Insert the given address into the working set array. 278 */ 279 static void wss_insert(void *address) 280 { 281 u32 page = ((unsigned long)address >> PAGE_SHIFT) & wss.pages_mask; 282 u32 entry = page / BITS_PER_LONG; /* assumes this ends up a shift */ 283 u32 nr = page & (BITS_PER_LONG - 1); 284 285 if (!test_and_set_bit(nr, &wss.entries[entry])) 286 atomic_inc(&wss.total_count); 287 288 wss_advance_clean_counter(); 289 } 290 291 /* 292 * Is the working set larger than the threshold? 293 */ 294 static inline int wss_exceeds_threshold(void) 295 { 296 return atomic_read(&wss.total_count) >= wss.threshold; 297 } 298 299 /* 300 * Translate ib_wr_opcode into ib_wc_opcode. 301 */ 302 const enum ib_wc_opcode ib_hfi1_wc_opcode[] = { 303 [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE, 304 [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE, 305 [IB_WR_SEND] = IB_WC_SEND, 306 [IB_WR_SEND_WITH_IMM] = IB_WC_SEND, 307 [IB_WR_RDMA_READ] = IB_WC_RDMA_READ, 308 [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP, 309 [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD, 310 [IB_WR_SEND_WITH_INV] = IB_WC_SEND, 311 [IB_WR_LOCAL_INV] = IB_WC_LOCAL_INV, 312 [IB_WR_REG_MR] = IB_WC_REG_MR 313 }; 314 315 /* 316 * Length of header by opcode, 0 --> not supported 317 */ 318 const u8 hdr_len_by_opcode[256] = { 319 /* RC */ 320 [IB_OPCODE_RC_SEND_FIRST] = 12 + 8, 321 [IB_OPCODE_RC_SEND_MIDDLE] = 12 + 8, 322 [IB_OPCODE_RC_SEND_LAST] = 12 + 8, 323 [IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE] = 12 + 8 + 4, 324 [IB_OPCODE_RC_SEND_ONLY] = 12 + 8, 325 [IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE] = 12 + 8 + 4, 326 [IB_OPCODE_RC_RDMA_WRITE_FIRST] = 12 + 8 + 16, 327 [IB_OPCODE_RC_RDMA_WRITE_MIDDLE] = 12 + 8, 328 [IB_OPCODE_RC_RDMA_WRITE_LAST] = 12 + 8, 329 [IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = 12 + 8 + 4, 330 [IB_OPCODE_RC_RDMA_WRITE_ONLY] = 12 + 8 + 16, 331 [IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = 12 + 8 + 20, 332 [IB_OPCODE_RC_RDMA_READ_REQUEST] = 12 + 8 + 16, 333 [IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST] = 12 + 8 + 4, 334 [IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE] = 12 + 8, 335 [IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST] = 12 + 8 + 4, 336 [IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY] = 12 + 8 + 4, 337 [IB_OPCODE_RC_ACKNOWLEDGE] = 12 + 8 + 4, 338 [IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = 12 + 8 + 4 + 8, 339 [IB_OPCODE_RC_COMPARE_SWAP] = 12 + 8 + 28, 340 [IB_OPCODE_RC_FETCH_ADD] = 12 + 8 + 28, 341 [IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE] = 12 + 8 + 4, 342 [IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE] = 12 + 8 + 4, 343 /* UC */ 344 [IB_OPCODE_UC_SEND_FIRST] = 12 + 8, 345 [IB_OPCODE_UC_SEND_MIDDLE] = 12 + 8, 346 [IB_OPCODE_UC_SEND_LAST] = 12 + 8, 347 [IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE] = 12 + 8 + 4, 348 [IB_OPCODE_UC_SEND_ONLY] = 12 + 8, 349 [IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE] = 12 + 8 + 4, 350 [IB_OPCODE_UC_RDMA_WRITE_FIRST] = 12 + 8 + 16, 351 [IB_OPCODE_UC_RDMA_WRITE_MIDDLE] = 12 + 8, 352 [IB_OPCODE_UC_RDMA_WRITE_LAST] = 12 + 8, 353 [IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = 12 + 8 + 4, 354 [IB_OPCODE_UC_RDMA_WRITE_ONLY] = 12 + 8 + 16, 355 [IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = 12 + 8 + 20, 356 /* UD */ 357 [IB_OPCODE_UD_SEND_ONLY] = 12 + 8 + 8, 358 [IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE] = 12 + 8 + 12 359 }; 360 361 static const opcode_handler opcode_handler_tbl[256] = { 362 /* RC */ 363 [IB_OPCODE_RC_SEND_FIRST] = &hfi1_rc_rcv, 364 [IB_OPCODE_RC_SEND_MIDDLE] = &hfi1_rc_rcv, 365 [IB_OPCODE_RC_SEND_LAST] = &hfi1_rc_rcv, 366 [IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE] = &hfi1_rc_rcv, 367 [IB_OPCODE_RC_SEND_ONLY] = &hfi1_rc_rcv, 368 [IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE] = &hfi1_rc_rcv, 369 [IB_OPCODE_RC_RDMA_WRITE_FIRST] = &hfi1_rc_rcv, 370 [IB_OPCODE_RC_RDMA_WRITE_MIDDLE] = &hfi1_rc_rcv, 371 [IB_OPCODE_RC_RDMA_WRITE_LAST] = &hfi1_rc_rcv, 372 [IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = &hfi1_rc_rcv, 373 [IB_OPCODE_RC_RDMA_WRITE_ONLY] = &hfi1_rc_rcv, 374 [IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = &hfi1_rc_rcv, 375 [IB_OPCODE_RC_RDMA_READ_REQUEST] = &hfi1_rc_rcv, 376 [IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST] = &hfi1_rc_rcv, 377 [IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE] = &hfi1_rc_rcv, 378 [IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST] = &hfi1_rc_rcv, 379 [IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY] = &hfi1_rc_rcv, 380 [IB_OPCODE_RC_ACKNOWLEDGE] = &hfi1_rc_rcv, 381 [IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = &hfi1_rc_rcv, 382 [IB_OPCODE_RC_COMPARE_SWAP] = &hfi1_rc_rcv, 383 [IB_OPCODE_RC_FETCH_ADD] = &hfi1_rc_rcv, 384 [IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE] = &hfi1_rc_rcv, 385 [IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE] = &hfi1_rc_rcv, 386 /* UC */ 387 [IB_OPCODE_UC_SEND_FIRST] = &hfi1_uc_rcv, 388 [IB_OPCODE_UC_SEND_MIDDLE] = &hfi1_uc_rcv, 389 [IB_OPCODE_UC_SEND_LAST] = &hfi1_uc_rcv, 390 [IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE] = &hfi1_uc_rcv, 391 [IB_OPCODE_UC_SEND_ONLY] = &hfi1_uc_rcv, 392 [IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE] = &hfi1_uc_rcv, 393 [IB_OPCODE_UC_RDMA_WRITE_FIRST] = &hfi1_uc_rcv, 394 [IB_OPCODE_UC_RDMA_WRITE_MIDDLE] = &hfi1_uc_rcv, 395 [IB_OPCODE_UC_RDMA_WRITE_LAST] = &hfi1_uc_rcv, 396 [IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = &hfi1_uc_rcv, 397 [IB_OPCODE_UC_RDMA_WRITE_ONLY] = &hfi1_uc_rcv, 398 [IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = &hfi1_uc_rcv, 399 /* UD */ 400 [IB_OPCODE_UD_SEND_ONLY] = &hfi1_ud_rcv, 401 [IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE] = &hfi1_ud_rcv, 402 /* CNP */ 403 [IB_OPCODE_CNP] = &hfi1_cnp_rcv 404 }; 405 406 #define OPMASK 0x1f 407 408 static const u32 pio_opmask[BIT(3)] = { 409 /* RC */ 410 [IB_OPCODE_RC >> 5] = 411 BIT(RC_OP(SEND_ONLY) & OPMASK) | 412 BIT(RC_OP(SEND_ONLY_WITH_IMMEDIATE) & OPMASK) | 413 BIT(RC_OP(RDMA_WRITE_ONLY) & OPMASK) | 414 BIT(RC_OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE) & OPMASK) | 415 BIT(RC_OP(RDMA_READ_REQUEST) & OPMASK) | 416 BIT(RC_OP(ACKNOWLEDGE) & OPMASK) | 417 BIT(RC_OP(ATOMIC_ACKNOWLEDGE) & OPMASK) | 418 BIT(RC_OP(COMPARE_SWAP) & OPMASK) | 419 BIT(RC_OP(FETCH_ADD) & OPMASK), 420 /* UC */ 421 [IB_OPCODE_UC >> 5] = 422 BIT(UC_OP(SEND_ONLY) & OPMASK) | 423 BIT(UC_OP(SEND_ONLY_WITH_IMMEDIATE) & OPMASK) | 424 BIT(UC_OP(RDMA_WRITE_ONLY) & OPMASK) | 425 BIT(UC_OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE) & OPMASK), 426 }; 427 428 /* 429 * System image GUID. 430 */ 431 __be64 ib_hfi1_sys_image_guid; 432 433 /** 434 * hfi1_copy_sge - copy data to SGE memory 435 * @ss: the SGE state 436 * @data: the data to copy 437 * @length: the length of the data 438 * @copy_last: do a separate copy of the last 8 bytes 439 */ 440 void hfi1_copy_sge( 441 struct rvt_sge_state *ss, 442 void *data, u32 length, 443 int release, 444 int copy_last) 445 { 446 struct rvt_sge *sge = &ss->sge; 447 int in_last = 0; 448 int i; 449 int cacheless_copy = 0; 450 451 if (sge_copy_mode == COPY_CACHELESS) { 452 cacheless_copy = length >= PAGE_SIZE; 453 } else if (sge_copy_mode == COPY_ADAPTIVE) { 454 if (length >= PAGE_SIZE) { 455 /* 456 * NOTE: this *assumes*: 457 * o The first vaddr is the dest. 458 * o If multiple pages, then vaddr is sequential. 459 */ 460 wss_insert(sge->vaddr); 461 if (length >= (2 * PAGE_SIZE)) 462 wss_insert(sge->vaddr + PAGE_SIZE); 463 464 cacheless_copy = wss_exceeds_threshold(); 465 } else { 466 wss_advance_clean_counter(); 467 } 468 } 469 if (copy_last) { 470 if (length > 8) { 471 length -= 8; 472 } else { 473 copy_last = 0; 474 in_last = 1; 475 } 476 } 477 478 again: 479 while (length) { 480 u32 len = sge->length; 481 482 if (len > length) 483 len = length; 484 if (len > sge->sge_length) 485 len = sge->sge_length; 486 WARN_ON_ONCE(len == 0); 487 if (unlikely(in_last)) { 488 /* enforce byte transfer ordering */ 489 for (i = 0; i < len; i++) 490 ((u8 *)sge->vaddr)[i] = ((u8 *)data)[i]; 491 } else if (cacheless_copy) { 492 cacheless_memcpy(sge->vaddr, data, len); 493 } else { 494 memcpy(sge->vaddr, data, len); 495 } 496 sge->vaddr += len; 497 sge->length -= len; 498 sge->sge_length -= len; 499 if (sge->sge_length == 0) { 500 if (release) 501 rvt_put_mr(sge->mr); 502 if (--ss->num_sge) 503 *sge = *ss->sg_list++; 504 } else if (sge->length == 0 && sge->mr->lkey) { 505 if (++sge->n >= RVT_SEGSZ) { 506 if (++sge->m >= sge->mr->mapsz) 507 break; 508 sge->n = 0; 509 } 510 sge->vaddr = 511 sge->mr->map[sge->m]->segs[sge->n].vaddr; 512 sge->length = 513 sge->mr->map[sge->m]->segs[sge->n].length; 514 } 515 data += len; 516 length -= len; 517 } 518 519 if (copy_last) { 520 copy_last = 0; 521 in_last = 1; 522 length = 8; 523 goto again; 524 } 525 } 526 527 /** 528 * hfi1_skip_sge - skip over SGE memory 529 * @ss: the SGE state 530 * @length: the number of bytes to skip 531 */ 532 void hfi1_skip_sge(struct rvt_sge_state *ss, u32 length, int release) 533 { 534 struct rvt_sge *sge = &ss->sge; 535 536 while (length) { 537 u32 len = sge->length; 538 539 if (len > length) 540 len = length; 541 if (len > sge->sge_length) 542 len = sge->sge_length; 543 WARN_ON_ONCE(len == 0); 544 sge->vaddr += len; 545 sge->length -= len; 546 sge->sge_length -= len; 547 if (sge->sge_length == 0) { 548 if (release) 549 rvt_put_mr(sge->mr); 550 if (--ss->num_sge) 551 *sge = *ss->sg_list++; 552 } else if (sge->length == 0 && sge->mr->lkey) { 553 if (++sge->n >= RVT_SEGSZ) { 554 if (++sge->m >= sge->mr->mapsz) 555 break; 556 sge->n = 0; 557 } 558 sge->vaddr = 559 sge->mr->map[sge->m]->segs[sge->n].vaddr; 560 sge->length = 561 sge->mr->map[sge->m]->segs[sge->n].length; 562 } 563 length -= len; 564 } 565 } 566 567 /* 568 * Make sure the QP is ready and able to accept the given opcode. 569 */ 570 static inline opcode_handler qp_ok(int opcode, struct hfi1_packet *packet) 571 { 572 if (!(ib_rvt_state_ops[packet->qp->state] & RVT_PROCESS_RECV_OK)) 573 return NULL; 574 if (((opcode & RVT_OPCODE_QP_MASK) == packet->qp->allowed_ops) || 575 (opcode == IB_OPCODE_CNP)) 576 return opcode_handler_tbl[opcode]; 577 578 return NULL; 579 } 580 581 /** 582 * hfi1_ib_rcv - process an incoming packet 583 * @packet: data packet information 584 * 585 * This is called to process an incoming packet at interrupt level. 586 * 587 * Tlen is the length of the header + data + CRC in bytes. 588 */ 589 void hfi1_ib_rcv(struct hfi1_packet *packet) 590 { 591 struct hfi1_ctxtdata *rcd = packet->rcd; 592 struct ib_header *hdr = packet->hdr; 593 u32 tlen = packet->tlen; 594 struct hfi1_pportdata *ppd = rcd->ppd; 595 struct hfi1_ibport *ibp = &ppd->ibport_data; 596 struct rvt_dev_info *rdi = &ppd->dd->verbs_dev.rdi; 597 opcode_handler packet_handler; 598 unsigned long flags; 599 u32 qp_num; 600 int lnh; 601 u8 opcode; 602 u16 lid; 603 604 /* Check for GRH */ 605 lnh = be16_to_cpu(hdr->lrh[0]) & 3; 606 if (lnh == HFI1_LRH_BTH) { 607 packet->ohdr = &hdr->u.oth; 608 } else if (lnh == HFI1_LRH_GRH) { 609 u32 vtf; 610 611 packet->ohdr = &hdr->u.l.oth; 612 if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR) 613 goto drop; 614 vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow); 615 if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION) 616 goto drop; 617 packet->rcv_flags |= HFI1_HAS_GRH; 618 } else { 619 goto drop; 620 } 621 622 trace_input_ibhdr(rcd->dd, hdr); 623 624 opcode = (be32_to_cpu(packet->ohdr->bth[0]) >> 24); 625 inc_opstats(tlen, &rcd->opstats->stats[opcode]); 626 627 /* Get the destination QP number. */ 628 qp_num = be32_to_cpu(packet->ohdr->bth[1]) & RVT_QPN_MASK; 629 lid = be16_to_cpu(hdr->lrh[1]); 630 if (unlikely((lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) && 631 (lid != be16_to_cpu(IB_LID_PERMISSIVE)))) { 632 struct rvt_mcast *mcast; 633 struct rvt_mcast_qp *p; 634 635 if (lnh != HFI1_LRH_GRH) 636 goto drop; 637 mcast = rvt_mcast_find(&ibp->rvp, &hdr->u.l.grh.dgid); 638 if (!mcast) 639 goto drop; 640 list_for_each_entry_rcu(p, &mcast->qp_list, list) { 641 packet->qp = p->qp; 642 spin_lock_irqsave(&packet->qp->r_lock, flags); 643 packet_handler = qp_ok(opcode, packet); 644 if (likely(packet_handler)) 645 packet_handler(packet); 646 else 647 ibp->rvp.n_pkt_drops++; 648 spin_unlock_irqrestore(&packet->qp->r_lock, flags); 649 } 650 /* 651 * Notify rvt_multicast_detach() if it is waiting for us 652 * to finish. 653 */ 654 if (atomic_dec_return(&mcast->refcount) <= 1) 655 wake_up(&mcast->wait); 656 } else { 657 rcu_read_lock(); 658 packet->qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num); 659 if (!packet->qp) { 660 rcu_read_unlock(); 661 goto drop; 662 } 663 spin_lock_irqsave(&packet->qp->r_lock, flags); 664 packet_handler = qp_ok(opcode, packet); 665 if (likely(packet_handler)) 666 packet_handler(packet); 667 else 668 ibp->rvp.n_pkt_drops++; 669 spin_unlock_irqrestore(&packet->qp->r_lock, flags); 670 rcu_read_unlock(); 671 } 672 return; 673 674 drop: 675 ibp->rvp.n_pkt_drops++; 676 } 677 678 /* 679 * This is called from a timer to check for QPs 680 * which need kernel memory in order to send a packet. 681 */ 682 static void mem_timer(unsigned long data) 683 { 684 struct hfi1_ibdev *dev = (struct hfi1_ibdev *)data; 685 struct list_head *list = &dev->memwait; 686 struct rvt_qp *qp = NULL; 687 struct iowait *wait; 688 unsigned long flags; 689 struct hfi1_qp_priv *priv; 690 691 write_seqlock_irqsave(&dev->iowait_lock, flags); 692 if (!list_empty(list)) { 693 wait = list_first_entry(list, struct iowait, list); 694 qp = iowait_to_qp(wait); 695 priv = qp->priv; 696 list_del_init(&priv->s_iowait.list); 697 /* refcount held until actual wake up */ 698 if (!list_empty(list)) 699 mod_timer(&dev->mem_timer, jiffies + 1); 700 } 701 write_sequnlock_irqrestore(&dev->iowait_lock, flags); 702 703 if (qp) 704 hfi1_qp_wakeup(qp, RVT_S_WAIT_KMEM); 705 } 706 707 void update_sge(struct rvt_sge_state *ss, u32 length) 708 { 709 struct rvt_sge *sge = &ss->sge; 710 711 sge->vaddr += length; 712 sge->length -= length; 713 sge->sge_length -= length; 714 if (sge->sge_length == 0) { 715 if (--ss->num_sge) 716 *sge = *ss->sg_list++; 717 } else if (sge->length == 0 && sge->mr->lkey) { 718 if (++sge->n >= RVT_SEGSZ) { 719 if (++sge->m >= sge->mr->mapsz) 720 return; 721 sge->n = 0; 722 } 723 sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr; 724 sge->length = sge->mr->map[sge->m]->segs[sge->n].length; 725 } 726 } 727 728 /* 729 * This is called with progress side lock held. 730 */ 731 /* New API */ 732 static void verbs_sdma_complete( 733 struct sdma_txreq *cookie, 734 int status) 735 { 736 struct verbs_txreq *tx = 737 container_of(cookie, struct verbs_txreq, txreq); 738 struct rvt_qp *qp = tx->qp; 739 740 spin_lock(&qp->s_lock); 741 if (tx->wqe) { 742 hfi1_send_complete(qp, tx->wqe, IB_WC_SUCCESS); 743 } else if (qp->ibqp.qp_type == IB_QPT_RC) { 744 struct ib_header *hdr; 745 746 hdr = &tx->phdr.hdr; 747 hfi1_rc_send_complete(qp, hdr); 748 } 749 spin_unlock(&qp->s_lock); 750 751 hfi1_put_txreq(tx); 752 } 753 754 static int wait_kmem(struct hfi1_ibdev *dev, 755 struct rvt_qp *qp, 756 struct hfi1_pkt_state *ps) 757 { 758 struct hfi1_qp_priv *priv = qp->priv; 759 unsigned long flags; 760 int ret = 0; 761 762 spin_lock_irqsave(&qp->s_lock, flags); 763 if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) { 764 write_seqlock(&dev->iowait_lock); 765 list_add_tail(&ps->s_txreq->txreq.list, 766 &priv->s_iowait.tx_head); 767 if (list_empty(&priv->s_iowait.list)) { 768 if (list_empty(&dev->memwait)) 769 mod_timer(&dev->mem_timer, jiffies + 1); 770 qp->s_flags |= RVT_S_WAIT_KMEM; 771 list_add_tail(&priv->s_iowait.list, &dev->memwait); 772 trace_hfi1_qpsleep(qp, RVT_S_WAIT_KMEM); 773 rvt_get_qp(qp); 774 } 775 write_sequnlock(&dev->iowait_lock); 776 qp->s_flags &= ~RVT_S_BUSY; 777 ret = -EBUSY; 778 } 779 spin_unlock_irqrestore(&qp->s_lock, flags); 780 781 return ret; 782 } 783 784 /* 785 * This routine calls txadds for each sg entry. 786 * 787 * Add failures will revert the sge cursor 788 */ 789 static noinline int build_verbs_ulp_payload( 790 struct sdma_engine *sde, 791 struct rvt_sge_state *ss, 792 u32 length, 793 struct verbs_txreq *tx) 794 { 795 struct rvt_sge *sg_list = ss->sg_list; 796 struct rvt_sge sge = ss->sge; 797 u8 num_sge = ss->num_sge; 798 u32 len; 799 int ret = 0; 800 801 while (length) { 802 len = ss->sge.length; 803 if (len > length) 804 len = length; 805 if (len > ss->sge.sge_length) 806 len = ss->sge.sge_length; 807 WARN_ON_ONCE(len == 0); 808 ret = sdma_txadd_kvaddr( 809 sde->dd, 810 &tx->txreq, 811 ss->sge.vaddr, 812 len); 813 if (ret) 814 goto bail_txadd; 815 update_sge(ss, len); 816 length -= len; 817 } 818 return ret; 819 bail_txadd: 820 /* unwind cursor */ 821 ss->sge = sge; 822 ss->num_sge = num_sge; 823 ss->sg_list = sg_list; 824 return ret; 825 } 826 827 /* 828 * Build the number of DMA descriptors needed to send length bytes of data. 829 * 830 * NOTE: DMA mapping is held in the tx until completed in the ring or 831 * the tx desc is freed without having been submitted to the ring 832 * 833 * This routine ensures all the helper routine calls succeed. 834 */ 835 /* New API */ 836 static int build_verbs_tx_desc( 837 struct sdma_engine *sde, 838 struct rvt_sge_state *ss, 839 u32 length, 840 struct verbs_txreq *tx, 841 struct hfi1_ahg_info *ahg_info, 842 u64 pbc) 843 { 844 int ret = 0; 845 struct hfi1_sdma_header *phdr = &tx->phdr; 846 u16 hdrbytes = tx->hdr_dwords << 2; 847 848 if (!ahg_info->ahgcount) { 849 ret = sdma_txinit_ahg( 850 &tx->txreq, 851 ahg_info->tx_flags, 852 hdrbytes + length, 853 ahg_info->ahgidx, 854 0, 855 NULL, 856 0, 857 verbs_sdma_complete); 858 if (ret) 859 goto bail_txadd; 860 phdr->pbc = cpu_to_le64(pbc); 861 ret = sdma_txadd_kvaddr( 862 sde->dd, 863 &tx->txreq, 864 phdr, 865 hdrbytes); 866 if (ret) 867 goto bail_txadd; 868 } else { 869 ret = sdma_txinit_ahg( 870 &tx->txreq, 871 ahg_info->tx_flags, 872 length, 873 ahg_info->ahgidx, 874 ahg_info->ahgcount, 875 ahg_info->ahgdesc, 876 hdrbytes, 877 verbs_sdma_complete); 878 if (ret) 879 goto bail_txadd; 880 } 881 882 /* add the ulp payload - if any. ss can be NULL for acks */ 883 if (ss) 884 ret = build_verbs_ulp_payload(sde, ss, length, tx); 885 bail_txadd: 886 return ret; 887 } 888 889 int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps, 890 u64 pbc) 891 { 892 struct hfi1_qp_priv *priv = qp->priv; 893 struct hfi1_ahg_info *ahg_info = priv->s_ahg; 894 u32 hdrwords = qp->s_hdrwords; 895 struct rvt_sge_state *ss = qp->s_cur_sge; 896 u32 len = qp->s_cur_size; 897 u32 plen = hdrwords + ((len + 3) >> 2) + 2; /* includes pbc */ 898 struct hfi1_ibdev *dev = ps->dev; 899 struct hfi1_pportdata *ppd = ps->ppd; 900 struct verbs_txreq *tx; 901 u64 pbc_flags = 0; 902 u8 sc5 = priv->s_sc; 903 904 int ret; 905 906 tx = ps->s_txreq; 907 if (!sdma_txreq_built(&tx->txreq)) { 908 if (likely(pbc == 0)) { 909 u32 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5); 910 /* No vl15 here */ 911 /* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */ 912 pbc_flags |= (!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT; 913 914 pbc = create_pbc(ppd, 915 pbc_flags, 916 qp->srate_mbps, 917 vl, 918 plen); 919 } 920 tx->wqe = qp->s_wqe; 921 ret = build_verbs_tx_desc(tx->sde, ss, len, tx, ahg_info, pbc); 922 if (unlikely(ret)) 923 goto bail_build; 924 } 925 ret = sdma_send_txreq(tx->sde, &priv->s_iowait, &tx->txreq); 926 if (unlikely(ret < 0)) { 927 if (ret == -ECOMM) 928 goto bail_ecomm; 929 return ret; 930 } 931 trace_sdma_output_ibhdr(dd_from_ibdev(qp->ibqp.device), 932 &ps->s_txreq->phdr.hdr); 933 return ret; 934 935 bail_ecomm: 936 /* The current one got "sent" */ 937 return 0; 938 bail_build: 939 ret = wait_kmem(dev, qp, ps); 940 if (!ret) { 941 /* free txreq - bad state */ 942 hfi1_put_txreq(ps->s_txreq); 943 ps->s_txreq = NULL; 944 } 945 return ret; 946 } 947 948 /* 949 * If we are now in the error state, return zero to flush the 950 * send work request. 951 */ 952 static int pio_wait(struct rvt_qp *qp, 953 struct send_context *sc, 954 struct hfi1_pkt_state *ps, 955 u32 flag) 956 { 957 struct hfi1_qp_priv *priv = qp->priv; 958 struct hfi1_devdata *dd = sc->dd; 959 struct hfi1_ibdev *dev = &dd->verbs_dev; 960 unsigned long flags; 961 int ret = 0; 962 963 /* 964 * Note that as soon as want_buffer() is called and 965 * possibly before it returns, sc_piobufavail() 966 * could be called. Therefore, put QP on the I/O wait list before 967 * enabling the PIO avail interrupt. 968 */ 969 spin_lock_irqsave(&qp->s_lock, flags); 970 if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) { 971 write_seqlock(&dev->iowait_lock); 972 list_add_tail(&ps->s_txreq->txreq.list, 973 &priv->s_iowait.tx_head); 974 if (list_empty(&priv->s_iowait.list)) { 975 struct hfi1_ibdev *dev = &dd->verbs_dev; 976 int was_empty; 977 978 dev->n_piowait += !!(flag & RVT_S_WAIT_PIO); 979 dev->n_piodrain += !!(flag & RVT_S_WAIT_PIO_DRAIN); 980 qp->s_flags |= flag; 981 was_empty = list_empty(&sc->piowait); 982 list_add_tail(&priv->s_iowait.list, &sc->piowait); 983 trace_hfi1_qpsleep(qp, RVT_S_WAIT_PIO); 984 rvt_get_qp(qp); 985 /* counting: only call wantpiobuf_intr if first user */ 986 if (was_empty) 987 hfi1_sc_wantpiobuf_intr(sc, 1); 988 } 989 write_sequnlock(&dev->iowait_lock); 990 qp->s_flags &= ~RVT_S_BUSY; 991 ret = -EBUSY; 992 } 993 spin_unlock_irqrestore(&qp->s_lock, flags); 994 return ret; 995 } 996 997 static void verbs_pio_complete(void *arg, int code) 998 { 999 struct rvt_qp *qp = (struct rvt_qp *)arg; 1000 struct hfi1_qp_priv *priv = qp->priv; 1001 1002 if (iowait_pio_dec(&priv->s_iowait)) 1003 iowait_drain_wakeup(&priv->s_iowait); 1004 } 1005 1006 int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps, 1007 u64 pbc) 1008 { 1009 struct hfi1_qp_priv *priv = qp->priv; 1010 u32 hdrwords = qp->s_hdrwords; 1011 struct rvt_sge_state *ss = qp->s_cur_sge; 1012 u32 len = qp->s_cur_size; 1013 u32 dwords = (len + 3) >> 2; 1014 u32 plen = hdrwords + dwords + 2; /* includes pbc */ 1015 struct hfi1_pportdata *ppd = ps->ppd; 1016 u32 *hdr = (u32 *)&ps->s_txreq->phdr.hdr; 1017 u64 pbc_flags = 0; 1018 u8 sc5; 1019 unsigned long flags = 0; 1020 struct send_context *sc; 1021 struct pio_buf *pbuf; 1022 int wc_status = IB_WC_SUCCESS; 1023 int ret = 0; 1024 pio_release_cb cb = NULL; 1025 1026 /* only RC/UC use complete */ 1027 switch (qp->ibqp.qp_type) { 1028 case IB_QPT_RC: 1029 case IB_QPT_UC: 1030 cb = verbs_pio_complete; 1031 break; 1032 default: 1033 break; 1034 } 1035 1036 /* vl15 special case taken care of in ud.c */ 1037 sc5 = priv->s_sc; 1038 sc = ps->s_txreq->psc; 1039 1040 if (likely(pbc == 0)) { 1041 u8 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5); 1042 /* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */ 1043 pbc_flags |= (!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT; 1044 pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen); 1045 } 1046 if (cb) 1047 iowait_pio_inc(&priv->s_iowait); 1048 pbuf = sc_buffer_alloc(sc, plen, cb, qp); 1049 if (unlikely(!pbuf)) { 1050 if (cb) 1051 verbs_pio_complete(qp, 0); 1052 if (ppd->host_link_state != HLS_UP_ACTIVE) { 1053 /* 1054 * If we have filled the PIO buffers to capacity and are 1055 * not in an active state this request is not going to 1056 * go out to so just complete it with an error or else a 1057 * ULP or the core may be stuck waiting. 1058 */ 1059 hfi1_cdbg( 1060 PIO, 1061 "alloc failed. state not active, completing"); 1062 wc_status = IB_WC_GENERAL_ERR; 1063 goto pio_bail; 1064 } else { 1065 /* 1066 * This is a normal occurrence. The PIO buffs are full 1067 * up but we are still happily sending, well we could be 1068 * so lets continue to queue the request. 1069 */ 1070 hfi1_cdbg(PIO, "alloc failed. state active, queuing"); 1071 ret = pio_wait(qp, sc, ps, RVT_S_WAIT_PIO); 1072 if (!ret) 1073 /* txreq not queued - free */ 1074 goto bail; 1075 /* tx consumed in wait */ 1076 return ret; 1077 } 1078 } 1079 1080 if (len == 0) { 1081 pio_copy(ppd->dd, pbuf, pbc, hdr, hdrwords); 1082 } else { 1083 if (ss) { 1084 seg_pio_copy_start(pbuf, pbc, hdr, hdrwords * 4); 1085 while (len) { 1086 void *addr = ss->sge.vaddr; 1087 u32 slen = ss->sge.length; 1088 1089 if (slen > len) 1090 slen = len; 1091 update_sge(ss, slen); 1092 seg_pio_copy_mid(pbuf, addr, slen); 1093 len -= slen; 1094 } 1095 seg_pio_copy_end(pbuf); 1096 } 1097 } 1098 1099 trace_pio_output_ibhdr(dd_from_ibdev(qp->ibqp.device), 1100 &ps->s_txreq->phdr.hdr); 1101 1102 pio_bail: 1103 if (qp->s_wqe) { 1104 spin_lock_irqsave(&qp->s_lock, flags); 1105 hfi1_send_complete(qp, qp->s_wqe, wc_status); 1106 spin_unlock_irqrestore(&qp->s_lock, flags); 1107 } else if (qp->ibqp.qp_type == IB_QPT_RC) { 1108 spin_lock_irqsave(&qp->s_lock, flags); 1109 hfi1_rc_send_complete(qp, &ps->s_txreq->phdr.hdr); 1110 spin_unlock_irqrestore(&qp->s_lock, flags); 1111 } 1112 1113 ret = 0; 1114 1115 bail: 1116 hfi1_put_txreq(ps->s_txreq); 1117 return ret; 1118 } 1119 1120 /* 1121 * egress_pkey_matches_entry - return 1 if the pkey matches ent (ent 1122 * being an entry from the partition key table), return 0 1123 * otherwise. Use the matching criteria for egress partition keys 1124 * specified in the OPAv1 spec., section 9.1l.7. 1125 */ 1126 static inline int egress_pkey_matches_entry(u16 pkey, u16 ent) 1127 { 1128 u16 mkey = pkey & PKEY_LOW_15_MASK; 1129 u16 mentry = ent & PKEY_LOW_15_MASK; 1130 1131 if (mkey == mentry) { 1132 /* 1133 * If pkey[15] is set (full partition member), 1134 * is bit 15 in the corresponding table element 1135 * clear (limited member)? 1136 */ 1137 if (pkey & PKEY_MEMBER_MASK) 1138 return !!(ent & PKEY_MEMBER_MASK); 1139 return 1; 1140 } 1141 return 0; 1142 } 1143 1144 /** 1145 * egress_pkey_check - check P_KEY of a packet 1146 * @ppd: Physical IB port data 1147 * @lrh: Local route header 1148 * @bth: Base transport header 1149 * @sc5: SC for packet 1150 * @s_pkey_index: It will be used for look up optimization for kernel contexts 1151 * only. If it is negative value, then it means user contexts is calling this 1152 * function. 1153 * 1154 * It checks if hdr's pkey is valid. 1155 * 1156 * Return: 0 on success, otherwise, 1 1157 */ 1158 int egress_pkey_check(struct hfi1_pportdata *ppd, __be16 *lrh, __be32 *bth, 1159 u8 sc5, int8_t s_pkey_index) 1160 { 1161 struct hfi1_devdata *dd; 1162 int i; 1163 u16 pkey; 1164 int is_user_ctxt_mechanism = (s_pkey_index < 0); 1165 1166 if (!(ppd->part_enforce & HFI1_PART_ENFORCE_OUT)) 1167 return 0; 1168 1169 pkey = (u16)be32_to_cpu(bth[0]); 1170 1171 /* If SC15, pkey[0:14] must be 0x7fff */ 1172 if ((sc5 == 0xf) && ((pkey & PKEY_LOW_15_MASK) != PKEY_LOW_15_MASK)) 1173 goto bad; 1174 1175 /* Is the pkey = 0x0, or 0x8000? */ 1176 if ((pkey & PKEY_LOW_15_MASK) == 0) 1177 goto bad; 1178 1179 /* 1180 * For the kernel contexts only, if a qp is passed into the function, 1181 * the most likely matching pkey has index qp->s_pkey_index 1182 */ 1183 if (!is_user_ctxt_mechanism && 1184 egress_pkey_matches_entry(pkey, ppd->pkeys[s_pkey_index])) { 1185 return 0; 1186 } 1187 1188 for (i = 0; i < MAX_PKEY_VALUES; i++) { 1189 if (egress_pkey_matches_entry(pkey, ppd->pkeys[i])) 1190 return 0; 1191 } 1192 bad: 1193 /* 1194 * For the user-context mechanism, the P_KEY check would only happen 1195 * once per SDMA request, not once per packet. Therefore, there's no 1196 * need to increment the counter for the user-context mechanism. 1197 */ 1198 if (!is_user_ctxt_mechanism) { 1199 incr_cntr64(&ppd->port_xmit_constraint_errors); 1200 dd = ppd->dd; 1201 if (!(dd->err_info_xmit_constraint.status & 1202 OPA_EI_STATUS_SMASK)) { 1203 u16 slid = be16_to_cpu(lrh[3]); 1204 1205 dd->err_info_xmit_constraint.status |= 1206 OPA_EI_STATUS_SMASK; 1207 dd->err_info_xmit_constraint.slid = slid; 1208 dd->err_info_xmit_constraint.pkey = pkey; 1209 } 1210 } 1211 return 1; 1212 } 1213 1214 /** 1215 * get_send_routine - choose an egress routine 1216 * 1217 * Choose an egress routine based on QP type 1218 * and size 1219 */ 1220 static inline send_routine get_send_routine(struct rvt_qp *qp, 1221 struct verbs_txreq *tx) 1222 { 1223 struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); 1224 struct hfi1_qp_priv *priv = qp->priv; 1225 struct ib_header *h = &tx->phdr.hdr; 1226 1227 if (unlikely(!(dd->flags & HFI1_HAS_SEND_DMA))) 1228 return dd->process_pio_send; 1229 switch (qp->ibqp.qp_type) { 1230 case IB_QPT_SMI: 1231 return dd->process_pio_send; 1232 case IB_QPT_GSI: 1233 case IB_QPT_UD: 1234 break; 1235 case IB_QPT_UC: 1236 case IB_QPT_RC: { 1237 u8 op = get_opcode(h); 1238 1239 if (piothreshold && 1240 qp->s_cur_size <= min(piothreshold, qp->pmtu) && 1241 (BIT(op & OPMASK) & pio_opmask[op >> 5]) && 1242 iowait_sdma_pending(&priv->s_iowait) == 0 && 1243 !sdma_txreq_built(&tx->txreq)) 1244 return dd->process_pio_send; 1245 break; 1246 } 1247 default: 1248 break; 1249 } 1250 return dd->process_dma_send; 1251 } 1252 1253 /** 1254 * hfi1_verbs_send - send a packet 1255 * @qp: the QP to send on 1256 * @ps: the state of the packet to send 1257 * 1258 * Return zero if packet is sent or queued OK. 1259 * Return non-zero and clear qp->s_flags RVT_S_BUSY otherwise. 1260 */ 1261 int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps) 1262 { 1263 struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); 1264 struct hfi1_qp_priv *priv = qp->priv; 1265 struct ib_other_headers *ohdr; 1266 struct ib_header *hdr; 1267 send_routine sr; 1268 int ret; 1269 u8 lnh; 1270 1271 hdr = &ps->s_txreq->phdr.hdr; 1272 /* locate the pkey within the headers */ 1273 lnh = be16_to_cpu(hdr->lrh[0]) & 3; 1274 if (lnh == HFI1_LRH_GRH) 1275 ohdr = &hdr->u.l.oth; 1276 else 1277 ohdr = &hdr->u.oth; 1278 1279 sr = get_send_routine(qp, ps->s_txreq); 1280 ret = egress_pkey_check(dd->pport, 1281 hdr->lrh, 1282 ohdr->bth, 1283 priv->s_sc, 1284 qp->s_pkey_index); 1285 if (unlikely(ret)) { 1286 /* 1287 * The value we are returning here does not get propagated to 1288 * the verbs caller. Thus we need to complete the request with 1289 * error otherwise the caller could be sitting waiting on the 1290 * completion event. Only do this for PIO. SDMA has its own 1291 * mechanism for handling the errors. So for SDMA we can just 1292 * return. 1293 */ 1294 if (sr == dd->process_pio_send) { 1295 unsigned long flags; 1296 1297 hfi1_cdbg(PIO, "%s() Failed. Completing with err", 1298 __func__); 1299 spin_lock_irqsave(&qp->s_lock, flags); 1300 hfi1_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR); 1301 spin_unlock_irqrestore(&qp->s_lock, flags); 1302 } 1303 return -EINVAL; 1304 } 1305 if (sr == dd->process_dma_send && iowait_pio_pending(&priv->s_iowait)) 1306 return pio_wait(qp, 1307 ps->s_txreq->psc, 1308 ps, 1309 RVT_S_WAIT_PIO_DRAIN); 1310 return sr(qp, ps, 0); 1311 } 1312 1313 /** 1314 * hfi1_fill_device_attr - Fill in rvt dev info device attributes. 1315 * @dd: the device data structure 1316 */ 1317 static void hfi1_fill_device_attr(struct hfi1_devdata *dd) 1318 { 1319 struct rvt_dev_info *rdi = &dd->verbs_dev.rdi; 1320 u16 ver = dd->dc8051_ver; 1321 1322 memset(&rdi->dparms.props, 0, sizeof(rdi->dparms.props)); 1323 1324 rdi->dparms.props.fw_ver = ((u64)(dc8051_ver_maj(ver)) << 16) | 1325 (u64)dc8051_ver_min(ver); 1326 rdi->dparms.props.device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR | 1327 IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT | 1328 IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN | 1329 IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE | 1330 IB_DEVICE_MEM_MGT_EXTENSIONS; 1331 rdi->dparms.props.page_size_cap = PAGE_SIZE; 1332 rdi->dparms.props.vendor_id = dd->oui1 << 16 | dd->oui2 << 8 | dd->oui3; 1333 rdi->dparms.props.vendor_part_id = dd->pcidev->device; 1334 rdi->dparms.props.hw_ver = dd->minrev; 1335 rdi->dparms.props.sys_image_guid = ib_hfi1_sys_image_guid; 1336 rdi->dparms.props.max_mr_size = U64_MAX; 1337 rdi->dparms.props.max_fast_reg_page_list_len = UINT_MAX; 1338 rdi->dparms.props.max_qp = hfi1_max_qps; 1339 rdi->dparms.props.max_qp_wr = hfi1_max_qp_wrs; 1340 rdi->dparms.props.max_sge = hfi1_max_sges; 1341 rdi->dparms.props.max_sge_rd = hfi1_max_sges; 1342 rdi->dparms.props.max_cq = hfi1_max_cqs; 1343 rdi->dparms.props.max_ah = hfi1_max_ahs; 1344 rdi->dparms.props.max_cqe = hfi1_max_cqes; 1345 rdi->dparms.props.max_mr = rdi->lkey_table.max; 1346 rdi->dparms.props.max_fmr = rdi->lkey_table.max; 1347 rdi->dparms.props.max_map_per_fmr = 32767; 1348 rdi->dparms.props.max_pd = hfi1_max_pds; 1349 rdi->dparms.props.max_qp_rd_atom = HFI1_MAX_RDMA_ATOMIC; 1350 rdi->dparms.props.max_qp_init_rd_atom = 255; 1351 rdi->dparms.props.max_srq = hfi1_max_srqs; 1352 rdi->dparms.props.max_srq_wr = hfi1_max_srq_wrs; 1353 rdi->dparms.props.max_srq_sge = hfi1_max_srq_sges; 1354 rdi->dparms.props.atomic_cap = IB_ATOMIC_GLOB; 1355 rdi->dparms.props.max_pkeys = hfi1_get_npkeys(dd); 1356 rdi->dparms.props.max_mcast_grp = hfi1_max_mcast_grps; 1357 rdi->dparms.props.max_mcast_qp_attach = hfi1_max_mcast_qp_attached; 1358 rdi->dparms.props.max_total_mcast_qp_attach = 1359 rdi->dparms.props.max_mcast_qp_attach * 1360 rdi->dparms.props.max_mcast_grp; 1361 } 1362 1363 static inline u16 opa_speed_to_ib(u16 in) 1364 { 1365 u16 out = 0; 1366 1367 if (in & OPA_LINK_SPEED_25G) 1368 out |= IB_SPEED_EDR; 1369 if (in & OPA_LINK_SPEED_12_5G) 1370 out |= IB_SPEED_FDR; 1371 1372 return out; 1373 } 1374 1375 /* 1376 * Convert a single OPA link width (no multiple flags) to an IB value. 1377 * A zero OPA link width means link down, which means the IB width value 1378 * is a don't care. 1379 */ 1380 static inline u16 opa_width_to_ib(u16 in) 1381 { 1382 switch (in) { 1383 case OPA_LINK_WIDTH_1X: 1384 /* map 2x and 3x to 1x as they don't exist in IB */ 1385 case OPA_LINK_WIDTH_2X: 1386 case OPA_LINK_WIDTH_3X: 1387 return IB_WIDTH_1X; 1388 default: /* link down or unknown, return our largest width */ 1389 case OPA_LINK_WIDTH_4X: 1390 return IB_WIDTH_4X; 1391 } 1392 } 1393 1394 static int query_port(struct rvt_dev_info *rdi, u8 port_num, 1395 struct ib_port_attr *props) 1396 { 1397 struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi); 1398 struct hfi1_devdata *dd = dd_from_dev(verbs_dev); 1399 struct hfi1_pportdata *ppd = &dd->pport[port_num - 1]; 1400 u16 lid = ppd->lid; 1401 1402 props->lid = lid ? lid : 0; 1403 props->lmc = ppd->lmc; 1404 /* OPA logical states match IB logical states */ 1405 props->state = driver_lstate(ppd); 1406 props->phys_state = hfi1_ibphys_portstate(ppd); 1407 props->gid_tbl_len = HFI1_GUIDS_PER_PORT; 1408 props->active_width = (u8)opa_width_to_ib(ppd->link_width_active); 1409 /* see rate_show() in ib core/sysfs.c */ 1410 props->active_speed = (u8)opa_speed_to_ib(ppd->link_speed_active); 1411 props->max_vl_num = ppd->vls_supported; 1412 1413 /* Once we are a "first class" citizen and have added the OPA MTUs to 1414 * the core we can advertise the larger MTU enum to the ULPs, for now 1415 * advertise only 4K. 1416 * 1417 * Those applications which are either OPA aware or pass the MTU enum 1418 * from the Path Records to us will get the new 8k MTU. Those that 1419 * attempt to process the MTU enum may fail in various ways. 1420 */ 1421 props->max_mtu = mtu_to_enum((!valid_ib_mtu(hfi1_max_mtu) ? 1422 4096 : hfi1_max_mtu), IB_MTU_4096); 1423 props->active_mtu = !valid_ib_mtu(ppd->ibmtu) ? props->max_mtu : 1424 mtu_to_enum(ppd->ibmtu, IB_MTU_2048); 1425 1426 return 0; 1427 } 1428 1429 static int modify_device(struct ib_device *device, 1430 int device_modify_mask, 1431 struct ib_device_modify *device_modify) 1432 { 1433 struct hfi1_devdata *dd = dd_from_ibdev(device); 1434 unsigned i; 1435 int ret; 1436 1437 if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID | 1438 IB_DEVICE_MODIFY_NODE_DESC)) { 1439 ret = -EOPNOTSUPP; 1440 goto bail; 1441 } 1442 1443 if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) { 1444 memcpy(device->node_desc, device_modify->node_desc, 1445 IB_DEVICE_NODE_DESC_MAX); 1446 for (i = 0; i < dd->num_pports; i++) { 1447 struct hfi1_ibport *ibp = &dd->pport[i].ibport_data; 1448 1449 hfi1_node_desc_chg(ibp); 1450 } 1451 } 1452 1453 if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) { 1454 ib_hfi1_sys_image_guid = 1455 cpu_to_be64(device_modify->sys_image_guid); 1456 for (i = 0; i < dd->num_pports; i++) { 1457 struct hfi1_ibport *ibp = &dd->pport[i].ibport_data; 1458 1459 hfi1_sys_guid_chg(ibp); 1460 } 1461 } 1462 1463 ret = 0; 1464 1465 bail: 1466 return ret; 1467 } 1468 1469 static int shut_down_port(struct rvt_dev_info *rdi, u8 port_num) 1470 { 1471 struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi); 1472 struct hfi1_devdata *dd = dd_from_dev(verbs_dev); 1473 struct hfi1_pportdata *ppd = &dd->pport[port_num - 1]; 1474 int ret; 1475 1476 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_UNKNOWN, 0, 1477 OPA_LINKDOWN_REASON_UNKNOWN); 1478 ret = set_link_state(ppd, HLS_DN_DOWNDEF); 1479 return ret; 1480 } 1481 1482 static int hfi1_get_guid_be(struct rvt_dev_info *rdi, struct rvt_ibport *rvp, 1483 int guid_index, __be64 *guid) 1484 { 1485 struct hfi1_ibport *ibp = container_of(rvp, struct hfi1_ibport, rvp); 1486 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); 1487 1488 if (guid_index == 0) 1489 *guid = cpu_to_be64(ppd->guid); 1490 else if (guid_index < HFI1_GUIDS_PER_PORT) 1491 *guid = ibp->guids[guid_index - 1]; 1492 else 1493 return -EINVAL; 1494 1495 return 0; 1496 } 1497 1498 /* 1499 * convert ah port,sl to sc 1500 */ 1501 u8 ah_to_sc(struct ib_device *ibdev, struct ib_ah_attr *ah) 1502 { 1503 struct hfi1_ibport *ibp = to_iport(ibdev, ah->port_num); 1504 1505 return ibp->sl_to_sc[ah->sl]; 1506 } 1507 1508 static int hfi1_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr) 1509 { 1510 struct hfi1_ibport *ibp; 1511 struct hfi1_pportdata *ppd; 1512 struct hfi1_devdata *dd; 1513 u8 sc5; 1514 1515 /* test the mapping for validity */ 1516 ibp = to_iport(ibdev, ah_attr->port_num); 1517 ppd = ppd_from_ibp(ibp); 1518 sc5 = ibp->sl_to_sc[ah_attr->sl]; 1519 dd = dd_from_ppd(ppd); 1520 if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf) 1521 return -EINVAL; 1522 return 0; 1523 } 1524 1525 static void hfi1_notify_new_ah(struct ib_device *ibdev, 1526 struct ib_ah_attr *ah_attr, 1527 struct rvt_ah *ah) 1528 { 1529 struct hfi1_ibport *ibp; 1530 struct hfi1_pportdata *ppd; 1531 struct hfi1_devdata *dd; 1532 u8 sc5; 1533 1534 /* 1535 * Do not trust reading anything from rvt_ah at this point as it is not 1536 * done being setup. We can however modify things which we need to set. 1537 */ 1538 1539 ibp = to_iport(ibdev, ah_attr->port_num); 1540 ppd = ppd_from_ibp(ibp); 1541 sc5 = ibp->sl_to_sc[ah->attr.sl]; 1542 dd = dd_from_ppd(ppd); 1543 ah->vl = sc_to_vlt(dd, sc5); 1544 if (ah->vl < num_vls || ah->vl == 15) 1545 ah->log_pmtu = ilog2(dd->vld[ah->vl].mtu); 1546 } 1547 1548 struct ib_ah *hfi1_create_qp0_ah(struct hfi1_ibport *ibp, u16 dlid) 1549 { 1550 struct ib_ah_attr attr; 1551 struct ib_ah *ah = ERR_PTR(-EINVAL); 1552 struct rvt_qp *qp0; 1553 1554 memset(&attr, 0, sizeof(attr)); 1555 attr.dlid = dlid; 1556 attr.port_num = ppd_from_ibp(ibp)->port; 1557 rcu_read_lock(); 1558 qp0 = rcu_dereference(ibp->rvp.qp[0]); 1559 if (qp0) 1560 ah = ib_create_ah(qp0->ibqp.pd, &attr); 1561 rcu_read_unlock(); 1562 return ah; 1563 } 1564 1565 /** 1566 * hfi1_get_npkeys - return the size of the PKEY table for context 0 1567 * @dd: the hfi1_ib device 1568 */ 1569 unsigned hfi1_get_npkeys(struct hfi1_devdata *dd) 1570 { 1571 return ARRAY_SIZE(dd->pport[0].pkeys); 1572 } 1573 1574 static void init_ibport(struct hfi1_pportdata *ppd) 1575 { 1576 struct hfi1_ibport *ibp = &ppd->ibport_data; 1577 size_t sz = ARRAY_SIZE(ibp->sl_to_sc); 1578 int i; 1579 1580 for (i = 0; i < sz; i++) { 1581 ibp->sl_to_sc[i] = i; 1582 ibp->sc_to_sl[i] = i; 1583 } 1584 1585 spin_lock_init(&ibp->rvp.lock); 1586 /* Set the prefix to the default value (see ch. 4.1.1) */ 1587 ibp->rvp.gid_prefix = IB_DEFAULT_GID_PREFIX; 1588 ibp->rvp.sm_lid = 0; 1589 /* Below should only set bits defined in OPA PortInfo.CapabilityMask */ 1590 ibp->rvp.port_cap_flags = IB_PORT_AUTO_MIGR_SUP | 1591 IB_PORT_CAP_MASK_NOTICE_SUP; 1592 ibp->rvp.pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA; 1593 ibp->rvp.pma_counter_select[1] = IB_PMA_PORT_RCV_DATA; 1594 ibp->rvp.pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS; 1595 ibp->rvp.pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS; 1596 ibp->rvp.pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT; 1597 1598 RCU_INIT_POINTER(ibp->rvp.qp[0], NULL); 1599 RCU_INIT_POINTER(ibp->rvp.qp[1], NULL); 1600 } 1601 1602 static void hfi1_get_dev_fw_str(struct ib_device *ibdev, char *str, 1603 size_t str_len) 1604 { 1605 struct rvt_dev_info *rdi = ib_to_rvt(ibdev); 1606 struct hfi1_ibdev *dev = dev_from_rdi(rdi); 1607 u16 ver = dd_from_dev(dev)->dc8051_ver; 1608 1609 snprintf(str, str_len, "%u.%u", dc8051_ver_maj(ver), 1610 dc8051_ver_min(ver)); 1611 } 1612 1613 /** 1614 * hfi1_register_ib_device - register our device with the infiniband core 1615 * @dd: the device data structure 1616 * Return 0 if successful, errno if unsuccessful. 1617 */ 1618 int hfi1_register_ib_device(struct hfi1_devdata *dd) 1619 { 1620 struct hfi1_ibdev *dev = &dd->verbs_dev; 1621 struct ib_device *ibdev = &dev->rdi.ibdev; 1622 struct hfi1_pportdata *ppd = dd->pport; 1623 unsigned i; 1624 int ret; 1625 size_t lcpysz = IB_DEVICE_NAME_MAX; 1626 1627 for (i = 0; i < dd->num_pports; i++) 1628 init_ibport(ppd + i); 1629 1630 /* Only need to initialize non-zero fields. */ 1631 1632 setup_timer(&dev->mem_timer, mem_timer, (unsigned long)dev); 1633 1634 seqlock_init(&dev->iowait_lock); 1635 INIT_LIST_HEAD(&dev->txwait); 1636 INIT_LIST_HEAD(&dev->memwait); 1637 1638 ret = verbs_txreq_init(dev); 1639 if (ret) 1640 goto err_verbs_txreq; 1641 1642 /* 1643 * The system image GUID is supposed to be the same for all 1644 * HFIs in a single system but since there can be other 1645 * device types in the system, we can't be sure this is unique. 1646 */ 1647 if (!ib_hfi1_sys_image_guid) 1648 ib_hfi1_sys_image_guid = cpu_to_be64(ppd->guid); 1649 lcpysz = strlcpy(ibdev->name, class_name(), lcpysz); 1650 strlcpy(ibdev->name + lcpysz, "_%d", IB_DEVICE_NAME_MAX - lcpysz); 1651 ibdev->owner = THIS_MODULE; 1652 ibdev->node_guid = cpu_to_be64(ppd->guid); 1653 ibdev->phys_port_cnt = dd->num_pports; 1654 ibdev->dma_device = &dd->pcidev->dev; 1655 ibdev->modify_device = modify_device; 1656 1657 /* keep process mad in the driver */ 1658 ibdev->process_mad = hfi1_process_mad; 1659 ibdev->get_dev_fw_str = hfi1_get_dev_fw_str; 1660 1661 strncpy(ibdev->node_desc, init_utsname()->nodename, 1662 sizeof(ibdev->node_desc)); 1663 1664 /* 1665 * Fill in rvt info object. 1666 */ 1667 dd->verbs_dev.rdi.driver_f.port_callback = hfi1_create_port_files; 1668 dd->verbs_dev.rdi.driver_f.get_card_name = get_card_name; 1669 dd->verbs_dev.rdi.driver_f.get_pci_dev = get_pci_dev; 1670 dd->verbs_dev.rdi.driver_f.check_ah = hfi1_check_ah; 1671 dd->verbs_dev.rdi.driver_f.notify_new_ah = hfi1_notify_new_ah; 1672 dd->verbs_dev.rdi.driver_f.get_guid_be = hfi1_get_guid_be; 1673 dd->verbs_dev.rdi.driver_f.query_port_state = query_port; 1674 dd->verbs_dev.rdi.driver_f.shut_down_port = shut_down_port; 1675 dd->verbs_dev.rdi.driver_f.cap_mask_chg = hfi1_cap_mask_chg; 1676 /* 1677 * Fill in rvt info device attributes. 1678 */ 1679 hfi1_fill_device_attr(dd); 1680 1681 /* queue pair */ 1682 dd->verbs_dev.rdi.dparms.qp_table_size = hfi1_qp_table_size; 1683 dd->verbs_dev.rdi.dparms.qpn_start = 0; 1684 dd->verbs_dev.rdi.dparms.qpn_inc = 1; 1685 dd->verbs_dev.rdi.dparms.qos_shift = dd->qos_shift; 1686 dd->verbs_dev.rdi.dparms.qpn_res_start = kdeth_qp << 16; 1687 dd->verbs_dev.rdi.dparms.qpn_res_end = 1688 dd->verbs_dev.rdi.dparms.qpn_res_start + 65535; 1689 dd->verbs_dev.rdi.dparms.max_rdma_atomic = HFI1_MAX_RDMA_ATOMIC; 1690 dd->verbs_dev.rdi.dparms.psn_mask = PSN_MASK; 1691 dd->verbs_dev.rdi.dparms.psn_shift = PSN_SHIFT; 1692 dd->verbs_dev.rdi.dparms.psn_modify_mask = PSN_MODIFY_MASK; 1693 dd->verbs_dev.rdi.dparms.core_cap_flags = RDMA_CORE_PORT_INTEL_OPA; 1694 dd->verbs_dev.rdi.dparms.max_mad_size = OPA_MGMT_MAD_SIZE; 1695 1696 dd->verbs_dev.rdi.driver_f.qp_priv_alloc = qp_priv_alloc; 1697 dd->verbs_dev.rdi.driver_f.qp_priv_free = qp_priv_free; 1698 dd->verbs_dev.rdi.driver_f.free_all_qps = free_all_qps; 1699 dd->verbs_dev.rdi.driver_f.notify_qp_reset = notify_qp_reset; 1700 dd->verbs_dev.rdi.driver_f.do_send = hfi1_do_send; 1701 dd->verbs_dev.rdi.driver_f.schedule_send = hfi1_schedule_send; 1702 dd->verbs_dev.rdi.driver_f.schedule_send_no_lock = _hfi1_schedule_send; 1703 dd->verbs_dev.rdi.driver_f.get_pmtu_from_attr = get_pmtu_from_attr; 1704 dd->verbs_dev.rdi.driver_f.notify_error_qp = notify_error_qp; 1705 dd->verbs_dev.rdi.driver_f.flush_qp_waiters = flush_qp_waiters; 1706 dd->verbs_dev.rdi.driver_f.stop_send_queue = stop_send_queue; 1707 dd->verbs_dev.rdi.driver_f.quiesce_qp = quiesce_qp; 1708 dd->verbs_dev.rdi.driver_f.notify_error_qp = notify_error_qp; 1709 dd->verbs_dev.rdi.driver_f.mtu_from_qp = mtu_from_qp; 1710 dd->verbs_dev.rdi.driver_f.mtu_to_path_mtu = mtu_to_path_mtu; 1711 dd->verbs_dev.rdi.driver_f.check_modify_qp = hfi1_check_modify_qp; 1712 dd->verbs_dev.rdi.driver_f.modify_qp = hfi1_modify_qp; 1713 dd->verbs_dev.rdi.driver_f.check_send_wqe = hfi1_check_send_wqe; 1714 1715 /* completeion queue */ 1716 snprintf(dd->verbs_dev.rdi.dparms.cq_name, 1717 sizeof(dd->verbs_dev.rdi.dparms.cq_name), 1718 "hfi1_cq%d", dd->unit); 1719 dd->verbs_dev.rdi.dparms.node = dd->node; 1720 1721 /* misc settings */ 1722 dd->verbs_dev.rdi.flags = 0; /* Let rdmavt handle it all */ 1723 dd->verbs_dev.rdi.dparms.lkey_table_size = hfi1_lkey_table_size; 1724 dd->verbs_dev.rdi.dparms.nports = dd->num_pports; 1725 dd->verbs_dev.rdi.dparms.npkeys = hfi1_get_npkeys(dd); 1726 1727 /* post send table */ 1728 dd->verbs_dev.rdi.post_parms = hfi1_post_parms; 1729 1730 ppd = dd->pport; 1731 for (i = 0; i < dd->num_pports; i++, ppd++) 1732 rvt_init_port(&dd->verbs_dev.rdi, 1733 &ppd->ibport_data.rvp, 1734 i, 1735 ppd->pkeys); 1736 1737 ret = rvt_register_device(&dd->verbs_dev.rdi); 1738 if (ret) 1739 goto err_verbs_txreq; 1740 1741 ret = hfi1_verbs_register_sysfs(dd); 1742 if (ret) 1743 goto err_class; 1744 1745 return ret; 1746 1747 err_class: 1748 rvt_unregister_device(&dd->verbs_dev.rdi); 1749 err_verbs_txreq: 1750 verbs_txreq_exit(dev); 1751 dd_dev_err(dd, "cannot register verbs: %d!\n", -ret); 1752 return ret; 1753 } 1754 1755 void hfi1_unregister_ib_device(struct hfi1_devdata *dd) 1756 { 1757 struct hfi1_ibdev *dev = &dd->verbs_dev; 1758 1759 hfi1_verbs_unregister_sysfs(dd); 1760 1761 rvt_unregister_device(&dd->verbs_dev.rdi); 1762 1763 if (!list_empty(&dev->txwait)) 1764 dd_dev_err(dd, "txwait list not empty!\n"); 1765 if (!list_empty(&dev->memwait)) 1766 dd_dev_err(dd, "memwait list not empty!\n"); 1767 1768 del_timer_sync(&dev->mem_timer); 1769 verbs_txreq_exit(dev); 1770 } 1771 1772 void hfi1_cnp_rcv(struct hfi1_packet *packet) 1773 { 1774 struct hfi1_ibport *ibp = &packet->rcd->ppd->ibport_data; 1775 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); 1776 struct ib_header *hdr = packet->hdr; 1777 struct rvt_qp *qp = packet->qp; 1778 u32 lqpn, rqpn = 0; 1779 u16 rlid = 0; 1780 u8 sl, sc5, svc_type; 1781 1782 switch (packet->qp->ibqp.qp_type) { 1783 case IB_QPT_UC: 1784 rlid = qp->remote_ah_attr.dlid; 1785 rqpn = qp->remote_qpn; 1786 svc_type = IB_CC_SVCTYPE_UC; 1787 break; 1788 case IB_QPT_RC: 1789 rlid = qp->remote_ah_attr.dlid; 1790 rqpn = qp->remote_qpn; 1791 svc_type = IB_CC_SVCTYPE_RC; 1792 break; 1793 case IB_QPT_SMI: 1794 case IB_QPT_GSI: 1795 case IB_QPT_UD: 1796 svc_type = IB_CC_SVCTYPE_UD; 1797 break; 1798 default: 1799 ibp->rvp.n_pkt_drops++; 1800 return; 1801 } 1802 1803 sc5 = hdr2sc(hdr, packet->rhf); 1804 sl = ibp->sc_to_sl[sc5]; 1805 lqpn = qp->ibqp.qp_num; 1806 1807 process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type); 1808 } 1809