/*
 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
 * All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "qib.h"

/* cut down ridiculously long IB macro names */
#define OP(x) IB_OPCODE_UC_##x

/**
 * qib_make_uc_req - construct a request packet (SEND, RDMA write)
 * @qp: a pointer to the QP
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int qib_make_uc_req(struct qib_qp *qp)
{
	struct qib_other_headers *ohdr;
	struct qib_swqe *wqe;
	unsigned long flags;
	u32 hwords;
	u32 bth0;
	u32 len;
	u32 pmtu = qp->pmtu;
	int ret = 0;

	spin_lock_irqsave(&qp->s_lock, flags);

	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_SEND_OK)) {
		if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		if (qp->s_last == qp->s_head)
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (atomic_read(&qp->s_dma_busy)) {
			qp->s_flags |= QIB_S_WAIT_DMA;
			goto bail;
		}
		wqe = get_swqe_ptr(qp, qp->s_last);
		qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
		goto done;
	}

	ohdr = &qp->s_hdr->u.oth;
	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
		ohdr = &qp->s_hdr->u.l.oth;

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;
	bth0 = 0;

	/* Get the next send request. */
	wqe = get_swqe_ptr(qp, qp->s_cur);
	qp->s_wqe = NULL;
	switch (qp->s_state) {
	default:
		if (!(ib_qib_state_ops[qp->state] &
		    QIB_PROCESS_NEXT_SEND_OK))
			goto bail;
		/* Check if send work queue is empty. */
		if (qp->s_cur == qp->s_head)
			goto bail;
		/*
		 * Start a new request.
		 */
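		/*
		 * Note: UC has no ACK/NAK protocol, so there is no
		 * retransmit state to set up here.  The message simply
		 * takes the next PSNs; a receiver that detects a PSN
		 * gap silently drops the in-progress message (see
		 * qib_uc_rcv() below) rather than asking for a resend.
		 */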
		wqe->psn = qp->s_next_psn;
		qp->s_psn = qp->s_next_psn;
		qp->s_sge.sge = wqe->sg_list[0];
		qp->s_sge.sg_list = wqe->sg_list + 1;
		qp->s_sge.num_sge = wqe->wr.num_sge;
		qp->s_sge.total_len = wqe->length;
		len = wqe->length;
		qp->s_len = len;
		switch (wqe->wr.opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
			if (len > pmtu) {
				qp->s_state = OP(SEND_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_SEND)
				qp->s_state = OP(SEND_ONLY);
			else {
				qp->s_state =
					OP(SEND_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the BTH */
				ohdr->u.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
			}
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
			qp->s_wqe = wqe;
			if (++qp->s_cur >= qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_WRITE:
		case IB_WR_RDMA_WRITE_WITH_IMM:
			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->wr.wr.rdma.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			hwords += sizeof(struct ib_reth) / 4;
			if (len > pmtu) {
				qp->s_state = OP(RDMA_WRITE_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
				qp->s_state = OP(RDMA_WRITE_ONLY);
			else {
				qp->s_state =
					OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the RETH */
				ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
				if (wqe->wr.send_flags & IB_SEND_SOLICITED)
					bth0 |= IB_BTH_SOLICITED;
			}
			qp->s_wqe = wqe;
			if (++qp->s_cur >= qp->s_size)
				qp->s_cur = 0;
			break;

		default:
			goto bail;
		}
		break;

	case OP(SEND_FIRST):
		qp->s_state = OP(SEND_MIDDLE);
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_SEND)
			qp->s_state = OP(SEND_LAST);
		else {
			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
		}
		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
			bth0 |= IB_BTH_SOLICITED;
		qp->s_wqe = wqe;
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_WRITE_FIRST):
		qp->s_state = OP(RDMA_WRITE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
			qp->s_state = OP(RDMA_WRITE_LAST);
		else {
			qp->s_state =
				OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
		}
		qp->s_wqe = wqe;
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;
	}
	qp->s_len -= len;
	qp->s_hdrwords = hwords;
	qp->s_cur_sge = &qp->s_sge;
	qp->s_cur_size = len;
	qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24),
			    qp->s_next_psn++ & QIB_PSN_MASK);
done:
	ret = 1;
	goto unlock;

bail:
	qp->s_flags &= ~QIB_S_BUSY;
unlock:
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return ret;
}
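/*
 * Note: on the receive side, qp->r_state records the opcode of the
 * last packet accepted.  That is what lets the opcode-sequence checks
 * in qib_uc_rcv() below enforce FIRST -> MIDDLE... -> LAST ordering
 * within each message.
 */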
/**
 * qib_uc_rcv - handle an incoming UC packet
 * @ibp: the port the packet came in on
 * @hdr: the header of the packet
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the length of the packet
 * @qp: the QP for this packet
 *
 * This is called from qib_qp_rcv() to process an incoming UC packet
 * for the given QP.
 * Called at interrupt level.
 */
void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct qib_qp *qp)
{
	struct qib_other_headers *ohdr;
	u32 opcode;
	u32 hdrsize;
	u32 psn;
	u32 pad;
	struct ib_wc wc;
	u32 pmtu = qp->pmtu;
	struct ib_reth *reth;
	int ret;

	/* Check for GRH */
	if (!has_grh) {
		ohdr = &hdr->u.oth;
		hdrsize = 8 + 12;	/* LRH + BTH */
	} else {
		ohdr = &hdr->u.l.oth;
		hdrsize = 8 + 40 + 12;	/* LRH + GRH + BTH */
	}

	opcode = be32_to_cpu(ohdr->bth[0]);
	if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
		return;

	psn = be32_to_cpu(ohdr->bth[2]);
	opcode >>= 24;

	/* Compare the PSN against the expected PSN. */
	if (unlikely(qib_cmp24(psn, qp->r_psn) != 0)) {
		/*
		 * Handle a sequence error.
		 * Silently drop any current message.
		 */
		qp->r_psn = psn;
inv:
		if (qp->r_state == OP(SEND_FIRST) ||
		    qp->r_state == OP(SEND_MIDDLE)) {
			set_bit(QIB_R_REWIND_SGE, &qp->r_aflags);
			qp->r_sge.num_sge = 0;
		} else
			while (qp->r_sge.num_sge) {
				atomic_dec(&qp->r_sge.sge.mr->refcount);
				if (--qp->r_sge.num_sge)
					qp->r_sge.sge = *qp->r_sge.sg_list++;
			}
		qp->r_state = OP(SEND_LAST);
		switch (opcode) {
		case OP(SEND_FIRST):
		case OP(SEND_ONLY):
		case OP(SEND_ONLY_WITH_IMMEDIATE):
			goto send_first;

		case OP(RDMA_WRITE_FIRST):
		case OP(RDMA_WRITE_ONLY):
		case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
			goto rdma_first;

		default:
			goto drop;
		}
	}

	/* Check for opcode sequence errors. */
	switch (qp->r_state) {
	case OP(SEND_FIRST):
	case OP(SEND_MIDDLE):
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE))
			break;
		goto inv;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_MIDDLE):
		if (opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			break;
		goto inv;

	default:
		if (opcode == OP(SEND_FIRST) ||
		    opcode == OP(SEND_ONLY) ||
		    opcode == OP(SEND_ONLY_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_FIRST) ||
		    opcode == OP(RDMA_WRITE_ONLY) ||
		    opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
			break;
		goto inv;
	}

	if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) {
		qp->r_flags |= QIB_R_COMM_EST;
		if (qp->ibqp.event_handler) {
			struct ib_event ev;

			ev.device = qp->ibqp.device;
			ev.element.qp = &qp->ibqp;
			ev.event = IB_EVENT_COMM_EST;
			qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
		}
	}

	/* OK, process the packet. */
	switch (opcode) {
	case OP(SEND_FIRST):
	case OP(SEND_ONLY):
	case OP(SEND_ONLY_WITH_IMMEDIATE):
send_first:
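		/*
		 * For a multi-packet SEND, the scatter/gather state of
		 * the receive WQE is stashed in s_rdma_read_sge (which
		 * is otherwise unused on a UC QP, since UC does not
		 * support RDMA READ) so that a message dropped
		 * mid-stream can rewind via QIB_R_REWIND_SGE and reuse
		 * the same RWQE from the beginning.
		 */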
		if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
			qp->r_sge = qp->s_rdma_read_sge;
		else {
			ret = qib_get_rwqe(qp, 0);
			if (ret < 0)
				goto op_err;
			if (!ret)
				goto drop;
			/*
			 * qp->s_rdma_read_sge will be the owner
			 * of the mr references.
			 */
			qp->s_rdma_read_sge = qp->r_sge;
		}
		qp->r_rcv_len = 0;
		if (opcode == OP(SEND_ONLY))
			goto no_immediate_data;
		else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE))
			goto send_last_imm;
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		/*
		 * Check for invalid length: a MIDDLE packet must carry
		 * exactly one PMTU of payload and must not overrun the
		 * posted rwqe length.
		 */
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto rewind;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto rewind;
		qib_copy_sge(&qp->r_sge, data, pmtu, 0);
		break;

	case OP(SEND_LAST_WITH_IMMEDIATE):
send_last_imm:
		wc.ex.imm_data = ohdr->u.imm_data;
		hdrsize += 4;
		wc.wc_flags = IB_WC_WITH_IMM;
		goto send_last;
	case OP(SEND_LAST):
no_immediate_data:
		wc.ex.imm_data = 0;
		wc.wc_flags = 0;
send_last:
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* XXX LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto rewind;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		wc.byte_len = tlen + qp->r_rcv_len;
		if (unlikely(wc.byte_len > qp->r_len))
			goto rewind;
		wc.opcode = IB_WC_RECV;
last_imm:
		qib_copy_sge(&qp->r_sge, data, tlen, 0);
		while (qp->s_rdma_read_sge.num_sge) {
			atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount);
			if (--qp->s_rdma_read_sge.num_sge)
				qp->s_rdma_read_sge.sge =
					*qp->s_rdma_read_sge.sg_list++;
		}
		wc.wr_id = qp->r_wr_id;
		wc.status = IB_WC_SUCCESS;
		wc.qp = &qp->ibqp;
		wc.src_qp = qp->remote_qpn;
		wc.slid = qp->remote_ah_attr.dlid;
		wc.sl = qp->remote_ah_attr.sl;
		/* zero fields that are N/A */
		wc.vendor_err = 0;
		wc.pkey_index = 0;
		wc.dlid_path_bits = 0;
		wc.port_num = 0;
		/* Signal completion event if the solicited bit is set. */
		qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
			     (ohdr->bth[0] &
			      cpu_to_be32(IB_BTH_SOLICITED)) != 0);
		break;

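	/*
	 * RDMA WRITEs carry their destination in the RETH, so no
	 * receive WQE is consumed unless the opcode includes immediate
	 * data; the rkey is validated with qib_rkey_ok() before any
	 * payload is placed.
	 */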
	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_ONLY):
	case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): /* consume RWQE */
rdma_first:
		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_WRITE))) {
			goto drop;
		}
		reth = &ohdr->u.rc.reth;
		hdrsize += sizeof(*reth);
		qp->r_len = be32_to_cpu(reth->length);
		qp->r_rcv_len = 0;
		qp->r_sge.sg_list = NULL;
		if (qp->r_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			/* Check rkey */
			ok = qib_rkey_ok(qp, &qp->r_sge.sge, qp->r_len,
					 vaddr, rkey, IB_ACCESS_REMOTE_WRITE);
			if (unlikely(!ok))
				goto drop;
			qp->r_sge.num_sge = 1;
		} else {
			qp->r_sge.num_sge = 0;
			qp->r_sge.sge.mr = NULL;
			qp->r_sge.sge.vaddr = NULL;
			qp->r_sge.sge.length = 0;
			qp->r_sge.sge.sge_length = 0;
		}
		if (opcode == OP(RDMA_WRITE_ONLY))
			goto rdma_last;
		else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE)) {
			wc.ex.imm_data = ohdr->u.rc.imm_data;
			goto rdma_last_imm;
		}
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		/*
		 * Check for invalid length: a MIDDLE packet must carry
		 * exactly one PMTU of payload and must not overrun the
		 * length given in the RETH.
		 */
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto drop;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto drop;
		qib_copy_sge(&qp->r_sge, data, pmtu, 1);
		break;

	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
		wc.ex.imm_data = ohdr->u.imm_data;
rdma_last_imm:
		hdrsize += 4;
		wc.wc_flags = IB_WC_WITH_IMM;

		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* XXX LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto drop;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
			goto drop;
		if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
			while (qp->s_rdma_read_sge.num_sge) {
				atomic_dec(&qp->s_rdma_read_sge.sge.mr->
					   refcount);
				if (--qp->s_rdma_read_sge.num_sge)
					qp->s_rdma_read_sge.sge =
						*qp->s_rdma_read_sge.sg_list++;
			}
		else {
			ret = qib_get_rwqe(qp, 1);
			if (ret < 0)
				goto op_err;
			if (!ret)
				goto drop;
		}
		wc.byte_len = qp->r_len;
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
		goto last_imm;

	case OP(RDMA_WRITE_LAST):
rdma_last:
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* XXX LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto drop;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
			goto drop;
		qib_copy_sge(&qp->r_sge, data, tlen, 1);
		while (qp->r_sge.num_sge) {
			atomic_dec(&qp->r_sge.sge.mr->refcount);
			if (--qp->r_sge.num_sge)
				qp->r_sge.sge = *qp->r_sge.sg_list++;
		}
		break;

	default:
		/* Drop packet for unknown opcodes. */
		goto drop;
	}
	qp->r_psn++;
	qp->r_state = opcode;
	return;

rewind:
	set_bit(QIB_R_REWIND_SGE, &qp->r_aflags);
	qp->r_sge.num_sge = 0;
drop:
	ibp->n_pkt_drops++;
	return;

op_err:
	qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	return;
}