/*
 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
 * All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "qib.h"

/* cut down ridiculously long IB macro names */
#define OP(x) IB_OPCODE_UC_##x

/**
 * qib_make_uc_req - construct a request packet (SEND, RDMA write)
 * @qp: a pointer to the QP
 *
 * Assumes the s_lock is held.
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_other_headers *ohdr;
	struct rvt_swqe *wqe;
	u32 hwords;
	u32 bth0;
	u32 len;
	u32 pmtu = qp->pmtu;
	int ret = 0;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		smp_read_barrier_depends(); /* see post_one_send() */
		if (qp->s_last == ACCESS_ONCE(qp->s_head))
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (atomic_read(&priv->s_dma_busy)) {
			qp->s_flags |= RVT_S_WAIT_DMA;
			goto bail;
		}
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
		goto done;
	}

	ohdr = &priv->s_hdr->u.oth;
	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
		ohdr = &priv->s_hdr->u.l.oth;

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;
	bth0 = 0;

	/* Get the next send request. */
	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
	qp->s_wqe = NULL;
	switch (qp->s_state) {
	default:
		if (!(ib_rvt_state_ops[qp->state] &
		      RVT_PROCESS_NEXT_SEND_OK))
			goto bail;
		/* Check if send work queue is empty. */
		smp_read_barrier_depends(); /* see post_one_send() */
		if (qp->s_cur == ACCESS_ONCE(qp->s_head))
			goto bail;
		/*
		 * Start a new request.
		 */
		qp->s_psn = wqe->psn;
		qp->s_sge.sge = wqe->sg_list[0];
		qp->s_sge.sg_list = wqe->sg_list + 1;
		qp->s_sge.num_sge = wqe->wr.num_sge;
		qp->s_sge.total_len = wqe->length;
		len = wqe->length;
		qp->s_len = len;
		switch (wqe->wr.opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
			if (len > pmtu) {
				qp->s_state = OP(SEND_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_SEND)
				qp->s_state = OP(SEND_ONLY);
			else {
				qp->s_state =
					OP(SEND_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the BTH */
				ohdr->u.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
			}
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
			qp->s_wqe = wqe;
			if (++qp->s_cur >= qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_WRITE:
		case IB_WR_RDMA_WRITE_WITH_IMM:
			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->rdma_wr.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			hwords += sizeof(struct ib_reth) / 4;
			if (len > pmtu) {
				qp->s_state = OP(RDMA_WRITE_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
				qp->s_state = OP(RDMA_WRITE_ONLY);
			else {
				qp->s_state =
					OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the RETH */
				ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
				if (wqe->wr.send_flags & IB_SEND_SOLICITED)
					bth0 |= IB_BTH_SOLICITED;
			}
			qp->s_wqe = wqe;
			if (++qp->s_cur >= qp->s_size)
				qp->s_cur = 0;
			break;

		default:
			goto bail;
		}
		break;

	case OP(SEND_FIRST):
		qp->s_state = OP(SEND_MIDDLE);
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_SEND)
			qp->s_state = OP(SEND_LAST);
		else {
			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
		}
		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
			bth0 |= IB_BTH_SOLICITED;
		qp->s_wqe = wqe;
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_WRITE_FIRST):
		qp->s_state = OP(RDMA_WRITE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
			qp->s_state = OP(RDMA_WRITE_LAST);
		else {
			qp->s_state =
				OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
		}
		qp->s_wqe = wqe;
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;
	}
	qp->s_len -= len;
	qp->s_hdrwords = hwords;
	qp->s_cur_sge = &qp->s_sge;
	qp->s_cur_size = len;
	qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24),
			    qp->s_psn++ & QIB_PSN_MASK);
done:
	return 1;
bail:
	qp->s_flags &= ~RVT_S_BUSY;
	return ret;
}

/**
 * qib_uc_rcv - handle an incoming UC packet
 * @ibp: the port the packet came in on
 * @hdr: the header of the packet
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the length of the packet
 * @qp: the QP for this packet.
 *
 * This is called from qib_qp_rcv() to process an incoming UC packet
 * for the given QP.
 * Called at interrupt level.
 */
void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
{
	struct qib_other_headers *ohdr;
	u32 opcode;
	u32 hdrsize;
	u32 psn;
	u32 pad;
	struct ib_wc wc;
	u32 pmtu = qp->pmtu;
	struct ib_reth *reth;
	int ret;

	/* Check for GRH */
	if (!has_grh) {
		ohdr = &hdr->u.oth;
		hdrsize = 8 + 12;       /* LRH + BTH */
	} else {
		ohdr = &hdr->u.l.oth;
		hdrsize = 8 + 40 + 12;  /* LRH + GRH + BTH */
	}

	opcode = be32_to_cpu(ohdr->bth[0]);
	if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
		return;

	psn = be32_to_cpu(ohdr->bth[2]);
	opcode >>= 24;

	/* Compare the PSN versus the expected PSN. */
	if (unlikely(qib_cmp24(psn, qp->r_psn) != 0)) {
		/*
		 * Handle a sequence error.
		 * Silently drop any current message.
		 */
		qp->r_psn = psn;
inv:
		if (qp->r_state == OP(SEND_FIRST) ||
		    qp->r_state == OP(SEND_MIDDLE)) {
			set_bit(RVT_R_REWIND_SGE, &qp->r_aflags);
			qp->r_sge.num_sge = 0;
		} else
			rvt_put_ss(&qp->r_sge);
		qp->r_state = OP(SEND_LAST);
		switch (opcode) {
		case OP(SEND_FIRST):
		case OP(SEND_ONLY):
		case OP(SEND_ONLY_WITH_IMMEDIATE):
			goto send_first;

		case OP(RDMA_WRITE_FIRST):
		case OP(RDMA_WRITE_ONLY):
		case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
			goto rdma_first;

		default:
			goto drop;
		}
	}

	/* Check for opcode sequence errors. */
	switch (qp->r_state) {
	case OP(SEND_FIRST):
	case OP(SEND_MIDDLE):
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE))
			break;
		goto inv;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_MIDDLE):
		if (opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			break;
		goto inv;

	default:
		if (opcode == OP(SEND_FIRST) ||
		    opcode == OP(SEND_ONLY) ||
		    opcode == OP(SEND_ONLY_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_FIRST) ||
		    opcode == OP(RDMA_WRITE_ONLY) ||
		    opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
			break;
		goto inv;
	}

	if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST)) {
		qp->r_flags |= RVT_R_COMM_EST;
		if (qp->ibqp.event_handler) {
			struct ib_event ev;

			ev.device = qp->ibqp.device;
			ev.element.qp = &qp->ibqp;
			ev.event = IB_EVENT_COMM_EST;
			qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
		}
	}

	/* OK, process the packet. */
	switch (opcode) {
	case OP(SEND_FIRST):
	case OP(SEND_ONLY):
	case OP(SEND_ONLY_WITH_IMMEDIATE):
send_first:
		if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
			qp->r_sge = qp->s_rdma_read_sge;
		else {
			ret = qib_get_rwqe(qp, 0);
			if (ret < 0)
				goto op_err;
			if (!ret)
				goto drop;
			/*
			 * qp->s_rdma_read_sge will be the owner
			 * of the mr references.
			 */
			qp->s_rdma_read_sge = qp->r_sge;
		}
		qp->r_rcv_len = 0;
		if (opcode == OP(SEND_ONLY))
			goto no_immediate_data;
		else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE))
			goto send_last_imm;
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		/* Check for invalid length PMTU or posted rwqe len. */
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto rewind;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto rewind;
		qib_copy_sge(&qp->r_sge, data, pmtu, 0);
		break;

	case OP(SEND_LAST_WITH_IMMEDIATE):
send_last_imm:
		wc.ex.imm_data = ohdr->u.imm_data;
		hdrsize += 4;
		wc.wc_flags = IB_WC_WITH_IMM;
		goto send_last;
	case OP(SEND_LAST):
no_immediate_data:
		wc.ex.imm_data = 0;
		wc.wc_flags = 0;
send_last:
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* XXX LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto rewind;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		wc.byte_len = tlen + qp->r_rcv_len;
		if (unlikely(wc.byte_len > qp->r_len))
			goto rewind;
		wc.opcode = IB_WC_RECV;
		qib_copy_sge(&qp->r_sge, data, tlen, 0);
		rvt_put_ss(&qp->s_rdma_read_sge);
last_imm:
		wc.wr_id = qp->r_wr_id;
		wc.status = IB_WC_SUCCESS;
		wc.qp = &qp->ibqp;
		wc.src_qp = qp->remote_qpn;
		wc.slid = qp->remote_ah_attr.dlid;
		wc.sl = qp->remote_ah_attr.sl;
		/* zero fields that are N/A */
		wc.vendor_err = 0;
		wc.pkey_index = 0;
		wc.dlid_path_bits = 0;
		wc.port_num = 0;
		/* Signal completion event if the solicited bit is set. */
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
			     (ohdr->bth[0] &
			      cpu_to_be32(IB_BTH_SOLICITED)) != 0);
		break;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_ONLY):
	case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): /* consume RWQE */
rdma_first:
		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_WRITE))) {
			goto drop;
		}
		reth = &ohdr->u.rc.reth;
		hdrsize += sizeof(*reth);
		qp->r_len = be32_to_cpu(reth->length);
		qp->r_rcv_len = 0;
		qp->r_sge.sg_list = NULL;
		if (qp->r_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			/* Check rkey */
			ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len,
					 vaddr, rkey, IB_ACCESS_REMOTE_WRITE);
			if (unlikely(!ok))
				goto drop;
			qp->r_sge.num_sge = 1;
		} else {
			qp->r_sge.num_sge = 0;
			qp->r_sge.sge.mr = NULL;
			qp->r_sge.sge.vaddr = NULL;
			qp->r_sge.sge.length = 0;
			qp->r_sge.sge.sge_length = 0;
		}
		if (opcode == OP(RDMA_WRITE_ONLY))
			goto rdma_last;
		else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE)) {
			wc.ex.imm_data = ohdr->u.rc.imm_data;
			goto rdma_last_imm;
		}
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		/* Check for invalid length PMTU or posted rwqe len. */
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto drop;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto drop;
		qib_copy_sge(&qp->r_sge, data, pmtu, 1);
		break;

	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
		wc.ex.imm_data = ohdr->u.imm_data;
rdma_last_imm:
		hdrsize += 4;
		wc.wc_flags = IB_WC_WITH_IMM;

		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* XXX LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto drop;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
			goto drop;
		if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
			rvt_put_ss(&qp->s_rdma_read_sge);
		else {
			ret = qib_get_rwqe(qp, 1);
			if (ret < 0)
				goto op_err;
			if (!ret)
				goto drop;
		}
		wc.byte_len = qp->r_len;
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
		qib_copy_sge(&qp->r_sge, data, tlen, 1);
		rvt_put_ss(&qp->r_sge);
		goto last_imm;

	case OP(RDMA_WRITE_LAST):
rdma_last:
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* XXX LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto drop;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
			goto drop;
		qib_copy_sge(&qp->r_sge, data, tlen, 1);
		rvt_put_ss(&qp->r_sge);
		break;

	default:
		/* Drop packet for unknown opcodes. */
		goto drop;
	}
	qp->r_psn++;
	qp->r_state = opcode;
	return;

rewind:
	set_bit(RVT_R_REWIND_SGE, &qp->r_aflags);
	qp->r_sge.num_sge = 0;
drop:
	ibp->rvp.n_pkt_drops++;
	return;

op_err:
	qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	return;
}