// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause

/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/* Copyright (c) 2008-2019, IBM Corporation */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/net.h>
#include <linux/scatterlist.h>
#include <linux/llist.h>
#include <asm/barrier.h>
#include <net/tcp.h>
#include <trace/events/sock.h>

#include "siw.h"
#include "siw_verbs.h"
#include "siw_mem.h"

static char siw_qp_state_to_string[SIW_QP_STATE_COUNT][sizeof "TERMINATE"] = {
	[SIW_QP_STATE_IDLE] = "IDLE",
	[SIW_QP_STATE_RTR] = "RTR",
	[SIW_QP_STATE_RTS] = "RTS",
	[SIW_QP_STATE_CLOSING] = "CLOSING",
	[SIW_QP_STATE_TERMINATE] = "TERMINATE",
	[SIW_QP_STATE_ERROR] = "ERROR"
};

/*
 * iWARP (RDMAP, DDP and MPA) parameters as well as Softiwarp settings on a
 * per-RDMAP message basis. Please keep the order of the initializers. All MPA
 * lengths are initialized to the minimum packet size.
 */
struct iwarp_msg_info iwarp_pktinfo[RDMAP_TERMINATE + 1] = {
	{ /* RDMAP_RDMA_WRITE */
	  .hdr_len = sizeof(struct iwarp_rdma_write),
	  .ctrl.mpa_len = htons(sizeof(struct iwarp_rdma_write) - 2),
	  .ctrl.ddp_rdmap_ctrl = DDP_FLAG_TAGGED | DDP_FLAG_LAST |
				 cpu_to_be16(DDP_VERSION << 8) |
				 cpu_to_be16(RDMAP_VERSION << 6) |
				 cpu_to_be16(RDMAP_RDMA_WRITE),
	  .rx_data = siw_proc_write },
	{ /* RDMAP_RDMA_READ_REQ */
	  .hdr_len = sizeof(struct iwarp_rdma_rreq),
	  .ctrl.mpa_len = htons(sizeof(struct iwarp_rdma_rreq) - 2),
	  .ctrl.ddp_rdmap_ctrl = DDP_FLAG_LAST | cpu_to_be16(DDP_VERSION << 8) |
				 cpu_to_be16(RDMAP_VERSION << 6) |
				 cpu_to_be16(RDMAP_RDMA_READ_REQ),
	  .rx_data = siw_proc_rreq },
	{ /* RDMAP_RDMA_READ_RESP */
	  .hdr_len = sizeof(struct iwarp_rdma_rresp),
	  .ctrl.mpa_len = htons(sizeof(struct iwarp_rdma_rresp) - 2),
	  .ctrl.ddp_rdmap_ctrl = DDP_FLAG_TAGGED | DDP_FLAG_LAST |
				 cpu_to_be16(DDP_VERSION << 8) |
				 cpu_to_be16(RDMAP_VERSION << 6) |
				 cpu_to_be16(RDMAP_RDMA_READ_RESP),
	  .rx_data = siw_proc_rresp },
	{ /* RDMAP_SEND */
	  .hdr_len = sizeof(struct iwarp_send),
	  .ctrl.mpa_len = htons(sizeof(struct iwarp_send) - 2),
	  .ctrl.ddp_rdmap_ctrl = DDP_FLAG_LAST | cpu_to_be16(DDP_VERSION << 8) |
				 cpu_to_be16(RDMAP_VERSION << 6) |
				 cpu_to_be16(RDMAP_SEND),
	  .rx_data = siw_proc_send },
	{ /* RDMAP_SEND_INVAL */
	  .hdr_len = sizeof(struct iwarp_send_inv),
	  .ctrl.mpa_len = htons(sizeof(struct iwarp_send_inv) - 2),
	  .ctrl.ddp_rdmap_ctrl = DDP_FLAG_LAST | cpu_to_be16(DDP_VERSION << 8) |
				 cpu_to_be16(RDMAP_VERSION << 6) |
				 cpu_to_be16(RDMAP_SEND_INVAL),
	  .rx_data = siw_proc_send },
	{ /* RDMAP_SEND_SE */
	  .hdr_len = sizeof(struct iwarp_send),
	  .ctrl.mpa_len = htons(sizeof(struct iwarp_send) - 2),
	  .ctrl.ddp_rdmap_ctrl = DDP_FLAG_LAST | cpu_to_be16(DDP_VERSION << 8) |
				 cpu_to_be16(RDMAP_VERSION << 6) |
				 cpu_to_be16(RDMAP_SEND_SE),
	  .rx_data = siw_proc_send },
	{ /* RDMAP_SEND_SE_INVAL */
	  .hdr_len = sizeof(struct iwarp_send_inv),
	  .ctrl.mpa_len = htons(sizeof(struct iwarp_send_inv) - 2),
	  .ctrl.ddp_rdmap_ctrl = DDP_FLAG_LAST | cpu_to_be16(DDP_VERSION << 8) |
				 cpu_to_be16(RDMAP_VERSION << 6) |
				 cpu_to_be16(RDMAP_SEND_SE_INVAL),
	  .rx_data = siw_proc_send },
	{ /* RDMAP_TERMINATE */
	  .hdr_len = sizeof(struct iwarp_terminate),
	  .ctrl.mpa_len = htons(sizeof(struct iwarp_terminate) - 2),
	  .ctrl.ddp_rdmap_ctrl = DDP_FLAG_LAST | cpu_to_be16(DDP_VERSION << 8) |
				 cpu_to_be16(RDMAP_VERSION << 6) |
				 cpu_to_be16(RDMAP_TERMINATE),
	  .rx_data = siw_proc_terminate }
};

void siw_qp_llp_data_ready(struct sock *sk)
{
	struct siw_qp *qp;

	trace_sk_data_ready(sk);

	read_lock(&sk->sk_callback_lock);

	if (unlikely(!sk->sk_user_data || !sk_to_qp(sk)))
		goto done;

	qp = sk_to_qp(sk);

	if (likely(!qp->rx_stream.rx_suspend &&
		   down_read_trylock(&qp->state_lock))) {
		read_descriptor_t rd_desc = { .arg.data = qp, .count = 1 };

		if (likely(qp->attrs.state == SIW_QP_STATE_RTS))
			/*
			 * Implements data receive operation during
			 * socket callback. TCP gracefully catches
			 * the case where there is nothing to receive
			 * (not calling siw_tcp_rx_data() then).
			 */
			tcp_read_sock(sk, &rd_desc, siw_tcp_rx_data);

		up_read(&qp->state_lock);
	} else {
		siw_dbg_qp(qp, "unable to process RX, suspend: %d\n",
			   qp->rx_stream.rx_suspend);
	}
done:
	read_unlock(&sk->sk_callback_lock);
}

void siw_qp_llp_close(struct siw_qp *qp)
{
	siw_dbg_qp(qp, "enter llp close, state = %s\n",
		   siw_qp_state_to_string[qp->attrs.state]);

	down_write(&qp->state_lock);

	qp->rx_stream.rx_suspend = 1;
	qp->tx_ctx.tx_suspend = 1;
	qp->attrs.sk = NULL;

	switch (qp->attrs.state) {
	case SIW_QP_STATE_RTS:
	case SIW_QP_STATE_RTR:
	case SIW_QP_STATE_IDLE:
	case SIW_QP_STATE_TERMINATE:
		qp->attrs.state = SIW_QP_STATE_ERROR;
		break;
	/*
	 * SIW_QP_STATE_CLOSING:
	 *
	 * This is a forced close. Shall the QP be moved to
	 * ERROR or IDLE?
	 */
	case SIW_QP_STATE_CLOSING:
		if (tx_wqe(qp)->wr_status == SIW_WR_IDLE)
			qp->attrs.state = SIW_QP_STATE_ERROR;
		else
			qp->attrs.state = SIW_QP_STATE_IDLE;
		break;

	default:
		siw_dbg_qp(qp, "llp close: no state transition needed: %s\n",
			   siw_qp_state_to_string[qp->attrs.state]);
		break;
	}
	siw_sq_flush(qp);
	siw_rq_flush(qp);

	/*
	 * Dereference closing CEP
	 */
	if (qp->cep) {
		siw_cep_put(qp->cep);
		qp->cep = NULL;
	}

	up_write(&qp->state_lock);

	siw_dbg_qp(qp, "llp close exit: state %s\n",
		   siw_qp_state_to_string[qp->attrs.state]);
}

/*
 * Socket callback routine informing about newly available send space.
 * Function schedules SQ work for processing SQ items.
 */
void siw_qp_llp_write_space(struct sock *sk)
{
	struct siw_cep *cep;

	read_lock(&sk->sk_callback_lock);

	cep = sk_to_cep(sk);
	if (cep) {
		cep->sk_write_space(sk);

		if (!test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
			(void)siw_sq_start(cep->qp);
	}

	read_unlock(&sk->sk_callback_lock);
}

static int siw_qp_readq_init(struct siw_qp *qp, int irq_size, int orq_size)
{
	if (irq_size) {
		irq_size = roundup_pow_of_two(irq_size);
		qp->irq = vcalloc(irq_size, sizeof(struct siw_sqe));
		if (!qp->irq) {
			qp->attrs.irq_size = 0;
			return -ENOMEM;
		}
	}
	if (orq_size) {
		orq_size = roundup_pow_of_two(orq_size);
		qp->orq = vcalloc(orq_size, sizeof(struct siw_sqe));
		if (!qp->orq) {
			qp->attrs.orq_size = 0;
			qp->attrs.irq_size = 0;
			vfree(qp->irq);
			return -ENOMEM;
		}
	}
	qp->attrs.irq_size = irq_size;
	qp->attrs.orq_size = orq_size;
	siw_dbg_qp(qp, "ORD %d, IRD %d\n", orq_size, irq_size);
	return 0;
}

static int siw_qp_enable_crc(struct siw_qp *qp)
{
	struct siw_rx_stream *c_rx = &qp->rx_stream;
	struct siw_iwarp_tx *c_tx = &qp->tx_ctx;
	int size;

	if (siw_crypto_shash == NULL)
		return -ENOENT;

	size = crypto_shash_descsize(siw_crypto_shash) +
		sizeof(struct shash_desc);

	c_tx->mpa_crc_hd = kzalloc(size, GFP_KERNEL);
	c_rx->mpa_crc_hd = kzalloc(size, GFP_KERNEL);
	if (!c_tx->mpa_crc_hd || !c_rx->mpa_crc_hd) {
		kfree(c_tx->mpa_crc_hd);
		kfree(c_rx->mpa_crc_hd);
		c_tx->mpa_crc_hd = NULL;
		c_rx->mpa_crc_hd = NULL;
		return -ENOMEM;
	}
	c_tx->mpa_crc_hd->tfm = siw_crypto_shash;
	c_rx->mpa_crc_hd->tfm = siw_crypto_shash;

	return 0;
}

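/*
 * Note on the two helpers above: siw_qp_readq_init() rounds the requested
 * inbound/outbound read queue sizes up to a power of two before allocating
 * the IRQ/ORQ arrays with vcalloc(), matching the "counter % size" ring
 * indexing used throughout this file. siw_qp_enable_crc() allocates one
 * buffer per direction, sized for a struct shash_desc plus the descriptor
 * state reported by crypto_shash_descsize(), and points both descriptors
 * at the shared siw_crypto_shash transform.
 */
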
/*
 * Send a non-signalled READ or WRITE to peer side as negotiated
 * with MPAv2 P2P setup protocol. The work request is only created
 * as a current active WR and does not consume Send Queue space.
 *
 * Caller must hold QP state lock.
 */
int siw_qp_mpa_rts(struct siw_qp *qp, enum mpa_v2_ctrl ctrl)
{
	struct siw_wqe *wqe = tx_wqe(qp);
	unsigned long flags;
	int rv = 0;

	spin_lock_irqsave(&qp->sq_lock, flags);

	if (unlikely(wqe->wr_status != SIW_WR_IDLE)) {
		spin_unlock_irqrestore(&qp->sq_lock, flags);
		return -EIO;
	}
	memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE);

	wqe->wr_status = SIW_WR_QUEUED;
	wqe->sqe.flags = 0;
	wqe->sqe.num_sge = 1;
	wqe->sqe.sge[0].length = 0;
	wqe->sqe.sge[0].laddr = 0;
	wqe->sqe.sge[0].lkey = 0;
	/*
	 * While it must not be checked for inbound zero length
	 * READ/WRITE, some HW may treat STag 0 specially.
	 */
	wqe->sqe.rkey = 1;
	wqe->sqe.raddr = 0;
	wqe->processed = 0;

	if (ctrl & MPA_V2_RDMA_WRITE_RTR)
		wqe->sqe.opcode = SIW_OP_WRITE;
	else if (ctrl & MPA_V2_RDMA_READ_RTR) {
		struct siw_sqe *rreq = NULL;

		wqe->sqe.opcode = SIW_OP_READ;

		spin_lock(&qp->orq_lock);

		if (qp->attrs.orq_size)
			rreq = orq_get_free(qp);
		if (rreq) {
			siw_read_to_orq(rreq, &wqe->sqe);
			qp->orq_put++;
		} else
			rv = -EIO;

		spin_unlock(&qp->orq_lock);
	} else
		rv = -EINVAL;

	if (rv)
		wqe->wr_status = SIW_WR_IDLE;

	spin_unlock_irqrestore(&qp->sq_lock, flags);

	if (!rv)
		rv = siw_sq_start(qp);

	return rv;
}

/*
 * Map memory access error to DDP tagged error
 */
enum ddp_ecode siw_tagged_error(enum siw_access_state state)
{
	switch (state) {
	case E_STAG_INVALID:
		return DDP_ECODE_T_INVALID_STAG;
	case E_BASE_BOUNDS:
		return DDP_ECODE_T_BASE_BOUNDS;
	case E_PD_MISMATCH:
		return DDP_ECODE_T_STAG_NOT_ASSOC;
	case E_ACCESS_PERM:
		/*
		 * RFC 5041 (DDP) lacks an ecode for insufficient access
		 * permissions. 'Invalid STag' seems to be the closest
		 * match though.
		 */
		return DDP_ECODE_T_INVALID_STAG;
	default:
		WARN_ON(1);
		return DDP_ECODE_T_INVALID_STAG;
	}
}

/*
 * Map memory access error to RDMAP protection error
 */
enum rdmap_ecode siw_rdmap_error(enum siw_access_state state)
{
	switch (state) {
	case E_STAG_INVALID:
		return RDMAP_ECODE_INVALID_STAG;
	case E_BASE_BOUNDS:
		return RDMAP_ECODE_BASE_BOUNDS;
	case E_PD_MISMATCH:
		return RDMAP_ECODE_STAG_NOT_ASSOC;
	case E_ACCESS_PERM:
		return RDMAP_ECODE_ACCESS_RIGHTS;
	default:
		return RDMAP_ECODE_UNSPECIFIED;
	}
}

void siw_init_terminate(struct siw_qp *qp, enum term_elayer layer, u8 etype,
			u8 ecode, int in_tx)
{
	if (!qp->term_info.valid) {
		memset(&qp->term_info, 0, sizeof(qp->term_info));
		qp->term_info.layer = layer;
		qp->term_info.etype = etype;
		qp->term_info.ecode = ecode;
		qp->term_info.in_tx = in_tx;
		qp->term_info.valid = 1;
	}
	siw_dbg_qp(qp, "init TERM: layer %d, type %d, code %d, in tx %s\n",
		   layer, etype, ecode, in_tx ? "yes" : "no");
}

/*
 * Send a TERMINATE message, as defined in RFCs 5040/5041/5044/6581.
 * Sending TERMINATE messages is best effort - such messages
 * can only be sent if the QP is still connected and it does
 * not have another outbound message in-progress, i.e. the
 * TERMINATE message must not interfere with an incomplete current
 * transmit operation.
 */
void siw_send_terminate(struct siw_qp *qp)
{
	struct kvec iov[3];
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_EOR };
	struct iwarp_terminate *term = NULL;
	union iwarp_hdr *err_hdr = NULL;
	struct socket *s = qp->attrs.sk;
	struct siw_rx_stream *srx = &qp->rx_stream;
	union iwarp_hdr *rx_hdr = &srx->hdr;
	u32 crc = 0;
	int num_frags, len_terminate, rv;

	if (!qp->term_info.valid)
		return;

	qp->term_info.valid = 0;

	if (tx_wqe(qp)->wr_status == SIW_WR_INPROGRESS) {
		siw_dbg_qp(qp, "cannot send TERMINATE: op %d in progress\n",
			   tx_type(tx_wqe(qp)));
		return;
	}
	if (!s && qp->cep)
		/* QP not yet in RTS. Take socket from connection end point */
		s = qp->cep->sock;

	if (!s) {
		siw_dbg_qp(qp, "cannot send TERMINATE: not connected\n");
		return;
	}

	term = kzalloc(sizeof(*term), GFP_KERNEL);
	if (!term)
		return;

	term->ddp_qn = cpu_to_be32(RDMAP_UNTAGGED_QN_TERMINATE);
	term->ddp_mo = 0;
	term->ddp_msn = cpu_to_be32(1);

	iov[0].iov_base = term;
	iov[0].iov_len = sizeof(*term);

	if ((qp->term_info.layer == TERM_ERROR_LAYER_DDP) ||
	    ((qp->term_info.layer == TERM_ERROR_LAYER_RDMAP) &&
	     (qp->term_info.etype != RDMAP_ETYPE_CATASTROPHIC))) {
		err_hdr = kzalloc(sizeof(*err_hdr), GFP_KERNEL);
		if (!err_hdr) {
			kfree(term);
			return;
		}
	}
	memcpy(&term->ctrl, &iwarp_pktinfo[RDMAP_TERMINATE].ctrl,
	       sizeof(struct iwarp_ctrl));

	__rdmap_term_set_layer(term, qp->term_info.layer);
	__rdmap_term_set_etype(term, qp->term_info.etype);
	__rdmap_term_set_ecode(term, qp->term_info.ecode);

	switch (qp->term_info.layer) {
	case TERM_ERROR_LAYER_RDMAP:
		if (qp->term_info.etype == RDMAP_ETYPE_CATASTROPHIC)
			/* No additional DDP/RDMAP header to be included */
			break;

		if (qp->term_info.etype == RDMAP_ETYPE_REMOTE_PROTECTION) {
			/*
			 * Complete RDMAP frame will get attached, and
			 * DDP segment length is valid
			 */
			term->flag_m = 1;
			term->flag_d = 1;
			term->flag_r = 1;

			if (qp->term_info.in_tx) {
				struct iwarp_rdma_rreq *rreq;
				struct siw_wqe *wqe = tx_wqe(qp);

				/* Inbound RREQ error, detected during
				 * RRESP creation. Take state from
				 * current TX work queue element to
				 * reconstruct the peer's RREQ.
				 */
				rreq = (struct iwarp_rdma_rreq *)err_hdr;

				memcpy(&rreq->ctrl,
				       &iwarp_pktinfo[RDMAP_RDMA_READ_REQ].ctrl,
				       sizeof(struct iwarp_ctrl));

				rreq->rsvd = 0;
				rreq->ddp_qn =
					htonl(RDMAP_UNTAGGED_QN_RDMA_READ);

				/* Provide RREQ's MSN as kept aside */
				rreq->ddp_msn = htonl(wqe->sqe.sge[0].length);

				rreq->ddp_mo = htonl(wqe->processed);
				rreq->sink_stag = htonl(wqe->sqe.rkey);
				rreq->sink_to = cpu_to_be64(wqe->sqe.raddr);
				rreq->read_size = htonl(wqe->sqe.sge[0].length);
				rreq->source_stag = htonl(wqe->sqe.sge[0].lkey);
				rreq->source_to =
					cpu_to_be64(wqe->sqe.sge[0].laddr);

				iov[1].iov_base = rreq;
				iov[1].iov_len = sizeof(*rreq);

				rx_hdr = (union iwarp_hdr *)rreq;
			} else {
				/* Take RDMAP/DDP information from
				 * current (failed) inbound frame.
				 */
				iov[1].iov_base = rx_hdr;

				if (__rdmap_get_opcode(&rx_hdr->ctrl) ==
				    RDMAP_RDMA_READ_REQ)
					iov[1].iov_len =
						sizeof(struct iwarp_rdma_rreq);
				else /* SEND type */
					iov[1].iov_len =
						sizeof(struct iwarp_send);
			}
		} else {
			/* Do not report DDP hdr information if packet
			 * layout is unknown
			 */
			if ((qp->term_info.ecode == RDMAP_ECODE_VERSION) ||
			    (qp->term_info.ecode == RDMAP_ECODE_OPCODE))
				break;

			iov[1].iov_base = rx_hdr;

			/* Only DDP frame will get attached */
			if (rx_hdr->ctrl.ddp_rdmap_ctrl & DDP_FLAG_TAGGED)
				iov[1].iov_len =
					sizeof(struct iwarp_rdma_write);
			else
				iov[1].iov_len = sizeof(struct iwarp_send);

			term->flag_m = 1;
			term->flag_d = 1;
		}
		term->ctrl.mpa_len = cpu_to_be16(iov[1].iov_len);
		break;

	case TERM_ERROR_LAYER_DDP:
		/* Report error encountered while DDP processing.
		 * This can only happen as a result of inbound
		 * DDP processing
		 */

		/* Do not report DDP hdr information if packet
		 * layout is unknown
		 */
		if (((qp->term_info.etype == DDP_ETYPE_TAGGED_BUF) &&
		     (qp->term_info.ecode == DDP_ECODE_T_VERSION)) ||
		    ((qp->term_info.etype == DDP_ETYPE_UNTAGGED_BUF) &&
		     (qp->term_info.ecode == DDP_ECODE_UT_VERSION)))
			break;

		iov[1].iov_base = rx_hdr;

		if (rx_hdr->ctrl.ddp_rdmap_ctrl & DDP_FLAG_TAGGED)
			iov[1].iov_len = sizeof(struct iwarp_ctrl_tagged);
		else
			iov[1].iov_len = sizeof(struct iwarp_ctrl_untagged);

		term->flag_m = 1;
		term->flag_d = 1;
		break;

	default:
		break;
	}
	if (term->flag_m || term->flag_d || term->flag_r) {
		iov[2].iov_base = &crc;
		iov[2].iov_len = sizeof(crc);
		len_terminate = sizeof(*term) + iov[1].iov_len + MPA_CRC_SIZE;
		num_frags = 3;
	} else {
		iov[1].iov_base = &crc;
		iov[1].iov_len = sizeof(crc);
		len_terminate = sizeof(*term) + MPA_CRC_SIZE;
		num_frags = 2;
	}

	/* Adjust DDP Segment Length parameter, if valid */
	if (term->flag_m) {
		u32 real_ddp_len = be16_to_cpu(rx_hdr->ctrl.mpa_len);
		enum rdma_opcode op = __rdmap_get_opcode(&rx_hdr->ctrl);

		real_ddp_len -= iwarp_pktinfo[op].hdr_len - MPA_HDR_SIZE;
		rx_hdr->ctrl.mpa_len = cpu_to_be16(real_ddp_len);
	}

	term->ctrl.mpa_len =
		cpu_to_be16(len_terminate - (MPA_HDR_SIZE + MPA_CRC_SIZE));
	if (qp->tx_ctx.mpa_crc_hd) {
		crypto_shash_init(qp->tx_ctx.mpa_crc_hd);
		if (crypto_shash_update(qp->tx_ctx.mpa_crc_hd,
					(u8 *)iov[0].iov_base,
					iov[0].iov_len))
			goto out;

		if (num_frags == 3) {
			if (crypto_shash_update(qp->tx_ctx.mpa_crc_hd,
						(u8 *)iov[1].iov_base,
						iov[1].iov_len))
				goto out;
		}
		crypto_shash_final(qp->tx_ctx.mpa_crc_hd, (u8 *)&crc);
	}

	rv = kernel_sendmsg(s, &msg, iov, num_frags, len_terminate);
	siw_dbg_qp(qp, "sent TERM: %s, layer %d, type %d, code %d (%d bytes)\n",
		   rv == len_terminate ? "success" : "failure",
		   __rdmap_term_layer(term), __rdmap_term_etype(term),
		   __rdmap_term_ecode(term), rv);
out:
	kfree(term);
	kfree(err_hdr);
}

/*
 * Handle all attrs other than state
 */
static void siw_qp_modify_nonstate(struct siw_qp *qp,
				   struct siw_qp_attrs *attrs,
				   enum siw_qp_attr_mask mask)
{
	if (mask & SIW_QP_ATTR_ACCESS_FLAGS) {
		if (attrs->flags & SIW_RDMA_BIND_ENABLED)
			qp->attrs.flags |= SIW_RDMA_BIND_ENABLED;
		else
			qp->attrs.flags &= ~SIW_RDMA_BIND_ENABLED;

		if (attrs->flags & SIW_RDMA_WRITE_ENABLED)
			qp->attrs.flags |= SIW_RDMA_WRITE_ENABLED;
		else
			qp->attrs.flags &= ~SIW_RDMA_WRITE_ENABLED;

		if (attrs->flags & SIW_RDMA_READ_ENABLED)
			qp->attrs.flags |= SIW_RDMA_READ_ENABLED;
		else
			qp->attrs.flags &= ~SIW_RDMA_READ_ENABLED;
	}
}

static int siw_qp_nextstate_from_idle(struct siw_qp *qp,
				      struct siw_qp_attrs *attrs,
				      enum siw_qp_attr_mask mask)
{
	int rv = 0;

	switch (attrs->state) {
	case SIW_QP_STATE_RTS:
		if (attrs->flags & SIW_MPA_CRC) {
			rv = siw_qp_enable_crc(qp);
			if (rv)
				break;
		}
		if (!(mask & SIW_QP_ATTR_LLP_HANDLE)) {
			siw_dbg_qp(qp, "no socket\n");
			rv = -EINVAL;
			break;
		}
		if (!(mask & SIW_QP_ATTR_MPA)) {
			siw_dbg_qp(qp, "no MPA\n");
			rv = -EINVAL;
			break;
		}
		/*
		 * Initialize iWARP TX state
		 */
		qp->tx_ctx.ddp_msn[RDMAP_UNTAGGED_QN_SEND] = 0;
		qp->tx_ctx.ddp_msn[RDMAP_UNTAGGED_QN_RDMA_READ] = 0;
		qp->tx_ctx.ddp_msn[RDMAP_UNTAGGED_QN_TERMINATE] = 0;

		/*
		 * Initialize iWARP RX state
		 */
		qp->rx_stream.ddp_msn[RDMAP_UNTAGGED_QN_SEND] = 1;
		qp->rx_stream.ddp_msn[RDMAP_UNTAGGED_QN_RDMA_READ] = 1;
		qp->rx_stream.ddp_msn[RDMAP_UNTAGGED_QN_TERMINATE] = 1;

		/*
		 * Init IRD free queue, caller has already checked
		 * limits.
		 */
		rv = siw_qp_readq_init(qp, attrs->irq_size,
				       attrs->orq_size);
		if (rv)
			break;

		qp->attrs.sk = attrs->sk;
		qp->attrs.state = SIW_QP_STATE_RTS;

		siw_dbg_qp(qp, "enter RTS: crc=%s, ord=%u, ird=%u\n",
			   attrs->flags & SIW_MPA_CRC ? "y" : "n",
			   qp->attrs.orq_size, qp->attrs.irq_size);
		break;

	case SIW_QP_STATE_ERROR:
		siw_rq_flush(qp);
		qp->attrs.state = SIW_QP_STATE_ERROR;
		if (qp->cep) {
			siw_cep_put(qp->cep);
			qp->cep = NULL;
		}
		break;

	default:
		break;
	}
	return rv;
}

static int siw_qp_nextstate_from_rts(struct siw_qp *qp,
				     struct siw_qp_attrs *attrs)
{
	int drop_conn = 0;

	switch (attrs->state) {
	case SIW_QP_STATE_CLOSING:
		/*
		 * Verbs: move to IDLE if SQ and ORQ are empty.
		 * Move to ERROR otherwise. But first of all we must
		 * close the connection. So we keep CLOSING or ERROR
		 * as a transient state, schedule connection drop work
		 * and wait for the socket state change upcall to
		 * come back closed.
		 */
		if (tx_wqe(qp)->wr_status == SIW_WR_IDLE) {
			qp->attrs.state = SIW_QP_STATE_CLOSING;
		} else {
			qp->attrs.state = SIW_QP_STATE_ERROR;
			siw_sq_flush(qp);
		}
		siw_rq_flush(qp);

		drop_conn = 1;
		break;

	case SIW_QP_STATE_TERMINATE:
		qp->attrs.state = SIW_QP_STATE_TERMINATE;

		siw_init_terminate(qp, TERM_ERROR_LAYER_RDMAP,
				   RDMAP_ETYPE_CATASTROPHIC,
				   RDMAP_ECODE_UNSPECIFIED, 1);
		drop_conn = 1;
		break;

	case SIW_QP_STATE_ERROR:
		/*
		 * This is an emergency close.
		 *
		 * Any in progress transmit operation will get
		 * cancelled.
		 * This will likely result in a protocol failure,
		 * if a TX operation is in transit. The caller
		 * could unconditionally wait to give the current
		 * operation a chance to complete.
		 * Esp., how to handle the non-empty IRQ case?
		 * The peer was asking for data transfer at a valid
		 * point in time.
		 */
		siw_sq_flush(qp);
		siw_rq_flush(qp);
		qp->attrs.state = SIW_QP_STATE_ERROR;
		drop_conn = 1;
		break;

	default:
		break;
	}
	return drop_conn;
}

static void siw_qp_nextstate_from_term(struct siw_qp *qp,
				       struct siw_qp_attrs *attrs)
{
	switch (attrs->state) {
	case SIW_QP_STATE_ERROR:
		siw_rq_flush(qp);
		qp->attrs.state = SIW_QP_STATE_ERROR;

		if (tx_wqe(qp)->wr_status != SIW_WR_IDLE)
			siw_sq_flush(qp);
		break;

	default:
		break;
	}
}

static int siw_qp_nextstate_from_close(struct siw_qp *qp,
				       struct siw_qp_attrs *attrs)
{
	int rv = 0;

	switch (attrs->state) {
	case SIW_QP_STATE_IDLE:
		WARN_ON(tx_wqe(qp)->wr_status != SIW_WR_IDLE);
		qp->attrs.state = SIW_QP_STATE_IDLE;
		break;

	case SIW_QP_STATE_CLOSING:
		/*
		 * The LLP may have already moved the QP to closing
		 * due to graceful peer close init
		 */
		break;

	case SIW_QP_STATE_ERROR:
		/*
		 * QP was moved to CLOSING by LLP event
		 * not yet seen by user.
		 */
		qp->attrs.state = SIW_QP_STATE_ERROR;

		if (tx_wqe(qp)->wr_status != SIW_WR_IDLE)
			siw_sq_flush(qp);

		siw_rq_flush(qp);
		break;

	default:
		siw_dbg_qp(qp, "state transition undefined: %s => %s\n",
			   siw_qp_state_to_string[qp->attrs.state],
			   siw_qp_state_to_string[attrs->state]);

		rv = -ECONNABORTED;
	}
	return rv;
}

/*
 * Caller must hold qp->state_lock
 */
int siw_qp_modify(struct siw_qp *qp, struct siw_qp_attrs *attrs,
		  enum siw_qp_attr_mask mask)
{
	int drop_conn = 0, rv = 0;

	if (!mask)
		return 0;

	siw_dbg_qp(qp, "state: %s => %s\n",
		   siw_qp_state_to_string[qp->attrs.state],
		   siw_qp_state_to_string[attrs->state]);

	if (mask != SIW_QP_ATTR_STATE)
		siw_qp_modify_nonstate(qp, attrs, mask);

	if (!(mask & SIW_QP_ATTR_STATE))
		return 0;

	switch (qp->attrs.state) {
	case SIW_QP_STATE_IDLE:
	case SIW_QP_STATE_RTR:
		rv = siw_qp_nextstate_from_idle(qp, attrs, mask);
		break;

	case SIW_QP_STATE_RTS:
		drop_conn = siw_qp_nextstate_from_rts(qp, attrs);
		break;

	case SIW_QP_STATE_TERMINATE:
		siw_qp_nextstate_from_term(qp, attrs);
		break;

	case SIW_QP_STATE_CLOSING:
		siw_qp_nextstate_from_close(qp, attrs);
		break;
	default:
		break;
	}
	if (drop_conn)
		siw_qp_cm_drop(qp, 0);

	return rv;
}

void siw_read_to_orq(struct siw_sqe *rreq, struct siw_sqe *sqe)
{
	rreq->id = sqe->id;
	rreq->opcode = sqe->opcode;
	rreq->sge[0].laddr = sqe->sge[0].laddr;
	rreq->sge[0].length = sqe->sge[0].length;
	rreq->sge[0].lkey = sqe->sge[0].lkey;
	rreq->sge[1].lkey = sqe->sge[1].lkey;
	rreq->flags = sqe->flags | SIW_WQE_VALID;
	rreq->num_sge = 1;
}

static int siw_activate_tx_from_sq(struct siw_qp *qp)
{
	struct siw_sqe *sqe;
	struct siw_wqe *wqe = tx_wqe(qp);
	int rv = 1;

	sqe = sq_get_next(qp);
	if (!sqe)
		return 0;

	memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE);
	wqe->wr_status = SIW_WR_QUEUED;

	/* First copy SQE to kernel private memory */
	memcpy(&wqe->sqe, sqe, sizeof(*sqe));

	if (wqe->sqe.opcode >= SIW_NUM_OPCODES) {
		rv = -EINVAL;
		goto out;
	}
	if (wqe->sqe.flags & SIW_WQE_INLINE) {
		if (wqe->sqe.opcode != SIW_OP_SEND &&
		    wqe->sqe.opcode != SIW_OP_WRITE) {
			rv = -EINVAL;
			goto out;
		}
		if (wqe->sqe.sge[0].length > SIW_MAX_INLINE) {
			rv = -EINVAL;
			goto out;
		}
		wqe->sqe.sge[0].laddr = (uintptr_t)&wqe->sqe.sge[1];
		wqe->sqe.sge[0].lkey = 0;
		wqe->sqe.num_sge = 1;
	}
	if (wqe->sqe.flags & SIW_WQE_READ_FENCE) {
		/* A READ cannot be fenced */
		if (unlikely(wqe->sqe.opcode == SIW_OP_READ ||
			     wqe->sqe.opcode ==
				     SIW_OP_READ_LOCAL_INV)) {
			siw_dbg_qp(qp, "cannot fence read\n");
			rv = -EINVAL;
			goto out;
		}
		spin_lock(&qp->orq_lock);

		if (qp->attrs.orq_size && !siw_orq_empty(qp)) {
			qp->tx_ctx.orq_fence = 1;
			rv = 0;
		}
		spin_unlock(&qp->orq_lock);

	} else if (wqe->sqe.opcode == SIW_OP_READ ||
		   wqe->sqe.opcode == SIW_OP_READ_LOCAL_INV) {
		struct siw_sqe *rreq;

		if (unlikely(!qp->attrs.orq_size)) {
			/* We negotiated not to send READ req's */
			rv = -EINVAL;
			goto out;
		}
		wqe->sqe.num_sge = 1;

		spin_lock(&qp->orq_lock);

		rreq = orq_get_free(qp);
		if (rreq) {
			/*
			 * Make an immediate copy in ORQ to be ready
			 * to process loopback READ reply
			 */
			siw_read_to_orq(rreq, &wqe->sqe);
			qp->orq_put++;
		} else {
			qp->tx_ctx.orq_fence = 1;
			rv = 0;
		}
		spin_unlock(&qp->orq_lock);
	}

	/* Clear SQE, can be re-used by application */
	smp_store_mb(sqe->flags, 0);
	qp->sq_get++;
out:
	if (unlikely(rv < 0)) {
		siw_dbg_qp(qp, "error %d\n", rv);
		wqe->wr_status = SIW_WR_IDLE;
	}
	return rv;
}

/*
 * Must be called with SQ locked.
 * To avoid complete SQ starvation by constant inbound READ requests,
 * the active IRQ will not be served after qp->irq_burst, if the
 * SQ has pending work.
 */
int siw_activate_tx(struct siw_qp *qp)
{
	struct siw_sqe *irqe;
	struct siw_wqe *wqe = tx_wqe(qp);

	if (!qp->attrs.irq_size)
		return siw_activate_tx_from_sq(qp);

	irqe = &qp->irq[qp->irq_get % qp->attrs.irq_size];

	if (!(irqe->flags & SIW_WQE_VALID))
		return siw_activate_tx_from_sq(qp);

	/*
	 * Avoid local WQE processing starvation in case
	 * of constant inbound READ request stream
	 */
	if (sq_get_next(qp) && ++qp->irq_burst >= SIW_IRQ_MAXBURST_SQ_ACTIVE) {
		qp->irq_burst = 0;
		return siw_activate_tx_from_sq(qp);
	}
	memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE);
	wqe->wr_status = SIW_WR_QUEUED;

	/* start READ RESPONSE */
	wqe->sqe.opcode = SIW_OP_READ_RESPONSE;
	wqe->sqe.flags = 0;
	if (irqe->num_sge) {
		wqe->sqe.num_sge = 1;
		wqe->sqe.sge[0].length = irqe->sge[0].length;
		wqe->sqe.sge[0].laddr = irqe->sge[0].laddr;
		wqe->sqe.sge[0].lkey = irqe->sge[0].lkey;
	} else {
		wqe->sqe.num_sge = 0;
	}

	/* Retain original RREQ's message sequence number for
	 * potential error reporting cases.
	 */
	wqe->sqe.sge[1].length = irqe->sge[1].length;

	wqe->sqe.rkey = irqe->rkey;
	wqe->sqe.raddr = irqe->raddr;

	wqe->processed = 0;
	qp->irq_get++;

	/* mark current IRQ entry free */
	smp_store_mb(irqe->flags, 0);

	return 1;
}

/*
 * Check if current CQ state qualifies for calling CQ completion
 * handler. Must be called with CQ lock held.
 */
static bool siw_cq_notify_now(struct siw_cq *cq, u32 flags)
{
	u32 cq_notify;

	if (!cq->base_cq.comp_handler)
		return false;

	/* Read application shared notification state */
	cq_notify = READ_ONCE(cq->notify->flags);

	if ((cq_notify & SIW_NOTIFY_NEXT_COMPLETION) ||
	    ((cq_notify & SIW_NOTIFY_SOLICITED) &&
	     (flags & SIW_WQE_SOLICITED))) {
		/*
		 * CQ notification is one-shot: Since the
		 * current CQE causes user notification,
		 * the CQ gets disarmed and must be re-armed
		 * by the user for a new notification.
		 */
		WRITE_ONCE(cq->notify->flags, SIW_NOTIFY_NOT);

		return true;
	}
	return false;
}

int siw_sqe_complete(struct siw_qp *qp, struct siw_sqe *sqe, u32 bytes,
		     enum siw_wc_status status)
{
	struct siw_cq *cq = qp->scq;
	int rv = 0;

	if (cq) {
		u32 sqe_flags = sqe->flags;
		struct siw_cqe *cqe;
		u32 idx;
		unsigned long flags;

		spin_lock_irqsave(&cq->lock, flags);

		idx = cq->cq_put % cq->num_cqe;
		cqe = &cq->queue[idx];

		if (!READ_ONCE(cqe->flags)) {
			bool notify;

			cqe->id = sqe->id;
			cqe->opcode = sqe->opcode;
			cqe->status = status;
			cqe->imm_data = 0;
			cqe->bytes = bytes;

			if (rdma_is_kernel_res(&cq->base_cq.res))
				cqe->base_qp = &qp->base_qp;
			else
				cqe->qp_id = qp_id(qp);

			/* mark CQE valid for application */
			WRITE_ONCE(cqe->flags, SIW_WQE_VALID);
			/* recycle SQE */
			smp_store_mb(sqe->flags, 0);

			cq->cq_put++;
			notify = siw_cq_notify_now(cq, sqe_flags);

			spin_unlock_irqrestore(&cq->lock, flags);

			if (notify) {
				siw_dbg_cq(cq, "Call completion handler\n");
				cq->base_cq.comp_handler(&cq->base_cq,
						cq->base_cq.cq_context);
			}
		} else {
			spin_unlock_irqrestore(&cq->lock, flags);
			rv = -ENOMEM;
			siw_cq_event(cq, IB_EVENT_CQ_ERR);
		}
	} else {
		/* recycle SQE */
		smp_store_mb(sqe->flags, 0);
	}
	return rv;
}

int siw_rqe_complete(struct siw_qp *qp, struct siw_rqe *rqe, u32 bytes,
		     u32 inval_stag, enum siw_wc_status status)
{
	struct siw_cq *cq = qp->rcq;
	int rv = 0;

	if (cq) {
		struct siw_cqe *cqe;
		u32 idx;
1130f29dd55bSBernard Metzler unsigned long flags; 1131f29dd55bSBernard Metzler 1132f29dd55bSBernard Metzler spin_lock_irqsave(&cq->lock, flags); 1133f29dd55bSBernard Metzler 1134f29dd55bSBernard Metzler idx = cq->cq_put % cq->num_cqe; 1135f29dd55bSBernard Metzler cqe = &cq->queue[idx]; 1136f29dd55bSBernard Metzler 1137f29dd55bSBernard Metzler if (!READ_ONCE(cqe->flags)) { 1138f29dd55bSBernard Metzler bool notify; 1139f29dd55bSBernard Metzler u8 cqe_flags = SIW_WQE_VALID; 1140f29dd55bSBernard Metzler 1141f29dd55bSBernard Metzler cqe->id = rqe->id; 1142f29dd55bSBernard Metzler cqe->opcode = SIW_OP_RECEIVE; 1143f29dd55bSBernard Metzler cqe->status = status; 1144f29dd55bSBernard Metzler cqe->imm_data = 0; 1145f29dd55bSBernard Metzler cqe->bytes = bytes; 1146f29dd55bSBernard Metzler 114758fb0b56SBernard Metzler if (rdma_is_kernel_res(&cq->base_cq.res)) { 114858fb0b56SBernard Metzler cqe->base_qp = &qp->base_qp; 1149f29dd55bSBernard Metzler if (inval_stag) { 1150f29dd55bSBernard Metzler cqe_flags |= SIW_WQE_REM_INVAL; 1151f29dd55bSBernard Metzler cqe->inval_stag = inval_stag; 1152f29dd55bSBernard Metzler } 1153f29dd55bSBernard Metzler } else { 1154f29dd55bSBernard Metzler cqe->qp_id = qp_id(qp); 1155f29dd55bSBernard Metzler } 1156f29dd55bSBernard Metzler /* mark CQE valid for application */ 1157f29dd55bSBernard Metzler WRITE_ONCE(cqe->flags, cqe_flags); 1158f29dd55bSBernard Metzler /* recycle RQE */ 1159f29dd55bSBernard Metzler smp_store_mb(rqe->flags, 0); 1160f29dd55bSBernard Metzler 1161f29dd55bSBernard Metzler cq->cq_put++; 1162f29dd55bSBernard Metzler notify = siw_cq_notify_now(cq, SIW_WQE_SIGNALLED); 1163f29dd55bSBernard Metzler 1164f29dd55bSBernard Metzler spin_unlock_irqrestore(&cq->lock, flags); 1165f29dd55bSBernard Metzler 1166f29dd55bSBernard Metzler if (notify) { 1167f29dd55bSBernard Metzler siw_dbg_cq(cq, "Call completion handler\n"); 1168f29dd55bSBernard Metzler cq->base_cq.comp_handler(&cq->base_cq, 1169f29dd55bSBernard Metzler cq->base_cq.cq_context); 1170f29dd55bSBernard Metzler } 1171f29dd55bSBernard Metzler } else { 1172f29dd55bSBernard Metzler spin_unlock_irqrestore(&cq->lock, flags); 1173f29dd55bSBernard Metzler rv = -ENOMEM; 1174f29dd55bSBernard Metzler siw_cq_event(cq, IB_EVENT_CQ_ERR); 1175f29dd55bSBernard Metzler } 1176f29dd55bSBernard Metzler } else { 1177f29dd55bSBernard Metzler /* recycle RQE */ 1178f29dd55bSBernard Metzler smp_store_mb(rqe->flags, 0); 1179f29dd55bSBernard Metzler } 1180f29dd55bSBernard Metzler return rv; 1181f29dd55bSBernard Metzler } 1182f29dd55bSBernard Metzler 1183f29dd55bSBernard Metzler /* 1184f29dd55bSBernard Metzler * siw_sq_flush() 1185f29dd55bSBernard Metzler * 1186f29dd55bSBernard Metzler * Flush SQ and ORRQ entries to CQ. 1187f29dd55bSBernard Metzler * 1188f29dd55bSBernard Metzler * Must be called with QP state write lock held. 1189f29dd55bSBernard Metzler * Therefore, SQ and ORQ lock must not be taken. 
1190f29dd55bSBernard Metzler */ 1191f29dd55bSBernard Metzler void siw_sq_flush(struct siw_qp *qp) 1192f29dd55bSBernard Metzler { 1193f29dd55bSBernard Metzler struct siw_sqe *sqe; 1194f29dd55bSBernard Metzler struct siw_wqe *wqe = tx_wqe(qp); 1195f29dd55bSBernard Metzler int async_event = 0; 1196f29dd55bSBernard Metzler 1197f29dd55bSBernard Metzler /* 1198f29dd55bSBernard Metzler * Start with completing any work currently on the ORQ 1199f29dd55bSBernard Metzler */ 1200f29dd55bSBernard Metzler while (qp->attrs.orq_size) { 1201f29dd55bSBernard Metzler sqe = &qp->orq[qp->orq_get % qp->attrs.orq_size]; 1202f29dd55bSBernard Metzler if (!READ_ONCE(sqe->flags)) 1203f29dd55bSBernard Metzler break; 1204f29dd55bSBernard Metzler 1205f29dd55bSBernard Metzler if (siw_sqe_complete(qp, sqe, 0, SIW_WC_WR_FLUSH_ERR) != 0) 1206f29dd55bSBernard Metzler break; 1207f29dd55bSBernard Metzler 1208f29dd55bSBernard Metzler WRITE_ONCE(sqe->flags, 0); 1209f29dd55bSBernard Metzler qp->orq_get++; 1210f29dd55bSBernard Metzler } 1211f29dd55bSBernard Metzler /* 1212f29dd55bSBernard Metzler * Flush an in-progress WQE if present 1213f29dd55bSBernard Metzler */ 1214f29dd55bSBernard Metzler if (wqe->wr_status != SIW_WR_IDLE) { 1215f29dd55bSBernard Metzler siw_dbg_qp(qp, "flush current SQE, type %d, status %d\n", 1216f29dd55bSBernard Metzler tx_type(wqe), wqe->wr_status); 1217f29dd55bSBernard Metzler 1218f29dd55bSBernard Metzler siw_wqe_put_mem(wqe, tx_type(wqe)); 1219f29dd55bSBernard Metzler 1220f29dd55bSBernard Metzler if (tx_type(wqe) != SIW_OP_READ_RESPONSE && 1221f29dd55bSBernard Metzler ((tx_type(wqe) != SIW_OP_READ && 1222f29dd55bSBernard Metzler tx_type(wqe) != SIW_OP_READ_LOCAL_INV) || 1223f29dd55bSBernard Metzler wqe->wr_status == SIW_WR_QUEUED)) 1224f29dd55bSBernard Metzler /* 1225f29dd55bSBernard Metzler * An in-progress Read Request is already in 1226f29dd55bSBernard Metzler * the ORQ 1227f29dd55bSBernard Metzler */ 1228f29dd55bSBernard Metzler siw_sqe_complete(qp, &wqe->sqe, wqe->bytes, 1229f29dd55bSBernard Metzler SIW_WC_WR_FLUSH_ERR); 1230f29dd55bSBernard Metzler 1231f29dd55bSBernard Metzler wqe->wr_status = SIW_WR_IDLE; 1232f29dd55bSBernard Metzler } 1233f29dd55bSBernard Metzler /* 1234f29dd55bSBernard Metzler * Flush the Send Queue 1235f29dd55bSBernard Metzler */ 1236f29dd55bSBernard Metzler while (qp->attrs.sq_size) { 1237f29dd55bSBernard Metzler sqe = &qp->sendq[qp->sq_get % qp->attrs.sq_size]; 1238f29dd55bSBernard Metzler if (!READ_ONCE(sqe->flags)) 1239f29dd55bSBernard Metzler break; 1240f29dd55bSBernard Metzler 1241f29dd55bSBernard Metzler async_event = 1; 1242f29dd55bSBernard Metzler if (siw_sqe_complete(qp, sqe, 0, SIW_WC_WR_FLUSH_ERR) != 0) 1243f29dd55bSBernard Metzler /* 1244f29dd55bSBernard Metzler * Shall IB_EVENT_SQ_DRAINED be supressed if work 1245f29dd55bSBernard Metzler * completion fails? 1246f29dd55bSBernard Metzler */ 1247f29dd55bSBernard Metzler break; 1248f29dd55bSBernard Metzler 1249f29dd55bSBernard Metzler WRITE_ONCE(sqe->flags, 0); 1250f29dd55bSBernard Metzler qp->sq_get++; 1251f29dd55bSBernard Metzler } 1252f29dd55bSBernard Metzler if (async_event) 1253f29dd55bSBernard Metzler siw_qp_event(qp, IB_EVENT_SQ_DRAINED); 1254f29dd55bSBernard Metzler } 1255f29dd55bSBernard Metzler 1256f29dd55bSBernard Metzler /* 1257f29dd55bSBernard Metzler * siw_rq_flush() 1258f29dd55bSBernard Metzler * 1259f29dd55bSBernard Metzler * Flush recv queue entries to CQ. 
Also 1260f29dd55bSBernard Metzler * takes care of pending active tagged and untagged 1261f29dd55bSBernard Metzler * inbound transfers, which have target memory 1262f29dd55bSBernard Metzler * referenced. 1263f29dd55bSBernard Metzler * 1264f29dd55bSBernard Metzler * Must be called with QP state write lock held. 1265f29dd55bSBernard Metzler * Therefore, RQ lock must not be taken. 1266f29dd55bSBernard Metzler */ 1267f29dd55bSBernard Metzler void siw_rq_flush(struct siw_qp *qp) 1268f29dd55bSBernard Metzler { 1269f29dd55bSBernard Metzler struct siw_wqe *wqe = &qp->rx_untagged.wqe_active; 1270f29dd55bSBernard Metzler 1271f29dd55bSBernard Metzler /* 1272f29dd55bSBernard Metzler * Flush an in-progress untagged operation if present 1273f29dd55bSBernard Metzler */ 1274f29dd55bSBernard Metzler if (wqe->wr_status != SIW_WR_IDLE) { 1275f29dd55bSBernard Metzler siw_dbg_qp(qp, "flush current rqe, type %d, status %d\n", 1276f29dd55bSBernard Metzler rx_type(wqe), wqe->wr_status); 1277f29dd55bSBernard Metzler 1278f29dd55bSBernard Metzler siw_wqe_put_mem(wqe, rx_type(wqe)); 1279f29dd55bSBernard Metzler 1280f29dd55bSBernard Metzler if (rx_type(wqe) == SIW_OP_RECEIVE) { 1281f29dd55bSBernard Metzler siw_rqe_complete(qp, &wqe->rqe, wqe->bytes, 1282f29dd55bSBernard Metzler 0, SIW_WC_WR_FLUSH_ERR); 1283f29dd55bSBernard Metzler } else if (rx_type(wqe) != SIW_OP_READ && 1284f29dd55bSBernard Metzler rx_type(wqe) != SIW_OP_READ_RESPONSE && 1285f29dd55bSBernard Metzler rx_type(wqe) != SIW_OP_WRITE) { 1286f29dd55bSBernard Metzler siw_sqe_complete(qp, &wqe->sqe, 0, SIW_WC_WR_FLUSH_ERR); 1287f29dd55bSBernard Metzler } 1288f29dd55bSBernard Metzler wqe->wr_status = SIW_WR_IDLE; 1289f29dd55bSBernard Metzler } 1290f29dd55bSBernard Metzler wqe = &qp->rx_tagged.wqe_active; 1291f29dd55bSBernard Metzler 1292f29dd55bSBernard Metzler if (wqe->wr_status != SIW_WR_IDLE) { 1293f29dd55bSBernard Metzler siw_wqe_put_mem(wqe, rx_type(wqe)); 1294f29dd55bSBernard Metzler wqe->wr_status = SIW_WR_IDLE; 1295f29dd55bSBernard Metzler } 1296f29dd55bSBernard Metzler /* 1297f29dd55bSBernard Metzler * Flush the Receive Queue 1298f29dd55bSBernard Metzler */ 1299f29dd55bSBernard Metzler while (qp->attrs.rq_size) { 1300f29dd55bSBernard Metzler struct siw_rqe *rqe = 1301f29dd55bSBernard Metzler &qp->recvq[qp->rq_get % qp->attrs.rq_size]; 1302f29dd55bSBernard Metzler 1303f29dd55bSBernard Metzler if (!READ_ONCE(rqe->flags)) 1304f29dd55bSBernard Metzler break; 1305f29dd55bSBernard Metzler 1306f29dd55bSBernard Metzler if (siw_rqe_complete(qp, rqe, 0, 0, SIW_WC_WR_FLUSH_ERR) != 0) 1307f29dd55bSBernard Metzler break; 1308f29dd55bSBernard Metzler 1309f29dd55bSBernard Metzler WRITE_ONCE(rqe->flags, 0); 1310f29dd55bSBernard Metzler qp->rq_get++; 1311f29dd55bSBernard Metzler } 1312f29dd55bSBernard Metzler } 1313f29dd55bSBernard Metzler 1314f29dd55bSBernard Metzler int siw_qp_add(struct siw_device *sdev, struct siw_qp *qp) 1315f29dd55bSBernard Metzler { 131658fb0b56SBernard Metzler int rv = xa_alloc(&sdev->qp_xa, &qp->base_qp.qp_num, qp, xa_limit_32b, 1317f29dd55bSBernard Metzler GFP_KERNEL); 1318f29dd55bSBernard Metzler 1319f29dd55bSBernard Metzler if (!rv) { 1320f29dd55bSBernard Metzler kref_init(&qp->ref); 1321f29dd55bSBernard Metzler qp->sdev = sdev; 1322f29dd55bSBernard Metzler siw_dbg_qp(qp, "new QP\n"); 1323f29dd55bSBernard Metzler } 1324f29dd55bSBernard Metzler return rv; 1325f29dd55bSBernard Metzler } 1326f29dd55bSBernard Metzler 1327f29dd55bSBernard Metzler void siw_free_qp(struct kref *ref) 1328f29dd55bSBernard Metzler { 1329f29dd55bSBernard Metzler 
struct siw_qp *found, *qp = container_of(ref, struct siw_qp, ref); 1330f29dd55bSBernard Metzler struct siw_device *sdev = qp->sdev; 1331f29dd55bSBernard Metzler unsigned long flags; 1332f29dd55bSBernard Metzler 1333f29dd55bSBernard Metzler if (qp->cep) 1334f29dd55bSBernard Metzler siw_cep_put(qp->cep); 1335f29dd55bSBernard Metzler 1336f29dd55bSBernard Metzler found = xa_erase(&sdev->qp_xa, qp_id(qp)); 1337f29dd55bSBernard Metzler WARN_ON(found != qp); 1338f29dd55bSBernard Metzler spin_lock_irqsave(&sdev->lock, flags); 1339f29dd55bSBernard Metzler list_del(&qp->devq); 1340f29dd55bSBernard Metzler spin_unlock_irqrestore(&sdev->lock, flags); 1341f29dd55bSBernard Metzler 1342f29dd55bSBernard Metzler vfree(qp->sendq); 1343f29dd55bSBernard Metzler vfree(qp->recvq); 1344f29dd55bSBernard Metzler vfree(qp->irq); 1345f29dd55bSBernard Metzler vfree(qp->orq); 1346f29dd55bSBernard Metzler 1347f29dd55bSBernard Metzler siw_put_tx_cpu(qp->tx_cpu); 1348a3c27880SBernard Metzler complete(&qp->qp_free); 1349f29dd55bSBernard Metzler atomic_dec(&sdev->num_qp); 1350f29dd55bSBernard Metzler } 1351
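
The completion paths above (siw_sqe_complete() and siw_rqe_complete()) hand a CQE to the consumer lock-free: a ring slot is claimed only while its flags word reads 0, the entry is filled in, and ownership passes to the consumer with WRITE_ONCE(cqe->flags, SIW_WQE_VALID), while the SQE/RQE is recycled with smp_store_mb(). The following is a minimal consumer-side sketch of the matching reap step; it is not the driver's actual poll code, it omits the CQ lock a real poller would hold, and the consumer counter name cq_get is an assumption not shown in this listing.

/*
 * Illustrative sketch only (not part of the driver): reaping one CQE from
 * the shared ring that siw_sqe_complete()/siw_rqe_complete() publish into.
 * The slot is handed back to the producer by clearing cqe->flags.
 */
static int my_reap_cqe(struct siw_cq *cq, struct siw_cqe *out)
{
	/* cq_get is an assumed consumer-side counter mirroring cq_put */
	struct siw_cqe *cqe = &cq->queue[cq->cq_get % cq->num_cqe];

	if (!(READ_ONCE(cqe->flags) & SIW_WQE_VALID))
		return 0;		/* nothing published yet */

	*out = *cqe;			/* copy before releasing the slot */

	/* hand the slot back to the completion producer */
	smp_store_mb(cqe->flags, 0);
	cq->cq_get++;

	return 1;
}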
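siw_cq_notify_now() makes notification one-shot: once the completion handler fires, the CQ is dis-armed until the consumer re-arms it. A kernel consumer would normally re-arm through the generic verbs API, draining with ib_poll_cq() and re-arming with ib_req_notify_cq(). The sketch below shows that generic pattern under an assumed handler name (my_cq_handler); it is illustrative and not siw-specific code.

#include <rdma/ib_verbs.h>

/*
 * Illustrative sketch only: a kernel ULP completion handler cooperating
 * with a one-shot CQ notification scheme.
 */
static void my_cq_handler(struct ib_cq *cq, void *cq_context)
{
	struct ib_wc wc;

	/* drain whatever is already in the CQ ... */
	while (ib_poll_cq(cq, 1, &wc) > 0)
		; /* process wc.status / wc.opcode here */

	/*
	 * ... then re-arm. With IB_CQ_REPORT_MISSED_EVENTS a positive
	 * return hints that completions may have slipped in between the
	 * last poll and the re-arm, so poll once more.
	 */
	if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				 IB_CQ_REPORT_MISSED_EVENTS) > 0)
		while (ib_poll_cq(cq, 1, &wc) > 0)
			; /* process late completions */
}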
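siw_qp_add() publishes the QP in sdev->qp_xa under the number chosen by xa_alloc(), and siw_free_qp() removes it again with xa_erase() once the last kref is dropped. A lookup path that maps a QP number back to the object while taking a reference might look like the sketch below; the function name and the RCU-based locking shown are assumptions for illustration, and the reference obtained must later be dropped with kref_put(&qp->ref, siw_free_qp).

/*
 * Illustrative sketch only: resolve a QP number to its siw_qp, pairing
 * the xa_alloc()/xa_erase() calls in siw_qp_add()/siw_free_qp() above.
 */
static struct siw_qp *my_qp_lookup(struct siw_device *sdev, u32 id)
{
	struct siw_qp *qp;

	rcu_read_lock();
	qp = xa_load(&sdev->qp_xa, id);
	if (qp && !kref_get_unless_zero(&qp->ref))
		qp = NULL;	/* object is already being torn down */
	rcu_read_unlock();

	return qp;
}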