1f931551bSRalph Campbell /*
2d310c4bfSMichael J. Ruhl * Copyright (c) 2012 - 2019 Intel Corporation. All rights reserved.
3f931551bSRalph Campbell * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
4f931551bSRalph Campbell * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
5f931551bSRalph Campbell *
6f931551bSRalph Campbell * This software is available to you under a choice of one of two
7f931551bSRalph Campbell * licenses. You may choose to be licensed under the terms of the GNU
8f931551bSRalph Campbell * General Public License (GPL) Version 2, available from the file
9f931551bSRalph Campbell * COPYING in the main directory of this source tree, or the
10f931551bSRalph Campbell * OpenIB.org BSD license below:
11f931551bSRalph Campbell *
12f931551bSRalph Campbell * Redistribution and use in source and binary forms, with or
13f931551bSRalph Campbell * without modification, are permitted provided that the following
14f931551bSRalph Campbell * conditions are met:
15f931551bSRalph Campbell *
16f931551bSRalph Campbell * - Redistributions of source code must retain the above
17f931551bSRalph Campbell * copyright notice, this list of conditions and the following
18f931551bSRalph Campbell * disclaimer.
19f931551bSRalph Campbell *
20f931551bSRalph Campbell * - Redistributions in binary form must reproduce the above
21f931551bSRalph Campbell * copyright notice, this list of conditions and the following
22f931551bSRalph Campbell * disclaimer in the documentation and/or other materials
23f931551bSRalph Campbell * provided with the distribution.
24f931551bSRalph Campbell *
25f931551bSRalph Campbell * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26f931551bSRalph Campbell * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27f931551bSRalph Campbell * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28f931551bSRalph Campbell * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29f931551bSRalph Campbell * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30f931551bSRalph Campbell * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31f931551bSRalph Campbell * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32f931551bSRalph Campbell * SOFTWARE.
33f931551bSRalph Campbell */
34f931551bSRalph Campbell
35f931551bSRalph Campbell #include <rdma/ib_smi.h>
369ff198f5SDennis Dalessandro #include <rdma/ib_verbs.h>
37f931551bSRalph Campbell
38f931551bSRalph Campbell #include "qib.h"
39f931551bSRalph Campbell #include "qib_mad.h"
40f931551bSRalph Campbell
/**
 * qib_ud_loopback - handle send on loopback QPs
 * @sqp: the sending QP
 * @swqe: the send work request
 *
 * This is called from qib_make_ud_req() to forward a WQE addressed
 * to the same HCA.
 * Note that the receive interrupt handler may be calling qib_ud_rcv()
 * while this is being called.
 */
static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
{
	struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = ppd->dd;
	struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
	struct rvt_qp *qp;
	struct rdma_ah_attr *ah_attr;
	unsigned long flags;
	struct rvt_sge_state ssge;
	struct rvt_sge *sge;
	struct ib_wc wc;
	u32 length;
	enum ib_qp_type sqptype, dqptype;

	/* The destination QP looked up below is only valid under RCU. */
	rcu_read_lock();
	qp = rvt_lookup_qpn(rdi, &ibp->rvp, rvt_get_swqe_remote_qpn(swqe));
	if (!qp) {
		ibp->rvp.n_pkt_drops++;
		goto drop;
	}

	/* GSI QPs carry UD-format traffic, so compare them as UD. */
	sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ?
			IB_QPT_UD : sqp->ibqp.qp_type;
	dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?
			IB_QPT_UD : qp->ibqp.qp_type;

	if (dqptype != sqptype ||
	    !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
		ibp->rvp.n_pkt_drops++;
		goto drop;
	}

	ah_attr = rvt_get_swqe_ah_attr(swqe);
	ppd = ppd_from_ibp(ibp);

	/* QP0 and QP1 are exempt from partition key enforcement here. */
	if (qp->ibqp.qp_num > 1) {
		u16 pkey1;
		u16 pkey2;
		u16 lid;

		pkey1 = qib_get_pkey(ibp, sqp->s_pkey_index);
		pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
		if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
			lid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
					  ((1 << ppd->lmc) - 1));
			qib_bad_pkey(ibp, pkey1,
				     rdma_ah_get_sl(ah_attr),
				     sqp->ibqp.qp_num, qp->ibqp.qp_num,
				     cpu_to_be16(lid),
				     cpu_to_be16(rdma_ah_get_dlid(ah_attr)));
			goto drop;
		}
	}

	/*
	 * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
	if (qp->ibqp.qp_num) {
		u32 qkey;

		qkey = (int)rvt_get_swqe_remote_qkey(swqe) < 0 ?
			sqp->qkey : rvt_get_swqe_remote_qkey(swqe);
		if (unlikely(qkey != qp->qkey))
			goto drop;
	}

	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	length = swqe->length;
	memset(&wc, 0, sizeof(wc));
	wc.byte_len = length + sizeof(struct ib_grh);

	if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = swqe->wr.ex.imm_data;
	}

	/* Serialize with qib_ud_rcv() delivering to the same QP. */
	spin_lock_irqsave(&qp->r_lock, flags);

	/*
	 * Get the next work request entry to find where to put the data.
	 * RVT_R_REUSE_SGE means a prior oversized packet left r_sge set up;
	 * reuse it instead of consuming another receive WQE.
	 */
	if (qp->r_flags & RVT_R_REUSE_SGE)
		qp->r_flags &= ~RVT_R_REUSE_SGE;
	else {
		int ret;

		ret = rvt_get_rwqe(qp, false);
		if (ret < 0) {
			rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
			goto bail_unlock;
		}
		if (!ret) {
			/* No receive buffer posted on the destination QP. */
			if (qp->ibqp.qp_num == 0)
				ibp->rvp.n_vl15_dropped++;
			goto bail_unlock;
		}
	}
	/* Silently drop packets which are too big. */
	if (unlikely(wc.byte_len > qp->r_len)) {
		/* Keep the RWQE so a later, smaller packet can use it. */
		qp->r_flags |= RVT_R_REUSE_SGE;
		ibp->rvp.n_pkt_drops++;
		goto bail_unlock;
	}

	if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
		struct ib_grh grh;
		const struct ib_global_route *grd = rdma_ah_read_grh(ah_attr);

		qib_make_grh(ibp, &grh, grd, 0, 0);
		rvt_copy_sge(qp, &qp->r_sge, &grh,
			     sizeof(grh), true, false);
		wc.wc_flags |= IB_WC_GRH;
	} else
		rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
	/* Walk the sender's SGE list, copying into the receive buffers. */
	ssge.sg_list = swqe->sg_list + 1;
	ssge.sge = *swqe->sg_list;
	ssge.num_sge = swqe->wr.num_sge;
	sge = &ssge.sge;
	while (length) {
		u32 len = rvt_get_sge_length(sge, length);

		rvt_copy_sge(qp, &qp->r_sge, sge->vaddr, len, true, false);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ssge.num_sge)
				*sge = *ssge.sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			/* Advance to the next segment of the memory region. */
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		length -= len;
	}
	rvt_put_ss(&qp->r_sge);
	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		goto bail_unlock;
	/* Build the receive completion for the destination QP. */
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	wc.src_qp = sqp->ibqp.qp_num;
	wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
		rvt_get_swqe_pkey_index(swqe) : 0;
	wc.slid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
				((1 << ppd->lmc) - 1));
	wc.sl = rdma_ah_get_sl(ah_attr);
	wc.dlid_path_bits = rdma_ah_get_dlid(ah_attr) & ((1 << ppd->lmc) - 1);
	wc.port_num = qp->port_num;
	/* Signal completion event if the solicited bit is set. */
	rvt_recv_cq(qp, &wc, swqe->wr.send_flags & IB_SEND_SOLICITED);
	ibp->rvp.n_loop_pkts++;
bail_unlock:
	spin_unlock_irqrestore(&qp->r_lock, flags);
drop:
	rcu_read_unlock();
}
221f931551bSRalph Campbell
/**
 * qib_make_ud_req - construct a UD request packet
 * @qp: the QP
 * @flags: flags to modify and pass back to caller
 *
 * Assumes the s_lock is held.
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
{
	struct qib_qp_priv *priv = qp->priv;
	struct ib_other_headers *ohdr;
	struct rdma_ah_attr *ah_attr;
	struct qib_pportdata *ppd;
	struct qib_ibport *ibp;
	struct rvt_swqe *wqe;
	u32 nwords;
	u32 extra_bytes;
	u32 bth0;
	u16 lrh0;
	u16 lid;
	int ret = 0;
	int next_cur;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		if (qp->s_last == READ_ONCE(qp->s_head))
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (atomic_read(&priv->s_dma_busy)) {
			qp->s_flags |= RVT_S_WAIT_DMA;
			goto bail;
		}
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
		goto done;
	}

	/* see post_one_send() */
	if (qp->s_cur == READ_ONCE(qp->s_head))
		goto bail;

	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
	/* Compute the next slot in the circular send queue. */
	next_cur = qp->s_cur + 1;
	if (next_cur >= qp->s_size)
		next_cur = 0;

	/* Construct the header. */
	ibp = to_iport(qp->ibqp.device, qp->port_num);
	ppd = ppd_from_ibp(ibp);
	ah_attr = rvt_get_swqe_ah_attr(wqe);
	if (rdma_ah_get_dlid(ah_attr) >= be16_to_cpu(IB_MULTICAST_LID_BASE)) {
		if (rdma_ah_get_dlid(ah_attr) !=
				be16_to_cpu(IB_LID_PERMISSIVE))
			this_cpu_inc(ibp->pmastats->n_multicast_xmit);
		else
			this_cpu_inc(ibp->pmastats->n_unicast_xmit);
	} else {
		this_cpu_inc(ibp->pmastats->n_unicast_xmit);
		/* Mask off the LMC path bits to compare against our LID. */
		lid = rdma_ah_get_dlid(ah_attr) & ~((1 << ppd->lmc) - 1);
		if (unlikely(lid == ppd->lid)) {
			unsigned long tflags = *flags;
			/*
			 * If DMAs are in progress, we can't generate
			 * a completion for the loopback packet since
			 * it would be out of order.
			 * XXX Instead of waiting, we could queue a
			 * zero length descriptor so we get a callback.
			 */
			if (atomic_read(&priv->s_dma_busy)) {
				qp->s_flags |= RVT_S_WAIT_DMA;
				goto bail;
			}
			qp->s_cur = next_cur;
			/*
			 * Drop s_lock across the loopback delivery; the
			 * updated irq flags are handed back via *flags.
			 */
			spin_unlock_irqrestore(&qp->s_lock, tflags);
			qib_ud_loopback(qp, wqe);
			spin_lock_irqsave(&qp->s_lock, tflags);
			*flags = tflags;
			rvt_send_complete(qp, wqe, IB_WC_SUCCESS);
			goto done;
		}
	}

	qp->s_cur = next_cur;
	/* Pad the payload out to a multiple of 4 bytes. */
	extra_bytes = -wqe->length & 3;
	nwords = (wqe->length + extra_bytes) >> 2;

	/* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */
	qp->s_hdrwords = 7;
	qp->s_cur_size = wqe->length;
	qp->s_cur_sge = &qp->s_sge;
	qp->s_srate = rdma_ah_get_static_rate(ah_attr);
	qp->s_wqe = wqe;
	qp->s_sge.sge = wqe->sg_list[0];
	qp->s_sge.sg_list = wqe->sg_list + 1;
	qp->s_sge.num_sge = wqe->wr.num_sge;
	qp->s_sge.total_len = wqe->length;

	if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
		/* Header size in 32-bit words. */
		qp->s_hdrwords += qib_make_grh(ibp, &priv->s_hdr->u.l.grh,
					       rdma_ah_read_grh(ah_attr),
					       qp->s_hdrwords, nwords);
		lrh0 = QIB_LRH_GRH;
		ohdr = &priv->s_hdr->u.l.oth;
		/*
		 * Don't worry about sending to locally attached multicast
		 * QPs.  It is unspecified by the spec. what happens.
		 */
	} else {
		/* Header size in 32-bit words. */
		lrh0 = QIB_LRH_BTH;
		ohdr = &priv->s_hdr->u.oth;
	}
	if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
		/* Immediate data adds one more 32-bit header word. */
		qp->s_hdrwords++;
		ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
		bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
	} else
		bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
	lrh0 |= rdma_ah_get_sl(ah_attr) << 4;
	if (qp->ibqp.qp_type == IB_QPT_SMI)
		lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
	else
		lrh0 |= ibp->sl_to_vl[rdma_ah_get_sl(ah_attr)] << 12;
	priv->s_hdr->lrh[0] = cpu_to_be16(lrh0);
	priv->s_hdr->lrh[1] =
		cpu_to_be16(rdma_ah_get_dlid(ah_attr));  /* DEST LID */
	priv->s_hdr->lrh[2] =
		cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
	lid = ppd->lid;
	if (lid) {
		lid |= rdma_ah_get_path_bits(ah_attr) &
			((1 << ppd->lmc) - 1);
		priv->s_hdr->lrh[3] = cpu_to_be16(lid);
	} else
		priv->s_hdr->lrh[3] = IB_LID_PERMISSIVE;
	if (wqe->wr.send_flags & IB_SEND_SOLICITED)
		bth0 |= IB_BTH_SOLICITED;
	bth0 |= extra_bytes << 20;
	bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? QIB_DEFAULT_P_KEY :
		qib_get_pkey(ibp, qp->ibqp.qp_type == IB_QPT_GSI ?
			     rvt_get_swqe_pkey_index(wqe) : qp->s_pkey_index);
	ohdr->bth[0] = cpu_to_be32(bth0);
	/*
	 * Use the multicast QP if the destination LID is a multicast LID.
	 */
	ohdr->bth[1] = rdma_ah_get_dlid(ah_attr) >=
			be16_to_cpu(IB_MULTICAST_LID_BASE) &&
		rdma_ah_get_dlid(ah_attr) != be16_to_cpu(IB_LID_PERMISSIVE) ?
		cpu_to_be32(QIB_MULTICAST_QPN) :
		cpu_to_be32(rvt_get_swqe_remote_qpn(wqe));
	ohdr->bth[2] = cpu_to_be32(wqe->psn & QIB_PSN_MASK);
	/*
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
	ohdr->u.ud.deth[0] =
		cpu_to_be32((int)rvt_get_swqe_remote_qkey(wqe) < 0 ? qp->qkey :
			    rvt_get_swqe_remote_qkey(wqe));
	ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);

done:
	return 1;
bail:
	qp->s_flags &= ~RVT_S_BUSY;
	return ret;
}
393f931551bSRalph Campbell
/*
 * qib_lookup_pkey - find the pkey table index matching @pkey.
 * @ibp: the port whose hardware context pkey table is searched
 * @pkey: partition key to match (membership bit is ignored)
 *
 * Returns the first matching table index, or 0 if no entry matches
 * (which should not happen, since hardware validates pkeys).
 */
static unsigned qib_lookup_pkey(struct qib_ibport *ibp, u16 pkey)
{
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = ppd->dd;
	unsigned ctxt = ppd->hw_pidx;
	/* Compare without the limited/full membership bit. */
	u16 key = pkey & 0x7fff;
	unsigned idx;

	for (idx = 0; idx < ARRAY_SIZE(dd->rcd[ctxt]->pkeys); idx++) {
		if ((dd->rcd[ctxt]->pkeys[idx] & 0x7fff) == key)
			return idx;
	}

	/*
	 * No match means hardware failed to validate the pkey;
	 * punt and return index 0.
	 */
	return 0;
}
413f931551bSRalph Campbell
/**
 * qib_ud_rcv - receive an incoming UD packet
 * @ibp: the port the packet came in on
 * @hdr: the packet header
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP the packet came on
 *
 * This is called from qib_qp_rcv() to process an incoming UD packet
 * for the given QP.
 * Called at interrupt level.
 */
void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
{
	struct ib_other_headers *ohdr;
	int opcode;
	u32 hdrsize;
	u32 pad;
	struct ib_wc wc;
	u32 qkey;
	u32 src_qp;
	u16 dlid;

	/* Check for GRH */
	if (!has_grh) {
		ohdr = &hdr->u.oth;
		hdrsize = 8 + 12 + 8;		/* LRH + BTH + DETH */
	} else {
		ohdr = &hdr->u.l.oth;
		hdrsize = 8 + 40 + 12 + 8;	/* LRH + GRH + BTH + DETH */
	}
	qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
	src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & RVT_QPN_MASK;

	/*
	 * Get the number of bytes the message was padded by
	 * and drop incomplete packets.
	 */
	pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
	if (unlikely(tlen < (hdrsize + pad + 4)))
		goto drop;

	/* Payload length: strip header, pad, and 4-byte ICRC. */
	tlen -= hdrsize + pad + 4;

	/*
	 * Check that the permissive LID is only used on QP0
	 * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
	 */
	if (qp->ibqp.qp_num) {
		if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
			     hdr->lrh[3] == IB_LID_PERMISSIVE))
			goto drop;
		if (qp->ibqp.qp_num > 1) {
			u16 pkey1, pkey2;

			pkey1 = be32_to_cpu(ohdr->bth[0]);
			pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
			if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
				qib_bad_pkey(ibp,
					     pkey1,
					     (be16_to_cpu(hdr->lrh[0]) >> 4) &
						0xF,
					     src_qp, qp->ibqp.qp_num,
					     hdr->lrh[3], hdr->lrh[1]);
				/* Trap raised; not counted as a drop. */
				return;
			}
		}
		if (unlikely(qkey != qp->qkey))
			return;

		/* Drop invalid MAD packets (see 13.5.3.1). */
		if (unlikely(qp->ibqp.qp_num == 1 &&
			     (tlen != 256 ||
			      (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))
			goto drop;
	} else {
		struct ib_smp *smp;

		/* Drop invalid MAD packets (see 13.5.3.1). */
		if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15)
			goto drop;
		smp = (struct ib_smp *) data;
		/* Permissive LIDs are only valid for directed-route SMPs. */
		if ((hdr->lrh[1] == IB_LID_PERMISSIVE ||
		     hdr->lrh[3] == IB_LID_PERMISSIVE) &&
		    smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
			goto drop;
	}

	/*
	 * The opcode is in the low byte when its in network order
	 * (top byte when in host order).
	 */
	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	if (qp->ibqp.qp_num > 1 &&
	    opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
		wc.ex.imm_data = ohdr->u.ud.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
	} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
		wc.ex.imm_data = 0;
		wc.wc_flags = 0;
	} else
		goto drop;

	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	wc.byte_len = tlen + sizeof(struct ib_grh);

	/*
	 * Get the next work request entry to find where to put the data.
	 * RVT_R_REUSE_SGE means a prior oversized packet left r_sge set up;
	 * reuse it instead of consuming another receive WQE.
	 */
	if (qp->r_flags & RVT_R_REUSE_SGE)
		qp->r_flags &= ~RVT_R_REUSE_SGE;
	else {
		int ret;

		ret = rvt_get_rwqe(qp, false);
		if (ret < 0) {
			rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
			return;
		}
		if (!ret) {
			/* No receive buffer posted. */
			if (qp->ibqp.qp_num == 0)
				ibp->rvp.n_vl15_dropped++;
			return;
		}
	}
	/* Silently drop packets which are too big. */
	if (unlikely(wc.byte_len > qp->r_len)) {
		/* Keep the RWQE so a later, smaller packet can use it. */
		qp->r_flags |= RVT_R_REUSE_SGE;
		goto drop;
	}
	if (has_grh) {
		rvt_copy_sge(qp, &qp->r_sge, &hdr->u.l.grh,
			     sizeof(struct ib_grh), true, false);
		wc.wc_flags |= IB_WC_GRH;
	} else
		rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
	rvt_copy_sge(qp, &qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh),
		     true, false);
	rvt_put_ss(&qp->r_sge);
	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		return;
	/* Build the receive completion from the wire headers. */
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.vendor_err = 0;
	wc.qp = &qp->ibqp;
	wc.src_qp = src_qp;
	wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
		qib_lookup_pkey(ibp, be32_to_cpu(ohdr->bth[0])) : 0;
	wc.slid = be16_to_cpu(hdr->lrh[3]);
	wc.sl = (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF;
	dlid = be16_to_cpu(hdr->lrh[1]);
	/*
	 * Save the LMC lower bits if the destination LID is a unicast LID.
	 */
	wc.dlid_path_bits = dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE) ? 0 :
		dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
	wc.port_num = qp->port_num;
	/* Signal completion event if the solicited bit is set. */
	rvt_recv_cq(qp, &wc, ib_bth_is_solicited(ohdr));
	return;

drop:
	ibp->rvp.n_pkt_drops++;
}
584