xref: /openbmc/linux/drivers/infiniband/hw/cxgb4/ev.c (revision f87deada)
/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/slab.h>
#include <linux/mman.h>
#include <net/sock.h>

#include "iw_cxgb4.h"

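/*
 * Read the hardware TPT entry for the given stag through the cxgb4 LLD
 * and log its decoded fields for debugging.
 */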
static void print_tpte(struct c4iw_dev *dev, u32 stag)
{
	int ret;
	struct fw_ri_tpte tpte;

	ret = cxgb4_read_tpte(dev->rdev.lldi.ports[0], stag,
			      (__be32 *)&tpte);
	if (ret) {
		dev_err(&dev->rdev.lldi.pdev->dev,
			"%s cxgb4_read_tpte err %d\n", __func__, ret);
		return;
	}
	pr_debug("stag idx 0x%x valid %d key 0x%x state %d pdid %d perm 0x%x ps %d len 0x%llx va 0x%llx\n",
		 stag & 0xffffff00,
		 FW_RI_TPTE_VALID_G(ntohl(tpte.valid_to_pdid)),
		 FW_RI_TPTE_STAGKEY_G(ntohl(tpte.valid_to_pdid)),
		 FW_RI_TPTE_STAGSTATE_G(ntohl(tpte.valid_to_pdid)),
		 FW_RI_TPTE_PDID_G(ntohl(tpte.valid_to_pdid)),
		 FW_RI_TPTE_PERM_G(ntohl(tpte.locread_to_qpid)),
		 FW_RI_TPTE_PS_G(ntohl(tpte.locread_to_qpid)),
		 ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo),
		 ((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo));
}

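/*
 * Log the raw error CQE and its decoded fields.  For ingress RDMA WRITE
 * and READ_RESP errors, also dump the TPT entry of the offending stag.
 */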
static void dump_err_cqe(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
{
	__be64 *p = (void *)err_cqe;

	dev_err(&dev->rdev.lldi.pdev->dev,
		"AE qpid %d opcode %d status 0x%x "
		"type %d len 0x%x wrid.hi 0x%x wrid.lo 0x%x\n",
		CQE_QPID(err_cqe), CQE_OPCODE(err_cqe),
		CQE_STATUS(err_cqe), CQE_TYPE(err_cqe), ntohl(err_cqe->len),
		CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));

	pr_debug("%016llx %016llx %016llx %016llx\n",
		 be64_to_cpu(p[0]), be64_to_cpu(p[1]), be64_to_cpu(p[2]),
		 be64_to_cpu(p[3]));

	/*
	 * Ingress WRITE and READ_RESP errors provide
	 * the offending stag, so parse and log it.
	 */
	if (RQ_TYPE(err_cqe) && (CQE_OPCODE(err_cqe) == FW_RI_RDMA_WRITE ||
				 CQE_OPCODE(err_cqe) == FW_RI_READ_RESP))
		print_tpte(dev, CQE_WRID_STAG(err_cqe));
}

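/*
 * Post an affiliated asynchronous event for an error CQE: terminate the
 * QP if it is still in RTS, pass the IB event to the consumer's event
 * handler, and run the completion handler if the CQ was armed.
 */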
static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
			  struct c4iw_qp *qhp,
			  struct t4_cqe *err_cqe,
			  enum ib_event_type ib_event)
{
	struct ib_event event;
	struct c4iw_qp_attributes attrs;
	unsigned long flag;

	dump_err_cqe(dev, err_cqe);

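	/*
	 * If the QP is still in RTS, move it to TERMINATE in response
	 * to the error.
	 */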
	if (qhp->attr.state == C4IW_QP_STATE_RTS) {
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		c4iw_modify_qp(qhp->rhp, qhp, C4IW_QP_ATTR_NEXT_STATE,
			       &attrs, 0);
	}

	event.event = ib_event;
	event.device = chp->ibcq.device;
	if (ib_event == IB_EVENT_CQ_ERR)
		event.element.cq = &chp->ibcq;
	else
		event.element.qp = &qhp->ibqp;
	if (qhp->ibqp.event_handler)
		(*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);

	if (t4_clear_cq_armed(&chp->cq)) {
		spin_lock_irqsave(&chp->comp_handler_lock, flag);
		(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
		spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
	}
}

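/*
 * Dispatch an async error CQE: look up the QP and the affected CQ, then
 * map the CQE status onto the appropriate IB event.
 */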
void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
{
	struct c4iw_cq *chp;
	struct c4iw_qp *qhp;
	u32 cqid;

	spin_lock_irq(&dev->lock);
	qhp = get_qhp(dev, CQE_QPID(err_cqe));
	if (!qhp) {
		pr_err("BAD AE qpid 0x%x opcode %d status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
		       CQE_QPID(err_cqe),
		       CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
		       CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
		       CQE_WRID_LOW(err_cqe));
		spin_unlock_irq(&dev->lock);
		goto out;
	}

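	/* The CQE type (SQ vs RQ) selects which of the QP's CQs is affected. */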
	if (SQ_TYPE(err_cqe))
		cqid = qhp->attr.scq;
	else
		cqid = qhp->attr.rcq;
	chp = get_chp(dev, cqid);
	if (!chp) {
		pr_err("BAD AE cqid 0x%x qpid 0x%x opcode %d status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
		       cqid, CQE_QPID(err_cqe),
		       CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
		       CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
		       CQE_WRID_LOW(err_cqe));
		spin_unlock_irq(&dev->lock);
		goto out;
	}

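	/*
	 * Take references on the QP and CQ before dropping the device lock
	 * so neither can be freed while the event is being posted.
	 */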
	c4iw_qp_add_ref(&qhp->ibqp);
	atomic_inc(&chp->refcnt);
	spin_unlock_irq(&dev->lock);

	/* Bad incoming write */
	if (RQ_TYPE(err_cqe) &&
	    (CQE_OPCODE(err_cqe) == FW_RI_RDMA_WRITE)) {
		post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_REQ_ERR);
		goto done;
	}

	switch (CQE_STATUS(err_cqe)) {

	/* Completion Events */
	case T4_ERR_SUCCESS:
		pr_err("AE with status 0!\n");
		break;

	case T4_ERR_STAG:
	case T4_ERR_PDID:
	case T4_ERR_QPID:
	case T4_ERR_ACCESS:
	case T4_ERR_WRAP:
	case T4_ERR_BOUND:
	case T4_ERR_INVALIDATE_SHARED_MR:
	case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
		post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_ACCESS_ERR);
		break;

	/* Device Fatal Errors */
	case T4_ERR_ECC:
	case T4_ERR_ECC_PSTAG:
	case T4_ERR_INTERNAL_ERR:
		post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_DEVICE_FATAL);
		break;

	/* QP Fatal Errors */
	case T4_ERR_OUT_OF_RQE:
	case T4_ERR_PBL_ADDR_BOUND:
	case T4_ERR_CRC:
	case T4_ERR_MARKER:
	case T4_ERR_PDU_LEN_ERR:
	case T4_ERR_DDP_VERSION:
	case T4_ERR_RDMA_VERSION:
	case T4_ERR_OPCODE:
	case T4_ERR_DDP_QUEUE_NUM:
	case T4_ERR_MSN:
	case T4_ERR_TBIT:
	case T4_ERR_MO:
	case T4_ERR_MSN_GAP:
	case T4_ERR_MSN_RANGE:
	case T4_ERR_RQE_ADDR_BOUND:
	case T4_ERR_IRD_OVERFLOW:
		post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL);
		break;

	default:
		pr_err("Unknown T4 status 0x%x QPID 0x%x\n",
		       CQE_STATUS(err_cqe), qhp->wq.sq.qid);
		post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL);
		break;
	}
done:
	if (atomic_dec_and_test(&chp->refcnt))
		wake_up(&chp->wait);
	c4iw_qp_rem_ref(&qhp->ibqp);
out:
	return;
}

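/*
 * Handle a completion event notification for the given CQ id: take a
 * reference on the CQ, clear its armed state, and invoke the consumer's
 * completion handler under comp_handler_lock.
 */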
int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
{
	struct c4iw_cq *chp;
	unsigned long flag;

	spin_lock_irqsave(&dev->lock, flag);
	chp = get_chp(dev, qid);
	if (chp) {
		atomic_inc(&chp->refcnt);
		spin_unlock_irqrestore(&dev->lock, flag);
		t4_clear_cq_armed(&chp->cq);
		spin_lock_irqsave(&chp->comp_handler_lock, flag);
		(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
		spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
		if (atomic_dec_and_test(&chp->refcnt))
			wake_up(&chp->wait);
	} else {
		pr_debug("unknown cqid 0x%x\n", qid);
		spin_unlock_irqrestore(&dev->lock, flag);
	}
	return 0;
}