// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause

/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
/*          Kai Shen <kaishen@linux.alibaba.com> */
/* Copyright (c) 2020-2022, Alibaba Group. */

#include "erdma_verbs.h"

#define MAX_POLL_CHUNK_SIZE 16

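/*
 * Write the current consumer index with the ARM bit to the EQ's doorbell
 * record and doorbell register, re-arming the event queue.
 */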
void notify_eq(struct erdma_eq *eq)
{
	u64 db_data = FIELD_PREP(ERDMA_EQDB_CI_MASK, eq->ci) |
		      FIELD_PREP(ERDMA_EQDB_ARM_MASK, 1);

	*eq->db_record = db_data;
	writeq(db_data, eq->db);

	atomic64_inc(&eq->notify_num);
}

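/*
 * Return the EQE at the current consumer index if its owner bit matches
 * the expected phase (derived from ci and depth), otherwise NULL.
 */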
void *get_next_valid_eqe(struct erdma_eq *eq)
{
	u64 *eqe = get_queue_entry(eq->qbuf, eq->ci, eq->depth, EQE_SHIFT);
	u32 owner = FIELD_GET(ERDMA_CEQE_HDR_O_MASK, READ_ONCE(*eqe));

	return owner ^ !!(eq->ci & eq->depth) ? eqe : NULL;
}

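/*
 * Poll up to MAX_POLL_CHUNK_SIZE entries from the async event queue and
 * dispatch them: CQ errors go to the CQ's event handler, all other events
 * are reported as IB_EVENT_QP_FATAL to the affected QP. The AEQ is
 * re-armed afterwards.
 */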
void erdma_aeq_event_handler(struct erdma_dev *dev)
{
	struct erdma_aeqe *aeqe;
	u32 cqn, qpn;
	struct erdma_qp *qp;
	struct erdma_cq *cq;
	struct ib_event event;
	u32 poll_cnt = 0;

	memset(&event, 0, sizeof(event));

	while (poll_cnt < MAX_POLL_CHUNK_SIZE) {
		aeqe = get_next_valid_eqe(&dev->aeq);
		if (!aeqe)
			break;

		dma_rmb();

		dev->aeq.ci++;
		atomic64_inc(&dev->aeq.event_num);
		poll_cnt++;

		if (FIELD_GET(ERDMA_AEQE_HDR_TYPE_MASK,
			      le32_to_cpu(aeqe->hdr)) == ERDMA_AE_TYPE_CQ_ERR) {
			cqn = le32_to_cpu(aeqe->event_data0);
			cq = find_cq_by_cqn(dev, cqn);
			if (!cq)
				continue;

			event.device = cq->ibcq.device;
			event.element.cq = &cq->ibcq;
			event.event = IB_EVENT_CQ_ERR;
			if (cq->ibcq.event_handler)
				cq->ibcq.event_handler(&event,
						       cq->ibcq.cq_context);
		} else {
			qpn = le32_to_cpu(aeqe->event_data0);
			qp = find_qp_by_qpn(dev, qpn);
			if (!qp)
				continue;

			event.device = qp->ibqp.device;
			event.element.qp = &qp->ibqp;
			event.event = IB_EVENT_QP_FATAL;
			if (qp->ibqp.event_handler)
				qp->ibqp.event_handler(&event,
						       qp->ibqp.qp_context);
		}
	}

	notify_eq(&dev->aeq);
}

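/*
 * Allocate the AEQ buffer (queue entries followed by the doorbell record)
 * and program its DMA address, depth and doorbell-record address into the
 * device registers.
 */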
int erdma_aeq_init(struct erdma_dev *dev)
{
	struct erdma_eq *eq = &dev->aeq;
	u32 buf_size;

	eq->depth = ERDMA_DEFAULT_EQ_DEPTH;
	buf_size = eq->depth << EQE_SHIFT;

	eq->qbuf =
		dma_alloc_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size),
				   &eq->qbuf_dma_addr, GFP_KERNEL | __GFP_ZERO);
	if (!eq->qbuf)
		return -ENOMEM;

	spin_lock_init(&eq->lock);
	atomic64_set(&eq->event_num, 0);
	atomic64_set(&eq->notify_num, 0);

	eq->db = dev->func_bar + ERDMA_REGS_AEQ_DB_REG;
	eq->db_record = (u64 *)(eq->qbuf + buf_size);

	erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_H_REG,
			  upper_32_bits(eq->qbuf_dma_addr));
	erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_L_REG,
			  lower_32_bits(eq->qbuf_dma_addr));
	erdma_reg_write32(dev, ERDMA_REGS_AEQ_DEPTH_REG, eq->depth);
	erdma_reg_write64(dev, ERDMA_AEQ_DB_HOST_ADDR_REG,
			  eq->qbuf_dma_addr + buf_size);

	return 0;
}

void erdma_aeq_destroy(struct erdma_dev *dev)
{
	struct erdma_eq *eq = &dev->aeq;

	dma_free_coherent(&dev->pdev->dev,
			  WARPPED_BUFSIZE(eq->depth << EQE_SHIFT), eq->qbuf,
			  eq->qbuf_dma_addr);
}

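/*
 * Poll up to MAX_POLL_CHUNK_SIZE entries from a completion event queue
 * and invoke the completion handler of each referenced CQ, then re-arm
 * the CEQ.
 */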
void erdma_ceq_completion_handler(struct erdma_eq_cb *ceq_cb)
{
	struct erdma_dev *dev = ceq_cb->dev;
	struct erdma_cq *cq;
	u32 poll_cnt = 0;
	u64 *ceqe;
	int cqn;

	if (!ceq_cb->ready)
		return;

	while (poll_cnt < MAX_POLL_CHUNK_SIZE) {
		ceqe = get_next_valid_eqe(&ceq_cb->eq);
		if (!ceqe)
			break;

		dma_rmb();
		ceq_cb->eq.ci++;
		poll_cnt++;
		cqn = FIELD_GET(ERDMA_CEQE_HDR_CQN_MASK, READ_ONCE(*ceqe));

		cq = find_cq_by_cqn(dev, cqn);
		if (!cq)
			continue;

		if (rdma_is_kernel_res(&cq->ibcq.res))
			cq->kern_cq.cmdsn++;

		if (cq->ibcq.comp_handler)
			cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
	}

	notify_eq(&ceq_cb->eq);
}

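/* CEQ interrupt handler: defer CEQE processing to the per-CEQ tasklet. */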
static irqreturn_t erdma_intr_ceq_handler(int irq, void *data)
{
	struct erdma_eq_cb *ceq_cb = data;

	tasklet_schedule(&ceq_cb->tasklet);

	return IRQ_HANDLED;
}

static void erdma_intr_ceq_task(unsigned long data)
{
	erdma_ceq_completion_handler((struct erdma_eq_cb *)data);
}

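/*
 * Request the MSI-X vector for a CEQ (CEQ N uses vector N + 1, vector 0
 * belongs to the CMDQ EQ) and set its CPU affinity hint.
 */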
static int erdma_set_ceq_irq(struct erdma_dev *dev, u16 ceqn)
{
	struct erdma_eq_cb *eqc = &dev->ceqs[ceqn];
	int err;

	snprintf(eqc->irq.name, ERDMA_IRQNAME_SIZE, "erdma-ceq%u@pci:%s", ceqn,
		 pci_name(dev->pdev));
	eqc->irq.msix_vector = pci_irq_vector(dev->pdev, ceqn + 1);

	tasklet_init(&dev->ceqs[ceqn].tasklet, erdma_intr_ceq_task,
		     (unsigned long)&dev->ceqs[ceqn]);

	cpumask_set_cpu(cpumask_local_spread(ceqn + 1, dev->attrs.numa_node),
			&eqc->irq.affinity_hint_mask);

	err = request_irq(eqc->irq.msix_vector, erdma_intr_ceq_handler, 0,
			  eqc->irq.name, eqc);
	if (err) {
		dev_err(&dev->pdev->dev, "failed to request_irq(%d)\n", err);
		return err;
	}

	irq_set_affinity_hint(eqc->irq.msix_vector,
			      &eqc->irq.affinity_hint_mask);

	return 0;
}

static void erdma_free_ceq_irq(struct erdma_dev *dev, u16 ceqn)
{
	struct erdma_eq_cb *eqc = &dev->ceqs[ceqn];

	irq_set_affinity_hint(eqc->irq.msix_vector, NULL);
	free_irq(eqc->irq.msix_vector, eqc);
}

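/* Post a CREATE_EQ command to the command queue for the given CEQ. */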
static int create_eq_cmd(struct erdma_dev *dev, u32 eqn, struct erdma_eq *eq)
{
	struct erdma_cmdq_create_eq_req req;
	dma_addr_t db_info_dma_addr;

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
				CMDQ_OPCODE_CREATE_EQ);
	req.eqn = eqn;
	req.depth = ilog2(eq->depth);
	req.qbuf_addr = eq->qbuf_dma_addr;
	req.qtype = ERDMA_EQ_TYPE_CEQ;
	/* Vector index is the same as EQN. */
	req.vector_idx = eqn;
	db_info_dma_addr = eq->qbuf_dma_addr + (eq->depth << EQE_SHIFT);
	req.db_dma_addr_l = lower_32_bits(db_info_dma_addr);
	req.db_dma_addr_h = upper_32_bits(db_info_dma_addr);

	return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
}

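/*
 * Allocate and initialize one CEQ and create it on the device; the CEQ is
 * marked ready only if the CREATE_EQ command succeeds.
 */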
static int erdma_ceq_init_one(struct erdma_dev *dev, u16 ceqn)
{
	struct erdma_eq *eq = &dev->ceqs[ceqn].eq;
	u32 buf_size = ERDMA_DEFAULT_EQ_DEPTH << EQE_SHIFT;
	int ret;

	eq->qbuf =
		dma_alloc_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size),
				   &eq->qbuf_dma_addr, GFP_KERNEL | __GFP_ZERO);
	if (!eq->qbuf)
		return -ENOMEM;

	spin_lock_init(&eq->lock);
	atomic64_set(&eq->event_num, 0);
	atomic64_set(&eq->notify_num, 0);

	eq->depth = ERDMA_DEFAULT_EQ_DEPTH;
	eq->db = dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG +
		 (ceqn + 1) * ERDMA_DB_SIZE;
	eq->db_record = (u64 *)(eq->qbuf + buf_size);
	eq->ci = 0;
	dev->ceqs[ceqn].dev = dev;

	/* CEQ indexed from 1, 0 rsvd for CMDQ-EQ. */
	ret = create_eq_cmd(dev, ceqn + 1, eq);
	dev->ceqs[ceqn].ready = ret ? false : true;

	return ret;
}

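/*
 * Destroy one CEQ on the device; its queue buffer is only freed if the
 * DESTROY_EQ command succeeds.
 */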
static void erdma_ceq_uninit_one(struct erdma_dev *dev, u16 ceqn)
{
	struct erdma_eq *eq = &dev->ceqs[ceqn].eq;
	u32 buf_size = ERDMA_DEFAULT_EQ_DEPTH << EQE_SHIFT;
	struct erdma_cmdq_destroy_eq_req req;
	int err;

	dev->ceqs[ceqn].ready = 0;

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
				CMDQ_OPCODE_DESTROY_EQ);
	/* CEQ indexed from 1, 0 rsvd for CMDQ-EQ. */
	req.eqn = ceqn + 1;
	req.qtype = ERDMA_EQ_TYPE_CEQ;
	req.vector_idx = ceqn + 1;

	err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
	if (err)
		return;

	dma_free_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size), eq->qbuf,
			  eq->qbuf_dma_addr);
}

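/*
 * Create one CEQ per completion vector (irq_num - 1 in total) and request
 * its interrupt; on failure, tear down everything set up so far.
 */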
int erdma_ceqs_init(struct erdma_dev *dev)
{
	u32 i, j;
	int err;

	for (i = 0; i < dev->attrs.irq_num - 1; i++) {
		err = erdma_ceq_init_one(dev, i);
		if (err)
			goto out_err;

		err = erdma_set_ceq_irq(dev, i);
		if (err) {
			erdma_ceq_uninit_one(dev, i);
			goto out_err;
		}
	}

	return 0;

out_err:
	for (j = 0; j < i; j++) {
		erdma_free_ceq_irq(dev, j);
		erdma_ceq_uninit_one(dev, j);
	}

	return err;
}

void erdma_ceqs_uninit(struct erdma_dev *dev)
{
	u32 i;

	for (i = 0; i < dev->attrs.irq_num - 1; i++) {
		erdma_free_ceq_irq(dev, i);
		erdma_ceq_uninit_one(dev, i);
	}
}