// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2018 Hisilicon Limited.
 */

#include <rdma/ib_umem.h>
#include <rdma/hns-abi.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"

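/*
 * Dispatch an asynchronous hardware event to the matching SRQ.  The SRQ is
 * looked up by number under the xarray lock and its refcount is held across
 * the callback so that hns_roce_srq_free() cannot tear the SRQ down while
 * the event is being delivered.
 */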
void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type)
{
	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
	struct hns_roce_srq *srq;

	xa_lock(&srq_table->xa);
	srq = xa_load(&srq_table->xa, srqn & (hr_dev->caps.num_srqs - 1));
	if (srq)
		atomic_inc(&srq->refcount);
	xa_unlock(&srq_table->xa);

	if (!srq) {
		dev_warn(hr_dev->dev, "Async event for bogus SRQ %08x\n", srqn);
		return;
	}

	srq->event(srq, event_type);

	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
}
EXPORT_SYMBOL_GPL(hns_roce_srq_event);

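/*
 * Translate a hns hardware event code into the corresponding ib_event and
 * forward it to the consumer's event handler, if one was registered at SRQ
 * creation time.
 */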
static void hns_roce_ib_srq_event(struct hns_roce_srq *srq,
				  enum hns_roce_event event_type)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
	struct ib_srq *ibsrq = &srq->ibsrq;
	struct ib_event event;

	if (ibsrq->event_handler) {
		event.device      = ibsrq->device;
		event.element.srq = ibsrq;
		switch (event_type) {
		case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
			event.event = IB_EVENT_SRQ_ERR;
			break;
		default:
			dev_err(hr_dev->dev,
				"hns_roce: Unexpected event type 0x%x on SRQ %06lx\n",
				event_type, srq->srqn);
			return;
		}

		ibsrq->event_handler(&event, ibsrq->srq_context);
	}
}

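/*
 * Mailbox wrappers: SW2HW hands a software-initialized SRQ context to the
 * hardware, HW2SW takes it back.  For HW2SW a NULL mailbox means the caller
 * does not need the returned context, which is signalled to the firmware
 * through the op_modifier argument.
 */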
static int hns_roce_sw2hw_srq(struct hns_roce_dev *dev,
			      struct hns_roce_cmd_mailbox *mailbox,
			      unsigned long srq_num)
{
	return hns_roce_cmd_mbox(dev, mailbox->dma, 0, srq_num, 0,
				 HNS_ROCE_CMD_SW2HW_SRQ,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}

static int hns_roce_hw2sw_srq(struct hns_roce_dev *dev,
			      struct hns_roce_cmd_mailbox *mailbox,
			      unsigned long srq_num)
{
	return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, srq_num,
				 mailbox ? 0 : 1, HNS_ROCE_CMD_HW2SW_SRQ,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}

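/*
 * Allocate the software and hardware state for an SRQ: look up the MTT
 * entries for the WQE and index-queue buffers, reserve an SRQ number,
 * attach the context memory, publish the SRQ in the xarray, and finally
 * issue SW2HW_SRQ so the hardware takes ownership of the context.
 */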
int hns_roce_srq_alloc(struct hns_roce_dev *hr_dev, u32 pdn, u32 cqn, u16 xrcd,
		       struct hns_roce_mtt *hr_mtt, u64 db_rec_addr,
		       struct hns_roce_srq *srq)
{
	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
	struct hns_roce_cmd_mailbox *mailbox;
	dma_addr_t dma_handle_wqe;
	dma_addr_t dma_handle_idx;
	u64 *mtts_wqe;
	u64 *mtts_idx;
	int ret;

	/* Get the physical address of srq buf */
	mtts_wqe = hns_roce_table_find(hr_dev,
				       &hr_dev->mr_table.mtt_srqwqe_table,
				       srq->mtt.first_seg,
				       &dma_handle_wqe);
	if (!mtts_wqe) {
		dev_err(hr_dev->dev,
			"SRQ alloc: failed to find srq buf addr.\n");
		return -EINVAL;
	}

	/* Get physical address of idx que buf */
	mtts_idx = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_idx_table,
				       srq->idx_que.mtt.first_seg,
				       &dma_handle_idx);
	if (!mtts_idx) {
		dev_err(hr_dev->dev,
			"SRQ alloc: failed to find idx que buf addr.\n");
		return -EINVAL;
	}

	ret = hns_roce_bitmap_alloc(&srq_table->bitmap, &srq->srqn);
	if (ret) {
		dev_err(hr_dev->dev, "SRQ alloc: failed to alloc index.\n");
		return -ENOMEM;
	}

	ret = hns_roce_table_get(hr_dev, &srq_table->table, srq->srqn);
	if (ret)
		goto err_out;

	ret = xa_err(xa_store(&srq_table->xa, srq->srqn, srq, GFP_KERNEL));
	if (ret)
		goto err_put;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox)) {
		ret = PTR_ERR(mailbox);
		goto err_xa;
	}

	hr_dev->hw->write_srqc(hr_dev, srq, pdn, xrcd, cqn, mailbox->buf,
			       mtts_wqe, mtts_idx, dma_handle_wqe,
			       dma_handle_idx);

	ret = hns_roce_sw2hw_srq(hr_dev, mailbox, srq->srqn);
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	if (ret)
		goto err_xa;

	/* The initial reference is dropped by hns_roce_srq_free() */
	atomic_set(&srq->refcount, 1);
	init_completion(&srq->free);
	return ret;

err_xa:
	xa_erase(&srq_table->xa, srq->srqn);

err_put:
	hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);

err_out:
	hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR);
	return ret;
}

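/*
 * Undo hns_roce_srq_alloc(): detach the context from the hardware, remove
 * the SRQ from the xarray, and wait for any in-flight event handlers to
 * drop their references before releasing the context memory and the SRQ
 * number.
 */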
void hns_roce_srq_free(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
	int ret;

	ret = hns_roce_hw2sw_srq(hr_dev, NULL, srq->srqn);
	if (ret)
		dev_err(hr_dev->dev, "HW2SW_SRQ failed (%d) for SRQN %06lx\n",
			ret, srq->srqn);

	xa_erase(&srq_table->xa, srq->srqn);

	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
	wait_for_completion(&srq->free);

	hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);
	hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR);
}

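/*
 * Set up the index queue for a kernel SRQ: a bitmap tracking which index
 * entries are free (all marked free initially) plus the buffer that holds
 * the entries themselves.
 */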
static int hns_roce_create_idx_que(struct ib_pd *pd, struct hns_roce_srq *srq,
				   u32 page_shift)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct hns_roce_idx_que *idx_que = &srq->idx_que;
	u32 bitmap_num;
	int i;

	/* Round the number of entries up to a whole number of u64 words */
	bitmap_num = HNS_ROCE_ALOGN_UP(srq->max, 8 * sizeof(u64));

	idx_que->bitmap = kzalloc(bitmap_num / 8, GFP_KERNEL);
	if (!idx_que->bitmap)
		return -ENOMEM;

	/* Convert from a bit count to a u64 word count */
	bitmap_num = bitmap_num / (8 * sizeof(u64));

	if (hns_roce_buf_alloc(hr_dev, idx_que->buf_size, (1 << page_shift) * 2,
			       &idx_que->idx_buf, page_shift)) {
		kfree(idx_que->bitmap);
		return -ENOMEM;
	}

	/* Mark every index entry as free */
	for (i = 0; i < bitmap_num; i++)
		idx_que->bitmap[i] = ~(0UL);

	return 0;
}

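/*
 * Verbs entry point for SRQ creation.  Validates the requested sizes
 * against the device caps, sizes the WQE and index-queue buffers, then
 * maps user memory (udata path) or allocates kernel buffers before
 * handing everything to hns_roce_srq_alloc().
 */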
struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
				   struct ib_srq_init_attr *srq_init_attr,
				   struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct hns_roce_ib_create_srq_resp resp = {};
	struct hns_roce_srq *srq;
	int srq_desc_size;
	int srq_buf_size;
	u32 page_shift;
	int ret = 0;
	u32 npages;
	u32 cqn;

	/* Check the requested SRQ wqe and sge numbers against the caps */
	if (srq_init_attr->attr.max_wr >= hr_dev->caps.max_srq_wrs ||
	    srq_init_attr->attr.max_sge > hr_dev->caps.max_srq_sges)
		return ERR_PTR(-EINVAL);

	srq = kzalloc(sizeof(*srq), GFP_KERNEL);
	if (!srq)
		return ERR_PTR(-ENOMEM);

	mutex_init(&srq->mutex);
	spin_lock_init(&srq->lock);

	srq->max = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
	srq->max_gs = srq_init_attr->attr.max_sge;

	/* 16 bytes per SGE, with a one-SGE minimum per WQE */
	srq_desc_size = max(16, 16 * srq->max_gs);

	srq->wqe_shift = ilog2(srq_desc_size);

	srq_buf_size = srq->max * srq_desc_size;

	srq->idx_que.entry_sz = HNS_ROCE_IDX_QUE_ENTRY_SZ;
	srq->idx_que.buf_size = srq->max * srq->idx_que.entry_sz;
	srq->mtt.mtt_type = MTT_TYPE_SRQWQE;
	srq->idx_que.mtt.mtt_type = MTT_TYPE_IDX;

	if (udata) {
		struct hns_roce_ib_create_srq ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
			ret = -EFAULT;
			goto err_srq;
		}

		srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
					srq_buf_size, 0, 0);
		if (IS_ERR(srq->umem)) {
			ret = PTR_ERR(srq->umem);
			goto err_srq;
		}

		if (hr_dev->caps.srqwqe_buf_pg_sz) {
			npages = (ib_umem_page_count(srq->umem) +
				  (1 << hr_dev->caps.srqwqe_buf_pg_sz) - 1) /
				  (1 << hr_dev->caps.srqwqe_buf_pg_sz);
			page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
			ret = hns_roce_mtt_init(hr_dev, npages,
						page_shift,
						&srq->mtt);
		} else {
			ret = hns_roce_mtt_init(hr_dev,
						ib_umem_page_count(srq->umem),
						srq->umem->page_shift,
						&srq->mtt);
		}
		if (ret)
			goto err_buf;

		ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->mtt, srq->umem);
		if (ret)
			goto err_srq_mtt;

		/* config index queue BA */
		srq->idx_que.umem = ib_umem_get(pd->uobject->context,
						ucmd.que_addr,
						srq->idx_que.buf_size, 0, 0);
		if (IS_ERR(srq->idx_que.umem)) {
			dev_err(hr_dev->dev,
				"ib_umem_get error for index queue\n");
			ret = PTR_ERR(srq->idx_que.umem);
			goto err_srq_mtt;
		}

		if (hr_dev->caps.idx_buf_pg_sz) {
			npages = (ib_umem_page_count(srq->idx_que.umem) +
				  (1 << hr_dev->caps.idx_buf_pg_sz) - 1) /
				  (1 << hr_dev->caps.idx_buf_pg_sz);
			page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
			ret = hns_roce_mtt_init(hr_dev, npages,
						page_shift, &srq->idx_que.mtt);
		} else {
			ret = hns_roce_mtt_init(hr_dev,
				       ib_umem_page_count(srq->idx_que.umem),
				       srq->idx_que.umem->page_shift,
				       &srq->idx_que.mtt);
		}

		if (ret) {
			dev_err(hr_dev->dev,
				"hns_roce_mtt_init error for idx que\n");
			goto err_idx_mtt;
		}

		ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->idx_que.mtt,
						 srq->idx_que.umem);
		if (ret) {
			dev_err(hr_dev->dev,
				"hns_roce_ib_umem_write_mtt error for idx que\n");
			goto err_idx_buf;
		}
	} else {
		page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
		if (hns_roce_buf_alloc(hr_dev, srq_buf_size,
				       (1 << page_shift) * 2,
				       &srq->buf, page_shift)) {
			ret = -ENOMEM;
			goto err_srq;
		}

		srq->head = 0;
		srq->tail = srq->max - 1;

		ret = hns_roce_mtt_init(hr_dev, srq->buf.npages,
					srq->buf.page_shift, &srq->mtt);
		if (ret)
			goto err_buf;

		ret = hns_roce_buf_write_mtt(hr_dev, &srq->mtt, &srq->buf);
		if (ret)
			goto err_srq_mtt;

		page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
		ret = hns_roce_create_idx_que(pd, srq, page_shift);
		if (ret) {
			dev_err(hr_dev->dev, "Create idx queue failed (%d)!\n",
				ret);
			goto err_srq_mtt;
		}

		/* Init mtt table for idx_que */
		ret = hns_roce_mtt_init(hr_dev, srq->idx_que.idx_buf.npages,
					srq->idx_que.idx_buf.page_shift,
					&srq->idx_que.mtt);
		if (ret)
			goto err_create_idx;

		/* Write buffer address into the mtt table */
		ret = hns_roce_buf_write_mtt(hr_dev, &srq->idx_que.mtt,
					     &srq->idx_que.idx_buf);
		if (ret)
			goto err_idx_buf;

		srq->wrid = kvmalloc_array(srq->max, sizeof(u64), GFP_KERNEL);
		if (!srq->wrid) {
			ret = -ENOMEM;
			goto err_idx_buf;
		}
	}
	cqn = ib_srq_has_cq(srq_init_attr->srq_type) ?
	      to_hr_cq(srq_init_attr->ext.cq)->cqn : 0;

	srq->db_reg_l = hr_dev->reg_base + SRQ_DB_REG;

	ret = hns_roce_srq_alloc(hr_dev, to_hr_pd(pd)->pdn, cqn, 0,
				 &srq->mtt, 0, srq);
	if (ret)
		goto err_wrid;

	srq->event = hns_roce_ib_srq_event;
	srq->ibsrq.ext.xrc.srq_num = srq->srqn;
	resp.srqn = srq->srqn;

	if (udata) {
		if (ib_copy_to_udata(udata, &resp,
				     min(udata->outlen, sizeof(resp)))) {
			ret = -EFAULT;
			goto err_srqc_alloc;
		}
	}

	return &srq->ibsrq;

	/*
	 * The labels below fall through so that each later entry point
	 * unwinds everything the earlier steps set up.  The idx_buf free
	 * and bitmap kfree are no-ops in the udata path, where those
	 * objects were never allocated.
	 */
err_srqc_alloc:
	hns_roce_srq_free(hr_dev, srq);

err_wrid:
	kvfree(srq->wrid);

err_idx_buf:
	hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);

err_idx_mtt:
	if (udata)
		ib_umem_release(srq->idx_que.umem);

err_create_idx:
	hns_roce_buf_free(hr_dev, srq->idx_que.buf_size,
			  &srq->idx_que.idx_buf);
	kfree(srq->idx_que.bitmap);

err_srq_mtt:
	hns_roce_mtt_cleanup(hr_dev, &srq->mtt);

err_buf:
	if (udata)
		ib_umem_release(srq->umem);
	else
		hns_roce_buf_free(hr_dev, srq_buf_size, &srq->buf);

err_srq:
	kfree(srq);
	return ERR_PTR(ret);
}

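/*
 * Verbs entry point for SRQ teardown: detach the context from hardware,
 * then release the MTTs, the user memory or kernel buffers, and the SRQ
 * structure itself.
 */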
int hns_roce_destroy_srq(struct ib_srq *ibsrq)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
	struct hns_roce_srq *srq = to_hr_srq(ibsrq);

	hns_roce_srq_free(hr_dev, srq);
	hns_roce_mtt_cleanup(hr_dev, &srq->mtt);

	if (ibsrq->uobject) {
		hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
		ib_umem_release(srq->idx_que.umem);
		ib_umem_release(srq->umem);
	} else {
		kvfree(srq->wrid);
		hns_roce_buf_free(hr_dev, srq->max << srq->wqe_shift,
				  &srq->buf);
	}

	kfree(srq);

	return 0;
}

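/*
 * One-time setup of the per-device SRQ table: an xarray mapping SRQ
 * numbers to SRQs for event dispatch, and a bitmap allocator covering
 * num_srqs entries with the reserved ones excluded.
 */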
int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;

	xa_init(&srq_table->xa);

	return hns_roce_bitmap_init(&srq_table->bitmap, hr_dev->caps.num_srqs,
				    hr_dev->caps.num_srqs - 1,
				    hr_dev->caps.reserved_srqs, 0);
}

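/* Tear down the SRQ bitmap allocator on device removal. */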
void hns_roce_cleanup_srq_table(struct hns_roce_dev *hr_dev)
{
	hns_roce_bitmap_cleanup(&hr_dev->srq_table.bitmap);
}