// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2018 Hisilicon Limited.
 */

#include <rdma/ib_umem.h>
#include <rdma/hns-abi.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"

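/*
 * Dispatch an asynchronous SRQ event from the hardware to the owning SRQ.
 * The reference taken under the xarray lock keeps the SRQ alive while its
 * event callback runs; hns_roce_srq_free() waits on srq->free until every
 * such reference has been dropped before tearing the SRQ down.
 */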
void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type)
{
	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
	struct hns_roce_srq *srq;

	xa_lock(&srq_table->xa);
	srq = xa_load(&srq_table->xa, srqn & (hr_dev->caps.num_srqs - 1));
	if (srq)
		atomic_inc(&srq->refcount);
	xa_unlock(&srq_table->xa);

	if (!srq) {
		dev_warn(hr_dev->dev, "Async event for bogus SRQ %08x\n", srqn);
		return;
	}

	srq->event(srq, event_type);

	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
}

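/*
 * Translate a hardware SRQ event into the corresponding IB event and
 * forward it to the consumer's event handler, if one is registered.
 */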
static void hns_roce_ib_srq_event(struct hns_roce_srq *srq,
				  enum hns_roce_event event_type)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
	struct ib_srq *ibsrq = &srq->ibsrq;
	struct ib_event event;

	if (ibsrq->event_handler) {
		event.device      = ibsrq->device;
		event.element.srq = ibsrq;
		switch (event_type) {
		case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
			event.event = IB_EVENT_SRQ_ERR;
			break;
		default:
			dev_err(hr_dev->dev,
				"unexpected event type 0x%x on SRQ %06lx\n",
				event_type, srq->srqn);
			return;
		}

		ibsrq->event_handler(&event, ibsrq->srq_context);
	}
}

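/* Post a SW2HW_SRQ mailbox command to install the SRQ context in hardware. */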
static int hns_roce_sw2hw_srq(struct hns_roce_dev *dev,
			      struct hns_roce_cmd_mailbox *mailbox,
			      unsigned long srq_num)
{
	return hns_roce_cmd_mbox(dev, mailbox->dma, 0, srq_num, 0,
				 HNS_ROCE_CMD_SW2HW_SRQ,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}

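/*
 * Post a HW2SW_SRQ mailbox command to detach the SRQ context from hardware.
 * When called without a mailbox, the output address is 0 and the op
 * modifier is 1, so the context is not copied back to the driver.
 */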
static int hns_roce_hw2sw_srq(struct hns_roce_dev *dev,
			      struct hns_roce_cmd_mailbox *mailbox,
			      unsigned long srq_num)
{
	return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, srq_num,
				 mailbox ? 0 : 1, HNS_ROCE_CMD_HW2SW_SRQ,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}

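/*
 * Allocate and install an SRQ context: look up the MTT entries for the WQE
 * and index-queue buffers, reserve an SRQN from the bitmap, get a HEM table
 * entry, publish the SRQ in the xarray, then hand the context to hardware
 * with a SW2HW_SRQ mailbox command. On failure, everything is unwound in
 * reverse order.
 */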
static int hns_roce_srq_alloc(struct hns_roce_dev *hr_dev, u32 pdn, u32 cqn,
			      u16 xrcd, struct hns_roce_mtt *hr_mtt,
			      u64 db_rec_addr, struct hns_roce_srq *srq)
{
	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
	struct hns_roce_cmd_mailbox *mailbox;
	dma_addr_t dma_handle_wqe;
	dma_addr_t dma_handle_idx;
	u64 *mtts_wqe;
	u64 *mtts_idx;
	int ret;

	/* Get the physical address of the SRQ WQE buf */
	mtts_wqe = hns_roce_table_find(hr_dev,
				       &hr_dev->mr_table.mtt_srqwqe_table,
				       srq->mtt.first_seg,
				       &dma_handle_wqe);
	if (!mtts_wqe) {
		dev_err(hr_dev->dev,
			"SRQ alloc: failed to find SRQ buf addr.\n");
		return -EINVAL;
	}

	/* Get the physical address of the idx que buf */
	mtts_idx = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_idx_table,
				       srq->idx_que.mtt.first_seg,
				       &dma_handle_idx);
	if (!mtts_idx) {
		dev_err(hr_dev->dev,
			"SRQ alloc: failed to find idx que buf addr.\n");
		return -EINVAL;
	}

	ret = hns_roce_bitmap_alloc(&srq_table->bitmap, &srq->srqn);
	if (ret == -1) {
		dev_err(hr_dev->dev, "SRQ alloc: failed to alloc index.\n");
		return -ENOMEM;
	}

	ret = hns_roce_table_get(hr_dev, &srq_table->table, srq->srqn);
	if (ret)
		goto err_out;

	ret = xa_err(xa_store(&srq_table->xa, srq->srqn, srq, GFP_KERNEL));
	if (ret)
		goto err_put;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox)) {
		ret = PTR_ERR(mailbox);
		goto err_xa;
	}

	hr_dev->hw->write_srqc(hr_dev, srq, pdn, xrcd, cqn, mailbox->buf,
			       mtts_wqe, mtts_idx, dma_handle_wqe,
			       dma_handle_idx);

	ret = hns_roce_sw2hw_srq(hr_dev, mailbox, srq->srqn);
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	if (ret)
		goto err_xa;

	/* The initial reference is released by hns_roce_srq_free() */
	atomic_set(&srq->refcount, 1);
	init_completion(&srq->free);
	return ret;

err_xa:
	xa_erase(&srq_table->xa, srq->srqn);

err_put:
	hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);

err_out:
	hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR);
	return ret;
}

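/*
 * Tear down an SRQ: move the context back from hardware, unpublish the SRQ
 * so no new events can find it, wait for in-flight event handlers to drop
 * their references, then release the HEM table entry and the SRQN.
 */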
static void hns_roce_srq_free(struct hns_roce_dev *hr_dev,
			      struct hns_roce_srq *srq)
{
	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
	int ret;

	ret = hns_roce_hw2sw_srq(hr_dev, NULL, srq->srqn);
	if (ret)
		dev_err(hr_dev->dev, "HW2SW_SRQ failed (%d) for SRQN %06lx\n",
			ret, srq->srqn);

	xa_erase(&srq_table->xa, srq->srqn);

	/* Drop the initial reference and wait for event handlers to finish */
	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
	wait_for_completion(&srq->free);

	hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);
	hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR);
}

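/*
 * Allocate the index queue for a kernel SRQ: a bitmap used to track WQE
 * indexes plus the DMA buffer that holds the index queue entries.
 */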
static int hns_roce_create_idx_que(struct ib_pd *pd, struct hns_roce_srq *srq,
				   u32 page_shift)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct hns_roce_idx_que *idx_que = &srq->idx_que;

	idx_que->bitmap = bitmap_zalloc(srq->max, GFP_KERNEL);
	if (!idx_que->bitmap)
		return -ENOMEM;

	/* idx_que->buf_size was set by the caller before we get here */
	if (hns_roce_buf_alloc(hr_dev, idx_que->buf_size, (1 << page_shift) * 2,
			       &idx_que->idx_buf, page_shift)) {
		bitmap_free(idx_que->bitmap);
		return -ENOMEM;
	}

	return 0;
}

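/*
 * Create an SRQ. For a userspace SRQ (udata != NULL), the WQE and index
 * queue buffers are pinned user memory registered through umems; for a
 * kernel SRQ they are allocated by the driver, together with the wrid
 * array and the index-queue bitmap. In both cases the buffers are
 * described by MTTs before the SRQ context is installed in hardware.
 */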
int hns_roce_create_srq(struct ib_srq *ib_srq,
			struct ib_srq_init_attr *srq_init_attr,
			struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_srq->device);
	struct hns_roce_ib_create_srq_resp resp = {};
	struct hns_roce_srq *srq = to_hr_srq(ib_srq);
	int srq_desc_size;
	int srq_buf_size;
	u32 page_shift;
	int ret = 0;
	u32 npages;
	u32 cqn;

	/* Check that the requested WQE and SGE numbers fit the device caps */
	if (srq_init_attr->attr.max_wr >= hr_dev->caps.max_srq_wrs ||
	    srq_init_attr->attr.max_sge > hr_dev->caps.max_srq_sges)
		return -EINVAL;

	mutex_init(&srq->mutex);
	spin_lock_init(&srq->lock);

	srq->max = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
	srq->max_gs = srq_init_attr->attr.max_sge;

	srq_desc_size = max(16, 16 * srq->max_gs);

	srq->wqe_shift = ilog2(srq_desc_size);

	srq_buf_size = srq->max * srq_desc_size;

	srq->idx_que.entry_sz = HNS_ROCE_IDX_QUE_ENTRY_SZ;
	srq->idx_que.buf_size = srq->max * srq->idx_que.entry_sz;
	srq->mtt.mtt_type = MTT_TYPE_SRQWQE;
	srq->idx_que.mtt.mtt_type = MTT_TYPE_IDX;

	if (udata) {
		struct hns_roce_ib_create_srq ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
			return -EFAULT;

		srq->umem =
			ib_umem_get(udata, ucmd.buf_addr, srq_buf_size, 0, 0);
		if (IS_ERR(srq->umem))
			return PTR_ERR(srq->umem);

		if (hr_dev->caps.srqwqe_buf_pg_sz) {
			npages = (ib_umem_page_count(srq->umem) +
				  (1 << hr_dev->caps.srqwqe_buf_pg_sz) - 1) /
				  (1 << hr_dev->caps.srqwqe_buf_pg_sz);
			page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
			ret = hns_roce_mtt_init(hr_dev, npages, page_shift,
						&srq->mtt);
		} else {
			ret = hns_roce_mtt_init(hr_dev,
						ib_umem_page_count(srq->umem),
						PAGE_SHIFT, &srq->mtt);
		}
		if (ret)
			goto err_buf;

		ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->mtt, srq->umem);
		if (ret)
			goto err_srq_mtt;

		/* Configure the index queue BA (base address) */
		srq->idx_que.umem = ib_umem_get(udata, ucmd.que_addr,
						srq->idx_que.buf_size, 0, 0);
		if (IS_ERR(srq->idx_que.umem)) {
			dev_err(hr_dev->dev,
				"ib_umem_get error for index queue\n");
			ret = PTR_ERR(srq->idx_que.umem);
			goto err_srq_mtt;
		}

		if (hr_dev->caps.idx_buf_pg_sz) {
			npages = (ib_umem_page_count(srq->idx_que.umem) +
				  (1 << hr_dev->caps.idx_buf_pg_sz) - 1) /
				  (1 << hr_dev->caps.idx_buf_pg_sz);
			page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
			ret = hns_roce_mtt_init(hr_dev, npages,
						page_shift, &srq->idx_que.mtt);
		} else {
			ret = hns_roce_mtt_init(
				hr_dev, ib_umem_page_count(srq->idx_que.umem),
				PAGE_SHIFT, &srq->idx_que.mtt);
		}

		if (ret) {
			dev_err(hr_dev->dev,
				"hns_roce_mtt_init error for idx que\n");
			goto err_idx_mtt;
		}

		ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->idx_que.mtt,
						 srq->idx_que.umem);
		if (ret) {
			dev_err(hr_dev->dev,
				"hns_roce_ib_umem_write_mtt error for idx que\n");
			goto err_idx_buf;
		}
	} else {
		page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
		if (hns_roce_buf_alloc(hr_dev, srq_buf_size,
				       (1 << page_shift) * 2, &srq->buf,
				       page_shift))
			return -ENOMEM;

		srq->head = 0;
		srq->tail = srq->max - 1;

		ret = hns_roce_mtt_init(hr_dev, srq->buf.npages,
					srq->buf.page_shift, &srq->mtt);
		if (ret)
			goto err_buf;

		ret = hns_roce_buf_write_mtt(hr_dev, &srq->mtt, &srq->buf);
		if (ret)
			goto err_srq_mtt;

		page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
		ret = hns_roce_create_idx_que(ib_srq->pd, srq, page_shift);
		if (ret) {
			dev_err(hr_dev->dev,
				"failed to create idx queue (%d)\n", ret);
			goto err_srq_mtt;
		}

		/* Init the mtt table for idx_que */
		ret = hns_roce_mtt_init(hr_dev, srq->idx_que.idx_buf.npages,
					srq->idx_que.idx_buf.page_shift,
					&srq->idx_que.mtt);
		if (ret)
			goto err_create_idx;

		/* Write the buffer address into the mtt table */
		ret = hns_roce_buf_write_mtt(hr_dev, &srq->idx_que.mtt,
					     &srq->idx_que.idx_buf);
		if (ret)
			goto err_idx_buf;

		srq->wrid = kvmalloc_array(srq->max, sizeof(u64), GFP_KERNEL);
		if (!srq->wrid) {
			ret = -ENOMEM;
			goto err_idx_buf;
		}
	}

	cqn = ib_srq_has_cq(srq_init_attr->srq_type) ?
	      to_hr_cq(srq_init_attr->ext.cq)->cqn : 0;

	srq->db_reg_l = hr_dev->reg_base + SRQ_DB_REG;

	ret = hns_roce_srq_alloc(hr_dev, to_hr_pd(ib_srq->pd)->pdn, cqn, 0,
				 &srq->mtt, 0, srq);
	if (ret)
		goto err_wrid;

	srq->event = hns_roce_ib_srq_event;
	srq->ibsrq.ext.xrc.srq_num = srq->srqn;
	resp.srqn = srq->srqn;

	if (udata) {
		if (ib_copy_to_udata(udata, &resp,
				     min(udata->outlen, sizeof(resp)))) {
			ret = -EFAULT;
			goto err_srqc_alloc;
		}
	}

	return 0;

/* Error unwind: release resources in the reverse order they were acquired */
err_srqc_alloc:
	hns_roce_srq_free(hr_dev, srq);

err_wrid:
	kvfree(srq->wrid);

err_idx_buf:
	hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);

err_idx_mtt:
	ib_umem_release(srq->idx_que.umem);

err_create_idx:
	hns_roce_buf_free(hr_dev, srq->idx_que.buf_size,
			  &srq->idx_que.idx_buf);
	bitmap_free(srq->idx_que.bitmap);

err_srq_mtt:
	hns_roce_mtt_cleanup(hr_dev, &srq->mtt);

err_buf:
	ib_umem_release(srq->umem);
	if (!udata)
		hns_roce_buf_free(hr_dev, srq_buf_size, &srq->buf);

	return ret;
}

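/*
 * Destroy an SRQ: detach it from hardware, then release buffers, MTTs and
 * umems according to whether it was created by a user or kernel consumer.
 */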
void hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
	struct hns_roce_srq *srq = to_hr_srq(ibsrq);

	hns_roce_srq_free(hr_dev, srq);
	hns_roce_mtt_cleanup(hr_dev, &srq->mtt);

	if (udata) {
		hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
	} else {
		kvfree(srq->wrid);
		hns_roce_buf_free(hr_dev, srq->max << srq->wqe_shift,
				  &srq->buf);
	}
	ib_umem_release(srq->idx_que.umem);
	ib_umem_release(srq->umem);
}

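/* Set up the SRQ table: the lookup xarray and the SRQN allocation bitmap. */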
int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;

	xa_init(&srq_table->xa);

	return hns_roce_bitmap_init(&srq_table->bitmap, hr_dev->caps.num_srqs,
				    hr_dev->caps.num_srqs - 1,
				    hr_dev->caps.reserved_srqs, 0);
}

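/* Release the SRQN allocation bitmap at driver teardown. */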
void hns_roce_cleanup_srq_table(struct hns_roce_dev *hr_dev)
{
	hns_roce_bitmap_cleanup(&hr_dev->srq_table.bitmap);
}