// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2018 Hisilicon Limited.
 */

#include <linux/pci.h>
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"

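/* Dispatch an asynchronous hardware event to the SRQ it targets, taking a
 * reference across the handler call so the SRQ cannot be freed mid-dispatch.
 */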
void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type)
{
	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
	struct hns_roce_srq *srq;

	xa_lock(&srq_table->xa);
	srq = xa_load(&srq_table->xa, srqn & (hr_dev->caps.num_srqs - 1));
	if (srq)
		refcount_inc(&srq->refcount);
	xa_unlock(&srq_table->xa);

	if (!srq) {
		dev_warn(hr_dev->dev, "Async event for bogus SRQ %08x\n", srqn);
		return;
	}

	srq->event(srq, event_type);

	if (refcount_dec_and_test(&srq->refcount))
		complete(&srq->free);
}

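/* Translate a hardware SRQ event into an ib_event and deliver it to the
 * consumer's event handler, if one was registered.
 */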
static void hns_roce_ib_srq_event(struct hns_roce_srq *srq,
				  enum hns_roce_event event_type)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
	struct ib_srq *ibsrq = &srq->ibsrq;
	struct ib_event event;

	if (ibsrq->event_handler) {
		event.device = ibsrq->device;
		event.element.srq = ibsrq;
		switch (event_type) {
		case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
			event.event = IB_EVENT_SRQ_ERR;
			break;
		default:
			dev_err(hr_dev->dev,
				"hns_roce: unexpected event type 0x%x on SRQ %06lx\n",
				event_type, srq->srqn);
			return;
		}

		ibsrq->event_handler(&event, ibsrq->srq_context);
	}
}

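/* Allocate an SRQ number from the IDA initialized in
 * hns_roce_init_srq_table().
 */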
static int alloc_srqn(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
	struct hns_roce_ida *srq_ida = &hr_dev->srq_table.srq_ida;
	int id;

	id = ida_alloc_range(&srq_ida->ida, srq_ida->min, srq_ida->max,
			     GFP_KERNEL);
	if (id < 0) {
		ibdev_err(&hr_dev->ib_dev, "failed to alloc srq(%d).\n", id);
		return -ENOMEM;
	}

	srq->srqn = id;

	return 0;
}

static void free_srqn(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
	ida_free(&hr_dev->srq_table.srq_ida.ida, (int)srq->srqn);
}

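/* Fill an SRQ context (SRQC) into a mailbox buffer and issue the CREATE_SRQ
 * command so the hardware instantiates the context.
 */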
static int hns_roce_create_srqc(struct hns_roce_dev *hr_dev,
				struct hns_roce_srq *srq)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox)) {
		ibdev_err(ibdev, "failed to alloc mailbox for SRQC.\n");
		return PTR_ERR(mailbox);
	}

	ret = hr_dev->hw->write_srqc(srq, mailbox->buf);
	if (ret) {
		ibdev_err(ibdev, "failed to write SRQC.\n");
		goto err_mbox;
	}

	ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_SRQ,
				     srq->srqn);
	if (ret)
		ibdev_err(ibdev, "failed to config SRQC, ret = %d.\n", ret);

err_mbox:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	return ret;
}

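/* Reserve an SRQC entry in the HEM table, publish the SRQ in the xarray used
 * for event dispatch, then create the context in hardware.
 */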
static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int ret;

	ret = hns_roce_table_get(hr_dev, &srq_table->table, srq->srqn);
	if (ret) {
		ibdev_err(ibdev, "failed to get SRQC table, ret = %d.\n", ret);
		return ret;
	}

	ret = xa_err(xa_store_irq(&srq_table->xa, srq->srqn, srq, GFP_KERNEL));
	if (ret) {
		ibdev_err(ibdev, "failed to store SRQC, ret = %d.\n", ret);
		goto err_put;
	}

	ret = hns_roce_create_srqc(hr_dev, srq);
	if (ret)
		goto err_xa;

	return 0;

err_xa:
	xa_erase_irq(&srq_table->xa, srq->srqn);
err_put:
	hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);

	return ret;
}

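/* Tear down in reverse order of alloc_srqc(): destroy the hardware context,
 * unpublish the SRQ, wait until all references are dropped, then release the
 * HEM table entry.
 */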
static void free_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
	int ret;

	ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_SRQ,
				      srq->srqn);
	if (ret)
		dev_err(hr_dev->dev, "DESTROY_SRQ failed (%d) for SRQN %06lx\n",
			ret, srq->srqn);

	xa_erase_irq(&srq_table->xa, srq->srqn);

	if (refcount_dec_and_test(&srq->refcount))
		complete(&srq->free);
	wait_for_completion(&srq->free);

	hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);
}

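/* Allocate the index queue: an MTR-managed buffer with one entry per WQE
 * plus, for kernel consumers only, a bitmap tracking in-use indexes.
 */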
static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
			 struct ib_udata *udata, unsigned long addr)
{
	struct hns_roce_idx_que *idx_que = &srq->idx_que;
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_buf_attr buf_attr = {};
	int ret;

	srq->idx_que.entry_shift = ilog2(HNS_ROCE_IDX_QUE_ENTRY_SZ);

	buf_attr.page_shift = hr_dev->caps.idx_buf_pg_sz + PAGE_SHIFT;
	buf_attr.region[0].size = to_hr_hem_entries_size(srq->wqe_cnt,
						srq->idx_que.entry_shift);
	buf_attr.region[0].hopnum = hr_dev->caps.idx_hop_num;
	buf_attr.region_count = 1;

	ret = hns_roce_mtr_create(hr_dev, &idx_que->mtr, &buf_attr,
				  hr_dev->caps.idx_ba_pg_sz + PAGE_SHIFT,
				  udata, addr);
	if (ret) {
		ibdev_err(ibdev,
			  "failed to alloc SRQ idx mtr, ret = %d.\n", ret);
		return ret;
	}

	if (!udata) {
		idx_que->bitmap = bitmap_zalloc(srq->wqe_cnt, GFP_KERNEL);
		if (!idx_que->bitmap) {
			ibdev_err(ibdev, "failed to alloc SRQ idx bitmap.\n");
			ret = -ENOMEM;
			goto err_idx_mtr;
		}
	}

	idx_que->head = 0;
	idx_que->tail = 0;

	return 0;
err_idx_mtr:
	hns_roce_mtr_destroy(hr_dev, &idx_que->mtr);

	return ret;
}

static void free_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
	struct hns_roce_idx_que *idx_que = &srq->idx_que;

	bitmap_free(idx_que->bitmap);
	idx_que->bitmap = NULL;
	hns_roce_mtr_destroy(hr_dev, &idx_que->mtr);
}

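/* Allocate the WQE buffer. Each WQE is sized for max_gs SGEs, rounded up to
 * a power of two.
 */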
static int alloc_srq_wqe_buf(struct hns_roce_dev *hr_dev,
			     struct hns_roce_srq *srq,
			     struct ib_udata *udata, unsigned long addr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_buf_attr buf_attr = {};
	int ret;

	srq->wqe_shift = ilog2(roundup_pow_of_two(max(HNS_ROCE_SGE_SIZE,
						      HNS_ROCE_SGE_SIZE *
						      srq->max_gs)));

	buf_attr.page_shift = hr_dev->caps.srqwqe_buf_pg_sz + PAGE_SHIFT;
	buf_attr.region[0].size = to_hr_hem_entries_size(srq->wqe_cnt,
							 srq->wqe_shift);
	buf_attr.region[0].hopnum = hr_dev->caps.srqwqe_hop_num;
	buf_attr.region_count = 1;

	ret = hns_roce_mtr_create(hr_dev, &srq->buf_mtr, &buf_attr,
				  hr_dev->caps.srqwqe_ba_pg_sz + PAGE_SHIFT,
				  udata, addr);
	if (ret)
		ibdev_err(ibdev,
			  "failed to alloc SRQ buf mtr, ret = %d.\n", ret);

	return ret;
}

static void free_srq_wqe_buf(struct hns_roce_dev *hr_dev,
			     struct hns_roce_srq *srq)
{
	hns_roce_mtr_destroy(hr_dev, &srq->buf_mtr);
}

static int alloc_srq_wrid(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
	srq->wrid = kvmalloc_array(srq->wqe_cnt, sizeof(u64), GFP_KERNEL);
	if (!srq->wrid)
		return -ENOMEM;

	return 0;
}

static void free_srq_wrid(struct hns_roce_srq *srq)
{
	kvfree(srq->wrid);
	srq->wrid = NULL;
}

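/* Return the maximum SGE count an SRQ may advertise on this hardware
 * revision.
 */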
static u32 proc_srq_sge(struct hns_roce_dev *dev, struct hns_roce_srq *hr_srq,
			bool user)
{
	u32 max_sge = dev->caps.max_srq_sges;

	if (dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
		return max_sge;

	/* Reserve SGEs only for HIP08 in kernel space; the userspace driver
	 * already accounts for the reserved SGEs in max_sge when sizing the
	 * WQE buffer, so there is no need to do so again here. But the number
	 * may exceed the SGE capacity recorded in the firmware, so the kernel
	 * driver simply adapts the value accordingly.
	 */
	if (user)
		max_sge = roundup_pow_of_two(max_sge + 1);
	else
		hr_srq->rsv_sge = 1;

	return max_sge;
}

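/* Validate the requested WR depth and SGE count against the device caps,
 * round both up to powers of two, and report the resulting values back
 * through @init_attr.
 */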
static int set_srq_basic_param(struct hns_roce_srq *srq,
			       struct ib_srq_init_attr *init_attr,
			       struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
	struct ib_srq_attr *attr = &init_attr->attr;
	u32 max_sge;

	max_sge = proc_srq_sge(hr_dev, srq, !!udata);
	if (attr->max_wr > hr_dev->caps.max_srq_wrs ||
	    attr->max_sge > max_sge || !attr->max_sge) {
		ibdev_err(&hr_dev->ib_dev,
			  "invalid SRQ attr, depth = %u, sge = %u.\n",
			  attr->max_wr, attr->max_sge);
		return -EINVAL;
	}

	attr->max_wr = max_t(u32, attr->max_wr, HNS_ROCE_MIN_SRQ_WQE_NUM);
	srq->wqe_cnt = roundup_pow_of_two(attr->max_wr);
	srq->max_gs = roundup_pow_of_two(attr->max_sge + srq->rsv_sge);

	attr->max_wr = srq->wqe_cnt;
	attr->max_sge = srq->max_gs - srq->rsv_sge;
	attr->srq_limit = 0;

	return 0;
}

static void set_srq_ext_param(struct hns_roce_srq *srq,
			      struct ib_srq_init_attr *init_attr)
{
	srq->cqn = ib_srq_has_cq(init_attr->srq_type) ?
		   to_hr_cq(init_attr->ext.cq)->cqn : 0;

	srq->xrcdn = (init_attr->srq_type == IB_SRQT_XRC) ?
		     to_hr_xrcd(init_attr->ext.xrc.xrcd)->xrcdn : 0;
}

static int set_srq_param(struct hns_roce_srq *srq,
			 struct ib_srq_init_attr *init_attr,
			 struct ib_udata *udata)
{
	int ret;

	ret = set_srq_basic_param(srq, init_attr, udata);
	if (ret)
		return ret;

	set_srq_ext_param(srq, init_attr);

	return 0;
}

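/* Allocate all SRQ buffers: the index queue, the WQE buffer and, for kernel
 * consumers, the wrid array. For userspace SRQs the buffer addresses come
 * from the ucmd copied out of @udata.
 */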
static int alloc_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
			 struct ib_udata *udata)
{
	struct hns_roce_ib_create_srq ucmd = {};
	int ret;

	if (udata) {
		ret = ib_copy_from_udata(&ucmd, udata,
					 min(udata->inlen, sizeof(ucmd)));
		if (ret) {
			ibdev_err(&hr_dev->ib_dev,
				  "failed to copy SRQ udata, ret = %d.\n",
				  ret);
			return ret;
		}
	}

	ret = alloc_srq_idx(hr_dev, srq, udata, ucmd.que_addr);
	if (ret)
		return ret;

	ret = alloc_srq_wqe_buf(hr_dev, srq, udata, ucmd.buf_addr);
	if (ret)
		goto err_idx;

	if (!udata) {
		ret = alloc_srq_wrid(hr_dev, srq);
		if (ret)
			goto err_wqe_buf;
	}

	return 0;

err_wqe_buf:
	free_srq_wqe_buf(hr_dev, srq);
err_idx:
	free_srq_idx(hr_dev, srq);

	return ret;
}

static void free_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
	free_srq_wrid(srq);
	free_srq_wqe_buf(hr_dev, srq);
	free_srq_idx(hr_dev, srq);
}

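/* Create an SRQ: set its parameters, then allocate the buffers, an SRQN and
 * the hardware context, and finally report the SRQN back to userspace when
 * applicable.
 */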
int hns_roce_create_srq(struct ib_srq *ib_srq,
			struct ib_srq_init_attr *init_attr,
			struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_srq->device);
	struct hns_roce_ib_create_srq_resp resp = {};
	struct hns_roce_srq *srq = to_hr_srq(ib_srq);
	int ret;

	mutex_init(&srq->mutex);
	spin_lock_init(&srq->lock);

	ret = set_srq_param(srq, init_attr, udata);
	if (ret)
		return ret;

	ret = alloc_srq_buf(hr_dev, srq, udata);
	if (ret)
		return ret;

	ret = alloc_srqn(hr_dev, srq);
	if (ret)
		goto err_srq_buf;

	ret = alloc_srqc(hr_dev, srq);
	if (ret)
		goto err_srqn;

	if (udata) {
		resp.srqn = srq->srqn;
		if (ib_copy_to_udata(udata, &resp,
				     min(udata->outlen, sizeof(resp)))) {
			ret = -EFAULT;
			goto err_srqc;
		}
	}

	srq->db_reg = hr_dev->reg_base + SRQ_DB_REG;
	srq->event = hns_roce_ib_srq_event;
	refcount_set(&srq->refcount, 1);
	init_completion(&srq->free);

	return 0;

err_srqc:
	free_srqc(hr_dev, srq);
err_srqn:
	free_srqn(hr_dev, srq);
err_srq_buf:
	free_srq_buf(hr_dev, srq);

	return ret;
}

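/* Destroy an SRQ, releasing its resources in reverse order of creation. */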
int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
	struct hns_roce_srq *srq = to_hr_srq(ibsrq);

	free_srqc(hr_dev, srq);
	free_srqn(hr_dev, srq);
	free_srq_buf(hr_dev, srq);
	return 0;
}

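/* Initialize the per-device SRQ table: the xarray used for event dispatch
 * and the IDA that hands out SRQ numbers above the reserved range.
 */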
void hns_roce_init_srq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
	struct hns_roce_ida *srq_ida = &srq_table->srq_ida;

	xa_init(&srq_table->xa);

	ida_init(&srq_ida->ida);
	srq_ida->max = hr_dev->caps.num_srqs - 1;
	srq_ida->min = hr_dev->caps.reserved_srqs;
}