// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#include <linux/vmalloc.h>

#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_ioctl.h>

#include "efa.h"

enum {
	EFA_MMAP_DMA_PAGE = 0,
	EFA_MMAP_IO_WC,
	EFA_MMAP_IO_NC,
};

#define EFA_AENQ_ENABLED_GROUPS \
	(BIT(EFA_ADMIN_FATAL_ERROR) | BIT(EFA_ADMIN_WARNING) | \
	 BIT(EFA_ADMIN_NOTIFICATION) | BIT(EFA_ADMIN_KEEP_ALIVE))

struct efa_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	u64 address;
	u8 mmap_flag;
};

#define EFA_DEFINE_STATS(op) \
	op(EFA_TX_BYTES, "tx_bytes") \
	op(EFA_TX_PKTS, "tx_pkts") \
	op(EFA_RX_BYTES, "rx_bytes") \
	op(EFA_RX_PKTS, "rx_pkts") \
	op(EFA_RX_DROPS, "rx_drops") \
	op(EFA_SUBMITTED_CMDS, "submitted_cmds") \
	op(EFA_COMPLETED_CMDS, "completed_cmds") \
	op(EFA_NO_COMPLETION_CMDS, "no_completion_cmds") \
	op(EFA_KEEP_ALIVE_RCVD, "keep_alive_rcvd") \
	op(EFA_ALLOC_PD_ERR, "alloc_pd_err") \
	op(EFA_CREATE_QP_ERR, "create_qp_err") \
	op(EFA_REG_MR_ERR, "reg_mr_err") \
	op(EFA_ALLOC_UCONTEXT_ERR, "alloc_ucontext_err") \
	op(EFA_CREATE_AH_ERR, "create_ah_err")

#define EFA_STATS_ENUM(ename, name) ename,
#define EFA_STATS_STR(ename, name) [ename] = name,

enum efa_hw_stats {
	EFA_DEFINE_STATS(EFA_STATS_ENUM)
};

static const char *const efa_stats_names[] = {
	EFA_DEFINE_STATS(EFA_STATS_STR)
};

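/*
 * Layout used for the indirect PBL (see pbl_chunk_list_create() below):
 * the page list is split into 4KB chunks, each holding EFA_PTRS_PER_CHUNK
 * page DMA addresses followed by a struct efa_com_ctrl_buff_info that
 * describes the next chunk, forming a singly linked chunk list.
 */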
#define EFA_CHUNK_PAYLOAD_SHIFT       12
#define EFA_CHUNK_PAYLOAD_SIZE        BIT(EFA_CHUNK_PAYLOAD_SHIFT)
#define EFA_CHUNK_PAYLOAD_PTR_SIZE    8

#define EFA_CHUNK_SHIFT               12
#define EFA_CHUNK_SIZE                BIT(EFA_CHUNK_SHIFT)
#define EFA_CHUNK_PTR_SIZE            sizeof(struct efa_com_ctrl_buff_info)

#define EFA_PTRS_PER_CHUNK \
	((EFA_CHUNK_SIZE - EFA_CHUNK_PTR_SIZE) / EFA_CHUNK_PAYLOAD_PTR_SIZE)

#define EFA_CHUNK_USED_SIZE \
	((EFA_PTRS_PER_CHUNK * EFA_CHUNK_PAYLOAD_PTR_SIZE) + EFA_CHUNK_PTR_SIZE)

struct pbl_chunk {
	dma_addr_t dma_addr;
	u64 *buf;
	u32 length;
};

struct pbl_chunk_list {
	struct pbl_chunk *chunks;
	unsigned int size;
};

struct pbl_context {
	union {
		struct {
			dma_addr_t dma_addr;
		} continuous;
		struct {
			u32 pbl_buf_size_in_pages;
			struct scatterlist *sgl;
			int sg_dma_cnt;
			struct pbl_chunk_list chunk_list;
		} indirect;
	} phys;
	u64 *pbl_buf;
	u32 pbl_buf_size_in_bytes;
	u8 physically_continuous;
};

static inline struct efa_dev *to_edev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct efa_dev, ibdev);
}

static inline struct efa_ucontext *to_eucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct efa_ucontext, ibucontext);
}

static inline struct efa_pd *to_epd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct efa_pd, ibpd);
}

static inline struct efa_mr *to_emr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct efa_mr, ibmr);
}

static inline struct efa_qp *to_eqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct efa_qp, ibqp);
}

static inline struct efa_cq *to_ecq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct efa_cq, ibcq);
}

static inline struct efa_ah *to_eah(struct ib_ah *ibah)
{
	return container_of(ibah, struct efa_ah, ibah);
}

static inline struct efa_user_mmap_entry *
to_emmap(struct rdma_user_mmap_entry *rdma_entry)
{
	return container_of(rdma_entry, struct efa_user_mmap_entry, rdma_entry);
}

static inline bool is_rdma_read_cap(struct efa_dev *dev)
{
	return dev->dev_attr.device_caps & EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_READ_MASK;
}

#define field_avail(x, fld, sz) (offsetof(typeof(x), fld) + \
				 sizeof_field(typeof(x), fld) <= (sz))

#define is_reserved_cleared(reserved) \
	!memchr_inv(reserved, 0, sizeof(reserved))

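/*
 * Allocate a zeroed, physically contiguous buffer with alloc_pages_exact()
 * and DMA map it towards the device. Buffers allocated here (the CQ and RQ
 * rings) are later exposed to userspace as EFA_MMAP_DMA_PAGE entries and
 * freed page-exact in efa_mmap_free().
 */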
static void *efa_zalloc_mapped(struct efa_dev *dev, dma_addr_t *dma_addr,
			       size_t size, enum dma_data_direction dir)
{
	void *addr;

	addr = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
	if (!addr)
		return NULL;

	*dma_addr = dma_map_single(&dev->pdev->dev, addr, size, dir);
	if (dma_mapping_error(&dev->pdev->dev, *dma_addr)) {
		ibdev_err(&dev->ibdev, "Failed to map DMA address\n");
		free_pages_exact(addr, size);
		return NULL;
	}

	return addr;
}

int efa_query_device(struct ib_device *ibdev,
		     struct ib_device_attr *props,
		     struct ib_udata *udata)
{
	struct efa_com_get_device_attr_result *dev_attr;
	struct efa_ibv_ex_query_device_resp resp = {};
	struct efa_dev *dev = to_edev(ibdev);
	int err;

	if (udata && udata->inlen &&
	    !ib_is_udata_cleared(udata, 0, udata->inlen)) {
		ibdev_dbg(ibdev,
			  "Incompatible ABI params, udata not cleared\n");
		return -EINVAL;
	}

	dev_attr = &dev->dev_attr;

	memset(props, 0, sizeof(*props));
	props->max_mr_size = dev_attr->max_mr_pages * PAGE_SIZE;
	props->page_size_cap = dev_attr->page_size_cap;
	props->vendor_id = dev->pdev->vendor;
	props->vendor_part_id = dev->pdev->device;
	props->hw_ver = dev->pdev->subsystem_device;
	props->max_qp = dev_attr->max_qp;
	props->max_cq = dev_attr->max_cq;
	props->max_pd = dev_attr->max_pd;
	props->max_mr = dev_attr->max_mr;
	props->max_ah = dev_attr->max_ah;
	props->max_cqe = dev_attr->max_cq_depth;
	props->max_qp_wr = min_t(u32, dev_attr->max_sq_depth,
				 dev_attr->max_rq_depth);
	props->max_send_sge = dev_attr->max_sq_sge;
	props->max_recv_sge = dev_attr->max_rq_sge;
	props->max_sge_rd = dev_attr->max_wr_rdma_sge;

	if (udata && udata->outlen) {
		resp.max_sq_sge = dev_attr->max_sq_sge;
		resp.max_rq_sge = dev_attr->max_rq_sge;
		resp.max_sq_wr = dev_attr->max_sq_depth;
		resp.max_rq_wr = dev_attr->max_rq_depth;
		resp.max_rdma_size = dev_attr->max_rdma_size;

		if (is_rdma_read_cap(dev))
			resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RDMA_READ;

		err = ib_copy_to_udata(udata, &resp,
				       min(sizeof(resp), udata->outlen));
		if (err) {
			ibdev_dbg(ibdev,
				  "Failed to copy udata for query_device\n");
			return err;
		}
	}

	return 0;
}

int efa_query_port(struct ib_device *ibdev, u8 port,
		   struct ib_port_attr *props)
{
	struct efa_dev *dev = to_edev(ibdev);

	props->lmc = 1;

	props->state = IB_PORT_ACTIVE;
	props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->active_speed = IB_SPEED_EDR;
	props->active_width = IB_WIDTH_4X;
	props->max_mtu = ib_mtu_int_to_enum(dev->dev_attr.mtu);
	props->active_mtu = ib_mtu_int_to_enum(dev->dev_attr.mtu);
	props->max_msg_sz = dev->dev_attr.mtu;
	props->max_vl_num = 1;

	return 0;
}

int efa_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
		 int qp_attr_mask,
		 struct ib_qp_init_attr *qp_init_attr)
{
	struct efa_dev *dev = to_edev(ibqp->device);
	struct efa_com_query_qp_params params = {};
	struct efa_com_query_qp_result result;
	struct efa_qp *qp = to_eqp(ibqp);
	int err;

#define EFA_QUERY_QP_SUPP_MASK \
	(IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | \
	 IB_QP_QKEY | IB_QP_SQ_PSN | IB_QP_CAP)

	if (qp_attr_mask & ~EFA_QUERY_QP_SUPP_MASK) {
		ibdev_dbg(&dev->ibdev,
			  "Unsupported qp_attr_mask[%#x] supported[%#x]\n",
			  qp_attr_mask, EFA_QUERY_QP_SUPP_MASK);
		return -EOPNOTSUPP;
	}

	memset(qp_attr, 0, sizeof(*qp_attr));
	memset(qp_init_attr, 0, sizeof(*qp_init_attr));

	params.qp_handle = qp->qp_handle;
	err = efa_com_query_qp(&dev->edev, &params, &result);
	if (err)
		return err;

	qp_attr->qp_state = result.qp_state;
	qp_attr->qkey = result.qkey;
	qp_attr->sq_psn = result.sq_psn;
	qp_attr->sq_draining = result.sq_draining;
	qp_attr->port_num = 1;

	qp_attr->cap.max_send_wr = qp->max_send_wr;
	qp_attr->cap.max_recv_wr = qp->max_recv_wr;
	qp_attr->cap.max_send_sge = qp->max_send_sge;
	qp_attr->cap.max_recv_sge = qp->max_recv_sge;
	qp_attr->cap.max_inline_data = qp->max_inline_data;

	qp_init_attr->qp_type = ibqp->qp_type;
	qp_init_attr->recv_cq = ibqp->recv_cq;
	qp_init_attr->send_cq = ibqp->send_cq;
	qp_init_attr->qp_context = ibqp->qp_context;
	qp_init_attr->cap = qp_attr->cap;

	return 0;
}

int efa_query_gid(struct ib_device *ibdev, u8 port, int index,
		  union ib_gid *gid)
{
	struct efa_dev *dev = to_edev(ibdev);

	memcpy(gid->raw, dev->dev_attr.addr, sizeof(dev->dev_attr.addr));

	return 0;
}

int efa_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
		   u16 *pkey)
{
	if (index > 0)
		return -EINVAL;

	*pkey = 0xffff;
	return 0;
}

static int efa_pd_dealloc(struct efa_dev *dev, u16 pdn)
{
	struct efa_com_dealloc_pd_params params = {
		.pdn = pdn,
	};

	return efa_com_dealloc_pd(&dev->edev, &params);
}

int efa_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibpd->device);
	struct efa_ibv_alloc_pd_resp resp = {};
	struct efa_com_alloc_pd_result result;
	struct efa_pd *pd = to_epd(ibpd);
	int err;

	if (udata->inlen &&
	    !ib_is_udata_cleared(udata, 0, udata->inlen)) {
		ibdev_dbg(&dev->ibdev,
			  "Incompatible ABI params, udata not cleared\n");
		err = -EINVAL;
		goto err_out;
	}

	err = efa_com_alloc_pd(&dev->edev, &result);
	if (err)
		goto err_out;

	pd->pdn = result.pdn;
	resp.pdn = result.pdn;

	if (udata->outlen) {
		err = ib_copy_to_udata(udata, &resp,
				       min(sizeof(resp), udata->outlen));
		if (err) {
			ibdev_dbg(&dev->ibdev,
				  "Failed to copy udata for alloc_pd\n");
			goto err_dealloc_pd;
		}
	}

	ibdev_dbg(&dev->ibdev, "Allocated pd[%d]\n", pd->pdn);

	return 0;

err_dealloc_pd:
	efa_pd_dealloc(dev, result.pdn);
err_out:
	atomic64_inc(&dev->stats.sw_stats.alloc_pd_err);
	return err;
}

void efa_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibpd->device);
	struct efa_pd *pd = to_epd(ibpd);

	ibdev_dbg(&dev->ibdev, "Dealloc pd[%d]\n", pd->pdn);
	efa_pd_dealloc(dev, pd->pdn);
}

static int efa_destroy_qp_handle(struct efa_dev *dev, u32 qp_handle)
{
	struct efa_com_destroy_qp_params params = { .qp_handle = qp_handle };

	return efa_com_destroy_qp(&dev->edev, &params);
}

static void efa_qp_user_mmap_entries_remove(struct efa_qp *qp)
{
	rdma_user_mmap_entry_remove(qp->rq_mmap_entry);
	rdma_user_mmap_entry_remove(qp->rq_db_mmap_entry);
	rdma_user_mmap_entry_remove(qp->llq_desc_mmap_entry);
	rdma_user_mmap_entry_remove(qp->sq_db_mmap_entry);
}

int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibqp->pd->device);
	struct efa_qp *qp = to_eqp(ibqp);
	int err;

	ibdev_dbg(&dev->ibdev, "Destroy qp[%u]\n", ibqp->qp_num);
	err = efa_destroy_qp_handle(dev, qp->qp_handle);
	if (err)
		return err;

	if (qp->rq_cpu_addr) {
		ibdev_dbg(&dev->ibdev,
			  "qp->cpu_addr[0x%p] freed: size[%lu], dma[%pad]\n",
			  qp->rq_cpu_addr, qp->rq_size,
			  &qp->rq_dma_addr);
		dma_unmap_single(&dev->pdev->dev, qp->rq_dma_addr, qp->rq_size,
				 DMA_TO_DEVICE);
	}

	efa_qp_user_mmap_entries_remove(qp);
	kfree(qp);
	return 0;
}

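/*
 * Allocate an efa_user_mmap_entry, register it with the rdma core mmap
 * machinery and return the resulting rdma entry; *offset receives the key
 * that userspace passes back as the mmap() offset.
 */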
static struct rdma_user_mmap_entry*
efa_user_mmap_entry_insert(struct ib_ucontext *ucontext,
			   u64 address, size_t length,
			   u8 mmap_flag, u64 *offset)
{
	struct efa_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	int err;

	if (!entry)
		return NULL;

	entry->address = address;
	entry->mmap_flag = mmap_flag;

	err = rdma_user_mmap_entry_insert(ucontext, &entry->rdma_entry,
					  length);
	if (err) {
		kfree(entry);
		return NULL;
	}
	*offset = rdma_user_mmap_get_offset(&entry->rdma_entry);

	return &entry->rdma_entry;
}

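/*
 * Expose the QP resources to userspace: the SQ doorbell and (optionally) RQ
 * doorbell registers as non-cached I/O, the LLQ descriptor ring as
 * write-combined I/O, and the RQ ring itself as DMA pages. The mmap keys are
 * returned to userspace through the create_qp response.
 */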
static int qp_mmap_entries_setup(struct efa_qp *qp,
				 struct efa_dev *dev,
				 struct efa_ucontext *ucontext,
				 struct efa_com_create_qp_params *params,
				 struct efa_ibv_create_qp_resp *resp)
{
	size_t length;
	u64 address;

	address = dev->db_bar_addr + resp->sq_db_offset;
	qp->sq_db_mmap_entry =
		efa_user_mmap_entry_insert(&ucontext->ibucontext,
					   address,
					   PAGE_SIZE, EFA_MMAP_IO_NC,
					   &resp->sq_db_mmap_key);
	if (!qp->sq_db_mmap_entry)
		return -ENOMEM;

	resp->sq_db_offset &= ~PAGE_MASK;

	address = dev->mem_bar_addr + resp->llq_desc_offset;
	length = PAGE_ALIGN(params->sq_ring_size_in_bytes +
			    (resp->llq_desc_offset & ~PAGE_MASK));

	qp->llq_desc_mmap_entry =
		efa_user_mmap_entry_insert(&ucontext->ibucontext,
					   address, length,
					   EFA_MMAP_IO_WC,
					   &resp->llq_desc_mmap_key);
	if (!qp->llq_desc_mmap_entry)
		goto err_remove_mmap;

	resp->llq_desc_offset &= ~PAGE_MASK;

	if (qp->rq_size) {
		address = dev->db_bar_addr + resp->rq_db_offset;

		qp->rq_db_mmap_entry =
			efa_user_mmap_entry_insert(&ucontext->ibucontext,
						   address, PAGE_SIZE,
						   EFA_MMAP_IO_NC,
						   &resp->rq_db_mmap_key);
		if (!qp->rq_db_mmap_entry)
			goto err_remove_mmap;

		resp->rq_db_offset &= ~PAGE_MASK;

		address = virt_to_phys(qp->rq_cpu_addr);
		qp->rq_mmap_entry =
			efa_user_mmap_entry_insert(&ucontext->ibucontext,
						   address, qp->rq_size,
						   EFA_MMAP_DMA_PAGE,
						   &resp->rq_mmap_key);
		if (!qp->rq_mmap_entry)
			goto err_remove_mmap;

		resp->rq_mmap_size = qp->rq_size;
	}

	return 0;

err_remove_mmap:
	efa_qp_user_mmap_entries_remove(qp);

	return -ENOMEM;
}

static int efa_qp_validate_cap(struct efa_dev *dev,
			       struct ib_qp_init_attr *init_attr)
{
	if (init_attr->cap.max_send_wr > dev->dev_attr.max_sq_depth) {
		ibdev_dbg(&dev->ibdev,
			  "qp: requested send wr[%u] exceeds the max[%u]\n",
			  init_attr->cap.max_send_wr,
			  dev->dev_attr.max_sq_depth);
		return -EINVAL;
	}
	if (init_attr->cap.max_recv_wr > dev->dev_attr.max_rq_depth) {
		ibdev_dbg(&dev->ibdev,
			  "qp: requested receive wr[%u] exceeds the max[%u]\n",
			  init_attr->cap.max_recv_wr,
			  dev->dev_attr.max_rq_depth);
		return -EINVAL;
	}
	if (init_attr->cap.max_send_sge > dev->dev_attr.max_sq_sge) {
		ibdev_dbg(&dev->ibdev,
			  "qp: requested sge send[%u] exceeds the max[%u]\n",
			  init_attr->cap.max_send_sge, dev->dev_attr.max_sq_sge);
		return -EINVAL;
	}
	if (init_attr->cap.max_recv_sge > dev->dev_attr.max_rq_sge) {
		ibdev_dbg(&dev->ibdev,
			  "qp: requested sge recv[%u] exceeds the max[%u]\n",
			  init_attr->cap.max_recv_sge, dev->dev_attr.max_rq_sge);
		return -EINVAL;
	}
	if (init_attr->cap.max_inline_data > dev->dev_attr.inline_buf_size) {
		ibdev_dbg(&dev->ibdev,
			  "qp: requested inline data[%u] exceeds the max[%u]\n",
			  init_attr->cap.max_inline_data,
			  dev->dev_attr.inline_buf_size);
		return -EINVAL;
	}

	return 0;
}

static int efa_qp_validate_attr(struct efa_dev *dev,
				struct ib_qp_init_attr *init_attr)
{
	if (init_attr->qp_type != IB_QPT_DRIVER &&
	    init_attr->qp_type != IB_QPT_UD) {
		ibdev_dbg(&dev->ibdev,
			  "Unsupported qp type %d\n", init_attr->qp_type);
		return -EOPNOTSUPP;
	}

	if (init_attr->srq) {
		ibdev_dbg(&dev->ibdev, "SRQ is not supported\n");
		return -EOPNOTSUPP;
	}

	if (init_attr->create_flags) {
		ibdev_dbg(&dev->ibdev, "Unsupported create flags\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata)
{
	struct efa_com_create_qp_params create_qp_params = {};
	struct efa_com_create_qp_result create_qp_resp;
	struct efa_dev *dev = to_edev(ibpd->device);
	struct efa_ibv_create_qp_resp resp = {};
	struct efa_ibv_create_qp cmd = {};
	struct efa_ucontext *ucontext;
	struct efa_qp *qp;
	int err;

	ucontext = rdma_udata_to_drv_context(udata, struct efa_ucontext,
					     ibucontext);

	err = efa_qp_validate_cap(dev, init_attr);
	if (err)
		goto err_out;

	err = efa_qp_validate_attr(dev, init_attr);
	if (err)
		goto err_out;

	if (!field_avail(cmd, driver_qp_type, udata->inlen)) {
		ibdev_dbg(&dev->ibdev,
			  "Incompatible ABI params, no input udata\n");
		err = -EINVAL;
		goto err_out;
	}

	if (udata->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(udata, sizeof(cmd),
				 udata->inlen - sizeof(cmd))) {
		ibdev_dbg(&dev->ibdev,
			  "Incompatible ABI params, unknown fields in udata\n");
		err = -EINVAL;
		goto err_out;
	}

	err = ib_copy_from_udata(&cmd, udata,
				 min(sizeof(cmd), udata->inlen));
	if (err) {
		ibdev_dbg(&dev->ibdev,
			  "Cannot copy udata for create_qp\n");
		goto err_out;
	}

	if (cmd.comp_mask) {
		ibdev_dbg(&dev->ibdev,
			  "Incompatible ABI params, unknown fields in udata\n");
		err = -EINVAL;
		goto err_out;
	}

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp) {
		err = -ENOMEM;
		goto err_out;
	}

	create_qp_params.uarn = ucontext->uarn;
	create_qp_params.pd = to_epd(ibpd)->pdn;

	if (init_attr->qp_type == IB_QPT_UD) {
		create_qp_params.qp_type = EFA_ADMIN_QP_TYPE_UD;
	} else if (cmd.driver_qp_type == EFA_QP_DRIVER_TYPE_SRD) {
		create_qp_params.qp_type = EFA_ADMIN_QP_TYPE_SRD;
	} else {
		ibdev_dbg(&dev->ibdev,
			  "Unsupported qp type %d driver qp type %d\n",
			  init_attr->qp_type, cmd.driver_qp_type);
		err = -EOPNOTSUPP;
		goto err_free_qp;
	}

	ibdev_dbg(&dev->ibdev, "Create QP: qp type %d driver qp type %#x\n",
		  init_attr->qp_type, cmd.driver_qp_type);
	create_qp_params.send_cq_idx = to_ecq(init_attr->send_cq)->cq_idx;
	create_qp_params.recv_cq_idx = to_ecq(init_attr->recv_cq)->cq_idx;
	create_qp_params.sq_depth = init_attr->cap.max_send_wr;
	create_qp_params.sq_ring_size_in_bytes = cmd.sq_ring_size;

	create_qp_params.rq_depth = init_attr->cap.max_recv_wr;
	create_qp_params.rq_ring_size_in_bytes = cmd.rq_ring_size;
	qp->rq_size = PAGE_ALIGN(create_qp_params.rq_ring_size_in_bytes);
	if (qp->rq_size) {
		qp->rq_cpu_addr = efa_zalloc_mapped(dev, &qp->rq_dma_addr,
						    qp->rq_size, DMA_TO_DEVICE);
		if (!qp->rq_cpu_addr) {
			err = -ENOMEM;
			goto err_free_qp;
		}

		ibdev_dbg(&dev->ibdev,
			  "qp->cpu_addr[0x%p] allocated: size[%lu], dma[%pad]\n",
			  qp->rq_cpu_addr, qp->rq_size, &qp->rq_dma_addr);
		create_qp_params.rq_base_addr = qp->rq_dma_addr;
	}

	err = efa_com_create_qp(&dev->edev, &create_qp_params,
				&create_qp_resp);
	if (err)
		goto err_free_mapped;

	resp.sq_db_offset = create_qp_resp.sq_db_offset;
	resp.rq_db_offset = create_qp_resp.rq_db_offset;
	resp.llq_desc_offset = create_qp_resp.llq_descriptors_offset;
	resp.send_sub_cq_idx = create_qp_resp.send_sub_cq_idx;
	resp.recv_sub_cq_idx = create_qp_resp.recv_sub_cq_idx;

	err = qp_mmap_entries_setup(qp, dev, ucontext, &create_qp_params,
				    &resp);
	if (err)
		goto err_destroy_qp;

	qp->qp_handle = create_qp_resp.qp_handle;
	qp->ibqp.qp_num = create_qp_resp.qp_num;
	qp->ibqp.qp_type = init_attr->qp_type;
	qp->max_send_wr = init_attr->cap.max_send_wr;
	qp->max_recv_wr = init_attr->cap.max_recv_wr;
	qp->max_send_sge = init_attr->cap.max_send_sge;
	qp->max_recv_sge = init_attr->cap.max_recv_sge;
	qp->max_inline_data = init_attr->cap.max_inline_data;

	if (udata->outlen) {
		err = ib_copy_to_udata(udata, &resp,
				       min(sizeof(resp), udata->outlen));
		if (err) {
			ibdev_dbg(&dev->ibdev,
				  "Failed to copy udata for qp[%u]\n",
				  create_qp_resp.qp_num);
			goto err_remove_mmap_entries;
		}
	}

	ibdev_dbg(&dev->ibdev, "Created qp[%d]\n", qp->ibqp.qp_num);

	return &qp->ibqp;

err_remove_mmap_entries:
	efa_qp_user_mmap_entries_remove(qp);
err_destroy_qp:
	efa_destroy_qp_handle(dev, create_qp_resp.qp_handle);
err_free_mapped:
	if (qp->rq_size) {
		dma_unmap_single(&dev->pdev->dev, qp->rq_dma_addr, qp->rq_size,
				 DMA_TO_DEVICE);

		if (!qp->rq_mmap_entry)
			free_pages_exact(qp->rq_cpu_addr, qp->rq_size);
	}
err_free_qp:
	kfree(qp);
err_out:
	atomic64_inc(&dev->stats.sw_stats.create_qp_err);
	return ERR_PTR(err);
}

static int efa_modify_qp_validate(struct efa_dev *dev, struct efa_qp *qp,
				  struct ib_qp_attr *qp_attr, int qp_attr_mask,
				  enum ib_qp_state cur_state,
				  enum ib_qp_state new_state)
{
#define EFA_MODIFY_QP_SUPP_MASK \
	(IB_QP_STATE | IB_QP_CUR_STATE | IB_QP_EN_SQD_ASYNC_NOTIFY | \
	 IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY | IB_QP_SQ_PSN)

	if (qp_attr_mask & ~EFA_MODIFY_QP_SUPP_MASK) {
		ibdev_dbg(&dev->ibdev,
			  "Unsupported qp_attr_mask[%#x] supported[%#x]\n",
			  qp_attr_mask, EFA_MODIFY_QP_SUPP_MASK);
		return -EOPNOTSUPP;
	}

	if (!ib_modify_qp_is_ok(cur_state, new_state, IB_QPT_UD,
				qp_attr_mask)) {
		ibdev_dbg(&dev->ibdev, "Invalid modify QP parameters\n");
		return -EINVAL;
	}

	if ((qp_attr_mask & IB_QP_PORT) && qp_attr->port_num != 1) {
		ibdev_dbg(&dev->ibdev, "Can't change port num\n");
		return -EOPNOTSUPP;
	}

	if ((qp_attr_mask & IB_QP_PKEY_INDEX) && qp_attr->pkey_index) {
		ibdev_dbg(&dev->ibdev, "Can't change pkey index\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

int efa_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
		  int qp_attr_mask, struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibqp->device);
	struct efa_com_modify_qp_params params = {};
	struct efa_qp *qp = to_eqp(ibqp);
	enum ib_qp_state cur_state;
	enum ib_qp_state new_state;
	int err;

	if (udata->inlen &&
	    !ib_is_udata_cleared(udata, 0, udata->inlen)) {
		ibdev_dbg(&dev->ibdev,
			  "Incompatible ABI params, udata not cleared\n");
		return -EINVAL;
	}

	cur_state = qp_attr_mask & IB_QP_CUR_STATE ? qp_attr->cur_qp_state :
						     qp->state;
	new_state = qp_attr_mask & IB_QP_STATE ? qp_attr->qp_state : cur_state;

	err = efa_modify_qp_validate(dev, qp, qp_attr, qp_attr_mask, cur_state,
				     new_state);
	if (err)
		return err;

	params.qp_handle = qp->qp_handle;

	if (qp_attr_mask & IB_QP_STATE) {
		params.modify_mask |= BIT(EFA_ADMIN_QP_STATE_BIT) |
				      BIT(EFA_ADMIN_CUR_QP_STATE_BIT);
		params.cur_qp_state = qp_attr->cur_qp_state;
		params.qp_state = qp_attr->qp_state;
	}

	if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
		params.modify_mask |=
			BIT(EFA_ADMIN_SQ_DRAINED_ASYNC_NOTIFY_BIT);
		params.sq_drained_async_notify = qp_attr->en_sqd_async_notify;
	}

	if (qp_attr_mask & IB_QP_QKEY) {
		params.modify_mask |= BIT(EFA_ADMIN_QKEY_BIT);
		params.qkey = qp_attr->qkey;
	}

	if (qp_attr_mask & IB_QP_SQ_PSN) {
		params.modify_mask |= BIT(EFA_ADMIN_SQ_PSN_BIT);
		params.sq_psn = qp_attr->sq_psn;
	}

	err = efa_com_modify_qp(&dev->edev, &params);
	if (err)
		return err;

	qp->state = new_state;

	return 0;
}

static int efa_destroy_cq_idx(struct efa_dev *dev, int cq_idx)
{
	struct efa_com_destroy_cq_params params = { .cq_idx = cq_idx };

	return efa_com_destroy_cq(&dev->edev, &params);
}

void efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibcq->device);
	struct efa_cq *cq = to_ecq(ibcq);

	ibdev_dbg(&dev->ibdev,
		  "Destroy cq[%d] virt[0x%p] freed: size[%lu], dma[%pad]\n",
		  cq->cq_idx, cq->cpu_addr, cq->size, &cq->dma_addr);

	efa_destroy_cq_idx(dev, cq->cq_idx);
	dma_unmap_single(&dev->pdev->dev, cq->dma_addr, cq->size,
			 DMA_FROM_DEVICE);
	rdma_user_mmap_entry_remove(cq->mmap_entry);
}

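/*
 * Expose the CQ ring buffer to userspace as a DMA-page mmap entry; the key
 * and size are returned through the create_cq response.
 */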
static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq,
				 struct efa_ibv_create_cq_resp *resp)
{
	resp->q_mmap_size = cq->size;
	cq->mmap_entry = efa_user_mmap_entry_insert(&cq->ucontext->ibucontext,
						    virt_to_phys(cq->cpu_addr),
						    cq->size, EFA_MMAP_DMA_PAGE,
						    &resp->q_mmap_key);
	if (!cq->mmap_entry)
		return -ENOMEM;

	return 0;
}

int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		  struct ib_udata *udata)
{
	struct efa_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct efa_ucontext, ibucontext);
	struct efa_ibv_create_cq_resp resp = {};
	struct efa_com_create_cq_params params;
	struct efa_com_create_cq_result result;
	struct ib_device *ibdev = ibcq->device;
	struct efa_dev *dev = to_edev(ibdev);
	struct efa_ibv_create_cq cmd = {};
	struct efa_cq *cq = to_ecq(ibcq);
	int entries = attr->cqe;
	int err;

	ibdev_dbg(ibdev, "create_cq entries %d\n", entries);

	if (entries < 1 || entries > dev->dev_attr.max_cq_depth) {
		ibdev_dbg(ibdev,
			  "cq: requested entries[%u] non-positive or greater than max[%u]\n",
			  entries, dev->dev_attr.max_cq_depth);
		err = -EINVAL;
		goto err_out;
	}

	if (!field_avail(cmd, num_sub_cqs, udata->inlen)) {
		ibdev_dbg(ibdev,
			  "Incompatible ABI params, no input udata\n");
		err = -EINVAL;
		goto err_out;
	}

	if (udata->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(udata, sizeof(cmd),
				 udata->inlen - sizeof(cmd))) {
		ibdev_dbg(ibdev,
			  "Incompatible ABI params, unknown fields in udata\n");
		err = -EINVAL;
		goto err_out;
	}

	err = ib_copy_from_udata(&cmd, udata,
				 min(sizeof(cmd), udata->inlen));
	if (err) {
		ibdev_dbg(ibdev, "Cannot copy udata for create_cq\n");
		goto err_out;
	}

	if (cmd.comp_mask || !is_reserved_cleared(cmd.reserved_50)) {
		ibdev_dbg(ibdev,
			  "Incompatible ABI params, unknown fields in udata\n");
		err = -EINVAL;
		goto err_out;
	}

	if (!cmd.cq_entry_size) {
		ibdev_dbg(ibdev,
			  "Invalid entry size [%u]\n", cmd.cq_entry_size);
		err = -EINVAL;
		goto err_out;
	}

	if (cmd.num_sub_cqs != dev->dev_attr.sub_cqs_per_cq) {
		ibdev_dbg(ibdev,
			  "Invalid number of sub cqs[%u] expected[%u]\n",
			  cmd.num_sub_cqs, dev->dev_attr.sub_cqs_per_cq);
		err = -EINVAL;
		goto err_out;
	}

	cq->ucontext = ucontext;
	cq->size = PAGE_ALIGN(cmd.cq_entry_size * entries * cmd.num_sub_cqs);
	cq->cpu_addr = efa_zalloc_mapped(dev, &cq->dma_addr, cq->size,
					 DMA_FROM_DEVICE);
	if (!cq->cpu_addr) {
		err = -ENOMEM;
		goto err_out;
	}

	params.uarn = cq->ucontext->uarn;
	params.cq_depth = entries;
	params.dma_addr = cq->dma_addr;
	params.entry_size_in_bytes = cmd.cq_entry_size;
	params.num_sub_cqs = cmd.num_sub_cqs;
	err = efa_com_create_cq(&dev->edev, &params, &result);
	if (err)
		goto err_free_mapped;

	resp.cq_idx = result.cq_idx;
	cq->cq_idx = result.cq_idx;
	cq->ibcq.cqe = result.actual_depth;
	WARN_ON_ONCE(entries != result.actual_depth);

	err = cq_mmap_entries_setup(dev, cq, &resp);
	if (err) {
		ibdev_dbg(ibdev, "Could not setup cq[%u] mmap entries\n",
			  cq->cq_idx);
		goto err_destroy_cq;
	}

	if (udata->outlen) {
		err = ib_copy_to_udata(udata, &resp,
				       min(sizeof(resp), udata->outlen));
		if (err) {
			ibdev_dbg(ibdev,
				  "Failed to copy udata for create_cq\n");
			goto err_remove_mmap;
		}
	}

	ibdev_dbg(ibdev, "Created cq[%d], cq depth[%u]. dma[%pad] virt[0x%p]\n",
		  cq->cq_idx, result.actual_depth, &cq->dma_addr, cq->cpu_addr);

	return 0;

err_remove_mmap:
	rdma_user_mmap_entry_remove(cq->mmap_entry);
err_destroy_cq:
	efa_destroy_cq_idx(dev, cq->cq_idx);
err_free_mapped:
	dma_unmap_single(&dev->pdev->dev, cq->dma_addr, cq->size,
			 DMA_FROM_DEVICE);
	if (!cq->mmap_entry)
		free_pages_exact(cq->cpu_addr, cq->size);

err_out:
	atomic64_inc(&dev->stats.sw_stats.create_cq_err);
	return err;
}

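/*
 * Fill page_list with the DMA address of each hp_shift-sized block of the
 * pinned umem; hp_cnt is the expected number of such blocks.
 */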
static int umem_to_page_list(struct efa_dev *dev,
			     struct ib_umem *umem,
			     u64 *page_list,
			     u32 hp_cnt,
			     u8 hp_shift)
{
	u32 pages_in_hp = BIT(hp_shift - PAGE_SHIFT);
	struct ib_block_iter biter;
	unsigned int hp_idx = 0;

	ibdev_dbg(&dev->ibdev, "hp_cnt[%u], pages_in_hp[%u]\n",
		  hp_cnt, pages_in_hp);

	rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap,
			    BIT(hp_shift))
		page_list[hp_idx++] = rdma_block_iter_dma_address(&biter);

	return 0;
}

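/*
 * Build a scatterlist over the physical pages backing a vmalloc'ed buffer so
 * that the (physically non-contiguous) PBL can be DMA mapped with
 * dma_map_sg().
 */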
static struct scatterlist *efa_vmalloc_buf_to_sg(u64 *buf, int page_cnt)
{
	struct scatterlist *sglist;
	struct page *pg;
	int i;

	sglist = kcalloc(page_cnt, sizeof(*sglist), GFP_KERNEL);
	if (!sglist)
		return NULL;
	sg_init_table(sglist, page_cnt);
	for (i = 0; i < page_cnt; i++) {
		pg = vmalloc_to_page(buf);
		if (!pg)
			goto err;
		sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
		buf += PAGE_SIZE / sizeof(*buf);
	}
	return sglist;

err:
	kfree(sglist);
	return NULL;
}

/*
 * Create a chunk list holding the DMA addresses of the physical pages of
 * the supplied scatter-gather list.
 */
static int pbl_chunk_list_create(struct efa_dev *dev, struct pbl_context *pbl)
{
	struct pbl_chunk_list *chunk_list = &pbl->phys.indirect.chunk_list;
	int page_cnt = pbl->phys.indirect.pbl_buf_size_in_pages;
	struct scatterlist *pages_sgl = pbl->phys.indirect.sgl;
	unsigned int chunk_list_size, chunk_idx, payload_idx;
	int sg_dma_cnt = pbl->phys.indirect.sg_dma_cnt;
	struct efa_com_ctrl_buff_info *ctrl_buf;
	u64 *cur_chunk_buf, *prev_chunk_buf;
	struct ib_block_iter biter;
	dma_addr_t dma_addr;
	int i;

	/* allocate a chunk list that consists of 4KB chunks */
	chunk_list_size = DIV_ROUND_UP(page_cnt, EFA_PTRS_PER_CHUNK);

	chunk_list->size = chunk_list_size;
	chunk_list->chunks = kcalloc(chunk_list_size,
				     sizeof(*chunk_list->chunks),
				     GFP_KERNEL);
	if (!chunk_list->chunks)
		return -ENOMEM;

	ibdev_dbg(&dev->ibdev,
		  "chunk_list_size[%u] - pages[%u]\n", chunk_list_size,
		  page_cnt);

	/* allocate chunk buffers: */
	for (i = 0; i < chunk_list_size; i++) {
		chunk_list->chunks[i].buf = kzalloc(EFA_CHUNK_SIZE, GFP_KERNEL);
		if (!chunk_list->chunks[i].buf)
			goto chunk_list_dealloc;

		chunk_list->chunks[i].length = EFA_CHUNK_USED_SIZE;
	}
	chunk_list->chunks[chunk_list_size - 1].length =
		((page_cnt % EFA_PTRS_PER_CHUNK) * EFA_CHUNK_PAYLOAD_PTR_SIZE) +
			EFA_CHUNK_PTR_SIZE;

	/* fill the dma addresses of sg list pages to chunks: */
	chunk_idx = 0;
	payload_idx = 0;
	cur_chunk_buf = chunk_list->chunks[0].buf;
	rdma_for_each_block(pages_sgl, &biter, sg_dma_cnt,
			    EFA_CHUNK_PAYLOAD_SIZE) {
		cur_chunk_buf[payload_idx++] =
			rdma_block_iter_dma_address(&biter);

		if (payload_idx == EFA_PTRS_PER_CHUNK) {
			chunk_idx++;
			cur_chunk_buf = chunk_list->chunks[chunk_idx].buf;
			payload_idx = 0;
		}
	}

	/* map chunks to dma and fill chunks next ptrs */
	for (i = chunk_list_size - 1; i >= 0; i--) {
		dma_addr = dma_map_single(&dev->pdev->dev,
					  chunk_list->chunks[i].buf,
					  chunk_list->chunks[i].length,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(&dev->pdev->dev, dma_addr)) {
			ibdev_err(&dev->ibdev,
				  "chunk[%u] dma_map_failed\n", i);
			goto chunk_list_unmap;
		}

		chunk_list->chunks[i].dma_addr = dma_addr;
		ibdev_dbg(&dev->ibdev,
			  "chunk[%u] mapped at [%pad]\n", i, &dma_addr);

		if (!i)
			break;

		prev_chunk_buf = chunk_list->chunks[i - 1].buf;

		ctrl_buf = (struct efa_com_ctrl_buff_info *)
				&prev_chunk_buf[EFA_PTRS_PER_CHUNK];
		ctrl_buf->length = chunk_list->chunks[i].length;

		efa_com_set_dma_addr(dma_addr,
				     &ctrl_buf->address.mem_addr_high,
				     &ctrl_buf->address.mem_addr_low);
	}

	return 0;

chunk_list_unmap:
	/* chunk i failed to map; only unmap the chunks that were mapped */
	for (i = i + 1; i < chunk_list_size; i++) {
		dma_unmap_single(&dev->pdev->dev, chunk_list->chunks[i].dma_addr,
				 chunk_list->chunks[i].length, DMA_TO_DEVICE);
	}
chunk_list_dealloc:
	for (i = 0; i < chunk_list_size; i++)
		kfree(chunk_list->chunks[i].buf);

	kfree(chunk_list->chunks);
	return -ENOMEM;
}

static void pbl_chunk_list_destroy(struct efa_dev *dev, struct pbl_context *pbl)
{
	struct pbl_chunk_list *chunk_list = &pbl->phys.indirect.chunk_list;
	int i;

	for (i = 0; i < chunk_list->size; i++) {
		dma_unmap_single(&dev->pdev->dev, chunk_list->chunks[i].dma_addr,
				 chunk_list->chunks[i].length, DMA_TO_DEVICE);
		kfree(chunk_list->chunks[i].buf);
	}

	kfree(chunk_list->chunks);
}

/* initialize pbl continuous mode: map pbl buffer to a dma address. */
static int pbl_continuous_initialize(struct efa_dev *dev,
				     struct pbl_context *pbl)
{
	dma_addr_t dma_addr;

	dma_addr = dma_map_single(&dev->pdev->dev, pbl->pbl_buf,
				  pbl->pbl_buf_size_in_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(&dev->pdev->dev, dma_addr)) {
		ibdev_err(&dev->ibdev, "Unable to map pbl to DMA address\n");
		return -ENOMEM;
	}

	pbl->phys.continuous.dma_addr = dma_addr;
	ibdev_dbg(&dev->ibdev,
		  "pbl continuous - dma_addr = %pad, size[%u]\n",
		  &dma_addr, pbl->pbl_buf_size_in_bytes);

	return 0;
}

/*
 * initialize pbl indirect mode:
 * create a chunk list out of the dma addresses of the physical pages of
 * pbl buffer.
 */
static int pbl_indirect_initialize(struct efa_dev *dev, struct pbl_context *pbl)
{
	u32 size_in_pages = DIV_ROUND_UP(pbl->pbl_buf_size_in_bytes, PAGE_SIZE);
	struct scatterlist *sgl;
	int sg_dma_cnt, err;

	BUILD_BUG_ON(EFA_CHUNK_PAYLOAD_SIZE > PAGE_SIZE);
	sgl = efa_vmalloc_buf_to_sg(pbl->pbl_buf, size_in_pages);
	if (!sgl)
		return -ENOMEM;

	sg_dma_cnt = dma_map_sg(&dev->pdev->dev, sgl, size_in_pages, DMA_TO_DEVICE);
	if (!sg_dma_cnt) {
		err = -EINVAL;
		goto err_map;
	}

	pbl->phys.indirect.pbl_buf_size_in_pages = size_in_pages;
	pbl->phys.indirect.sgl = sgl;
	pbl->phys.indirect.sg_dma_cnt = sg_dma_cnt;
	err = pbl_chunk_list_create(dev, pbl);
	if (err) {
		ibdev_dbg(&dev->ibdev,
			  "chunk_list creation failed[%d]\n", err);
		goto err_chunk;
	}

	ibdev_dbg(&dev->ibdev,
		  "pbl indirect - size[%u], chunks[%u]\n",
		  pbl->pbl_buf_size_in_bytes,
		  pbl->phys.indirect.chunk_list.size);

	return 0;

err_chunk:
	dma_unmap_sg(&dev->pdev->dev, sgl, size_in_pages, DMA_TO_DEVICE);
err_map:
	kfree(sgl);
	return err;
}

static void pbl_indirect_terminate(struct efa_dev *dev, struct pbl_context *pbl)
{
	pbl_chunk_list_destroy(dev, pbl);
	dma_unmap_sg(&dev->pdev->dev, pbl->phys.indirect.sgl,
		     pbl->phys.indirect.pbl_buf_size_in_pages, DMA_TO_DEVICE);
	kfree(pbl->phys.indirect.sgl);
}

/* create a page buffer list from a mapped user memory region */
static int pbl_create(struct efa_dev *dev,
		      struct pbl_context *pbl,
		      struct ib_umem *umem,
		      int hp_cnt,
		      u8 hp_shift)
{
	int err;

	pbl->pbl_buf_size_in_bytes = hp_cnt * EFA_CHUNK_PAYLOAD_PTR_SIZE;
	pbl->pbl_buf = kvzalloc(pbl->pbl_buf_size_in_bytes, GFP_KERNEL);
	if (!pbl->pbl_buf)
		return -ENOMEM;

	if (is_vmalloc_addr(pbl->pbl_buf)) {
		pbl->physically_continuous = 0;
		err = umem_to_page_list(dev, umem, pbl->pbl_buf, hp_cnt,
					hp_shift);
		if (err)
			goto err_free;

		err = pbl_indirect_initialize(dev, pbl);
		if (err)
			goto err_free;
	} else {
		pbl->physically_continuous = 1;
		err = umem_to_page_list(dev, umem, pbl->pbl_buf, hp_cnt,
					hp_shift);
		if (err)
			goto err_free;

		err = pbl_continuous_initialize(dev, pbl);
		if (err)
			goto err_free;
	}

	ibdev_dbg(&dev->ibdev,
		  "user_pbl_created: user_pages[%u], continuous[%u]\n",
		  hp_cnt, pbl->physically_continuous);

	return 0;

err_free:
	kvfree(pbl->pbl_buf);
	return err;
}

static void pbl_destroy(struct efa_dev *dev, struct pbl_context *pbl)
{
	if (pbl->physically_continuous)
		dma_unmap_single(&dev->pdev->dev, pbl->phys.continuous.dma_addr,
				 pbl->pbl_buf_size_in_bytes, DMA_TO_DEVICE);
	else
		pbl_indirect_terminate(dev, pbl);

	kvfree(pbl->pbl_buf);
}

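/*
 * For small registrations the page DMA addresses fit directly into the admin
 * command's inline PBL array, so no separate PBL buffer is needed.
 */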
static int efa_create_inline_pbl(struct efa_dev *dev, struct efa_mr *mr,
				 struct efa_com_reg_mr_params *params)
{
	int err;

	params->inline_pbl = 1;
	err = umem_to_page_list(dev, mr->umem, params->pbl.inline_pbl_array,
				params->page_num, params->page_shift);
	if (err)
		return err;

	ibdev_dbg(&dev->ibdev,
		  "inline_pbl_array - pages[%u]\n", params->page_num);

	return 0;
}

static int efa_create_pbl(struct efa_dev *dev,
			  struct pbl_context *pbl,
			  struct efa_mr *mr,
			  struct efa_com_reg_mr_params *params)
{
	int err;

	err = pbl_create(dev, pbl, mr->umem, params->page_num,
			 params->page_shift);
	if (err) {
		ibdev_dbg(&dev->ibdev, "Failed to create pbl[%d]\n", err);
		return err;
	}

	params->inline_pbl = 0;
	params->indirect = !pbl->physically_continuous;
	if (pbl->physically_continuous) {
		params->pbl.pbl.length = pbl->pbl_buf_size_in_bytes;

		efa_com_set_dma_addr(pbl->phys.continuous.dma_addr,
				     &params->pbl.pbl.address.mem_addr_high,
				     &params->pbl.pbl.address.mem_addr_low);
	} else {
		params->pbl.pbl.length =
			pbl->phys.indirect.chunk_list.chunks[0].length;

		efa_com_set_dma_addr(pbl->phys.indirect.chunk_list.chunks[0].dma_addr,
				     &params->pbl.pbl.address.mem_addr_high,
				     &params->pbl.pbl.address.mem_addr_low);
	}

	return 0;
}

struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
			 u64 virt_addr, int access_flags,
			 struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibpd->device);
	struct efa_com_reg_mr_params params = {};
	struct efa_com_reg_mr_result result = {};
	struct pbl_context pbl;
	int supp_access_flags;
	unsigned int pg_sz;
	struct efa_mr *mr;
	int inline_size;
	int err;

	if (udata && udata->inlen &&
	    !ib_is_udata_cleared(udata, 0, udata->inlen)) {
		ibdev_dbg(&dev->ibdev,
			  "Incompatible ABI params, udata not cleared\n");
		err = -EINVAL;
		goto err_out;
	}

	supp_access_flags =
		IB_ACCESS_LOCAL_WRITE |
		(is_rdma_read_cap(dev) ? IB_ACCESS_REMOTE_READ : 0);

	access_flags &= ~IB_ACCESS_OPTIONAL;
	if (access_flags & ~supp_access_flags) {
		ibdev_dbg(&dev->ibdev,
			  "Unsupported access flags[%#x], supported[%#x]\n",
			  access_flags, supp_access_flags);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		err = -ENOMEM;
		goto err_out;
	}

	mr->umem = ib_umem_get(ibpd->device, start, length, access_flags);
	if (IS_ERR(mr->umem)) {
		err = PTR_ERR(mr->umem);
		ibdev_dbg(&dev->ibdev,
			  "Failed to pin and map user space memory[%d]\n", err);
		goto err_free;
	}

	params.pd = to_epd(ibpd)->pdn;
	params.iova = virt_addr;
	params.mr_length_in_bytes = length;
	params.permissions = access_flags;

	pg_sz = ib_umem_find_best_pgsz(mr->umem,
				       dev->dev_attr.page_size_cap,
				       virt_addr);
	if (!pg_sz) {
		err = -EOPNOTSUPP;
		ibdev_dbg(&dev->ibdev, "Failed to find a suitable page size in page_size_cap %#llx\n",
			  dev->dev_attr.page_size_cap);
		goto err_unmap;
	}

	params.page_shift = __ffs(pg_sz);
	params.page_num = DIV_ROUND_UP(length + (start & (pg_sz - 1)),
				       pg_sz);

	ibdev_dbg(&dev->ibdev,
		  "start %#llx length %#llx params.page_shift %u params.page_num %u\n",
		  start, length, params.page_shift, params.page_num);

	inline_size = ARRAY_SIZE(params.pbl.inline_pbl_array);
	if (params.page_num <= inline_size) {
		err = efa_create_inline_pbl(dev, mr, &params);
		if (err)
			goto err_unmap;

		err = efa_com_register_mr(&dev->edev, &params, &result);
		if (err)
			goto err_unmap;
	} else {
		err = efa_create_pbl(dev, &pbl, mr, &params);
		if (err)
			goto err_unmap;

		err = efa_com_register_mr(&dev->edev, &params, &result);
		pbl_destroy(dev, &pbl);

		if (err)
			goto err_unmap;
	}

	mr->ibmr.lkey = result.l_key;
	mr->ibmr.rkey = result.r_key;
	mr->ibmr.length = length;
	ibdev_dbg(&dev->ibdev, "Registered mr[%d]\n", mr->ibmr.lkey);

	return &mr->ibmr;

err_unmap:
	ib_umem_release(mr->umem);
err_free:
	kfree(mr);
err_out:
	atomic64_inc(&dev->stats.sw_stats.reg_mr_err);
	return ERR_PTR(err);
}

int efa_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibmr->device);
	struct efa_com_dereg_mr_params params;
	struct efa_mr *mr = to_emr(ibmr);
	int err;

	ibdev_dbg(&dev->ibdev, "Deregister mr[%d]\n", ibmr->lkey);

	params.l_key = mr->ibmr.lkey;
	err = efa_com_dereg_mr(&dev->edev, &params);
	if (err)
		return err;

	ib_umem_release(mr->umem);
	kfree(mr);

	return 0;
}

int efa_get_port_immutable(struct ib_device *ibdev, u8 port_num,
			   struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err) {
		ibdev_dbg(ibdev, "Couldn't query port err[%d]\n", err);
		return err;
	}

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;

	return 0;
}

static int efa_dealloc_uar(struct efa_dev *dev, u16 uarn)
{
	struct efa_com_dealloc_uar_params params = {
		.uarn = uarn,
	};

	return efa_com_dealloc_uar(&dev->edev, &params);
}

int efa_alloc_ucontext(struct ib_ucontext *ibucontext, struct ib_udata *udata)
{
	struct efa_ucontext *ucontext = to_eucontext(ibucontext);
	struct efa_dev *dev = to_edev(ibucontext->device);
	struct efa_ibv_alloc_ucontext_resp resp = {};
	struct efa_com_alloc_uar_result result;
	int err;

	/*
	 * it's fine if the driver does not know all request fields,
	 * we will ack input fields in our response.
	 */

	err = efa_com_alloc_uar(&dev->edev, &result);
	if (err)
		goto err_out;

	ucontext->uarn = result.uarn;

	resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_QUERY_DEVICE;
	resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_CREATE_AH;
	resp.sub_cqs_per_cq = dev->dev_attr.sub_cqs_per_cq;
	resp.inline_buf_size = dev->dev_attr.inline_buf_size;
	resp.max_llq_size = dev->dev_attr.max_llq_size;

	if (udata && udata->outlen) {
		err = ib_copy_to_udata(udata, &resp,
				       min(sizeof(resp), udata->outlen));
		if (err)
			goto err_dealloc_uar;
	}

	return 0;

err_dealloc_uar:
	efa_dealloc_uar(dev, result.uarn);
err_out:
	atomic64_inc(&dev->stats.sw_stats.alloc_ucontext_err);
	return err;
}

void efa_dealloc_ucontext(struct ib_ucontext *ibucontext)
{
	struct efa_ucontext *ucontext = to_eucontext(ibucontext);
	struct efa_dev *dev = to_edev(ibucontext->device);

	efa_dealloc_uar(dev, ucontext->uarn);
}

void efa_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
{
	struct efa_user_mmap_entry *entry = to_emmap(rdma_entry);

	/* DMA mapping is already gone, now free the pages */
	if (entry->mmap_flag == EFA_MMAP_DMA_PAGE)
		free_pages_exact(phys_to_virt(entry->address),
				 entry->rdma_entry.npages * PAGE_SIZE);
	kfree(entry);
}

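/*
 * Map a single mmap entry into the user VMA: I/O entries (doorbells, LLQ) go
 * through rdma_user_mmap_io() with the appropriate page protection, while
 * DMA_PAGE entries are inserted page by page with vm_insert_page().
 */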
static int __efa_mmap(struct efa_dev *dev, struct efa_ucontext *ucontext,
		      struct vm_area_struct *vma)
{
	struct rdma_user_mmap_entry *rdma_entry;
	struct efa_user_mmap_entry *entry;
	unsigned long va;
	int err = 0;
	u64 pfn;

	rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma);
	if (!rdma_entry) {
		ibdev_dbg(&dev->ibdev,
			  "pgoff[%#lx] does not have valid entry\n",
			  vma->vm_pgoff);
		return -EINVAL;
	}
	entry = to_emmap(rdma_entry);

	ibdev_dbg(&dev->ibdev,
		  "Mapping address[%#llx], length[%#zx], mmap_flag[%d]\n",
		  entry->address, rdma_entry->npages * PAGE_SIZE,
		  entry->mmap_flag);

	pfn = entry->address >> PAGE_SHIFT;
	switch (entry->mmap_flag) {
	case EFA_MMAP_IO_NC:
		err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn,
					entry->rdma_entry.npages * PAGE_SIZE,
					pgprot_noncached(vma->vm_page_prot),
					rdma_entry);
		break;
	case EFA_MMAP_IO_WC:
		err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn,
					entry->rdma_entry.npages * PAGE_SIZE,
					pgprot_writecombine(vma->vm_page_prot),
					rdma_entry);
		break;
	case EFA_MMAP_DMA_PAGE:
		for (va = vma->vm_start; va < vma->vm_end;
		     va += PAGE_SIZE, pfn++) {
			err = vm_insert_page(vma, va, pfn_to_page(pfn));
			if (err)
				break;
		}
		break;
	default:
		err = -EINVAL;
	}

	if (err)
		ibdev_dbg(
			&dev->ibdev,
			"Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n",
			entry->address, rdma_entry->npages * PAGE_SIZE,
			entry->mmap_flag, err);

	rdma_user_mmap_entry_put(rdma_entry);
	return err;
}

int efa_mmap(struct ib_ucontext *ibucontext,
	     struct vm_area_struct *vma)
{
	struct efa_ucontext *ucontext = to_eucontext(ibucontext);
	struct efa_dev *dev = to_edev(ibucontext->device);
	size_t length = vma->vm_end - vma->vm_start;

	ibdev_dbg(&dev->ibdev,
		  "start %#lx, end %#lx, length = %#zx, pgoff = %#lx\n",
		  vma->vm_start, vma->vm_end, length, vma->vm_pgoff);

	return __efa_mmap(dev, ucontext, vma);
}

static int efa_ah_destroy(struct efa_dev *dev, struct efa_ah *ah)
{
	struct efa_com_destroy_ah_params params = {
		.ah = ah->ah,
		.pdn = to_epd(ah->ibah.pd)->pdn,
	};

	return efa_com_destroy_ah(&dev->edev, &params);
}

int efa_create_ah(struct ib_ah *ibah,
		  struct rdma_ah_attr *ah_attr,
		  u32 flags,
		  struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibah->device);
	struct efa_com_create_ah_params params = {};
	struct efa_ibv_create_ah_resp resp = {};
	struct efa_com_create_ah_result result;
	struct efa_ah *ah = to_eah(ibah);
	int err;

	if (!(flags & RDMA_CREATE_AH_SLEEPABLE)) {
		ibdev_dbg(&dev->ibdev,
			  "Create address handle is not supported in atomic context\n");
		err = -EOPNOTSUPP;
		goto err_out;
	}

	if (udata->inlen &&
	    !ib_is_udata_cleared(udata, 0, udata->inlen)) {
		ibdev_dbg(&dev->ibdev, "Incompatible ABI params\n");
		err = -EINVAL;
		goto err_out;
	}

	memcpy(params.dest_addr, ah_attr->grh.dgid.raw,
	       sizeof(params.dest_addr));
	params.pdn = to_epd(ibah->pd)->pdn;
	err = efa_com_create_ah(&dev->edev, &params, &result);
	if (err)
		goto err_out;

	memcpy(ah->id, ah_attr->grh.dgid.raw, sizeof(ah->id));
	ah->ah = result.ah;

	resp.efa_address_handle = result.ah;

	if (udata->outlen) {
		err = ib_copy_to_udata(udata, &resp,
				       min(sizeof(resp), udata->outlen));
		if (err) {
			ibdev_dbg(&dev->ibdev,
				  "Failed to copy udata for create_ah response\n");
			goto err_destroy_ah;
		}
	}
	ibdev_dbg(&dev->ibdev, "Created ah[%d]\n", ah->ah);

	return 0;

err_destroy_ah:
	efa_ah_destroy(dev, ah);
err_out:
	atomic64_inc(&dev->stats.sw_stats.create_ah_err);
	return err;
}

void efa_destroy_ah(struct ib_ah *ibah, u32 flags)
{
	struct efa_dev *dev = to_edev(ibah->pd->device);
	struct efa_ah *ah = to_eah(ibah);

	ibdev_dbg(&dev->ibdev, "Destroy ah[%d]\n", ah->ah);

	if (!(flags & RDMA_DESTROY_AH_SLEEPABLE)) {
		ibdev_dbg(&dev->ibdev,
			  "Destroy address handle is not supported in atomic context\n");
		return;
	}

	efa_ah_destroy(dev, ah);
}

struct rdma_hw_stats *efa_alloc_hw_stats(struct ib_device *ibdev, u8 port_num)
{
	return rdma_alloc_hw_stats_struct(efa_stats_names,
					  ARRAY_SIZE(efa_stats_names),
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
		     u8 port_num, int index)
{
	struct efa_com_get_stats_params params = {};
	union efa_com_get_stats_result result;
	struct efa_dev *dev = to_edev(ibdev);
	struct efa_com_basic_stats *bs;
	struct efa_com_stats_admin *as;
	struct efa_stats *s;
	int err;

	params.type = EFA_ADMIN_GET_STATS_TYPE_BASIC;
	params.scope = EFA_ADMIN_GET_STATS_SCOPE_ALL;

	err = efa_com_get_stats(&dev->edev, &params, &result);
	if (err)
		return err;

	bs = &result.basic_stats;
	stats->value[EFA_TX_BYTES] = bs->tx_bytes;
	stats->value[EFA_TX_PKTS] = bs->tx_pkts;
	stats->value[EFA_RX_BYTES] = bs->rx_bytes;
	stats->value[EFA_RX_PKTS] = bs->rx_pkts;
	stats->value[EFA_RX_DROPS] = bs->rx_drops;

	as = &dev->edev.aq.stats;
	stats->value[EFA_SUBMITTED_CMDS] = atomic64_read(&as->submitted_cmd);
	stats->value[EFA_COMPLETED_CMDS] = atomic64_read(&as->completed_cmd);
	stats->value[EFA_NO_COMPLETION_CMDS] = atomic64_read(&as->no_completion);

	s = &dev->stats;
	stats->value[EFA_KEEP_ALIVE_RCVD] = atomic64_read(&s->keep_alive_rcvd);
	stats->value[EFA_ALLOC_PD_ERR] = atomic64_read(&s->sw_stats.alloc_pd_err);
	stats->value[EFA_CREATE_QP_ERR] = atomic64_read(&s->sw_stats.create_qp_err);
	stats->value[EFA_REG_MR_ERR] = atomic64_read(&s->sw_stats.reg_mr_err);
	stats->value[EFA_ALLOC_UCONTEXT_ERR] = atomic64_read(&s->sw_stats.alloc_ucontext_err);
	stats->value[EFA_CREATE_AH_ERR] = atomic64_read(&s->sw_stats.create_ah_err);

	return ARRAY_SIZE(efa_stats_names);
}

enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev,
					 u8 port_num)
{
	return IB_LINK_LAYER_UNSPECIFIED;
}