// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#include <linux/vmalloc.h>

#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_ioctl.h>

#include "efa.h"

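/*
 * mmap entry types handed to userspace: DMA_PAGE entries map host memory
 * with vm_insert_page(), while the IO types map device BAR pages as
 * write-combined or non-cached I/O memory.
 */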
enum {
	EFA_MMAP_DMA_PAGE = 0,
	EFA_MMAP_IO_WC,
	EFA_MMAP_IO_NC,
};

#define EFA_AENQ_ENABLED_GROUPS \
	(BIT(EFA_ADMIN_FATAL_ERROR) | BIT(EFA_ADMIN_WARNING) | \
	 BIT(EFA_ADMIN_NOTIFICATION) | BIT(EFA_ADMIN_KEEP_ALIVE))

struct efa_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	u64 address;
	u8 mmap_flag;
};

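/*
 * X-macro describing the counters exposed through rdma_hw_stats.
 * EFA_DEFINE_STATS is expanded twice below: once to build the
 * enum efa_hw_stats indices and once to build the matching name table.
 */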
#define EFA_DEFINE_STATS(op) \
	op(EFA_TX_BYTES, "tx_bytes") \
	op(EFA_TX_PKTS, "tx_pkts") \
	op(EFA_RX_BYTES, "rx_bytes") \
	op(EFA_RX_PKTS, "rx_pkts") \
	op(EFA_RX_DROPS, "rx_drops") \
	op(EFA_SUBMITTED_CMDS, "submitted_cmds") \
	op(EFA_COMPLETED_CMDS, "completed_cmds") \
	op(EFA_CMDS_ERR, "cmds_err") \
	op(EFA_NO_COMPLETION_CMDS, "no_completion_cmds") \
	op(EFA_KEEP_ALIVE_RCVD, "keep_alive_rcvd") \
	op(EFA_ALLOC_PD_ERR, "alloc_pd_err") \
	op(EFA_CREATE_QP_ERR, "create_qp_err") \
	op(EFA_CREATE_CQ_ERR, "create_cq_err") \
	op(EFA_REG_MR_ERR, "reg_mr_err") \
	op(EFA_ALLOC_UCONTEXT_ERR, "alloc_ucontext_err") \
	op(EFA_CREATE_AH_ERR, "create_ah_err") \
	op(EFA_MMAP_ERR, "mmap_err")

#define EFA_STATS_ENUM(ename, name) ename,
#define EFA_STATS_STR(ename, name) [ename] = name,

enum efa_hw_stats {
	EFA_DEFINE_STATS(EFA_STATS_ENUM)
};

static const char *const efa_stats_names[] = {
	EFA_DEFINE_STATS(EFA_STATS_STR)
};

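/*
 * Indirect PBLs are split into 4KB chunks. Each chunk holds
 * EFA_PTRS_PER_CHUNK page DMA addresses followed by a single
 * efa_com_ctrl_buff_info that points to the next chunk in the list.
 */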
#define EFA_CHUNK_PAYLOAD_SHIFT       12
#define EFA_CHUNK_PAYLOAD_SIZE        BIT(EFA_CHUNK_PAYLOAD_SHIFT)
#define EFA_CHUNK_PAYLOAD_PTR_SIZE    8

#define EFA_CHUNK_SHIFT               12
#define EFA_CHUNK_SIZE                BIT(EFA_CHUNK_SHIFT)
#define EFA_CHUNK_PTR_SIZE            sizeof(struct efa_com_ctrl_buff_info)

#define EFA_PTRS_PER_CHUNK \
	((EFA_CHUNK_SIZE - EFA_CHUNK_PTR_SIZE) / EFA_CHUNK_PAYLOAD_PTR_SIZE)

#define EFA_CHUNK_USED_SIZE \
	((EFA_PTRS_PER_CHUNK * EFA_CHUNK_PAYLOAD_PTR_SIZE) + EFA_CHUNK_PTR_SIZE)

struct pbl_chunk {
	dma_addr_t dma_addr;
	u64 *buf;
	u32 length;
};

struct pbl_chunk_list {
	struct pbl_chunk *chunks;
	unsigned int size;
};

struct pbl_context {
	union {
		struct {
			dma_addr_t dma_addr;
		} continuous;
		struct {
			u32 pbl_buf_size_in_pages;
			struct scatterlist *sgl;
			int sg_dma_cnt;
			struct pbl_chunk_list chunk_list;
		} indirect;
	} phys;
	u64 *pbl_buf;
	u32 pbl_buf_size_in_bytes;
	u8 physically_continuous;
};

static inline struct efa_dev *to_edev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct efa_dev, ibdev);
}

static inline struct efa_ucontext *to_eucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct efa_ucontext, ibucontext);
}

static inline struct efa_pd *to_epd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct efa_pd, ibpd);
}

static inline struct efa_mr *to_emr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct efa_mr, ibmr);
}

static inline struct efa_qp *to_eqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct efa_qp, ibqp);
}

static inline struct efa_cq *to_ecq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct efa_cq, ibcq);
}

static inline struct efa_ah *to_eah(struct ib_ah *ibah)
{
	return container_of(ibah, struct efa_ah, ibah);
}

static inline struct efa_user_mmap_entry *
to_emmap(struct rdma_user_mmap_entry *rdma_entry)
{
	return container_of(rdma_entry, struct efa_user_mmap_entry, rdma_entry);
}

static inline bool is_rdma_read_cap(struct efa_dev *dev)
{
	return dev->dev_attr.device_caps & EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_READ_MASK;
}

#define is_reserved_cleared(reserved) \
	!memchr_inv(reserved, 0, sizeof(reserved))

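/*
 * Allocate a zeroed, page-aligned buffer and map it for DMA in the
 * requested direction. Released with efa_free_mapped().
 */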
static void *efa_zalloc_mapped(struct efa_dev *dev, dma_addr_t *dma_addr,
			       size_t size, enum dma_data_direction dir)
{
	void *addr;

	addr = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
	if (!addr)
		return NULL;

	*dma_addr = dma_map_single(&dev->pdev->dev, addr, size, dir);
	if (dma_mapping_error(&dev->pdev->dev, *dma_addr)) {
		ibdev_err(&dev->ibdev, "Failed to map DMA address\n");
		free_pages_exact(addr, size);
		return NULL;
	}

	return addr;
}

static void efa_free_mapped(struct efa_dev *dev, void *cpu_addr,
			    dma_addr_t dma_addr,
			    size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(&dev->pdev->dev, dma_addr, size, dir);
	free_pages_exact(cpu_addr, size);
}

int efa_query_device(struct ib_device *ibdev,
		     struct ib_device_attr *props,
		     struct ib_udata *udata)
{
	struct efa_com_get_device_attr_result *dev_attr;
	struct efa_ibv_ex_query_device_resp resp = {};
	struct efa_dev *dev = to_edev(ibdev);
	int err;

	if (udata && udata->inlen &&
	    !ib_is_udata_cleared(udata, 0, udata->inlen)) {
		ibdev_dbg(ibdev,
			  "Incompatible ABI params, udata not cleared\n");
		return -EINVAL;
	}

	dev_attr = &dev->dev_attr;

	memset(props, 0, sizeof(*props));
	props->max_mr_size = dev_attr->max_mr_pages * PAGE_SIZE;
	props->page_size_cap = dev_attr->page_size_cap;
	props->vendor_id = dev->pdev->vendor;
	props->vendor_part_id = dev->pdev->device;
	props->hw_ver = dev->pdev->subsystem_device;
	props->max_qp = dev_attr->max_qp;
	props->max_cq = dev_attr->max_cq;
	props->max_pd = dev_attr->max_pd;
	props->max_mr = dev_attr->max_mr;
	props->max_ah = dev_attr->max_ah;
	props->max_cqe = dev_attr->max_cq_depth;
	props->max_qp_wr = min_t(u32, dev_attr->max_sq_depth,
				 dev_attr->max_rq_depth);
	props->max_send_sge = dev_attr->max_sq_sge;
	props->max_recv_sge = dev_attr->max_rq_sge;
	props->max_sge_rd = dev_attr->max_wr_rdma_sge;

	if (udata && udata->outlen) {
		resp.max_sq_sge = dev_attr->max_sq_sge;
		resp.max_rq_sge = dev_attr->max_rq_sge;
		resp.max_sq_wr = dev_attr->max_sq_depth;
		resp.max_rq_wr = dev_attr->max_rq_depth;
		resp.max_rdma_size = dev_attr->max_rdma_size;

		if (is_rdma_read_cap(dev))
			resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RDMA_READ;

		err = ib_copy_to_udata(udata, &resp,
				       min(sizeof(resp), udata->outlen));
		if (err) {
			ibdev_dbg(ibdev,
				  "Failed to copy udata for query_device\n");
			return err;
		}
	}

	return 0;
}

int efa_query_port(struct ib_device *ibdev, u8 port,
		   struct ib_port_attr *props)
{
	struct efa_dev *dev = to_edev(ibdev);

	props->lmc = 1;

	props->state = IB_PORT_ACTIVE;
	props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->active_speed = IB_SPEED_EDR;
	props->active_width = IB_WIDTH_4X;
	props->max_mtu = ib_mtu_int_to_enum(dev->dev_attr.mtu);
	props->active_mtu = ib_mtu_int_to_enum(dev->dev_attr.mtu);
	props->max_msg_sz = dev->dev_attr.mtu;
	props->max_vl_num = 1;

	return 0;
}

int efa_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
		 int qp_attr_mask,
		 struct ib_qp_init_attr *qp_init_attr)
{
	struct efa_dev *dev = to_edev(ibqp->device);
	struct efa_com_query_qp_params params = {};
	struct efa_com_query_qp_result result;
	struct efa_qp *qp = to_eqp(ibqp);
	int err;

#define EFA_QUERY_QP_SUPP_MASK \
	(IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | \
	 IB_QP_QKEY | IB_QP_SQ_PSN | IB_QP_CAP)

	if (qp_attr_mask & ~EFA_QUERY_QP_SUPP_MASK) {
		ibdev_dbg(&dev->ibdev,
			  "Unsupported qp_attr_mask[%#x] supported[%#x]\n",
			  qp_attr_mask, EFA_QUERY_QP_SUPP_MASK);
		return -EOPNOTSUPP;
	}

	memset(qp_attr, 0, sizeof(*qp_attr));
	memset(qp_init_attr, 0, sizeof(*qp_init_attr));

	params.qp_handle = qp->qp_handle;
	err = efa_com_query_qp(&dev->edev, &params, &result);
	if (err)
		return err;

	qp_attr->qp_state = result.qp_state;
	qp_attr->qkey = result.qkey;
	qp_attr->sq_psn = result.sq_psn;
	qp_attr->sq_draining = result.sq_draining;
	qp_attr->port_num = 1;

	qp_attr->cap.max_send_wr = qp->max_send_wr;
	qp_attr->cap.max_recv_wr = qp->max_recv_wr;
	qp_attr->cap.max_send_sge = qp->max_send_sge;
	qp_attr->cap.max_recv_sge = qp->max_recv_sge;
	qp_attr->cap.max_inline_data = qp->max_inline_data;

	qp_init_attr->qp_type = ibqp->qp_type;
	qp_init_attr->recv_cq = ibqp->recv_cq;
	qp_init_attr->send_cq = ibqp->send_cq;
	qp_init_attr->qp_context = ibqp->qp_context;
	qp_init_attr->cap = qp_attr->cap;

	return 0;
}

int efa_query_gid(struct ib_device *ibdev, u8 port, int index,
		  union ib_gid *gid)
{
	struct efa_dev *dev = to_edev(ibdev);

	memcpy(gid->raw, dev->dev_attr.addr, sizeof(dev->dev_attr.addr));

	return 0;
}

int efa_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
		   u16 *pkey)
{
	if (index > 0)
		return -EINVAL;

	*pkey = 0xffff;
	return 0;
}

static int efa_pd_dealloc(struct efa_dev *dev, u16 pdn)
{
	struct efa_com_dealloc_pd_params params = {
		.pdn = pdn,
	};

	return efa_com_dealloc_pd(&dev->edev, &params);
}

int efa_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibpd->device);
	struct efa_ibv_alloc_pd_resp resp = {};
	struct efa_com_alloc_pd_result result;
	struct efa_pd *pd = to_epd(ibpd);
	int err;

	if (udata->inlen &&
	    !ib_is_udata_cleared(udata, 0, udata->inlen)) {
		ibdev_dbg(&dev->ibdev,
			  "Incompatible ABI params, udata not cleared\n");
		err = -EINVAL;
		goto err_out;
	}

	err = efa_com_alloc_pd(&dev->edev, &result);
	if (err)
		goto err_out;

	pd->pdn = result.pdn;
	resp.pdn = result.pdn;

	if (udata->outlen) {
		err = ib_copy_to_udata(udata, &resp,
				       min(sizeof(resp), udata->outlen));
		if (err) {
			ibdev_dbg(&dev->ibdev,
				  "Failed to copy udata for alloc_pd\n");
			goto err_dealloc_pd;
		}
	}

	ibdev_dbg(&dev->ibdev, "Allocated pd[%d]\n", pd->pdn);

	return 0;

err_dealloc_pd:
	efa_pd_dealloc(dev, result.pdn);
err_out:
	atomic64_inc(&dev->stats.sw_stats.alloc_pd_err);
	return err;
}

void efa_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibpd->device);
	struct efa_pd *pd = to_epd(ibpd);

	ibdev_dbg(&dev->ibdev, "Dealloc pd[%d]\n", pd->pdn);
	efa_pd_dealloc(dev, pd->pdn);
}

static int efa_destroy_qp_handle(struct efa_dev *dev, u32 qp_handle)
{
	struct efa_com_destroy_qp_params params = { .qp_handle = qp_handle };

	return efa_com_destroy_qp(&dev->edev, &params);
}

static void efa_qp_user_mmap_entries_remove(struct efa_qp *qp)
{
	rdma_user_mmap_entry_remove(qp->rq_mmap_entry);
	rdma_user_mmap_entry_remove(qp->rq_db_mmap_entry);
	rdma_user_mmap_entry_remove(qp->llq_desc_mmap_entry);
	rdma_user_mmap_entry_remove(qp->sq_db_mmap_entry);
}

int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibqp->pd->device);
	struct efa_qp *qp = to_eqp(ibqp);
	int err;

	ibdev_dbg(&dev->ibdev, "Destroy qp[%u]\n", ibqp->qp_num);

	efa_qp_user_mmap_entries_remove(qp);

	err = efa_destroy_qp_handle(dev, qp->qp_handle);
	if (err)
		return err;

	if (qp->rq_cpu_addr) {
		ibdev_dbg(&dev->ibdev,
			  "qp->cpu_addr[0x%p] freed: size[%lu], dma[%pad]\n",
			  qp->rq_cpu_addr, qp->rq_size,
			  &qp->rq_dma_addr);
		efa_free_mapped(dev, qp->rq_cpu_addr, qp->rq_dma_addr,
				qp->rq_size, DMA_TO_DEVICE);
	}

	kfree(qp);
	return 0;
}

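/*
 * Allocate an efa_user_mmap_entry describing a physical address and mmap
 * type, register it with the rdma mmap framework and return the mmap key
 * (offset) userspace passes back through mmap().
 */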
static struct rdma_user_mmap_entry*
efa_user_mmap_entry_insert(struct ib_ucontext *ucontext,
			   u64 address, size_t length,
			   u8 mmap_flag, u64 *offset)
{
	struct efa_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	int err;

	if (!entry)
		return NULL;

	entry->address = address;
	entry->mmap_flag = mmap_flag;

	err = rdma_user_mmap_entry_insert(ucontext, &entry->rdma_entry,
					  length);
	if (err) {
		kfree(entry);
		return NULL;
	}
	*offset = rdma_user_mmap_get_offset(&entry->rdma_entry);

	return &entry->rdma_entry;
}

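/*
 * Expose the QP resources to userspace: the SQ/RQ doorbell pages and the
 * LLQ descriptor ring live in device BARs, while the RQ ring buffer is in
 * host memory and only exists when a receive queue was requested. The
 * resulting mmap keys are returned to userspace through resp.
 */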
static int qp_mmap_entries_setup(struct efa_qp *qp,
				 struct efa_dev *dev,
				 struct efa_ucontext *ucontext,
				 struct efa_com_create_qp_params *params,
				 struct efa_ibv_create_qp_resp *resp)
{
	size_t length;
	u64 address;

	address = dev->db_bar_addr + resp->sq_db_offset;
	qp->sq_db_mmap_entry =
		efa_user_mmap_entry_insert(&ucontext->ibucontext,
					   address,
					   PAGE_SIZE, EFA_MMAP_IO_NC,
					   &resp->sq_db_mmap_key);
	if (!qp->sq_db_mmap_entry)
		return -ENOMEM;

	resp->sq_db_offset &= ~PAGE_MASK;

	address = dev->mem_bar_addr + resp->llq_desc_offset;
	length = PAGE_ALIGN(params->sq_ring_size_in_bytes +
			    (resp->llq_desc_offset & ~PAGE_MASK));

	qp->llq_desc_mmap_entry =
		efa_user_mmap_entry_insert(&ucontext->ibucontext,
					   address, length,
					   EFA_MMAP_IO_WC,
					   &resp->llq_desc_mmap_key);
	if (!qp->llq_desc_mmap_entry)
		goto err_remove_mmap;

	resp->llq_desc_offset &= ~PAGE_MASK;

	if (qp->rq_size) {
		address = dev->db_bar_addr + resp->rq_db_offset;

		qp->rq_db_mmap_entry =
			efa_user_mmap_entry_insert(&ucontext->ibucontext,
						   address, PAGE_SIZE,
						   EFA_MMAP_IO_NC,
						   &resp->rq_db_mmap_key);
		if (!qp->rq_db_mmap_entry)
			goto err_remove_mmap;

		resp->rq_db_offset &= ~PAGE_MASK;

		address = virt_to_phys(qp->rq_cpu_addr);
		qp->rq_mmap_entry =
			efa_user_mmap_entry_insert(&ucontext->ibucontext,
						   address, qp->rq_size,
						   EFA_MMAP_DMA_PAGE,
						   &resp->rq_mmap_key);
		if (!qp->rq_mmap_entry)
			goto err_remove_mmap;

		resp->rq_mmap_size = qp->rq_size;
	}

	return 0;

err_remove_mmap:
	efa_qp_user_mmap_entries_remove(qp);

	return -ENOMEM;
}

static int efa_qp_validate_cap(struct efa_dev *dev,
			       struct ib_qp_init_attr *init_attr)
{
	if (init_attr->cap.max_send_wr > dev->dev_attr.max_sq_depth) {
		ibdev_dbg(&dev->ibdev,
			  "qp: requested send wr[%u] exceeds the max[%u]\n",
			  init_attr->cap.max_send_wr,
			  dev->dev_attr.max_sq_depth);
		return -EINVAL;
	}
	if (init_attr->cap.max_recv_wr > dev->dev_attr.max_rq_depth) {
		ibdev_dbg(&dev->ibdev,
			  "qp: requested receive wr[%u] exceeds the max[%u]\n",
			  init_attr->cap.max_recv_wr,
			  dev->dev_attr.max_rq_depth);
		return -EINVAL;
	}
	if (init_attr->cap.max_send_sge > dev->dev_attr.max_sq_sge) {
		ibdev_dbg(&dev->ibdev,
			  "qp: requested sge send[%u] exceeds the max[%u]\n",
			  init_attr->cap.max_send_sge, dev->dev_attr.max_sq_sge);
		return -EINVAL;
	}
	if (init_attr->cap.max_recv_sge > dev->dev_attr.max_rq_sge) {
		ibdev_dbg(&dev->ibdev,
			  "qp: requested sge recv[%u] exceeds the max[%u]\n",
			  init_attr->cap.max_recv_sge, dev->dev_attr.max_rq_sge);
		return -EINVAL;
	}
	if (init_attr->cap.max_inline_data > dev->dev_attr.inline_buf_size) {
		ibdev_dbg(&dev->ibdev,
			  "qp: requested inline data[%u] exceeds the max[%u]\n",
			  init_attr->cap.max_inline_data,
			  dev->dev_attr.inline_buf_size);
		return -EINVAL;
	}

	return 0;
}

static int efa_qp_validate_attr(struct efa_dev *dev,
				struct ib_qp_init_attr *init_attr)
{
	if (init_attr->qp_type != IB_QPT_DRIVER &&
	    init_attr->qp_type != IB_QPT_UD) {
		ibdev_dbg(&dev->ibdev,
			  "Unsupported qp type %d\n", init_attr->qp_type);
		return -EOPNOTSUPP;
	}

	if (init_attr->srq) {
		ibdev_dbg(&dev->ibdev, "SRQ is not supported\n");
		return -EOPNOTSUPP;
	}

	if (init_attr->create_flags) {
		ibdev_dbg(&dev->ibdev, "Unsupported create flags\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata)
{
	struct efa_com_create_qp_params create_qp_params = {};
	struct efa_com_create_qp_result create_qp_resp;
	struct efa_dev *dev = to_edev(ibpd->device);
	struct efa_ibv_create_qp_resp resp = {};
	struct efa_ibv_create_qp cmd = {};
	struct efa_ucontext *ucontext;
	struct efa_qp *qp;
	int err;

	ucontext = rdma_udata_to_drv_context(udata, struct efa_ucontext,
					     ibucontext);

	err = efa_qp_validate_cap(dev, init_attr);
	if (err)
		goto err_out;

	err = efa_qp_validate_attr(dev, init_attr);
	if (err)
		goto err_out;

	if (offsetofend(typeof(cmd), driver_qp_type) > udata->inlen) {
		ibdev_dbg(&dev->ibdev,
			  "Incompatible ABI params, no input udata\n");
		err = -EINVAL;
		goto err_out;
	}

	if (udata->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(udata, sizeof(cmd),
				 udata->inlen - sizeof(cmd))) {
		ibdev_dbg(&dev->ibdev,
			  "Incompatible ABI params, unknown fields in udata\n");
		err = -EINVAL;
		goto err_out;
	}

	err = ib_copy_from_udata(&cmd, udata,
				 min(sizeof(cmd), udata->inlen));
	if (err) {
		ibdev_dbg(&dev->ibdev,
			  "Cannot copy udata for create_qp\n");
		goto err_out;
	}

	if (cmd.comp_mask) {
		ibdev_dbg(&dev->ibdev,
			  "Incompatible ABI params, unknown fields in udata\n");
		err = -EINVAL;
		goto err_out;
	}

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp) {
		err = -ENOMEM;
		goto err_out;
	}

	create_qp_params.uarn = ucontext->uarn;
	create_qp_params.pd = to_epd(ibpd)->pdn;

	if (init_attr->qp_type == IB_QPT_UD) {
		create_qp_params.qp_type = EFA_ADMIN_QP_TYPE_UD;
	} else if (cmd.driver_qp_type == EFA_QP_DRIVER_TYPE_SRD) {
		create_qp_params.qp_type = EFA_ADMIN_QP_TYPE_SRD;
	} else {
		ibdev_dbg(&dev->ibdev,
			  "Unsupported qp type %d driver qp type %d\n",
			  init_attr->qp_type, cmd.driver_qp_type);
		err = -EOPNOTSUPP;
		goto err_free_qp;
	}

	ibdev_dbg(&dev->ibdev, "Create QP: qp type %d driver qp type %#x\n",
		  init_attr->qp_type, cmd.driver_qp_type);
	create_qp_params.send_cq_idx = to_ecq(init_attr->send_cq)->cq_idx;
	create_qp_params.recv_cq_idx = to_ecq(init_attr->recv_cq)->cq_idx;
	create_qp_params.sq_depth = init_attr->cap.max_send_wr;
	create_qp_params.sq_ring_size_in_bytes = cmd.sq_ring_size;

	create_qp_params.rq_depth = init_attr->cap.max_recv_wr;
	create_qp_params.rq_ring_size_in_bytes = cmd.rq_ring_size;
	qp->rq_size = PAGE_ALIGN(create_qp_params.rq_ring_size_in_bytes);
	if (qp->rq_size) {
		qp->rq_cpu_addr = efa_zalloc_mapped(dev, &qp->rq_dma_addr,
						    qp->rq_size, DMA_TO_DEVICE);
		if (!qp->rq_cpu_addr) {
			err = -ENOMEM;
			goto err_free_qp;
		}

		ibdev_dbg(&dev->ibdev,
			  "qp->cpu_addr[0x%p] allocated: size[%lu], dma[%pad]\n",
			  qp->rq_cpu_addr, qp->rq_size, &qp->rq_dma_addr);
		create_qp_params.rq_base_addr = qp->rq_dma_addr;
	}

	err = efa_com_create_qp(&dev->edev, &create_qp_params,
				&create_qp_resp);
	if (err)
		goto err_free_mapped;

	resp.sq_db_offset = create_qp_resp.sq_db_offset;
	resp.rq_db_offset = create_qp_resp.rq_db_offset;
	resp.llq_desc_offset = create_qp_resp.llq_descriptors_offset;
	resp.send_sub_cq_idx = create_qp_resp.send_sub_cq_idx;
	resp.recv_sub_cq_idx = create_qp_resp.recv_sub_cq_idx;

	err = qp_mmap_entries_setup(qp, dev, ucontext, &create_qp_params,
				    &resp);
	if (err)
		goto err_destroy_qp;

	qp->qp_handle = create_qp_resp.qp_handle;
	qp->ibqp.qp_num = create_qp_resp.qp_num;
	qp->ibqp.qp_type = init_attr->qp_type;
	qp->max_send_wr = init_attr->cap.max_send_wr;
	qp->max_recv_wr = init_attr->cap.max_recv_wr;
	qp->max_send_sge = init_attr->cap.max_send_sge;
	qp->max_recv_sge = init_attr->cap.max_recv_sge;
	qp->max_inline_data = init_attr->cap.max_inline_data;

	if (udata->outlen) {
		err = ib_copy_to_udata(udata, &resp,
				       min(sizeof(resp), udata->outlen));
		if (err) {
			ibdev_dbg(&dev->ibdev,
				  "Failed to copy udata for qp[%u]\n",
				  create_qp_resp.qp_num);
			goto err_remove_mmap_entries;
		}
	}

	ibdev_dbg(&dev->ibdev, "Created qp[%d]\n", qp->ibqp.qp_num);

	return &qp->ibqp;

err_remove_mmap_entries:
	efa_qp_user_mmap_entries_remove(qp);
err_destroy_qp:
	efa_destroy_qp_handle(dev, create_qp_resp.qp_handle);
err_free_mapped:
	if (qp->rq_size)
		efa_free_mapped(dev, qp->rq_cpu_addr, qp->rq_dma_addr,
				qp->rq_size, DMA_TO_DEVICE);
err_free_qp:
	kfree(qp);
err_out:
	atomic64_inc(&dev->stats.sw_stats.create_qp_err);
	return ERR_PTR(err);
}

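/*
 * Only a small subset of modify_qp attributes is supported; anything
 * outside EFA_MODIFY_QP_SUPP_MASK is rejected and the state transition is
 * checked with ib_modify_qp_is_ok() before the admin command is issued.
 */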
static int efa_modify_qp_validate(struct efa_dev *dev, struct efa_qp *qp,
				  struct ib_qp_attr *qp_attr, int qp_attr_mask,
				  enum ib_qp_state cur_state,
				  enum ib_qp_state new_state)
{
#define EFA_MODIFY_QP_SUPP_MASK \
	(IB_QP_STATE | IB_QP_CUR_STATE | IB_QP_EN_SQD_ASYNC_NOTIFY | \
	 IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY | IB_QP_SQ_PSN)

	if (qp_attr_mask & ~EFA_MODIFY_QP_SUPP_MASK) {
		ibdev_dbg(&dev->ibdev,
			  "Unsupported qp_attr_mask[%#x] supported[%#x]\n",
			  qp_attr_mask, EFA_MODIFY_QP_SUPP_MASK);
		return -EOPNOTSUPP;
	}

	if (!ib_modify_qp_is_ok(cur_state, new_state, IB_QPT_UD,
				qp_attr_mask)) {
		ibdev_dbg(&dev->ibdev, "Invalid modify QP parameters\n");
		return -EINVAL;
	}

	if ((qp_attr_mask & IB_QP_PORT) && qp_attr->port_num != 1) {
		ibdev_dbg(&dev->ibdev, "Can't change port num\n");
		return -EOPNOTSUPP;
	}

	if ((qp_attr_mask & IB_QP_PKEY_INDEX) && qp_attr->pkey_index) {
		ibdev_dbg(&dev->ibdev, "Can't change pkey index\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

int efa_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
		  int qp_attr_mask, struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibqp->device);
	struct efa_com_modify_qp_params params = {};
	struct efa_qp *qp = to_eqp(ibqp);
	enum ib_qp_state cur_state;
	enum ib_qp_state new_state;
	int err;

	if (udata->inlen &&
	    !ib_is_udata_cleared(udata, 0, udata->inlen)) {
		ibdev_dbg(&dev->ibdev,
			  "Incompatible ABI params, udata not cleared\n");
		return -EINVAL;
	}

	cur_state = qp_attr_mask & IB_QP_CUR_STATE ? qp_attr->cur_qp_state :
						     qp->state;
	new_state = qp_attr_mask & IB_QP_STATE ? qp_attr->qp_state : cur_state;

	err = efa_modify_qp_validate(dev, qp, qp_attr, qp_attr_mask, cur_state,
				     new_state);
	if (err)
		return err;

	params.qp_handle = qp->qp_handle;

	if (qp_attr_mask & IB_QP_STATE) {
		params.modify_mask |= BIT(EFA_ADMIN_QP_STATE_BIT) |
				      BIT(EFA_ADMIN_CUR_QP_STATE_BIT);
		params.cur_qp_state = qp_attr->cur_qp_state;
		params.qp_state = qp_attr->qp_state;
	}

	if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
		params.modify_mask |=
			BIT(EFA_ADMIN_SQ_DRAINED_ASYNC_NOTIFY_BIT);
		params.sq_drained_async_notify = qp_attr->en_sqd_async_notify;
	}

	if (qp_attr_mask & IB_QP_QKEY) {
		params.modify_mask |= BIT(EFA_ADMIN_QKEY_BIT);
		params.qkey = qp_attr->qkey;
	}

	if (qp_attr_mask & IB_QP_SQ_PSN) {
		params.modify_mask |= BIT(EFA_ADMIN_SQ_PSN_BIT);
		params.sq_psn = qp_attr->sq_psn;
	}

	err = efa_com_modify_qp(&dev->edev, &params);
	if (err)
		return err;

	qp->state = new_state;

	return 0;
}

static int efa_destroy_cq_idx(struct efa_dev *dev, int cq_idx)
{
	struct efa_com_destroy_cq_params params = { .cq_idx = cq_idx };

	return efa_com_destroy_cq(&dev->edev, &params);
}

void efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibcq->device);
	struct efa_cq *cq = to_ecq(ibcq);

	ibdev_dbg(&dev->ibdev,
		  "Destroy cq[%d] virt[0x%p] freed: size[%lu], dma[%pad]\n",
		  cq->cq_idx, cq->cpu_addr, cq->size, &cq->dma_addr);

	rdma_user_mmap_entry_remove(cq->mmap_entry);
	efa_destroy_cq_idx(dev, cq->cq_idx);
	efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
			DMA_FROM_DEVICE);
}

static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq,
				 struct efa_ibv_create_cq_resp *resp)
{
	resp->q_mmap_size = cq->size;
	cq->mmap_entry = efa_user_mmap_entry_insert(&cq->ucontext->ibucontext,
						    virt_to_phys(cq->cpu_addr),
						    cq->size, EFA_MMAP_DMA_PAGE,
						    &resp->q_mmap_key);
	if (!cq->mmap_entry)
		return -ENOMEM;

	return 0;
}

int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		  struct ib_udata *udata)
{
	struct efa_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct efa_ucontext, ibucontext);
	struct efa_ibv_create_cq_resp resp = {};
	struct efa_com_create_cq_params params;
	struct efa_com_create_cq_result result;
	struct ib_device *ibdev = ibcq->device;
	struct efa_dev *dev = to_edev(ibdev);
	struct efa_ibv_create_cq cmd = {};
	struct efa_cq *cq = to_ecq(ibcq);
	int entries = attr->cqe;
	int err;

	ibdev_dbg(ibdev, "create_cq entries %d\n", entries);

	if (entries < 1 || entries > dev->dev_attr.max_cq_depth) {
		ibdev_dbg(ibdev,
			  "cq: requested entries[%u] non-positive or greater than max[%u]\n",
			  entries, dev->dev_attr.max_cq_depth);
		err = -EINVAL;
		goto err_out;
	}

	if (offsetofend(typeof(cmd), num_sub_cqs) > udata->inlen) {
		ibdev_dbg(ibdev,
			  "Incompatible ABI params, no input udata\n");
		err = -EINVAL;
		goto err_out;
	}

	if (udata->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(udata, sizeof(cmd),
				 udata->inlen - sizeof(cmd))) {
		ibdev_dbg(ibdev,
			  "Incompatible ABI params, unknown fields in udata\n");
		err = -EINVAL;
		goto err_out;
	}

	err = ib_copy_from_udata(&cmd, udata,
				 min(sizeof(cmd), udata->inlen));
	if (err) {
		ibdev_dbg(ibdev, "Cannot copy udata for create_cq\n");
		goto err_out;
	}

	if (cmd.comp_mask || !is_reserved_cleared(cmd.reserved_50)) {
		ibdev_dbg(ibdev,
			  "Incompatible ABI params, unknown fields in udata\n");
		err = -EINVAL;
		goto err_out;
	}

	if (!cmd.cq_entry_size) {
		ibdev_dbg(ibdev,
			  "Invalid entry size [%u]\n", cmd.cq_entry_size);
		err = -EINVAL;
		goto err_out;
	}

	if (cmd.num_sub_cqs != dev->dev_attr.sub_cqs_per_cq) {
		ibdev_dbg(ibdev,
			  "Invalid number of sub cqs[%u] expected[%u]\n",
			  cmd.num_sub_cqs, dev->dev_attr.sub_cqs_per_cq);
		err = -EINVAL;
		goto err_out;
	}

	cq->ucontext = ucontext;
	cq->size = PAGE_ALIGN(cmd.cq_entry_size * entries * cmd.num_sub_cqs);
	cq->cpu_addr = efa_zalloc_mapped(dev, &cq->dma_addr, cq->size,
					 DMA_FROM_DEVICE);
	if (!cq->cpu_addr) {
		err = -ENOMEM;
		goto err_out;
	}

	params.uarn = cq->ucontext->uarn;
	params.cq_depth = entries;
	params.dma_addr = cq->dma_addr;
	params.entry_size_in_bytes = cmd.cq_entry_size;
	params.num_sub_cqs = cmd.num_sub_cqs;
	err = efa_com_create_cq(&dev->edev, &params, &result);
	if (err)
		goto err_free_mapped;

	resp.cq_idx = result.cq_idx;
	cq->cq_idx = result.cq_idx;
	cq->ibcq.cqe = result.actual_depth;
	WARN_ON_ONCE(entries != result.actual_depth);

	err = cq_mmap_entries_setup(dev, cq, &resp);
	if (err) {
		ibdev_dbg(ibdev, "Could not setup cq[%u] mmap entries\n",
			  cq->cq_idx);
		goto err_destroy_cq;
	}

	if (udata->outlen) {
		err = ib_copy_to_udata(udata, &resp,
				       min(sizeof(resp), udata->outlen));
		if (err) {
			ibdev_dbg(ibdev,
				  "Failed to copy udata for create_cq\n");
			goto err_remove_mmap;
		}
	}

	ibdev_dbg(ibdev, "Created cq[%d], cq depth[%u]. dma[%pad] virt[0x%p]\n",
		  cq->cq_idx, result.actual_depth, &cq->dma_addr, cq->cpu_addr);

	return 0;

err_remove_mmap:
	rdma_user_mmap_entry_remove(cq->mmap_entry);
err_destroy_cq:
	efa_destroy_cq_idx(dev, cq->cq_idx);
err_free_mapped:
	efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
			DMA_FROM_DEVICE);

err_out:
	atomic64_inc(&dev->stats.sw_stats.create_cq_err);
	return err;
}

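/*
 * Walk the umem scatterlist in hp_shift-sized blocks and record the DMA
 * address of each block ("huge page") into the supplied page list.
 */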
static int umem_to_page_list(struct efa_dev *dev,
			     struct ib_umem *umem,
			     u64 *page_list,
			     u32 hp_cnt,
			     u8 hp_shift)
{
	u32 pages_in_hp = BIT(hp_shift - PAGE_SHIFT);
	struct ib_block_iter biter;
	unsigned int hp_idx = 0;

	ibdev_dbg(&dev->ibdev, "hp_cnt[%u], pages_in_hp[%u]\n",
		  hp_cnt, pages_in_hp);

	rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap,
			    BIT(hp_shift))
		page_list[hp_idx++] = rdma_block_iter_dma_address(&biter);

	return 0;
}

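/*
 * Build a scatterlist over the pages backing a vmalloc'ed buffer so the
 * indirect PBL buffer itself can be DMA mapped.
 */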
static struct scatterlist *efa_vmalloc_buf_to_sg(u64 *buf, int page_cnt)
{
	struct scatterlist *sglist;
	struct page *pg;
	int i;

	sglist = kcalloc(page_cnt, sizeof(*sglist), GFP_KERNEL);
	if (!sglist)
		return NULL;
	sg_init_table(sglist, page_cnt);
	for (i = 0; i < page_cnt; i++) {
		pg = vmalloc_to_page(buf);
		if (!pg)
			goto err;
		sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
		buf += PAGE_SIZE / sizeof(*buf);
	}
	return sglist;

err:
	kfree(sglist);
	return NULL;
}

/*
 * Create a chunk list from the DMA addresses of the physical pages in the
 * supplied scatter gather list.
 */
static int pbl_chunk_list_create(struct efa_dev *dev, struct pbl_context *pbl)
{
	struct pbl_chunk_list *chunk_list = &pbl->phys.indirect.chunk_list;
	int page_cnt = pbl->phys.indirect.pbl_buf_size_in_pages;
	struct scatterlist *pages_sgl = pbl->phys.indirect.sgl;
	unsigned int chunk_list_size, chunk_idx, payload_idx;
	int sg_dma_cnt = pbl->phys.indirect.sg_dma_cnt;
	struct efa_com_ctrl_buff_info *ctrl_buf;
	u64 *cur_chunk_buf, *prev_chunk_buf;
	struct ib_block_iter biter;
	dma_addr_t dma_addr;
	int i;

	/* allocate a chunk list that consists of 4KB chunks */
	chunk_list_size = DIV_ROUND_UP(page_cnt, EFA_PTRS_PER_CHUNK);

	chunk_list->size = chunk_list_size;
	chunk_list->chunks = kcalloc(chunk_list_size,
				     sizeof(*chunk_list->chunks),
				     GFP_KERNEL);
	if (!chunk_list->chunks)
		return -ENOMEM;

	ibdev_dbg(&dev->ibdev,
		  "chunk_list_size[%u] - pages[%u]\n", chunk_list_size,
		  page_cnt);

	/* allocate chunk buffers: */
	for (i = 0; i < chunk_list_size; i++) {
		chunk_list->chunks[i].buf = kzalloc(EFA_CHUNK_SIZE, GFP_KERNEL);
		if (!chunk_list->chunks[i].buf)
			goto chunk_list_dealloc;

		chunk_list->chunks[i].length = EFA_CHUNK_USED_SIZE;
	}
	chunk_list->chunks[chunk_list_size - 1].length =
		((page_cnt % EFA_PTRS_PER_CHUNK) * EFA_CHUNK_PAYLOAD_PTR_SIZE) +
			EFA_CHUNK_PTR_SIZE;

	/* fill the dma addresses of sg list pages to chunks: */
	chunk_idx = 0;
	payload_idx = 0;
	cur_chunk_buf = chunk_list->chunks[0].buf;
	rdma_for_each_block(pages_sgl, &biter, sg_dma_cnt,
			    EFA_CHUNK_PAYLOAD_SIZE) {
		cur_chunk_buf[payload_idx++] =
			rdma_block_iter_dma_address(&biter);

		if (payload_idx == EFA_PTRS_PER_CHUNK) {
			chunk_idx++;
			cur_chunk_buf = chunk_list->chunks[chunk_idx].buf;
			payload_idx = 0;
		}
	}

	/* map chunks to dma and fill chunks next ptrs */
	for (i = chunk_list_size - 1; i >= 0; i--) {
		dma_addr = dma_map_single(&dev->pdev->dev,
					  chunk_list->chunks[i].buf,
					  chunk_list->chunks[i].length,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(&dev->pdev->dev, dma_addr)) {
			ibdev_err(&dev->ibdev,
				  "chunk[%u] dma_map_failed\n", i);
			goto chunk_list_unmap;
		}

		chunk_list->chunks[i].dma_addr = dma_addr;
		ibdev_dbg(&dev->ibdev,
			  "chunk[%u] mapped at [%pad]\n", i, &dma_addr);

		if (!i)
			break;

		prev_chunk_buf = chunk_list->chunks[i - 1].buf;

		ctrl_buf = (struct efa_com_ctrl_buff_info *)
				&prev_chunk_buf[EFA_PTRS_PER_CHUNK];
		ctrl_buf->length = chunk_list->chunks[i].length;

		efa_com_set_dma_addr(dma_addr,
				     &ctrl_buf->address.mem_addr_high,
				     &ctrl_buf->address.mem_addr_low);
	}

	return 0;

chunk_list_unmap:
	/* chunk i was never mapped; only unmap the chunks that follow it */
	for (i++; i < chunk_list_size; i++) {
		dma_unmap_single(&dev->pdev->dev, chunk_list->chunks[i].dma_addr,
				 chunk_list->chunks[i].length, DMA_TO_DEVICE);
	}
chunk_list_dealloc:
	for (i = 0; i < chunk_list_size; i++)
		kfree(chunk_list->chunks[i].buf);

	kfree(chunk_list->chunks);
	return -ENOMEM;
}

static void pbl_chunk_list_destroy(struct efa_dev *dev, struct pbl_context *pbl)
{
	struct pbl_chunk_list *chunk_list = &pbl->phys.indirect.chunk_list;
	int i;

	for (i = 0; i < chunk_list->size; i++) {
		dma_unmap_single(&dev->pdev->dev, chunk_list->chunks[i].dma_addr,
				 chunk_list->chunks[i].length, DMA_TO_DEVICE);
		kfree(chunk_list->chunks[i].buf);
	}

	kfree(chunk_list->chunks);
}

/* initialize pbl continuous mode: map pbl buffer to a dma address. */
static int pbl_continuous_initialize(struct efa_dev *dev,
				     struct pbl_context *pbl)
{
	dma_addr_t dma_addr;

	dma_addr = dma_map_single(&dev->pdev->dev, pbl->pbl_buf,
				  pbl->pbl_buf_size_in_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(&dev->pdev->dev, dma_addr)) {
		ibdev_err(&dev->ibdev, "Unable to map pbl to DMA address\n");
		return -ENOMEM;
	}

	pbl->phys.continuous.dma_addr = dma_addr;
	ibdev_dbg(&dev->ibdev,
		  "pbl continuous - dma_addr = %pad, size[%u]\n",
		  &dma_addr, pbl->pbl_buf_size_in_bytes);

	return 0;
}

/*
 * initialize pbl indirect mode:
 * create a chunk list out of the dma addresses of the physical pages of
 * pbl buffer.
 */
static int pbl_indirect_initialize(struct efa_dev *dev, struct pbl_context *pbl)
{
	u32 size_in_pages = DIV_ROUND_UP(pbl->pbl_buf_size_in_bytes, PAGE_SIZE);
	struct scatterlist *sgl;
	int sg_dma_cnt, err;

	BUILD_BUG_ON(EFA_CHUNK_PAYLOAD_SIZE > PAGE_SIZE);
	sgl = efa_vmalloc_buf_to_sg(pbl->pbl_buf, size_in_pages);
	if (!sgl)
		return -ENOMEM;

	sg_dma_cnt = dma_map_sg(&dev->pdev->dev, sgl, size_in_pages, DMA_TO_DEVICE);
	if (!sg_dma_cnt) {
		err = -EINVAL;
		goto err_map;
	}

	pbl->phys.indirect.pbl_buf_size_in_pages = size_in_pages;
	pbl->phys.indirect.sgl = sgl;
	pbl->phys.indirect.sg_dma_cnt = sg_dma_cnt;
	err = pbl_chunk_list_create(dev, pbl);
	if (err) {
		ibdev_dbg(&dev->ibdev,
			  "chunk_list creation failed[%d]\n", err);
		goto err_chunk;
	}

	ibdev_dbg(&dev->ibdev,
		  "pbl indirect - size[%u], chunks[%u]\n",
		  pbl->pbl_buf_size_in_bytes,
		  pbl->phys.indirect.chunk_list.size);

	return 0;

err_chunk:
	dma_unmap_sg(&dev->pdev->dev, sgl, size_in_pages, DMA_TO_DEVICE);
err_map:
	kfree(sgl);
	return err;
}

static void pbl_indirect_terminate(struct efa_dev *dev, struct pbl_context *pbl)
{
	pbl_chunk_list_destroy(dev, pbl);
	dma_unmap_sg(&dev->pdev->dev, pbl->phys.indirect.sgl,
		     pbl->phys.indirect.pbl_buf_size_in_pages, DMA_TO_DEVICE);
	kfree(pbl->phys.indirect.sgl);
}

/* create a page buffer list from a mapped user memory region */
static int pbl_create(struct efa_dev *dev,
		      struct pbl_context *pbl,
		      struct ib_umem *umem,
		      int hp_cnt,
		      u8 hp_shift)
{
	int err;

	pbl->pbl_buf_size_in_bytes = hp_cnt * EFA_CHUNK_PAYLOAD_PTR_SIZE;
	pbl->pbl_buf = kvzalloc(pbl->pbl_buf_size_in_bytes, GFP_KERNEL);
	if (!pbl->pbl_buf)
		return -ENOMEM;

	if (is_vmalloc_addr(pbl->pbl_buf)) {
		pbl->physically_continuous = 0;
		err = umem_to_page_list(dev, umem, pbl->pbl_buf, hp_cnt,
					hp_shift);
		if (err)
			goto err_free;

		err = pbl_indirect_initialize(dev, pbl);
		if (err)
			goto err_free;
	} else {
		pbl->physically_continuous = 1;
		err = umem_to_page_list(dev, umem, pbl->pbl_buf, hp_cnt,
					hp_shift);
		if (err)
			goto err_free;

		err = pbl_continuous_initialize(dev, pbl);
		if (err)
			goto err_free;
	}

	ibdev_dbg(&dev->ibdev,
		  "user_pbl_created: user_pages[%u], continuous[%u]\n",
		  hp_cnt, pbl->physically_continuous);

	return 0;

err_free:
	kvfree(pbl->pbl_buf);
	return err;
}

static void pbl_destroy(struct efa_dev *dev, struct pbl_context *pbl)
{
	if (pbl->physically_continuous)
		dma_unmap_single(&dev->pdev->dev, pbl->phys.continuous.dma_addr,
				 pbl->pbl_buf_size_in_bytes, DMA_TO_DEVICE);
	else
		pbl_indirect_terminate(dev, pbl);

	kvfree(pbl->pbl_buf);
}

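/*
 * Small registrations fit their entire page list inside the admin command
 * (inline PBL); larger ones go through a pbl_context built by
 * efa_create_pbl() below. The threshold check lives in efa_reg_mr().
 */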
static int efa_create_inline_pbl(struct efa_dev *dev, struct efa_mr *mr,
				 struct efa_com_reg_mr_params *params)
{
	int err;

	params->inline_pbl = 1;
	err = umem_to_page_list(dev, mr->umem, params->pbl.inline_pbl_array,
				params->page_num, params->page_shift);
	if (err)
		return err;

	ibdev_dbg(&dev->ibdev,
		  "inline_pbl_array - pages[%u]\n", params->page_num);

	return 0;
}

static int efa_create_pbl(struct efa_dev *dev,
			  struct pbl_context *pbl,
			  struct efa_mr *mr,
			  struct efa_com_reg_mr_params *params)
{
	int err;

	err = pbl_create(dev, pbl, mr->umem, params->page_num,
			 params->page_shift);
	if (err) {
		ibdev_dbg(&dev->ibdev, "Failed to create pbl[%d]\n", err);
		return err;
	}

	params->inline_pbl = 0;
	params->indirect = !pbl->physically_continuous;
	if (pbl->physically_continuous) {
		params->pbl.pbl.length = pbl->pbl_buf_size_in_bytes;

		efa_com_set_dma_addr(pbl->phys.continuous.dma_addr,
				     &params->pbl.pbl.address.mem_addr_high,
				     &params->pbl.pbl.address.mem_addr_low);
	} else {
		params->pbl.pbl.length =
			pbl->phys.indirect.chunk_list.chunks[0].length;

		efa_com_set_dma_addr(pbl->phys.indirect.chunk_list.chunks[0].dma_addr,
				     &params->pbl.pbl.address.mem_addr_high,
				     &params->pbl.pbl.address.mem_addr_low);
	}

	return 0;
}

struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
			 u64 virt_addr, int access_flags,
			 struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibpd->device);
	struct efa_com_reg_mr_params params = {};
	struct efa_com_reg_mr_result result = {};
	struct pbl_context pbl;
	int supp_access_flags;
	unsigned int pg_sz;
	struct efa_mr *mr;
	int inline_size;
	int err;

	if (udata && udata->inlen &&
	    !ib_is_udata_cleared(udata, 0, udata->inlen)) {
		ibdev_dbg(&dev->ibdev,
			  "Incompatible ABI params, udata not cleared\n");
		err = -EINVAL;
		goto err_out;
	}

	supp_access_flags =
		IB_ACCESS_LOCAL_WRITE |
		(is_rdma_read_cap(dev) ? IB_ACCESS_REMOTE_READ : 0);

	access_flags &= ~IB_ACCESS_OPTIONAL;
	if (access_flags & ~supp_access_flags) {
		ibdev_dbg(&dev->ibdev,
			  "Unsupported access flags[%#x], supported[%#x]\n",
			  access_flags, supp_access_flags);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		err = -ENOMEM;
		goto err_out;
	}

	mr->umem = ib_umem_get(ibpd->device, start, length, access_flags);
	if (IS_ERR(mr->umem)) {
		err = PTR_ERR(mr->umem);
		ibdev_dbg(&dev->ibdev,
			  "Failed to pin and map user space memory[%d]\n", err);
		goto err_free;
	}

	params.pd = to_epd(ibpd)->pdn;
	params.iova = virt_addr;
	params.mr_length_in_bytes = length;
	params.permissions = access_flags;

	pg_sz = ib_umem_find_best_pgsz(mr->umem,
				       dev->dev_attr.page_size_cap,
				       virt_addr);
	if (!pg_sz) {
		err = -EOPNOTSUPP;
		ibdev_dbg(&dev->ibdev, "Failed to find a suitable page size in page_size_cap %#llx\n",
			  dev->dev_attr.page_size_cap);
		goto err_unmap;
	}

	params.page_shift = __ffs(pg_sz);
	params.page_num = DIV_ROUND_UP(length + (start & (pg_sz - 1)),
				       pg_sz);

	ibdev_dbg(&dev->ibdev,
		  "start %#llx length %#llx params.page_shift %u params.page_num %u\n",
		  start, length, params.page_shift, params.page_num);

	inline_size = ARRAY_SIZE(params.pbl.inline_pbl_array);
	if (params.page_num <= inline_size) {
		err = efa_create_inline_pbl(dev, mr, &params);
		if (err)
			goto err_unmap;

		err = efa_com_register_mr(&dev->edev, &params, &result);
		if (err)
			goto err_unmap;
	} else {
		err = efa_create_pbl(dev, &pbl, mr, &params);
		if (err)
			goto err_unmap;

		err = efa_com_register_mr(&dev->edev, &params, &result);
		pbl_destroy(dev, &pbl);

		if (err)
			goto err_unmap;
	}

	mr->ibmr.lkey = result.l_key;
	mr->ibmr.rkey = result.r_key;
	mr->ibmr.length = length;
	ibdev_dbg(&dev->ibdev, "Registered mr[%d]\n", mr->ibmr.lkey);

	return &mr->ibmr;

err_unmap:
	ib_umem_release(mr->umem);
err_free:
	kfree(mr);
err_out:
	atomic64_inc(&dev->stats.sw_stats.reg_mr_err);
	return ERR_PTR(err);
}

int efa_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibmr->device);
	struct efa_com_dereg_mr_params params;
	struct efa_mr *mr = to_emr(ibmr);
	int err;

	ibdev_dbg(&dev->ibdev, "Deregister mr[%d]\n", ibmr->lkey);

	params.l_key = mr->ibmr.lkey;
	err = efa_com_dereg_mr(&dev->edev, &params);
	if (err)
		return err;

	ib_umem_release(mr->umem);
	kfree(mr);

	return 0;
}

int efa_get_port_immutable(struct ib_device *ibdev, u8 port_num,
			   struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err) {
		ibdev_dbg(ibdev, "Couldn't query port err[%d]\n", err);
		return err;
	}

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;

	return 0;
}

static int efa_dealloc_uar(struct efa_dev *dev, u16 uarn)
{
	struct efa_com_dealloc_uar_params params = {
		.uarn = uarn,
	};

	return efa_com_dealloc_uar(&dev->edev, &params);
}

int efa_alloc_ucontext(struct ib_ucontext *ibucontext, struct ib_udata *udata)
{
	struct efa_ucontext *ucontext = to_eucontext(ibucontext);
	struct efa_dev *dev = to_edev(ibucontext->device);
	struct efa_ibv_alloc_ucontext_resp resp = {};
	struct efa_com_alloc_uar_result result;
	int err;

	/*
	 * it's fine if the driver does not know all request fields,
	 * we will ack input fields in our response.
	 */

	err = efa_com_alloc_uar(&dev->edev, &result);
	if (err)
		goto err_out;

	ucontext->uarn = result.uarn;

	resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_QUERY_DEVICE;
	resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_CREATE_AH;
	resp.sub_cqs_per_cq = dev->dev_attr.sub_cqs_per_cq;
	resp.inline_buf_size = dev->dev_attr.inline_buf_size;
	resp.max_llq_size = dev->dev_attr.max_llq_size;

	if (udata && udata->outlen) {
		err = ib_copy_to_udata(udata, &resp,
				       min(sizeof(resp), udata->outlen));
		if (err)
			goto err_dealloc_uar;
	}

	return 0;

err_dealloc_uar:
	efa_dealloc_uar(dev, result.uarn);
err_out:
	atomic64_inc(&dev->stats.sw_stats.alloc_ucontext_err);
	return err;
}

void efa_dealloc_ucontext(struct ib_ucontext *ibucontext)
{
	struct efa_ucontext *ucontext = to_eucontext(ibucontext);
	struct efa_dev *dev = to_edev(ibucontext->device);

	efa_dealloc_uar(dev, ucontext->uarn);
}

void efa_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
{
	struct efa_user_mmap_entry *entry = to_emmap(rdma_entry);

	kfree(entry);
}

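/*
 * Look up the mmap entry for vma->vm_pgoff and map it according to its
 * type: BAR pages go through rdma_user_mmap_io() with the matching page
 * protection, DMA pages are inserted page by page with vm_insert_page().
 */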
static int __efa_mmap(struct efa_dev *dev, struct efa_ucontext *ucontext,
		      struct vm_area_struct *vma)
{
	struct rdma_user_mmap_entry *rdma_entry;
	struct efa_user_mmap_entry *entry;
	unsigned long va;
	int err = 0;
	u64 pfn;

	rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma);
	if (!rdma_entry) {
		ibdev_dbg(&dev->ibdev,
			  "pgoff[%#lx] does not have valid entry\n",
			  vma->vm_pgoff);
		atomic64_inc(&dev->stats.sw_stats.mmap_err);
		return -EINVAL;
	}
	entry = to_emmap(rdma_entry);

	ibdev_dbg(&dev->ibdev,
		  "Mapping address[%#llx], length[%#zx], mmap_flag[%d]\n",
		  entry->address, rdma_entry->npages * PAGE_SIZE,
		  entry->mmap_flag);

	pfn = entry->address >> PAGE_SHIFT;
	switch (entry->mmap_flag) {
	case EFA_MMAP_IO_NC:
		err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn,
					entry->rdma_entry.npages * PAGE_SIZE,
					pgprot_noncached(vma->vm_page_prot),
					rdma_entry);
		break;
	case EFA_MMAP_IO_WC:
		err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn,
					entry->rdma_entry.npages * PAGE_SIZE,
					pgprot_writecombine(vma->vm_page_prot),
					rdma_entry);
		break;
	case EFA_MMAP_DMA_PAGE:
		for (va = vma->vm_start; va < vma->vm_end;
		     va += PAGE_SIZE, pfn++) {
			err = vm_insert_page(vma, va, pfn_to_page(pfn));
			if (err)
				break;
		}
		break;
	default:
		err = -EINVAL;
	}

	if (err) {
		ibdev_dbg(
			&dev->ibdev,
			"Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n",
			entry->address, rdma_entry->npages * PAGE_SIZE,
			entry->mmap_flag, err);
		atomic64_inc(&dev->stats.sw_stats.mmap_err);
	}

	rdma_user_mmap_entry_put(rdma_entry);
	return err;
}

int efa_mmap(struct ib_ucontext *ibucontext,
	     struct vm_area_struct *vma)
{
	struct efa_ucontext *ucontext = to_eucontext(ibucontext);
	struct efa_dev *dev = to_edev(ibucontext->device);
	size_t length = vma->vm_end - vma->vm_start;

	ibdev_dbg(&dev->ibdev,
		  "start %#lx, end %#lx, length = %#zx, pgoff = %#lx\n",
		  vma->vm_start, vma->vm_end, length, vma->vm_pgoff);

	return __efa_mmap(dev, ucontext, vma);
}

static int efa_ah_destroy(struct efa_dev *dev, struct efa_ah *ah)
{
	struct efa_com_destroy_ah_params params = {
		.ah = ah->ah,
		.pdn = to_epd(ah->ibah.pd)->pdn,
	};

	return efa_com_destroy_ah(&dev->edev, &params);
}

int efa_create_ah(struct ib_ah *ibah,
		  struct rdma_ah_init_attr *init_attr,
		  struct ib_udata *udata)
{
	struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
	struct efa_dev *dev = to_edev(ibah->device);
	struct efa_com_create_ah_params params = {};
	struct efa_ibv_create_ah_resp resp = {};
	struct efa_com_create_ah_result result;
	struct efa_ah *ah = to_eah(ibah);
	int err;

	if (!(init_attr->flags & RDMA_CREATE_AH_SLEEPABLE)) {
		ibdev_dbg(&dev->ibdev,
			  "Create address handle is not supported in atomic context\n");
		err = -EOPNOTSUPP;
		goto err_out;
	}

	if (udata->inlen &&
	    !ib_is_udata_cleared(udata, 0, udata->inlen)) {
		ibdev_dbg(&dev->ibdev, "Incompatible ABI params\n");
		err = -EINVAL;
		goto err_out;
	}

	memcpy(params.dest_addr, ah_attr->grh.dgid.raw,
	       sizeof(params.dest_addr));
	params.pdn = to_epd(ibah->pd)->pdn;
	err = efa_com_create_ah(&dev->edev, &params, &result);
	if (err)
		goto err_out;

	memcpy(ah->id, ah_attr->grh.dgid.raw, sizeof(ah->id));
	ah->ah = result.ah;

	resp.efa_address_handle = result.ah;

	if (udata->outlen) {
		err = ib_copy_to_udata(udata, &resp,
				       min(sizeof(resp), udata->outlen));
		if (err) {
			ibdev_dbg(&dev->ibdev,
				  "Failed to copy udata for create_ah response\n");
			goto err_destroy_ah;
		}
	}
	ibdev_dbg(&dev->ibdev, "Created ah[%d]\n", ah->ah);

	return 0;

err_destroy_ah:
	efa_ah_destroy(dev, ah);
err_out:
	atomic64_inc(&dev->stats.sw_stats.create_ah_err);
	return err;
}

void efa_destroy_ah(struct ib_ah *ibah, u32 flags)
{
	struct efa_dev *dev = to_edev(ibah->pd->device);
	struct efa_ah *ah = to_eah(ibah);

	ibdev_dbg(&dev->ibdev, "Destroy ah[%d]\n", ah->ah);

	if (!(flags & RDMA_DESTROY_AH_SLEEPABLE)) {
		ibdev_dbg(&dev->ibdev,
			  "Destroy address handle is not supported in atomic context\n");
		return;
	}

	efa_ah_destroy(dev, ah);
}

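/*
 * HW stats callbacks: counter names come from the EFA_DEFINE_STATS table
 * and values are filled from the device basic stats, the admin queue
 * stats and the driver's software error counters.
 */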
struct rdma_hw_stats *efa_alloc_hw_stats(struct ib_device *ibdev, u8 port_num)
{
	return rdma_alloc_hw_stats_struct(efa_stats_names,
					  ARRAY_SIZE(efa_stats_names),
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
		     u8 port_num, int index)
{
	struct efa_com_get_stats_params params = {};
	union efa_com_get_stats_result result;
	struct efa_dev *dev = to_edev(ibdev);
	struct efa_com_basic_stats *bs;
	struct efa_com_stats_admin *as;
	struct efa_stats *s;
	int err;

	params.type = EFA_ADMIN_GET_STATS_TYPE_BASIC;
	params.scope = EFA_ADMIN_GET_STATS_SCOPE_ALL;

	err = efa_com_get_stats(&dev->edev, &params, &result);
	if (err)
		return err;

	bs = &result.basic_stats;
	stats->value[EFA_TX_BYTES] = bs->tx_bytes;
	stats->value[EFA_TX_PKTS] = bs->tx_pkts;
	stats->value[EFA_RX_BYTES] = bs->rx_bytes;
	stats->value[EFA_RX_PKTS] = bs->rx_pkts;
	stats->value[EFA_RX_DROPS] = bs->rx_drops;

	as = &dev->edev.aq.stats;
	stats->value[EFA_SUBMITTED_CMDS] = atomic64_read(&as->submitted_cmd);
	stats->value[EFA_COMPLETED_CMDS] = atomic64_read(&as->completed_cmd);
	stats->value[EFA_CMDS_ERR] = atomic64_read(&as->cmd_err);
	stats->value[EFA_NO_COMPLETION_CMDS] = atomic64_read(&as->no_completion);

	s = &dev->stats;
	stats->value[EFA_KEEP_ALIVE_RCVD] = atomic64_read(&s->keep_alive_rcvd);
	stats->value[EFA_ALLOC_PD_ERR] = atomic64_read(&s->sw_stats.alloc_pd_err);
	stats->value[EFA_CREATE_QP_ERR] = atomic64_read(&s->sw_stats.create_qp_err);
	stats->value[EFA_CREATE_CQ_ERR] = atomic64_read(&s->sw_stats.create_cq_err);
	stats->value[EFA_REG_MR_ERR] = atomic64_read(&s->sw_stats.reg_mr_err);
	stats->value[EFA_ALLOC_UCONTEXT_ERR] = atomic64_read(&s->sw_stats.alloc_ucontext_err);
	stats->value[EFA_CREATE_AH_ERR] = atomic64_read(&s->sw_stats.create_ah_err);
	stats->value[EFA_MMAP_ERR] = atomic64_read(&s->sw_stats.mmap_err);

	return ARRAY_SIZE(efa_stats_names);
}

enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev,
					 u8 port_num)
{
	return IB_LINK_LAYER_UNSPECIFIED;
}