1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /*
3  * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
4  */
5 
6 #include <linux/vmalloc.h>
7 #include <linux/log2.h>
8 
9 #include <rdma/ib_addr.h>
10 #include <rdma/ib_umem.h>
11 #include <rdma/ib_user_verbs.h>
12 #include <rdma/ib_verbs.h>
13 #include <rdma/uverbs_ioctl.h>
14 
15 #include "efa.h"
16 
17 enum {
18 	EFA_MMAP_DMA_PAGE = 0,
19 	EFA_MMAP_IO_WC,
20 	EFA_MMAP_IO_NC,
21 };
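/*
 * The mmap entry types above determine how __efa_mmap() maps an entry into a
 * user VMA: EFA_MMAP_DMA_PAGE is host memory (CQ/RQ rings allocated with
 * efa_zalloc_mapped()) remapped page by page with vm_insert_page();
 * EFA_MMAP_IO_WC is a write-combining mapping of the LLQ descriptor window in
 * the memory BAR; EFA_MMAP_IO_NC is a non-cached mapping of doorbell pages in
 * the doorbell BAR.
 */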
22 
23 #define EFA_AENQ_ENABLED_GROUPS \
24 	(BIT(EFA_ADMIN_FATAL_ERROR) | BIT(EFA_ADMIN_WARNING) | \
25 	 BIT(EFA_ADMIN_NOTIFICATION) | BIT(EFA_ADMIN_KEEP_ALIVE))
26 
27 struct efa_user_mmap_entry {
28 	struct rdma_user_mmap_entry rdma_entry;
29 	u64 address;
30 	u8 mmap_flag;
31 };
32 
33 #define EFA_DEFINE_STATS(op) \
34 	op(EFA_TX_BYTES, "tx_bytes") \
35 	op(EFA_TX_PKTS, "tx_pkts") \
36 	op(EFA_RX_BYTES, "rx_bytes") \
37 	op(EFA_RX_PKTS, "rx_pkts") \
38 	op(EFA_RX_DROPS, "rx_drops") \
39 	op(EFA_SEND_BYTES, "send_bytes") \
40 	op(EFA_SEND_WRS, "send_wrs") \
41 	op(EFA_RECV_BYTES, "recv_bytes") \
42 	op(EFA_RECV_WRS, "recv_wrs") \
43 	op(EFA_RDMA_READ_WRS, "rdma_read_wrs") \
44 	op(EFA_RDMA_READ_BYTES, "rdma_read_bytes") \
45 	op(EFA_RDMA_READ_WR_ERR, "rdma_read_wr_err") \
46 	op(EFA_RDMA_READ_RESP_BYTES, "rdma_read_resp_bytes") \
47 	op(EFA_SUBMITTED_CMDS, "submitted_cmds") \
48 	op(EFA_COMPLETED_CMDS, "completed_cmds") \
49 	op(EFA_CMDS_ERR, "cmds_err") \
50 	op(EFA_NO_COMPLETION_CMDS, "no_completion_cmds") \
51 	op(EFA_KEEP_ALIVE_RCVD, "keep_alive_rcvd") \
52 	op(EFA_ALLOC_PD_ERR, "alloc_pd_err") \
53 	op(EFA_CREATE_QP_ERR, "create_qp_err") \
54 	op(EFA_CREATE_CQ_ERR, "create_cq_err") \
55 	op(EFA_REG_MR_ERR, "reg_mr_err") \
56 	op(EFA_ALLOC_UCONTEXT_ERR, "alloc_ucontext_err") \
57 	op(EFA_CREATE_AH_ERR, "create_ah_err") \
58 	op(EFA_MMAP_ERR, "mmap_err")
59 
60 #define EFA_STATS_ENUM(ename, name) ename,
61 #define EFA_STATS_STR(ename, name) [ename] = name,
62 
63 enum efa_hw_stats {
64 	EFA_DEFINE_STATS(EFA_STATS_ENUM)
65 };
66 
67 static const char *const efa_stats_names[] = {
68 	EFA_DEFINE_STATS(EFA_STATS_STR)
69 };
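/*
 * The X-macro above expands once into the enum and once into the name table,
 * roughly:
 *
 *	enum efa_hw_stats {
 *		EFA_TX_BYTES,
 *		EFA_TX_PKTS,
 *		...
 *		EFA_MMAP_ERR,
 *	};
 *
 *	static const char *const efa_stats_names[] = {
 *		[EFA_TX_BYTES] = "tx_bytes",
 *		[EFA_TX_PKTS] = "tx_pkts",
 *		...
 *	};
 *
 * so the enum used to index stats->value[] in efa_get_hw_stats() and the name
 * table handed to rdma_alloc_hw_stats_struct() always stay in sync.
 */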
70 
71 #define EFA_CHUNK_PAYLOAD_SHIFT       12
72 #define EFA_CHUNK_PAYLOAD_SIZE        BIT(EFA_CHUNK_PAYLOAD_SHIFT)
73 #define EFA_CHUNK_PAYLOAD_PTR_SIZE    8
74 
75 #define EFA_CHUNK_SHIFT               12
76 #define EFA_CHUNK_SIZE                BIT(EFA_CHUNK_SHIFT)
77 #define EFA_CHUNK_PTR_SIZE            sizeof(struct efa_com_ctrl_buff_info)
78 
79 #define EFA_PTRS_PER_CHUNK \
80 	((EFA_CHUNK_SIZE - EFA_CHUNK_PTR_SIZE) / EFA_CHUNK_PAYLOAD_PTR_SIZE)
81 
82 #define EFA_CHUNK_USED_SIZE \
83 	((EFA_PTRS_PER_CHUNK * EFA_CHUNK_PAYLOAD_PTR_SIZE) + EFA_CHUNK_PTR_SIZE)
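/*
 * An indirect PBL is described to the device as a linked list of 4KB chunks.
 * Each chunk holds EFA_PTRS_PER_CHUNK page DMA addresses (8 bytes each)
 * followed by one struct efa_com_ctrl_buff_info pointing at the next chunk,
 * so EFA_CHUNK_USED_SIZE is the portion of a chunk that is actually filled:
 * EFA_PTRS_PER_CHUNK * EFA_CHUNK_PAYLOAD_PTR_SIZE + EFA_CHUNK_PTR_SIZE bytes.
 */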
84 
85 struct pbl_chunk {
86 	dma_addr_t dma_addr;
87 	u64 *buf;
88 	u32 length;
89 };
90 
91 struct pbl_chunk_list {
92 	struct pbl_chunk *chunks;
93 	unsigned int size;
94 };
95 
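/*
 * pbl_context describes the page buffer list of a memory registration in one
 * of two forms: "continuous", where the kvzalloc()ed pbl_buf happens to be
 * physically contiguous and is DMA-mapped as a single region, or "indirect",
 * where pbl_buf came from vmalloc and is instead described to the device via
 * a chunk list built from its own pages (see pbl_create()).
 */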
96 struct pbl_context {
97 	union {
98 		struct {
99 			dma_addr_t dma_addr;
100 		} continuous;
101 		struct {
102 			u32 pbl_buf_size_in_pages;
103 			struct scatterlist *sgl;
104 			int sg_dma_cnt;
105 			struct pbl_chunk_list chunk_list;
106 		} indirect;
107 	} phys;
108 	u64 *pbl_buf;
109 	u32 pbl_buf_size_in_bytes;
110 	u8 physically_continuous;
111 };
112 
113 static inline struct efa_dev *to_edev(struct ib_device *ibdev)
114 {
115 	return container_of(ibdev, struct efa_dev, ibdev);
116 }
117 
118 static inline struct efa_ucontext *to_eucontext(struct ib_ucontext *ibucontext)
119 {
120 	return container_of(ibucontext, struct efa_ucontext, ibucontext);
121 }
122 
123 static inline struct efa_pd *to_epd(struct ib_pd *ibpd)
124 {
125 	return container_of(ibpd, struct efa_pd, ibpd);
126 }
127 
128 static inline struct efa_mr *to_emr(struct ib_mr *ibmr)
129 {
130 	return container_of(ibmr, struct efa_mr, ibmr);
131 }
132 
133 static inline struct efa_qp *to_eqp(struct ib_qp *ibqp)
134 {
135 	return container_of(ibqp, struct efa_qp, ibqp);
136 }
137 
138 static inline struct efa_cq *to_ecq(struct ib_cq *ibcq)
139 {
140 	return container_of(ibcq, struct efa_cq, ibcq);
141 }
142 
143 static inline struct efa_ah *to_eah(struct ib_ah *ibah)
144 {
145 	return container_of(ibah, struct efa_ah, ibah);
146 }
147 
148 static inline struct efa_user_mmap_entry *
149 to_emmap(struct rdma_user_mmap_entry *rdma_entry)
150 {
151 	return container_of(rdma_entry, struct efa_user_mmap_entry, rdma_entry);
152 }
153 
154 #define EFA_DEV_CAP(dev, cap) \
155 	((dev)->dev_attr.device_caps & \
156 	 EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_##cap##_MASK)
157 
158 #define is_reserved_cleared(reserved) \
159 	!memchr_inv(reserved, 0, sizeof(reserved))
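/*
 * EFA_DEV_CAP(dev, RDMA_READ), for example, tests the
 * EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_READ_MASK bit in the device
 * capabilities reported by the device.  is_reserved_cleared() rejects user
 * ABI structures that set reserved bits this driver does not understand.
 */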
160 
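/*
 * Allocate a zeroed, physically contiguous buffer and DMA-map it.  CQ and RQ
 * rings are allocated this way so that virt_to_phys()/vm_insert_page() can
 * later be used to expose them to userspace as EFA_MMAP_DMA_PAGE entries.
 */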
161 static void *efa_zalloc_mapped(struct efa_dev *dev, dma_addr_t *dma_addr,
162 			       size_t size, enum dma_data_direction dir)
163 {
164 	void *addr;
165 
166 	addr = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
167 	if (!addr)
168 		return NULL;
169 
170 	*dma_addr = dma_map_single(&dev->pdev->dev, addr, size, dir);
171 	if (dma_mapping_error(&dev->pdev->dev, *dma_addr)) {
172 		ibdev_err(&dev->ibdev, "Failed to map DMA address\n");
173 		free_pages_exact(addr, size);
174 		return NULL;
175 	}
176 
177 	return addr;
178 }
179 
180 static void efa_free_mapped(struct efa_dev *dev, void *cpu_addr,
181 			    dma_addr_t dma_addr,
182 			    size_t size, enum dma_data_direction dir)
183 {
184 	dma_unmap_single(&dev->pdev->dev, dma_addr, size, dir);
185 	free_pages_exact(cpu_addr, size);
186 }
187 
188 int efa_query_device(struct ib_device *ibdev,
189 		     struct ib_device_attr *props,
190 		     struct ib_udata *udata)
191 {
192 	struct efa_com_get_device_attr_result *dev_attr;
193 	struct efa_ibv_ex_query_device_resp resp = {};
194 	struct efa_dev *dev = to_edev(ibdev);
195 	int err;
196 
197 	if (udata && udata->inlen &&
198 	    !ib_is_udata_cleared(udata, 0, udata->inlen)) {
199 		ibdev_dbg(ibdev,
200 			  "Incompatible ABI params, udata not cleared\n");
201 		return -EINVAL;
202 	}
203 
204 	dev_attr = &dev->dev_attr;
205 
206 	memset(props, 0, sizeof(*props));
207 	props->max_mr_size = dev_attr->max_mr_pages * PAGE_SIZE;
208 	props->page_size_cap = dev_attr->page_size_cap;
209 	props->vendor_id = dev->pdev->vendor;
210 	props->vendor_part_id = dev->pdev->device;
211 	props->hw_ver = dev->pdev->subsystem_device;
212 	props->max_qp = dev_attr->max_qp;
213 	props->max_cq = dev_attr->max_cq;
214 	props->max_pd = dev_attr->max_pd;
215 	props->max_mr = dev_attr->max_mr;
216 	props->max_ah = dev_attr->max_ah;
217 	props->max_cqe = dev_attr->max_cq_depth;
218 	props->max_qp_wr = min_t(u32, dev_attr->max_sq_depth,
219 				 dev_attr->max_rq_depth);
220 	props->max_send_sge = dev_attr->max_sq_sge;
221 	props->max_recv_sge = dev_attr->max_rq_sge;
222 	props->max_sge_rd = dev_attr->max_wr_rdma_sge;
223 	props->max_pkeys = 1;
224 
225 	if (udata && udata->outlen) {
226 		resp.max_sq_sge = dev_attr->max_sq_sge;
227 		resp.max_rq_sge = dev_attr->max_rq_sge;
228 		resp.max_sq_wr = dev_attr->max_sq_depth;
229 		resp.max_rq_wr = dev_attr->max_rq_depth;
230 		resp.max_rdma_size = dev_attr->max_rdma_size;
231 
232 		if (EFA_DEV_CAP(dev, RDMA_READ))
233 			resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RDMA_READ;
234 
235 		if (EFA_DEV_CAP(dev, RNR_RETRY))
236 			resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RNR_RETRY;
237 
238 		err = ib_copy_to_udata(udata, &resp,
239 				       min(sizeof(resp), udata->outlen));
240 		if (err) {
241 			ibdev_dbg(ibdev,
242 				  "Failed to copy udata for query_device\n");
243 			return err;
244 		}
245 	}
246 
247 	return 0;
248 }
249 
250 int efa_query_port(struct ib_device *ibdev, u32 port,
251 		   struct ib_port_attr *props)
252 {
253 	struct efa_dev *dev = to_edev(ibdev);
254 
255 	props->lmc = 1;
256 
257 	props->state = IB_PORT_ACTIVE;
258 	props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
259 	props->gid_tbl_len = 1;
260 	props->pkey_tbl_len = 1;
261 	props->active_speed = IB_SPEED_EDR;
262 	props->active_width = IB_WIDTH_4X;
263 	props->max_mtu = ib_mtu_int_to_enum(dev->dev_attr.mtu);
264 	props->active_mtu = ib_mtu_int_to_enum(dev->dev_attr.mtu);
265 	props->max_msg_sz = dev->dev_attr.mtu;
266 	props->max_vl_num = 1;
267 
268 	return 0;
269 }
270 
271 int efa_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
272 		 int qp_attr_mask,
273 		 struct ib_qp_init_attr *qp_init_attr)
274 {
275 	struct efa_dev *dev = to_edev(ibqp->device);
276 	struct efa_com_query_qp_params params = {};
277 	struct efa_com_query_qp_result result;
278 	struct efa_qp *qp = to_eqp(ibqp);
279 	int err;
280 
281 #define EFA_QUERY_QP_SUPP_MASK \
282 	(IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | \
283 	 IB_QP_QKEY | IB_QP_SQ_PSN | IB_QP_CAP | IB_QP_RNR_RETRY)
284 
285 	if (qp_attr_mask & ~EFA_QUERY_QP_SUPP_MASK) {
286 		ibdev_dbg(&dev->ibdev,
287 			  "Unsupported qp_attr_mask[%#x] supported[%#x]\n",
288 			  qp_attr_mask, EFA_QUERY_QP_SUPP_MASK);
289 		return -EOPNOTSUPP;
290 	}
291 
292 	memset(qp_attr, 0, sizeof(*qp_attr));
293 	memset(qp_init_attr, 0, sizeof(*qp_init_attr));
294 
295 	params.qp_handle = qp->qp_handle;
296 	err = efa_com_query_qp(&dev->edev, &params, &result);
297 	if (err)
298 		return err;
299 
300 	qp_attr->qp_state = result.qp_state;
301 	qp_attr->qkey = result.qkey;
302 	qp_attr->sq_psn = result.sq_psn;
303 	qp_attr->sq_draining = result.sq_draining;
304 	qp_attr->port_num = 1;
305 	qp_attr->rnr_retry = result.rnr_retry;
306 
307 	qp_attr->cap.max_send_wr = qp->max_send_wr;
308 	qp_attr->cap.max_recv_wr = qp->max_recv_wr;
309 	qp_attr->cap.max_send_sge = qp->max_send_sge;
310 	qp_attr->cap.max_recv_sge = qp->max_recv_sge;
311 	qp_attr->cap.max_inline_data = qp->max_inline_data;
312 
313 	qp_init_attr->qp_type = ibqp->qp_type;
314 	qp_init_attr->recv_cq = ibqp->recv_cq;
315 	qp_init_attr->send_cq = ibqp->send_cq;
316 	qp_init_attr->qp_context = ibqp->qp_context;
317 	qp_init_attr->cap = qp_attr->cap;
318 
319 	return 0;
320 }
321 
322 int efa_query_gid(struct ib_device *ibdev, u32 port, int index,
323 		  union ib_gid *gid)
324 {
325 	struct efa_dev *dev = to_edev(ibdev);
326 
327 	memcpy(gid->raw, dev->dev_attr.addr, sizeof(dev->dev_attr.addr));
328 
329 	return 0;
330 }
331 
332 int efa_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
333 		   u16 *pkey)
334 {
335 	if (index > 0)
336 		return -EINVAL;
337 
338 	*pkey = 0xffff;
339 	return 0;
340 }
341 
342 static int efa_pd_dealloc(struct efa_dev *dev, u16 pdn)
343 {
344 	struct efa_com_dealloc_pd_params params = {
345 		.pdn = pdn,
346 	};
347 
348 	return efa_com_dealloc_pd(&dev->edev, &params);
349 }
350 
351 int efa_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
352 {
353 	struct efa_dev *dev = to_edev(ibpd->device);
354 	struct efa_ibv_alloc_pd_resp resp = {};
355 	struct efa_com_alloc_pd_result result;
356 	struct efa_pd *pd = to_epd(ibpd);
357 	int err;
358 
359 	if (udata->inlen &&
360 	    !ib_is_udata_cleared(udata, 0, udata->inlen)) {
361 		ibdev_dbg(&dev->ibdev,
362 			  "Incompatible ABI params, udata not cleared\n");
363 		err = -EINVAL;
364 		goto err_out;
365 	}
366 
367 	err = efa_com_alloc_pd(&dev->edev, &result);
368 	if (err)
369 		goto err_out;
370 
371 	pd->pdn = result.pdn;
372 	resp.pdn = result.pdn;
373 
374 	if (udata->outlen) {
375 		err = ib_copy_to_udata(udata, &resp,
376 				       min(sizeof(resp), udata->outlen));
377 		if (err) {
378 			ibdev_dbg(&dev->ibdev,
379 				  "Failed to copy udata for alloc_pd\n");
380 			goto err_dealloc_pd;
381 		}
382 	}
383 
384 	ibdev_dbg(&dev->ibdev, "Allocated pd[%d]\n", pd->pdn);
385 
386 	return 0;
387 
388 err_dealloc_pd:
389 	efa_pd_dealloc(dev, result.pdn);
390 err_out:
391 	atomic64_inc(&dev->stats.alloc_pd_err);
392 	return err;
393 }
394 
395 int efa_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
396 {
397 	struct efa_dev *dev = to_edev(ibpd->device);
398 	struct efa_pd *pd = to_epd(ibpd);
399 
400 	ibdev_dbg(&dev->ibdev, "Dealloc pd[%d]\n", pd->pdn);
401 	efa_pd_dealloc(dev, pd->pdn);
402 	return 0;
403 }
404 
405 static int efa_destroy_qp_handle(struct efa_dev *dev, u32 qp_handle)
406 {
407 	struct efa_com_destroy_qp_params params = { .qp_handle = qp_handle };
408 
409 	return efa_com_destroy_qp(&dev->edev, &params);
410 }
411 
412 static void efa_qp_user_mmap_entries_remove(struct efa_qp *qp)
413 {
414 	rdma_user_mmap_entry_remove(qp->rq_mmap_entry);
415 	rdma_user_mmap_entry_remove(qp->rq_db_mmap_entry);
416 	rdma_user_mmap_entry_remove(qp->llq_desc_mmap_entry);
417 	rdma_user_mmap_entry_remove(qp->sq_db_mmap_entry);
418 }
419 
420 int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
421 {
422 	struct efa_dev *dev = to_edev(ibqp->pd->device);
423 	struct efa_qp *qp = to_eqp(ibqp);
424 	int err;
425 
426 	ibdev_dbg(&dev->ibdev, "Destroy qp[%u]\n", ibqp->qp_num);
427 
428 	efa_qp_user_mmap_entries_remove(qp);
429 
430 	err = efa_destroy_qp_handle(dev, qp->qp_handle);
431 	if (err)
432 		return err;
433 
434 	if (qp->rq_cpu_addr) {
435 		ibdev_dbg(&dev->ibdev,
			  "qp->rq_cpu_addr[0x%p] freed: size[%lu], dma[%pad]\n",
437 			  qp->rq_cpu_addr, qp->rq_size,
438 			  &qp->rq_dma_addr);
439 		efa_free_mapped(dev, qp->rq_cpu_addr, qp->rq_dma_addr,
440 				qp->rq_size, DMA_TO_DEVICE);
441 	}
442 
443 	kfree(qp);
444 	return 0;
445 }
446 
447 static struct rdma_user_mmap_entry*
448 efa_user_mmap_entry_insert(struct ib_ucontext *ucontext,
449 			   u64 address, size_t length,
450 			   u8 mmap_flag, u64 *offset)
451 {
452 	struct efa_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
453 	int err;
454 
455 	if (!entry)
456 		return NULL;
457 
458 	entry->address = address;
459 	entry->mmap_flag = mmap_flag;
460 
461 	err = rdma_user_mmap_entry_insert(ucontext, &entry->rdma_entry,
462 					  length);
463 	if (err) {
464 		kfree(entry);
465 		return NULL;
466 	}
467 	*offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
468 
469 	return &entry->rdma_entry;
470 }
471 
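/*
 * A QP exposes up to four mappings to userspace: the SQ doorbell page and,
 * when an RQ exists, the RQ doorbell page (both in the doorbell BAR), the LLQ
 * descriptor window in the memory BAR, and the kernel-allocated RQ ring
 * itself.  Each gets its own rdma_user_mmap entry, and the resulting keys are
 * returned in the create_qp response for the userspace provider to mmap().
 */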
472 static int qp_mmap_entries_setup(struct efa_qp *qp,
473 				 struct efa_dev *dev,
474 				 struct efa_ucontext *ucontext,
475 				 struct efa_com_create_qp_params *params,
476 				 struct efa_ibv_create_qp_resp *resp)
477 {
478 	size_t length;
479 	u64 address;
480 
481 	address = dev->db_bar_addr + resp->sq_db_offset;
482 	qp->sq_db_mmap_entry =
483 		efa_user_mmap_entry_insert(&ucontext->ibucontext,
484 					   address,
485 					   PAGE_SIZE, EFA_MMAP_IO_NC,
486 					   &resp->sq_db_mmap_key);
487 	if (!qp->sq_db_mmap_entry)
488 		return -ENOMEM;
489 
490 	resp->sq_db_offset &= ~PAGE_MASK;
491 
492 	address = dev->mem_bar_addr + resp->llq_desc_offset;
493 	length = PAGE_ALIGN(params->sq_ring_size_in_bytes +
494 			    (resp->llq_desc_offset & ~PAGE_MASK));
495 
496 	qp->llq_desc_mmap_entry =
497 		efa_user_mmap_entry_insert(&ucontext->ibucontext,
498 					   address, length,
499 					   EFA_MMAP_IO_WC,
500 					   &resp->llq_desc_mmap_key);
501 	if (!qp->llq_desc_mmap_entry)
502 		goto err_remove_mmap;
503 
504 	resp->llq_desc_offset &= ~PAGE_MASK;
505 
506 	if (qp->rq_size) {
507 		address = dev->db_bar_addr + resp->rq_db_offset;
508 
509 		qp->rq_db_mmap_entry =
510 			efa_user_mmap_entry_insert(&ucontext->ibucontext,
511 						   address, PAGE_SIZE,
512 						   EFA_MMAP_IO_NC,
513 						   &resp->rq_db_mmap_key);
514 		if (!qp->rq_db_mmap_entry)
515 			goto err_remove_mmap;
516 
517 		resp->rq_db_offset &= ~PAGE_MASK;
518 
519 		address = virt_to_phys(qp->rq_cpu_addr);
520 		qp->rq_mmap_entry =
521 			efa_user_mmap_entry_insert(&ucontext->ibucontext,
522 						   address, qp->rq_size,
523 						   EFA_MMAP_DMA_PAGE,
524 						   &resp->rq_mmap_key);
525 		if (!qp->rq_mmap_entry)
526 			goto err_remove_mmap;
527 
528 		resp->rq_mmap_size = qp->rq_size;
529 	}
530 
531 	return 0;
532 
533 err_remove_mmap:
534 	efa_qp_user_mmap_entries_remove(qp);
535 
536 	return -ENOMEM;
537 }
538 
539 static int efa_qp_validate_cap(struct efa_dev *dev,
540 			       struct ib_qp_init_attr *init_attr)
541 {
542 	if (init_attr->cap.max_send_wr > dev->dev_attr.max_sq_depth) {
543 		ibdev_dbg(&dev->ibdev,
544 			  "qp: requested send wr[%u] exceeds the max[%u]\n",
545 			  init_attr->cap.max_send_wr,
546 			  dev->dev_attr.max_sq_depth);
547 		return -EINVAL;
548 	}
549 	if (init_attr->cap.max_recv_wr > dev->dev_attr.max_rq_depth) {
550 		ibdev_dbg(&dev->ibdev,
551 			  "qp: requested receive wr[%u] exceeds the max[%u]\n",
552 			  init_attr->cap.max_recv_wr,
553 			  dev->dev_attr.max_rq_depth);
554 		return -EINVAL;
555 	}
556 	if (init_attr->cap.max_send_sge > dev->dev_attr.max_sq_sge) {
557 		ibdev_dbg(&dev->ibdev,
558 			  "qp: requested sge send[%u] exceeds the max[%u]\n",
559 			  init_attr->cap.max_send_sge, dev->dev_attr.max_sq_sge);
560 		return -EINVAL;
561 	}
562 	if (init_attr->cap.max_recv_sge > dev->dev_attr.max_rq_sge) {
563 		ibdev_dbg(&dev->ibdev,
564 			  "qp: requested sge recv[%u] exceeds the max[%u]\n",
565 			  init_attr->cap.max_recv_sge, dev->dev_attr.max_rq_sge);
566 		return -EINVAL;
567 	}
568 	if (init_attr->cap.max_inline_data > dev->dev_attr.inline_buf_size) {
569 		ibdev_dbg(&dev->ibdev,
570 			  "qp: requested inline data[%u] exceeds the max[%u]\n",
571 			  init_attr->cap.max_inline_data,
572 			  dev->dev_attr.inline_buf_size);
573 		return -EINVAL;
574 	}
575 
576 	return 0;
577 }
578 
579 static int efa_qp_validate_attr(struct efa_dev *dev,
580 				struct ib_qp_init_attr *init_attr)
581 {
582 	if (init_attr->qp_type != IB_QPT_DRIVER &&
583 	    init_attr->qp_type != IB_QPT_UD) {
584 		ibdev_dbg(&dev->ibdev,
585 			  "Unsupported qp type %d\n", init_attr->qp_type);
586 		return -EOPNOTSUPP;
587 	}
588 
589 	if (init_attr->srq) {
590 		ibdev_dbg(&dev->ibdev, "SRQ is not supported\n");
591 		return -EOPNOTSUPP;
592 	}
593 
594 	if (init_attr->create_flags) {
595 		ibdev_dbg(&dev->ibdev, "Unsupported create flags\n");
596 		return -EOPNOTSUPP;
597 	}
598 
599 	return 0;
600 }
601 
602 struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
603 			    struct ib_qp_init_attr *init_attr,
604 			    struct ib_udata *udata)
605 {
606 	struct efa_com_create_qp_params create_qp_params = {};
607 	struct efa_com_create_qp_result create_qp_resp;
608 	struct efa_dev *dev = to_edev(ibpd->device);
609 	struct efa_ibv_create_qp_resp resp = {};
610 	struct efa_ibv_create_qp cmd = {};
611 	struct efa_ucontext *ucontext;
612 	struct efa_qp *qp;
613 	int err;
614 
615 	ucontext = rdma_udata_to_drv_context(udata, struct efa_ucontext,
616 					     ibucontext);
617 
618 	err = efa_qp_validate_cap(dev, init_attr);
619 	if (err)
620 		goto err_out;
621 
622 	err = efa_qp_validate_attr(dev, init_attr);
623 	if (err)
624 		goto err_out;
625 
626 	if (offsetofend(typeof(cmd), driver_qp_type) > udata->inlen) {
627 		ibdev_dbg(&dev->ibdev,
628 			  "Incompatible ABI params, no input udata\n");
629 		err = -EINVAL;
630 		goto err_out;
631 	}
632 
633 	if (udata->inlen > sizeof(cmd) &&
634 	    !ib_is_udata_cleared(udata, sizeof(cmd),
635 				 udata->inlen - sizeof(cmd))) {
636 		ibdev_dbg(&dev->ibdev,
637 			  "Incompatible ABI params, unknown fields in udata\n");
638 		err = -EINVAL;
639 		goto err_out;
640 	}
641 
642 	err = ib_copy_from_udata(&cmd, udata,
643 				 min(sizeof(cmd), udata->inlen));
644 	if (err) {
645 		ibdev_dbg(&dev->ibdev,
646 			  "Cannot copy udata for create_qp\n");
647 		goto err_out;
648 	}
649 
650 	if (cmd.comp_mask) {
651 		ibdev_dbg(&dev->ibdev,
652 			  "Incompatible ABI params, unknown fields in udata\n");
653 		err = -EINVAL;
654 		goto err_out;
655 	}
656 
657 	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
658 	if (!qp) {
659 		err = -ENOMEM;
660 		goto err_out;
661 	}
662 
663 	create_qp_params.uarn = ucontext->uarn;
664 	create_qp_params.pd = to_epd(ibpd)->pdn;
665 
666 	if (init_attr->qp_type == IB_QPT_UD) {
667 		create_qp_params.qp_type = EFA_ADMIN_QP_TYPE_UD;
668 	} else if (cmd.driver_qp_type == EFA_QP_DRIVER_TYPE_SRD) {
669 		create_qp_params.qp_type = EFA_ADMIN_QP_TYPE_SRD;
670 	} else {
671 		ibdev_dbg(&dev->ibdev,
672 			  "Unsupported qp type %d driver qp type %d\n",
673 			  init_attr->qp_type, cmd.driver_qp_type);
674 		err = -EOPNOTSUPP;
675 		goto err_free_qp;
676 	}
677 
678 	ibdev_dbg(&dev->ibdev, "Create QP: qp type %d driver qp type %#x\n",
679 		  init_attr->qp_type, cmd.driver_qp_type);
680 	create_qp_params.send_cq_idx = to_ecq(init_attr->send_cq)->cq_idx;
681 	create_qp_params.recv_cq_idx = to_ecq(init_attr->recv_cq)->cq_idx;
682 	create_qp_params.sq_depth = init_attr->cap.max_send_wr;
683 	create_qp_params.sq_ring_size_in_bytes = cmd.sq_ring_size;
684 
685 	create_qp_params.rq_depth = init_attr->cap.max_recv_wr;
686 	create_qp_params.rq_ring_size_in_bytes = cmd.rq_ring_size;
687 	qp->rq_size = PAGE_ALIGN(create_qp_params.rq_ring_size_in_bytes);
688 	if (qp->rq_size) {
689 		qp->rq_cpu_addr = efa_zalloc_mapped(dev, &qp->rq_dma_addr,
690 						    qp->rq_size, DMA_TO_DEVICE);
691 		if (!qp->rq_cpu_addr) {
692 			err = -ENOMEM;
693 			goto err_free_qp;
694 		}
695 
696 		ibdev_dbg(&dev->ibdev,
			  "qp->rq_cpu_addr[0x%p] allocated: size[%lu], dma[%pad]\n",
698 			  qp->rq_cpu_addr, qp->rq_size, &qp->rq_dma_addr);
699 		create_qp_params.rq_base_addr = qp->rq_dma_addr;
700 	}
701 
702 	err = efa_com_create_qp(&dev->edev, &create_qp_params,
703 				&create_qp_resp);
704 	if (err)
705 		goto err_free_mapped;
706 
707 	resp.sq_db_offset = create_qp_resp.sq_db_offset;
708 	resp.rq_db_offset = create_qp_resp.rq_db_offset;
709 	resp.llq_desc_offset = create_qp_resp.llq_descriptors_offset;
710 	resp.send_sub_cq_idx = create_qp_resp.send_sub_cq_idx;
711 	resp.recv_sub_cq_idx = create_qp_resp.recv_sub_cq_idx;
712 
713 	err = qp_mmap_entries_setup(qp, dev, ucontext, &create_qp_params,
714 				    &resp);
715 	if (err)
716 		goto err_destroy_qp;
717 
718 	qp->qp_handle = create_qp_resp.qp_handle;
719 	qp->ibqp.qp_num = create_qp_resp.qp_num;
720 	qp->ibqp.qp_type = init_attr->qp_type;
721 	qp->max_send_wr = init_attr->cap.max_send_wr;
722 	qp->max_recv_wr = init_attr->cap.max_recv_wr;
723 	qp->max_send_sge = init_attr->cap.max_send_sge;
724 	qp->max_recv_sge = init_attr->cap.max_recv_sge;
725 	qp->max_inline_data = init_attr->cap.max_inline_data;
726 
727 	if (udata->outlen) {
728 		err = ib_copy_to_udata(udata, &resp,
729 				       min(sizeof(resp), udata->outlen));
730 		if (err) {
731 			ibdev_dbg(&dev->ibdev,
732 				  "Failed to copy udata for qp[%u]\n",
733 				  create_qp_resp.qp_num);
734 			goto err_remove_mmap_entries;
735 		}
736 	}
737 
738 	ibdev_dbg(&dev->ibdev, "Created qp[%d]\n", qp->ibqp.qp_num);
739 
740 	return &qp->ibqp;
741 
742 err_remove_mmap_entries:
743 	efa_qp_user_mmap_entries_remove(qp);
744 err_destroy_qp:
745 	efa_destroy_qp_handle(dev, create_qp_resp.qp_handle);
746 err_free_mapped:
747 	if (qp->rq_size)
748 		efa_free_mapped(dev, qp->rq_cpu_addr, qp->rq_dma_addr,
749 				qp->rq_size, DMA_TO_DEVICE);
750 err_free_qp:
751 	kfree(qp);
752 err_out:
753 	atomic64_inc(&dev->stats.create_qp_err);
754 	return ERR_PTR(err);
755 }
756 
757 static const struct {
758 	int			valid;
759 	enum ib_qp_attr_mask	req_param;
760 	enum ib_qp_attr_mask	opt_param;
761 } srd_qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
762 	[IB_QPS_RESET] = {
763 		[IB_QPS_RESET] = { .valid = 1 },
764 		[IB_QPS_INIT]  = {
765 			.valid = 1,
766 			.req_param = IB_QP_PKEY_INDEX |
767 				     IB_QP_PORT |
768 				     IB_QP_QKEY,
769 		},
770 	},
771 	[IB_QPS_INIT] = {
772 		[IB_QPS_RESET] = { .valid = 1 },
773 		[IB_QPS_ERR]   = { .valid = 1 },
774 		[IB_QPS_INIT]  = {
775 			.valid = 1,
776 			.opt_param = IB_QP_PKEY_INDEX |
777 				     IB_QP_PORT |
778 				     IB_QP_QKEY,
779 		},
780 		[IB_QPS_RTR]   = {
781 			.valid = 1,
782 			.opt_param = IB_QP_PKEY_INDEX |
783 				     IB_QP_QKEY,
784 		},
785 	},
786 	[IB_QPS_RTR] = {
787 		[IB_QPS_RESET] = { .valid = 1 },
788 		[IB_QPS_ERR]   = { .valid = 1 },
789 		[IB_QPS_RTS]   = {
790 			.valid = 1,
791 			.req_param = IB_QP_SQ_PSN,
792 			.opt_param = IB_QP_CUR_STATE |
793 				     IB_QP_QKEY |
794 				     IB_QP_RNR_RETRY,
796 		}
797 	},
798 	[IB_QPS_RTS] = {
799 		[IB_QPS_RESET] = { .valid = 1 },
800 		[IB_QPS_ERR]   = { .valid = 1 },
801 		[IB_QPS_RTS]   = {
802 			.valid = 1,
803 			.opt_param = IB_QP_CUR_STATE |
804 				     IB_QP_QKEY,
805 		},
806 		[IB_QPS_SQD] = {
807 			.valid = 1,
808 			.opt_param = IB_QP_EN_SQD_ASYNC_NOTIFY,
809 		},
810 	},
811 	[IB_QPS_SQD] = {
812 		[IB_QPS_RESET] = { .valid = 1 },
813 		[IB_QPS_ERR]   = { .valid = 1 },
814 		[IB_QPS_RTS]   = {
815 			.valid = 1,
816 			.opt_param = IB_QP_CUR_STATE |
817 				     IB_QP_QKEY,
818 		},
819 		[IB_QPS_SQD] = {
820 			.valid = 1,
821 			.opt_param = IB_QP_PKEY_INDEX |
822 				     IB_QP_QKEY,
823 		}
824 	},
825 	[IB_QPS_SQE] = {
826 		[IB_QPS_RESET] = { .valid = 1 },
827 		[IB_QPS_ERR]   = { .valid = 1 },
828 		[IB_QPS_RTS]   = {
829 			.valid = 1,
830 			.opt_param = IB_QP_CUR_STATE |
831 				     IB_QP_QKEY,
832 		}
833 	},
834 	[IB_QPS_ERR] = {
835 		[IB_QPS_RESET] = { .valid = 1 },
836 		[IB_QPS_ERR]   = { .valid = 1 },
837 	}
838 };
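/*
 * srd_qp_state_table plays the same role for SRD (driver-type) QPs that the
 * table used by ib_modify_qp_is_ok() plays in the IB core: it lists which
 * state transitions are valid and which attribute mask bits are required or
 * optional for each.  For example, RESET -> INIT requires IB_QP_PKEY_INDEX,
 * IB_QP_PORT and IB_QP_QKEY, while RTR -> RTS requires IB_QP_SQ_PSN.
 */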
839 
840 static bool efa_modify_srd_qp_is_ok(enum ib_qp_state cur_state,
841 				    enum ib_qp_state next_state,
842 				    enum ib_qp_attr_mask mask)
843 {
844 	enum ib_qp_attr_mask req_param, opt_param;
845 
846 	if (mask & IB_QP_CUR_STATE  &&
847 	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
848 	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
849 		return false;
850 
851 	if (!srd_qp_state_table[cur_state][next_state].valid)
852 		return false;
853 
854 	req_param = srd_qp_state_table[cur_state][next_state].req_param;
855 	opt_param = srd_qp_state_table[cur_state][next_state].opt_param;
856 
857 	if ((mask & req_param) != req_param)
858 		return false;
859 
860 	if (mask & ~(req_param | opt_param | IB_QP_STATE))
861 		return false;
862 
863 	return true;
864 }
865 
866 static int efa_modify_qp_validate(struct efa_dev *dev, struct efa_qp *qp,
867 				  struct ib_qp_attr *qp_attr, int qp_attr_mask,
868 				  enum ib_qp_state cur_state,
869 				  enum ib_qp_state new_state)
870 {
871 	int err;
872 
873 #define EFA_MODIFY_QP_SUPP_MASK \
874 	(IB_QP_STATE | IB_QP_CUR_STATE | IB_QP_EN_SQD_ASYNC_NOTIFY | \
875 	 IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY | IB_QP_SQ_PSN | \
876 	 IB_QP_RNR_RETRY)
877 
878 	if (qp_attr_mask & ~EFA_MODIFY_QP_SUPP_MASK) {
879 		ibdev_dbg(&dev->ibdev,
880 			  "Unsupported qp_attr_mask[%#x] supported[%#x]\n",
881 			  qp_attr_mask, EFA_MODIFY_QP_SUPP_MASK);
882 		return -EOPNOTSUPP;
883 	}
884 
885 	if (qp->ibqp.qp_type == IB_QPT_DRIVER)
886 		err = !efa_modify_srd_qp_is_ok(cur_state, new_state,
887 					       qp_attr_mask);
888 	else
889 		err = !ib_modify_qp_is_ok(cur_state, new_state, IB_QPT_UD,
890 					  qp_attr_mask);
891 
892 	if (err) {
893 		ibdev_dbg(&dev->ibdev, "Invalid modify QP parameters\n");
894 		return -EINVAL;
895 	}
896 
897 	if ((qp_attr_mask & IB_QP_PORT) && qp_attr->port_num != 1) {
898 		ibdev_dbg(&dev->ibdev, "Can't change port num\n");
899 		return -EOPNOTSUPP;
900 	}
901 
902 	if ((qp_attr_mask & IB_QP_PKEY_INDEX) && qp_attr->pkey_index) {
903 		ibdev_dbg(&dev->ibdev, "Can't change pkey index\n");
904 		return -EOPNOTSUPP;
905 	}
906 
907 	return 0;
908 }
909 
910 int efa_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
911 		  int qp_attr_mask, struct ib_udata *udata)
912 {
913 	struct efa_dev *dev = to_edev(ibqp->device);
914 	struct efa_com_modify_qp_params params = {};
915 	struct efa_qp *qp = to_eqp(ibqp);
916 	enum ib_qp_state cur_state;
917 	enum ib_qp_state new_state;
918 	int err;
919 
920 	if (qp_attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
921 		return -EOPNOTSUPP;
922 
923 	if (udata->inlen &&
924 	    !ib_is_udata_cleared(udata, 0, udata->inlen)) {
925 		ibdev_dbg(&dev->ibdev,
926 			  "Incompatible ABI params, udata not cleared\n");
927 		return -EINVAL;
928 	}
929 
930 	cur_state = qp_attr_mask & IB_QP_CUR_STATE ? qp_attr->cur_qp_state :
931 						     qp->state;
932 	new_state = qp_attr_mask & IB_QP_STATE ? qp_attr->qp_state : cur_state;
933 
934 	err = efa_modify_qp_validate(dev, qp, qp_attr, qp_attr_mask, cur_state,
935 				     new_state);
936 	if (err)
937 		return err;
938 
939 	params.qp_handle = qp->qp_handle;
940 
941 	if (qp_attr_mask & IB_QP_STATE) {
942 		EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_QP_STATE,
943 			1);
944 		EFA_SET(&params.modify_mask,
945 			EFA_ADMIN_MODIFY_QP_CMD_CUR_QP_STATE, 1);
946 		params.cur_qp_state = cur_state;
947 		params.qp_state = new_state;
948 	}
949 
950 	if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
951 		EFA_SET(&params.modify_mask,
952 			EFA_ADMIN_MODIFY_QP_CMD_SQ_DRAINED_ASYNC_NOTIFY, 1);
953 		params.sq_drained_async_notify = qp_attr->en_sqd_async_notify;
954 	}
955 
956 	if (qp_attr_mask & IB_QP_QKEY) {
957 		EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_QKEY, 1);
958 		params.qkey = qp_attr->qkey;
959 	}
960 
961 	if (qp_attr_mask & IB_QP_SQ_PSN) {
962 		EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_SQ_PSN, 1);
963 		params.sq_psn = qp_attr->sq_psn;
964 	}
965 
966 	if (qp_attr_mask & IB_QP_RNR_RETRY) {
967 		EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_RNR_RETRY,
968 			1);
969 		params.rnr_retry = qp_attr->rnr_retry;
970 	}
971 
972 	err = efa_com_modify_qp(&dev->edev, &params);
973 	if (err)
974 		return err;
975 
976 	qp->state = new_state;
977 
978 	return 0;
979 }
980 
981 static int efa_destroy_cq_idx(struct efa_dev *dev, int cq_idx)
982 {
983 	struct efa_com_destroy_cq_params params = { .cq_idx = cq_idx };
984 
985 	return efa_com_destroy_cq(&dev->edev, &params);
986 }
987 
988 int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
989 {
990 	struct efa_dev *dev = to_edev(ibcq->device);
991 	struct efa_cq *cq = to_ecq(ibcq);
992 
993 	ibdev_dbg(&dev->ibdev,
994 		  "Destroy cq[%d] virt[0x%p] freed: size[%lu], dma[%pad]\n",
995 		  cq->cq_idx, cq->cpu_addr, cq->size, &cq->dma_addr);
996 
997 	rdma_user_mmap_entry_remove(cq->mmap_entry);
998 	efa_destroy_cq_idx(dev, cq->cq_idx);
999 	efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
1000 			DMA_FROM_DEVICE);
1001 	return 0;
1002 }
1003 
1004 static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq,
1005 				 struct efa_ibv_create_cq_resp *resp)
1006 {
1007 	resp->q_mmap_size = cq->size;
1008 	cq->mmap_entry = efa_user_mmap_entry_insert(&cq->ucontext->ibucontext,
1009 						    virt_to_phys(cq->cpu_addr),
1010 						    cq->size, EFA_MMAP_DMA_PAGE,
1011 						    &resp->q_mmap_key);
1012 	if (!cq->mmap_entry)
1013 		return -ENOMEM;
1014 
1015 	return 0;
1016 }
1017 
1018 int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
1019 		  struct ib_udata *udata)
1020 {
1021 	struct efa_ucontext *ucontext = rdma_udata_to_drv_context(
1022 		udata, struct efa_ucontext, ibucontext);
1023 	struct efa_ibv_create_cq_resp resp = {};
1024 	struct efa_com_create_cq_params params;
1025 	struct efa_com_create_cq_result result;
1026 	struct ib_device *ibdev = ibcq->device;
1027 	struct efa_dev *dev = to_edev(ibdev);
1028 	struct efa_ibv_create_cq cmd = {};
1029 	struct efa_cq *cq = to_ecq(ibcq);
1030 	int entries = attr->cqe;
1031 	int err;
1032 
1033 	ibdev_dbg(ibdev, "create_cq entries %d\n", entries);
1034 
1035 	if (attr->flags)
1036 		return -EOPNOTSUPP;
1037 
1038 	if (entries < 1 || entries > dev->dev_attr.max_cq_depth) {
1039 		ibdev_dbg(ibdev,
1040 			  "cq: requested entries[%u] non-positive or greater than max[%u]\n",
1041 			  entries, dev->dev_attr.max_cq_depth);
1042 		err = -EINVAL;
1043 		goto err_out;
1044 	}
1045 
1046 	if (offsetofend(typeof(cmd), num_sub_cqs) > udata->inlen) {
1047 		ibdev_dbg(ibdev,
1048 			  "Incompatible ABI params, no input udata\n");
1049 		err = -EINVAL;
1050 		goto err_out;
1051 	}
1052 
1053 	if (udata->inlen > sizeof(cmd) &&
1054 	    !ib_is_udata_cleared(udata, sizeof(cmd),
1055 				 udata->inlen - sizeof(cmd))) {
1056 		ibdev_dbg(ibdev,
1057 			  "Incompatible ABI params, unknown fields in udata\n");
1058 		err = -EINVAL;
1059 		goto err_out;
1060 	}
1061 
1062 	err = ib_copy_from_udata(&cmd, udata,
1063 				 min(sizeof(cmd), udata->inlen));
1064 	if (err) {
1065 		ibdev_dbg(ibdev, "Cannot copy udata for create_cq\n");
1066 		goto err_out;
1067 	}
1068 
1069 	if (cmd.comp_mask || !is_reserved_cleared(cmd.reserved_50)) {
1070 		ibdev_dbg(ibdev,
1071 			  "Incompatible ABI params, unknown fields in udata\n");
1072 		err = -EINVAL;
1073 		goto err_out;
1074 	}
1075 
1076 	if (!cmd.cq_entry_size) {
1077 		ibdev_dbg(ibdev,
1078 			  "Invalid entry size [%u]\n", cmd.cq_entry_size);
1079 		err = -EINVAL;
1080 		goto err_out;
1081 	}
1082 
1083 	if (cmd.num_sub_cqs != dev->dev_attr.sub_cqs_per_cq) {
1084 		ibdev_dbg(ibdev,
1085 			  "Invalid number of sub cqs[%u] expected[%u]\n",
1086 			  cmd.num_sub_cqs, dev->dev_attr.sub_cqs_per_cq);
1087 		err = -EINVAL;
1088 		goto err_out;
1089 	}
1090 
1091 	cq->ucontext = ucontext;
1092 	cq->size = PAGE_ALIGN(cmd.cq_entry_size * entries * cmd.num_sub_cqs);
1093 	cq->cpu_addr = efa_zalloc_mapped(dev, &cq->dma_addr, cq->size,
1094 					 DMA_FROM_DEVICE);
1095 	if (!cq->cpu_addr) {
1096 		err = -ENOMEM;
1097 		goto err_out;
1098 	}
1099 
1100 	params.uarn = cq->ucontext->uarn;
1101 	params.cq_depth = entries;
1102 	params.dma_addr = cq->dma_addr;
1103 	params.entry_size_in_bytes = cmd.cq_entry_size;
1104 	params.num_sub_cqs = cmd.num_sub_cqs;
1105 	err = efa_com_create_cq(&dev->edev, &params, &result);
1106 	if (err)
1107 		goto err_free_mapped;
1108 
1109 	resp.cq_idx = result.cq_idx;
1110 	cq->cq_idx = result.cq_idx;
1111 	cq->ibcq.cqe = result.actual_depth;
1112 	WARN_ON_ONCE(entries != result.actual_depth);
1113 
1114 	err = cq_mmap_entries_setup(dev, cq, &resp);
1115 	if (err) {
1116 		ibdev_dbg(ibdev, "Could not setup cq[%u] mmap entries\n",
1117 			  cq->cq_idx);
1118 		goto err_destroy_cq;
1119 	}
1120 
1121 	if (udata->outlen) {
1122 		err = ib_copy_to_udata(udata, &resp,
1123 				       min(sizeof(resp), udata->outlen));
1124 		if (err) {
1125 			ibdev_dbg(ibdev,
1126 				  "Failed to copy udata for create_cq\n");
1127 			goto err_remove_mmap;
1128 		}
1129 	}
1130 
1131 	ibdev_dbg(ibdev, "Created cq[%d], cq depth[%u]. dma[%pad] virt[0x%p]\n",
1132 		  cq->cq_idx, result.actual_depth, &cq->dma_addr, cq->cpu_addr);
1133 
1134 	return 0;
1135 
1136 err_remove_mmap:
1137 	rdma_user_mmap_entry_remove(cq->mmap_entry);
1138 err_destroy_cq:
1139 	efa_destroy_cq_idx(dev, cq->cq_idx);
1140 err_free_mapped:
1141 	efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
1142 			DMA_FROM_DEVICE);
1143 
1144 err_out:
1145 	atomic64_inc(&dev->stats.create_cq_err);
1146 	return err;
1147 }
1148 
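/*
 * Record the DMA address of each device-page-sized (BIT(hp_shift)) block of
 * the umem into page_list.  hp_cnt is the expected number of such blocks, as
 * computed by ib_umem_num_dma_blocks() in efa_reg_mr().
 */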
1149 static int umem_to_page_list(struct efa_dev *dev,
1150 			     struct ib_umem *umem,
1151 			     u64 *page_list,
1152 			     u32 hp_cnt,
1153 			     u8 hp_shift)
1154 {
1155 	u32 pages_in_hp = BIT(hp_shift - PAGE_SHIFT);
1156 	struct ib_block_iter biter;
1157 	unsigned int hp_idx = 0;
1158 
1159 	ibdev_dbg(&dev->ibdev, "hp_cnt[%u], pages_in_hp[%u]\n",
1160 		  hp_cnt, pages_in_hp);
1161 
1162 	rdma_umem_for_each_dma_block(umem, &biter, BIT(hp_shift))
1163 		page_list[hp_idx++] = rdma_block_iter_dma_address(&biter);
1164 
1165 	return 0;
1166 }
1167 
1168 static struct scatterlist *efa_vmalloc_buf_to_sg(u64 *buf, int page_cnt)
1169 {
1170 	struct scatterlist *sglist;
1171 	struct page *pg;
1172 	int i;
1173 
1174 	sglist = kmalloc_array(page_cnt, sizeof(*sglist), GFP_KERNEL);
1175 	if (!sglist)
1176 		return NULL;
1177 	sg_init_table(sglist, page_cnt);
1178 	for (i = 0; i < page_cnt; i++) {
1179 		pg = vmalloc_to_page(buf);
1180 		if (!pg)
1181 			goto err;
1182 		sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
1183 		buf += PAGE_SIZE / sizeof(*buf);
1184 	}
1185 	return sglist;
1186 
1187 err:
1188 	kfree(sglist);
1189 	return NULL;
1190 }
1191 
/*
 * Create a chunk list of the DMA addresses of the physical pages backing the
 * supplied scatter-gather list.
 */
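/*
 * Resulting layout, for illustration (N == EFA_PTRS_PER_CHUNK):
 *
 *	chunk[0]: [page 0 .. page N-1 ][ctrl_buf -> chunk[1]]
 *	chunk[1]: [page N .. page 2N-1][ctrl_buf -> chunk[2]]
 *	...
 *	chunk[last]: [remaining pages][ctrl_buf (left zeroed)]
 *
 * Only the first chunk's DMA address and length are passed to the device (see
 * efa_create_pbl()); the device follows the ctrl_buf links for the rest.
 */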
1196 static int pbl_chunk_list_create(struct efa_dev *dev, struct pbl_context *pbl)
1197 {
1198 	struct pbl_chunk_list *chunk_list = &pbl->phys.indirect.chunk_list;
1199 	int page_cnt = pbl->phys.indirect.pbl_buf_size_in_pages;
1200 	struct scatterlist *pages_sgl = pbl->phys.indirect.sgl;
1201 	unsigned int chunk_list_size, chunk_idx, payload_idx;
1202 	int sg_dma_cnt = pbl->phys.indirect.sg_dma_cnt;
1203 	struct efa_com_ctrl_buff_info *ctrl_buf;
1204 	u64 *cur_chunk_buf, *prev_chunk_buf;
1205 	struct ib_block_iter biter;
1206 	dma_addr_t dma_addr;
1207 	int i;
1208 
1209 	/* allocate a chunk list that consists of 4KB chunks */
1210 	chunk_list_size = DIV_ROUND_UP(page_cnt, EFA_PTRS_PER_CHUNK);
1211 
1212 	chunk_list->size = chunk_list_size;
1213 	chunk_list->chunks = kcalloc(chunk_list_size,
1214 				     sizeof(*chunk_list->chunks),
1215 				     GFP_KERNEL);
1216 	if (!chunk_list->chunks)
1217 		return -ENOMEM;
1218 
1219 	ibdev_dbg(&dev->ibdev,
1220 		  "chunk_list_size[%u] - pages[%u]\n", chunk_list_size,
1221 		  page_cnt);
1222 
1223 	/* allocate chunk buffers: */
1224 	for (i = 0; i < chunk_list_size; i++) {
1225 		chunk_list->chunks[i].buf = kzalloc(EFA_CHUNK_SIZE, GFP_KERNEL);
1226 		if (!chunk_list->chunks[i].buf)
1227 			goto chunk_list_dealloc;
1228 
1229 		chunk_list->chunks[i].length = EFA_CHUNK_USED_SIZE;
1230 	}
1231 	chunk_list->chunks[chunk_list_size - 1].length =
1232 		((page_cnt % EFA_PTRS_PER_CHUNK) * EFA_CHUNK_PAYLOAD_PTR_SIZE) +
1233 			EFA_CHUNK_PTR_SIZE;
1234 
	/* fill the DMA addresses of the SG list pages into the chunks: */
1236 	chunk_idx = 0;
1237 	payload_idx = 0;
1238 	cur_chunk_buf = chunk_list->chunks[0].buf;
1239 	rdma_for_each_block(pages_sgl, &biter, sg_dma_cnt,
1240 			    EFA_CHUNK_PAYLOAD_SIZE) {
1241 		cur_chunk_buf[payload_idx++] =
1242 			rdma_block_iter_dma_address(&biter);
1243 
1244 		if (payload_idx == EFA_PTRS_PER_CHUNK) {
1245 			chunk_idx++;
1246 			cur_chunk_buf = chunk_list->chunks[chunk_idx].buf;
1247 			payload_idx = 0;
1248 		}
1249 	}
1250 
	/* DMA-map the chunks and fill each chunk's pointer to the next chunk */
1252 	for (i = chunk_list_size - 1; i >= 0; i--) {
1253 		dma_addr = dma_map_single(&dev->pdev->dev,
1254 					  chunk_list->chunks[i].buf,
1255 					  chunk_list->chunks[i].length,
1256 					  DMA_TO_DEVICE);
1257 		if (dma_mapping_error(&dev->pdev->dev, dma_addr)) {
1258 			ibdev_err(&dev->ibdev,
1259 				  "chunk[%u] dma_map_failed\n", i);
1260 			goto chunk_list_unmap;
1261 		}
1262 
1263 		chunk_list->chunks[i].dma_addr = dma_addr;
1264 		ibdev_dbg(&dev->ibdev,
1265 			  "chunk[%u] mapped at [%pad]\n", i, &dma_addr);
1266 
1267 		if (!i)
1268 			break;
1269 
1270 		prev_chunk_buf = chunk_list->chunks[i - 1].buf;
1271 
1272 		ctrl_buf = (struct efa_com_ctrl_buff_info *)
1273 				&prev_chunk_buf[EFA_PTRS_PER_CHUNK];
1274 		ctrl_buf->length = chunk_list->chunks[i].length;
1275 
1276 		efa_com_set_dma_addr(dma_addr,
1277 				     &ctrl_buf->address.mem_addr_high,
1278 				     &ctrl_buf->address.mem_addr_low);
1279 	}
1280 
1281 	return 0;
1282 
1283 chunk_list_unmap:
1284 	for (; i < chunk_list_size; i++) {
1285 		dma_unmap_single(&dev->pdev->dev, chunk_list->chunks[i].dma_addr,
1286 				 chunk_list->chunks[i].length, DMA_TO_DEVICE);
1287 	}
1288 chunk_list_dealloc:
1289 	for (i = 0; i < chunk_list_size; i++)
1290 		kfree(chunk_list->chunks[i].buf);
1291 
1292 	kfree(chunk_list->chunks);
1293 	return -ENOMEM;
1294 }
1295 
1296 static void pbl_chunk_list_destroy(struct efa_dev *dev, struct pbl_context *pbl)
1297 {
1298 	struct pbl_chunk_list *chunk_list = &pbl->phys.indirect.chunk_list;
1299 	int i;
1300 
1301 	for (i = 0; i < chunk_list->size; i++) {
1302 		dma_unmap_single(&dev->pdev->dev, chunk_list->chunks[i].dma_addr,
1303 				 chunk_list->chunks[i].length, DMA_TO_DEVICE);
1304 		kfree(chunk_list->chunks[i].buf);
1305 	}
1306 
1307 	kfree(chunk_list->chunks);
1308 }
1309 
/*
 * Initialize PBL continuous mode: DMA-map the physically contiguous PBL
 * buffer as a single region.
 */
1311 static int pbl_continuous_initialize(struct efa_dev *dev,
1312 				     struct pbl_context *pbl)
1313 {
1314 	dma_addr_t dma_addr;
1315 
1316 	dma_addr = dma_map_single(&dev->pdev->dev, pbl->pbl_buf,
1317 				  pbl->pbl_buf_size_in_bytes, DMA_TO_DEVICE);
1318 	if (dma_mapping_error(&dev->pdev->dev, dma_addr)) {
1319 		ibdev_err(&dev->ibdev, "Unable to map pbl to DMA address\n");
1320 		return -ENOMEM;
1321 	}
1322 
1323 	pbl->phys.continuous.dma_addr = dma_addr;
1324 	ibdev_dbg(&dev->ibdev,
1325 		  "pbl continuous - dma_addr = %pad, size[%u]\n",
1326 		  &dma_addr, pbl->pbl_buf_size_in_bytes);
1327 
1328 	return 0;
1329 }
1330 
/*
 * Initialize PBL indirect mode:
 * build a chunk list from the DMA addresses of the physical pages backing
 * the PBL buffer itself.
 */
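/*
 * As a rough illustration: registering 1GB of memory with 4KB device pages
 * needs 256K page pointers, i.e. a 2MB pbl_buf.  Whenever kvzalloc() ends up
 * falling back to vmalloc for such a buffer, this indirect path is taken;
 * smaller PBLs that fit a physically contiguous allocation use
 * pbl_continuous_initialize() instead.
 */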
1336 static int pbl_indirect_initialize(struct efa_dev *dev, struct pbl_context *pbl)
1337 {
1338 	u32 size_in_pages = DIV_ROUND_UP(pbl->pbl_buf_size_in_bytes, PAGE_SIZE);
1339 	struct scatterlist *sgl;
1340 	int sg_dma_cnt, err;
1341 
1342 	BUILD_BUG_ON(EFA_CHUNK_PAYLOAD_SIZE > PAGE_SIZE);
1343 	sgl = efa_vmalloc_buf_to_sg(pbl->pbl_buf, size_in_pages);
1344 	if (!sgl)
1345 		return -ENOMEM;
1346 
1347 	sg_dma_cnt = dma_map_sg(&dev->pdev->dev, sgl, size_in_pages, DMA_TO_DEVICE);
1348 	if (!sg_dma_cnt) {
1349 		err = -EINVAL;
1350 		goto err_map;
1351 	}
1352 
1353 	pbl->phys.indirect.pbl_buf_size_in_pages = size_in_pages;
1354 	pbl->phys.indirect.sgl = sgl;
1355 	pbl->phys.indirect.sg_dma_cnt = sg_dma_cnt;
1356 	err = pbl_chunk_list_create(dev, pbl);
1357 	if (err) {
1358 		ibdev_dbg(&dev->ibdev,
1359 			  "chunk_list creation failed[%d]\n", err);
1360 		goto err_chunk;
1361 	}
1362 
1363 	ibdev_dbg(&dev->ibdev,
1364 		  "pbl indirect - size[%u], chunks[%u]\n",
1365 		  pbl->pbl_buf_size_in_bytes,
1366 		  pbl->phys.indirect.chunk_list.size);
1367 
1368 	return 0;
1369 
1370 err_chunk:
1371 	dma_unmap_sg(&dev->pdev->dev, sgl, size_in_pages, DMA_TO_DEVICE);
1372 err_map:
1373 	kfree(sgl);
1374 	return err;
1375 }
1376 
1377 static void pbl_indirect_terminate(struct efa_dev *dev, struct pbl_context *pbl)
1378 {
1379 	pbl_chunk_list_destroy(dev, pbl);
1380 	dma_unmap_sg(&dev->pdev->dev, pbl->phys.indirect.sgl,
1381 		     pbl->phys.indirect.pbl_buf_size_in_pages, DMA_TO_DEVICE);
1382 	kfree(pbl->phys.indirect.sgl);
1383 }
1384 
1385 /* create a page buffer list from a mapped user memory region */
1386 static int pbl_create(struct efa_dev *dev,
1387 		      struct pbl_context *pbl,
1388 		      struct ib_umem *umem,
1389 		      int hp_cnt,
1390 		      u8 hp_shift)
1391 {
1392 	int err;
1393 
1394 	pbl->pbl_buf_size_in_bytes = hp_cnt * EFA_CHUNK_PAYLOAD_PTR_SIZE;
1395 	pbl->pbl_buf = kvzalloc(pbl->pbl_buf_size_in_bytes, GFP_KERNEL);
1396 	if (!pbl->pbl_buf)
1397 		return -ENOMEM;
1398 
1399 	if (is_vmalloc_addr(pbl->pbl_buf)) {
1400 		pbl->physically_continuous = 0;
1401 		err = umem_to_page_list(dev, umem, pbl->pbl_buf, hp_cnt,
1402 					hp_shift);
1403 		if (err)
1404 			goto err_free;
1405 
1406 		err = pbl_indirect_initialize(dev, pbl);
1407 		if (err)
1408 			goto err_free;
1409 	} else {
1410 		pbl->physically_continuous = 1;
1411 		err = umem_to_page_list(dev, umem, pbl->pbl_buf, hp_cnt,
1412 					hp_shift);
1413 		if (err)
1414 			goto err_free;
1415 
1416 		err = pbl_continuous_initialize(dev, pbl);
1417 		if (err)
1418 			goto err_free;
1419 	}
1420 
1421 	ibdev_dbg(&dev->ibdev,
1422 		  "user_pbl_created: user_pages[%u], continuous[%u]\n",
1423 		  hp_cnt, pbl->physically_continuous);
1424 
1425 	return 0;
1426 
1427 err_free:
1428 	kvfree(pbl->pbl_buf);
1429 	return err;
1430 }
1431 
1432 static void pbl_destroy(struct efa_dev *dev, struct pbl_context *pbl)
1433 {
1434 	if (pbl->physically_continuous)
1435 		dma_unmap_single(&dev->pdev->dev, pbl->phys.continuous.dma_addr,
1436 				 pbl->pbl_buf_size_in_bytes, DMA_TO_DEVICE);
1437 	else
1438 		pbl_indirect_terminate(dev, pbl);
1439 
1440 	kvfree(pbl->pbl_buf);
1441 }
1442 
1443 static int efa_create_inline_pbl(struct efa_dev *dev, struct efa_mr *mr,
1444 				 struct efa_com_reg_mr_params *params)
1445 {
1446 	int err;
1447 
1448 	params->inline_pbl = 1;
1449 	err = umem_to_page_list(dev, mr->umem, params->pbl.inline_pbl_array,
1450 				params->page_num, params->page_shift);
1451 	if (err)
1452 		return err;
1453 
1454 	ibdev_dbg(&dev->ibdev,
1455 		  "inline_pbl_array - pages[%u]\n", params->page_num);
1456 
1457 	return 0;
1458 }
1459 
1460 static int efa_create_pbl(struct efa_dev *dev,
1461 			  struct pbl_context *pbl,
1462 			  struct efa_mr *mr,
1463 			  struct efa_com_reg_mr_params *params)
1464 {
1465 	int err;
1466 
1467 	err = pbl_create(dev, pbl, mr->umem, params->page_num,
1468 			 params->page_shift);
1469 	if (err) {
1470 		ibdev_dbg(&dev->ibdev, "Failed to create pbl[%d]\n", err);
1471 		return err;
1472 	}
1473 
1474 	params->inline_pbl = 0;
1475 	params->indirect = !pbl->physically_continuous;
1476 	if (pbl->physically_continuous) {
1477 		params->pbl.pbl.length = pbl->pbl_buf_size_in_bytes;
1478 
1479 		efa_com_set_dma_addr(pbl->phys.continuous.dma_addr,
1480 				     &params->pbl.pbl.address.mem_addr_high,
1481 				     &params->pbl.pbl.address.mem_addr_low);
1482 	} else {
1483 		params->pbl.pbl.length =
1484 			pbl->phys.indirect.chunk_list.chunks[0].length;
1485 
1486 		efa_com_set_dma_addr(pbl->phys.indirect.chunk_list.chunks[0].dma_addr,
1487 				     &params->pbl.pbl.address.mem_addr_high,
1488 				     &params->pbl.pbl.address.mem_addr_low);
1489 	}
1490 
1491 	return 0;
1492 }
1493 
1494 struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
1495 			 u64 virt_addr, int access_flags,
1496 			 struct ib_udata *udata)
1497 {
1498 	struct efa_dev *dev = to_edev(ibpd->device);
1499 	struct efa_com_reg_mr_params params = {};
1500 	struct efa_com_reg_mr_result result = {};
1501 	struct pbl_context pbl;
1502 	int supp_access_flags;
1503 	unsigned int pg_sz;
1504 	struct efa_mr *mr;
1505 	int inline_size;
1506 	int err;
1507 
	if (udata && udata->inlen &&
	    !ib_is_udata_cleared(udata, 0, udata->inlen)) {
1510 		ibdev_dbg(&dev->ibdev,
1511 			  "Incompatible ABI params, udata not cleared\n");
1512 		err = -EINVAL;
1513 		goto err_out;
1514 	}
1515 
1516 	supp_access_flags =
1517 		IB_ACCESS_LOCAL_WRITE |
1518 		(EFA_DEV_CAP(dev, RDMA_READ) ? IB_ACCESS_REMOTE_READ : 0);
1519 
1520 	access_flags &= ~IB_ACCESS_OPTIONAL;
1521 	if (access_flags & ~supp_access_flags) {
1522 		ibdev_dbg(&dev->ibdev,
1523 			  "Unsupported access flags[%#x], supported[%#x]\n",
1524 			  access_flags, supp_access_flags);
1525 		err = -EOPNOTSUPP;
1526 		goto err_out;
1527 	}
1528 
1529 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1530 	if (!mr) {
1531 		err = -ENOMEM;
1532 		goto err_out;
1533 	}
1534 
1535 	mr->umem = ib_umem_get(ibpd->device, start, length, access_flags);
1536 	if (IS_ERR(mr->umem)) {
1537 		err = PTR_ERR(mr->umem);
1538 		ibdev_dbg(&dev->ibdev,
1539 			  "Failed to pin and map user space memory[%d]\n", err);
1540 		goto err_free;
1541 	}
1542 
1543 	params.pd = to_epd(ibpd)->pdn;
1544 	params.iova = virt_addr;
1545 	params.mr_length_in_bytes = length;
1546 	params.permissions = access_flags;
1547 
1548 	pg_sz = ib_umem_find_best_pgsz(mr->umem,
1549 				       dev->dev_attr.page_size_cap,
1550 				       virt_addr);
1551 	if (!pg_sz) {
1552 		err = -EOPNOTSUPP;
1553 		ibdev_dbg(&dev->ibdev, "Failed to find a suitable page size in page_size_cap %#llx\n",
1554 			  dev->dev_attr.page_size_cap);
1555 		goto err_unmap;
1556 	}
1557 
1558 	params.page_shift = order_base_2(pg_sz);
1559 	params.page_num = ib_umem_num_dma_blocks(mr->umem, pg_sz);
1560 
1561 	ibdev_dbg(&dev->ibdev,
1562 		  "start %#llx length %#llx params.page_shift %u params.page_num %u\n",
1563 		  start, length, params.page_shift, params.page_num);
1564 
1565 	inline_size = ARRAY_SIZE(params.pbl.inline_pbl_array);
1566 	if (params.page_num <= inline_size) {
1567 		err = efa_create_inline_pbl(dev, mr, &params);
1568 		if (err)
1569 			goto err_unmap;
1570 
1571 		err = efa_com_register_mr(&dev->edev, &params, &result);
1572 		if (err)
1573 			goto err_unmap;
1574 	} else {
1575 		err = efa_create_pbl(dev, &pbl, mr, &params);
1576 		if (err)
1577 			goto err_unmap;
1578 
1579 		err = efa_com_register_mr(&dev->edev, &params, &result);
1580 		pbl_destroy(dev, &pbl);
1581 
1582 		if (err)
1583 			goto err_unmap;
1584 	}
1585 
1586 	mr->ibmr.lkey = result.l_key;
1587 	mr->ibmr.rkey = result.r_key;
1588 	mr->ibmr.length = length;
1589 	ibdev_dbg(&dev->ibdev, "Registered mr[%d]\n", mr->ibmr.lkey);
1590 
1591 	return &mr->ibmr;
1592 
1593 err_unmap:
1594 	ib_umem_release(mr->umem);
1595 err_free:
1596 	kfree(mr);
1597 err_out:
1598 	atomic64_inc(&dev->stats.reg_mr_err);
1599 	return ERR_PTR(err);
1600 }
1601 
1602 int efa_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
1603 {
1604 	struct efa_dev *dev = to_edev(ibmr->device);
1605 	struct efa_com_dereg_mr_params params;
1606 	struct efa_mr *mr = to_emr(ibmr);
1607 	int err;
1608 
1609 	ibdev_dbg(&dev->ibdev, "Deregister mr[%d]\n", ibmr->lkey);
1610 
1611 	params.l_key = mr->ibmr.lkey;
1612 	err = efa_com_dereg_mr(&dev->edev, &params);
1613 	if (err)
1614 		return err;
1615 
1616 	ib_umem_release(mr->umem);
1617 	kfree(mr);
1618 
1619 	return 0;
1620 }
1621 
1622 int efa_get_port_immutable(struct ib_device *ibdev, u32 port_num,
1623 			   struct ib_port_immutable *immutable)
1624 {
1625 	struct ib_port_attr attr;
1626 	int err;
1627 
1628 	err = ib_query_port(ibdev, port_num, &attr);
1629 	if (err) {
1630 		ibdev_dbg(ibdev, "Couldn't query port err[%d]\n", err);
1631 		return err;
1632 	}
1633 
1634 	immutable->pkey_tbl_len = attr.pkey_tbl_len;
1635 	immutable->gid_tbl_len = attr.gid_tbl_len;
1636 
1637 	return 0;
1638 }
1639 
1640 static int efa_dealloc_uar(struct efa_dev *dev, u16 uarn)
1641 {
1642 	struct efa_com_dealloc_uar_params params = {
1643 		.uarn = uarn,
1644 	};
1645 
1646 	return efa_com_dealloc_uar(&dev->edev, &params);
1647 }
1648 
1649 #define EFA_CHECK_USER_COMP(_dev, _comp_mask, _attr, _mask, _attr_str) \
1650 	(_attr_str = (!(_dev)->dev_attr._attr || ((_comp_mask) & (_mask))) ? \
1651 		     NULL : #_attr)
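/*
 * EFA_CHECK_USER_COMP() evaluates to NULL (and the check passes) when either
 * the device does not report _attr at all or userspace acknowledged it by
 * setting _mask in its comp_mask.  Otherwise it assigns the attribute name to
 * _attr_str and evaluates to it, signalling that the userspace provider is
 * not aware of a device attribute it must take into account.
 */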
1652 
1653 static int efa_user_comp_handshake(const struct ib_ucontext *ibucontext,
1654 				   const struct efa_ibv_alloc_ucontext_cmd *cmd)
1655 {
1656 	struct efa_dev *dev = to_edev(ibucontext->device);
1657 	char *attr_str;
1658 
1659 	if (EFA_CHECK_USER_COMP(dev, cmd->comp_mask, max_tx_batch,
1660 				EFA_ALLOC_UCONTEXT_CMD_COMP_TX_BATCH, attr_str))
1661 		goto err;
1662 
1663 	if (EFA_CHECK_USER_COMP(dev, cmd->comp_mask, min_sq_depth,
1664 				EFA_ALLOC_UCONTEXT_CMD_COMP_MIN_SQ_WR,
1665 				attr_str))
1666 		goto err;
1667 
1668 	return 0;
1669 
1670 err:
1671 	ibdev_dbg(&dev->ibdev, "Userspace handshake failed for %s attribute\n",
1672 		  attr_str);
1673 	return -EOPNOTSUPP;
1674 }
1675 
1676 int efa_alloc_ucontext(struct ib_ucontext *ibucontext, struct ib_udata *udata)
1677 {
1678 	struct efa_ucontext *ucontext = to_eucontext(ibucontext);
1679 	struct efa_dev *dev = to_edev(ibucontext->device);
1680 	struct efa_ibv_alloc_ucontext_resp resp = {};
1681 	struct efa_ibv_alloc_ucontext_cmd cmd = {};
1682 	struct efa_com_alloc_uar_result result;
1683 	int err;
1684 
	/*
	 * It's fine if the driver does not know all request fields;
	 * we will acknowledge the input fields we support in our response.
	 */
1689 
1690 	err = ib_copy_from_udata(&cmd, udata,
1691 				 min(sizeof(cmd), udata->inlen));
1692 	if (err) {
1693 		ibdev_dbg(&dev->ibdev,
1694 			  "Cannot copy udata for alloc_ucontext\n");
1695 		goto err_out;
1696 	}
1697 
1698 	err = efa_user_comp_handshake(ibucontext, &cmd);
1699 	if (err)
1700 		goto err_out;
1701 
1702 	err = efa_com_alloc_uar(&dev->edev, &result);
1703 	if (err)
1704 		goto err_out;
1705 
1706 	ucontext->uarn = result.uarn;
1707 
1708 	resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_QUERY_DEVICE;
1709 	resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_CREATE_AH;
1710 	resp.sub_cqs_per_cq = dev->dev_attr.sub_cqs_per_cq;
1711 	resp.inline_buf_size = dev->dev_attr.inline_buf_size;
1712 	resp.max_llq_size = dev->dev_attr.max_llq_size;
1713 	resp.max_tx_batch = dev->dev_attr.max_tx_batch;
1714 	resp.min_sq_wr = dev->dev_attr.min_sq_depth;
1715 
1716 	err = ib_copy_to_udata(udata, &resp,
1717 			       min(sizeof(resp), udata->outlen));
1718 	if (err)
1719 		goto err_dealloc_uar;
1720 
1721 	return 0;
1722 
1723 err_dealloc_uar:
1724 	efa_dealloc_uar(dev, result.uarn);
1725 err_out:
1726 	atomic64_inc(&dev->stats.alloc_ucontext_err);
1727 	return err;
1728 }
1729 
1730 void efa_dealloc_ucontext(struct ib_ucontext *ibucontext)
1731 {
1732 	struct efa_ucontext *ucontext = to_eucontext(ibucontext);
1733 	struct efa_dev *dev = to_edev(ibucontext->device);
1734 
1735 	efa_dealloc_uar(dev, ucontext->uarn);
1736 }
1737 
1738 void efa_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
1739 {
1740 	struct efa_user_mmap_entry *entry = to_emmap(rdma_entry);
1741 
1742 	kfree(entry);
1743 }
1744 
1745 static int __efa_mmap(struct efa_dev *dev, struct efa_ucontext *ucontext,
1746 		      struct vm_area_struct *vma)
1747 {
1748 	struct rdma_user_mmap_entry *rdma_entry;
1749 	struct efa_user_mmap_entry *entry;
1750 	unsigned long va;
1751 	int err = 0;
1752 	u64 pfn;
1753 
1754 	rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma);
1755 	if (!rdma_entry) {
1756 		ibdev_dbg(&dev->ibdev,
1757 			  "pgoff[%#lx] does not have valid entry\n",
1758 			  vma->vm_pgoff);
1759 		atomic64_inc(&dev->stats.mmap_err);
1760 		return -EINVAL;
1761 	}
1762 	entry = to_emmap(rdma_entry);
1763 
1764 	ibdev_dbg(&dev->ibdev,
1765 		  "Mapping address[%#llx], length[%#zx], mmap_flag[%d]\n",
1766 		  entry->address, rdma_entry->npages * PAGE_SIZE,
1767 		  entry->mmap_flag);
1768 
1769 	pfn = entry->address >> PAGE_SHIFT;
1770 	switch (entry->mmap_flag) {
1771 	case EFA_MMAP_IO_NC:
1772 		err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn,
1773 					entry->rdma_entry.npages * PAGE_SIZE,
1774 					pgprot_noncached(vma->vm_page_prot),
1775 					rdma_entry);
1776 		break;
1777 	case EFA_MMAP_IO_WC:
1778 		err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn,
1779 					entry->rdma_entry.npages * PAGE_SIZE,
1780 					pgprot_writecombine(vma->vm_page_prot),
1781 					rdma_entry);
1782 		break;
1783 	case EFA_MMAP_DMA_PAGE:
1784 		for (va = vma->vm_start; va < vma->vm_end;
1785 		     va += PAGE_SIZE, pfn++) {
1786 			err = vm_insert_page(vma, va, pfn_to_page(pfn));
1787 			if (err)
1788 				break;
1789 		}
1790 		break;
1791 	default:
1792 		err = -EINVAL;
1793 	}
1794 
1795 	if (err) {
1796 		ibdev_dbg(
1797 			&dev->ibdev,
1798 			"Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n",
1799 			entry->address, rdma_entry->npages * PAGE_SIZE,
1800 			entry->mmap_flag, err);
1801 		atomic64_inc(&dev->stats.mmap_err);
1802 	}
1803 
1804 	rdma_user_mmap_entry_put(rdma_entry);
1805 	return err;
1806 }
1807 
1808 int efa_mmap(struct ib_ucontext *ibucontext,
1809 	     struct vm_area_struct *vma)
1810 {
1811 	struct efa_ucontext *ucontext = to_eucontext(ibucontext);
1812 	struct efa_dev *dev = to_edev(ibucontext->device);
1813 	size_t length = vma->vm_end - vma->vm_start;
1814 
1815 	ibdev_dbg(&dev->ibdev,
1816 		  "start %#lx, end %#lx, length = %#zx, pgoff = %#lx\n",
1817 		  vma->vm_start, vma->vm_end, length, vma->vm_pgoff);
1818 
1819 	return __efa_mmap(dev, ucontext, vma);
1820 }
1821 
1822 static int efa_ah_destroy(struct efa_dev *dev, struct efa_ah *ah)
1823 {
1824 	struct efa_com_destroy_ah_params params = {
1825 		.ah = ah->ah,
1826 		.pdn = to_epd(ah->ibah.pd)->pdn,
1827 	};
1828 
1829 	return efa_com_destroy_ah(&dev->edev, &params);
1830 }
1831 
1832 int efa_create_ah(struct ib_ah *ibah,
1833 		  struct rdma_ah_init_attr *init_attr,
1834 		  struct ib_udata *udata)
1835 {
1836 	struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
1837 	struct efa_dev *dev = to_edev(ibah->device);
1838 	struct efa_com_create_ah_params params = {};
1839 	struct efa_ibv_create_ah_resp resp = {};
1840 	struct efa_com_create_ah_result result;
1841 	struct efa_ah *ah = to_eah(ibah);
1842 	int err;
1843 
1844 	if (!(init_attr->flags & RDMA_CREATE_AH_SLEEPABLE)) {
1845 		ibdev_dbg(&dev->ibdev,
1846 			  "Create address handle is not supported in atomic context\n");
1847 		err = -EOPNOTSUPP;
1848 		goto err_out;
1849 	}
1850 
1851 	if (udata->inlen &&
1852 	    !ib_is_udata_cleared(udata, 0, udata->inlen)) {
1853 		ibdev_dbg(&dev->ibdev, "Incompatible ABI params\n");
1854 		err = -EINVAL;
1855 		goto err_out;
1856 	}
1857 
1858 	memcpy(params.dest_addr, ah_attr->grh.dgid.raw,
1859 	       sizeof(params.dest_addr));
1860 	params.pdn = to_epd(ibah->pd)->pdn;
1861 	err = efa_com_create_ah(&dev->edev, &params, &result);
1862 	if (err)
1863 		goto err_out;
1864 
1865 	memcpy(ah->id, ah_attr->grh.dgid.raw, sizeof(ah->id));
1866 	ah->ah = result.ah;
1867 
1868 	resp.efa_address_handle = result.ah;
1869 
1870 	if (udata->outlen) {
1871 		err = ib_copy_to_udata(udata, &resp,
1872 				       min(sizeof(resp), udata->outlen));
1873 		if (err) {
1874 			ibdev_dbg(&dev->ibdev,
1875 				  "Failed to copy udata for create_ah response\n");
1876 			goto err_destroy_ah;
1877 		}
1878 	}
1879 	ibdev_dbg(&dev->ibdev, "Created ah[%d]\n", ah->ah);
1880 
1881 	return 0;
1882 
1883 err_destroy_ah:
1884 	efa_ah_destroy(dev, ah);
1885 err_out:
1886 	atomic64_inc(&dev->stats.create_ah_err);
1887 	return err;
1888 }
1889 
1890 int efa_destroy_ah(struct ib_ah *ibah, u32 flags)
1891 {
1892 	struct efa_dev *dev = to_edev(ibah->pd->device);
1893 	struct efa_ah *ah = to_eah(ibah);
1894 
1895 	ibdev_dbg(&dev->ibdev, "Destroy ah[%d]\n", ah->ah);
1896 
1897 	if (!(flags & RDMA_DESTROY_AH_SLEEPABLE)) {
1898 		ibdev_dbg(&dev->ibdev,
1899 			  "Destroy address handle is not supported in atomic context\n");
1900 		return -EOPNOTSUPP;
1901 	}
1902 
1903 	efa_ah_destroy(dev, ah);
1904 	return 0;
1905 }
1906 
1907 struct rdma_hw_stats *efa_alloc_hw_stats(struct ib_device *ibdev, u32 port_num)
1908 {
1909 	return rdma_alloc_hw_stats_struct(efa_stats_names,
1910 					  ARRAY_SIZE(efa_stats_names),
1911 					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
1912 }
1913 
1914 int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
1915 		     u32 port_num, int index)
1916 {
1917 	struct efa_com_get_stats_params params = {};
1918 	union efa_com_get_stats_result result;
1919 	struct efa_dev *dev = to_edev(ibdev);
1920 	struct efa_com_rdma_read_stats *rrs;
1921 	struct efa_com_messages_stats *ms;
1922 	struct efa_com_basic_stats *bs;
1923 	struct efa_com_stats_admin *as;
1924 	struct efa_stats *s;
1925 	int err;
1926 
1927 	params.scope = EFA_ADMIN_GET_STATS_SCOPE_ALL;
1928 	params.type = EFA_ADMIN_GET_STATS_TYPE_BASIC;
1929 
1930 	err = efa_com_get_stats(&dev->edev, &params, &result);
1931 	if (err)
1932 		return err;
1933 
1934 	bs = &result.basic_stats;
1935 	stats->value[EFA_TX_BYTES] = bs->tx_bytes;
1936 	stats->value[EFA_TX_PKTS] = bs->tx_pkts;
1937 	stats->value[EFA_RX_BYTES] = bs->rx_bytes;
1938 	stats->value[EFA_RX_PKTS] = bs->rx_pkts;
1939 	stats->value[EFA_RX_DROPS] = bs->rx_drops;
1940 
1941 	params.type = EFA_ADMIN_GET_STATS_TYPE_MESSAGES;
1942 	err = efa_com_get_stats(&dev->edev, &params, &result);
1943 	if (err)
1944 		return err;
1945 
1946 	ms = &result.messages_stats;
1947 	stats->value[EFA_SEND_BYTES] = ms->send_bytes;
1948 	stats->value[EFA_SEND_WRS] = ms->send_wrs;
1949 	stats->value[EFA_RECV_BYTES] = ms->recv_bytes;
1950 	stats->value[EFA_RECV_WRS] = ms->recv_wrs;
1951 
1952 	params.type = EFA_ADMIN_GET_STATS_TYPE_RDMA_READ;
1953 	err = efa_com_get_stats(&dev->edev, &params, &result);
1954 	if (err)
1955 		return err;
1956 
1957 	rrs = &result.rdma_read_stats;
1958 	stats->value[EFA_RDMA_READ_WRS] = rrs->read_wrs;
1959 	stats->value[EFA_RDMA_READ_BYTES] = rrs->read_bytes;
1960 	stats->value[EFA_RDMA_READ_WR_ERR] = rrs->read_wr_err;
1961 	stats->value[EFA_RDMA_READ_RESP_BYTES] = rrs->read_resp_bytes;
1962 
1963 	as = &dev->edev.aq.stats;
1964 	stats->value[EFA_SUBMITTED_CMDS] = atomic64_read(&as->submitted_cmd);
1965 	stats->value[EFA_COMPLETED_CMDS] = atomic64_read(&as->completed_cmd);
1966 	stats->value[EFA_CMDS_ERR] = atomic64_read(&as->cmd_err);
1967 	stats->value[EFA_NO_COMPLETION_CMDS] = atomic64_read(&as->no_completion);
1968 
1969 	s = &dev->stats;
1970 	stats->value[EFA_KEEP_ALIVE_RCVD] = atomic64_read(&s->keep_alive_rcvd);
1971 	stats->value[EFA_ALLOC_PD_ERR] = atomic64_read(&s->alloc_pd_err);
1972 	stats->value[EFA_CREATE_QP_ERR] = atomic64_read(&s->create_qp_err);
1973 	stats->value[EFA_CREATE_CQ_ERR] = atomic64_read(&s->create_cq_err);
1974 	stats->value[EFA_REG_MR_ERR] = atomic64_read(&s->reg_mr_err);
1975 	stats->value[EFA_ALLOC_UCONTEXT_ERR] =
1976 		atomic64_read(&s->alloc_ucontext_err);
1977 	stats->value[EFA_CREATE_AH_ERR] = atomic64_read(&s->create_ah_err);
1978 	stats->value[EFA_MMAP_ERR] = atomic64_read(&s->mmap_err);
1979 
1980 	return ARRAY_SIZE(efa_stats_names);
1981 }
1982 
1983 enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev,
1984 					 u32 port_num)
1985 {
1986 	return IB_LINK_LAYER_UNSPECIFIED;
1987 }
1988 
1989