1 // SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
2 /* Copyright (c) 2015 - 2021 Intel Corporation */
3 #include "main.h"
4 
5 /**
6  * irdma_arp_table -manage arp table
7  * @rf: RDMA PCI function
8  * @ip_addr: ip address for device
9  * @ipv4: IPv4 flag
10  * @mac_addr: mac address ptr
11  * @action: modify, delete or add
12  */
13 int irdma_arp_table(struct irdma_pci_f *rf, u32 *ip_addr, bool ipv4,
14 		    const u8 *mac_addr, u32 action)
15 {
16 	unsigned long flags;
17 	int arp_index;
18 	u32 ip[4] = {};
19 
20 	if (ipv4)
21 		ip[0] = *ip_addr;
22 	else
23 		memcpy(ip, ip_addr, sizeof(ip));
24 
25 	spin_lock_irqsave(&rf->arp_lock, flags);
26 	for (arp_index = 0; (u32)arp_index < rf->arp_table_size; arp_index++) {
27 		if (!memcmp(rf->arp_table[arp_index].ip_addr, ip, sizeof(ip)))
28 			break;
29 	}
30 
31 	switch (action) {
32 	case IRDMA_ARP_ADD:
33 		if (arp_index != rf->arp_table_size) {
34 			arp_index = -1;
35 			break;
36 		}
37 
38 		arp_index = 0;
39 		if (irdma_alloc_rsrc(rf, rf->allocated_arps, rf->arp_table_size,
40 				     (u32 *)&arp_index, &rf->next_arp_index)) {
41 			arp_index = -1;
42 			break;
43 		}
44 
45 		memcpy(rf->arp_table[arp_index].ip_addr, ip,
46 		       sizeof(rf->arp_table[arp_index].ip_addr));
47 		ether_addr_copy(rf->arp_table[arp_index].mac_addr, mac_addr);
48 		break;
49 	case IRDMA_ARP_RESOLVE:
50 		if (arp_index == rf->arp_table_size)
51 			arp_index = -1;
52 		break;
53 	case IRDMA_ARP_DELETE:
54 		if (arp_index == rf->arp_table_size) {
55 			arp_index = -1;
56 			break;
57 		}
58 
59 		memset(rf->arp_table[arp_index].ip_addr, 0,
60 		       sizeof(rf->arp_table[arp_index].ip_addr));
61 		eth_zero_addr(rf->arp_table[arp_index].mac_addr);
62 		irdma_free_rsrc(rf, rf->allocated_arps, arp_index);
63 		break;
64 	default:
65 		arp_index = -1;
66 		break;
67 	}
68 
69 	spin_unlock_irqrestore(&rf->arp_lock, flags);
70 	return arp_index;
71 }
72 
73 /**
74  * irdma_add_arp - add a new arp entry if needed
75  * @rf: RDMA function
76  * @ip: IP address
77  * @ipv4: IPv4 flag
78  * @mac: MAC address
79  */
80 int irdma_add_arp(struct irdma_pci_f *rf, u32 *ip, bool ipv4, const u8 *mac)
81 {
82 	int arpidx;
83 
84 	arpidx = irdma_arp_table(rf, &ip[0], ipv4, NULL, IRDMA_ARP_RESOLVE);
85 	if (arpidx >= 0) {
86 		if (ether_addr_equal(rf->arp_table[arpidx].mac_addr, mac))
87 			return arpidx;
88 
89 		irdma_manage_arp_cache(rf, rf->arp_table[arpidx].mac_addr, ip,
90 				       ipv4, IRDMA_ARP_DELETE);
91 	}
92 
93 	irdma_manage_arp_cache(rf, mac, ip, ipv4, IRDMA_ARP_ADD);
94 
95 	return irdma_arp_table(rf, ip, ipv4, NULL, IRDMA_ARP_RESOLVE);
96 }
97 
98 /**
99  * wr32 - write 32 bits to hw register
100  * @hw: hardware information including registers
101  * @reg: register offset
102  * @val: value to write to register
103  */
104 inline void wr32(struct irdma_hw *hw, u32 reg, u32 val)
105 {
106 	writel(val, hw->hw_addr + reg);
107 }
108 
109 /**
110  * rd32 - read a 32 bit hw register
111  * @hw: hardware information including registers
112  * @reg: register offset
113  *
114  * Return value of register content
115  */
116 inline u32 rd32(struct irdma_hw *hw, u32 reg)
117 {
118 	return readl(hw->hw_addr + reg);
119 }
120 
121 /**
122  * rd64 - read a 64 bit hw register
123  * @hw: hardware information including registers
124  * @reg: register offset
125  *
126  * Return value of register content
127  */
128 inline u64 rd64(struct irdma_hw *hw, u32 reg)
129 {
130 	return readq(hw->hw_addr + reg);
131 }
132 
133 static void irdma_gid_change_event(struct ib_device *ibdev)
134 {
135 	struct ib_event ib_event;
136 
137 	ib_event.event = IB_EVENT_GID_CHANGE;
138 	ib_event.device = ibdev;
139 	ib_event.element.port_num = 1;
140 	ib_dispatch_event(&ib_event);
141 }
142 
143 /**
144  * irdma_inetaddr_event - system notifier for ipv4 addr events
145  * @notifier: not used
146  * @event: event for notifier
147  * @ptr: if address
148  */
149 int irdma_inetaddr_event(struct notifier_block *notifier, unsigned long event,
150 			 void *ptr)
151 {
152 	struct in_ifaddr *ifa = ptr;
153 	struct net_device *real_dev, *netdev = ifa->ifa_dev->dev;
154 	struct irdma_device *iwdev;
155 	struct ib_device *ibdev;
156 	u32 local_ipaddr;
157 
158 	real_dev = rdma_vlan_dev_real_dev(netdev);
159 	if (!real_dev)
160 		real_dev = netdev;
161 
162 	ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA);
163 	if (!ibdev)
164 		return NOTIFY_DONE;
165 
166 	iwdev = to_iwdev(ibdev);
167 	local_ipaddr = ntohl(ifa->ifa_address);
168 	ibdev_dbg(&iwdev->ibdev,
169 		  "DEV: netdev %p event %lu local_ip=%pI4 MAC=%pM\n", real_dev,
170 		  event, &local_ipaddr, real_dev->dev_addr);
171 	switch (event) {
172 	case NETDEV_DOWN:
173 		irdma_manage_arp_cache(iwdev->rf, real_dev->dev_addr,
174 				       &local_ipaddr, true, IRDMA_ARP_DELETE);
175 		irdma_if_notify(iwdev, real_dev, &local_ipaddr, true, false);
176 		irdma_gid_change_event(&iwdev->ibdev);
177 		break;
178 	case NETDEV_UP:
179 	case NETDEV_CHANGEADDR:
180 		irdma_add_arp(iwdev->rf, &local_ipaddr, true, real_dev->dev_addr);
181 		irdma_if_notify(iwdev, real_dev, &local_ipaddr, true, true);
182 		irdma_gid_change_event(&iwdev->ibdev);
183 		break;
184 	default:
185 		break;
186 	}
187 
188 	ib_device_put(ibdev);
189 
190 	return NOTIFY_DONE;
191 }
192 
193 /**
194  * irdma_inet6addr_event - system notifier for ipv6 addr events
195  * @notifier: not used
196  * @event: event for notifier
197  * @ptr: if address
198  */
199 int irdma_inet6addr_event(struct notifier_block *notifier, unsigned long event,
200 			  void *ptr)
201 {
202 	struct inet6_ifaddr *ifa = ptr;
203 	struct net_device *real_dev, *netdev = ifa->idev->dev;
204 	struct irdma_device *iwdev;
205 	struct ib_device *ibdev;
206 	u32 local_ipaddr6[4];
207 
208 	real_dev = rdma_vlan_dev_real_dev(netdev);
209 	if (!real_dev)
210 		real_dev = netdev;
211 
212 	ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA);
213 	if (!ibdev)
214 		return NOTIFY_DONE;
215 
216 	iwdev = to_iwdev(ibdev);
217 	irdma_copy_ip_ntohl(local_ipaddr6, ifa->addr.in6_u.u6_addr32);
218 	ibdev_dbg(&iwdev->ibdev,
219 		  "DEV: netdev %p event %lu local_ip=%pI6 MAC=%pM\n", real_dev,
220 		  event, local_ipaddr6, real_dev->dev_addr);
221 	switch (event) {
222 	case NETDEV_DOWN:
223 		irdma_manage_arp_cache(iwdev->rf, real_dev->dev_addr,
224 				       local_ipaddr6, false, IRDMA_ARP_DELETE);
225 		irdma_if_notify(iwdev, real_dev, local_ipaddr6, false, false);
226 		irdma_gid_change_event(&iwdev->ibdev);
227 		break;
228 	case NETDEV_UP:
229 	case NETDEV_CHANGEADDR:
230 		irdma_add_arp(iwdev->rf, local_ipaddr6, false,
231 			      real_dev->dev_addr);
232 		irdma_if_notify(iwdev, real_dev, local_ipaddr6, false, true);
233 		irdma_gid_change_event(&iwdev->ibdev);
234 		break;
235 	default:
236 		break;
237 	}
238 
239 	ib_device_put(ibdev);
240 
241 	return NOTIFY_DONE;
242 }
243 
244 /**
245  * irdma_net_event - system notifier for net events
246  * @notifier: not used
247  * @event: event for notifier
248  * @ptr: neighbor
249  */
250 int irdma_net_event(struct notifier_block *notifier, unsigned long event,
251 		    void *ptr)
252 {
253 	struct neighbour *neigh = ptr;
254 	struct net_device *real_dev, *netdev = (struct net_device *)neigh->dev;
255 	struct irdma_device *iwdev;
256 	struct ib_device *ibdev;
257 	__be32 *p;
258 	u32 local_ipaddr[4] = {};
259 	bool ipv4 = true;
260 
261 	switch (event) {
262 	case NETEVENT_NEIGH_UPDATE:
263 		real_dev = rdma_vlan_dev_real_dev(netdev);
264 		if (!real_dev)
265 			real_dev = netdev;
266 		ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA);
267 		if (!ibdev)
268 			return NOTIFY_DONE;
269 
270 		iwdev = to_iwdev(ibdev);
271 		p = (__be32 *)neigh->primary_key;
272 		if (neigh->tbl->family == AF_INET6) {
273 			ipv4 = false;
274 			irdma_copy_ip_ntohl(local_ipaddr, p);
275 		} else {
276 			local_ipaddr[0] = ntohl(*p);
277 		}
278 
279 		ibdev_dbg(&iwdev->ibdev,
280 			  "DEV: netdev %p state %d local_ip=%pI4 MAC=%pM\n",
281 			  iwdev->netdev, neigh->nud_state, local_ipaddr,
282 			  neigh->ha);
283 
284 		if (neigh->nud_state & NUD_VALID)
285 			irdma_add_arp(iwdev->rf, local_ipaddr, ipv4, neigh->ha);
286 
287 		else
288 			irdma_manage_arp_cache(iwdev->rf, neigh->ha,
289 					       local_ipaddr, ipv4,
290 					       IRDMA_ARP_DELETE);
291 		ib_device_put(ibdev);
292 		break;
293 	default:
294 		break;
295 	}
296 
297 	return NOTIFY_DONE;
298 }
299 
300 /**
301  * irdma_netdevice_event - system notifier for netdev events
302  * @notifier: not used
303  * @event: event for notifier
304  * @ptr: netdev
305  */
306 int irdma_netdevice_event(struct notifier_block *notifier, unsigned long event,
307 			  void *ptr)
308 {
309 	struct irdma_device *iwdev;
310 	struct ib_device *ibdev;
311 	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
312 
313 	ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_IRDMA);
314 	if (!ibdev)
315 		return NOTIFY_DONE;
316 
317 	iwdev = to_iwdev(ibdev);
318 	iwdev->iw_status = 1;
319 	switch (event) {
320 	case NETDEV_DOWN:
321 		iwdev->iw_status = 0;
322 		fallthrough;
323 	case NETDEV_UP:
324 		irdma_port_ibevent(iwdev);
325 		break;
326 	default:
327 		break;
328 	}
329 	ib_device_put(ibdev);
330 
331 	return NOTIFY_DONE;
332 }
333 
334 /**
335  * irdma_add_ipv6_addr - add ipv6 address to the hw arp table
336  * @iwdev: irdma device
337  */
338 static void irdma_add_ipv6_addr(struct irdma_device *iwdev)
339 {
340 	struct net_device *ip_dev;
341 	struct inet6_dev *idev;
342 	struct inet6_ifaddr *ifp, *tmp;
343 	u32 local_ipaddr6[4];
344 
345 	rcu_read_lock();
346 	for_each_netdev_rcu (&init_net, ip_dev) {
347 		if (((rdma_vlan_dev_vlan_id(ip_dev) < 0xFFFF &&
348 		      rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev) ||
349 		      ip_dev == iwdev->netdev) &&
350 		      (READ_ONCE(ip_dev->flags) & IFF_UP)) {
351 			idev = __in6_dev_get(ip_dev);
352 			if (!idev) {
353 				ibdev_err(&iwdev->ibdev, "ipv6 inet device not found\n");
354 				break;
355 			}
356 			list_for_each_entry_safe (ifp, tmp, &idev->addr_list,
357 						  if_list) {
358 				ibdev_dbg(&iwdev->ibdev,
359 					  "INIT: IP=%pI6, vlan_id=%d, MAC=%pM\n",
360 					  &ifp->addr,
361 					  rdma_vlan_dev_vlan_id(ip_dev),
362 					  ip_dev->dev_addr);
363 
364 				irdma_copy_ip_ntohl(local_ipaddr6,
365 						    ifp->addr.in6_u.u6_addr32);
366 				irdma_manage_arp_cache(iwdev->rf,
367 						       ip_dev->dev_addr,
368 						       local_ipaddr6, false,
369 						       IRDMA_ARP_ADD);
370 			}
371 		}
372 	}
373 	rcu_read_unlock();
374 }
375 
376 /**
377  * irdma_add_ipv4_addr - add ipv4 address to the hw arp table
378  * @iwdev: irdma device
379  */
380 static void irdma_add_ipv4_addr(struct irdma_device *iwdev)
381 {
382 	struct net_device *dev;
383 	struct in_device *idev;
384 	u32 ip_addr;
385 
386 	rcu_read_lock();
387 	for_each_netdev_rcu (&init_net, dev) {
388 		if (((rdma_vlan_dev_vlan_id(dev) < 0xFFFF &&
389 		      rdma_vlan_dev_real_dev(dev) == iwdev->netdev) ||
390 		      dev == iwdev->netdev) && (READ_ONCE(dev->flags) & IFF_UP)) {
391 			const struct in_ifaddr *ifa;
392 
393 			idev = __in_dev_get_rcu(dev);
394 			if (!idev)
395 				continue;
396 
397 			in_dev_for_each_ifa_rcu(ifa, idev) {
398 				ibdev_dbg(&iwdev->ibdev, "CM: IP=%pI4, vlan_id=%d, MAC=%pM\n",
399 					  &ifa->ifa_address, rdma_vlan_dev_vlan_id(dev),
400 					  dev->dev_addr);
401 
402 				ip_addr = ntohl(ifa->ifa_address);
403 				irdma_manage_arp_cache(iwdev->rf, dev->dev_addr,
404 						       &ip_addr, true,
405 						       IRDMA_ARP_ADD);
406 			}
407 		}
408 	}
409 	rcu_read_unlock();
410 }
411 
412 /**
413  * irdma_add_ip - add ip addresses
414  * @iwdev: irdma device
415  *
416  * Add ipv4/ipv6 addresses to the arp cache
417  */
418 void irdma_add_ip(struct irdma_device *iwdev)
419 {
420 	irdma_add_ipv4_addr(iwdev);
421 	irdma_add_ipv6_addr(iwdev);
422 }
423 
424 /**
425  * irdma_alloc_and_get_cqp_request - get cqp struct
426  * @cqp: device cqp ptr
427  * @wait: cqp to be used in wait mode
428  */
429 struct irdma_cqp_request *irdma_alloc_and_get_cqp_request(struct irdma_cqp *cqp,
430 							  bool wait)
431 {
432 	struct irdma_cqp_request *cqp_request = NULL;
433 	unsigned long flags;
434 
435 	spin_lock_irqsave(&cqp->req_lock, flags);
436 	if (!list_empty(&cqp->cqp_avail_reqs)) {
437 		cqp_request = list_first_entry(&cqp->cqp_avail_reqs,
438 					       struct irdma_cqp_request, list);
439 		list_del_init(&cqp_request->list);
440 	}
441 	spin_unlock_irqrestore(&cqp->req_lock, flags);
442 	if (!cqp_request) {
443 		cqp_request = kzalloc(sizeof(*cqp_request), GFP_ATOMIC);
444 		if (cqp_request) {
445 			cqp_request->dynamic = true;
446 			if (wait)
447 				init_waitqueue_head(&cqp_request->waitq);
448 		}
449 	}
450 	if (!cqp_request) {
451 		ibdev_dbg(to_ibdev(cqp->sc_cqp.dev), "ERR: CQP Request Fail: No Memory");
452 		return NULL;
453 	}
454 
455 	cqp_request->waiting = wait;
456 	refcount_set(&cqp_request->refcnt, 1);
457 	memset(&cqp_request->compl_info, 0, sizeof(cqp_request->compl_info));
458 
459 	return cqp_request;
460 }
461 
462 /**
463  * irdma_get_cqp_request - increase refcount for cqp_request
464  * @cqp_request: pointer to cqp_request instance
465  */
466 static inline void irdma_get_cqp_request(struct irdma_cqp_request *cqp_request)
467 {
468 	refcount_inc(&cqp_request->refcnt);
469 }
470 
471 /**
472  * irdma_free_cqp_request - free cqp request
473  * @cqp: cqp ptr
474  * @cqp_request: to be put back in cqp list
475  */
476 void irdma_free_cqp_request(struct irdma_cqp *cqp,
477 			    struct irdma_cqp_request *cqp_request)
478 {
479 	unsigned long flags;
480 
481 	if (cqp_request->dynamic) {
482 		kfree(cqp_request);
483 	} else {
484 		cqp_request->request_done = false;
485 		cqp_request->callback_fcn = NULL;
486 		cqp_request->waiting = false;
487 
488 		spin_lock_irqsave(&cqp->req_lock, flags);
489 		list_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs);
490 		spin_unlock_irqrestore(&cqp->req_lock, flags);
491 	}
492 	wake_up(&cqp->remove_wq);
493 }
494 
495 /**
496  * irdma_put_cqp_request - dec ref count and free if 0
497  * @cqp: cqp ptr
498  * @cqp_request: to be put back in cqp list
499  */
500 void irdma_put_cqp_request(struct irdma_cqp *cqp,
501 			   struct irdma_cqp_request *cqp_request)
502 {
503 	if (refcount_dec_and_test(&cqp_request->refcnt))
504 		irdma_free_cqp_request(cqp, cqp_request);
505 }
506 
507 /**
508  * irdma_free_pending_cqp_request -free pending cqp request objs
509  * @cqp: cqp ptr
510  * @cqp_request: to be put back in cqp list
511  */
512 static void
513 irdma_free_pending_cqp_request(struct irdma_cqp *cqp,
514 			       struct irdma_cqp_request *cqp_request)
515 {
516 	if (cqp_request->waiting) {
517 		cqp_request->compl_info.error = true;
518 		cqp_request->request_done = true;
519 		wake_up(&cqp_request->waitq);
520 	}
521 	wait_event_timeout(cqp->remove_wq,
522 			   refcount_read(&cqp_request->refcnt) == 1, 1000);
523 	irdma_put_cqp_request(cqp, cqp_request);
524 }
525 
526 /**
527  * irdma_cleanup_pending_cqp_op - clean-up cqp with no
528  * completions
529  * @rf: RDMA PCI function
530  */
531 void irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf)
532 {
533 	struct irdma_sc_dev *dev = &rf->sc_dev;
534 	struct irdma_cqp *cqp = &rf->cqp;
535 	struct irdma_cqp_request *cqp_request = NULL;
536 	struct cqp_cmds_info *pcmdinfo = NULL;
537 	u32 i, pending_work, wqe_idx;
538 
539 	pending_work = IRDMA_RING_USED_QUANTA(cqp->sc_cqp.sq_ring);
540 	wqe_idx = IRDMA_RING_CURRENT_TAIL(cqp->sc_cqp.sq_ring);
541 	for (i = 0; i < pending_work; i++) {
542 		cqp_request = (struct irdma_cqp_request *)(unsigned long)
543 				      cqp->scratch_array[wqe_idx];
544 		if (cqp_request)
545 			irdma_free_pending_cqp_request(cqp, cqp_request);
546 		wqe_idx = (wqe_idx + 1) % IRDMA_RING_SIZE(cqp->sc_cqp.sq_ring);
547 	}
548 
549 	while (!list_empty(&dev->cqp_cmd_head)) {
550 		pcmdinfo = irdma_remove_cqp_head(dev);
551 		cqp_request =
552 			container_of(pcmdinfo, struct irdma_cqp_request, info);
553 		if (cqp_request)
554 			irdma_free_pending_cqp_request(cqp, cqp_request);
555 	}
556 }
557 
558 /**
559  * irdma_wait_event - wait for completion
560  * @rf: RDMA PCI function
561  * @cqp_request: cqp request to wait
562  */
563 static int irdma_wait_event(struct irdma_pci_f *rf,
564 			    struct irdma_cqp_request *cqp_request)
565 {
566 	struct irdma_cqp_timeout cqp_timeout = {};
567 	bool cqp_error = false;
568 	int err_code = 0;
569 
570 	cqp_timeout.compl_cqp_cmds = rf->sc_dev.cqp_cmd_stats[IRDMA_OP_CMPL_CMDS];
571 	do {
572 		irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
573 		if (wait_event_timeout(cqp_request->waitq,
574 				       cqp_request->request_done,
575 				       msecs_to_jiffies(CQP_COMPL_WAIT_TIME_MS)))
576 			break;
577 
578 		irdma_check_cqp_progress(&cqp_timeout, &rf->sc_dev);
579 
580 		if (cqp_timeout.count < CQP_TIMEOUT_THRESHOLD)
581 			continue;
582 
583 		if (!rf->reset) {
584 			rf->reset = true;
585 			rf->gen_ops.request_reset(rf);
586 		}
587 		return -ETIMEDOUT;
588 	} while (1);
589 
590 	cqp_error = cqp_request->compl_info.error;
591 	if (cqp_error) {
592 		err_code = -EIO;
593 		if (cqp_request->compl_info.maj_err_code == 0xFFFF) {
594 			if (cqp_request->compl_info.min_err_code == 0x8002)
595 				err_code = -EBUSY;
596 			else if (cqp_request->compl_info.min_err_code == 0x8029) {
597 				if (!rf->reset) {
598 					rf->reset = true;
599 					rf->gen_ops.request_reset(rf);
600 				}
601 			}
602 		}
603 	}
604 
605 	return err_code;
606 }
607 
608 static const char *const irdma_cqp_cmd_names[IRDMA_MAX_CQP_OPS] = {
609 	[IRDMA_OP_CEQ_DESTROY] = "Destroy CEQ Cmd",
610 	[IRDMA_OP_AEQ_DESTROY] = "Destroy AEQ Cmd",
611 	[IRDMA_OP_DELETE_ARP_CACHE_ENTRY] = "Delete ARP Cache Cmd",
612 	[IRDMA_OP_MANAGE_APBVT_ENTRY] = "Manage APBV Table Entry Cmd",
613 	[IRDMA_OP_CEQ_CREATE] = "CEQ Create Cmd",
614 	[IRDMA_OP_AEQ_CREATE] = "AEQ Destroy Cmd",
615 	[IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY] = "Manage Quad Hash Table Entry Cmd",
616 	[IRDMA_OP_QP_MODIFY] = "Modify QP Cmd",
617 	[IRDMA_OP_QP_UPLOAD_CONTEXT] = "Upload Context Cmd",
618 	[IRDMA_OP_CQ_CREATE] = "Create CQ Cmd",
619 	[IRDMA_OP_CQ_DESTROY] = "Destroy CQ Cmd",
620 	[IRDMA_OP_QP_CREATE] = "Create QP Cmd",
621 	[IRDMA_OP_QP_DESTROY] = "Destroy QP Cmd",
622 	[IRDMA_OP_ALLOC_STAG] = "Allocate STag Cmd",
623 	[IRDMA_OP_MR_REG_NON_SHARED] = "Register Non-Shared MR Cmd",
624 	[IRDMA_OP_DEALLOC_STAG] = "Deallocate STag Cmd",
625 	[IRDMA_OP_MW_ALLOC] = "Allocate Memory Window Cmd",
626 	[IRDMA_OP_QP_FLUSH_WQES] = "Flush QP Cmd",
627 	[IRDMA_OP_ADD_ARP_CACHE_ENTRY] = "Add ARP Cache Cmd",
628 	[IRDMA_OP_MANAGE_PUSH_PAGE] = "Manage Push Page Cmd",
629 	[IRDMA_OP_UPDATE_PE_SDS] = "Update PE SDs Cmd",
630 	[IRDMA_OP_MANAGE_HMC_PM_FUNC_TABLE] = "Manage HMC PM Function Table Cmd",
631 	[IRDMA_OP_SUSPEND] = "Suspend QP Cmd",
632 	[IRDMA_OP_RESUME] = "Resume QP Cmd",
633 	[IRDMA_OP_MANAGE_VF_PBLE_BP] = "Manage VF PBLE Backing Pages Cmd",
634 	[IRDMA_OP_QUERY_FPM_VAL] = "Query FPM Values Cmd",
635 	[IRDMA_OP_COMMIT_FPM_VAL] = "Commit FPM Values Cmd",
636 	[IRDMA_OP_AH_CREATE] = "Create Address Handle Cmd",
637 	[IRDMA_OP_AH_MODIFY] = "Modify Address Handle Cmd",
638 	[IRDMA_OP_AH_DESTROY] = "Destroy Address Handle Cmd",
639 	[IRDMA_OP_MC_CREATE] = "Create Multicast Group Cmd",
640 	[IRDMA_OP_MC_DESTROY] = "Destroy Multicast Group Cmd",
641 	[IRDMA_OP_MC_MODIFY] = "Modify Multicast Group Cmd",
642 	[IRDMA_OP_STATS_ALLOCATE] = "Add Statistics Instance Cmd",
643 	[IRDMA_OP_STATS_FREE] = "Free Statistics Instance Cmd",
644 	[IRDMA_OP_STATS_GATHER] = "Gather Statistics Cmd",
645 	[IRDMA_OP_WS_ADD_NODE] = "Add Work Scheduler Node Cmd",
646 	[IRDMA_OP_WS_MODIFY_NODE] = "Modify Work Scheduler Node Cmd",
647 	[IRDMA_OP_WS_DELETE_NODE] = "Delete Work Scheduler Node Cmd",
648 	[IRDMA_OP_SET_UP_MAP] = "Set UP-UP Mapping Cmd",
649 	[IRDMA_OP_GEN_AE] = "Generate AE Cmd",
650 	[IRDMA_OP_QUERY_RDMA_FEATURES] = "RDMA Get Features Cmd",
651 	[IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY] = "Allocate Local MAC Entry Cmd",
652 	[IRDMA_OP_ADD_LOCAL_MAC_ENTRY] = "Add Local MAC Entry Cmd",
653 	[IRDMA_OP_DELETE_LOCAL_MAC_ENTRY] = "Delete Local MAC Entry Cmd",
654 	[IRDMA_OP_CQ_MODIFY] = "CQ Modify Cmd",
655 };
656 
657 static const struct irdma_cqp_err_info irdma_noncrit_err_list[] = {
658 	{0xffff, 0x8002, "Invalid State"},
659 	{0xffff, 0x8006, "Flush No Wqe Pending"},
660 	{0xffff, 0x8007, "Modify QP Bad Close"},
661 	{0xffff, 0x8009, "LLP Closed"},
662 	{0xffff, 0x800a, "Reset Not Sent"}
663 };
664 
665 /**
666  * irdma_cqp_crit_err - check if CQP error is critical
667  * @dev: pointer to dev structure
668  * @cqp_cmd: code for last CQP operation
669  * @maj_err_code: major error code
670  * @min_err_code: minot error code
671  */
672 bool irdma_cqp_crit_err(struct irdma_sc_dev *dev, u8 cqp_cmd,
673 			u16 maj_err_code, u16 min_err_code)
674 {
675 	int i;
676 
677 	for (i = 0; i < ARRAY_SIZE(irdma_noncrit_err_list); ++i) {
678 		if (maj_err_code == irdma_noncrit_err_list[i].maj &&
679 		    min_err_code == irdma_noncrit_err_list[i].min) {
680 			ibdev_dbg(to_ibdev(dev),
681 				  "CQP: [%s Error][%s] maj=0x%x min=0x%x\n",
682 				  irdma_noncrit_err_list[i].desc,
683 				  irdma_cqp_cmd_names[cqp_cmd], maj_err_code,
684 				  min_err_code);
685 			return false;
686 		}
687 	}
688 	return true;
689 }
690 
691 /**
692  * irdma_handle_cqp_op - process cqp command
693  * @rf: RDMA PCI function
694  * @cqp_request: cqp request to process
695  */
696 int irdma_handle_cqp_op(struct irdma_pci_f *rf,
697 			struct irdma_cqp_request *cqp_request)
698 {
699 	struct irdma_sc_dev *dev = &rf->sc_dev;
700 	struct cqp_cmds_info *info = &cqp_request->info;
701 	int status;
702 	bool put_cqp_request = true;
703 
704 	if (rf->reset)
705 		return -EBUSY;
706 
707 	irdma_get_cqp_request(cqp_request);
708 	status = irdma_process_cqp_cmd(dev, info);
709 	if (status)
710 		goto err;
711 
712 	if (cqp_request->waiting) {
713 		put_cqp_request = false;
714 		status = irdma_wait_event(rf, cqp_request);
715 		if (status)
716 			goto err;
717 	}
718 
719 	return 0;
720 
721 err:
722 	if (irdma_cqp_crit_err(dev, info->cqp_cmd,
723 			       cqp_request->compl_info.maj_err_code,
724 			       cqp_request->compl_info.min_err_code))
725 		ibdev_err(&rf->iwdev->ibdev,
726 			  "[%s Error][op_code=%d] status=%d waiting=%d completion_err=%d maj=0x%x min=0x%x\n",
727 			  irdma_cqp_cmd_names[info->cqp_cmd], info->cqp_cmd, status, cqp_request->waiting,
728 			  cqp_request->compl_info.error, cqp_request->compl_info.maj_err_code,
729 			  cqp_request->compl_info.min_err_code);
730 
731 	if (put_cqp_request)
732 		irdma_put_cqp_request(&rf->cqp, cqp_request);
733 
734 	return status;
735 }
736 
737 void irdma_qp_add_ref(struct ib_qp *ibqp)
738 {
739 	struct irdma_qp *iwqp = (struct irdma_qp *)ibqp;
740 
741 	refcount_inc(&iwqp->refcnt);
742 }
743 
744 void irdma_qp_rem_ref(struct ib_qp *ibqp)
745 {
746 	struct irdma_qp *iwqp = to_iwqp(ibqp);
747 	struct irdma_device *iwdev = iwqp->iwdev;
748 	u32 qp_num;
749 	unsigned long flags;
750 
751 	spin_lock_irqsave(&iwdev->rf->qptable_lock, flags);
752 	if (!refcount_dec_and_test(&iwqp->refcnt)) {
753 		spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
754 		return;
755 	}
756 
757 	qp_num = iwqp->ibqp.qp_num;
758 	iwdev->rf->qp_table[qp_num] = NULL;
759 	spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
760 	complete(&iwqp->free_qp);
761 }
762 
763 struct ib_device *to_ibdev(struct irdma_sc_dev *dev)
764 {
765 	return &(container_of(dev, struct irdma_pci_f, sc_dev))->iwdev->ibdev;
766 }
767 
768 /**
769  * irdma_get_qp - get qp address
770  * @device: iwarp device
771  * @qpn: qp number
772  */
773 struct ib_qp *irdma_get_qp(struct ib_device *device, int qpn)
774 {
775 	struct irdma_device *iwdev = to_iwdev(device);
776 
777 	if (qpn < IW_FIRST_QPN || qpn >= iwdev->rf->max_qp)
778 		return NULL;
779 
780 	return &iwdev->rf->qp_table[qpn]->ibqp;
781 }
782 
783 /**
784  * irdma_remove_cqp_head - return head entry and remove
785  * @dev: device
786  */
787 void *irdma_remove_cqp_head(struct irdma_sc_dev *dev)
788 {
789 	struct list_head *entry;
790 	struct list_head *list = &dev->cqp_cmd_head;
791 
792 	if (list_empty(list))
793 		return NULL;
794 
795 	entry = list->next;
796 	list_del(entry);
797 
798 	return entry;
799 }
800 
801 /**
802  * irdma_cqp_sds_cmd - create cqp command for sd
803  * @dev: hardware control device structure
804  * @sdinfo: information for sd cqp
805  *
806  */
807 int irdma_cqp_sds_cmd(struct irdma_sc_dev *dev,
808 		      struct irdma_update_sds_info *sdinfo)
809 {
810 	struct irdma_cqp_request *cqp_request;
811 	struct cqp_cmds_info *cqp_info;
812 	struct irdma_pci_f *rf = dev_to_rf(dev);
813 	int status;
814 
815 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
816 	if (!cqp_request)
817 		return -ENOMEM;
818 
819 	cqp_info = &cqp_request->info;
820 	memcpy(&cqp_info->in.u.update_pe_sds.info, sdinfo,
821 	       sizeof(cqp_info->in.u.update_pe_sds.info));
822 	cqp_info->cqp_cmd = IRDMA_OP_UPDATE_PE_SDS;
823 	cqp_info->post_sq = 1;
824 	cqp_info->in.u.update_pe_sds.dev = dev;
825 	cqp_info->in.u.update_pe_sds.scratch = (uintptr_t)cqp_request;
826 
827 	status = irdma_handle_cqp_op(rf, cqp_request);
828 	irdma_put_cqp_request(&rf->cqp, cqp_request);
829 
830 	return status;
831 }
832 
833 /**
834  * irdma_cqp_qp_suspend_resume - cqp command for suspend/resume
835  * @qp: hardware control qp
836  * @op: suspend or resume
837  */
838 int irdma_cqp_qp_suspend_resume(struct irdma_sc_qp *qp, u8 op)
839 {
840 	struct irdma_sc_dev *dev = qp->dev;
841 	struct irdma_cqp_request *cqp_request;
842 	struct irdma_sc_cqp *cqp = dev->cqp;
843 	struct cqp_cmds_info *cqp_info;
844 	struct irdma_pci_f *rf = dev_to_rf(dev);
845 	int status;
846 
847 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false);
848 	if (!cqp_request)
849 		return -ENOMEM;
850 
851 	cqp_info = &cqp_request->info;
852 	cqp_info->cqp_cmd = op;
853 	cqp_info->in.u.suspend_resume.cqp = cqp;
854 	cqp_info->in.u.suspend_resume.qp = qp;
855 	cqp_info->in.u.suspend_resume.scratch = (uintptr_t)cqp_request;
856 
857 	status = irdma_handle_cqp_op(rf, cqp_request);
858 	irdma_put_cqp_request(&rf->cqp, cqp_request);
859 
860 	return status;
861 }
862 
863 /**
864  * irdma_term_modify_qp - modify qp for term message
865  * @qp: hardware control qp
866  * @next_state: qp's next state
867  * @term: terminate code
868  * @term_len: length
869  */
870 void irdma_term_modify_qp(struct irdma_sc_qp *qp, u8 next_state, u8 term,
871 			  u8 term_len)
872 {
873 	struct irdma_qp *iwqp;
874 
875 	iwqp = qp->qp_uk.back_qp;
876 	irdma_next_iw_state(iwqp, next_state, 0, term, term_len);
877 };
878 
879 /**
880  * irdma_terminate_done - after terminate is completed
881  * @qp: hardware control qp
882  * @timeout_occurred: indicates if terminate timer expired
883  */
884 void irdma_terminate_done(struct irdma_sc_qp *qp, int timeout_occurred)
885 {
886 	struct irdma_qp *iwqp;
887 	u8 hte = 0;
888 	bool first_time;
889 	unsigned long flags;
890 
891 	iwqp = qp->qp_uk.back_qp;
892 	spin_lock_irqsave(&iwqp->lock, flags);
893 	if (iwqp->hte_added) {
894 		iwqp->hte_added = 0;
895 		hte = 1;
896 	}
897 	first_time = !(qp->term_flags & IRDMA_TERM_DONE);
898 	qp->term_flags |= IRDMA_TERM_DONE;
899 	spin_unlock_irqrestore(&iwqp->lock, flags);
900 	if (first_time) {
901 		if (!timeout_occurred)
902 			irdma_terminate_del_timer(qp);
903 
904 		irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, hte, 0, 0);
905 		irdma_cm_disconn(iwqp);
906 	}
907 }
908 
909 static void irdma_terminate_timeout(struct timer_list *t)
910 {
911 	struct irdma_qp *iwqp = from_timer(iwqp, t, terminate_timer);
912 	struct irdma_sc_qp *qp = &iwqp->sc_qp;
913 
914 	irdma_terminate_done(qp, 1);
915 	irdma_qp_rem_ref(&iwqp->ibqp);
916 }
917 
918 /**
919  * irdma_terminate_start_timer - start terminate timeout
920  * @qp: hardware control qp
921  */
922 void irdma_terminate_start_timer(struct irdma_sc_qp *qp)
923 {
924 	struct irdma_qp *iwqp;
925 
926 	iwqp = qp->qp_uk.back_qp;
927 	irdma_qp_add_ref(&iwqp->ibqp);
928 	timer_setup(&iwqp->terminate_timer, irdma_terminate_timeout, 0);
929 	iwqp->terminate_timer.expires = jiffies + HZ;
930 
931 	add_timer(&iwqp->terminate_timer);
932 }
933 
934 /**
935  * irdma_terminate_del_timer - delete terminate timeout
936  * @qp: hardware control qp
937  */
938 void irdma_terminate_del_timer(struct irdma_sc_qp *qp)
939 {
940 	struct irdma_qp *iwqp;
941 	int ret;
942 
943 	iwqp = qp->qp_uk.back_qp;
944 	ret = del_timer(&iwqp->terminate_timer);
945 	if (ret)
946 		irdma_qp_rem_ref(&iwqp->ibqp);
947 }
948 
949 /**
950  * irdma_cqp_query_fpm_val_cmd - send cqp command for fpm
951  * @dev: function device struct
952  * @val_mem: buffer for fpm
953  * @hmc_fn_id: function id for fpm
954  */
955 int irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev,
956 				struct irdma_dma_mem *val_mem, u8 hmc_fn_id)
957 {
958 	struct irdma_cqp_request *cqp_request;
959 	struct cqp_cmds_info *cqp_info;
960 	struct irdma_pci_f *rf = dev_to_rf(dev);
961 	int status;
962 
963 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
964 	if (!cqp_request)
965 		return -ENOMEM;
966 
967 	cqp_info = &cqp_request->info;
968 	cqp_request->param = NULL;
969 	cqp_info->in.u.query_fpm_val.cqp = dev->cqp;
970 	cqp_info->in.u.query_fpm_val.fpm_val_pa = val_mem->pa;
971 	cqp_info->in.u.query_fpm_val.fpm_val_va = val_mem->va;
972 	cqp_info->in.u.query_fpm_val.hmc_fn_id = hmc_fn_id;
973 	cqp_info->cqp_cmd = IRDMA_OP_QUERY_FPM_VAL;
974 	cqp_info->post_sq = 1;
975 	cqp_info->in.u.query_fpm_val.scratch = (uintptr_t)cqp_request;
976 
977 	status = irdma_handle_cqp_op(rf, cqp_request);
978 	irdma_put_cqp_request(&rf->cqp, cqp_request);
979 
980 	return status;
981 }
982 
983 /**
984  * irdma_cqp_commit_fpm_val_cmd - commit fpm values in hw
985  * @dev: hardware control device structure
986  * @val_mem: buffer with fpm values
987  * @hmc_fn_id: function id for fpm
988  */
989 int irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev,
990 				 struct irdma_dma_mem *val_mem, u8 hmc_fn_id)
991 {
992 	struct irdma_cqp_request *cqp_request;
993 	struct cqp_cmds_info *cqp_info;
994 	struct irdma_pci_f *rf = dev_to_rf(dev);
995 	int status;
996 
997 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
998 	if (!cqp_request)
999 		return -ENOMEM;
1000 
1001 	cqp_info = &cqp_request->info;
1002 	cqp_request->param = NULL;
1003 	cqp_info->in.u.commit_fpm_val.cqp = dev->cqp;
1004 	cqp_info->in.u.commit_fpm_val.fpm_val_pa = val_mem->pa;
1005 	cqp_info->in.u.commit_fpm_val.fpm_val_va = val_mem->va;
1006 	cqp_info->in.u.commit_fpm_val.hmc_fn_id = hmc_fn_id;
1007 	cqp_info->cqp_cmd = IRDMA_OP_COMMIT_FPM_VAL;
1008 	cqp_info->post_sq = 1;
1009 	cqp_info->in.u.commit_fpm_val.scratch = (uintptr_t)cqp_request;
1010 
1011 	status = irdma_handle_cqp_op(rf, cqp_request);
1012 	irdma_put_cqp_request(&rf->cqp, cqp_request);
1013 
1014 	return status;
1015 }
1016 
1017 /**
1018  * irdma_cqp_cq_create_cmd - create a cq for the cqp
1019  * @dev: device pointer
1020  * @cq: pointer to created cq
1021  */
1022 int irdma_cqp_cq_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq)
1023 {
1024 	struct irdma_pci_f *rf = dev_to_rf(dev);
1025 	struct irdma_cqp *iwcqp = &rf->cqp;
1026 	struct irdma_cqp_request *cqp_request;
1027 	struct cqp_cmds_info *cqp_info;
1028 	int status;
1029 
1030 	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
1031 	if (!cqp_request)
1032 		return -ENOMEM;
1033 
1034 	cqp_info = &cqp_request->info;
1035 	cqp_info->cqp_cmd = IRDMA_OP_CQ_CREATE;
1036 	cqp_info->post_sq = 1;
1037 	cqp_info->in.u.cq_create.cq = cq;
1038 	cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
1039 
1040 	status = irdma_handle_cqp_op(rf, cqp_request);
1041 	irdma_put_cqp_request(iwcqp, cqp_request);
1042 
1043 	return status;
1044 }
1045 
1046 /**
1047  * irdma_cqp_qp_create_cmd - create a qp for the cqp
1048  * @dev: device pointer
1049  * @qp: pointer to created qp
1050  */
1051 int irdma_cqp_qp_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
1052 {
1053 	struct irdma_pci_f *rf = dev_to_rf(dev);
1054 	struct irdma_cqp *iwcqp = &rf->cqp;
1055 	struct irdma_cqp_request *cqp_request;
1056 	struct cqp_cmds_info *cqp_info;
1057 	struct irdma_create_qp_info *qp_info;
1058 	int status;
1059 
1060 	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
1061 	if (!cqp_request)
1062 		return -ENOMEM;
1063 
1064 	cqp_info = &cqp_request->info;
1065 	qp_info = &cqp_request->info.in.u.qp_create.info;
1066 	memset(qp_info, 0, sizeof(*qp_info));
1067 	qp_info->cq_num_valid = true;
1068 	qp_info->next_iwarp_state = IRDMA_QP_STATE_RTS;
1069 	cqp_info->cqp_cmd = IRDMA_OP_QP_CREATE;
1070 	cqp_info->post_sq = 1;
1071 	cqp_info->in.u.qp_create.qp = qp;
1072 	cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
1073 
1074 	status = irdma_handle_cqp_op(rf, cqp_request);
1075 	irdma_put_cqp_request(iwcqp, cqp_request);
1076 
1077 	return status;
1078 }
1079 
1080 /**
1081  * irdma_dealloc_push_page - free a push page for qp
1082  * @rf: RDMA PCI function
1083  * @qp: hardware control qp
1084  */
1085 static void irdma_dealloc_push_page(struct irdma_pci_f *rf,
1086 				    struct irdma_sc_qp *qp)
1087 {
1088 	struct irdma_cqp_request *cqp_request;
1089 	struct cqp_cmds_info *cqp_info;
1090 	int status;
1091 
1092 	if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX)
1093 		return;
1094 
1095 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false);
1096 	if (!cqp_request)
1097 		return;
1098 
1099 	cqp_info = &cqp_request->info;
1100 	cqp_info->cqp_cmd = IRDMA_OP_MANAGE_PUSH_PAGE;
1101 	cqp_info->post_sq = 1;
1102 	cqp_info->in.u.manage_push_page.info.push_idx = qp->push_idx;
1103 	cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;
1104 	cqp_info->in.u.manage_push_page.info.free_page = 1;
1105 	cqp_info->in.u.manage_push_page.info.push_page_type = 0;
1106 	cqp_info->in.u.manage_push_page.cqp = &rf->cqp.sc_cqp;
1107 	cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
1108 	status = irdma_handle_cqp_op(rf, cqp_request);
1109 	if (!status)
1110 		qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX;
1111 	irdma_put_cqp_request(&rf->cqp, cqp_request);
1112 }
1113 
1114 /**
1115  * irdma_free_qp_rsrc - free up memory resources for qp
1116  * @iwqp: qp ptr (user or kernel)
1117  */
1118 void irdma_free_qp_rsrc(struct irdma_qp *iwqp)
1119 {
1120 	struct irdma_device *iwdev = iwqp->iwdev;
1121 	struct irdma_pci_f *rf = iwdev->rf;
1122 	u32 qp_num = iwqp->ibqp.qp_num;
1123 
1124 	irdma_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp);
1125 	irdma_dealloc_push_page(rf, &iwqp->sc_qp);
1126 	if (iwqp->sc_qp.vsi) {
1127 		irdma_qp_rem_qos(&iwqp->sc_qp);
1128 		iwqp->sc_qp.dev->ws_remove(iwqp->sc_qp.vsi,
1129 					   iwqp->sc_qp.user_pri);
1130 	}
1131 
1132 	if (qp_num > 2)
1133 		irdma_free_rsrc(rf, rf->allocated_qps, qp_num);
1134 	dma_free_coherent(rf->sc_dev.hw->device, iwqp->q2_ctx_mem.size,
1135 			  iwqp->q2_ctx_mem.va, iwqp->q2_ctx_mem.pa);
1136 	iwqp->q2_ctx_mem.va = NULL;
1137 	dma_free_coherent(rf->sc_dev.hw->device, iwqp->kqp.dma_mem.size,
1138 			  iwqp->kqp.dma_mem.va, iwqp->kqp.dma_mem.pa);
1139 	iwqp->kqp.dma_mem.va = NULL;
1140 	kfree(iwqp->kqp.sq_wrid_mem);
1141 	kfree(iwqp->kqp.rq_wrid_mem);
1142 }
1143 
1144 /**
1145  * irdma_cq_wq_destroy - send cq destroy cqp
1146  * @rf: RDMA PCI function
1147  * @cq: hardware control cq
1148  */
1149 void irdma_cq_wq_destroy(struct irdma_pci_f *rf, struct irdma_sc_cq *cq)
1150 {
1151 	struct irdma_cqp_request *cqp_request;
1152 	struct cqp_cmds_info *cqp_info;
1153 
1154 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
1155 	if (!cqp_request)
1156 		return;
1157 
1158 	cqp_info = &cqp_request->info;
1159 	cqp_info->cqp_cmd = IRDMA_OP_CQ_DESTROY;
1160 	cqp_info->post_sq = 1;
1161 	cqp_info->in.u.cq_destroy.cq = cq;
1162 	cqp_info->in.u.cq_destroy.scratch = (uintptr_t)cqp_request;
1163 
1164 	irdma_handle_cqp_op(rf, cqp_request);
1165 	irdma_put_cqp_request(&rf->cqp, cqp_request);
1166 }
1167 
1168 /**
1169  * irdma_hw_modify_qp_callback - handle state for modifyQPs that don't wait
1170  * @cqp_request: modify QP completion
1171  */
1172 static void irdma_hw_modify_qp_callback(struct irdma_cqp_request *cqp_request)
1173 {
1174 	struct cqp_cmds_info *cqp_info;
1175 	struct irdma_qp *iwqp;
1176 
1177 	cqp_info = &cqp_request->info;
1178 	iwqp = cqp_info->in.u.qp_modify.qp->qp_uk.back_qp;
1179 	atomic_dec(&iwqp->hw_mod_qp_pend);
1180 	wake_up(&iwqp->mod_qp_waitq);
1181 }
1182 
1183 /**
1184  * irdma_hw_modify_qp - setup cqp for modify qp
1185  * @iwdev: RDMA device
1186  * @iwqp: qp ptr (user or kernel)
1187  * @info: info for modify qp
1188  * @wait: flag to wait or not for modify qp completion
1189  */
1190 int irdma_hw_modify_qp(struct irdma_device *iwdev, struct irdma_qp *iwqp,
1191 		       struct irdma_modify_qp_info *info, bool wait)
1192 {
1193 	int status;
1194 	struct irdma_pci_f *rf = iwdev->rf;
1195 	struct irdma_cqp_request *cqp_request;
1196 	struct cqp_cmds_info *cqp_info;
1197 	struct irdma_modify_qp_info *m_info;
1198 
1199 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
1200 	if (!cqp_request)
1201 		return -ENOMEM;
1202 
1203 	if (!wait) {
1204 		cqp_request->callback_fcn = irdma_hw_modify_qp_callback;
1205 		atomic_inc(&iwqp->hw_mod_qp_pend);
1206 	}
1207 	cqp_info = &cqp_request->info;
1208 	m_info = &cqp_info->in.u.qp_modify.info;
1209 	memcpy(m_info, info, sizeof(*m_info));
1210 	cqp_info->cqp_cmd = IRDMA_OP_QP_MODIFY;
1211 	cqp_info->post_sq = 1;
1212 	cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp;
1213 	cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request;
1214 	status = irdma_handle_cqp_op(rf, cqp_request);
1215 	irdma_put_cqp_request(&rf->cqp, cqp_request);
1216 	if (status) {
1217 		if (rdma_protocol_roce(&iwdev->ibdev, 1))
1218 			return status;
1219 
1220 		switch (m_info->next_iwarp_state) {
1221 			struct irdma_gen_ae_info ae_info;
1222 
1223 		case IRDMA_QP_STATE_RTS:
1224 		case IRDMA_QP_STATE_IDLE:
1225 		case IRDMA_QP_STATE_TERMINATE:
1226 		case IRDMA_QP_STATE_CLOSING:
1227 			if (info->curr_iwarp_state == IRDMA_QP_STATE_IDLE)
1228 				irdma_send_reset(iwqp->cm_node);
1229 			else
1230 				iwqp->sc_qp.term_flags = IRDMA_TERM_DONE;
1231 			if (!wait) {
1232 				ae_info.ae_code = IRDMA_AE_BAD_CLOSE;
1233 				ae_info.ae_src = 0;
1234 				irdma_gen_ae(rf, &iwqp->sc_qp, &ae_info, false);
1235 			} else {
1236 				cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp,
1237 									      wait);
1238 				if (!cqp_request)
1239 					return -ENOMEM;
1240 
1241 				cqp_info = &cqp_request->info;
1242 				m_info = &cqp_info->in.u.qp_modify.info;
1243 				memcpy(m_info, info, sizeof(*m_info));
1244 				cqp_info->cqp_cmd = IRDMA_OP_QP_MODIFY;
1245 				cqp_info->post_sq = 1;
1246 				cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp;
1247 				cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request;
1248 				m_info->next_iwarp_state = IRDMA_QP_STATE_ERROR;
1249 				m_info->reset_tcp_conn = true;
1250 				irdma_handle_cqp_op(rf, cqp_request);
1251 				irdma_put_cqp_request(&rf->cqp, cqp_request);
1252 			}
1253 			break;
1254 		case IRDMA_QP_STATE_ERROR:
1255 		default:
1256 			break;
1257 		}
1258 	}
1259 
1260 	return status;
1261 }
1262 
1263 /**
1264  * irdma_cqp_cq_destroy_cmd - destroy the cqp cq
1265  * @dev: device pointer
1266  * @cq: pointer to cq
1267  */
1268 void irdma_cqp_cq_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq)
1269 {
1270 	struct irdma_pci_f *rf = dev_to_rf(dev);
1271 
1272 	irdma_cq_wq_destroy(rf, cq);
1273 }
1274 
1275 /**
1276  * irdma_cqp_qp_destroy_cmd - destroy the cqp
1277  * @dev: device pointer
1278  * @qp: pointer to qp
1279  */
1280 int irdma_cqp_qp_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
1281 {
1282 	struct irdma_pci_f *rf = dev_to_rf(dev);
1283 	struct irdma_cqp *iwcqp = &rf->cqp;
1284 	struct irdma_cqp_request *cqp_request;
1285 	struct cqp_cmds_info *cqp_info;
1286 	int status;
1287 
1288 	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
1289 	if (!cqp_request)
1290 		return -ENOMEM;
1291 
1292 	cqp_info = &cqp_request->info;
1293 	memset(cqp_info, 0, sizeof(*cqp_info));
1294 	cqp_info->cqp_cmd = IRDMA_OP_QP_DESTROY;
1295 	cqp_info->post_sq = 1;
1296 	cqp_info->in.u.qp_destroy.qp = qp;
1297 	cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;
1298 	cqp_info->in.u.qp_destroy.remove_hash_idx = true;
1299 
1300 	status = irdma_handle_cqp_op(rf, cqp_request);
1301 	irdma_put_cqp_request(&rf->cqp, cqp_request);
1302 
1303 	return status;
1304 }
1305 
1306 /**
1307  * irdma_ieq_mpa_crc_ae - generate AE for crc error
1308  * @dev: hardware control device structure
1309  * @qp: hardware control qp
1310  */
1311 void irdma_ieq_mpa_crc_ae(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
1312 {
1313 	struct irdma_gen_ae_info info = {};
1314 	struct irdma_pci_f *rf = dev_to_rf(dev);
1315 
1316 	ibdev_dbg(&rf->iwdev->ibdev, "AEQ: Generate MPA CRC AE\n");
1317 	info.ae_code = IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR;
1318 	info.ae_src = IRDMA_AE_SOURCE_RQ;
1319 	irdma_gen_ae(rf, qp, &info, false);
1320 }
1321 
1322 /**
1323  * irdma_init_hash_desc - initialize hash for crc calculation
1324  * @desc: cryption type
1325  */
1326 int irdma_init_hash_desc(struct shash_desc **desc)
1327 {
1328 	struct crypto_shash *tfm;
1329 	struct shash_desc *tdesc;
1330 
1331 	tfm = crypto_alloc_shash("crc32c", 0, 0);
1332 	if (IS_ERR(tfm))
1333 		return -EINVAL;
1334 
1335 	tdesc = kzalloc(sizeof(*tdesc) + crypto_shash_descsize(tfm),
1336 			GFP_KERNEL);
1337 	if (!tdesc) {
1338 		crypto_free_shash(tfm);
1339 		return -EINVAL;
1340 	}
1341 
1342 	tdesc->tfm = tfm;
1343 	*desc = tdesc;
1344 
1345 	return 0;
1346 }
1347 
1348 /**
1349  * irdma_free_hash_desc - free hash desc
1350  * @desc: to be freed
1351  */
1352 void irdma_free_hash_desc(struct shash_desc *desc)
1353 {
1354 	if (desc) {
1355 		crypto_free_shash(desc->tfm);
1356 		kfree(desc);
1357 	}
1358 }
1359 
1360 /**
1361  * irdma_ieq_check_mpacrc - check if mpa crc is OK
1362  * @desc: desc for hash
1363  * @addr: address of buffer for crc
1364  * @len: length of buffer
1365  * @val: value to be compared
1366  */
1367 int irdma_ieq_check_mpacrc(struct shash_desc *desc, void *addr, u32 len,
1368 			   u32 val)
1369 {
1370 	u32 crc = 0;
1371 	int ret;
1372 	int ret_code = 0;
1373 
1374 	crypto_shash_init(desc);
1375 	ret = crypto_shash_update(desc, addr, len);
1376 	if (!ret)
1377 		crypto_shash_final(desc, (u8 *)&crc);
1378 	if (crc != val)
1379 		ret_code = -EINVAL;
1380 
1381 	return ret_code;
1382 }
1383 
1384 /**
1385  * irdma_ieq_get_qp - get qp based on quad in puda buffer
1386  * @dev: hardware control device structure
1387  * @buf: receive puda buffer on exception q
1388  */
1389 struct irdma_sc_qp *irdma_ieq_get_qp(struct irdma_sc_dev *dev,
1390 				     struct irdma_puda_buf *buf)
1391 {
1392 	struct irdma_qp *iwqp;
1393 	struct irdma_cm_node *cm_node;
1394 	struct irdma_device *iwdev = buf->vsi->back_vsi;
1395 	u32 loc_addr[4] = {};
1396 	u32 rem_addr[4] = {};
1397 	u16 loc_port, rem_port;
1398 	struct ipv6hdr *ip6h;
1399 	struct iphdr *iph = (struct iphdr *)buf->iph;
1400 	struct tcphdr *tcph = (struct tcphdr *)buf->tcph;
1401 
1402 	if (iph->version == 4) {
1403 		loc_addr[0] = ntohl(iph->daddr);
1404 		rem_addr[0] = ntohl(iph->saddr);
1405 	} else {
1406 		ip6h = (struct ipv6hdr *)buf->iph;
1407 		irdma_copy_ip_ntohl(loc_addr, ip6h->daddr.in6_u.u6_addr32);
1408 		irdma_copy_ip_ntohl(rem_addr, ip6h->saddr.in6_u.u6_addr32);
1409 	}
1410 	loc_port = ntohs(tcph->dest);
1411 	rem_port = ntohs(tcph->source);
1412 	cm_node = irdma_find_node(&iwdev->cm_core, rem_port, rem_addr, loc_port,
1413 				  loc_addr, buf->vlan_valid ? buf->vlan_id : 0xFFFF);
1414 	if (!cm_node)
1415 		return NULL;
1416 
1417 	iwqp = cm_node->iwqp;
1418 	irdma_rem_ref_cm_node(cm_node);
1419 
1420 	return &iwqp->sc_qp;
1421 }
1422 
1423 /**
1424  * irdma_send_ieq_ack - ACKs for duplicate or OOO partials FPDUs
1425  * @qp: qp ptr
1426  */
1427 void irdma_send_ieq_ack(struct irdma_sc_qp *qp)
1428 {
1429 	struct irdma_cm_node *cm_node = ((struct irdma_qp *)qp->qp_uk.back_qp)->cm_node;
1430 	struct irdma_puda_buf *buf = qp->pfpdu.lastrcv_buf;
1431 	struct tcphdr *tcph = (struct tcphdr *)buf->tcph;
1432 
1433 	cm_node->tcp_cntxt.rcv_nxt = qp->pfpdu.nextseqnum;
1434 	cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
1435 
1436 	irdma_send_ack(cm_node);
1437 }
1438 
1439 /**
1440  * irdma_puda_ieq_get_ah_info - get AH info from IEQ buffer
1441  * @qp: qp pointer
1442  * @ah_info: AH info pointer
1443  */
1444 void irdma_puda_ieq_get_ah_info(struct irdma_sc_qp *qp,
1445 				struct irdma_ah_info *ah_info)
1446 {
1447 	struct irdma_puda_buf *buf = qp->pfpdu.ah_buf;
1448 	struct iphdr *iph;
1449 	struct ipv6hdr *ip6h;
1450 
1451 	memset(ah_info, 0, sizeof(*ah_info));
1452 	ah_info->do_lpbk = true;
1453 	ah_info->vlan_tag = buf->vlan_id;
1454 	ah_info->insert_vlan_tag = buf->vlan_valid;
1455 	ah_info->ipv4_valid = buf->ipv4;
1456 	ah_info->vsi = qp->vsi;
1457 
1458 	if (buf->smac_valid)
1459 		ether_addr_copy(ah_info->mac_addr, buf->smac);
1460 
1461 	if (buf->ipv4) {
1462 		ah_info->ipv4_valid = true;
1463 		iph = (struct iphdr *)buf->iph;
1464 		ah_info->hop_ttl = iph->ttl;
1465 		ah_info->tc_tos = iph->tos;
1466 		ah_info->dest_ip_addr[0] = ntohl(iph->daddr);
1467 		ah_info->src_ip_addr[0] = ntohl(iph->saddr);
1468 	} else {
1469 		ip6h = (struct ipv6hdr *)buf->iph;
1470 		ah_info->hop_ttl = ip6h->hop_limit;
1471 		ah_info->tc_tos = ip6h->priority;
1472 		irdma_copy_ip_ntohl(ah_info->dest_ip_addr,
1473 				    ip6h->daddr.in6_u.u6_addr32);
1474 		irdma_copy_ip_ntohl(ah_info->src_ip_addr,
1475 				    ip6h->saddr.in6_u.u6_addr32);
1476 	}
1477 
1478 	ah_info->dst_arpindex = irdma_arp_table(dev_to_rf(qp->dev),
1479 						ah_info->dest_ip_addr,
1480 						ah_info->ipv4_valid,
1481 						NULL, IRDMA_ARP_RESOLVE);
1482 }
1483 
1484 /**
1485  * irdma_gen1_ieq_update_tcpip_info - update tcpip in the buffer
1486  * @buf: puda to update
1487  * @len: length of buffer
1488  * @seqnum: seq number for tcp
1489  */
1490 static void irdma_gen1_ieq_update_tcpip_info(struct irdma_puda_buf *buf,
1491 					     u16 len, u32 seqnum)
1492 {
1493 	struct tcphdr *tcph;
1494 	struct iphdr *iph;
1495 	u16 iphlen;
1496 	u16 pktsize;
1497 	u8 *addr = buf->mem.va;
1498 
1499 	iphlen = (buf->ipv4) ? 20 : 40;
1500 	iph = (struct iphdr *)(addr + buf->maclen);
1501 	tcph = (struct tcphdr *)(addr + buf->maclen + iphlen);
1502 	pktsize = len + buf->tcphlen + iphlen;
1503 	iph->tot_len = htons(pktsize);
1504 	tcph->seq = htonl(seqnum);
1505 }
1506 
1507 /**
1508  * irdma_ieq_update_tcpip_info - update tcpip in the buffer
1509  * @buf: puda to update
1510  * @len: length of buffer
1511  * @seqnum: seq number for tcp
1512  */
1513 void irdma_ieq_update_tcpip_info(struct irdma_puda_buf *buf, u16 len,
1514 				 u32 seqnum)
1515 {
1516 	struct tcphdr *tcph;
1517 	u8 *addr;
1518 
1519 	if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
1520 		return irdma_gen1_ieq_update_tcpip_info(buf, len, seqnum);
1521 
1522 	addr = buf->mem.va;
1523 	tcph = (struct tcphdr *)addr;
1524 	tcph->seq = htonl(seqnum);
1525 }
1526 
1527 /**
1528  * irdma_gen1_puda_get_tcpip_info - get tcpip info from puda
1529  * buffer
1530  * @info: to get information
1531  * @buf: puda buffer
1532  */
1533 static int irdma_gen1_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,
1534 					  struct irdma_puda_buf *buf)
1535 {
1536 	struct iphdr *iph;
1537 	struct ipv6hdr *ip6h;
1538 	struct tcphdr *tcph;
1539 	u16 iphlen;
1540 	u16 pkt_len;
1541 	u8 *mem = buf->mem.va;
1542 	struct ethhdr *ethh = buf->mem.va;
1543 
1544 	if (ethh->h_proto == htons(0x8100)) {
1545 		info->vlan_valid = true;
1546 		buf->vlan_id = ntohs(((struct vlan_ethhdr *)ethh)->h_vlan_TCI) &
1547 			       VLAN_VID_MASK;
1548 	}
1549 
1550 	buf->maclen = (info->vlan_valid) ? 18 : 14;
1551 	iphlen = (info->l3proto) ? 40 : 20;
1552 	buf->ipv4 = (info->l3proto) ? false : true;
1553 	buf->iph = mem + buf->maclen;
1554 	iph = (struct iphdr *)buf->iph;
1555 	buf->tcph = buf->iph + iphlen;
1556 	tcph = (struct tcphdr *)buf->tcph;
1557 
1558 	if (buf->ipv4) {
1559 		pkt_len = ntohs(iph->tot_len);
1560 	} else {
1561 		ip6h = (struct ipv6hdr *)buf->iph;
1562 		pkt_len = ntohs(ip6h->payload_len) + iphlen;
1563 	}
1564 
1565 	buf->totallen = pkt_len + buf->maclen;
1566 
1567 	if (info->payload_len < buf->totallen) {
1568 		ibdev_dbg(to_ibdev(buf->vsi->dev),
1569 			  "ERR: payload_len = 0x%x totallen expected0x%x\n",
1570 			  info->payload_len, buf->totallen);
1571 		return -EINVAL;
1572 	}
1573 
1574 	buf->tcphlen = tcph->doff << 2;
1575 	buf->datalen = pkt_len - iphlen - buf->tcphlen;
1576 	buf->data = buf->datalen ? buf->tcph + buf->tcphlen : NULL;
1577 	buf->hdrlen = buf->maclen + iphlen + buf->tcphlen;
1578 	buf->seqnum = ntohl(tcph->seq);
1579 
1580 	return 0;
1581 }
1582 
1583 /**
1584  * irdma_puda_get_tcpip_info - get tcpip info from puda buffer
1585  * @info: to get information
1586  * @buf: puda buffer
1587  */
1588 int irdma_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,
1589 			      struct irdma_puda_buf *buf)
1590 {
1591 	struct tcphdr *tcph;
1592 	u32 pkt_len;
1593 	u8 *mem;
1594 
1595 	if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
1596 		return irdma_gen1_puda_get_tcpip_info(info, buf);
1597 
1598 	mem = buf->mem.va;
1599 	buf->vlan_valid = info->vlan_valid;
1600 	if (info->vlan_valid)
1601 		buf->vlan_id = info->vlan;
1602 
1603 	buf->ipv4 = info->ipv4;
1604 	if (buf->ipv4)
1605 		buf->iph = mem + IRDMA_IPV4_PAD;
1606 	else
1607 		buf->iph = mem;
1608 
1609 	buf->tcph = mem + IRDMA_TCP_OFFSET;
1610 	tcph = (struct tcphdr *)buf->tcph;
1611 	pkt_len = info->payload_len;
1612 	buf->totallen = pkt_len;
1613 	buf->tcphlen = tcph->doff << 2;
1614 	buf->datalen = pkt_len - IRDMA_TCP_OFFSET - buf->tcphlen;
1615 	buf->data = buf->datalen ? buf->tcph + buf->tcphlen : NULL;
1616 	buf->hdrlen = IRDMA_TCP_OFFSET + buf->tcphlen;
1617 	buf->seqnum = ntohl(tcph->seq);
1618 
1619 	if (info->smac_valid) {
1620 		ether_addr_copy(buf->smac, info->smac);
1621 		buf->smac_valid = true;
1622 	}
1623 
1624 	return 0;
1625 }
1626 
1627 /**
1628  * irdma_hw_stats_timeout - Stats timer-handler which updates all HW stats
1629  * @t: timer_list pointer
1630  */
1631 static void irdma_hw_stats_timeout(struct timer_list *t)
1632 {
1633 	struct irdma_vsi_pestat *pf_devstat =
1634 		from_timer(pf_devstat, t, stats_timer);
1635 	struct irdma_sc_vsi *sc_vsi = pf_devstat->vsi;
1636 
1637 	if (sc_vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
1638 		irdma_cqp_gather_stats_gen1(sc_vsi->dev, sc_vsi->pestat);
1639 	else
1640 		irdma_cqp_gather_stats_cmd(sc_vsi->dev, sc_vsi->pestat, false);
1641 
1642 	mod_timer(&pf_devstat->stats_timer,
1643 		  jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
1644 }
1645 
1646 /**
1647  * irdma_hw_stats_start_timer - Start periodic stats timer
1648  * @vsi: vsi structure pointer
1649  */
1650 void irdma_hw_stats_start_timer(struct irdma_sc_vsi *vsi)
1651 {
1652 	struct irdma_vsi_pestat *devstat = vsi->pestat;
1653 
1654 	timer_setup(&devstat->stats_timer, irdma_hw_stats_timeout, 0);
1655 	mod_timer(&devstat->stats_timer,
1656 		  jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
1657 }
1658 
1659 /**
1660  * irdma_hw_stats_stop_timer - Delete periodic stats timer
1661  * @vsi: pointer to vsi structure
1662  */
1663 void irdma_hw_stats_stop_timer(struct irdma_sc_vsi *vsi)
1664 {
1665 	struct irdma_vsi_pestat *devstat = vsi->pestat;
1666 
1667 	del_timer_sync(&devstat->stats_timer);
1668 }
1669 
1670 /**
1671  * irdma_process_stats - Checking for wrap and update stats
1672  * @pestat: stats structure pointer
1673  */
1674 static inline void irdma_process_stats(struct irdma_vsi_pestat *pestat)
1675 {
1676 	sc_vsi_update_stats(pestat->vsi);
1677 }
1678 
1679 /**
1680  * irdma_cqp_gather_stats_gen1 - Gather stats
1681  * @dev: pointer to device structure
1682  * @pestat: statistics structure
1683  */
1684 void irdma_cqp_gather_stats_gen1(struct irdma_sc_dev *dev,
1685 				 struct irdma_vsi_pestat *pestat)
1686 {
1687 	struct irdma_gather_stats *gather_stats =
1688 		pestat->gather_info.gather_stats_va;
1689 	u32 stats_inst_offset_32;
1690 	u32 stats_inst_offset_64;
1691 
1692 	stats_inst_offset_32 = (pestat->gather_info.use_stats_inst) ?
1693 				       pestat->gather_info.stats_inst_index :
1694 				       pestat->hw->hmc.hmc_fn_id;
1695 	stats_inst_offset_32 *= 4;
1696 	stats_inst_offset_64 = stats_inst_offset_32 * 2;
1697 
1698 	gather_stats->rxvlanerr =
1699 		rd32(dev->hw,
1700 		     dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_RXVLANERR]
1701 		     + stats_inst_offset_32);
1702 	gather_stats->ip4rxdiscard =
1703 		rd32(dev->hw,
1704 		     dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_IP4RXDISCARD]
1705 		     + stats_inst_offset_32);
1706 	gather_stats->ip4rxtrunc =
1707 		rd32(dev->hw,
1708 		     dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_IP4RXTRUNC]
1709 		     + stats_inst_offset_32);
1710 	gather_stats->ip4txnoroute =
1711 		rd32(dev->hw,
1712 		     dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_IP4TXNOROUTE]
1713 		     + stats_inst_offset_32);
1714 	gather_stats->ip6rxdiscard =
1715 		rd32(dev->hw,
1716 		     dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_IP6RXDISCARD]
1717 		     + stats_inst_offset_32);
1718 	gather_stats->ip6rxtrunc =
1719 		rd32(dev->hw,
1720 		     dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_IP6RXTRUNC]
1721 		     + stats_inst_offset_32);
1722 	gather_stats->ip6txnoroute =
1723 		rd32(dev->hw,
1724 		     dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_IP6TXNOROUTE]
1725 		     + stats_inst_offset_32);
1726 	gather_stats->tcprtxseg =
1727 		rd32(dev->hw,
1728 		     dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_TCPRTXSEG]
1729 		     + stats_inst_offset_32);
1730 	gather_stats->tcprxopterr =
1731 		rd32(dev->hw,
1732 		     dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_TCPRXOPTERR]
1733 		     + stats_inst_offset_32);
1734 
1735 	gather_stats->ip4rxocts =
1736 		rd64(dev->hw,
1737 		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4RXOCTS]
1738 		     + stats_inst_offset_64);
1739 	gather_stats->ip4rxpkts =
1740 		rd64(dev->hw,
1741 		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4RXPKTS]
1742 		     + stats_inst_offset_64);
1743 	gather_stats->ip4txfrag =
1744 		rd64(dev->hw,
1745 		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4RXFRAGS]
1746 		     + stats_inst_offset_64);
1747 	gather_stats->ip4rxmcpkts =
1748 		rd64(dev->hw,
1749 		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4RXMCPKTS]
1750 		     + stats_inst_offset_64);
1751 	gather_stats->ip4txocts =
1752 		rd64(dev->hw,
1753 		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4TXOCTS]
1754 		     + stats_inst_offset_64);
1755 	gather_stats->ip4txpkts =
1756 		rd64(dev->hw,
1757 		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4TXPKTS]
1758 		     + stats_inst_offset_64);
1759 	gather_stats->ip4txfrag =
1760 		rd64(dev->hw,
1761 		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4TXFRAGS]
1762 		     + stats_inst_offset_64);
1763 	gather_stats->ip4txmcpkts =
1764 		rd64(dev->hw,
1765 		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4TXMCPKTS]
1766 		     + stats_inst_offset_64);
1767 	gather_stats->ip6rxocts =
1768 		rd64(dev->hw,
1769 		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6RXOCTS]
1770 		     + stats_inst_offset_64);
1771 	gather_stats->ip6rxpkts =
1772 		rd64(dev->hw,
1773 		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6RXPKTS]
1774 		     + stats_inst_offset_64);
1775 	gather_stats->ip6txfrags =
1776 		rd64(dev->hw,
1777 		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6RXFRAGS]
1778 		     + stats_inst_offset_64);
1779 	gather_stats->ip6rxmcpkts =
1780 		rd64(dev->hw,
1781 		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6RXMCPKTS]
1782 		     + stats_inst_offset_64);
1783 	gather_stats->ip6txocts =
1784 		rd64(dev->hw,
1785 		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6TXOCTS]
1786 		     + stats_inst_offset_64);
1787 	gather_stats->ip6txpkts =
1788 		rd64(dev->hw,
1789 		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6TXPKTS]
1790 		     + stats_inst_offset_64);
1791 	gather_stats->ip6txfrags =
1792 		rd64(dev->hw,
1793 		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6TXFRAGS]
1794 		     + stats_inst_offset_64);
1795 	gather_stats->ip6txmcpkts =
1796 		rd64(dev->hw,
1797 		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6TXMCPKTS]
1798 		     + stats_inst_offset_64);
1799 	gather_stats->tcprxsegs =
1800 		rd64(dev->hw,
1801 		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_TCPRXSEGS]
1802 		     + stats_inst_offset_64);
1803 	gather_stats->tcptxsegs =
1804 		rd64(dev->hw,
1805 		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_TCPTXSEG]
1806 		     + stats_inst_offset_64);
1807 	gather_stats->rdmarxrds =
1808 		rd64(dev->hw,
1809 		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMARXRDS]
1810 		     + stats_inst_offset_64);
1811 	gather_stats->rdmarxsnds =
1812 		rd64(dev->hw,
1813 		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMARXSNDS]
1814 		     + stats_inst_offset_64);
1815 	gather_stats->rdmarxwrs =
1816 		rd64(dev->hw,
1817 		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMARXWRS]
1818 		     + stats_inst_offset_64);
1819 	gather_stats->rdmatxrds =
1820 		rd64(dev->hw,
1821 		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMATXRDS]
1822 		     + stats_inst_offset_64);
1823 	gather_stats->rdmatxsnds =
1824 		rd64(dev->hw,
1825 		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMATXSNDS]
1826 		     + stats_inst_offset_64);
1827 	gather_stats->rdmatxwrs =
1828 		rd64(dev->hw,
1829 		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMATXWRS]
1830 		     + stats_inst_offset_64);
1831 	gather_stats->rdmavbn =
1832 		rd64(dev->hw,
1833 		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMAVBND]
1834 		     + stats_inst_offset_64);
1835 	gather_stats->rdmavinv =
1836 		rd64(dev->hw,
1837 		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMAVINV]
1838 		     + stats_inst_offset_64);
1839 	gather_stats->udprxpkts =
1840 		rd64(dev->hw,
1841 		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_UDPRXPKTS]
1842 		     + stats_inst_offset_64);
1843 	gather_stats->udptxpkts =
1844 		rd64(dev->hw,
1845 		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_UDPTXPKTS]
1846 		     + stats_inst_offset_64);
1847 
1848 	irdma_process_stats(pestat);
1849 }
1850 
1851 /**
1852  * irdma_process_cqp_stats - Checking for wrap and update stats
1853  * @cqp_request: cqp_request structure pointer
1854  */
1855 static void irdma_process_cqp_stats(struct irdma_cqp_request *cqp_request)
1856 {
1857 	struct irdma_vsi_pestat *pestat = cqp_request->param;
1858 
1859 	irdma_process_stats(pestat);
1860 }
1861 
1862 /**
1863  * irdma_cqp_gather_stats_cmd - Gather stats
1864  * @dev: pointer to device structure
1865  * @pestat: pointer to stats info
1866  * @wait: flag to wait or not wait for stats
1867  */
1868 int irdma_cqp_gather_stats_cmd(struct irdma_sc_dev *dev,
1869 			       struct irdma_vsi_pestat *pestat, bool wait)
1870 
1871 {
1872 	struct irdma_pci_f *rf = dev_to_rf(dev);
1873 	struct irdma_cqp *iwcqp = &rf->cqp;
1874 	struct irdma_cqp_request *cqp_request;
1875 	struct cqp_cmds_info *cqp_info;
1876 	int status;
1877 
1878 	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait);
1879 	if (!cqp_request)
1880 		return -ENOMEM;
1881 
1882 	cqp_info = &cqp_request->info;
1883 	memset(cqp_info, 0, sizeof(*cqp_info));
1884 	cqp_info->cqp_cmd = IRDMA_OP_STATS_GATHER;
1885 	cqp_info->post_sq = 1;
1886 	cqp_info->in.u.stats_gather.info = pestat->gather_info;
1887 	cqp_info->in.u.stats_gather.scratch = (uintptr_t)cqp_request;
1888 	cqp_info->in.u.stats_gather.cqp = &rf->cqp.sc_cqp;
1889 	cqp_request->param = pestat;
1890 	if (!wait)
1891 		cqp_request->callback_fcn = irdma_process_cqp_stats;
1892 	status = irdma_handle_cqp_op(rf, cqp_request);
1893 	if (wait)
1894 		irdma_process_stats(pestat);
1895 	irdma_put_cqp_request(&rf->cqp, cqp_request);
1896 
1897 	return status;
1898 }
1899 
1900 /**
1901  * irdma_cqp_stats_inst_cmd - Allocate/free stats instance
1902  * @vsi: pointer to vsi structure
1903  * @cmd: command to allocate or free
1904  * @stats_info: pointer to allocate stats info
1905  */
1906 int irdma_cqp_stats_inst_cmd(struct irdma_sc_vsi *vsi, u8 cmd,
1907 			     struct irdma_stats_inst_info *stats_info)
1908 {
1909 	struct irdma_pci_f *rf = dev_to_rf(vsi->dev);
1910 	struct irdma_cqp *iwcqp = &rf->cqp;
1911 	struct irdma_cqp_request *cqp_request;
1912 	struct cqp_cmds_info *cqp_info;
1913 	int status;
1914 	bool wait = false;
1915 
1916 	if (cmd == IRDMA_OP_STATS_ALLOCATE)
1917 		wait = true;
1918 	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait);
1919 	if (!cqp_request)
1920 		return -ENOMEM;
1921 
1922 	cqp_info = &cqp_request->info;
1923 	memset(cqp_info, 0, sizeof(*cqp_info));
1924 	cqp_info->cqp_cmd = cmd;
1925 	cqp_info->post_sq = 1;
1926 	cqp_info->in.u.stats_manage.info = *stats_info;
1927 	cqp_info->in.u.stats_manage.scratch = (uintptr_t)cqp_request;
1928 	cqp_info->in.u.stats_manage.cqp = &rf->cqp.sc_cqp;
1929 	status = irdma_handle_cqp_op(rf, cqp_request);
1930 	if (wait)
1931 		stats_info->stats_idx = cqp_request->compl_info.op_ret_val;
1932 	irdma_put_cqp_request(iwcqp, cqp_request);
1933 
1934 	return status;
1935 }
1936 
1937 /**
1938  * irdma_cqp_ceq_cmd - Create/Destroy CEQ's after CEQ 0
1939  * @dev: pointer to device info
1940  * @sc_ceq: pointer to ceq structure
1941  * @op: Create or Destroy
1942  */
1943 int irdma_cqp_ceq_cmd(struct irdma_sc_dev *dev, struct irdma_sc_ceq *sc_ceq,
1944 		      u8 op)
1945 {
1946 	struct irdma_cqp_request *cqp_request;
1947 	struct cqp_cmds_info *cqp_info;
1948 	struct irdma_pci_f *rf = dev_to_rf(dev);
1949 	int status;
1950 
1951 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
1952 	if (!cqp_request)
1953 		return -ENOMEM;
1954 
1955 	cqp_info = &cqp_request->info;
1956 	cqp_info->post_sq = 1;
1957 	cqp_info->cqp_cmd = op;
1958 	cqp_info->in.u.ceq_create.ceq = sc_ceq;
1959 	cqp_info->in.u.ceq_create.scratch = (uintptr_t)cqp_request;
1960 
1961 	status = irdma_handle_cqp_op(rf, cqp_request);
1962 	irdma_put_cqp_request(&rf->cqp, cqp_request);
1963 
1964 	return status;
1965 }
1966 
1967 /**
1968  * irdma_cqp_aeq_cmd - Create/Destroy AEQ
1969  * @dev: pointer to device info
1970  * @sc_aeq: pointer to aeq structure
1971  * @op: Create or Destroy
1972  */
1973 int irdma_cqp_aeq_cmd(struct irdma_sc_dev *dev, struct irdma_sc_aeq *sc_aeq,
1974 		      u8 op)
1975 {
1976 	struct irdma_cqp_request *cqp_request;
1977 	struct cqp_cmds_info *cqp_info;
1978 	struct irdma_pci_f *rf = dev_to_rf(dev);
1979 	int status;
1980 
1981 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
1982 	if (!cqp_request)
1983 		return -ENOMEM;
1984 
1985 	cqp_info = &cqp_request->info;
1986 	cqp_info->post_sq = 1;
1987 	cqp_info->cqp_cmd = op;
1988 	cqp_info->in.u.aeq_create.aeq = sc_aeq;
1989 	cqp_info->in.u.aeq_create.scratch = (uintptr_t)cqp_request;
1990 
1991 	status = irdma_handle_cqp_op(rf, cqp_request);
1992 	irdma_put_cqp_request(&rf->cqp, cqp_request);
1993 
1994 	return status;
1995 }
1996 
1997 /**
1998  * irdma_cqp_ws_node_cmd - Add/modify/delete ws node
1999  * @dev: pointer to device structure
2000  * @cmd: Add, modify or delete
2001  * @node_info: pointer to ws node info
2002  */
2003 int irdma_cqp_ws_node_cmd(struct irdma_sc_dev *dev, u8 cmd,
2004 			  struct irdma_ws_node_info *node_info)
2005 {
2006 	struct irdma_pci_f *rf = dev_to_rf(dev);
2007 	struct irdma_cqp *iwcqp = &rf->cqp;
2008 	struct irdma_sc_cqp *cqp = &iwcqp->sc_cqp;
2009 	struct irdma_cqp_request *cqp_request;
2010 	struct cqp_cmds_info *cqp_info;
2011 	int status;
2012 	bool poll;
2013 
2014 	if (!rf->sc_dev.ceq_valid)
2015 		poll = true;
2016 	else
2017 		poll = false;
2018 
2019 	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, !poll);
2020 	if (!cqp_request)
2021 		return -ENOMEM;
2022 
2023 	cqp_info = &cqp_request->info;
2024 	memset(cqp_info, 0, sizeof(*cqp_info));
2025 	cqp_info->cqp_cmd = cmd;
2026 	cqp_info->post_sq = 1;
2027 	cqp_info->in.u.ws_node.info = *node_info;
2028 	cqp_info->in.u.ws_node.cqp = cqp;
2029 	cqp_info->in.u.ws_node.scratch = (uintptr_t)cqp_request;
2030 	status = irdma_handle_cqp_op(rf, cqp_request);
2031 	if (status)
2032 		goto exit;
2033 
2034 	if (poll) {
2035 		struct irdma_ccq_cqe_info compl_info;
2036 
2037 		status = irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_WORK_SCHED_NODE,
2038 						       &compl_info);
2039 		node_info->qs_handle = compl_info.op_ret_val;
2040 		ibdev_dbg(&rf->iwdev->ibdev, "DCB: opcode=%d, compl_info.retval=%d\n",
2041 			  compl_info.op_code, compl_info.op_ret_val);
2042 	} else {
2043 		node_info->qs_handle = cqp_request->compl_info.op_ret_val;
2044 	}
2045 
2046 exit:
2047 	irdma_put_cqp_request(&rf->cqp, cqp_request);
2048 
2049 	return status;
2050 }
2051 
2052 /**
2053  * irdma_ah_cqp_op - perform an AH cqp operation
2054  * @rf: RDMA PCI function
2055  * @sc_ah: address handle
2056  * @cmd: AH operation
2057  * @wait: wait if true
2058  * @callback_fcn: Callback function on CQP op completion
2059  * @cb_param: parameter for callback function
2060  *
2061  * returns errno
2062  */
2063 int irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd,
2064 		    bool wait,
2065 		    void (*callback_fcn)(struct irdma_cqp_request *),
2066 		    void *cb_param)
2067 {
2068 	struct irdma_cqp_request *cqp_request;
2069 	struct cqp_cmds_info *cqp_info;
2070 	int status;
2071 
2072 	if (cmd != IRDMA_OP_AH_CREATE && cmd != IRDMA_OP_AH_DESTROY)
2073 		return -EINVAL;
2074 
2075 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
2076 	if (!cqp_request)
2077 		return -ENOMEM;
2078 
2079 	cqp_info = &cqp_request->info;
2080 	cqp_info->cqp_cmd = cmd;
2081 	cqp_info->post_sq = 1;
2082 	if (cmd == IRDMA_OP_AH_CREATE) {
2083 		cqp_info->in.u.ah_create.info = sc_ah->ah_info;
2084 		cqp_info->in.u.ah_create.scratch = (uintptr_t)cqp_request;
2085 		cqp_info->in.u.ah_create.cqp = &rf->cqp.sc_cqp;
2086 	} else if (cmd == IRDMA_OP_AH_DESTROY) {
2087 		cqp_info->in.u.ah_destroy.info = sc_ah->ah_info;
2088 		cqp_info->in.u.ah_destroy.scratch = (uintptr_t)cqp_request;
2089 		cqp_info->in.u.ah_destroy.cqp = &rf->cqp.sc_cqp;
2090 	}
2091 
2092 	if (!wait) {
2093 		cqp_request->callback_fcn = callback_fcn;
2094 		cqp_request->param = cb_param;
2095 	}
2096 	status = irdma_handle_cqp_op(rf, cqp_request);
2097 	irdma_put_cqp_request(&rf->cqp, cqp_request);
2098 
2099 	if (status)
2100 		return -ENOMEM;
2101 
2102 	if (wait)
2103 		sc_ah->ah_info.ah_valid = (cmd == IRDMA_OP_AH_CREATE);
2104 
2105 	return 0;
2106 }
2107 
2108 /**
2109  * irdma_ieq_ah_cb - callback after creation of AH for IEQ
2110  * @cqp_request: pointer to cqp_request of create AH
2111  */
2112 static void irdma_ieq_ah_cb(struct irdma_cqp_request *cqp_request)
2113 {
2114 	struct irdma_sc_qp *qp = cqp_request->param;
2115 	struct irdma_sc_ah *sc_ah = qp->pfpdu.ah;
2116 	unsigned long flags;
2117 
2118 	spin_lock_irqsave(&qp->pfpdu.lock, flags);
2119 	if (!cqp_request->compl_info.op_ret_val) {
2120 		sc_ah->ah_info.ah_valid = true;
2121 		irdma_ieq_process_fpdus(qp, qp->vsi->ieq);
2122 	} else {
2123 		sc_ah->ah_info.ah_valid = false;
2124 		irdma_ieq_cleanup_qp(qp->vsi->ieq, qp);
2125 	}
2126 	spin_unlock_irqrestore(&qp->pfpdu.lock, flags);
2127 }
2128 
2129 /**
2130  * irdma_ilq_ah_cb - callback after creation of AH for ILQ
2131  * @cqp_request: pointer to cqp_request of create AH
2132  */
2133 static void irdma_ilq_ah_cb(struct irdma_cqp_request *cqp_request)
2134 {
2135 	struct irdma_cm_node *cm_node = cqp_request->param;
2136 	struct irdma_sc_ah *sc_ah = cm_node->ah;
2137 
2138 	sc_ah->ah_info.ah_valid = !cqp_request->compl_info.op_ret_val;
2139 	irdma_add_conn_est_qh(cm_node);
2140 }
2141 
2142 /**
2143  * irdma_puda_create_ah - create AH for ILQ/IEQ qp's
2144  * @dev: device pointer
2145  * @ah_info: Address handle info
2146  * @wait: When true will wait for operation to complete
2147  * @type: ILQ/IEQ
2148  * @cb_param: Callback param when not waiting
2149  * @ah_ret: Returned pointer to address handle if created
2150  *
2151  */
2152 int irdma_puda_create_ah(struct irdma_sc_dev *dev,
2153 			 struct irdma_ah_info *ah_info, bool wait,
2154 			 enum puda_rsrc_type type, void *cb_param,
2155 			 struct irdma_sc_ah **ah_ret)
2156 {
2157 	struct irdma_sc_ah *ah;
2158 	struct irdma_pci_f *rf = dev_to_rf(dev);
2159 	int err;
2160 
2161 	ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
2162 	*ah_ret = ah;
2163 	if (!ah)
2164 		return -ENOMEM;
2165 
2166 	err = irdma_alloc_rsrc(rf, rf->allocated_ahs, rf->max_ah,
2167 			       &ah_info->ah_idx, &rf->next_ah);
2168 	if (err)
2169 		goto err_free;
2170 
2171 	ah->dev = dev;
2172 	ah->ah_info = *ah_info;
2173 
2174 	if (type == IRDMA_PUDA_RSRC_TYPE_ILQ)
2175 		err = irdma_ah_cqp_op(rf, ah, IRDMA_OP_AH_CREATE, wait,
2176 				      irdma_ilq_ah_cb, cb_param);
2177 	else
2178 		err = irdma_ah_cqp_op(rf, ah, IRDMA_OP_AH_CREATE, wait,
2179 				      irdma_ieq_ah_cb, cb_param);
2180 
2181 	if (err)
2182 		goto error;
2183 	return 0;
2184 
2185 error:
2186 	irdma_free_rsrc(rf, rf->allocated_ahs, ah->ah_info.ah_idx);
2187 err_free:
2188 	kfree(ah);
2189 	*ah_ret = NULL;
2190 	return -ENOMEM;
2191 }
2192 
2193 /**
2194  * irdma_puda_free_ah - free a puda address handle
2195  * @dev: device pointer
2196  * @ah: The address handle to free
2197  */
2198 void irdma_puda_free_ah(struct irdma_sc_dev *dev, struct irdma_sc_ah *ah)
2199 {
2200 	struct irdma_pci_f *rf = dev_to_rf(dev);
2201 
2202 	if (!ah)
2203 		return;
2204 
2205 	if (ah->ah_info.ah_valid) {
2206 		irdma_ah_cqp_op(rf, ah, IRDMA_OP_AH_DESTROY, false, NULL, NULL);
2207 		irdma_free_rsrc(rf, rf->allocated_ahs, ah->ah_info.ah_idx);
2208 	}
2209 
2210 	kfree(ah);
2211 }
2212 
2213 /**
2214  * irdma_gsi_ud_qp_ah_cb - callback after creation of AH for GSI/ID QP
2215  * @cqp_request: pointer to cqp_request of create AH
2216  */
2217 void irdma_gsi_ud_qp_ah_cb(struct irdma_cqp_request *cqp_request)
2218 {
2219 	struct irdma_sc_ah *sc_ah = cqp_request->param;
2220 
2221 	if (!cqp_request->compl_info.op_ret_val)
2222 		sc_ah->ah_info.ah_valid = true;
2223 	else
2224 		sc_ah->ah_info.ah_valid = false;
2225 }
2226 
2227 /**
2228  * irdma_prm_add_pble_mem - add moemory to pble resources
2229  * @pprm: pble resource manager
2230  * @pchunk: chunk of memory to add
2231  */
2232 int irdma_prm_add_pble_mem(struct irdma_pble_prm *pprm,
2233 			   struct irdma_chunk *pchunk)
2234 {
2235 	u64 sizeofbitmap;
2236 
2237 	if (pchunk->size & 0xfff)
2238 		return -EINVAL;
2239 
2240 	sizeofbitmap = (u64)pchunk->size >> pprm->pble_shift;
2241 
2242 	pchunk->bitmapbuf = bitmap_zalloc(sizeofbitmap, GFP_KERNEL);
2243 	if (!pchunk->bitmapbuf)
2244 		return -ENOMEM;
2245 
2246 	pchunk->sizeofbitmap = sizeofbitmap;
2247 	/* each pble is 8 bytes hence shift by 3 */
2248 	pprm->total_pble_alloc += pchunk->size >> 3;
2249 	pprm->free_pble_cnt += pchunk->size >> 3;
2250 
2251 	return 0;
2252 }
2253 
2254 /**
2255  * irdma_prm_get_pbles - get pble's from prm
2256  * @pprm: pble resource manager
2257  * @chunkinfo: nformation about chunk where pble's were acquired
2258  * @mem_size: size of pble memory needed
2259  * @vaddr: returns virtual address of pble memory
2260  * @fpm_addr: returns fpm address of pble memory
2261  */
2262 int irdma_prm_get_pbles(struct irdma_pble_prm *pprm,
2263 			struct irdma_pble_chunkinfo *chunkinfo, u64 mem_size,
2264 			u64 **vaddr, u64 *fpm_addr)
2265 {
2266 	u64 bits_needed;
2267 	u64 bit_idx = PBLE_INVALID_IDX;
2268 	struct irdma_chunk *pchunk = NULL;
2269 	struct list_head *chunk_entry = pprm->clist.next;
2270 	u32 offset;
2271 	unsigned long flags;
2272 	*vaddr = NULL;
2273 	*fpm_addr = 0;
2274 
2275 	bits_needed = DIV_ROUND_UP_ULL(mem_size, BIT_ULL(pprm->pble_shift));
2276 
2277 	spin_lock_irqsave(&pprm->prm_lock, flags);
2278 	while (chunk_entry != &pprm->clist) {
2279 		pchunk = (struct irdma_chunk *)chunk_entry;
2280 		bit_idx = bitmap_find_next_zero_area(pchunk->bitmapbuf,
2281 						     pchunk->sizeofbitmap, 0,
2282 						     bits_needed, 0);
2283 		if (bit_idx < pchunk->sizeofbitmap)
2284 			break;
2285 
2286 		/* list.next used macro */
2287 		chunk_entry = pchunk->list.next;
2288 	}
2289 
2290 	if (!pchunk || bit_idx >= pchunk->sizeofbitmap) {
2291 		spin_unlock_irqrestore(&pprm->prm_lock, flags);
2292 		return -ENOMEM;
2293 	}
2294 
2295 	bitmap_set(pchunk->bitmapbuf, bit_idx, bits_needed);
2296 	offset = bit_idx << pprm->pble_shift;
2297 	*vaddr = pchunk->vaddr + offset;
2298 	*fpm_addr = pchunk->fpm_addr + offset;
2299 
2300 	chunkinfo->pchunk = pchunk;
2301 	chunkinfo->bit_idx = bit_idx;
2302 	chunkinfo->bits_used = bits_needed;
2303 	/* 3 is sizeof pble divide */
2304 	pprm->free_pble_cnt -= chunkinfo->bits_used << (pprm->pble_shift - 3);
2305 	spin_unlock_irqrestore(&pprm->prm_lock, flags);
2306 
2307 	return 0;
2308 }
2309 
2310 /**
2311  * irdma_prm_return_pbles - return pbles back to prm
2312  * @pprm: pble resource manager
2313  * @chunkinfo: chunk where pble's were acquired and to be freed
2314  */
2315 void irdma_prm_return_pbles(struct irdma_pble_prm *pprm,
2316 			    struct irdma_pble_chunkinfo *chunkinfo)
2317 {
2318 	unsigned long flags;
2319 
2320 	spin_lock_irqsave(&pprm->prm_lock, flags);
2321 	pprm->free_pble_cnt += chunkinfo->bits_used << (pprm->pble_shift - 3);
2322 	bitmap_clear(chunkinfo->pchunk->bitmapbuf, chunkinfo->bit_idx,
2323 		     chunkinfo->bits_used);
2324 	spin_unlock_irqrestore(&pprm->prm_lock, flags);
2325 }
2326 
2327 int irdma_map_vm_page_list(struct irdma_hw *hw, void *va, dma_addr_t *pg_dma,
2328 			   u32 pg_cnt)
2329 {
2330 	struct page *vm_page;
2331 	int i;
2332 	u8 *addr;
2333 
2334 	addr = (u8 *)(uintptr_t)va;
2335 	for (i = 0; i < pg_cnt; i++) {
2336 		vm_page = vmalloc_to_page(addr);
2337 		if (!vm_page)
2338 			goto err;
2339 
2340 		pg_dma[i] = dma_map_page(hw->device, vm_page, 0, PAGE_SIZE,
2341 					 DMA_BIDIRECTIONAL);
2342 		if (dma_mapping_error(hw->device, pg_dma[i]))
2343 			goto err;
2344 
2345 		addr += PAGE_SIZE;
2346 	}
2347 
2348 	return 0;
2349 
2350 err:
2351 	irdma_unmap_vm_page_list(hw, pg_dma, i);
2352 	return -ENOMEM;
2353 }
2354 
2355 void irdma_unmap_vm_page_list(struct irdma_hw *hw, dma_addr_t *pg_dma, u32 pg_cnt)
2356 {
2357 	int i;
2358 
2359 	for (i = 0; i < pg_cnt; i++)
2360 		dma_unmap_page(hw->device, pg_dma[i], PAGE_SIZE, DMA_BIDIRECTIONAL);
2361 }
2362 
2363 /**
2364  * irdma_pble_free_paged_mem - free virtual paged memory
2365  * @chunk: chunk to free with paged memory
2366  */
2367 void irdma_pble_free_paged_mem(struct irdma_chunk *chunk)
2368 {
2369 	if (!chunk->pg_cnt)
2370 		goto done;
2371 
2372 	irdma_unmap_vm_page_list(chunk->dev->hw, chunk->dmainfo.dmaaddrs,
2373 				 chunk->pg_cnt);
2374 
2375 done:
2376 	kfree(chunk->dmainfo.dmaaddrs);
2377 	chunk->dmainfo.dmaaddrs = NULL;
2378 	vfree(chunk->vaddr);
2379 	chunk->vaddr = NULL;
2380 	chunk->type = 0;
2381 }
2382 
2383 /**
2384  * irdma_pble_get_paged_mem -allocate paged memory for pbles
2385  * @chunk: chunk to add for paged memory
2386  * @pg_cnt: number of pages needed
2387  */
2388 int irdma_pble_get_paged_mem(struct irdma_chunk *chunk, u32 pg_cnt)
2389 {
2390 	u32 size;
2391 	void *va;
2392 
2393 	chunk->dmainfo.dmaaddrs = kzalloc(pg_cnt << 3, GFP_KERNEL);
2394 	if (!chunk->dmainfo.dmaaddrs)
2395 		return -ENOMEM;
2396 
2397 	size = PAGE_SIZE * pg_cnt;
2398 	va = vmalloc(size);
2399 	if (!va)
2400 		goto err;
2401 
2402 	if (irdma_map_vm_page_list(chunk->dev->hw, va, chunk->dmainfo.dmaaddrs,
2403 				   pg_cnt)) {
2404 		vfree(va);
2405 		goto err;
2406 	}
2407 	chunk->vaddr = va;
2408 	chunk->size = size;
2409 	chunk->pg_cnt = pg_cnt;
2410 	chunk->type = PBLE_SD_PAGED;
2411 
2412 	return 0;
2413 err:
2414 	kfree(chunk->dmainfo.dmaaddrs);
2415 	chunk->dmainfo.dmaaddrs = NULL;
2416 
2417 	return -ENOMEM;
2418 }
2419 
2420 /**
2421  * irdma_alloc_ws_node_id - Allocate a tx scheduler node ID
2422  * @dev: device pointer
2423  */
2424 u16 irdma_alloc_ws_node_id(struct irdma_sc_dev *dev)
2425 {
2426 	struct irdma_pci_f *rf = dev_to_rf(dev);
2427 	u32 next = 1;
2428 	u32 node_id;
2429 
2430 	if (irdma_alloc_rsrc(rf, rf->allocated_ws_nodes, rf->max_ws_node_id,
2431 			     &node_id, &next))
2432 		return IRDMA_WS_NODE_INVALID;
2433 
2434 	return (u16)node_id;
2435 }
2436 
2437 /**
2438  * irdma_free_ws_node_id - Free a tx scheduler node ID
2439  * @dev: device pointer
2440  * @node_id: Work scheduler node ID
2441  */
2442 void irdma_free_ws_node_id(struct irdma_sc_dev *dev, u16 node_id)
2443 {
2444 	struct irdma_pci_f *rf = dev_to_rf(dev);
2445 
2446 	irdma_free_rsrc(rf, rf->allocated_ws_nodes, (u32)node_id);
2447 }
2448 
2449 /**
2450  * irdma_modify_qp_to_err - Modify a QP to error
2451  * @sc_qp: qp structure
2452  */
2453 void irdma_modify_qp_to_err(struct irdma_sc_qp *sc_qp)
2454 {
2455 	struct irdma_qp *qp = sc_qp->qp_uk.back_qp;
2456 	struct ib_qp_attr attr;
2457 
2458 	if (qp->iwdev->rf->reset)
2459 		return;
2460 	attr.qp_state = IB_QPS_ERR;
2461 
2462 	if (rdma_protocol_roce(qp->ibqp.device, 1))
2463 		irdma_modify_qp_roce(&qp->ibqp, &attr, IB_QP_STATE, NULL);
2464 	else
2465 		irdma_modify_qp(&qp->ibqp, &attr, IB_QP_STATE, NULL);
2466 }
2467 
2468 void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event)
2469 {
2470 	struct ib_event ibevent;
2471 
2472 	if (!iwqp->ibqp.event_handler)
2473 		return;
2474 
2475 	switch (event) {
2476 	case IRDMA_QP_EVENT_CATASTROPHIC:
2477 		ibevent.event = IB_EVENT_QP_FATAL;
2478 		break;
2479 	case IRDMA_QP_EVENT_ACCESS_ERR:
2480 		ibevent.event = IB_EVENT_QP_ACCESS_ERR;
2481 		break;
2482 	}
2483 	ibevent.device = iwqp->ibqp.device;
2484 	ibevent.element.qp = &iwqp->ibqp;
2485 	iwqp->ibqp.event_handler(&ibevent, iwqp->ibqp.qp_context);
2486 }
2487 
2488 bool irdma_cq_empty(struct irdma_cq *iwcq)
2489 {
2490 	struct irdma_cq_uk *ukcq;
2491 	u64 qword3;
2492 	__le64 *cqe;
2493 	u8 polarity;
2494 
2495 	ukcq  = &iwcq->sc_cq.cq_uk;
2496 	cqe = IRDMA_GET_CURRENT_CQ_ELEM(ukcq);
2497 	get_64bit_val(cqe, 24, &qword3);
2498 	polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
2499 
2500 	return polarity != ukcq->polarity;
2501 }
2502 
2503 void irdma_remove_cmpls_list(struct irdma_cq *iwcq)
2504 {
2505 	struct irdma_cmpl_gen *cmpl_node;
2506 	struct list_head *tmp_node, *list_node;
2507 
2508 	list_for_each_safe (list_node, tmp_node, &iwcq->cmpl_generated) {
2509 		cmpl_node = list_entry(list_node, struct irdma_cmpl_gen, list);
2510 		list_del(&cmpl_node->list);
2511 		kfree(cmpl_node);
2512 	}
2513 }
2514 
2515 int irdma_generated_cmpls(struct irdma_cq *iwcq, struct irdma_cq_poll_info *cq_poll_info)
2516 {
2517 	struct irdma_cmpl_gen *cmpl;
2518 
2519 	if (list_empty(&iwcq->cmpl_generated))
2520 		return -ENOENT;
2521 	cmpl = list_first_entry_or_null(&iwcq->cmpl_generated, struct irdma_cmpl_gen, list);
2522 	list_del(&cmpl->list);
2523 	memcpy(cq_poll_info, &cmpl->cpi, sizeof(*cq_poll_info));
2524 	kfree(cmpl);
2525 
2526 	ibdev_dbg(iwcq->ibcq.device,
2527 		  "VERBS: %s: Poll artificially generated completion for QP 0x%X, op %u, wr_id=0x%llx\n",
2528 		  __func__, cq_poll_info->qp_id, cq_poll_info->op_type,
2529 		  cq_poll_info->wr_id);
2530 
2531 	return 0;
2532 }
2533 
2534 /**
2535  * irdma_set_cpi_common_values - fill in values for polling info struct
2536  * @cpi: resulting structure of cq_poll_info type
2537  * @qp: QPair
2538  * @qp_num: id of the QP
2539  */
2540 static void irdma_set_cpi_common_values(struct irdma_cq_poll_info *cpi,
2541 					struct irdma_qp_uk *qp, u32 qp_num)
2542 {
2543 	cpi->comp_status = IRDMA_COMPL_STATUS_FLUSHED;
2544 	cpi->error = true;
2545 	cpi->major_err = IRDMA_FLUSH_MAJOR_ERR;
2546 	cpi->minor_err = FLUSH_GENERAL_ERR;
2547 	cpi->qp_handle = (irdma_qp_handle)(uintptr_t)qp;
2548 	cpi->qp_id = qp_num;
2549 }
2550 
2551 static inline void irdma_comp_handler(struct irdma_cq *cq)
2552 {
2553 	if (!cq->ibcq.comp_handler)
2554 		return;
2555 	if (atomic_cmpxchg(&cq->armed, 1, 0))
2556 		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
2557 }
2558 
2559 void irdma_generate_flush_completions(struct irdma_qp *iwqp)
2560 {
2561 	struct irdma_qp_uk *qp = &iwqp->sc_qp.qp_uk;
2562 	struct irdma_ring *sq_ring = &qp->sq_ring;
2563 	struct irdma_ring *rq_ring = &qp->rq_ring;
2564 	struct irdma_cmpl_gen *cmpl;
2565 	__le64 *sw_wqe;
2566 	u64 wqe_qword;
2567 	u32 wqe_idx;
2568 	bool compl_generated = false;
2569 	unsigned long flags1;
2570 
2571 	spin_lock_irqsave(&iwqp->iwscq->lock, flags1);
2572 	if (irdma_cq_empty(iwqp->iwscq)) {
2573 		unsigned long flags2;
2574 
2575 		spin_lock_irqsave(&iwqp->lock, flags2);
2576 		while (IRDMA_RING_MORE_WORK(*sq_ring)) {
2577 			cmpl = kzalloc(sizeof(*cmpl), GFP_ATOMIC);
2578 			if (!cmpl) {
2579 				spin_unlock_irqrestore(&iwqp->lock, flags2);
2580 				spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
2581 				return;
2582 			}
2583 
2584 			wqe_idx = sq_ring->tail;
2585 			irdma_set_cpi_common_values(&cmpl->cpi, qp, qp->qp_id);
2586 
2587 			cmpl->cpi.wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
2588 			sw_wqe = qp->sq_base[wqe_idx].elem;
2589 			get_64bit_val(sw_wqe, 24, &wqe_qword);
2590 			cmpl->cpi.op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE, IRDMAQPSQ_OPCODE);
2591 			/* remove the SQ WR by moving SQ tail*/
2592 			IRDMA_RING_SET_TAIL(*sq_ring,
2593 				sq_ring->tail + qp->sq_wrtrk_array[sq_ring->tail].quanta);
2594 
2595 			ibdev_dbg(iwqp->iwscq->ibcq.device,
2596 				  "DEV: %s: adding wr_id = 0x%llx SQ Completion to list qp_id=%d\n",
2597 				  __func__, cmpl->cpi.wr_id, qp->qp_id);
2598 			list_add_tail(&cmpl->list, &iwqp->iwscq->cmpl_generated);
2599 			compl_generated = true;
2600 		}
2601 		spin_unlock_irqrestore(&iwqp->lock, flags2);
2602 		spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
2603 		if (compl_generated)
2604 			irdma_comp_handler(iwqp->iwscq);
2605 	} else {
2606 		spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
2607 		mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
2608 				 msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
2609 	}
2610 
2611 	spin_lock_irqsave(&iwqp->iwrcq->lock, flags1);
2612 	if (irdma_cq_empty(iwqp->iwrcq)) {
2613 		unsigned long flags2;
2614 
2615 		spin_lock_irqsave(&iwqp->lock, flags2);
2616 		while (IRDMA_RING_MORE_WORK(*rq_ring)) {
2617 			cmpl = kzalloc(sizeof(*cmpl), GFP_ATOMIC);
2618 			if (!cmpl) {
2619 				spin_unlock_irqrestore(&iwqp->lock, flags2);
2620 				spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1);
2621 				return;
2622 			}
2623 
2624 			wqe_idx = rq_ring->tail;
2625 			irdma_set_cpi_common_values(&cmpl->cpi, qp, qp->qp_id);
2626 
2627 			cmpl->cpi.wr_id = qp->rq_wrid_array[wqe_idx];
2628 			cmpl->cpi.op_type = IRDMA_OP_TYPE_REC;
2629 			/* remove the RQ WR by moving RQ tail */
2630 			IRDMA_RING_SET_TAIL(*rq_ring, rq_ring->tail + 1);
2631 			ibdev_dbg(iwqp->iwrcq->ibcq.device,
2632 				  "DEV: %s: adding wr_id = 0x%llx RQ Completion to list qp_id=%d, wqe_idx=%d\n",
2633 				  __func__, cmpl->cpi.wr_id, qp->qp_id,
2634 				  wqe_idx);
2635 			list_add_tail(&cmpl->list, &iwqp->iwrcq->cmpl_generated);
2636 
2637 			compl_generated = true;
2638 		}
2639 		spin_unlock_irqrestore(&iwqp->lock, flags2);
2640 		spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1);
2641 		if (compl_generated)
2642 			irdma_comp_handler(iwqp->iwrcq);
2643 	} else {
2644 		spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1);
2645 		mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
2646 				 msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
2647 	}
2648 }
2649