// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/qed/qede_rdma.h>
#include "qede.h"

static struct qedr_driver *qedr_drv;
static LIST_HEAD(qedr_dev_list);
static DEFINE_MUTEX(qedr_dev_list_lock);

bool qede_rdma_supported(struct qede_dev *dev)
{
	return dev->dev_info.common.rdma_supported;
}

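/* Hand this qede device to the registered qedr driver through its ->add()
 * callback. Callers hold qedr_dev_list_lock.
 */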
static void _qede_rdma_dev_add(struct qede_dev *edev)
{
	if (!qedr_drv)
		return;

	/* Leftovers from previous error recovery */
	edev->rdma_info.exp_recovery = false;
	edev->rdma_info.qedr_dev = qedr_drv->add(edev->cdev, edev->pdev,
						 edev->ndev);
}

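/* Set up the deferred-event machinery: the event list, the refcount and
 * completion used to synchronize teardown, and the single-threaded
 * workqueue that runs the event handlers.
 */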
static int qede_rdma_create_wq(struct qede_dev *edev)
{
	INIT_LIST_HEAD(&edev->rdma_info.rdma_event_list);
	kref_init(&edev->rdma_info.refcnt);
	init_completion(&edev->rdma_info.event_comp);

	edev->rdma_info.rdma_wq = create_singlethread_workqueue("rdma_wq");
	if (!edev->rdma_info.rdma_wq) {
		DP_NOTICE(edev, "qedr: Could not create workqueue\n");
		return -ENOMEM;
	}

	return 0;
}

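/* Flush the workqueue and free any event nodes left on the list. */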
static void qede_rdma_cleanup_event(struct qede_dev *edev)
{
	struct list_head *head = &edev->rdma_info.rdma_event_list;
	struct qede_rdma_event_work *event_node;

	flush_workqueue(edev->rdma_info.rdma_wq);
	while (!list_empty(head)) {
		event_node = list_entry(head->next, struct qede_rdma_event_work,
					list);
		cancel_work_sync(&event_node->work);
		list_del(&event_node->list);
		kfree(event_node);
	}
}

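/* kref release callback - signals that no further events can be queued */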
static void qede_rdma_complete_event(struct kref *ref)
{
	struct qede_rdma_dev *rdma_dev =
		container_of(ref, struct qede_rdma_dev, refcnt);

	/* no more events will be added after this */
	complete(&rdma_dev->event_comp);
}

static void qede_rdma_destroy_wq(struct qede_dev *edev)
{
	/* Avoid race with add_event flow, make sure it finishes before
	 * we start accessing the list and cleaning up the work
	 */
	kref_put(&edev->rdma_info.refcnt, qede_rdma_complete_event);
	wait_for_completion(&edev->rdma_info.event_comp);

	qede_rdma_cleanup_event(edev);
	destroy_workqueue(edev->rdma_info.rdma_wq);
	edev->rdma_info.rdma_wq = NULL;
}

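/* Probe-time entry point: create the event workqueue, add the device to the
 * global list and, if qedr is already loaded, register with it right away.
 */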
int qede_rdma_dev_add(struct qede_dev *edev, bool recovery)
{
	int rc;

	if (!qede_rdma_supported(edev))
		return 0;

	/* Cannot start qedr while recovering since it wasn't fully stopped */
	if (recovery)
		return 0;

	rc = qede_rdma_create_wq(edev);
	if (rc)
		return rc;

	INIT_LIST_HEAD(&edev->rdma_info.entry);
	mutex_lock(&qedr_dev_list_lock);
	list_add_tail(&edev->rdma_info.entry, &qedr_dev_list);
	_qede_rdma_dev_add(edev);
	mutex_unlock(&qedr_dev_list_lock);

	return rc;
}

static void _qede_rdma_dev_remove(struct qede_dev *edev)
{
	if (qedr_drv && qedr_drv->remove && edev->rdma_info.qedr_dev)
		qedr_drv->remove(edev->rdma_info.qedr_dev);
}

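/* Remove-time counterpart of qede_rdma_dev_add(). On a regular removal the
 * workqueue is destroyed and the device leaves the global list; on error
 * recovery only the qedr ->remove() callback is invoked (at most once) and
 * the device is marked as being in expected recovery.
 */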
void qede_rdma_dev_remove(struct qede_dev *edev, bool recovery)
{
	if (!qede_rdma_supported(edev))
		return;

	/* Cannot remove qedr while recovering since it wasn't fully stopped */
	if (!recovery) {
		qede_rdma_destroy_wq(edev);
		mutex_lock(&qedr_dev_list_lock);
		if (!edev->rdma_info.exp_recovery)
			_qede_rdma_dev_remove(edev);
		edev->rdma_info.qedr_dev = NULL;
		list_del(&edev->rdma_info.entry);
		mutex_unlock(&qedr_dev_list_lock);
	} else {
		if (!edev->rdma_info.exp_recovery) {
			mutex_lock(&qedr_dev_list_lock);
			_qede_rdma_dev_remove(edev);
			mutex_unlock(&qedr_dev_list_lock);
		}
		edev->rdma_info.exp_recovery = true;
	}
}

static void _qede_rdma_dev_open(struct qede_dev *edev)
{
	if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_UP);
}

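/* qede_rdma_dev_open/close/shutdown() forward link-state transitions to qedr
 * through its ->notify() callback, under qedr_dev_list_lock.
 */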
static void qede_rdma_dev_open(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	mutex_lock(&qedr_dev_list_lock);
	_qede_rdma_dev_open(edev);
	mutex_unlock(&qedr_dev_list_lock);
}

static void _qede_rdma_dev_close(struct qede_dev *edev)
{
	if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_DOWN);
}

static void qede_rdma_dev_close(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	mutex_lock(&qedr_dev_list_lock);
	_qede_rdma_dev_close(edev);
	mutex_unlock(&qedr_dev_list_lock);
}

static void qede_rdma_dev_shutdown(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	mutex_lock(&qedr_dev_list_lock);
	if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CLOSE);
	mutex_unlock(&qedr_dev_list_lock);
}

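/* Called by the qedr module when it loads: record the driver and attach it
 * to every qede device discovered so far, opening those whose netdev is up.
 */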
int qede_rdma_register_driver(struct qedr_driver *drv)
{
	struct qede_dev *edev;
	u8 qedr_counter = 0;

	mutex_lock(&qedr_dev_list_lock);
	if (qedr_drv) {
		mutex_unlock(&qedr_dev_list_lock);
		return -EINVAL;
	}
	qedr_drv = drv;

	list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
		struct net_device *ndev;

		qedr_counter++;
		_qede_rdma_dev_add(edev);
		ndev = edev->ndev;
		if (netif_running(ndev) && netif_oper_up(ndev))
			_qede_rdma_dev_open(edev);
	}
	mutex_unlock(&qedr_dev_list_lock);

	pr_notice("qedr: discovered and registered %d RDMA funcs\n",
		  qedr_counter);

	return 0;
}
EXPORT_SYMBOL(qede_rdma_register_driver);

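/* Called by the qedr module on unload: detach it from all qede devices that
 * still hold a qedr instance and have not gone through recovery.
 */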
void qede_rdma_unregister_driver(struct qedr_driver *drv)
{
	struct qede_dev *edev;

	mutex_lock(&qedr_dev_list_lock);
	list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
		/* If device has experienced recovery it was already removed */
		if (edev->rdma_info.qedr_dev && !edev->rdma_info.exp_recovery)
			_qede_rdma_dev_remove(edev);
	}
	qedr_drv = NULL;
	mutex_unlock(&qedr_dev_list_lock);
}
EXPORT_SYMBOL(qede_rdma_unregister_driver);

static void qede_rdma_changeaddr(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CHANGE_ADDR);
}

static void qede_rdma_change_mtu(struct qede_dev *edev)
{
	if (qede_rdma_supported(edev)) {
		if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
			qedr_drv->notify(edev->rdma_info.qedr_dev,
					 QEDE_CHANGE_MTU);
	}
}

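/* Reuse an idle node from the event list if one exists; otherwise allocate a
 * new one with GFP_ATOMIC and add it to the list.
 */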
static struct qede_rdma_event_work *
qede_rdma_get_free_event_node(struct qede_dev *edev)
{
	struct qede_rdma_event_work *event_node = NULL;
	bool found = false;

	list_for_each_entry(event_node, &edev->rdma_info.rdma_event_list,
			    list) {
		if (!work_pending(&event_node->work)) {
			found = true;
			break;
		}
	}

	if (!found) {
		event_node = kzalloc(sizeof(*event_node), GFP_ATOMIC);
		if (!event_node) {
			DP_NOTICE(edev,
				  "qedr: Could not allocate memory for rdma work\n");
			return NULL;
		}
		list_add_tail(&event_node->list,
			      &edev->rdma_info.rdma_event_list);
	}

	return event_node;
}

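/* Workqueue handler: dispatch a queued event to the matching notifier. */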
static void qede_rdma_handle_event(struct work_struct *work)
{
	struct qede_rdma_event_work *event_node;
	enum qede_rdma_event event;
	struct qede_dev *edev;

	event_node = container_of(work, struct qede_rdma_event_work, work);
	event = event_node->event;
	edev = event_node->ptr;

	switch (event) {
	case QEDE_UP:
		qede_rdma_dev_open(edev);
		break;
	case QEDE_DOWN:
		qede_rdma_dev_close(edev);
		break;
	case QEDE_CLOSE:
		qede_rdma_dev_shutdown(edev);
		break;
	case QEDE_CHANGE_ADDR:
		qede_rdma_changeaddr(edev);
		break;
	case QEDE_CHANGE_MTU:
		qede_rdma_change_mtu(edev);
		break;
	default:
		DP_NOTICE(edev, "Invalid rdma event %d", event);
	}
}

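/* Queue a deferred notification toward qedr. The kref guards against racing
 * with qede_rdma_destroy_wq(); events are dropped once teardown or expected
 * recovery has started.
 */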
static void qede_rdma_add_event(struct qede_dev *edev,
				enum qede_rdma_event event)
{
	struct qede_rdma_event_work *event_node;

	/* If a recovery was experienced avoid adding the event */
	if (edev->rdma_info.exp_recovery)
		return;

	if (!edev->rdma_info.qedr_dev || !edev->rdma_info.rdma_wq)
		return;

	/* We don't want the cleanup flow to start while we're allocating and
	 * scheduling the work
	 */
	if (!kref_get_unless_zero(&edev->rdma_info.refcnt))
		return; /* already being destroyed */

	event_node = qede_rdma_get_free_event_node(edev);
	if (!event_node)
		goto out;

	event_node->event = event;
	event_node->ptr = edev;

	INIT_WORK(&event_node->work, qede_rdma_handle_event);
	queue_work(edev->rdma_info.rdma_wq, &event_node->work);

out:
	kref_put(&edev->rdma_info.refcnt, qede_rdma_complete_event);
}

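/* Entry points used by the qede core to queue the individual event types */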
void qede_rdma_dev_event_open(struct qede_dev *edev)
{
	qede_rdma_add_event(edev, QEDE_UP);
}

void qede_rdma_dev_event_close(struct qede_dev *edev)
{
	qede_rdma_add_event(edev, QEDE_DOWN);
}

void qede_rdma_event_changeaddr(struct qede_dev *edev)
{
	qede_rdma_add_event(edev, QEDE_CHANGE_ADDR);
}

void qede_rdma_event_change_mtu(struct qede_dev *edev)
{
	qede_rdma_add_event(edev, QEDE_CHANGE_MTU);
}