// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/qed/qede_rdma.h>
#include "qede.h"

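/* A single qedr driver instance binds to all qede devices in the system.
 * qedr_dev_list tracks every qede device that has registered for RDMA, and
 * qedr_dev_list_lock serializes driver (un)registration against device
 * add/remove and state notifications.
 */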
static struct qedr_driver *qedr_drv;
static LIST_HEAD(qedr_dev_list);
static DEFINE_MUTEX(qedr_dev_list_lock);

bool qede_rdma_supported(struct qede_dev *dev)
{
	return dev->dev_info.common.rdma_supported;
}

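/* Probe the registered qedr driver for this device. Must be called with
 * qedr_dev_list_lock held.
 */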
static void _qede_rdma_dev_add(struct qede_dev *edev)
{
	if (!qedr_drv)
		return;

	/* Leftovers from previous error recovery */
	edev->rdma_info.exp_recovery = false;
	edev->rdma_info.qedr_dev = qedr_drv->add(edev->cdev, edev->pdev,
						 edev->ndev);
}

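/* Set up the per-device event machinery: the list of reusable event nodes,
 * the kref/completion pair used to flush in-flight event submissions, and a
 * single-threaded workqueue so events are handled strictly in order. The
 * kref starts at 1; the final reference is dropped in qede_rdma_destroy_wq().
 */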
static int qede_rdma_create_wq(struct qede_dev *edev)
{
	INIT_LIST_HEAD(&edev->rdma_info.rdma_event_list);
	kref_init(&edev->rdma_info.refcnt);
	init_completion(&edev->rdma_info.event_comp);

	edev->rdma_info.rdma_wq = create_singlethread_workqueue("rdma_wq");
	if (!edev->rdma_info.rdma_wq) {
		DP_NOTICE(edev, "qedr: Could not create workqueue\n");
		return -ENOMEM;
	}

	return 0;
}

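/* Drain the workqueue, then free every event node, cancelling any work that
 * was queued but has not run yet.
 */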
static void qede_rdma_cleanup_event(struct qede_dev *edev)
{
	struct list_head *head = &edev->rdma_info.rdma_event_list;
	struct qede_rdma_event_work *event_node;

	flush_workqueue(edev->rdma_info.rdma_wq);
	while (!list_empty(head)) {
		event_node = list_entry(head->next, struct qede_rdma_event_work,
					list);
		cancel_work_sync(&event_node->work);
		list_del(&event_node->list);
		kfree(event_node);
	}
}

static void qede_rdma_complete_event(struct kref *ref)
{
	struct qede_rdma_dev *rdma_dev =
		container_of(ref, struct qede_rdma_dev, refcnt);

	/* no more events will be added after this */
	complete(&rdma_dev->event_comp);
}

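/* Tear down the event machinery. Dropping the initial kref and waiting on
 * event_comp guarantees no qede_rdma_add_event() caller still holds a
 * temporary reference by the time the list is cleaned up.
 */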
static void qede_rdma_destroy_wq(struct qede_dev *edev)
{
	/* Avoid race with add_event flow, make sure it finishes before
	 * we start accessing the list and cleaning up the work
	 */
	kref_put(&edev->rdma_info.refcnt, qede_rdma_complete_event);
	wait_for_completion(&edev->rdma_info.event_comp);

	qede_rdma_cleanup_event(edev);
	destroy_workqueue(edev->rdma_info.rdma_wq);
	edev->rdma_info.rdma_wq = NULL;
}

int qede_rdma_dev_add(struct qede_dev *edev, bool recovery)
{
	int rc;

	if (!qede_rdma_supported(edev))
		return 0;

	/* Cannot start qedr while recovering since it wasn't fully stopped */
	if (recovery)
		return 0;

	rc = qede_rdma_create_wq(edev);
	if (rc)
		return rc;

	INIT_LIST_HEAD(&edev->rdma_info.entry);
	mutex_lock(&qedr_dev_list_lock);
	list_add_tail(&edev->rdma_info.entry, &qedr_dev_list);
	_qede_rdma_dev_add(edev);
	mutex_unlock(&qedr_dev_list_lock);

	return rc;
}

static void _qede_rdma_dev_remove(struct qede_dev *edev)
{
	if (qedr_drv && qedr_drv->remove && edev->rdma_info.qedr_dev)
		qedr_drv->remove(edev->rdma_info.qedr_dev);
}

void qede_rdma_dev_remove(struct qede_dev *edev, bool recovery)
{
	if (!qede_rdma_supported(edev))
		return;

	/* Cannot remove qedr while recovering since it wasn't fully stopped */
	if (!recovery) {
		qede_rdma_destroy_wq(edev);
		mutex_lock(&qedr_dev_list_lock);
		if (!edev->rdma_info.exp_recovery)
			_qede_rdma_dev_remove(edev);
		edev->rdma_info.qedr_dev = NULL;
		list_del(&edev->rdma_info.entry);
		mutex_unlock(&qedr_dev_list_lock);
	} else {
		if (!edev->rdma_info.exp_recovery) {
			mutex_lock(&qedr_dev_list_lock);
			_qede_rdma_dev_remove(edev);
			mutex_unlock(&qedr_dev_list_lock);
		}
		edev->rdma_info.exp_recovery = true;
	}
}

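/* The open/close/shutdown helpers below forward netdev state changes to qedr
 * through the driver's ->notify() callback. The underscore-prefixed variants
 * expect qedr_dev_list_lock to be held by the caller.
 */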
static void _qede_rdma_dev_open(struct qede_dev *edev)
{
	if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_UP);
}

static void qede_rdma_dev_open(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	mutex_lock(&qedr_dev_list_lock);
	_qede_rdma_dev_open(edev);
	mutex_unlock(&qedr_dev_list_lock);
}

static void _qede_rdma_dev_close(struct qede_dev *edev)
{
	if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_DOWN);
}

static void qede_rdma_dev_close(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	mutex_lock(&qedr_dev_list_lock);
	_qede_rdma_dev_close(edev);
	mutex_unlock(&qedr_dev_list_lock);
}

static void qede_rdma_dev_shutdown(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	mutex_lock(&qedr_dev_list_lock);
	if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CLOSE);
	mutex_unlock(&qedr_dev_list_lock);
}

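/* Called by the qedr driver when it loads. Attach it to every qede device
 * discovered so far and replay a QEDE_UP notification for interfaces that
 * are already running.
 */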
int qede_rdma_register_driver(struct qedr_driver *drv)
{
	struct qede_dev *edev;
	u8 qedr_counter = 0;

	mutex_lock(&qedr_dev_list_lock);
	if (qedr_drv) {
		mutex_unlock(&qedr_dev_list_lock);
		return -EINVAL;
	}
	qedr_drv = drv;

	list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
		struct net_device *ndev;

		qedr_counter++;
		_qede_rdma_dev_add(edev);
		ndev = edev->ndev;
		if (netif_running(ndev) && netif_oper_up(ndev))
			_qede_rdma_dev_open(edev);
	}
	mutex_unlock(&qedr_dev_list_lock);

	pr_notice("qedr: discovered and registered %d RDMA funcs\n",
		  qedr_counter);

	return 0;
}
EXPORT_SYMBOL(qede_rdma_register_driver);

void qede_rdma_unregister_driver(struct qedr_driver *drv)
{
	struct qede_dev *edev;

	mutex_lock(&qedr_dev_list_lock);
	list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
		/* If device has experienced recovery it was already removed */
		if (edev->rdma_info.qedr_dev && !edev->rdma_info.exp_recovery)
			_qede_rdma_dev_remove(edev);
	}
	qedr_drv = NULL;
	mutex_unlock(&qedr_dev_list_lock);
}
EXPORT_SYMBOL(qede_rdma_unregister_driver);

static void qede_rdma_changeaddr(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CHANGE_ADDR);
}

static void qede_rdma_change_mtu(struct qede_dev *edev)
{
	if (qede_rdma_supported(edev)) {
		if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
			qedr_drv->notify(edev->rdma_info.qedr_dev,
					 QEDE_CHANGE_MTU);
	}
}

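/* Reuse an event node whose work has already completed, if one exists;
 * otherwise allocate a new node (GFP_ATOMIC, since callers may not be able
 * to sleep) and park it on the list. Nodes are freed only in
 * qede_rdma_cleanup_event().
 */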
static struct qede_rdma_event_work *
qede_rdma_get_free_event_node(struct qede_dev *edev)
{
	struct qede_rdma_event_work *event_node = NULL;
	struct list_head *list_node = NULL;
	bool found = false;

	list_for_each(list_node, &edev->rdma_info.rdma_event_list) {
		event_node = list_entry(list_node, struct qede_rdma_event_work,
					list);
		if (!work_pending(&event_node->work)) {
			found = true;
			break;
		}
	}

	if (!found) {
		event_node = kzalloc(sizeof(*event_node), GFP_ATOMIC);
		if (!event_node) {
			DP_NOTICE(edev,
				  "qedr: Could not allocate memory for rdma work\n");
			return NULL;
		}
		list_add_tail(&event_node->list,
			      &edev->rdma_info.rdma_event_list);
	}

	return event_node;
}

static void qede_rdma_handle_event(struct work_struct *work)
{
	struct qede_rdma_event_work *event_node;
	enum qede_rdma_event event;
	struct qede_dev *edev;

	event_node = container_of(work, struct qede_rdma_event_work, work);
	event = event_node->event;
	edev = event_node->ptr;

	switch (event) {
	case QEDE_UP:
		qede_rdma_dev_open(edev);
		break;
	case QEDE_DOWN:
		qede_rdma_dev_close(edev);
		break;
	case QEDE_CLOSE:
		qede_rdma_dev_shutdown(edev);
		break;
	case QEDE_CHANGE_ADDR:
		qede_rdma_changeaddr(edev);
		break;
	case QEDE_CHANGE_MTU:
		qede_rdma_change_mtu(edev);
		break;
	default:
		DP_NOTICE(edev, "Invalid rdma event %d\n", event);
	}
}

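/* Queue an event for deferred handling on the device's workqueue. The
 * temporary kref guards against qede_rdma_destroy_wq() tearing the list down
 * while the node is being allocated and queued.
 */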
static void qede_rdma_add_event(struct qede_dev *edev,
				enum qede_rdma_event event)
{
	struct qede_rdma_event_work *event_node;

	/* If a recovery was experienced avoid adding the event */
	if (edev->rdma_info.exp_recovery)
		return;

	if (!edev->rdma_info.qedr_dev || !edev->rdma_info.rdma_wq)
		return;

	/* We don't want the cleanup flow to start while we're allocating and
	 * scheduling the work
	 */
	if (!kref_get_unless_zero(&edev->rdma_info.refcnt))
		return; /* already being destroyed */

	event_node = qede_rdma_get_free_event_node(edev);
	if (!event_node)
		goto out;

	event_node->event = event;
	event_node->ptr = edev;

	INIT_WORK(&event_node->work, qede_rdma_handle_event);
	queue_work(edev->rdma_info.rdma_wq, &event_node->work);

out:
	kref_put(&edev->rdma_info.refcnt, qede_rdma_complete_event);
}

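/* Entry points used by the rest of the qede driver to signal state
 * transitions; each simply queues the corresponding deferred event.
 */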
void qede_rdma_dev_event_open(struct qede_dev *edev)
{
	qede_rdma_add_event(edev, QEDE_UP);
}

void qede_rdma_dev_event_close(struct qede_dev *edev)
{
	qede_rdma_add_event(edev, QEDE_DOWN);
}

void qede_rdma_event_changeaddr(struct qede_dev *edev)
{
	qede_rdma_add_event(edev, QEDE_CHANGE_ADDR);
}

void qede_rdma_event_change_mtu(struct qede_dev *edev)
{
	qede_rdma_add_event(edev, QEDE_CHANGE_MTU);
}