1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTx2 RVU Admin Function driver
3  *
4  * Copyright (C) 2018 Marvell International Ltd.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 
11 #include <linux/types.h>
12 #include <linux/module.h>
13 #include <linux/pci.h>
14 
15 #include "rvu.h"
16 #include "cgx.h"
17 
/* One queued CGX link-change event, pending processing by the
 * cgx_evh_work worker.
 */
struct cgx_evq_entry {
	struct list_head evq_node;	/* linkage on rvu->cgx_evq_head */
	struct cgx_link_event link_event; /* event payload from the CGX driver */
};
22 
/* For each entry in MBOX_UP_CGX_MESSAGES, generate an allocator
 * otx2_mbox_alloc_msg_<NAME>() that reserves space for a request of
 * _req_type (and a response of _rsp_type) in the AF->PF "up" mailbox
 * of the given device, then stamps the message header.
 * Returns NULL when the mailbox region has no room left.
 */
#define M(_name, _id, _req_type, _rsp_type)				\
static struct _req_type __maybe_unused					\
*otx2_mbox_alloc_msg_ ## _name(struct rvu *rvu, int devid)		\
{									\
	struct _req_type *req;						\
									\
	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(		\
		&rvu->mbox_up, devid, sizeof(struct _req_type),		\
		sizeof(struct _rsp_type));				\
	if (!req)							\
		return NULL;						\
	req->hdr.sig = OTX2_MBOX_REQ_SIG;				\
	req->hdr.id = _id;						\
	return req;							\
}

MBOX_UP_CGX_MESSAGES
#undef M
41 
42 /* Returns bitmap of mapped PFs */
43 static inline u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
44 {
45 	return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
46 }
47 
48 static inline u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
49 {
50 	return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF);
51 }
52 
53 void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
54 {
55 	if (cgx_id >= rvu->cgx_cnt)
56 		return NULL;
57 
58 	return rvu->cgx_idmap[cgx_id];
59 }
60 
61 static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
62 {
63 	struct npc_pkind *pkind = &rvu->hw->pkind;
64 	int cgx_cnt = rvu->cgx_cnt;
65 	int cgx, lmac_cnt, lmac;
66 	int pf = PF_CGXMAP_BASE;
67 	int size, free_pkind;
68 
69 	if (!cgx_cnt)
70 		return 0;
71 
72 	if (cgx_cnt > 0xF || MAX_LMAC_PER_CGX > 0xF)
73 		return -EINVAL;
74 
75 	/* Alloc map table
76 	 * An additional entry is required since PF id starts from 1 and
77 	 * hence entry at offset 0 is invalid.
78 	 */
79 	size = (cgx_cnt * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
80 	rvu->pf2cgxlmac_map = devm_kzalloc(rvu->dev, size, GFP_KERNEL);
81 	if (!rvu->pf2cgxlmac_map)
82 		return -ENOMEM;
83 
84 	/* Initialize offset 0 with an invalid cgx and lmac id */
85 	rvu->pf2cgxlmac_map[0] = 0xFF;
86 
87 	/* Reverse map table */
88 	rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev,
89 				  cgx_cnt * MAX_LMAC_PER_CGX * sizeof(u16),
90 				  GFP_KERNEL);
91 	if (!rvu->cgxlmac2pf_map)
92 		return -ENOMEM;
93 
94 	rvu->cgx_mapped_pfs = 0;
95 	for (cgx = 0; cgx < cgx_cnt; cgx++) {
96 		lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
97 		for (lmac = 0; lmac < lmac_cnt; lmac++, pf++) {
98 			rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
99 			rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
100 			free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
101 			pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
102 			rvu->cgx_mapped_pfs++;
103 		}
104 	}
105 	return 0;
106 }
107 
108 static int rvu_cgx_send_link_info(int cgx_id, int lmac_id, struct rvu *rvu)
109 {
110 	struct cgx_evq_entry *qentry;
111 	unsigned long flags;
112 	int err;
113 
114 	qentry = kmalloc(sizeof(*qentry), GFP_KERNEL);
115 	if (!qentry)
116 		return -ENOMEM;
117 
118 	/* Lock the event queue before we read the local link status */
119 	spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
120 	err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
121 				&qentry->link_event.link_uinfo);
122 	qentry->link_event.cgx_id = cgx_id;
123 	qentry->link_event.lmac_id = lmac_id;
124 	if (err)
125 		goto skip_add;
126 	list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
127 skip_add:
128 	spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
129 
130 	/* start worker to process the events */
131 	queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);
132 
133 	return 0;
134 }
135 
136 /* This is called from interrupt context and is expected to be atomic */
137 static int cgx_lmac_postevent(struct cgx_link_event *event, void *data)
138 {
139 	struct cgx_evq_entry *qentry;
140 	struct rvu *rvu = data;
141 
142 	/* post event to the event queue */
143 	qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
144 	if (!qentry)
145 		return -ENOMEM;
146 	qentry->link_event = *event;
147 	spin_lock(&rvu->cgx_evq_lock);
148 	list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
149 	spin_unlock(&rvu->cgx_evq_lock);
150 
151 	/* start worker to process the events */
152 	queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);
153 
154 	return 0;
155 }
156 
157 static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
158 {
159 	struct cgx_link_user_info *linfo;
160 	struct cgx_link_info_msg *msg;
161 	unsigned long pfmap;
162 	int err, pfid;
163 
164 	linfo = &event->link_uinfo;
165 	pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);
166 
167 	do {
168 		pfid = find_first_bit(&pfmap, 16);
169 		clear_bit(pfid, &pfmap);
170 
171 		/* check if notification is enabled */
172 		if (!test_bit(pfid, &rvu->pf_notify_bmap)) {
173 			dev_info(rvu->dev, "cgx %d: lmac %d Link status %s\n",
174 				 event->cgx_id, event->lmac_id,
175 				 linfo->link_up ? "UP" : "DOWN");
176 			continue;
177 		}
178 
179 		/* Send mbox message to PF */
180 		msg = otx2_mbox_alloc_msg_CGX_LINK_EVENT(rvu, pfid);
181 		if (!msg)
182 			continue;
183 		msg->link_info = *linfo;
184 		otx2_mbox_msg_send(&rvu->mbox_up, pfid);
185 		err = otx2_mbox_wait_for_rsp(&rvu->mbox_up, pfid);
186 		if (err)
187 			dev_warn(rvu->dev, "notification to pf %d failed\n",
188 				 pfid);
189 	} while (pfmap);
190 }
191 
192 static void cgx_evhandler_task(struct work_struct *work)
193 {
194 	struct rvu *rvu = container_of(work, struct rvu, cgx_evh_work);
195 	struct cgx_evq_entry *qentry;
196 	struct cgx_link_event *event;
197 	unsigned long flags;
198 
199 	do {
200 		/* Dequeue an event */
201 		spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
202 		qentry = list_first_entry_or_null(&rvu->cgx_evq_head,
203 						  struct cgx_evq_entry,
204 						  evq_node);
205 		if (qentry)
206 			list_del(&qentry->evq_node);
207 		spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
208 		if (!qentry)
209 			break; /* nothing more to process */
210 
211 		event = &qentry->link_event;
212 
213 		/* process event */
214 		cgx_notify_pfs(event, rvu);
215 		kfree(qentry);
216 	} while (1);
217 }
218 
219 static void cgx_lmac_event_handler_init(struct rvu *rvu)
220 {
221 	struct cgx_event_cb cb;
222 	int cgx, lmac, err;
223 	void *cgxd;
224 
225 	spin_lock_init(&rvu->cgx_evq_lock);
226 	INIT_LIST_HEAD(&rvu->cgx_evq_head);
227 	INIT_WORK(&rvu->cgx_evh_work, cgx_evhandler_task);
228 	rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0);
229 	if (!rvu->cgx_evh_wq) {
230 		dev_err(rvu->dev, "alloc workqueue failed");
231 		return;
232 	}
233 
234 	cb.notify_link_chg = cgx_lmac_postevent; /* link change call back */
235 	cb.data = rvu;
236 
237 	for (cgx = 0; cgx < rvu->cgx_cnt; cgx++) {
238 		cgxd = rvu_cgx_pdata(cgx, rvu);
239 		for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++) {
240 			err = cgx_lmac_evh_register(&cb, cgxd, lmac);
241 			if (err)
242 				dev_err(rvu->dev,
243 					"%d:%d handler register failed\n",
244 					cgx, lmac);
245 		}
246 	}
247 }
248 
249 void rvu_cgx_wq_destroy(struct rvu *rvu)
250 {
251 	if (rvu->cgx_evh_wq) {
252 		flush_workqueue(rvu->cgx_evh_wq);
253 		destroy_workqueue(rvu->cgx_evh_wq);
254 		rvu->cgx_evh_wq = NULL;
255 	}
256 }
257 
258 int rvu_cgx_probe(struct rvu *rvu)
259 {
260 	int i, err;
261 
262 	/* find available cgx ports */
263 	rvu->cgx_cnt = cgx_get_cgx_cnt();
264 	if (!rvu->cgx_cnt) {
265 		dev_info(rvu->dev, "No CGX devices found!\n");
266 		return -ENODEV;
267 	}
268 
269 	rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt * sizeof(void *),
270 				      GFP_KERNEL);
271 	if (!rvu->cgx_idmap)
272 		return -ENOMEM;
273 
274 	/* Initialize the cgxdata table */
275 	for (i = 0; i < rvu->cgx_cnt; i++)
276 		rvu->cgx_idmap[i] = cgx_get_pdata(i);
277 
278 	/* Map CGX LMAC interfaces to RVU PFs */
279 	err = rvu_map_cgx_lmac_pf(rvu);
280 	if (err)
281 		return err;
282 
283 	/* Register for CGX events */
284 	cgx_lmac_event_handler_init(rvu);
285 	return 0;
286 }
287 
288 int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
289 {
290 	int pf = rvu_get_pf(pcifunc);
291 	u8 cgx_id, lmac_id;
292 
293 	/* This msg is expected only from PFs that are mapped to CGX LMACs,
294 	 * if received from other PF/VF simply ACK, nothing to do.
295 	 */
296 	if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, pf))
297 		return -ENODEV;
298 
299 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
300 
301 	cgx_lmac_rx_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, start);
302 
303 	return 0;
304 }
305 
/* Mbox handler: enable Rx/Tx on the requesting PF's CGX LMAC.
 * Always ACKs (returns 0), even for unmapped PFs/VFs.
 */
int rvu_mbox_handler_CGX_START_RXTX(struct rvu *rvu, struct msg_req *req,
				    struct msg_rsp *rsp)
{
	rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, true);
	return 0;
}
312 
/* Mbox handler: disable Rx/Tx on the requesting PF's CGX LMAC.
 * Always ACKs (returns 0), even for unmapped PFs/VFs.
 */
int rvu_mbox_handler_CGX_STOP_RXTX(struct rvu *rvu, struct msg_req *req,
				   struct msg_rsp *rsp)
{
	rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, false);
	return 0;
}
319 
320 int rvu_mbox_handler_CGX_STATS(struct rvu *rvu, struct msg_req *req,
321 			       struct cgx_stats_rsp *rsp)
322 {
323 	int pf = rvu_get_pf(req->hdr.pcifunc);
324 	int stat = 0, err = 0;
325 	u64 tx_stat, rx_stat;
326 	u8 cgx_idx, lmac;
327 	void *cgxd;
328 
329 	if ((req->hdr.pcifunc & RVU_PFVF_FUNC_MASK) ||
330 	    !is_pf_cgxmapped(rvu, pf))
331 		return -ENODEV;
332 
333 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
334 	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
335 
336 	/* Rx stats */
337 	while (stat < CGX_RX_STATS_COUNT) {
338 		err = cgx_get_rx_stats(cgxd, lmac, stat, &rx_stat);
339 		if (err)
340 			return err;
341 		rsp->rx_stats[stat] = rx_stat;
342 		stat++;
343 	}
344 
345 	/* Tx stats */
346 	stat = 0;
347 	while (stat < CGX_TX_STATS_COUNT) {
348 		err = cgx_get_tx_stats(cgxd, lmac, stat, &tx_stat);
349 		if (err)
350 			return err;
351 		rsp->tx_stats[stat] = tx_stat;
352 		stat++;
353 	}
354 	return 0;
355 }
356 
357 int rvu_mbox_handler_CGX_MAC_ADDR_SET(struct rvu *rvu,
358 				      struct cgx_mac_addr_set_or_get *req,
359 				      struct cgx_mac_addr_set_or_get *rsp)
360 {
361 	int pf = rvu_get_pf(req->hdr.pcifunc);
362 	u8 cgx_id, lmac_id;
363 
364 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
365 
366 	cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);
367 
368 	return 0;
369 }
370 
371 int rvu_mbox_handler_CGX_MAC_ADDR_GET(struct rvu *rvu,
372 				      struct cgx_mac_addr_set_or_get *req,
373 				      struct cgx_mac_addr_set_or_get *rsp)
374 {
375 	int pf = rvu_get_pf(req->hdr.pcifunc);
376 	u8 cgx_id, lmac_id;
377 	int rc = 0, i;
378 	u64 cfg;
379 
380 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
381 
382 	rsp->hdr.rc = rc;
383 	cfg = cgx_lmac_addr_get(cgx_id, lmac_id);
384 	/* copy 48 bit mac address to req->mac_addr */
385 	for (i = 0; i < ETH_ALEN; i++)
386 		rsp->mac_addr[i] = cfg >> (ETH_ALEN - 1 - i) * 8;
387 	return 0;
388 }
389 
390 int rvu_mbox_handler_CGX_PROMISC_ENABLE(struct rvu *rvu, struct msg_req *req,
391 					struct msg_rsp *rsp)
392 {
393 	u16 pcifunc = req->hdr.pcifunc;
394 	int pf = rvu_get_pf(pcifunc);
395 	u8 cgx_id, lmac_id;
396 
397 	/* This msg is expected only from PFs that are mapped to CGX LMACs,
398 	 * if received from other PF/VF simply ACK, nothing to do.
399 	 */
400 	if ((req->hdr.pcifunc & RVU_PFVF_FUNC_MASK) ||
401 	    !is_pf_cgxmapped(rvu, pf))
402 		return -ENODEV;
403 
404 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
405 
406 	cgx_lmac_promisc_config(cgx_id, lmac_id, true);
407 	return 0;
408 }
409 
410 int rvu_mbox_handler_CGX_PROMISC_DISABLE(struct rvu *rvu, struct msg_req *req,
411 					 struct msg_rsp *rsp)
412 {
413 	u16 pcifunc = req->hdr.pcifunc;
414 	int pf = rvu_get_pf(pcifunc);
415 	u8 cgx_id, lmac_id;
416 
417 	/* This msg is expected only from PFs that are mapped to CGX LMACs,
418 	 * if received from other PF/VF simply ACK, nothing to do.
419 	 */
420 	if ((req->hdr.pcifunc & RVU_PFVF_FUNC_MASK) ||
421 	    !is_pf_cgxmapped(rvu, pf))
422 		return -ENODEV;
423 
424 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
425 
426 	cgx_lmac_promisc_config(cgx_id, lmac_id, false);
427 	return 0;
428 }
429 
430 static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
431 {
432 	int pf = rvu_get_pf(pcifunc);
433 	u8 cgx_id, lmac_id;
434 
435 	/* This msg is expected only from PFs that are mapped to CGX LMACs,
436 	 * if received from other PF/VF simply ACK, nothing to do.
437 	 */
438 	if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, pf))
439 		return -ENODEV;
440 
441 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
442 
443 	if (en) {
444 		set_bit(pf, &rvu->pf_notify_bmap);
445 		/* Send the current link status to PF */
446 		rvu_cgx_send_link_info(cgx_id, lmac_id, rvu);
447 	} else {
448 		clear_bit(pf, &rvu->pf_notify_bmap);
449 	}
450 
451 	return 0;
452 }
453 
/* Mbox handler: enable link-event notifications for the requesting PF.
 * Always ACKs (returns 0), even for unmapped PFs/VFs.
 */
int rvu_mbox_handler_CGX_START_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
					  struct msg_rsp *rsp)
{
	rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, true);
	return 0;
}
460 
/* Mbox handler: disable link-event notifications for the requesting PF.
 * Always ACKs (returns 0), even for unmapped PFs/VFs.
 */
int rvu_mbox_handler_CGX_STOP_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
					 struct msg_rsp *rsp)
{
	rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, false);
	return 0;
}
467 
468 int rvu_mbox_handler_CGX_GET_LINKINFO(struct rvu *rvu, struct msg_req *req,
469 				      struct cgx_link_info_msg *rsp)
470 {
471 	u8 cgx_id, lmac_id;
472 	int pf, err;
473 
474 	pf = rvu_get_pf(req->hdr.pcifunc);
475 
476 	if (!is_pf_cgxmapped(rvu, pf))
477 		return -ENODEV;
478 
479 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
480 
481 	err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
482 				&rsp->link_info);
483 	return err;
484 }
485 
486 static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
487 {
488 	int pf = rvu_get_pf(pcifunc);
489 	u8 cgx_id, lmac_id;
490 
491 	/* This msg is expected only from PFs that are mapped to CGX LMACs,
492 	 * if received from other PF/VF simply ACK, nothing to do.
493 	 */
494 	if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, pf))
495 		return -ENODEV;
496 
497 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
498 
499 	return cgx_lmac_internal_loopback(rvu_cgx_pdata(cgx_id, rvu),
500 					  lmac_id, en);
501 }
502 
/* Mbox handler: enable internal loopback on the requesting PF's CGX
 * LMAC. Always ACKs (returns 0), even for unmapped PFs/VFs.
 */
int rvu_mbox_handler_CGX_INTLBK_ENABLE(struct rvu *rvu, struct msg_req *req,
				       struct msg_rsp *rsp)
{
	rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, true);
	return 0;
}
509 
/* Mbox handler: disable internal loopback on the requesting PF's CGX
 * LMAC. Always ACKs (returns 0), even for unmapped PFs/VFs.
 */
int rvu_mbox_handler_CGX_INTLBK_DISABLE(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false);
	return 0;
}
516