// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 CGX driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "cgx.h"

#define DRV_NAME	"octeontx2-cgx"
#define DRV_STRING	"Marvell OcteonTX2 CGX/MAC Driver"

/**
 * struct lmac - per LMAC port context
 * @wq_cmd_cmplt:	waitq to keep the process blocked until cmd completion
 * @cmd_lock:		lock to serialize the command interface
 * @resp:		command response
 * @link_info:		link related information
 * @event_cb:		callback for link change events
 * @cmd_pend:		flag set before a new command is started,
 *			cleared after the command response is received
 * @cgx:		parent cgx port
 * @lmac_id:		lmac port id
 * @name:		lmac port name
 */
struct lmac {
	wait_queue_head_t wq_cmd_cmplt;
	struct mutex cmd_lock;
	u64 resp;
	struct cgx_link_user_info link_info;
	struct cgx_event_cb event_cb;
	bool cmd_pend;
	struct cgx *cgx;
	u8 lmac_id;
	char *name;
};

struct cgx {
	void __iomem		*reg_base;
	struct pci_dev		*pdev;
	u8			cgx_id;
	u8			lmac_count;
	struct lmac		*lmac_idmap[MAX_LMAC_PER_CGX];
	struct list_head	cgx_list;
};

static LIST_HEAD(cgx_list);

/* Convert firmware speed encoding to user format (Mbps) */
static u32 cgx_speed_mbps[CGX_LINK_SPEED_MAX];

/* Convert firmware lmac type encoding to string */
static char *cgx_lmactype_string[LMAC_MODE_MAX];

/* Supported devices */
static const struct pci_device_id cgx_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
	{ 0, }	/* end of table */
};

MODULE_DEVICE_TABLE(pci, cgx_id_table);

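/* Low level register accessors: each LMAC's CSRs sit in their own 2^18-byte
 * (256KB) window of the CGX BAR, so the LMAC id is folded into the register
 * offset below.
 */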
static void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val)
{
	writeq(val, cgx->reg_base + (lmac << 18) + offset);
}

static u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset)
{
	return readq(cgx->reg_base + (lmac << 18) + offset);
}

static inline struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
{
	if (!cgx || lmac_id >= MAX_LMAC_PER_CGX)
		return NULL;

	return cgx->lmac_idmap[lmac_id];
}

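/* Return how many CGX devices have been probed so far */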
int cgx_get_cgx_cnt(void)
{
	struct cgx *cgx_dev;
	int count = 0;

	list_for_each_entry(cgx_dev, &cgx_list, cgx_list)
		count++;

	return count;
}
EXPORT_SYMBOL(cgx_get_cgx_cnt);

int cgx_get_lmac_cnt(void *cgxd)
{
	struct cgx *cgx = cgxd;

	if (!cgx)
		return -ENODEV;

	return cgx->lmac_count;
}
EXPORT_SYMBOL(cgx_get_lmac_cnt);

void *cgx_get_pdata(int cgx_id)
{
	struct cgx *cgx_dev;

	list_for_each_entry(cgx_dev, &cgx_list, cgx_list) {
		if (cgx_dev->cgx_id == cgx_id)
			return cgx_dev;
	}
	return NULL;
}
EXPORT_SYMBOL(cgx_get_pdata);

/* Ensure the required lock for the event queue (where asynchronous events are
 * posted) is held before calling this API. Otherwise an asynchronous event
 * (with the latest link status) can reach the destination before this function
 * returns and make the reported link status appear wrong.
 */
int cgx_get_link_info(void *cgxd, int lmac_id,
		      struct cgx_link_user_info *linfo)
{
	struct lmac *lmac = lmac_pdata(lmac_id, cgxd);

	if (!lmac)
		return -ENODEV;

	*linfo = lmac->link_info;
	return 0;
}
EXPORT_SYMBOL(cgx_get_link_info);

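/* Pack a 6-byte MAC address into the u64 layout used by the DMAC CAM
 * registers, with byte 0 of the address in the most significant of the six
 * bytes, e.g. 00:11:22:33:44:55 becomes 0x001122334455.
 */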
static u64 mac2u64(u8 *mac_addr)
{
	u64 mac = 0;
	int index;

	for (index = ETH_ALEN - 1; index >= 0; index--)
		mac |= ((u64)*mac_addr++) << (8 * index);
	return mac;
}

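/* Install @mac_addr in this LMAC's DMAC CAM entry and turn on CAM based
 * DMAC filtering for the LMAC.
 */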
int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
{
	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
	u64 cfg;

	cfg = mac2u64(mac_addr);

	cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (lmac_id * 0x8)),
		  cfg | CGX_DMAC_CAM_ADDR_ENABLE | ((u64)lmac_id << 49));

	cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
	cfg |= CGX_DMAC_CTL0_CAM_ENABLE;
	cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);

	return 0;
}
EXPORT_SYMBOL(cgx_lmac_addr_set);

u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id)
{
	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
	u64 cfg;

	cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8);
	return cfg & CGX_RX_DMAC_ADR_MASK;
}
EXPORT_SYMBOL(cgx_lmac_addr_get);

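/* Set the 6-bit port kind (pkind) that identifies packets received on this
 * LMAC to the rest of the hardware, via CGXX_CMRX_RX_ID_MAP.
 */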
int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind)
{
	struct cgx *cgx = cgxd;

	if (!cgx || lmac_id >= cgx->lmac_count)
		return -ENODEV;

	cgx_write(cgx, lmac_id, CGXX_CMRX_RX_ID_MAP, (pkind & 0x3F));
	return 0;
}
EXPORT_SYMBOL(cgx_set_pkind);

static inline u8 cgx_get_lmac_type(struct cgx *cgx, int lmac_id)
{
	u64 cfg;

	cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
	return (cfg >> CGX_LMAC_TYPE_SHIFT) & CGX_LMAC_TYPE_MASK;
}

/* Configure CGX LMAC in internal loopback mode */
int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
{
	struct cgx *cgx = cgxd;
	u8 lmac_type;
	u64 cfg;

	if (!cgx || lmac_id >= cgx->lmac_count)
		return -ENODEV;

	lmac_type = cgx_get_lmac_type(cgx, lmac_id);
	if (lmac_type == LMAC_MODE_SGMII || lmac_type == LMAC_MODE_QSGMII) {
		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL);
		if (enable)
			cfg |= CGXX_GMP_PCS_MRX_CTL_LBK;
		else
			cfg &= ~CGXX_GMP_PCS_MRX_CTL_LBK;
		cgx_write(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL, cfg);
	} else {
		cfg = cgx_read(cgx, lmac_id, CGXX_SPUX_CONTROL1);
		if (enable)
			cfg |= CGXX_SPUX_CONTROL1_LBK;
		else
			cfg &= ~CGXX_SPUX_CONTROL1_LBK;
		cgx_write(cgx, lmac_id, CGXX_SPUX_CONTROL1, cfg);
	}
	return 0;
}
EXPORT_SYMBOL(cgx_lmac_internal_loopback);

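/* Enable or disable promiscuous mode by switching the LMAC between DMAC CAM
 * filtering and accept-all in CGXX_CMRX_RX_DMAC_CTL0, and by toggling the
 * LMAC's CAM entry accordingly.
 */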
void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
{
	struct cgx *cgx = cgx_get_pdata(cgx_id);
	u64 cfg = 0;

	if (!cgx)
		return;

	if (enable) {
		/* Enable promiscuous mode on LMAC */
		cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
		cfg &= ~(CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE);
		cfg |= CGX_DMAC_BCAST_MODE;
		cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);

		cfg = cgx_read(cgx, 0,
			       (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
		cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE;
		cgx_write(cgx, 0,
			  (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
	} else {
		/* Disable promiscuous mode */
		cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
		cfg |= CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE;
		cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
		cfg = cgx_read(cgx, 0,
			       (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
		cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
		cgx_write(cgx, 0,
			  (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
	}
}
EXPORT_SYMBOL(cgx_lmac_promisc_config);

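/* Read one of the per-LMAC CMR RX statistics counters; @idx selects which
 * CGXX_CMRX_RX_STATx register is returned. cgx_get_tx_stats() below does the
 * same for the TX counters.
 */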
int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
{
	struct cgx *cgx = cgxd;

	if (!cgx || lmac_id >= cgx->lmac_count)
		return -ENODEV;
	*rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8));
	return 0;
}
EXPORT_SYMBOL(cgx_get_rx_stats);

int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat)
{
	struct cgx *cgx = cgxd;

	if (!cgx || lmac_id >= cgx->lmac_count)
		return -ENODEV;
	*tx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (idx * 8));
	return 0;
}
EXPORT_SYMBOL(cgx_get_tx_stats);

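/* Enable or disable the LMAC's CMR block together with its packet RX and TX
 * data paths.
 */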
int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	if (!cgx || lmac_id >= cgx->lmac_count)
		return -ENODEV;

	cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
	if (enable)
		cfg |= CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN;
	else
		cfg &= ~(CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN);
	cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
	return 0;
}
EXPORT_SYMBOL(cgx_lmac_rx_tx_enable);

/* CGX Firmware interface low level support */
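/* A command is issued by writing it to CGX_COMMAND_REG with the ownership
 * field set to firmware; firmware posts its response in CGX_EVENT_REG and
 * raises the FWI interrupt, where cgx_fwi_event_handler() copies the response
 * and wakes the waiter. cmd_lock and cmd_pend ensure that only one command is
 * outstanding per LMAC at a time.
 */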
static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
{
	struct cgx *cgx = lmac->cgx;
	struct device *dev;
	int err = 0;
	u64 cmd;

	/* Ensure no other command is in progress */
	err = mutex_lock_interruptible(&lmac->cmd_lock);
	if (err)
		return err;

	/* Ensure command register is free */
	cmd = cgx_read(cgx, lmac->lmac_id, CGX_COMMAND_REG);
	if (FIELD_GET(CMDREG_OWN, cmd) != CGX_CMD_OWN_NS) {
		err = -EBUSY;
		goto unlock;
	}

	/* Update ownership in command request */
	req = FIELD_SET(CMDREG_OWN, CGX_CMD_OWN_FIRMWARE, req);

	/* Mark this lmac as pending, before we start */
	lmac->cmd_pend = true;

	/* Start command in hardware */
	cgx_write(cgx, lmac->lmac_id, CGX_COMMAND_REG, req);

	/* Ensure command is completed without errors */
	if (!wait_event_timeout(lmac->wq_cmd_cmplt, !lmac->cmd_pend,
				msecs_to_jiffies(CGX_CMD_TIMEOUT))) {
		dev = &cgx->pdev->dev;
		dev_err(dev, "cgx port %d:%d cmd timeout\n",
			cgx->cgx_id, lmac->lmac_id);
		err = -EIO;
		goto unlock;
	}

	/* We have a valid command response */
	smp_rmb(); /* Ensure the latest updates are visible */
	*resp = lmac->resp;

unlock:
	mutex_unlock(&lmac->cmd_lock);

	return err;
}

static inline int cgx_fwi_cmd_generic(u64 req, u64 *resp,
				      struct cgx *cgx, int lmac_id)
{
	struct lmac *lmac;
	int err;

	lmac = lmac_pdata(lmac_id, cgx);
	if (!lmac)
		return -ENODEV;

	err = cgx_fwi_cmd_send(req, resp, lmac);

	/* Check for valid response */
	if (!err) {
		if (FIELD_GET(EVTREG_STAT, *resp) == CGX_STAT_FAIL)
			return -EIO;
		else
			return 0;
	}

	return err;
}

static inline void cgx_link_usertable_init(void)
{
	cgx_speed_mbps[CGX_LINK_NONE] = 0;
	cgx_speed_mbps[CGX_LINK_10M] = 10;
	cgx_speed_mbps[CGX_LINK_100M] = 100;
	cgx_speed_mbps[CGX_LINK_1G] = 1000;
	cgx_speed_mbps[CGX_LINK_2HG] = 2500;
	cgx_speed_mbps[CGX_LINK_5G] = 5000;
	cgx_speed_mbps[CGX_LINK_10G] = 10000;
	cgx_speed_mbps[CGX_LINK_20G] = 20000;
	cgx_speed_mbps[CGX_LINK_25G] = 25000;
	cgx_speed_mbps[CGX_LINK_40G] = 40000;
	cgx_speed_mbps[CGX_LINK_50G] = 50000;
	cgx_speed_mbps[CGX_LINK_100G] = 100000;

	cgx_lmactype_string[LMAC_MODE_SGMII] = "SGMII";
	cgx_lmactype_string[LMAC_MODE_XAUI] = "XAUI";
	cgx_lmactype_string[LMAC_MODE_RXAUI] = "RXAUI";
	cgx_lmactype_string[LMAC_MODE_10G_R] = "10G_R";
	cgx_lmactype_string[LMAC_MODE_40G_R] = "40G_R";
	cgx_lmactype_string[LMAC_MODE_QSGMII] = "QSGMII";
	cgx_lmactype_string[LMAC_MODE_25G_R] = "25G_R";
	cgx_lmactype_string[LMAC_MODE_50G_R] = "50G_R";
	cgx_lmactype_string[LMAC_MODE_100G_R] = "100G_R";
	cgx_lmactype_string[LMAC_MODE_USXGMII] = "USXGMII";
}

static inline void link_status_user_format(u64 lstat,
					   struct cgx_link_user_info *linfo,
					   struct cgx *cgx, u8 lmac_id)
{
	char *lmac_string;

	linfo->link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat);
	linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat);
	linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)];
	linfo->lmac_type_id = cgx_get_lmac_type(cgx, lmac_id);
	lmac_string = cgx_lmactype_string[linfo->lmac_type_id];
	strncpy(linfo->lmac_type, lmac_string, LMACTYPE_STR_LEN - 1);
}

/* Hardware event handlers */
static inline void cgx_link_change_handler(u64 lstat,
					   struct lmac *lmac)
{
	struct cgx_link_user_info *linfo;
	struct cgx *cgx = lmac->cgx;
	struct cgx_link_event event;
	struct device *dev;
	int err_type;

	dev = &cgx->pdev->dev;

	link_status_user_format(lstat, &event.link_uinfo, cgx, lmac->lmac_id);
	err_type = FIELD_GET(RESP_LINKSTAT_ERRTYPE, lstat);

	event.cgx_id = cgx->cgx_id;
	event.lmac_id = lmac->lmac_id;

	/* update the local copy of link status */
	lmac->link_info = event.link_uinfo;
	linfo = &lmac->link_info;

	if (!lmac->event_cb.notify_link_chg) {
		dev_dbg(dev, "cgx port %d:%d Link change handler null",
			cgx->cgx_id, lmac->lmac_id);
		if (err_type != CGX_ERR_NONE) {
			dev_err(dev, "cgx port %d:%d Link error %d\n",
				cgx->cgx_id, lmac->lmac_id, err_type);
		}
		dev_info(dev, "cgx port %d:%d Link is %s %d Mbps\n",
			 cgx->cgx_id, lmac->lmac_id,
			 linfo->link_up ? "UP" : "DOWN", linfo->speed);
		return;
	}

	if (lmac->event_cb.notify_link_chg(&event, lmac->event_cb.data))
		dev_err(dev, "event notification failure\n");
}

static inline bool cgx_cmdresp_is_linkevent(u64 event)
{
	u8 id;

	id = FIELD_GET(EVTREG_ID, event);
	if (id == CGX_CMD_LINK_BRING_UP ||
	    id == CGX_CMD_LINK_BRING_DOWN)
		return true;
	else
		return false;
}

static inline bool cgx_event_is_linkevent(u64 event)
{
	if (FIELD_GET(EVTREG_ID, event) == CGX_EVT_LINK_CHANGE)
		return true;
	else
		return false;
}

static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
{
	struct lmac *lmac = data;
	struct cgx *cgx;
	u64 event;

	cgx = lmac->cgx;

	event = cgx_read(cgx, lmac->lmac_id, CGX_EVENT_REG);

	if (!FIELD_GET(EVTREG_ACK, event))
		return IRQ_NONE;

	switch (FIELD_GET(EVTREG_EVT_TYPE, event)) {
	case CGX_EVT_CMD_RESP:
		/* Copy the response. Since only one command is active at a
		 * time, there is no way a response can get overwritten
		 */
		lmac->resp = event;
		/* Ensure response is updated before thread context starts */
		smp_wmb();

		/* There won't be separate events for link changes initiated
		 * from software; hence report the command responses as events
		 */
		if (cgx_cmdresp_is_linkevent(event))
			cgx_link_change_handler(event, lmac);

		/* Release thread waiting for completion */
		lmac->cmd_pend = false;
		wake_up_interruptible(&lmac->wq_cmd_cmplt);
		break;
	case CGX_EVT_ASYNC:
		if (cgx_event_is_linkevent(event))
			cgx_link_change_handler(event, lmac);
		break;
	}

	/* Any new event or command response will be posted by firmware
	 * only after the current status is acked.
	 * Ack the interrupt register as well.
	 */
	cgx_write(lmac->cgx, lmac->lmac_id, CGX_EVENT_REG, 0);
	cgx_write(lmac->cgx, lmac->lmac_id, CGXX_CMRX_INT, FW_CGX_INT);

	return IRQ_HANDLED;
}

/* APIs for PHY management using CGX firmware interface */

/* Callback registration for hardware events like link change */
int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id)
{
	struct cgx *cgx = cgxd;
	struct lmac *lmac;

	lmac = lmac_pdata(lmac_id, cgx);
	if (!lmac)
		return -ENODEV;

	lmac->event_cb = *cb;

	return 0;
}
EXPORT_SYMBOL(cgx_lmac_evh_register);

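/* Illustrative only: a consumer (for example the AF driver) could register
 * for link change notifications roughly as below. The callback signature
 * mirrors how event_cb is invoked in cgx_link_change_handler(); the names
 * my_link_notify, priv, cgx_id and lmac_id are hypothetical caller state.
 *
 *	static int my_link_notify(struct cgx_link_event *event, void *data)
 *	{
 *		// inspect event->link_uinfo for event->cgx_id / event->lmac_id
 *		return 0;
 *	}
 *
 *	struct cgx_event_cb cb = {
 *		.notify_link_chg = my_link_notify,
 *		.data = priv,
 *	};
 *	err = cgx_lmac_evh_register(&cb, cgx_get_pdata(cgx_id), lmac_id);
 */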
static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx)
{
	u64 req = 0;

	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);
	return cgx_fwi_cmd_generic(req, resp, cgx, 0);
}

static int cgx_lmac_verify_fwi_version(struct cgx *cgx)
{
	struct device *dev = &cgx->pdev->dev;
	int major_ver, minor_ver;
	u64 resp;
	int err;

	if (!cgx->lmac_count)
		return 0;

	err = cgx_fwi_read_version(&resp, cgx);
	if (err)
		return err;

	major_ver = FIELD_GET(RESP_MAJOR_VER, resp);
	minor_ver = FIELD_GET(RESP_MINOR_VER, resp);
	dev_dbg(dev, "Firmware command interface version = %d.%d\n",
		major_ver, minor_ver);
	if (major_ver != CGX_FIRMWARE_MAJOR_VER ||
	    minor_ver != CGX_FIRMWARE_MINOR_VER)
		return -EIO;
	else
		return 0;
}

static int cgx_lmac_init(struct cgx *cgx)
{
	struct lmac *lmac;
	int i, err;

	cgx->lmac_count = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0x7;
	if (cgx->lmac_count > MAX_LMAC_PER_CGX)
		cgx->lmac_count = MAX_LMAC_PER_CGX;

	for (i = 0; i < cgx->lmac_count; i++) {
		lmac = kcalloc(1, sizeof(struct lmac), GFP_KERNEL);
		if (!lmac)
			return -ENOMEM;
		lmac->name = kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL);
		if (!lmac->name) {
			kfree(lmac);
			return -ENOMEM;
		}
		sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i);
		lmac->lmac_id = i;
		lmac->cgx = cgx;
		init_waitqueue_head(&lmac->wq_cmd_cmplt);
		mutex_init(&lmac->cmd_lock);
		err = request_irq(pci_irq_vector(cgx->pdev,
						 CGX_LMAC_FWI + i * 9),
				  cgx_fwi_event_handler, 0, lmac->name, lmac);
		if (err) {
			kfree(lmac->name);
			kfree(lmac);
			return err;
		}

		/* Enable interrupt */
		cgx_write(cgx, lmac->lmac_id, CGXX_CMRX_INT_ENA_W1S,
			  FW_CGX_INT);

		/* Add reference */
		cgx->lmac_idmap[i] = lmac;
	}

	return cgx_lmac_verify_fwi_version(cgx);
}

static int cgx_lmac_exit(struct cgx *cgx)
{
	struct lmac *lmac;
	int i;

	/* Free all lmac related resources */
	for (i = 0; i < cgx->lmac_count; i++) {
		lmac = cgx->lmac_idmap[i];
		if (!lmac)
			continue;
		free_irq(pci_irq_vector(cgx->pdev, CGX_LMAC_FWI + i * 9), lmac);
		kfree(lmac->name);
		kfree(lmac);
	}

	return 0;
}

static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct cgx *cgx;
	int err, nvec;

	cgx = devm_kzalloc(dev, sizeof(*cgx), GFP_KERNEL);
	if (!cgx)
		return -ENOMEM;
	cgx->pdev = pdev;

	pci_set_drvdata(pdev, cgx);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		pci_set_drvdata(pdev, NULL);
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	/* Map configuration registers */
	cgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!cgx->reg_base) {
		dev_err(dev, "CGX: Cannot map CSR memory space, aborting\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	nvec = CGX_NVEC;
	err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
	if (err < 0 || err != nvec) {
		dev_err(dev, "Request for %d msix vectors failed, err %d\n",
			nvec, err);
		goto err_release_regions;
	}

	list_add(&cgx->cgx_list, &cgx_list);
	cgx->cgx_id = cgx_get_cgx_cnt() - 1;

	cgx_link_usertable_init();

	err = cgx_lmac_init(cgx);
	if (err)
		goto err_release_lmac;

	return 0;

err_release_lmac:
	cgx_lmac_exit(cgx);
	list_del(&cgx->cgx_list);
	pci_free_irq_vectors(pdev);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void cgx_remove(struct pci_dev *pdev)
{
	struct cgx *cgx = pci_get_drvdata(pdev);

	cgx_lmac_exit(cgx);
	list_del(&cgx->cgx_list);
	pci_free_irq_vectors(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

struct pci_driver cgx_driver = {
	.name = DRV_NAME,
	.id_table = cgx_id_table,
	.probe = cgx_probe,
	.remove = cgx_remove,
};