// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2015-2016 Freescale Semiconductor, Inc.
 * Copyright 2017 NXP
 */

#include <net/pfe_eth/pfe_eth.h>
#include <net/pfe_eth/pfe_firmware.h>

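/*
 * Driver-private HIF descriptor ring state, set up by hif_tx_desc_init()
 * and hif_rx_desc_init() and used by the send/receive paths below.
 */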
static struct tx_desc_s *g_tx_desc;
static struct rx_desc_s *g_rx_desc;

/*
 * HIF Rx interface function
 * Reads the Rx descriptor at the current location (rx_to_read).
 * - If the descriptor holds a valid packet, get the data pointer
 * - pick up the receive phy port number from the HIF header
 * - advance the data pointer past the HIF header
 * - reduce the data length by the HIF header size
 * - hand the packet over to the caller.
 *
 * @param[out] pkt_ptr - Pointer to store the rx packet
 * @param[out] phy_port - Pointer to store the receive phy port
 *
 * @return 0 if no packet is pending, else the packet length
 * (excluding the HIF header).
 */
int pfe_recv(uchar **pkt_ptr, int *phy_port)
{
	struct rx_desc_s *rx_desc = g_rx_desc;
	struct buf_desc *bd;
	int len = 0;

	struct hif_header_s *hif_header;

	bd = rx_desc->rx_base + rx_desc->rx_to_read;

	if (readl(&bd->ctrl) & BD_CTRL_DESC_EN)
		return len; /* No pending Rx packet */

	/* this length includes the hif_header (8 bytes) */
	len = readl(&bd->ctrl) & 0xFFFF;

	hif_header = (struct hif_header_s *)DDR_PFE_TO_VIRT(readl(&bd->data));

	/* Get the receive port info from the packet */
	debug("Pkt received:");
	debug(" Pkt ptr(%p), len(%d), gemac_port(%d) status(%08x)\n",
	      hif_header, len, hif_header->port_no, readl(&bd->status));
#ifdef DEBUG
	{
		int i;
		unsigned char *p = (unsigned char *)hif_header;

		for (i = 0; i < len; i++) {
			if (!(i % 16))
				printf("\n");
			printf(" %02x", p[i]);
		}
		printf("\n");
	}
#endif

	*pkt_ptr = (uchar *)(hif_header + 1);
	*phy_port = hif_header->port_no;
	len -= sizeof(struct hif_header_s);

	return len;
}
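
/*
 * Illustrative usage sketch (not part of the driver): a polling caller would
 * typically pair pfe_recv() with pfe_eth_free_pkt() (below) once the buffer
 * has been consumed. handle_packet() and dev are hypothetical here.
 *
 *	uchar *pkt;
 *	int port;
 *	int len = pfe_recv(&pkt, &port);
 *
 *	if (len > 0) {
 *		handle_packet(pkt, len);
 *		pfe_eth_free_pkt(dev, pkt, len);
 *	}
 */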

/*
 * HIF Rx done function
 * Re-arms the Rx descriptor at the current rx_to_read location, kicks the
 * BDP to fetch it, and advances rx_to_read to the next location.
 */
int pfe_eth_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct rx_desc_s *rx_desc = g_rx_desc;
	struct buf_desc *bd;

	debug("%s:rx_base: %p, rx_to_read: %d\n", __func__, rx_desc->rx_base,
	      rx_desc->rx_to_read);

	bd = rx_desc->rx_base + rx_desc->rx_to_read;

	/* reset the control field */
	writel((MAX_FRAME_SIZE | BD_CTRL_LIFM | BD_CTRL_DESC_EN
		    | BD_CTRL_DIR), &bd->ctrl);
	writel(0, &bd->status);

	debug("Rx Done : status: %08x, ctrl: %08x\n", readl(&bd->status),
	      readl(&bd->ctrl));

	/* Give START_STROBE to the BDP to fetch the descriptor __NOW__;
	 * the BDP then need not wait for rx_poll_cycle time to fetch it.
	 * In the idle state (i.e. no Rx packet) the BDP will not fetch
	 * the descriptor even if the strobe is given.
	 */
	writel((readl(HIF_RX_CTRL) | HIF_CTRL_BDP_CH_START_WSTB), HIF_RX_CTRL);

	/* increment the rx_to_read index to next location */
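	/* the ring size is a power of two, so the (size - 1) mask wraps */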
	rx_desc->rx_to_read = (rx_desc->rx_to_read + 1)
			       & (rx_desc->rx_ring_size - 1);

	debug("Rx next pkt location: %d\n", rx_desc->rx_to_read);

	return 0;
}

/*
 * HIF Tx interface function
 * This function sends a single packet to the PFE through the HIF interface.
 * - No interrupt indication on Tx completion.
 * - Data is copied to the Tx buffers before the Tx descriptor is updated
 *   and TX DMA is enabled.
 *
 * @param[in] phy_port	Phy port number on which to send this packet
 * @param[in] data	Pointer to the data
 * @param[in] length	Length of the Ethernet packet to be transferred.
 *
 * @return -1 if the Tx queue is full, else the Tx location where the packet
 * was placed.
 */
int pfe_send(int phy_port, void *data, int length)
{
	struct tx_desc_s *tx_desc = g_tx_desc;
	struct buf_desc *bd;
	struct hif_header_s hif_header;
	u8 *tx_buf_va;

	debug("%s:pkt: %p, len: %d, tx_base: %p, tx_to_send: %d\n", __func__,
	      data, length, tx_desc->tx_base, tx_desc->tx_to_send);

	bd = tx_desc->tx_base + tx_desc->tx_to_send;

	/* check queue-full condition */
	if (readl(&bd->ctrl) & BD_CTRL_DESC_EN)
		return -1;

	/* PFE checks for min pkt size */
	if (length < MIN_PKT_SIZE)
		length = MIN_PKT_SIZE;

	tx_buf_va = (void *)DDR_PFE_TO_VIRT(readl(&bd->data));
	debug("%s: tx_buf_va: %p, tx_buf_pa: %08x\n", __func__, tx_buf_va,
	      readl(&bd->data));

	/* Fill the gemac/phy port number to send this packet out */
	memset(&hif_header, 0, sizeof(struct hif_header_s));
	hif_header.port_no = phy_port;

	memcpy(tx_buf_va, (u8 *)&hif_header, sizeof(struct hif_header_s));
	memcpy(tx_buf_va + sizeof(struct hif_header_s), data, length);
	length += sizeof(struct hif_header_s);

#ifdef DEBUG
	{
		int i;
		unsigned char *p = (unsigned char *)tx_buf_va;

		for (i = 0; i < length; i++) {
			if (!(i % 16))
				printf("\n");
			printf("%02x ", p[i]);
		}
	}
#endif

	debug("Tx Done: status: %08x, ctrl: %08x\n", readl(&bd->status),
	      readl(&bd->ctrl));

	/* fill the tx desc */
	writel((u32)(BD_CTRL_DESC_EN | BD_CTRL_LIFM | (length & 0xFFFF)),
	       &bd->ctrl);
	writel(0, &bd->status);

	writel((HIF_CTRL_DMA_EN | HIF_CTRL_BDP_CH_START_WSTB), HIF_TX_CTRL);

	udelay(100);

	return tx_desc->tx_to_send;
}
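
/*
 * Illustrative usage sketch (not part of the driver): a caller would queue a
 * frame on a GEMAC port and then poll pfe_tx_done() (below) until the BDP
 * releases the descriptor, e.g.
 *
 *	if (pfe_send(phy_port, pkt, len) >= 0)
 *		while (pfe_tx_done() != 0)
 *			;
 */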

/*
 * HIF function to check for Tx completion
 * This function checks the Tx done indication at the current tx_to_send
 * location and, on success, moves tx_to_send to the next location.
 *
 * @return -1 if the Tx ownership bit has not been cleared by the hw,
 * else zero on success (Tx done).
 */
int pfe_tx_done(void)
{
	struct tx_desc_s *tx_desc = g_tx_desc;
	struct buf_desc *bd;

	debug("%s:tx_base: %p, tx_to_send: %d\n", __func__, tx_desc->tx_base,
	      tx_desc->tx_to_send);

	bd = tx_desc->tx_base + tx_desc->tx_to_send;

	/* descriptor still owned by the BDP: Tx not yet done */
	if (readl(&bd->ctrl) & BD_CTRL_DESC_EN)
		return -1;

	/* reset the control field */
	writel(0, &bd->ctrl);
	writel(0, &bd->status);

	debug("Tx Done : status: %08x, ctrl: %08x\n", readl(&bd->status),
	      readl(&bd->ctrl));

	/* increment the tx_to_send index to next location */
	tx_desc->tx_to_send = (tx_desc->tx_to_send + 1)
			       & (tx_desc->tx_ring_size - 1);

	debug("Tx next pkt location: %d\n", tx_desc->tx_to_send);

	return 0;
}

/*
 * Helper function to dump Rx descriptors.
 */
static inline void hif_rx_desc_dump(void)
{
	struct buf_desc *bd_va;
	int i;
	struct rx_desc_s *rx_desc;

	if (!g_rx_desc) {
		printf("%s: HIF Rx desc not initialized\n", __func__);
		return;
	}

	rx_desc = g_rx_desc;
	bd_va = rx_desc->rx_base;

	debug("HIF rx desc: base_va: %p, base_pa: %08x\n", rx_desc->rx_base,
	      rx_desc->rx_base_pa);
	for (i = 0; i < rx_desc->rx_ring_size; i++) {
		debug("status: %08x, ctrl: %08x, data: %08x, next: 0x%08x\n",
		      readl(&bd_va->status),
		      readl(&bd_va->ctrl),
		      readl(&bd_va->data),
		      readl(&bd_va->next));
		bd_va++;
	}
}

/*
 * This function marks all Rx descriptors as LAST_BD.
 */
void hif_rx_desc_disable(void)
{
	int i;
	struct rx_desc_s *rx_desc;
	struct buf_desc *bd_va;

	if (!g_rx_desc) {
		printf("%s: HIF Rx desc not initialized\n", __func__);
		return;
	}

	rx_desc = g_rx_desc;
	bd_va = rx_desc->rx_base;

	for (i = 0; i < rx_desc->rx_ring_size; i++) {
		writel(readl(&bd_va->ctrl) | BD_CTRL_LAST_BD, &bd_va->ctrl);
		bd_va++;
	}
}

/*
 * HIF Rx Desc initialization function.
 */
static int hif_rx_desc_init(struct pfe_ddr_address *pfe_addr)
{
	u32 ctrl;
	struct buf_desc *bd_va;
	struct buf_desc *bd_pa;
	struct rx_desc_s *rx_desc;
	u32 rx_buf_pa;
	int i;

	/* sanity check */
	if (g_rx_desc) {
		printf("%s: HIF Rx desc re-init request\n", __func__);
		return 0;
	}

	rx_desc = (struct rx_desc_s *)malloc(sizeof(struct rx_desc_s));
	if (!rx_desc) {
		printf("%s: Memory allocation failure\n", __func__);
		return -ENOMEM;
	}
	memset(rx_desc, 0, sizeof(struct rx_desc_s));

	/* init: Rx ring buffer */
	rx_desc->rx_ring_size = HIF_RX_DESC_NT;

	/* NOTE: must be 64bit aligned  */
	bd_va = (struct buf_desc *)(pfe_addr->ddr_pfe_baseaddr
		 + RX_BD_BASEADDR);
	bd_pa = (struct buf_desc *)(pfe_addr->ddr_pfe_phys_baseaddr
				    + RX_BD_BASEADDR);

	rx_desc->rx_base = bd_va;
	rx_desc->rx_base_pa = (unsigned long)bd_pa;

	rx_buf_pa = pfe_addr->ddr_pfe_phys_baseaddr + HIF_RX_PKT_DDR_BASEADDR;

	debug("%s: Rx desc base: %p, base_pa: %08x, desc_count: %d\n",
	      __func__, rx_desc->rx_base, rx_desc->rx_base_pa,
	      rx_desc->rx_ring_size);

	memset(bd_va, 0, sizeof(struct buf_desc) * rx_desc->rx_ring_size);

	ctrl = (MAX_FRAME_SIZE | BD_CTRL_DESC_EN | BD_CTRL_DIR | BD_CTRL_LIFM);

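	/*
	 * Chain the descriptors and point each one at its packet buffer;
	 * the Rx buffers sit contiguously in PFE DDR at a MAX_FRAME_SIZE
	 * stride starting at HIF_RX_PKT_DDR_BASEADDR.
	 */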
	for (i = 0; i < rx_desc->rx_ring_size; i++) {
		writel((unsigned long)(bd_pa + 1), &bd_va->next);
		writel(ctrl, &bd_va->ctrl);
		writel(rx_buf_pa + (i * MAX_FRAME_SIZE), &bd_va->data);
		bd_va++;
		bd_pa++;
	}
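	/* close the ring: point the last descriptor's next back to the base */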
	--bd_va;
	writel((u32)rx_desc->rx_base_pa, &bd_va->next);

	writel(rx_desc->rx_base_pa, HIF_RX_BDP_ADDR);
	writel((readl(HIF_RX_CTRL) | HIF_CTRL_BDP_CH_START_WSTB), HIF_RX_CTRL);

	g_rx_desc = rx_desc;

	return 0;
}

/*
 * Helper function to dump Tx Descriptors.
 */
static inline void hif_tx_desc_dump(void)
{
	struct tx_desc_s *tx_desc;
	int i;
	struct buf_desc *bd_va;

	if (!g_tx_desc) {
		printf("%s: HIF Tx desc not initialized\n", __func__);
		return;
	}

	tx_desc = g_tx_desc;
	bd_va = tx_desc->tx_base;

	debug("HIF tx desc: base_va: %p, base_pa: %08x\n", tx_desc->tx_base,
	      tx_desc->tx_base_pa);

	for (i = 0; i < tx_desc->tx_ring_size; i++)
		bd_va++;
}

/*
 * HIF Tx descriptor initialization function.
 */
static int hif_tx_desc_init(struct pfe_ddr_address *pfe_addr)
{
	struct buf_desc *bd_va;
	struct buf_desc *bd_pa;
	int i;
	struct tx_desc_s *tx_desc;
	u32 tx_buf_pa;

	/* sanity check */
	if (g_tx_desc) {
		printf("%s: HIF Tx desc re-init request\n", __func__);
		return 0;
	}

	tx_desc = (struct tx_desc_s *)malloc(sizeof(struct tx_desc_s));
	if (!tx_desc) {
		printf("%s:%d:Memory allocation failure\n", __func__,
		       __LINE__);
		return -ENOMEM;
	}
	memset(tx_desc, 0, sizeof(struct tx_desc_s));

	/* init: Tx ring buffer */
	tx_desc->tx_ring_size = HIF_TX_DESC_NT;

	/* NOTE: must be 64bit aligned  */
	bd_va = (struct buf_desc *)(pfe_addr->ddr_pfe_baseaddr
		 + TX_BD_BASEADDR);
	bd_pa = (struct buf_desc *)(pfe_addr->ddr_pfe_phys_baseaddr
				    + TX_BD_BASEADDR);

	tx_desc->tx_base_pa = (unsigned long)bd_pa;
	tx_desc->tx_base = bd_va;

	debug("%s: Tx desc_base: %p, base_pa: %08x, desc_count: %d\n",
	      __func__, tx_desc->tx_base, tx_desc->tx_base_pa,
	      tx_desc->tx_ring_size);

	memset(bd_va, 0, sizeof(struct buf_desc) * tx_desc->tx_ring_size);

	tx_buf_pa = pfe_addr->ddr_pfe_phys_baseaddr + HIF_TX_PKT_DDR_BASEADDR;

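	/*
	 * Chain the descriptors and point each one at its packet buffer;
	 * the Tx buffers sit contiguously in PFE DDR at a MAX_FRAME_SIZE
	 * stride starting at HIF_TX_PKT_DDR_BASEADDR.
	 */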
	for (i = 0; i < tx_desc->tx_ring_size; i++) {
		writel((unsigned long)(bd_pa + 1), &bd_va->next);
		writel(tx_buf_pa + (i * MAX_FRAME_SIZE), &bd_va->data);
		bd_va++;
		bd_pa++;
	}
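	/* close the ring: point the last descriptor's next back to the base */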
	--bd_va;
	writel((u32)tx_desc->tx_base_pa, &bd_va->next);

	writel(tx_desc->tx_base_pa, HIF_TX_BDP_ADDR);

	g_tx_desc = tx_desc;

	return 0;
}

/*
 * PFE/Class initialization.
 */
static void pfe_class_init(struct pfe_ddr_address *pfe_addr)
{
	struct class_cfg class_cfg = {
		.route_table_baseaddr = pfe_addr->ddr_pfe_phys_baseaddr +
					ROUTE_TABLE_BASEADDR,
		.route_table_hash_bits = ROUTE_TABLE_HASH_BITS,
	};

	class_init(&class_cfg);

	debug("class init complete\n");
}

/*
 * PFE/TMU initialization.
 */
static void pfe_tmu_init(struct pfe_ddr_address *pfe_addr)
{
	struct tmu_cfg tmu_cfg = {
		.llm_base_addr = pfe_addr->ddr_pfe_phys_baseaddr
				 + TMU_LLM_BASEADDR,
		.llm_queue_len = TMU_LLM_QUEUE_LEN,
	};

	tmu_init(&tmu_cfg);

	debug("tmu init complete\n");
}

/*
 * PFE/BMU (both BMU1 & BMU2) initialization.
 */
static void pfe_bmu_init(struct pfe_ddr_address *pfe_addr)
{
	struct bmu_cfg bmu1_cfg = {
		.baseaddr = CBUS_VIRT_TO_PFE(LMEM_BASE_ADDR +
						BMU1_LMEM_BASEADDR),
		.count = BMU1_BUF_COUNT,
		.size = BMU1_BUF_SIZE,
	};

	struct bmu_cfg bmu2_cfg = {
		.baseaddr = pfe_addr->ddr_pfe_phys_baseaddr + BMU2_DDR_BASEADDR,
		.count = BMU2_BUF_COUNT,
		.size = BMU2_BUF_SIZE,
	};

	bmu_init(BMU1_BASE_ADDR, &bmu1_cfg);
	debug("bmu1 init: done\n");

	bmu_init(BMU2_BASE_ADDR, &bmu2_cfg);
	debug("bmu2 init: done\n");
}

/*
 * PFE/GPI initialization function.
 *  - egpi1, egpi2, hgpi
 */
static void pfe_gpi_init(struct pfe_ddr_address *pfe_addr)
{
	struct gpi_cfg egpi1_cfg = {
		.lmem_rtry_cnt = EGPI1_LMEM_RTRY_CNT,
		.tmlf_txthres = EGPI1_TMLF_TXTHRES,
		.aseq_len = EGPI1_ASEQ_LEN,
	};

	struct gpi_cfg egpi2_cfg = {
		.lmem_rtry_cnt = EGPI2_LMEM_RTRY_CNT,
		.tmlf_txthres = EGPI2_TMLF_TXTHRES,
		.aseq_len = EGPI2_ASEQ_LEN,
	};

	struct gpi_cfg hgpi_cfg = {
		.lmem_rtry_cnt = HGPI_LMEM_RTRY_CNT,
		.tmlf_txthres = HGPI_TMLF_TXTHRES,
		.aseq_len = HGPI_ASEQ_LEN,
	};

	gpi_init(EGPI1_BASE_ADDR, &egpi1_cfg);
	debug("GPI1 init complete\n");

	gpi_init(EGPI2_BASE_ADDR, &egpi2_cfg);
	debug("GPI2 init complete\n");

	gpi_init(HGPI_BASE_ADDR, &hgpi_cfg);
	debug("HGPI init complete\n");
}

/*
 * PFE/HIF initialization function.
 */
static int pfe_hif_init(struct pfe_ddr_address *pfe_addr)
{
	int ret = 0;

	hif_tx_disable();
	hif_rx_disable();

	ret = hif_tx_desc_init(pfe_addr);
	if (ret)
		return ret;
	ret = hif_rx_desc_init(pfe_addr);
	if (ret)
		return ret;

	hif_init();

	hif_tx_enable();
	hif_rx_enable();

	hif_rx_desc_dump();
	hif_tx_desc_dump();

	debug("HIF init complete\n");
	return ret;
}

/*
 * PFE initialization
 * - Firmware loading (CLASS-PE and TMU-PE)
 * - BMU1 and BMU2 init
 * - GEMAC init
 * - GPI init
 * - CLASS-PE init
 * - TMU-PE init
 * - HIF Tx and Rx descriptor init
 *
 * @param[in]	pfe_addr	Pointer to the PFE DDR address structure.
 *
 * @return 0 on success.
 */
static int pfe_hw_init(struct pfe_ddr_address *pfe_addr)
{
	int ret = 0;

	debug("%s: start\n", __func__);

	writel(0x3, CLASS_PE_SYS_CLK_RATIO);
	writel(0x3, TMU_PE_SYS_CLK_RATIO);
	writel(0x3, UTIL_PE_SYS_CLK_RATIO);
	udelay(10);

	pfe_class_init(pfe_addr);

	pfe_tmu_init(pfe_addr);

	pfe_bmu_init(pfe_addr);

	pfe_gpi_init(pfe_addr);

	ret = pfe_hif_init(pfe_addr);
	if (ret)
		return ret;

	bmu_enable(BMU1_BASE_ADDR);
	debug("bmu1 enabled\n");

	bmu_enable(BMU2_BASE_ADDR);
	debug("bmu2 enabled\n");

	debug("%s: done\n", __func__);

	return ret;
}

/*
 * PFE driver init function
 * - Initializes pfe_lib
 * - PFE hw init
 * - Firmware loading and enabling the PEs
 * - Should be executed once.
 *
 * @param[in] pfe_addr	Pointer to the PFE DDR address structure
 */
int pfe_drv_init(struct pfe_ddr_address *pfe_addr)
{
	int ret = 0;

	pfe_lib_init();

	ret = pfe_hw_init(pfe_addr);
	if (ret)
		return ret;

	/* Load the Class, TMU and Util firmware.
	 * By now the PFE is:
	 * - out of reset + disabled + configured.
	 * Firmware loading must be done after pfe_hw_init().
	 */
	/* It loads the default built-in SBL firmware */
	pfe_firmware_init();

	return ret;
}
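
/*
 * Illustrative bring-up order (sketch, not prescribed by this file): board
 * or bus code calls pfe_drv_init() once with the probed PFE DDR addresses;
 * after that the per-port Ethernet driver can use pfe_send()/pfe_recv(),
 * and pfe_eth_remove() releases the descriptor state on device removal.
 */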

/*
 * PFE remove function
 * - Stops the PEs
 * - Frees the Tx/Rx descriptor resources
 * - Should be called once.
 *
 * @param[in] dev	Pointer to the eth device.
 */
int pfe_eth_remove(struct udevice *dev)
{
	if (g_tx_desc)
		free(g_tx_desc);

	if (g_rx_desc)
		free(g_rx_desc);

	pfe_firmware_exit();

	return 0;
}