/*
 * Copyright (C) 2014 Freescale Semiconductor
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <asm/io.h>
#include <asm/types.h>
#include <malloc.h>
#include <net.h>
#include <hwconfig.h>
#include <phy.h>
#include <linux/compat.h>

#include "ldpaa_eth.h"

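/*
 * External PHY management is not implemented yet (init_phy() below is a
 * stub), so force the non-PHYLIB code paths in this file even when the
 * board config enables PHYLIB.
 */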
#undef CONFIG_PHYLIB
static int init_phy(struct eth_device *dev)
{
	/* TODO: initialize the external PHY */

	return 0;
}

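/*
 * Process one received frame descriptor: check the frame annotation status
 * (when valid) for Rx errors, pass good frames to the network stack and
 * release the buffer back to the buffer pool through the QBMAN software
 * portal.
 */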
static void ldpaa_eth_rx(struct ldpaa_eth_priv *priv,
			 const struct dpaa_fd *fd)
{
	u64 fd_addr;
	uint16_t fd_offset;
	uint32_t fd_length;
	struct ldpaa_fas *fas;
	uint32_t status;
	int err;
	u32 timeo = (CONFIG_SYS_HZ * 2) / 1000;
	u32 time_start;
	struct qbman_release_desc releasedesc;
	struct qbman_swp *swp = dflt_dpio->sw_portal;

	fd_addr = ldpaa_fd_get_addr(fd);
	fd_offset = ldpaa_fd_get_offset(fd);
	fd_length = ldpaa_fd_get_len(fd);

	debug("Rx frame: data addr=0x%p size=0x%x\n", (u64 *)fd_addr, fd_length);

	if (fd->simple.frc & LDPAA_FD_FRC_FASV) {
		/* Read the frame annotation status word and check for errors */
		fas = (struct ldpaa_fas *)
				((uint8_t *)(fd_addr) +
				priv->buf_layout.private_data_size);
		status = le32_to_cpu(fas->status);
		if (status & LDPAA_ETH_RX_ERR_MASK) {
			printf("Rx frame error(s): 0x%08x\n",
			       status & LDPAA_ETH_RX_ERR_MASK);
			goto error;
		} else if (status & LDPAA_ETH_RX_UNSUPP_MASK) {
			printf("Unsupported feature in bitmask: 0x%08x\n",
			       status & LDPAA_ETH_RX_UNSUPP_MASK);
			goto error;
		}
	}

	debug("Rx frame: passing to upper layer\n");
	net_process_received_packet((uint8_t *)(fd_addr) + fd_offset,
				    fd_length);

error:
	flush_dcache_range(fd_addr, fd_addr + LDPAA_ETH_RX_BUFFER_SIZE);
	qbman_release_desc_clear(&releasedesc);
	qbman_release_desc_set_bpid(&releasedesc, dflt_dpbp->dpbp_attr.bpid);
	time_start = get_timer(0);
	do {
		/* Release the buffer back into the QBMAN buffer pool */
		err = qbman_swp_release(swp, &releasedesc, &fd_addr, 1);
	} while (get_timer(time_start) < timeo && err == -EBUSY);

	if (err == -EBUSY)
		printf("Rx frame: QBMAN buffer release failed\n");

	return;
}

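/*
 * Poll the default Rx frame queue through the QBMAN software portal: issue
 * a pull dequeue for a single frame, wait for a DQRR entry, and hand any
 * valid frame descriptor to ldpaa_eth_rx(). Retries a few times on portal
 * errors or invalid frames.
 */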
static int ldpaa_eth_pull_dequeue_rx(struct eth_device *dev)
{
	struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)dev->priv;
	const struct ldpaa_dq *dq;
	const struct dpaa_fd *fd;
	int i = 5, err = 0, status;
	u32 timeo = (CONFIG_SYS_HZ * 2) / 1000;
	u32 time_start;
	static struct qbman_pull_desc pulldesc;
	struct qbman_swp *swp = dflt_dpio->sw_portal;

	while (--i) {
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_numframes(&pulldesc, 1);
		qbman_pull_desc_set_fq(&pulldesc, priv->rx_dflt_fqid);

		err = qbman_swp_pull(swp, &pulldesc);
		if (err < 0) {
			printf("Dequeue frames error: 0x%08x\n", err);
			continue;
		}

		time_start = get_timer(0);

		do {
			dq = qbman_swp_dqrr_next(swp);
		} while (get_timer(time_start) < timeo && !dq);

		if (dq) {
			/* Check for a valid frame. If the frame is not
			 * valid, only send a consume confirmation to QBMAN;
			 * otherwise hand the frame to the upper layer and
			 * then send the consume confirmation.
			 */
			status = (uint8_t)ldpaa_dq_flags(dq);
			if ((status & LDPAA_DQ_STAT_VALIDFRAME) == 0) {
				debug("Dequeue RX frames:");
				debug("No frame delivered\n");

				qbman_swp_dqrr_consume(swp, dq);
				continue;
			}

			fd = ldpaa_dq_fd(dq);

			/* Obtain the FD and process it */
			ldpaa_eth_rx(priv, fd);
			qbman_swp_dqrr_consume(swp, dq);
			break;
		} else {
			err = -ENODATA;
			debug("No DQRR entries\n");
			break;
		}
	}

	return err;
}

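/*
 * Transmit one frame: acquire a buffer from the buffer pool, copy the packet
 * into it at the Tx data offset, build a frame descriptor and enqueue it to
 * the Tx queuing destination. On enqueue failure the buffer is released
 * back to the pool.
 */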
static int ldpaa_eth_tx(struct eth_device *net_dev, void *buf, int len)
{
	struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)net_dev->priv;
	struct dpaa_fd fd;
	u64 buffer_start;
	int data_offset, err;
	u32 timeo = (CONFIG_SYS_HZ * 10) / 1000;
	u32 time_start;
	struct qbman_swp *swp = dflt_dpio->sw_portal;
	struct qbman_eq_desc ed;
	struct qbman_release_desc releasedesc;

	/* Setup the FD fields */
	memset(&fd, 0, sizeof(fd));

	data_offset = priv->tx_data_offset;

	do {
		err = qbman_swp_acquire(dflt_dpio->sw_portal,
					dflt_dpbp->dpbp_attr.bpid,
					&buffer_start, 1);
	} while (err == -EBUSY);

	if (err < 0) {
		printf("qbman_swp_acquire() failed\n");
		return -ENOMEM;
	}

	debug("TX data: pool buffer start=0x%p\n", (u64 *)buffer_start);

	memcpy(((uint8_t *)(buffer_start) + data_offset), buf, len);

	flush_dcache_range(buffer_start, buffer_start +
					LDPAA_ETH_RX_BUFFER_SIZE);

	ldpaa_fd_set_addr(&fd, (u64)buffer_start);
	ldpaa_fd_set_offset(&fd, (uint16_t)(data_offset));
	ldpaa_fd_set_bpid(&fd, dflt_dpbp->dpbp_attr.bpid);
	ldpaa_fd_set_len(&fd, len);

	fd.simple.ctrl = LDPAA_FD_CTRL_ASAL | LDPAA_FD_CTRL_PTA |
				LDPAA_FD_CTRL_PTV1;

	qbman_eq_desc_clear(&ed);
	qbman_eq_desc_set_no_orp(&ed, 0);
	qbman_eq_desc_set_qd(&ed, priv->tx_qdid, priv->tx_flow_id, 0);

	time_start = get_timer(0);

	while (get_timer(time_start) < timeo) {
		err = qbman_swp_enqueue(swp, &ed,
				(const struct qbman_fd *)(&fd));
		if (err != -EBUSY)
			break;
	}

	if (err < 0) {
		printf("error enqueueing Tx frame\n");
		goto error;
	}

	return err;

error:
	qbman_release_desc_clear(&releasedesc);
	qbman_release_desc_set_bpid(&releasedesc, dflt_dpbp->dpbp_attr.bpid);
	time_start = get_timer(0);
	do {
		/* Release the buffer back into the QBMAN buffer pool */
		err = qbman_swp_release(swp, &releasedesc, &buffer_start, 1);
	} while (get_timer(time_start) < timeo && err == -EBUSY);

	if (err == -EBUSY)
		printf("TX data: QBMAN buffer release failed\n");

	return err;
}

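/*
 * Bring the interface up: configure the DPNI and DPBP objects, bind them,
 * program the MAC address, enable the DPNI and look up the default Rx
 * frame queue ID and the Tx queuing destination ID used by the Rx/Tx paths.
 */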
static int ldpaa_eth_open(struct eth_device *net_dev, bd_t *bd)
{
	struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)net_dev->priv;
	struct dpni_queue_attr rx_queue_attr;
	int err;

	if (net_dev->state == ETH_STATE_ACTIVE)
		return 0;

	/* DPNI initialization */
	err = ldpaa_dpni_setup(priv);
	if (err < 0)
		goto err_dpni_setup;

	err = ldpaa_dpbp_setup();
	if (err < 0)
		goto err_dpbp_setup;

	/* Bind the DPNI to the DPBP */
	err = ldpaa_dpni_bind(priv);
	if (err)
		goto err_bind;

	err = dpni_add_mac_addr(dflt_mc_io, MC_CMD_NO_FLAGS,
				priv->dpni_handle, net_dev->enetaddr);
	if (err) {
		printf("dpni_add_mac_addr() failed\n");
		return err;
	}

#ifdef CONFIG_PHYLIB
	/* TODO: check this path */
	err = phy_startup(priv->phydev);
	if (err) {
		printf("%s: Could not initialize PHY\n",
		       priv->phydev->dev->name);
		return err;
	}
#else
	priv->phydev->speed = SPEED_1000;
	priv->phydev->link = 1;
	priv->phydev->duplex = DUPLEX_FULL;
#endif

	err = dpni_enable(dflt_mc_io, MC_CMD_NO_FLAGS, priv->dpni_handle);
	if (err < 0) {
		printf("dpni_enable() failed\n");
		return err;
	}

	/* TODO: support multiple Rx flows */
	err = dpni_get_rx_flow(dflt_mc_io, MC_CMD_NO_FLAGS, priv->dpni_handle,
			       0, 0, &rx_queue_attr);
	if (err) {
		printf("dpni_get_rx_flow() failed\n");
		goto err_rx_flow;
	}

	priv->rx_dflt_fqid = rx_queue_attr.fqid;

	err = dpni_get_qdid(dflt_mc_io, MC_CMD_NO_FLAGS, priv->dpni_handle,
			    &priv->tx_qdid);
	if (err) {
		printf("dpni_get_qdid() failed\n");
		goto err_qdid;
	}

	if (!priv->phydev->link)
		printf("%s: No link.\n", priv->phydev->dev->name);

	return priv->phydev->link ? 0 : -1;

err_qdid:
err_rx_flow:
	dpni_disable(dflt_mc_io, MC_CMD_NO_FLAGS, priv->dpni_handle);
err_bind:
	ldpaa_dpbp_free();
err_dpbp_setup:
	dpni_close(dflt_mc_io, MC_CMD_NO_FLAGS, priv->dpni_handle);
err_dpni_setup:
	return err;
}

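/*
 * Bring the interface down: disable Rx/Tx traffic on the DPNI, drain and
 * free the buffer pool, then reset and close the DPNI object.
 */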
static void ldpaa_eth_stop(struct eth_device *net_dev)
{
	struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)net_dev->priv;
	int err = 0;

	if ((net_dev->state == ETH_STATE_PASSIVE) ||
	    (net_dev->state == ETH_STATE_INIT))
		return;

	/* Stop Tx and Rx traffic */
	err = dpni_disable(dflt_mc_io, MC_CMD_NO_FLAGS, priv->dpni_handle);
	if (err < 0)
		printf("dpni_disable() failed\n");

#ifdef CONFIG_PHYLIB
	phy_shutdown(priv->phydev);
#endif

	ldpaa_dpbp_free();
	dpni_reset(dflt_mc_io, MC_CMD_NO_FLAGS, priv->dpni_handle);
	dpni_close(dflt_mc_io, MC_CMD_NO_FLAGS, priv->dpni_handle);
}

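/*
 * Acquire up to 'count' (at most 7) buffers at a time from the default
 * buffer pool and free the backing memory, until the pool returns none.
 */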
static void ldpaa_dpbp_drain_cnt(int count)
{
	uint64_t buf_array[7];
	void *addr;
	int ret, i;

	BUG_ON(count > 7);

	do {
		ret = qbman_swp_acquire(dflt_dpio->sw_portal,
					dflt_dpbp->dpbp_attr.bpid,
					buf_array, count);
		if (ret < 0) {
			printf("qbman_swp_acquire() failed\n");
			return;
		}
		for (i = 0; i < ret; i++) {
			addr = (void *)buf_array[i];
			debug("Free: buffer addr=0x%p\n", addr);
			free(addr);
		}
	} while (ret);
}

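/* Drain the whole buffer pool, seven buffers at a time */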
static void ldpaa_dpbp_drain(void)
{
	int i;

	for (i = 0; i < LDPAA_ETH_NUM_BUFS; i += 7)
		ldpaa_dpbp_drain_cnt(7);
}

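/*
 * Allocate, zero and flush up to seven Rx buffers and release them into the
 * buffer pool identified by 'bpid'. Returns the number of buffers actually
 * released (0 if the first allocation fails).
 */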
static int ldpaa_bp_add_7(uint16_t bpid)
{
	uint64_t buf_array[7];
	u8 *addr;
	int i;
	struct qbman_release_desc rd;

	for (i = 0; i < 7; i++) {
		addr = memalign(L1_CACHE_BYTES, LDPAA_ETH_RX_BUFFER_SIZE);
		if (!addr) {
			printf("addr allocation failed\n");
			goto err_alloc;
		}
		memset(addr, 0x00, LDPAA_ETH_RX_BUFFER_SIZE);
		flush_dcache_range((u64)addr,
				   (u64)(addr + LDPAA_ETH_RX_BUFFER_SIZE));

		buf_array[i] = (uint64_t)addr;
		debug("Release: buffer addr=0x%p\n", addr);
	}

release_bufs:
	/* In case the portal is busy, retry until successful.
	 * This function is guaranteed to succeed in a reasonable amount
	 * of time.
	 */
	do {
		mdelay(1);
		qbman_release_desc_clear(&rd);
		qbman_release_desc_set_bpid(&rd, bpid);
	} while (qbman_swp_release(dflt_dpio->sw_portal, &rd, buf_array, i));

	return i;

err_alloc:
	if (i)
		goto release_bufs;

	return 0;
}

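/*
 * Pre-populate the buffer pool with LDPAA_ETH_NUM_BUFS Rx buffers, seven at
 * a time.
 */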
static int ldpaa_dpbp_seed(uint16_t bpid)
{
	int i;
	int count;

	for (i = 0; i < LDPAA_ETH_NUM_BUFS; i += 7) {
		count = ldpaa_bp_add_7(bpid);
		if (count < 7)
			printf("Buffer seeding: only %d of 7 buffers released\n",
			       count);
	}

	return 0;
}

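/*
 * Open and enable the default DPBP object, read back its attributes (for
 * the hardware buffer pool ID) and seed it with Rx buffers.
 */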
static int ldpaa_dpbp_setup(void)
{
	int err;

	err = dpbp_open(dflt_mc_io, MC_CMD_NO_FLAGS, dflt_dpbp->dpbp_attr.id,
			&dflt_dpbp->dpbp_handle);
	if (err) {
		printf("dpbp_open() failed\n");
		goto err_open;
	}

	err = dpbp_enable(dflt_mc_io, MC_CMD_NO_FLAGS, dflt_dpbp->dpbp_handle);
	if (err) {
		printf("dpbp_enable() failed\n");
		goto err_enable;
	}

	err = dpbp_get_attributes(dflt_mc_io, MC_CMD_NO_FLAGS,
				  dflt_dpbp->dpbp_handle,
				  &dflt_dpbp->dpbp_attr);
	if (err) {
		printf("dpbp_get_attributes() failed\n");
		goto err_get_attr;
	}

	err = ldpaa_dpbp_seed(dflt_dpbp->dpbp_attr.bpid);
	if (err) {
		printf("Buffer seeding failed for DPBP %d (bpid=%d)\n",
		       dflt_dpbp->dpbp_attr.id, dflt_dpbp->dpbp_attr.bpid);
		goto err_seed;
	}

	return 0;

err_seed:
err_get_attr:
	dpbp_disable(dflt_mc_io, MC_CMD_NO_FLAGS, dflt_dpbp->dpbp_handle);
err_enable:
	dpbp_close(dflt_mc_io, MC_CMD_NO_FLAGS, dflt_dpbp->dpbp_handle);
err_open:
	return err;
}

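/* Drain the buffer pool, then disable, reset and close the DPBP object */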
static void ldpaa_dpbp_free(void)
{
	ldpaa_dpbp_drain();
	dpbp_disable(dflt_mc_io, MC_CMD_NO_FLAGS, dflt_dpbp->dpbp_handle);
	dpbp_reset(dflt_mc_io, MC_CMD_NO_FLAGS, dflt_dpbp->dpbp_handle);
	dpbp_close(dflt_mc_io, MC_CMD_NO_FLAGS, dflt_dpbp->dpbp_handle);
}

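/*
 * Open the DPNI object for this interface, configure the Rx, Tx and
 * Tx-confirmation buffer layouts and retrieve the minimum Tx data offset.
 */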
static int ldpaa_dpni_setup(struct ldpaa_eth_priv *priv)
{
	int err;

	/* Get a handle for the DPNI this interface is associated with */
	err = dpni_open(dflt_mc_io, MC_CMD_NO_FLAGS, priv->dpni_id,
			&priv->dpni_handle);
	if (err) {
		printf("dpni_open() failed\n");
		goto err_open;
	}

	err = dpni_get_attributes(dflt_mc_io, MC_CMD_NO_FLAGS,
				  priv->dpni_handle, &priv->dpni_attrs);
	if (err) {
		printf("dpni_get_attributes() failed (err=%d)\n", err);
		goto err_get_attr;
	}

	/* Configure our buffers' layout */
	priv->buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
				   DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
				   DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
	priv->buf_layout.pass_parser_result = true;
	priv->buf_layout.pass_frame_status = true;
	priv->buf_layout.private_data_size = LDPAA_ETH_SWA_SIZE;
	/* ...rx, ... */
	err = dpni_set_rx_buffer_layout(dflt_mc_io, MC_CMD_NO_FLAGS,
					priv->dpni_handle, &priv->buf_layout);
	if (err) {
		printf("dpni_set_rx_buffer_layout() failed\n");
		goto err_buf_layout;
	}

	/* ... tx, ... */
	priv->buf_layout.options &= ~DPNI_BUF_LAYOUT_OPT_PARSER_RESULT;
	err = dpni_set_tx_buffer_layout(dflt_mc_io, MC_CMD_NO_FLAGS,
					priv->dpni_handle, &priv->buf_layout);
	if (err) {
		printf("dpni_set_tx_buffer_layout() failed\n");
		goto err_buf_layout;
	}

	/* ... tx-confirm. */
	priv->buf_layout.options &= ~DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
	err = dpni_set_tx_conf_buffer_layout(dflt_mc_io, MC_CMD_NO_FLAGS,
					     priv->dpni_handle,
					     &priv->buf_layout);
	if (err) {
		printf("dpni_set_tx_conf_buffer_layout() failed\n");
		goto err_buf_layout;
	}

	/* Now that we've set our tx buffer layout, retrieve the minimum
	 * required tx data offset.
	 */
	err = dpni_get_tx_data_offset(dflt_mc_io, MC_CMD_NO_FLAGS,
				      priv->dpni_handle, &priv->tx_data_offset);
	if (err) {
		printf("dpni_get_tx_data_offset() failed\n");
		goto err_data_offset;
	}

	/* Warn in case the TX data offset is not a multiple of 64 bytes */
	WARN_ON(priv->tx_data_offset % 64);

	/* Accommodate the software annotation (SWA) space */
	priv->tx_data_offset += LDPAA_ETH_SWA_SIZE;
	debug("priv->tx_data_offset=%d\n", priv->tx_data_offset);

	return 0;

err_data_offset:
err_buf_layout:
err_get_attr:
	dpni_close(dflt_mc_io, MC_CMD_NO_FLAGS, priv->dpni_handle);
err_open:
	return err;
}

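/*
 * Bind the DPNI to the default buffer pool and configure the default Tx
 * flow so that only Tx error frames are reported back.
 */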
static int ldpaa_dpni_bind(struct ldpaa_eth_priv *priv)
{
	struct dpni_pools_cfg pools_params;
	struct dpni_tx_flow_cfg dflt_tx_flow;
	int err = 0;

	memset(&pools_params, 0, sizeof(pools_params));
	pools_params.num_dpbp = 1;
	pools_params.pools[0].dpbp_id = (uint16_t)dflt_dpbp->dpbp_attr.id;
	pools_params.pools[0].buffer_size = LDPAA_ETH_RX_BUFFER_SIZE;
	err = dpni_set_pools(dflt_mc_io, MC_CMD_NO_FLAGS, priv->dpni_handle,
			     &pools_params);
	if (err) {
		printf("dpni_set_pools() failed\n");
		return err;
	}

	priv->tx_flow_id = DPNI_NEW_FLOW_ID;
	memset(&dflt_tx_flow, 0, sizeof(dflt_tx_flow));

	dflt_tx_flow.options = DPNI_TX_FLOW_OPT_ONLY_TX_ERROR;
	dflt_tx_flow.conf_err_cfg.use_default_queue = 0;
	dflt_tx_flow.conf_err_cfg.errors_only = 1;
	err = dpni_set_tx_flow(dflt_mc_io, MC_CMD_NO_FLAGS, priv->dpni_handle,
			       &priv->tx_flow_id, &dflt_tx_flow);
	if (err) {
		printf("dpni_set_tx_flow() failed\n");
		return err;
	}

	return 0;
}

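/*
 * Fill in the U-Boot eth_device callbacks for this DPNI, initialize the PHY
 * and register the device with the network core.
 */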
static int ldpaa_eth_netdev_init(struct eth_device *net_dev)
{
	int err;
	struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)net_dev->priv;

	sprintf(net_dev->name, "DPNI%d", priv->dpni_id);

	net_dev->iobase = 0;
	net_dev->init = ldpaa_eth_open;
	net_dev->halt = ldpaa_eth_stop;
	net_dev->send = ldpaa_eth_tx;
	net_dev->recv = ldpaa_eth_pull_dequeue_rx;
/*
	TODO: PHY MDIO information
	priv->bus = info->bus;
	priv->phyaddr = info->phy_addr;
	priv->enet_if = info->enet_if;
*/

	if (init_phy(net_dev))
		return 0;

	err = eth_register(net_dev);
	if (err < 0) {
		printf("eth_register() = %d\n", err);
		return err;
	}

	return 0;
}

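/*
 * Entry point called for each DPNI object descriptor: allocate the
 * eth_device and driver private data, then register the network device.
 */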
int ldpaa_eth_init(struct dprc_obj_desc obj_desc)
{
	struct eth_device		*net_dev = NULL;
	struct ldpaa_eth_priv		*priv = NULL;
	int				err = 0;

	/* Net device */
	net_dev = (struct eth_device *)malloc(sizeof(struct eth_device));
	if (!net_dev) {
		printf("eth_device malloc() failed\n");
		return -ENOMEM;
	}
	memset(net_dev, 0, sizeof(struct eth_device));

	/* Allocate the ldpaa ethernet private struct */
	priv = (struct ldpaa_eth_priv *)malloc(sizeof(struct ldpaa_eth_priv));
	if (!priv) {
		printf("ldpaa_eth_priv malloc() failed\n");
		free(net_dev);
		return -ENOMEM;
	}
	memset(priv, 0, sizeof(struct ldpaa_eth_priv));

	net_dev->priv = (void *)priv;
	priv->net_dev = (struct eth_device *)net_dev;
	priv->dpni_id = obj_desc.id;

	err = ldpaa_eth_netdev_init(net_dev);
	if (err)
		goto err_netdev_init;

	debug("ldpaa ethernet: Probed interface %s\n", net_dev->name);

	return 0;

err_netdev_init:
	free(priv);
	net_dev->priv = NULL;
	free(net_dev);

	return err;
}