/*
 * Copyright (C) 2014 Freescale Semiconductor
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <asm/io.h>
#include <asm/types.h>
#include <malloc.h>
#include <net.h>
#include <hwconfig.h>
#include <phy.h>
#include <linux/compat.h>

#include "ldpaa_eth.h"

#undef CONFIG_PHYLIB
static int init_phy(struct eth_device *dev)
{
	/* TODO: for external PHY */

	return 0;
}

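/*
 * Process one received frame: check the frame annotation status word for
 * errors, hand valid frames to the network stack, then release the buffer
 * back into the QBMAN buffer pool.
 */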
static void ldpaa_eth_rx(struct ldpaa_eth_priv *priv,
			 const struct dpaa_fd *fd)
{
	u64 fd_addr;
	uint16_t fd_offset;
	uint32_t fd_length;
	struct ldpaa_fas *fas;
	uint32_t status;
	int err;
	u32 timeo = (CONFIG_SYS_HZ * 2) / 1000;
	u32 time_start;
	struct qbman_release_desc releasedesc;
	struct qbman_swp *swp = dflt_dpio->sw_portal;

	fd_addr = ldpaa_fd_get_addr(fd);
	fd_offset = ldpaa_fd_get_offset(fd);
	fd_length = ldpaa_fd_get_len(fd);

	debug("Rx frame: data addr=0x%p size=0x%x\n", (u64 *)fd_addr, fd_length);

	if (fd->simple.frc & LDPAA_FD_FRC_FASV) {
		/* Read the frame annotation status word and check for errors */
		fas = (struct ldpaa_fas *)
				((uint8_t *)(fd_addr) +
				priv->buf_layout.private_data_size);
		status = le32_to_cpu(fas->status);
		if (status & LDPAA_ETH_RX_ERR_MASK) {
			printf("Rx frame error(s): 0x%08x\n",
			       status & LDPAA_ETH_RX_ERR_MASK);
			goto error;
		} else if (status & LDPAA_ETH_RX_UNSUPP_MASK) {
			printf("Unsupported feature in bitmask: 0x%08x\n",
			       status & LDPAA_ETH_RX_UNSUPP_MASK);
			goto error;
		}
	}

	debug("Rx frame: To Upper layer\n");
	net_process_received_packet((uint8_t *)(fd_addr) + fd_offset,
				    fd_length);

error:
	flush_dcache_range(fd_addr, fd_addr + LDPAA_ETH_RX_BUFFER_SIZE);
	qbman_release_desc_clear(&releasedesc);
	qbman_release_desc_set_bpid(&releasedesc, dflt_dpbp->dpbp_attr.bpid);
	time_start = get_timer(0);
	do {
		/* Release the buffer back into the QBMAN buffer pool */
		err = qbman_swp_release(swp, &releasedesc, &fd_addr, 1);
	} while (get_timer(time_start) < timeo && err == -EBUSY);

	if (err == -EBUSY)
		printf("Rx frame: QBMAN buffer release failed\n");
}

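/*
 * Poll the default Rx frame queue through the QBMAN software portal: issue
 * a pull dequeue command, wait for a DQRR entry and pass any valid frame to
 * ldpaa_eth_rx().
 */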
static int ldpaa_eth_pull_dequeue_rx(struct eth_device *dev)
{
	struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)dev->priv;
	const struct ldpaa_dq *dq;
	const struct dpaa_fd *fd;
	int i = 5, err = 0, status;
	u32 timeo = (CONFIG_SYS_HZ * 2) / 1000;
	u32 time_start;
	static struct qbman_pull_desc pulldesc;
	struct qbman_swp *swp = dflt_dpio->sw_portal;

	while (--i) {
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_numframes(&pulldesc, 1);
		qbman_pull_desc_set_fq(&pulldesc, priv->rx_dflt_fqid);

		err = qbman_swp_pull(swp, &pulldesc);
		if (err < 0) {
			printf("Dequeue frames error:0x%08x\n", err);
			continue;
		}

		time_start = get_timer(0);

		do {
			dq = qbman_swp_dqrr_next(swp);
		} while (get_timer(time_start) < timeo && !dq);

		if (dq) {
			/* Check for a valid frame. If the frame is not valid,
			 * send a consume confirmation to QBMAN; otherwise hand
			 * the frame to the upper layer and then send the
			 * consume confirmation to QBMAN.
			 */
			status = (uint8_t)ldpaa_dq_flags(dq);
			if ((status & LDPAA_DQ_STAT_VALIDFRAME) == 0) {
				debug("Dequeue RX frames: No frame delivered\n");

				qbman_swp_dqrr_consume(swp, dq);
				continue;
			}

			fd = ldpaa_dq_fd(dq);

			/* Obtain the FD and process it */
			ldpaa_eth_rx(priv, fd);
			qbman_swp_dqrr_consume(swp, dq);
			break;
		} else {
			err = -ENODATA;
			debug("No DQRR entries\n");
			break;
		}
	}

	return err;
}

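/*
 * Transmit one frame: acquire a buffer from the DPBP pool, copy the packet
 * into it, build a frame descriptor and enqueue it through the QBMAN
 * software portal. On enqueue failure the buffer is released back to the
 * pool.
 */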
static int ldpaa_eth_tx(struct eth_device *net_dev, void *buf, int len)
{
	struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)net_dev->priv;
	struct dpaa_fd fd;
	u64 buffer_start;
	int data_offset, err;
	u32 timeo = (CONFIG_SYS_HZ * 10) / 1000;
	u32 time_start;
	struct qbman_swp *swp = dflt_dpio->sw_portal;
	struct qbman_eq_desc ed;
	struct qbman_release_desc releasedesc;

	/* Setup the FD fields */
	memset(&fd, 0, sizeof(fd));

	data_offset = priv->tx_data_offset;

	do {
		err = qbman_swp_acquire(dflt_dpio->sw_portal,
					dflt_dpbp->dpbp_attr.bpid,
					&buffer_start, 1);
	} while (err == -EBUSY);

	if (err < 0) {
		printf("qbman_swp_acquire() failed\n");
		return -ENOMEM;
	}

	debug("TX data: malloc buffer start=0x%p\n", (u64 *)buffer_start);

	memcpy(((uint8_t *)(buffer_start) + data_offset), buf, len);

	flush_dcache_range(buffer_start, buffer_start +
					LDPAA_ETH_RX_BUFFER_SIZE);

	ldpaa_fd_set_addr(&fd, (u64)buffer_start);
	ldpaa_fd_set_offset(&fd, (uint16_t)(data_offset));
	ldpaa_fd_set_bpid(&fd, dflt_dpbp->dpbp_attr.bpid);
	ldpaa_fd_set_len(&fd, len);

	fd.simple.ctrl = LDPAA_FD_CTRL_ASAL | LDPAA_FD_CTRL_PTA |
				LDPAA_FD_CTRL_PTV1;

	qbman_eq_desc_clear(&ed);
	qbman_eq_desc_set_no_orp(&ed, 0);
	qbman_eq_desc_set_qd(&ed, priv->tx_qdid, priv->tx_flow_id, 0);

	time_start = get_timer(0);

	while (get_timer(time_start) < timeo) {
		err = qbman_swp_enqueue(swp, &ed,
				(const struct qbman_fd *)(&fd));
		if (err != -EBUSY)
			break;
	}

	if (err < 0) {
		printf("error enqueueing Tx frame\n");
		goto error;
	}

	return err;

error:
	qbman_release_desc_clear(&releasedesc);
	qbman_release_desc_set_bpid(&releasedesc, dflt_dpbp->dpbp_attr.bpid);
	time_start = get_timer(0);
	do {
		/* Release the buffer back into the QBMAN buffer pool */
		err = qbman_swp_release(swp, &releasedesc, &buffer_start, 1);
	} while (get_timer(time_start) < timeo && err == -EBUSY);

	if (err == -EBUSY)
		printf("TX data: QBMAN buffer release failed\n");

	return err;
}

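/*
 * Bring the interface up: set up the DPNI and DPBP objects, bind them,
 * retrieve the primary MAC address, enable the DPNI and fetch the default
 * Rx FQID and Tx QDID used by the Rx/Tx paths.
 */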
static int ldpaa_eth_open(struct eth_device *net_dev, bd_t *bd)
{
	struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)net_dev->priv;
	struct dpni_queue_attr rx_queue_attr;
	uint8_t mac_addr[6];
	int err;

	if (net_dev->state == ETH_STATE_ACTIVE)
		return 0;

	/* DPNI initialization */
	err = ldpaa_dpni_setup(priv);
	if (err < 0)
		goto err_dpni_setup;

	err = ldpaa_dpbp_setup();
	if (err < 0)
		goto err_dpbp_setup;

	/* DPNI binding DPBP */
	err = ldpaa_dpni_bind(priv);
	if (err)
		goto err_bind;

	err = dpni_get_primary_mac_addr(dflt_mc_io, MC_CMD_NO_FLAGS,
					priv->dpni_handle, mac_addr);
	if (err) {
		printf("dpni_get_primary_mac_addr() failed\n");
		return err;
	}

	memcpy(net_dev->enetaddr, mac_addr, 0x6);

	/* Reject multicast MAC addresses */
	if (net_dev->enetaddr[0] & 0x01) {
		printf("%s: MAC address is a multicast address\n", __func__);
		return 1;
	}

#ifdef CONFIG_PHYLIB
	/* TODO Check this path */
	err = phy_startup(priv->phydev);
	if (err) {
		printf("%s: Could not initialize\n", priv->phydev->dev->name);
		return err;
	}
#else
	priv->phydev->speed = SPEED_1000;
	priv->phydev->link = 1;
	priv->phydev->duplex = DUPLEX_FULL;
#endif

	err = dpni_enable(dflt_mc_io, MC_CMD_NO_FLAGS, priv->dpni_handle);
	if (err < 0) {
		printf("dpni_enable() failed\n");
		return err;
	}

	/* TODO: support multiple Rx flows */
	err = dpni_get_rx_flow(dflt_mc_io, MC_CMD_NO_FLAGS, priv->dpni_handle,
			       0, 0, &rx_queue_attr);
	if (err) {
		printf("dpni_get_rx_flow() failed\n");
		goto err_rx_flow;
	}

	priv->rx_dflt_fqid = rx_queue_attr.fqid;

	err = dpni_get_qdid(dflt_mc_io, MC_CMD_NO_FLAGS, priv->dpni_handle,
			    &priv->tx_qdid);
	if (err) {
		printf("dpni_get_qdid() failed\n");
		goto err_qdid;
	}

	if (!priv->phydev->link)
		printf("%s: No link.\n", priv->phydev->dev->name);

	return priv->phydev->link ? 0 : -1;

err_qdid:
err_rx_flow:
	dpni_disable(dflt_mc_io, MC_CMD_NO_FLAGS, priv->dpni_handle);
err_bind:
	ldpaa_dpbp_free();
err_dpbp_setup:
	dpni_close(dflt_mc_io, MC_CMD_NO_FLAGS, priv->dpni_handle);
err_dpni_setup:
	return err;
}

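/*
 * Bring the interface down: disable Tx/Rx traffic on the DPNI, drain and
 * free the buffer pool, then reset and close the DPNI object.
 */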
static void ldpaa_eth_stop(struct eth_device *net_dev)
{
	struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)net_dev->priv;
	int err = 0;

	if ((net_dev->state == ETH_STATE_PASSIVE) ||
	    (net_dev->state == ETH_STATE_INIT))
		return;

	/* Stop Tx and Rx traffic */
	err = dpni_disable(dflt_mc_io, MC_CMD_NO_FLAGS, priv->dpni_handle);
	if (err < 0)
		printf("dpni_disable() failed\n");

#ifdef CONFIG_PHYLIB
	phy_shutdown(priv->phydev);
#endif

	ldpaa_dpbp_free();
	dpni_reset(dflt_mc_io, MC_CMD_NO_FLAGS, priv->dpni_handle);
	dpni_close(dflt_mc_io, MC_CMD_NO_FLAGS, priv->dpni_handle);
}

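/*
 * Acquire up to 'count' buffers at a time from the default DPBP pool and
 * free the backing memory until the pool is empty.
 */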
static void ldpaa_dpbp_drain_cnt(int count)
{
	uint64_t buf_array[7];
	void *addr;
	int ret, i;

	BUG_ON(count > 7);

	do {
		ret = qbman_swp_acquire(dflt_dpio->sw_portal,
					dflt_dpbp->dpbp_attr.bpid,
					buf_array, count);
		if (ret < 0) {
			printf("qbman_swp_acquire() failed\n");
			return;
		}
		for (i = 0; i < ret; i++) {
			addr = (void *)buf_array[i];
			debug("Free: buffer addr = 0x%p\n", addr);
			free(addr);
		}
	} while (ret);
}

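/* Drain the whole buffer pool, seven buffers at a time. */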
static void ldpaa_dpbp_drain(void)
{
	int i;

	for (i = 0; i < LDPAA_ETH_NUM_BUFS; i += 7)
		ldpaa_dpbp_drain_cnt(7);
}

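/*
 * Allocate up to seven Rx buffers and release them into the buffer pool
 * identified by 'bpid'. Returns the number of buffers released.
 */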
static int ldpaa_bp_add_7(uint16_t bpid)
{
	uint64_t buf_array[7];
	u8 *addr;
	int i;
	struct qbman_release_desc rd;

	for (i = 0; i < 7; i++) {
		addr = memalign(L1_CACHE_BYTES, LDPAA_ETH_RX_BUFFER_SIZE);
		if (!addr) {
			printf("addr allocation failed\n");
			goto err_alloc;
		}
		memset(addr, 0x00, LDPAA_ETH_RX_BUFFER_SIZE);
		flush_dcache_range((u64)addr,
				   (u64)(addr + LDPAA_ETH_RX_BUFFER_SIZE));

		buf_array[i] = (uint64_t)addr;
		debug("Release: buffer addr = 0x%p\n", addr);
	}

release_bufs:
	/* In case the portal is busy, retry until successful.
	 * This function is guaranteed to succeed in a reasonable amount
	 * of time.
	 */

	do {
		mdelay(1);
		qbman_release_desc_clear(&rd);
		qbman_release_desc_set_bpid(&rd, bpid);
	} while (qbman_swp_release(dflt_dpio->sw_portal, &rd, buf_array, i));

	return i;

err_alloc:
	if (i)
		goto release_bufs;

	return 0;
}

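/* Seed the buffer pool with LDPAA_ETH_NUM_BUFS Rx buffers. */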
static int ldpaa_dpbp_seed(uint16_t bpid)
{
	int i;
	int count;

	for (i = 0; i < LDPAA_ETH_NUM_BUFS; i += 7) {
		count = ldpaa_bp_add_7(bpid);
		if (count < 7)
			printf("Buffer seeding: only %d buffers released\n",
			       count);
	}

	return 0;
}

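/*
 * Open and enable the DPBP object, read back its attributes and seed the
 * buffer pool with Rx buffers.
 */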
static int ldpaa_dpbp_setup(void)
{
	int err;

	err = dpbp_open(dflt_mc_io, MC_CMD_NO_FLAGS, dflt_dpbp->dpbp_attr.id,
			&dflt_dpbp->dpbp_handle);
	if (err) {
		printf("dpbp_open() failed\n");
		goto err_open;
	}

	err = dpbp_enable(dflt_mc_io, MC_CMD_NO_FLAGS, dflt_dpbp->dpbp_handle);
	if (err) {
		printf("dpbp_enable() failed\n");
		goto err_enable;
	}

	err = dpbp_get_attributes(dflt_mc_io, MC_CMD_NO_FLAGS,
				  dflt_dpbp->dpbp_handle,
				  &dflt_dpbp->dpbp_attr);
	if (err) {
		printf("dpbp_get_attributes() failed\n");
		goto err_get_attr;
	}

	err = ldpaa_dpbp_seed(dflt_dpbp->dpbp_attr.bpid);
	if (err) {
		printf("Buffer seeding failed for DPBP %d (bpid=%d)\n",
		       dflt_dpbp->dpbp_attr.id, dflt_dpbp->dpbp_attr.bpid);
		goto err_seed;
	}

	return 0;

err_seed:
err_get_attr:
	dpbp_disable(dflt_mc_io, MC_CMD_NO_FLAGS, dflt_dpbp->dpbp_handle);
err_enable:
	dpbp_close(dflt_mc_io, MC_CMD_NO_FLAGS, dflt_dpbp->dpbp_handle);
err_open:
	return err;
}

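/* Drain the buffer pool, then disable, reset and close the DPBP object. */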
static void ldpaa_dpbp_free(void)
{
	ldpaa_dpbp_drain();
	dpbp_disable(dflt_mc_io, MC_CMD_NO_FLAGS, dflt_dpbp->dpbp_handle);
	dpbp_reset(dflt_mc_io, MC_CMD_NO_FLAGS, dflt_dpbp->dpbp_handle);
	dpbp_close(dflt_mc_io, MC_CMD_NO_FLAGS, dflt_dpbp->dpbp_handle);
}

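/*
 * Open the DPNI object, read its attributes and configure the Rx, Tx and
 * Tx-confirmation buffer layouts, then retrieve the Tx data offset.
 */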
static int ldpaa_dpni_setup(struct ldpaa_eth_priv *priv)
{
	int err;

	/* Get a handle for the DPNI this interface is associated with */
	err = dpni_open(dflt_mc_io, MC_CMD_NO_FLAGS, priv->dpni_id,
			&priv->dpni_handle);
	if (err) {
		printf("dpni_open() failed\n");
		goto err_open;
	}

	err = dpni_get_attributes(dflt_mc_io, MC_CMD_NO_FLAGS,
				  priv->dpni_handle, &priv->dpni_attrs);
	if (err) {
		printf("dpni_get_attributes() failed (err=%d)\n", err);
		goto err_get_attr;
	}

	/* Configure our buffers' layout */
	priv->buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
				   DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
				   DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
	priv->buf_layout.pass_parser_result = true;
	priv->buf_layout.pass_frame_status = true;
	priv->buf_layout.private_data_size = LDPAA_ETH_SWA_SIZE;
	/* ...rx, ... */
	err = dpni_set_rx_buffer_layout(dflt_mc_io, MC_CMD_NO_FLAGS,
					priv->dpni_handle, &priv->buf_layout);
	if (err) {
		printf("dpni_set_rx_buffer_layout() failed\n");
		goto err_buf_layout;
	}

	/* ... tx, ... */
	priv->buf_layout.options &= ~DPNI_BUF_LAYOUT_OPT_PARSER_RESULT;
	err = dpni_set_tx_buffer_layout(dflt_mc_io, MC_CMD_NO_FLAGS,
					priv->dpni_handle, &priv->buf_layout);
	if (err) {
		printf("dpni_set_tx_buffer_layout() failed\n");
		goto err_buf_layout;
	}

	/* ... tx-confirm. */
	priv->buf_layout.options &= ~DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
	err = dpni_set_tx_conf_buffer_layout(dflt_mc_io, MC_CMD_NO_FLAGS,
					     priv->dpni_handle,
					     &priv->buf_layout);
	if (err) {
		printf("dpni_set_tx_conf_buffer_layout() failed\n");
		goto err_buf_layout;
	}

	/* Now that we've set our tx buffer layout, retrieve the minimum
	 * required tx data offset.
	 */
	err = dpni_get_tx_data_offset(dflt_mc_io, MC_CMD_NO_FLAGS,
				      priv->dpni_handle, &priv->tx_data_offset);
	if (err) {
		printf("dpni_get_tx_data_offset() failed\n");
		goto err_data_offset;
	}

	/* Warn in case the TX data offset is not a multiple of 64 bytes. */
	WARN_ON(priv->tx_data_offset % 64);

	/* Accommodate the SWA space. */
	priv->tx_data_offset += LDPAA_ETH_SWA_SIZE;
	debug("priv->tx_data_offset=%d\n", priv->tx_data_offset);

	return 0;

err_data_offset:
err_buf_layout:
err_get_attr:
	dpni_close(dflt_mc_io, MC_CMD_NO_FLAGS, priv->dpni_handle);
err_open:
	return err;
}

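/*
 * Bind the DPNI to the default DPBP buffer pool and configure the default
 * Tx flow.
 */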
static int ldpaa_dpni_bind(struct ldpaa_eth_priv *priv)
{
	struct dpni_pools_cfg pools_params;
	struct dpni_tx_flow_cfg dflt_tx_flow;
	int err = 0;

	memset(&pools_params, 0, sizeof(pools_params));
	pools_params.num_dpbp = 1;
	pools_params.pools[0].dpbp_id = (uint16_t)dflt_dpbp->dpbp_attr.id;
	pools_params.pools[0].buffer_size = LDPAA_ETH_RX_BUFFER_SIZE;
	err = dpni_set_pools(dflt_mc_io, MC_CMD_NO_FLAGS, priv->dpni_handle,
			     &pools_params);
	if (err) {
		printf("dpni_set_pools() failed\n");
		return err;
	}

	priv->tx_flow_id = DPNI_NEW_FLOW_ID;
	memset(&dflt_tx_flow, 0, sizeof(dflt_tx_flow));

	dflt_tx_flow.options = DPNI_TX_FLOW_OPT_ONLY_TX_ERROR;
	dflt_tx_flow.conf_err_cfg.use_default_queue = 0;
	dflt_tx_flow.conf_err_cfg.errors_only = 1;
	err = dpni_set_tx_flow(dflt_mc_io, MC_CMD_NO_FLAGS, priv->dpni_handle,
			       &priv->tx_flow_id, &dflt_tx_flow);
	if (err) {
		printf("dpni_set_tx_flow() failed\n");
		return err;
	}

	return 0;
}

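/*
 * Fill in the eth_device callbacks and name for this DPNI and register it
 * with the U-Boot network layer.
 */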
static int ldpaa_eth_netdev_init(struct eth_device *net_dev)
{
	int err;
	struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)net_dev->priv;

	sprintf(net_dev->name, "DPNI%d", priv->dpni_id);

	net_dev->iobase = 0;
	net_dev->init = ldpaa_eth_open;
	net_dev->halt = ldpaa_eth_stop;
	net_dev->send = ldpaa_eth_tx;
	net_dev->recv = ldpaa_eth_pull_dequeue_rx;

	/* TODO: PHY MDIO information
	 * priv->bus = info->bus;
	 * priv->phyaddr = info->phy_addr;
	 * priv->enet_if = info->enet_if;
	 */

	err = init_phy(net_dev);
	if (err)
		return err;

	err = eth_register(net_dev);
	if (err < 0) {
		printf("eth_register() = %d\n", err);
		return err;
	}

	return 0;
}

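/*
 * Probe entry point: allocate the eth_device and driver private data for
 * the DPNI object described by obj_desc and register the network device.
 */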
int ldpaa_eth_init(struct dprc_obj_desc obj_desc)
{
	struct eth_device		*net_dev = NULL;
	struct ldpaa_eth_priv		*priv = NULL;
	int				err = 0;

	/* Net device */
	net_dev = (struct eth_device *)malloc(sizeof(struct eth_device));
	if (!net_dev) {
		printf("eth_device malloc() failed\n");
		return -ENOMEM;
	}
	memset(net_dev, 0, sizeof(struct eth_device));

	/* Allocate the ldpaa ethernet private struct */
	priv = (struct ldpaa_eth_priv *)malloc(sizeof(struct ldpaa_eth_priv));
	if (!priv) {
		printf("ldpaa_eth_priv malloc() failed\n");
		free(net_dev);
		return -ENOMEM;
	}
	memset(priv, 0, sizeof(struct ldpaa_eth_priv));

	net_dev->priv = (void *)priv;
	priv->net_dev = net_dev;
	priv->dpni_id = obj_desc.id;

	err = ldpaa_eth_netdev_init(net_dev);
	if (err)
		goto err_netdev_init;

	debug("ldpaa ethernet: Probed interface %s\n", net_dev->name);
	return 0;

err_netdev_init:
	free(priv);
	net_dev->priv = NULL;
	free(net_dev);

	return err;
}