/*
 * Copyright (C) 2014 Freescale Semiconductor
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <asm/io.h>
#include <asm/types.h>
#include <malloc.h>
#include <net.h>
#include <hwconfig.h>
#include <phy.h>
#include <linux/compat.h>

#include "ldpaa_eth.h"

#undef CONFIG_PHYLIB
static int init_phy(struct eth_device *dev)
{
	/* TODO for external PHY */

	return 0;
}

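/*
 * Process one received frame descriptor: validate the frame annotation
 * status word (if present), pass the frame data to the network stack and
 * finally release the buffer back to the default DPBP buffer pool.
 */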
static void ldpaa_eth_rx(struct ldpaa_eth_priv *priv,
			 const struct dpaa_fd *fd)
{
	u64 fd_addr;
	uint16_t fd_offset;
	uint32_t fd_length;
	struct ldpaa_fas *fas;
	uint32_t status;
	int err;
	u32 timeo = (CONFIG_SYS_HZ * 2) / 1000;
	u32 time_start;
	struct qbman_release_desc releasedesc;
	struct qbman_swp *swp = dflt_dpio->sw_portal;

	fd_addr = ldpaa_fd_get_addr(fd);
	fd_offset = ldpaa_fd_get_offset(fd);
	fd_length = ldpaa_fd_get_len(fd);

	debug("Rx frame: data addr=0x%p size=0x%x\n", (u64 *)fd_addr, fd_length);

	if (fd->simple.frc & LDPAA_FD_FRC_FASV) {
		/* Read the frame annotation status word and check for errors */
		fas = (struct ldpaa_fas *)
				((uint8_t *)(fd_addr) +
				priv->buf_layout.private_data_size);
		status = le32_to_cpu(fas->status);
		if (status & LDPAA_ETH_RX_ERR_MASK) {
			printf("Rx frame error(s): 0x%08x\n",
			       status & LDPAA_ETH_RX_ERR_MASK);
			goto error;
		} else if (status & LDPAA_ETH_RX_UNSUPP_MASK) {
			printf("Unsupported feature in bitmask: 0x%08x\n",
			       status & LDPAA_ETH_RX_UNSUPP_MASK);
			goto error;
		}
	}

	debug("Rx frame: To Upper layer\n");
	net_process_received_packet((uint8_t *)(fd_addr) + fd_offset,
				    fd_length);

error:
	flush_dcache_range(fd_addr, fd_addr + LDPAA_ETH_RX_BUFFER_SIZE);
	qbman_release_desc_clear(&releasedesc);
	qbman_release_desc_set_bpid(&releasedesc, dflt_dpbp->dpbp_attr.bpid);
	time_start = get_timer(0);
	do {
		/* Release buffer into the QBMAN */
		err = qbman_swp_release(swp, &releasedesc, &fd_addr, 1);
	} while (get_timer(time_start) < timeo && err == -EBUSY);

	if (err == -EBUSY)
		printf("Rx frame: QBMAN buffer release failed\n");

	return;
}

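/*
 * Receive poll hook: issue a pull dequeue on the default Rx frame queue
 * and wait briefly for a DQRR entry on the software portal. Valid frames
 * are handed to ldpaa_eth_rx(); every DQRR entry is consumed afterwards.
 */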
static int ldpaa_eth_pull_dequeue_rx(struct eth_device *dev)
{
	struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)dev->priv;
	const struct ldpaa_dq *dq;
	const struct dpaa_fd *fd;
	int i = 5, err = 0, status;
	u32 timeo = (CONFIG_SYS_HZ * 2) / 1000;
	u32 time_start;
	static struct qbman_pull_desc pulldesc;
	struct qbman_swp *swp = dflt_dpio->sw_portal;

	while (--i) {
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_numframes(&pulldesc, 1);
		qbman_pull_desc_set_fq(&pulldesc, priv->rx_dflt_fqid);

		err = qbman_swp_pull(swp, &pulldesc);
		if (err < 0) {
			printf("Dequeue frames error: 0x%08x\n", err);
			continue;
		}

		time_start = get_timer(0);

		do {
			dq = qbman_swp_dqrr_next(swp);
		} while (get_timer(time_start) < timeo && !dq);

		if (dq) {
			/* Check for a valid frame. If the frame is invalid,
			 * only send a consume confirmation to QBMAN;
			 * otherwise pass it to the upper layer and then send
			 * the consume confirmation to QBMAN.
			 */
			status = (uint8_t)ldpaa_dq_flags(dq);
			if ((status & LDPAA_DQ_STAT_VALIDFRAME) == 0) {
				debug("Dequeue RX frames:");
				debug("No frame delivered\n");

				qbman_swp_dqrr_consume(swp, dq);
				continue;
			}

			fd = ldpaa_dq_fd(dq);

			/* Obtain FD and process it */
			ldpaa_eth_rx(priv, fd);
			qbman_swp_dqrr_consume(swp, dq);
			break;
		} else {
			err = -ENODATA;
			debug("No DQRR entries\n");
			break;
		}
	}

	return err;
}

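/*
 * Transmit hook: acquire a buffer from the default DPBP, copy the packet
 * into it at the Tx data offset, build a frame descriptor and enqueue it
 * through the QBMAN software portal. If the enqueue fails, the buffer is
 * released back to the pool.
 */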
static int ldpaa_eth_tx(struct eth_device *net_dev, void *buf, int len)
{
	struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)net_dev->priv;
	struct dpaa_fd fd;
	u64 buffer_start;
	int data_offset, err;
	u32 timeo = (CONFIG_SYS_HZ * 10) / 1000;
	u32 time_start;
	struct qbman_swp *swp = dflt_dpio->sw_portal;
	struct qbman_eq_desc ed;
	struct qbman_release_desc releasedesc;

	/* Setup the FD fields */
	memset(&fd, 0, sizeof(fd));

	data_offset = priv->tx_data_offset;

	do {
		err = qbman_swp_acquire(dflt_dpio->sw_portal,
					dflt_dpbp->dpbp_attr.bpid,
					&buffer_start, 1);
	} while (err == -EBUSY);

	if (err < 0) {
		printf("qbman_swp_acquire() failed\n");
		return -ENOMEM;
	}

	debug("TX data: malloc buffer start=0x%p\n", (u64 *)buffer_start);

	memcpy(((uint8_t *)(buffer_start) + data_offset), buf, len);

	flush_dcache_range(buffer_start, buffer_start +
					LDPAA_ETH_RX_BUFFER_SIZE);

	ldpaa_fd_set_addr(&fd, (u64)buffer_start);
	ldpaa_fd_set_offset(&fd, (uint16_t)(data_offset));
	ldpaa_fd_set_bpid(&fd, dflt_dpbp->dpbp_attr.bpid);
	ldpaa_fd_set_len(&fd, len);

	fd.simple.ctrl = LDPAA_FD_CTRL_ASAL | LDPAA_FD_CTRL_PTA |
				LDPAA_FD_CTRL_PTV1;

	qbman_eq_desc_clear(&ed);
	qbman_eq_desc_set_no_orp(&ed, 0);
	qbman_eq_desc_set_qd(&ed, priv->tx_qdid, priv->tx_flow_id, 0);

	time_start = get_timer(0);

	while (get_timer(time_start) < timeo) {
		err = qbman_swp_enqueue(swp, &ed,
				(const struct qbman_fd *)(&fd));
		if (err != -EBUSY)
			break;
	}

	if (err < 0) {
		printf("error enqueueing Tx frame\n");
		goto error;
	}

	return err;

error:
	qbman_release_desc_clear(&releasedesc);
	qbman_release_desc_set_bpid(&releasedesc, dflt_dpbp->dpbp_attr.bpid);
	time_start = get_timer(0);
	do {
		/* Release buffer into the QBMAN */
		err = qbman_swp_release(swp, &releasedesc, &buffer_start, 1);
	} while (get_timer(time_start) < timeo && err == -EBUSY);

	if (err == -EBUSY)
		printf("TX data: QBMAN buffer release failed\n");

	return err;
}

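/*
 * Bring the interface up: set up the DPNI and DPBP objects, bind them,
 * retrieve the primary MAC address, enable the DPNI and cache the default
 * Rx FQID and Tx QDID used by the Rx and Tx paths.
 */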
static int ldpaa_eth_open(struct eth_device *net_dev, bd_t *bd)
{
	struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)net_dev->priv;
	struct dpni_queue_attr rx_queue_attr;
	uint8_t mac_addr[6];
	int err;

	if (net_dev->state == ETH_STATE_ACTIVE)
		return 0;

	/* DPNI initialization */
	err = ldpaa_dpni_setup(priv);
	if (err < 0)
		goto err_dpni_setup;

	err = ldpaa_dpbp_setup();
	if (err < 0)
		goto err_dpbp_setup;

	/* DPNI binding DPBP */
	err = ldpaa_dpni_bind(priv);
	if (err)
		goto err_bind;

	err = dpni_get_primary_mac_addr(dflt_mc_io, priv->dpni_handle,
					mac_addr);
	if (err) {
		printf("dpni_get_primary_mac_addr() failed\n");
		return err;
	}

	memcpy(net_dev->enetaddr, mac_addr, 6);

	/* setup the MAC address */
	if (net_dev->enetaddr[0] & 0x01) {
		printf("%s: MAC address is a multicast address\n", __func__);
		return 1;
	}

#ifdef CONFIG_PHYLIB
	/* TODO Check this path */
	err = phy_startup(priv->phydev);
	if (err) {
		printf("%s: Could not initialize\n", priv->phydev->dev->name);
		return err;
	}
#else
	priv->phydev->speed = SPEED_1000;
	priv->phydev->link = 1;
	priv->phydev->duplex = DUPLEX_FULL;
#endif

	err = dpni_enable(dflt_mc_io, priv->dpni_handle);
	if (err < 0) {
		printf("dpni_enable() failed\n");
		return err;
	}

	/* TODO: support multiple Rx flows */
	err = dpni_get_rx_flow(dflt_mc_io, priv->dpni_handle, 0, 0,
			       &rx_queue_attr);
	if (err) {
		printf("dpni_get_rx_flow() failed\n");
		goto err_rx_flow;
	}

	priv->rx_dflt_fqid = rx_queue_attr.fqid;

	err = dpni_get_qdid(dflt_mc_io, priv->dpni_handle, &priv->tx_qdid);
	if (err) {
		printf("dpni_get_qdid() failed\n");
		goto err_qdid;
	}

	if (!priv->phydev->link)
		printf("%s: No link.\n", priv->phydev->dev->name);

	return priv->phydev->link ? 0 : -1;

err_qdid:
err_rx_flow:
	dpni_disable(dflt_mc_io, priv->dpni_handle);
err_bind:
	ldpaa_dpbp_free();
err_dpbp_setup:
	dpni_close(dflt_mc_io, priv->dpni_handle);
err_dpni_setup:
	return err;
}

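/*
 * Halt the interface: disable Tx/Rx traffic on the DPNI, drain and free
 * the buffer pool, then reset and close the DPNI object.
 */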
static void ldpaa_eth_stop(struct eth_device *net_dev)
{
	struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)net_dev->priv;
	int err = 0;

	if ((net_dev->state == ETH_STATE_PASSIVE) ||
	    (net_dev->state == ETH_STATE_INIT))
		return;

	/* Stop Tx and Rx traffic */
	err = dpni_disable(dflt_mc_io, priv->dpni_handle);
	if (err < 0)
		printf("dpni_disable() failed\n");

#ifdef CONFIG_PHYLIB
	phy_shutdown(priv->phydev);
#endif

	ldpaa_dpbp_free();
	dpni_reset(dflt_mc_io, priv->dpni_handle);
	dpni_close(dflt_mc_io, priv->dpni_handle);
}

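/*
 * Acquire up to @count buffers (at most seven per acquire command) from
 * the default DPBP and free their backing memory; repeat until no more
 * buffers are returned.
 */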
static void ldpaa_dpbp_drain_cnt(int count)
{
	uint64_t buf_array[7];
	void *addr;
	int ret, i;

	BUG_ON(count > 7);

	do {
		ret = qbman_swp_acquire(dflt_dpio->sw_portal,
					dflt_dpbp->dpbp_attr.bpid,
					buf_array, count);
		if (ret < 0) {
			printf("qbman_swp_acquire() failed\n");
			return;
		}
		for (i = 0; i < ret; i++) {
			addr = (void *)buf_array[i];
			debug("Free: buffer addr =0x%p\n", addr);
			free(addr);
		}
	} while (ret);
}

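/* Drain the complete buffer pool, seven buffers at a time. */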
static void ldpaa_dpbp_drain(void)
{
	int i;

	for (i = 0; i < LDPAA_ETH_NUM_BUFS; i += 7)
		ldpaa_dpbp_drain_cnt(7);
}

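/*
 * Allocate, zero and flush up to seven Rx buffers and release them into
 * buffer pool @bpid. Returns the number of buffers actually released.
 */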
static int ldpaa_bp_add_7(uint16_t bpid)
{
	uint64_t buf_array[7];
	u8 *addr;
	int i;
	struct qbman_release_desc rd;

	for (i = 0; i < 7; i++) {
		addr = memalign(L1_CACHE_BYTES, LDPAA_ETH_RX_BUFFER_SIZE);
		if (!addr) {
			printf("addr allocation failed\n");
			goto err_alloc;
		}
		memset(addr, 0x00, LDPAA_ETH_RX_BUFFER_SIZE);
		flush_dcache_range((u64)addr,
				   (u64)(addr + LDPAA_ETH_RX_BUFFER_SIZE));

		buf_array[i] = (uint64_t)addr;
		debug("Release: buffer addr =0x%p\n", addr);
	}

release_bufs:
	/* In case the portal is busy, retry until successful.
	 * This function is guaranteed to succeed in a reasonable amount
	 * of time.
	 */
	do {
		mdelay(1);
		qbman_release_desc_clear(&rd);
		qbman_release_desc_set_bpid(&rd, bpid);
	} while (qbman_swp_release(dflt_dpio->sw_portal, &rd, buf_array, i));

	return i;

err_alloc:
	if (i)
		goto release_bufs;

	return 0;
}

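/* Seed the buffer pool with LDPAA_ETH_NUM_BUFS Rx buffers, seven at a time. */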
static int ldpaa_dpbp_seed(uint16_t bpid)
{
	int i;
	int count;

	for (i = 0; i < LDPAA_ETH_NUM_BUFS; i += 7) {
		count = ldpaa_bp_add_7(bpid);
		if (count < 7)
			printf("Buffer seeding: only %d of 7 buffers added\n",
			       count);
	}

	return 0;
}

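/*
 * Open and enable the default DPBP object, read back its attributes and
 * seed it with Rx buffers.
 */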
static int ldpaa_dpbp_setup(void)
{
	int err;

	err = dpbp_open(dflt_mc_io, dflt_dpbp->dpbp_attr.id,
			&dflt_dpbp->dpbp_handle);
	if (err) {
		printf("dpbp_open() failed\n");
		goto err_open;
	}

	err = dpbp_enable(dflt_mc_io, dflt_dpbp->dpbp_handle);
	if (err) {
		printf("dpbp_enable() failed\n");
		goto err_enable;
	}

	err = dpbp_get_attributes(dflt_mc_io, dflt_dpbp->dpbp_handle,
				  &dflt_dpbp->dpbp_attr);
	if (err) {
		printf("dpbp_get_attributes() failed\n");
		goto err_get_attr;
	}

	err = ldpaa_dpbp_seed(dflt_dpbp->dpbp_attr.bpid);
	if (err) {
		printf("Buffer seeding failed for DPBP %d (bpid=%d)\n",
		       dflt_dpbp->dpbp_attr.id, dflt_dpbp->dpbp_attr.bpid);
		goto err_seed;
	}

	return 0;

err_seed:
err_get_attr:
	dpbp_disable(dflt_mc_io, dflt_dpbp->dpbp_handle);
err_enable:
	dpbp_close(dflt_mc_io, dflt_dpbp->dpbp_handle);
err_open:
	return err;
}

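/* Drain the buffer pool, then disable, reset and close the DPBP object. */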
static void ldpaa_dpbp_free(void)
{
	ldpaa_dpbp_drain();
	dpbp_disable(dflt_mc_io, dflt_dpbp->dpbp_handle);
	dpbp_reset(dflt_mc_io, dflt_dpbp->dpbp_handle);
	dpbp_close(dflt_mc_io, dflt_dpbp->dpbp_handle);
}

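/*
 * Open the DPNI object, fetch its attributes and program the Rx, Tx and
 * Tx-confirmation buffer layouts. The resulting Tx data offset (plus
 * software annotation space) is cached for the transmit path.
 */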
static int ldpaa_dpni_setup(struct ldpaa_eth_priv *priv)
{
	int err;

	/* Get a handle for the DPNI this interface is associated with */
	err = dpni_open(dflt_mc_io, priv->dpni_id, &priv->dpni_handle);
	if (err) {
		printf("dpni_open() failed\n");
		goto err_open;
	}

	err = dpni_get_attributes(dflt_mc_io, priv->dpni_handle,
				  &priv->dpni_attrs);
	if (err) {
		printf("dpni_get_attributes() failed (err=%d)\n", err);
		goto err_get_attr;
	}

	/* Configure our buffers' layout */
	priv->buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
				   DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
				   DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
	priv->buf_layout.pass_parser_result = true;
	priv->buf_layout.pass_frame_status = true;
	priv->buf_layout.private_data_size = LDPAA_ETH_SWA_SIZE;
	/* ...rx, ... */
	err = dpni_set_rx_buffer_layout(dflt_mc_io, priv->dpni_handle,
					&priv->buf_layout);
	if (err) {
		printf("dpni_set_rx_buffer_layout() failed\n");
		goto err_buf_layout;
	}

	/* ... tx, ... */
	priv->buf_layout.options &= ~DPNI_BUF_LAYOUT_OPT_PARSER_RESULT;
	err = dpni_set_tx_buffer_layout(dflt_mc_io, priv->dpni_handle,
					&priv->buf_layout);
	if (err) {
		printf("dpni_set_tx_buffer_layout() failed\n");
		goto err_buf_layout;
	}

	/* ... tx-confirm. */
	priv->buf_layout.options &= ~DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
	err = dpni_set_tx_conf_buffer_layout(dflt_mc_io, priv->dpni_handle,
					     &priv->buf_layout);
	if (err) {
		printf("dpni_set_tx_conf_buffer_layout() failed\n");
		goto err_buf_layout;
	}

	/* Now that we've set our tx buffer layout, retrieve the minimum
	 * required tx data offset.
	 */
	err = dpni_get_tx_data_offset(dflt_mc_io, priv->dpni_handle,
				      &priv->tx_data_offset);
	if (err) {
		printf("dpni_get_tx_data_offset() failed\n");
		goto err_data_offset;
	}

	/* Warn in case the TX data offset is not a multiple of 64 bytes. */
	WARN_ON(priv->tx_data_offset % 64);

	/* Accommodate SWA space. */
	priv->tx_data_offset += LDPAA_ETH_SWA_SIZE;
	debug("priv->tx_data_offset=%d\n", priv->tx_data_offset);

	return 0;

err_data_offset:
err_buf_layout:
err_get_attr:
	dpni_close(dflt_mc_io, priv->dpni_handle);
err_open:
	return err;
}

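/*
 * Bind the default DPBP to the DPNI and configure the default Tx flow so
 * that only errored frames generate Tx confirmations.
 */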
static int ldpaa_dpni_bind(struct ldpaa_eth_priv *priv)
{
	struct dpni_pools_cfg pools_params;
	struct dpni_tx_flow_cfg dflt_tx_flow;
	int err = 0;

	pools_params.num_dpbp = 1;
	pools_params.pools[0].dpbp_id = (uint16_t)dflt_dpbp->dpbp_attr.id;
	pools_params.pools[0].buffer_size = LDPAA_ETH_RX_BUFFER_SIZE;
	err = dpni_set_pools(dflt_mc_io, priv->dpni_handle, &pools_params);
	if (err) {
		printf("dpni_set_pools() failed\n");
		return err;
	}

	priv->tx_flow_id = DPNI_NEW_FLOW_ID;
	memset(&dflt_tx_flow, 0, sizeof(dflt_tx_flow));

	dflt_tx_flow.options = DPNI_TX_FLOW_OPT_ONLY_TX_ERROR;
	dflt_tx_flow.conf_err_cfg.use_default_queue = 0;
	dflt_tx_flow.conf_err_cfg.errors_only = 1;
	err = dpni_set_tx_flow(dflt_mc_io, priv->dpni_handle,
			       &priv->tx_flow_id, &dflt_tx_flow);
	if (err) {
		printf("dpni_set_tx_flow() failed\n");
		return err;
	}

	return 0;
}

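/*
 * Fill in the U-Boot eth_device callbacks for this DPNI and register the
 * interface with the network stack.
 */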
static int ldpaa_eth_netdev_init(struct eth_device *net_dev)
{
	int err;
	struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)net_dev->priv;

	sprintf(net_dev->name, "DPNI%d", priv->dpni_id);

	net_dev->iobase = 0;
	net_dev->init = ldpaa_eth_open;
	net_dev->halt = ldpaa_eth_stop;
	net_dev->send = ldpaa_eth_tx;
	net_dev->recv = ldpaa_eth_pull_dequeue_rx;

	/*
	 * TODO: PHY MDIO information
	 * priv->bus = info->bus;
	 * priv->phyaddr = info->phy_addr;
	 * priv->enet_if = info->enet_if;
	 */

	if (init_phy(net_dev))
		return 0;

	err = eth_register(net_dev);
	if (err < 0) {
		printf("eth_register() = %d\n", err);
		return err;
	}

	return 0;
}

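/*
 * Probe one DPNI object: allocate the eth_device and driver private data
 * and register the interface with U-Boot.
 */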
int ldpaa_eth_init(struct dprc_obj_desc obj_desc)
{
	struct eth_device		*net_dev = NULL;
	struct ldpaa_eth_priv		*priv = NULL;
	int				err = 0;

	/* Net device */
	net_dev = (struct eth_device *)malloc(sizeof(struct eth_device));
	if (!net_dev) {
		printf("eth_device malloc() failed\n");
		return -ENOMEM;
	}
	memset(net_dev, 0, sizeof(struct eth_device));

	/* alloc the ldpaa ethernet private struct */
	priv = (struct ldpaa_eth_priv *)malloc(sizeof(struct ldpaa_eth_priv));
	if (!priv) {
		printf("ldpaa_eth_priv malloc() failed\n");
		free(net_dev);
		return -ENOMEM;
	}
	memset(priv, 0, sizeof(struct ldpaa_eth_priv));

	net_dev->priv = (void *)priv;
	priv->net_dev = (struct eth_device *)net_dev;
	priv->dpni_id = obj_desc.id;

	err = ldpaa_eth_netdev_init(net_dev);
	if (err)
		goto err_netdev_init;

	debug("ldpaa ethernet: Probed interface %s\n", net_dev->name);
	return 0;

err_netdev_init:
	free(priv);
	net_dev->priv = NULL;
	free(net_dev);

	return err;
}