1 // SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
2 
3 /* Gigabit Ethernet driver for Mellanox BlueField SoC
4  *
5  * Copyright (C) 2020-2021 NVIDIA CORPORATION & AFFILIATES
6  */
7 
8 #include <linux/acpi.h>
9 #include <linux/device.h>
10 #include <linux/dma-mapping.h>
11 #include <linux/etherdevice.h>
12 #include <linux/interrupt.h>
13 #include <linux/iopoll.h>
14 #include <linux/module.h>
15 #include <linux/phy.h>
16 #include <linux/platform_device.h>
17 #include <linux/rtnetlink.h>
18 #include <linux/skbuff.h>
19 
20 #include "mlxbf_gige.h"
21 #include "mlxbf_gige_regs.h"
22 
23 /* Allocate SKB whose payload pointer aligns with the Bluefield
24  * hardware DMA limitation, i.e. DMA operation can't cross
25  * a 4KB boundary.  A maximum packet size of 2KB is assumed in the
26  * alignment formula.  The alignment logic overallocates an SKB,
27  * and then adjusts the headroom so that the SKB data pointer is
28  * naturally aligned to a 2KB boundary.
29  */
struct sk_buff *mlxbf_gige_alloc_skb(struct mlxbf_gige *priv,
				     unsigned int map_len,
				     dma_addr_t *buf_dma,
				     enum dma_data_direction dir)
{
	struct sk_buff *skb;
	u64 data_addr;
	u64 headroom;

	/* Request twice the default buffer size so that the headroom
	 * adjustment below can never eat into the payload area.
	 */
	skb = netdev_alloc_skb(priv->netdev, MLXBF_GIGE_DEFAULT_BUF_SZ * 2);
	if (!skb)
		return NULL;

	/* Round skb->data up to the next 2KB boundary (the maximum
	 * supported packet size) and reserve the difference as headroom
	 * so the payload is naturally aligned.
	 */
	data_addr = (long)skb->data;
	headroom = ((data_addr + MLXBF_GIGE_DEFAULT_BUF_SZ - 1) &
		    ~(MLXBF_GIGE_DEFAULT_BUF_SZ - 1)) - data_addr;
	if (headroom)
		skb_reserve(skb, headroom);

	/* Hand a streaming DMA mapping of the payload back to the caller */
	*buf_dma = dma_map_single(priv->dev, skb->data, map_len, dir);
	if (dma_mapping_error(priv->dev, *buf_dma)) {
		dev_kfree_skb(skb);
		*buf_dma = (dma_addr_t)0;
		return NULL;
	}

	return skb;
}
65 
mlxbf_gige_initial_mac(struct mlxbf_gige * priv)66 static void mlxbf_gige_initial_mac(struct mlxbf_gige *priv)
67 {
68 	u8 mac[ETH_ALEN];
69 	u64 local_mac;
70 
71 	eth_zero_addr(mac);
72 	mlxbf_gige_get_mac_rx_filter(priv, MLXBF_GIGE_LOCAL_MAC_FILTER_IDX,
73 				     &local_mac);
74 	u64_to_ether_addr(local_mac, mac);
75 
76 	if (is_valid_ether_addr(mac)) {
77 		eth_hw_addr_set(priv->netdev, mac);
78 	} else {
79 		/* Provide a random MAC if for some reason the device has
80 		 * not been configured with a valid MAC address already.
81 		 */
82 		eth_hw_addr_random(priv->netdev);
83 	}
84 
85 	local_mac = ether_addr_to_u64(priv->netdev->dev_addr);
86 	mlxbf_gige_set_mac_rx_filter(priv, MLXBF_GIGE_LOCAL_MAC_FILTER_IDX,
87 				     local_mac);
88 }
89 
mlxbf_gige_cache_stats(struct mlxbf_gige * priv)90 static void mlxbf_gige_cache_stats(struct mlxbf_gige *priv)
91 {
92 	struct mlxbf_gige_stats *p;
93 
94 	/* Cache stats that will be cleared by clean port operation */
95 	p = &priv->stats;
96 	p->rx_din_dropped_pkts += readq(priv->base +
97 					MLXBF_GIGE_RX_DIN_DROP_COUNTER);
98 	p->rx_filter_passed_pkts += readq(priv->base +
99 					  MLXBF_GIGE_RX_PASS_COUNTER_ALL);
100 	p->rx_filter_discard_pkts += readq(priv->base +
101 					   MLXBF_GIGE_RX_DISC_COUNTER_ALL);
102 }
103 
mlxbf_gige_clean_port(struct mlxbf_gige * priv)104 static int mlxbf_gige_clean_port(struct mlxbf_gige *priv)
105 {
106 	u64 control;
107 	u64 temp;
108 	int err;
109 
110 	/* Set the CLEAN_PORT_EN bit to trigger SW reset */
111 	control = readq(priv->base + MLXBF_GIGE_CONTROL);
112 	control |= MLXBF_GIGE_CONTROL_CLEAN_PORT_EN;
113 	writeq(control, priv->base + MLXBF_GIGE_CONTROL);
114 
115 	/* Ensure completion of "clean port" write before polling status */
116 	mb();
117 
118 	err = readq_poll_timeout_atomic(priv->base + MLXBF_GIGE_STATUS, temp,
119 					(temp & MLXBF_GIGE_STATUS_READY),
120 					100, 100000);
121 
122 	/* Clear the CLEAN_PORT_EN bit at end of this loop */
123 	control = readq(priv->base + MLXBF_GIGE_CONTROL);
124 	control &= ~MLXBF_GIGE_CONTROL_CLEAN_PORT_EN;
125 	writeq(control, priv->base + MLXBF_GIGE_CONTROL);
126 
127 	return err;
128 }
129 
/* ndo_open: bring the interface up.
 *
 * Sequence: enable the port, clean (SW-reset) it, start the PHY,
 * initialize TX then RX rings, set up NAPI and the TX queue,
 * request IRQs, and finally enable device interrupts.  Each step
 * is unwound in reverse order on failure via the goto labels.
 *
 * Returns 0 on success or a negative errno.
 */
static int mlxbf_gige_open(struct net_device *netdev)
{
	struct mlxbf_gige *priv = netdev_priv(netdev);
	struct phy_device *phydev = netdev->phydev;
	u64 control;
	u64 int_en;
	int err;

	/* Perform general init of GigE block */
	control = readq(priv->base + MLXBF_GIGE_CONTROL);
	control |= MLXBF_GIGE_CONTROL_PORT_EN;
	writeq(control, priv->base + MLXBF_GIGE_CONTROL);

	/* Snapshot HW counters before clean_port() zeroes them */
	mlxbf_gige_cache_stats(priv);
	err = mlxbf_gige_clean_port(priv);
	if (err)
		return err;

	/* Clear driver's valid_polarity to match hardware,
	 * since the above call to clean_port() resets the
	 * receive polarity used by hardware.
	 */
	priv->valid_polarity = 0;

	phy_start(phydev);

	err = mlxbf_gige_tx_init(priv);
	if (err)
		goto phy_deinit;
	err = mlxbf_gige_rx_init(priv);
	if (err)
		goto tx_deinit;

	/* NAPI must be registered before IRQs are requested so the
	 * RX interrupt handler can safely schedule polling.
	 */
	netif_napi_add(netdev, &priv->napi, mlxbf_gige_poll);
	napi_enable(&priv->napi);
	netif_start_queue(netdev);

	err = mlxbf_gige_request_irqs(priv);
	if (err)
		goto napi_deinit;

	/* Set bits in INT_EN that we care about */
	int_en = MLXBF_GIGE_INT_EN_HW_ACCESS_ERROR |
		 MLXBF_GIGE_INT_EN_TX_CHECKSUM_INPUTS |
		 MLXBF_GIGE_INT_EN_TX_SMALL_FRAME_SIZE |
		 MLXBF_GIGE_INT_EN_TX_PI_CI_EXCEED_WQ_SIZE |
		 MLXBF_GIGE_INT_EN_SW_CONFIG_ERROR |
		 MLXBF_GIGE_INT_EN_SW_ACCESS_ERROR |
		 MLXBF_GIGE_INT_EN_RX_RECEIVE_PACKET;

	/* Ensure completion of all initialization before enabling interrupts */
	mb();

	writeq(int_en, priv->base + MLXBF_GIGE_INT_EN);

	return 0;

napi_deinit:
	netif_stop_queue(netdev);
	napi_disable(&priv->napi);
	netif_napi_del(&priv->napi);
	mlxbf_gige_rx_deinit(priv);

tx_deinit:
	mlxbf_gige_tx_deinit(priv);

phy_deinit:
	phy_stop(phydev);
	return err;
}
200 
/* ndo_stop: bring the interface down.
 *
 * Mirrors open() in reverse: mask all device interrupts first, stop
 * the TX queue and NAPI, free IRQs, stop the PHY, tear down RX/TX
 * rings, then cache HW counters before the final clean_port() reset
 * clears them.  Always returns 0.
 */
static int mlxbf_gige_stop(struct net_device *netdev)
{
	struct mlxbf_gige *priv = netdev_priv(netdev);

	/* Mask all interrupts before tearing anything down */
	writeq(0, priv->base + MLXBF_GIGE_INT_EN);
	netif_stop_queue(netdev);
	napi_disable(&priv->napi);
	netif_napi_del(&priv->napi);
	mlxbf_gige_free_irqs(priv);

	phy_stop(netdev->phydev);

	mlxbf_gige_rx_deinit(priv);
	mlxbf_gige_tx_deinit(priv);
	/* Preserve counters that clean_port() is about to zero */
	mlxbf_gige_cache_stats(priv);
	mlxbf_gige_clean_port(priv);

	return 0;
}
220 
mlxbf_gige_eth_ioctl(struct net_device * netdev,struct ifreq * ifr,int cmd)221 static int mlxbf_gige_eth_ioctl(struct net_device *netdev,
222 				struct ifreq *ifr, int cmd)
223 {
224 	if (!(netif_running(netdev)))
225 		return -EINVAL;
226 
227 	return phy_mii_ioctl(netdev->phydev, ifr, cmd);
228 }
229 
mlxbf_gige_set_rx_mode(struct net_device * netdev)230 static void mlxbf_gige_set_rx_mode(struct net_device *netdev)
231 {
232 	struct mlxbf_gige *priv = netdev_priv(netdev);
233 	bool new_promisc_enabled;
234 
235 	new_promisc_enabled = netdev->flags & IFF_PROMISC;
236 
237 	/* Only write to the hardware registers if the new setting
238 	 * of promiscuous mode is different from the current one.
239 	 */
240 	if (new_promisc_enabled != priv->promisc_enabled) {
241 		priv->promisc_enabled = new_promisc_enabled;
242 
243 		if (new_promisc_enabled)
244 			mlxbf_gige_enable_promisc(priv);
245 		else
246 			mlxbf_gige_disable_promisc(priv);
247 	}
248 }
249 
mlxbf_gige_get_stats64(struct net_device * netdev,struct rtnl_link_stats64 * stats)250 static void mlxbf_gige_get_stats64(struct net_device *netdev,
251 				   struct rtnl_link_stats64 *stats)
252 {
253 	struct mlxbf_gige *priv = netdev_priv(netdev);
254 
255 	netdev_stats_to_stats64(stats, &netdev->stats);
256 
257 	stats->rx_length_errors = priv->stats.rx_truncate_errors;
258 	stats->rx_fifo_errors = priv->stats.rx_din_dropped_pkts +
259 				readq(priv->base + MLXBF_GIGE_RX_DIN_DROP_COUNTER);
260 	stats->rx_crc_errors = priv->stats.rx_mac_errors;
261 	stats->rx_errors = stats->rx_length_errors +
262 			   stats->rx_fifo_errors +
263 			   stats->rx_crc_errors;
264 
265 	stats->tx_fifo_errors = priv->stats.tx_fifo_full;
266 	stats->tx_errors = stats->tx_fifo_errors;
267 }
268 
/* Netdev callbacks for the BlueField GigE interface */
static const struct net_device_ops mlxbf_gige_netdev_ops = {
	.ndo_open		= mlxbf_gige_open,
	.ndo_stop		= mlxbf_gige_stop,
	.ndo_start_xmit		= mlxbf_gige_start_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_eth_ioctl		= mlxbf_gige_eth_ioctl,
	.ndo_set_rx_mode        = mlxbf_gige_set_rx_mode,
	.ndo_get_stats64        = mlxbf_gige_get_stats64,
};
279 
mlxbf_gige_bf2_adjust_link(struct net_device * netdev)280 static void mlxbf_gige_bf2_adjust_link(struct net_device *netdev)
281 {
282 	struct phy_device *phydev = netdev->phydev;
283 
284 	phy_print_status(phydev);
285 }
286 
mlxbf_gige_bf3_adjust_link(struct net_device * netdev)287 static void mlxbf_gige_bf3_adjust_link(struct net_device *netdev)
288 {
289 	struct mlxbf_gige *priv = netdev_priv(netdev);
290 	struct phy_device *phydev = netdev->phydev;
291 	u8 sgmii_mode;
292 	u16 ipg_size;
293 	u32 val;
294 
295 	if (phydev->link && phydev->speed != priv->prev_speed) {
296 		switch (phydev->speed) {
297 		case 1000:
298 			ipg_size = MLXBF_GIGE_1G_IPG_SIZE;
299 			sgmii_mode = MLXBF_GIGE_1G_SGMII_MODE;
300 			break;
301 		case 100:
302 			ipg_size = MLXBF_GIGE_100M_IPG_SIZE;
303 			sgmii_mode = MLXBF_GIGE_100M_SGMII_MODE;
304 			break;
305 		case 10:
306 			ipg_size = MLXBF_GIGE_10M_IPG_SIZE;
307 			sgmii_mode = MLXBF_GIGE_10M_SGMII_MODE;
308 			break;
309 		default:
310 			return;
311 		}
312 
313 		val = readl(priv->plu_base + MLXBF_GIGE_PLU_TX_REG0);
314 		val &= ~(MLXBF_GIGE_PLU_TX_IPG_SIZE_MASK | MLXBF_GIGE_PLU_TX_SGMII_MODE_MASK);
315 		val |= FIELD_PREP(MLXBF_GIGE_PLU_TX_IPG_SIZE_MASK, ipg_size);
316 		val |= FIELD_PREP(MLXBF_GIGE_PLU_TX_SGMII_MODE_MASK, sgmii_mode);
317 		writel(val, priv->plu_base + MLXBF_GIGE_PLU_TX_REG0);
318 
319 		val = readl(priv->plu_base + MLXBF_GIGE_PLU_RX_REG0);
320 		val &= ~MLXBF_GIGE_PLU_RX_SGMII_MODE_MASK;
321 		val |= FIELD_PREP(MLXBF_GIGE_PLU_RX_SGMII_MODE_MASK, sgmii_mode);
322 		writel(val, priv->plu_base + MLXBF_GIGE_PLU_RX_REG0);
323 
324 		priv->prev_speed = phydev->speed;
325 	}
326 
327 	phy_print_status(phydev);
328 }
329 
mlxbf_gige_bf2_set_phy_link_mode(struct phy_device * phydev)330 static void mlxbf_gige_bf2_set_phy_link_mode(struct phy_device *phydev)
331 {
332 	/* MAC only supports 1000T full duplex mode */
333 	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
334 	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
335 	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
336 	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
337 	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
338 
339 	/* Only symmetric pause with flow control enabled is supported so no
340 	 * need to negotiate pause.
341 	 */
342 	linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->advertising);
343 	linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->advertising);
344 }
345 
mlxbf_gige_bf3_set_phy_link_mode(struct phy_device * phydev)346 static void mlxbf_gige_bf3_set_phy_link_mode(struct phy_device *phydev)
347 {
348 	/* MAC only supports full duplex mode */
349 	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
350 	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
351 	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
352 
353 	/* Only symmetric pause with flow control enabled is supported so no
354 	 * need to negotiate pause.
355 	 */
356 	linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->advertising);
357 	linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->advertising);
358 }
359 
/* Per-HW-version link configuration, indexed by priv->hw_version
 * (read from the MLXBF_GIGE_VERSION register at probe time).
 */
static struct mlxbf_gige_link_cfg mlxbf_gige_link_cfgs[] = {
	[MLXBF_GIGE_VERSION_BF2] = {
		.set_phy_link_mode = mlxbf_gige_bf2_set_phy_link_mode,
		.adjust_link = mlxbf_gige_bf2_adjust_link,
		.phy_mode = PHY_INTERFACE_MODE_GMII
	},
	[MLXBF_GIGE_VERSION_BF3] = {
		.set_phy_link_mode = mlxbf_gige_bf3_set_phy_link_mode,
		.adjust_link = mlxbf_gige_bf3_adjust_link,
		.phy_mode = PHY_INTERFACE_MODE_SGMII
	}
};
372 
mlxbf_gige_probe(struct platform_device * pdev)373 static int mlxbf_gige_probe(struct platform_device *pdev)
374 {
375 	struct phy_device *phydev;
376 	struct net_device *netdev;
377 	struct mlxbf_gige *priv;
378 	void __iomem *llu_base;
379 	void __iomem *plu_base;
380 	void __iomem *base;
381 	int addr, phy_irq;
382 	int err;
383 
384 	base = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_MAC);
385 	if (IS_ERR(base))
386 		return PTR_ERR(base);
387 
388 	llu_base = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_LLU);
389 	if (IS_ERR(llu_base))
390 		return PTR_ERR(llu_base);
391 
392 	plu_base = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_PLU);
393 	if (IS_ERR(plu_base))
394 		return PTR_ERR(plu_base);
395 
396 	netdev = devm_alloc_etherdev(&pdev->dev, sizeof(*priv));
397 	if (!netdev)
398 		return -ENOMEM;
399 
400 	SET_NETDEV_DEV(netdev, &pdev->dev);
401 	netdev->netdev_ops = &mlxbf_gige_netdev_ops;
402 	netdev->ethtool_ops = &mlxbf_gige_ethtool_ops;
403 	priv = netdev_priv(netdev);
404 	priv->netdev = netdev;
405 
406 	platform_set_drvdata(pdev, priv);
407 	priv->dev = &pdev->dev;
408 	priv->pdev = pdev;
409 
410 	spin_lock_init(&priv->lock);
411 
412 	priv->hw_version = readq(base + MLXBF_GIGE_VERSION);
413 
414 	/* Attach MDIO device */
415 	err = mlxbf_gige_mdio_probe(pdev, priv);
416 	if (err)
417 		return err;
418 
419 	priv->base = base;
420 	priv->llu_base = llu_base;
421 	priv->plu_base = plu_base;
422 
423 	priv->rx_q_entries = MLXBF_GIGE_DEFAULT_RXQ_SZ;
424 	priv->tx_q_entries = MLXBF_GIGE_DEFAULT_TXQ_SZ;
425 
426 	/* Write initial MAC address to hardware */
427 	mlxbf_gige_initial_mac(priv);
428 
429 	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
430 	if (err) {
431 		dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err);
432 		goto out;
433 	}
434 
435 	priv->error_irq = platform_get_irq(pdev, MLXBF_GIGE_ERROR_INTR_IDX);
436 	priv->rx_irq = platform_get_irq(pdev, MLXBF_GIGE_RECEIVE_PKT_INTR_IDX);
437 	priv->llu_plu_irq = platform_get_irq(pdev, MLXBF_GIGE_LLU_PLU_INTR_IDX);
438 
439 	phy_irq = acpi_dev_gpio_irq_get_by(ACPI_COMPANION(&pdev->dev), "phy-gpios", 0);
440 	if (phy_irq < 0) {
441 		dev_err(&pdev->dev, "Error getting PHY irq. Use polling instead");
442 		phy_irq = PHY_POLL;
443 	}
444 
445 	phydev = phy_find_first(priv->mdiobus);
446 	if (!phydev) {
447 		err = -ENODEV;
448 		goto out;
449 	}
450 
451 	addr = phydev->mdio.addr;
452 	priv->mdiobus->irq[addr] = phy_irq;
453 	phydev->irq = phy_irq;
454 
455 	err = phy_connect_direct(netdev, phydev,
456 				 mlxbf_gige_link_cfgs[priv->hw_version].adjust_link,
457 				 mlxbf_gige_link_cfgs[priv->hw_version].phy_mode);
458 	if (err) {
459 		dev_err(&pdev->dev, "Could not attach to PHY\n");
460 		goto out;
461 	}
462 
463 	mlxbf_gige_link_cfgs[priv->hw_version].set_phy_link_mode(phydev);
464 
465 	/* Display information about attached PHY device */
466 	phy_attached_info(phydev);
467 
468 	err = register_netdev(netdev);
469 	if (err) {
470 		dev_err(&pdev->dev, "Failed to register netdev\n");
471 		phy_disconnect(phydev);
472 		goto out;
473 	}
474 
475 	return 0;
476 
477 out:
478 	mlxbf_gige_mdio_remove(priv);
479 	return err;
480 }
481 
/* Remove callback: unregister the netdev (which closes it if up),
 * disconnect the PHY, and tear down the MDIO bus.  MMIO mappings
 * and the netdev itself are devm-managed and freed automatically.
 */
static int mlxbf_gige_remove(struct platform_device *pdev)
{
	struct mlxbf_gige *priv = platform_get_drvdata(pdev);

	unregister_netdev(priv->netdev);
	phy_disconnect(priv->netdev->phydev);
	mlxbf_gige_mdio_remove(priv);
	platform_set_drvdata(pdev, NULL);

	return 0;
}
493 
mlxbf_gige_shutdown(struct platform_device * pdev)494 static void mlxbf_gige_shutdown(struct platform_device *pdev)
495 {
496 	struct mlxbf_gige *priv = platform_get_drvdata(pdev);
497 
498 	rtnl_lock();
499 	netif_device_detach(priv->netdev);
500 
501 	if (netif_running(priv->netdev))
502 		dev_close(priv->netdev);
503 
504 	rtnl_unlock();
505 }
506 
/* ACPI ID table: the BlueField GigE block is enumerated via ACPI */
static const struct acpi_device_id __maybe_unused mlxbf_gige_acpi_match[] = {
	{ "MLNXBF17", 0 },
	{},
};
MODULE_DEVICE_TABLE(acpi, mlxbf_gige_acpi_match);

static struct platform_driver mlxbf_gige_driver = {
	.probe = mlxbf_gige_probe,
	.remove = mlxbf_gige_remove,
	.shutdown = mlxbf_gige_shutdown,
	.driver = {
		.name = KBUILD_MODNAME,
		.acpi_match_table = ACPI_PTR(mlxbf_gige_acpi_match),
	},
};

module_platform_driver(mlxbf_gige_driver);

MODULE_DESCRIPTION("Mellanox BlueField SoC Gigabit Ethernet Driver");
MODULE_AUTHOR("David Thompson <davthompson@nvidia.com>");
MODULE_AUTHOR("Asmaa Mnebhi <asmaa@nvidia.com>");
MODULE_LICENSE("Dual BSD/GPL");
529