// SPDX-License-Identifier: GPL-2.0-only
/* 10G controller driver for Samsung SoCs
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/ptp_clock_kernel.h>

#include "sxgbe_common.h"
#include "sxgbe_reg.h"
#include "sxgbe_dma.h"

struct sxgbe_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

#define SXGBE_STAT(m)						\
{								\
	#m,							\
	sizeof_field(struct sxgbe_extra_stats, m),		\
	offsetof(struct sxgbe_priv_data, xstats.m)		\
}

static const struct sxgbe_stats sxgbe_gstrings_stats[] = {
	/* TX/RX IRQ events */
	SXGBE_STAT(tx_process_stopped_irq),
	SXGBE_STAT(tx_ctxt_desc_err),
	SXGBE_STAT(tx_threshold),
	SXGBE_STAT(rx_threshold),
	SXGBE_STAT(tx_pkt_n),
	SXGBE_STAT(rx_pkt_n),
	SXGBE_STAT(normal_irq_n),
	SXGBE_STAT(tx_normal_irq_n),
	SXGBE_STAT(rx_normal_irq_n),
	SXGBE_STAT(napi_poll),
	SXGBE_STAT(tx_clean),
	SXGBE_STAT(tx_reset_ic_bit),
	SXGBE_STAT(rx_process_stopped_irq),
	SXGBE_STAT(rx_underflow_irq),

	/* Bus access errors */
	SXGBE_STAT(fatal_bus_error_irq),
	SXGBE_STAT(tx_read_transfer_err),
	SXGBE_STAT(tx_write_transfer_err),
	SXGBE_STAT(tx_desc_access_err),
	SXGBE_STAT(tx_buffer_access_err),
	SXGBE_STAT(tx_data_transfer_err),
	SXGBE_STAT(rx_read_transfer_err),
	SXGBE_STAT(rx_write_transfer_err),
	SXGBE_STAT(rx_desc_access_err),
	SXGBE_STAT(rx_buffer_access_err),
	SXGBE_STAT(rx_data_transfer_err),

	/* EEE-LPI stats */
	SXGBE_STAT(tx_lpi_entry_n),
	SXGBE_STAT(tx_lpi_exit_n),
	SXGBE_STAT(rx_lpi_entry_n),
	SXGBE_STAT(rx_lpi_exit_n),
	SXGBE_STAT(eee_wakeup_error_n),

	/* RX specific */
	/* L2 error */
	SXGBE_STAT(rx_code_gmii_err),
	SXGBE_STAT(rx_watchdog_err),
	SXGBE_STAT(rx_crc_err),
	SXGBE_STAT(rx_gaint_pkt_err),
	SXGBE_STAT(ip_hdr_err),
	SXGBE_STAT(ip_payload_err),
	SXGBE_STAT(overflow_error),

	/* L2 Pkt type */
	SXGBE_STAT(len_pkt),
	SXGBE_STAT(mac_ctl_pkt),
	SXGBE_STAT(dcb_ctl_pkt),
	SXGBE_STAT(arp_pkt),
	SXGBE_STAT(oam_pkt),
	SXGBE_STAT(untag_okt),
	SXGBE_STAT(other_pkt),
	SXGBE_STAT(svlan_tag_pkt),
	SXGBE_STAT(cvlan_tag_pkt),
	SXGBE_STAT(dvlan_ocvlan_icvlan_pkt),
	SXGBE_STAT(dvlan_osvlan_isvlan_pkt),
	SXGBE_STAT(dvlan_osvlan_icvlan_pkt),
	SXGBE_STAT(dvan_ocvlan_icvlan_pkt),

	/* L3/L4 Pkt type */
	SXGBE_STAT(not_ip_pkt),
	SXGBE_STAT(ip4_tcp_pkt),
	SXGBE_STAT(ip4_udp_pkt),
	SXGBE_STAT(ip4_icmp_pkt),
	SXGBE_STAT(ip4_unknown_pkt),
	SXGBE_STAT(ip6_tcp_pkt),
	SXGBE_STAT(ip6_udp_pkt),
	SXGBE_STAT(ip6_icmp_pkt),
	SXGBE_STAT(ip6_unknown_pkt),

	/* Filter specific */
	SXGBE_STAT(vlan_filter_match),
	SXGBE_STAT(sa_filter_fail),
	SXGBE_STAT(da_filter_fail),
	SXGBE_STAT(hash_filter_pass),
	SXGBE_STAT(l3_filter_match),
	SXGBE_STAT(l4_filter_match),

	/* RX context specific */
	SXGBE_STAT(timestamp_dropped),
	SXGBE_STAT(rx_msg_type_no_ptp),
	SXGBE_STAT(rx_ptp_type_sync),
	SXGBE_STAT(rx_ptp_type_follow_up),
	SXGBE_STAT(rx_ptp_type_delay_req),
	SXGBE_STAT(rx_ptp_type_delay_resp),
	SXGBE_STAT(rx_ptp_type_pdelay_req),
	SXGBE_STAT(rx_ptp_type_pdelay_resp),
	SXGBE_STAT(rx_ptp_type_pdelay_follow_up),
	SXGBE_STAT(rx_ptp_announce),
	SXGBE_STAT(rx_ptp_mgmt),
	SXGBE_STAT(rx_ptp_signal),
	SXGBE_STAT(rx_ptp_resv_msg_type),
};
#define SXGBE_STATS_LEN ARRAY_SIZE(sxgbe_gstrings_stats)
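
/* Energy Efficient Ethernet (EEE) handling: the MAC-side state
 * (eee_enabled, eee_active, tx_lpi_timer) is cached in the driver
 * private data, while the PHY-side advertisement is delegated to
 * phylib via phy_ethtool_get_eee()/phy_ethtool_set_eee().
 */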
static int sxgbe_get_eee(struct net_device *dev,
			 struct ethtool_eee *edata)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);

	if (!priv->hw_cap.eee)
		return -EOPNOTSUPP;

	edata->eee_enabled = priv->eee_enabled;
	edata->eee_active = priv->eee_active;
	edata->tx_lpi_timer = priv->tx_lpi_timer;

	return phy_ethtool_get_eee(dev->phydev, edata);
}

static int sxgbe_set_eee(struct net_device *dev,
			 struct ethtool_eee *edata)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);

	priv->eee_enabled = edata->eee_enabled;

	if (!priv->eee_enabled) {
		sxgbe_disable_eee_mode(priv);
	} else {
		/* EEE has been requested, but verify that it can actually
		 * be enabled by invoking eee_init(); it returns false on
		 * failure.
		 */
		priv->eee_enabled = sxgbe_eee_init(priv);
		if (!priv->eee_enabled)
			return -EOPNOTSUPP;

		/* Do not change tx_lpi_timer in case of failure */
		priv->tx_lpi_timer = edata->tx_lpi_timer;
	}

	return phy_ethtool_set_eee(dev->phydev, edata);
}

static void sxgbe_getdrvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}

static u32 sxgbe_getmsglevel(struct net_device *dev)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void sxgbe_setmsglevel(struct net_device *dev, u32 level)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);

	priv->msg_enable = level;
}

static void sxgbe_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;
	u8 *p = data;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < SXGBE_STATS_LEN; i++) {
			memcpy(p, sxgbe_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		break;
	default:
		WARN_ON(1);
		break;
	}
}

static int sxgbe_get_sset_count(struct net_device *netdev, int sset)
{
	int len;

	switch (sset) {
	case ETH_SS_STATS:
		len = SXGBE_STATS_LEN;
		return len;
	default:
		return -EINVAL;
	}
}

static void sxgbe_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *dummy, u64 *data)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	int i;
	char *p;

	if (priv->eee_enabled) {
		int val = phy_get_eee_err(dev->phydev);

		if (val)
			priv->xstats.eee_wakeup_error_n = val;
	}

	for (i = 0; i < SXGBE_STATS_LEN; i++) {
		p = (char *)priv + sxgbe_gstrings_stats[i].stat_offset;
		data[i] = (sxgbe_gstrings_stats[i].sizeof_stat == sizeof(u64))
			? (*(u64 *)p) : (*(u32 *)p);
	}
}

static void sxgbe_get_channels(struct net_device *dev,
			       struct ethtool_channels *channel)
{
	channel->max_rx = SXGBE_MAX_RX_CHANNELS;
	channel->max_tx = SXGBE_MAX_TX_CHANNELS;
	channel->rx_count = SXGBE_RX_QUEUES;
	channel->tx_count = SXGBE_TX_QUEUES;
}
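
/* The RX interrupt watchdog (RIWT) programmed into the DMA counts in
 * units of 256 sxgbe_clk cycles, so the conversions below are
 *	usec = riwt * 256 / (clk_hz / 1000000)
 *	riwt = usec * (clk_hz / 1000000) / 256
 * For example, with a 250 MHz clock a RIWT value of 255 corresponds to
 * roughly 261 us.  Both helpers return 0 if the clock rate is unknown.
 */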
static u32 sxgbe_riwt2usec(u32 riwt, struct sxgbe_priv_data *priv)
{
	unsigned long clk = clk_get_rate(priv->sxgbe_clk);

	if (!clk)
		return 0;

	return (riwt * 256) / (clk / 1000000);
}

static u32 sxgbe_usec2riwt(u32 usec, struct sxgbe_priv_data *priv)
{
	unsigned long clk = clk_get_rate(priv->sxgbe_clk);

	if (!clk)
		return 0;

	return (usec * (clk / 1000000)) / 256;
}

static int sxgbe_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *ec,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);

	if (priv->use_riwt)
		ec->rx_coalesce_usecs = sxgbe_riwt2usec(priv->rx_riwt, priv);

	return 0;
}

static int sxgbe_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *ec,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	unsigned int rx_riwt;

	if (!ec->rx_coalesce_usecs)
		return -EINVAL;

	rx_riwt = sxgbe_usec2riwt(ec->rx_coalesce_usecs, priv);

	if ((rx_riwt > SXGBE_MAX_DMA_RIWT) || (rx_riwt < SXGBE_MIN_DMA_RIWT))
		return -EINVAL;
	else if (!priv->use_riwt)
		return -EOPNOTSUPP;

	priv->rx_riwt = rx_riwt;
	priv->hw->dma->rx_watchdog(priv->ioaddr, priv->rx_riwt);

	return 0;
}

static int sxgbe_get_rss_hash_opts(struct sxgbe_priv_data *priv,
				   struct ethtool_rxnfc *cmd)
{
	cmd->data = 0;

	/* Report default options for RSS on sxgbe */
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int sxgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			   u32 *rule_locs)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXFH:
		ret = sxgbe_get_rss_hash_opts(priv, cmd);
		break;
	default:
		break;
	}

	return ret;
}
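
/* ETHTOOL_SRXFH: translate the requested hash fields into the core RSS
 * control register bits.  TCP and UDP flows must hash on the full
 * IP/port 4-tuple, all other flow types on the IP source/destination
 * pair only; any other combination is rejected with -EINVAL.
 */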
static int sxgbe_set_rss_hash_opt(struct sxgbe_priv_data *priv,
				  struct ethtool_rxnfc *cmd)
{
	u32 reg_val = 0;

	/* RSS does not support anything other than hashing
	 * to queues on src and dst IPs and ports
	 */
	if (cmd->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		if (!(cmd->data & RXH_IP_SRC) ||
		    !(cmd->data & RXH_IP_DST) ||
		    !(cmd->data & RXH_L4_B_0_1) ||
		    !(cmd->data & RXH_L4_B_2_3))
			return -EINVAL;
		reg_val = SXGBE_CORE_RSS_CTL_TCP4TE;
		break;
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		if (!(cmd->data & RXH_IP_SRC) ||
		    !(cmd->data & RXH_IP_DST) ||
		    !(cmd->data & RXH_L4_B_0_1) ||
		    !(cmd->data & RXH_L4_B_2_3))
			return -EINVAL;
		reg_val = SXGBE_CORE_RSS_CTL_UDP4TE;
		break;
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case SCTP_V6_FLOW:
	case IPV4_FLOW:
	case IPV6_FLOW:
		if (!(cmd->data & RXH_IP_SRC) ||
		    !(cmd->data & RXH_IP_DST) ||
		    (cmd->data & RXH_L4_B_0_1) ||
		    (cmd->data & RXH_L4_B_2_3))
			return -EINVAL;
		reg_val = SXGBE_CORE_RSS_CTL_IP2TE;
		break;
	default:
		return -EINVAL;
	}

	/* Read SXGBE RSS control register and update */
	reg_val |= readl(priv->ioaddr + SXGBE_CORE_RSS_CTL_REG);
	writel(reg_val, priv->ioaddr + SXGBE_CORE_RSS_CTL_REG);
	readl(priv->ioaddr + SXGBE_CORE_RSS_CTL_REG);

	return 0;
}

static int sxgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		ret = sxgbe_set_rss_hash_opt(priv, cmd);
		break;
	default:
		break;
	}

	return ret;
}

static void sxgbe_get_regs(struct net_device *dev,
			   struct ethtool_regs *regs, void *space)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	u32 *reg_space = (u32 *)space;
	int reg_offset;
	int reg_ix = 0;
	void __iomem *ioaddr = priv->ioaddr;

	memset(reg_space, 0x0, REG_SPACE_SIZE);

	/* MAC registers */
	for (reg_offset = START_MAC_REG_OFFSET;
	     reg_offset <= MAX_MAC_REG_OFFSET; reg_offset += 4) {
		reg_space[reg_ix] = readl(ioaddr + reg_offset);
		reg_ix++;
	}

	/* MTL registers */
	for (reg_offset = START_MTL_REG_OFFSET;
	     reg_offset <= MAX_MTL_REG_OFFSET; reg_offset += 4) {
		reg_space[reg_ix] = readl(ioaddr + reg_offset);
		reg_ix++;
	}

	/* DMA registers */
	for (reg_offset = START_DMA_REG_OFFSET;
	     reg_offset <= MAX_DMA_REG_OFFSET; reg_offset += 4) {
		reg_space[reg_ix] = readl(ioaddr + reg_offset);
		reg_ix++;
	}

	BUG_ON(reg_ix * 4 > REG_SPACE_SIZE);
}

static int sxgbe_get_regs_len(struct net_device *dev)
{
	return REG_SPACE_SIZE;
}

static const struct ethtool_ops sxgbe_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
	.get_drvinfo = sxgbe_getdrvinfo,
	.get_msglevel = sxgbe_getmsglevel,
	.set_msglevel = sxgbe_setmsglevel,
	.get_link = ethtool_op_get_link,
	.get_strings = sxgbe_get_strings,
	.get_ethtool_stats = sxgbe_get_ethtool_stats,
	.get_sset_count = sxgbe_get_sset_count,
	.get_channels = sxgbe_get_channels,
	.get_coalesce = sxgbe_get_coalesce,
	.set_coalesce = sxgbe_set_coalesce,
	.get_rxnfc = sxgbe_get_rxnfc,
	.set_rxnfc = sxgbe_set_rxnfc,
	.get_regs = sxgbe_get_regs,
	.get_regs_len = sxgbe_get_regs_len,
	.get_eee = sxgbe_get_eee,
	.set_eee = sxgbe_set_eee,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};

void sxgbe_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &sxgbe_ethtool_ops;
}