1 /* 2 * Keystone GBE and XGBE subsystem code 3 * 4 * Copyright (C) 2014 Texas Instruments Incorporated 5 * Authors: Sandeep Nair <sandeep_n@ti.com> 6 * Sandeep Paulraj <s-paulraj@ti.com> 7 * Cyril Chemparathy <cyril@ti.com> 8 * Santosh Shilimkar <santosh.shilimkar@ti.com> 9 * Wingman Kwok <w-kwok2@ti.com> 10 * 11 * This program is free software; you can redistribute it and/or 12 * modify it under the terms of the GNU General Public License as 13 * published by the Free Software Foundation version 2. 14 * 15 * This program is distributed "as is" WITHOUT ANY WARRANTY of any 16 * kind, whether express or implied; without even the implied warranty 17 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 * GNU General Public License for more details. 19 */ 20 21 #include <linux/io.h> 22 #include <linux/module.h> 23 #include <linux/of_mdio.h> 24 #include <linux/of_address.h> 25 #include <linux/if_vlan.h> 26 #include <linux/ethtool.h> 27 28 #include "cpsw_ale.h" 29 #include "netcp.h" 30 31 #define NETCP_DRIVER_NAME "TI KeyStone Ethernet Driver" 32 #define NETCP_DRIVER_VERSION "v1.0" 33 34 #define GBE_IDENT(reg) ((reg >> 16) & 0xffff) 35 #define GBE_MAJOR_VERSION(reg) (reg >> 8 & 0x7) 36 #define GBE_MINOR_VERSION(reg) (reg & 0xff) 37 #define GBE_RTL_VERSION(reg) ((reg >> 11) & 0x1f) 38 39 /* 1G Ethernet SS defines */ 40 #define GBE_MODULE_NAME "netcp-gbe" 41 #define GBE_SS_VERSION_14 0x4ed21104 42 43 #define GBE13_SGMII_MODULE_OFFSET 0x100 44 #define GBE13_SGMII34_MODULE_OFFSET 0x400 45 #define GBE13_SWITCH_MODULE_OFFSET 0x800 46 #define GBE13_HOST_PORT_OFFSET 0x834 47 #define GBE13_SLAVE_PORT_OFFSET 0x860 48 #define GBE13_EMAC_OFFSET 0x900 49 #define GBE13_SLAVE_PORT2_OFFSET 0xa00 50 #define GBE13_HW_STATS_OFFSET 0xb00 51 #define GBE13_ALE_OFFSET 0xe00 52 #define GBE13_HOST_PORT_NUM 0 53 #define GBE13_NUM_SLAVES 4 54 #define GBE13_NUM_ALE_PORTS (GBE13_NUM_SLAVES + 1) 55 #define GBE13_NUM_ALE_ENTRIES 1024 56 57 /* 10G Ethernet SS defines */ 58 #define XGBE_MODULE_NAME "netcp-xgbe" 59 #define XGBE_SS_VERSION_10 0x4ee42100 60 61 #define XGBE_SERDES_REG_INDEX 1 62 #define XGBE10_SGMII_MODULE_OFFSET 0x100 63 #define XGBE10_SWITCH_MODULE_OFFSET 0x1000 64 #define XGBE10_HOST_PORT_OFFSET 0x1034 65 #define XGBE10_SLAVE_PORT_OFFSET 0x1064 66 #define XGBE10_EMAC_OFFSET 0x1400 67 #define XGBE10_ALE_OFFSET 0x1700 68 #define XGBE10_HW_STATS_OFFSET 0x1800 69 #define XGBE10_HOST_PORT_NUM 0 70 #define XGBE10_NUM_SLAVES 2 71 #define XGBE10_NUM_ALE_PORTS (XGBE10_NUM_SLAVES + 1) 72 #define XGBE10_NUM_ALE_ENTRIES 1024 73 74 #define GBE_TIMER_INTERVAL (HZ / 2) 75 76 /* Soft reset register values */ 77 #define SOFT_RESET_MASK BIT(0) 78 #define SOFT_RESET BIT(0) 79 #define DEVICE_EMACSL_RESET_POLL_COUNT 100 80 #define GMACSL_RET_WARN_RESET_INCOMPLETE -2 81 82 #define MACSL_RX_ENABLE_CSF BIT(23) 83 #define MACSL_ENABLE_EXT_CTL BIT(18) 84 #define MACSL_XGMII_ENABLE BIT(13) 85 #define MACSL_XGIG_MODE BIT(8) 86 #define MACSL_GIG_MODE BIT(7) 87 #define MACSL_GMII_ENABLE BIT(5) 88 #define MACSL_FULLDUPLEX BIT(0) 89 90 #define GBE_CTL_P0_ENABLE BIT(2) 91 #define GBE_REG_VAL_STAT_ENABLE_ALL 0xff 92 #define XGBE_REG_VAL_STAT_ENABLE_ALL 0xf 93 #define GBE_STATS_CD_SEL BIT(28) 94 95 #define GBE_PORT_MASK(x) (BIT(x) - 1) 96 #define GBE_MASK_NO_PORTS 0 97 98 #define GBE_DEF_1G_MAC_CONTROL \ 99 (MACSL_GIG_MODE | MACSL_GMII_ENABLE | \ 100 MACSL_ENABLE_EXT_CTL | MACSL_RX_ENABLE_CSF) 101 102 #define GBE_DEF_10G_MAC_CONTROL \ 103 (MACSL_XGIG_MODE | MACSL_XGMII_ENABLE | \ 104 MACSL_ENABLE_EXT_CTL | MACSL_RX_ENABLE_CSF) 105 106 #define GBE_STATSA_MODULE 0 107 #define GBE_STATSB_MODULE 1 108 #define GBE_STATSC_MODULE 2 109 #define GBE_STATSD_MODULE 3 110 111 #define XGBE_STATS0_MODULE 0 112 #define XGBE_STATS1_MODULE 1 113 #define XGBE_STATS2_MODULE 2 114 115 #define MAX_SLAVES GBE13_NUM_SLAVES 116 /* s: 0-based slave_port */ 117 #define SGMII_BASE(s) \ 118 (((s) < 2) ? gbe_dev->sgmii_port_regs : gbe_dev->sgmii_port34_regs) 119 120 #define GBE_TX_QUEUE 648 121 #define GBE_TXHOOK_ORDER 0 122 #define GBE_DEFAULT_ALE_AGEOUT 30 123 #define SLAVE_LINK_IS_XGMII(s) ((s)->link_interface >= XGMII_LINK_MAC_PHY) 124 #define NETCP_LINK_STATE_INVALID -1 125 126 #define GBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \ 127 offsetof(struct gbe##_##rb, rn) 128 #define XGBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \ 129 offsetof(struct xgbe##_##rb, rn) 130 #define GBE_REG_ADDR(p, rb, rn) (p->rb + p->rb##_ofs.rn) 131 132 struct xgbe_ss_regs { 133 u32 id_ver; 134 u32 synce_count; 135 u32 synce_mux; 136 u32 control; 137 }; 138 139 struct xgbe_switch_regs { 140 u32 id_ver; 141 u32 control; 142 u32 emcontrol; 143 u32 stat_port_en; 144 u32 ptype; 145 u32 soft_idle; 146 u32 thru_rate; 147 u32 gap_thresh; 148 u32 tx_start_wds; 149 u32 flow_control; 150 u32 cppi_thresh; 151 }; 152 153 struct xgbe_port_regs { 154 u32 blk_cnt; 155 u32 port_vlan; 156 u32 tx_pri_map; 157 u32 sa_lo; 158 u32 sa_hi; 159 u32 ts_ctl; 160 u32 ts_seq_ltype; 161 u32 ts_vlan; 162 u32 ts_ctl_ltype2; 163 u32 ts_ctl2; 164 u32 control; 165 }; 166 167 struct xgbe_host_port_regs { 168 u32 blk_cnt; 169 u32 port_vlan; 170 u32 tx_pri_map; 171 u32 src_id; 172 u32 rx_pri_map; 173 u32 rx_maxlen; 174 }; 175 176 struct xgbe_emac_regs { 177 u32 id_ver; 178 u32 mac_control; 179 u32 mac_status; 180 u32 soft_reset; 181 u32 rx_maxlen; 182 u32 __reserved_0; 183 u32 rx_pause; 184 u32 tx_pause; 185 u32 em_control; 186 u32 __reserved_1; 187 u32 tx_gap; 188 u32 rsvd[4]; 189 }; 190 191 struct xgbe_host_hw_stats { 192 u32 rx_good_frames; 193 u32 rx_broadcast_frames; 194 u32 rx_multicast_frames; 195 u32 __rsvd_0[3]; 196 u32 rx_oversized_frames; 197 u32 __rsvd_1; 198 u32 rx_undersized_frames; 199 u32 __rsvd_2; 200 u32 overrun_type4; 201 u32 overrun_type5; 202 u32 rx_bytes; 203 u32 tx_good_frames; 204 u32 tx_broadcast_frames; 205 u32 tx_multicast_frames; 206 u32 __rsvd_3[9]; 207 u32 tx_bytes; 208 u32 tx_64byte_frames; 209 u32 tx_65_to_127byte_frames; 210 u32 tx_128_to_255byte_frames; 211 u32 tx_256_to_511byte_frames; 212 u32 tx_512_to_1023byte_frames; 213 u32 tx_1024byte_frames; 214 u32 net_bytes; 215 u32 rx_sof_overruns; 216 u32 rx_mof_overruns; 217 u32 rx_dma_overruns; 218 }; 219 220 struct xgbe_hw_stats { 221 u32 rx_good_frames; 222 u32 rx_broadcast_frames; 223 u32 rx_multicast_frames; 224 u32 rx_pause_frames; 225 u32 rx_crc_errors; 226 u32 rx_align_code_errors; 227 u32 rx_oversized_frames; 228 u32 rx_jabber_frames; 229 u32 rx_undersized_frames; 230 u32 rx_fragments; 231 u32 overrun_type4; 232 u32 overrun_type5; 233 u32 rx_bytes; 234 u32 tx_good_frames; 235 u32 tx_broadcast_frames; 236 u32 tx_multicast_frames; 237 u32 tx_pause_frames; 238 u32 tx_deferred_frames; 239 u32 tx_collision_frames; 240 u32 tx_single_coll_frames; 241 u32 tx_mult_coll_frames; 242 u32 tx_excessive_collisions; 243 u32 tx_late_collisions; 244 u32 tx_underrun; 245 u32 tx_carrier_sense_errors; 246 u32 tx_bytes; 247 u32 tx_64byte_frames; 248 u32 tx_65_to_127byte_frames; 249 u32 tx_128_to_255byte_frames; 250 u32 tx_256_to_511byte_frames; 251 u32 tx_512_to_1023byte_frames; 252 u32 tx_1024byte_frames; 253 u32 net_bytes; 254 u32 rx_sof_overruns; 255 u32 rx_mof_overruns; 256 u32 rx_dma_overruns; 257 }; 258 259 #define XGBE10_NUM_STAT_ENTRIES (sizeof(struct xgbe_hw_stats)/sizeof(u32)) 260 261 struct gbe_ss_regs { 262 u32 id_ver; 263 u32 synce_count; 264 u32 synce_mux; 265 }; 266 267 struct gbe_ss_regs_ofs { 268 u16 id_ver; 269 u16 control; 270 }; 271 272 struct gbe_switch_regs { 273 u32 id_ver; 274 u32 control; 275 u32 soft_reset; 276 u32 stat_port_en; 277 u32 ptype; 278 u32 soft_idle; 279 u32 thru_rate; 280 u32 gap_thresh; 281 u32 tx_start_wds; 282 u32 flow_control; 283 }; 284 285 struct gbe_switch_regs_ofs { 286 u16 id_ver; 287 u16 control; 288 u16 soft_reset; 289 u16 emcontrol; 290 u16 stat_port_en; 291 u16 ptype; 292 u16 flow_control; 293 }; 294 295 struct gbe_port_regs { 296 u32 max_blks; 297 u32 blk_cnt; 298 u32 port_vlan; 299 u32 tx_pri_map; 300 u32 sa_lo; 301 u32 sa_hi; 302 u32 ts_ctl; 303 u32 ts_seq_ltype; 304 u32 ts_vlan; 305 u32 ts_ctl_ltype2; 306 u32 ts_ctl2; 307 }; 308 309 struct gbe_port_regs_ofs { 310 u16 port_vlan; 311 u16 tx_pri_map; 312 u16 sa_lo; 313 u16 sa_hi; 314 u16 ts_ctl; 315 u16 ts_seq_ltype; 316 u16 ts_vlan; 317 u16 ts_ctl_ltype2; 318 u16 ts_ctl2; 319 }; 320 321 struct gbe_host_port_regs { 322 u32 src_id; 323 u32 port_vlan; 324 u32 rx_pri_map; 325 u32 rx_maxlen; 326 }; 327 328 struct gbe_host_port_regs_ofs { 329 u16 port_vlan; 330 u16 tx_pri_map; 331 u16 rx_maxlen; 332 }; 333 334 struct gbe_emac_regs { 335 u32 id_ver; 336 u32 mac_control; 337 u32 mac_status; 338 u32 soft_reset; 339 u32 rx_maxlen; 340 u32 __reserved_0; 341 u32 rx_pause; 342 u32 tx_pause; 343 u32 __reserved_1; 344 u32 rx_pri_map; 345 u32 rsvd[6]; 346 }; 347 348 struct gbe_emac_regs_ofs { 349 u16 mac_control; 350 u16 soft_reset; 351 u16 rx_maxlen; 352 }; 353 354 struct gbe_hw_stats { 355 u32 rx_good_frames; 356 u32 rx_broadcast_frames; 357 u32 rx_multicast_frames; 358 u32 rx_pause_frames; 359 u32 rx_crc_errors; 360 u32 rx_align_code_errors; 361 u32 rx_oversized_frames; 362 u32 rx_jabber_frames; 363 u32 rx_undersized_frames; 364 u32 rx_fragments; 365 u32 __pad_0[2]; 366 u32 rx_bytes; 367 u32 tx_good_frames; 368 u32 tx_broadcast_frames; 369 u32 tx_multicast_frames; 370 u32 tx_pause_frames; 371 u32 tx_deferred_frames; 372 u32 tx_collision_frames; 373 u32 tx_single_coll_frames; 374 u32 tx_mult_coll_frames; 375 u32 tx_excessive_collisions; 376 u32 tx_late_collisions; 377 u32 tx_underrun; 378 u32 tx_carrier_sense_errors; 379 u32 tx_bytes; 380 u32 tx_64byte_frames; 381 u32 tx_65_to_127byte_frames; 382 u32 tx_128_to_255byte_frames; 383 u32 tx_256_to_511byte_frames; 384 u32 tx_512_to_1023byte_frames; 385 u32 tx_1024byte_frames; 386 u32 net_bytes; 387 u32 rx_sof_overruns; 388 u32 rx_mof_overruns; 389 u32 rx_dma_overruns; 390 }; 391 392 #define GBE13_NUM_HW_STAT_ENTRIES (sizeof(struct gbe_hw_stats)/sizeof(u32)) 393 #define GBE13_NUM_HW_STATS_MOD 2 394 #define XGBE10_NUM_HW_STATS_MOD 3 395 #define GBE_MAX_HW_STAT_MODS 3 396 #define GBE_HW_STATS_REG_MAP_SZ 0x100 397 398 struct gbe_slave { 399 void __iomem *port_regs; 400 void __iomem *emac_regs; 401 struct gbe_port_regs_ofs port_regs_ofs; 402 struct gbe_emac_regs_ofs emac_regs_ofs; 403 int slave_num; /* 0 based logical number */ 404 int port_num; /* actual port number */ 405 atomic_t link_state; 406 bool open; 407 struct phy_device *phy; 408 u32 link_interface; 409 u32 mac_control; 410 u8 phy_port_t; 411 struct device_node *phy_node; 412 struct list_head slave_list; 413 }; 414 415 struct gbe_priv { 416 struct device *dev; 417 struct netcp_device *netcp_device; 418 struct timer_list timer; 419 u32 num_slaves; 420 u32 ale_entries; 421 u32 ale_ports; 422 bool enable_ale; 423 struct netcp_tx_pipe tx_pipe; 424 425 int host_port; 426 u32 rx_packet_max; 427 u32 ss_version; 428 429 void __iomem *ss_regs; 430 void __iomem *switch_regs; 431 void __iomem *host_port_regs; 432 void __iomem *ale_reg; 433 void __iomem *sgmii_port_regs; 434 void __iomem *sgmii_port34_regs; 435 void __iomem *xgbe_serdes_regs; 436 void __iomem *hw_stats_regs[GBE_MAX_HW_STAT_MODS]; 437 438 struct gbe_ss_regs_ofs ss_regs_ofs; 439 struct gbe_switch_regs_ofs switch_regs_ofs; 440 struct gbe_host_port_regs_ofs host_port_regs_ofs; 441 442 struct cpsw_ale *ale; 443 unsigned int tx_queue_id; 444 const char *dma_chan_name; 445 446 struct list_head gbe_intf_head; 447 struct list_head secondary_slaves; 448 struct net_device *dummy_ndev; 449 450 u64 *hw_stats; 451 const struct netcp_ethtool_stat *et_stats; 452 int num_et_stats; 453 /* Lock for updating the hwstats */ 454 spinlock_t hw_stats_lock; 455 }; 456 457 struct gbe_intf { 458 struct net_device *ndev; 459 struct device *dev; 460 struct gbe_priv *gbe_dev; 461 struct netcp_tx_pipe tx_pipe; 462 struct gbe_slave *slave; 463 struct list_head gbe_intf_list; 464 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; 465 }; 466 467 static struct netcp_module gbe_module; 468 static struct netcp_module xgbe_module; 469 470 /* Statistic management */ 471 struct netcp_ethtool_stat { 472 char desc[ETH_GSTRING_LEN]; 473 int type; 474 u32 size; 475 int offset; 476 }; 477 478 #define GBE_STATSA_INFO(field) "GBE_A:"#field, GBE_STATSA_MODULE,\ 479 FIELD_SIZEOF(struct gbe_hw_stats, field), \ 480 offsetof(struct gbe_hw_stats, field) 481 482 #define GBE_STATSB_INFO(field) "GBE_B:"#field, GBE_STATSB_MODULE,\ 483 FIELD_SIZEOF(struct gbe_hw_stats, field), \ 484 offsetof(struct gbe_hw_stats, field) 485 486 #define GBE_STATSC_INFO(field) "GBE_C:"#field, GBE_STATSC_MODULE,\ 487 FIELD_SIZEOF(struct gbe_hw_stats, field), \ 488 offsetof(struct gbe_hw_stats, field) 489 490 #define GBE_STATSD_INFO(field) "GBE_D:"#field, GBE_STATSD_MODULE,\ 491 FIELD_SIZEOF(struct gbe_hw_stats, field), \ 492 offsetof(struct gbe_hw_stats, field) 493 494 static const struct netcp_ethtool_stat gbe13_et_stats[] = { 495 /* GBE module A */ 496 {GBE_STATSA_INFO(rx_good_frames)}, 497 {GBE_STATSA_INFO(rx_broadcast_frames)}, 498 {GBE_STATSA_INFO(rx_multicast_frames)}, 499 {GBE_STATSA_INFO(rx_pause_frames)}, 500 {GBE_STATSA_INFO(rx_crc_errors)}, 501 {GBE_STATSA_INFO(rx_align_code_errors)}, 502 {GBE_STATSA_INFO(rx_oversized_frames)}, 503 {GBE_STATSA_INFO(rx_jabber_frames)}, 504 {GBE_STATSA_INFO(rx_undersized_frames)}, 505 {GBE_STATSA_INFO(rx_fragments)}, 506 {GBE_STATSA_INFO(rx_bytes)}, 507 {GBE_STATSA_INFO(tx_good_frames)}, 508 {GBE_STATSA_INFO(tx_broadcast_frames)}, 509 {GBE_STATSA_INFO(tx_multicast_frames)}, 510 {GBE_STATSA_INFO(tx_pause_frames)}, 511 {GBE_STATSA_INFO(tx_deferred_frames)}, 512 {GBE_STATSA_INFO(tx_collision_frames)}, 513 {GBE_STATSA_INFO(tx_single_coll_frames)}, 514 {GBE_STATSA_INFO(tx_mult_coll_frames)}, 515 {GBE_STATSA_INFO(tx_excessive_collisions)}, 516 {GBE_STATSA_INFO(tx_late_collisions)}, 517 {GBE_STATSA_INFO(tx_underrun)}, 518 {GBE_STATSA_INFO(tx_carrier_sense_errors)}, 519 {GBE_STATSA_INFO(tx_bytes)}, 520 {GBE_STATSA_INFO(tx_64byte_frames)}, 521 {GBE_STATSA_INFO(tx_65_to_127byte_frames)}, 522 {GBE_STATSA_INFO(tx_128_to_255byte_frames)}, 523 {GBE_STATSA_INFO(tx_256_to_511byte_frames)}, 524 {GBE_STATSA_INFO(tx_512_to_1023byte_frames)}, 525 {GBE_STATSA_INFO(tx_1024byte_frames)}, 526 {GBE_STATSA_INFO(net_bytes)}, 527 {GBE_STATSA_INFO(rx_sof_overruns)}, 528 {GBE_STATSA_INFO(rx_mof_overruns)}, 529 {GBE_STATSA_INFO(rx_dma_overruns)}, 530 /* GBE module B */ 531 {GBE_STATSB_INFO(rx_good_frames)}, 532 {GBE_STATSB_INFO(rx_broadcast_frames)}, 533 {GBE_STATSB_INFO(rx_multicast_frames)}, 534 {GBE_STATSB_INFO(rx_pause_frames)}, 535 {GBE_STATSB_INFO(rx_crc_errors)}, 536 {GBE_STATSB_INFO(rx_align_code_errors)}, 537 {GBE_STATSB_INFO(rx_oversized_frames)}, 538 {GBE_STATSB_INFO(rx_jabber_frames)}, 539 {GBE_STATSB_INFO(rx_undersized_frames)}, 540 {GBE_STATSB_INFO(rx_fragments)}, 541 {GBE_STATSB_INFO(rx_bytes)}, 542 {GBE_STATSB_INFO(tx_good_frames)}, 543 {GBE_STATSB_INFO(tx_broadcast_frames)}, 544 {GBE_STATSB_INFO(tx_multicast_frames)}, 545 {GBE_STATSB_INFO(tx_pause_frames)}, 546 {GBE_STATSB_INFO(tx_deferred_frames)}, 547 {GBE_STATSB_INFO(tx_collision_frames)}, 548 {GBE_STATSB_INFO(tx_single_coll_frames)}, 549 {GBE_STATSB_INFO(tx_mult_coll_frames)}, 550 {GBE_STATSB_INFO(tx_excessive_collisions)}, 551 {GBE_STATSB_INFO(tx_late_collisions)}, 552 {GBE_STATSB_INFO(tx_underrun)}, 553 {GBE_STATSB_INFO(tx_carrier_sense_errors)}, 554 {GBE_STATSB_INFO(tx_bytes)}, 555 {GBE_STATSB_INFO(tx_64byte_frames)}, 556 {GBE_STATSB_INFO(tx_65_to_127byte_frames)}, 557 {GBE_STATSB_INFO(tx_128_to_255byte_frames)}, 558 {GBE_STATSB_INFO(tx_256_to_511byte_frames)}, 559 {GBE_STATSB_INFO(tx_512_to_1023byte_frames)}, 560 {GBE_STATSB_INFO(tx_1024byte_frames)}, 561 {GBE_STATSB_INFO(net_bytes)}, 562 {GBE_STATSB_INFO(rx_sof_overruns)}, 563 {GBE_STATSB_INFO(rx_mof_overruns)}, 564 {GBE_STATSB_INFO(rx_dma_overruns)}, 565 /* GBE module C */ 566 {GBE_STATSC_INFO(rx_good_frames)}, 567 {GBE_STATSC_INFO(rx_broadcast_frames)}, 568 {GBE_STATSC_INFO(rx_multicast_frames)}, 569 {GBE_STATSC_INFO(rx_pause_frames)}, 570 {GBE_STATSC_INFO(rx_crc_errors)}, 571 {GBE_STATSC_INFO(rx_align_code_errors)}, 572 {GBE_STATSC_INFO(rx_oversized_frames)}, 573 {GBE_STATSC_INFO(rx_jabber_frames)}, 574 {GBE_STATSC_INFO(rx_undersized_frames)}, 575 {GBE_STATSC_INFO(rx_fragments)}, 576 {GBE_STATSC_INFO(rx_bytes)}, 577 {GBE_STATSC_INFO(tx_good_frames)}, 578 {GBE_STATSC_INFO(tx_broadcast_frames)}, 579 {GBE_STATSC_INFO(tx_multicast_frames)}, 580 {GBE_STATSC_INFO(tx_pause_frames)}, 581 {GBE_STATSC_INFO(tx_deferred_frames)}, 582 {GBE_STATSC_INFO(tx_collision_frames)}, 583 {GBE_STATSC_INFO(tx_single_coll_frames)}, 584 {GBE_STATSC_INFO(tx_mult_coll_frames)}, 585 {GBE_STATSC_INFO(tx_excessive_collisions)}, 586 {GBE_STATSC_INFO(tx_late_collisions)}, 587 {GBE_STATSC_INFO(tx_underrun)}, 588 {GBE_STATSC_INFO(tx_carrier_sense_errors)}, 589 {GBE_STATSC_INFO(tx_bytes)}, 590 {GBE_STATSC_INFO(tx_64byte_frames)}, 591 {GBE_STATSC_INFO(tx_65_to_127byte_frames)}, 592 {GBE_STATSC_INFO(tx_128_to_255byte_frames)}, 593 {GBE_STATSC_INFO(tx_256_to_511byte_frames)}, 594 {GBE_STATSC_INFO(tx_512_to_1023byte_frames)}, 595 {GBE_STATSC_INFO(tx_1024byte_frames)}, 596 {GBE_STATSC_INFO(net_bytes)}, 597 {GBE_STATSC_INFO(rx_sof_overruns)}, 598 {GBE_STATSC_INFO(rx_mof_overruns)}, 599 {GBE_STATSC_INFO(rx_dma_overruns)}, 600 /* GBE module D */ 601 {GBE_STATSD_INFO(rx_good_frames)}, 602 {GBE_STATSD_INFO(rx_broadcast_frames)}, 603 {GBE_STATSD_INFO(rx_multicast_frames)}, 604 {GBE_STATSD_INFO(rx_pause_frames)}, 605 {GBE_STATSD_INFO(rx_crc_errors)}, 606 {GBE_STATSD_INFO(rx_align_code_errors)}, 607 {GBE_STATSD_INFO(rx_oversized_frames)}, 608 {GBE_STATSD_INFO(rx_jabber_frames)}, 609 {GBE_STATSD_INFO(rx_undersized_frames)}, 610 {GBE_STATSD_INFO(rx_fragments)}, 611 {GBE_STATSD_INFO(rx_bytes)}, 612 {GBE_STATSD_INFO(tx_good_frames)}, 613 {GBE_STATSD_INFO(tx_broadcast_frames)}, 614 {GBE_STATSD_INFO(tx_multicast_frames)}, 615 {GBE_STATSD_INFO(tx_pause_frames)}, 616 {GBE_STATSD_INFO(tx_deferred_frames)}, 617 {GBE_STATSD_INFO(tx_collision_frames)}, 618 {GBE_STATSD_INFO(tx_single_coll_frames)}, 619 {GBE_STATSD_INFO(tx_mult_coll_frames)}, 620 {GBE_STATSD_INFO(tx_excessive_collisions)}, 621 {GBE_STATSD_INFO(tx_late_collisions)}, 622 {GBE_STATSD_INFO(tx_underrun)}, 623 {GBE_STATSD_INFO(tx_carrier_sense_errors)}, 624 {GBE_STATSD_INFO(tx_bytes)}, 625 {GBE_STATSD_INFO(tx_64byte_frames)}, 626 {GBE_STATSD_INFO(tx_65_to_127byte_frames)}, 627 {GBE_STATSD_INFO(tx_128_to_255byte_frames)}, 628 {GBE_STATSD_INFO(tx_256_to_511byte_frames)}, 629 {GBE_STATSD_INFO(tx_512_to_1023byte_frames)}, 630 {GBE_STATSD_INFO(tx_1024byte_frames)}, 631 {GBE_STATSD_INFO(net_bytes)}, 632 {GBE_STATSD_INFO(rx_sof_overruns)}, 633 {GBE_STATSD_INFO(rx_mof_overruns)}, 634 {GBE_STATSD_INFO(rx_dma_overruns)}, 635 }; 636 637 #define XGBE_STATS0_INFO(field) "GBE_0:"#field, XGBE_STATS0_MODULE, \ 638 FIELD_SIZEOF(struct xgbe_hw_stats, field), \ 639 offsetof(struct xgbe_hw_stats, field) 640 641 #define XGBE_STATS1_INFO(field) "GBE_1:"#field, XGBE_STATS1_MODULE, \ 642 FIELD_SIZEOF(struct xgbe_hw_stats, field), \ 643 offsetof(struct xgbe_hw_stats, field) 644 645 #define XGBE_STATS2_INFO(field) "GBE_2:"#field, XGBE_STATS2_MODULE, \ 646 FIELD_SIZEOF(struct xgbe_hw_stats, field), \ 647 offsetof(struct xgbe_hw_stats, field) 648 649 static const struct netcp_ethtool_stat xgbe10_et_stats[] = { 650 /* GBE module 0 */ 651 {XGBE_STATS0_INFO(rx_good_frames)}, 652 {XGBE_STATS0_INFO(rx_broadcast_frames)}, 653 {XGBE_STATS0_INFO(rx_multicast_frames)}, 654 {XGBE_STATS0_INFO(rx_oversized_frames)}, 655 {XGBE_STATS0_INFO(rx_undersized_frames)}, 656 {XGBE_STATS0_INFO(overrun_type4)}, 657 {XGBE_STATS0_INFO(overrun_type5)}, 658 {XGBE_STATS0_INFO(rx_bytes)}, 659 {XGBE_STATS0_INFO(tx_good_frames)}, 660 {XGBE_STATS0_INFO(tx_broadcast_frames)}, 661 {XGBE_STATS0_INFO(tx_multicast_frames)}, 662 {XGBE_STATS0_INFO(tx_bytes)}, 663 {XGBE_STATS0_INFO(tx_64byte_frames)}, 664 {XGBE_STATS0_INFO(tx_65_to_127byte_frames)}, 665 {XGBE_STATS0_INFO(tx_128_to_255byte_frames)}, 666 {XGBE_STATS0_INFO(tx_256_to_511byte_frames)}, 667 {XGBE_STATS0_INFO(tx_512_to_1023byte_frames)}, 668 {XGBE_STATS0_INFO(tx_1024byte_frames)}, 669 {XGBE_STATS0_INFO(net_bytes)}, 670 {XGBE_STATS0_INFO(rx_sof_overruns)}, 671 {XGBE_STATS0_INFO(rx_mof_overruns)}, 672 {XGBE_STATS0_INFO(rx_dma_overruns)}, 673 /* XGBE module 1 */ 674 {XGBE_STATS1_INFO(rx_good_frames)}, 675 {XGBE_STATS1_INFO(rx_broadcast_frames)}, 676 {XGBE_STATS1_INFO(rx_multicast_frames)}, 677 {XGBE_STATS1_INFO(rx_pause_frames)}, 678 {XGBE_STATS1_INFO(rx_crc_errors)}, 679 {XGBE_STATS1_INFO(rx_align_code_errors)}, 680 {XGBE_STATS1_INFO(rx_oversized_frames)}, 681 {XGBE_STATS1_INFO(rx_jabber_frames)}, 682 {XGBE_STATS1_INFO(rx_undersized_frames)}, 683 {XGBE_STATS1_INFO(rx_fragments)}, 684 {XGBE_STATS1_INFO(overrun_type4)}, 685 {XGBE_STATS1_INFO(overrun_type5)}, 686 {XGBE_STATS1_INFO(rx_bytes)}, 687 {XGBE_STATS1_INFO(tx_good_frames)}, 688 {XGBE_STATS1_INFO(tx_broadcast_frames)}, 689 {XGBE_STATS1_INFO(tx_multicast_frames)}, 690 {XGBE_STATS1_INFO(tx_pause_frames)}, 691 {XGBE_STATS1_INFO(tx_deferred_frames)}, 692 {XGBE_STATS1_INFO(tx_collision_frames)}, 693 {XGBE_STATS1_INFO(tx_single_coll_frames)}, 694 {XGBE_STATS1_INFO(tx_mult_coll_frames)}, 695 {XGBE_STATS1_INFO(tx_excessive_collisions)}, 696 {XGBE_STATS1_INFO(tx_late_collisions)}, 697 {XGBE_STATS1_INFO(tx_underrun)}, 698 {XGBE_STATS1_INFO(tx_carrier_sense_errors)}, 699 {XGBE_STATS1_INFO(tx_bytes)}, 700 {XGBE_STATS1_INFO(tx_64byte_frames)}, 701 {XGBE_STATS1_INFO(tx_65_to_127byte_frames)}, 702 {XGBE_STATS1_INFO(tx_128_to_255byte_frames)}, 703 {XGBE_STATS1_INFO(tx_256_to_511byte_frames)}, 704 {XGBE_STATS1_INFO(tx_512_to_1023byte_frames)}, 705 {XGBE_STATS1_INFO(tx_1024byte_frames)}, 706 {XGBE_STATS1_INFO(net_bytes)}, 707 {XGBE_STATS1_INFO(rx_sof_overruns)}, 708 {XGBE_STATS1_INFO(rx_mof_overruns)}, 709 {XGBE_STATS1_INFO(rx_dma_overruns)}, 710 /* XGBE module 2 */ 711 {XGBE_STATS2_INFO(rx_good_frames)}, 712 {XGBE_STATS2_INFO(rx_broadcast_frames)}, 713 {XGBE_STATS2_INFO(rx_multicast_frames)}, 714 {XGBE_STATS2_INFO(rx_pause_frames)}, 715 {XGBE_STATS2_INFO(rx_crc_errors)}, 716 {XGBE_STATS2_INFO(rx_align_code_errors)}, 717 {XGBE_STATS2_INFO(rx_oversized_frames)}, 718 {XGBE_STATS2_INFO(rx_jabber_frames)}, 719 {XGBE_STATS2_INFO(rx_undersized_frames)}, 720 {XGBE_STATS2_INFO(rx_fragments)}, 721 {XGBE_STATS2_INFO(overrun_type4)}, 722 {XGBE_STATS2_INFO(overrun_type5)}, 723 {XGBE_STATS2_INFO(rx_bytes)}, 724 {XGBE_STATS2_INFO(tx_good_frames)}, 725 {XGBE_STATS2_INFO(tx_broadcast_frames)}, 726 {XGBE_STATS2_INFO(tx_multicast_frames)}, 727 {XGBE_STATS2_INFO(tx_pause_frames)}, 728 {XGBE_STATS2_INFO(tx_deferred_frames)}, 729 {XGBE_STATS2_INFO(tx_collision_frames)}, 730 {XGBE_STATS2_INFO(tx_single_coll_frames)}, 731 {XGBE_STATS2_INFO(tx_mult_coll_frames)}, 732 {XGBE_STATS2_INFO(tx_excessive_collisions)}, 733 {XGBE_STATS2_INFO(tx_late_collisions)}, 734 {XGBE_STATS2_INFO(tx_underrun)}, 735 {XGBE_STATS2_INFO(tx_carrier_sense_errors)}, 736 {XGBE_STATS2_INFO(tx_bytes)}, 737 {XGBE_STATS2_INFO(tx_64byte_frames)}, 738 {XGBE_STATS2_INFO(tx_65_to_127byte_frames)}, 739 {XGBE_STATS2_INFO(tx_128_to_255byte_frames)}, 740 {XGBE_STATS2_INFO(tx_256_to_511byte_frames)}, 741 {XGBE_STATS2_INFO(tx_512_to_1023byte_frames)}, 742 {XGBE_STATS2_INFO(tx_1024byte_frames)}, 743 {XGBE_STATS2_INFO(net_bytes)}, 744 {XGBE_STATS2_INFO(rx_sof_overruns)}, 745 {XGBE_STATS2_INFO(rx_mof_overruns)}, 746 {XGBE_STATS2_INFO(rx_dma_overruns)}, 747 }; 748 749 #define for_each_intf(i, priv) \ 750 list_for_each_entry((i), &(priv)->gbe_intf_head, gbe_intf_list) 751 752 #define for_each_sec_slave(slave, priv) \ 753 list_for_each_entry((slave), &(priv)->secondary_slaves, slave_list) 754 755 #define first_sec_slave(priv) \ 756 list_first_entry(&priv->secondary_slaves, \ 757 struct gbe_slave, slave_list) 758 759 static void keystone_get_drvinfo(struct net_device *ndev, 760 struct ethtool_drvinfo *info) 761 { 762 strncpy(info->driver, NETCP_DRIVER_NAME, sizeof(info->driver)); 763 strncpy(info->version, NETCP_DRIVER_VERSION, sizeof(info->version)); 764 } 765 766 static u32 keystone_get_msglevel(struct net_device *ndev) 767 { 768 struct netcp_intf *netcp = netdev_priv(ndev); 769 770 return netcp->msg_enable; 771 } 772 773 static void keystone_set_msglevel(struct net_device *ndev, u32 value) 774 { 775 struct netcp_intf *netcp = netdev_priv(ndev); 776 777 netcp->msg_enable = value; 778 } 779 780 static void keystone_get_stat_strings(struct net_device *ndev, 781 uint32_t stringset, uint8_t *data) 782 { 783 struct netcp_intf *netcp = netdev_priv(ndev); 784 struct gbe_intf *gbe_intf; 785 struct gbe_priv *gbe_dev; 786 int i; 787 788 gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp); 789 if (!gbe_intf) 790 return; 791 gbe_dev = gbe_intf->gbe_dev; 792 793 switch (stringset) { 794 case ETH_SS_STATS: 795 for (i = 0; i < gbe_dev->num_et_stats; i++) { 796 memcpy(data, gbe_dev->et_stats[i].desc, 797 ETH_GSTRING_LEN); 798 data += ETH_GSTRING_LEN; 799 } 800 break; 801 case ETH_SS_TEST: 802 break; 803 } 804 } 805 806 static int keystone_get_sset_count(struct net_device *ndev, int stringset) 807 { 808 struct netcp_intf *netcp = netdev_priv(ndev); 809 struct gbe_intf *gbe_intf; 810 struct gbe_priv *gbe_dev; 811 812 gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp); 813 if (!gbe_intf) 814 return -EINVAL; 815 gbe_dev = gbe_intf->gbe_dev; 816 817 switch (stringset) { 818 case ETH_SS_TEST: 819 return 0; 820 case ETH_SS_STATS: 821 return gbe_dev->num_et_stats; 822 default: 823 return -EINVAL; 824 } 825 } 826 827 static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data) 828 { 829 void __iomem *base = NULL; 830 u32 __iomem *p; 831 u32 tmp = 0; 832 int i; 833 834 for (i = 0; i < gbe_dev->num_et_stats; i++) { 835 base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[i].type]; 836 p = base + gbe_dev->et_stats[i].offset; 837 tmp = readl(p); 838 gbe_dev->hw_stats[i] = gbe_dev->hw_stats[i] + tmp; 839 if (data) 840 data[i] = gbe_dev->hw_stats[i]; 841 /* write-to-decrement: 842 * new register value = old register value - write value 843 */ 844 writel(tmp, p); 845 } 846 } 847 848 static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data) 849 { 850 void __iomem *gbe_statsa = gbe_dev->hw_stats_regs[0]; 851 void __iomem *gbe_statsb = gbe_dev->hw_stats_regs[1]; 852 u64 *hw_stats = &gbe_dev->hw_stats[0]; 853 void __iomem *base = NULL; 854 u32 __iomem *p; 855 u32 tmp = 0, val, pair_size = (gbe_dev->num_et_stats / 2); 856 int i, j, pair; 857 858 for (pair = 0; pair < 2; pair++) { 859 val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en)); 860 861 if (pair == 0) 862 val &= ~GBE_STATS_CD_SEL; 863 else 864 val |= GBE_STATS_CD_SEL; 865 866 /* make the stat modules visible */ 867 writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en)); 868 869 for (i = 0; i < pair_size; i++) { 870 j = pair * pair_size + i; 871 switch (gbe_dev->et_stats[j].type) { 872 case GBE_STATSA_MODULE: 873 case GBE_STATSC_MODULE: 874 base = gbe_statsa; 875 break; 876 case GBE_STATSB_MODULE: 877 case GBE_STATSD_MODULE: 878 base = gbe_statsb; 879 break; 880 } 881 882 p = base + gbe_dev->et_stats[j].offset; 883 tmp = readl(p); 884 hw_stats[j] += tmp; 885 if (data) 886 data[j] = hw_stats[j]; 887 /* write-to-decrement: 888 * new register value = old register value - write value 889 */ 890 writel(tmp, p); 891 } 892 } 893 } 894 895 static void keystone_get_ethtool_stats(struct net_device *ndev, 896 struct ethtool_stats *stats, 897 uint64_t *data) 898 { 899 struct netcp_intf *netcp = netdev_priv(ndev); 900 struct gbe_intf *gbe_intf; 901 struct gbe_priv *gbe_dev; 902 903 gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp); 904 if (!gbe_intf) 905 return; 906 907 gbe_dev = gbe_intf->gbe_dev; 908 spin_lock_bh(&gbe_dev->hw_stats_lock); 909 if (gbe_dev->ss_version == GBE_SS_VERSION_14) 910 gbe_update_stats_ver14(gbe_dev, data); 911 else 912 gbe_update_stats(gbe_dev, data); 913 spin_unlock_bh(&gbe_dev->hw_stats_lock); 914 } 915 916 static int keystone_get_settings(struct net_device *ndev, 917 struct ethtool_cmd *cmd) 918 { 919 struct netcp_intf *netcp = netdev_priv(ndev); 920 struct phy_device *phy = ndev->phydev; 921 struct gbe_intf *gbe_intf; 922 int ret; 923 924 if (!phy) 925 return -EINVAL; 926 927 gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp); 928 if (!gbe_intf) 929 return -EINVAL; 930 931 if (!gbe_intf->slave) 932 return -EINVAL; 933 934 ret = phy_ethtool_gset(phy, cmd); 935 if (!ret) 936 cmd->port = gbe_intf->slave->phy_port_t; 937 938 return ret; 939 } 940 941 static int keystone_set_settings(struct net_device *ndev, 942 struct ethtool_cmd *cmd) 943 { 944 struct netcp_intf *netcp = netdev_priv(ndev); 945 struct phy_device *phy = ndev->phydev; 946 struct gbe_intf *gbe_intf; 947 u32 features = cmd->advertising & cmd->supported; 948 949 if (!phy) 950 return -EINVAL; 951 952 gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp); 953 if (!gbe_intf) 954 return -EINVAL; 955 956 if (!gbe_intf->slave) 957 return -EINVAL; 958 959 if (cmd->port != gbe_intf->slave->phy_port_t) { 960 if ((cmd->port == PORT_TP) && !(features & ADVERTISED_TP)) 961 return -EINVAL; 962 963 if ((cmd->port == PORT_AUI) && !(features & ADVERTISED_AUI)) 964 return -EINVAL; 965 966 if ((cmd->port == PORT_BNC) && !(features & ADVERTISED_BNC)) 967 return -EINVAL; 968 969 if ((cmd->port == PORT_MII) && !(features & ADVERTISED_MII)) 970 return -EINVAL; 971 972 if ((cmd->port == PORT_FIBRE) && !(features & ADVERTISED_FIBRE)) 973 return -EINVAL; 974 } 975 976 gbe_intf->slave->phy_port_t = cmd->port; 977 return phy_ethtool_sset(phy, cmd); 978 } 979 980 static const struct ethtool_ops keystone_ethtool_ops = { 981 .get_drvinfo = keystone_get_drvinfo, 982 .get_link = ethtool_op_get_link, 983 .get_msglevel = keystone_get_msglevel, 984 .set_msglevel = keystone_set_msglevel, 985 .get_strings = keystone_get_stat_strings, 986 .get_sset_count = keystone_get_sset_count, 987 .get_ethtool_stats = keystone_get_ethtool_stats, 988 .get_settings = keystone_get_settings, 989 .set_settings = keystone_set_settings, 990 }; 991 992 #define mac_hi(mac) (((mac)[0] << 0) | ((mac)[1] << 8) | \ 993 ((mac)[2] << 16) | ((mac)[3] << 24)) 994 #define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] << 8)) 995 996 static void gbe_set_slave_mac(struct gbe_slave *slave, 997 struct gbe_intf *gbe_intf) 998 { 999 struct net_device *ndev = gbe_intf->ndev; 1000 1001 writel(mac_hi(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_hi)); 1002 writel(mac_lo(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_lo)); 1003 } 1004 1005 static int gbe_get_slave_port(struct gbe_priv *priv, u32 slave_num) 1006 { 1007 if (priv->host_port == 0) 1008 return slave_num + 1; 1009 1010 return slave_num; 1011 } 1012 1013 static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev, 1014 struct net_device *ndev, 1015 struct gbe_slave *slave, 1016 int up) 1017 { 1018 struct phy_device *phy = slave->phy; 1019 u32 mac_control = 0; 1020 1021 if (up) { 1022 mac_control = slave->mac_control; 1023 if (phy && (phy->speed == SPEED_1000)) { 1024 mac_control |= MACSL_GIG_MODE; 1025 mac_control &= ~MACSL_XGIG_MODE; 1026 } else if (phy && (phy->speed == SPEED_10000)) { 1027 mac_control |= MACSL_XGIG_MODE; 1028 mac_control &= ~MACSL_GIG_MODE; 1029 } 1030 1031 writel(mac_control, GBE_REG_ADDR(slave, emac_regs, 1032 mac_control)); 1033 1034 cpsw_ale_control_set(gbe_dev->ale, slave->port_num, 1035 ALE_PORT_STATE, 1036 ALE_PORT_STATE_FORWARD); 1037 1038 if (ndev && slave->open) 1039 netif_carrier_on(ndev); 1040 } else { 1041 writel(mac_control, GBE_REG_ADDR(slave, emac_regs, 1042 mac_control)); 1043 cpsw_ale_control_set(gbe_dev->ale, slave->port_num, 1044 ALE_PORT_STATE, 1045 ALE_PORT_STATE_DISABLE); 1046 if (ndev) 1047 netif_carrier_off(ndev); 1048 } 1049 1050 if (phy) 1051 phy_print_status(phy); 1052 } 1053 1054 static bool gbe_phy_link_status(struct gbe_slave *slave) 1055 { 1056 return !slave->phy || slave->phy->link; 1057 } 1058 1059 static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev, 1060 struct gbe_slave *slave, 1061 struct net_device *ndev) 1062 { 1063 int sp = slave->slave_num; 1064 int phy_link_state, sgmii_link_state = 1, link_state; 1065 1066 if (!slave->open) 1067 return; 1068 1069 if (!SLAVE_LINK_IS_XGMII(slave)) 1070 sgmii_link_state = netcp_sgmii_get_port_link(SGMII_BASE(sp), 1071 sp); 1072 phy_link_state = gbe_phy_link_status(slave); 1073 link_state = phy_link_state & sgmii_link_state; 1074 1075 if (atomic_xchg(&slave->link_state, link_state) != link_state) 1076 netcp_ethss_link_state_action(gbe_dev, ndev, slave, 1077 link_state); 1078 } 1079 1080 static void xgbe_adjust_link(struct net_device *ndev) 1081 { 1082 struct netcp_intf *netcp = netdev_priv(ndev); 1083 struct gbe_intf *gbe_intf; 1084 1085 gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp); 1086 if (!gbe_intf) 1087 return; 1088 1089 netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave, 1090 ndev); 1091 } 1092 1093 static void gbe_adjust_link(struct net_device *ndev) 1094 { 1095 struct netcp_intf *netcp = netdev_priv(ndev); 1096 struct gbe_intf *gbe_intf; 1097 1098 gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp); 1099 if (!gbe_intf) 1100 return; 1101 1102 netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave, 1103 ndev); 1104 } 1105 1106 static void gbe_adjust_link_sec_slaves(struct net_device *ndev) 1107 { 1108 struct gbe_priv *gbe_dev = netdev_priv(ndev); 1109 struct gbe_slave *slave; 1110 1111 for_each_sec_slave(slave, gbe_dev) 1112 netcp_ethss_update_link_state(gbe_dev, slave, NULL); 1113 } 1114 1115 /* Reset EMAC 1116 * Soft reset is set and polled until clear, or until a timeout occurs 1117 */ 1118 static int gbe_port_reset(struct gbe_slave *slave) 1119 { 1120 u32 i, v; 1121 1122 /* Set the soft reset bit */ 1123 writel(SOFT_RESET, GBE_REG_ADDR(slave, emac_regs, soft_reset)); 1124 1125 /* Wait for the bit to clear */ 1126 for (i = 0; i < DEVICE_EMACSL_RESET_POLL_COUNT; i++) { 1127 v = readl(GBE_REG_ADDR(slave, emac_regs, soft_reset)); 1128 if ((v & SOFT_RESET_MASK) != SOFT_RESET) 1129 return 0; 1130 } 1131 1132 /* Timeout on the reset */ 1133 return GMACSL_RET_WARN_RESET_INCOMPLETE; 1134 } 1135 1136 /* Configure EMAC */ 1137 static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave, 1138 int max_rx_len) 1139 { 1140 u32 xgmii_mode; 1141 1142 if (max_rx_len > NETCP_MAX_FRAME_SIZE) 1143 max_rx_len = NETCP_MAX_FRAME_SIZE; 1144 1145 /* Enable correct MII mode at SS level */ 1146 if ((gbe_dev->ss_version == XGBE_SS_VERSION_10) && 1147 (slave->link_interface >= XGMII_LINK_MAC_PHY)) { 1148 xgmii_mode = readl(GBE_REG_ADDR(gbe_dev, ss_regs, control)); 1149 xgmii_mode |= (1 << slave->slave_num); 1150 writel(xgmii_mode, GBE_REG_ADDR(gbe_dev, ss_regs, control)); 1151 } 1152 1153 writel(max_rx_len, GBE_REG_ADDR(slave, emac_regs, rx_maxlen)); 1154 writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control)); 1155 } 1156 1157 static void gbe_slave_stop(struct gbe_intf *intf) 1158 { 1159 struct gbe_priv *gbe_dev = intf->gbe_dev; 1160 struct gbe_slave *slave = intf->slave; 1161 1162 gbe_port_reset(slave); 1163 /* Disable forwarding */ 1164 cpsw_ale_control_set(gbe_dev->ale, slave->port_num, 1165 ALE_PORT_STATE, ALE_PORT_STATE_DISABLE); 1166 cpsw_ale_del_mcast(gbe_dev->ale, intf->ndev->broadcast, 1167 1 << slave->port_num, 0, 0); 1168 1169 if (!slave->phy) 1170 return; 1171 1172 phy_stop(slave->phy); 1173 phy_disconnect(slave->phy); 1174 slave->phy = NULL; 1175 } 1176 1177 static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave) 1178 { 1179 void __iomem *sgmii_port_regs; 1180 1181 sgmii_port_regs = priv->sgmii_port_regs; 1182 if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2)) 1183 sgmii_port_regs = priv->sgmii_port34_regs; 1184 1185 if (!SLAVE_LINK_IS_XGMII(slave)) { 1186 netcp_sgmii_reset(sgmii_port_regs, slave->slave_num); 1187 netcp_sgmii_config(sgmii_port_regs, slave->slave_num, 1188 slave->link_interface); 1189 } 1190 } 1191 1192 static int gbe_slave_open(struct gbe_intf *gbe_intf) 1193 { 1194 struct gbe_priv *priv = gbe_intf->gbe_dev; 1195 struct gbe_slave *slave = gbe_intf->slave; 1196 phy_interface_t phy_mode; 1197 bool has_phy = false; 1198 1199 void (*hndlr)(struct net_device *) = gbe_adjust_link; 1200 1201 gbe_sgmii_config(priv, slave); 1202 gbe_port_reset(slave); 1203 gbe_port_config(priv, slave, priv->rx_packet_max); 1204 gbe_set_slave_mac(slave, gbe_intf); 1205 /* enable forwarding */ 1206 cpsw_ale_control_set(priv->ale, slave->port_num, 1207 ALE_PORT_STATE, ALE_PORT_STATE_FORWARD); 1208 cpsw_ale_add_mcast(priv->ale, gbe_intf->ndev->broadcast, 1209 1 << slave->port_num, 0, 0, ALE_MCAST_FWD_2); 1210 1211 if (slave->link_interface == SGMII_LINK_MAC_PHY) { 1212 has_phy = true; 1213 phy_mode = PHY_INTERFACE_MODE_SGMII; 1214 slave->phy_port_t = PORT_MII; 1215 } else if (slave->link_interface == XGMII_LINK_MAC_PHY) { 1216 has_phy = true; 1217 phy_mode = PHY_INTERFACE_MODE_NA; 1218 slave->phy_port_t = PORT_FIBRE; 1219 } 1220 1221 if (has_phy) { 1222 if (priv->ss_version == XGBE_SS_VERSION_10) 1223 hndlr = xgbe_adjust_link; 1224 1225 slave->phy = of_phy_connect(gbe_intf->ndev, 1226 slave->phy_node, 1227 hndlr, 0, 1228 phy_mode); 1229 if (!slave->phy) { 1230 dev_err(priv->dev, "phy not found on slave %d\n", 1231 slave->slave_num); 1232 return -ENODEV; 1233 } 1234 dev_dbg(priv->dev, "phy found: id is: 0x%s\n", 1235 dev_name(&slave->phy->dev)); 1236 phy_start(slave->phy); 1237 phy_read_status(slave->phy); 1238 } 1239 return 0; 1240 } 1241 1242 static void gbe_init_host_port(struct gbe_priv *priv) 1243 { 1244 int bypass_en = 1; 1245 /* Max length register */ 1246 writel(NETCP_MAX_FRAME_SIZE, GBE_REG_ADDR(priv, host_port_regs, 1247 rx_maxlen)); 1248 1249 cpsw_ale_start(priv->ale); 1250 1251 if (priv->enable_ale) 1252 bypass_en = 0; 1253 1254 cpsw_ale_control_set(priv->ale, 0, ALE_BYPASS, bypass_en); 1255 1256 cpsw_ale_control_set(priv->ale, 0, ALE_NO_PORT_VLAN, 1); 1257 1258 cpsw_ale_control_set(priv->ale, priv->host_port, 1259 ALE_PORT_STATE, ALE_PORT_STATE_FORWARD); 1260 1261 cpsw_ale_control_set(priv->ale, 0, 1262 ALE_PORT_UNKNOWN_VLAN_MEMBER, 1263 GBE_PORT_MASK(priv->ale_ports)); 1264 1265 cpsw_ale_control_set(priv->ale, 0, 1266 ALE_PORT_UNKNOWN_MCAST_FLOOD, 1267 GBE_PORT_MASK(priv->ale_ports - 1)); 1268 1269 cpsw_ale_control_set(priv->ale, 0, 1270 ALE_PORT_UNKNOWN_REG_MCAST_FLOOD, 1271 GBE_PORT_MASK(priv->ale_ports)); 1272 1273 cpsw_ale_control_set(priv->ale, 0, 1274 ALE_PORT_UNTAGGED_EGRESS, 1275 GBE_PORT_MASK(priv->ale_ports)); 1276 } 1277 1278 static void gbe_add_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr) 1279 { 1280 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev; 1281 u16 vlan_id; 1282 1283 cpsw_ale_add_mcast(gbe_dev->ale, addr, 1284 GBE_PORT_MASK(gbe_dev->ale_ports), 0, 0, 1285 ALE_MCAST_FWD_2); 1286 for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) { 1287 cpsw_ale_add_mcast(gbe_dev->ale, addr, 1288 GBE_PORT_MASK(gbe_dev->ale_ports), 1289 ALE_VLAN, vlan_id, ALE_MCAST_FWD_2); 1290 } 1291 } 1292 1293 static void gbe_add_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr) 1294 { 1295 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev; 1296 u16 vlan_id; 1297 1298 cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0); 1299 1300 for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) 1301 cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 1302 ALE_VLAN, vlan_id); 1303 } 1304 1305 static void gbe_del_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr) 1306 { 1307 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev; 1308 u16 vlan_id; 1309 1310 cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, 0, 0); 1311 1312 for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) { 1313 cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, ALE_VLAN, vlan_id); 1314 } 1315 } 1316 1317 static void gbe_del_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr) 1318 { 1319 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev; 1320 u16 vlan_id; 1321 1322 cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0); 1323 1324 for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) { 1325 cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 1326 ALE_VLAN, vlan_id); 1327 } 1328 } 1329 1330 static int gbe_add_addr(void *intf_priv, struct netcp_addr *naddr) 1331 { 1332 struct gbe_intf *gbe_intf = intf_priv; 1333 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev; 1334 1335 dev_dbg(gbe_dev->dev, "ethss adding address %pM, type %d\n", 1336 naddr->addr, naddr->type); 1337 1338 switch (naddr->type) { 1339 case ADDR_MCAST: 1340 case ADDR_BCAST: 1341 gbe_add_mcast_addr(gbe_intf, naddr->addr); 1342 break; 1343 case ADDR_UCAST: 1344 case ADDR_DEV: 1345 gbe_add_ucast_addr(gbe_intf, naddr->addr); 1346 break; 1347 case ADDR_ANY: 1348 /* nothing to do for promiscuous */ 1349 default: 1350 break; 1351 } 1352 1353 return 0; 1354 } 1355 1356 static int gbe_del_addr(void *intf_priv, struct netcp_addr *naddr) 1357 { 1358 struct gbe_intf *gbe_intf = intf_priv; 1359 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev; 1360 1361 dev_dbg(gbe_dev->dev, "ethss deleting address %pM, type %d\n", 1362 naddr->addr, naddr->type); 1363 1364 switch (naddr->type) { 1365 case ADDR_MCAST: 1366 case ADDR_BCAST: 1367 gbe_del_mcast_addr(gbe_intf, naddr->addr); 1368 break; 1369 case ADDR_UCAST: 1370 case ADDR_DEV: 1371 gbe_del_ucast_addr(gbe_intf, naddr->addr); 1372 break; 1373 case ADDR_ANY: 1374 /* nothing to do for promiscuous */ 1375 default: 1376 break; 1377 } 1378 1379 return 0; 1380 } 1381 1382 static int gbe_add_vid(void *intf_priv, int vid) 1383 { 1384 struct gbe_intf *gbe_intf = intf_priv; 1385 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev; 1386 1387 set_bit(vid, gbe_intf->active_vlans); 1388 1389 cpsw_ale_add_vlan(gbe_dev->ale, vid, 1390 GBE_PORT_MASK(gbe_dev->ale_ports), 1391 GBE_MASK_NO_PORTS, 1392 GBE_PORT_MASK(gbe_dev->ale_ports), 1393 GBE_PORT_MASK(gbe_dev->ale_ports - 1)); 1394 1395 return 0; 1396 } 1397 1398 static int gbe_del_vid(void *intf_priv, int vid) 1399 { 1400 struct gbe_intf *gbe_intf = intf_priv; 1401 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev; 1402 1403 cpsw_ale_del_vlan(gbe_dev->ale, vid, 0); 1404 clear_bit(vid, gbe_intf->active_vlans); 1405 return 0; 1406 } 1407 1408 static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd) 1409 { 1410 struct gbe_intf *gbe_intf = intf_priv; 1411 struct phy_device *phy = gbe_intf->slave->phy; 1412 int ret = -EOPNOTSUPP; 1413 1414 if (phy) 1415 ret = phy_mii_ioctl(phy, req, cmd); 1416 1417 return ret; 1418 } 1419 1420 static void netcp_ethss_timer(unsigned long arg) 1421 { 1422 struct gbe_priv *gbe_dev = (struct gbe_priv *)arg; 1423 struct gbe_intf *gbe_intf; 1424 struct gbe_slave *slave; 1425 1426 /* Check & update SGMII link state of interfaces */ 1427 for_each_intf(gbe_intf, gbe_dev) { 1428 if (!gbe_intf->slave->open) 1429 continue; 1430 netcp_ethss_update_link_state(gbe_dev, gbe_intf->slave, 1431 gbe_intf->ndev); 1432 } 1433 1434 /* Check & update SGMII link state of secondary ports */ 1435 for_each_sec_slave(slave, gbe_dev) { 1436 netcp_ethss_update_link_state(gbe_dev, slave, NULL); 1437 } 1438 1439 spin_lock_bh(&gbe_dev->hw_stats_lock); 1440 1441 if (gbe_dev->ss_version == GBE_SS_VERSION_14) 1442 gbe_update_stats_ver14(gbe_dev, NULL); 1443 else 1444 gbe_update_stats(gbe_dev, NULL); 1445 1446 spin_unlock_bh(&gbe_dev->hw_stats_lock); 1447 1448 gbe_dev->timer.expires = jiffies + GBE_TIMER_INTERVAL; 1449 add_timer(&gbe_dev->timer); 1450 } 1451 1452 static int gbe_tx_hook(int order, void *data, struct netcp_packet *p_info) 1453 { 1454 struct gbe_intf *gbe_intf = data; 1455 1456 p_info->tx_pipe = &gbe_intf->tx_pipe; 1457 return 0; 1458 } 1459 1460 static int gbe_open(void *intf_priv, struct net_device *ndev) 1461 { 1462 struct gbe_intf *gbe_intf = intf_priv; 1463 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev; 1464 struct netcp_intf *netcp = netdev_priv(ndev); 1465 struct gbe_slave *slave = gbe_intf->slave; 1466 int port_num = slave->port_num; 1467 u32 reg; 1468 int ret; 1469 1470 reg = readl(GBE_REG_ADDR(gbe_dev, switch_regs, id_ver)); 1471 dev_dbg(gbe_dev->dev, "initializing gbe version %d.%d (%d) GBE identification value 0x%x\n", 1472 GBE_MAJOR_VERSION(reg), GBE_MINOR_VERSION(reg), 1473 GBE_RTL_VERSION(reg), GBE_IDENT(reg)); 1474 1475 if (gbe_dev->enable_ale) 1476 gbe_intf->tx_pipe.dma_psflags = 0; 1477 else 1478 gbe_intf->tx_pipe.dma_psflags = port_num; 1479 1480 dev_dbg(gbe_dev->dev, "opened TX channel %s: %p with psflags %d\n", 1481 gbe_intf->tx_pipe.dma_chan_name, 1482 gbe_intf->tx_pipe.dma_channel, 1483 gbe_intf->tx_pipe.dma_psflags); 1484 1485 gbe_slave_stop(gbe_intf); 1486 1487 /* disable priority elevation and enable statistics on all ports */ 1488 writel(0, GBE_REG_ADDR(gbe_dev, switch_regs, ptype)); 1489 1490 /* Control register */ 1491 writel(GBE_CTL_P0_ENABLE, GBE_REG_ADDR(gbe_dev, switch_regs, control)); 1492 1493 /* All statistics enabled and STAT AB visible by default */ 1494 writel(GBE_REG_VAL_STAT_ENABLE_ALL, GBE_REG_ADDR(gbe_dev, switch_regs, 1495 stat_port_en)); 1496 1497 ret = gbe_slave_open(gbe_intf); 1498 if (ret) 1499 goto fail; 1500 1501 netcp_register_txhook(netcp, GBE_TXHOOK_ORDER, gbe_tx_hook, 1502 gbe_intf); 1503 1504 slave->open = true; 1505 netcp_ethss_update_link_state(gbe_dev, slave, ndev); 1506 return 0; 1507 1508 fail: 1509 gbe_slave_stop(gbe_intf); 1510 return ret; 1511 } 1512 1513 static int gbe_close(void *intf_priv, struct net_device *ndev) 1514 { 1515 struct gbe_intf *gbe_intf = intf_priv; 1516 struct netcp_intf *netcp = netdev_priv(ndev); 1517 1518 gbe_slave_stop(gbe_intf); 1519 netcp_unregister_txhook(netcp, GBE_TXHOOK_ORDER, gbe_tx_hook, 1520 gbe_intf); 1521 1522 gbe_intf->slave->open = false; 1523 atomic_set(&gbe_intf->slave->link_state, NETCP_LINK_STATE_INVALID); 1524 return 0; 1525 } 1526 1527 static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave, 1528 struct device_node *node) 1529 { 1530 int port_reg_num; 1531 u32 port_reg_ofs, emac_reg_ofs; 1532 1533 if (of_property_read_u32(node, "slave-port", &slave->slave_num)) { 1534 dev_err(gbe_dev->dev, "missing slave-port parameter\n"); 1535 return -EINVAL; 1536 } 1537 1538 if (of_property_read_u32(node, "link-interface", 1539 &slave->link_interface)) { 1540 dev_warn(gbe_dev->dev, 1541 "missing link-interface value defaulting to 1G mac-phy link\n"); 1542 slave->link_interface = SGMII_LINK_MAC_PHY; 1543 } 1544 1545 slave->open = false; 1546 slave->phy_node = of_parse_phandle(node, "phy-handle", 0); 1547 slave->port_num = gbe_get_slave_port(gbe_dev, slave->slave_num); 1548 1549 if (slave->link_interface >= XGMII_LINK_MAC_PHY) 1550 slave->mac_control = GBE_DEF_10G_MAC_CONTROL; 1551 else 1552 slave->mac_control = GBE_DEF_1G_MAC_CONTROL; 1553 1554 /* Emac regs memmap are contiguous but port regs are not */ 1555 port_reg_num = slave->slave_num; 1556 if (gbe_dev->ss_version == GBE_SS_VERSION_14) { 1557 if (slave->slave_num > 1) { 1558 port_reg_ofs = GBE13_SLAVE_PORT2_OFFSET; 1559 port_reg_num -= 2; 1560 } else { 1561 port_reg_ofs = GBE13_SLAVE_PORT_OFFSET; 1562 } 1563 } else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) { 1564 port_reg_ofs = XGBE10_SLAVE_PORT_OFFSET; 1565 } else { 1566 dev_err(gbe_dev->dev, "unknown ethss(0x%x)\n", 1567 gbe_dev->ss_version); 1568 return -EINVAL; 1569 } 1570 1571 if (gbe_dev->ss_version == GBE_SS_VERSION_14) 1572 emac_reg_ofs = GBE13_EMAC_OFFSET; 1573 else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) 1574 emac_reg_ofs = XGBE10_EMAC_OFFSET; 1575 1576 slave->port_regs = gbe_dev->ss_regs + port_reg_ofs + 1577 (0x30 * port_reg_num); 1578 slave->emac_regs = gbe_dev->ss_regs + emac_reg_ofs + 1579 (0x40 * slave->slave_num); 1580 1581 if (gbe_dev->ss_version == GBE_SS_VERSION_14) { 1582 /* Initialize slave port register offsets */ 1583 GBE_SET_REG_OFS(slave, port_regs, port_vlan); 1584 GBE_SET_REG_OFS(slave, port_regs, tx_pri_map); 1585 GBE_SET_REG_OFS(slave, port_regs, sa_lo); 1586 GBE_SET_REG_OFS(slave, port_regs, sa_hi); 1587 GBE_SET_REG_OFS(slave, port_regs, ts_ctl); 1588 GBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype); 1589 GBE_SET_REG_OFS(slave, port_regs, ts_vlan); 1590 GBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2); 1591 GBE_SET_REG_OFS(slave, port_regs, ts_ctl2); 1592 1593 /* Initialize EMAC register offsets */ 1594 GBE_SET_REG_OFS(slave, emac_regs, mac_control); 1595 GBE_SET_REG_OFS(slave, emac_regs, soft_reset); 1596 GBE_SET_REG_OFS(slave, emac_regs, rx_maxlen); 1597 1598 } else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) { 1599 /* Initialize slave port register offsets */ 1600 XGBE_SET_REG_OFS(slave, port_regs, port_vlan); 1601 XGBE_SET_REG_OFS(slave, port_regs, tx_pri_map); 1602 XGBE_SET_REG_OFS(slave, port_regs, sa_lo); 1603 XGBE_SET_REG_OFS(slave, port_regs, sa_hi); 1604 XGBE_SET_REG_OFS(slave, port_regs, ts_ctl); 1605 XGBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype); 1606 XGBE_SET_REG_OFS(slave, port_regs, ts_vlan); 1607 XGBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2); 1608 XGBE_SET_REG_OFS(slave, port_regs, ts_ctl2); 1609 1610 /* Initialize EMAC register offsets */ 1611 XGBE_SET_REG_OFS(slave, emac_regs, mac_control); 1612 XGBE_SET_REG_OFS(slave, emac_regs, soft_reset); 1613 XGBE_SET_REG_OFS(slave, emac_regs, rx_maxlen); 1614 } 1615 1616 atomic_set(&slave->link_state, NETCP_LINK_STATE_INVALID); 1617 return 0; 1618 } 1619 1620 static void init_secondary_ports(struct gbe_priv *gbe_dev, 1621 struct device_node *node) 1622 { 1623 struct device *dev = gbe_dev->dev; 1624 phy_interface_t phy_mode; 1625 struct gbe_priv **priv; 1626 struct device_node *port; 1627 struct gbe_slave *slave; 1628 bool mac_phy_link = false; 1629 1630 for_each_child_of_node(node, port) { 1631 slave = devm_kzalloc(dev, sizeof(*slave), GFP_KERNEL); 1632 if (!slave) { 1633 dev_err(dev, 1634 "memomry alloc failed for secondary port(%s), skipping...\n", 1635 port->name); 1636 continue; 1637 } 1638 1639 if (init_slave(gbe_dev, slave, port)) { 1640 dev_err(dev, 1641 "Failed to initialize secondary port(%s), skipping...\n", 1642 port->name); 1643 devm_kfree(dev, slave); 1644 continue; 1645 } 1646 1647 gbe_sgmii_config(gbe_dev, slave); 1648 gbe_port_reset(slave); 1649 gbe_port_config(gbe_dev, slave, gbe_dev->rx_packet_max); 1650 list_add_tail(&slave->slave_list, &gbe_dev->secondary_slaves); 1651 gbe_dev->num_slaves++; 1652 if ((slave->link_interface == SGMII_LINK_MAC_PHY) || 1653 (slave->link_interface == XGMII_LINK_MAC_PHY)) 1654 mac_phy_link = true; 1655 1656 slave->open = true; 1657 } 1658 1659 /* of_phy_connect() is needed only for MAC-PHY interface */ 1660 if (!mac_phy_link) 1661 return; 1662 1663 /* Allocate dummy netdev device for attaching to phy device */ 1664 gbe_dev->dummy_ndev = alloc_netdev(sizeof(gbe_dev), "dummy", 1665 NET_NAME_UNKNOWN, ether_setup); 1666 if (!gbe_dev->dummy_ndev) { 1667 dev_err(dev, 1668 "Failed to allocate dummy netdev for secondary ports, skipping phy_connect()...\n"); 1669 return; 1670 } 1671 priv = netdev_priv(gbe_dev->dummy_ndev); 1672 *priv = gbe_dev; 1673 1674 if (slave->link_interface == SGMII_LINK_MAC_PHY) { 1675 phy_mode = PHY_INTERFACE_MODE_SGMII; 1676 slave->phy_port_t = PORT_MII; 1677 } else { 1678 phy_mode = PHY_INTERFACE_MODE_NA; 1679 slave->phy_port_t = PORT_FIBRE; 1680 } 1681 1682 for_each_sec_slave(slave, gbe_dev) { 1683 if ((slave->link_interface != SGMII_LINK_MAC_PHY) && 1684 (slave->link_interface != XGMII_LINK_MAC_PHY)) 1685 continue; 1686 slave->phy = 1687 of_phy_connect(gbe_dev->dummy_ndev, 1688 slave->phy_node, 1689 gbe_adjust_link_sec_slaves, 1690 0, phy_mode); 1691 if (!slave->phy) { 1692 dev_err(dev, "phy not found for slave %d\n", 1693 slave->slave_num); 1694 slave->phy = NULL; 1695 } else { 1696 dev_dbg(dev, "phy found: id is: 0x%s\n", 1697 dev_name(&slave->phy->dev)); 1698 phy_start(slave->phy); 1699 phy_read_status(slave->phy); 1700 } 1701 } 1702 } 1703 1704 static void free_secondary_ports(struct gbe_priv *gbe_dev) 1705 { 1706 struct gbe_slave *slave; 1707 1708 for (;;) { 1709 slave = first_sec_slave(gbe_dev); 1710 if (!slave) 1711 break; 1712 if (slave->phy) 1713 phy_disconnect(slave->phy); 1714 list_del(&slave->slave_list); 1715 } 1716 if (gbe_dev->dummy_ndev) 1717 free_netdev(gbe_dev->dummy_ndev); 1718 } 1719 1720 static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev, 1721 struct device_node *node) 1722 { 1723 struct resource res; 1724 void __iomem *regs; 1725 int ret, i; 1726 1727 ret = of_address_to_resource(node, 0, &res); 1728 if (ret) { 1729 dev_err(gbe_dev->dev, "Can't translate of node(%s) address for xgbe subsystem regs\n", 1730 node->name); 1731 return ret; 1732 } 1733 1734 regs = devm_ioremap_resource(gbe_dev->dev, &res); 1735 if (IS_ERR(regs)) { 1736 dev_err(gbe_dev->dev, "Failed to map xgbe register base\n"); 1737 return PTR_ERR(regs); 1738 } 1739 gbe_dev->ss_regs = regs; 1740 1741 ret = of_address_to_resource(node, XGBE_SERDES_REG_INDEX, &res); 1742 if (ret) { 1743 dev_err(gbe_dev->dev, "Can't translate of node(%s) address for xgbe serdes regs\n", 1744 node->name); 1745 return ret; 1746 } 1747 1748 regs = devm_ioremap_resource(gbe_dev->dev, &res); 1749 if (IS_ERR(regs)) { 1750 dev_err(gbe_dev->dev, "Failed to map xgbe serdes register base\n"); 1751 return PTR_ERR(regs); 1752 } 1753 gbe_dev->xgbe_serdes_regs = regs; 1754 1755 gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev, 1756 XGBE10_NUM_STAT_ENTRIES * 1757 (XGBE10_NUM_SLAVES + 1) * sizeof(u64), 1758 GFP_KERNEL); 1759 if (!gbe_dev->hw_stats) { 1760 dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n"); 1761 return -ENOMEM; 1762 } 1763 1764 gbe_dev->ss_version = XGBE_SS_VERSION_10; 1765 gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + 1766 XGBE10_SGMII_MODULE_OFFSET; 1767 gbe_dev->switch_regs = gbe_dev->ss_regs + XGBE10_SWITCH_MODULE_OFFSET; 1768 gbe_dev->host_port_regs = gbe_dev->ss_regs + XGBE10_HOST_PORT_OFFSET; 1769 1770 for (i = 0; i < XGBE10_NUM_HW_STATS_MOD; i++) 1771 gbe_dev->hw_stats_regs[i] = gbe_dev->ss_regs + 1772 XGBE10_HW_STATS_OFFSET + (GBE_HW_STATS_REG_MAP_SZ * i); 1773 1774 gbe_dev->ale_reg = gbe_dev->ss_regs + XGBE10_ALE_OFFSET; 1775 gbe_dev->ale_ports = XGBE10_NUM_ALE_PORTS; 1776 gbe_dev->host_port = XGBE10_HOST_PORT_NUM; 1777 gbe_dev->ale_entries = XGBE10_NUM_ALE_ENTRIES; 1778 gbe_dev->et_stats = xgbe10_et_stats; 1779 gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats); 1780 1781 /* Subsystem registers */ 1782 XGBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver); 1783 XGBE_SET_REG_OFS(gbe_dev, ss_regs, control); 1784 1785 /* Switch module registers */ 1786 XGBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver); 1787 XGBE_SET_REG_OFS(gbe_dev, switch_regs, control); 1788 XGBE_SET_REG_OFS(gbe_dev, switch_regs, ptype); 1789 XGBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en); 1790 XGBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control); 1791 1792 /* Host port registers */ 1793 XGBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan); 1794 XGBE_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map); 1795 XGBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen); 1796 return 0; 1797 } 1798 1799 static int get_gbe_resource_version(struct gbe_priv *gbe_dev, 1800 struct device_node *node) 1801 { 1802 struct resource res; 1803 void __iomem *regs; 1804 int ret; 1805 1806 ret = of_address_to_resource(node, 0, &res); 1807 if (ret) { 1808 dev_err(gbe_dev->dev, "Can't translate of node(%s) address\n", 1809 node->name); 1810 return ret; 1811 } 1812 1813 regs = devm_ioremap_resource(gbe_dev->dev, &res); 1814 if (IS_ERR(regs)) { 1815 dev_err(gbe_dev->dev, "Failed to map gbe register base\n"); 1816 return PTR_ERR(regs); 1817 } 1818 gbe_dev->ss_regs = regs; 1819 gbe_dev->ss_version = readl(gbe_dev->ss_regs); 1820 return 0; 1821 } 1822 1823 static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev, 1824 struct device_node *node) 1825 { 1826 void __iomem *regs; 1827 int i; 1828 1829 gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev, 1830 GBE13_NUM_HW_STAT_ENTRIES * 1831 GBE13_NUM_SLAVES * sizeof(u64), 1832 GFP_KERNEL); 1833 if (!gbe_dev->hw_stats) { 1834 dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n"); 1835 return -ENOMEM; 1836 } 1837 1838 regs = gbe_dev->ss_regs; 1839 gbe_dev->sgmii_port_regs = regs + GBE13_SGMII_MODULE_OFFSET; 1840 gbe_dev->sgmii_port34_regs = regs + GBE13_SGMII34_MODULE_OFFSET; 1841 gbe_dev->switch_regs = regs + GBE13_SWITCH_MODULE_OFFSET; 1842 gbe_dev->host_port_regs = regs + GBE13_HOST_PORT_OFFSET; 1843 1844 for (i = 0; i < GBE13_NUM_HW_STATS_MOD; i++) 1845 gbe_dev->hw_stats_regs[i] = regs + GBE13_HW_STATS_OFFSET + 1846 (GBE_HW_STATS_REG_MAP_SZ * i); 1847 1848 gbe_dev->ale_reg = regs + GBE13_ALE_OFFSET; 1849 gbe_dev->ale_ports = GBE13_NUM_ALE_PORTS; 1850 gbe_dev->host_port = GBE13_HOST_PORT_NUM; 1851 gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES; 1852 gbe_dev->et_stats = gbe13_et_stats; 1853 gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats); 1854 1855 /* Subsystem registers */ 1856 GBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver); 1857 1858 /* Switch module registers */ 1859 GBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver); 1860 GBE_SET_REG_OFS(gbe_dev, switch_regs, control); 1861 GBE_SET_REG_OFS(gbe_dev, switch_regs, soft_reset); 1862 GBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en); 1863 GBE_SET_REG_OFS(gbe_dev, switch_regs, ptype); 1864 GBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control); 1865 1866 /* Host port registers */ 1867 GBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan); 1868 GBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen); 1869 return 0; 1870 } 1871 1872 static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, 1873 struct device_node *node, void **inst_priv) 1874 { 1875 struct device_node *interfaces, *interface; 1876 struct device_node *secondary_ports; 1877 struct cpsw_ale_params ale_params; 1878 struct gbe_priv *gbe_dev; 1879 u32 slave_num; 1880 int ret = 0; 1881 1882 if (!node) { 1883 dev_err(dev, "device tree info unavailable\n"); 1884 return -ENODEV; 1885 } 1886 1887 gbe_dev = devm_kzalloc(dev, sizeof(struct gbe_priv), GFP_KERNEL); 1888 if (!gbe_dev) 1889 return -ENOMEM; 1890 1891 gbe_dev->dev = dev; 1892 gbe_dev->netcp_device = netcp_device; 1893 gbe_dev->rx_packet_max = NETCP_MAX_FRAME_SIZE; 1894 1895 /* init the hw stats lock */ 1896 spin_lock_init(&gbe_dev->hw_stats_lock); 1897 1898 if (of_find_property(node, "enable-ale", NULL)) { 1899 gbe_dev->enable_ale = true; 1900 dev_info(dev, "ALE enabled\n"); 1901 } else { 1902 gbe_dev->enable_ale = false; 1903 dev_dbg(dev, "ALE bypass enabled*\n"); 1904 } 1905 1906 ret = of_property_read_u32(node, "tx-queue", 1907 &gbe_dev->tx_queue_id); 1908 if (ret < 0) { 1909 dev_err(dev, "missing tx_queue parameter\n"); 1910 gbe_dev->tx_queue_id = GBE_TX_QUEUE; 1911 } 1912 1913 ret = of_property_read_string(node, "tx-channel", 1914 &gbe_dev->dma_chan_name); 1915 if (ret < 0) { 1916 dev_err(dev, "missing \"tx-channel\" parameter\n"); 1917 ret = -ENODEV; 1918 goto quit; 1919 } 1920 1921 if (!strcmp(node->name, "gbe")) { 1922 ret = get_gbe_resource_version(gbe_dev, node); 1923 if (ret) 1924 goto quit; 1925 1926 ret = set_gbe_ethss14_priv(gbe_dev, node); 1927 if (ret) 1928 goto quit; 1929 } else if (!strcmp(node->name, "xgbe")) { 1930 ret = set_xgbe_ethss10_priv(gbe_dev, node); 1931 if (ret) 1932 goto quit; 1933 ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs, 1934 gbe_dev->ss_regs); 1935 if (ret) 1936 goto quit; 1937 } else { 1938 dev_err(dev, "unknown GBE node(%s)\n", node->name); 1939 ret = -ENODEV; 1940 goto quit; 1941 } 1942 1943 interfaces = of_get_child_by_name(node, "interfaces"); 1944 if (!interfaces) 1945 dev_err(dev, "could not find interfaces\n"); 1946 1947 ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device, 1948 gbe_dev->dma_chan_name, gbe_dev->tx_queue_id); 1949 if (ret) 1950 goto quit; 1951 1952 ret = netcp_txpipe_open(&gbe_dev->tx_pipe); 1953 if (ret) 1954 goto quit; 1955 1956 /* Create network interfaces */ 1957 INIT_LIST_HEAD(&gbe_dev->gbe_intf_head); 1958 for_each_child_of_node(interfaces, interface) { 1959 ret = of_property_read_u32(interface, "slave-port", &slave_num); 1960 if (ret) { 1961 dev_err(dev, "missing slave-port parameter, skipping interface configuration for %s\n", 1962 interface->name); 1963 continue; 1964 } 1965 gbe_dev->num_slaves++; 1966 } 1967 1968 if (!gbe_dev->num_slaves) 1969 dev_warn(dev, "No network interface configured\n"); 1970 1971 /* Initialize Secondary slave ports */ 1972 secondary_ports = of_get_child_by_name(node, "secondary-slave-ports"); 1973 INIT_LIST_HEAD(&gbe_dev->secondary_slaves); 1974 if (secondary_ports) 1975 init_secondary_ports(gbe_dev, secondary_ports); 1976 of_node_put(secondary_ports); 1977 1978 if (!gbe_dev->num_slaves) { 1979 dev_err(dev, "No network interface or secondary ports configured\n"); 1980 ret = -ENODEV; 1981 goto quit; 1982 } 1983 1984 memset(&ale_params, 0, sizeof(ale_params)); 1985 ale_params.dev = gbe_dev->dev; 1986 ale_params.ale_regs = gbe_dev->ale_reg; 1987 ale_params.ale_ageout = GBE_DEFAULT_ALE_AGEOUT; 1988 ale_params.ale_entries = gbe_dev->ale_entries; 1989 ale_params.ale_ports = gbe_dev->ale_ports; 1990 1991 gbe_dev->ale = cpsw_ale_create(&ale_params); 1992 if (!gbe_dev->ale) { 1993 dev_err(gbe_dev->dev, "error initializing ale engine\n"); 1994 ret = -ENODEV; 1995 goto quit; 1996 } else { 1997 dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n"); 1998 } 1999 2000 /* initialize host port */ 2001 gbe_init_host_port(gbe_dev); 2002 2003 init_timer(&gbe_dev->timer); 2004 gbe_dev->timer.data = (unsigned long)gbe_dev; 2005 gbe_dev->timer.function = netcp_ethss_timer; 2006 gbe_dev->timer.expires = jiffies + GBE_TIMER_INTERVAL; 2007 add_timer(&gbe_dev->timer); 2008 *inst_priv = gbe_dev; 2009 return 0; 2010 2011 quit: 2012 if (gbe_dev->hw_stats) 2013 devm_kfree(dev, gbe_dev->hw_stats); 2014 cpsw_ale_destroy(gbe_dev->ale); 2015 if (gbe_dev->ss_regs) 2016 devm_iounmap(dev, gbe_dev->ss_regs); 2017 of_node_put(interfaces); 2018 devm_kfree(dev, gbe_dev); 2019 return ret; 2020 } 2021 2022 static int gbe_attach(void *inst_priv, struct net_device *ndev, 2023 struct device_node *node, void **intf_priv) 2024 { 2025 struct gbe_priv *gbe_dev = inst_priv; 2026 struct gbe_intf *gbe_intf; 2027 int ret; 2028 2029 if (!node) { 2030 dev_err(gbe_dev->dev, "interface node not available\n"); 2031 return -ENODEV; 2032 } 2033 2034 gbe_intf = devm_kzalloc(gbe_dev->dev, sizeof(*gbe_intf), GFP_KERNEL); 2035 if (!gbe_intf) 2036 return -ENOMEM; 2037 2038 gbe_intf->ndev = ndev; 2039 gbe_intf->dev = gbe_dev->dev; 2040 gbe_intf->gbe_dev = gbe_dev; 2041 2042 gbe_intf->slave = devm_kzalloc(gbe_dev->dev, 2043 sizeof(*gbe_intf->slave), 2044 GFP_KERNEL); 2045 if (!gbe_intf->slave) { 2046 ret = -ENOMEM; 2047 goto fail; 2048 } 2049 2050 if (init_slave(gbe_dev, gbe_intf->slave, node)) { 2051 ret = -ENODEV; 2052 goto fail; 2053 } 2054 2055 gbe_intf->tx_pipe = gbe_dev->tx_pipe; 2056 ndev->ethtool_ops = &keystone_ethtool_ops; 2057 list_add_tail(&gbe_intf->gbe_intf_list, &gbe_dev->gbe_intf_head); 2058 *intf_priv = gbe_intf; 2059 return 0; 2060 2061 fail: 2062 if (gbe_intf->slave) 2063 devm_kfree(gbe_dev->dev, gbe_intf->slave); 2064 if (gbe_intf) 2065 devm_kfree(gbe_dev->dev, gbe_intf); 2066 return ret; 2067 } 2068 2069 static int gbe_release(void *intf_priv) 2070 { 2071 struct gbe_intf *gbe_intf = intf_priv; 2072 2073 gbe_intf->ndev->ethtool_ops = NULL; 2074 list_del(&gbe_intf->gbe_intf_list); 2075 devm_kfree(gbe_intf->dev, gbe_intf->slave); 2076 devm_kfree(gbe_intf->dev, gbe_intf); 2077 return 0; 2078 } 2079 2080 static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv) 2081 { 2082 struct gbe_priv *gbe_dev = inst_priv; 2083 2084 del_timer_sync(&gbe_dev->timer); 2085 cpsw_ale_stop(gbe_dev->ale); 2086 cpsw_ale_destroy(gbe_dev->ale); 2087 netcp_txpipe_close(&gbe_dev->tx_pipe); 2088 free_secondary_ports(gbe_dev); 2089 2090 if (!list_empty(&gbe_dev->gbe_intf_head)) 2091 dev_alert(gbe_dev->dev, "unreleased ethss interfaces present\n"); 2092 2093 devm_kfree(gbe_dev->dev, gbe_dev->hw_stats); 2094 devm_iounmap(gbe_dev->dev, gbe_dev->ss_regs); 2095 memset(gbe_dev, 0x00, sizeof(*gbe_dev)); 2096 devm_kfree(gbe_dev->dev, gbe_dev); 2097 return 0; 2098 } 2099 2100 static struct netcp_module gbe_module = { 2101 .name = GBE_MODULE_NAME, 2102 .owner = THIS_MODULE, 2103 .primary = true, 2104 .probe = gbe_probe, 2105 .open = gbe_open, 2106 .close = gbe_close, 2107 .remove = gbe_remove, 2108 .attach = gbe_attach, 2109 .release = gbe_release, 2110 .add_addr = gbe_add_addr, 2111 .del_addr = gbe_del_addr, 2112 .add_vid = gbe_add_vid, 2113 .del_vid = gbe_del_vid, 2114 .ioctl = gbe_ioctl, 2115 }; 2116 2117 static struct netcp_module xgbe_module = { 2118 .name = XGBE_MODULE_NAME, 2119 .owner = THIS_MODULE, 2120 .primary = true, 2121 .probe = gbe_probe, 2122 .open = gbe_open, 2123 .close = gbe_close, 2124 .remove = gbe_remove, 2125 .attach = gbe_attach, 2126 .release = gbe_release, 2127 .add_addr = gbe_add_addr, 2128 .del_addr = gbe_del_addr, 2129 .add_vid = gbe_add_vid, 2130 .del_vid = gbe_del_vid, 2131 .ioctl = gbe_ioctl, 2132 }; 2133 2134 static int __init keystone_gbe_init(void) 2135 { 2136 int ret; 2137 2138 ret = netcp_register_module(&gbe_module); 2139 if (ret) 2140 return ret; 2141 2142 ret = netcp_register_module(&xgbe_module); 2143 if (ret) 2144 return ret; 2145 2146 return 0; 2147 } 2148 module_init(keystone_gbe_init); 2149 2150 static void __exit keystone_gbe_exit(void) 2151 { 2152 netcp_unregister_module(&gbe_module); 2153 netcp_unregister_module(&xgbe_module); 2154 } 2155 module_exit(keystone_gbe_exit); 2156 2157 MODULE_LICENSE("GPL v2"); 2158 MODULE_DESCRIPTION("TI NETCP ETHSS driver for Keystone SOCs"); 2159 MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com"); 2160