1 /* 2 * Keystone GBE and XGBE subsystem code 3 * 4 * Copyright (C) 2014 Texas Instruments Incorporated 5 * Authors: Sandeep Nair <sandeep_n@ti.com> 6 * Sandeep Paulraj <s-paulraj@ti.com> 7 * Cyril Chemparathy <cyril@ti.com> 8 * Santosh Shilimkar <santosh.shilimkar@ti.com> 9 * Wingman Kwok <w-kwok2@ti.com> 10 * 11 * This program is free software; you can redistribute it and/or 12 * modify it under the terms of the GNU General Public License as 13 * published by the Free Software Foundation version 2. 14 * 15 * This program is distributed "as is" WITHOUT ANY WARRANTY of any 16 * kind, whether express or implied; without even the implied warranty 17 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 * GNU General Public License for more details. 19 */ 20 21 #include <linux/io.h> 22 #include <linux/module.h> 23 #include <linux/of_mdio.h> 24 #include <linux/of_address.h> 25 #include <linux/if_vlan.h> 26 #include <linux/ethtool.h> 27 28 #include "cpsw_ale.h" 29 #include "netcp.h" 30 31 #define NETCP_DRIVER_NAME "TI KeyStone Ethernet Driver" 32 #define NETCP_DRIVER_VERSION "v1.0" 33 34 #define GBE_IDENT(reg) ((reg >> 16) & 0xffff) 35 #define GBE_MAJOR_VERSION(reg) (reg >> 8 & 0x7) 36 #define GBE_MINOR_VERSION(reg) (reg & 0xff) 37 #define GBE_RTL_VERSION(reg) ((reg >> 11) & 0x1f) 38 39 /* 1G Ethernet SS defines */ 40 #define GBE_MODULE_NAME "netcp-gbe" 41 #define GBE_SS_VERSION_14 0x4ed21104 42 43 #define GBE_SS_REG_INDEX 0 44 #define GBE_SGMII34_REG_INDEX 1 45 #define GBE_SM_REG_INDEX 2 46 /* offset relative to base of GBE_SS_REG_INDEX */ 47 #define GBE13_SGMII_MODULE_OFFSET 0x100 48 /* offset relative to base of GBE_SM_REG_INDEX */ 49 #define GBE13_HOST_PORT_OFFSET 0x34 50 #define GBE13_SLAVE_PORT_OFFSET 0x60 51 #define GBE13_EMAC_OFFSET 0x100 52 #define GBE13_SLAVE_PORT2_OFFSET 0x200 53 #define GBE13_HW_STATS_OFFSET 0x300 54 #define GBE13_ALE_OFFSET 0x600 55 #define GBE13_HOST_PORT_NUM 0 56 #define GBE13_NUM_ALE_ENTRIES 1024 57 58 /* 1G Ethernet NU SS defines */ 59 #define GBENU_MODULE_NAME "netcp-gbenu" 60 #define GBE_SS_ID_NU 0x4ee6 61 #define GBE_SS_ID_2U 0x4ee8 62 63 #define IS_SS_ID_MU(d) \ 64 ((GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU) || \ 65 (GBE_IDENT((d)->ss_version) == GBE_SS_ID_2U)) 66 67 #define IS_SS_ID_NU(d) \ 68 (GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU) 69 70 #define GBENU_SS_REG_INDEX 0 71 #define GBENU_SM_REG_INDEX 1 72 #define GBENU_SGMII_MODULE_OFFSET 0x100 73 #define GBENU_HOST_PORT_OFFSET 0x1000 74 #define GBENU_SLAVE_PORT_OFFSET 0x2000 75 #define GBENU_EMAC_OFFSET 0x2330 76 #define GBENU_HW_STATS_OFFSET 0x1a000 77 #define GBENU_ALE_OFFSET 0x1e000 78 #define GBENU_HOST_PORT_NUM 0 79 #define GBENU_NUM_ALE_ENTRIES 1024 80 81 /* 10G Ethernet SS defines */ 82 #define XGBE_MODULE_NAME "netcp-xgbe" 83 #define XGBE_SS_VERSION_10 0x4ee42100 84 85 #define XGBE_SS_REG_INDEX 0 86 #define XGBE_SM_REG_INDEX 1 87 #define XGBE_SERDES_REG_INDEX 2 88 89 /* offset relative to base of XGBE_SS_REG_INDEX */ 90 #define XGBE10_SGMII_MODULE_OFFSET 0x100 91 /* offset relative to base of XGBE_SM_REG_INDEX */ 92 #define XGBE10_HOST_PORT_OFFSET 0x34 93 #define XGBE10_SLAVE_PORT_OFFSET 0x64 94 #define XGBE10_EMAC_OFFSET 0x400 95 #define XGBE10_ALE_OFFSET 0x700 96 #define XGBE10_HW_STATS_OFFSET 0x800 97 #define XGBE10_HOST_PORT_NUM 0 98 #define XGBE10_NUM_ALE_ENTRIES 1024 99 100 #define GBE_TIMER_INTERVAL (HZ / 2) 101 102 /* Soft reset register values */ 103 #define SOFT_RESET_MASK BIT(0) 104 #define SOFT_RESET BIT(0) 105 #define DEVICE_EMACSL_RESET_POLL_COUNT 100 106 #define GMACSL_RET_WARN_RESET_INCOMPLETE -2 107 108 #define MACSL_RX_ENABLE_CSF BIT(23) 109 #define MACSL_ENABLE_EXT_CTL BIT(18) 110 #define MACSL_XGMII_ENABLE BIT(13) 111 #define MACSL_XGIG_MODE BIT(8) 112 #define MACSL_GIG_MODE BIT(7) 113 #define MACSL_GMII_ENABLE BIT(5) 114 #define MACSL_FULLDUPLEX BIT(0) 115 116 #define GBE_CTL_P0_ENABLE BIT(2) 117 #define GBE13_REG_VAL_STAT_ENABLE_ALL 0xff 118 #define XGBE_REG_VAL_STAT_ENABLE_ALL 0xf 119 #define GBE_STATS_CD_SEL BIT(28) 120 121 #define GBE_PORT_MASK(x) (BIT(x) - 1) 122 #define GBE_MASK_NO_PORTS 0 123 124 #define GBE_DEF_1G_MAC_CONTROL \ 125 (MACSL_GIG_MODE | MACSL_GMII_ENABLE | \ 126 MACSL_ENABLE_EXT_CTL | MACSL_RX_ENABLE_CSF) 127 128 #define GBE_DEF_10G_MAC_CONTROL \ 129 (MACSL_XGIG_MODE | MACSL_XGMII_ENABLE | \ 130 MACSL_ENABLE_EXT_CTL | MACSL_RX_ENABLE_CSF) 131 132 #define GBE_STATSA_MODULE 0 133 #define GBE_STATSB_MODULE 1 134 #define GBE_STATSC_MODULE 2 135 #define GBE_STATSD_MODULE 3 136 137 #define GBENU_STATS0_MODULE 0 138 #define GBENU_STATS1_MODULE 1 139 #define GBENU_STATS2_MODULE 2 140 #define GBENU_STATS3_MODULE 3 141 #define GBENU_STATS4_MODULE 4 142 #define GBENU_STATS5_MODULE 5 143 #define GBENU_STATS6_MODULE 6 144 #define GBENU_STATS7_MODULE 7 145 #define GBENU_STATS8_MODULE 8 146 147 #define XGBE_STATS0_MODULE 0 148 #define XGBE_STATS1_MODULE 1 149 #define XGBE_STATS2_MODULE 2 150 151 /* s: 0-based slave_port */ 152 #define SGMII_BASE(s) \ 153 (((s) < 2) ? gbe_dev->sgmii_port_regs : gbe_dev->sgmii_port34_regs) 154 155 #define GBE_TX_QUEUE 648 156 #define GBE_TXHOOK_ORDER 0 157 #define GBE_DEFAULT_ALE_AGEOUT 30 158 #define SLAVE_LINK_IS_XGMII(s) ((s)->link_interface >= XGMII_LINK_MAC_PHY) 159 #define NETCP_LINK_STATE_INVALID -1 160 161 #define GBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \ 162 offsetof(struct gbe##_##rb, rn) 163 #define GBENU_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \ 164 offsetof(struct gbenu##_##rb, rn) 165 #define XGBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \ 166 offsetof(struct xgbe##_##rb, rn) 167 #define GBE_REG_ADDR(p, rb, rn) (p->rb + p->rb##_ofs.rn) 168 169 #define HOST_TX_PRI_MAP_DEFAULT 0x00000000 170 171 struct xgbe_ss_regs { 172 u32 id_ver; 173 u32 synce_count; 174 u32 synce_mux; 175 u32 control; 176 }; 177 178 struct xgbe_switch_regs { 179 u32 id_ver; 180 u32 control; 181 u32 emcontrol; 182 u32 stat_port_en; 183 u32 ptype; 184 u32 soft_idle; 185 u32 thru_rate; 186 u32 gap_thresh; 187 u32 tx_start_wds; 188 u32 flow_control; 189 u32 cppi_thresh; 190 }; 191 192 struct xgbe_port_regs { 193 u32 blk_cnt; 194 u32 port_vlan; 195 u32 tx_pri_map; 196 u32 sa_lo; 197 u32 sa_hi; 198 u32 ts_ctl; 199 u32 ts_seq_ltype; 200 u32 ts_vlan; 201 u32 ts_ctl_ltype2; 202 u32 ts_ctl2; 203 u32 control; 204 }; 205 206 struct xgbe_host_port_regs { 207 u32 blk_cnt; 208 u32 port_vlan; 209 u32 tx_pri_map; 210 u32 src_id; 211 u32 rx_pri_map; 212 u32 rx_maxlen; 213 }; 214 215 struct xgbe_emac_regs { 216 u32 id_ver; 217 u32 mac_control; 218 u32 mac_status; 219 u32 soft_reset; 220 u32 rx_maxlen; 221 u32 __reserved_0; 222 u32 rx_pause; 223 u32 tx_pause; 224 u32 em_control; 225 u32 __reserved_1; 226 u32 tx_gap; 227 u32 rsvd[4]; 228 }; 229 230 struct xgbe_host_hw_stats { 231 u32 rx_good_frames; 232 u32 rx_broadcast_frames; 233 u32 rx_multicast_frames; 234 u32 __rsvd_0[3]; 235 u32 rx_oversized_frames; 236 u32 __rsvd_1; 237 u32 rx_undersized_frames; 238 u32 __rsvd_2; 239 u32 overrun_type4; 240 u32 overrun_type5; 241 u32 rx_bytes; 242 u32 tx_good_frames; 243 u32 tx_broadcast_frames; 244 u32 tx_multicast_frames; 245 u32 __rsvd_3[9]; 246 u32 tx_bytes; 247 u32 tx_64byte_frames; 248 u32 tx_65_to_127byte_frames; 249 u32 tx_128_to_255byte_frames; 250 u32 tx_256_to_511byte_frames; 251 u32 tx_512_to_1023byte_frames; 252 u32 tx_1024byte_frames; 253 u32 net_bytes; 254 u32 rx_sof_overruns; 255 u32 rx_mof_overruns; 256 u32 rx_dma_overruns; 257 }; 258 259 struct xgbe_hw_stats { 260 u32 rx_good_frames; 261 u32 rx_broadcast_frames; 262 u32 rx_multicast_frames; 263 u32 rx_pause_frames; 264 u32 rx_crc_errors; 265 u32 rx_align_code_errors; 266 u32 rx_oversized_frames; 267 u32 rx_jabber_frames; 268 u32 rx_undersized_frames; 269 u32 rx_fragments; 270 u32 overrun_type4; 271 u32 overrun_type5; 272 u32 rx_bytes; 273 u32 tx_good_frames; 274 u32 tx_broadcast_frames; 275 u32 tx_multicast_frames; 276 u32 tx_pause_frames; 277 u32 tx_deferred_frames; 278 u32 tx_collision_frames; 279 u32 tx_single_coll_frames; 280 u32 tx_mult_coll_frames; 281 u32 tx_excessive_collisions; 282 u32 tx_late_collisions; 283 u32 tx_underrun; 284 u32 tx_carrier_sense_errors; 285 u32 tx_bytes; 286 u32 tx_64byte_frames; 287 u32 tx_65_to_127byte_frames; 288 u32 tx_128_to_255byte_frames; 289 u32 tx_256_to_511byte_frames; 290 u32 tx_512_to_1023byte_frames; 291 u32 tx_1024byte_frames; 292 u32 net_bytes; 293 u32 rx_sof_overruns; 294 u32 rx_mof_overruns; 295 u32 rx_dma_overruns; 296 }; 297 298 struct gbenu_ss_regs { 299 u32 id_ver; 300 u32 synce_count; /* NU */ 301 u32 synce_mux; /* NU */ 302 u32 control; /* 2U */ 303 u32 __rsvd_0[2]; /* 2U */ 304 u32 rgmii_status; /* 2U */ 305 u32 ss_status; /* 2U */ 306 }; 307 308 struct gbenu_switch_regs { 309 u32 id_ver; 310 u32 control; 311 u32 __rsvd_0[2]; 312 u32 emcontrol; 313 u32 stat_port_en; 314 u32 ptype; /* NU */ 315 u32 soft_idle; 316 u32 thru_rate; /* NU */ 317 u32 gap_thresh; /* NU */ 318 u32 tx_start_wds; /* NU */ 319 u32 eee_prescale; /* 2U */ 320 u32 tx_g_oflow_thresh_set; /* NU */ 321 u32 tx_g_oflow_thresh_clr; /* NU */ 322 u32 tx_g_buf_thresh_set_l; /* NU */ 323 u32 tx_g_buf_thresh_set_h; /* NU */ 324 u32 tx_g_buf_thresh_clr_l; /* NU */ 325 u32 tx_g_buf_thresh_clr_h; /* NU */ 326 }; 327 328 struct gbenu_port_regs { 329 u32 __rsvd_0; 330 u32 control; 331 u32 max_blks; /* 2U */ 332 u32 mem_align1; 333 u32 blk_cnt; 334 u32 port_vlan; 335 u32 tx_pri_map; /* NU */ 336 u32 pri_ctl; /* 2U */ 337 u32 rx_pri_map; 338 u32 rx_maxlen; 339 u32 tx_blks_pri; /* NU */ 340 u32 __rsvd_1; 341 u32 idle2lpi; /* 2U */ 342 u32 lpi2idle; /* 2U */ 343 u32 eee_status; /* 2U */ 344 u32 __rsvd_2; 345 u32 __rsvd_3[176]; /* NU: more to add */ 346 u32 __rsvd_4[2]; 347 u32 sa_lo; 348 u32 sa_hi; 349 u32 ts_ctl; 350 u32 ts_seq_ltype; 351 u32 ts_vlan; 352 u32 ts_ctl_ltype2; 353 u32 ts_ctl2; 354 }; 355 356 struct gbenu_host_port_regs { 357 u32 __rsvd_0; 358 u32 control; 359 u32 flow_id_offset; /* 2U */ 360 u32 __rsvd_1; 361 u32 blk_cnt; 362 u32 port_vlan; 363 u32 tx_pri_map; /* NU */ 364 u32 pri_ctl; 365 u32 rx_pri_map; 366 u32 rx_maxlen; 367 u32 tx_blks_pri; /* NU */ 368 u32 __rsvd_2; 369 u32 idle2lpi; /* 2U */ 370 u32 lpi2wake; /* 2U */ 371 u32 eee_status; /* 2U */ 372 u32 __rsvd_3; 373 u32 __rsvd_4[184]; /* NU */ 374 u32 host_blks_pri; /* NU */ 375 }; 376 377 struct gbenu_emac_regs { 378 u32 mac_control; 379 u32 mac_status; 380 u32 soft_reset; 381 u32 boff_test; 382 u32 rx_pause; 383 u32 __rsvd_0[11]; /* NU */ 384 u32 tx_pause; 385 u32 __rsvd_1[11]; /* NU */ 386 u32 em_control; 387 u32 tx_gap; 388 }; 389 390 /* Some hw stat regs are applicable to slave port only. 391 * This is handled by gbenu_et_stats struct. Also some 392 * are for SS version NU and some are for 2U. 393 */ 394 struct gbenu_hw_stats { 395 u32 rx_good_frames; 396 u32 rx_broadcast_frames; 397 u32 rx_multicast_frames; 398 u32 rx_pause_frames; /* slave */ 399 u32 rx_crc_errors; 400 u32 rx_align_code_errors; /* slave */ 401 u32 rx_oversized_frames; 402 u32 rx_jabber_frames; /* slave */ 403 u32 rx_undersized_frames; 404 u32 rx_fragments; /* slave */ 405 u32 ale_drop; 406 u32 ale_overrun_drop; 407 u32 rx_bytes; 408 u32 tx_good_frames; 409 u32 tx_broadcast_frames; 410 u32 tx_multicast_frames; 411 u32 tx_pause_frames; /* slave */ 412 u32 tx_deferred_frames; /* slave */ 413 u32 tx_collision_frames; /* slave */ 414 u32 tx_single_coll_frames; /* slave */ 415 u32 tx_mult_coll_frames; /* slave */ 416 u32 tx_excessive_collisions; /* slave */ 417 u32 tx_late_collisions; /* slave */ 418 u32 rx_ipg_error; /* slave 10G only */ 419 u32 tx_carrier_sense_errors; /* slave */ 420 u32 tx_bytes; 421 u32 tx_64B_frames; 422 u32 tx_65_to_127B_frames; 423 u32 tx_128_to_255B_frames; 424 u32 tx_256_to_511B_frames; 425 u32 tx_512_to_1023B_frames; 426 u32 tx_1024B_frames; 427 u32 net_bytes; 428 u32 rx_bottom_fifo_drop; 429 u32 rx_port_mask_drop; 430 u32 rx_top_fifo_drop; 431 u32 ale_rate_limit_drop; 432 u32 ale_vid_ingress_drop; 433 u32 ale_da_eq_sa_drop; 434 u32 __rsvd_0[3]; 435 u32 ale_unknown_ucast; 436 u32 ale_unknown_ucast_bytes; 437 u32 ale_unknown_mcast; 438 u32 ale_unknown_mcast_bytes; 439 u32 ale_unknown_bcast; 440 u32 ale_unknown_bcast_bytes; 441 u32 ale_pol_match; 442 u32 ale_pol_match_red; /* NU */ 443 u32 ale_pol_match_yellow; /* NU */ 444 u32 __rsvd_1[44]; 445 u32 tx_mem_protect_err; 446 /* following NU only */ 447 u32 tx_pri0; 448 u32 tx_pri1; 449 u32 tx_pri2; 450 u32 tx_pri3; 451 u32 tx_pri4; 452 u32 tx_pri5; 453 u32 tx_pri6; 454 u32 tx_pri7; 455 u32 tx_pri0_bcnt; 456 u32 tx_pri1_bcnt; 457 u32 tx_pri2_bcnt; 458 u32 tx_pri3_bcnt; 459 u32 tx_pri4_bcnt; 460 u32 tx_pri5_bcnt; 461 u32 tx_pri6_bcnt; 462 u32 tx_pri7_bcnt; 463 u32 tx_pri0_drop; 464 u32 tx_pri1_drop; 465 u32 tx_pri2_drop; 466 u32 tx_pri3_drop; 467 u32 tx_pri4_drop; 468 u32 tx_pri5_drop; 469 u32 tx_pri6_drop; 470 u32 tx_pri7_drop; 471 u32 tx_pri0_drop_bcnt; 472 u32 tx_pri1_drop_bcnt; 473 u32 tx_pri2_drop_bcnt; 474 u32 tx_pri3_drop_bcnt; 475 u32 tx_pri4_drop_bcnt; 476 u32 tx_pri5_drop_bcnt; 477 u32 tx_pri6_drop_bcnt; 478 u32 tx_pri7_drop_bcnt; 479 }; 480 481 #define GBENU_HW_STATS_REG_MAP_SZ 0x200 482 483 struct gbe_ss_regs { 484 u32 id_ver; 485 u32 synce_count; 486 u32 synce_mux; 487 }; 488 489 struct gbe_ss_regs_ofs { 490 u16 id_ver; 491 u16 control; 492 }; 493 494 struct gbe_switch_regs { 495 u32 id_ver; 496 u32 control; 497 u32 soft_reset; 498 u32 stat_port_en; 499 u32 ptype; 500 u32 soft_idle; 501 u32 thru_rate; 502 u32 gap_thresh; 503 u32 tx_start_wds; 504 u32 flow_control; 505 }; 506 507 struct gbe_switch_regs_ofs { 508 u16 id_ver; 509 u16 control; 510 u16 soft_reset; 511 u16 emcontrol; 512 u16 stat_port_en; 513 u16 ptype; 514 u16 flow_control; 515 }; 516 517 struct gbe_port_regs { 518 u32 max_blks; 519 u32 blk_cnt; 520 u32 port_vlan; 521 u32 tx_pri_map; 522 u32 sa_lo; 523 u32 sa_hi; 524 u32 ts_ctl; 525 u32 ts_seq_ltype; 526 u32 ts_vlan; 527 u32 ts_ctl_ltype2; 528 u32 ts_ctl2; 529 }; 530 531 struct gbe_port_regs_ofs { 532 u16 port_vlan; 533 u16 tx_pri_map; 534 u16 sa_lo; 535 u16 sa_hi; 536 u16 ts_ctl; 537 u16 ts_seq_ltype; 538 u16 ts_vlan; 539 u16 ts_ctl_ltype2; 540 u16 ts_ctl2; 541 u16 rx_maxlen; /* 2U, NU */ 542 }; 543 544 struct gbe_host_port_regs { 545 u32 src_id; 546 u32 port_vlan; 547 u32 rx_pri_map; 548 u32 rx_maxlen; 549 }; 550 551 struct gbe_host_port_regs_ofs { 552 u16 port_vlan; 553 u16 tx_pri_map; 554 u16 rx_maxlen; 555 }; 556 557 struct gbe_emac_regs { 558 u32 id_ver; 559 u32 mac_control; 560 u32 mac_status; 561 u32 soft_reset; 562 u32 rx_maxlen; 563 u32 __reserved_0; 564 u32 rx_pause; 565 u32 tx_pause; 566 u32 __reserved_1; 567 u32 rx_pri_map; 568 u32 rsvd[6]; 569 }; 570 571 struct gbe_emac_regs_ofs { 572 u16 mac_control; 573 u16 soft_reset; 574 u16 rx_maxlen; 575 }; 576 577 struct gbe_hw_stats { 578 u32 rx_good_frames; 579 u32 rx_broadcast_frames; 580 u32 rx_multicast_frames; 581 u32 rx_pause_frames; 582 u32 rx_crc_errors; 583 u32 rx_align_code_errors; 584 u32 rx_oversized_frames; 585 u32 rx_jabber_frames; 586 u32 rx_undersized_frames; 587 u32 rx_fragments; 588 u32 __pad_0[2]; 589 u32 rx_bytes; 590 u32 tx_good_frames; 591 u32 tx_broadcast_frames; 592 u32 tx_multicast_frames; 593 u32 tx_pause_frames; 594 u32 tx_deferred_frames; 595 u32 tx_collision_frames; 596 u32 tx_single_coll_frames; 597 u32 tx_mult_coll_frames; 598 u32 tx_excessive_collisions; 599 u32 tx_late_collisions; 600 u32 tx_underrun; 601 u32 tx_carrier_sense_errors; 602 u32 tx_bytes; 603 u32 tx_64byte_frames; 604 u32 tx_65_to_127byte_frames; 605 u32 tx_128_to_255byte_frames; 606 u32 tx_256_to_511byte_frames; 607 u32 tx_512_to_1023byte_frames; 608 u32 tx_1024byte_frames; 609 u32 net_bytes; 610 u32 rx_sof_overruns; 611 u32 rx_mof_overruns; 612 u32 rx_dma_overruns; 613 }; 614 615 #define GBE_MAX_HW_STAT_MODS 9 616 #define GBE_HW_STATS_REG_MAP_SZ 0x100 617 618 struct gbe_slave { 619 void __iomem *port_regs; 620 void __iomem *emac_regs; 621 struct gbe_port_regs_ofs port_regs_ofs; 622 struct gbe_emac_regs_ofs emac_regs_ofs; 623 int slave_num; /* 0 based logical number */ 624 int port_num; /* actual port number */ 625 atomic_t link_state; 626 bool open; 627 struct phy_device *phy; 628 u32 link_interface; 629 u32 mac_control; 630 u8 phy_port_t; 631 struct device_node *phy_node; 632 struct list_head slave_list; 633 }; 634 635 struct gbe_priv { 636 struct device *dev; 637 struct netcp_device *netcp_device; 638 struct timer_list timer; 639 u32 num_slaves; 640 u32 ale_entries; 641 u32 ale_ports; 642 bool enable_ale; 643 u8 max_num_slaves; 644 u8 max_num_ports; /* max_num_slaves + 1 */ 645 u8 num_stats_mods; 646 struct netcp_tx_pipe tx_pipe; 647 648 int host_port; 649 u32 rx_packet_max; 650 u32 ss_version; 651 u32 stats_en_mask; 652 653 void __iomem *ss_regs; 654 void __iomem *switch_regs; 655 void __iomem *host_port_regs; 656 void __iomem *ale_reg; 657 void __iomem *sgmii_port_regs; 658 void __iomem *sgmii_port34_regs; 659 void __iomem *xgbe_serdes_regs; 660 void __iomem *hw_stats_regs[GBE_MAX_HW_STAT_MODS]; 661 662 struct gbe_ss_regs_ofs ss_regs_ofs; 663 struct gbe_switch_regs_ofs switch_regs_ofs; 664 struct gbe_host_port_regs_ofs host_port_regs_ofs; 665 666 struct cpsw_ale *ale; 667 unsigned int tx_queue_id; 668 const char *dma_chan_name; 669 670 struct list_head gbe_intf_head; 671 struct list_head secondary_slaves; 672 struct net_device *dummy_ndev; 673 674 u64 *hw_stats; 675 u32 *hw_stats_prev; 676 const struct netcp_ethtool_stat *et_stats; 677 int num_et_stats; 678 /* Lock for updating the hwstats */ 679 spinlock_t hw_stats_lock; 680 }; 681 682 struct gbe_intf { 683 struct net_device *ndev; 684 struct device *dev; 685 struct gbe_priv *gbe_dev; 686 struct netcp_tx_pipe tx_pipe; 687 struct gbe_slave *slave; 688 struct list_head gbe_intf_list; 689 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; 690 }; 691 692 static struct netcp_module gbe_module; 693 static struct netcp_module xgbe_module; 694 695 /* Statistic management */ 696 struct netcp_ethtool_stat { 697 char desc[ETH_GSTRING_LEN]; 698 int type; 699 u32 size; 700 int offset; 701 }; 702 703 #define GBE_STATSA_INFO(field) \ 704 { \ 705 "GBE_A:"#field, GBE_STATSA_MODULE, \ 706 FIELD_SIZEOF(struct gbe_hw_stats, field), \ 707 offsetof(struct gbe_hw_stats, field) \ 708 } 709 710 #define GBE_STATSB_INFO(field) \ 711 { \ 712 "GBE_B:"#field, GBE_STATSB_MODULE, \ 713 FIELD_SIZEOF(struct gbe_hw_stats, field), \ 714 offsetof(struct gbe_hw_stats, field) \ 715 } 716 717 #define GBE_STATSC_INFO(field) \ 718 { \ 719 "GBE_C:"#field, GBE_STATSC_MODULE, \ 720 FIELD_SIZEOF(struct gbe_hw_stats, field), \ 721 offsetof(struct gbe_hw_stats, field) \ 722 } 723 724 #define GBE_STATSD_INFO(field) \ 725 { \ 726 "GBE_D:"#field, GBE_STATSD_MODULE, \ 727 FIELD_SIZEOF(struct gbe_hw_stats, field), \ 728 offsetof(struct gbe_hw_stats, field) \ 729 } 730 731 static const struct netcp_ethtool_stat gbe13_et_stats[] = { 732 /* GBE module A */ 733 GBE_STATSA_INFO(rx_good_frames), 734 GBE_STATSA_INFO(rx_broadcast_frames), 735 GBE_STATSA_INFO(rx_multicast_frames), 736 GBE_STATSA_INFO(rx_pause_frames), 737 GBE_STATSA_INFO(rx_crc_errors), 738 GBE_STATSA_INFO(rx_align_code_errors), 739 GBE_STATSA_INFO(rx_oversized_frames), 740 GBE_STATSA_INFO(rx_jabber_frames), 741 GBE_STATSA_INFO(rx_undersized_frames), 742 GBE_STATSA_INFO(rx_fragments), 743 GBE_STATSA_INFO(rx_bytes), 744 GBE_STATSA_INFO(tx_good_frames), 745 GBE_STATSA_INFO(tx_broadcast_frames), 746 GBE_STATSA_INFO(tx_multicast_frames), 747 GBE_STATSA_INFO(tx_pause_frames), 748 GBE_STATSA_INFO(tx_deferred_frames), 749 GBE_STATSA_INFO(tx_collision_frames), 750 GBE_STATSA_INFO(tx_single_coll_frames), 751 GBE_STATSA_INFO(tx_mult_coll_frames), 752 GBE_STATSA_INFO(tx_excessive_collisions), 753 GBE_STATSA_INFO(tx_late_collisions), 754 GBE_STATSA_INFO(tx_underrun), 755 GBE_STATSA_INFO(tx_carrier_sense_errors), 756 GBE_STATSA_INFO(tx_bytes), 757 GBE_STATSA_INFO(tx_64byte_frames), 758 GBE_STATSA_INFO(tx_65_to_127byte_frames), 759 GBE_STATSA_INFO(tx_128_to_255byte_frames), 760 GBE_STATSA_INFO(tx_256_to_511byte_frames), 761 GBE_STATSA_INFO(tx_512_to_1023byte_frames), 762 GBE_STATSA_INFO(tx_1024byte_frames), 763 GBE_STATSA_INFO(net_bytes), 764 GBE_STATSA_INFO(rx_sof_overruns), 765 GBE_STATSA_INFO(rx_mof_overruns), 766 GBE_STATSA_INFO(rx_dma_overruns), 767 /* GBE module B */ 768 GBE_STATSB_INFO(rx_good_frames), 769 GBE_STATSB_INFO(rx_broadcast_frames), 770 GBE_STATSB_INFO(rx_multicast_frames), 771 GBE_STATSB_INFO(rx_pause_frames), 772 GBE_STATSB_INFO(rx_crc_errors), 773 GBE_STATSB_INFO(rx_align_code_errors), 774 GBE_STATSB_INFO(rx_oversized_frames), 775 GBE_STATSB_INFO(rx_jabber_frames), 776 GBE_STATSB_INFO(rx_undersized_frames), 777 GBE_STATSB_INFO(rx_fragments), 778 GBE_STATSB_INFO(rx_bytes), 779 GBE_STATSB_INFO(tx_good_frames), 780 GBE_STATSB_INFO(tx_broadcast_frames), 781 GBE_STATSB_INFO(tx_multicast_frames), 782 GBE_STATSB_INFO(tx_pause_frames), 783 GBE_STATSB_INFO(tx_deferred_frames), 784 GBE_STATSB_INFO(tx_collision_frames), 785 GBE_STATSB_INFO(tx_single_coll_frames), 786 GBE_STATSB_INFO(tx_mult_coll_frames), 787 GBE_STATSB_INFO(tx_excessive_collisions), 788 GBE_STATSB_INFO(tx_late_collisions), 789 GBE_STATSB_INFO(tx_underrun), 790 GBE_STATSB_INFO(tx_carrier_sense_errors), 791 GBE_STATSB_INFO(tx_bytes), 792 GBE_STATSB_INFO(tx_64byte_frames), 793 GBE_STATSB_INFO(tx_65_to_127byte_frames), 794 GBE_STATSB_INFO(tx_128_to_255byte_frames), 795 GBE_STATSB_INFO(tx_256_to_511byte_frames), 796 GBE_STATSB_INFO(tx_512_to_1023byte_frames), 797 GBE_STATSB_INFO(tx_1024byte_frames), 798 GBE_STATSB_INFO(net_bytes), 799 GBE_STATSB_INFO(rx_sof_overruns), 800 GBE_STATSB_INFO(rx_mof_overruns), 801 GBE_STATSB_INFO(rx_dma_overruns), 802 /* GBE module C */ 803 GBE_STATSC_INFO(rx_good_frames), 804 GBE_STATSC_INFO(rx_broadcast_frames), 805 GBE_STATSC_INFO(rx_multicast_frames), 806 GBE_STATSC_INFO(rx_pause_frames), 807 GBE_STATSC_INFO(rx_crc_errors), 808 GBE_STATSC_INFO(rx_align_code_errors), 809 GBE_STATSC_INFO(rx_oversized_frames), 810 GBE_STATSC_INFO(rx_jabber_frames), 811 GBE_STATSC_INFO(rx_undersized_frames), 812 GBE_STATSC_INFO(rx_fragments), 813 GBE_STATSC_INFO(rx_bytes), 814 GBE_STATSC_INFO(tx_good_frames), 815 GBE_STATSC_INFO(tx_broadcast_frames), 816 GBE_STATSC_INFO(tx_multicast_frames), 817 GBE_STATSC_INFO(tx_pause_frames), 818 GBE_STATSC_INFO(tx_deferred_frames), 819 GBE_STATSC_INFO(tx_collision_frames), 820 GBE_STATSC_INFO(tx_single_coll_frames), 821 GBE_STATSC_INFO(tx_mult_coll_frames), 822 GBE_STATSC_INFO(tx_excessive_collisions), 823 GBE_STATSC_INFO(tx_late_collisions), 824 GBE_STATSC_INFO(tx_underrun), 825 GBE_STATSC_INFO(tx_carrier_sense_errors), 826 GBE_STATSC_INFO(tx_bytes), 827 GBE_STATSC_INFO(tx_64byte_frames), 828 GBE_STATSC_INFO(tx_65_to_127byte_frames), 829 GBE_STATSC_INFO(tx_128_to_255byte_frames), 830 GBE_STATSC_INFO(tx_256_to_511byte_frames), 831 GBE_STATSC_INFO(tx_512_to_1023byte_frames), 832 GBE_STATSC_INFO(tx_1024byte_frames), 833 GBE_STATSC_INFO(net_bytes), 834 GBE_STATSC_INFO(rx_sof_overruns), 835 GBE_STATSC_INFO(rx_mof_overruns), 836 GBE_STATSC_INFO(rx_dma_overruns), 837 /* GBE module D */ 838 GBE_STATSD_INFO(rx_good_frames), 839 GBE_STATSD_INFO(rx_broadcast_frames), 840 GBE_STATSD_INFO(rx_multicast_frames), 841 GBE_STATSD_INFO(rx_pause_frames), 842 GBE_STATSD_INFO(rx_crc_errors), 843 GBE_STATSD_INFO(rx_align_code_errors), 844 GBE_STATSD_INFO(rx_oversized_frames), 845 GBE_STATSD_INFO(rx_jabber_frames), 846 GBE_STATSD_INFO(rx_undersized_frames), 847 GBE_STATSD_INFO(rx_fragments), 848 GBE_STATSD_INFO(rx_bytes), 849 GBE_STATSD_INFO(tx_good_frames), 850 GBE_STATSD_INFO(tx_broadcast_frames), 851 GBE_STATSD_INFO(tx_multicast_frames), 852 GBE_STATSD_INFO(tx_pause_frames), 853 GBE_STATSD_INFO(tx_deferred_frames), 854 GBE_STATSD_INFO(tx_collision_frames), 855 GBE_STATSD_INFO(tx_single_coll_frames), 856 GBE_STATSD_INFO(tx_mult_coll_frames), 857 GBE_STATSD_INFO(tx_excessive_collisions), 858 GBE_STATSD_INFO(tx_late_collisions), 859 GBE_STATSD_INFO(tx_underrun), 860 GBE_STATSD_INFO(tx_carrier_sense_errors), 861 GBE_STATSD_INFO(tx_bytes), 862 GBE_STATSD_INFO(tx_64byte_frames), 863 GBE_STATSD_INFO(tx_65_to_127byte_frames), 864 GBE_STATSD_INFO(tx_128_to_255byte_frames), 865 GBE_STATSD_INFO(tx_256_to_511byte_frames), 866 GBE_STATSD_INFO(tx_512_to_1023byte_frames), 867 GBE_STATSD_INFO(tx_1024byte_frames), 868 GBE_STATSD_INFO(net_bytes), 869 GBE_STATSD_INFO(rx_sof_overruns), 870 GBE_STATSD_INFO(rx_mof_overruns), 871 GBE_STATSD_INFO(rx_dma_overruns), 872 }; 873 874 /* This is the size of entries in GBENU_STATS_HOST */ 875 #define GBENU_ET_STATS_HOST_SIZE 52 876 877 #define GBENU_STATS_HOST(field) \ 878 { \ 879 "GBE_HOST:"#field, GBENU_STATS0_MODULE, \ 880 FIELD_SIZEOF(struct gbenu_hw_stats, field), \ 881 offsetof(struct gbenu_hw_stats, field) \ 882 } 883 884 /* This is the size of entries in GBENU_STATS_PORT */ 885 #define GBENU_ET_STATS_PORT_SIZE 65 886 887 #define GBENU_STATS_P1(field) \ 888 { \ 889 "GBE_P1:"#field, GBENU_STATS1_MODULE, \ 890 FIELD_SIZEOF(struct gbenu_hw_stats, field), \ 891 offsetof(struct gbenu_hw_stats, field) \ 892 } 893 894 #define GBENU_STATS_P2(field) \ 895 { \ 896 "GBE_P2:"#field, GBENU_STATS2_MODULE, \ 897 FIELD_SIZEOF(struct gbenu_hw_stats, field), \ 898 offsetof(struct gbenu_hw_stats, field) \ 899 } 900 901 #define GBENU_STATS_P3(field) \ 902 { \ 903 "GBE_P3:"#field, GBENU_STATS3_MODULE, \ 904 FIELD_SIZEOF(struct gbenu_hw_stats, field), \ 905 offsetof(struct gbenu_hw_stats, field) \ 906 } 907 908 #define GBENU_STATS_P4(field) \ 909 { \ 910 "GBE_P4:"#field, GBENU_STATS4_MODULE, \ 911 FIELD_SIZEOF(struct gbenu_hw_stats, field), \ 912 offsetof(struct gbenu_hw_stats, field) \ 913 } 914 915 #define GBENU_STATS_P5(field) \ 916 { \ 917 "GBE_P5:"#field, GBENU_STATS5_MODULE, \ 918 FIELD_SIZEOF(struct gbenu_hw_stats, field), \ 919 offsetof(struct gbenu_hw_stats, field) \ 920 } 921 922 #define GBENU_STATS_P6(field) \ 923 { \ 924 "GBE_P6:"#field, GBENU_STATS6_MODULE, \ 925 FIELD_SIZEOF(struct gbenu_hw_stats, field), \ 926 offsetof(struct gbenu_hw_stats, field) \ 927 } 928 929 #define GBENU_STATS_P7(field) \ 930 { \ 931 "GBE_P7:"#field, GBENU_STATS7_MODULE, \ 932 FIELD_SIZEOF(struct gbenu_hw_stats, field), \ 933 offsetof(struct gbenu_hw_stats, field) \ 934 } 935 936 #define GBENU_STATS_P8(field) \ 937 { \ 938 "GBE_P8:"#field, GBENU_STATS8_MODULE, \ 939 FIELD_SIZEOF(struct gbenu_hw_stats, field), \ 940 offsetof(struct gbenu_hw_stats, field) \ 941 } 942 943 static const struct netcp_ethtool_stat gbenu_et_stats[] = { 944 /* GBENU Host Module */ 945 GBENU_STATS_HOST(rx_good_frames), 946 GBENU_STATS_HOST(rx_broadcast_frames), 947 GBENU_STATS_HOST(rx_multicast_frames), 948 GBENU_STATS_HOST(rx_crc_errors), 949 GBENU_STATS_HOST(rx_oversized_frames), 950 GBENU_STATS_HOST(rx_undersized_frames), 951 GBENU_STATS_HOST(ale_drop), 952 GBENU_STATS_HOST(ale_overrun_drop), 953 GBENU_STATS_HOST(rx_bytes), 954 GBENU_STATS_HOST(tx_good_frames), 955 GBENU_STATS_HOST(tx_broadcast_frames), 956 GBENU_STATS_HOST(tx_multicast_frames), 957 GBENU_STATS_HOST(tx_bytes), 958 GBENU_STATS_HOST(tx_64B_frames), 959 GBENU_STATS_HOST(tx_65_to_127B_frames), 960 GBENU_STATS_HOST(tx_128_to_255B_frames), 961 GBENU_STATS_HOST(tx_256_to_511B_frames), 962 GBENU_STATS_HOST(tx_512_to_1023B_frames), 963 GBENU_STATS_HOST(tx_1024B_frames), 964 GBENU_STATS_HOST(net_bytes), 965 GBENU_STATS_HOST(rx_bottom_fifo_drop), 966 GBENU_STATS_HOST(rx_port_mask_drop), 967 GBENU_STATS_HOST(rx_top_fifo_drop), 968 GBENU_STATS_HOST(ale_rate_limit_drop), 969 GBENU_STATS_HOST(ale_vid_ingress_drop), 970 GBENU_STATS_HOST(ale_da_eq_sa_drop), 971 GBENU_STATS_HOST(ale_unknown_ucast), 972 GBENU_STATS_HOST(ale_unknown_ucast_bytes), 973 GBENU_STATS_HOST(ale_unknown_mcast), 974 GBENU_STATS_HOST(ale_unknown_mcast_bytes), 975 GBENU_STATS_HOST(ale_unknown_bcast), 976 GBENU_STATS_HOST(ale_unknown_bcast_bytes), 977 GBENU_STATS_HOST(ale_pol_match), 978 GBENU_STATS_HOST(ale_pol_match_red), 979 GBENU_STATS_HOST(ale_pol_match_yellow), 980 GBENU_STATS_HOST(tx_mem_protect_err), 981 GBENU_STATS_HOST(tx_pri0_drop), 982 GBENU_STATS_HOST(tx_pri1_drop), 983 GBENU_STATS_HOST(tx_pri2_drop), 984 GBENU_STATS_HOST(tx_pri3_drop), 985 GBENU_STATS_HOST(tx_pri4_drop), 986 GBENU_STATS_HOST(tx_pri5_drop), 987 GBENU_STATS_HOST(tx_pri6_drop), 988 GBENU_STATS_HOST(tx_pri7_drop), 989 GBENU_STATS_HOST(tx_pri0_drop_bcnt), 990 GBENU_STATS_HOST(tx_pri1_drop_bcnt), 991 GBENU_STATS_HOST(tx_pri2_drop_bcnt), 992 GBENU_STATS_HOST(tx_pri3_drop_bcnt), 993 GBENU_STATS_HOST(tx_pri4_drop_bcnt), 994 GBENU_STATS_HOST(tx_pri5_drop_bcnt), 995 GBENU_STATS_HOST(tx_pri6_drop_bcnt), 996 GBENU_STATS_HOST(tx_pri7_drop_bcnt), 997 /* GBENU Module 1 */ 998 GBENU_STATS_P1(rx_good_frames), 999 GBENU_STATS_P1(rx_broadcast_frames), 1000 GBENU_STATS_P1(rx_multicast_frames), 1001 GBENU_STATS_P1(rx_pause_frames), 1002 GBENU_STATS_P1(rx_crc_errors), 1003 GBENU_STATS_P1(rx_align_code_errors), 1004 GBENU_STATS_P1(rx_oversized_frames), 1005 GBENU_STATS_P1(rx_jabber_frames), 1006 GBENU_STATS_P1(rx_undersized_frames), 1007 GBENU_STATS_P1(rx_fragments), 1008 GBENU_STATS_P1(ale_drop), 1009 GBENU_STATS_P1(ale_overrun_drop), 1010 GBENU_STATS_P1(rx_bytes), 1011 GBENU_STATS_P1(tx_good_frames), 1012 GBENU_STATS_P1(tx_broadcast_frames), 1013 GBENU_STATS_P1(tx_multicast_frames), 1014 GBENU_STATS_P1(tx_pause_frames), 1015 GBENU_STATS_P1(tx_deferred_frames), 1016 GBENU_STATS_P1(tx_collision_frames), 1017 GBENU_STATS_P1(tx_single_coll_frames), 1018 GBENU_STATS_P1(tx_mult_coll_frames), 1019 GBENU_STATS_P1(tx_excessive_collisions), 1020 GBENU_STATS_P1(tx_late_collisions), 1021 GBENU_STATS_P1(rx_ipg_error), 1022 GBENU_STATS_P1(tx_carrier_sense_errors), 1023 GBENU_STATS_P1(tx_bytes), 1024 GBENU_STATS_P1(tx_64B_frames), 1025 GBENU_STATS_P1(tx_65_to_127B_frames), 1026 GBENU_STATS_P1(tx_128_to_255B_frames), 1027 GBENU_STATS_P1(tx_256_to_511B_frames), 1028 GBENU_STATS_P1(tx_512_to_1023B_frames), 1029 GBENU_STATS_P1(tx_1024B_frames), 1030 GBENU_STATS_P1(net_bytes), 1031 GBENU_STATS_P1(rx_bottom_fifo_drop), 1032 GBENU_STATS_P1(rx_port_mask_drop), 1033 GBENU_STATS_P1(rx_top_fifo_drop), 1034 GBENU_STATS_P1(ale_rate_limit_drop), 1035 GBENU_STATS_P1(ale_vid_ingress_drop), 1036 GBENU_STATS_P1(ale_da_eq_sa_drop), 1037 GBENU_STATS_P1(ale_unknown_ucast), 1038 GBENU_STATS_P1(ale_unknown_ucast_bytes), 1039 GBENU_STATS_P1(ale_unknown_mcast), 1040 GBENU_STATS_P1(ale_unknown_mcast_bytes), 1041 GBENU_STATS_P1(ale_unknown_bcast), 1042 GBENU_STATS_P1(ale_unknown_bcast_bytes), 1043 GBENU_STATS_P1(ale_pol_match), 1044 GBENU_STATS_P1(ale_pol_match_red), 1045 GBENU_STATS_P1(ale_pol_match_yellow), 1046 GBENU_STATS_P1(tx_mem_protect_err), 1047 GBENU_STATS_P1(tx_pri0_drop), 1048 GBENU_STATS_P1(tx_pri1_drop), 1049 GBENU_STATS_P1(tx_pri2_drop), 1050 GBENU_STATS_P1(tx_pri3_drop), 1051 GBENU_STATS_P1(tx_pri4_drop), 1052 GBENU_STATS_P1(tx_pri5_drop), 1053 GBENU_STATS_P1(tx_pri6_drop), 1054 GBENU_STATS_P1(tx_pri7_drop), 1055 GBENU_STATS_P1(tx_pri0_drop_bcnt), 1056 GBENU_STATS_P1(tx_pri1_drop_bcnt), 1057 GBENU_STATS_P1(tx_pri2_drop_bcnt), 1058 GBENU_STATS_P1(tx_pri3_drop_bcnt), 1059 GBENU_STATS_P1(tx_pri4_drop_bcnt), 1060 GBENU_STATS_P1(tx_pri5_drop_bcnt), 1061 GBENU_STATS_P1(tx_pri6_drop_bcnt), 1062 GBENU_STATS_P1(tx_pri7_drop_bcnt), 1063 /* GBENU Module 2 */ 1064 GBENU_STATS_P2(rx_good_frames), 1065 GBENU_STATS_P2(rx_broadcast_frames), 1066 GBENU_STATS_P2(rx_multicast_frames), 1067 GBENU_STATS_P2(rx_pause_frames), 1068 GBENU_STATS_P2(rx_crc_errors), 1069 GBENU_STATS_P2(rx_align_code_errors), 1070 GBENU_STATS_P2(rx_oversized_frames), 1071 GBENU_STATS_P2(rx_jabber_frames), 1072 GBENU_STATS_P2(rx_undersized_frames), 1073 GBENU_STATS_P2(rx_fragments), 1074 GBENU_STATS_P2(ale_drop), 1075 GBENU_STATS_P2(ale_overrun_drop), 1076 GBENU_STATS_P2(rx_bytes), 1077 GBENU_STATS_P2(tx_good_frames), 1078 GBENU_STATS_P2(tx_broadcast_frames), 1079 GBENU_STATS_P2(tx_multicast_frames), 1080 GBENU_STATS_P2(tx_pause_frames), 1081 GBENU_STATS_P2(tx_deferred_frames), 1082 GBENU_STATS_P2(tx_collision_frames), 1083 GBENU_STATS_P2(tx_single_coll_frames), 1084 GBENU_STATS_P2(tx_mult_coll_frames), 1085 GBENU_STATS_P2(tx_excessive_collisions), 1086 GBENU_STATS_P2(tx_late_collisions), 1087 GBENU_STATS_P2(rx_ipg_error), 1088 GBENU_STATS_P2(tx_carrier_sense_errors), 1089 GBENU_STATS_P2(tx_bytes), 1090 GBENU_STATS_P2(tx_64B_frames), 1091 GBENU_STATS_P2(tx_65_to_127B_frames), 1092 GBENU_STATS_P2(tx_128_to_255B_frames), 1093 GBENU_STATS_P2(tx_256_to_511B_frames), 1094 GBENU_STATS_P2(tx_512_to_1023B_frames), 1095 GBENU_STATS_P2(tx_1024B_frames), 1096 GBENU_STATS_P2(net_bytes), 1097 GBENU_STATS_P2(rx_bottom_fifo_drop), 1098 GBENU_STATS_P2(rx_port_mask_drop), 1099 GBENU_STATS_P2(rx_top_fifo_drop), 1100 GBENU_STATS_P2(ale_rate_limit_drop), 1101 GBENU_STATS_P2(ale_vid_ingress_drop), 1102 GBENU_STATS_P2(ale_da_eq_sa_drop), 1103 GBENU_STATS_P2(ale_unknown_ucast), 1104 GBENU_STATS_P2(ale_unknown_ucast_bytes), 1105 GBENU_STATS_P2(ale_unknown_mcast), 1106 GBENU_STATS_P2(ale_unknown_mcast_bytes), 1107 GBENU_STATS_P2(ale_unknown_bcast), 1108 GBENU_STATS_P2(ale_unknown_bcast_bytes), 1109 GBENU_STATS_P2(ale_pol_match), 1110 GBENU_STATS_P2(ale_pol_match_red), 1111 GBENU_STATS_P2(ale_pol_match_yellow), 1112 GBENU_STATS_P2(tx_mem_protect_err), 1113 GBENU_STATS_P2(tx_pri0_drop), 1114 GBENU_STATS_P2(tx_pri1_drop), 1115 GBENU_STATS_P2(tx_pri2_drop), 1116 GBENU_STATS_P2(tx_pri3_drop), 1117 GBENU_STATS_P2(tx_pri4_drop), 1118 GBENU_STATS_P2(tx_pri5_drop), 1119 GBENU_STATS_P2(tx_pri6_drop), 1120 GBENU_STATS_P2(tx_pri7_drop), 1121 GBENU_STATS_P2(tx_pri0_drop_bcnt), 1122 GBENU_STATS_P2(tx_pri1_drop_bcnt), 1123 GBENU_STATS_P2(tx_pri2_drop_bcnt), 1124 GBENU_STATS_P2(tx_pri3_drop_bcnt), 1125 GBENU_STATS_P2(tx_pri4_drop_bcnt), 1126 GBENU_STATS_P2(tx_pri5_drop_bcnt), 1127 GBENU_STATS_P2(tx_pri6_drop_bcnt), 1128 GBENU_STATS_P2(tx_pri7_drop_bcnt), 1129 /* GBENU Module 3 */ 1130 GBENU_STATS_P3(rx_good_frames), 1131 GBENU_STATS_P3(rx_broadcast_frames), 1132 GBENU_STATS_P3(rx_multicast_frames), 1133 GBENU_STATS_P3(rx_pause_frames), 1134 GBENU_STATS_P3(rx_crc_errors), 1135 GBENU_STATS_P3(rx_align_code_errors), 1136 GBENU_STATS_P3(rx_oversized_frames), 1137 GBENU_STATS_P3(rx_jabber_frames), 1138 GBENU_STATS_P3(rx_undersized_frames), 1139 GBENU_STATS_P3(rx_fragments), 1140 GBENU_STATS_P3(ale_drop), 1141 GBENU_STATS_P3(ale_overrun_drop), 1142 GBENU_STATS_P3(rx_bytes), 1143 GBENU_STATS_P3(tx_good_frames), 1144 GBENU_STATS_P3(tx_broadcast_frames), 1145 GBENU_STATS_P3(tx_multicast_frames), 1146 GBENU_STATS_P3(tx_pause_frames), 1147 GBENU_STATS_P3(tx_deferred_frames), 1148 GBENU_STATS_P3(tx_collision_frames), 1149 GBENU_STATS_P3(tx_single_coll_frames), 1150 GBENU_STATS_P3(tx_mult_coll_frames), 1151 GBENU_STATS_P3(tx_excessive_collisions), 1152 GBENU_STATS_P3(tx_late_collisions), 1153 GBENU_STATS_P3(rx_ipg_error), 1154 GBENU_STATS_P3(tx_carrier_sense_errors), 1155 GBENU_STATS_P3(tx_bytes), 1156 GBENU_STATS_P3(tx_64B_frames), 1157 GBENU_STATS_P3(tx_65_to_127B_frames), 1158 GBENU_STATS_P3(tx_128_to_255B_frames), 1159 GBENU_STATS_P3(tx_256_to_511B_frames), 1160 GBENU_STATS_P3(tx_512_to_1023B_frames), 1161 GBENU_STATS_P3(tx_1024B_frames), 1162 GBENU_STATS_P3(net_bytes), 1163 GBENU_STATS_P3(rx_bottom_fifo_drop), 1164 GBENU_STATS_P3(rx_port_mask_drop), 1165 GBENU_STATS_P3(rx_top_fifo_drop), 1166 GBENU_STATS_P3(ale_rate_limit_drop), 1167 GBENU_STATS_P3(ale_vid_ingress_drop), 1168 GBENU_STATS_P3(ale_da_eq_sa_drop), 1169 GBENU_STATS_P3(ale_unknown_ucast), 1170 GBENU_STATS_P3(ale_unknown_ucast_bytes), 1171 GBENU_STATS_P3(ale_unknown_mcast), 1172 GBENU_STATS_P3(ale_unknown_mcast_bytes), 1173 GBENU_STATS_P3(ale_unknown_bcast), 1174 GBENU_STATS_P3(ale_unknown_bcast_bytes), 1175 GBENU_STATS_P3(ale_pol_match), 1176 GBENU_STATS_P3(ale_pol_match_red), 1177 GBENU_STATS_P3(ale_pol_match_yellow), 1178 GBENU_STATS_P3(tx_mem_protect_err), 1179 GBENU_STATS_P3(tx_pri0_drop), 1180 GBENU_STATS_P3(tx_pri1_drop), 1181 GBENU_STATS_P3(tx_pri2_drop), 1182 GBENU_STATS_P3(tx_pri3_drop), 1183 GBENU_STATS_P3(tx_pri4_drop), 1184 GBENU_STATS_P3(tx_pri5_drop), 1185 GBENU_STATS_P3(tx_pri6_drop), 1186 GBENU_STATS_P3(tx_pri7_drop), 1187 GBENU_STATS_P3(tx_pri0_drop_bcnt), 1188 GBENU_STATS_P3(tx_pri1_drop_bcnt), 1189 GBENU_STATS_P3(tx_pri2_drop_bcnt), 1190 GBENU_STATS_P3(tx_pri3_drop_bcnt), 1191 GBENU_STATS_P3(tx_pri4_drop_bcnt), 1192 GBENU_STATS_P3(tx_pri5_drop_bcnt), 1193 GBENU_STATS_P3(tx_pri6_drop_bcnt), 1194 GBENU_STATS_P3(tx_pri7_drop_bcnt), 1195 /* GBENU Module 4 */ 1196 GBENU_STATS_P4(rx_good_frames), 1197 GBENU_STATS_P4(rx_broadcast_frames), 1198 GBENU_STATS_P4(rx_multicast_frames), 1199 GBENU_STATS_P4(rx_pause_frames), 1200 GBENU_STATS_P4(rx_crc_errors), 1201 GBENU_STATS_P4(rx_align_code_errors), 1202 GBENU_STATS_P4(rx_oversized_frames), 1203 GBENU_STATS_P4(rx_jabber_frames), 1204 GBENU_STATS_P4(rx_undersized_frames), 1205 GBENU_STATS_P4(rx_fragments), 1206 GBENU_STATS_P4(ale_drop), 1207 GBENU_STATS_P4(ale_overrun_drop), 1208 GBENU_STATS_P4(rx_bytes), 1209 GBENU_STATS_P4(tx_good_frames), 1210 GBENU_STATS_P4(tx_broadcast_frames), 1211 GBENU_STATS_P4(tx_multicast_frames), 1212 GBENU_STATS_P4(tx_pause_frames), 1213 GBENU_STATS_P4(tx_deferred_frames), 1214 GBENU_STATS_P4(tx_collision_frames), 1215 GBENU_STATS_P4(tx_single_coll_frames), 1216 GBENU_STATS_P4(tx_mult_coll_frames), 1217 GBENU_STATS_P4(tx_excessive_collisions), 1218 GBENU_STATS_P4(tx_late_collisions), 1219 GBENU_STATS_P4(rx_ipg_error), 1220 GBENU_STATS_P4(tx_carrier_sense_errors), 1221 GBENU_STATS_P4(tx_bytes), 1222 GBENU_STATS_P4(tx_64B_frames), 1223 GBENU_STATS_P4(tx_65_to_127B_frames), 1224 GBENU_STATS_P4(tx_128_to_255B_frames), 1225 GBENU_STATS_P4(tx_256_to_511B_frames), 1226 GBENU_STATS_P4(tx_512_to_1023B_frames), 1227 GBENU_STATS_P4(tx_1024B_frames), 1228 GBENU_STATS_P4(net_bytes), 1229 GBENU_STATS_P4(rx_bottom_fifo_drop), 1230 GBENU_STATS_P4(rx_port_mask_drop), 1231 GBENU_STATS_P4(rx_top_fifo_drop), 1232 GBENU_STATS_P4(ale_rate_limit_drop), 1233 GBENU_STATS_P4(ale_vid_ingress_drop), 1234 GBENU_STATS_P4(ale_da_eq_sa_drop), 1235 GBENU_STATS_P4(ale_unknown_ucast), 1236 GBENU_STATS_P4(ale_unknown_ucast_bytes), 1237 GBENU_STATS_P4(ale_unknown_mcast), 1238 GBENU_STATS_P4(ale_unknown_mcast_bytes), 1239 GBENU_STATS_P4(ale_unknown_bcast), 1240 GBENU_STATS_P4(ale_unknown_bcast_bytes), 1241 GBENU_STATS_P4(ale_pol_match), 1242 GBENU_STATS_P4(ale_pol_match_red), 1243 GBENU_STATS_P4(ale_pol_match_yellow), 1244 GBENU_STATS_P4(tx_mem_protect_err), 1245 GBENU_STATS_P4(tx_pri0_drop), 1246 GBENU_STATS_P4(tx_pri1_drop), 1247 GBENU_STATS_P4(tx_pri2_drop), 1248 GBENU_STATS_P4(tx_pri3_drop), 1249 GBENU_STATS_P4(tx_pri4_drop), 1250 GBENU_STATS_P4(tx_pri5_drop), 1251 GBENU_STATS_P4(tx_pri6_drop), 1252 GBENU_STATS_P4(tx_pri7_drop), 1253 GBENU_STATS_P4(tx_pri0_drop_bcnt), 1254 GBENU_STATS_P4(tx_pri1_drop_bcnt), 1255 GBENU_STATS_P4(tx_pri2_drop_bcnt), 1256 GBENU_STATS_P4(tx_pri3_drop_bcnt), 1257 GBENU_STATS_P4(tx_pri4_drop_bcnt), 1258 GBENU_STATS_P4(tx_pri5_drop_bcnt), 1259 GBENU_STATS_P4(tx_pri6_drop_bcnt), 1260 GBENU_STATS_P4(tx_pri7_drop_bcnt), 1261 /* GBENU Module 5 */ 1262 GBENU_STATS_P5(rx_good_frames), 1263 GBENU_STATS_P5(rx_broadcast_frames), 1264 GBENU_STATS_P5(rx_multicast_frames), 1265 GBENU_STATS_P5(rx_pause_frames), 1266 GBENU_STATS_P5(rx_crc_errors), 1267 GBENU_STATS_P5(rx_align_code_errors), 1268 GBENU_STATS_P5(rx_oversized_frames), 1269 GBENU_STATS_P5(rx_jabber_frames), 1270 GBENU_STATS_P5(rx_undersized_frames), 1271 GBENU_STATS_P5(rx_fragments), 1272 GBENU_STATS_P5(ale_drop), 1273 GBENU_STATS_P5(ale_overrun_drop), 1274 GBENU_STATS_P5(rx_bytes), 1275 GBENU_STATS_P5(tx_good_frames), 1276 GBENU_STATS_P5(tx_broadcast_frames), 1277 GBENU_STATS_P5(tx_multicast_frames), 1278 GBENU_STATS_P5(tx_pause_frames), 1279 GBENU_STATS_P5(tx_deferred_frames), 1280 GBENU_STATS_P5(tx_collision_frames), 1281 GBENU_STATS_P5(tx_single_coll_frames), 1282 GBENU_STATS_P5(tx_mult_coll_frames), 1283 GBENU_STATS_P5(tx_excessive_collisions), 1284 GBENU_STATS_P5(tx_late_collisions), 1285 GBENU_STATS_P5(rx_ipg_error), 1286 GBENU_STATS_P5(tx_carrier_sense_errors), 1287 GBENU_STATS_P5(tx_bytes), 1288 GBENU_STATS_P5(tx_64B_frames), 1289 GBENU_STATS_P5(tx_65_to_127B_frames), 1290 GBENU_STATS_P5(tx_128_to_255B_frames), 1291 GBENU_STATS_P5(tx_256_to_511B_frames), 1292 GBENU_STATS_P5(tx_512_to_1023B_frames), 1293 GBENU_STATS_P5(tx_1024B_frames), 1294 GBENU_STATS_P5(net_bytes), 1295 GBENU_STATS_P5(rx_bottom_fifo_drop), 1296 GBENU_STATS_P5(rx_port_mask_drop), 1297 GBENU_STATS_P5(rx_top_fifo_drop), 1298 GBENU_STATS_P5(ale_rate_limit_drop), 1299 GBENU_STATS_P5(ale_vid_ingress_drop), 1300 GBENU_STATS_P5(ale_da_eq_sa_drop), 1301 GBENU_STATS_P5(ale_unknown_ucast), 1302 GBENU_STATS_P5(ale_unknown_ucast_bytes), 1303 GBENU_STATS_P5(ale_unknown_mcast), 1304 GBENU_STATS_P5(ale_unknown_mcast_bytes), 1305 GBENU_STATS_P5(ale_unknown_bcast), 1306 GBENU_STATS_P5(ale_unknown_bcast_bytes), 1307 GBENU_STATS_P5(ale_pol_match), 1308 GBENU_STATS_P5(ale_pol_match_red), 1309 GBENU_STATS_P5(ale_pol_match_yellow), 1310 GBENU_STATS_P5(tx_mem_protect_err), 1311 GBENU_STATS_P5(tx_pri0_drop), 1312 GBENU_STATS_P5(tx_pri1_drop), 1313 GBENU_STATS_P5(tx_pri2_drop), 1314 GBENU_STATS_P5(tx_pri3_drop), 1315 GBENU_STATS_P5(tx_pri4_drop), 1316 GBENU_STATS_P5(tx_pri5_drop), 1317 GBENU_STATS_P5(tx_pri6_drop), 1318 GBENU_STATS_P5(tx_pri7_drop), 1319 GBENU_STATS_P5(tx_pri0_drop_bcnt), 1320 GBENU_STATS_P5(tx_pri1_drop_bcnt), 1321 GBENU_STATS_P5(tx_pri2_drop_bcnt), 1322 GBENU_STATS_P5(tx_pri3_drop_bcnt), 1323 GBENU_STATS_P5(tx_pri4_drop_bcnt), 1324 GBENU_STATS_P5(tx_pri5_drop_bcnt), 1325 GBENU_STATS_P5(tx_pri6_drop_bcnt), 1326 GBENU_STATS_P5(tx_pri7_drop_bcnt), 1327 /* GBENU Module 6 */ 1328 GBENU_STATS_P6(rx_good_frames), 1329 GBENU_STATS_P6(rx_broadcast_frames), 1330 GBENU_STATS_P6(rx_multicast_frames), 1331 GBENU_STATS_P6(rx_pause_frames), 1332 GBENU_STATS_P6(rx_crc_errors), 1333 GBENU_STATS_P6(rx_align_code_errors), 1334 GBENU_STATS_P6(rx_oversized_frames), 1335 GBENU_STATS_P6(rx_jabber_frames), 1336 GBENU_STATS_P6(rx_undersized_frames), 1337 GBENU_STATS_P6(rx_fragments), 1338 GBENU_STATS_P6(ale_drop), 1339 GBENU_STATS_P6(ale_overrun_drop), 1340 GBENU_STATS_P6(rx_bytes), 1341 GBENU_STATS_P6(tx_good_frames), 1342 GBENU_STATS_P6(tx_broadcast_frames), 1343 GBENU_STATS_P6(tx_multicast_frames), 1344 GBENU_STATS_P6(tx_pause_frames), 1345 GBENU_STATS_P6(tx_deferred_frames), 1346 GBENU_STATS_P6(tx_collision_frames), 1347 GBENU_STATS_P6(tx_single_coll_frames), 1348 GBENU_STATS_P6(tx_mult_coll_frames), 1349 GBENU_STATS_P6(tx_excessive_collisions), 1350 GBENU_STATS_P6(tx_late_collisions), 1351 GBENU_STATS_P6(rx_ipg_error), 1352 GBENU_STATS_P6(tx_carrier_sense_errors), 1353 GBENU_STATS_P6(tx_bytes), 1354 GBENU_STATS_P6(tx_64B_frames), 1355 GBENU_STATS_P6(tx_65_to_127B_frames), 1356 GBENU_STATS_P6(tx_128_to_255B_frames), 1357 GBENU_STATS_P6(tx_256_to_511B_frames), 1358 GBENU_STATS_P6(tx_512_to_1023B_frames), 1359 GBENU_STATS_P6(tx_1024B_frames), 1360 GBENU_STATS_P6(net_bytes), 1361 GBENU_STATS_P6(rx_bottom_fifo_drop), 1362 GBENU_STATS_P6(rx_port_mask_drop), 1363 GBENU_STATS_P6(rx_top_fifo_drop), 1364 GBENU_STATS_P6(ale_rate_limit_drop), 1365 GBENU_STATS_P6(ale_vid_ingress_drop), 1366 GBENU_STATS_P6(ale_da_eq_sa_drop), 1367 GBENU_STATS_P6(ale_unknown_ucast), 1368 GBENU_STATS_P6(ale_unknown_ucast_bytes), 1369 GBENU_STATS_P6(ale_unknown_mcast), 1370 GBENU_STATS_P6(ale_unknown_mcast_bytes), 1371 GBENU_STATS_P6(ale_unknown_bcast), 1372 GBENU_STATS_P6(ale_unknown_bcast_bytes), 1373 GBENU_STATS_P6(ale_pol_match), 1374 GBENU_STATS_P6(ale_pol_match_red), 1375 GBENU_STATS_P6(ale_pol_match_yellow), 1376 GBENU_STATS_P6(tx_mem_protect_err), 1377 GBENU_STATS_P6(tx_pri0_drop), 1378 GBENU_STATS_P6(tx_pri1_drop), 1379 GBENU_STATS_P6(tx_pri2_drop), 1380 GBENU_STATS_P6(tx_pri3_drop), 1381 GBENU_STATS_P6(tx_pri4_drop), 1382 GBENU_STATS_P6(tx_pri5_drop), 1383 GBENU_STATS_P6(tx_pri6_drop), 1384 GBENU_STATS_P6(tx_pri7_drop), 1385 GBENU_STATS_P6(tx_pri0_drop_bcnt), 1386 GBENU_STATS_P6(tx_pri1_drop_bcnt), 1387 GBENU_STATS_P6(tx_pri2_drop_bcnt), 1388 GBENU_STATS_P6(tx_pri3_drop_bcnt), 1389 GBENU_STATS_P6(tx_pri4_drop_bcnt), 1390 GBENU_STATS_P6(tx_pri5_drop_bcnt), 1391 GBENU_STATS_P6(tx_pri6_drop_bcnt), 1392 GBENU_STATS_P6(tx_pri7_drop_bcnt), 1393 /* GBENU Module 7 */ 1394 GBENU_STATS_P7(rx_good_frames), 1395 GBENU_STATS_P7(rx_broadcast_frames), 1396 GBENU_STATS_P7(rx_multicast_frames), 1397 GBENU_STATS_P7(rx_pause_frames), 1398 GBENU_STATS_P7(rx_crc_errors), 1399 GBENU_STATS_P7(rx_align_code_errors), 1400 GBENU_STATS_P7(rx_oversized_frames), 1401 GBENU_STATS_P7(rx_jabber_frames), 1402 GBENU_STATS_P7(rx_undersized_frames), 1403 GBENU_STATS_P7(rx_fragments), 1404 GBENU_STATS_P7(ale_drop), 1405 GBENU_STATS_P7(ale_overrun_drop), 1406 GBENU_STATS_P7(rx_bytes), 1407 GBENU_STATS_P7(tx_good_frames), 1408 GBENU_STATS_P7(tx_broadcast_frames), 1409 GBENU_STATS_P7(tx_multicast_frames), 1410 GBENU_STATS_P7(tx_pause_frames), 1411 GBENU_STATS_P7(tx_deferred_frames), 1412 GBENU_STATS_P7(tx_collision_frames), 1413 GBENU_STATS_P7(tx_single_coll_frames), 1414 GBENU_STATS_P7(tx_mult_coll_frames), 1415 GBENU_STATS_P7(tx_excessive_collisions), 1416 GBENU_STATS_P7(tx_late_collisions), 1417 GBENU_STATS_P7(rx_ipg_error), 1418 GBENU_STATS_P7(tx_carrier_sense_errors), 1419 GBENU_STATS_P7(tx_bytes), 1420 GBENU_STATS_P7(tx_64B_frames), 1421 GBENU_STATS_P7(tx_65_to_127B_frames), 1422 GBENU_STATS_P7(tx_128_to_255B_frames), 1423 GBENU_STATS_P7(tx_256_to_511B_frames), 1424 GBENU_STATS_P7(tx_512_to_1023B_frames), 1425 GBENU_STATS_P7(tx_1024B_frames), 1426 GBENU_STATS_P7(net_bytes), 1427 GBENU_STATS_P7(rx_bottom_fifo_drop), 1428 GBENU_STATS_P7(rx_port_mask_drop), 1429 GBENU_STATS_P7(rx_top_fifo_drop), 1430 GBENU_STATS_P7(ale_rate_limit_drop), 1431 GBENU_STATS_P7(ale_vid_ingress_drop), 1432 GBENU_STATS_P7(ale_da_eq_sa_drop), 1433 GBENU_STATS_P7(ale_unknown_ucast), 1434 GBENU_STATS_P7(ale_unknown_ucast_bytes), 1435 GBENU_STATS_P7(ale_unknown_mcast), 1436 GBENU_STATS_P7(ale_unknown_mcast_bytes), 1437 GBENU_STATS_P7(ale_unknown_bcast), 1438 GBENU_STATS_P7(ale_unknown_bcast_bytes), 1439 GBENU_STATS_P7(ale_pol_match), 1440 GBENU_STATS_P7(ale_pol_match_red), 1441 GBENU_STATS_P7(ale_pol_match_yellow), 1442 GBENU_STATS_P7(tx_mem_protect_err), 1443 GBENU_STATS_P7(tx_pri0_drop), 1444 GBENU_STATS_P7(tx_pri1_drop), 1445 GBENU_STATS_P7(tx_pri2_drop), 1446 GBENU_STATS_P7(tx_pri3_drop), 1447 GBENU_STATS_P7(tx_pri4_drop), 1448 GBENU_STATS_P7(tx_pri5_drop), 1449 GBENU_STATS_P7(tx_pri6_drop), 1450 GBENU_STATS_P7(tx_pri7_drop), 1451 GBENU_STATS_P7(tx_pri0_drop_bcnt), 1452 GBENU_STATS_P7(tx_pri1_drop_bcnt), 1453 GBENU_STATS_P7(tx_pri2_drop_bcnt), 1454 GBENU_STATS_P7(tx_pri3_drop_bcnt), 1455 GBENU_STATS_P7(tx_pri4_drop_bcnt), 1456 GBENU_STATS_P7(tx_pri5_drop_bcnt), 1457 GBENU_STATS_P7(tx_pri6_drop_bcnt), 1458 GBENU_STATS_P7(tx_pri7_drop_bcnt), 1459 /* GBENU Module 8 */ 1460 GBENU_STATS_P8(rx_good_frames), 1461 GBENU_STATS_P8(rx_broadcast_frames), 1462 GBENU_STATS_P8(rx_multicast_frames), 1463 GBENU_STATS_P8(rx_pause_frames), 1464 GBENU_STATS_P8(rx_crc_errors), 1465 GBENU_STATS_P8(rx_align_code_errors), 1466 GBENU_STATS_P8(rx_oversized_frames), 1467 GBENU_STATS_P8(rx_jabber_frames), 1468 GBENU_STATS_P8(rx_undersized_frames), 1469 GBENU_STATS_P8(rx_fragments), 1470 GBENU_STATS_P8(ale_drop), 1471 GBENU_STATS_P8(ale_overrun_drop), 1472 GBENU_STATS_P8(rx_bytes), 1473 GBENU_STATS_P8(tx_good_frames), 1474 GBENU_STATS_P8(tx_broadcast_frames), 1475 GBENU_STATS_P8(tx_multicast_frames), 1476 GBENU_STATS_P8(tx_pause_frames), 1477 GBENU_STATS_P8(tx_deferred_frames), 1478 GBENU_STATS_P8(tx_collision_frames), 1479 GBENU_STATS_P8(tx_single_coll_frames), 1480 GBENU_STATS_P8(tx_mult_coll_frames), 1481 GBENU_STATS_P8(tx_excessive_collisions), 1482 GBENU_STATS_P8(tx_late_collisions), 1483 GBENU_STATS_P8(rx_ipg_error), 1484 GBENU_STATS_P8(tx_carrier_sense_errors), 1485 GBENU_STATS_P8(tx_bytes), 1486 GBENU_STATS_P8(tx_64B_frames), 1487 GBENU_STATS_P8(tx_65_to_127B_frames), 1488 GBENU_STATS_P8(tx_128_to_255B_frames), 1489 GBENU_STATS_P8(tx_256_to_511B_frames), 1490 GBENU_STATS_P8(tx_512_to_1023B_frames), 1491 GBENU_STATS_P8(tx_1024B_frames), 1492 GBENU_STATS_P8(net_bytes), 1493 GBENU_STATS_P8(rx_bottom_fifo_drop), 1494 GBENU_STATS_P8(rx_port_mask_drop), 1495 GBENU_STATS_P8(rx_top_fifo_drop), 1496 GBENU_STATS_P8(ale_rate_limit_drop), 1497 GBENU_STATS_P8(ale_vid_ingress_drop), 1498 GBENU_STATS_P8(ale_da_eq_sa_drop), 1499 GBENU_STATS_P8(ale_unknown_ucast), 1500 GBENU_STATS_P8(ale_unknown_ucast_bytes), 1501 GBENU_STATS_P8(ale_unknown_mcast), 1502 GBENU_STATS_P8(ale_unknown_mcast_bytes), 1503 GBENU_STATS_P8(ale_unknown_bcast), 1504 GBENU_STATS_P8(ale_unknown_bcast_bytes), 1505 GBENU_STATS_P8(ale_pol_match), 1506 GBENU_STATS_P8(ale_pol_match_red), 1507 GBENU_STATS_P8(ale_pol_match_yellow), 1508 GBENU_STATS_P8(tx_mem_protect_err), 1509 GBENU_STATS_P8(tx_pri0_drop), 1510 GBENU_STATS_P8(tx_pri1_drop), 1511 GBENU_STATS_P8(tx_pri2_drop), 1512 GBENU_STATS_P8(tx_pri3_drop), 1513 GBENU_STATS_P8(tx_pri4_drop), 1514 GBENU_STATS_P8(tx_pri5_drop), 1515 GBENU_STATS_P8(tx_pri6_drop), 1516 GBENU_STATS_P8(tx_pri7_drop), 1517 GBENU_STATS_P8(tx_pri0_drop_bcnt), 1518 GBENU_STATS_P8(tx_pri1_drop_bcnt), 1519 GBENU_STATS_P8(tx_pri2_drop_bcnt), 1520 GBENU_STATS_P8(tx_pri3_drop_bcnt), 1521 GBENU_STATS_P8(tx_pri4_drop_bcnt), 1522 GBENU_STATS_P8(tx_pri5_drop_bcnt), 1523 GBENU_STATS_P8(tx_pri6_drop_bcnt), 1524 GBENU_STATS_P8(tx_pri7_drop_bcnt), 1525 }; 1526 1527 #define XGBE_STATS0_INFO(field) \ 1528 { \ 1529 "GBE_0:"#field, XGBE_STATS0_MODULE, \ 1530 FIELD_SIZEOF(struct xgbe_hw_stats, field), \ 1531 offsetof(struct xgbe_hw_stats, field) \ 1532 } 1533 1534 #define XGBE_STATS1_INFO(field) \ 1535 { \ 1536 "GBE_1:"#field, XGBE_STATS1_MODULE, \ 1537 FIELD_SIZEOF(struct xgbe_hw_stats, field), \ 1538 offsetof(struct xgbe_hw_stats, field) \ 1539 } 1540 1541 #define XGBE_STATS2_INFO(field) \ 1542 { \ 1543 "GBE_2:"#field, XGBE_STATS2_MODULE, \ 1544 FIELD_SIZEOF(struct xgbe_hw_stats, field), \ 1545 offsetof(struct xgbe_hw_stats, field) \ 1546 } 1547 1548 static const struct netcp_ethtool_stat xgbe10_et_stats[] = { 1549 /* GBE module 0 */ 1550 XGBE_STATS0_INFO(rx_good_frames), 1551 XGBE_STATS0_INFO(rx_broadcast_frames), 1552 XGBE_STATS0_INFO(rx_multicast_frames), 1553 XGBE_STATS0_INFO(rx_oversized_frames), 1554 XGBE_STATS0_INFO(rx_undersized_frames), 1555 XGBE_STATS0_INFO(overrun_type4), 1556 XGBE_STATS0_INFO(overrun_type5), 1557 XGBE_STATS0_INFO(rx_bytes), 1558 XGBE_STATS0_INFO(tx_good_frames), 1559 XGBE_STATS0_INFO(tx_broadcast_frames), 1560 XGBE_STATS0_INFO(tx_multicast_frames), 1561 XGBE_STATS0_INFO(tx_bytes), 1562 XGBE_STATS0_INFO(tx_64byte_frames), 1563 XGBE_STATS0_INFO(tx_65_to_127byte_frames), 1564 XGBE_STATS0_INFO(tx_128_to_255byte_frames), 1565 XGBE_STATS0_INFO(tx_256_to_511byte_frames), 1566 XGBE_STATS0_INFO(tx_512_to_1023byte_frames), 1567 XGBE_STATS0_INFO(tx_1024byte_frames), 1568 XGBE_STATS0_INFO(net_bytes), 1569 XGBE_STATS0_INFO(rx_sof_overruns), 1570 XGBE_STATS0_INFO(rx_mof_overruns), 1571 XGBE_STATS0_INFO(rx_dma_overruns), 1572 /* XGBE module 1 */ 1573 XGBE_STATS1_INFO(rx_good_frames), 1574 XGBE_STATS1_INFO(rx_broadcast_frames), 1575 XGBE_STATS1_INFO(rx_multicast_frames), 1576 XGBE_STATS1_INFO(rx_pause_frames), 1577 XGBE_STATS1_INFO(rx_crc_errors), 1578 XGBE_STATS1_INFO(rx_align_code_errors), 1579 XGBE_STATS1_INFO(rx_oversized_frames), 1580 XGBE_STATS1_INFO(rx_jabber_frames), 1581 XGBE_STATS1_INFO(rx_undersized_frames), 1582 XGBE_STATS1_INFO(rx_fragments), 1583 XGBE_STATS1_INFO(overrun_type4), 1584 XGBE_STATS1_INFO(overrun_type5), 1585 XGBE_STATS1_INFO(rx_bytes), 1586 XGBE_STATS1_INFO(tx_good_frames), 1587 XGBE_STATS1_INFO(tx_broadcast_frames), 1588 XGBE_STATS1_INFO(tx_multicast_frames), 1589 XGBE_STATS1_INFO(tx_pause_frames), 1590 XGBE_STATS1_INFO(tx_deferred_frames), 1591 XGBE_STATS1_INFO(tx_collision_frames), 1592 XGBE_STATS1_INFO(tx_single_coll_frames), 1593 XGBE_STATS1_INFO(tx_mult_coll_frames), 1594 XGBE_STATS1_INFO(tx_excessive_collisions), 1595 XGBE_STATS1_INFO(tx_late_collisions), 1596 XGBE_STATS1_INFO(tx_underrun), 1597 XGBE_STATS1_INFO(tx_carrier_sense_errors), 1598 XGBE_STATS1_INFO(tx_bytes), 1599 XGBE_STATS1_INFO(tx_64byte_frames), 1600 XGBE_STATS1_INFO(tx_65_to_127byte_frames), 1601 XGBE_STATS1_INFO(tx_128_to_255byte_frames), 1602 XGBE_STATS1_INFO(tx_256_to_511byte_frames), 1603 XGBE_STATS1_INFO(tx_512_to_1023byte_frames), 1604 XGBE_STATS1_INFO(tx_1024byte_frames), 1605 XGBE_STATS1_INFO(net_bytes), 1606 XGBE_STATS1_INFO(rx_sof_overruns), 1607 XGBE_STATS1_INFO(rx_mof_overruns), 1608 XGBE_STATS1_INFO(rx_dma_overruns), 1609 /* XGBE module 2 */ 1610 XGBE_STATS2_INFO(rx_good_frames), 1611 XGBE_STATS2_INFO(rx_broadcast_frames), 1612 XGBE_STATS2_INFO(rx_multicast_frames), 1613 XGBE_STATS2_INFO(rx_pause_frames), 1614 XGBE_STATS2_INFO(rx_crc_errors), 1615 XGBE_STATS2_INFO(rx_align_code_errors), 1616 XGBE_STATS2_INFO(rx_oversized_frames), 1617 XGBE_STATS2_INFO(rx_jabber_frames), 1618 XGBE_STATS2_INFO(rx_undersized_frames), 1619 XGBE_STATS2_INFO(rx_fragments), 1620 XGBE_STATS2_INFO(overrun_type4), 1621 XGBE_STATS2_INFO(overrun_type5), 1622 XGBE_STATS2_INFO(rx_bytes), 1623 XGBE_STATS2_INFO(tx_good_frames), 1624 XGBE_STATS2_INFO(tx_broadcast_frames), 1625 XGBE_STATS2_INFO(tx_multicast_frames), 1626 XGBE_STATS2_INFO(tx_pause_frames), 1627 XGBE_STATS2_INFO(tx_deferred_frames), 1628 XGBE_STATS2_INFO(tx_collision_frames), 1629 XGBE_STATS2_INFO(tx_single_coll_frames), 1630 XGBE_STATS2_INFO(tx_mult_coll_frames), 1631 XGBE_STATS2_INFO(tx_excessive_collisions), 1632 XGBE_STATS2_INFO(tx_late_collisions), 1633 XGBE_STATS2_INFO(tx_underrun), 1634 XGBE_STATS2_INFO(tx_carrier_sense_errors), 1635 XGBE_STATS2_INFO(tx_bytes), 1636 XGBE_STATS2_INFO(tx_64byte_frames), 1637 XGBE_STATS2_INFO(tx_65_to_127byte_frames), 1638 XGBE_STATS2_INFO(tx_128_to_255byte_frames), 1639 XGBE_STATS2_INFO(tx_256_to_511byte_frames), 1640 XGBE_STATS2_INFO(tx_512_to_1023byte_frames), 1641 XGBE_STATS2_INFO(tx_1024byte_frames), 1642 XGBE_STATS2_INFO(net_bytes), 1643 XGBE_STATS2_INFO(rx_sof_overruns), 1644 XGBE_STATS2_INFO(rx_mof_overruns), 1645 XGBE_STATS2_INFO(rx_dma_overruns), 1646 }; 1647 1648 #define for_each_intf(i, priv) \ 1649 list_for_each_entry((i), &(priv)->gbe_intf_head, gbe_intf_list) 1650 1651 #define for_each_sec_slave(slave, priv) \ 1652 list_for_each_entry((slave), &(priv)->secondary_slaves, slave_list) 1653 1654 #define first_sec_slave(priv) \ 1655 list_first_entry(&priv->secondary_slaves, \ 1656 struct gbe_slave, slave_list) 1657 1658 static void keystone_get_drvinfo(struct net_device *ndev, 1659 struct ethtool_drvinfo *info) 1660 { 1661 strncpy(info->driver, NETCP_DRIVER_NAME, sizeof(info->driver)); 1662 strncpy(info->version, NETCP_DRIVER_VERSION, sizeof(info->version)); 1663 } 1664 1665 static u32 keystone_get_msglevel(struct net_device *ndev) 1666 { 1667 struct netcp_intf *netcp = netdev_priv(ndev); 1668 1669 return netcp->msg_enable; 1670 } 1671 1672 static void keystone_set_msglevel(struct net_device *ndev, u32 value) 1673 { 1674 struct netcp_intf *netcp = netdev_priv(ndev); 1675 1676 netcp->msg_enable = value; 1677 } 1678 1679 static void keystone_get_stat_strings(struct net_device *ndev, 1680 uint32_t stringset, uint8_t *data) 1681 { 1682 struct netcp_intf *netcp = netdev_priv(ndev); 1683 struct gbe_intf *gbe_intf; 1684 struct gbe_priv *gbe_dev; 1685 int i; 1686 1687 gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp); 1688 if (!gbe_intf) 1689 return; 1690 gbe_dev = gbe_intf->gbe_dev; 1691 1692 switch (stringset) { 1693 case ETH_SS_STATS: 1694 for (i = 0; i < gbe_dev->num_et_stats; i++) { 1695 memcpy(data, gbe_dev->et_stats[i].desc, 1696 ETH_GSTRING_LEN); 1697 data += ETH_GSTRING_LEN; 1698 } 1699 break; 1700 case ETH_SS_TEST: 1701 break; 1702 } 1703 } 1704 1705 static int keystone_get_sset_count(struct net_device *ndev, int stringset) 1706 { 1707 struct netcp_intf *netcp = netdev_priv(ndev); 1708 struct gbe_intf *gbe_intf; 1709 struct gbe_priv *gbe_dev; 1710 1711 gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp); 1712 if (!gbe_intf) 1713 return -EINVAL; 1714 gbe_dev = gbe_intf->gbe_dev; 1715 1716 switch (stringset) { 1717 case ETH_SS_TEST: 1718 return 0; 1719 case ETH_SS_STATS: 1720 return gbe_dev->num_et_stats; 1721 default: 1722 return -EINVAL; 1723 } 1724 } 1725 1726 static void gbe_reset_mod_stats(struct gbe_priv *gbe_dev, int stats_mod) 1727 { 1728 void __iomem *base = gbe_dev->hw_stats_regs[stats_mod]; 1729 u32 __iomem *p_stats_entry; 1730 int i; 1731 1732 for (i = 0; i < gbe_dev->num_et_stats; i++) { 1733 if (gbe_dev->et_stats[i].type == stats_mod) { 1734 p_stats_entry = base + gbe_dev->et_stats[i].offset; 1735 gbe_dev->hw_stats[i] = 0; 1736 gbe_dev->hw_stats_prev[i] = readl(p_stats_entry); 1737 } 1738 } 1739 } 1740 1741 static inline void gbe_update_hw_stats_entry(struct gbe_priv *gbe_dev, 1742 int et_stats_entry) 1743 { 1744 void __iomem *base = NULL; 1745 u32 __iomem *p_stats_entry; 1746 u32 curr, delta; 1747 1748 /* The hw_stats_regs pointers are already 1749 * properly set to point to the right base: 1750 */ 1751 base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[et_stats_entry].type]; 1752 p_stats_entry = base + gbe_dev->et_stats[et_stats_entry].offset; 1753 curr = readl(p_stats_entry); 1754 delta = curr - gbe_dev->hw_stats_prev[et_stats_entry]; 1755 gbe_dev->hw_stats_prev[et_stats_entry] = curr; 1756 gbe_dev->hw_stats[et_stats_entry] += delta; 1757 } 1758 1759 static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data) 1760 { 1761 int i; 1762 1763 for (i = 0; i < gbe_dev->num_et_stats; i++) { 1764 gbe_update_hw_stats_entry(gbe_dev, i); 1765 1766 if (data) 1767 data[i] = gbe_dev->hw_stats[i]; 1768 } 1769 } 1770 1771 static inline void gbe_stats_mod_visible_ver14(struct gbe_priv *gbe_dev, 1772 int stats_mod) 1773 { 1774 u32 val; 1775 1776 val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en)); 1777 1778 switch (stats_mod) { 1779 case GBE_STATSA_MODULE: 1780 case GBE_STATSB_MODULE: 1781 val &= ~GBE_STATS_CD_SEL; 1782 break; 1783 case GBE_STATSC_MODULE: 1784 case GBE_STATSD_MODULE: 1785 val |= GBE_STATS_CD_SEL; 1786 break; 1787 default: 1788 return; 1789 } 1790 1791 /* make the stat module visible */ 1792 writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en)); 1793 } 1794 1795 static void gbe_reset_mod_stats_ver14(struct gbe_priv *gbe_dev, int stats_mod) 1796 { 1797 gbe_stats_mod_visible_ver14(gbe_dev, stats_mod); 1798 gbe_reset_mod_stats(gbe_dev, stats_mod); 1799 } 1800 1801 static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data) 1802 { 1803 u32 half_num_et_stats = (gbe_dev->num_et_stats / 2); 1804 int et_entry, j, pair; 1805 1806 for (pair = 0; pair < 2; pair++) { 1807 gbe_stats_mod_visible_ver14(gbe_dev, (pair ? 1808 GBE_STATSC_MODULE : 1809 GBE_STATSA_MODULE)); 1810 1811 for (j = 0; j < half_num_et_stats; j++) { 1812 et_entry = pair * half_num_et_stats + j; 1813 gbe_update_hw_stats_entry(gbe_dev, et_entry); 1814 1815 if (data) 1816 data[et_entry] = gbe_dev->hw_stats[et_entry]; 1817 } 1818 } 1819 } 1820 1821 static void keystone_get_ethtool_stats(struct net_device *ndev, 1822 struct ethtool_stats *stats, 1823 uint64_t *data) 1824 { 1825 struct netcp_intf *netcp = netdev_priv(ndev); 1826 struct gbe_intf *gbe_intf; 1827 struct gbe_priv *gbe_dev; 1828 1829 gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp); 1830 if (!gbe_intf) 1831 return; 1832 1833 gbe_dev = gbe_intf->gbe_dev; 1834 spin_lock_bh(&gbe_dev->hw_stats_lock); 1835 if (gbe_dev->ss_version == GBE_SS_VERSION_14) 1836 gbe_update_stats_ver14(gbe_dev, data); 1837 else 1838 gbe_update_stats(gbe_dev, data); 1839 spin_unlock_bh(&gbe_dev->hw_stats_lock); 1840 } 1841 1842 static int keystone_get_settings(struct net_device *ndev, 1843 struct ethtool_cmd *cmd) 1844 { 1845 struct netcp_intf *netcp = netdev_priv(ndev); 1846 struct phy_device *phy = ndev->phydev; 1847 struct gbe_intf *gbe_intf; 1848 int ret; 1849 1850 if (!phy) 1851 return -EINVAL; 1852 1853 gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp); 1854 if (!gbe_intf) 1855 return -EINVAL; 1856 1857 if (!gbe_intf->slave) 1858 return -EINVAL; 1859 1860 ret = phy_ethtool_gset(phy, cmd); 1861 if (!ret) 1862 cmd->port = gbe_intf->slave->phy_port_t; 1863 1864 return ret; 1865 } 1866 1867 static int keystone_set_settings(struct net_device *ndev, 1868 struct ethtool_cmd *cmd) 1869 { 1870 struct netcp_intf *netcp = netdev_priv(ndev); 1871 struct phy_device *phy = ndev->phydev; 1872 struct gbe_intf *gbe_intf; 1873 u32 features = cmd->advertising & cmd->supported; 1874 1875 if (!phy) 1876 return -EINVAL; 1877 1878 gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp); 1879 if (!gbe_intf) 1880 return -EINVAL; 1881 1882 if (!gbe_intf->slave) 1883 return -EINVAL; 1884 1885 if (cmd->port != gbe_intf->slave->phy_port_t) { 1886 if ((cmd->port == PORT_TP) && !(features & ADVERTISED_TP)) 1887 return -EINVAL; 1888 1889 if ((cmd->port == PORT_AUI) && !(features & ADVERTISED_AUI)) 1890 return -EINVAL; 1891 1892 if ((cmd->port == PORT_BNC) && !(features & ADVERTISED_BNC)) 1893 return -EINVAL; 1894 1895 if ((cmd->port == PORT_MII) && !(features & ADVERTISED_MII)) 1896 return -EINVAL; 1897 1898 if ((cmd->port == PORT_FIBRE) && !(features & ADVERTISED_FIBRE)) 1899 return -EINVAL; 1900 } 1901 1902 gbe_intf->slave->phy_port_t = cmd->port; 1903 return phy_ethtool_sset(phy, cmd); 1904 } 1905 1906 static const struct ethtool_ops keystone_ethtool_ops = { 1907 .get_drvinfo = keystone_get_drvinfo, 1908 .get_link = ethtool_op_get_link, 1909 .get_msglevel = keystone_get_msglevel, 1910 .set_msglevel = keystone_set_msglevel, 1911 .get_strings = keystone_get_stat_strings, 1912 .get_sset_count = keystone_get_sset_count, 1913 .get_ethtool_stats = keystone_get_ethtool_stats, 1914 .get_settings = keystone_get_settings, 1915 .set_settings = keystone_set_settings, 1916 }; 1917 1918 #define mac_hi(mac) (((mac)[0] << 0) | ((mac)[1] << 8) | \ 1919 ((mac)[2] << 16) | ((mac)[3] << 24)) 1920 #define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] << 8)) 1921 1922 static void gbe_set_slave_mac(struct gbe_slave *slave, 1923 struct gbe_intf *gbe_intf) 1924 { 1925 struct net_device *ndev = gbe_intf->ndev; 1926 1927 writel(mac_hi(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_hi)); 1928 writel(mac_lo(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_lo)); 1929 } 1930 1931 static int gbe_get_slave_port(struct gbe_priv *priv, u32 slave_num) 1932 { 1933 if (priv->host_port == 0) 1934 return slave_num + 1; 1935 1936 return slave_num; 1937 } 1938 1939 static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev, 1940 struct net_device *ndev, 1941 struct gbe_slave *slave, 1942 int up) 1943 { 1944 struct phy_device *phy = slave->phy; 1945 u32 mac_control = 0; 1946 1947 if (up) { 1948 mac_control = slave->mac_control; 1949 if (phy && (phy->speed == SPEED_1000)) { 1950 mac_control |= MACSL_GIG_MODE; 1951 mac_control &= ~MACSL_XGIG_MODE; 1952 } else if (phy && (phy->speed == SPEED_10000)) { 1953 mac_control |= MACSL_XGIG_MODE; 1954 mac_control &= ~MACSL_GIG_MODE; 1955 } 1956 1957 writel(mac_control, GBE_REG_ADDR(slave, emac_regs, 1958 mac_control)); 1959 1960 cpsw_ale_control_set(gbe_dev->ale, slave->port_num, 1961 ALE_PORT_STATE, 1962 ALE_PORT_STATE_FORWARD); 1963 1964 if (ndev && slave->open && 1965 slave->link_interface != SGMII_LINK_MAC_PHY && 1966 slave->link_interface != XGMII_LINK_MAC_PHY) 1967 netif_carrier_on(ndev); 1968 } else { 1969 writel(mac_control, GBE_REG_ADDR(slave, emac_regs, 1970 mac_control)); 1971 cpsw_ale_control_set(gbe_dev->ale, slave->port_num, 1972 ALE_PORT_STATE, 1973 ALE_PORT_STATE_DISABLE); 1974 if (ndev && 1975 slave->link_interface != SGMII_LINK_MAC_PHY && 1976 slave->link_interface != XGMII_LINK_MAC_PHY) 1977 netif_carrier_off(ndev); 1978 } 1979 1980 if (phy) 1981 phy_print_status(phy); 1982 } 1983 1984 static bool gbe_phy_link_status(struct gbe_slave *slave) 1985 { 1986 return !slave->phy || slave->phy->link; 1987 } 1988 1989 static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev, 1990 struct gbe_slave *slave, 1991 struct net_device *ndev) 1992 { 1993 int sp = slave->slave_num; 1994 int phy_link_state, sgmii_link_state = 1, link_state; 1995 1996 if (!slave->open) 1997 return; 1998 1999 if (!SLAVE_LINK_IS_XGMII(slave)) { 2000 if (gbe_dev->ss_version == GBE_SS_VERSION_14) 2001 sgmii_link_state = 2002 netcp_sgmii_get_port_link(SGMII_BASE(sp), sp); 2003 else 2004 sgmii_link_state = 2005 netcp_sgmii_get_port_link( 2006 gbe_dev->sgmii_port_regs, sp); 2007 } 2008 2009 phy_link_state = gbe_phy_link_status(slave); 2010 link_state = phy_link_state & sgmii_link_state; 2011 2012 if (atomic_xchg(&slave->link_state, link_state) != link_state) 2013 netcp_ethss_link_state_action(gbe_dev, ndev, slave, 2014 link_state); 2015 } 2016 2017 static void xgbe_adjust_link(struct net_device *ndev) 2018 { 2019 struct netcp_intf *netcp = netdev_priv(ndev); 2020 struct gbe_intf *gbe_intf; 2021 2022 gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp); 2023 if (!gbe_intf) 2024 return; 2025 2026 netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave, 2027 ndev); 2028 } 2029 2030 static void gbe_adjust_link(struct net_device *ndev) 2031 { 2032 struct netcp_intf *netcp = netdev_priv(ndev); 2033 struct gbe_intf *gbe_intf; 2034 2035 gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp); 2036 if (!gbe_intf) 2037 return; 2038 2039 netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave, 2040 ndev); 2041 } 2042 2043 static void gbe_adjust_link_sec_slaves(struct net_device *ndev) 2044 { 2045 struct gbe_priv *gbe_dev = netdev_priv(ndev); 2046 struct gbe_slave *slave; 2047 2048 for_each_sec_slave(slave, gbe_dev) 2049 netcp_ethss_update_link_state(gbe_dev, slave, NULL); 2050 } 2051 2052 /* Reset EMAC 2053 * Soft reset is set and polled until clear, or until a timeout occurs 2054 */ 2055 static int gbe_port_reset(struct gbe_slave *slave) 2056 { 2057 u32 i, v; 2058 2059 /* Set the soft reset bit */ 2060 writel(SOFT_RESET, GBE_REG_ADDR(slave, emac_regs, soft_reset)); 2061 2062 /* Wait for the bit to clear */ 2063 for (i = 0; i < DEVICE_EMACSL_RESET_POLL_COUNT; i++) { 2064 v = readl(GBE_REG_ADDR(slave, emac_regs, soft_reset)); 2065 if ((v & SOFT_RESET_MASK) != SOFT_RESET) 2066 return 0; 2067 } 2068 2069 /* Timeout on the reset */ 2070 return GMACSL_RET_WARN_RESET_INCOMPLETE; 2071 } 2072 2073 /* Configure EMAC */ 2074 static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave, 2075 int max_rx_len) 2076 { 2077 void __iomem *rx_maxlen_reg; 2078 u32 xgmii_mode; 2079 2080 if (max_rx_len > NETCP_MAX_FRAME_SIZE) 2081 max_rx_len = NETCP_MAX_FRAME_SIZE; 2082 2083 /* Enable correct MII mode at SS level */ 2084 if ((gbe_dev->ss_version == XGBE_SS_VERSION_10) && 2085 (slave->link_interface >= XGMII_LINK_MAC_PHY)) { 2086 xgmii_mode = readl(GBE_REG_ADDR(gbe_dev, ss_regs, control)); 2087 xgmii_mode |= (1 << slave->slave_num); 2088 writel(xgmii_mode, GBE_REG_ADDR(gbe_dev, ss_regs, control)); 2089 } 2090 2091 if (IS_SS_ID_MU(gbe_dev)) 2092 rx_maxlen_reg = GBE_REG_ADDR(slave, port_regs, rx_maxlen); 2093 else 2094 rx_maxlen_reg = GBE_REG_ADDR(slave, emac_regs, rx_maxlen); 2095 2096 writel(max_rx_len, rx_maxlen_reg); 2097 writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control)); 2098 } 2099 2100 static void gbe_sgmii_rtreset(struct gbe_priv *priv, 2101 struct gbe_slave *slave, bool set) 2102 { 2103 void __iomem *sgmii_port_regs; 2104 2105 if (SLAVE_LINK_IS_XGMII(slave)) 2106 return; 2107 2108 if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2)) 2109 sgmii_port_regs = priv->sgmii_port34_regs; 2110 else 2111 sgmii_port_regs = priv->sgmii_port_regs; 2112 2113 netcp_sgmii_rtreset(sgmii_port_regs, slave->slave_num, set); 2114 } 2115 2116 static void gbe_slave_stop(struct gbe_intf *intf) 2117 { 2118 struct gbe_priv *gbe_dev = intf->gbe_dev; 2119 struct gbe_slave *slave = intf->slave; 2120 2121 gbe_sgmii_rtreset(gbe_dev, slave, true); 2122 gbe_port_reset(slave); 2123 /* Disable forwarding */ 2124 cpsw_ale_control_set(gbe_dev->ale, slave->port_num, 2125 ALE_PORT_STATE, ALE_PORT_STATE_DISABLE); 2126 cpsw_ale_del_mcast(gbe_dev->ale, intf->ndev->broadcast, 2127 1 << slave->port_num, 0, 0); 2128 2129 if (!slave->phy) 2130 return; 2131 2132 phy_stop(slave->phy); 2133 phy_disconnect(slave->phy); 2134 slave->phy = NULL; 2135 } 2136 2137 static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave) 2138 { 2139 void __iomem *sgmii_port_regs; 2140 2141 sgmii_port_regs = priv->sgmii_port_regs; 2142 if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2)) 2143 sgmii_port_regs = priv->sgmii_port34_regs; 2144 2145 if (!SLAVE_LINK_IS_XGMII(slave)) { 2146 netcp_sgmii_reset(sgmii_port_regs, slave->slave_num); 2147 netcp_sgmii_config(sgmii_port_regs, slave->slave_num, 2148 slave->link_interface); 2149 } 2150 } 2151 2152 static int gbe_slave_open(struct gbe_intf *gbe_intf) 2153 { 2154 struct gbe_priv *priv = gbe_intf->gbe_dev; 2155 struct gbe_slave *slave = gbe_intf->slave; 2156 phy_interface_t phy_mode; 2157 bool has_phy = false; 2158 2159 void (*hndlr)(struct net_device *) = gbe_adjust_link; 2160 2161 gbe_sgmii_config(priv, slave); 2162 gbe_port_reset(slave); 2163 gbe_sgmii_rtreset(priv, slave, false); 2164 gbe_port_config(priv, slave, priv->rx_packet_max); 2165 gbe_set_slave_mac(slave, gbe_intf); 2166 /* enable forwarding */ 2167 cpsw_ale_control_set(priv->ale, slave->port_num, 2168 ALE_PORT_STATE, ALE_PORT_STATE_FORWARD); 2169 cpsw_ale_add_mcast(priv->ale, gbe_intf->ndev->broadcast, 2170 1 << slave->port_num, 0, 0, ALE_MCAST_FWD_2); 2171 2172 if (slave->link_interface == SGMII_LINK_MAC_PHY) { 2173 has_phy = true; 2174 phy_mode = PHY_INTERFACE_MODE_SGMII; 2175 slave->phy_port_t = PORT_MII; 2176 } else if (slave->link_interface == XGMII_LINK_MAC_PHY) { 2177 has_phy = true; 2178 phy_mode = PHY_INTERFACE_MODE_NA; 2179 slave->phy_port_t = PORT_FIBRE; 2180 } 2181 2182 if (has_phy) { 2183 if (priv->ss_version == XGBE_SS_VERSION_10) 2184 hndlr = xgbe_adjust_link; 2185 2186 slave->phy = of_phy_connect(gbe_intf->ndev, 2187 slave->phy_node, 2188 hndlr, 0, 2189 phy_mode); 2190 if (!slave->phy) { 2191 dev_err(priv->dev, "phy not found on slave %d\n", 2192 slave->slave_num); 2193 return -ENODEV; 2194 } 2195 dev_dbg(priv->dev, "phy found: id is: 0x%s\n", 2196 dev_name(&slave->phy->dev)); 2197 phy_start(slave->phy); 2198 phy_read_status(slave->phy); 2199 } 2200 return 0; 2201 } 2202 2203 static void gbe_init_host_port(struct gbe_priv *priv) 2204 { 2205 int bypass_en = 1; 2206 2207 /* Host Tx Pri */ 2208 if (IS_SS_ID_NU(priv)) 2209 writel(HOST_TX_PRI_MAP_DEFAULT, 2210 GBE_REG_ADDR(priv, host_port_regs, tx_pri_map)); 2211 2212 /* Max length register */ 2213 writel(NETCP_MAX_FRAME_SIZE, GBE_REG_ADDR(priv, host_port_regs, 2214 rx_maxlen)); 2215 2216 cpsw_ale_start(priv->ale); 2217 2218 if (priv->enable_ale) 2219 bypass_en = 0; 2220 2221 cpsw_ale_control_set(priv->ale, 0, ALE_BYPASS, bypass_en); 2222 2223 cpsw_ale_control_set(priv->ale, 0, ALE_NO_PORT_VLAN, 1); 2224 2225 cpsw_ale_control_set(priv->ale, priv->host_port, 2226 ALE_PORT_STATE, ALE_PORT_STATE_FORWARD); 2227 2228 cpsw_ale_control_set(priv->ale, 0, 2229 ALE_PORT_UNKNOWN_VLAN_MEMBER, 2230 GBE_PORT_MASK(priv->ale_ports)); 2231 2232 cpsw_ale_control_set(priv->ale, 0, 2233 ALE_PORT_UNKNOWN_MCAST_FLOOD, 2234 GBE_PORT_MASK(priv->ale_ports - 1)); 2235 2236 cpsw_ale_control_set(priv->ale, 0, 2237 ALE_PORT_UNKNOWN_REG_MCAST_FLOOD, 2238 GBE_PORT_MASK(priv->ale_ports)); 2239 2240 cpsw_ale_control_set(priv->ale, 0, 2241 ALE_PORT_UNTAGGED_EGRESS, 2242 GBE_PORT_MASK(priv->ale_ports)); 2243 } 2244 2245 static void gbe_add_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr) 2246 { 2247 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev; 2248 u16 vlan_id; 2249 2250 cpsw_ale_add_mcast(gbe_dev->ale, addr, 2251 GBE_PORT_MASK(gbe_dev->ale_ports), 0, 0, 2252 ALE_MCAST_FWD_2); 2253 for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) { 2254 cpsw_ale_add_mcast(gbe_dev->ale, addr, 2255 GBE_PORT_MASK(gbe_dev->ale_ports), 2256 ALE_VLAN, vlan_id, ALE_MCAST_FWD_2); 2257 } 2258 } 2259 2260 static void gbe_add_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr) 2261 { 2262 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev; 2263 u16 vlan_id; 2264 2265 cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0); 2266 2267 for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) 2268 cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 2269 ALE_VLAN, vlan_id); 2270 } 2271 2272 static void gbe_del_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr) 2273 { 2274 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev; 2275 u16 vlan_id; 2276 2277 cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, 0, 0); 2278 2279 for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) { 2280 cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, ALE_VLAN, vlan_id); 2281 } 2282 } 2283 2284 static void gbe_del_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr) 2285 { 2286 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev; 2287 u16 vlan_id; 2288 2289 cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0); 2290 2291 for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) { 2292 cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 2293 ALE_VLAN, vlan_id); 2294 } 2295 } 2296 2297 static int gbe_add_addr(void *intf_priv, struct netcp_addr *naddr) 2298 { 2299 struct gbe_intf *gbe_intf = intf_priv; 2300 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev; 2301 2302 dev_dbg(gbe_dev->dev, "ethss adding address %pM, type %d\n", 2303 naddr->addr, naddr->type); 2304 2305 switch (naddr->type) { 2306 case ADDR_MCAST: 2307 case ADDR_BCAST: 2308 gbe_add_mcast_addr(gbe_intf, naddr->addr); 2309 break; 2310 case ADDR_UCAST: 2311 case ADDR_DEV: 2312 gbe_add_ucast_addr(gbe_intf, naddr->addr); 2313 break; 2314 case ADDR_ANY: 2315 /* nothing to do for promiscuous */ 2316 default: 2317 break; 2318 } 2319 2320 return 0; 2321 } 2322 2323 static int gbe_del_addr(void *intf_priv, struct netcp_addr *naddr) 2324 { 2325 struct gbe_intf *gbe_intf = intf_priv; 2326 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev; 2327 2328 dev_dbg(gbe_dev->dev, "ethss deleting address %pM, type %d\n", 2329 naddr->addr, naddr->type); 2330 2331 switch (naddr->type) { 2332 case ADDR_MCAST: 2333 case ADDR_BCAST: 2334 gbe_del_mcast_addr(gbe_intf, naddr->addr); 2335 break; 2336 case ADDR_UCAST: 2337 case ADDR_DEV: 2338 gbe_del_ucast_addr(gbe_intf, naddr->addr); 2339 break; 2340 case ADDR_ANY: 2341 /* nothing to do for promiscuous */ 2342 default: 2343 break; 2344 } 2345 2346 return 0; 2347 } 2348 2349 static int gbe_add_vid(void *intf_priv, int vid) 2350 { 2351 struct gbe_intf *gbe_intf = intf_priv; 2352 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev; 2353 2354 set_bit(vid, gbe_intf->active_vlans); 2355 2356 cpsw_ale_add_vlan(gbe_dev->ale, vid, 2357 GBE_PORT_MASK(gbe_dev->ale_ports), 2358 GBE_MASK_NO_PORTS, 2359 GBE_PORT_MASK(gbe_dev->ale_ports), 2360 GBE_PORT_MASK(gbe_dev->ale_ports - 1)); 2361 2362 return 0; 2363 } 2364 2365 static int gbe_del_vid(void *intf_priv, int vid) 2366 { 2367 struct gbe_intf *gbe_intf = intf_priv; 2368 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev; 2369 2370 cpsw_ale_del_vlan(gbe_dev->ale, vid, 0); 2371 clear_bit(vid, gbe_intf->active_vlans); 2372 return 0; 2373 } 2374 2375 static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd) 2376 { 2377 struct gbe_intf *gbe_intf = intf_priv; 2378 struct phy_device *phy = gbe_intf->slave->phy; 2379 int ret = -EOPNOTSUPP; 2380 2381 if (phy) 2382 ret = phy_mii_ioctl(phy, req, cmd); 2383 2384 return ret; 2385 } 2386 2387 static void netcp_ethss_timer(unsigned long arg) 2388 { 2389 struct gbe_priv *gbe_dev = (struct gbe_priv *)arg; 2390 struct gbe_intf *gbe_intf; 2391 struct gbe_slave *slave; 2392 2393 /* Check & update SGMII link state of interfaces */ 2394 for_each_intf(gbe_intf, gbe_dev) { 2395 if (!gbe_intf->slave->open) 2396 continue; 2397 netcp_ethss_update_link_state(gbe_dev, gbe_intf->slave, 2398 gbe_intf->ndev); 2399 } 2400 2401 /* Check & update SGMII link state of secondary ports */ 2402 for_each_sec_slave(slave, gbe_dev) { 2403 netcp_ethss_update_link_state(gbe_dev, slave, NULL); 2404 } 2405 2406 /* A timer runs as a BH, no need to block them */ 2407 spin_lock(&gbe_dev->hw_stats_lock); 2408 2409 if (gbe_dev->ss_version == GBE_SS_VERSION_14) 2410 gbe_update_stats_ver14(gbe_dev, NULL); 2411 else 2412 gbe_update_stats(gbe_dev, NULL); 2413 2414 spin_unlock(&gbe_dev->hw_stats_lock); 2415 2416 gbe_dev->timer.expires = jiffies + GBE_TIMER_INTERVAL; 2417 add_timer(&gbe_dev->timer); 2418 } 2419 2420 static int gbe_tx_hook(int order, void *data, struct netcp_packet *p_info) 2421 { 2422 struct gbe_intf *gbe_intf = data; 2423 2424 p_info->tx_pipe = &gbe_intf->tx_pipe; 2425 return 0; 2426 } 2427 2428 static int gbe_open(void *intf_priv, struct net_device *ndev) 2429 { 2430 struct gbe_intf *gbe_intf = intf_priv; 2431 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev; 2432 struct netcp_intf *netcp = netdev_priv(ndev); 2433 struct gbe_slave *slave = gbe_intf->slave; 2434 int port_num = slave->port_num; 2435 u32 reg; 2436 int ret; 2437 2438 reg = readl(GBE_REG_ADDR(gbe_dev, switch_regs, id_ver)); 2439 dev_dbg(gbe_dev->dev, "initializing gbe version %d.%d (%d) GBE identification value 0x%x\n", 2440 GBE_MAJOR_VERSION(reg), GBE_MINOR_VERSION(reg), 2441 GBE_RTL_VERSION(reg), GBE_IDENT(reg)); 2442 2443 /* For 10G and on NetCP 1.5, use directed to port */ 2444 if ((gbe_dev->ss_version == XGBE_SS_VERSION_10) || IS_SS_ID_MU(gbe_dev)) 2445 gbe_intf->tx_pipe.flags = SWITCH_TO_PORT_IN_TAGINFO; 2446 2447 if (gbe_dev->enable_ale) 2448 gbe_intf->tx_pipe.switch_to_port = 0; 2449 else 2450 gbe_intf->tx_pipe.switch_to_port = port_num; 2451 2452 dev_dbg(gbe_dev->dev, 2453 "opened TX channel %s: %p with to port %d, flags %d\n", 2454 gbe_intf->tx_pipe.dma_chan_name, 2455 gbe_intf->tx_pipe.dma_channel, 2456 gbe_intf->tx_pipe.switch_to_port, 2457 gbe_intf->tx_pipe.flags); 2458 2459 gbe_slave_stop(gbe_intf); 2460 2461 /* disable priority elevation and enable statistics on all ports */ 2462 writel(0, GBE_REG_ADDR(gbe_dev, switch_regs, ptype)); 2463 2464 /* Control register */ 2465 writel(GBE_CTL_P0_ENABLE, GBE_REG_ADDR(gbe_dev, switch_regs, control)); 2466 2467 /* All statistics enabled and STAT AB visible by default */ 2468 writel(gbe_dev->stats_en_mask, GBE_REG_ADDR(gbe_dev, switch_regs, 2469 stat_port_en)); 2470 2471 ret = gbe_slave_open(gbe_intf); 2472 if (ret) 2473 goto fail; 2474 2475 netcp_register_txhook(netcp, GBE_TXHOOK_ORDER, gbe_tx_hook, 2476 gbe_intf); 2477 2478 slave->open = true; 2479 netcp_ethss_update_link_state(gbe_dev, slave, ndev); 2480 return 0; 2481 2482 fail: 2483 gbe_slave_stop(gbe_intf); 2484 return ret; 2485 } 2486 2487 static int gbe_close(void *intf_priv, struct net_device *ndev) 2488 { 2489 struct gbe_intf *gbe_intf = intf_priv; 2490 struct netcp_intf *netcp = netdev_priv(ndev); 2491 2492 gbe_slave_stop(gbe_intf); 2493 netcp_unregister_txhook(netcp, GBE_TXHOOK_ORDER, gbe_tx_hook, 2494 gbe_intf); 2495 2496 gbe_intf->slave->open = false; 2497 atomic_set(&gbe_intf->slave->link_state, NETCP_LINK_STATE_INVALID); 2498 return 0; 2499 } 2500 2501 static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave, 2502 struct device_node *node) 2503 { 2504 int port_reg_num; 2505 u32 port_reg_ofs, emac_reg_ofs; 2506 u32 port_reg_blk_sz, emac_reg_blk_sz; 2507 2508 if (of_property_read_u32(node, "slave-port", &slave->slave_num)) { 2509 dev_err(gbe_dev->dev, "missing slave-port parameter\n"); 2510 return -EINVAL; 2511 } 2512 2513 if (of_property_read_u32(node, "link-interface", 2514 &slave->link_interface)) { 2515 dev_warn(gbe_dev->dev, 2516 "missing link-interface value defaulting to 1G mac-phy link\n"); 2517 slave->link_interface = SGMII_LINK_MAC_PHY; 2518 } 2519 2520 slave->open = false; 2521 slave->phy_node = of_parse_phandle(node, "phy-handle", 0); 2522 slave->port_num = gbe_get_slave_port(gbe_dev, slave->slave_num); 2523 2524 if (slave->link_interface >= XGMII_LINK_MAC_PHY) 2525 slave->mac_control = GBE_DEF_10G_MAC_CONTROL; 2526 else 2527 slave->mac_control = GBE_DEF_1G_MAC_CONTROL; 2528 2529 /* Emac regs memmap are contiguous but port regs are not */ 2530 port_reg_num = slave->slave_num; 2531 if (gbe_dev->ss_version == GBE_SS_VERSION_14) { 2532 if (slave->slave_num > 1) { 2533 port_reg_ofs = GBE13_SLAVE_PORT2_OFFSET; 2534 port_reg_num -= 2; 2535 } else { 2536 port_reg_ofs = GBE13_SLAVE_PORT_OFFSET; 2537 } 2538 emac_reg_ofs = GBE13_EMAC_OFFSET; 2539 port_reg_blk_sz = 0x30; 2540 emac_reg_blk_sz = 0x40; 2541 } else if (IS_SS_ID_MU(gbe_dev)) { 2542 port_reg_ofs = GBENU_SLAVE_PORT_OFFSET; 2543 emac_reg_ofs = GBENU_EMAC_OFFSET; 2544 port_reg_blk_sz = 0x1000; 2545 emac_reg_blk_sz = 0x1000; 2546 } else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) { 2547 port_reg_ofs = XGBE10_SLAVE_PORT_OFFSET; 2548 emac_reg_ofs = XGBE10_EMAC_OFFSET; 2549 port_reg_blk_sz = 0x30; 2550 emac_reg_blk_sz = 0x40; 2551 } else { 2552 dev_err(gbe_dev->dev, "unknown ethss(0x%x)\n", 2553 gbe_dev->ss_version); 2554 return -EINVAL; 2555 } 2556 2557 slave->port_regs = gbe_dev->switch_regs + port_reg_ofs + 2558 (port_reg_blk_sz * port_reg_num); 2559 slave->emac_regs = gbe_dev->switch_regs + emac_reg_ofs + 2560 (emac_reg_blk_sz * slave->slave_num); 2561 2562 if (gbe_dev->ss_version == GBE_SS_VERSION_14) { 2563 /* Initialize slave port register offsets */ 2564 GBE_SET_REG_OFS(slave, port_regs, port_vlan); 2565 GBE_SET_REG_OFS(slave, port_regs, tx_pri_map); 2566 GBE_SET_REG_OFS(slave, port_regs, sa_lo); 2567 GBE_SET_REG_OFS(slave, port_regs, sa_hi); 2568 GBE_SET_REG_OFS(slave, port_regs, ts_ctl); 2569 GBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype); 2570 GBE_SET_REG_OFS(slave, port_regs, ts_vlan); 2571 GBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2); 2572 GBE_SET_REG_OFS(slave, port_regs, ts_ctl2); 2573 2574 /* Initialize EMAC register offsets */ 2575 GBE_SET_REG_OFS(slave, emac_regs, mac_control); 2576 GBE_SET_REG_OFS(slave, emac_regs, soft_reset); 2577 GBE_SET_REG_OFS(slave, emac_regs, rx_maxlen); 2578 2579 } else if (IS_SS_ID_MU(gbe_dev)) { 2580 /* Initialize slave port register offsets */ 2581 GBENU_SET_REG_OFS(slave, port_regs, port_vlan); 2582 GBENU_SET_REG_OFS(slave, port_regs, tx_pri_map); 2583 GBENU_SET_REG_OFS(slave, port_regs, sa_lo); 2584 GBENU_SET_REG_OFS(slave, port_regs, sa_hi); 2585 GBENU_SET_REG_OFS(slave, port_regs, ts_ctl); 2586 GBENU_SET_REG_OFS(slave, port_regs, ts_seq_ltype); 2587 GBENU_SET_REG_OFS(slave, port_regs, ts_vlan); 2588 GBENU_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2); 2589 GBENU_SET_REG_OFS(slave, port_regs, ts_ctl2); 2590 GBENU_SET_REG_OFS(slave, port_regs, rx_maxlen); 2591 2592 /* Initialize EMAC register offsets */ 2593 GBENU_SET_REG_OFS(slave, emac_regs, mac_control); 2594 GBENU_SET_REG_OFS(slave, emac_regs, soft_reset); 2595 2596 } else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) { 2597 /* Initialize slave port register offsets */ 2598 XGBE_SET_REG_OFS(slave, port_regs, port_vlan); 2599 XGBE_SET_REG_OFS(slave, port_regs, tx_pri_map); 2600 XGBE_SET_REG_OFS(slave, port_regs, sa_lo); 2601 XGBE_SET_REG_OFS(slave, port_regs, sa_hi); 2602 XGBE_SET_REG_OFS(slave, port_regs, ts_ctl); 2603 XGBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype); 2604 XGBE_SET_REG_OFS(slave, port_regs, ts_vlan); 2605 XGBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2); 2606 XGBE_SET_REG_OFS(slave, port_regs, ts_ctl2); 2607 2608 /* Initialize EMAC register offsets */ 2609 XGBE_SET_REG_OFS(slave, emac_regs, mac_control); 2610 XGBE_SET_REG_OFS(slave, emac_regs, soft_reset); 2611 XGBE_SET_REG_OFS(slave, emac_regs, rx_maxlen); 2612 } 2613 2614 atomic_set(&slave->link_state, NETCP_LINK_STATE_INVALID); 2615 return 0; 2616 } 2617 2618 static void init_secondary_ports(struct gbe_priv *gbe_dev, 2619 struct device_node *node) 2620 { 2621 struct device *dev = gbe_dev->dev; 2622 phy_interface_t phy_mode; 2623 struct gbe_priv **priv; 2624 struct device_node *port; 2625 struct gbe_slave *slave; 2626 bool mac_phy_link = false; 2627 2628 for_each_child_of_node(node, port) { 2629 slave = devm_kzalloc(dev, sizeof(*slave), GFP_KERNEL); 2630 if (!slave) { 2631 dev_err(dev, 2632 "memomry alloc failed for secondary port(%s), skipping...\n", 2633 port->name); 2634 continue; 2635 } 2636 2637 if (init_slave(gbe_dev, slave, port)) { 2638 dev_err(dev, 2639 "Failed to initialize secondary port(%s), skipping...\n", 2640 port->name); 2641 devm_kfree(dev, slave); 2642 continue; 2643 } 2644 2645 gbe_sgmii_config(gbe_dev, slave); 2646 gbe_port_reset(slave); 2647 gbe_port_config(gbe_dev, slave, gbe_dev->rx_packet_max); 2648 list_add_tail(&slave->slave_list, &gbe_dev->secondary_slaves); 2649 gbe_dev->num_slaves++; 2650 if ((slave->link_interface == SGMII_LINK_MAC_PHY) || 2651 (slave->link_interface == XGMII_LINK_MAC_PHY)) 2652 mac_phy_link = true; 2653 2654 slave->open = true; 2655 if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) 2656 break; 2657 } 2658 2659 /* of_phy_connect() is needed only for MAC-PHY interface */ 2660 if (!mac_phy_link) 2661 return; 2662 2663 /* Allocate dummy netdev device for attaching to phy device */ 2664 gbe_dev->dummy_ndev = alloc_netdev(sizeof(gbe_dev), "dummy", 2665 NET_NAME_UNKNOWN, ether_setup); 2666 if (!gbe_dev->dummy_ndev) { 2667 dev_err(dev, 2668 "Failed to allocate dummy netdev for secondary ports, skipping phy_connect()...\n"); 2669 return; 2670 } 2671 priv = netdev_priv(gbe_dev->dummy_ndev); 2672 *priv = gbe_dev; 2673 2674 if (slave->link_interface == SGMII_LINK_MAC_PHY) { 2675 phy_mode = PHY_INTERFACE_MODE_SGMII; 2676 slave->phy_port_t = PORT_MII; 2677 } else { 2678 phy_mode = PHY_INTERFACE_MODE_NA; 2679 slave->phy_port_t = PORT_FIBRE; 2680 } 2681 2682 for_each_sec_slave(slave, gbe_dev) { 2683 if ((slave->link_interface != SGMII_LINK_MAC_PHY) && 2684 (slave->link_interface != XGMII_LINK_MAC_PHY)) 2685 continue; 2686 slave->phy = 2687 of_phy_connect(gbe_dev->dummy_ndev, 2688 slave->phy_node, 2689 gbe_adjust_link_sec_slaves, 2690 0, phy_mode); 2691 if (!slave->phy) { 2692 dev_err(dev, "phy not found for slave %d\n", 2693 slave->slave_num); 2694 slave->phy = NULL; 2695 } else { 2696 dev_dbg(dev, "phy found: id is: 0x%s\n", 2697 dev_name(&slave->phy->dev)); 2698 phy_start(slave->phy); 2699 phy_read_status(slave->phy); 2700 } 2701 } 2702 } 2703 2704 static void free_secondary_ports(struct gbe_priv *gbe_dev) 2705 { 2706 struct gbe_slave *slave; 2707 2708 while (!list_empty(&gbe_dev->secondary_slaves)) { 2709 slave = first_sec_slave(gbe_dev); 2710 2711 if (slave->phy) 2712 phy_disconnect(slave->phy); 2713 list_del(&slave->slave_list); 2714 } 2715 if (gbe_dev->dummy_ndev) 2716 free_netdev(gbe_dev->dummy_ndev); 2717 } 2718 2719 static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev, 2720 struct device_node *node) 2721 { 2722 struct resource res; 2723 void __iomem *regs; 2724 int ret, i; 2725 2726 ret = of_address_to_resource(node, XGBE_SS_REG_INDEX, &res); 2727 if (ret) { 2728 dev_err(gbe_dev->dev, 2729 "Can't xlate xgbe of node(%s) ss address at %d\n", 2730 node->name, XGBE_SS_REG_INDEX); 2731 return ret; 2732 } 2733 2734 regs = devm_ioremap_resource(gbe_dev->dev, &res); 2735 if (IS_ERR(regs)) { 2736 dev_err(gbe_dev->dev, "Failed to map xgbe ss register base\n"); 2737 return PTR_ERR(regs); 2738 } 2739 gbe_dev->ss_regs = regs; 2740 2741 ret = of_address_to_resource(node, XGBE_SM_REG_INDEX, &res); 2742 if (ret) { 2743 dev_err(gbe_dev->dev, 2744 "Can't xlate xgbe of node(%s) sm address at %d\n", 2745 node->name, XGBE_SM_REG_INDEX); 2746 return ret; 2747 } 2748 2749 regs = devm_ioremap_resource(gbe_dev->dev, &res); 2750 if (IS_ERR(regs)) { 2751 dev_err(gbe_dev->dev, "Failed to map xgbe sm register base\n"); 2752 return PTR_ERR(regs); 2753 } 2754 gbe_dev->switch_regs = regs; 2755 2756 ret = of_address_to_resource(node, XGBE_SERDES_REG_INDEX, &res); 2757 if (ret) { 2758 dev_err(gbe_dev->dev, 2759 "Can't xlate xgbe serdes of node(%s) address at %d\n", 2760 node->name, XGBE_SERDES_REG_INDEX); 2761 return ret; 2762 } 2763 2764 regs = devm_ioremap_resource(gbe_dev->dev, &res); 2765 if (IS_ERR(regs)) { 2766 dev_err(gbe_dev->dev, "Failed to map xgbe serdes register base\n"); 2767 return PTR_ERR(regs); 2768 } 2769 gbe_dev->xgbe_serdes_regs = regs; 2770 2771 gbe_dev->num_stats_mods = gbe_dev->max_num_ports; 2772 gbe_dev->et_stats = xgbe10_et_stats; 2773 gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats); 2774 2775 gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev, 2776 gbe_dev->num_et_stats * sizeof(u64), 2777 GFP_KERNEL); 2778 if (!gbe_dev->hw_stats) { 2779 dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n"); 2780 return -ENOMEM; 2781 } 2782 2783 gbe_dev->hw_stats_prev = 2784 devm_kzalloc(gbe_dev->dev, 2785 gbe_dev->num_et_stats * sizeof(u32), 2786 GFP_KERNEL); 2787 if (!gbe_dev->hw_stats_prev) { 2788 dev_err(gbe_dev->dev, 2789 "hw_stats_prev memory allocation failed\n"); 2790 return -ENOMEM; 2791 } 2792 2793 gbe_dev->ss_version = XGBE_SS_VERSION_10; 2794 gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + 2795 XGBE10_SGMII_MODULE_OFFSET; 2796 gbe_dev->host_port_regs = gbe_dev->ss_regs + XGBE10_HOST_PORT_OFFSET; 2797 2798 for (i = 0; i < gbe_dev->max_num_ports; i++) 2799 gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs + 2800 XGBE10_HW_STATS_OFFSET + (GBE_HW_STATS_REG_MAP_SZ * i); 2801 2802 gbe_dev->ale_reg = gbe_dev->switch_regs + XGBE10_ALE_OFFSET; 2803 gbe_dev->ale_ports = gbe_dev->max_num_ports; 2804 gbe_dev->host_port = XGBE10_HOST_PORT_NUM; 2805 gbe_dev->ale_entries = XGBE10_NUM_ALE_ENTRIES; 2806 gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1; 2807 2808 /* Subsystem registers */ 2809 XGBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver); 2810 XGBE_SET_REG_OFS(gbe_dev, ss_regs, control); 2811 2812 /* Switch module registers */ 2813 XGBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver); 2814 XGBE_SET_REG_OFS(gbe_dev, switch_regs, control); 2815 XGBE_SET_REG_OFS(gbe_dev, switch_regs, ptype); 2816 XGBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en); 2817 XGBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control); 2818 2819 /* Host port registers */ 2820 XGBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan); 2821 XGBE_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map); 2822 XGBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen); 2823 return 0; 2824 } 2825 2826 static int get_gbe_resource_version(struct gbe_priv *gbe_dev, 2827 struct device_node *node) 2828 { 2829 struct resource res; 2830 void __iomem *regs; 2831 int ret; 2832 2833 ret = of_address_to_resource(node, GBE_SS_REG_INDEX, &res); 2834 if (ret) { 2835 dev_err(gbe_dev->dev, 2836 "Can't translate of node(%s) of gbe ss address at %d\n", 2837 node->name, GBE_SS_REG_INDEX); 2838 return ret; 2839 } 2840 2841 regs = devm_ioremap_resource(gbe_dev->dev, &res); 2842 if (IS_ERR(regs)) { 2843 dev_err(gbe_dev->dev, "Failed to map gbe register base\n"); 2844 return PTR_ERR(regs); 2845 } 2846 gbe_dev->ss_regs = regs; 2847 gbe_dev->ss_version = readl(gbe_dev->ss_regs); 2848 return 0; 2849 } 2850 2851 static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev, 2852 struct device_node *node) 2853 { 2854 struct resource res; 2855 void __iomem *regs; 2856 int i, ret; 2857 2858 ret = of_address_to_resource(node, GBE_SGMII34_REG_INDEX, &res); 2859 if (ret) { 2860 dev_err(gbe_dev->dev, 2861 "Can't translate of gbe node(%s) address at index %d\n", 2862 node->name, GBE_SGMII34_REG_INDEX); 2863 return ret; 2864 } 2865 2866 regs = devm_ioremap_resource(gbe_dev->dev, &res); 2867 if (IS_ERR(regs)) { 2868 dev_err(gbe_dev->dev, 2869 "Failed to map gbe sgmii port34 register base\n"); 2870 return PTR_ERR(regs); 2871 } 2872 gbe_dev->sgmii_port34_regs = regs; 2873 2874 ret = of_address_to_resource(node, GBE_SM_REG_INDEX, &res); 2875 if (ret) { 2876 dev_err(gbe_dev->dev, 2877 "Can't translate of gbe node(%s) address at index %d\n", 2878 node->name, GBE_SM_REG_INDEX); 2879 return ret; 2880 } 2881 2882 regs = devm_ioremap_resource(gbe_dev->dev, &res); 2883 if (IS_ERR(regs)) { 2884 dev_err(gbe_dev->dev, 2885 "Failed to map gbe switch module register base\n"); 2886 return PTR_ERR(regs); 2887 } 2888 gbe_dev->switch_regs = regs; 2889 2890 gbe_dev->num_stats_mods = gbe_dev->max_num_slaves; 2891 gbe_dev->et_stats = gbe13_et_stats; 2892 gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats); 2893 2894 gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev, 2895 gbe_dev->num_et_stats * sizeof(u64), 2896 GFP_KERNEL); 2897 if (!gbe_dev->hw_stats) { 2898 dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n"); 2899 return -ENOMEM; 2900 } 2901 2902 gbe_dev->hw_stats_prev = 2903 devm_kzalloc(gbe_dev->dev, 2904 gbe_dev->num_et_stats * sizeof(u32), 2905 GFP_KERNEL); 2906 if (!gbe_dev->hw_stats_prev) { 2907 dev_err(gbe_dev->dev, 2908 "hw_stats_prev memory allocation failed\n"); 2909 return -ENOMEM; 2910 } 2911 2912 gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBE13_SGMII_MODULE_OFFSET; 2913 gbe_dev->host_port_regs = gbe_dev->switch_regs + GBE13_HOST_PORT_OFFSET; 2914 2915 /* K2HK has only 2 hw stats modules visible at a time, so 2916 * module 0 & 2 points to one base and 2917 * module 1 & 3 points to the other base 2918 */ 2919 for (i = 0; i < gbe_dev->max_num_slaves; i++) { 2920 gbe_dev->hw_stats_regs[i] = 2921 gbe_dev->switch_regs + GBE13_HW_STATS_OFFSET + 2922 (GBE_HW_STATS_REG_MAP_SZ * (i & 0x1)); 2923 } 2924 2925 gbe_dev->ale_reg = gbe_dev->switch_regs + GBE13_ALE_OFFSET; 2926 gbe_dev->ale_ports = gbe_dev->max_num_ports; 2927 gbe_dev->host_port = GBE13_HOST_PORT_NUM; 2928 gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES; 2929 gbe_dev->stats_en_mask = GBE13_REG_VAL_STAT_ENABLE_ALL; 2930 2931 /* Subsystem registers */ 2932 GBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver); 2933 2934 /* Switch module registers */ 2935 GBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver); 2936 GBE_SET_REG_OFS(gbe_dev, switch_regs, control); 2937 GBE_SET_REG_OFS(gbe_dev, switch_regs, soft_reset); 2938 GBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en); 2939 GBE_SET_REG_OFS(gbe_dev, switch_regs, ptype); 2940 GBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control); 2941 2942 /* Host port registers */ 2943 GBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan); 2944 GBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen); 2945 return 0; 2946 } 2947 2948 static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev, 2949 struct device_node *node) 2950 { 2951 struct resource res; 2952 void __iomem *regs; 2953 int i, ret; 2954 2955 gbe_dev->num_stats_mods = gbe_dev->max_num_ports; 2956 gbe_dev->et_stats = gbenu_et_stats; 2957 2958 if (IS_SS_ID_NU(gbe_dev)) 2959 gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE + 2960 (gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE); 2961 else 2962 gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE + 2963 GBENU_ET_STATS_PORT_SIZE; 2964 2965 gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev, 2966 gbe_dev->num_et_stats * sizeof(u64), 2967 GFP_KERNEL); 2968 if (!gbe_dev->hw_stats) { 2969 dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n"); 2970 return -ENOMEM; 2971 } 2972 2973 gbe_dev->hw_stats_prev = 2974 devm_kzalloc(gbe_dev->dev, 2975 gbe_dev->num_et_stats * sizeof(u32), 2976 GFP_KERNEL); 2977 if (!gbe_dev->hw_stats_prev) { 2978 dev_err(gbe_dev->dev, 2979 "hw_stats_prev memory allocation failed\n"); 2980 return -ENOMEM; 2981 } 2982 2983 ret = of_address_to_resource(node, GBENU_SM_REG_INDEX, &res); 2984 if (ret) { 2985 dev_err(gbe_dev->dev, 2986 "Can't translate of gbenu node(%s) addr at index %d\n", 2987 node->name, GBENU_SM_REG_INDEX); 2988 return ret; 2989 } 2990 2991 regs = devm_ioremap_resource(gbe_dev->dev, &res); 2992 if (IS_ERR(regs)) { 2993 dev_err(gbe_dev->dev, 2994 "Failed to map gbenu switch module register base\n"); 2995 return PTR_ERR(regs); 2996 } 2997 gbe_dev->switch_regs = regs; 2998 2999 gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET; 3000 gbe_dev->host_port_regs = gbe_dev->switch_regs + GBENU_HOST_PORT_OFFSET; 3001 3002 for (i = 0; i < (gbe_dev->max_num_ports); i++) 3003 gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs + 3004 GBENU_HW_STATS_OFFSET + (GBENU_HW_STATS_REG_MAP_SZ * i); 3005 3006 gbe_dev->ale_reg = gbe_dev->switch_regs + GBENU_ALE_OFFSET; 3007 gbe_dev->ale_ports = gbe_dev->max_num_ports; 3008 gbe_dev->host_port = GBENU_HOST_PORT_NUM; 3009 gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES; 3010 gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1; 3011 3012 /* Subsystem registers */ 3013 GBENU_SET_REG_OFS(gbe_dev, ss_regs, id_ver); 3014 3015 /* Switch module registers */ 3016 GBENU_SET_REG_OFS(gbe_dev, switch_regs, id_ver); 3017 GBENU_SET_REG_OFS(gbe_dev, switch_regs, control); 3018 GBENU_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en); 3019 GBENU_SET_REG_OFS(gbe_dev, switch_regs, ptype); 3020 3021 /* Host port registers */ 3022 GBENU_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan); 3023 GBENU_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen); 3024 3025 /* For NU only. 2U does not need tx_pri_map. 3026 * NU cppi port 0 tx pkt streaming interface has (n-1)*8 egress threads 3027 * while 2U has only 1 such thread 3028 */ 3029 GBENU_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map); 3030 return 0; 3031 } 3032 3033 static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, 3034 struct device_node *node, void **inst_priv) 3035 { 3036 struct device_node *interfaces, *interface; 3037 struct device_node *secondary_ports; 3038 struct cpsw_ale_params ale_params; 3039 struct gbe_priv *gbe_dev; 3040 u32 slave_num; 3041 int i, ret = 0; 3042 3043 if (!node) { 3044 dev_err(dev, "device tree info unavailable\n"); 3045 return -ENODEV; 3046 } 3047 3048 gbe_dev = devm_kzalloc(dev, sizeof(struct gbe_priv), GFP_KERNEL); 3049 if (!gbe_dev) 3050 return -ENOMEM; 3051 3052 if (of_device_is_compatible(node, "ti,netcp-gbe-5") || 3053 of_device_is_compatible(node, "ti,netcp-gbe")) { 3054 gbe_dev->max_num_slaves = 4; 3055 } else if (of_device_is_compatible(node, "ti,netcp-gbe-9")) { 3056 gbe_dev->max_num_slaves = 8; 3057 } else if (of_device_is_compatible(node, "ti,netcp-gbe-2")) { 3058 gbe_dev->max_num_slaves = 1; 3059 } else if (of_device_is_compatible(node, "ti,netcp-xgbe")) { 3060 gbe_dev->max_num_slaves = 2; 3061 } else { 3062 dev_err(dev, "device tree node for unknown device\n"); 3063 return -EINVAL; 3064 } 3065 gbe_dev->max_num_ports = gbe_dev->max_num_slaves + 1; 3066 3067 gbe_dev->dev = dev; 3068 gbe_dev->netcp_device = netcp_device; 3069 gbe_dev->rx_packet_max = NETCP_MAX_FRAME_SIZE; 3070 3071 /* init the hw stats lock */ 3072 spin_lock_init(&gbe_dev->hw_stats_lock); 3073 3074 if (of_find_property(node, "enable-ale", NULL)) { 3075 gbe_dev->enable_ale = true; 3076 dev_info(dev, "ALE enabled\n"); 3077 } else { 3078 gbe_dev->enable_ale = false; 3079 dev_dbg(dev, "ALE bypass enabled*\n"); 3080 } 3081 3082 ret = of_property_read_u32(node, "tx-queue", 3083 &gbe_dev->tx_queue_id); 3084 if (ret < 0) { 3085 dev_err(dev, "missing tx_queue parameter\n"); 3086 gbe_dev->tx_queue_id = GBE_TX_QUEUE; 3087 } 3088 3089 ret = of_property_read_string(node, "tx-channel", 3090 &gbe_dev->dma_chan_name); 3091 if (ret < 0) { 3092 dev_err(dev, "missing \"tx-channel\" parameter\n"); 3093 return -EINVAL; 3094 } 3095 3096 if (!strcmp(node->name, "gbe")) { 3097 ret = get_gbe_resource_version(gbe_dev, node); 3098 if (ret) 3099 return ret; 3100 3101 dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version); 3102 3103 if (gbe_dev->ss_version == GBE_SS_VERSION_14) 3104 ret = set_gbe_ethss14_priv(gbe_dev, node); 3105 else if (IS_SS_ID_MU(gbe_dev)) 3106 ret = set_gbenu_ethss_priv(gbe_dev, node); 3107 else 3108 ret = -ENODEV; 3109 3110 } else if (!strcmp(node->name, "xgbe")) { 3111 ret = set_xgbe_ethss10_priv(gbe_dev, node); 3112 if (ret) 3113 return ret; 3114 ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs, 3115 gbe_dev->ss_regs); 3116 } else { 3117 dev_err(dev, "unknown GBE node(%s)\n", node->name); 3118 ret = -ENODEV; 3119 } 3120 3121 if (ret) 3122 return ret; 3123 3124 interfaces = of_get_child_by_name(node, "interfaces"); 3125 if (!interfaces) 3126 dev_err(dev, "could not find interfaces\n"); 3127 3128 ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device, 3129 gbe_dev->dma_chan_name, gbe_dev->tx_queue_id); 3130 if (ret) 3131 return ret; 3132 3133 ret = netcp_txpipe_open(&gbe_dev->tx_pipe); 3134 if (ret) 3135 return ret; 3136 3137 /* Create network interfaces */ 3138 INIT_LIST_HEAD(&gbe_dev->gbe_intf_head); 3139 for_each_child_of_node(interfaces, interface) { 3140 ret = of_property_read_u32(interface, "slave-port", &slave_num); 3141 if (ret) { 3142 dev_err(dev, "missing slave-port parameter, skipping interface configuration for %s\n", 3143 interface->name); 3144 continue; 3145 } 3146 gbe_dev->num_slaves++; 3147 if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) 3148 break; 3149 } 3150 of_node_put(interfaces); 3151 3152 if (!gbe_dev->num_slaves) 3153 dev_warn(dev, "No network interface configured\n"); 3154 3155 /* Initialize Secondary slave ports */ 3156 secondary_ports = of_get_child_by_name(node, "secondary-slave-ports"); 3157 INIT_LIST_HEAD(&gbe_dev->secondary_slaves); 3158 if (secondary_ports && (gbe_dev->num_slaves < gbe_dev->max_num_slaves)) 3159 init_secondary_ports(gbe_dev, secondary_ports); 3160 of_node_put(secondary_ports); 3161 3162 if (!gbe_dev->num_slaves) { 3163 dev_err(dev, 3164 "No network interface or secondary ports configured\n"); 3165 ret = -ENODEV; 3166 goto free_sec_ports; 3167 } 3168 3169 memset(&ale_params, 0, sizeof(ale_params)); 3170 ale_params.dev = gbe_dev->dev; 3171 ale_params.ale_regs = gbe_dev->ale_reg; 3172 ale_params.ale_ageout = GBE_DEFAULT_ALE_AGEOUT; 3173 ale_params.ale_entries = gbe_dev->ale_entries; 3174 ale_params.ale_ports = gbe_dev->ale_ports; 3175 3176 gbe_dev->ale = cpsw_ale_create(&ale_params); 3177 if (!gbe_dev->ale) { 3178 dev_err(gbe_dev->dev, "error initializing ale engine\n"); 3179 ret = -ENODEV; 3180 goto free_sec_ports; 3181 } else { 3182 dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n"); 3183 } 3184 3185 /* initialize host port */ 3186 gbe_init_host_port(gbe_dev); 3187 3188 spin_lock_bh(&gbe_dev->hw_stats_lock); 3189 for (i = 0; i < gbe_dev->num_stats_mods; i++) { 3190 if (gbe_dev->ss_version == GBE_SS_VERSION_14) 3191 gbe_reset_mod_stats_ver14(gbe_dev, i); 3192 else 3193 gbe_reset_mod_stats(gbe_dev, i); 3194 } 3195 spin_unlock_bh(&gbe_dev->hw_stats_lock); 3196 3197 init_timer(&gbe_dev->timer); 3198 gbe_dev->timer.data = (unsigned long)gbe_dev; 3199 gbe_dev->timer.function = netcp_ethss_timer; 3200 gbe_dev->timer.expires = jiffies + GBE_TIMER_INTERVAL; 3201 add_timer(&gbe_dev->timer); 3202 *inst_priv = gbe_dev; 3203 return 0; 3204 3205 free_sec_ports: 3206 free_secondary_ports(gbe_dev); 3207 return ret; 3208 } 3209 3210 static int gbe_attach(void *inst_priv, struct net_device *ndev, 3211 struct device_node *node, void **intf_priv) 3212 { 3213 struct gbe_priv *gbe_dev = inst_priv; 3214 struct gbe_intf *gbe_intf; 3215 int ret; 3216 3217 if (!node) { 3218 dev_err(gbe_dev->dev, "interface node not available\n"); 3219 return -ENODEV; 3220 } 3221 3222 gbe_intf = devm_kzalloc(gbe_dev->dev, sizeof(*gbe_intf), GFP_KERNEL); 3223 if (!gbe_intf) 3224 return -ENOMEM; 3225 3226 gbe_intf->ndev = ndev; 3227 gbe_intf->dev = gbe_dev->dev; 3228 gbe_intf->gbe_dev = gbe_dev; 3229 3230 gbe_intf->slave = devm_kzalloc(gbe_dev->dev, 3231 sizeof(*gbe_intf->slave), 3232 GFP_KERNEL); 3233 if (!gbe_intf->slave) { 3234 ret = -ENOMEM; 3235 goto fail; 3236 } 3237 3238 if (init_slave(gbe_dev, gbe_intf->slave, node)) { 3239 ret = -ENODEV; 3240 goto fail; 3241 } 3242 3243 gbe_intf->tx_pipe = gbe_dev->tx_pipe; 3244 ndev->ethtool_ops = &keystone_ethtool_ops; 3245 list_add_tail(&gbe_intf->gbe_intf_list, &gbe_dev->gbe_intf_head); 3246 *intf_priv = gbe_intf; 3247 return 0; 3248 3249 fail: 3250 if (gbe_intf->slave) 3251 devm_kfree(gbe_dev->dev, gbe_intf->slave); 3252 if (gbe_intf) 3253 devm_kfree(gbe_dev->dev, gbe_intf); 3254 return ret; 3255 } 3256 3257 static int gbe_release(void *intf_priv) 3258 { 3259 struct gbe_intf *gbe_intf = intf_priv; 3260 3261 gbe_intf->ndev->ethtool_ops = NULL; 3262 list_del(&gbe_intf->gbe_intf_list); 3263 devm_kfree(gbe_intf->dev, gbe_intf->slave); 3264 devm_kfree(gbe_intf->dev, gbe_intf); 3265 return 0; 3266 } 3267 3268 static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv) 3269 { 3270 struct gbe_priv *gbe_dev = inst_priv; 3271 3272 del_timer_sync(&gbe_dev->timer); 3273 cpsw_ale_stop(gbe_dev->ale); 3274 cpsw_ale_destroy(gbe_dev->ale); 3275 netcp_txpipe_close(&gbe_dev->tx_pipe); 3276 free_secondary_ports(gbe_dev); 3277 3278 if (!list_empty(&gbe_dev->gbe_intf_head)) 3279 dev_alert(gbe_dev->dev, 3280 "unreleased ethss interfaces present\n"); 3281 3282 return 0; 3283 } 3284 3285 static struct netcp_module gbe_module = { 3286 .name = GBE_MODULE_NAME, 3287 .owner = THIS_MODULE, 3288 .primary = true, 3289 .probe = gbe_probe, 3290 .open = gbe_open, 3291 .close = gbe_close, 3292 .remove = gbe_remove, 3293 .attach = gbe_attach, 3294 .release = gbe_release, 3295 .add_addr = gbe_add_addr, 3296 .del_addr = gbe_del_addr, 3297 .add_vid = gbe_add_vid, 3298 .del_vid = gbe_del_vid, 3299 .ioctl = gbe_ioctl, 3300 }; 3301 3302 static struct netcp_module xgbe_module = { 3303 .name = XGBE_MODULE_NAME, 3304 .owner = THIS_MODULE, 3305 .primary = true, 3306 .probe = gbe_probe, 3307 .open = gbe_open, 3308 .close = gbe_close, 3309 .remove = gbe_remove, 3310 .attach = gbe_attach, 3311 .release = gbe_release, 3312 .add_addr = gbe_add_addr, 3313 .del_addr = gbe_del_addr, 3314 .add_vid = gbe_add_vid, 3315 .del_vid = gbe_del_vid, 3316 .ioctl = gbe_ioctl, 3317 }; 3318 3319 static int __init keystone_gbe_init(void) 3320 { 3321 int ret; 3322 3323 ret = netcp_register_module(&gbe_module); 3324 if (ret) 3325 return ret; 3326 3327 ret = netcp_register_module(&xgbe_module); 3328 if (ret) 3329 return ret; 3330 3331 return 0; 3332 } 3333 module_init(keystone_gbe_init); 3334 3335 static void __exit keystone_gbe_exit(void) 3336 { 3337 netcp_unregister_module(&gbe_module); 3338 netcp_unregister_module(&xgbe_module); 3339 } 3340 module_exit(keystone_gbe_exit); 3341 3342 MODULE_LICENSE("GPL v2"); 3343 MODULE_DESCRIPTION("TI NETCP ETHSS driver for Keystone SOCs"); 3344 MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com"); 3345