/*
 * Copyright (c) 2016, NVIDIA CORPORATION.
 *
 * SPDX-License-Identifier: GPL-2.0
 *
 * Portions based on U-Boot's rtl8169.c.
 */

/*
 * This driver supports the Synopsys Designware Ethernet QOS (Quality Of
 * Service) IP block. The IP supports multiple options for bus type, clocking/
 * reset structure, and feature list.
 *
 * The driver is written such that generic core logic is kept separate from
 * configuration-specific logic. Code that interacts with configuration-
 * specific resources is split out into separate functions to avoid polluting
 * common code. If/when this driver is enhanced to support multiple
 * configurations, the core code should be adapted to call all configuration-
 * specific functions through function pointers, with the definition of those
 * function pointers being supplied by struct udevice_id eqos_ids[]'s .data
 * field.
 *
 * The following configurations are currently supported:
 * tegra186:
 *    NVIDIA's Tegra186 chip. This configuration uses an AXI master/DMA bus,
 *    an AHB slave/register bus, contains the DMA, MTL, and MAC sub-blocks,
 *    and supports a single RGMII PHY. This configuration also has SW control
 *    over all clock and reset signals to the HW block.
 */

#include <common.h>
#include <clk.h>
#include <dm.h>
#include <errno.h>
#include <memalign.h>
#include <miiphy.h>
#include <net.h>
#include <netdev.h>
#include <phy.h>
#include <reset.h>
#include <wait_bit.h>
#include <asm/gpio.h>
#include <asm/io.h>

/* Core registers */

#define EQOS_MAC_REGS_BASE 0x000
struct eqos_mac_regs {
	uint32_t configuration;				/* 0x000 */
	uint32_t unused_004[(0x070 - 0x004) / 4];	/* 0x004 */
	uint32_t q0_tx_flow_ctrl;			/* 0x070 */
	uint32_t unused_070[(0x090 - 0x074) / 4];	/* 0x074 */
	uint32_t rx_flow_ctrl;				/* 0x090 */
	uint32_t unused_094;				/* 0x094 */
	uint32_t txq_prty_map0;				/* 0x098 */
	uint32_t unused_09c;				/* 0x09c */
	uint32_t rxq_ctrl0;				/* 0x0a0 */
	uint32_t unused_0a4;				/* 0x0a4 */
	uint32_t rxq_ctrl2;				/* 0x0a8 */
	uint32_t unused_0ac[(0x0dc - 0x0ac) / 4];	/* 0x0ac */
	uint32_t us_tic_counter;			/* 0x0dc */
	uint32_t unused_0e0[(0x11c - 0x0e0) / 4];	/* 0x0e0 */
	uint32_t hw_feature0;				/* 0x11c */
	uint32_t hw_feature1;				/* 0x120 */
	uint32_t hw_feature2;				/* 0x124 */
	uint32_t unused_128[(0x200 - 0x128) / 4];	/* 0x128 */
	uint32_t mdio_address;				/* 0x200 */
	uint32_t mdio_data;				/* 0x204 */
	uint32_t unused_208[(0x300 - 0x208) / 4];	/* 0x208 */
	uint32_t address0_high;				/* 0x300 */
	uint32_t address0_low;				/* 0x304 */
};
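
/*
 * Illustrative note: each unused_* array above is sized from the gap between
 * register offsets. For example, unused_004 holds (0x070 - 0x004) / 4 = 27
 * words, which is exactly what keeps q0_tx_flow_ctrl at offset 0x070.
 */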

#define EQOS_MAC_CONFIGURATION_GPSLCE	BIT(23)
#define EQOS_MAC_CONFIGURATION_CST	BIT(21)
#define EQOS_MAC_CONFIGURATION_ACS	BIT(20)
#define EQOS_MAC_CONFIGURATION_WD	BIT(19)
#define EQOS_MAC_CONFIGURATION_JD	BIT(17)
#define EQOS_MAC_CONFIGURATION_JE	BIT(16)
#define EQOS_MAC_CONFIGURATION_PS	BIT(15)
#define EQOS_MAC_CONFIGURATION_FES	BIT(14)
#define EQOS_MAC_CONFIGURATION_DM	BIT(13)
#define EQOS_MAC_CONFIGURATION_TE	BIT(1)
#define EQOS_MAC_CONFIGURATION_RE	BIT(0)

#define EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT	16
#define EQOS_MAC_Q0_TX_FLOW_CTRL_PT_MASK	0xffff
#define EQOS_MAC_Q0_TX_FLOW_CTRL_TFE		BIT(1)

#define EQOS_MAC_RX_FLOW_CTRL_RFE		BIT(0)

#define EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT	0
#define EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK	0xff

#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT		0
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK		3
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_NOT_ENABLED	0
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB	2

#define EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT		0
#define EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK		0xff

#define EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT	6
#define EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK	0x1f
#define EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT	0
#define EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK	0x1f

#define EQOS_MAC_MDIO_ADDRESS_PA_SHIFT		21
#define EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT		16
#define EQOS_MAC_MDIO_ADDRESS_CR_SHIFT		8
#define EQOS_MAC_MDIO_ADDRESS_CR_20_35		2
#define EQOS_MAC_MDIO_ADDRESS_SKAP		BIT(4)
#define EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT		2
#define EQOS_MAC_MDIO_ADDRESS_GOC_READ		3
#define EQOS_MAC_MDIO_ADDRESS_GOC_WRITE		1
#define EQOS_MAC_MDIO_ADDRESS_C45E		BIT(1)
#define EQOS_MAC_MDIO_ADDRESS_GB		BIT(0)

#define EQOS_MAC_MDIO_DATA_GD_MASK		0xffff
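
/*
 * Note: per its name, CR_20_35 (value 2) selects the MDIO clock-range
 * encoding for a 20-35 MHz CSR clock; on Synopsys MACs this range is
 * generally documented as dividing the CSR clock by 16, which keeps MDC at
 * or below the 2.5 MHz MDIO limit. The divider is inferred from the define's
 * name and general Synopsys practice, so treat it as indicative and consult
 * the databook for the authoritative table.
 */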

#define EQOS_MTL_REGS_BASE 0xd00
struct eqos_mtl_regs {
	uint32_t txq0_operation_mode;			/* 0xd00 */
	uint32_t unused_d04;				/* 0xd04 */
	uint32_t txq0_debug;				/* 0xd08 */
	uint32_t unused_d0c[(0xd18 - 0xd0c) / 4];	/* 0xd0c */
	uint32_t txq0_quantum_weight;			/* 0xd18 */
	uint32_t unused_d1c[(0xd30 - 0xd1c) / 4];	/* 0xd1c */
	uint32_t rxq0_operation_mode;			/* 0xd30 */
	uint32_t unused_d34;				/* 0xd34 */
	uint32_t rxq0_debug;				/* 0xd38 */
};

#define EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT		16
#define EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK		0x1ff
#define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT	2
#define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_MASK		3
#define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED	2
#define EQOS_MTL_TXQ0_OPERATION_MODE_TSF		BIT(1)
#define EQOS_MTL_TXQ0_OPERATION_MODE_FTQ		BIT(0)

#define EQOS_MTL_TXQ0_DEBUG_TXQSTS			BIT(4)
#define EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT		1
#define EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK			3

#define EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT		20
#define EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK		0x3ff
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT		14
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK		0x3f
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT		8
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK		0x3f
#define EQOS_MTL_RXQ0_OPERATION_MODE_EHFC		BIT(7)
#define EQOS_MTL_RXQ0_OPERATION_MODE_RSF		BIT(5)

#define EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT			16
#define EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK			0x7fff
#define EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT		4
#define EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK			3

#define EQOS_DMA_REGS_BASE 0x1000
struct eqos_dma_regs {
	uint32_t mode;					/* 0x1000 */
	uint32_t sysbus_mode;				/* 0x1004 */
	uint32_t unused_1008[(0x1100 - 0x1008) / 4];	/* 0x1008 */
	uint32_t ch0_control;				/* 0x1100 */
	uint32_t ch0_tx_control;			/* 0x1104 */
	uint32_t ch0_rx_control;			/* 0x1108 */
	uint32_t unused_110c;				/* 0x110c */
	uint32_t ch0_txdesc_list_haddress;		/* 0x1110 */
	uint32_t ch0_txdesc_list_address;		/* 0x1114 */
	uint32_t ch0_rxdesc_list_haddress;		/* 0x1118 */
	uint32_t ch0_rxdesc_list_address;		/* 0x111c */
	uint32_t ch0_txdesc_tail_pointer;		/* 0x1120 */
	uint32_t unused_1124;				/* 0x1124 */
	uint32_t ch0_rxdesc_tail_pointer;		/* 0x1128 */
	uint32_t ch0_txdesc_ring_length;		/* 0x112c */
	uint32_t ch0_rxdesc_ring_length;		/* 0x1130 */
};

#define EQOS_DMA_MODE_SWR			BIT(0)

#define EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT	16
#define EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_MASK	0xf
#define EQOS_DMA_SYSBUS_MODE_EAME		BIT(11)
#define EQOS_DMA_SYSBUS_MODE_BLEN16		BIT(3)
#define EQOS_DMA_SYSBUS_MODE_BLEN8		BIT(2)
#define EQOS_DMA_SYSBUS_MODE_BLEN4		BIT(1)

#define EQOS_DMA_CH0_CONTROL_PBLX8		BIT(16)

#define EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT	16
#define EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK	0x3f
#define EQOS_DMA_CH0_TX_CONTROL_OSP		BIT(4)
#define EQOS_DMA_CH0_TX_CONTROL_ST		BIT(0)

#define EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT	16
#define EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK	0x3f
#define EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT	1
#define EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK	0x3fff
#define EQOS_DMA_CH0_RX_CONTROL_SR		BIT(0)

/* These registers are Tegra186-specific */
#define EQOS_TEGRA186_REGS_BASE 0x8800
struct eqos_tegra186_regs {
	uint32_t sdmemcomppadctrl;			/* 0x8800 */
	uint32_t auto_cal_config;			/* 0x8804 */
	uint32_t unused_8808;				/* 0x8808 */
	uint32_t auto_cal_status;			/* 0x880c */
};

#define EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD	BIT(31)

#define EQOS_AUTO_CAL_CONFIG_START	BIT(31)
#define EQOS_AUTO_CAL_CONFIG_ENABLE	BIT(29)

#define EQOS_AUTO_CAL_STATUS_ACTIVE	BIT(31)

/* Descriptors */

#define EQOS_DESCRIPTOR_WORDS	4
#define EQOS_DESCRIPTOR_SIZE	(EQOS_DESCRIPTOR_WORDS * 4)
/* We assume ARCH_DMA_MINALIGN >= 16; 16 is the EQOS HW minimum */
#define EQOS_DESCRIPTOR_ALIGN	ARCH_DMA_MINALIGN
#define EQOS_DESCRIPTORS_TX	4
#define EQOS_DESCRIPTORS_RX	4
#define EQOS_DESCRIPTORS_NUM	(EQOS_DESCRIPTORS_TX + EQOS_DESCRIPTORS_RX)
#define EQOS_DESCRIPTORS_SIZE	ALIGN(EQOS_DESCRIPTORS_NUM * \
				      EQOS_DESCRIPTOR_SIZE, ARCH_DMA_MINALIGN)
#define EQOS_BUFFER_ALIGN	ARCH_DMA_MINALIGN
#define EQOS_MAX_PACKET_SIZE	ALIGN(1568, ARCH_DMA_MINALIGN)
#define EQOS_RX_BUFFER_SIZE	(EQOS_DESCRIPTORS_RX * EQOS_MAX_PACKET_SIZE)
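
/*
 * Worked example (assuming a 64-byte ARCH_DMA_MINALIGN): EQOS_MAX_PACKET_SIZE
 * is ALIGN(1568, 64) = 1600 bytes, so EQOS_RX_BUFFER_SIZE is 4 * 1600 = 6400
 * bytes, and EQOS_DESCRIPTORS_SIZE is ALIGN(8 * 16, 64) = 128 bytes.
 */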

/*
 * Warn if the cache-line size is larger than the descriptor size. In such
 * cases the driver will likely fail because the CPU needs to flush the cache
 * when requeuing RX buffers, and hence descriptors written by the hardware
 * may be discarded. Architectures with full IO coherence, such as x86, do not
 * experience this issue, and hence are excluded from this condition.
 *
 * This can be fixed by defining CONFIG_SYS_NONCACHED_MEMORY, which will cause
 * the driver to allocate descriptors from a pool of non-cached memory.
 */
#if EQOS_DESCRIPTOR_SIZE < ARCH_DMA_MINALIGN
#if !defined(CONFIG_SYS_NONCACHED_MEMORY) && \
	!defined(CONFIG_SYS_DCACHE_OFF) && !defined(CONFIG_X86)
#warning Cache line size is larger than descriptor size
#endif
#endif

struct eqos_desc {
	u32 des0;
	u32 des1;
	u32 des2;
	u32 des3;
};

#define EQOS_DESC3_OWN		BIT(31)
#define EQOS_DESC3_FD		BIT(29)
#define EQOS_DESC3_LD		BIT(28)
#define EQOS_DESC3_BUF1V	BIT(24)

struct eqos_config {
	bool reg_access_always_ok;
};

struct eqos_priv {
	struct udevice *dev;
	const struct eqos_config *config;
	fdt_addr_t regs;
	struct eqos_mac_regs *mac_regs;
	struct eqos_mtl_regs *mtl_regs;
	struct eqos_dma_regs *dma_regs;
	struct eqos_tegra186_regs *tegra186_regs;
	struct reset_ctl reset_ctl;
	struct gpio_desc phy_reset_gpio;
	struct clk clk_master_bus;
	struct clk clk_rx;
	struct clk clk_ptp_ref;
	struct clk clk_tx;
	struct clk clk_slave_bus;
	struct mii_dev *mii;
	struct phy_device *phy;
	void *descs;
	struct eqos_desc *tx_descs;
	struct eqos_desc *rx_descs;
	int tx_desc_idx, rx_desc_idx;
	void *tx_dma_buf;
	void *rx_dma_buf;
	void *rx_pkt;
	bool started;
	bool reg_access_ok;
};

/*
 * TX and RX descriptors are 16 bytes. This causes problems with the cache
 * maintenance on CPUs where the cache-line size exceeds the size of these
 * descriptors. When the driver receives a packet, the buffer is immediately
 * requeued for the hardware to reuse. The CPU therefore needs to flush the
 * cache-line containing the descriptor, which causes all other descriptors
 * in the same cache-line to be flushed along with it. If one of those
 * descriptors had been written to by the device, those changes (and the
 * associated packet) would be lost.
 *
 * To work around this, we make use of non-cached memory if available. If
 * descriptors are mapped uncached, there is no need to manually flush or
 * invalidate them.
 *
 * Note that this only applies to descriptors. The packet data buffers do
 * not have the same constraints since they are 1536 bytes large, so they
 * are unlikely to share cache-lines.
 */
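
/*
 * Concrete illustration (assuming a 64-byte cache-line): four 16-byte
 * descriptors share one line, so flushing the line that holds a just-requeued
 * RX descriptor also writes back its three neighbours, potentially clobbering
 * a descriptor the hardware has updated in the meantime.
 */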

static void *eqos_alloc_descs(unsigned int num)
{
#ifdef CONFIG_SYS_NONCACHED_MEMORY
	return (void *)noncached_alloc(EQOS_DESCRIPTORS_SIZE,
				       EQOS_DESCRIPTOR_ALIGN);
#else
	return memalign(EQOS_DESCRIPTOR_ALIGN, EQOS_DESCRIPTORS_SIZE);
#endif
}

static void eqos_free_descs(void *descs)
{
#ifdef CONFIG_SYS_NONCACHED_MEMORY
	/* FIXME: noncached_alloc() has no opposite */
#else
	free(descs);
#endif
}

static void eqos_inval_desc(void *desc)
{
#ifndef CONFIG_SYS_NONCACHED_MEMORY
	unsigned long start = (unsigned long)desc & ~(ARCH_DMA_MINALIGN - 1);
	unsigned long end = ALIGN(start + EQOS_DESCRIPTOR_SIZE,
				  ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
#endif
}

static void eqos_flush_desc(void *desc)
{
#ifndef CONFIG_SYS_NONCACHED_MEMORY
	flush_cache((unsigned long)desc, EQOS_DESCRIPTOR_SIZE);
#endif
}

static void eqos_inval_buffer(void *buf, size_t size)
{
	unsigned long start = (unsigned long)buf & ~(ARCH_DMA_MINALIGN - 1);
	unsigned long end = ALIGN(start + size, ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
}

static void eqos_flush_buffer(void *buf, size_t size)
{
	flush_cache((unsigned long)buf, size);
}

static int eqos_mdio_wait_idle(struct eqos_priv *eqos)
{
	return wait_for_bit_le32(&eqos->mac_regs->mdio_address,
				 EQOS_MAC_MDIO_ADDRESS_GB, false,
				 1000000, true);
}

static int eqos_mdio_read(struct mii_dev *bus, int mdio_addr, int mdio_devad,
			  int mdio_reg)
{
	struct eqos_priv *eqos = bus->priv;
	u32 val;
	int ret;

	debug("%s(dev=%p, addr=%x, reg=%d):\n", __func__, eqos->dev, mdio_addr,
	      mdio_reg);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO not idle at entry");
		return ret;
	}

	val = readl(&eqos->mac_regs->mdio_address);
	val &= EQOS_MAC_MDIO_ADDRESS_SKAP |
	       EQOS_MAC_MDIO_ADDRESS_C45E;
	val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) |
	       (mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) |
	       (EQOS_MAC_MDIO_ADDRESS_CR_20_35 <<
		EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) |
	       (EQOS_MAC_MDIO_ADDRESS_GOC_READ <<
		EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) |
	       EQOS_MAC_MDIO_ADDRESS_GB;
	writel(val, &eqos->mac_regs->mdio_address);

	udelay(10);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO read didn't complete");
		return ret;
	}

	val = readl(&eqos->mac_regs->mdio_data);
	val &= EQOS_MAC_MDIO_DATA_GD_MASK;

	debug("%s: val=%x\n", __func__, val);

	return val;
}
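
/*
 * Worked example (hypothetical operands): reading register 2 of the PHY at
 * address 0 programs mdio_address with (0 << 21) | (2 << 16) | (2 << 8) |
 * (3 << 2) | 1 = 0x2020d, i.e. CR_20_35, GOC_READ, and the GB (busy) bit.
 */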

static int eqos_mdio_write(struct mii_dev *bus, int mdio_addr, int mdio_devad,
			   int mdio_reg, u16 mdio_val)
{
	struct eqos_priv *eqos = bus->priv;
	u32 val;
	int ret;

	debug("%s(dev=%p, addr=%x, reg=%d, val=%x):\n", __func__, eqos->dev,
	      mdio_addr, mdio_reg, mdio_val);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO not idle at entry");
		return ret;
	}

	writel(mdio_val, &eqos->mac_regs->mdio_data);

	val = readl(&eqos->mac_regs->mdio_address);
	val &= EQOS_MAC_MDIO_ADDRESS_SKAP |
	       EQOS_MAC_MDIO_ADDRESS_C45E;
	val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) |
	       (mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) |
	       (EQOS_MAC_MDIO_ADDRESS_CR_20_35 <<
		EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) |
	       (EQOS_MAC_MDIO_ADDRESS_GOC_WRITE <<
		EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) |
	       EQOS_MAC_MDIO_ADDRESS_GB;
	writel(val, &eqos->mac_regs->mdio_address);

	udelay(10);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO write didn't complete");
		return ret;
	}

	return 0;
}

static int eqos_start_clks_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = clk_enable(&eqos->clk_slave_bus);
	if (ret < 0) {
		pr_err("clk_enable(clk_slave_bus) failed: %d", ret);
		goto err;
	}

	ret = clk_enable(&eqos->clk_master_bus);
	if (ret < 0) {
		pr_err("clk_enable(clk_master_bus) failed: %d", ret);
		goto err_disable_clk_slave_bus;
	}

	ret = clk_enable(&eqos->clk_rx);
	if (ret < 0) {
		pr_err("clk_enable(clk_rx) failed: %d", ret);
		goto err_disable_clk_master_bus;
	}

	ret = clk_enable(&eqos->clk_ptp_ref);
	if (ret < 0) {
		pr_err("clk_enable(clk_ptp_ref) failed: %d", ret);
		goto err_disable_clk_rx;
	}

	ret = clk_set_rate(&eqos->clk_ptp_ref, 125 * 1000 * 1000);
	if (ret < 0) {
		pr_err("clk_set_rate(clk_ptp_ref) failed: %d", ret);
		goto err_disable_clk_ptp_ref;
	}

	ret = clk_enable(&eqos->clk_tx);
	if (ret < 0) {
		pr_err("clk_enable(clk_tx) failed: %d", ret);
		goto err_disable_clk_ptp_ref;
	}

	debug("%s: OK\n", __func__);
	return 0;

err_disable_clk_ptp_ref:
	clk_disable(&eqos->clk_ptp_ref);
err_disable_clk_rx:
	clk_disable(&eqos->clk_rx);
err_disable_clk_master_bus:
	clk_disable(&eqos->clk_master_bus);
err_disable_clk_slave_bus:
	clk_disable(&eqos->clk_slave_bus);
err:
	debug("%s: FAILED: %d\n", __func__, ret);
	return ret;
}

static void eqos_stop_clks_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clk_disable(&eqos->clk_tx);
	clk_disable(&eqos->clk_ptp_ref);
	clk_disable(&eqos->clk_rx);
	clk_disable(&eqos->clk_master_bus);
	clk_disable(&eqos->clk_slave_bus);

	debug("%s: OK\n", __func__);
}

static int eqos_start_resets_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1);
	if (ret < 0) {
		pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d", ret);
		return ret;
	}

	udelay(2);

	ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0);
	if (ret < 0) {
		pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d",
		       ret);
		return ret;
	}

	ret = reset_assert(&eqos->reset_ctl);
	if (ret < 0) {
		pr_err("reset_assert() failed: %d", ret);
		return ret;
	}

	udelay(2);

	ret = reset_deassert(&eqos->reset_ctl);
	if (ret < 0) {
		pr_err("reset_deassert() failed: %d", ret);
		return ret;
	}

	debug("%s: OK\n", __func__);
	return 0;
}

static int eqos_stop_resets_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	reset_assert(&eqos->reset_ctl);
	dm_gpio_set_value(&eqos->phy_reset_gpio, 1);

	return 0;
}
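
/*
 * Tegra186 pad auto-calibration, implemented below: power up the pad input
 * stage, pulse START | ENABLE, wait for the ACTIVE status bit to assert and
 * then to clear again, and power the input stage back down regardless of
 * the outcome.
 */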

static int eqos_calibrate_pads_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	setbits_le32(&eqos->tegra186_regs->sdmemcomppadctrl,
		     EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD);

	udelay(1);

	setbits_le32(&eqos->tegra186_regs->auto_cal_config,
		     EQOS_AUTO_CAL_CONFIG_START | EQOS_AUTO_CAL_CONFIG_ENABLE);

	ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status,
				EQOS_AUTO_CAL_STATUS_ACTIVE, true, 10, false);
	if (ret) {
		pr_err("calibrate didn't start");
		goto failed;
	}

	ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status,
				EQOS_AUTO_CAL_STATUS_ACTIVE, false, 10, false);
	if (ret) {
		pr_err("calibrate didn't finish");
		goto failed;
	}

	ret = 0;

failed:
	clrbits_le32(&eqos->tegra186_regs->sdmemcomppadctrl,
		     EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD);

	debug("%s: returns %d\n", __func__, ret);

	return ret;
}

static int eqos_disable_calibration_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrbits_le32(&eqos->tegra186_regs->auto_cal_config,
		     EQOS_AUTO_CAL_CONFIG_ENABLE);

	return 0;
}

static ulong eqos_get_tick_clk_rate_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	return clk_get_rate(&eqos->clk_slave_bus);
}

static int eqos_set_full_duplex(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	setbits_le32(&eqos->mac_regs->configuration, EQOS_MAC_CONFIGURATION_DM);

	return 0;
}

static int eqos_set_half_duplex(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrbits_le32(&eqos->mac_regs->configuration, EQOS_MAC_CONFIGURATION_DM);

	/* WAR: Flush TX queue when switching to half-duplex */
	setbits_le32(&eqos->mtl_regs->txq0_operation_mode,
		     EQOS_MTL_TXQ0_OPERATION_MODE_FTQ);

	return 0;
}

static int eqos_set_gmii_speed(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES);

	return 0;
}

static int eqos_set_mii_speed_100(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	setbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES);

	return 0;
}

static int eqos_set_mii_speed_10(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrsetbits_le32(&eqos->mac_regs->configuration,
			EQOS_MAC_CONFIGURATION_FES, EQOS_MAC_CONFIGURATION_PS);

	return 0;
}
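
/*
 * Summary of the PS/FES encoding used by the three helpers above:
 * PS=0, FES=0 selects GMII/1000; PS=1, FES=1 selects MII/100; and
 * PS=1, FES=0 selects MII/10.
 */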

static int eqos_set_tx_clk_speed_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	ulong rate;
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	switch (eqos->phy->speed) {
	case SPEED_1000:
		rate = 125 * 1000 * 1000;
		break;
	case SPEED_100:
		rate = 25 * 1000 * 1000;
		break;
	case SPEED_10:
		rate = 2.5 * 1000 * 1000;
		break;
	default:
		pr_err("invalid speed %d", eqos->phy->speed);
		return -EINVAL;
	}

	ret = clk_set_rate(&eqos->clk_tx, rate);
	if (ret < 0) {
		pr_err("clk_set_rate(tx_clk, %lu) failed: %d", rate, ret);
		return ret;
	}

	return 0;
}

static int eqos_adjust_link(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;
	bool en_calibration;

	debug("%s(dev=%p):\n", __func__, dev);

	if (eqos->phy->duplex)
		ret = eqos_set_full_duplex(dev);
	else
		ret = eqos_set_half_duplex(dev);
	if (ret < 0) {
		pr_err("eqos_set_*_duplex() failed: %d", ret);
		return ret;
	}

	switch (eqos->phy->speed) {
	case SPEED_1000:
		en_calibration = true;
		ret = eqos_set_gmii_speed(dev);
		break;
	case SPEED_100:
		en_calibration = true;
		ret = eqos_set_mii_speed_100(dev);
		break;
	case SPEED_10:
		en_calibration = false;
		ret = eqos_set_mii_speed_10(dev);
		break;
	default:
		pr_err("invalid speed %d", eqos->phy->speed);
		return -EINVAL;
	}
	if (ret < 0) {
		pr_err("eqos_set_*mii_speed*() failed: %d", ret);
		return ret;
	}

	if (en_calibration) {
		ret = eqos_calibrate_pads_tegra186(dev);
		if (ret < 0) {
			pr_err("eqos_calibrate_pads_tegra186() failed: %d",
			       ret);
			return ret;
		}
	} else {
		ret = eqos_disable_calibration_tegra186(dev);
		if (ret < 0) {
			pr_err("eqos_disable_calibration_tegra186() failed: %d",
			       ret);
			return ret;
		}
	}

	ret = eqos_set_tx_clk_speed_tegra186(dev);
	if (ret < 0) {
		pr_err("eqos_set_tx_clk_speed_tegra186() failed: %d", ret);
		return ret;
	}

	return 0;
}

static int eqos_write_hwaddr(struct udevice *dev)
{
	struct eth_pdata *plat = dev_get_platdata(dev);
	struct eqos_priv *eqos = dev_get_priv(dev);
	uint32_t val;

	/*
	 * This function may be called before start() or after stop(). At that
	 * time, on at least some configurations of the EQoS HW, all clocks to
	 * the EQoS HW block will be stopped, and a reset signal applied. If
	 * any register access is attempted in this state, bus timeouts or CPU
	 * hangs may occur. This check prevents that.
	 *
	 * A simple solution to this problem would be to not implement
	 * write_hwaddr(), since start() always writes the MAC address into HW
	 * anyway. However, it is desirable to implement write_hwaddr() to
	 * support the case of SW that runs subsequent to U-Boot which expects
	 * the MAC address to already be programmed into the EQoS registers,
	 * which must happen irrespective of whether the U-Boot user (or
	 * scripts) actually made use of the EQoS device, and hence
	 * irrespective of whether start() was ever called.
	 *
	 * Note that this requirement by subsequent SW is not valid for
	 * Tegra186, and is likely not valid for any non-PCI instantiation of
	 * the EQoS HW block. This function is implemented solely as
	 * future-proofing with the expectation the driver will eventually be
	 * ported to some system where the expectation above is true.
	 */
	if (!eqos->config->reg_access_always_ok && !eqos->reg_access_ok)
		return 0;

	/* Update the MAC address */
	val = (plat->enetaddr[5] << 8) |
	      (plat->enetaddr[4]);
	writel(val, &eqos->mac_regs->address0_high);
	val = (plat->enetaddr[3] << 24) |
	      (plat->enetaddr[2] << 16) |
	      (plat->enetaddr[1] << 8) |
	      (plat->enetaddr[0]);
	writel(val, &eqos->mac_regs->address0_low);

	return 0;
}
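
/*
 * Worked example (hypothetical address): for 00:11:22:33:44:55,
 * address0_high is written with 0x5544 and address0_low with 0x33221100,
 * matching the byte packing above.
 */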

static int eqos_start(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret, i;
	ulong rate;
	u32 val, tx_fifo_sz, rx_fifo_sz, tqs, rqs, pbl;
	ulong last_rx_desc;

	debug("%s(dev=%p):\n", __func__, dev);

	eqos->tx_desc_idx = 0;
	eqos->rx_desc_idx = 0;

	ret = eqos_start_clks_tegra186(dev);
	if (ret < 0) {
		pr_err("eqos_start_clks_tegra186() failed: %d", ret);
		goto err;
	}

	ret = eqos_start_resets_tegra186(dev);
	if (ret < 0) {
		pr_err("eqos_start_resets_tegra186() failed: %d", ret);
		goto err_stop_clks;
	}

	udelay(10);

	eqos->reg_access_ok = true;

	ret = wait_for_bit_le32(&eqos->dma_regs->mode,
				EQOS_DMA_MODE_SWR, false, 10, false);
	if (ret) {
		pr_err("EQOS_DMA_MODE_SWR stuck");
		goto err_stop_resets;
	}

	ret = eqos_calibrate_pads_tegra186(dev);
	if (ret < 0) {
		pr_err("eqos_calibrate_pads_tegra186() failed: %d", ret);
		goto err_stop_resets;
	}

	rate = eqos_get_tick_clk_rate_tegra186(dev);
	val = (rate / 1000000) - 1;
	writel(val, &eqos->mac_regs->us_tic_counter);

	eqos->phy = phy_connect(eqos->mii, 0, dev, 0);
	if (!eqos->phy) {
		pr_err("phy_connect() failed");
		ret = -ENODEV;
		goto err_stop_resets;
	}
	ret = phy_config(eqos->phy);
	if (ret < 0) {
		pr_err("phy_config() failed: %d", ret);
		goto err_shutdown_phy;
	}
	ret = phy_startup(eqos->phy);
	if (ret < 0) {
		pr_err("phy_startup() failed: %d", ret);
		goto err_shutdown_phy;
	}

	if (!eqos->phy->link) {
		pr_err("No link");
		ret = -EAGAIN;
		goto err_shutdown_phy;
	}

	ret = eqos_adjust_link(dev);
	if (ret < 0) {
		pr_err("eqos_adjust_link() failed: %d", ret);
		goto err_shutdown_phy;
	}

	/* Configure MTL */

	/* Enable Store and Forward mode for TX */
	/* Program Tx operating mode */
	setbits_le32(&eqos->mtl_regs->txq0_operation_mode,
		     EQOS_MTL_TXQ0_OPERATION_MODE_TSF |
		     (EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED <<
		      EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT));

	/* Transmit Queue weight */
	writel(0x10, &eqos->mtl_regs->txq0_quantum_weight);

	/* Enable Store and Forward mode for RX, since no jumbo frame */
	setbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
		     EQOS_MTL_RXQ0_OPERATION_MODE_RSF);

	/* Transmit/Receive queue FIFO size; use all RAM for 1 queue */
	val = readl(&eqos->mac_regs->hw_feature1);
	tx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT) &
		     EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK;
	rx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT) &
		     EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK;

	/*
	 * r/tx_fifo_sz is encoded as log2(n / 128). Undo that by shifting.
	 * r/tqs is encoded as (n / 256) - 1.
	 */
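	/*
	 * Worked example (hypothetical FIFO size): a 4KB TX FIFO is
	 * advertised as tx_fifo_sz = log2(4096 / 128) = 5, so 128 << 5
	 * recovers 4096 bytes and tqs becomes (4096 / 256) - 1 = 15.
	 */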
	tqs = (128 << tx_fifo_sz) / 256 - 1;
	rqs = (128 << rx_fifo_sz) / 256 - 1;

	clrsetbits_le32(&eqos->mtl_regs->txq0_operation_mode,
			EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK <<
			EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT,
			tqs << EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT);
	clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
			EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK <<
			EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT,
			rqs << EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT);

	/* Flow control used only if each channel gets 4KB or more FIFO */
	if (rqs >= ((4096 / 256) - 1)) {
		u32 rfd, rfa;

		setbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
			     EQOS_MTL_RXQ0_OPERATION_MODE_EHFC);

		/*
		 * Set Threshold for Activating Flow Control to the space
		 * needed for a minimum of 2 frames (1500 bytes each).
		 *
		 * Set Threshold for Deactivating Flow Control to the space
		 * needed for a minimum of 1 frame in the receive FIFO.
		 */
		if (rqs == ((4096 / 256) - 1)) {
			/*
			 * This violates the above formula because of FIFO
			 * size limit; therefore overflow may occur in spite
			 * of this.
			 */
			rfd = 0x3;	/* Full-3K */
			rfa = 0x1;	/* Full-1.5K */
		} else if (rqs == ((8192 / 256) - 1)) {
			rfd = 0x6;	/* Full-4K */
			rfa = 0xa;	/* Full-6K */
		} else if (rqs == ((16384 / 256) - 1)) {
			rfd = 0x6;	/* Full-4K */
			rfa = 0x12;	/* Full-10K */
		} else {
			rfd = 0x6;	/* Full-4K */
			rfa = 0x1E;	/* Full-16K */
		}

		clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
				(EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) |
				(EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT),
				(rfd <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) |
				(rfa <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT));
	}

	/* Configure MAC */

	clrsetbits_le32(&eqos->mac_regs->rxq_ctrl0,
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK <<
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT,
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB <<
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT);

	/* Set TX flow control parameters */
	/* Set Pause Time */
	setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl,
		     0xffff << EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT);
	/* Assign priority for TX flow control */
	clrbits_le32(&eqos->mac_regs->txq_prty_map0,
		     EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK <<
		     EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT);
	/* Assign priority for RX flow control */
	clrbits_le32(&eqos->mac_regs->rxq_ctrl2,
		     EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK <<
		     EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT);
	/* Enable flow control */
	setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl,
		     EQOS_MAC_Q0_TX_FLOW_CTRL_TFE);
	setbits_le32(&eqos->mac_regs->rx_flow_ctrl,
		     EQOS_MAC_RX_FLOW_CTRL_RFE);

	clrsetbits_le32(&eqos->mac_regs->configuration,
			EQOS_MAC_CONFIGURATION_GPSLCE |
			EQOS_MAC_CONFIGURATION_WD |
			EQOS_MAC_CONFIGURATION_JD |
			EQOS_MAC_CONFIGURATION_JE,
			EQOS_MAC_CONFIGURATION_CST |
			EQOS_MAC_CONFIGURATION_ACS);

	eqos_write_hwaddr(dev);

	/* Configure DMA */

	/* Enable OSP mode */
	setbits_le32(&eqos->dma_regs->ch0_tx_control,
		     EQOS_DMA_CH0_TX_CONTROL_OSP);

	/* RX buffer size. Must be a multiple of bus width */
	clrsetbits_le32(&eqos->dma_regs->ch0_rx_control,
			EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK <<
			EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT,
			EQOS_MAX_PACKET_SIZE <<
			EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT);

	setbits_le32(&eqos->dma_regs->ch0_control,
		     EQOS_DMA_CH0_CONTROL_PBLX8);

	/*
	 * Burst length must be < 1/2 FIFO size.
	 * FIFO size in tqs is encoded as (n / 256) - 1.
	 * Each burst is n * 8 (PBLX8) * 16 (AXI width) == 128 bytes.
	 * Half of n * 256 is n * 128, so pbl == tqs, modulo the -1.
	 */
	pbl = tqs + 1;
	if (pbl > 32)
		pbl = 32;
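	/*
	 * Continuing the hypothetical 4KB FIFO example: tqs = 15 gives
	 * pbl = 16, i.e. bursts of 16 * 8 * 16 = 2048 bytes, exactly half
	 * the FIFO.
	 */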
	clrsetbits_le32(&eqos->dma_regs->ch0_tx_control,
			EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK <<
			EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT,
			pbl << EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT);

	clrsetbits_le32(&eqos->dma_regs->ch0_rx_control,
			EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK <<
			EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT,
			8 << EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT);

	/* DMA performance configuration */
	val = (2 << EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT) |
	      EQOS_DMA_SYSBUS_MODE_EAME | EQOS_DMA_SYSBUS_MODE_BLEN16 |
	      EQOS_DMA_SYSBUS_MODE_BLEN8 | EQOS_DMA_SYSBUS_MODE_BLEN4;
	writel(val, &eqos->dma_regs->sysbus_mode);

	/* Set up descriptors */

	memset(eqos->descs, 0, EQOS_DESCRIPTORS_SIZE);
	for (i = 0; i < EQOS_DESCRIPTORS_RX; i++) {
		struct eqos_desc *rx_desc = &(eqos->rx_descs[i]);

		rx_desc->des0 = (u32)(ulong)(eqos->rx_dma_buf +
					     (i * EQOS_MAX_PACKET_SIZE));
		rx_desc->des3 |= EQOS_DESC3_OWN | EQOS_DESC3_BUF1V;
	}
	flush_cache((unsigned long)eqos->descs, EQOS_DESCRIPTORS_SIZE);

	writel(0, &eqos->dma_regs->ch0_txdesc_list_haddress);
	writel((ulong)eqos->tx_descs, &eqos->dma_regs->ch0_txdesc_list_address);
	writel(EQOS_DESCRIPTORS_TX - 1,
	       &eqos->dma_regs->ch0_txdesc_ring_length);

	writel(0, &eqos->dma_regs->ch0_rxdesc_list_haddress);
	writel((ulong)eqos->rx_descs, &eqos->dma_regs->ch0_rxdesc_list_address);
	writel(EQOS_DESCRIPTORS_RX - 1,
	       &eqos->dma_regs->ch0_rxdesc_ring_length);

	/* Enable everything */

	setbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_TE | EQOS_MAC_CONFIGURATION_RE);

	setbits_le32(&eqos->dma_regs->ch0_tx_control,
		     EQOS_DMA_CH0_TX_CONTROL_ST);
	setbits_le32(&eqos->dma_regs->ch0_rx_control,
		     EQOS_DMA_CH0_RX_CONTROL_SR);

	/* TX tail pointer not written until we need to TX a packet */
	/*
	 * Point RX tail pointer at last descriptor. Ideally, we'd point at
	 * the first descriptor, implying all descriptors were available.
	 * However, that's not distinguishable from none of the descriptors
	 * being available.
	 */
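	/*
	 * With the 4-descriptor RX ring used here, a tail pointing at the
	 * first descriptor could mean either "4 buffers free" or "0 buffers
	 * free"; starting the tail at the last descriptor sidesteps that
	 * ambiguity at the cost of one initially unusable slot.
	 */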
	last_rx_desc = (ulong)&(eqos->rx_descs[(EQOS_DESCRIPTORS_RX - 1)]);
	writel(last_rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer);

	eqos->started = true;

	debug("%s: OK\n", __func__);
	return 0;

err_shutdown_phy:
	phy_shutdown(eqos->phy);
	eqos->phy = NULL;
err_stop_resets:
	eqos_stop_resets_tegra186(dev);
err_stop_clks:
	eqos_stop_clks_tegra186(dev);
err:
	pr_err("FAILED: %d", ret);
	return ret;
}

static void eqos_stop(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int i;

	debug("%s(dev=%p):\n", __func__, dev);

	if (!eqos->started)
		return;
	eqos->started = false;
	eqos->reg_access_ok = false;

	/* Disable TX DMA */
	clrbits_le32(&eqos->dma_regs->ch0_tx_control,
		     EQOS_DMA_CH0_TX_CONTROL_ST);

	/* Wait until all TX packets have drained out of the MTL */
	for (i = 0; i < 1000000; i++) {
		u32 val = readl(&eqos->mtl_regs->txq0_debug);
		u32 trcsts = (val >> EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT) &
			     EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK;
		u32 txqsts = val & EQOS_MTL_TXQ0_DEBUG_TXQSTS;

		if ((trcsts != 1) && (!txqsts))
			break;
	}

	/* Turn off MAC TX and RX */
	clrbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_TE | EQOS_MAC_CONFIGURATION_RE);

	/* Wait until all RX packets have drained out of the MTL */
	for (i = 0; i < 1000000; i++) {
		u32 val = readl(&eqos->mtl_regs->rxq0_debug);
		u32 prxq = (val >> EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT) &
			   EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK;
		u32 rxqsts = (val >> EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT) &
			     EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK;

		if ((!prxq) && (!rxqsts))
			break;
	}

	/* Turn off RX DMA */
	clrbits_le32(&eqos->dma_regs->ch0_rx_control,
		     EQOS_DMA_CH0_RX_CONTROL_SR);

	if (eqos->phy) {
		phy_shutdown(eqos->phy);
		eqos->phy = NULL;
	}
	eqos_stop_resets_tegra186(dev);
	eqos_stop_clks_tegra186(dev);

	debug("%s: OK\n", __func__);
}
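
/*
 * Note on the transmit path below: each frame is copied into the single
 * tx_dma_buf bounce buffer and the driver polls until the hardware clears
 * the OWN bit, so only one transmit is ever in flight even though the ring
 * has EQOS_DESCRIPTORS_TX entries.
 */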

static int eqos_send(struct udevice *dev, void *packet, int length)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	struct eqos_desc *tx_desc;
	int i;

	debug("%s(dev=%p, packet=%p, length=%d):\n", __func__, dev, packet,
	      length);

	memcpy(eqos->tx_dma_buf, packet, length);
	eqos_flush_buffer(eqos->tx_dma_buf, length);

	tx_desc = &(eqos->tx_descs[eqos->tx_desc_idx]);
	eqos->tx_desc_idx++;
	eqos->tx_desc_idx %= EQOS_DESCRIPTORS_TX;

	tx_desc->des0 = (ulong)eqos->tx_dma_buf;
	tx_desc->des1 = 0;
	tx_desc->des2 = length;
	/*
	 * Make sure that if HW sees the _OWN write below, it will see all the
	 * writes to the rest of the descriptor too.
	 */
	mb();
	tx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_FD | EQOS_DESC3_LD | length;
	eqos_flush_desc(tx_desc);

	writel((ulong)(tx_desc + 1), &eqos->dma_regs->ch0_txdesc_tail_pointer);

	for (i = 0; i < 1000000; i++) {
		eqos_inval_desc(tx_desc);
		if (!(readl(&tx_desc->des3) & EQOS_DESC3_OWN))
			return 0;
		udelay(1);
	}

	debug("%s: TX timeout\n", __func__);

	return -ETIMEDOUT;
}

static int eqos_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	struct eqos_desc *rx_desc;
	int length;

	debug("%s(dev=%p, flags=%x):\n", __func__, dev, flags);

	rx_desc = &(eqos->rx_descs[eqos->rx_desc_idx]);
	/* Pick up any descriptor write-back from the hardware */
	eqos_inval_desc(rx_desc);
	if (rx_desc->des3 & EQOS_DESC3_OWN) {
		debug("%s: RX packet not available\n", __func__);
		return -EAGAIN;
	}

	*packetp = eqos->rx_dma_buf +
		   (eqos->rx_desc_idx * EQOS_MAX_PACKET_SIZE);
	length = rx_desc->des3 & 0x7fff;
	debug("%s: *packetp=%p, length=%d\n", __func__, *packetp, length);

	eqos_inval_buffer(*packetp, length);

	return length;
}
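
/*
 * eqos_free_pkt() below is the counterpart of eqos_recv(): the U-Boot net
 * core calls recv(), processes the returned packet, then calls free_pkt()
 * so the buffer can be handed back to the hardware via the OWN bit and the
 * RX tail pointer.
 */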

static int eqos_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	uchar *packet_expected;
	struct eqos_desc *rx_desc;

	debug("%s(packet=%p, length=%d)\n", __func__, packet, length);

	packet_expected = eqos->rx_dma_buf +
			  (eqos->rx_desc_idx * EQOS_MAX_PACKET_SIZE);
	if (packet != packet_expected) {
		debug("%s: Unexpected packet (expected %p)\n", __func__,
		      packet_expected);
		return -EINVAL;
	}

	rx_desc = &(eqos->rx_descs[eqos->rx_desc_idx]);
	rx_desc->des0 = (u32)(ulong)packet;
	rx_desc->des1 = 0;
	rx_desc->des2 = 0;
	/*
	 * Make sure that if HW sees the _OWN write below, it will see all the
	 * writes to the rest of the descriptor too.
	 */
	mb();
	rx_desc->des3 |= EQOS_DESC3_OWN | EQOS_DESC3_BUF1V;
	eqos_flush_desc(rx_desc);

	writel((ulong)rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer);

	eqos->rx_desc_idx++;
	eqos->rx_desc_idx %= EQOS_DESCRIPTORS_RX;

	return 0;
}

static int eqos_probe_resources_core(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	eqos->descs = eqos_alloc_descs(EQOS_DESCRIPTORS_TX +
				       EQOS_DESCRIPTORS_RX);
	if (!eqos->descs) {
		debug("%s: eqos_alloc_descs() failed\n", __func__);
		ret = -ENOMEM;
		goto err;
	}
	eqos->tx_descs = (struct eqos_desc *)eqos->descs;
	eqos->rx_descs = (eqos->tx_descs + EQOS_DESCRIPTORS_TX);
	debug("%s: tx_descs=%p, rx_descs=%p\n", __func__, eqos->tx_descs,
	      eqos->rx_descs);
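	/*
	 * Layout note: with 4 + 4 descriptors of 16 bytes each, tx_descs
	 * occupy the first 64 bytes of the allocation and rx_descs the
	 * next 64.
	 */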
	eqos->tx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_MAX_PACKET_SIZE);
	if (!eqos->tx_dma_buf) {
		debug("%s: memalign(tx_dma_buf) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_descs;
	}
	debug("%s: tx_dma_buf=%p\n", __func__, eqos->tx_dma_buf);

	eqos->rx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_RX_BUFFER_SIZE);
	if (!eqos->rx_dma_buf) {
		debug("%s: memalign(rx_dma_buf) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_tx_dma_buf;
	}
	debug("%s: rx_dma_buf=%p\n", __func__, eqos->rx_dma_buf);

	eqos->rx_pkt = malloc(EQOS_MAX_PACKET_SIZE);
	if (!eqos->rx_pkt) {
		debug("%s: malloc(rx_pkt) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_rx_dma_buf;
	}
	debug("%s: rx_pkt=%p\n", __func__, eqos->rx_pkt);

	debug("%s: OK\n", __func__);
	return 0;

err_free_rx_dma_buf:
	free(eqos->rx_dma_buf);
err_free_tx_dma_buf:
	free(eqos->tx_dma_buf);
err_free_descs:
	eqos_free_descs(eqos->descs);
err:
	debug("%s: returns %d\n", __func__, ret);
	return ret;
}

static int eqos_remove_resources_core(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	free(eqos->rx_pkt);
	free(eqos->rx_dma_buf);
	free(eqos->tx_dma_buf);
	eqos_free_descs(eqos->descs);

	debug("%s: OK\n", __func__);
	return 0;
}

static int eqos_probe_resources_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = reset_get_by_name(dev, "eqos", &eqos->reset_ctl);
	if (ret) {
		pr_err("reset_get_by_name(rst) failed: %d", ret);
		return ret;
	}

	ret = gpio_request_by_name(dev, "phy-reset-gpios", 0,
				   &eqos->phy_reset_gpio,
				   GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE);
	if (ret) {
		pr_err("gpio_request_by_name(phy reset) failed: %d", ret);
		goto err_free_reset_eqos;
	}

	ret = clk_get_by_name(dev, "slave_bus", &eqos->clk_slave_bus);
	if (ret) {
		pr_err("clk_get_by_name(slave_bus) failed: %d", ret);
		goto err_free_gpio_phy_reset;
	}

	ret = clk_get_by_name(dev, "master_bus", &eqos->clk_master_bus);
	if (ret) {
		pr_err("clk_get_by_name(master_bus) failed: %d", ret);
		goto err_free_clk_slave_bus;
	}

	ret = clk_get_by_name(dev, "rx", &eqos->clk_rx);
	if (ret) {
		pr_err("clk_get_by_name(rx) failed: %d", ret);
		goto err_free_clk_master_bus;
	}

	ret = clk_get_by_name(dev, "ptp_ref", &eqos->clk_ptp_ref);
	if (ret) {
		pr_err("clk_get_by_name(ptp_ref) failed: %d", ret);
		goto err_free_clk_rx;
	}

	ret = clk_get_by_name(dev, "tx", &eqos->clk_tx);
	if (ret) {
		pr_err("clk_get_by_name(tx) failed: %d", ret);
		goto err_free_clk_ptp_ref;
	}

	debug("%s: OK\n", __func__);
	return 0;

err_free_clk_ptp_ref:
	clk_free(&eqos->clk_ptp_ref);
err_free_clk_rx:
	clk_free(&eqos->clk_rx);
err_free_clk_master_bus:
	clk_free(&eqos->clk_master_bus);
err_free_clk_slave_bus:
	clk_free(&eqos->clk_slave_bus);
err_free_gpio_phy_reset:
	dm_gpio_free(dev, &eqos->phy_reset_gpio);
err_free_reset_eqos:
	reset_free(&eqos->reset_ctl);

	debug("%s: returns %d\n", __func__, ret);
	return ret;
}

static int eqos_remove_resources_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clk_free(&eqos->clk_tx);
	clk_free(&eqos->clk_ptp_ref);
	clk_free(&eqos->clk_rx);
	clk_free(&eqos->clk_slave_bus);
	clk_free(&eqos->clk_master_bus);
	dm_gpio_free(dev, &eqos->phy_reset_gpio);
	reset_free(&eqos->reset_ctl);

	debug("%s: OK\n", __func__);
	return 0;
}

static int eqos_probe(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	eqos->dev = dev;
	eqos->config = (void *)dev_get_driver_data(dev);

	eqos->regs = devfdt_get_addr(dev);
	if (eqos->regs == FDT_ADDR_T_NONE) {
		pr_err("devfdt_get_addr() failed");
		return -ENODEV;
	}
	eqos->mac_regs = (void *)(eqos->regs + EQOS_MAC_REGS_BASE);
	eqos->mtl_regs = (void *)(eqos->regs + EQOS_MTL_REGS_BASE);
	eqos->dma_regs = (void *)(eqos->regs + EQOS_DMA_REGS_BASE);
	eqos->tegra186_regs = (void *)(eqos->regs + EQOS_TEGRA186_REGS_BASE);

	ret = eqos_probe_resources_core(dev);
	if (ret < 0) {
		pr_err("eqos_probe_resources_core() failed: %d", ret);
		return ret;
	}

	ret = eqos_probe_resources_tegra186(dev);
	if (ret < 0) {
		pr_err("eqos_probe_resources_tegra186() failed: %d", ret);
		goto err_remove_resources_core;
	}

	eqos->mii = mdio_alloc();
	if (!eqos->mii) {
		pr_err("mdio_alloc() failed");
		ret = -ENOMEM;
		goto err_remove_resources_tegra;
	}
	eqos->mii->read = eqos_mdio_read;
	eqos->mii->write = eqos_mdio_write;
	eqos->mii->priv = eqos;
	strcpy(eqos->mii->name, dev->name);

	ret = mdio_register(eqos->mii);
	if (ret < 0) {
		pr_err("mdio_register() failed: %d", ret);
		goto err_free_mdio;
	}

	debug("%s: OK\n", __func__);
	return 0;

err_free_mdio:
	mdio_free(eqos->mii);
err_remove_resources_tegra:
	eqos_remove_resources_tegra186(dev);
err_remove_resources_core:
	eqos_remove_resources_core(dev);

	debug("%s: returns %d\n", __func__, ret);
	return ret;
}

static int eqos_remove(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	mdio_unregister(eqos->mii);
	mdio_free(eqos->mii);
	eqos_remove_resources_tegra186(dev);
	eqos_remove_resources_core(dev);

	debug("%s: OK\n", __func__);
	return 0;
}
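
/*
 * Illustrative device-tree fragment matching the resource lookups above
 * (node name and addresses are hypothetical; the string names are the ones
 * this driver requests):
 *
 *	ethernet@2490000 {
 *		compatible = "nvidia,tegra186-eqos";
 *		reg = <...>;
 *		clock-names = "slave_bus", "master_bus", "rx", "ptp_ref", "tx";
 *		reset-names = "eqos";
 *		phy-reset-gpios = <...>;
 *	};
 */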

static const struct eth_ops eqos_ops = {
	.start = eqos_start,
	.stop = eqos_stop,
	.send = eqos_send,
	.recv = eqos_recv,
	.free_pkt = eqos_free_pkt,
	.write_hwaddr = eqos_write_hwaddr,
};

static const struct eqos_config eqos_tegra186_config = {
	.reg_access_always_ok = false,
};

static const struct udevice_id eqos_ids[] = {
	{
		.compatible = "nvidia,tegra186-eqos",
		.data = (ulong)&eqos_tegra186_config
	},
	{ }
};

U_BOOT_DRIVER(eth_eqos) = {
	.name = "eth_eqos",
	.id = UCLASS_ETH,
	.of_match = eqos_ids,
	.probe = eqos_probe,
	.remove = eqos_remove,
	.ops = &eqos_ops,
	.priv_auto_alloc_size = sizeof(struct eqos_priv),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
};