1 /* 2 * DaVinci MDIO Module driver 3 * 4 * Copyright (C) 2010 Texas Instruments. 5 * 6 * Shamelessly ripped out of davinci_emac.c, original copyrights follow: 7 * 8 * Copyright (C) 2009 Texas Instruments. 9 * 10 * --------------------------------------------------------------------------- 11 * 12 * This program is free software; you can redistribute it and/or modify 13 * it under the terms of the GNU General Public License as published by 14 * the Free Software Foundation; either version 2 of the License, or 15 * (at your option) any later version. 16 * 17 * This program is distributed in the hope that it will be useful, 18 * but WITHOUT ANY WARRANTY; without even the implied warranty of 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 * GNU General Public License for more details. 21 * 22 * You should have received a copy of the GNU General Public License 23 * along with this program; if not, write to the Free Software 24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 25 * --------------------------------------------------------------------------- 26 */ 27 #include <linux/module.h> 28 #include <linux/kernel.h> 29 #include <linux/platform_device.h> 30 #include <linux/delay.h> 31 #include <linux/sched.h> 32 #include <linux/slab.h> 33 #include <linux/phy.h> 34 #include <linux/clk.h> 35 #include <linux/err.h> 36 #include <linux/io.h> 37 #include <linux/pm_runtime.h> 38 #include <linux/davinci_emac.h> 39 #include <linux/of.h> 40 #include <linux/of_device.h> 41 #include <linux/of_mdio.h> 42 #include <linux/pinctrl/consumer.h> 43 44 /* 45 * This timeout definition is a worst-case ultra defensive measure against 46 * unexpected controller lock ups. Ideally, we should never ever hit this 47 * scenario in practice. 48 */ 49 #define MDIO_TIMEOUT 100 /* msecs */ 50 51 #define PHY_REG_MASK 0x1f 52 #define PHY_ID_MASK 0x1f 53 54 #define DEF_OUT_FREQ 2200000 /* 2.2 MHz */ 55 56 struct davinci_mdio_regs { 57 u32 version; 58 u32 control; 59 #define CONTROL_IDLE BIT(31) 60 #define CONTROL_ENABLE BIT(30) 61 #define CONTROL_MAX_DIV (0xffff) 62 63 u32 alive; 64 u32 link; 65 u32 linkintraw; 66 u32 linkintmasked; 67 u32 __reserved_0[2]; 68 u32 userintraw; 69 u32 userintmasked; 70 u32 userintmaskset; 71 u32 userintmaskclr; 72 u32 __reserved_1[20]; 73 74 struct { 75 u32 access; 76 #define USERACCESS_GO BIT(31) 77 #define USERACCESS_WRITE BIT(30) 78 #define USERACCESS_ACK BIT(29) 79 #define USERACCESS_READ (0) 80 #define USERACCESS_DATA (0xffff) 81 82 u32 physel; 83 } user[0]; 84 }; 85 86 static const struct mdio_platform_data default_pdata = { 87 .bus_freq = DEF_OUT_FREQ, 88 }; 89 90 struct davinci_mdio_data { 91 struct mdio_platform_data pdata; 92 struct davinci_mdio_regs __iomem *regs; 93 spinlock_t lock; 94 struct clk *clk; 95 struct device *dev; 96 struct mii_bus *bus; 97 bool suspended; 98 unsigned long access_time; /* jiffies */ 99 /* Indicates that driver shouldn't modify phy_mask in case 100 * if MDIO bus is registered from DT. 101 */ 102 bool skip_scan; 103 }; 104 105 static void __davinci_mdio_reset(struct davinci_mdio_data *data) 106 { 107 u32 mdio_in, div, mdio_out_khz, access_time; 108 109 mdio_in = clk_get_rate(data->clk); 110 div = (mdio_in / data->pdata.bus_freq) - 1; 111 if (div > CONTROL_MAX_DIV) 112 div = CONTROL_MAX_DIV; 113 114 /* set enable and clock divider */ 115 __raw_writel(div | CONTROL_ENABLE, &data->regs->control); 116 117 /* 118 * One mdio transaction consists of: 119 * 32 bits of preamble 120 * 32 bits of transferred data 121 * 24 bits of bus yield (not needed unless shared?) 122 */ 123 mdio_out_khz = mdio_in / (1000 * (div + 1)); 124 access_time = (88 * 1000) / mdio_out_khz; 125 126 /* 127 * In the worst case, we could be kicking off a user-access immediately 128 * after the mdio bus scan state-machine triggered its own read. If 129 * so, our request could get deferred by one access cycle. We 130 * defensively allow for 4 access cycles. 131 */ 132 data->access_time = usecs_to_jiffies(access_time * 4); 133 if (!data->access_time) 134 data->access_time = 1; 135 } 136 137 static int davinci_mdio_reset(struct mii_bus *bus) 138 { 139 struct davinci_mdio_data *data = bus->priv; 140 u32 phy_mask, ver; 141 142 __davinci_mdio_reset(data); 143 144 /* wait for scan logic to settle */ 145 msleep(PHY_MAX_ADDR * data->access_time); 146 147 /* dump hardware version info */ 148 ver = __raw_readl(&data->regs->version); 149 dev_info(data->dev, "davinci mdio revision %d.%d\n", 150 (ver >> 8) & 0xff, ver & 0xff); 151 152 if (data->skip_scan) 153 return 0; 154 155 /* get phy mask from the alive register */ 156 phy_mask = __raw_readl(&data->regs->alive); 157 if (phy_mask) { 158 /* restrict mdio bus to live phys only */ 159 dev_info(data->dev, "detected phy mask %x\n", ~phy_mask); 160 phy_mask = ~phy_mask; 161 } else { 162 /* desperately scan all phys */ 163 dev_warn(data->dev, "no live phy, scanning all\n"); 164 phy_mask = 0; 165 } 166 data->bus->phy_mask = phy_mask; 167 168 return 0; 169 } 170 171 /* wait until hardware is ready for another user access */ 172 static inline int wait_for_user_access(struct davinci_mdio_data *data) 173 { 174 struct davinci_mdio_regs __iomem *regs = data->regs; 175 unsigned long timeout = jiffies + msecs_to_jiffies(MDIO_TIMEOUT); 176 u32 reg; 177 178 while (time_after(timeout, jiffies)) { 179 reg = __raw_readl(®s->user[0].access); 180 if ((reg & USERACCESS_GO) == 0) 181 return 0; 182 183 reg = __raw_readl(®s->control); 184 if ((reg & CONTROL_IDLE) == 0) 185 continue; 186 187 /* 188 * An emac soft_reset may have clobbered the mdio controller's 189 * state machine. We need to reset and retry the current 190 * operation 191 */ 192 dev_warn(data->dev, "resetting idled controller\n"); 193 __davinci_mdio_reset(data); 194 return -EAGAIN; 195 } 196 197 reg = __raw_readl(®s->user[0].access); 198 if ((reg & USERACCESS_GO) == 0) 199 return 0; 200 201 dev_err(data->dev, "timed out waiting for user access\n"); 202 return -ETIMEDOUT; 203 } 204 205 /* wait until hardware state machine is idle */ 206 static inline int wait_for_idle(struct davinci_mdio_data *data) 207 { 208 struct davinci_mdio_regs __iomem *regs = data->regs; 209 unsigned long timeout = jiffies + msecs_to_jiffies(MDIO_TIMEOUT); 210 211 while (time_after(timeout, jiffies)) { 212 if (__raw_readl(®s->control) & CONTROL_IDLE) 213 return 0; 214 } 215 dev_err(data->dev, "timed out waiting for idle\n"); 216 return -ETIMEDOUT; 217 } 218 219 static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg) 220 { 221 struct davinci_mdio_data *data = bus->priv; 222 u32 reg; 223 int ret; 224 225 if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK) 226 return -EINVAL; 227 228 spin_lock(&data->lock); 229 230 if (data->suspended) { 231 spin_unlock(&data->lock); 232 return -ENODEV; 233 } 234 235 reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) | 236 (phy_id << 16)); 237 238 while (1) { 239 ret = wait_for_user_access(data); 240 if (ret == -EAGAIN) 241 continue; 242 if (ret < 0) 243 break; 244 245 __raw_writel(reg, &data->regs->user[0].access); 246 247 ret = wait_for_user_access(data); 248 if (ret == -EAGAIN) 249 continue; 250 if (ret < 0) 251 break; 252 253 reg = __raw_readl(&data->regs->user[0].access); 254 ret = (reg & USERACCESS_ACK) ? (reg & USERACCESS_DATA) : -EIO; 255 break; 256 } 257 258 spin_unlock(&data->lock); 259 260 return ret; 261 } 262 263 static int davinci_mdio_write(struct mii_bus *bus, int phy_id, 264 int phy_reg, u16 phy_data) 265 { 266 struct davinci_mdio_data *data = bus->priv; 267 u32 reg; 268 int ret; 269 270 if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK) 271 return -EINVAL; 272 273 spin_lock(&data->lock); 274 275 if (data->suspended) { 276 spin_unlock(&data->lock); 277 return -ENODEV; 278 } 279 280 reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) | 281 (phy_id << 16) | (phy_data & USERACCESS_DATA)); 282 283 while (1) { 284 ret = wait_for_user_access(data); 285 if (ret == -EAGAIN) 286 continue; 287 if (ret < 0) 288 break; 289 290 __raw_writel(reg, &data->regs->user[0].access); 291 292 ret = wait_for_user_access(data); 293 if (ret == -EAGAIN) 294 continue; 295 break; 296 } 297 298 spin_unlock(&data->lock); 299 300 return 0; 301 } 302 303 #if IS_ENABLED(CONFIG_OF) 304 static int davinci_mdio_probe_dt(struct mdio_platform_data *data, 305 struct platform_device *pdev) 306 { 307 struct device_node *node = pdev->dev.of_node; 308 u32 prop; 309 310 if (!node) 311 return -EINVAL; 312 313 if (of_property_read_u32(node, "bus_freq", &prop)) { 314 dev_err(&pdev->dev, "Missing bus_freq property in the DT.\n"); 315 return -EINVAL; 316 } 317 data->bus_freq = prop; 318 319 return 0; 320 } 321 #endif 322 323 static int davinci_mdio_probe(struct platform_device *pdev) 324 { 325 struct mdio_platform_data *pdata = dev_get_platdata(&pdev->dev); 326 struct device *dev = &pdev->dev; 327 struct davinci_mdio_data *data; 328 struct resource *res; 329 struct phy_device *phy; 330 int ret, addr; 331 332 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); 333 if (!data) 334 return -ENOMEM; 335 336 data->bus = devm_mdiobus_alloc(dev); 337 if (!data->bus) { 338 dev_err(dev, "failed to alloc mii bus\n"); 339 return -ENOMEM; 340 } 341 342 if (dev->of_node) { 343 if (davinci_mdio_probe_dt(&data->pdata, pdev)) 344 data->pdata = default_pdata; 345 snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s", pdev->name); 346 } else { 347 data->pdata = pdata ? (*pdata) : default_pdata; 348 snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x", 349 pdev->name, pdev->id); 350 } 351 352 data->bus->name = dev_name(dev); 353 data->bus->read = davinci_mdio_read, 354 data->bus->write = davinci_mdio_write, 355 data->bus->reset = davinci_mdio_reset, 356 data->bus->parent = dev; 357 data->bus->priv = data; 358 359 pm_runtime_enable(&pdev->dev); 360 pm_runtime_get_sync(&pdev->dev); 361 data->clk = devm_clk_get(dev, "fck"); 362 if (IS_ERR(data->clk)) { 363 dev_err(dev, "failed to get device clock\n"); 364 ret = PTR_ERR(data->clk); 365 data->clk = NULL; 366 goto bail_out; 367 } 368 369 dev_set_drvdata(dev, data); 370 data->dev = dev; 371 spin_lock_init(&data->lock); 372 373 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 374 data->regs = devm_ioremap_resource(dev, res); 375 if (IS_ERR(data->regs)) { 376 ret = PTR_ERR(data->regs); 377 goto bail_out; 378 } 379 380 /* register the mii bus 381 * Create PHYs from DT only in case if PHY child nodes are explicitly 382 * defined to support backward compatibility with DTs which assume that 383 * Davinci MDIO will always scan the bus for PHYs detection. 384 */ 385 if (dev->of_node && of_get_child_count(dev->of_node)) { 386 data->skip_scan = true; 387 ret = of_mdiobus_register(data->bus, dev->of_node); 388 } else { 389 ret = mdiobus_register(data->bus); 390 } 391 if (ret) 392 goto bail_out; 393 394 /* scan and dump the bus */ 395 for (addr = 0; addr < PHY_MAX_ADDR; addr++) { 396 phy = data->bus->phy_map[addr]; 397 if (phy) { 398 dev_info(dev, "phy[%d]: device %s, driver %s\n", 399 phy->addr, dev_name(&phy->dev), 400 phy->drv ? phy->drv->name : "unknown"); 401 } 402 } 403 404 return 0; 405 406 bail_out: 407 pm_runtime_put_sync(&pdev->dev); 408 pm_runtime_disable(&pdev->dev); 409 410 return ret; 411 } 412 413 static int davinci_mdio_remove(struct platform_device *pdev) 414 { 415 struct davinci_mdio_data *data = platform_get_drvdata(pdev); 416 417 if (data->bus) 418 mdiobus_unregister(data->bus); 419 420 pm_runtime_put_sync(&pdev->dev); 421 pm_runtime_disable(&pdev->dev); 422 423 return 0; 424 } 425 426 static int davinci_mdio_suspend(struct device *dev) 427 { 428 struct davinci_mdio_data *data = dev_get_drvdata(dev); 429 u32 ctrl; 430 431 spin_lock(&data->lock); 432 433 /* shutdown the scan state machine */ 434 ctrl = __raw_readl(&data->regs->control); 435 ctrl &= ~CONTROL_ENABLE; 436 __raw_writel(ctrl, &data->regs->control); 437 wait_for_idle(data); 438 439 data->suspended = true; 440 spin_unlock(&data->lock); 441 pm_runtime_put_sync(data->dev); 442 443 /* Select sleep pin state */ 444 pinctrl_pm_select_sleep_state(dev); 445 446 return 0; 447 } 448 449 static int davinci_mdio_resume(struct device *dev) 450 { 451 struct davinci_mdio_data *data = dev_get_drvdata(dev); 452 453 /* Select default pin state */ 454 pinctrl_pm_select_default_state(dev); 455 456 pm_runtime_get_sync(data->dev); 457 458 spin_lock(&data->lock); 459 /* restart the scan state machine */ 460 __davinci_mdio_reset(data); 461 462 data->suspended = false; 463 spin_unlock(&data->lock); 464 465 return 0; 466 } 467 468 static const struct dev_pm_ops davinci_mdio_pm_ops = { 469 .suspend_late = davinci_mdio_suspend, 470 .resume_early = davinci_mdio_resume, 471 }; 472 473 #if IS_ENABLED(CONFIG_OF) 474 static const struct of_device_id davinci_mdio_of_mtable[] = { 475 { .compatible = "ti,davinci_mdio", }, 476 { /* sentinel */ }, 477 }; 478 MODULE_DEVICE_TABLE(of, davinci_mdio_of_mtable); 479 #endif 480 481 static struct platform_driver davinci_mdio_driver = { 482 .driver = { 483 .name = "davinci_mdio", 484 .owner = THIS_MODULE, 485 .pm = &davinci_mdio_pm_ops, 486 .of_match_table = of_match_ptr(davinci_mdio_of_mtable), 487 }, 488 .probe = davinci_mdio_probe, 489 .remove = davinci_mdio_remove, 490 }; 491 492 static int __init davinci_mdio_init(void) 493 { 494 return platform_driver_register(&davinci_mdio_driver); 495 } 496 device_initcall(davinci_mdio_init); 497 498 static void __exit davinci_mdio_exit(void) 499 { 500 platform_driver_unregister(&davinci_mdio_driver); 501 } 502 module_exit(davinci_mdio_exit); 503 504 MODULE_LICENSE("GPL"); 505 MODULE_DESCRIPTION("DaVinci MDIO driver"); 506