/*
 * Driver for the MDIO interface of Marvell network interfaces.
 *
 * Since the MDIO interface of Marvell network interfaces is shared
 * between all network interfaces, having a single driver allows to
 * handle concurrent accesses properly (you may have four Ethernet
 * ports, but they in fact share the same SMI interface to access
 * the MDIO bus). This driver is currently used by the mvneta and
 * mv643xx_eth drivers.
 *
 * Copyright (C) 2012 Marvell
 *
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/wait.h>

#define MVMDIO_SMI_DATA_SHIFT		0
#define MVMDIO_SMI_PHY_ADDR_SHIFT	16
#define MVMDIO_SMI_PHY_REG_SHIFT	21
#define MVMDIO_SMI_READ_OPERATION	BIT(26)
#define MVMDIO_SMI_WRITE_OPERATION	0
#define MVMDIO_SMI_READ_VALID		BIT(27)
#define MVMDIO_SMI_BUSY			BIT(28)
#define MVMDIO_ERR_INT_CAUSE		0x007C
#define  MVMDIO_ERR_INT_SMI_DONE	0x00000010
#define MVMDIO_ERR_INT_MASK		0x0080

#define MVMDIO_XSMI_MGNT_REG		0x0
#define  MVMDIO_XSMI_PHYADDR_SHIFT	16
#define  MVMDIO_XSMI_DEVADDR_SHIFT	21
#define  MVMDIO_XSMI_WRITE_OPERATION	(0x5 << 26)
#define  MVMDIO_XSMI_READ_OPERATION	(0x7 << 26)
#define  MVMDIO_XSMI_READ_VALID		BIT(29)
#define  MVMDIO_XSMI_BUSY		BIT(30)
#define MVMDIO_XSMI_ADDR_REG		0x8

/*
 * SMI Timeout measurements:
 * - Kirkwood 88F6281 (Globalscale Dreamplug): 45us to 95us (Interrupt)
 * - Armada 370 (Globalscale Mirabox): 41us to 43us (Polled)
 */
#define MVMDIO_SMI_TIMEOUT		1000 /* 1000us = 1ms */
#define MVMDIO_SMI_POLL_INTERVAL_MIN	45
#define MVMDIO_SMI_POLL_INTERVAL_MAX	55

#define MVMDIO_XSMI_POLL_INTERVAL_MIN	150
#define MVMDIO_XSMI_POLL_INTERVAL_MAX	160

struct orion_mdio_dev {
	void __iomem *regs;
	struct clk *clk[4];
	/*
	 * If we have access to the error interrupt pin (which is
	 * somewhat misnamed as it not only reflects internal errors
	 * but also reflects SMI completion), use that to wait for
	 * SMI access completion instead of polling the SMI busy bit.
	 */
	int err_interrupt;
	wait_queue_head_t smi_busy_wait;
};

enum orion_mdio_bus_type {
	BUS_TYPE_SMI,
	BUS_TYPE_XSMI
};

struct orion_mdio_ops {
	int (*is_done)(struct orion_mdio_dev *);
	unsigned int poll_interval_min;
	unsigned int poll_interval_max;
};

/* Wait for the SMI unit to be ready for another operation
 */
static int orion_mdio_wait_ready(const struct orion_mdio_ops *ops,
				 struct mii_bus *bus)
{
	struct orion_mdio_dev *dev = bus->priv;
	unsigned long timeout = usecs_to_jiffies(MVMDIO_SMI_TIMEOUT);
	unsigned long end = jiffies + timeout;
	int timedout = 0;

	while (1) {
		if (ops->is_done(dev))
			return 0;
		else if (timedout)
			break;

		if (dev->err_interrupt <= 0) {
			usleep_range(ops->poll_interval_min,
				     ops->poll_interval_max);

			if (time_is_before_jiffies(end))
				++timedout;
		} else {
			/* wait_event_timeout does not guarantee a delay of at
			 * least one whole jiffie, so timeout must be no less
			 * than two.
			 */
			if (timeout < 2)
				timeout = 2;
			wait_event_timeout(dev->smi_busy_wait,
					   ops->is_done(dev), timeout);

			++timedout;
		}
	}

	dev_err(bus->parent, "Timeout: SMI busy for too long\n");
	return -ETIMEDOUT;
}

static int orion_mdio_smi_is_done(struct orion_mdio_dev *dev)
{
	return !(readl(dev->regs) & MVMDIO_SMI_BUSY);
}

static const struct orion_mdio_ops orion_mdio_smi_ops = {
	.is_done = orion_mdio_smi_is_done,
	.poll_interval_min = MVMDIO_SMI_POLL_INTERVAL_MIN,
	.poll_interval_max = MVMDIO_SMI_POLL_INTERVAL_MAX,
};

static int orion_mdio_smi_read(struct mii_bus *bus, int mii_id,
			       int regnum)
{
	struct orion_mdio_dev *dev = bus->priv;
	u32 val;
	int ret;

	if (regnum & MII_ADDR_C45)
		return -EOPNOTSUPP;

	ret = orion_mdio_wait_ready(&orion_mdio_smi_ops, bus);
	if (ret < 0)
		return ret;

	writel(((mii_id << MVMDIO_SMI_PHY_ADDR_SHIFT) |
		(regnum << MVMDIO_SMI_PHY_REG_SHIFT) |
		MVMDIO_SMI_READ_OPERATION),
	       dev->regs);

	ret = orion_mdio_wait_ready(&orion_mdio_smi_ops, bus);
	if (ret < 0)
		return ret;

	val = readl(dev->regs);
	if (!(val & MVMDIO_SMI_READ_VALID)) {
		dev_err(bus->parent, "SMI bus read not valid\n");
		return -ENODEV;
	}

	return val & GENMASK(15, 0);
}

static int orion_mdio_smi_write(struct mii_bus *bus, int mii_id,
				int regnum, u16 value)
{
	struct orion_mdio_dev *dev = bus->priv;
	int ret;

	if (regnum & MII_ADDR_C45)
		return -EOPNOTSUPP;

	ret = orion_mdio_wait_ready(&orion_mdio_smi_ops, bus);
	if (ret < 0)
		return ret;

	writel(((mii_id << MVMDIO_SMI_PHY_ADDR_SHIFT) |
		(regnum << MVMDIO_SMI_PHY_REG_SHIFT) |
		MVMDIO_SMI_WRITE_OPERATION |
		(value << MVMDIO_SMI_DATA_SHIFT)),
	       dev->regs);

	return 0;
}

static int orion_mdio_xsmi_is_done(struct orion_mdio_dev *dev)
{
	return !(readl(dev->regs + MVMDIO_XSMI_MGNT_REG) & MVMDIO_XSMI_BUSY);
}

static const struct orion_mdio_ops orion_mdio_xsmi_ops = {
	.is_done = orion_mdio_xsmi_is_done,
	.poll_interval_min = MVMDIO_XSMI_POLL_INTERVAL_MIN,
	.poll_interval_max = MVMDIO_XSMI_POLL_INTERVAL_MAX,
};

static int orion_mdio_xsmi_read(struct mii_bus *bus, int mii_id,
				int regnum)
{
	struct orion_mdio_dev *dev = bus->priv;
	u16 dev_addr = (regnum >> 16) & GENMASK(4, 0);
	int ret;

	if (!(regnum & MII_ADDR_C45))
		return -EOPNOTSUPP;

	ret = orion_mdio_wait_ready(&orion_mdio_xsmi_ops, bus);
	if (ret < 0)
		return ret;

	writel(regnum & GENMASK(15, 0), dev->regs + MVMDIO_XSMI_ADDR_REG);
	writel((mii_id << MVMDIO_XSMI_PHYADDR_SHIFT) |
	       (dev_addr << MVMDIO_XSMI_DEVADDR_SHIFT) |
	       MVMDIO_XSMI_READ_OPERATION,
	       dev->regs + MVMDIO_XSMI_MGNT_REG);

	ret = orion_mdio_wait_ready(&orion_mdio_xsmi_ops, bus);
	if (ret < 0)
		return ret;

	if (!(readl(dev->regs + MVMDIO_XSMI_MGNT_REG) &
	      MVMDIO_XSMI_READ_VALID)) {
		dev_err(bus->parent, "XSMI bus read not valid\n");
		return -ENODEV;
	}

	return readl(dev->regs + MVMDIO_XSMI_MGNT_REG) & GENMASK(15, 0);
}

static int orion_mdio_xsmi_write(struct mii_bus *bus, int mii_id,
				 int regnum, u16 value)
{
	struct orion_mdio_dev *dev = bus->priv;
	u16 dev_addr = (regnum >> 16) & GENMASK(4, 0);
	int ret;

	if (!(regnum & MII_ADDR_C45))
		return -EOPNOTSUPP;

	ret = orion_mdio_wait_ready(&orion_mdio_xsmi_ops, bus);
	if (ret < 0)
		return ret;

	writel(regnum & GENMASK(15, 0), dev->regs + MVMDIO_XSMI_ADDR_REG);
	writel((mii_id << MVMDIO_XSMI_PHYADDR_SHIFT) |
	       (dev_addr << MVMDIO_XSMI_DEVADDR_SHIFT) |
	       MVMDIO_XSMI_WRITE_OPERATION | value,
	       dev->regs + MVMDIO_XSMI_MGNT_REG);

	return 0;
}

static irqreturn_t orion_mdio_err_irq(int irq, void *dev_id)
{
	struct orion_mdio_dev *dev = dev_id;

	if (readl(dev->regs + MVMDIO_ERR_INT_CAUSE) &
	    MVMDIO_ERR_INT_SMI_DONE) {
		writel(~MVMDIO_ERR_INT_SMI_DONE,
		       dev->regs + MVMDIO_ERR_INT_CAUSE);
		wake_up(&dev->smi_busy_wait);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static int orion_mdio_probe(struct platform_device *pdev)
{
	enum orion_mdio_bus_type type;
	struct resource *r;
	struct mii_bus *bus;
	struct orion_mdio_dev *dev;
	int i, ret;

	type = (enum orion_mdio_bus_type)of_device_get_match_data(&pdev->dev);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(&pdev->dev, "No SMI register address given\n");
		return -ENODEV;
	}

	bus = devm_mdiobus_alloc_size(&pdev->dev,
				      sizeof(struct orion_mdio_dev));
	if (!bus)
		return -ENOMEM;

	switch (type) {
	case BUS_TYPE_SMI:
		bus->read = orion_mdio_smi_read;
		bus->write = orion_mdio_smi_write;
		break;
	case BUS_TYPE_XSMI:
		bus->read = orion_mdio_xsmi_read;
		bus->write = orion_mdio_xsmi_write;
		break;
	}

	bus->name = "orion_mdio_bus";
	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii",
		 dev_name(&pdev->dev));
	bus->parent = &pdev->dev;

	dev = bus->priv;
	dev->regs = devm_ioremap(&pdev->dev, r->start, resource_size(r));
	if (!dev->regs) {
		dev_err(&pdev->dev, "Unable to remap SMI register\n");
		return -ENODEV;
	}

	init_waitqueue_head(&dev->smi_busy_wait);

	if (pdev->dev.of_node) {
		for (i = 0; i < ARRAY_SIZE(dev->clk); i++) {
			dev->clk[i] = of_clk_get(pdev->dev.of_node, i);
			if (PTR_ERR(dev->clk[i]) == -EPROBE_DEFER) {
				ret = -EPROBE_DEFER;
				goto out_clk;
			}
			if (IS_ERR(dev->clk[i]))
				break;
			clk_prepare_enable(dev->clk[i]);
		}

		if (!IS_ERR(of_clk_get(pdev->dev.of_node,
				       ARRAY_SIZE(dev->clk))))
			dev_warn(&pdev->dev,
				 "unsupported number of clocks, limiting to the first "
				 __stringify(ARRAY_SIZE(dev->clk)) "\n");
	} else {
		dev->clk[0] = clk_get(&pdev->dev, NULL);
		if (PTR_ERR(dev->clk[0]) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto out_clk;
		}
		if (!IS_ERR(dev->clk[0]))
			clk_prepare_enable(dev->clk[0]);
	}

	dev->err_interrupt = platform_get_irq_optional(pdev, 0);
	if (dev->err_interrupt > 0 &&
	    resource_size(r) < MVMDIO_ERR_INT_MASK + 4) {
		dev_err(&pdev->dev,
			"disabling interrupt, resource size is too small\n");
		dev->err_interrupt = 0;
	}
	if (dev->err_interrupt > 0) {
		ret = devm_request_irq(&pdev->dev, dev->err_interrupt,
				       orion_mdio_err_irq,
				       IRQF_SHARED, pdev->name, dev);
		if (ret)
			goto out_mdio;

		writel(MVMDIO_ERR_INT_SMI_DONE,
		       dev->regs + MVMDIO_ERR_INT_MASK);

	} else if (dev->err_interrupt == -EPROBE_DEFER) {
		ret = -EPROBE_DEFER;
		goto out_mdio;
	}

	ret = of_mdiobus_register(bus, pdev->dev.of_node);
	if (ret < 0) {
		dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret);
		goto out_mdio;
	}

	platform_set_drvdata(pdev, bus);

	return 0;

out_mdio:
	if (dev->err_interrupt > 0)
		writel(0, dev->regs + MVMDIO_ERR_INT_MASK);

out_clk:
	for (i = 0; i < ARRAY_SIZE(dev->clk); i++) {
		if (IS_ERR(dev->clk[i]))
			break;
		clk_disable_unprepare(dev->clk[i]);
		clk_put(dev->clk[i]);
	}

	return ret;
}

static int orion_mdio_remove(struct platform_device *pdev)
{
	struct mii_bus *bus = platform_get_drvdata(pdev);
	struct orion_mdio_dev *dev = bus->priv;
	int i;

	if (dev->err_interrupt > 0)
		writel(0, dev->regs + MVMDIO_ERR_INT_MASK);
	mdiobus_unregister(bus);

	for (i = 0; i < ARRAY_SIZE(dev->clk); i++) {
		if (IS_ERR(dev->clk[i]))
			break;
		clk_disable_unprepare(dev->clk[i]);
		clk_put(dev->clk[i]);
	}

	return 0;
}

static const struct of_device_id orion_mdio_match[] = {
	{ .compatible = "marvell,orion-mdio", .data = (void *)BUS_TYPE_SMI },
	{ .compatible = "marvell,xmdio", .data = (void *)BUS_TYPE_XSMI },
	{ }
};
MODULE_DEVICE_TABLE(of, orion_mdio_match);

static struct platform_driver orion_mdio_driver = {
	.probe = orion_mdio_probe,
	.remove = orion_mdio_remove,
	.driver = {
		.name = "orion-mdio",
		.of_match_table = orion_mdio_match,
	},
};

module_platform_driver(orion_mdio_driver);

MODULE_DESCRIPTION("Marvell MDIO interface driver");
MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:orion-mdio");