1 /* 2 * Aspeed 24XX/25XX I2C Controller. 3 * 4 * Copyright (C) 2012-2017 ASPEED Technology Inc. 5 * Copyright 2017 IBM Corporation 6 * Copyright 2017 Google, Inc. 7 * 8 * This program is free software; you can redistribute it and/or modify 9 * it under the terms of the GNU General Public License version 2 as 10 * published by the Free Software Foundation. 11 */ 12 13 #include <linux/clk.h> 14 #include <linux/completion.h> 15 #include <linux/err.h> 16 #include <linux/errno.h> 17 #include <linux/i2c.h> 18 #include <linux/init.h> 19 #include <linux/interrupt.h> 20 #include <linux/io.h> 21 #include <linux/irq.h> 22 #include <linux/irqchip/chained_irq.h> 23 #include <linux/irqdomain.h> 24 #include <linux/kernel.h> 25 #include <linux/module.h> 26 #include <linux/of_address.h> 27 #include <linux/of_irq.h> 28 #include <linux/of_platform.h> 29 #include <linux/platform_device.h> 30 #include <linux/reset.h> 31 #include <linux/slab.h> 32 33 /* I2C Register */ 34 #define ASPEED_I2C_FUN_CTRL_REG 0x00 35 #define ASPEED_I2C_AC_TIMING_REG1 0x04 36 #define ASPEED_I2C_AC_TIMING_REG2 0x08 37 #define ASPEED_I2C_INTR_CTRL_REG 0x0c 38 #define ASPEED_I2C_INTR_STS_REG 0x10 39 #define ASPEED_I2C_CMD_REG 0x14 40 #define ASPEED_I2C_DEV_ADDR_REG 0x18 41 #define ASPEED_I2C_BYTE_BUF_REG 0x20 42 43 /* Global Register Definition */ 44 /* 0x00 : I2C Interrupt Status Register */ 45 /* 0x08 : I2C Interrupt Target Assignment */ 46 47 /* Device Register Definition */ 48 /* 0x00 : I2CD Function Control Register */ 49 #define ASPEED_I2CD_MULTI_MASTER_DIS BIT(15) 50 #define ASPEED_I2CD_SDA_DRIVE_1T_EN BIT(8) 51 #define ASPEED_I2CD_M_SDA_DRIVE_1T_EN BIT(7) 52 #define ASPEED_I2CD_M_HIGH_SPEED_EN BIT(6) 53 #define ASPEED_I2CD_SLAVE_EN BIT(1) 54 #define ASPEED_I2CD_MASTER_EN BIT(0) 55 56 /* 0x04 : I2CD Clock and AC Timing Control Register #1 */ 57 #define ASPEED_I2CD_TIME_TBUF_MASK GENMASK(31, 28) 58 #define ASPEED_I2CD_TIME_THDSTA_MASK GENMASK(27, 24) 59 #define ASPEED_I2CD_TIME_TACST_MASK GENMASK(23, 20) 60 #define ASPEED_I2CD_TIME_SCL_HIGH_SHIFT 16 61 #define ASPEED_I2CD_TIME_SCL_HIGH_MASK GENMASK(19, 16) 62 #define ASPEED_I2CD_TIME_SCL_LOW_SHIFT 12 63 #define ASPEED_I2CD_TIME_SCL_LOW_MASK GENMASK(15, 12) 64 #define ASPEED_I2CD_TIME_BASE_DIVISOR_MASK GENMASK(3, 0) 65 #define ASPEED_I2CD_TIME_SCL_REG_MAX GENMASK(3, 0) 66 /* 0x08 : I2CD Clock and AC Timing Control Register #2 */ 67 #define ASPEED_NO_TIMEOUT_CTRL 0 68 69 /* 0x0c : I2CD Interrupt Control Register & 70 * 0x10 : I2CD Interrupt Status Register 71 * 72 * These share bit definitions, so use the same values for the enable & 73 * status bits. 74 */ 75 #define ASPEED_I2CD_INTR_SDA_DL_TIMEOUT BIT(14) 76 #define ASPEED_I2CD_INTR_BUS_RECOVER_DONE BIT(13) 77 #define ASPEED_I2CD_INTR_SLAVE_MATCH BIT(7) 78 #define ASPEED_I2CD_INTR_SCL_TIMEOUT BIT(6) 79 #define ASPEED_I2CD_INTR_ABNORMAL BIT(5) 80 #define ASPEED_I2CD_INTR_NORMAL_STOP BIT(4) 81 #define ASPEED_I2CD_INTR_ARBIT_LOSS BIT(3) 82 #define ASPEED_I2CD_INTR_RX_DONE BIT(2) 83 #define ASPEED_I2CD_INTR_TX_NAK BIT(1) 84 #define ASPEED_I2CD_INTR_TX_ACK BIT(0) 85 #define ASPEED_I2CD_INTR_MASTER_ERRORS \ 86 (ASPEED_I2CD_INTR_SDA_DL_TIMEOUT | \ 87 ASPEED_I2CD_INTR_SCL_TIMEOUT | \ 88 ASPEED_I2CD_INTR_ABNORMAL | \ 89 ASPEED_I2CD_INTR_ARBIT_LOSS) 90 #define ASPEED_I2CD_INTR_ALL \ 91 (ASPEED_I2CD_INTR_SDA_DL_TIMEOUT | \ 92 ASPEED_I2CD_INTR_BUS_RECOVER_DONE | \ 93 ASPEED_I2CD_INTR_SCL_TIMEOUT | \ 94 ASPEED_I2CD_INTR_ABNORMAL | \ 95 ASPEED_I2CD_INTR_NORMAL_STOP | \ 96 ASPEED_I2CD_INTR_ARBIT_LOSS | \ 97 ASPEED_I2CD_INTR_RX_DONE | \ 98 ASPEED_I2CD_INTR_TX_NAK | \ 99 ASPEED_I2CD_INTR_TX_ACK) 100 101 /* 0x14 : I2CD Command/Status Register */ 102 #define ASPEED_I2CD_SCL_LINE_STS BIT(18) 103 #define ASPEED_I2CD_SDA_LINE_STS BIT(17) 104 #define ASPEED_I2CD_BUS_BUSY_STS BIT(16) 105 #define ASPEED_I2CD_BUS_RECOVER_CMD BIT(11) 106 107 /* Command Bit */ 108 #define ASPEED_I2CD_M_STOP_CMD BIT(5) 109 #define ASPEED_I2CD_M_S_RX_CMD_LAST BIT(4) 110 #define ASPEED_I2CD_M_RX_CMD BIT(3) 111 #define ASPEED_I2CD_S_TX_CMD BIT(2) 112 #define ASPEED_I2CD_M_TX_CMD BIT(1) 113 #define ASPEED_I2CD_M_START_CMD BIT(0) 114 115 /* 0x18 : I2CD Slave Device Address Register */ 116 #define ASPEED_I2CD_DEV_ADDR_MASK GENMASK(6, 0) 117 118 enum aspeed_i2c_master_state { 119 ASPEED_I2C_MASTER_INACTIVE, 120 ASPEED_I2C_MASTER_PENDING, 121 ASPEED_I2C_MASTER_START, 122 ASPEED_I2C_MASTER_TX_FIRST, 123 ASPEED_I2C_MASTER_TX, 124 ASPEED_I2C_MASTER_RX_FIRST, 125 ASPEED_I2C_MASTER_RX, 126 ASPEED_I2C_MASTER_STOP, 127 }; 128 129 enum aspeed_i2c_slave_state { 130 ASPEED_I2C_SLAVE_INACTIVE, 131 ASPEED_I2C_SLAVE_START, 132 ASPEED_I2C_SLAVE_READ_REQUESTED, 133 ASPEED_I2C_SLAVE_READ_PROCESSED, 134 ASPEED_I2C_SLAVE_WRITE_REQUESTED, 135 ASPEED_I2C_SLAVE_WRITE_RECEIVED, 136 ASPEED_I2C_SLAVE_STOP, 137 }; 138 139 struct aspeed_i2c_bus { 140 struct i2c_adapter adap; 141 struct device *dev; 142 void __iomem *base; 143 struct reset_control *rst; 144 /* Synchronizes I/O mem access to base. */ 145 spinlock_t lock; 146 struct completion cmd_complete; 147 u32 (*get_clk_reg_val)(struct device *dev, 148 u32 divisor); 149 unsigned long parent_clk_frequency; 150 u32 bus_frequency; 151 /* Transaction state. */ 152 enum aspeed_i2c_master_state master_state; 153 struct i2c_msg *msgs; 154 size_t buf_index; 155 size_t msgs_index; 156 size_t msgs_count; 157 bool send_stop; 158 int cmd_err; 159 /* Protected only by i2c_lock_bus */ 160 int master_xfer_result; 161 /* Multi-master */ 162 bool multi_master; 163 #if IS_ENABLED(CONFIG_I2C_SLAVE) 164 struct i2c_client *slave; 165 enum aspeed_i2c_slave_state slave_state; 166 #endif /* CONFIG_I2C_SLAVE */ 167 }; 168 169 static int aspeed_i2c_reset(struct aspeed_i2c_bus *bus); 170 171 static int aspeed_i2c_recover_bus(struct aspeed_i2c_bus *bus) 172 { 173 unsigned long time_left, flags; 174 int ret = 0; 175 u32 command; 176 177 spin_lock_irqsave(&bus->lock, flags); 178 command = readl(bus->base + ASPEED_I2C_CMD_REG); 179 180 if (command & ASPEED_I2CD_SDA_LINE_STS) { 181 /* Bus is idle: no recovery needed. */ 182 if (command & ASPEED_I2CD_SCL_LINE_STS) 183 goto out; 184 dev_dbg(bus->dev, "SCL hung (state %x), attempting recovery\n", 185 command); 186 187 reinit_completion(&bus->cmd_complete); 188 writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG); 189 spin_unlock_irqrestore(&bus->lock, flags); 190 191 time_left = wait_for_completion_timeout( 192 &bus->cmd_complete, bus->adap.timeout); 193 194 spin_lock_irqsave(&bus->lock, flags); 195 if (time_left == 0) 196 goto reset_out; 197 else if (bus->cmd_err) 198 goto reset_out; 199 /* Recovery failed. */ 200 else if (!(readl(bus->base + ASPEED_I2C_CMD_REG) & 201 ASPEED_I2CD_SCL_LINE_STS)) 202 goto reset_out; 203 /* Bus error. */ 204 } else { 205 dev_dbg(bus->dev, "SDA hung (state %x), attempting recovery\n", 206 command); 207 208 reinit_completion(&bus->cmd_complete); 209 /* Writes 1 to 8 SCL clock cycles until SDA is released. */ 210 writel(ASPEED_I2CD_BUS_RECOVER_CMD, 211 bus->base + ASPEED_I2C_CMD_REG); 212 spin_unlock_irqrestore(&bus->lock, flags); 213 214 time_left = wait_for_completion_timeout( 215 &bus->cmd_complete, bus->adap.timeout); 216 217 spin_lock_irqsave(&bus->lock, flags); 218 if (time_left == 0) 219 goto reset_out; 220 else if (bus->cmd_err) 221 goto reset_out; 222 /* Recovery failed. */ 223 else if (!(readl(bus->base + ASPEED_I2C_CMD_REG) & 224 ASPEED_I2CD_SDA_LINE_STS)) 225 goto reset_out; 226 } 227 228 out: 229 spin_unlock_irqrestore(&bus->lock, flags); 230 231 return ret; 232 233 reset_out: 234 spin_unlock_irqrestore(&bus->lock, flags); 235 236 return aspeed_i2c_reset(bus); 237 } 238 239 #if IS_ENABLED(CONFIG_I2C_SLAVE) 240 static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status) 241 { 242 u32 command, irq_handled = 0; 243 struct i2c_client *slave = bus->slave; 244 u8 value; 245 246 if (!slave) 247 return 0; 248 249 command = readl(bus->base + ASPEED_I2C_CMD_REG); 250 251 /* Slave was requested, restart state machine. */ 252 if (irq_status & ASPEED_I2CD_INTR_SLAVE_MATCH) { 253 irq_handled |= ASPEED_I2CD_INTR_SLAVE_MATCH; 254 bus->slave_state = ASPEED_I2C_SLAVE_START; 255 } 256 257 /* Slave is not currently active, irq was for someone else. */ 258 if (bus->slave_state == ASPEED_I2C_SLAVE_INACTIVE) 259 return irq_handled; 260 261 dev_dbg(bus->dev, "slave irq status 0x%08x, cmd 0x%08x\n", 262 irq_status, command); 263 264 /* Slave was sent something. */ 265 if (irq_status & ASPEED_I2CD_INTR_RX_DONE) { 266 value = readl(bus->base + ASPEED_I2C_BYTE_BUF_REG) >> 8; 267 /* Handle address frame. */ 268 if (bus->slave_state == ASPEED_I2C_SLAVE_START) { 269 if (value & 0x1) 270 bus->slave_state = 271 ASPEED_I2C_SLAVE_READ_REQUESTED; 272 else 273 bus->slave_state = 274 ASPEED_I2C_SLAVE_WRITE_REQUESTED; 275 } 276 irq_handled |= ASPEED_I2CD_INTR_RX_DONE; 277 } 278 279 /* Slave was asked to stop. */ 280 if (irq_status & ASPEED_I2CD_INTR_NORMAL_STOP) { 281 irq_handled |= ASPEED_I2CD_INTR_NORMAL_STOP; 282 bus->slave_state = ASPEED_I2C_SLAVE_STOP; 283 } 284 if (irq_status & ASPEED_I2CD_INTR_TX_NAK && 285 bus->slave_state == ASPEED_I2C_SLAVE_READ_PROCESSED) { 286 irq_handled |= ASPEED_I2CD_INTR_TX_NAK; 287 bus->slave_state = ASPEED_I2C_SLAVE_STOP; 288 } 289 290 switch (bus->slave_state) { 291 case ASPEED_I2C_SLAVE_READ_REQUESTED: 292 if (unlikely(irq_status & ASPEED_I2CD_INTR_TX_ACK)) 293 dev_err(bus->dev, "Unexpected ACK on read request.\n"); 294 bus->slave_state = ASPEED_I2C_SLAVE_READ_PROCESSED; 295 i2c_slave_event(slave, I2C_SLAVE_READ_REQUESTED, &value); 296 writel(value, bus->base + ASPEED_I2C_BYTE_BUF_REG); 297 writel(ASPEED_I2CD_S_TX_CMD, bus->base + ASPEED_I2C_CMD_REG); 298 break; 299 case ASPEED_I2C_SLAVE_READ_PROCESSED: 300 if (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_ACK))) { 301 dev_err(bus->dev, 302 "Expected ACK after processed read.\n"); 303 break; 304 } 305 irq_handled |= ASPEED_I2CD_INTR_TX_ACK; 306 i2c_slave_event(slave, I2C_SLAVE_READ_PROCESSED, &value); 307 writel(value, bus->base + ASPEED_I2C_BYTE_BUF_REG); 308 writel(ASPEED_I2CD_S_TX_CMD, bus->base + ASPEED_I2C_CMD_REG); 309 break; 310 case ASPEED_I2C_SLAVE_WRITE_REQUESTED: 311 bus->slave_state = ASPEED_I2C_SLAVE_WRITE_RECEIVED; 312 i2c_slave_event(slave, I2C_SLAVE_WRITE_REQUESTED, &value); 313 break; 314 case ASPEED_I2C_SLAVE_WRITE_RECEIVED: 315 i2c_slave_event(slave, I2C_SLAVE_WRITE_RECEIVED, &value); 316 break; 317 case ASPEED_I2C_SLAVE_STOP: 318 i2c_slave_event(slave, I2C_SLAVE_STOP, &value); 319 bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE; 320 break; 321 case ASPEED_I2C_SLAVE_START: 322 /* Slave was just started. Waiting for the next event. */; 323 break; 324 default: 325 dev_err(bus->dev, "unknown slave_state: %d\n", 326 bus->slave_state); 327 bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE; 328 break; 329 } 330 331 return irq_handled; 332 } 333 #endif /* CONFIG_I2C_SLAVE */ 334 335 /* precondition: bus.lock has been acquired. */ 336 static void aspeed_i2c_do_start(struct aspeed_i2c_bus *bus) 337 { 338 u32 command = ASPEED_I2CD_M_START_CMD | ASPEED_I2CD_M_TX_CMD; 339 struct i2c_msg *msg = &bus->msgs[bus->msgs_index]; 340 u8 slave_addr = i2c_8bit_addr_from_msg(msg); 341 342 bus->master_state = ASPEED_I2C_MASTER_START; 343 344 #if IS_ENABLED(CONFIG_I2C_SLAVE) 345 /* 346 * If it's requested in the middle of a slave session, set the master 347 * state to 'pending' then H/W will continue handling this master 348 * command when the bus comes back to the idle state. 349 */ 350 if (bus->slave_state != ASPEED_I2C_SLAVE_INACTIVE) 351 bus->master_state = ASPEED_I2C_MASTER_PENDING; 352 #endif /* CONFIG_I2C_SLAVE */ 353 354 bus->buf_index = 0; 355 356 if (msg->flags & I2C_M_RD) { 357 command |= ASPEED_I2CD_M_RX_CMD; 358 /* Need to let the hardware know to NACK after RX. */ 359 if (msg->len == 1 && !(msg->flags & I2C_M_RECV_LEN)) 360 command |= ASPEED_I2CD_M_S_RX_CMD_LAST; 361 } 362 363 writel(slave_addr, bus->base + ASPEED_I2C_BYTE_BUF_REG); 364 writel(command, bus->base + ASPEED_I2C_CMD_REG); 365 } 366 367 /* precondition: bus.lock has been acquired. */ 368 static void aspeed_i2c_do_stop(struct aspeed_i2c_bus *bus) 369 { 370 bus->master_state = ASPEED_I2C_MASTER_STOP; 371 writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG); 372 } 373 374 /* precondition: bus.lock has been acquired. */ 375 static void aspeed_i2c_next_msg_or_stop(struct aspeed_i2c_bus *bus) 376 { 377 if (bus->msgs_index + 1 < bus->msgs_count) { 378 bus->msgs_index++; 379 aspeed_i2c_do_start(bus); 380 } else { 381 aspeed_i2c_do_stop(bus); 382 } 383 } 384 385 static int aspeed_i2c_is_irq_error(u32 irq_status) 386 { 387 if (irq_status & ASPEED_I2CD_INTR_ARBIT_LOSS) 388 return -EAGAIN; 389 if (irq_status & (ASPEED_I2CD_INTR_SDA_DL_TIMEOUT | 390 ASPEED_I2CD_INTR_SCL_TIMEOUT)) 391 return -EBUSY; 392 if (irq_status & (ASPEED_I2CD_INTR_ABNORMAL)) 393 return -EPROTO; 394 395 return 0; 396 } 397 398 static u32 aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus, u32 irq_status) 399 { 400 u32 irq_handled = 0, command = 0; 401 struct i2c_msg *msg; 402 u8 recv_byte; 403 int ret; 404 405 if (irq_status & ASPEED_I2CD_INTR_BUS_RECOVER_DONE) { 406 bus->master_state = ASPEED_I2C_MASTER_INACTIVE; 407 irq_handled |= ASPEED_I2CD_INTR_BUS_RECOVER_DONE; 408 goto out_complete; 409 } 410 411 /* 412 * We encountered an interrupt that reports an error: the hardware 413 * should clear the command queue effectively taking us back to the 414 * INACTIVE state. 415 */ 416 ret = aspeed_i2c_is_irq_error(irq_status); 417 if (ret) { 418 dev_dbg(bus->dev, "received error interrupt: 0x%08x\n", 419 irq_status); 420 irq_handled |= (irq_status & ASPEED_I2CD_INTR_MASTER_ERRORS); 421 if (bus->master_state != ASPEED_I2C_MASTER_INACTIVE) { 422 bus->cmd_err = ret; 423 bus->master_state = ASPEED_I2C_MASTER_INACTIVE; 424 goto out_complete; 425 } 426 } 427 428 #if IS_ENABLED(CONFIG_I2C_SLAVE) 429 /* 430 * A pending master command will be started by H/W when the bus comes 431 * back to idle state after completing a slave operation so change the 432 * master state from 'pending' to 'start' at here if slave is inactive. 433 */ 434 if (bus->master_state == ASPEED_I2C_MASTER_PENDING) { 435 if (bus->slave_state != ASPEED_I2C_SLAVE_INACTIVE) 436 goto out_no_complete; 437 438 bus->master_state = ASPEED_I2C_MASTER_START; 439 } 440 #endif /* CONFIG_I2C_SLAVE */ 441 442 /* Master is not currently active, irq was for someone else. */ 443 if (bus->master_state == ASPEED_I2C_MASTER_INACTIVE || 444 bus->master_state == ASPEED_I2C_MASTER_PENDING) 445 goto out_no_complete; 446 447 /* We are in an invalid state; reset bus to a known state. */ 448 if (!bus->msgs) { 449 dev_err(bus->dev, "bus in unknown state. irq_status: 0x%x\n", 450 irq_status); 451 bus->cmd_err = -EIO; 452 if (bus->master_state != ASPEED_I2C_MASTER_STOP && 453 bus->master_state != ASPEED_I2C_MASTER_INACTIVE) 454 aspeed_i2c_do_stop(bus); 455 goto out_no_complete; 456 } 457 msg = &bus->msgs[bus->msgs_index]; 458 459 /* 460 * START is a special case because we still have to handle a subsequent 461 * TX or RX immediately after we handle it, so we handle it here and 462 * then update the state and handle the new state below. 463 */ 464 if (bus->master_state == ASPEED_I2C_MASTER_START) { 465 #if IS_ENABLED(CONFIG_I2C_SLAVE) 466 /* 467 * If a peer master starts a xfer immediately after it queues a 468 * master command, change its state to 'pending' then H/W will 469 * continue the queued master xfer just after completing the 470 * slave mode session. 471 */ 472 if (unlikely(irq_status & ASPEED_I2CD_INTR_SLAVE_MATCH)) { 473 bus->master_state = ASPEED_I2C_MASTER_PENDING; 474 dev_dbg(bus->dev, 475 "master goes pending due to a slave start\n"); 476 goto out_no_complete; 477 } 478 #endif /* CONFIG_I2C_SLAVE */ 479 if (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_ACK))) { 480 if (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_NAK))) { 481 bus->cmd_err = -ENXIO; 482 bus->master_state = ASPEED_I2C_MASTER_INACTIVE; 483 goto out_complete; 484 } 485 pr_devel("no slave present at %02x\n", msg->addr); 486 irq_handled |= ASPEED_I2CD_INTR_TX_NAK; 487 bus->cmd_err = -ENXIO; 488 aspeed_i2c_do_stop(bus); 489 goto out_no_complete; 490 } 491 irq_handled |= ASPEED_I2CD_INTR_TX_ACK; 492 if (msg->len == 0) { /* SMBUS_QUICK */ 493 aspeed_i2c_do_stop(bus); 494 goto out_no_complete; 495 } 496 if (msg->flags & I2C_M_RD) 497 bus->master_state = ASPEED_I2C_MASTER_RX_FIRST; 498 else 499 bus->master_state = ASPEED_I2C_MASTER_TX_FIRST; 500 } 501 502 switch (bus->master_state) { 503 case ASPEED_I2C_MASTER_TX: 504 if (unlikely(irq_status & ASPEED_I2CD_INTR_TX_NAK)) { 505 dev_dbg(bus->dev, "slave NACKed TX\n"); 506 irq_handled |= ASPEED_I2CD_INTR_TX_NAK; 507 goto error_and_stop; 508 } else if (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_ACK))) { 509 dev_err(bus->dev, "slave failed to ACK TX\n"); 510 goto error_and_stop; 511 } 512 irq_handled |= ASPEED_I2CD_INTR_TX_ACK; 513 /* fall through */ 514 case ASPEED_I2C_MASTER_TX_FIRST: 515 if (bus->buf_index < msg->len) { 516 bus->master_state = ASPEED_I2C_MASTER_TX; 517 writel(msg->buf[bus->buf_index++], 518 bus->base + ASPEED_I2C_BYTE_BUF_REG); 519 writel(ASPEED_I2CD_M_TX_CMD, 520 bus->base + ASPEED_I2C_CMD_REG); 521 } else { 522 aspeed_i2c_next_msg_or_stop(bus); 523 } 524 goto out_no_complete; 525 case ASPEED_I2C_MASTER_RX_FIRST: 526 /* RX may not have completed yet (only address cycle) */ 527 if (!(irq_status & ASPEED_I2CD_INTR_RX_DONE)) 528 goto out_no_complete; 529 /* fall through */ 530 case ASPEED_I2C_MASTER_RX: 531 if (unlikely(!(irq_status & ASPEED_I2CD_INTR_RX_DONE))) { 532 dev_err(bus->dev, "master failed to RX\n"); 533 goto error_and_stop; 534 } 535 irq_handled |= ASPEED_I2CD_INTR_RX_DONE; 536 537 recv_byte = readl(bus->base + ASPEED_I2C_BYTE_BUF_REG) >> 8; 538 msg->buf[bus->buf_index++] = recv_byte; 539 540 if (msg->flags & I2C_M_RECV_LEN) { 541 if (unlikely(recv_byte > I2C_SMBUS_BLOCK_MAX)) { 542 bus->cmd_err = -EPROTO; 543 aspeed_i2c_do_stop(bus); 544 goto out_no_complete; 545 } 546 msg->len = recv_byte + 547 ((msg->flags & I2C_CLIENT_PEC) ? 2 : 1); 548 msg->flags &= ~I2C_M_RECV_LEN; 549 } 550 551 if (bus->buf_index < msg->len) { 552 bus->master_state = ASPEED_I2C_MASTER_RX; 553 command = ASPEED_I2CD_M_RX_CMD; 554 if (bus->buf_index + 1 == msg->len) 555 command |= ASPEED_I2CD_M_S_RX_CMD_LAST; 556 writel(command, bus->base + ASPEED_I2C_CMD_REG); 557 } else { 558 aspeed_i2c_next_msg_or_stop(bus); 559 } 560 goto out_no_complete; 561 case ASPEED_I2C_MASTER_STOP: 562 if (unlikely(!(irq_status & ASPEED_I2CD_INTR_NORMAL_STOP))) { 563 dev_err(bus->dev, 564 "master failed to STOP. irq_status:0x%x\n", 565 irq_status); 566 bus->cmd_err = -EIO; 567 /* Do not STOP as we have already tried. */ 568 } else { 569 irq_handled |= ASPEED_I2CD_INTR_NORMAL_STOP; 570 } 571 572 bus->master_state = ASPEED_I2C_MASTER_INACTIVE; 573 goto out_complete; 574 case ASPEED_I2C_MASTER_INACTIVE: 575 dev_err(bus->dev, 576 "master received interrupt 0x%08x, but is inactive\n", 577 irq_status); 578 bus->cmd_err = -EIO; 579 /* Do not STOP as we should be inactive. */ 580 goto out_complete; 581 default: 582 WARN(1, "unknown master state\n"); 583 bus->master_state = ASPEED_I2C_MASTER_INACTIVE; 584 bus->cmd_err = -EINVAL; 585 goto out_complete; 586 } 587 error_and_stop: 588 bus->cmd_err = -EIO; 589 aspeed_i2c_do_stop(bus); 590 goto out_no_complete; 591 out_complete: 592 bus->msgs = NULL; 593 if (bus->cmd_err) 594 bus->master_xfer_result = bus->cmd_err; 595 else 596 bus->master_xfer_result = bus->msgs_index + 1; 597 complete(&bus->cmd_complete); 598 out_no_complete: 599 return irq_handled; 600 } 601 602 static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id) 603 { 604 struct aspeed_i2c_bus *bus = dev_id; 605 u32 irq_received, irq_remaining, irq_handled; 606 607 spin_lock(&bus->lock); 608 irq_received = readl(bus->base + ASPEED_I2C_INTR_STS_REG); 609 /* Ack all interrupts except for Rx done */ 610 writel(irq_received & ~ASPEED_I2CD_INTR_RX_DONE, 611 bus->base + ASPEED_I2C_INTR_STS_REG); 612 irq_remaining = irq_received; 613 614 #if IS_ENABLED(CONFIG_I2C_SLAVE) 615 /* 616 * In most cases, interrupt bits will be set one by one, although 617 * multiple interrupt bits could be set at the same time. It's also 618 * possible that master interrupt bits could be set along with slave 619 * interrupt bits. Each case needs to be handled using corresponding 620 * handlers depending on the current state. 621 */ 622 if (bus->master_state != ASPEED_I2C_MASTER_INACTIVE && 623 bus->master_state != ASPEED_I2C_MASTER_PENDING) { 624 irq_handled = aspeed_i2c_master_irq(bus, irq_remaining); 625 irq_remaining &= ~irq_handled; 626 if (irq_remaining) 627 irq_handled |= aspeed_i2c_slave_irq(bus, irq_remaining); 628 } else { 629 irq_handled = aspeed_i2c_slave_irq(bus, irq_remaining); 630 irq_remaining &= ~irq_handled; 631 if (irq_remaining) 632 irq_handled |= aspeed_i2c_master_irq(bus, 633 irq_remaining); 634 } 635 #else 636 irq_handled = aspeed_i2c_master_irq(bus, irq_remaining); 637 #endif /* CONFIG_I2C_SLAVE */ 638 639 irq_remaining &= ~irq_handled; 640 if (irq_remaining) 641 dev_err(bus->dev, 642 "irq handled != irq. expected 0x%08x, but was 0x%08x\n", 643 irq_received, irq_handled); 644 645 /* Ack Rx done */ 646 if (irq_received & ASPEED_I2CD_INTR_RX_DONE) 647 writel(ASPEED_I2CD_INTR_RX_DONE, 648 bus->base + ASPEED_I2C_INTR_STS_REG); 649 spin_unlock(&bus->lock); 650 return irq_remaining ? IRQ_NONE : IRQ_HANDLED; 651 } 652 653 static int aspeed_i2c_master_xfer(struct i2c_adapter *adap, 654 struct i2c_msg *msgs, int num) 655 { 656 struct aspeed_i2c_bus *bus = i2c_get_adapdata(adap); 657 unsigned long time_left, flags; 658 659 spin_lock_irqsave(&bus->lock, flags); 660 bus->cmd_err = 0; 661 662 /* If bus is busy in a single master environment, attempt recovery. */ 663 if (!bus->multi_master && 664 (readl(bus->base + ASPEED_I2C_CMD_REG) & 665 ASPEED_I2CD_BUS_BUSY_STS)) { 666 int ret; 667 668 spin_unlock_irqrestore(&bus->lock, flags); 669 ret = aspeed_i2c_recover_bus(bus); 670 if (ret) 671 return ret; 672 spin_lock_irqsave(&bus->lock, flags); 673 } 674 675 bus->cmd_err = 0; 676 bus->msgs = msgs; 677 bus->msgs_index = 0; 678 bus->msgs_count = num; 679 680 reinit_completion(&bus->cmd_complete); 681 aspeed_i2c_do_start(bus); 682 spin_unlock_irqrestore(&bus->lock, flags); 683 684 time_left = wait_for_completion_timeout(&bus->cmd_complete, 685 bus->adap.timeout); 686 687 if (time_left == 0) { 688 /* 689 * If timed out and bus is still busy in a multi master 690 * environment, attempt recovery at here. 691 */ 692 if (bus->multi_master && 693 (readl(bus->base + ASPEED_I2C_CMD_REG) & 694 ASPEED_I2CD_BUS_BUSY_STS)) 695 aspeed_i2c_recover_bus(bus); 696 697 return -ETIMEDOUT; 698 } 699 700 return bus->master_xfer_result; 701 } 702 703 static u32 aspeed_i2c_functionality(struct i2c_adapter *adap) 704 { 705 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_SMBUS_BLOCK_DATA; 706 } 707 708 #if IS_ENABLED(CONFIG_I2C_SLAVE) 709 /* precondition: bus.lock has been acquired. */ 710 static void __aspeed_i2c_reg_slave(struct aspeed_i2c_bus *bus, u16 slave_addr) 711 { 712 u32 addr_reg_val, func_ctrl_reg_val; 713 714 /* Set slave addr. */ 715 addr_reg_val = readl(bus->base + ASPEED_I2C_DEV_ADDR_REG); 716 addr_reg_val &= ~ASPEED_I2CD_DEV_ADDR_MASK; 717 addr_reg_val |= slave_addr & ASPEED_I2CD_DEV_ADDR_MASK; 718 writel(addr_reg_val, bus->base + ASPEED_I2C_DEV_ADDR_REG); 719 720 /* Turn on slave mode. */ 721 func_ctrl_reg_val = readl(bus->base + ASPEED_I2C_FUN_CTRL_REG); 722 func_ctrl_reg_val |= ASPEED_I2CD_SLAVE_EN; 723 writel(func_ctrl_reg_val, bus->base + ASPEED_I2C_FUN_CTRL_REG); 724 } 725 726 static int aspeed_i2c_reg_slave(struct i2c_client *client) 727 { 728 struct aspeed_i2c_bus *bus = i2c_get_adapdata(client->adapter); 729 unsigned long flags; 730 731 spin_lock_irqsave(&bus->lock, flags); 732 if (bus->slave) { 733 spin_unlock_irqrestore(&bus->lock, flags); 734 return -EINVAL; 735 } 736 737 __aspeed_i2c_reg_slave(bus, client->addr); 738 739 bus->slave = client; 740 bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE; 741 spin_unlock_irqrestore(&bus->lock, flags); 742 743 return 0; 744 } 745 746 static int aspeed_i2c_unreg_slave(struct i2c_client *client) 747 { 748 struct aspeed_i2c_bus *bus = i2c_get_adapdata(client->adapter); 749 u32 func_ctrl_reg_val; 750 unsigned long flags; 751 752 spin_lock_irqsave(&bus->lock, flags); 753 if (!bus->slave) { 754 spin_unlock_irqrestore(&bus->lock, flags); 755 return -EINVAL; 756 } 757 758 /* Turn off slave mode. */ 759 func_ctrl_reg_val = readl(bus->base + ASPEED_I2C_FUN_CTRL_REG); 760 func_ctrl_reg_val &= ~ASPEED_I2CD_SLAVE_EN; 761 writel(func_ctrl_reg_val, bus->base + ASPEED_I2C_FUN_CTRL_REG); 762 763 bus->slave = NULL; 764 spin_unlock_irqrestore(&bus->lock, flags); 765 766 return 0; 767 } 768 #endif /* CONFIG_I2C_SLAVE */ 769 770 static const struct i2c_algorithm aspeed_i2c_algo = { 771 .master_xfer = aspeed_i2c_master_xfer, 772 .functionality = aspeed_i2c_functionality, 773 #if IS_ENABLED(CONFIG_I2C_SLAVE) 774 .reg_slave = aspeed_i2c_reg_slave, 775 .unreg_slave = aspeed_i2c_unreg_slave, 776 #endif /* CONFIG_I2C_SLAVE */ 777 }; 778 779 static u32 aspeed_i2c_get_clk_reg_val(struct device *dev, 780 u32 clk_high_low_mask, 781 u32 divisor) 782 { 783 u32 base_clk_divisor, clk_high_low_max, clk_high, clk_low, tmp; 784 785 /* 786 * SCL_high and SCL_low represent a value 1 greater than what is stored 787 * since a zero divider is meaningless. Thus, the max value each can 788 * store is every bit set + 1. Since SCL_high and SCL_low are added 789 * together (see below), the max value of both is the max value of one 790 * them times two. 791 */ 792 clk_high_low_max = (clk_high_low_mask + 1) * 2; 793 794 /* 795 * The actual clock frequency of SCL is: 796 * SCL_freq = APB_freq / (base_freq * (SCL_high + SCL_low)) 797 * = APB_freq / divisor 798 * where base_freq is a programmable clock divider; its value is 799 * base_freq = 1 << base_clk_divisor 800 * SCL_high is the number of base_freq clock cycles that SCL stays high 801 * and SCL_low is the number of base_freq clock cycles that SCL stays 802 * low for a period of SCL. 803 * The actual register has a minimum SCL_high and SCL_low minimum of 1; 804 * thus, they start counting at zero. So 805 * SCL_high = clk_high + 1 806 * SCL_low = clk_low + 1 807 * Thus, 808 * SCL_freq = APB_freq / 809 * ((1 << base_clk_divisor) * (clk_high + 1 + clk_low + 1)) 810 * The documentation recommends clk_high >= clk_high_max / 2 and 811 * clk_low >= clk_low_max / 2 - 1 when possible; this last constraint 812 * gives us the following solution: 813 */ 814 base_clk_divisor = divisor > clk_high_low_max ? 815 ilog2((divisor - 1) / clk_high_low_max) + 1 : 0; 816 817 if (base_clk_divisor > ASPEED_I2CD_TIME_BASE_DIVISOR_MASK) { 818 base_clk_divisor = ASPEED_I2CD_TIME_BASE_DIVISOR_MASK; 819 clk_low = clk_high_low_mask; 820 clk_high = clk_high_low_mask; 821 dev_err(dev, 822 "clamping clock divider: divider requested, %u, is greater than largest possible divider, %u.\n", 823 divisor, (1 << base_clk_divisor) * clk_high_low_max); 824 } else { 825 tmp = (divisor + (1 << base_clk_divisor) - 1) 826 >> base_clk_divisor; 827 clk_low = tmp / 2; 828 clk_high = tmp - clk_low; 829 830 if (clk_high) 831 clk_high--; 832 833 if (clk_low) 834 clk_low--; 835 } 836 837 838 return ((clk_high << ASPEED_I2CD_TIME_SCL_HIGH_SHIFT) 839 & ASPEED_I2CD_TIME_SCL_HIGH_MASK) 840 | ((clk_low << ASPEED_I2CD_TIME_SCL_LOW_SHIFT) 841 & ASPEED_I2CD_TIME_SCL_LOW_MASK) 842 | (base_clk_divisor 843 & ASPEED_I2CD_TIME_BASE_DIVISOR_MASK); 844 } 845 846 static u32 aspeed_i2c_24xx_get_clk_reg_val(struct device *dev, u32 divisor) 847 { 848 /* 849 * clk_high and clk_low are each 3 bits wide, so each can hold a max 850 * value of 8 giving a clk_high_low_max of 16. 851 */ 852 return aspeed_i2c_get_clk_reg_val(dev, GENMASK(2, 0), divisor); 853 } 854 855 static u32 aspeed_i2c_25xx_get_clk_reg_val(struct device *dev, u32 divisor) 856 { 857 /* 858 * clk_high and clk_low are each 4 bits wide, so each can hold a max 859 * value of 16 giving a clk_high_low_max of 32. 860 */ 861 return aspeed_i2c_get_clk_reg_val(dev, GENMASK(3, 0), divisor); 862 } 863 864 /* precondition: bus.lock has been acquired. */ 865 static int aspeed_i2c_init_clk(struct aspeed_i2c_bus *bus) 866 { 867 u32 divisor, clk_reg_val; 868 869 divisor = DIV_ROUND_UP(bus->parent_clk_frequency, bus->bus_frequency); 870 clk_reg_val = readl(bus->base + ASPEED_I2C_AC_TIMING_REG1); 871 clk_reg_val &= (ASPEED_I2CD_TIME_TBUF_MASK | 872 ASPEED_I2CD_TIME_THDSTA_MASK | 873 ASPEED_I2CD_TIME_TACST_MASK); 874 clk_reg_val |= bus->get_clk_reg_val(bus->dev, divisor); 875 writel(clk_reg_val, bus->base + ASPEED_I2C_AC_TIMING_REG1); 876 writel(ASPEED_NO_TIMEOUT_CTRL, bus->base + ASPEED_I2C_AC_TIMING_REG2); 877 878 return 0; 879 } 880 881 /* precondition: bus.lock has been acquired. */ 882 static int aspeed_i2c_init(struct aspeed_i2c_bus *bus, 883 struct platform_device *pdev) 884 { 885 u32 fun_ctrl_reg = ASPEED_I2CD_MASTER_EN; 886 int ret; 887 888 /* Disable everything. */ 889 writel(0, bus->base + ASPEED_I2C_FUN_CTRL_REG); 890 891 ret = aspeed_i2c_init_clk(bus); 892 if (ret < 0) 893 return ret; 894 895 if (of_property_read_bool(pdev->dev.of_node, "multi-master")) 896 bus->multi_master = true; 897 else 898 fun_ctrl_reg |= ASPEED_I2CD_MULTI_MASTER_DIS; 899 900 /* Enable Master Mode */ 901 writel(readl(bus->base + ASPEED_I2C_FUN_CTRL_REG) | fun_ctrl_reg, 902 bus->base + ASPEED_I2C_FUN_CTRL_REG); 903 904 #if IS_ENABLED(CONFIG_I2C_SLAVE) 905 /* If slave has already been registered, re-enable it. */ 906 if (bus->slave) 907 __aspeed_i2c_reg_slave(bus, bus->slave->addr); 908 #endif /* CONFIG_I2C_SLAVE */ 909 910 /* Set interrupt generation of I2C controller */ 911 writel(ASPEED_I2CD_INTR_ALL, bus->base + ASPEED_I2C_INTR_CTRL_REG); 912 913 return 0; 914 } 915 916 static int aspeed_i2c_reset(struct aspeed_i2c_bus *bus) 917 { 918 struct platform_device *pdev = to_platform_device(bus->dev); 919 unsigned long flags; 920 int ret; 921 922 spin_lock_irqsave(&bus->lock, flags); 923 924 /* Disable and ack all interrupts. */ 925 writel(0, bus->base + ASPEED_I2C_INTR_CTRL_REG); 926 writel(0xffffffff, bus->base + ASPEED_I2C_INTR_STS_REG); 927 928 ret = aspeed_i2c_init(bus, pdev); 929 930 spin_unlock_irqrestore(&bus->lock, flags); 931 932 return ret; 933 } 934 935 static const struct of_device_id aspeed_i2c_bus_of_table[] = { 936 { 937 .compatible = "aspeed,ast2400-i2c-bus", 938 .data = aspeed_i2c_24xx_get_clk_reg_val, 939 }, 940 { 941 .compatible = "aspeed,ast2500-i2c-bus", 942 .data = aspeed_i2c_25xx_get_clk_reg_val, 943 }, 944 { }, 945 }; 946 MODULE_DEVICE_TABLE(of, aspeed_i2c_bus_of_table); 947 948 static int aspeed_i2c_probe_bus(struct platform_device *pdev) 949 { 950 const struct of_device_id *match; 951 struct aspeed_i2c_bus *bus; 952 struct clk *parent_clk; 953 struct resource *res; 954 int irq, ret; 955 956 bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL); 957 if (!bus) 958 return -ENOMEM; 959 960 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 961 bus->base = devm_ioremap_resource(&pdev->dev, res); 962 if (IS_ERR(bus->base)) 963 return PTR_ERR(bus->base); 964 965 parent_clk = devm_clk_get(&pdev->dev, NULL); 966 if (IS_ERR(parent_clk)) 967 return PTR_ERR(parent_clk); 968 bus->parent_clk_frequency = clk_get_rate(parent_clk); 969 /* We just need the clock rate, we don't actually use the clk object. */ 970 devm_clk_put(&pdev->dev, parent_clk); 971 972 bus->rst = devm_reset_control_get_shared(&pdev->dev, NULL); 973 if (IS_ERR(bus->rst)) { 974 dev_err(&pdev->dev, 975 "missing or invalid reset controller device tree entry\n"); 976 return PTR_ERR(bus->rst); 977 } 978 reset_control_deassert(bus->rst); 979 980 ret = of_property_read_u32(pdev->dev.of_node, 981 "bus-frequency", &bus->bus_frequency); 982 if (ret < 0) { 983 dev_err(&pdev->dev, 984 "Could not read bus-frequency property\n"); 985 bus->bus_frequency = 100000; 986 } 987 988 match = of_match_node(aspeed_i2c_bus_of_table, pdev->dev.of_node); 989 if (!match) 990 bus->get_clk_reg_val = aspeed_i2c_24xx_get_clk_reg_val; 991 else 992 bus->get_clk_reg_val = (u32 (*)(struct device *, u32)) 993 match->data; 994 995 /* Initialize the I2C adapter */ 996 spin_lock_init(&bus->lock); 997 init_completion(&bus->cmd_complete); 998 bus->adap.owner = THIS_MODULE; 999 bus->adap.retries = 0; 1000 bus->adap.algo = &aspeed_i2c_algo; 1001 bus->adap.dev.parent = &pdev->dev; 1002 bus->adap.dev.of_node = pdev->dev.of_node; 1003 strlcpy(bus->adap.name, pdev->name, sizeof(bus->adap.name)); 1004 i2c_set_adapdata(&bus->adap, bus); 1005 1006 bus->dev = &pdev->dev; 1007 1008 /* Clean up any left over interrupt state. */ 1009 writel(0, bus->base + ASPEED_I2C_INTR_CTRL_REG); 1010 writel(0xffffffff, bus->base + ASPEED_I2C_INTR_STS_REG); 1011 /* 1012 * bus.lock does not need to be held because the interrupt handler has 1013 * not been enabled yet. 1014 */ 1015 ret = aspeed_i2c_init(bus, pdev); 1016 if (ret < 0) 1017 return ret; 1018 1019 irq = irq_of_parse_and_map(pdev->dev.of_node, 0); 1020 ret = devm_request_irq(&pdev->dev, irq, aspeed_i2c_bus_irq, 1021 0, dev_name(&pdev->dev), bus); 1022 if (ret < 0) 1023 return ret; 1024 1025 ret = i2c_add_adapter(&bus->adap); 1026 if (ret < 0) 1027 return ret; 1028 1029 platform_set_drvdata(pdev, bus); 1030 1031 dev_info(bus->dev, "i2c bus %d registered, irq %d\n", 1032 bus->adap.nr, irq); 1033 1034 return 0; 1035 } 1036 1037 static int aspeed_i2c_remove_bus(struct platform_device *pdev) 1038 { 1039 struct aspeed_i2c_bus *bus = platform_get_drvdata(pdev); 1040 unsigned long flags; 1041 1042 spin_lock_irqsave(&bus->lock, flags); 1043 1044 /* Disable everything. */ 1045 writel(0, bus->base + ASPEED_I2C_FUN_CTRL_REG); 1046 writel(0, bus->base + ASPEED_I2C_INTR_CTRL_REG); 1047 1048 spin_unlock_irqrestore(&bus->lock, flags); 1049 1050 reset_control_assert(bus->rst); 1051 1052 i2c_del_adapter(&bus->adap); 1053 1054 return 0; 1055 } 1056 1057 static struct platform_driver aspeed_i2c_bus_driver = { 1058 .probe = aspeed_i2c_probe_bus, 1059 .remove = aspeed_i2c_remove_bus, 1060 .driver = { 1061 .name = "aspeed-i2c-bus", 1062 .of_match_table = aspeed_i2c_bus_of_table, 1063 }, 1064 }; 1065 module_platform_driver(aspeed_i2c_bus_driver); 1066 1067 MODULE_AUTHOR("Brendan Higgins <brendanhiggins@google.com>"); 1068 MODULE_DESCRIPTION("Aspeed I2C Bus Driver"); 1069 MODULE_LICENSE("GPL v2"); 1070