// SPDX-License-Identifier: GPL-2.0-only
/*
 * Aspeed 24XX/25XX I2C Controller.
 *
 * Copyright (C) 2012-2017 ASPEED Technology Inc.
 * Copyright 2017 IBM Corporation
 * Copyright 2017 Google, Inc.
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/slab.h>

/* I2C Register */
#define ASPEED_I2C_FUN_CTRL_REG				0x00
#define ASPEED_I2C_AC_TIMING_REG1			0x04
#define ASPEED_I2C_AC_TIMING_REG2			0x08
#define ASPEED_I2C_INTR_CTRL_REG			0x0c
#define ASPEED_I2C_INTR_STS_REG				0x10
#define ASPEED_I2C_CMD_REG				0x14
#define ASPEED_I2C_DEV_ADDR_REG				0x18
#define ASPEED_I2C_BYTE_BUF_REG				0x20

/* Global Register Definition */
/* 0x00 : I2C Interrupt Status Register */
/* 0x08 : I2C Interrupt Target Assignment */

/* Device Register Definition */
/* 0x00 : I2CD Function Control Register */
#define ASPEED_I2CD_MULTI_MASTER_DIS			BIT(15)
#define ASPEED_I2CD_SDA_DRIVE_1T_EN			BIT(8)
#define ASPEED_I2CD_M_SDA_DRIVE_1T_EN			BIT(7)
#define ASPEED_I2CD_M_HIGH_SPEED_EN			BIT(6)
#define ASPEED_I2CD_SLAVE_EN				BIT(1)
#define ASPEED_I2CD_MASTER_EN				BIT(0)

/* 0x04 : I2CD Clock and AC Timing Control Register #1 */
#define ASPEED_I2CD_TIME_TBUF_MASK			GENMASK(31, 28)
#define ASPEED_I2CD_TIME_THDSTA_MASK			GENMASK(27, 24)
#define ASPEED_I2CD_TIME_TACST_MASK			GENMASK(23, 20)
#define ASPEED_I2CD_TIME_SCL_HIGH_SHIFT			16
#define ASPEED_I2CD_TIME_SCL_HIGH_MASK			GENMASK(19, 16)
#define ASPEED_I2CD_TIME_SCL_LOW_SHIFT			12
#define ASPEED_I2CD_TIME_SCL_LOW_MASK			GENMASK(15, 12)
#define ASPEED_I2CD_TIME_BASE_DIVISOR_MASK		GENMASK(3, 0)
#define ASPEED_I2CD_TIME_SCL_REG_MAX			GENMASK(3, 0)
/* 0x08 : I2CD Clock and AC Timing Control Register #2 */
#define ASPEED_NO_TIMEOUT_CTRL				0

/* 0x0c : I2CD Interrupt Control Register &
 * 0x10 : I2CD Interrupt Status Register
 *
 * These share bit definitions, so use the same values for the enable &
 * status bits.
 */
#define ASPEED_I2CD_INTR_SDA_DL_TIMEOUT			BIT(14)
#define ASPEED_I2CD_INTR_BUS_RECOVER_DONE		BIT(13)
#define ASPEED_I2CD_INTR_SLAVE_MATCH			BIT(7)
#define ASPEED_I2CD_INTR_SCL_TIMEOUT			BIT(6)
#define ASPEED_I2CD_INTR_ABNORMAL			BIT(5)
#define ASPEED_I2CD_INTR_NORMAL_STOP			BIT(4)
#define ASPEED_I2CD_INTR_ARBIT_LOSS			BIT(3)
#define ASPEED_I2CD_INTR_RX_DONE			BIT(2)
#define ASPEED_I2CD_INTR_TX_NAK				BIT(1)
#define ASPEED_I2CD_INTR_TX_ACK				BIT(0)
#define ASPEED_I2CD_INTR_MASTER_ERRORS				\
		(ASPEED_I2CD_INTR_SDA_DL_TIMEOUT |		\
		 ASPEED_I2CD_INTR_SCL_TIMEOUT |			\
		 ASPEED_I2CD_INTR_ABNORMAL |			\
		 ASPEED_I2CD_INTR_ARBIT_LOSS)
#define ASPEED_I2CD_INTR_ALL					\
		(ASPEED_I2CD_INTR_SDA_DL_TIMEOUT |		\
		 ASPEED_I2CD_INTR_BUS_RECOVER_DONE |		\
		 ASPEED_I2CD_INTR_SCL_TIMEOUT |			\
		 ASPEED_I2CD_INTR_ABNORMAL |			\
		 ASPEED_I2CD_INTR_NORMAL_STOP |			\
		 ASPEED_I2CD_INTR_ARBIT_LOSS |			\
		 ASPEED_I2CD_INTR_RX_DONE |			\
		 ASPEED_I2CD_INTR_TX_NAK |			\
		 ASPEED_I2CD_INTR_TX_ACK)

/* 0x14 : I2CD Command/Status Register */
#define ASPEED_I2CD_SCL_LINE_STS			BIT(18)
#define ASPEED_I2CD_SDA_LINE_STS			BIT(17)
#define ASPEED_I2CD_BUS_BUSY_STS			BIT(16)
#define ASPEED_I2CD_BUS_RECOVER_CMD			BIT(11)

/* Command Bit */
#define ASPEED_I2CD_M_STOP_CMD				BIT(5)
#define ASPEED_I2CD_M_S_RX_CMD_LAST			BIT(4)
#define ASPEED_I2CD_M_RX_CMD				BIT(3)
#define ASPEED_I2CD_S_TX_CMD				BIT(2)
#define ASPEED_I2CD_M_TX_CMD				BIT(1)
#define ASPEED_I2CD_M_START_CMD				BIT(0)

/* 0x18 : I2CD Slave Device Address Register */
#define ASPEED_I2CD_DEV_ADDR_MASK			GENMASK(6, 0)

enum aspeed_i2c_master_state {
	ASPEED_I2C_MASTER_INACTIVE,
	ASPEED_I2C_MASTER_PENDING,
	ASPEED_I2C_MASTER_START,
	ASPEED_I2C_MASTER_TX_FIRST,
	ASPEED_I2C_MASTER_TX,
	ASPEED_I2C_MASTER_RX_FIRST,
	ASPEED_I2C_MASTER_RX,
	ASPEED_I2C_MASTER_STOP,
};

enum aspeed_i2c_slave_state {
	ASPEED_I2C_SLAVE_INACTIVE,
	ASPEED_I2C_SLAVE_START,
	ASPEED_I2C_SLAVE_READ_REQUESTED,
	ASPEED_I2C_SLAVE_READ_PROCESSED,
	ASPEED_I2C_SLAVE_WRITE_REQUESTED,
	ASPEED_I2C_SLAVE_WRITE_RECEIVED,
	ASPEED_I2C_SLAVE_STOP,
};

struct aspeed_i2c_bus {
	struct i2c_adapter		adap;
	struct device			*dev;
	void __iomem			*base;
	struct reset_control		*rst;
	/* Synchronizes I/O mem access to base. */
	spinlock_t			lock;
	struct completion		cmd_complete;
	u32				(*get_clk_reg_val)(struct device *dev,
							   u32 divisor);
	unsigned long			parent_clk_frequency;
	u32				bus_frequency;
	/* Transaction state. */
	enum aspeed_i2c_master_state	master_state;
	struct i2c_msg			*msgs;
	size_t				buf_index;
	size_t				msgs_index;
	size_t				msgs_count;
	bool				send_stop;
	int				cmd_err;
	/* Protected only by i2c_lock_bus */
	int				master_xfer_result;
	/* Multi-master */
	bool				multi_master;
#if IS_ENABLED(CONFIG_I2C_SLAVE)
	struct i2c_client		*slave;
	enum aspeed_i2c_slave_state	slave_state;
#endif /* CONFIG_I2C_SLAVE */
};

static int aspeed_i2c_reset(struct aspeed_i2c_bus *bus);

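/*
 * Recovery strategy used by aspeed_i2c_recover_bus() below, summarized here
 * as a reading aid (the authoritative behaviour is the code itself):
 *
 *  - SDA high but SCL stuck low: issue a STOP command and wait for the
 *    completion signalled from the interrupt handler.
 *  - SDA stuck low: issue the hardware bus-recovery command, which clocks
 *    SCL (1 to 8 cycles) until SDA is released.
 *  - If either attempt times out, reports an error, or leaves the line
 *    still hung, fall back to a full controller reset via aspeed_i2c_reset().
 */
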
static int aspeed_i2c_recover_bus(struct aspeed_i2c_bus *bus)
{
	unsigned long time_left, flags;
	int ret = 0;
	u32 command;

	spin_lock_irqsave(&bus->lock, flags);
	command = readl(bus->base + ASPEED_I2C_CMD_REG);

	if (command & ASPEED_I2CD_SDA_LINE_STS) {
		/* Bus is idle: no recovery needed. */
		if (command & ASPEED_I2CD_SCL_LINE_STS)
			goto out;
		dev_dbg(bus->dev, "SCL hung (state %x), attempting recovery\n",
			command);

		reinit_completion(&bus->cmd_complete);
		writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG);
		spin_unlock_irqrestore(&bus->lock, flags);

		time_left = wait_for_completion_timeout(
				&bus->cmd_complete, bus->adap.timeout);

		spin_lock_irqsave(&bus->lock, flags);
		if (time_left == 0)
			goto reset_out;
		else if (bus->cmd_err)
			goto reset_out;
		/* Recovery failed. */
		else if (!(readl(bus->base + ASPEED_I2C_CMD_REG) &
			   ASPEED_I2CD_SCL_LINE_STS))
			goto reset_out;
	/* Bus error. */
	} else {
		dev_dbg(bus->dev, "SDA hung (state %x), attempting recovery\n",
			command);

		reinit_completion(&bus->cmd_complete);
		/* Writes 1 to 8 SCL clock cycles until SDA is released. */
		writel(ASPEED_I2CD_BUS_RECOVER_CMD,
		       bus->base + ASPEED_I2C_CMD_REG);
		spin_unlock_irqrestore(&bus->lock, flags);

		time_left = wait_for_completion_timeout(
				&bus->cmd_complete, bus->adap.timeout);

		spin_lock_irqsave(&bus->lock, flags);
		if (time_left == 0)
			goto reset_out;
		else if (bus->cmd_err)
			goto reset_out;
		/* Recovery failed. */
		else if (!(readl(bus->base + ASPEED_I2C_CMD_REG) &
			   ASPEED_I2CD_SDA_LINE_STS))
			goto reset_out;
	}

out:
	spin_unlock_irqrestore(&bus->lock, flags);

	return ret;

reset_out:
	spin_unlock_irqrestore(&bus->lock, flags);

	return aspeed_i2c_reset(bus);
}

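/*
 * Summary of the slave state machine driven by aspeed_i2c_slave_irq() below
 * (derived from the handler itself, for orientation rather than as a
 * hardware description):
 *
 *  SLAVE_MATCH irq                 -> ASPEED_I2C_SLAVE_START
 *  RX_DONE with the address byte   -> READ_REQUESTED or WRITE_REQUESTED,
 *                                     depending on the R/W bit
 *  READ_REQUESTED handled          -> READ_PROCESSED; every further TX_ACK
 *                                     asks the backend for the next byte
 *  RX_DONE with a data byte        -> WRITE_RECEIVED, one event per byte
 *  NORMAL_STOP (or TX_NAK on read) -> SLAVE_STOP, then SLAVE_INACTIVE
 *
 * Each transition is reported to the registered backend via
 * i2c_slave_event().
 */
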
#if IS_ENABLED(CONFIG_I2C_SLAVE)
static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
{
	u32 command, irq_handled = 0;
	struct i2c_client *slave = bus->slave;
	u8 value;

	if (!slave)
		return 0;

	command = readl(bus->base + ASPEED_I2C_CMD_REG);

	/* Slave was requested, restart state machine. */
	if (irq_status & ASPEED_I2CD_INTR_SLAVE_MATCH) {
		irq_handled |= ASPEED_I2CD_INTR_SLAVE_MATCH;
		bus->slave_state = ASPEED_I2C_SLAVE_START;
	}

	/* Slave is not currently active, irq was for someone else. */
	if (bus->slave_state == ASPEED_I2C_SLAVE_INACTIVE)
		return irq_handled;

	dev_dbg(bus->dev, "slave irq status 0x%08x, cmd 0x%08x\n",
		irq_status, command);

	/* Slave was sent something. */
	if (irq_status & ASPEED_I2CD_INTR_RX_DONE) {
		value = readl(bus->base + ASPEED_I2C_BYTE_BUF_REG) >> 8;
		/* Handle address frame. */
		if (bus->slave_state == ASPEED_I2C_SLAVE_START) {
			if (value & 0x1)
				bus->slave_state =
						ASPEED_I2C_SLAVE_READ_REQUESTED;
			else
				bus->slave_state =
						ASPEED_I2C_SLAVE_WRITE_REQUESTED;
		}
		irq_handled |= ASPEED_I2CD_INTR_RX_DONE;
	}

	/* Slave was asked to stop. */
	if (irq_status & ASPEED_I2CD_INTR_NORMAL_STOP) {
		irq_handled |= ASPEED_I2CD_INTR_NORMAL_STOP;
		bus->slave_state = ASPEED_I2C_SLAVE_STOP;
	}
	if (irq_status & ASPEED_I2CD_INTR_TX_NAK &&
	    bus->slave_state == ASPEED_I2C_SLAVE_READ_PROCESSED) {
		irq_handled |= ASPEED_I2CD_INTR_TX_NAK;
		bus->slave_state = ASPEED_I2C_SLAVE_STOP;
	}

	switch (bus->slave_state) {
	case ASPEED_I2C_SLAVE_READ_REQUESTED:
		if (unlikely(irq_status & ASPEED_I2CD_INTR_TX_ACK))
			dev_err(bus->dev, "Unexpected ACK on read request.\n");
		bus->slave_state = ASPEED_I2C_SLAVE_READ_PROCESSED;
		i2c_slave_event(slave, I2C_SLAVE_READ_REQUESTED, &value);
		writel(value, bus->base + ASPEED_I2C_BYTE_BUF_REG);
		writel(ASPEED_I2CD_S_TX_CMD, bus->base + ASPEED_I2C_CMD_REG);
		break;
	case ASPEED_I2C_SLAVE_READ_PROCESSED:
		if (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_ACK))) {
			dev_err(bus->dev,
				"Expected ACK after processed read.\n");
			break;
		}
		irq_handled |= ASPEED_I2CD_INTR_TX_ACK;
		i2c_slave_event(slave, I2C_SLAVE_READ_PROCESSED, &value);
		writel(value, bus->base + ASPEED_I2C_BYTE_BUF_REG);
		writel(ASPEED_I2CD_S_TX_CMD, bus->base + ASPEED_I2C_CMD_REG);
		break;
	case ASPEED_I2C_SLAVE_WRITE_REQUESTED:
		bus->slave_state = ASPEED_I2C_SLAVE_WRITE_RECEIVED;
		i2c_slave_event(slave, I2C_SLAVE_WRITE_REQUESTED, &value);
		break;
	case ASPEED_I2C_SLAVE_WRITE_RECEIVED:
		i2c_slave_event(slave, I2C_SLAVE_WRITE_RECEIVED, &value);
		break;
	case ASPEED_I2C_SLAVE_STOP:
		i2c_slave_event(slave, I2C_SLAVE_STOP, &value);
		bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
		break;
	case ASPEED_I2C_SLAVE_START:
		/* Slave was just started. Waiting for the next event. */
		break;
	default:
		dev_err(bus->dev, "unknown slave_state: %d\n",
			bus->slave_state);
		bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
		break;
	}

	return irq_handled;
}
#endif /* CONFIG_I2C_SLAVE */

/* precondition: bus.lock has been acquired. */
static void aspeed_i2c_do_start(struct aspeed_i2c_bus *bus)
{
	u32 command = ASPEED_I2CD_M_START_CMD | ASPEED_I2CD_M_TX_CMD;
	struct i2c_msg *msg = &bus->msgs[bus->msgs_index];
	u8 slave_addr = i2c_8bit_addr_from_msg(msg);

	bus->master_state = ASPEED_I2C_MASTER_START;

#if IS_ENABLED(CONFIG_I2C_SLAVE)
	/*
	 * If a start is requested in the middle of a slave session, set the
	 * master state to 'pending'; the hardware will then continue handling
	 * this master command once the bus returns to the idle state.
	 */
	if (bus->slave_state != ASPEED_I2C_SLAVE_INACTIVE)
		bus->master_state = ASPEED_I2C_MASTER_PENDING;
#endif /* CONFIG_I2C_SLAVE */

	bus->buf_index = 0;

	if (msg->flags & I2C_M_RD) {
		command |= ASPEED_I2CD_M_RX_CMD;
		/* Need to let the hardware know to NACK after RX. */
		if (msg->len == 1 && !(msg->flags & I2C_M_RECV_LEN))
			command |= ASPEED_I2CD_M_S_RX_CMD_LAST;
	}

	writel(slave_addr, bus->base + ASPEED_I2C_BYTE_BUF_REG);
	writel(command, bus->base + ASPEED_I2C_CMD_REG);
}

/* precondition: bus.lock has been acquired. */
static void aspeed_i2c_do_stop(struct aspeed_i2c_bus *bus)
{
	bus->master_state = ASPEED_I2C_MASTER_STOP;
	writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG);
}

/* precondition: bus.lock has been acquired. */
static void aspeed_i2c_next_msg_or_stop(struct aspeed_i2c_bus *bus)
{
	if (bus->msgs_index + 1 < bus->msgs_count) {
		bus->msgs_index++;
		aspeed_i2c_do_start(bus);
	} else {
		aspeed_i2c_do_stop(bus);
	}
}

static int aspeed_i2c_is_irq_error(u32 irq_status)
{
	if (irq_status & ASPEED_I2CD_INTR_ARBIT_LOSS)
		return -EAGAIN;
	if (irq_status & (ASPEED_I2CD_INTR_SDA_DL_TIMEOUT |
			  ASPEED_I2CD_INTR_SCL_TIMEOUT))
		return -EBUSY;
	if (irq_status & (ASPEED_I2CD_INTR_ABNORMAL))
		return -EPROTO;

	return 0;
}

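/*
 * Summary of the master state machine driven by aspeed_i2c_master_irq()
 * below (orientation only; see the handler for the details):
 *
 *  aspeed_i2c_do_start()      -> MASTER_START, or MASTER_PENDING while a
 *                                slave session is still in progress
 *  TX_ACK on the address byte -> MASTER_TX_FIRST or MASTER_RX_FIRST
 *  each byte transferred      -> MASTER_TX / MASTER_RX until the message is
 *                                complete, then aspeed_i2c_next_msg_or_stop()
 *  NORMAL_STOP                -> MASTER_STOP -> MASTER_INACTIVE, which
 *                                completes cmd_complete for
 *                                aspeed_i2c_master_xfer()
 *
 * Errors (NAK, arbitration loss, timeouts) set cmd_err and either issue a
 * STOP or return the state machine straight to MASTER_INACTIVE.
 */
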
static u32 aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
{
	u32 irq_handled = 0, command = 0;
	struct i2c_msg *msg;
	u8 recv_byte;
	int ret;

	if (irq_status & ASPEED_I2CD_INTR_BUS_RECOVER_DONE) {
		bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
		irq_handled |= ASPEED_I2CD_INTR_BUS_RECOVER_DONE;
		goto out_complete;
	}

	/*
	 * We encountered an interrupt that reports an error: the hardware
	 * should clear the command queue, effectively taking us back to the
	 * INACTIVE state.
	 */
	ret = aspeed_i2c_is_irq_error(irq_status);
	if (ret) {
		dev_dbg(bus->dev, "received error interrupt: 0x%08x\n",
			irq_status);
		irq_handled |= (irq_status & ASPEED_I2CD_INTR_MASTER_ERRORS);
		if (bus->master_state != ASPEED_I2C_MASTER_INACTIVE) {
			bus->cmd_err = ret;
			bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
			goto out_complete;
		}
	}

#if IS_ENABLED(CONFIG_I2C_SLAVE)
	/*
	 * A pending master command will be started by the hardware when the
	 * bus comes back to the idle state after completing a slave
	 * operation, so change the master state from 'pending' to 'start'
	 * here if the slave is inactive.
	 */
	if (bus->master_state == ASPEED_I2C_MASTER_PENDING) {
		if (bus->slave_state != ASPEED_I2C_SLAVE_INACTIVE)
			goto out_no_complete;

		bus->master_state = ASPEED_I2C_MASTER_START;
	}
#endif /* CONFIG_I2C_SLAVE */

	/* Master is not currently active, irq was for someone else. */
	if (bus->master_state == ASPEED_I2C_MASTER_INACTIVE ||
	    bus->master_state == ASPEED_I2C_MASTER_PENDING)
		goto out_no_complete;

	/* We are in an invalid state; reset bus to a known state. */
	if (!bus->msgs) {
		dev_err(bus->dev, "bus in unknown state. irq_status: 0x%x\n",
			irq_status);
		bus->cmd_err = -EIO;
		if (bus->master_state != ASPEED_I2C_MASTER_STOP &&
		    bus->master_state != ASPEED_I2C_MASTER_INACTIVE)
			aspeed_i2c_do_stop(bus);
		goto out_no_complete;
	}
	msg = &bus->msgs[bus->msgs_index];

	/*
	 * START is a special case because we still have to handle a subsequent
	 * TX or RX immediately after we handle it, so we handle it here and
	 * then update the state and handle the new state below.
	 */
	if (bus->master_state == ASPEED_I2C_MASTER_START) {
#if IS_ENABLED(CONFIG_I2C_SLAVE)
		/*
		 * If a peer master starts a transfer immediately after this
		 * controller queues a master command, change the state to
		 * 'pending'; the hardware will then continue the queued
		 * master transfer right after completing the slave mode
		 * session.
		 */
		if (unlikely(irq_status & ASPEED_I2CD_INTR_SLAVE_MATCH)) {
			bus->master_state = ASPEED_I2C_MASTER_PENDING;
			dev_dbg(bus->dev,
				"master goes pending due to a slave start\n");
			goto out_no_complete;
		}
#endif /* CONFIG_I2C_SLAVE */
		if (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_ACK))) {
			if (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_NAK))) {
				bus->cmd_err = -ENXIO;
				bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
				goto out_complete;
			}
			pr_devel("no slave present at %02x\n", msg->addr);
			irq_handled |= ASPEED_I2CD_INTR_TX_NAK;
			bus->cmd_err = -ENXIO;
			aspeed_i2c_do_stop(bus);
			goto out_no_complete;
		}
		irq_handled |= ASPEED_I2CD_INTR_TX_ACK;
		if (msg->len == 0) { /* SMBUS_QUICK */
			aspeed_i2c_do_stop(bus);
			goto out_no_complete;
		}
		if (msg->flags & I2C_M_RD)
			bus->master_state = ASPEED_I2C_MASTER_RX_FIRST;
		else
			bus->master_state = ASPEED_I2C_MASTER_TX_FIRST;
	}

	switch (bus->master_state) {
	case ASPEED_I2C_MASTER_TX:
		if (unlikely(irq_status & ASPEED_I2CD_INTR_TX_NAK)) {
			dev_dbg(bus->dev, "slave NACKed TX\n");
			irq_handled |= ASPEED_I2CD_INTR_TX_NAK;
			goto error_and_stop;
		} else if (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_ACK))) {
			dev_err(bus->dev, "slave failed to ACK TX\n");
			goto error_and_stop;
		}
		irq_handled |= ASPEED_I2CD_INTR_TX_ACK;
		/* fall through */
	case ASPEED_I2C_MASTER_TX_FIRST:
		if (bus->buf_index < msg->len) {
			bus->master_state = ASPEED_I2C_MASTER_TX;
			writel(msg->buf[bus->buf_index++],
			       bus->base + ASPEED_I2C_BYTE_BUF_REG);
			writel(ASPEED_I2CD_M_TX_CMD,
			       bus->base + ASPEED_I2C_CMD_REG);
		} else {
			aspeed_i2c_next_msg_or_stop(bus);
		}
		goto out_no_complete;
	case ASPEED_I2C_MASTER_RX_FIRST:
		/* RX may not have completed yet (only address cycle) */
		if (!(irq_status & ASPEED_I2CD_INTR_RX_DONE))
			goto out_no_complete;
		/* fall through */
	case ASPEED_I2C_MASTER_RX:
		if (unlikely(!(irq_status & ASPEED_I2CD_INTR_RX_DONE))) {
			dev_err(bus->dev, "master failed to RX\n");
			goto error_and_stop;
		}
		irq_handled |= ASPEED_I2CD_INTR_RX_DONE;

		recv_byte = readl(bus->base + ASPEED_I2C_BYTE_BUF_REG) >> 8;
		msg->buf[bus->buf_index++] = recv_byte;

		if (msg->flags & I2C_M_RECV_LEN) {
			if (unlikely(recv_byte > I2C_SMBUS_BLOCK_MAX)) {
				bus->cmd_err = -EPROTO;
				aspeed_i2c_do_stop(bus);
				goto out_no_complete;
			}
			msg->len = recv_byte +
					((msg->flags & I2C_CLIENT_PEC) ? 2 : 1);
			msg->flags &= ~I2C_M_RECV_LEN;
		}

		if (bus->buf_index < msg->len) {
			bus->master_state = ASPEED_I2C_MASTER_RX;
			command = ASPEED_I2CD_M_RX_CMD;
			if (bus->buf_index + 1 == msg->len)
				command |= ASPEED_I2CD_M_S_RX_CMD_LAST;
			writel(command, bus->base + ASPEED_I2C_CMD_REG);
		} else {
			aspeed_i2c_next_msg_or_stop(bus);
		}
		goto out_no_complete;
	case ASPEED_I2C_MASTER_STOP:
		if (unlikely(!(irq_status & ASPEED_I2CD_INTR_NORMAL_STOP))) {
			dev_err(bus->dev,
				"master failed to STOP. irq_status:0x%x\n",
				irq_status);
			bus->cmd_err = -EIO;
			/* Do not STOP as we have already tried. */
		} else {
			irq_handled |= ASPEED_I2CD_INTR_NORMAL_STOP;
		}

		bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
		goto out_complete;
	case ASPEED_I2C_MASTER_INACTIVE:
		dev_err(bus->dev,
			"master received interrupt 0x%08x, but is inactive\n",
			irq_status);
		bus->cmd_err = -EIO;
		/* Do not STOP as we should be inactive. */
		goto out_complete;
	default:
		WARN(1, "unknown master state\n");
		bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
		bus->cmd_err = -EINVAL;
		goto out_complete;
	}
error_and_stop:
	bus->cmd_err = -EIO;
	aspeed_i2c_do_stop(bus);
	goto out_no_complete;
out_complete:
	bus->msgs = NULL;
	if (bus->cmd_err)
		bus->master_xfer_result = bus->cmd_err;
	else
		bus->master_xfer_result = bus->msgs_index + 1;
	complete(&bus->cmd_complete);
out_no_complete:
	return irq_handled;
}

static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id)
{
	struct aspeed_i2c_bus *bus = dev_id;
	u32 irq_received, irq_remaining, irq_handled;

	spin_lock(&bus->lock);
	irq_received = readl(bus->base + ASPEED_I2C_INTR_STS_REG);
	/* Ack all interrupts except for Rx done */
	writel(irq_received & ~ASPEED_I2CD_INTR_RX_DONE,
	       bus->base + ASPEED_I2C_INTR_STS_REG);
	irq_remaining = irq_received;

#if IS_ENABLED(CONFIG_I2C_SLAVE)
	/*
	 * In most cases, interrupt bits will be set one by one, although
	 * multiple interrupt bits could be set at the same time. It's also
	 * possible that master interrupt bits could be set along with slave
	 * interrupt bits. Each case needs to be handled using corresponding
	 * handlers depending on the current state.
	 */
	if (bus->master_state != ASPEED_I2C_MASTER_INACTIVE &&
	    bus->master_state != ASPEED_I2C_MASTER_PENDING) {
		irq_handled = aspeed_i2c_master_irq(bus, irq_remaining);
		irq_remaining &= ~irq_handled;
		if (irq_remaining)
			irq_handled |= aspeed_i2c_slave_irq(bus, irq_remaining);
	} else {
		irq_handled = aspeed_i2c_slave_irq(bus, irq_remaining);
		irq_remaining &= ~irq_handled;
		if (irq_remaining)
			irq_handled |= aspeed_i2c_master_irq(bus,
							     irq_remaining);
	}
#else
	irq_handled = aspeed_i2c_master_irq(bus, irq_remaining);
#endif /* CONFIG_I2C_SLAVE */

	irq_remaining &= ~irq_handled;
	if (irq_remaining)
		dev_err(bus->dev,
			"irq handled != irq. expected 0x%08x, but was 0x%08x\n",
			irq_received, irq_handled);

	/* Ack Rx done */
	if (irq_received & ASPEED_I2CD_INTR_RX_DONE)
		writel(ASPEED_I2CD_INTR_RX_DONE,
		       bus->base + ASPEED_I2C_INTR_STS_REG);
	spin_unlock(&bus->lock);
	return irq_remaining ? IRQ_NONE : IRQ_HANDLED;
}

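/*
 * Illustrative example only (the address and message layout below are made
 * up, not taken from this driver): a client driver's register read typically
 * reaches aspeed_i2c_master_xfer() as two messages,
 *
 *	struct i2c_msg msgs[] = {
 *		{ .addr = 0x50, .flags = 0,        .len = 1, .buf = &reg },
 *		{ .addr = 0x50, .flags = I2C_M_RD, .len = 2, .buf = val  },
 *	};
 *
 * msgs[0] is kicked off by aspeed_i2c_do_start() (START + 0xa0, then TX of
 * 'reg'); once it completes, aspeed_i2c_next_msg_or_stop() issues a repeated
 * START for msgs[1] (0xa1, RX of two bytes with the last one NACKed via
 * ASPEED_I2CD_M_S_RX_CMD_LAST), and finally a STOP.
 */
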
static int aspeed_i2c_master_xfer(struct i2c_adapter *adap,
				  struct i2c_msg *msgs, int num)
{
	struct aspeed_i2c_bus *bus = i2c_get_adapdata(adap);
	unsigned long time_left, flags;

	spin_lock_irqsave(&bus->lock, flags);
	bus->cmd_err = 0;

	/* If bus is busy in a single master environment, attempt recovery. */
	if (!bus->multi_master &&
	    (readl(bus->base + ASPEED_I2C_CMD_REG) &
	     ASPEED_I2CD_BUS_BUSY_STS)) {
		int ret;

		spin_unlock_irqrestore(&bus->lock, flags);
		ret = aspeed_i2c_recover_bus(bus);
		if (ret)
			return ret;
		spin_lock_irqsave(&bus->lock, flags);
	}

	bus->cmd_err = 0;
	bus->msgs = msgs;
	bus->msgs_index = 0;
	bus->msgs_count = num;

	reinit_completion(&bus->cmd_complete);
	aspeed_i2c_do_start(bus);
	spin_unlock_irqrestore(&bus->lock, flags);

	time_left = wait_for_completion_timeout(&bus->cmd_complete,
						bus->adap.timeout);

	if (time_left == 0) {
		/*
		 * If we timed out and the bus is still busy in a multi-master
		 * environment, attempt recovery here.
		 */
		if (bus->multi_master &&
		    (readl(bus->base + ASPEED_I2C_CMD_REG) &
		     ASPEED_I2CD_BUS_BUSY_STS))
			aspeed_i2c_recover_bus(bus);

		return -ETIMEDOUT;
	}

	return bus->master_xfer_result;
}

static u32 aspeed_i2c_functionality(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_SMBUS_BLOCK_DATA;
}

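/*
 * Slave mode usage note: the .reg_slave()/.unreg_slave() callbacks below are
 * invoked by the I2C core when a slave backend is bound to an address on
 * this bus, e.g. (pattern from Documentation/i2c/slave-interface.rst; the
 * bus number and address here are illustrative only):
 *
 *	echo slave-24c02 0x1064 > /sys/bus/i2c/devices/i2c-3/new_device
 *
 * Only one slave address per bus is supported here; a second registration
 * returns -EINVAL.
 */
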
#if IS_ENABLED(CONFIG_I2C_SLAVE)
/* precondition: bus.lock has been acquired. */
static void __aspeed_i2c_reg_slave(struct aspeed_i2c_bus *bus, u16 slave_addr)
{
	u32 addr_reg_val, func_ctrl_reg_val;

	/* Set slave addr. */
	addr_reg_val = readl(bus->base + ASPEED_I2C_DEV_ADDR_REG);
	addr_reg_val &= ~ASPEED_I2CD_DEV_ADDR_MASK;
	addr_reg_val |= slave_addr & ASPEED_I2CD_DEV_ADDR_MASK;
	writel(addr_reg_val, bus->base + ASPEED_I2C_DEV_ADDR_REG);

	/* Turn on slave mode. */
	func_ctrl_reg_val = readl(bus->base + ASPEED_I2C_FUN_CTRL_REG);
	func_ctrl_reg_val |= ASPEED_I2CD_SLAVE_EN;
	writel(func_ctrl_reg_val, bus->base + ASPEED_I2C_FUN_CTRL_REG);
}

static int aspeed_i2c_reg_slave(struct i2c_client *client)
{
	struct aspeed_i2c_bus *bus = i2c_get_adapdata(client->adapter);
	unsigned long flags;

	spin_lock_irqsave(&bus->lock, flags);
	if (bus->slave) {
		spin_unlock_irqrestore(&bus->lock, flags);
		return -EINVAL;
	}

	__aspeed_i2c_reg_slave(bus, client->addr);

	bus->slave = client;
	bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
	spin_unlock_irqrestore(&bus->lock, flags);

	return 0;
}

static int aspeed_i2c_unreg_slave(struct i2c_client *client)
{
	struct aspeed_i2c_bus *bus = i2c_get_adapdata(client->adapter);
	u32 func_ctrl_reg_val;
	unsigned long flags;

	spin_lock_irqsave(&bus->lock, flags);
	if (!bus->slave) {
		spin_unlock_irqrestore(&bus->lock, flags);
		return -EINVAL;
	}

	/* Turn off slave mode. */
	func_ctrl_reg_val = readl(bus->base + ASPEED_I2C_FUN_CTRL_REG);
	func_ctrl_reg_val &= ~ASPEED_I2CD_SLAVE_EN;
	writel(func_ctrl_reg_val, bus->base + ASPEED_I2C_FUN_CTRL_REG);

	bus->slave = NULL;
	spin_unlock_irqrestore(&bus->lock, flags);

	return 0;
}
#endif /* CONFIG_I2C_SLAVE */

static const struct i2c_algorithm aspeed_i2c_algo = {
	.master_xfer	= aspeed_i2c_master_xfer,
	.functionality	= aspeed_i2c_functionality,
#if IS_ENABLED(CONFIG_I2C_SLAVE)
	.reg_slave	= aspeed_i2c_reg_slave,
	.unreg_slave	= aspeed_i2c_unreg_slave,
#endif /* CONFIG_I2C_SLAVE */
};

static u32 aspeed_i2c_get_clk_reg_val(struct device *dev,
				      u32 clk_high_low_mask,
				      u32 divisor)
{
	u32 base_clk_divisor, clk_high_low_max, clk_high, clk_low, tmp;

	/*
	 * SCL_high and SCL_low represent a value 1 greater than what is stored
	 * since a zero divider is meaningless. Thus, the max value each can
	 * store is every bit set + 1. Since SCL_high and SCL_low are added
	 * together (see below), the max value of both is the max value of one
	 * of them times two.
	 */
	clk_high_low_max = (clk_high_low_mask + 1) * 2;

	/*
	 * The actual clock frequency of SCL is:
	 *	SCL_freq = APB_freq / (base_freq * (SCL_high + SCL_low))
	 *		 = APB_freq / divisor
	 * where base_freq is a programmable clock divider; its value is
	 *	base_freq = 1 << base_clk_divisor
	 * SCL_high is the number of base_freq clock cycles that SCL stays high
	 * and SCL_low is the number of base_freq clock cycles that SCL stays
	 * low for a period of SCL.
	 * The hardware requires a minimum SCL_high and SCL_low of 1; thus, the
	 * register fields start counting at zero. So
	 *	SCL_high = clk_high + 1
	 *	SCL_low	 = clk_low + 1
	 * Thus,
	 *	SCL_freq = APB_freq /
	 *		((1 << base_clk_divisor) * (clk_high + 1 + clk_low + 1))
	 * The documentation recommends clk_high >= clk_high_max / 2 and
	 * clk_low >= clk_low_max / 2 - 1 when possible; this last constraint
	 * gives us the following solution:
	 */
	base_clk_divisor = divisor > clk_high_low_max ?
			ilog2((divisor - 1) / clk_high_low_max) + 1 : 0;

	if (base_clk_divisor > ASPEED_I2CD_TIME_BASE_DIVISOR_MASK) {
		base_clk_divisor = ASPEED_I2CD_TIME_BASE_DIVISOR_MASK;
		clk_low = clk_high_low_mask;
		clk_high = clk_high_low_mask;
		dev_err(dev,
			"clamping clock divider: divider requested, %u, is greater than largest possible divider, %u.\n",
			divisor, (1 << base_clk_divisor) * clk_high_low_max);
	} else {
		tmp = (divisor + (1 << base_clk_divisor) - 1)
				>> base_clk_divisor;
		clk_low = tmp / 2;
		clk_high = tmp - clk_low;

		if (clk_high)
			clk_high--;

		if (clk_low)
			clk_low--;
	}

	return ((clk_high << ASPEED_I2CD_TIME_SCL_HIGH_SHIFT)
		& ASPEED_I2CD_TIME_SCL_HIGH_MASK)
	       | ((clk_low << ASPEED_I2CD_TIME_SCL_LOW_SHIFT)
		  & ASPEED_I2CD_TIME_SCL_LOW_MASK)
	       | (base_clk_divisor
		  & ASPEED_I2CD_TIME_BASE_DIVISOR_MASK);
}

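/*
 * Worked example of the calculation above (the input numbers are assumed for
 * illustration only): with a 24 MHz APB clock and a requested bus frequency
 * of 100 kHz, divisor = 240. For the AST2500's 4-bit fields
 * (clk_high_low_max = 32), 240 > 32 gives
 *	base_clk_divisor = ilog2(239 / 32) + 1 = 3
 *	tmp = (240 + 7) >> 3 = 30, so clk_low = clk_high = 15,
 * stored as 14 after the "-1" adjustment. The resulting SCL rate is
 * 24 MHz / (8 * (15 + 15)) = 100 kHz.
 */
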
static u32 aspeed_i2c_24xx_get_clk_reg_val(struct device *dev, u32 divisor)
{
	/*
	 * clk_high and clk_low are each 3 bits wide, so each can hold a max
	 * value of 8 giving a clk_high_low_max of 16.
	 */
	return aspeed_i2c_get_clk_reg_val(dev, GENMASK(2, 0), divisor);
}

static u32 aspeed_i2c_25xx_get_clk_reg_val(struct device *dev, u32 divisor)
{
	/*
	 * clk_high and clk_low are each 4 bits wide, so each can hold a max
	 * value of 16 giving a clk_high_low_max of 32.
	 */
	return aspeed_i2c_get_clk_reg_val(dev, GENMASK(3, 0), divisor);
}

/* precondition: bus.lock has been acquired. */
static int aspeed_i2c_init_clk(struct aspeed_i2c_bus *bus)
{
	u32 divisor, clk_reg_val;

	divisor = DIV_ROUND_UP(bus->parent_clk_frequency, bus->bus_frequency);
	clk_reg_val = readl(bus->base + ASPEED_I2C_AC_TIMING_REG1);
	clk_reg_val &= (ASPEED_I2CD_TIME_TBUF_MASK |
			ASPEED_I2CD_TIME_THDSTA_MASK |
			ASPEED_I2CD_TIME_TACST_MASK);
	clk_reg_val |= bus->get_clk_reg_val(bus->dev, divisor);
	writel(clk_reg_val, bus->base + ASPEED_I2C_AC_TIMING_REG1);
	writel(ASPEED_NO_TIMEOUT_CTRL, bus->base + ASPEED_I2C_AC_TIMING_REG2);

	return 0;
}

/* precondition: bus.lock has been acquired. */
static int aspeed_i2c_init(struct aspeed_i2c_bus *bus,
			   struct platform_device *pdev)
{
	u32 fun_ctrl_reg = ASPEED_I2CD_MASTER_EN;
	int ret;

	/* Disable everything. */
	writel(0, bus->base + ASPEED_I2C_FUN_CTRL_REG);

	ret = aspeed_i2c_init_clk(bus);
	if (ret < 0)
		return ret;

	if (of_property_read_bool(pdev->dev.of_node, "multi-master"))
		bus->multi_master = true;
	else
		fun_ctrl_reg |= ASPEED_I2CD_MULTI_MASTER_DIS;

	/* Enable Master Mode */
	writel(readl(bus->base + ASPEED_I2C_FUN_CTRL_REG) | fun_ctrl_reg,
	       bus->base + ASPEED_I2C_FUN_CTRL_REG);

#if IS_ENABLED(CONFIG_I2C_SLAVE)
	/* If slave has already been registered, re-enable it. */
	if (bus->slave)
		__aspeed_i2c_reg_slave(bus, bus->slave->addr);
#endif /* CONFIG_I2C_SLAVE */

	/* Set interrupt generation of I2C controller */
	writel(ASPEED_I2CD_INTR_ALL, bus->base + ASPEED_I2C_INTR_CTRL_REG);

	return 0;
}

static int aspeed_i2c_reset(struct aspeed_i2c_bus *bus)
{
	struct platform_device *pdev = to_platform_device(bus->dev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&bus->lock, flags);

	/* Disable and ack all interrupts. */
	writel(0, bus->base + ASPEED_I2C_INTR_CTRL_REG);
	writel(0xffffffff, bus->base + ASPEED_I2C_INTR_STS_REG);

	ret = aspeed_i2c_init(bus, pdev);

	spin_unlock_irqrestore(&bus->lock, flags);

	return ret;
}

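/*
 * For reference, a devicetree node matched by the table below looks roughly
 * like the sketch here. The property names ("bus-frequency", "multi-master")
 * and the compatible strings come from this driver; the reg, clock, reset
 * and interrupt values are placeholders only, so consult the binding
 * document for the authoritative description:
 *
 *	i2c3: i2c-bus@c0 {
 *		compatible = "aspeed,ast2500-i2c-bus";
 *		reg = <0xc0 0x40>;
 *		clocks = <&syscon ASPEED_CLK_APB>;
 *		resets = <&syscon ASPEED_RESET_I2C>;
 *		interrupts = <16>;
 *		bus-frequency = <100000>;
 *		multi-master;
 *	};
 */
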
static const struct of_device_id aspeed_i2c_bus_of_table[] = {
	{
		.compatible = "aspeed,ast2400-i2c-bus",
		.data = aspeed_i2c_24xx_get_clk_reg_val,
	},
	{
		.compatible = "aspeed,ast2500-i2c-bus",
		.data = aspeed_i2c_25xx_get_clk_reg_val,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, aspeed_i2c_bus_of_table);

static int aspeed_i2c_probe_bus(struct platform_device *pdev)
{
	const struct of_device_id *match;
	struct aspeed_i2c_bus *bus;
	struct clk *parent_clk;
	struct resource *res;
	int irq, ret;

	bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL);
	if (!bus)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	bus->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(bus->base))
		return PTR_ERR(bus->base);

	parent_clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(parent_clk))
		return PTR_ERR(parent_clk);
	bus->parent_clk_frequency = clk_get_rate(parent_clk);
	/* We just need the clock rate; we don't actually use the clk object. */
	devm_clk_put(&pdev->dev, parent_clk);

	bus->rst = devm_reset_control_get_shared(&pdev->dev, NULL);
	if (IS_ERR(bus->rst)) {
		dev_err(&pdev->dev,
			"missing or invalid reset controller device tree entry\n");
		return PTR_ERR(bus->rst);
	}
	reset_control_deassert(bus->rst);

	ret = of_property_read_u32(pdev->dev.of_node,
				   "bus-frequency", &bus->bus_frequency);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Could not read bus-frequency property\n");
		bus->bus_frequency = 100000;
	}

	match = of_match_node(aspeed_i2c_bus_of_table, pdev->dev.of_node);
	if (!match)
		bus->get_clk_reg_val = aspeed_i2c_24xx_get_clk_reg_val;
	else
		bus->get_clk_reg_val = (u32 (*)(struct device *, u32))
				match->data;

	/* Initialize the I2C adapter */
	spin_lock_init(&bus->lock);
	init_completion(&bus->cmd_complete);
	bus->adap.owner = THIS_MODULE;
	bus->adap.retries = 0;
	bus->adap.algo = &aspeed_i2c_algo;
	bus->adap.dev.parent = &pdev->dev;
	bus->adap.dev.of_node = pdev->dev.of_node;
	strlcpy(bus->adap.name, pdev->name, sizeof(bus->adap.name));
	i2c_set_adapdata(&bus->adap, bus);

	bus->dev = &pdev->dev;

	/* Clean up any left over interrupt state. */
	writel(0, bus->base + ASPEED_I2C_INTR_CTRL_REG);
	writel(0xffffffff, bus->base + ASPEED_I2C_INTR_STS_REG);
	/*
	 * bus.lock does not need to be held because the interrupt handler has
	 * not been enabled yet.
	 */
	ret = aspeed_i2c_init(bus, pdev);
	if (ret < 0)
		return ret;

	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
	ret = devm_request_irq(&pdev->dev, irq, aspeed_i2c_bus_irq,
			       0, dev_name(&pdev->dev), bus);
	if (ret < 0)
		return ret;

	ret = i2c_add_adapter(&bus->adap);
	if (ret < 0)
		return ret;

	platform_set_drvdata(pdev, bus);

	dev_info(bus->dev, "i2c bus %d registered, irq %d\n",
		 bus->adap.nr, irq);

	return 0;
}

static int aspeed_i2c_remove_bus(struct platform_device *pdev)
{
	struct aspeed_i2c_bus *bus = platform_get_drvdata(pdev);
	unsigned long flags;

	spin_lock_irqsave(&bus->lock, flags);

	/* Disable everything. */
	writel(0, bus->base + ASPEED_I2C_FUN_CTRL_REG);
	writel(0, bus->base + ASPEED_I2C_INTR_CTRL_REG);

	spin_unlock_irqrestore(&bus->lock, flags);

	reset_control_assert(bus->rst);

	i2c_del_adapter(&bus->adap);

	return 0;
}

static struct platform_driver aspeed_i2c_bus_driver = {
	.probe		= aspeed_i2c_probe_bus,
	.remove		= aspeed_i2c_remove_bus,
	.driver		= {
		.name		= "aspeed-i2c-bus",
		.of_match_table	= aspeed_i2c_bus_of_table,
	},
};
module_platform_driver(aspeed_i2c_bus_driver);

MODULE_AUTHOR("Brendan Higgins <brendanhiggins@google.com>");
MODULE_DESCRIPTION("Aspeed I2C Bus Driver");
MODULE_LICENSE("GPL v2");