// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Synopsys DesignWare I2C adapter driver (master only).
 *
 * Based on the TI DAVINCI I2C adapter driver.
 *
 * Copyright (C) 2006 Texas Instruments.
 * Copyright (C) 2007 MontaVista Software Inc.
 * Copyright (C) 2009 Provigent Ltd.
 */
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>

#include "i2c-designware-core.h"

#define AMD_TIMEOUT_MIN_US	25
#define AMD_TIMEOUT_MAX_US	250
#define AMD_MASTERCFG_MASK	GENMASK(15, 0)

static void i2c_dw_configure_fifo_master(struct dw_i2c_dev *dev)
{
	/* Configure Tx/Rx FIFO threshold levels */
	regmap_write(dev->map, DW_IC_TX_TL, dev->tx_fifo_depth / 2);
	regmap_write(dev->map, DW_IC_RX_TL, 0);

	/* Configure the I2C master */
	regmap_write(dev->map, DW_IC_CON, dev->master_cfg);
}

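/*
 * Note on the SCL counts calculated below: the *_HCNT/*_LCNT values are
 * expressed in ic_clk cycles and program the SCL high/low phase lengths,
 * so roughly f_SCL ~= ic_clk / (HCNT + LCNT + fixed IP overhead). The
 * exact formulas, including the SDA/SCL fall time compensation, are
 * implemented by i2c_dw_scl_hcnt() and i2c_dw_scl_lcnt() in
 * i2c-designware-common.c.
 */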
static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
{
	unsigned int comp_param1;
	u32 sda_falling_time, scl_falling_time;
	struct i2c_timings *t = &dev->timings;
	const char *fp_str = "";
	u32 ic_clk;
	int ret;

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	ret = regmap_read(dev->map, DW_IC_COMP_PARAM_1, &comp_param1);
	i2c_dw_release_lock(dev);
	if (ret)
		return ret;

	/* Set standard and fast speed dividers for high/low periods */
	sda_falling_time = t->sda_fall_ns ?: 300; /* ns */
	scl_falling_time = t->scl_fall_ns ?: 300; /* ns */

	/* Calculate SCL timing parameters for standard mode if not set */
	if (!dev->ss_hcnt || !dev->ss_lcnt) {
		ic_clk = i2c_dw_clk_rate(dev);
		dev->ss_hcnt =
			i2c_dw_scl_hcnt(ic_clk,
					4000,	/* tHD;STA = tHIGH = 4.0 us */
					sda_falling_time,
					0,	/* 0: DW default, 1: Ideal */
					0);	/* No offset */
		dev->ss_lcnt =
			i2c_dw_scl_lcnt(ic_clk,
					4700,	/* tLOW = 4.7 us */
					scl_falling_time,
					0);	/* No offset */
	}
	dev_dbg(dev->dev, "Standard Mode HCNT:LCNT = %d:%d\n",
		dev->ss_hcnt, dev->ss_lcnt);

	/*
	 * Set SCL timing parameters for fast mode or fast mode plus. The only
	 * difference is the timing parameter values since the registers are
	 * the same.
	 */
	if (t->bus_freq_hz == I2C_MAX_FAST_MODE_PLUS_FREQ) {
		/*
		 * Check whether Fast Mode Plus parameters are available and
		 * calculate the SCL timing parameters for Fast Mode Plus if
		 * they are not set.
		 */
		if (dev->fp_hcnt && dev->fp_lcnt) {
			dev->fs_hcnt = dev->fp_hcnt;
			dev->fs_lcnt = dev->fp_lcnt;
		} else {
			ic_clk = i2c_dw_clk_rate(dev);
			dev->fs_hcnt =
				i2c_dw_scl_hcnt(ic_clk,
						260,	/* tHIGH = 260 ns */
						sda_falling_time,
						0,	/* DW default */
						0);	/* No offset */
			dev->fs_lcnt =
				i2c_dw_scl_lcnt(ic_clk,
						500,	/* tLOW = 500 ns */
						scl_falling_time,
						0);	/* No offset */
		}
		fp_str = " Plus";
	}
	/*
	 * Calculate SCL timing parameters for fast mode if not set. They are
	 * also needed in high speed mode.
	 */
	if (!dev->fs_hcnt || !dev->fs_lcnt) {
		ic_clk = i2c_dw_clk_rate(dev);
		dev->fs_hcnt =
			i2c_dw_scl_hcnt(ic_clk,
					600,	/* tHD;STA = tHIGH = 0.6 us */
					sda_falling_time,
					0,	/* 0: DW default, 1: Ideal */
					0);	/* No offset */
		dev->fs_lcnt =
			i2c_dw_scl_lcnt(ic_clk,
					1300,	/* tLOW = 1.3 us */
					scl_falling_time,
					0);	/* No offset */
	}
	dev_dbg(dev->dev, "Fast Mode%s HCNT:LCNT = %d:%d\n",
		fp_str, dev->fs_hcnt, dev->fs_lcnt);

	/* Check whether high speed is possible and fall back to fast mode if not */
	if ((dev->master_cfg & DW_IC_CON_SPEED_MASK) ==
	    DW_IC_CON_SPEED_HIGH) {
		if ((comp_param1 & DW_IC_COMP_PARAM_1_SPEED_MODE_MASK)
			!= DW_IC_COMP_PARAM_1_SPEED_MODE_HIGH) {
			dev_err(dev->dev, "High Speed not supported!\n");
			t->bus_freq_hz = I2C_MAX_FAST_MODE_FREQ;
			dev->master_cfg &= ~DW_IC_CON_SPEED_MASK;
			dev->master_cfg |= DW_IC_CON_SPEED_FAST;
			dev->hs_hcnt = 0;
			dev->hs_lcnt = 0;
		} else if (!dev->hs_hcnt || !dev->hs_lcnt) {
			ic_clk = i2c_dw_clk_rate(dev);
			dev->hs_hcnt =
				i2c_dw_scl_hcnt(ic_clk,
						160,	/* tHIGH = 160 ns */
						sda_falling_time,
						0,	/* DW default */
						0);	/* No offset */
			dev->hs_lcnt =
				i2c_dw_scl_lcnt(ic_clk,
						320,	/* tLOW = 320 ns */
						scl_falling_time,
						0);	/* No offset */
		}
		dev_dbg(dev->dev, "High Speed Mode HCNT:LCNT = %d:%d\n",
			dev->hs_hcnt, dev->hs_lcnt);
	}

	ret = i2c_dw_set_sda_hold(dev);
	if (ret)
		return ret;

	dev_dbg(dev->dev, "Bus speed: %s\n", i2c_freq_mode_string(t->bus_freq_hz));
	return 0;
}

/**
 * i2c_dw_init_master() - Initialize the designware I2C master hardware
 * @dev: device private data
 *
 * This function configures and enables the I2C master. It is called during
 * the I2C initialization path and again when recovering from a transfer
 * timeout at run time.
 */
static int i2c_dw_init_master(struct dw_i2c_dev *dev)
{
	int ret;

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	/* Disable the adapter */
	__i2c_dw_disable(dev);

	/* Write standard speed timing parameters */
	regmap_write(dev->map, DW_IC_SS_SCL_HCNT, dev->ss_hcnt);
	regmap_write(dev->map, DW_IC_SS_SCL_LCNT, dev->ss_lcnt);

	/* Write fast mode/fast mode plus timing parameters */
	regmap_write(dev->map, DW_IC_FS_SCL_HCNT, dev->fs_hcnt);
	regmap_write(dev->map, DW_IC_FS_SCL_LCNT, dev->fs_lcnt);

	/* Write high speed timing parameters if supported */
	if (dev->hs_hcnt && dev->hs_lcnt) {
		regmap_write(dev->map, DW_IC_HS_SCL_HCNT, dev->hs_hcnt);
		regmap_write(dev->map, DW_IC_HS_SCL_LCNT, dev->hs_lcnt);
	}

	/* Write SDA hold time if supported */
	if (dev->sda_hold_time)
		regmap_write(dev->map, DW_IC_SDA_HOLD, dev->sda_hold_time);

	i2c_dw_configure_fifo_master(dev);
	i2c_dw_release_lock(dev);

	return 0;
}

static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
{
	struct i2c_msg *msgs = dev->msgs;
	u32 ic_con = 0, ic_tar = 0;
	unsigned int dummy;

	/* Disable the adapter */
	__i2c_dw_disable(dev);

	/* If the slave address is a ten bit address, enable 10BITADDR */
	if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) {
		ic_con = DW_IC_CON_10BITADDR_MASTER;
		/*
		 * If I2C_DYNAMIC_TAR_UPDATE is set, the 10-bit addressing
		 * mode has to be enabled via bit 12 of the IC_TAR register.
		 * Always set it, since I2C_DYNAMIC_TAR_UPDATE cannot be
		 * detected from the registers.
		 */
		ic_tar = DW_IC_TAR_10BITADDR_MASTER;
	}

	regmap_update_bits(dev->map, DW_IC_CON, DW_IC_CON_10BITADDR_MASTER,
			   ic_con);

	/*
	 * Set the slave (target) address and enable 10-bit addressing mode
	 * if applicable.
	 */
	regmap_write(dev->map, DW_IC_TAR,
		     msgs[dev->msg_write_idx].addr | ic_tar);

	/* Enforce disabled interrupts (due to HW issues) */
	regmap_write(dev->map, DW_IC_INTR_MASK, 0);

	/* Enable the adapter */
	__i2c_dw_enable(dev);

	/* Dummy read to avoid the register getting stuck on Bay Trail */
	regmap_read(dev->map, DW_IC_ENABLE_STATUS, &dummy);

	/* Clear and enable interrupts */
	regmap_read(dev->map, DW_IC_CLR_INTR, &dummy);
	regmap_write(dev->map, DW_IC_INTR_MASK, DW_IC_INTR_MASTER_MASK);
}

static int i2c_dw_check_stopbit(struct dw_i2c_dev *dev)
{
	u32 val;
	int ret;

	ret = regmap_read_poll_timeout(dev->map, DW_IC_INTR_STAT, val,
				       !(val & DW_IC_INTR_STOP_DET),
				       1100, 20000);
	if (ret)
		dev_err(dev->dev, "i2c timeout error %d\n", ret);

	return ret;
}

static int i2c_dw_status(struct dw_i2c_dev *dev)
{
	int status;

	status = i2c_dw_wait_bus_not_busy(dev);
	if (status)
		return status;

	return i2c_dw_check_stopbit(dev);
}

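/*
 * Writes to DW_IC_DATA_CMD below combine a data byte with control bits as
 * described in the DesignWare databook: bit 8 (0x100) issues a read
 * command, BIT(9) requests a STOP after this byte and BIT(10) requests a
 * RESTART before it. As an illustrative example only, a single-byte read
 * terminated by a STOP condition would be queued as:
 *
 *	regmap_write(dev->map, DW_IC_DATA_CMD, 0x100 | BIT(9));
 */
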
/*
 * Initiate and continue a master read/write transaction using a polling
 * based transfer routine, writing the messages into the Tx buffer as it
 * goes.
 */
static int amd_i2c_dw_xfer_quirk(struct i2c_adapter *adap, struct i2c_msg *msgs, int num_msgs)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
	int msg_wrt_idx, msg_itr_lmt, buf_len, data_idx;
	int cmd = 0, status;
	u8 *tx_buf;
	unsigned int val;

	/*
	 * In order to enable the interrupt for UCSI i.e. AMD NAVI GPU card,
	 * it is mandatory to set the right value in specific register
	 * (offset:0x474) as per the hardware IP specification.
	 */
	regmap_write(dev->map, AMD_UCSI_INTR_REG, AMD_UCSI_INTR_EN);

	dev->msgs = msgs;
	dev->msgs_num = num_msgs;
	i2c_dw_xfer_init(dev);
	regmap_write(dev->map, DW_IC_INTR_MASK, 0);

	/* Initiate messages read/write transaction */
	for (msg_wrt_idx = 0; msg_wrt_idx < num_msgs; msg_wrt_idx++) {
		tx_buf = msgs[msg_wrt_idx].buf;
		buf_len = msgs[msg_wrt_idx].len;

		if (!(msgs[msg_wrt_idx].flags & I2C_M_RD))
			regmap_write(dev->map, DW_IC_TX_TL, buf_len - 1);
		/*
		 * Initiate the i2c read/write transaction of buffer length,
		 * and poll for bus busy status. For the last message transfer,
		 * update the command with the stop bit enabled.
		 */
		for (msg_itr_lmt = buf_len; msg_itr_lmt > 0; msg_itr_lmt--) {
			if (msg_wrt_idx == num_msgs - 1 && msg_itr_lmt == 1)
				cmd |= BIT(9);

			if (msgs[msg_wrt_idx].flags & I2C_M_RD) {
				/* Due to hardware bug, need to write the same command twice. */
				regmap_write(dev->map, DW_IC_DATA_CMD, 0x100);
				regmap_write(dev->map, DW_IC_DATA_CMD, 0x100 | cmd);
				if (cmd) {
					regmap_write(dev->map, DW_IC_TX_TL, 2 * (buf_len - 1));
					regmap_write(dev->map, DW_IC_RX_TL, 2 * (buf_len - 1));
					/*
					 * Need to check the stop bit. However, it cannot be
					 * detected from the registers so we check it always
					 * when reading/writing the last byte.
					 */
					status = i2c_dw_status(dev);
					if (status)
						return status;

					for (data_idx = 0; data_idx < buf_len; data_idx++) {
						regmap_read(dev->map, DW_IC_DATA_CMD, &val);
						tx_buf[data_idx] = val;
					}
					status = i2c_dw_check_stopbit(dev);
					if (status)
						return status;
				}
			} else {
				regmap_write(dev->map, DW_IC_DATA_CMD, *tx_buf++ | cmd);
				usleep_range(AMD_TIMEOUT_MIN_US, AMD_TIMEOUT_MAX_US);
			}
		}
		status = i2c_dw_check_stopbit(dev);
		if (status)
			return status;
	}

	return 0;
}

/*
 * Initiate (and continue) a low level master read/write transaction.
 * This function is only called from i2c_dw_isr(); it pumps i2c_msg
 * messages into the Tx FIFO and handles messages even when their data
 * does not fit into the Tx FIFO in one go.
 */
static void
i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
{
	struct i2c_msg *msgs = dev->msgs;
	u32 intr_mask;
	int tx_limit, rx_limit;
	u32 addr = msgs[dev->msg_write_idx].addr;
	u32 buf_len = dev->tx_buf_len;
	u8 *buf = dev->tx_buf;
	bool need_restart = false;
	unsigned int flr;

	intr_mask = DW_IC_INTR_MASTER_MASK;

	for (; dev->msg_write_idx < dev->msgs_num; dev->msg_write_idx++) {
		u32 flags = msgs[dev->msg_write_idx].flags;

		/*
		 * If the target address has changed, we need to
		 * reprogram the target address in the I2C
		 * adapter when we are done with this transfer.
		 */
		if (msgs[dev->msg_write_idx].addr != addr) {
			dev_err(dev->dev,
				"%s: invalid target address\n", __func__);
			dev->msg_err = -EINVAL;
			break;
		}

		if (!(dev->status & STATUS_WRITE_IN_PROGRESS)) {
			/* new i2c_msg */
			buf = msgs[dev->msg_write_idx].buf;
			buf_len = msgs[dev->msg_write_idx].len;

			/* If both IC_EMPTYFIFO_HOLD_MASTER_EN and
			 * IC_RESTART_EN are set, we must manually
			 * set the restart bit between messages.
			 */
			if ((dev->master_cfg & DW_IC_CON_RESTART_EN) &&
			    (dev->msg_write_idx > 0))
				need_restart = true;
		}

		regmap_read(dev->map, DW_IC_TXFLR, &flr);
		tx_limit = dev->tx_fifo_depth - flr;

		regmap_read(dev->map, DW_IC_RXFLR, &flr);
		rx_limit = dev->rx_fifo_depth - flr;

		while (buf_len > 0 && tx_limit > 0 && rx_limit > 0) {
			u32 cmd = 0;

			/*
			 * If IC_EMPTYFIFO_HOLD_MASTER_EN is set we must
			 * manually set the stop bit. However, it cannot be
			 * detected from the registers so we set it always
			 * when writing/reading the last byte.
			 */

			/*
			 * i2c-core always sets the buffer length of
			 * I2C_FUNC_SMBUS_BLOCK_DATA to 1. The length will
			 * be adjusted when receiving the first byte.
			 * Thus we can't stop the transaction here.
			 */
			if (dev->msg_write_idx == dev->msgs_num - 1 &&
			    buf_len == 1 && !(flags & I2C_M_RECV_LEN))
				cmd |= BIT(9);

			if (need_restart) {
				cmd |= BIT(10);
				need_restart = false;
			}

			if (msgs[dev->msg_write_idx].flags & I2C_M_RD) {

				/* Avoid rx buffer overrun */
				if (dev->rx_outstanding >= dev->rx_fifo_depth)
					break;

				regmap_write(dev->map, DW_IC_DATA_CMD,
					     cmd | 0x100);
				rx_limit--;
				dev->rx_outstanding++;
			} else {
				regmap_write(dev->map, DW_IC_DATA_CMD,
					     cmd | *buf++);
			}
			tx_limit--; buf_len--;
		}

		dev->tx_buf = buf;
		dev->tx_buf_len = buf_len;

		/*
		 * Because we don't know the buffer length in the
		 * I2C_FUNC_SMBUS_BLOCK_DATA case, we can't stop
		 * the transaction here.
		 */
		if (buf_len > 0 || flags & I2C_M_RECV_LEN) {
			/* more bytes to be written */
			dev->status |= STATUS_WRITE_IN_PROGRESS;
			break;
		} else
			dev->status &= ~STATUS_WRITE_IN_PROGRESS;
	}

	/*
	 * Once all i2c_msg entries have been processed we no longer need
	 * the TX_EMPTY interrupt.
	 */
	if (dev->msg_write_idx == dev->msgs_num)
		intr_mask &= ~DW_IC_INTR_TX_EMPTY;

	if (dev->msg_err)
		intr_mask = 0;

	regmap_write(dev->map, DW_IC_INTR_MASK, intr_mask);
}

static u8
i2c_dw_recv_len(struct dw_i2c_dev *dev, u8 len)
{
	struct i2c_msg *msgs = dev->msgs;
	u32 flags = msgs[dev->msg_read_idx].flags;

	/*
	 * Adjust the buffer length and mask the flag
	 * after receiving the first byte.
	 */
	len += (flags & I2C_CLIENT_PEC) ? 2 : 1;
	dev->tx_buf_len = len - min_t(u8, len, dev->rx_outstanding);
	msgs[dev->msg_read_idx].len = len;
	msgs[dev->msg_read_idx].flags &= ~I2C_M_RECV_LEN;

	return len;
}

static void
i2c_dw_read(struct dw_i2c_dev *dev)
{
	struct i2c_msg *msgs = dev->msgs;
	unsigned int rx_valid;

	for (; dev->msg_read_idx < dev->msgs_num; dev->msg_read_idx++) {
		unsigned int tmp;
		u32 len;
		u8 *buf;

		if (!(msgs[dev->msg_read_idx].flags & I2C_M_RD))
			continue;

		if (!(dev->status & STATUS_READ_IN_PROGRESS)) {
			len = msgs[dev->msg_read_idx].len;
			buf = msgs[dev->msg_read_idx].buf;
		} else {
			len = dev->rx_buf_len;
			buf = dev->rx_buf;
		}

		regmap_read(dev->map, DW_IC_RXFLR, &rx_valid);

		for (; len > 0 && rx_valid > 0; len--, rx_valid--) {
			u32 flags = msgs[dev->msg_read_idx].flags;

			regmap_read(dev->map, DW_IC_DATA_CMD, &tmp);
			/* Ensure length byte is a valid value */
			if (flags & I2C_M_RECV_LEN &&
			    (tmp & DW_IC_DATA_CMD_DAT) <= I2C_SMBUS_BLOCK_MAX && tmp > 0) {
				len = i2c_dw_recv_len(dev, tmp);
			}
			*buf++ = tmp;
			dev->rx_outstanding--;
		}

		if (len > 0) {
			dev->status |= STATUS_READ_IN_PROGRESS;
			dev->rx_buf_len = len;
			dev->rx_buf = buf;
			return;
		} else
			dev->status &= ~STATUS_READ_IN_PROGRESS;
	}
}

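/*
 * Overview of the interrupt driven transfer flow implemented below:
 * i2c_dw_xfer() validates the request, programs the target address and
 * enables interrupts via i2c_dw_xfer_init(), then sleeps on
 * dev->cmd_complete. TX_EMPTY interrupts call i2c_dw_xfer_msg() to keep
 * the Tx FIFO filled, RX_FULL interrupts call i2c_dw_read() to drain the
 * Rx FIFO, and STOP_DET/TX_ABRT complete the transfer from i2c_dw_isr().
 */
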
/*
 * Prepare the controller for a transaction and wait for it to complete.
 */
static int
i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
	int ret;

	dev_dbg(dev->dev, "%s: msgs: %d\n", __func__, num);

	pm_runtime_get_sync(dev->dev);

	/*
	 * Use the polling based transfer path when the AMD NAVI GPU card is
	 * present, since that model does not support the interrupt based
	 * operation used by the rest of this driver.
	 */
	if ((dev->flags & MODEL_MASK) == MODEL_AMD_NAVI_GPU) {
		ret = amd_i2c_dw_xfer_quirk(adap, msgs, num);
		goto done_nolock;
	}

	reinit_completion(&dev->cmd_complete);
	dev->msgs = msgs;
	dev->msgs_num = num;
	dev->cmd_err = 0;
	dev->msg_write_idx = 0;
	dev->msg_read_idx = 0;
	dev->msg_err = 0;
	dev->status = 0;
	dev->abort_source = 0;
	dev->rx_outstanding = 0;

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		goto done_nolock;

	ret = i2c_dw_wait_bus_not_busy(dev);
	if (ret < 0)
		goto done;

	/* Start the transfers */
	i2c_dw_xfer_init(dev);

	/* Wait for tx to complete */
	if (!wait_for_completion_timeout(&dev->cmd_complete, adap->timeout)) {
		dev_err(dev->dev, "controller timed out\n");
		/* i2c_dw_init_master() implicitly disables the adapter */
		i2c_recover_bus(&dev->adapter);
		i2c_dw_init_master(dev);
		ret = -ETIMEDOUT;
		goto done;
	}

	/*
	 * We must disable the adapter before returning and signaling the end
	 * of the current transfer. Otherwise the hardware might continue
	 * generating interrupts which in turn causes a race condition with
	 * the following transfer. Needs some more investigation if the
	 * additional interrupts are a hardware bug or this driver doesn't
	 * handle them correctly yet.
	 */
	__i2c_dw_disable_nowait(dev);

	if (dev->msg_err) {
		ret = dev->msg_err;
		goto done;
	}

	/* No error */
	if (likely(!dev->cmd_err && !dev->status)) {
		ret = num;
		goto done;
	}

	/* We have an error */
	if (dev->cmd_err == DW_IC_ERR_TX_ABRT) {
		ret = i2c_dw_handle_tx_abort(dev);
		goto done;
	}

	if (dev->status)
		dev_err(dev->dev,
			"transfer terminated early - interrupt latency too high?\n");

	ret = -EIO;

done:
	i2c_dw_release_lock(dev);

done_nolock:
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return ret;
}

static const struct i2c_algorithm i2c_dw_algo = {
	.master_xfer = i2c_dw_xfer,
	.functionality = i2c_dw_func,
};

static const struct i2c_adapter_quirks i2c_dw_quirks = {
	.flags = I2C_AQ_NO_ZERO_LEN,
};

static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev)
{
	unsigned int stat, dummy;

	/*
	 * The IC_INTR_STAT register just indicates "enabled" interrupts.
	 * The unmasked raw version of the interrupt status bits is available
	 * in the IC_RAW_INTR_STAT register.
	 *
	 * That is,
	 *	stat = readl(IC_INTR_STAT);
	 * is equivalent to
	 *	stat = readl(IC_RAW_INTR_STAT) & readl(IC_INTR_MASK);
	 *
	 * The raw version might be useful for debugging purposes.
	 */
	regmap_read(dev->map, DW_IC_INTR_STAT, &stat);

	/*
	 * Do not use the IC_CLR_INTR register to clear interrupts, or
	 * you'll miss some interrupts triggered during the period from
	 * readl(IC_INTR_STAT) to readl(IC_CLR_INTR).
	 *
	 * Instead, use the separately-prepared IC_CLR_* registers.
	 */
	if (stat & DW_IC_INTR_RX_UNDER)
		regmap_read(dev->map, DW_IC_CLR_RX_UNDER, &dummy);
	if (stat & DW_IC_INTR_RX_OVER)
		regmap_read(dev->map, DW_IC_CLR_RX_OVER, &dummy);
	if (stat & DW_IC_INTR_TX_OVER)
		regmap_read(dev->map, DW_IC_CLR_TX_OVER, &dummy);
	if (stat & DW_IC_INTR_RD_REQ)
		regmap_read(dev->map, DW_IC_CLR_RD_REQ, &dummy);
	if (stat & DW_IC_INTR_TX_ABRT) {
		/*
		 * The IC_TX_ABRT_SOURCE register is cleared whenever
		 * the IC_CLR_TX_ABRT is read. Preserve it beforehand.
		 */
		regmap_read(dev->map, DW_IC_TX_ABRT_SOURCE, &dev->abort_source);
		regmap_read(dev->map, DW_IC_CLR_TX_ABRT, &dummy);
	}
	if (stat & DW_IC_INTR_RX_DONE)
		regmap_read(dev->map, DW_IC_CLR_RX_DONE, &dummy);
	if (stat & DW_IC_INTR_ACTIVITY)
		regmap_read(dev->map, DW_IC_CLR_ACTIVITY, &dummy);
	if ((stat & DW_IC_INTR_STOP_DET) &&
	    ((dev->rx_outstanding == 0) || (stat & DW_IC_INTR_RX_FULL)))
		regmap_read(dev->map, DW_IC_CLR_STOP_DET, &dummy);
	if (stat & DW_IC_INTR_START_DET)
		regmap_read(dev->map, DW_IC_CLR_START_DET, &dummy);
	if (stat & DW_IC_INTR_GEN_CALL)
		regmap_read(dev->map, DW_IC_CLR_GEN_CALL, &dummy);

	return stat;
}

/*
 * Interrupt service routine. This gets called whenever an I2C master
 * interrupt occurs.
 */
static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id)
{
	struct dw_i2c_dev *dev = dev_id;
	unsigned int stat, enabled;

	regmap_read(dev->map, DW_IC_ENABLE, &enabled);
	regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &stat);
	if (!enabled || !(stat & ~DW_IC_INTR_ACTIVITY))
		return IRQ_NONE;
	if (pm_runtime_suspended(dev->dev) || stat == GENMASK(31, 0))
		return IRQ_NONE;
	dev_dbg(dev->dev, "enabled=%#x stat=%#x\n", enabled, stat);

	stat = i2c_dw_read_clear_intrbits(dev);

	if (!(dev->status & STATUS_ACTIVE)) {
		/*
		 * Unexpected interrupt from the driver's point of view. The
		 * state variables are either unset or stale, so acknowledge
		 * and disable interrupts to suppress further ones in case the
		 * interrupt really came from this HW (e.g. firmware has left
		 * the HW active).
		 */
		regmap_write(dev->map, DW_IC_INTR_MASK, 0);
		return IRQ_HANDLED;
	}

	if (stat & DW_IC_INTR_TX_ABRT) {
		dev->cmd_err |= DW_IC_ERR_TX_ABRT;
		dev->status &= ~STATUS_MASK;
		dev->rx_outstanding = 0;

		/*
		 * Anytime TX_ABRT is set, the contents of the tx/rx
		 * buffers are flushed. Make sure to skip them.
		 */
		regmap_write(dev->map, DW_IC_INTR_MASK, 0);
		goto tx_aborted;
	}

	if (stat & DW_IC_INTR_RX_FULL)
		i2c_dw_read(dev);

	if (stat & DW_IC_INTR_TX_EMPTY)
		i2c_dw_xfer_msg(dev);

	/*
	 * No need to modify or disable the interrupt mask here.
	 * i2c_dw_xfer_msg() will take care of it according to
	 * the current transmit status.
	 */

tx_aborted:
	if (((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err) &&
	    (dev->rx_outstanding == 0))
		complete(&dev->cmd_complete);
	else if (unlikely(dev->flags & ACCESS_INTR_MASK)) {
		/* Workaround to trigger pending interrupt */
		regmap_read(dev->map, DW_IC_INTR_MASK, &stat);
		regmap_write(dev->map, DW_IC_INTR_MASK, 0);
		regmap_write(dev->map, DW_IC_INTR_MASK, stat);
	}

	return IRQ_HANDLED;
}

void i2c_dw_configure_master(struct dw_i2c_dev *dev)
{
	struct i2c_timings *t = &dev->timings;

	dev->functionality = I2C_FUNC_10BIT_ADDR | DW_IC_DEFAULT_FUNCTIONALITY;

	dev->master_cfg = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE |
			  DW_IC_CON_RESTART_EN;

	dev->mode = DW_IC_MASTER;

	switch (t->bus_freq_hz) {
	case I2C_MAX_STANDARD_MODE_FREQ:
		dev->master_cfg |= DW_IC_CON_SPEED_STD;
		break;
	case I2C_MAX_HIGH_SPEED_MODE_FREQ:
		dev->master_cfg |= DW_IC_CON_SPEED_HIGH;
		break;
	default:
		dev->master_cfg |= DW_IC_CON_SPEED_FAST;
	}
}
EXPORT_SYMBOL_GPL(i2c_dw_configure_master);

static void i2c_dw_prepare_recovery(struct i2c_adapter *adap)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);

	i2c_dw_disable(dev);
	reset_control_assert(dev->rst);
	i2c_dw_prepare_clk(dev, false);
}

static void i2c_dw_unprepare_recovery(struct i2c_adapter *adap)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);

	i2c_dw_prepare_clk(dev, true);
	reset_control_deassert(dev->rst);
	i2c_dw_init_master(dev);
}

static int i2c_dw_init_recovery_info(struct dw_i2c_dev *dev)
{
	struct i2c_bus_recovery_info *rinfo = &dev->rinfo;
	struct i2c_adapter *adap = &dev->adapter;
	struct gpio_desc *gpio;

	gpio = devm_gpiod_get_optional(dev->dev, "scl", GPIOD_OUT_HIGH);
	if (IS_ERR_OR_NULL(gpio))
		return PTR_ERR_OR_ZERO(gpio);

	rinfo->scl_gpiod = gpio;

	gpio = devm_gpiod_get_optional(dev->dev, "sda", GPIOD_IN);
	if (IS_ERR(gpio))
		return PTR_ERR(gpio);
	rinfo->sda_gpiod = gpio;

	rinfo->recover_bus = i2c_generic_scl_recovery;
	rinfo->prepare_recovery = i2c_dw_prepare_recovery;
	rinfo->unprepare_recovery = i2c_dw_unprepare_recovery;
	adap->bus_recovery_info = rinfo;

	dev_info(dev->dev, "running with gpio recovery mode! scl%s",
		 rinfo->sda_gpiod ? ",sda" : "");

	return 0;
}

static int amd_i2c_adap_quirk(struct dw_i2c_dev *dev)
{
	struct i2c_adapter *adap = &dev->adapter;
	int ret;

	pm_runtime_get_noresume(dev->dev);
	ret = i2c_add_numbered_adapter(adap);
	if (ret)
		dev_err(dev->dev, "Failed to add adapter: %d\n", ret);
	pm_runtime_put_noidle(dev->dev);

	return ret;
}

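/*
 * Note: this is the common entry point used by the bus glue drivers
 * (e.g. i2c-designware-platdrv.c and i2c-designware-pcidrv.c). A glue
 * driver is expected to have filled in the struct dw_i2c_dev resources
 * beforehand, roughly along these lines (illustrative sketch only, the
 * exact sequence lives in the glue drivers):
 *
 *	dev->dev = &pdev->dev;
 *	dev->irq = irq;
 *	dev->base = devm_platform_ioremap_resource(pdev, 0);
 *	i2c_parse_fw_timings(&pdev->dev, &dev->timings, false);
 *	i2c_dw_configure_master(dev);
 *	ret = i2c_dw_probe_master(dev);
 */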
int i2c_dw_probe_master(struct dw_i2c_dev *dev)
{
	struct i2c_adapter *adap = &dev->adapter;
	unsigned long irq_flags;
	unsigned int ic_con;
	int ret;

	init_completion(&dev->cmd_complete);

	dev->init = i2c_dw_init_master;
	dev->disable = i2c_dw_disable;

	ret = i2c_dw_init_regmap(dev);
	if (ret)
		return ret;

	ret = i2c_dw_set_timings_master(dev);
	if (ret)
		return ret;

	ret = i2c_dw_set_fifo_size(dev);
	if (ret)
		return ret;

	/* Lock the bus for accessing DW_IC_CON */
	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	/*
	 * On AMD platforms the BIOS advertises the bus clear feature and
	 * enables SCL/SDA stuck-low detection; the SMU firmware then performs
	 * the bus recovery. The driver should not ignore this BIOS
	 * advertisement of the bus clear feature.
	 */
	ret = regmap_read(dev->map, DW_IC_CON, &ic_con);
	i2c_dw_release_lock(dev);
	if (ret)
		return ret;

	if (ic_con & DW_IC_CON_BUS_CLEAR_CTRL)
		dev->master_cfg |= DW_IC_CON_BUS_CLEAR_CTRL;

	ret = dev->init(dev);
	if (ret)
		return ret;

	snprintf(adap->name, sizeof(adap->name),
		 "Synopsys DesignWare I2C adapter");
	adap->retries = 3;
	adap->algo = &i2c_dw_algo;
	adap->quirks = &i2c_dw_quirks;
	adap->dev.parent = dev->dev;
	i2c_set_adapdata(adap, dev);

	if ((dev->flags & MODEL_MASK) == MODEL_AMD_NAVI_GPU)
		return amd_i2c_adap_quirk(dev);

	if (dev->flags & ACCESS_NO_IRQ_SUSPEND) {
		irq_flags = IRQF_NO_SUSPEND;
	} else {
		irq_flags = IRQF_SHARED | IRQF_COND_SUSPEND;
	}

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	regmap_write(dev->map, DW_IC_INTR_MASK, 0);
	i2c_dw_release_lock(dev);

	ret = devm_request_irq(dev->dev, dev->irq, i2c_dw_isr, irq_flags,
			       dev_name(dev->dev), dev);
	if (ret) {
		dev_err(dev->dev, "failure requesting irq %i: %d\n",
			dev->irq, ret);
		return ret;
	}

	ret = i2c_dw_init_recovery_info(dev);
	if (ret)
		return ret;

	/*
	 * Increment the PM usage count during adapter registration in order
	 * to avoid a possible spurious runtime suspend when the adapter
	 * device is registered to the device core, followed by an immediate
	 * resume in case the bus has registered I2C slaves that do I2C
	 * transfers in their probe.
	 */
	pm_runtime_get_noresume(dev->dev);
	ret = i2c_add_numbered_adapter(adap);
	if (ret)
		dev_err(dev->dev, "failure adding adapter: %d\n", ret);
	pm_runtime_put_noidle(dev->dev);

	return ret;
}
EXPORT_SYMBOL_GPL(i2c_dw_probe_master);

MODULE_DESCRIPTION("Synopsys DesignWare I2C bus master adapter");
MODULE_LICENSE("GPL");