// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Synopsys DesignWare I2C adapter driver (master only).
 *
 * Based on the TI DAVINCI I2C adapter driver.
 *
 * Copyright (C) 2006 Texas Instruments.
 * Copyright (C) 2007 MontaVista Software Inc.
 * Copyright (C) 2009 Provigent Ltd.
 */
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>

#include "i2c-designware-core.h"

#define AMD_TIMEOUT_MIN_US	25
#define AMD_TIMEOUT_MAX_US	250
#define AMD_MASTERCFG_MASK	GENMASK(15, 0)

static void i2c_dw_configure_fifo_master(struct dw_i2c_dev *dev)
{
	/* Configure Tx/Rx FIFO threshold levels */
	regmap_write(dev->map, DW_IC_TX_TL, dev->tx_fifo_depth / 2);
	regmap_write(dev->map, DW_IC_RX_TL, 0);

	/* Configure the I2C master */
	regmap_write(dev->map, DW_IC_CON, dev->master_cfg);
}

static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
{
	unsigned int comp_param1;
	u32 sda_falling_time, scl_falling_time;
	struct i2c_timings *t = &dev->timings;
	const char *fp_str = "";
	u32 ic_clk;
	int ret;

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	ret = regmap_read(dev->map, DW_IC_COMP_PARAM_1, &comp_param1);
	i2c_dw_release_lock(dev);
	if (ret)
		return ret;

	/* Set standard and fast speed dividers for high/low periods */
	sda_falling_time = t->sda_fall_ns ?: 300; /* ns */
	scl_falling_time = t->scl_fall_ns ?: 300; /* ns */

	/* Calculate SCL timing parameters for standard mode if not set */
	if (!dev->ss_hcnt || !dev->ss_lcnt) {
		ic_clk = i2c_dw_clk_rate(dev);
		dev->ss_hcnt =
			i2c_dw_scl_hcnt(ic_clk,
					4000,	/* tHD;STA = tHIGH = 4.0 us */
					sda_falling_time,
					0,	/* 0: DW default, 1: Ideal */
					0);	/* No offset */
		dev->ss_lcnt =
			i2c_dw_scl_lcnt(ic_clk,
					4700,	/* tLOW = 4.7 us */
					scl_falling_time,
					0);	/* No offset */
	}
	dev_dbg(dev->dev, "Standard Mode HCNT:LCNT = %d:%d\n",
		dev->ss_hcnt, dev->ss_lcnt);

	/*
	 * Set SCL timing parameters for fast mode or fast mode plus. The only
	 * difference is the timing parameter values since the registers are
	 * the same.
	 */
	if (t->bus_freq_hz == I2C_MAX_FAST_MODE_PLUS_FREQ) {
		/*
		 * Check whether Fast Mode Plus parameters are available.
		 * Calculate SCL timing parameters for Fast Mode Plus if they
		 * are not set.
		 */
		if (dev->fp_hcnt && dev->fp_lcnt) {
			dev->fs_hcnt = dev->fp_hcnt;
			dev->fs_lcnt = dev->fp_lcnt;
		} else {
			ic_clk = i2c_dw_clk_rate(dev);
			dev->fs_hcnt =
				i2c_dw_scl_hcnt(ic_clk,
						260,	/* tHIGH = 260 ns */
						sda_falling_time,
						0,	/* DW default */
						0);	/* No offset */
			dev->fs_lcnt =
				i2c_dw_scl_lcnt(ic_clk,
						500,	/* tLOW = 500 ns */
						scl_falling_time,
						0);	/* No offset */
		}
		fp_str = " Plus";
	}
	/*
	 * Calculate SCL timing parameters for fast mode if not set. They are
	 * also needed in high speed mode.
	 */
	if (!dev->fs_hcnt || !dev->fs_lcnt) {
		ic_clk = i2c_dw_clk_rate(dev);
		dev->fs_hcnt =
			i2c_dw_scl_hcnt(ic_clk,
					600,	/* tHD;STA = tHIGH = 0.6 us */
					sda_falling_time,
					0,	/* 0: DW default, 1: Ideal */
					0);	/* No offset */
		dev->fs_lcnt =
			i2c_dw_scl_lcnt(ic_clk,
					1300,	/* tLOW = 1.3 us */
					scl_falling_time,
					0);	/* No offset */
	}
	dev_dbg(dev->dev, "Fast Mode%s HCNT:LCNT = %d:%d\n",
		fp_str, dev->fs_hcnt, dev->fs_lcnt);

	/* Check whether high speed is possible and fall back to fast mode if not */
	if ((dev->master_cfg & DW_IC_CON_SPEED_MASK) ==
	    DW_IC_CON_SPEED_HIGH) {
		if ((comp_param1 & DW_IC_COMP_PARAM_1_SPEED_MODE_MASK)
		    != DW_IC_COMP_PARAM_1_SPEED_MODE_HIGH) {
			dev_err(dev->dev, "High Speed not supported!\n");
			t->bus_freq_hz = I2C_MAX_FAST_MODE_FREQ;
			dev->master_cfg &= ~DW_IC_CON_SPEED_MASK;
			dev->master_cfg |= DW_IC_CON_SPEED_FAST;
			dev->hs_hcnt = 0;
			dev->hs_lcnt = 0;
		} else if (!dev->hs_hcnt || !dev->hs_lcnt) {
			ic_clk = i2c_dw_clk_rate(dev);
			dev->hs_hcnt =
				i2c_dw_scl_hcnt(ic_clk,
						160,	/* tHIGH = 160 ns */
						sda_falling_time,
						0,	/* DW default */
						0);	/* No offset */
			dev->hs_lcnt =
				i2c_dw_scl_lcnt(ic_clk,
						320,	/* tLOW = 320 ns */
						scl_falling_time,
						0);	/* No offset */
		}
		dev_dbg(dev->dev, "High Speed Mode HCNT:LCNT = %d:%d\n",
			dev->hs_hcnt, dev->hs_lcnt);
	}

	ret = i2c_dw_set_sda_hold(dev);
	if (ret)
		return ret;

	dev_dbg(dev->dev, "Bus speed: %s\n", i2c_freq_mode_string(t->bus_freq_hz));
	return 0;
}
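
/*
 * Rough illustration of the HCNT/LCNT math above (not the exact formula
 * implemented by i2c_dw_scl_hcnt()/i2c_dw_scl_lcnt(), which additionally
 * compensate for the SCL/SDA falling times and IP-internal latencies):
 * the counts are expressed in ic_clk cycles, so with a hypothetical
 * 100 MHz ic_clk (10 ns period) the standard mode tLOW of 4.7 us maps to
 * roughly 4700 ns / 10 ns = 470 cycles for SS_SCL_LCNT, and tHIGH of
 * 4.0 us to roughly 400 cycles for SS_SCL_HCNT.
 */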

/**
 * i2c_dw_init_master() - Initialize the designware I2C master hardware
 * @dev: device private data
 *
 * This function configures and enables the I2C master.
 * It is called during the I2C init function, and in case of a timeout at
 * run time.
 */
static int i2c_dw_init_master(struct dw_i2c_dev *dev)
{
	int ret;

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	/* Disable the adapter */
	__i2c_dw_disable(dev);

	/* Write standard speed timing parameters */
	regmap_write(dev->map, DW_IC_SS_SCL_HCNT, dev->ss_hcnt);
	regmap_write(dev->map, DW_IC_SS_SCL_LCNT, dev->ss_lcnt);

	/* Write fast mode/fast mode plus timing parameters */
	regmap_write(dev->map, DW_IC_FS_SCL_HCNT, dev->fs_hcnt);
	regmap_write(dev->map, DW_IC_FS_SCL_LCNT, dev->fs_lcnt);

	/* Write high speed timing parameters if supported */
	if (dev->hs_hcnt && dev->hs_lcnt) {
		regmap_write(dev->map, DW_IC_HS_SCL_HCNT, dev->hs_hcnt);
		regmap_write(dev->map, DW_IC_HS_SCL_LCNT, dev->hs_lcnt);
	}

	/* Write SDA hold time if supported */
	if (dev->sda_hold_time)
		regmap_write(dev->map, DW_IC_SDA_HOLD, dev->sda_hold_time);

	i2c_dw_configure_fifo_master(dev);
	i2c_dw_release_lock(dev);

	return 0;
}

static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
{
	struct i2c_msg *msgs = dev->msgs;
	u32 ic_con = 0, ic_tar = 0;
	unsigned int dummy;

	/* Disable the adapter */
	__i2c_dw_disable(dev);

	/* If the slave address is a ten bit address, enable 10BITADDR */
	if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) {
		ic_con = DW_IC_CON_10BITADDR_MASTER;
		/*
		 * If I2C_DYNAMIC_TAR_UPDATE is set, the 10-bit addressing
		 * mode has to be enabled via bit 12 of the IC_TAR register.
		 * We always set it since I2C_DYNAMIC_TAR_UPDATE can't be
		 * detected from the registers.
		 */
		ic_tar = DW_IC_TAR_10BITADDR_MASTER;
	}

	regmap_update_bits(dev->map, DW_IC_CON, DW_IC_CON_10BITADDR_MASTER,
			   ic_con);

	/*
	 * Set the slave (target) address and enable 10-bit addressing mode
	 * if applicable.
	 */
	regmap_write(dev->map, DW_IC_TAR,
		     msgs[dev->msg_write_idx].addr | ic_tar);

	/* Enforce disabled interrupts (due to HW issues) */
	regmap_write(dev->map, DW_IC_INTR_MASK, 0);

	/* Enable the adapter */
	__i2c_dw_enable(dev);

	/* Dummy read to avoid the register getting stuck on Bay Trail */
	regmap_read(dev->map, DW_IC_ENABLE_STATUS, &dummy);

	/* Clear and enable interrupts */
	regmap_read(dev->map, DW_IC_CLR_INTR, &dummy);
	regmap_write(dev->map, DW_IC_INTR_MASK, DW_IC_INTR_MASTER_MASK);
}

/*
 * This function waits for the controller to be idle before disabling I2C.
 * When the controller is not in the IDLE state, the MST_ACTIVITY bit
 * (IC_STATUS[5]) is set.
 *
 * Values:
 * 0x1 (ACTIVE): Controller not idle
 * 0x0 (IDLE): Controller is idle
 *
 * The function is called after completing the current transfer.
 *
 * Returns:
 * False when the controller is in the IDLE state.
 * True when the controller is in the ACTIVE state.
 */
static bool i2c_dw_is_controller_active(struct dw_i2c_dev *dev)
{
	u32 status;

	regmap_read(dev->map, DW_IC_STATUS, &status);
	if (!(status & DW_IC_STATUS_MASTER_ACTIVITY))
		return false;

	return regmap_read_poll_timeout(dev->map, DW_IC_STATUS, status,
					!(status & DW_IC_STATUS_MASTER_ACTIVITY),
					1100, 20000) != 0;
}

static int i2c_dw_check_stopbit(struct dw_i2c_dev *dev)
{
	u32 val;
	int ret;

	ret = regmap_read_poll_timeout(dev->map, DW_IC_INTR_STAT, val,
				       !(val & DW_IC_INTR_STOP_DET),
				       1100, 20000);
	if (ret)
		dev_err(dev->dev, "i2c timeout error %d\n", ret);

	return ret;
}

static int i2c_dw_status(struct dw_i2c_dev *dev)
{
	int status;

	status = i2c_dw_wait_bus_not_busy(dev);
	if (status)
		return status;

	return i2c_dw_check_stopbit(dev);
}

/*
 * Initiate and continue a master read/write transaction with a polling
 * based transfer routine; the messages are then written into the Tx buffer.
 */
static int amd_i2c_dw_xfer_quirk(struct i2c_adapter *adap, struct i2c_msg *msgs, int num_msgs)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
	int msg_wrt_idx, msg_itr_lmt, buf_len, data_idx;
	int cmd = 0, status;
	u8 *tx_buf;
	unsigned int val;

	/*
	 * In order to enable the interrupt for UCSI, i.e. the AMD NAVI GPU
	 * card, it is mandatory to set the right value in a specific register
	 * (offset 0x474) as per the hardware IP specification.
	 */
	regmap_write(dev->map, AMD_UCSI_INTR_REG, AMD_UCSI_INTR_EN);

	dev->msgs = msgs;
	dev->msgs_num = num_msgs;
	i2c_dw_xfer_init(dev);
	regmap_write(dev->map, DW_IC_INTR_MASK, 0);

	/* Initiate the messages read/write transaction */
	for (msg_wrt_idx = 0; msg_wrt_idx < num_msgs; msg_wrt_idx++) {
		tx_buf = msgs[msg_wrt_idx].buf;
		buf_len = msgs[msg_wrt_idx].len;

		if (!(msgs[msg_wrt_idx].flags & I2C_M_RD))
			regmap_write(dev->map, DW_IC_TX_TL, buf_len - 1);
		/*
		 * Initiate the I2C read/write transaction of buffer length,
		 * and poll for the bus busy status. For the last message
		 * transfer, update the command with the stop bit enabled.
		 */
		for (msg_itr_lmt = buf_len; msg_itr_lmt > 0; msg_itr_lmt--) {
			if (msg_wrt_idx == num_msgs - 1 && msg_itr_lmt == 1)
				cmd |= BIT(9);

			if (msgs[msg_wrt_idx].flags & I2C_M_RD) {
				/* Due to a hardware bug, the same command needs to be written twice. */
				regmap_write(dev->map, DW_IC_DATA_CMD, 0x100);
				regmap_write(dev->map, DW_IC_DATA_CMD, 0x100 | cmd);
				if (cmd) {
					regmap_write(dev->map, DW_IC_TX_TL, 2 * (buf_len - 1));
					regmap_write(dev->map, DW_IC_RX_TL, 2 * (buf_len - 1));
					/*
					 * The stop bit needs to be checked. However, it cannot
					 * be detected from the registers, so we always check it
					 * when reading/writing the last byte.
					 */
					status = i2c_dw_status(dev);
					if (status)
						return status;

					for (data_idx = 0; data_idx < buf_len; data_idx++) {
						regmap_read(dev->map, DW_IC_DATA_CMD, &val);
						tx_buf[data_idx] = val;
					}
					status = i2c_dw_check_stopbit(dev);
					if (status)
						return status;
				}
			} else {
				regmap_write(dev->map, DW_IC_DATA_CMD, *tx_buf++ | cmd);
				usleep_range(AMD_TIMEOUT_MIN_US, AMD_TIMEOUT_MAX_US);
			}
		}
		status = i2c_dw_check_stopbit(dev);
		if (status)
			return status;
	}

	return 0;
}

static int i2c_dw_poll_tx_empty(struct dw_i2c_dev *dev)
{
	u32 val;

	return regmap_read_poll_timeout(dev->map, DW_IC_RAW_INTR_STAT, val,
					val & DW_IC_INTR_TX_EMPTY,
					100, 1000);
}

static int i2c_dw_poll_rx_full(struct dw_i2c_dev *dev)
{
	u32 val;

	return regmap_read_poll_timeout(dev->map, DW_IC_RAW_INTR_STAT, val,
					val & DW_IC_INTR_RX_FULL,
					100, 1000);
}

static int txgbe_i2c_dw_xfer_quirk(struct i2c_adapter *adap, struct i2c_msg *msgs,
				   int num_msgs)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
	int msg_idx, buf_len, data_idx, ret;
	unsigned int val, stop = 0;
	u8 *buf;

	dev->msgs = msgs;
	dev->msgs_num = num_msgs;
	i2c_dw_xfer_init(dev);
	regmap_write(dev->map, DW_IC_INTR_MASK, 0);

	for (msg_idx = 0; msg_idx < num_msgs; msg_idx++) {
		buf = msgs[msg_idx].buf;
		buf_len = msgs[msg_idx].len;

		for (data_idx = 0; data_idx < buf_len; data_idx++) {
			if (msg_idx == num_msgs - 1 && data_idx == buf_len - 1)
				stop |= BIT(9);

			if (msgs[msg_idx].flags & I2C_M_RD) {
				regmap_write(dev->map, DW_IC_DATA_CMD, 0x100 | stop);

				ret = i2c_dw_poll_rx_full(dev);
				if (ret)
					return ret;

				regmap_read(dev->map, DW_IC_DATA_CMD, &val);
				buf[data_idx] = val;
			} else {
				ret = i2c_dw_poll_tx_empty(dev);
				if (ret)
					return ret;

				regmap_write(dev->map, DW_IC_DATA_CMD,
					     buf[data_idx] | stop);
			}
		}
	}

	return num_msgs;
}

/*
 * Initiate (and continue) low level master read/write transactions.
 * This function is only called from i2c_dw_isr(), and it pumps i2c_msg
 * messages into the Tx buffer. It handles everything even if the i2c_msg
 * data is longer than the Tx buffer.
 */
static void
i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
{
	struct i2c_msg *msgs = dev->msgs;
	u32 intr_mask;
	int tx_limit, rx_limit;
	u32 addr = msgs[dev->msg_write_idx].addr;
	u32 buf_len = dev->tx_buf_len;
	u8 *buf = dev->tx_buf;
	bool need_restart = false;
	unsigned int flr;

	intr_mask = DW_IC_INTR_MASTER_MASK;

	for (; dev->msg_write_idx < dev->msgs_num; dev->msg_write_idx++) {
		u32 flags = msgs[dev->msg_write_idx].flags;

		/*
		 * If the target address has changed, we need to
		 * reprogram the target address in the I2C
		 * adapter when we are done with this transfer.
		 */
		if (msgs[dev->msg_write_idx].addr != addr) {
			dev_err(dev->dev,
				"%s: invalid target address\n", __func__);
			dev->msg_err = -EINVAL;
			break;
		}

		if (!(dev->status & STATUS_WRITE_IN_PROGRESS)) {
			/* new i2c_msg */
			buf = msgs[dev->msg_write_idx].buf;
			buf_len = msgs[dev->msg_write_idx].len;

			/*
			 * If both IC_EMPTYFIFO_HOLD_MASTER_EN and
			 * IC_RESTART_EN are set, we must manually
			 * set the restart bit between messages.
			 */
			if ((dev->master_cfg & DW_IC_CON_RESTART_EN) &&
			    (dev->msg_write_idx > 0))
				need_restart = true;
		}

		regmap_read(dev->map, DW_IC_TXFLR, &flr);
		tx_limit = dev->tx_fifo_depth - flr;

		regmap_read(dev->map, DW_IC_RXFLR, &flr);
		rx_limit = dev->rx_fifo_depth - flr;

		while (buf_len > 0 && tx_limit > 0 && rx_limit > 0) {
			u32 cmd = 0;

			/*
			 * If IC_EMPTYFIFO_HOLD_MASTER_EN is set we must
			 * manually set the stop bit. However, it cannot be
			 * detected from the registers so we set it always
			 * when writing/reading the last byte.
			 */

			/*
			 * i2c-core always sets the buffer length of
			 * I2C_FUNC_SMBUS_BLOCK_DATA to 1. The length will
			 * be adjusted when receiving the first byte.
			 * Thus we can't stop the transaction here.
			 */
			if (dev->msg_write_idx == dev->msgs_num - 1 &&
			    buf_len == 1 && !(flags & I2C_M_RECV_LEN))
				cmd |= BIT(9);

			if (need_restart) {
				cmd |= BIT(10);
				need_restart = false;
			}

			if (msgs[dev->msg_write_idx].flags & I2C_M_RD) {

				/* Avoid rx buffer overrun */
				if (dev->rx_outstanding >= dev->rx_fifo_depth)
					break;

				regmap_write(dev->map, DW_IC_DATA_CMD,
					     cmd | 0x100);
				rx_limit--;
				dev->rx_outstanding++;
			} else {
				regmap_write(dev->map, DW_IC_DATA_CMD,
					     cmd | *buf++);
			}
			tx_limit--; buf_len--;
		}

		dev->tx_buf = buf;
		dev->tx_buf_len = buf_len;

		/*
		 * Because we don't know the buffer length in the
		 * I2C_FUNC_SMBUS_BLOCK_DATA case, we can't stop the
		 * transaction here. Also disable the TX_EMPTY IRQ
		 * while waiting for the data length byte to avoid a
		 * flood of bogus interrupts.
		 */
		if (flags & I2C_M_RECV_LEN) {
			dev->status |= STATUS_WRITE_IN_PROGRESS;
			intr_mask &= ~DW_IC_INTR_TX_EMPTY;
			break;
		} else if (buf_len > 0) {
			/* more bytes to be written */
			dev->status |= STATUS_WRITE_IN_PROGRESS;
			break;
		} else
			dev->status &= ~STATUS_WRITE_IN_PROGRESS;
	}

	/*
	 * If the i2c_msg index search is completed, we don't need the
	 * TX_EMPTY interrupt any more.
	 */
	if (dev->msg_write_idx == dev->msgs_num)
		intr_mask &= ~DW_IC_INTR_TX_EMPTY;

	if (dev->msg_err)
		intr_mask = 0;

	regmap_write(dev->map, DW_IC_INTR_MASK, intr_mask);
}

static u8
i2c_dw_recv_len(struct dw_i2c_dev *dev, u8 len)
{
	struct i2c_msg *msgs = dev->msgs;
	u32 flags = msgs[dev->msg_read_idx].flags;

	/*
	 * Adjust the buffer length and mask the flag
	 * after receiving the first byte.
	 */
	len += (flags & I2C_CLIENT_PEC) ? 2 : 1;
	dev->tx_buf_len = len - min_t(u8, len, dev->rx_outstanding);
	msgs[dev->msg_read_idx].len = len;
	msgs[dev->msg_read_idx].flags &= ~I2C_M_RECV_LEN;

	/*
	 * The buffer length has been received; re-enable the TX_EMPTY
	 * interrupt to resume the SMBus transaction.
	 */
	regmap_update_bits(dev->map, DW_IC_INTR_MASK, DW_IC_INTR_TX_EMPTY,
			   DW_IC_INTR_TX_EMPTY);

	return len;
}
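
/*
 * Illustrative example of the adjustment above (hypothetical numbers): for
 * an SMBus block read whose first received byte reports 5 data bytes and no
 * PEC, the message length becomes 5 + 1 = 6 (the data bytes plus the length
 * byte itself). tx_buf_len then holds the number of additional read commands
 * that i2c_dw_xfer_msg() still has to queue, on top of those already
 * outstanding in the Rx FIFO.
 */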

static void
i2c_dw_read(struct dw_i2c_dev *dev)
{
	struct i2c_msg *msgs = dev->msgs;
	unsigned int rx_valid;

	for (; dev->msg_read_idx < dev->msgs_num; dev->msg_read_idx++) {
		unsigned int tmp;
		u32 len;
		u8 *buf;

		if (!(msgs[dev->msg_read_idx].flags & I2C_M_RD))
			continue;

		if (!(dev->status & STATUS_READ_IN_PROGRESS)) {
			len = msgs[dev->msg_read_idx].len;
			buf = msgs[dev->msg_read_idx].buf;
		} else {
			len = dev->rx_buf_len;
			buf = dev->rx_buf;
		}

		regmap_read(dev->map, DW_IC_RXFLR, &rx_valid);

		for (; len > 0 && rx_valid > 0; len--, rx_valid--) {
			u32 flags = msgs[dev->msg_read_idx].flags;

			regmap_read(dev->map, DW_IC_DATA_CMD, &tmp);
			tmp &= DW_IC_DATA_CMD_DAT;
			/* Ensure the length byte is a valid value */
			if (flags & I2C_M_RECV_LEN) {
				/*
				 * If IC_EMPTYFIFO_HOLD_MASTER_EN is set (which cannot be
				 * detected from the registers), the controller can be
				 * disabled if the STOP bit is set. But it is only set
				 * after receiving the block data response length in the
				 * I2C_FUNC_SMBUS_BLOCK_DATA case. So another byte with
				 * the STOP bit set needs to be read when the block data
				 * response length is invalid, to complete the
				 * transaction.
				 */
				if (!tmp || tmp > I2C_SMBUS_BLOCK_MAX)
					tmp = 1;

				len = i2c_dw_recv_len(dev, tmp);
			}
			*buf++ = tmp;
			dev->rx_outstanding--;
		}

		if (len > 0) {
			dev->status |= STATUS_READ_IN_PROGRESS;
			dev->rx_buf_len = len;
			dev->rx_buf = buf;
			return;
		} else
			dev->status &= ~STATUS_READ_IN_PROGRESS;
	}
}

/*
 * Prepare the controller for a transaction and call i2c_dw_xfer_msg().
 */
static int
i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
	int ret;

	dev_dbg(dev->dev, "%s: msgs: %d\n", __func__, num);

	pm_runtime_get_sync(dev->dev);

	/*
	 * Initiate the I2C message transfer when polling mode is enabled.
	 * This is a polling based transfer mechanism and does not support
	 * the interrupt based functionality of the existing DesignWare
	 * driver.
	 */
	switch (dev->flags & MODEL_MASK) {
	case MODEL_AMD_NAVI_GPU:
		ret = amd_i2c_dw_xfer_quirk(adap, msgs, num);
		goto done_nolock;
	case MODEL_WANGXUN_SP:
		ret = txgbe_i2c_dw_xfer_quirk(adap, msgs, num);
		goto done_nolock;
	default:
		break;
	}

	reinit_completion(&dev->cmd_complete);
	dev->msgs = msgs;
	dev->msgs_num = num;
	dev->cmd_err = 0;
	dev->msg_write_idx = 0;
	dev->msg_read_idx = 0;
	dev->msg_err = 0;
	dev->status = 0;
	dev->abort_source = 0;
	dev->rx_outstanding = 0;

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		goto done_nolock;

	ret = i2c_dw_wait_bus_not_busy(dev);
	if (ret < 0)
		goto done;

	/* Start the transfers */
	i2c_dw_xfer_init(dev);

	/* Wait for tx to complete */
	if (!wait_for_completion_timeout(&dev->cmd_complete, adap->timeout)) {
		dev_err(dev->dev, "controller timed out\n");
		/* i2c_dw_init_master() implicitly disables the adapter */
		i2c_recover_bus(&dev->adapter);
		i2c_dw_init_master(dev);
		ret = -ETIMEDOUT;
		goto done;
	}

	/*
	 * This happens rarely (~1:500) and is hard to reproduce. A debug
	 * trace showed that IC_STATUS had the value 0x23 when STOP_DET
	 * occurred; disabling IC_ENABLE.ENABLE immediately in that state can
	 * result in IC_RAW_INTR_STAT.MASTER_ON_HOLD holding SCL low. Check
	 * whether the controller is still ACTIVE before disabling I2C.
	 */
	if (i2c_dw_is_controller_active(dev))
		dev_err(dev->dev, "controller active\n");

	/*
	 * We must disable the adapter before returning and signaling the end
	 * of the current transfer. Otherwise the hardware might continue
	 * generating interrupts which in turn causes a race condition with
	 * the following transfer. It needs some more investigation whether
	 * the additional interrupts are a hardware bug or this driver
	 * doesn't handle them correctly yet.
	 */
	__i2c_dw_disable_nowait(dev);

	if (dev->msg_err) {
		ret = dev->msg_err;
		goto done;
	}

	/* No error */
	if (likely(!dev->cmd_err && !dev->status)) {
		ret = num;
		goto done;
	}

	/* We have an error */
	if (dev->cmd_err == DW_IC_ERR_TX_ABRT) {
		ret = i2c_dw_handle_tx_abort(dev);
		goto done;
	}

	if (dev->status)
		dev_err(dev->dev,
			"transfer terminated early - interrupt latency too high?\n");

	ret = -EIO;

done:
	i2c_dw_release_lock(dev);

done_nolock:
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return ret;
}

static const struct i2c_algorithm i2c_dw_algo = {
	.master_xfer = i2c_dw_xfer,
	.functionality = i2c_dw_func,
};

static const struct i2c_adapter_quirks i2c_dw_quirks = {
	.flags = I2C_AQ_NO_ZERO_LEN,
};

static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev)
{
	unsigned int stat, dummy;

	/*
	 * The IC_INTR_STAT register just indicates "enabled" interrupts.
	 * The unmasked raw version of the interrupt status bits is available
	 * in the IC_RAW_INTR_STAT register.
	 *
	 * That is,
	 *   stat = readl(IC_INTR_STAT);
	 * equals to,
	 *   stat = readl(IC_RAW_INTR_STAT) & readl(IC_INTR_MASK);
	 *
	 * The raw version might be useful for debugging purposes.
	 */
	regmap_read(dev->map, DW_IC_INTR_STAT, &stat);

	/*
	 * Do not use the IC_CLR_INTR register to clear interrupts, or
	 * you'll miss some interrupts triggered during the period from
	 * readl(IC_INTR_STAT) to readl(IC_CLR_INTR).
	 *
	 * Instead, use the separately-prepared IC_CLR_* registers.
	 */
	if (stat & DW_IC_INTR_RX_UNDER)
		regmap_read(dev->map, DW_IC_CLR_RX_UNDER, &dummy);
	if (stat & DW_IC_INTR_RX_OVER)
		regmap_read(dev->map, DW_IC_CLR_RX_OVER, &dummy);
	if (stat & DW_IC_INTR_TX_OVER)
		regmap_read(dev->map, DW_IC_CLR_TX_OVER, &dummy);
	if (stat & DW_IC_INTR_RD_REQ)
		regmap_read(dev->map, DW_IC_CLR_RD_REQ, &dummy);
	if (stat & DW_IC_INTR_TX_ABRT) {
		/*
		 * The IC_TX_ABRT_SOURCE register is cleared whenever
		 * IC_CLR_TX_ABRT is read. Preserve it beforehand.
		 */
		regmap_read(dev->map, DW_IC_TX_ABRT_SOURCE, &dev->abort_source);
		regmap_read(dev->map, DW_IC_CLR_TX_ABRT, &dummy);
	}
	if (stat & DW_IC_INTR_RX_DONE)
		regmap_read(dev->map, DW_IC_CLR_RX_DONE, &dummy);
	if (stat & DW_IC_INTR_ACTIVITY)
		regmap_read(dev->map, DW_IC_CLR_ACTIVITY, &dummy);
	if ((stat & DW_IC_INTR_STOP_DET) &&
	    ((dev->rx_outstanding == 0) || (stat & DW_IC_INTR_RX_FULL)))
		regmap_read(dev->map, DW_IC_CLR_STOP_DET, &dummy);
	if (stat & DW_IC_INTR_START_DET)
		regmap_read(dev->map, DW_IC_CLR_START_DET, &dummy);
	if (stat & DW_IC_INTR_GEN_CALL)
		regmap_read(dev->map, DW_IC_CLR_GEN_CALL, &dummy);

	return stat;
}

/*
 * Interrupt service routine. This gets called whenever an I2C master
 * interrupt occurs.
 */
static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id)
{
	struct dw_i2c_dev *dev = dev_id;
	unsigned int stat, enabled;

	regmap_read(dev->map, DW_IC_ENABLE, &enabled);
	regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &stat);
	if (!enabled || !(stat & ~DW_IC_INTR_ACTIVITY))
		return IRQ_NONE;
	if (pm_runtime_suspended(dev->dev) || stat == GENMASK(31, 0))
		return IRQ_NONE;
	dev_dbg(dev->dev, "enabled=%#x stat=%#x\n", enabled, stat);

	stat = i2c_dw_read_clear_intrbits(dev);

	if (!(dev->status & STATUS_ACTIVE)) {
		/*
		 * Unexpected interrupt from the driver's point of view. The
		 * state variables are either unset or stale, so acknowledge
		 * and disable interrupts to suppress further ones in case the
		 * interrupt really came from this HW (e.g. firmware has left
		 * the HW active).
		 */
		regmap_write(dev->map, DW_IC_INTR_MASK, 0);
		return IRQ_HANDLED;
	}

	if (stat & DW_IC_INTR_TX_ABRT) {
		dev->cmd_err |= DW_IC_ERR_TX_ABRT;
		dev->status &= ~STATUS_MASK;
		dev->rx_outstanding = 0;

		/*
		 * Anytime TX_ABRT is set, the contents of the tx/rx
		 * buffers are flushed. Make sure to skip them.
		 */
		regmap_write(dev->map, DW_IC_INTR_MASK, 0);
		goto tx_aborted;
	}

	if (stat & DW_IC_INTR_RX_FULL)
		i2c_dw_read(dev);

	if (stat & DW_IC_INTR_TX_EMPTY)
		i2c_dw_xfer_msg(dev);

	/*
	 * No need to modify or disable the interrupt mask here.
	 * i2c_dw_xfer_msg() will take care of it according to
	 * the current transmit status.
	 */

tx_aborted:
	if (((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err) &&
	    (dev->rx_outstanding == 0))
		complete(&dev->cmd_complete);
	else if (unlikely(dev->flags & ACCESS_INTR_MASK)) {
		/* Workaround to trigger a pending interrupt */
		regmap_read(dev->map, DW_IC_INTR_MASK, &stat);
		regmap_write(dev->map, DW_IC_INTR_MASK, 0);
		regmap_write(dev->map, DW_IC_INTR_MASK, stat);
	}

	return IRQ_HANDLED;
}

void i2c_dw_configure_master(struct dw_i2c_dev *dev)
{
	struct i2c_timings *t = &dev->timings;

	dev->functionality = I2C_FUNC_10BIT_ADDR | DW_IC_DEFAULT_FUNCTIONALITY;

	dev->master_cfg = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE |
			  DW_IC_CON_RESTART_EN;

	dev->mode = DW_IC_MASTER;

	switch (t->bus_freq_hz) {
	case I2C_MAX_STANDARD_MODE_FREQ:
		dev->master_cfg |= DW_IC_CON_SPEED_STD;
		break;
	case I2C_MAX_HIGH_SPEED_MODE_FREQ:
		dev->master_cfg |= DW_IC_CON_SPEED_HIGH;
		break;
	default:
		dev->master_cfg |= DW_IC_CON_SPEED_FAST;
	}
}
EXPORT_SYMBOL_GPL(i2c_dw_configure_master);

static void i2c_dw_prepare_recovery(struct i2c_adapter *adap)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);

	i2c_dw_disable(dev);
	reset_control_assert(dev->rst);
	i2c_dw_prepare_clk(dev, false);
}

static void i2c_dw_unprepare_recovery(struct i2c_adapter *adap)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);

	i2c_dw_prepare_clk(dev, true);
	reset_control_deassert(dev->rst);
	i2c_dw_init_master(dev);
}

static int i2c_dw_init_recovery_info(struct dw_i2c_dev *dev)
{
	struct i2c_bus_recovery_info *rinfo = &dev->rinfo;
	struct i2c_adapter *adap = &dev->adapter;
	struct gpio_desc *gpio;

	gpio = devm_gpiod_get_optional(dev->dev, "scl", GPIOD_OUT_HIGH);
	if (IS_ERR_OR_NULL(gpio))
		return PTR_ERR_OR_ZERO(gpio);

	rinfo->scl_gpiod = gpio;

	gpio = devm_gpiod_get_optional(dev->dev, "sda", GPIOD_IN);
	if (IS_ERR(gpio))
		return PTR_ERR(gpio);
	rinfo->sda_gpiod = gpio;

	rinfo->pinctrl = devm_pinctrl_get(dev->dev);
	if (IS_ERR(rinfo->pinctrl)) {
		if (PTR_ERR(rinfo->pinctrl) == -EPROBE_DEFER)
			return PTR_ERR(rinfo->pinctrl);

		rinfo->pinctrl = NULL;
		dev_err(dev->dev, "getting pinctrl info failed: bus recovery might not work\n");
	} else if (!rinfo->pinctrl) {
		dev_dbg(dev->dev, "pinctrl is disabled, bus recovery might not work\n");
	}

	rinfo->recover_bus = i2c_generic_scl_recovery;
	rinfo->prepare_recovery = i2c_dw_prepare_recovery;
	rinfo->unprepare_recovery = i2c_dw_unprepare_recovery;
	adap->bus_recovery_info = rinfo;

	dev_info(dev->dev, "running with gpio recovery mode! scl%s",
		 rinfo->sda_gpiod ? ",sda" : "");

	return 0;
}

static int i2c_dw_poll_adap_quirk(struct dw_i2c_dev *dev)
{
	struct i2c_adapter *adap = &dev->adapter;
	int ret;

	pm_runtime_get_noresume(dev->dev);
	ret = i2c_add_numbered_adapter(adap);
	if (ret)
		dev_err(dev->dev, "Failed to add adapter: %d\n", ret);
	pm_runtime_put_noidle(dev->dev);

	return ret;
}

static bool i2c_dw_is_model_poll(struct dw_i2c_dev *dev)
{
	switch (dev->flags & MODEL_MASK) {
	case MODEL_AMD_NAVI_GPU:
	case MODEL_WANGXUN_SP:
		return true;
	default:
		return false;
	}
}

int i2c_dw_probe_master(struct dw_i2c_dev *dev)
{
	struct i2c_adapter *adap = &dev->adapter;
	unsigned long irq_flags;
	unsigned int ic_con;
	int ret;

	init_completion(&dev->cmd_complete);

	dev->init = i2c_dw_init_master;
	dev->disable = i2c_dw_disable;

	ret = i2c_dw_init_regmap(dev);
	if (ret)
		return ret;

	ret = i2c_dw_set_timings_master(dev);
	if (ret)
		return ret;

	ret = i2c_dw_set_fifo_size(dev);
	if (ret)
		return ret;

	/* Lock the bus for accessing DW_IC_CON */
	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	/*
	 * On AMD platforms the BIOS advertises the bus clear feature and
	 * enables SCL/SDA stuck-low handling; the SMU FW performs the bus
	 * recovery process. The driver should not ignore this BIOS
	 * advertisement of the bus clear feature.
	 */
	ret = regmap_read(dev->map, DW_IC_CON, &ic_con);
	i2c_dw_release_lock(dev);
	if (ret)
		return ret;

	if (ic_con & DW_IC_CON_BUS_CLEAR_CTRL)
		dev->master_cfg |= DW_IC_CON_BUS_CLEAR_CTRL;

	ret = dev->init(dev);
	if (ret)
		return ret;

	snprintf(adap->name, sizeof(adap->name),
		 "Synopsys DesignWare I2C adapter");
	adap->retries = 3;
	adap->algo = &i2c_dw_algo;
	adap->quirks = &i2c_dw_quirks;
	adap->dev.parent = dev->dev;
	i2c_set_adapdata(adap, dev);

	if (i2c_dw_is_model_poll(dev))
		return i2c_dw_poll_adap_quirk(dev);

	if (dev->flags & ACCESS_NO_IRQ_SUSPEND)
		irq_flags = IRQF_NO_SUSPEND;
	else
		irq_flags = IRQF_SHARED | IRQF_COND_SUSPEND;

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	regmap_write(dev->map, DW_IC_INTR_MASK, 0);
	i2c_dw_release_lock(dev);

	ret = devm_request_irq(dev->dev, dev->irq, i2c_dw_isr, irq_flags,
			       dev_name(dev->dev), dev);
	if (ret) {
		dev_err(dev->dev, "failure requesting irq %i: %d\n",
			dev->irq, ret);
		return ret;
	}

	ret = i2c_dw_init_recovery_info(dev);
	if (ret)
		return ret;

	/*
	 * Increment the PM usage count during adapter registration in order
	 * to avoid a possible spurious runtime suspend when the adapter
	 * device is registered to the device core, and an immediate resume
	 * in case the bus has registered I2C slaves that do I2C transfers in
	 * their probe.
	 */
	pm_runtime_get_noresume(dev->dev);
	ret = i2c_add_numbered_adapter(adap);
	if (ret)
		dev_err(dev->dev, "failure adding adapter: %d\n", ret);
	pm_runtime_put_noidle(dev->dev);

	return ret;
}
EXPORT_SYMBOL_GPL(i2c_dw_probe_master);
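
/*
 * Rough usage sketch (illustrative only, not part of this file): a bus glue
 * driver, e.g. a hypothetical platform driver, is expected to fill in the
 * struct dw_i2c_dev fields (dev, irq, base/regmap, timings, flags) before
 * handing the device over to this core, along these lines, with the exact
 * helper names depending on the kernel version:
 *
 *	struct dw_i2c_dev *dev;
 *
 *	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
 *	dev->dev = &pdev->dev;
 *	dev->irq = platform_get_irq(pdev, 0);
 *	dev->base = devm_platform_ioremap_resource(pdev, 0);
 *	i2c_parse_fw_timings(&pdev->dev, &dev->timings, false);
 *	i2c_dw_configure_master(dev);
 *	ret = i2c_dw_probe_master(dev);
 */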

MODULE_DESCRIPTION("Synopsys DesignWare I2C bus master adapter");
MODULE_LICENSE("GPL");