// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Synopsys DesignWare I2C adapter driver (master only).
 *
 * Based on the TI DAVINCI I2C adapter driver.
 *
 * Copyright (C) 2006 Texas Instruments.
 * Copyright (C) 2007 MontaVista Software Inc.
 * Copyright (C) 2009 Provigent Ltd.
 */
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>

#include "i2c-designware-core.h"

#define AMD_TIMEOUT_MIN_US	25
#define AMD_TIMEOUT_MAX_US	250
#define AMD_MASTERCFG_MASK	GENMASK(15, 0)

static void i2c_dw_configure_fifo_master(struct dw_i2c_dev *dev)
{
	/* Configure Tx/Rx FIFO threshold levels */
	regmap_write(dev->map, DW_IC_TX_TL, dev->tx_fifo_depth / 2);
	regmap_write(dev->map, DW_IC_RX_TL, 0);

	/* Configure the I2C master */
	regmap_write(dev->map, DW_IC_CON, dev->master_cfg);
}

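/*
 * Compute the SCL high/low counts for the supported speed modes (unless
 * they were already provided) and determine the SDA hold time.
 */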
static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
{
	unsigned int comp_param1;
	u32 sda_falling_time, scl_falling_time;
	struct i2c_timings *t = &dev->timings;
	const char *fp_str = "";
	u32 ic_clk;
	int ret;

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	ret = regmap_read(dev->map, DW_IC_COMP_PARAM_1, &comp_param1);
	i2c_dw_release_lock(dev);
	if (ret)
		return ret;

	/* Set standard and fast speed dividers for high/low periods */
	sda_falling_time = t->sda_fall_ns ?: 300; /* ns */
	scl_falling_time = t->scl_fall_ns ?: 300; /* ns */

	/* Calculate SCL timing parameters for standard mode if not set */
	if (!dev->ss_hcnt || !dev->ss_lcnt) {
		ic_clk = i2c_dw_clk_rate(dev);
		dev->ss_hcnt =
			i2c_dw_scl_hcnt(ic_clk,
					4000,	/* tHD;STA = tHIGH = 4.0 us */
					sda_falling_time,
					0,	/* 0: DW default, 1: Ideal */
					0);	/* No offset */
		dev->ss_lcnt =
			i2c_dw_scl_lcnt(ic_clk,
					4700,	/* tLOW = 4.7 us */
					scl_falling_time,
					0);	/* No offset */
	}
	dev_dbg(dev->dev, "Standard Mode HCNT:LCNT = %d:%d\n",
		dev->ss_hcnt, dev->ss_lcnt);

	/*
	 * Set SCL timing parameters for fast mode or fast mode plus. The only
	 * difference is the timing parameter values since the registers are
	 * the same.
	 */
	if (t->bus_freq_hz == I2C_MAX_FAST_MODE_PLUS_FREQ) {
		/*
		 * Check whether Fast Mode Plus parameters are available and
		 * calculate SCL timing parameters for Fast Mode Plus if not.
		 */
		if (dev->fp_hcnt && dev->fp_lcnt) {
			dev->fs_hcnt = dev->fp_hcnt;
			dev->fs_lcnt = dev->fp_lcnt;
		} else {
			ic_clk = i2c_dw_clk_rate(dev);
			dev->fs_hcnt =
				i2c_dw_scl_hcnt(ic_clk,
						260,	/* tHIGH = 260 ns */
						sda_falling_time,
						0,	/* DW default */
						0);	/* No offset */
			dev->fs_lcnt =
				i2c_dw_scl_lcnt(ic_clk,
						500,	/* tLOW = 500 ns */
						scl_falling_time,
						0);	/* No offset */
		}
		fp_str = " Plus";
	}
	/*
	 * Calculate SCL timing parameters for fast mode if not set. They are
	 * also needed in high speed mode.
	 */
	if (!dev->fs_hcnt || !dev->fs_lcnt) {
		ic_clk = i2c_dw_clk_rate(dev);
		dev->fs_hcnt =
			i2c_dw_scl_hcnt(ic_clk,
					600,	/* tHD;STA = tHIGH = 0.6 us */
					sda_falling_time,
					0,	/* 0: DW default, 1: Ideal */
					0);	/* No offset */
		dev->fs_lcnt =
			i2c_dw_scl_lcnt(ic_clk,
					1300,	/* tLOW = 1.3 us */
					scl_falling_time,
					0);	/* No offset */
	}
	dev_dbg(dev->dev, "Fast Mode%s HCNT:LCNT = %d:%d\n",
		fp_str, dev->fs_hcnt, dev->fs_lcnt);

	/* Check whether high speed is possible and fall back to fast mode if not */
	if ((dev->master_cfg & DW_IC_CON_SPEED_MASK) ==
		DW_IC_CON_SPEED_HIGH) {
		if ((comp_param1 & DW_IC_COMP_PARAM_1_SPEED_MODE_MASK)
			!= DW_IC_COMP_PARAM_1_SPEED_MODE_HIGH) {
			dev_err(dev->dev, "High Speed not supported!\n");
			t->bus_freq_hz = I2C_MAX_FAST_MODE_FREQ;
			dev->master_cfg &= ~DW_IC_CON_SPEED_MASK;
			dev->master_cfg |= DW_IC_CON_SPEED_FAST;
			dev->hs_hcnt = 0;
			dev->hs_lcnt = 0;
		} else if (!dev->hs_hcnt || !dev->hs_lcnt) {
			ic_clk = i2c_dw_clk_rate(dev);
			dev->hs_hcnt =
				i2c_dw_scl_hcnt(ic_clk,
						160,	/* tHIGH = 160 ns */
						sda_falling_time,
						0,	/* DW default */
						0);	/* No offset */
			dev->hs_lcnt =
				i2c_dw_scl_lcnt(ic_clk,
						320,	/* tLOW = 320 ns */
						scl_falling_time,
						0);	/* No offset */
		}
		dev_dbg(dev->dev, "High Speed Mode HCNT:LCNT = %d:%d\n",
			dev->hs_hcnt, dev->hs_lcnt);
	}

	ret = i2c_dw_set_sda_hold(dev);
	if (ret)
		return ret;

	dev_dbg(dev->dev, "Bus speed: %s\n", i2c_freq_mode_string(t->bus_freq_hz));
	return 0;
}

/**
 * i2c_dw_init_master() - Initialize the designware I2C master hardware
 * @dev: device private data
 *
 * This function configures and enables the I2C master.
 * It is called during I2C initialization and in case of a timeout at
 * run time.
 */
static int i2c_dw_init_master(struct dw_i2c_dev *dev)
{
	int ret;

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	/* Disable the adapter */
	__i2c_dw_disable(dev);

	/* Write standard speed timing parameters */
	regmap_write(dev->map, DW_IC_SS_SCL_HCNT, dev->ss_hcnt);
	regmap_write(dev->map, DW_IC_SS_SCL_LCNT, dev->ss_lcnt);

	/* Write fast mode/fast mode plus timing parameters */
	regmap_write(dev->map, DW_IC_FS_SCL_HCNT, dev->fs_hcnt);
	regmap_write(dev->map, DW_IC_FS_SCL_LCNT, dev->fs_lcnt);

	/* Write high speed timing parameters if supported */
	if (dev->hs_hcnt && dev->hs_lcnt) {
		regmap_write(dev->map, DW_IC_HS_SCL_HCNT, dev->hs_hcnt);
		regmap_write(dev->map, DW_IC_HS_SCL_LCNT, dev->hs_lcnt);
	}

	/* Write SDA hold time if supported */
	if (dev->sda_hold_time)
		regmap_write(dev->map, DW_IC_SDA_HOLD, dev->sda_hold_time);

	i2c_dw_configure_fifo_master(dev);
	i2c_dw_release_lock(dev);

	return 0;
}

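/*
 * Prepare the controller for a new transfer: program the target address
 * and addressing mode, re-enable the adapter and unmask the master
 * interrupts.
 */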
static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
{
	struct i2c_msg *msgs = dev->msgs;
	u32 ic_con = 0, ic_tar = 0;
	unsigned int dummy;

	/* Disable the adapter */
	__i2c_dw_disable(dev);

	/* If the slave address is a 10-bit address, enable 10BITADDR */
	if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) {
		ic_con = DW_IC_CON_10BITADDR_MASTER;
		/*
		 * If I2C_DYNAMIC_TAR_UPDATE is set, the 10-bit addressing
		 * mode has to be enabled via bit 12 of the IC_TAR register.
		 * We always set it as I2C_DYNAMIC_TAR_UPDATE can't be
		 * detected from the registers.
		 */
		ic_tar = DW_IC_TAR_10BITADDR_MASTER;
	}

	regmap_update_bits(dev->map, DW_IC_CON, DW_IC_CON_10BITADDR_MASTER,
			   ic_con);

	/*
	 * Set the slave (target) address and enable 10-bit addressing mode
	 * if applicable.
	 */
	regmap_write(dev->map, DW_IC_TAR,
		     msgs[dev->msg_write_idx].addr | ic_tar);

	/* Enforce disabled interrupts (due to HW issues) */
	regmap_write(dev->map, DW_IC_INTR_MASK, 0);

	/* Enable the adapter */
	__i2c_dw_enable(dev);

	/* Dummy read to avoid the register getting stuck on Bay Trail */
	regmap_read(dev->map, DW_IC_ENABLE_STATUS, &dummy);

	/* Clear and enable interrupts */
	regmap_read(dev->map, DW_IC_CLR_INTR, &dummy);
	regmap_write(dev->map, DW_IC_INTR_MASK, DW_IC_INTR_MASTER_MASK);
}

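/* Poll DW_IC_INTR_STAT until the STOP_DET bit has cleared, or time out. */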
static int i2c_dw_check_stopbit(struct dw_i2c_dev *dev)
{
	u32 val;
	int ret;

	ret = regmap_read_poll_timeout(dev->map, DW_IC_INTR_STAT, val,
				       !(val & DW_IC_INTR_STOP_DET),
				       1100, 20000);
	if (ret)
		dev_err(dev->dev, "i2c timeout error %d\n", ret);

	return ret;
}

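/* Wait for the bus to go idle, then check the stop bit. */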
static int i2c_dw_status(struct dw_i2c_dev *dev)
{
	int status;

	status = i2c_dw_wait_bus_not_busy(dev);
	if (status)
		return status;

	return i2c_dw_check_stopbit(dev);
}

/*
 * Initiate and continue the master read/write transaction with a polling
 * based transfer routine that writes the messages into the Tx buffer.
 */
static int amd_i2c_dw_xfer_quirk(struct i2c_adapter *adap, struct i2c_msg *msgs, int num_msgs)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
	int msg_wrt_idx, msg_itr_lmt, buf_len, data_idx;
	int cmd = 0, status;
	u8 *tx_buf;
	unsigned int val;

	/*
	 * In order to enable the interrupt for UCSI, i.e. the AMD NAVI GPU
	 * card, it is mandatory to set the right value in a specific register
	 * (offset 0x474) as per the hardware IP specification.
	 */
	regmap_write(dev->map, AMD_UCSI_INTR_REG, AMD_UCSI_INTR_EN);

	dev->msgs = msgs;
	dev->msgs_num = num_msgs;
	i2c_dw_xfer_init(dev);
	regmap_write(dev->map, DW_IC_INTR_MASK, 0);

	/* Initiate messages read/write transaction */
	for (msg_wrt_idx = 0; msg_wrt_idx < num_msgs; msg_wrt_idx++) {
		tx_buf = msgs[msg_wrt_idx].buf;
		buf_len = msgs[msg_wrt_idx].len;

		if (!(msgs[msg_wrt_idx].flags & I2C_M_RD))
			regmap_write(dev->map, DW_IC_TX_TL, buf_len - 1);
		/*
		 * Initiate the i2c read/write transaction of buffer length,
		 * and poll for bus busy status. For the last message transfer,
		 * update the command with the stop bit enabled.
		 */
		for (msg_itr_lmt = buf_len; msg_itr_lmt > 0; msg_itr_lmt--) {
			if (msg_wrt_idx == num_msgs - 1 && msg_itr_lmt == 1)
				cmd |= BIT(9);

			if (msgs[msg_wrt_idx].flags & I2C_M_RD) {
				/* Due to a hardware bug, the same command needs to be written twice. */
				regmap_write(dev->map, DW_IC_DATA_CMD, 0x100);
				regmap_write(dev->map, DW_IC_DATA_CMD, 0x100 | cmd);
				if (cmd) {
					regmap_write(dev->map, DW_IC_TX_TL, 2 * (buf_len - 1));
					regmap_write(dev->map, DW_IC_RX_TL, 2 * (buf_len - 1));
					/*
					 * The stop bit needs to be checked. However, it cannot
					 * be detected from the registers, so we always check it
					 * when reading/writing the last byte.
					 */
					status = i2c_dw_status(dev);
					if (status)
						return status;

					for (data_idx = 0; data_idx < buf_len; data_idx++) {
						regmap_read(dev->map, DW_IC_DATA_CMD, &val);
						tx_buf[data_idx] = val;
					}
					status = i2c_dw_check_stopbit(dev);
					if (status)
						return status;
				}
			} else {
				regmap_write(dev->map, DW_IC_DATA_CMD, *tx_buf++ | cmd);
				usleep_range(AMD_TIMEOUT_MIN_US, AMD_TIMEOUT_MAX_US);
			}
		}
		status = i2c_dw_check_stopbit(dev);
		if (status)
			return status;
	}

	return 0;
}

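/* Poll the raw interrupt status until TX_EMPTY is signalled, or time out. */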
static int i2c_dw_poll_tx_empty(struct dw_i2c_dev *dev)
{
	u32 val;

	return regmap_read_poll_timeout(dev->map, DW_IC_RAW_INTR_STAT, val,
					val & DW_IC_INTR_TX_EMPTY,
					100, 1000);
}

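/* Poll the raw interrupt status until RX_FULL is signalled, or time out. */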
static int i2c_dw_poll_rx_full(struct dw_i2c_dev *dev)
{
	u32 val;

	return regmap_read_poll_timeout(dev->map, DW_IC_RAW_INTR_STAT, val,
					val & DW_IC_INTR_RX_FULL,
					100, 1000);
}

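/*
 * Polling based transfer routine for the Wangxun (txgbe) integrated
 * controller: bytes are pushed to and pulled from DW_IC_DATA_CMD one at
 * a time, waiting on TX_EMPTY/RX_FULL instead of using interrupts.
 */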
static int txgbe_i2c_dw_xfer_quirk(struct i2c_adapter *adap, struct i2c_msg *msgs,
				   int num_msgs)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
	int msg_idx, buf_len, data_idx, ret;
	unsigned int val, stop = 0;
	u8 *buf;

	dev->msgs = msgs;
	dev->msgs_num = num_msgs;
	i2c_dw_xfer_init(dev);
	regmap_write(dev->map, DW_IC_INTR_MASK, 0);

	for (msg_idx = 0; msg_idx < num_msgs; msg_idx++) {
		buf = msgs[msg_idx].buf;
		buf_len = msgs[msg_idx].len;

		for (data_idx = 0; data_idx < buf_len; data_idx++) {
			if (msg_idx == num_msgs - 1 && data_idx == buf_len - 1)
				stop |= BIT(9);

			if (msgs[msg_idx].flags & I2C_M_RD) {
				regmap_write(dev->map, DW_IC_DATA_CMD, 0x100 | stop);

				ret = i2c_dw_poll_rx_full(dev);
				if (ret)
					return ret;

				regmap_read(dev->map, DW_IC_DATA_CMD, &val);
				buf[data_idx] = val;
			} else {
				ret = i2c_dw_poll_tx_empty(dev);
				if (ret)
					return ret;

				regmap_write(dev->map, DW_IC_DATA_CMD,
					     buf[data_idx] | stop);
			}
		}
	}

	return num_msgs;
}

/*
 * Initiate (and continue) a low level master read/write transaction.
 * This function is only called from i2c_dw_isr and pumps i2c_msg
 * messages into the Tx buffer.  It handles everything even if the
 * i2c_msg data is longer than the Tx buffer.
 */
static void
i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
{
	struct i2c_msg *msgs = dev->msgs;
	u32 intr_mask;
	int tx_limit, rx_limit;
	u32 addr = msgs[dev->msg_write_idx].addr;
	u32 buf_len = dev->tx_buf_len;
	u8 *buf = dev->tx_buf;
	bool need_restart = false;
	unsigned int flr;

	intr_mask = DW_IC_INTR_MASTER_MASK;

	for (; dev->msg_write_idx < dev->msgs_num; dev->msg_write_idx++) {
		u32 flags = msgs[dev->msg_write_idx].flags;

		/*
		 * A change of the target address mid-transfer would require
		 * reprogramming the target address in the I2C adapter, which
		 * is not supported here, so flag an error.
		 */
		if (msgs[dev->msg_write_idx].addr != addr) {
			dev_err(dev->dev,
				"%s: invalid target address\n", __func__);
			dev->msg_err = -EINVAL;
			break;
		}

		if (!(dev->status & STATUS_WRITE_IN_PROGRESS)) {
			/* new i2c_msg */
			buf = msgs[dev->msg_write_idx].buf;
			buf_len = msgs[dev->msg_write_idx].len;

			/* If both IC_EMPTYFIFO_HOLD_MASTER_EN and
			 * IC_RESTART_EN are set, we must manually
			 * set the restart bit between messages.
			 */
			if ((dev->master_cfg & DW_IC_CON_RESTART_EN) &&
					(dev->msg_write_idx > 0))
				need_restart = true;
		}

		regmap_read(dev->map, DW_IC_TXFLR, &flr);
		tx_limit = dev->tx_fifo_depth - flr;

		regmap_read(dev->map, DW_IC_RXFLR, &flr);
		rx_limit = dev->rx_fifo_depth - flr;

		while (buf_len > 0 && tx_limit > 0 && rx_limit > 0) {
			u32 cmd = 0;

			/*
			 * If IC_EMPTYFIFO_HOLD_MASTER_EN is set we must
			 * manually set the stop bit. However, it cannot be
			 * detected from the registers so we always set it
			 * when writing/reading the last byte.
			 */

			/*
			 * i2c-core always sets the buffer length of
			 * I2C_FUNC_SMBUS_BLOCK_DATA to 1. The length will
			 * be adjusted when receiving the first byte.
			 * Thus we can't stop the transaction here.
			 */
			if (dev->msg_write_idx == dev->msgs_num - 1 &&
			    buf_len == 1 && !(flags & I2C_M_RECV_LEN))
				cmd |= BIT(9);

			if (need_restart) {
				cmd |= BIT(10);
				need_restart = false;
			}

			if (msgs[dev->msg_write_idx].flags & I2C_M_RD) {

				/* Avoid rx buffer overrun */
				if (dev->rx_outstanding >= dev->rx_fifo_depth)
					break;

				regmap_write(dev->map, DW_IC_DATA_CMD,
					     cmd | 0x100);
				rx_limit--;
				dev->rx_outstanding++;
			} else {
				regmap_write(dev->map, DW_IC_DATA_CMD,
					     cmd | *buf++);
			}
			tx_limit--; buf_len--;
		}

		dev->tx_buf = buf;
		dev->tx_buf_len = buf_len;

		/*
		 * Because we don't know the buffer length in the
		 * I2C_FUNC_SMBUS_BLOCK_DATA case, we can't stop the
		 * transaction here. Also disable the TX_EMPTY IRQ
		 * while waiting for the data length byte to avoid a
		 * flood of bogus interrupts.
		 */
		if (flags & I2C_M_RECV_LEN) {
			dev->status |= STATUS_WRITE_IN_PROGRESS;
			intr_mask &= ~DW_IC_INTR_TX_EMPTY;
			break;
		} else if (buf_len > 0) {
			/* more bytes to be written */
			dev->status |= STATUS_WRITE_IN_PROGRESS;
			break;
		} else
			dev->status &= ~STATUS_WRITE_IN_PROGRESS;
	}

	/*
	 * If the i2c_msg index search is completed, we don't need the
	 * TX_EMPTY interrupt any more.
	 */
	if (dev->msg_write_idx == dev->msgs_num)
		intr_mask &= ~DW_IC_INTR_TX_EMPTY;

	if (dev->msg_err)
		intr_mask = 0;

	regmap_write(dev->map, DW_IC_INTR_MASK, intr_mask);
}

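/*
 * Handle the length byte of an I2C_M_RECV_LEN (SMBus block read) message:
 * fix up the message length based on the received byte and re-enable
 * TX_EMPTY to resume the transfer.
 */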
static u8
i2c_dw_recv_len(struct dw_i2c_dev *dev, u8 len)
{
	struct i2c_msg *msgs = dev->msgs;
	u32 flags = msgs[dev->msg_read_idx].flags;

	/*
	 * Adjust the buffer length and mask the flag
	 * after receiving the first byte.
	 */
	len += (flags & I2C_CLIENT_PEC) ? 2 : 1;
	dev->tx_buf_len = len - min_t(u8, len, dev->rx_outstanding);
	msgs[dev->msg_read_idx].len = len;
	msgs[dev->msg_read_idx].flags &= ~I2C_M_RECV_LEN;

	/*
	 * Received buffer length, re-enable TX_EMPTY interrupt
	 * to resume the SMBUS transaction.
	 */
	regmap_update_bits(dev->map, DW_IC_INTR_MASK, DW_IC_INTR_TX_EMPTY,
			   DW_IC_INTR_TX_EMPTY);

	return len;
}

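/*
 * Drain the Rx FIFO into the buffers of the pending read messages.
 * Called from the interrupt handler when RX_FULL is signalled.
 */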
static void
i2c_dw_read(struct dw_i2c_dev *dev)
{
	struct i2c_msg *msgs = dev->msgs;
	unsigned int rx_valid;

	for (; dev->msg_read_idx < dev->msgs_num; dev->msg_read_idx++) {
		unsigned int tmp;
		u32 len;
		u8 *buf;

		if (!(msgs[dev->msg_read_idx].flags & I2C_M_RD))
			continue;

		if (!(dev->status & STATUS_READ_IN_PROGRESS)) {
			len = msgs[dev->msg_read_idx].len;
			buf = msgs[dev->msg_read_idx].buf;
		} else {
			len = dev->rx_buf_len;
			buf = dev->rx_buf;
		}

		regmap_read(dev->map, DW_IC_RXFLR, &rx_valid);

		for (; len > 0 && rx_valid > 0; len--, rx_valid--) {
			u32 flags = msgs[dev->msg_read_idx].flags;

			regmap_read(dev->map, DW_IC_DATA_CMD, &tmp);
			tmp &= DW_IC_DATA_CMD_DAT;
			/* Ensure length byte is a valid value */
			if (flags & I2C_M_RECV_LEN) {
				/*
				 * If IC_EMPTYFIFO_HOLD_MASTER_EN is set, which cannot be
				 * detected from the registers, the controller can only be
				 * disabled once the STOP bit has been set. But it is only
				 * set after receiving the block data response length in
				 * the I2C_FUNC_SMBUS_BLOCK_DATA case. So when the block
				 * data response length is invalid, another byte must be
				 * read with the STOP bit set to complete the transaction.
				 */
				if (!tmp || tmp > I2C_SMBUS_BLOCK_MAX)
					tmp = 1;

				len = i2c_dw_recv_len(dev, tmp);
			}
			*buf++ = tmp;
			dev->rx_outstanding--;
		}

		if (len > 0) {
			dev->status |= STATUS_READ_IN_PROGRESS;
			dev->rx_buf_len = len;
			dev->rx_buf = buf;
			return;
		} else
			dev->status &= ~STATUS_READ_IN_PROGRESS;
	}
}

/*
 * Prepare the controller for a transaction and call i2c_dw_xfer_msg.
 */
static int
i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
	int ret;

	dev_dbg(dev->dev, "%s: msgs: %d\n", __func__, num);

	pm_runtime_get_sync(dev->dev);

	/*
	 * Initiate the I2C message transfer directly when polling mode is
	 * enabled, as the polling based transfer mechanism does not support
	 * the interrupt based functionality of the existing DesignWare
	 * driver.
	 */
	switch (dev->flags & MODEL_MASK) {
	case MODEL_AMD_NAVI_GPU:
		ret = amd_i2c_dw_xfer_quirk(adap, msgs, num);
		goto done_nolock;
	case MODEL_WANGXUN_SP:
		ret = txgbe_i2c_dw_xfer_quirk(adap, msgs, num);
		goto done_nolock;
	default:
		break;
	}

	reinit_completion(&dev->cmd_complete);
	dev->msgs = msgs;
	dev->msgs_num = num;
	dev->cmd_err = 0;
	dev->msg_write_idx = 0;
	dev->msg_read_idx = 0;
	dev->msg_err = 0;
	dev->status = 0;
	dev->abort_source = 0;
	dev->rx_outstanding = 0;

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		goto done_nolock;

	ret = i2c_dw_wait_bus_not_busy(dev);
	if (ret < 0)
		goto done;

	/* Start the transfers */
	i2c_dw_xfer_init(dev);

	/* Wait for tx to complete */
	if (!wait_for_completion_timeout(&dev->cmd_complete, adap->timeout)) {
		dev_err(dev->dev, "controller timed out\n");
		/* i2c_dw_init_master() implicitly disables the adapter */
		i2c_recover_bus(&dev->adapter);
		i2c_dw_init_master(dev);
		ret = -ETIMEDOUT;
		goto done;
	}

	/*
	 * We must disable the adapter before returning and signaling the end
	 * of the current transfer. Otherwise the hardware might continue
	 * generating interrupts which in turn causes a race condition with
	 * the following transfer.  Needs some more investigation if the
	 * additional interrupts are a hardware bug or this driver doesn't
	 * handle them correctly yet.
	 */
	__i2c_dw_disable_nowait(dev);

	if (dev->msg_err) {
		ret = dev->msg_err;
		goto done;
	}

	/* No error */
	if (likely(!dev->cmd_err && !dev->status)) {
		ret = num;
		goto done;
	}

	/* We have an error */
	if (dev->cmd_err == DW_IC_ERR_TX_ABRT) {
		ret = i2c_dw_handle_tx_abort(dev);
		goto done;
	}

	if (dev->status)
		dev_err(dev->dev,
			"transfer terminated early - interrupt latency too high?\n");

	ret = -EIO;

done:
	i2c_dw_release_lock(dev);

done_nolock:
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return ret;
}

static const struct i2c_algorithm i2c_dw_algo = {
	.master_xfer = i2c_dw_xfer,
	.functionality = i2c_dw_func,
};

static const struct i2c_adapter_quirks i2c_dw_quirks = {
	.flags = I2C_AQ_NO_ZERO_LEN,
};

static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev)
{
	unsigned int stat, dummy;

	/*
	 * The IC_INTR_STAT register just indicates "enabled" interrupts.
	 * The unmasked raw version of the interrupt status bits is available
	 * in the IC_RAW_INTR_STAT register.
	 *
	 * That is,
	 *   stat = readl(IC_INTR_STAT);
	 * is equivalent to,
	 *   stat = readl(IC_RAW_INTR_STAT) & readl(IC_INTR_MASK);
	 *
	 * The raw version might be useful for debugging purposes.
	 */
	regmap_read(dev->map, DW_IC_INTR_STAT, &stat);

	/*
	 * Do not use the IC_CLR_INTR register to clear interrupts, or
	 * you'll miss some interrupts triggered during the period from
	 * readl(IC_INTR_STAT) to readl(IC_CLR_INTR).
	 *
	 * Instead, use the separately-prepared IC_CLR_* registers.
	 */
	if (stat & DW_IC_INTR_RX_UNDER)
		regmap_read(dev->map, DW_IC_CLR_RX_UNDER, &dummy);
	if (stat & DW_IC_INTR_RX_OVER)
		regmap_read(dev->map, DW_IC_CLR_RX_OVER, &dummy);
	if (stat & DW_IC_INTR_TX_OVER)
		regmap_read(dev->map, DW_IC_CLR_TX_OVER, &dummy);
	if (stat & DW_IC_INTR_RD_REQ)
		regmap_read(dev->map, DW_IC_CLR_RD_REQ, &dummy);
	if (stat & DW_IC_INTR_TX_ABRT) {
		/*
		 * The IC_TX_ABRT_SOURCE register is cleared whenever
		 * the IC_CLR_TX_ABRT is read.  Preserve it beforehand.
		 */
		regmap_read(dev->map, DW_IC_TX_ABRT_SOURCE, &dev->abort_source);
		regmap_read(dev->map, DW_IC_CLR_TX_ABRT, &dummy);
	}
	if (stat & DW_IC_INTR_RX_DONE)
		regmap_read(dev->map, DW_IC_CLR_RX_DONE, &dummy);
	if (stat & DW_IC_INTR_ACTIVITY)
		regmap_read(dev->map, DW_IC_CLR_ACTIVITY, &dummy);
	if ((stat & DW_IC_INTR_STOP_DET) &&
	    ((dev->rx_outstanding == 0) || (stat & DW_IC_INTR_RX_FULL)))
		regmap_read(dev->map, DW_IC_CLR_STOP_DET, &dummy);
	if (stat & DW_IC_INTR_START_DET)
		regmap_read(dev->map, DW_IC_CLR_START_DET, &dummy);
	if (stat & DW_IC_INTR_GEN_CALL)
		regmap_read(dev->map, DW_IC_CLR_GEN_CALL, &dummy);

	return stat;
}

/*
 * Interrupt service routine. This gets called whenever an I2C master
 * interrupt occurs.
 */
static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id)
{
	struct dw_i2c_dev *dev = dev_id;
	unsigned int stat, enabled;

	regmap_read(dev->map, DW_IC_ENABLE, &enabled);
	regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &stat);
	if (!enabled || !(stat & ~DW_IC_INTR_ACTIVITY))
		return IRQ_NONE;
	if (pm_runtime_suspended(dev->dev) || stat == GENMASK(31, 0))
		return IRQ_NONE;
	dev_dbg(dev->dev, "enabled=%#x stat=%#x\n", enabled, stat);

	stat = i2c_dw_read_clear_intrbits(dev);

	if (!(dev->status & STATUS_ACTIVE)) {
		/*
		 * Unexpected interrupt from the driver's point of view. State
		 * variables are either unset or stale, so acknowledge and
		 * disable interrupts to suppress any further interrupts if
		 * the interrupt really came from this HW (e.g. firmware has
		 * left the HW active).
		 */
		regmap_write(dev->map, DW_IC_INTR_MASK, 0);
		return IRQ_HANDLED;
	}

	if (stat & DW_IC_INTR_TX_ABRT) {
		dev->cmd_err |= DW_IC_ERR_TX_ABRT;
		dev->status &= ~STATUS_MASK;
		dev->rx_outstanding = 0;

		/*
		 * Anytime TX_ABRT is set, the contents of the tx/rx
		 * buffers are flushed. Make sure to skip them.
		 */
		regmap_write(dev->map, DW_IC_INTR_MASK, 0);
		goto tx_aborted;
	}

	if (stat & DW_IC_INTR_RX_FULL)
		i2c_dw_read(dev);

	if (stat & DW_IC_INTR_TX_EMPTY)
		i2c_dw_xfer_msg(dev);

	/*
	 * No need to modify or disable the interrupt mask here.
	 * i2c_dw_xfer_msg() will take care of it according to
	 * the current transmit status.
	 */

tx_aborted:
	if (((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err) &&
	     (dev->rx_outstanding == 0))
		complete(&dev->cmd_complete);
	else if (unlikely(dev->flags & ACCESS_INTR_MASK)) {
		/* Workaround to trigger pending interrupt */
		regmap_read(dev->map, DW_IC_INTR_MASK, &stat);
		regmap_write(dev->map, DW_IC_INTR_MASK, 0);
		regmap_write(dev->map, DW_IC_INTR_MASK, stat);
	}

	return IRQ_HANDLED;
}

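/*
 * Set the default master configuration (master mode, slave disabled,
 * restarts enabled) and select the speed mode matching the requested
 * bus frequency.
 */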
void i2c_dw_configure_master(struct dw_i2c_dev *dev)
{
	struct i2c_timings *t = &dev->timings;

	dev->functionality = I2C_FUNC_10BIT_ADDR | DW_IC_DEFAULT_FUNCTIONALITY;

	dev->master_cfg = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE |
			  DW_IC_CON_RESTART_EN;

	dev->mode = DW_IC_MASTER;

	switch (t->bus_freq_hz) {
	case I2C_MAX_STANDARD_MODE_FREQ:
		dev->master_cfg |= DW_IC_CON_SPEED_STD;
		break;
	case I2C_MAX_HIGH_SPEED_MODE_FREQ:
		dev->master_cfg |= DW_IC_CON_SPEED_HIGH;
		break;
	default:
		dev->master_cfg |= DW_IC_CON_SPEED_FAST;
	}
}
EXPORT_SYMBOL_GPL(i2c_dw_configure_master);

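/*
 * Quiesce the controller before a GPIO-based bus recovery attempt;
 * i2c_dw_unprepare_recovery() below undoes this afterwards.
 */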
static void i2c_dw_prepare_recovery(struct i2c_adapter *adap)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);

	i2c_dw_disable(dev);
	reset_control_assert(dev->rst);
	i2c_dw_prepare_clk(dev, false);
}

static void i2c_dw_unprepare_recovery(struct i2c_adapter *adap)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);

	i2c_dw_prepare_clk(dev, true);
	reset_control_deassert(dev->rst);
	i2c_dw_init_master(dev);
}

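/*
 * Set up GPIO-based bus recovery if an "scl" (and optionally "sda") GPIO
 * is described for the device; otherwise leave recovery unconfigured.
 */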
static int i2c_dw_init_recovery_info(struct dw_i2c_dev *dev)
{
	struct i2c_bus_recovery_info *rinfo = &dev->rinfo;
	struct i2c_adapter *adap = &dev->adapter;
	struct gpio_desc *gpio;

	gpio = devm_gpiod_get_optional(dev->dev, "scl", GPIOD_OUT_HIGH);
	if (IS_ERR_OR_NULL(gpio))
		return PTR_ERR_OR_ZERO(gpio);

	rinfo->scl_gpiod = gpio;

	gpio = devm_gpiod_get_optional(dev->dev, "sda", GPIOD_IN);
	if (IS_ERR(gpio))
		return PTR_ERR(gpio);
	rinfo->sda_gpiod = gpio;

	rinfo->pinctrl = devm_pinctrl_get(dev->dev);
	if (IS_ERR(rinfo->pinctrl)) {
		if (PTR_ERR(rinfo->pinctrl) == -EPROBE_DEFER)
			return PTR_ERR(rinfo->pinctrl);

		rinfo->pinctrl = NULL;
		dev_err(dev->dev, "getting pinctrl info failed: bus recovery might not work\n");
	} else if (!rinfo->pinctrl) {
		dev_dbg(dev->dev, "pinctrl is disabled, bus recovery might not work\n");
	}

	rinfo->recover_bus = i2c_generic_scl_recovery;
	rinfo->prepare_recovery = i2c_dw_prepare_recovery;
	rinfo->unprepare_recovery = i2c_dw_unprepare_recovery;
	adap->bus_recovery_info = rinfo;

	dev_info(dev->dev, "running with gpio recovery mode! scl%s",
		 rinfo->sda_gpiod ? ",sda" : "");

	return 0;
}

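/*
 * Register the adapter for polling-only controller models, keeping the
 * runtime PM usage count balanced around the registration.
 */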
static int i2c_dw_poll_adap_quirk(struct dw_i2c_dev *dev)
{
	struct i2c_adapter *adap = &dev->adapter;
	int ret;

	pm_runtime_get_noresume(dev->dev);
	ret = i2c_add_numbered_adapter(adap);
	if (ret)
		dev_err(dev->dev, "Failed to add adapter: %d\n", ret);
	pm_runtime_put_noidle(dev->dev);

	return ret;
}

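/* Return true for controller models that use the polling-based transfer quirks. */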
static bool i2c_dw_is_model_poll(struct dw_i2c_dev *dev)
{
	switch (dev->flags & MODEL_MASK) {
	case MODEL_AMD_NAVI_GPU:
	case MODEL_WANGXUN_SP:
		return true;
	default:
		return false;
	}
}

int i2c_dw_probe_master(struct dw_i2c_dev *dev)
{
	struct i2c_adapter *adap = &dev->adapter;
	unsigned long irq_flags;
	unsigned int ic_con;
	int ret;

	init_completion(&dev->cmd_complete);

	dev->init = i2c_dw_init_master;
	dev->disable = i2c_dw_disable;

	ret = i2c_dw_init_regmap(dev);
	if (ret)
		return ret;

	ret = i2c_dw_set_timings_master(dev);
	if (ret)
		return ret;

	ret = i2c_dw_set_fifo_size(dev);
	if (ret)
		return ret;

	/* Lock the bus for accessing DW_IC_CON */
	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	/*
	 * On AMD platforms the BIOS advertises the bus clear feature and
	 * enables SCL/SDA stuck-low detection; the SMU firmware performs the
	 * bus recovery process. The driver should not ignore this BIOS
	 * advertisement of the bus clear feature.
	 */
	ret = regmap_read(dev->map, DW_IC_CON, &ic_con);
	i2c_dw_release_lock(dev);
	if (ret)
		return ret;

	if (ic_con & DW_IC_CON_BUS_CLEAR_CTRL)
		dev->master_cfg |= DW_IC_CON_BUS_CLEAR_CTRL;

	ret = dev->init(dev);
	if (ret)
		return ret;

	snprintf(adap->name, sizeof(adap->name),
		 "Synopsys DesignWare I2C adapter");
	adap->retries = 3;
	adap->algo = &i2c_dw_algo;
	adap->quirks = &i2c_dw_quirks;
	adap->dev.parent = dev->dev;
	i2c_set_adapdata(adap, dev);

	if (i2c_dw_is_model_poll(dev))
		return i2c_dw_poll_adap_quirk(dev);

	if (dev->flags & ACCESS_NO_IRQ_SUSPEND)
		irq_flags = IRQF_NO_SUSPEND;
	else
		irq_flags = IRQF_SHARED | IRQF_COND_SUSPEND;

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	regmap_write(dev->map, DW_IC_INTR_MASK, 0);
	i2c_dw_release_lock(dev);

	ret = devm_request_irq(dev->dev, dev->irq, i2c_dw_isr, irq_flags,
			       dev_name(dev->dev), dev);
	if (ret) {
		dev_err(dev->dev, "failure requesting irq %i: %d\n",
			dev->irq, ret);
		return ret;
	}

	ret = i2c_dw_init_recovery_info(dev);
	if (ret)
		return ret;

	/*
	 * Increment the PM usage count during adapter registration in order
	 * to avoid a possible spurious runtime suspend when the adapter
	 * device is registered to the device core, and an immediate resume
	 * in case the bus has registered I2C slaves that do I2C transfers in
	 * their probe.
	 */
	pm_runtime_get_noresume(dev->dev);
	ret = i2c_add_numbered_adapter(adap);
	if (ret)
		dev_err(dev->dev, "failure adding adapter: %d\n", ret);
	pm_runtime_put_noidle(dev->dev);

	return ret;
}
EXPORT_SYMBOL_GPL(i2c_dw_probe_master);

MODULE_DESCRIPTION("Synopsys DesignWare I2C bus master adapter");
MODULE_LICENSE("GPL");