1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2018 Cadence Design Systems Inc.
4  *
5  * Author: Boris Brezillon <boris.brezillon@bootlin.com>
6  */
7 
8 #include <linux/bitops.h>
9 #include <linux/clk.h>
10 #include <linux/err.h>
11 #include <linux/errno.h>
12 #include <linux/i3c/master.h>
13 #include <linux/interrupt.h>
14 #include <linux/io.h>
15 #include <linux/iopoll.h>
16 #include <linux/ioport.h>
17 #include <linux/kernel.h>
18 #include <linux/list.h>
19 #include <linux/module.h>
20 #include <linux/of.h>
21 #include <linux/platform_device.h>
22 #include <linux/slab.h>
23 #include <linux/spinlock.h>
24 #include <linux/workqueue.h>
25 
26 #define DEV_ID				0x0
27 #define DEV_ID_I3C_MASTER		0x5034
28 
29 #define CONF_STATUS0			0x4
30 #define CONF_STATUS0_CMDR_DEPTH(x)	(4 << (((x) & GENMASK(31, 29)) >> 29))
31 #define CONF_STATUS0_ECC_CHK		BIT(28)
32 #define CONF_STATUS0_INTEG_CHK		BIT(27)
33 #define CONF_STATUS0_CSR_DAP_CHK	BIT(26)
34 #define CONF_STATUS0_TRANS_TOUT_CHK	BIT(25)
35 #define CONF_STATUS0_PROT_FAULTS_CHK	BIT(24)
36 #define CONF_STATUS0_GPO_NUM(x)		(((x) & GENMASK(23, 16)) >> 16)
37 #define CONF_STATUS0_GPI_NUM(x)		(((x) & GENMASK(15, 8)) >> 8)
#define CONF_STATUS0_IBIR_DEPTH(x)	(4 << (((x) & GENMASK(7, 6)) >> 6))
39 #define CONF_STATUS0_SUPPORTS_DDR	BIT(5)
40 #define CONF_STATUS0_SEC_MASTER		BIT(4)
41 #define CONF_STATUS0_DEVS_NUM(x)	((x) & GENMASK(3, 0))
42 
43 #define CONF_STATUS1			0x8
44 #define CONF_STATUS1_IBI_HW_RES(x)	((((x) & GENMASK(31, 28)) >> 28) + 1)
45 #define CONF_STATUS1_CMD_DEPTH(x)	(4 << (((x) & GENMASK(27, 26)) >> 26))
46 #define CONF_STATUS1_SLVDDR_RX_DEPTH(x)	(8 << (((x) & GENMASK(25, 21)) >> 21))
47 #define CONF_STATUS1_SLVDDR_TX_DEPTH(x)	(8 << (((x) & GENMASK(20, 16)) >> 16))
48 #define CONF_STATUS1_IBI_DEPTH(x)	(2 << (((x) & GENMASK(12, 10)) >> 10))
49 #define CONF_STATUS1_RX_DEPTH(x)	(8 << (((x) & GENMASK(9, 5)) >> 5))
50 #define CONF_STATUS1_TX_DEPTH(x)	(8 << ((x) & GENMASK(4, 0)))
51 
52 #define REV_ID				0xc
53 #define REV_ID_VID(id)			(((id) & GENMASK(31, 20)) >> 20)
54 #define REV_ID_PID(id)			(((id) & GENMASK(19, 8)) >> 8)
55 #define REV_ID_REV_MAJOR(id)		(((id) & GENMASK(7, 4)) >> 4)
56 #define REV_ID_REV_MINOR(id)		((id) & GENMASK(3, 0))
57 
58 #define CTRL				0x10
59 #define CTRL_DEV_EN			BIT(31)
60 #define CTRL_HALT_EN			BIT(30)
61 #define CTRL_MCS			BIT(29)
62 #define CTRL_MCS_EN			BIT(28)
63 #define CTRL_HJ_DISEC			BIT(8)
64 #define CTRL_MST_ACK			BIT(7)
65 #define CTRL_HJ_ACK			BIT(6)
66 #define CTRL_HJ_INIT			BIT(5)
67 #define CTRL_MST_INIT			BIT(4)
68 #define CTRL_AHDR_OPT			BIT(3)
69 #define CTRL_PURE_BUS_MODE		0
70 #define CTRL_MIXED_FAST_BUS_MODE	2
71 #define CTRL_MIXED_SLOW_BUS_MODE	3
72 #define CTRL_BUS_MODE_MASK		GENMASK(1, 0)
73 
74 #define PRESCL_CTRL0			0x14
75 #define PRESCL_CTRL0_I2C(x)		((x) << 16)
76 #define PRESCL_CTRL0_I3C(x)		(x)
77 #define PRESCL_CTRL0_MAX		GENMASK(9, 0)
78 
79 #define PRESCL_CTRL1			0x18
80 #define PRESCL_CTRL1_PP_LOW_MASK	GENMASK(15, 8)
81 #define PRESCL_CTRL1_PP_LOW(x)		((x) << 8)
82 #define PRESCL_CTRL1_OD_LOW_MASK	GENMASK(7, 0)
83 #define PRESCL_CTRL1_OD_LOW(x)		(x)
84 
85 #define MST_IER				0x20
86 #define MST_IDR				0x24
87 #define MST_IMR				0x28
88 #define MST_ICR				0x2c
89 #define MST_ISR				0x30
90 #define MST_INT_HALTED			BIT(18)
91 #define MST_INT_MR_DONE			BIT(17)
92 #define MST_INT_IMM_COMP		BIT(16)
93 #define MST_INT_TX_THR			BIT(15)
94 #define MST_INT_TX_OVF			BIT(14)
95 #define MST_INT_IBID_THR		BIT(12)
96 #define MST_INT_IBID_UNF		BIT(11)
97 #define MST_INT_IBIR_THR		BIT(10)
98 #define MST_INT_IBIR_UNF		BIT(9)
99 #define MST_INT_IBIR_OVF		BIT(8)
100 #define MST_INT_RX_THR			BIT(7)
101 #define MST_INT_RX_UNF			BIT(6)
102 #define MST_INT_CMDD_EMP		BIT(5)
103 #define MST_INT_CMDD_THR		BIT(4)
104 #define MST_INT_CMDD_OVF		BIT(3)
105 #define MST_INT_CMDR_THR		BIT(2)
106 #define MST_INT_CMDR_UNF		BIT(1)
107 #define MST_INT_CMDR_OVF		BIT(0)
108 
109 #define MST_STATUS0			0x34
110 #define MST_STATUS0_IDLE		BIT(18)
111 #define MST_STATUS0_HALTED		BIT(17)
112 #define MST_STATUS0_MASTER_MODE		BIT(16)
113 #define MST_STATUS0_TX_FULL		BIT(13)
114 #define MST_STATUS0_IBID_FULL		BIT(12)
115 #define MST_STATUS0_IBIR_FULL		BIT(11)
116 #define MST_STATUS0_RX_FULL		BIT(10)
117 #define MST_STATUS0_CMDD_FULL		BIT(9)
118 #define MST_STATUS0_CMDR_FULL		BIT(8)
119 #define MST_STATUS0_TX_EMP		BIT(5)
120 #define MST_STATUS0_IBID_EMP		BIT(4)
121 #define MST_STATUS0_IBIR_EMP		BIT(3)
122 #define MST_STATUS0_RX_EMP		BIT(2)
123 #define MST_STATUS0_CMDD_EMP		BIT(1)
124 #define MST_STATUS0_CMDR_EMP		BIT(0)
125 
126 #define CMDR				0x38
127 #define CMDR_NO_ERROR			0
128 #define CMDR_DDR_PREAMBLE_ERROR		1
129 #define CMDR_DDR_PARITY_ERROR		2
130 #define CMDR_DDR_RX_FIFO_OVF		3
131 #define CMDR_DDR_TX_FIFO_UNF		4
132 #define CMDR_M0_ERROR			5
133 #define CMDR_M1_ERROR			6
134 #define CMDR_M2_ERROR			7
135 #define CMDR_MST_ABORT			8
136 #define CMDR_NACK_RESP			9
137 #define CMDR_INVALID_DA			10
138 #define CMDR_DDR_DROPPED		11
139 #define CMDR_ERROR(x)			(((x) & GENMASK(27, 24)) >> 24)
140 #define CMDR_XFER_BYTES(x)		(((x) & GENMASK(19, 8)) >> 8)
141 #define CMDR_CMDID_HJACK_DISEC		0xfe
142 #define CMDR_CMDID_HJACK_ENTDAA		0xff
143 #define CMDR_CMDID(x)			((x) & GENMASK(7, 0))
144 
145 #define IBIR				0x3c
146 #define IBIR_ACKED			BIT(12)
147 #define IBIR_SLVID(x)			(((x) & GENMASK(11, 8)) >> 8)
148 #define IBIR_ERROR			BIT(7)
149 #define IBIR_XFER_BYTES(x)		(((x) & GENMASK(6, 2)) >> 2)
150 #define IBIR_TYPE_IBI			0
151 #define IBIR_TYPE_HJ			1
152 #define IBIR_TYPE_MR			2
153 #define IBIR_TYPE(x)			((x) & GENMASK(1, 0))
154 
155 #define SLV_IER				0x40
156 #define SLV_IDR				0x44
157 #define SLV_IMR				0x48
158 #define SLV_ICR				0x4c
159 #define SLV_ISR				0x50
160 #define SLV_INT_TM			BIT(20)
161 #define SLV_INT_ERROR			BIT(19)
162 #define SLV_INT_EVENT_UP		BIT(18)
163 #define SLV_INT_HJ_DONE			BIT(17)
164 #define SLV_INT_MR_DONE			BIT(16)
165 #define SLV_INT_DA_UPD			BIT(15)
166 #define SLV_INT_SDR_FAIL		BIT(14)
167 #define SLV_INT_DDR_FAIL		BIT(13)
168 #define SLV_INT_M_RD_ABORT		BIT(12)
169 #define SLV_INT_DDR_RX_THR		BIT(11)
170 #define SLV_INT_DDR_TX_THR		BIT(10)
171 #define SLV_INT_SDR_RX_THR		BIT(9)
172 #define SLV_INT_SDR_TX_THR		BIT(8)
173 #define SLV_INT_DDR_RX_UNF		BIT(7)
174 #define SLV_INT_DDR_TX_OVF		BIT(6)
175 #define SLV_INT_SDR_RX_UNF		BIT(5)
176 #define SLV_INT_SDR_TX_OVF		BIT(4)
177 #define SLV_INT_DDR_RD_COMP		BIT(3)
178 #define SLV_INT_DDR_WR_COMP		BIT(2)
179 #define SLV_INT_SDR_RD_COMP		BIT(1)
180 #define SLV_INT_SDR_WR_COMP		BIT(0)
181 
182 #define SLV_STATUS0			0x54
183 #define SLV_STATUS0_REG_ADDR(s)		(((s) & GENMASK(23, 16)) >> 16)
184 #define SLV_STATUS0_XFRD_BYTES(s)	((s) & GENMASK(15, 0))
185 
186 #define SLV_STATUS1			0x58
187 #define SLV_STATUS1_AS(s)		(((s) & GENMASK(21, 20)) >> 20)
188 #define SLV_STATUS1_VEN_TM		BIT(19)
189 #define SLV_STATUS1_HJ_DIS		BIT(18)
190 #define SLV_STATUS1_MR_DIS		BIT(17)
191 #define SLV_STATUS1_PROT_ERR		BIT(16)
#define SLV_STATUS1_DA(s)		(((s) & GENMASK(15, 9)) >> 9)
193 #define SLV_STATUS1_HAS_DA		BIT(8)
194 #define SLV_STATUS1_DDR_RX_FULL		BIT(7)
195 #define SLV_STATUS1_DDR_TX_FULL		BIT(6)
196 #define SLV_STATUS1_DDR_RX_EMPTY	BIT(5)
197 #define SLV_STATUS1_DDR_TX_EMPTY	BIT(4)
198 #define SLV_STATUS1_SDR_RX_FULL		BIT(3)
199 #define SLV_STATUS1_SDR_TX_FULL		BIT(2)
200 #define SLV_STATUS1_SDR_RX_EMPTY	BIT(1)
201 #define SLV_STATUS1_SDR_TX_EMPTY	BIT(0)
202 
203 #define CMD0_FIFO			0x60
204 #define CMD0_FIFO_IS_DDR		BIT(31)
205 #define CMD0_FIFO_IS_CCC		BIT(30)
206 #define CMD0_FIFO_BCH			BIT(29)
207 #define XMIT_BURST_STATIC_SUBADDR	0
208 #define XMIT_SINGLE_INC_SUBADDR		1
209 #define XMIT_SINGLE_STATIC_SUBADDR	2
210 #define XMIT_BURST_WITHOUT_SUBADDR	3
211 #define CMD0_FIFO_PRIV_XMIT_MODE(m)	((m) << 27)
212 #define CMD0_FIFO_SBCA			BIT(26)
213 #define CMD0_FIFO_RSBC			BIT(25)
214 #define CMD0_FIFO_IS_10B		BIT(24)
215 #define CMD0_FIFO_PL_LEN(l)		((l) << 12)
216 #define CMD0_FIFO_PL_LEN_MAX		4095
217 #define CMD0_FIFO_DEV_ADDR(a)		((a) << 1)
218 #define CMD0_FIFO_RNW			BIT(0)
219 
220 #define CMD1_FIFO			0x64
221 #define CMD1_FIFO_CMDID(id)		((id) << 24)
222 #define CMD1_FIFO_CSRADDR(a)		(a)
223 #define CMD1_FIFO_CCC(id)		(id)
224 
225 #define TX_FIFO				0x68
226 
227 #define IMD_CMD0			0x70
228 #define IMD_CMD0_PL_LEN(l)		((l) << 12)
229 #define IMD_CMD0_DEV_ADDR(a)		((a) << 1)
230 #define IMD_CMD0_RNW			BIT(0)
231 
232 #define IMD_CMD1			0x74
233 #define IMD_CMD1_CCC(id)		(id)
234 
235 #define IMD_DATA			0x78
236 #define RX_FIFO				0x80
237 #define IBI_DATA_FIFO			0x84
238 #define SLV_DDR_TX_FIFO			0x88
239 #define SLV_DDR_RX_FIFO			0x8c
240 
241 #define CMD_IBI_THR_CTRL		0x90
242 #define IBIR_THR(t)			((t) << 24)
243 #define CMDR_THR(t)			((t) << 16)
244 #define IBI_THR(t)			((t) << 8)
245 #define CMD_THR(t)			(t)
246 
247 #define TX_RX_THR_CTRL			0x94
248 #define RX_THR(t)			((t) << 16)
249 #define TX_THR(t)			(t)
250 
251 #define SLV_DDR_TX_RX_THR_CTRL		0x98
252 #define SLV_DDR_RX_THR(t)		((t) << 16)
253 #define SLV_DDR_TX_THR(t)		(t)
254 
255 #define FLUSH_CTRL			0x9c
256 #define FLUSH_IBI_RESP			BIT(23)
257 #define FLUSH_CMD_RESP			BIT(22)
258 #define FLUSH_SLV_DDR_RX_FIFO		BIT(22)
259 #define FLUSH_SLV_DDR_TX_FIFO		BIT(21)
260 #define FLUSH_IMM_FIFO			BIT(20)
261 #define FLUSH_IBI_FIFO			BIT(19)
262 #define FLUSH_RX_FIFO			BIT(18)
263 #define FLUSH_TX_FIFO			BIT(17)
264 #define FLUSH_CMD_FIFO			BIT(16)
265 
266 #define TTO_PRESCL_CTRL0		0xb0
267 #define TTO_PRESCL_CTRL0_DIVB(x)	((x) << 16)
268 #define TTO_PRESCL_CTRL0_DIVA(x)	(x)
269 
270 #define TTO_PRESCL_CTRL1		0xb4
271 #define TTO_PRESCL_CTRL1_DIVB(x)	((x) << 16)
272 #define TTO_PRESCL_CTRL1_DIVA(x)	(x)
273 
274 #define DEVS_CTRL			0xb8
275 #define DEVS_CTRL_DEV_CLR_SHIFT		16
276 #define DEVS_CTRL_DEV_CLR_ALL		GENMASK(31, 16)
277 #define DEVS_CTRL_DEV_CLR(dev)		BIT(16 + (dev))
278 #define DEVS_CTRL_DEV_ACTIVE(dev)	BIT(dev)
279 #define DEVS_CTRL_DEVS_ACTIVE_MASK	GENMASK(15, 0)
280 #define MAX_DEVS			16
281 
282 #define DEV_ID_RR0(d)			(0xc0 + ((d) * 0x10))
283 #define DEV_ID_RR0_LVR_EXT_ADDR		BIT(11)
284 #define DEV_ID_RR0_HDR_CAP		BIT(10)
285 #define DEV_ID_RR0_IS_I3C		BIT(9)
286 #define DEV_ID_RR0_DEV_ADDR_MASK	(GENMASK(6, 0) | GENMASK(15, 13))
287 #define DEV_ID_RR0_SET_DEV_ADDR(a)	(((a) & GENMASK(6, 0)) |	\
288 					 (((a) & GENMASK(9, 7)) << 6))
289 #define DEV_ID_RR0_GET_DEV_ADDR(x)	((((x) >> 1) & GENMASK(6, 0)) |	\
290 					 (((x) >> 6) & GENMASK(9, 7)))
291 
292 #define DEV_ID_RR1(d)			(0xc4 + ((d) * 0x10))
293 #define DEV_ID_RR1_PID_MSB(pid)		(pid)
294 
295 #define DEV_ID_RR2(d)			(0xc8 + ((d) * 0x10))
296 #define DEV_ID_RR2_PID_LSB(pid)		((pid) << 16)
297 #define DEV_ID_RR2_BCR(bcr)		((bcr) << 8)
298 #define DEV_ID_RR2_DCR(dcr)		(dcr)
299 #define DEV_ID_RR2_LVR(lvr)		(lvr)
300 
301 #define SIR_MAP(x)			(0x180 + ((x) * 4))
302 #define SIR_MAP_DEV_REG(d)		SIR_MAP((d) / 2)
303 #define SIR_MAP_DEV_SHIFT(d, fs)	((fs) + (((d) % 2) ? 16 : 0))
304 #define SIR_MAP_DEV_CONF_MASK(d)	(GENMASK(15, 0) << (((d) % 2) ? 16 : 0))
305 #define SIR_MAP_DEV_CONF(d, c)		((c) << (((d) % 2) ? 16 : 0))
306 #define DEV_ROLE_SLAVE			0
307 #define DEV_ROLE_MASTER			1
308 #define SIR_MAP_DEV_ROLE(role)		((role) << 14)
309 #define SIR_MAP_DEV_SLOW		BIT(13)
310 #define SIR_MAP_DEV_PL(l)		((l) << 8)
311 #define SIR_MAP_PL_MAX			GENMASK(4, 0)
312 #define SIR_MAP_DEV_DA(a)		((a) << 1)
313 #define SIR_MAP_DEV_ACK			BIT(0)
314 
315 #define GPIR_WORD(x)			(0x200 + ((x) * 4))
316 #define GPI_REG(val, id)		\
317 	(((val) >> (((id) % 4) * 8)) & GENMASK(7, 0))
318 
319 #define GPOR_WORD(x)			(0x220 + ((x) * 4))
320 #define GPO_REG(val, id)		\
321 	(((val) >> (((id) % 4) * 8)) & GENMASK(7, 0))
322 
323 #define ASF_INT_STATUS			0x300
324 #define ASF_INT_RAW_STATUS		0x304
325 #define ASF_INT_MASK			0x308
326 #define ASF_INT_TEST			0x30c
327 #define ASF_INT_FATAL_SELECT		0x310
328 #define ASF_INTEGRITY_ERR		BIT(6)
329 #define ASF_PROTOCOL_ERR		BIT(5)
330 #define ASF_TRANS_TIMEOUT_ERR		BIT(4)
331 #define ASF_CSR_ERR			BIT(3)
332 #define ASF_DAP_ERR			BIT(2)
333 #define ASF_SRAM_UNCORR_ERR		BIT(1)
334 #define ASF_SRAM_CORR_ERR		BIT(0)
335 
336 #define ASF_SRAM_CORR_FAULT_STATUS	0x320
337 #define ASF_SRAM_UNCORR_FAULT_STATUS	0x324
338 #define ASF_SRAM_CORR_FAULT_INSTANCE(x)	((x) >> 24)
339 #define ASF_SRAM_CORR_FAULT_ADDR(x)	((x) & GENMASK(23, 0))
340 
341 #define ASF_SRAM_FAULT_STATS		0x328
342 #define ASF_SRAM_FAULT_UNCORR_STATS(x)	((x) >> 16)
343 #define ASF_SRAM_FAULT_CORR_STATS(x)	((x) & GENMASK(15, 0))
344 
345 #define ASF_TRANS_TOUT_CTRL		0x330
346 #define ASF_TRANS_TOUT_EN		BIT(31)
347 #define ASF_TRANS_TOUT_VAL(x)	(x)
348 
349 #define ASF_TRANS_TOUT_FAULT_MASK	0x334
350 #define ASF_TRANS_TOUT_FAULT_STATUS	0x338
351 #define ASF_TRANS_TOUT_FAULT_APB	BIT(3)
352 #define ASF_TRANS_TOUT_FAULT_SCL_LOW	BIT(2)
353 #define ASF_TRANS_TOUT_FAULT_SCL_HIGH	BIT(1)
354 #define ASF_TRANS_TOUT_FAULT_FSCL_HIGH	BIT(0)
355 
356 #define ASF_PROTO_FAULT_MASK		0x340
357 #define ASF_PROTO_FAULT_STATUS		0x344
358 #define ASF_PROTO_FAULT_SLVSDR_RD_ABORT	BIT(31)
359 #define ASF_PROTO_FAULT_SLVDDR_FAIL	BIT(30)
360 #define ASF_PROTO_FAULT_S(x)		BIT(16 + (x))
361 #define ASF_PROTO_FAULT_MSTSDR_RD_ABORT	BIT(15)
362 #define ASF_PROTO_FAULT_MSTDDR_FAIL	BIT(14)
363 #define ASF_PROTO_FAULT_M(x)		BIT(x)
364 
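/* FIFO depths advertised by the controller in its CONF_STATUS registers. */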
365 struct cdns_i3c_master_caps {
366 	u32 cmdfifodepth;
367 	u32 cmdrfifodepth;
368 	u32 txfifodepth;
369 	u32 rxfifodepth;
370 	u32 ibirfifodepth;
371 };
372 
373 struct cdns_i3c_cmd {
374 	u32 cmd0;
375 	u32 cmd1;
376 	u32 tx_len;
377 	const void *tx_buf;
378 	u32 rx_len;
379 	void *rx_buf;
380 	u32 error;
381 };
382 
383 struct cdns_i3c_xfer {
384 	struct list_head node;
385 	struct completion comp;
386 	int ret;
387 	unsigned int ncmds;
	struct cdns_i3c_cmd cmds[];
389 };
390 
391 struct cdns_i3c_master {
392 	struct work_struct hj_work;
393 	struct i3c_master_controller base;
394 	u32 free_rr_slots;
395 	unsigned int maxdevs;
396 	struct {
397 		unsigned int num_slots;
398 		struct i3c_dev_desc **slots;
399 		spinlock_t lock;
400 	} ibi;
401 	struct {
402 		struct list_head list;
403 		struct cdns_i3c_xfer *cur;
404 		spinlock_t lock;
405 	} xferqueue;
406 	void __iomem *regs;
407 	struct clk *sysclk;
408 	struct clk *pclk;
409 	struct cdns_i3c_master_caps caps;
410 	unsigned long i3c_scl_lim;
411 };
412 
413 static inline struct cdns_i3c_master *
414 to_cdns_i3c_master(struct i3c_master_controller *master)
415 {
416 	return container_of(master, struct cdns_i3c_master, base);
417 }
418 
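/*
 * The TX/RX FIFOs are accessed one 32-bit word at a time; a trailing 1-3
 * byte chunk is packed into/unpacked from a temporary word.
 */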
419 static void cdns_i3c_master_wr_to_tx_fifo(struct cdns_i3c_master *master,
420 					  const u8 *bytes, int nbytes)
421 {
422 	writesl(master->regs + TX_FIFO, bytes, nbytes / 4);
423 	if (nbytes & 3) {
424 		u32 tmp = 0;
425 
426 		memcpy(&tmp, bytes + (nbytes & ~3), nbytes & 3);
427 		writesl(master->regs + TX_FIFO, &tmp, 1);
428 	}
429 }
430 
431 static void cdns_i3c_master_rd_from_rx_fifo(struct cdns_i3c_master *master,
432 					    u8 *bytes, int nbytes)
433 {
434 	readsl(master->regs + RX_FIFO, bytes, nbytes / 4);
435 	if (nbytes & 3) {
436 		u32 tmp;
437 
438 		readsl(master->regs + RX_FIFO, &tmp, 1);
439 		memcpy(bytes + (nbytes & ~3), &tmp, nbytes & 3);
440 	}
441 }
442 
443 static bool cdns_i3c_master_supports_ccc_cmd(struct i3c_master_controller *m,
444 					     const struct i3c_ccc_cmd *cmd)
445 {
446 	if (cmd->ndests > 1)
447 		return false;
448 
449 	switch (cmd->id) {
450 	case I3C_CCC_ENEC(true):
451 	case I3C_CCC_ENEC(false):
452 	case I3C_CCC_DISEC(true):
453 	case I3C_CCC_DISEC(false):
454 	case I3C_CCC_ENTAS(0, true):
455 	case I3C_CCC_ENTAS(0, false):
456 	case I3C_CCC_RSTDAA(true):
457 	case I3C_CCC_RSTDAA(false):
458 	case I3C_CCC_ENTDAA:
459 	case I3C_CCC_SETMWL(true):
460 	case I3C_CCC_SETMWL(false):
461 	case I3C_CCC_SETMRL(true):
462 	case I3C_CCC_SETMRL(false):
463 	case I3C_CCC_DEFSLVS:
464 	case I3C_CCC_ENTHDR(0):
465 	case I3C_CCC_SETDASA:
466 	case I3C_CCC_SETNEWDA:
467 	case I3C_CCC_GETMWL:
468 	case I3C_CCC_GETMRL:
469 	case I3C_CCC_GETPID:
470 	case I3C_CCC_GETBCR:
471 	case I3C_CCC_GETDCR:
472 	case I3C_CCC_GETSTATUS:
473 	case I3C_CCC_GETACCMST:
474 	case I3C_CCC_GETMXDS:
475 	case I3C_CCC_GETHDRCAP:
476 		return true;
477 	default:
478 		break;
479 	}
480 
481 	return false;
482 }
483 
484 static int cdns_i3c_master_disable(struct cdns_i3c_master *master)
485 {
486 	u32 status;
487 
488 	writel(readl(master->regs + CTRL) & ~CTRL_DEV_EN, master->regs + CTRL);
489 
490 	return readl_poll_timeout(master->regs + MST_STATUS0, status,
491 				  status & MST_STATUS0_IDLE, 10, 1000000);
492 }
493 
494 static void cdns_i3c_master_enable(struct cdns_i3c_master *master)
495 {
496 	writel(readl(master->regs + CTRL) | CTRL_DEV_EN, master->regs + CTRL);
497 }
498 
499 static struct cdns_i3c_xfer *
500 cdns_i3c_master_alloc_xfer(struct cdns_i3c_master *master, unsigned int ncmds)
501 {
502 	struct cdns_i3c_xfer *xfer;
503 
504 	xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
505 	if (!xfer)
506 		return NULL;
507 
508 	INIT_LIST_HEAD(&xfer->node);
509 	xfer->ncmds = ncmds;
510 	xfer->ret = -ETIMEDOUT;
511 
512 	return xfer;
513 }
514 
515 static void cdns_i3c_master_free_xfer(struct cdns_i3c_xfer *xfer)
516 {
517 	kfree(xfer);
518 }
519 
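/*
 * Called with xferqueue.lock held: push the TX payloads, queue the commands
 * in the CMD FIFOs and let the controller process them (CTRL_MCS). The
 * CMDD_EMP interrupt unmasked here signals completion of the whole batch.
 */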
520 static void cdns_i3c_master_start_xfer_locked(struct cdns_i3c_master *master)
521 {
522 	struct cdns_i3c_xfer *xfer = master->xferqueue.cur;
523 	unsigned int i;
524 
525 	if (!xfer)
526 		return;
527 
528 	writel(MST_INT_CMDD_EMP, master->regs + MST_ICR);
529 	for (i = 0; i < xfer->ncmds; i++) {
530 		struct cdns_i3c_cmd *cmd = &xfer->cmds[i];
531 
532 		cdns_i3c_master_wr_to_tx_fifo(master, cmd->tx_buf,
533 					      cmd->tx_len);
534 	}
535 
536 	for (i = 0; i < xfer->ncmds; i++) {
537 		struct cdns_i3c_cmd *cmd = &xfer->cmds[i];
538 
539 		writel(cmd->cmd1 | CMD1_FIFO_CMDID(i),
540 		       master->regs + CMD1_FIFO);
541 		writel(cmd->cmd0, master->regs + CMD0_FIFO);
542 	}
543 
544 	writel(readl(master->regs + CTRL) | CTRL_MCS,
545 	       master->regs + CTRL);
546 	writel(MST_INT_CMDD_EMP, master->regs + MST_IER);
547 }
548 
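/*
 * Called with xferqueue.lock held: drain the command response FIFO, copy RX
 * data back to the caller buffers, translate per-command error codes and
 * start the next queued transfer, if any.
 */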
549 static void cdns_i3c_master_end_xfer_locked(struct cdns_i3c_master *master,
550 					    u32 isr)
551 {
552 	struct cdns_i3c_xfer *xfer = master->xferqueue.cur;
553 	int i, ret = 0;
554 	u32 status0;
555 
556 	if (!xfer)
557 		return;
558 
559 	if (!(isr & MST_INT_CMDD_EMP))
560 		return;
561 
562 	writel(MST_INT_CMDD_EMP, master->regs + MST_IDR);
563 
564 	for (status0 = readl(master->regs + MST_STATUS0);
565 	     !(status0 & MST_STATUS0_CMDR_EMP);
566 	     status0 = readl(master->regs + MST_STATUS0)) {
567 		struct cdns_i3c_cmd *cmd;
568 		u32 cmdr, rx_len, id;
569 
570 		cmdr = readl(master->regs + CMDR);
571 		id = CMDR_CMDID(cmdr);
572 		if (id == CMDR_CMDID_HJACK_DISEC ||
573 		    id == CMDR_CMDID_HJACK_ENTDAA ||
574 		    WARN_ON(id >= xfer->ncmds))
575 			continue;
576 
577 		cmd = &xfer->cmds[CMDR_CMDID(cmdr)];
578 		rx_len = min_t(u32, CMDR_XFER_BYTES(cmdr), cmd->rx_len);
579 		cdns_i3c_master_rd_from_rx_fifo(master, cmd->rx_buf, rx_len);
580 		cmd->error = CMDR_ERROR(cmdr);
581 	}
582 
583 	for (i = 0; i < xfer->ncmds; i++) {
584 		switch (xfer->cmds[i].error) {
585 		case CMDR_NO_ERROR:
586 			break;
587 
588 		case CMDR_DDR_PREAMBLE_ERROR:
589 		case CMDR_DDR_PARITY_ERROR:
590 		case CMDR_M0_ERROR:
591 		case CMDR_M1_ERROR:
592 		case CMDR_M2_ERROR:
593 		case CMDR_MST_ABORT:
594 		case CMDR_NACK_RESP:
595 		case CMDR_DDR_DROPPED:
596 			ret = -EIO;
597 			break;
598 
599 		case CMDR_DDR_RX_FIFO_OVF:
600 		case CMDR_DDR_TX_FIFO_UNF:
601 			ret = -ENOSPC;
602 			break;
603 
604 		case CMDR_INVALID_DA:
605 		default:
606 			ret = -EINVAL;
607 			break;
608 		}
609 	}
610 
611 	xfer->ret = ret;
612 	complete(&xfer->comp);
613 
614 	xfer = list_first_entry_or_null(&master->xferqueue.list,
615 					struct cdns_i3c_xfer, node);
616 	if (xfer)
617 		list_del_init(&xfer->node);
618 
619 	master->xferqueue.cur = xfer;
620 	cdns_i3c_master_start_xfer_locked(master);
621 }
622 
623 static void cdns_i3c_master_queue_xfer(struct cdns_i3c_master *master,
624 				       struct cdns_i3c_xfer *xfer)
625 {
626 	unsigned long flags;
627 
628 	init_completion(&xfer->comp);
629 	spin_lock_irqsave(&master->xferqueue.lock, flags);
630 	if (master->xferqueue.cur) {
631 		list_add_tail(&xfer->node, &master->xferqueue.list);
632 	} else {
633 		master->xferqueue.cur = xfer;
634 		cdns_i3c_master_start_xfer_locked(master);
635 	}
636 	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
637 }
638 
639 static void cdns_i3c_master_unqueue_xfer(struct cdns_i3c_master *master,
640 					 struct cdns_i3c_xfer *xfer)
641 {
642 	unsigned long flags;
643 
644 	spin_lock_irqsave(&master->xferqueue.lock, flags);
645 	if (master->xferqueue.cur == xfer) {
646 		u32 status;
647 
648 		writel(readl(master->regs + CTRL) & ~CTRL_DEV_EN,
649 		       master->regs + CTRL);
650 		readl_poll_timeout_atomic(master->regs + MST_STATUS0, status,
651 					  status & MST_STATUS0_IDLE, 10,
652 					  1000000);
653 		master->xferqueue.cur = NULL;
654 		writel(FLUSH_RX_FIFO | FLUSH_TX_FIFO | FLUSH_CMD_FIFO |
655 		       FLUSH_CMD_RESP,
656 		       master->regs + FLUSH_CTRL);
657 		writel(MST_INT_CMDD_EMP, master->regs + MST_IDR);
658 		writel(readl(master->regs + CTRL) | CTRL_DEV_EN,
659 		       master->regs + CTRL);
660 	} else {
661 		list_del_init(&xfer->node);
662 	}
663 	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
664 }
665 
666 static enum i3c_error_code cdns_i3c_cmd_get_err(struct cdns_i3c_cmd *cmd)
667 {
668 	switch (cmd->error) {
669 	case CMDR_M0_ERROR:
670 		return I3C_ERROR_M0;
671 
672 	case CMDR_M1_ERROR:
673 		return I3C_ERROR_M1;
674 
675 	case CMDR_M2_ERROR:
676 	case CMDR_NACK_RESP:
677 		return I3C_ERROR_M2;
678 
679 	default:
680 		break;
681 	}
682 
683 	return I3C_ERROR_UNKNOWN;
684 }
685 
686 static int cdns_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
687 					struct i3c_ccc_cmd *cmd)
688 {
689 	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
690 	struct cdns_i3c_xfer *xfer;
691 	struct cdns_i3c_cmd *ccmd;
692 	int ret;
693 
694 	xfer = cdns_i3c_master_alloc_xfer(master, 1);
695 	if (!xfer)
696 		return -ENOMEM;
697 
698 	ccmd = xfer->cmds;
699 	ccmd->cmd1 = CMD1_FIFO_CCC(cmd->id);
700 	ccmd->cmd0 = CMD0_FIFO_IS_CCC |
701 		     CMD0_FIFO_PL_LEN(cmd->dests[0].payload.len);
702 
703 	if (cmd->id & I3C_CCC_DIRECT)
704 		ccmd->cmd0 |= CMD0_FIFO_DEV_ADDR(cmd->dests[0].addr);
705 
706 	if (cmd->rnw) {
707 		ccmd->cmd0 |= CMD0_FIFO_RNW;
708 		ccmd->rx_buf = cmd->dests[0].payload.data;
709 		ccmd->rx_len = cmd->dests[0].payload.len;
710 	} else {
711 		ccmd->tx_buf = cmd->dests[0].payload.data;
712 		ccmd->tx_len = cmd->dests[0].payload.len;
713 	}
714 
715 	cdns_i3c_master_queue_xfer(master, xfer);
716 	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
717 		cdns_i3c_master_unqueue_xfer(master, xfer);
718 
719 	ret = xfer->ret;
720 	cmd->err = cdns_i3c_cmd_get_err(&xfer->cmds[0]);
721 	cdns_i3c_master_free_xfer(xfer);
722 
723 	return ret;
724 }
725 
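/*
 * Private SDR transfers: each i3c_priv_xfer becomes one command. The whole
 * batch must fit in the CMD/TX/RX FIFOs, RSBC is set on every command but
 * the last one and BCH on the first one.
 */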
726 static int cdns_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
727 				      struct i3c_priv_xfer *xfers,
728 				      int nxfers)
729 {
730 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
731 	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
732 	int txslots = 0, rxslots = 0, i, ret;
733 	struct cdns_i3c_xfer *cdns_xfer;
734 
735 	for (i = 0; i < nxfers; i++) {
736 		if (xfers[i].len > CMD0_FIFO_PL_LEN_MAX)
737 			return -ENOTSUPP;
738 	}
739 
740 	if (!nxfers)
741 		return 0;
742 
743 	if (nxfers > master->caps.cmdfifodepth ||
744 	    nxfers > master->caps.cmdrfifodepth)
745 		return -ENOTSUPP;
746 
747 	/*
	 * First, make sure that all transactions (blocks of transfers separated
749 	 * by a STOP marker) fit in the FIFOs.
750 	 */
751 	for (i = 0; i < nxfers; i++) {
752 		if (xfers[i].rnw)
753 			rxslots += DIV_ROUND_UP(xfers[i].len, 4);
754 		else
755 			txslots += DIV_ROUND_UP(xfers[i].len, 4);
756 	}
757 
758 	if (rxslots > master->caps.rxfifodepth ||
759 	    txslots > master->caps.txfifodepth)
760 		return -ENOTSUPP;
761 
762 	cdns_xfer = cdns_i3c_master_alloc_xfer(master, nxfers);
763 	if (!cdns_xfer)
764 		return -ENOMEM;
765 
766 	for (i = 0; i < nxfers; i++) {
767 		struct cdns_i3c_cmd *ccmd = &cdns_xfer->cmds[i];
768 		u32 pl_len = xfers[i].len;
769 
770 		ccmd->cmd0 = CMD0_FIFO_DEV_ADDR(dev->info.dyn_addr) |
771 			CMD0_FIFO_PRIV_XMIT_MODE(XMIT_BURST_WITHOUT_SUBADDR);
772 
773 		if (xfers[i].rnw) {
774 			ccmd->cmd0 |= CMD0_FIFO_RNW;
775 			ccmd->rx_buf = xfers[i].data.in;
776 			ccmd->rx_len = xfers[i].len;
777 			pl_len++;
778 		} else {
779 			ccmd->tx_buf = xfers[i].data.out;
780 			ccmd->tx_len = xfers[i].len;
781 		}
782 
783 		ccmd->cmd0 |= CMD0_FIFO_PL_LEN(pl_len);
784 
785 		if (i < nxfers - 1)
786 			ccmd->cmd0 |= CMD0_FIFO_RSBC;
787 
788 		if (!i)
789 			ccmd->cmd0 |= CMD0_FIFO_BCH;
790 	}
791 
792 	cdns_i3c_master_queue_xfer(master, cdns_xfer);
793 	if (!wait_for_completion_timeout(&cdns_xfer->comp,
794 					 msecs_to_jiffies(1000)))
795 		cdns_i3c_master_unqueue_xfer(master, cdns_xfer);
796 
797 	ret = cdns_xfer->ret;
798 
799 	for (i = 0; i < nxfers; i++)
800 		xfers[i].err = cdns_i3c_cmd_get_err(&cdns_xfer->cmds[i]);
801 
802 	cdns_i3c_master_free_xfer(cdns_xfer);
803 
804 	return ret;
805 }
806 
807 static int cdns_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
808 				     const struct i2c_msg *xfers, int nxfers)
809 {
810 	struct i3c_master_controller *m = i2c_dev_get_master(dev);
811 	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
812 	unsigned int nrxwords = 0, ntxwords = 0;
813 	struct cdns_i3c_xfer *xfer;
814 	int i, ret = 0;
815 
816 	if (nxfers > master->caps.cmdfifodepth)
817 		return -ENOTSUPP;
818 
819 	for (i = 0; i < nxfers; i++) {
820 		if (xfers[i].len > CMD0_FIFO_PL_LEN_MAX)
821 			return -ENOTSUPP;
822 
823 		if (xfers[i].flags & I2C_M_RD)
824 			nrxwords += DIV_ROUND_UP(xfers[i].len, 4);
825 		else
826 			ntxwords += DIV_ROUND_UP(xfers[i].len, 4);
827 	}
828 
829 	if (ntxwords > master->caps.txfifodepth ||
830 	    nrxwords > master->caps.rxfifodepth)
831 		return -ENOTSUPP;
832 
833 	xfer = cdns_i3c_master_alloc_xfer(master, nxfers);
834 	if (!xfer)
835 		return -ENOMEM;
836 
837 	for (i = 0; i < nxfers; i++) {
838 		struct cdns_i3c_cmd *ccmd = &xfer->cmds[i];
839 
840 		ccmd->cmd0 = CMD0_FIFO_DEV_ADDR(xfers[i].addr) |
841 			CMD0_FIFO_PL_LEN(xfers[i].len) |
842 			CMD0_FIFO_PRIV_XMIT_MODE(XMIT_BURST_WITHOUT_SUBADDR);
843 
844 		if (xfers[i].flags & I2C_M_TEN)
845 			ccmd->cmd0 |= CMD0_FIFO_IS_10B;
846 
847 		if (xfers[i].flags & I2C_M_RD) {
848 			ccmd->cmd0 |= CMD0_FIFO_RNW;
849 			ccmd->rx_buf = xfers[i].buf;
850 			ccmd->rx_len = xfers[i].len;
851 		} else {
852 			ccmd->tx_buf = xfers[i].buf;
853 			ccmd->tx_len = xfers[i].len;
854 		}
855 	}
856 
857 	cdns_i3c_master_queue_xfer(master, xfer);
858 	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
859 		cdns_i3c_master_unqueue_xfer(master, xfer);
860 
861 	ret = xfer->ret;
862 	cdns_i3c_master_free_xfer(xfer);
863 
864 	return ret;
865 }
866 
867 struct cdns_i3c_i2c_dev_data {
868 	u16 id;
869 	s16 ibi;
870 	struct i3c_generic_ibi_pool *ibi_pool;
871 };
872 
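/*
 * DEV_ID_RR0 address layout: bits [7:1] hold addr[6:0], bits [15:13] hold
 * addr[9:7] (10-bit I2C addresses) and bit 0 carries the odd parity of the
 * 7-bit address.
 */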
873 static u32 prepare_rr0_dev_address(u32 addr)
874 {
875 	u32 ret = (addr << 1) & 0xff;
876 
877 	/* RR0[7:1] = addr[6:0] */
878 	ret |= (addr & GENMASK(6, 0)) << 1;
879 
880 	/* RR0[15:13] = addr[9:7] */
881 	ret |= (addr & GENMASK(9, 7)) << 6;
882 
883 	/* RR0[0] = ~XOR(addr[6:0]) */
884 	if (!(hweight8(addr & 0x7f) & 1))
885 		ret |= 1;
886 
887 	return ret;
888 }
889 
890 static void cdns_i3c_master_upd_i3c_addr(struct i3c_dev_desc *dev)
891 {
892 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
893 	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
894 	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
895 	u32 rr;
896 
897 	rr = prepare_rr0_dev_address(dev->info.dyn_addr ?
898 				     dev->info.dyn_addr :
899 				     dev->info.static_addr);
900 	writel(DEV_ID_RR0_IS_I3C | rr, master->regs + DEV_ID_RR0(data->id));
901 }
902 
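/*
 * RR slot 0 is reserved for the master itself. With dyn_addr == 0 simply
 * pick a free slot; otherwise look for the active slot that is already
 * programmed with that dynamic address.
 */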
903 static int cdns_i3c_master_get_rr_slot(struct cdns_i3c_master *master,
904 				       u8 dyn_addr)
905 {
906 	u32 activedevs, rr;
907 	int i;
908 
909 	if (!dyn_addr) {
910 		if (!master->free_rr_slots)
911 			return -ENOSPC;
912 
913 		return ffs(master->free_rr_slots) - 1;
914 	}
915 
916 	activedevs = readl(master->regs + DEVS_CTRL) &
917 		     DEVS_CTRL_DEVS_ACTIVE_MASK;
918 
919 	for (i = 1; i <= master->maxdevs; i++) {
920 		if (!(BIT(i) & activedevs))
921 			continue;
922 
923 		rr = readl(master->regs + DEV_ID_RR0(i));
924 		if (!(rr & DEV_ID_RR0_IS_I3C) ||
925 		    DEV_ID_RR0_GET_DEV_ADDR(rr) != dyn_addr)
926 			continue;
927 
928 		return i;
929 	}
930 
931 	return -EINVAL;
932 }
933 
934 static int cdns_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
935 					    u8 old_dyn_addr)
936 {
937 	cdns_i3c_master_upd_i3c_addr(dev);
938 
939 	return 0;
940 }
941 
942 static int cdns_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
943 {
944 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
945 	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
946 	struct cdns_i3c_i2c_dev_data *data;
947 	int slot;
948 
949 	data = kzalloc(sizeof(*data), GFP_KERNEL);
950 	if (!data)
951 		return -ENOMEM;
952 
953 	slot = cdns_i3c_master_get_rr_slot(master, dev->info.dyn_addr);
954 	if (slot < 0) {
955 		kfree(data);
956 		return slot;
957 	}
958 
959 	data->ibi = -1;
960 	data->id = slot;
961 	i3c_dev_set_master_data(dev, data);
962 	master->free_rr_slots &= ~BIT(slot);
963 
964 	if (!dev->info.dyn_addr) {
965 		cdns_i3c_master_upd_i3c_addr(dev);
966 		writel(readl(master->regs + DEVS_CTRL) |
967 		       DEVS_CTRL_DEV_ACTIVE(data->id),
968 		       master->regs + DEVS_CTRL);
969 	}
970 
971 	return 0;
972 }
973 
974 static void cdns_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
975 {
976 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
977 	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
978 	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
979 
980 	writel(readl(master->regs + DEVS_CTRL) |
981 	       DEVS_CTRL_DEV_CLR(data->id),
982 	       master->regs + DEVS_CTRL);
983 
984 	i3c_dev_set_master_data(dev, NULL);
985 	master->free_rr_slots |= BIT(data->id);
986 	kfree(data);
987 }
988 
989 static int cdns_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
990 {
991 	struct i3c_master_controller *m = i2c_dev_get_master(dev);
992 	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
993 	struct cdns_i3c_i2c_dev_data *data;
994 	int slot;
995 
996 	slot = cdns_i3c_master_get_rr_slot(master, 0);
997 	if (slot < 0)
998 		return slot;
999 
1000 	data = kzalloc(sizeof(*data), GFP_KERNEL);
1001 	if (!data)
1002 		return -ENOMEM;
1003 
1004 	data->id = slot;
1005 	master->free_rr_slots &= ~BIT(slot);
1006 	i2c_dev_set_master_data(dev, data);
1007 
1008 	writel(prepare_rr0_dev_address(dev->boardinfo->base.addr),
1009 	       master->regs + DEV_ID_RR0(data->id));
1010 	writel(dev->boardinfo->lvr, master->regs + DEV_ID_RR2(data->id));
1011 	writel(readl(master->regs + DEVS_CTRL) |
1012 	       DEVS_CTRL_DEV_ACTIVE(data->id),
1013 	       master->regs + DEVS_CTRL);
1014 
1015 	return 0;
1016 }
1017 
1018 static void cdns_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
1019 {
1020 	struct i3c_master_controller *m = i2c_dev_get_master(dev);
1021 	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
1022 	struct cdns_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
1023 
1024 	writel(readl(master->regs + DEVS_CTRL) |
1025 	       DEVS_CTRL_DEV_CLR(data->id),
1026 	       master->regs + DEVS_CTRL);
1027 	master->free_rr_slots |= BIT(data->id);
1028 
1029 	i2c_dev_set_master_data(dev, NULL);
1030 	kfree(data);
1031 }
1032 
1033 static void cdns_i3c_master_bus_cleanup(struct i3c_master_controller *m)
1034 {
1035 	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
1036 
1037 	cdns_i3c_master_disable(master);
1038 }
1039 
1040 static void cdns_i3c_master_dev_rr_to_info(struct cdns_i3c_master *master,
1041 					   unsigned int slot,
1042 					   struct i3c_device_info *info)
1043 {
1044 	u32 rr;
1045 
1046 	memset(info, 0, sizeof(*info));
1047 	rr = readl(master->regs + DEV_ID_RR0(slot));
1048 	info->dyn_addr = DEV_ID_RR0_GET_DEV_ADDR(rr);
1049 	rr = readl(master->regs + DEV_ID_RR2(slot));
1050 	info->dcr = rr;
1051 	info->bcr = rr >> 8;
1052 	info->pid = rr >> 16;
1053 	info->pid |= (u64)readl(master->regs + DEV_ID_RR1(slot)) << 16;
1054 }
1055 
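/*
 * Walk the I3C devices on the bus, pick the most constraining SDR rate
 * limitation they advertise and stretch the SCL push-pull low period so
 * the bus never exceeds it.
 */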
1056 static void cdns_i3c_master_upd_i3c_scl_lim(struct cdns_i3c_master *master)
1057 {
1058 	struct i3c_master_controller *m = &master->base;
1059 	unsigned long i3c_lim_period, pres_step, ncycles;
1060 	struct i3c_bus *bus = i3c_master_get_bus(m);
1061 	unsigned long new_i3c_scl_lim = 0;
1062 	struct i3c_dev_desc *dev;
1063 	u32 prescl1, ctrl;
1064 
1065 	i3c_bus_for_each_i3cdev(bus, dev) {
1066 		unsigned long max_fscl;
1067 
1068 		max_fscl = max(I3C_CCC_MAX_SDR_FSCL(dev->info.max_read_ds),
1069 			       I3C_CCC_MAX_SDR_FSCL(dev->info.max_write_ds));
1070 		switch (max_fscl) {
1071 		case I3C_SDR1_FSCL_8MHZ:
1072 			max_fscl = 8000000;
1073 			break;
1074 		case I3C_SDR2_FSCL_6MHZ:
1075 			max_fscl = 6000000;
1076 			break;
1077 		case I3C_SDR3_FSCL_4MHZ:
1078 			max_fscl = 4000000;
1079 			break;
1080 		case I3C_SDR4_FSCL_2MHZ:
1081 			max_fscl = 2000000;
1082 			break;
1083 		case I3C_SDR0_FSCL_MAX:
1084 		default:
1085 			max_fscl = 0;
1086 			break;
1087 		}
1088 
1089 		if (max_fscl &&
1090 		    (new_i3c_scl_lim > max_fscl || !new_i3c_scl_lim))
1091 			new_i3c_scl_lim = max_fscl;
1092 	}
1093 
1094 	/* Only update PRESCL_CTRL1 if the I3C SCL limitation has changed. */
1095 	if (new_i3c_scl_lim == master->i3c_scl_lim)
1096 		return;
1097 	master->i3c_scl_lim = new_i3c_scl_lim;
1098 	if (!new_i3c_scl_lim)
1099 		return;
1100 	pres_step = 1000000000UL / (bus->scl_rate.i3c * 4);
1101 
1102 	/* Configure PP_LOW to meet I3C slave limitations. */
1103 	prescl1 = readl(master->regs + PRESCL_CTRL1) &
1104 		  ~PRESCL_CTRL1_PP_LOW_MASK;
1105 	ctrl = readl(master->regs + CTRL);
1106 
1107 	i3c_lim_period = DIV_ROUND_UP(1000000000, master->i3c_scl_lim);
1108 	ncycles = DIV_ROUND_UP(i3c_lim_period, pres_step);
1109 	if (ncycles < 4)
1110 		ncycles = 0;
1111 	else
1112 		ncycles -= 4;
1113 
1114 	prescl1 |= PRESCL_CTRL1_PP_LOW(ncycles);
1115 
1116 	/* Disable I3C master before updating PRESCL_CTRL1. */
1117 	if (ctrl & CTRL_DEV_EN)
1118 		cdns_i3c_master_disable(master);
1119 
1120 	writel(prescl1, master->regs + PRESCL_CTRL1);
1121 
1122 	if (ctrl & CTRL_DEV_EN)
1123 		cdns_i3c_master_enable(master);
1124 }
1125 
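/*
 * Dynamic Address Assignment: pre-program a free dynamic address in every
 * unused RR slot, run ENTDAA, register the devices that grabbed one of
 * those addresses and broadcast the new device list with DEFSLVS.
 */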
1126 static int cdns_i3c_master_do_daa(struct i3c_master_controller *m)
1127 {
1128 	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
1129 	u32 olddevs, newdevs;
1130 	int ret, slot;
1131 	u8 addrs[MAX_DEVS] = { };
1132 	u8 last_addr = 0;
1133 
1134 	olddevs = readl(master->regs + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
1135 
1136 	/* Prepare RR slots before launching DAA. */
1137 	for (slot = 1; slot <= master->maxdevs; slot++) {
1138 		if (olddevs & BIT(slot))
1139 			continue;
1140 
1141 		ret = i3c_master_get_free_addr(m, last_addr + 1);
1142 		if (ret < 0)
1143 			return -ENOSPC;
1144 
1145 		last_addr = ret;
1146 		addrs[slot] = last_addr;
1147 		writel(prepare_rr0_dev_address(last_addr) | DEV_ID_RR0_IS_I3C,
1148 		       master->regs + DEV_ID_RR0(slot));
1149 		writel(0, master->regs + DEV_ID_RR1(slot));
1150 		writel(0, master->regs + DEV_ID_RR2(slot));
1151 	}
1152 
1153 	ret = i3c_master_entdaa_locked(&master->base);
1154 	if (ret && ret != I3C_ERROR_M2)
1155 		return ret;
1156 
1157 	newdevs = readl(master->regs + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
1158 	newdevs &= ~olddevs;
1159 
	/*
	 * Register all devices that took part in DAA. We already have the
	 * addresses assigned to them in the addrs array.
	 */
1164 	for (slot = 1; slot <= master->maxdevs; slot++) {
1165 		if (newdevs & BIT(slot))
1166 			i3c_master_add_i3c_dev_locked(m, addrs[slot]);
1167 	}
1168 
1169 	/*
1170 	 * Clear slots that ended up not being used. Can be caused by I3C
1171 	 * device creation failure or when the I3C device was already known
1172 	 * by the system but with a different address (in this case the device
1173 	 * already has a slot and does not need a new one).
1174 	 */
1175 	writel(readl(master->regs + DEVS_CTRL) |
1176 	       master->free_rr_slots << DEVS_CTRL_DEV_CLR_SHIFT,
1177 	       master->regs + DEVS_CTRL);
1178 
1179 	i3c_master_defslvs_locked(&master->base);
1180 
1181 	cdns_i3c_master_upd_i3c_scl_lim(master);
1182 
1183 	/* Unmask Hot-Join and Mastership request interrupts. */
1184 	i3c_master_enec_locked(m, I3C_BROADCAST_ADDR,
1185 			       I3C_CCC_EVENT_HJ | I3C_CCC_EVENT_MR);
1186 
1187 	return 0;
1188 }
1189 
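/*
 * Bus initialization: program the bus mode, derive the I3C and I2C
 * prescalers from the sysclk rate, reserve a dynamic address for this
 * master in RR slot 0 and finally enable the controller.
 */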
1190 static int cdns_i3c_master_bus_init(struct i3c_master_controller *m)
1191 {
1192 	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
1193 	unsigned long pres_step, sysclk_rate, max_i2cfreq;
1194 	struct i3c_bus *bus = i3c_master_get_bus(m);
	u32 ctrl, prescl0, prescl1, pres;
1196 	struct i3c_device_info info = { };
1197 	int ret, ncycles;
1198 
1199 	switch (bus->mode) {
1200 	case I3C_BUS_MODE_PURE:
1201 		ctrl = CTRL_PURE_BUS_MODE;
1202 		break;
1203 
1204 	case I3C_BUS_MODE_MIXED_FAST:
1205 		ctrl = CTRL_MIXED_FAST_BUS_MODE;
1206 		break;
1207 
1208 	case I3C_BUS_MODE_MIXED_SLOW:
1209 		ctrl = CTRL_MIXED_SLOW_BUS_MODE;
1210 		break;
1211 
1212 	default:
1213 		return -EINVAL;
1214 	}
1215 
1216 	sysclk_rate = clk_get_rate(master->sysclk);
1217 	if (!sysclk_rate)
1218 		return -EINVAL;
1219 
1220 	pres = DIV_ROUND_UP(sysclk_rate, (bus->scl_rate.i3c * 4)) - 1;
1221 	if (pres > PRESCL_CTRL0_MAX)
1222 		return -ERANGE;
1223 
1224 	bus->scl_rate.i3c = sysclk_rate / ((pres + 1) * 4);
1225 
1226 	prescl0 = PRESCL_CTRL0_I3C(pres);
1227 
1231 	max_i2cfreq = bus->scl_rate.i2c;
1232 
1233 	pres = (sysclk_rate / (max_i2cfreq * 5)) - 1;
1234 	if (pres > PRESCL_CTRL0_MAX)
1235 		return -ERANGE;
1236 
1237 	bus->scl_rate.i2c = sysclk_rate / ((pres + 1) * 5);
1238 
1239 	prescl0 |= PRESCL_CTRL0_I2C(pres);
1240 	writel(prescl0, master->regs + PRESCL_CTRL0);
1241 
1242 	/* Calculate OD and PP low. */
1243 	pres_step = 1000000000 / (bus->scl_rate.i3c * 4);
1244 	ncycles = DIV_ROUND_UP(I3C_BUS_TLOW_OD_MIN_NS, pres_step) - 2;
1245 	if (ncycles < 0)
1246 		ncycles = 0;
1247 	prescl1 = PRESCL_CTRL1_OD_LOW(ncycles);
1248 	writel(prescl1, master->regs + PRESCL_CTRL1);
1249 
1250 	/* Get an address for the master. */
1251 	ret = i3c_master_get_free_addr(m, 0);
1252 	if (ret < 0)
1253 		return ret;
1254 
1255 	writel(prepare_rr0_dev_address(ret) | DEV_ID_RR0_IS_I3C,
1256 	       master->regs + DEV_ID_RR0(0));
1257 
1258 	cdns_i3c_master_dev_rr_to_info(master, 0, &info);
1259 	if (info.bcr & I3C_BCR_HDR_CAP)
1260 		info.hdr_cap = I3C_CCC_HDR_MODE(I3C_HDR_DDR);
1261 
1262 	ret = i3c_master_set_info(&master->base, &info);
1263 	if (ret)
1264 		return ret;
1265 
1266 	/*
1267 	 * Enable Hot-Join, and, when a Hot-Join request happens, disable all
1268 	 * events coming from this device.
1269 	 *
1270 	 * We will issue ENTDAA afterwards from the threaded IRQ handler.
1271 	 */
1272 	ctrl |= CTRL_HJ_ACK | CTRL_HJ_DISEC | CTRL_HALT_EN | CTRL_MCS_EN;
1273 	writel(ctrl, master->regs + CTRL);
1274 
1275 	cdns_i3c_master_enable(master);
1276 
1277 	return 0;
1278 }
1279 
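/*
 * Forward an IBI payload to the device's IBI pool. The IBI data FIFO is
 * drained even when the payload is dropped, so that data from the next IBI
 * does not get mixed with leftovers.
 */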
1280 static void cdns_i3c_master_handle_ibi(struct cdns_i3c_master *master,
1281 				       u32 ibir)
1282 {
1283 	struct cdns_i3c_i2c_dev_data *data;
1284 	bool data_consumed = false;
1285 	struct i3c_ibi_slot *slot;
1286 	u32 id = IBIR_SLVID(ibir);
1287 	struct i3c_dev_desc *dev;
1288 	size_t nbytes;
1289 	u8 *buf;
1290 
1291 	/*
1292 	 * FIXME: maybe we should report the FIFO OVF errors to the upper
1293 	 * layer.
1294 	 */
1295 	if (id >= master->ibi.num_slots || (ibir & IBIR_ERROR))
1296 		goto out;
1297 
1298 	dev = master->ibi.slots[id];
1299 	spin_lock(&master->ibi.lock);
1300 
1301 	data = i3c_dev_get_master_data(dev);
1302 	slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
1303 	if (!slot)
1304 		goto out_unlock;
1305 
1306 	buf = slot->data;
1307 
1308 	nbytes = IBIR_XFER_BYTES(ibir);
1309 	readsl(master->regs + IBI_DATA_FIFO, buf, nbytes / 4);
	if (nbytes & 3) {
1311 		u32 tmp = __raw_readl(master->regs + IBI_DATA_FIFO);
1312 
1313 		memcpy(buf + (nbytes & ~3), &tmp, nbytes & 3);
1314 	}
1315 
1316 	slot->len = min_t(unsigned int, IBIR_XFER_BYTES(ibir),
1317 			  dev->ibi->max_payload_len);
1318 	i3c_master_queue_ibi(dev, slot);
1319 	data_consumed = true;
1320 
1321 out_unlock:
1322 	spin_unlock(&master->ibi.lock);
1323 
1324 out:
1325 	/* Consume data from the FIFO if it's not been done already. */
1326 	if (!data_consumed) {
1327 		int i;
1328 
1329 		for (i = 0; i < IBIR_XFER_BYTES(ibir); i += 4)
1330 			readl(master->regs + IBI_DATA_FIFO);
1331 	}
1332 }
1333 
static void cdns_i3c_master_demux_ibis(struct cdns_i3c_master *master)
1335 {
1336 	u32 status0;
1337 
1338 	writel(MST_INT_IBIR_THR, master->regs + MST_ICR);
1339 
1340 	for (status0 = readl(master->regs + MST_STATUS0);
1341 	     !(status0 & MST_STATUS0_IBIR_EMP);
1342 	     status0 = readl(master->regs + MST_STATUS0)) {
1343 		u32 ibir = readl(master->regs + IBIR);
1344 
1345 		switch (IBIR_TYPE(ibir)) {
1346 		case IBIR_TYPE_IBI:
1347 			cdns_i3c_master_handle_ibi(master, ibir);
1348 			break;
1349 
1350 		case IBIR_TYPE_HJ:
1351 			WARN_ON(IBIR_XFER_BYTES(ibir) || (ibir & IBIR_ERROR));
1352 			queue_work(master->base.wq, &master->hj_work);
1353 			break;
1354 
1355 		case IBIR_TYPE_MR:
			WARN_ON(IBIR_XFER_BYTES(ibir) || (ibir & IBIR_ERROR));
			break;

		default:
1358 			break;
1359 		}
1360 	}
1361 }
1362 
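/* IRQ handler: complete the running transfer and demux pending IBIs. */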
1363 static irqreturn_t cdns_i3c_master_interrupt(int irq, void *data)
1364 {
1365 	struct cdns_i3c_master *master = data;
1366 	u32 status;
1367 
1368 	status = readl(master->regs + MST_ISR);
1369 	if (!(status & readl(master->regs + MST_IMR)))
1370 		return IRQ_NONE;
1371 
1372 	spin_lock(&master->xferqueue.lock);
1373 	cdns_i3c_master_end_xfer_locked(master, status);
1374 	spin_unlock(&master->xferqueue.lock);
1375 
1376 	if (status & MST_INT_IBIR_THR)
		cdns_i3c_master_demux_ibis(master);
1378 
1379 	return IRQ_HANDLED;
1380 }
1381 
1382 static int cdns_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
1383 {
1384 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
1385 	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
1386 	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1387 	unsigned long flags;
1388 	u32 sirmap;
1389 	int ret;
1390 
1391 	ret = i3c_master_disec_locked(m, dev->info.dyn_addr,
1392 				      I3C_CCC_EVENT_SIR);
1393 	if (ret)
1394 		return ret;
1395 
1396 	spin_lock_irqsave(&master->ibi.lock, flags);
1397 	sirmap = readl(master->regs + SIR_MAP_DEV_REG(data->ibi));
1398 	sirmap &= ~SIR_MAP_DEV_CONF_MASK(data->ibi);
1399 	sirmap |= SIR_MAP_DEV_CONF(data->ibi,
1400 				   SIR_MAP_DEV_DA(I3C_BROADCAST_ADDR));
1401 	writel(sirmap, master->regs + SIR_MAP_DEV_REG(data->ibi));
1402 	spin_unlock_irqrestore(&master->ibi.lock, flags);
1403 
1404 	return ret;
1405 }
1406 
1407 static int cdns_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
1408 {
1409 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
1410 	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
1411 	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1412 	unsigned long flags;
1413 	u32 sircfg, sirmap;
1414 	int ret;
1415 
1416 	spin_lock_irqsave(&master->ibi.lock, flags);
1417 	sirmap = readl(master->regs + SIR_MAP_DEV_REG(data->ibi));
1418 	sirmap &= ~SIR_MAP_DEV_CONF_MASK(data->ibi);
1419 	sircfg = SIR_MAP_DEV_ROLE(dev->info.bcr >> 6) |
1420 		 SIR_MAP_DEV_DA(dev->info.dyn_addr) |
1421 		 SIR_MAP_DEV_PL(dev->info.max_ibi_len) |
1422 		 SIR_MAP_DEV_ACK;
1423 
1424 	if (dev->info.bcr & I3C_BCR_MAX_DATA_SPEED_LIM)
1425 		sircfg |= SIR_MAP_DEV_SLOW;
1426 
1427 	sirmap |= SIR_MAP_DEV_CONF(data->ibi, sircfg);
1428 	writel(sirmap, master->regs + SIR_MAP_DEV_REG(data->ibi));
1429 	spin_unlock_irqrestore(&master->ibi.lock, flags);
1430 
1431 	ret = i3c_master_enec_locked(m, dev->info.dyn_addr,
1432 				     I3C_CCC_EVENT_SIR);
1433 	if (ret) {
1434 		spin_lock_irqsave(&master->ibi.lock, flags);
1435 		sirmap = readl(master->regs + SIR_MAP_DEV_REG(data->ibi));
1436 		sirmap &= ~SIR_MAP_DEV_CONF_MASK(data->ibi);
1437 		sirmap |= SIR_MAP_DEV_CONF(data->ibi,
1438 					   SIR_MAP_DEV_DA(I3C_BROADCAST_ADDR));
1439 		writel(sirmap, master->regs + SIR_MAP_DEV_REG(data->ibi));
1440 		spin_unlock_irqrestore(&master->ibi.lock, flags);
1441 	}
1442 
1443 	return ret;
1444 }
1445 
1446 static int cdns_i3c_master_request_ibi(struct i3c_dev_desc *dev,
1447 				       const struct i3c_ibi_setup *req)
1448 {
1449 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
1450 	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
1451 	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1452 	unsigned long flags;
1453 	unsigned int i;
1454 
1455 	data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
1456 	if (IS_ERR(data->ibi_pool))
1457 		return PTR_ERR(data->ibi_pool);
1458 
1459 	spin_lock_irqsave(&master->ibi.lock, flags);
1460 	for (i = 0; i < master->ibi.num_slots; i++) {
1461 		if (!master->ibi.slots[i]) {
1462 			data->ibi = i;
1463 			master->ibi.slots[i] = dev;
1464 			break;
1465 		}
1466 	}
1467 	spin_unlock_irqrestore(&master->ibi.lock, flags);
1468 
1469 	if (i < master->ibi.num_slots)
1470 		return 0;
1471 
1472 	i3c_generic_ibi_free_pool(data->ibi_pool);
1473 	data->ibi_pool = NULL;
1474 
1475 	return -ENOSPC;
1476 }
1477 
1478 static void cdns_i3c_master_free_ibi(struct i3c_dev_desc *dev)
1479 {
1480 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
1481 	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
1482 	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1483 	unsigned long flags;
1484 
1485 	spin_lock_irqsave(&master->ibi.lock, flags);
1486 	master->ibi.slots[data->ibi] = NULL;
1487 	data->ibi = -1;
1488 	spin_unlock_irqrestore(&master->ibi.lock, flags);
1489 
1490 	i3c_generic_ibi_free_pool(data->ibi_pool);
1491 }
1492 
1493 static void cdns_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
1494 					     struct i3c_ibi_slot *slot)
1495 {
1496 	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1497 
1498 	i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
1499 }
1500 
1501 static const struct i3c_master_controller_ops cdns_i3c_master_ops = {
1502 	.bus_init = cdns_i3c_master_bus_init,
1503 	.bus_cleanup = cdns_i3c_master_bus_cleanup,
1504 	.do_daa = cdns_i3c_master_do_daa,
1505 	.attach_i3c_dev = cdns_i3c_master_attach_i3c_dev,
1506 	.reattach_i3c_dev = cdns_i3c_master_reattach_i3c_dev,
1507 	.detach_i3c_dev = cdns_i3c_master_detach_i3c_dev,
1508 	.attach_i2c_dev = cdns_i3c_master_attach_i2c_dev,
1509 	.detach_i2c_dev = cdns_i3c_master_detach_i2c_dev,
1510 	.supports_ccc_cmd = cdns_i3c_master_supports_ccc_cmd,
1511 	.send_ccc_cmd = cdns_i3c_master_send_ccc_cmd,
1512 	.priv_xfers = cdns_i3c_master_priv_xfers,
1513 	.i2c_xfers = cdns_i3c_master_i2c_xfers,
1514 	.enable_ibi = cdns_i3c_master_enable_ibi,
1515 	.disable_ibi = cdns_i3c_master_disable_ibi,
1516 	.request_ibi = cdns_i3c_master_request_ibi,
1517 	.free_ibi = cdns_i3c_master_free_ibi,
1518 	.recycle_ibi_slot = cdns_i3c_master_recycle_ibi_slot,
1519 };
1520 
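/*
 * Hot-Join work item: give the newly joined device a dynamic address by
 * re-running DAA.
 */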
1521 static void cdns_i3c_master_hj(struct work_struct *work)
1522 {
1523 	struct cdns_i3c_master *master = container_of(work,
1524 						      struct cdns_i3c_master,
1525 						      hj_work);
1526 
1527 	i3c_master_do_daa(&master->base);
1528 }
1529 
1530 static int cdns_i3c_master_probe(struct platform_device *pdev)
1531 {
1532 	struct cdns_i3c_master *master;
1533 	struct resource *res;
1534 	int ret, irq;
1535 	u32 val;
1536 
1537 	master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL);
1538 	if (!master)
1539 		return -ENOMEM;
1540 
1541 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1542 	master->regs = devm_ioremap_resource(&pdev->dev, res);
1543 	if (IS_ERR(master->regs))
1544 		return PTR_ERR(master->regs);
1545 
1546 	master->pclk = devm_clk_get(&pdev->dev, "pclk");
1547 	if (IS_ERR(master->pclk))
1548 		return PTR_ERR(master->pclk);
1549 
1550 	master->sysclk = devm_clk_get(&pdev->dev, "sysclk");
1551 	if (IS_ERR(master->sysclk))
1552 		return PTR_ERR(master->sysclk);
1553 
1554 	irq = platform_get_irq(pdev, 0);
1555 	if (irq < 0)
1556 		return irq;
1557 
1558 	ret = clk_prepare_enable(master->pclk);
1559 	if (ret)
1560 		return ret;
1561 
1562 	ret = clk_prepare_enable(master->sysclk);
1563 	if (ret)
1564 		goto err_disable_pclk;
1565 
1566 	if (readl(master->regs + DEV_ID) != DEV_ID_I3C_MASTER) {
1567 		ret = -EINVAL;
1568 		goto err_disable_sysclk;
1569 	}
1570 
1571 	spin_lock_init(&master->xferqueue.lock);
1572 	INIT_LIST_HEAD(&master->xferqueue.list);
1573 
1574 	INIT_WORK(&master->hj_work, cdns_i3c_master_hj);
1575 	writel(0xffffffff, master->regs + MST_IDR);
1576 	writel(0xffffffff, master->regs + SLV_IDR);
1577 	ret = devm_request_irq(&pdev->dev, irq, cdns_i3c_master_interrupt, 0,
1578 			       dev_name(&pdev->dev), master);
1579 	if (ret)
1580 		goto err_disable_sysclk;
1581 
1582 	platform_set_drvdata(pdev, master);
1583 
	val = readl(master->regs + CONF_STATUS0);

	/* Device ID0 is reserved to describe this master. */
	master->maxdevs = CONF_STATUS0_DEVS_NUM(val);
	master->free_rr_slots = GENMASK(master->maxdevs, 1);
	master->caps.ibirfifodepth = CONF_STATUS0_IBIR_DEPTH(val);
	master->caps.cmdrfifodepth = CONF_STATUS0_CMDR_DEPTH(val);

	val = readl(master->regs + CONF_STATUS1);
	master->caps.cmdfifodepth = CONF_STATUS1_CMD_DEPTH(val);
	master->caps.rxfifodepth = CONF_STATUS1_RX_DEPTH(val);
	master->caps.txfifodepth = CONF_STATUS1_TX_DEPTH(val);
1596 
1597 	spin_lock_init(&master->ibi.lock);
1598 	master->ibi.num_slots = CONF_STATUS1_IBI_HW_RES(val);
1599 	master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
1600 					 sizeof(*master->ibi.slots),
1601 					 GFP_KERNEL);
	if (!master->ibi.slots) {
		ret = -ENOMEM;
		goto err_disable_sysclk;
	}
1604 
1605 	writel(IBIR_THR(1), master->regs + CMD_IBI_THR_CTRL);
1606 	writel(MST_INT_IBIR_THR, master->regs + MST_IER);
1607 	writel(DEVS_CTRL_DEV_CLR_ALL, master->regs + DEVS_CTRL);
1608 
1609 	ret = i3c_master_register(&master->base, &pdev->dev,
1610 				  &cdns_i3c_master_ops, false);
1611 	if (ret)
1612 		goto err_disable_sysclk;
1613 
1614 	return 0;
1615 
1616 err_disable_sysclk:
1617 	clk_disable_unprepare(master->sysclk);
1618 
1619 err_disable_pclk:
1620 	clk_disable_unprepare(master->pclk);
1621 
1622 	return ret;
1623 }
1624 
1625 static int cdns_i3c_master_remove(struct platform_device *pdev)
1626 {
1627 	struct cdns_i3c_master *master = platform_get_drvdata(pdev);
1628 	int ret;
1629 
1630 	ret = i3c_master_unregister(&master->base);
1631 	if (ret)
1632 		return ret;
1633 
1634 	clk_disable_unprepare(master->sysclk);
1635 	clk_disable_unprepare(master->pclk);
1636 
1637 	return 0;
1638 }
1639 
1640 static const struct of_device_id cdns_i3c_master_of_ids[] = {
1641 	{ .compatible = "cdns,i3c-master" },
1642 	{ /* sentinel */ },
1643 };
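MODULE_DEVICE_TABLE(of, cdns_i3c_master_of_ids);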
1644 
1645 static struct platform_driver cdns_i3c_master = {
1646 	.probe = cdns_i3c_master_probe,
1647 	.remove = cdns_i3c_master_remove,
1648 	.driver = {
1649 		.name = "cdns-i3c-master",
1650 		.of_match_table = cdns_i3c_master_of_ids,
1651 	},
1652 };
1653 module_platform_driver(cdns_i3c_master);
1654 
1655 MODULE_AUTHOR("Boris Brezillon <boris.brezillon@bootlin.com>");
1656 MODULE_DESCRIPTION("Cadence I3C master driver");
1657 MODULE_LICENSE("GPL v2");
1658 MODULE_ALIAS("platform:cdns-i3c-master");
1659