1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Silvaco dual-role I3C master driver
4  *
5  * Copyright (C) 2020 Silvaco
6  * Author: Miquel RAYNAL <miquel.raynal@bootlin.com>
7  * Based on a work from: Conor Culhane <conor.culhane@silvaco.com>
8  */
9 
10 #include <linux/bitfield.h>
11 #include <linux/clk.h>
12 #include <linux/completion.h>
13 #include <linux/errno.h>
14 #include <linux/i3c/master.h>
15 #include <linux/interrupt.h>
16 #include <linux/iopoll.h>
17 #include <linux/list.h>
18 #include <linux/module.h>
19 #include <linux/of.h>
20 #include <linux/pinctrl/consumer.h>
21 #include <linux/platform_device.h>
22 #include <linux/pm_runtime.h>
23 
24 /* Master Mode Registers */
25 #define SVC_I3C_MCONFIG      0x000
26 #define   SVC_I3C_MCONFIG_MASTER_EN BIT(0)
27 #define   SVC_I3C_MCONFIG_DISTO(x) FIELD_PREP(BIT(3), (x))
28 #define   SVC_I3C_MCONFIG_HKEEP(x) FIELD_PREP(GENMASK(5, 4), (x))
29 #define   SVC_I3C_MCONFIG_ODSTOP(x) FIELD_PREP(BIT(6), (x))
30 #define   SVC_I3C_MCONFIG_PPBAUD(x) FIELD_PREP(GENMASK(11, 8), (x))
31 #define   SVC_I3C_MCONFIG_PPLOW(x) FIELD_PREP(GENMASK(15, 12), (x))
32 #define   SVC_I3C_MCONFIG_ODBAUD(x) FIELD_PREP(GENMASK(23, 16), (x))
33 #define   SVC_I3C_MCONFIG_ODHPP(x) FIELD_PREP(BIT(24), (x))
34 #define   SVC_I3C_MCONFIG_SKEW(x) FIELD_PREP(GENMASK(27, 25), (x))
35 #define   SVC_I3C_MCONFIG_I2CBAUD(x) FIELD_PREP(GENMASK(31, 28), (x))
36 
37 #define SVC_I3C_MCTRL        0x084
38 #define   SVC_I3C_MCTRL_REQUEST_MASK GENMASK(2, 0)
39 #define   SVC_I3C_MCTRL_REQUEST_NONE 0
40 #define   SVC_I3C_MCTRL_REQUEST_START_ADDR 1
41 #define   SVC_I3C_MCTRL_REQUEST_STOP 2
42 #define   SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK 3
43 #define   SVC_I3C_MCTRL_REQUEST_PROC_DAA 4
44 #define   SVC_I3C_MCTRL_REQUEST_AUTO_IBI 7
45 #define   SVC_I3C_MCTRL_TYPE_I3C 0
46 #define   SVC_I3C_MCTRL_TYPE_I2C BIT(4)
47 #define   SVC_I3C_MCTRL_IBIRESP_AUTO 0
48 #define   SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE 0
49 #define   SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE BIT(7)
50 #define   SVC_I3C_MCTRL_IBIRESP_NACK BIT(6)
51 #define   SVC_I3C_MCTRL_IBIRESP_MANUAL GENMASK(7, 6)
52 #define   SVC_I3C_MCTRL_DIR(x) FIELD_PREP(BIT(8), (x))
53 #define   SVC_I3C_MCTRL_DIR_WRITE 0
54 #define   SVC_I3C_MCTRL_DIR_READ 1
55 #define   SVC_I3C_MCTRL_ADDR(x) FIELD_PREP(GENMASK(15, 9), (x))
56 #define   SVC_I3C_MCTRL_RDTERM(x) FIELD_PREP(GENMASK(23, 16), (x))
57 
58 #define SVC_I3C_MSTATUS      0x088
59 #define   SVC_I3C_MSTATUS_STATE(x) FIELD_GET(GENMASK(2, 0), (x))
60 #define   SVC_I3C_MSTATUS_STATE_DAA(x) (SVC_I3C_MSTATUS_STATE(x) == 5)
61 #define   SVC_I3C_MSTATUS_STATE_IDLE(x) (SVC_I3C_MSTATUS_STATE(x) == 0)
62 #define   SVC_I3C_MSTATUS_BETWEEN(x) FIELD_GET(BIT(4), (x))
63 #define   SVC_I3C_MSTATUS_NACKED(x) FIELD_GET(BIT(5), (x))
64 #define   SVC_I3C_MSTATUS_IBITYPE(x) FIELD_GET(GENMASK(7, 6), (x))
65 #define   SVC_I3C_MSTATUS_IBITYPE_IBI 1
66 #define   SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST 2
67 #define   SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN 3
68 #define   SVC_I3C_MINT_SLVSTART BIT(8)
69 #define   SVC_I3C_MINT_MCTRLDONE BIT(9)
70 #define   SVC_I3C_MINT_COMPLETE BIT(10)
71 #define   SVC_I3C_MINT_RXPEND BIT(11)
72 #define   SVC_I3C_MINT_TXNOTFULL BIT(12)
73 #define   SVC_I3C_MINT_IBIWON BIT(13)
74 #define   SVC_I3C_MINT_ERRWARN BIT(15)
75 #define   SVC_I3C_MSTATUS_SLVSTART(x) FIELD_GET(SVC_I3C_MINT_SLVSTART, (x))
76 #define   SVC_I3C_MSTATUS_MCTRLDONE(x) FIELD_GET(SVC_I3C_MINT_MCTRLDONE, (x))
77 #define   SVC_I3C_MSTATUS_COMPLETE(x) FIELD_GET(SVC_I3C_MINT_COMPLETE, (x))
78 #define   SVC_I3C_MSTATUS_RXPEND(x) FIELD_GET(SVC_I3C_MINT_RXPEND, (x))
79 #define   SVC_I3C_MSTATUS_TXNOTFULL(x) FIELD_GET(SVC_I3C_MINT_TXNOTFULL, (x))
80 #define   SVC_I3C_MSTATUS_IBIWON(x) FIELD_GET(SVC_I3C_MINT_IBIWON, (x))
81 #define   SVC_I3C_MSTATUS_ERRWARN(x) FIELD_GET(SVC_I3C_MINT_ERRWARN, (x))
82 #define   SVC_I3C_MSTATUS_IBIADDR(x) FIELD_GET(GENMASK(30, 24), (x))
83 
84 #define SVC_I3C_IBIRULES     0x08C
85 #define   SVC_I3C_IBIRULES_ADDR(slot, addr) FIELD_PREP(GENMASK(29, 0), \
86 						       ((addr) & 0x3F) << ((slot) * 6))
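/*
 * Worked example of the packing above (illustrative values only): putting
 * address 0x0a in slot 0 and 0x0b in slot 1 gives
 *   SVC_I3C_IBIRULES_ADDR(0, 0x0a) | SVC_I3C_IBIRULES_ADDR(1, 0x0b)
 *     = 0x00a | (0x00b << 6) = 0x2ca
 * i.e. each slot is a 6-bit field holding the low six bits of the device
 * address, which is why addresses with bit 7 set cannot be listed.
 */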
87 #define   SVC_I3C_IBIRULES_ADDRS 5
88 #define   SVC_I3C_IBIRULES_MSB0 BIT(30)
89 #define   SVC_I3C_IBIRULES_NOBYTE BIT(31)
90 #define   SVC_I3C_IBIRULES_MANDBYTE 0
91 #define SVC_I3C_MINTSET      0x090
92 #define SVC_I3C_MINTCLR      0x094
93 #define SVC_I3C_MINTMASKED   0x098
94 #define SVC_I3C_MERRWARN     0x09C
95 #define   SVC_I3C_MERRWARN_NACK BIT(2)
96 #define   SVC_I3C_MERRWARN_TIMEOUT BIT(20)
97 #define SVC_I3C_MDMACTRL     0x0A0
98 #define SVC_I3C_MDATACTRL    0x0AC
99 #define   SVC_I3C_MDATACTRL_FLUSHTB BIT(0)
100 #define   SVC_I3C_MDATACTRL_FLUSHRB BIT(1)
101 #define   SVC_I3C_MDATACTRL_UNLOCK_TRIG BIT(3)
102 #define   SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL GENMASK(5, 4)
103 #define   SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY 0
104 #define   SVC_I3C_MDATACTRL_RXCOUNT(x) FIELD_GET(GENMASK(28, 24), (x))
105 #define   SVC_I3C_MDATACTRL_TXFULL BIT(30)
106 #define   SVC_I3C_MDATACTRL_RXEMPTY BIT(31)
107 
108 #define SVC_I3C_MWDATAB      0x0B0
109 #define   SVC_I3C_MWDATAB_END BIT(8)
110 
111 #define SVC_I3C_MWDATABE     0x0B4
112 #define SVC_I3C_MWDATAH      0x0B8
113 #define SVC_I3C_MWDATAHE     0x0BC
114 #define SVC_I3C_MRDATAB      0x0C0
115 #define SVC_I3C_MRDATAH      0x0C8
116 #define SVC_I3C_MWMSG_SDR    0x0D0
117 #define SVC_I3C_MRMSG_SDR    0x0D4
118 #define SVC_I3C_MWMSG_DDR    0x0D8
119 #define SVC_I3C_MRMSG_DDR    0x0DC
120 
121 #define SVC_I3C_MDYNADDR     0x0E4
122 #define   SVC_MDYNADDR_VALID BIT(0)
123 #define   SVC_MDYNADDR_ADDR(x) FIELD_PREP(GENMASK(7, 1), (x))
124 
125 #define SVC_I3C_MAX_DEVS 32
126 #define SVC_I3C_PM_TIMEOUT_MS 1000
127 
128 /* This parameter depends on the implementation and may be tuned */
129 #define SVC_I3C_FIFO_SIZE 16
130 
131 struct svc_i3c_cmd {
132 	u8 addr;
133 	bool rnw;
134 	u8 *in;
135 	const void *out;
136 	unsigned int len;
137 	unsigned int actual_len;
138 	struct i3c_priv_xfer *xfer;
139 	bool continued;
140 };
141 
142 struct svc_i3c_xfer {
143 	struct list_head node;
144 	struct completion comp;
145 	int ret;
146 	unsigned int type;
147 	unsigned int ncmds;
148 	struct svc_i3c_cmd cmds[];
149 };
150 
151 struct svc_i3c_regs_save {
152 	u32 mconfig;
153 	u32 mdynaddr;
154 };
155 
156 /**
157  * struct svc_i3c_master - Silvaco I3C Master structure
158  * @base: I3C master controller
159  * @dev: Corresponding device
160  * @regs: Memory mapping
161  * @saved_regs: Volatile values for PM operations
162  * @free_slots: Bit array of available slots
163  * @addrs: Array containing the dynamic addresses of each attached device
164  * @descs: Array of descriptors, one per attached device
165  * @hj_work: Hot-join work
166  * @ibi_work: IBI work
167  * @irq: Main interrupt
168  * @pclk: System clock
169  * @fclk: Fast clock (bus)
170  * @sclk: Slow clock (other events)
171  * @xferqueue: Transfer queue structure
172  * @xferqueue.list: List member
173  * @xferqueue.cur: Current ongoing transfer
174  * @xferqueue.lock: Queue lock
175  * @ibi: IBI structure
176  * @ibi.num_slots: Number of slots available in @ibi.slots
177  * @ibi.slots: Available IBI slots
178  * @ibi.tbq_slot: To be queued IBI slot
179  * @ibi.lock: IBI lock
180  * @lock: Transfer lock, serializes the IBI work thread and the master callbacks
181  */
182 struct svc_i3c_master {
183 	struct i3c_master_controller base;
184 	struct device *dev;
185 	void __iomem *regs;
186 	struct svc_i3c_regs_save saved_regs;
187 	u32 free_slots;
188 	u8 addrs[SVC_I3C_MAX_DEVS];
189 	struct i3c_dev_desc *descs[SVC_I3C_MAX_DEVS];
190 	struct work_struct hj_work;
191 	struct work_struct ibi_work;
192 	int irq;
193 	struct clk *pclk;
194 	struct clk *fclk;
195 	struct clk *sclk;
196 	struct {
197 		struct list_head list;
198 		struct svc_i3c_xfer *cur;
199 		/* Prevent races between transfers */
200 		spinlock_t lock;
201 	} xferqueue;
202 	struct {
203 		unsigned int num_slots;
204 		struct i3c_dev_desc **slots;
205 		struct i3c_ibi_slot *tbq_slot;
206 		/* Prevent races within IBI handlers */
207 		spinlock_t lock;
208 	} ibi;
209 	struct mutex lock;
210 };
211 
212 /**
213  * struct svc_i3c_i2c_dev_data - Device specific data
214  * @index: Index in the master tables corresponding to this device
215  * @ibi: IBI slot index in the master structure
216  * @ibi_pool: IBI pool associated to this device
217  */
218 struct svc_i3c_i2c_dev_data {
219 	u8 index;
220 	int ibi;
221 	struct i3c_generic_ibi_pool *ibi_pool;
222 };
223 
224 static bool svc_i3c_master_error(struct svc_i3c_master *master)
225 {
226 	u32 mstatus, merrwarn;
227 
228 	mstatus = readl(master->regs + SVC_I3C_MSTATUS);
229 	if (SVC_I3C_MSTATUS_ERRWARN(mstatus)) {
230 		merrwarn = readl(master->regs + SVC_I3C_MERRWARN);
231 		writel(merrwarn, master->regs + SVC_I3C_MERRWARN);
232 
233 		/* Ignore timeout error */
234 		if (merrwarn & SVC_I3C_MERRWARN_TIMEOUT) {
235 			dev_dbg(master->dev, "Warning condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
236 				mstatus, merrwarn);
237 			return false;
238 		}
239 
240 		dev_err(master->dev,
241 			"Error condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
242 			mstatus, merrwarn);
243 
244 		return true;
245 	}
246 
247 	return false;
248 }
249 
250 static void svc_i3c_master_enable_interrupts(struct svc_i3c_master *master, u32 mask)
251 {
252 	writel(mask, master->regs + SVC_I3C_MINTSET);
253 }
254 
255 static void svc_i3c_master_disable_interrupts(struct svc_i3c_master *master)
256 {
257 	u32 mask = readl(master->regs + SVC_I3C_MINTSET);
258 
259 	writel(mask, master->regs + SVC_I3C_MINTCLR);
260 }
261 
262 static void svc_i3c_master_clear_merrwarn(struct svc_i3c_master *master)
263 {
264 	/* Clear pending warnings */
265 	writel(readl(master->regs + SVC_I3C_MERRWARN),
266 	       master->regs + SVC_I3C_MERRWARN);
267 }
268 
269 static void svc_i3c_master_flush_fifo(struct svc_i3c_master *master)
270 {
271 	/* Flush FIFOs */
272 	writel(SVC_I3C_MDATACTRL_FLUSHTB | SVC_I3C_MDATACTRL_FLUSHRB,
273 	       master->regs + SVC_I3C_MDATACTRL);
274 }
275 
276 static void svc_i3c_master_reset_fifo_trigger(struct svc_i3c_master *master)
277 {
278 	u32 reg;
279 
280 	/* Set RX and TX trigger levels, flush FIFOs */
281 	reg = SVC_I3C_MDATACTRL_FLUSHTB |
282 	      SVC_I3C_MDATACTRL_FLUSHRB |
283 	      SVC_I3C_MDATACTRL_UNLOCK_TRIG |
284 	      SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL |
285 	      SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY;
286 	writel(reg, master->regs + SVC_I3C_MDATACTRL);
287 }
288 
289 static void svc_i3c_master_reset(struct svc_i3c_master *master)
290 {
291 	svc_i3c_master_clear_merrwarn(master);
292 	svc_i3c_master_reset_fifo_trigger(master);
293 	svc_i3c_master_disable_interrupts(master);
294 }
295 
296 static inline struct svc_i3c_master *
297 to_svc_i3c_master(struct i3c_master_controller *master)
298 {
299 	return container_of(master, struct svc_i3c_master, base);
300 }
301 
302 static void svc_i3c_master_hj_work(struct work_struct *work)
303 {
304 	struct svc_i3c_master *master;
305 
306 	master = container_of(work, struct svc_i3c_master, hj_work);
307 	i3c_master_do_daa(&master->base);
308 }
309 
310 static struct i3c_dev_desc *
311 svc_i3c_master_dev_from_addr(struct svc_i3c_master *master,
312 			     unsigned int ibiaddr)
313 {
314 	int i;
315 
316 	for (i = 0; i < SVC_I3C_MAX_DEVS; i++)
317 		if (master->addrs[i] == ibiaddr)
318 			break;
319 
320 	if (i == SVC_I3C_MAX_DEVS)
321 		return NULL;
322 
323 	return master->descs[i];
324 }
325 
326 static void svc_i3c_master_emit_stop(struct svc_i3c_master *master)
327 {
328 	writel(SVC_I3C_MCTRL_REQUEST_STOP, master->regs + SVC_I3C_MCTRL);
329 
330 	/*
331 	 * This delay is necessary after the emission of a stop, otherwise e.g.
332 	 * repeating IBIs do not get detected. There is a note in the manual
333 	 * about it, stating that the stop condition might not be settled
334 	 * correctly if a start condition follows too rapidly.
335 	 */
336 	udelay(1);
337 }
338 
339 static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
340 				     struct i3c_dev_desc *dev)
341 {
342 	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
343 	struct i3c_ibi_slot *slot;
344 	unsigned int count;
345 	u32 mdatactrl;
346 	int ret, val;
347 	u8 *buf;
348 
349 	slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
350 	if (!slot)
351 		return -ENOSPC;
352 
353 	slot->len = 0;
354 	buf = slot->data;
355 
356 	ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val,
357 						SVC_I3C_MSTATUS_COMPLETE(val), 0, 1000);
358 	if (ret) {
359 		dev_err(master->dev, "Timeout when polling for COMPLETE\n");
360 		return ret;
361 	}
362 
363 	while (SVC_I3C_MSTATUS_RXPEND(readl(master->regs + SVC_I3C_MSTATUS))  &&
364 	       slot->len < SVC_I3C_FIFO_SIZE) {
365 		mdatactrl = readl(master->regs + SVC_I3C_MDATACTRL);
366 		count = SVC_I3C_MDATACTRL_RXCOUNT(mdatactrl);
367 		readsl(master->regs + SVC_I3C_MRDATAB, buf, count);
368 		slot->len += count;
369 		buf += count;
370 	}
371 
372 	master->ibi.tbq_slot = slot;
373 
374 	return 0;
375 }
376 
377 static void svc_i3c_master_ack_ibi(struct svc_i3c_master *master,
378 				   bool mandatory_byte)
379 {
380 	unsigned int ibi_ack_nack;
381 
382 	ibi_ack_nack = SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK;
383 	if (mandatory_byte)
384 		ibi_ack_nack |= SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE;
385 	else
386 		ibi_ack_nack |= SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE;
387 
388 	writel(ibi_ack_nack, master->regs + SVC_I3C_MCTRL);
389 }
390 
391 static void svc_i3c_master_nack_ibi(struct svc_i3c_master *master)
392 {
393 	writel(SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK |
394 	       SVC_I3C_MCTRL_IBIRESP_NACK,
395 	       master->regs + SVC_I3C_MCTRL);
396 }
397 
398 static void svc_i3c_master_ibi_work(struct work_struct *work)
399 {
400 	struct svc_i3c_master *master = container_of(work, struct svc_i3c_master, ibi_work);
401 	struct svc_i3c_i2c_dev_data *data;
402 	unsigned int ibitype, ibiaddr;
403 	struct i3c_dev_desc *dev;
404 	u32 status, val;
405 	int ret;
406 
407 	mutex_lock(&master->lock);
408 	/*
409 	 * IBIWON may be set before SVC_I3C_MCTRL_REQUEST_AUTO_IBI, causing
410 	 * readl_relaxed_poll_timeout() to return immediately. Consequently,
411 	 * ibitype will be 0 since it was last updated only after the 8th SCL
412 	 * cycle, leading to missed client IBI handlers.
413 	 *
414 	 * A typical scenario is when IBIWON occurs and bus arbitration is lost
415 	 * at svc_i3c_master_priv_xfers().
416 	 *
417 	 * Clear SVC_I3C_MINT_IBIWON before sending SVC_I3C_MCTRL_REQUEST_AUTO_IBI.
418 	 */
419 	writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
420 
421 	/* Acknowledge the incoming interrupt with the AUTOIBI mechanism */
422 	writel(SVC_I3C_MCTRL_REQUEST_AUTO_IBI |
423 	       SVC_I3C_MCTRL_IBIRESP_AUTO,
424 	       master->regs + SVC_I3C_MCTRL);
425 
426 	/* Wait for IBIWON, should take approximately 100us */
427 	ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val,
428 					 SVC_I3C_MSTATUS_IBIWON(val), 0, 1000);
429 	if (ret) {
430 		dev_err(master->dev, "Timeout when polling for IBIWON\n");
431 		svc_i3c_master_emit_stop(master);
432 		goto reenable_ibis;
433 	}
434 
435 	status = readl(master->regs + SVC_I3C_MSTATUS);
436 	ibitype = SVC_I3C_MSTATUS_IBITYPE(status);
437 	ibiaddr = SVC_I3C_MSTATUS_IBIADDR(status);
438 
439 	/* Handle the critical responses to IBIs */
440 	switch (ibitype) {
441 	case SVC_I3C_MSTATUS_IBITYPE_IBI:
442 		dev = svc_i3c_master_dev_from_addr(master, ibiaddr);
443 		if (!dev)
444 			svc_i3c_master_nack_ibi(master);
445 		else
446 			svc_i3c_master_handle_ibi(master, dev);
447 		break;
448 	case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
449 		svc_i3c_master_ack_ibi(master, false);
450 		break;
451 	case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
452 		svc_i3c_master_nack_ibi(master);
453 		break;
454 	default:
455 		break;
456 	}
457 
458 	/*
459 	 * If an error happened, we probably got interrupted and the exchange
460 	 * timed out. In this case we just drop everything, emit a stop and wait
461 	 * for the slave to interrupt again.
462 	 */
463 	if (svc_i3c_master_error(master)) {
464 		if (master->ibi.tbq_slot) {
465 			data = i3c_dev_get_master_data(dev);
466 			i3c_generic_ibi_recycle_slot(data->ibi_pool,
467 						     master->ibi.tbq_slot);
468 			master->ibi.tbq_slot = NULL;
469 		}
470 
471 		svc_i3c_master_emit_stop(master);
472 
473 		goto reenable_ibis;
474 	}
475 
476 	/* Handle the non-critical tasks */
477 	switch (ibitype) {
478 	case SVC_I3C_MSTATUS_IBITYPE_IBI:
479 		if (dev) {
480 			i3c_master_queue_ibi(dev, master->ibi.tbq_slot);
481 			master->ibi.tbq_slot = NULL;
482 		}
483 		svc_i3c_master_emit_stop(master);
484 		break;
485 	case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
486 		queue_work(master->base.wq, &master->hj_work);
487 		break;
488 	case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
489 	default:
490 		break;
491 	}
492 
493 reenable_ibis:
494 	svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
495 	mutex_unlock(&master->lock);
496 }
497 
498 static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id)
499 {
500 	struct svc_i3c_master *master = (struct svc_i3c_master *)dev_id;
501 	u32 active = readl(master->regs + SVC_I3C_MSTATUS);
502 
503 	if (!SVC_I3C_MSTATUS_SLVSTART(active))
504 		return IRQ_NONE;
505 
506 	/* Clear the interrupt status */
507 	writel(SVC_I3C_MINT_SLVSTART, master->regs + SVC_I3C_MSTATUS);
508 
509 	svc_i3c_master_disable_interrupts(master);
510 
511 	/* Handle the interrupt in a non-atomic context */
512 	queue_work(master->base.wq, &master->ibi_work);
513 
514 	return IRQ_HANDLED;
515 }
516 
517 static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
518 {
519 	struct svc_i3c_master *master = to_svc_i3c_master(m);
520 	struct i3c_bus *bus = i3c_master_get_bus(m);
521 	struct i3c_device_info info = {};
522 	unsigned long fclk_rate, fclk_period_ns;
523 	unsigned int high_period_ns, od_low_period_ns;
524 	u32 ppbaud, pplow, odhpp, odbaud, odstop, i2cbaud, reg;
525 	int ret;
526 
527 	ret = pm_runtime_resume_and_get(master->dev);
528 	if (ret < 0) {
529 		dev_err(master->dev,
530 			"<%s> cannot resume i3c bus master, err: %d\n",
531 			__func__, ret);
532 		return ret;
533 	}
534 
535 	/* Timing derivation */
536 	fclk_rate = clk_get_rate(master->fclk);
537 	if (!fclk_rate) {
538 		ret = -EINVAL;
539 		goto rpm_out;
540 	}
541 
542 	fclk_period_ns = DIV_ROUND_UP(1000000000, fclk_rate);
543 
544 	/*
545 	 * Using I3C Push-Pull mode, target is 12.5MHz/80ns period.
546 	 * Simplest configuration is using a 50% duty-cycle of 40ns.
547 	 */
548 	ppbaud = DIV_ROUND_UP(40, fclk_period_ns) - 1;
549 	pplow = 0;
550 
551 	/*
552 	 * Using I3C Open-Drain mode, target is 4.17MHz/240ns with a
553 	 * duty-cycle tuned so that high levels are filtered out by
554 	 * the 50ns filter (target being 40ns).
555 	 */
556 	odhpp = 1;
557 	high_period_ns = (ppbaud + 1) * fclk_period_ns;
558 	odbaud = DIV_ROUND_UP(240 - high_period_ns, high_period_ns) - 1;
559 	od_low_period_ns = (odbaud + 1) * high_period_ns;
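	/*
	 * Worked example of the derivation above, assuming a hypothetical
	 * 100 MHz fclk (not a requirement of this driver):
	 *   fclk_period_ns = 10
	 *   ppbaud = DIV_ROUND_UP(40, 10) - 1 = 3 -> 40ns high / 40ns low,
	 *     i.e. the 12.5 MHz push-pull target
	 *   high_period_ns = (3 + 1) * 10 = 40
	 *   odbaud = DIV_ROUND_UP(240 - 40, 40) - 1 = 4
	 *   od_low_period_ns = (4 + 1) * 40 = 200 -> 40ns high + 200ns low,
	 *     i.e. the 240ns/4.17 MHz open-drain target
	 */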
560 
561 	switch (bus->mode) {
562 	case I3C_BUS_MODE_PURE:
563 		i2cbaud = 0;
564 		odstop = 0;
565 		break;
566 	case I3C_BUS_MODE_MIXED_FAST:
567 	case I3C_BUS_MODE_MIXED_LIMITED:
568 		/*
569 		 * Using I2C Fm+ mode, the target is 1MHz/1000ns; the split
570 		 * between the high and low periods does not really matter.
571 		 */
572 		i2cbaud = DIV_ROUND_UP(1000, od_low_period_ns) - 2;
573 		odstop = 1;
574 		break;
575 	case I3C_BUS_MODE_MIXED_SLOW:
576 		/*
577 		 * Using I2C Fm mode, target is 0.4MHz/2500ns, with the same
578 		 * constraints as the Fm+ mode.
579 		 */
580 		i2cbaud = DIV_ROUND_UP(2500, od_low_period_ns) - 2;
581 		odstop = 1;
582 		break;
583 	default:
584 		goto rpm_out;
585 	}
586 
587 	reg = SVC_I3C_MCONFIG_MASTER_EN |
588 	      SVC_I3C_MCONFIG_DISTO(0) |
589 	      SVC_I3C_MCONFIG_HKEEP(0) |
590 	      SVC_I3C_MCONFIG_ODSTOP(odstop) |
591 	      SVC_I3C_MCONFIG_PPBAUD(ppbaud) |
592 	      SVC_I3C_MCONFIG_PPLOW(pplow) |
593 	      SVC_I3C_MCONFIG_ODBAUD(odbaud) |
594 	      SVC_I3C_MCONFIG_ODHPP(odhpp) |
595 	      SVC_I3C_MCONFIG_SKEW(0) |
596 	      SVC_I3C_MCONFIG_I2CBAUD(i2cbaud);
597 	writel(reg, master->regs + SVC_I3C_MCONFIG);
598 
599 	/* Master core's registration */
600 	ret = i3c_master_get_free_addr(m, 0);
601 	if (ret < 0)
602 		goto rpm_out;
603 
604 	info.dyn_addr = ret;
605 
606 	writel(SVC_MDYNADDR_VALID | SVC_MDYNADDR_ADDR(info.dyn_addr),
607 	       master->regs + SVC_I3C_MDYNADDR);
608 
609 	ret = i3c_master_set_info(&master->base, &info);
610 	if (ret)
611 		goto rpm_out;
612 
613 rpm_out:
614 	pm_runtime_mark_last_busy(master->dev);
615 	pm_runtime_put_autosuspend(master->dev);
616 
617 	return ret;
618 }
619 
620 static void svc_i3c_master_bus_cleanup(struct i3c_master_controller *m)
621 {
622 	struct svc_i3c_master *master = to_svc_i3c_master(m);
623 	int ret;
624 
625 	ret = pm_runtime_resume_and_get(master->dev);
626 	if (ret < 0) {
627 		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
628 		return;
629 	}
630 
631 	svc_i3c_master_disable_interrupts(master);
632 
633 	/* Disable master */
634 	writel(0, master->regs + SVC_I3C_MCONFIG);
635 
636 	pm_runtime_mark_last_busy(master->dev);
637 	pm_runtime_put_autosuspend(master->dev);
638 }
639 
640 static int svc_i3c_master_reserve_slot(struct svc_i3c_master *master)
641 {
642 	unsigned int slot;
643 
644 	if (!(master->free_slots & GENMASK(SVC_I3C_MAX_DEVS - 1, 0)))
645 		return -ENOSPC;
646 
647 	slot = ffs(master->free_slots) - 1;
648 
649 	master->free_slots &= ~BIT(slot);
650 
651 	return slot;
652 }
653 
654 static void svc_i3c_master_release_slot(struct svc_i3c_master *master,
655 					unsigned int slot)
656 {
657 	master->free_slots |= BIT(slot);
658 }
659 
660 static int svc_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
661 {
662 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
663 	struct svc_i3c_master *master = to_svc_i3c_master(m);
664 	struct svc_i3c_i2c_dev_data *data;
665 	int slot;
666 
667 	slot = svc_i3c_master_reserve_slot(master);
668 	if (slot < 0)
669 		return slot;
670 
671 	data = kzalloc(sizeof(*data), GFP_KERNEL);
672 	if (!data) {
673 		svc_i3c_master_release_slot(master, slot);
674 		return -ENOMEM;
675 	}
676 
677 	data->ibi = -1;
678 	data->index = slot;
679 	master->addrs[slot] = dev->info.dyn_addr ? dev->info.dyn_addr :
680 						   dev->info.static_addr;
681 	master->descs[slot] = dev;
682 
683 	i3c_dev_set_master_data(dev, data);
684 
685 	return 0;
686 }
687 
688 static int svc_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
689 					   u8 old_dyn_addr)
690 {
691 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
692 	struct svc_i3c_master *master = to_svc_i3c_master(m);
693 	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
694 
695 	master->addrs[data->index] = dev->info.dyn_addr ? dev->info.dyn_addr :
696 							  dev->info.static_addr;
697 
698 	return 0;
699 }
700 
701 static void svc_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
702 {
703 	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
704 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
705 	struct svc_i3c_master *master = to_svc_i3c_master(m);
706 
707 	master->addrs[data->index] = 0;
708 	svc_i3c_master_release_slot(master, data->index);
709 
710 	kfree(data);
711 }
712 
713 static int svc_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
714 {
715 	struct i3c_master_controller *m = i2c_dev_get_master(dev);
716 	struct svc_i3c_master *master = to_svc_i3c_master(m);
717 	struct svc_i3c_i2c_dev_data *data;
718 	int slot;
719 
720 	slot = svc_i3c_master_reserve_slot(master);
721 	if (slot < 0)
722 		return slot;
723 
724 	data = kzalloc(sizeof(*data), GFP_KERNEL);
725 	if (!data) {
726 		svc_i3c_master_release_slot(master, slot);
727 		return -ENOMEM;
728 	}
729 
730 	data->index = slot;
731 	master->addrs[slot] = dev->addr;
732 
733 	i2c_dev_set_master_data(dev, data);
734 
735 	return 0;
736 }
737 
738 static void svc_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
739 {
740 	struct svc_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
741 	struct i3c_master_controller *m = i2c_dev_get_master(dev);
742 	struct svc_i3c_master *master = to_svc_i3c_master(m);
743 
744 	svc_i3c_master_release_slot(master, data->index);
745 
746 	kfree(data);
747 }
748 
749 static int svc_i3c_master_readb(struct svc_i3c_master *master, u8 *dst,
750 				unsigned int len)
751 {
752 	int ret, i;
753 	u32 reg;
754 
755 	for (i = 0; i < len; i++) {
756 		ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
757 						reg,
758 						SVC_I3C_MSTATUS_RXPEND(reg),
759 						0, 1000);
760 		if (ret)
761 			return ret;
762 
763 		dst[i] = readl(master->regs + SVC_I3C_MRDATAB);
764 	}
765 
766 	return 0;
767 }
768 
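/*
 * Summary of the loop below, derived from the code itself rather than the
 * datasheet: every PROC_DAA request either makes a target send its 6-byte
 * provisional ID followed by BCR/DCR (RXPEND), in which case the controller
 * answers by writing the chosen dynamic address to MWDATAB, or it completes
 * (MCTRLDONE), either cleanly (IDLE + COMPLETE) or with a NACK that is
 * tolerated exactly once per device.
 */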
769 static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
770 					u8 *addrs, unsigned int *count)
771 {
772 	u64 prov_id[SVC_I3C_MAX_DEVS] = {}, nacking_prov_id = 0;
773 	unsigned int dev_nb = 0, last_addr = 0;
774 	u32 reg;
775 	int ret, i;
776 
777 	while (true) {
778 		/* Enter/proceed with DAA */
779 		writel(SVC_I3C_MCTRL_REQUEST_PROC_DAA |
780 		       SVC_I3C_MCTRL_TYPE_I3C |
781 		       SVC_I3C_MCTRL_IBIRESP_NACK |
782 		       SVC_I3C_MCTRL_DIR(SVC_I3C_MCTRL_DIR_WRITE),
783 		       master->regs + SVC_I3C_MCTRL);
784 
785 		/*
786 		 * Either one slave will send its ID, or the assignment process
787 		 * is done.
788 		 */
789 		ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
790 						reg,
791 						SVC_I3C_MSTATUS_RXPEND(reg) |
792 						SVC_I3C_MSTATUS_MCTRLDONE(reg),
793 						1, 1000);
794 		if (ret)
795 			return ret;
796 
797 		if (SVC_I3C_MSTATUS_RXPEND(reg)) {
798 			u8 data[6];
799 
800 			/*
801 			 * We only care about the 48-bit provisional ID here,
802 			 * to be sure a device does not nack an address twice.
803 			 * Otherwise, we would just need to flush the RX FIFO.
804 			 */
805 			ret = svc_i3c_master_readb(master, data, 6);
806 			if (ret)
807 				return ret;
808 
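			/*
			 * Big-endian assembly of the provisional ID; for
			 * example the (made-up) bytes 0x51 0x23 0x45 0x67
			 * 0x89 0xab would yield prov_id = 0x5123456789ab.
			 */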
809 			for (i = 0; i < 6; i++)
810 				prov_id[dev_nb] |= (u64)(data[i]) << (8 * (5 - i));
811 
812 			/* We do not care about the BCR and DCR yet */
813 			ret = svc_i3c_master_readb(master, data, 2);
814 			if (ret)
815 				return ret;
816 		} else if (SVC_I3C_MSTATUS_MCTRLDONE(reg)) {
817 			if (SVC_I3C_MSTATUS_STATE_IDLE(reg) &&
818 			    SVC_I3C_MSTATUS_COMPLETE(reg)) {
819 				/*
820 				 * All devices received and acked their dynamic
821 				 * address, this is the natural end of the DAA
822 				 * procedure.
823 				 */
824 				break;
825 			} else if (SVC_I3C_MSTATUS_NACKED(reg)) {
826 				/* No I3C devices attached */
827 				if (dev_nb == 0)
828 					break;
829 
830 				/*
831 				 * A slave device nacked the address; this is
832 				 * only allowed once. DAA will be stopped and
833 				 * then resumed, and the same device is expected
834 				 * to answer again immediately and to ack the
835 				 * address this time.
836 				 */
837 				if (prov_id[dev_nb] == nacking_prov_id)
838 					return -EIO;
839 
840 				dev_nb--;
841 				nacking_prov_id = prov_id[dev_nb];
842 				svc_i3c_master_emit_stop(master);
843 
844 				continue;
845 			} else {
846 				return -EIO;
847 			}
848 		}
849 
850 		/* Wait for the slave to be ready to receive its address */
851 		ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
852 						reg,
853 						SVC_I3C_MSTATUS_MCTRLDONE(reg) &&
854 						SVC_I3C_MSTATUS_STATE_DAA(reg) &&
855 						SVC_I3C_MSTATUS_BETWEEN(reg),
856 						0, 1000);
857 		if (ret)
858 			return ret;
859 
860 		/* Give the slave device a suitable dynamic address */
861 		ret = i3c_master_get_free_addr(&master->base, last_addr + 1);
862 		if (ret < 0)
863 			return ret;
864 
865 		addrs[dev_nb] = ret;
866 		dev_dbg(master->dev, "DAA: device %d assigned to 0x%02x\n",
867 			dev_nb, addrs[dev_nb]);
868 
869 		writel(addrs[dev_nb], master->regs + SVC_I3C_MWDATAB);
870 		last_addr = addrs[dev_nb++];
871 	}
872 
873 	*count = dev_nb;
874 
875 	return 0;
876 }
877 
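/*
 * Note on the hardware constraint handled below, as inferred from the
 * register layout above: IBIRULES only holds SVC_I3C_IBIRULES_ADDRS (5)
 * addresses, all of them must have bit 7 cleared, and a single
 * mandatory-byte/no-byte policy applies to the whole list, hence the two
 * candidate lists built here.
 */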
878 static int svc_i3c_update_ibirules(struct svc_i3c_master *master)
879 {
880 	struct i3c_dev_desc *dev;
881 	u32 reg_mbyte = 0, reg_nobyte = SVC_I3C_IBIRULES_NOBYTE;
882 	unsigned int mbyte_addr_ok = 0, mbyte_addr_ko = 0, nobyte_addr_ok = 0,
883 		nobyte_addr_ko = 0;
884 	bool list_mbyte = false, list_nobyte = false;
885 
886 	/* Create the IBIRULES register for both cases */
887 	i3c_bus_for_each_i3cdev(&master->base.bus, dev) {
888 		if (I3C_BCR_DEVICE_ROLE(dev->info.bcr) == I3C_BCR_I3C_MASTER)
889 			continue;
890 
891 		if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD) {
892 			reg_mbyte |= SVC_I3C_IBIRULES_ADDR(mbyte_addr_ok,
893 							   dev->info.dyn_addr);
894 
895 			/* IBI rules cannot be applied to devices with MSb=1 */
896 			if (dev->info.dyn_addr & BIT(7))
897 				mbyte_addr_ko++;
898 			else
899 				mbyte_addr_ok++;
900 		} else {
901 			reg_nobyte |= SVC_I3C_IBIRULES_ADDR(nobyte_addr_ok,
902 							    dev->info.dyn_addr);
903 
904 			/* IBI rules cannot be applied to devices with MSb=1 */
905 			if (dev->info.dyn_addr & BIT(7))
906 				nobyte_addr_ko++;
907 			else
908 				nobyte_addr_ok++;
909 		}
910 	}
911 
912 	/* Check whether each device list can be handled by the hardware */
913 	if (!mbyte_addr_ko && mbyte_addr_ok <= SVC_I3C_IBIRULES_ADDRS)
914 		list_mbyte = true;
915 
916 	if (!nobyte_addr_ko && nobyte_addr_ok <= SVC_I3C_IBIRULES_ADDRS)
917 		list_nobyte = true;
918 
919 	/* No list can be properly handled, return an error */
920 	if (!list_mbyte && !list_nobyte)
921 		return -ERANGE;
922 
923 	/* Pick a list the hardware can handle, preferring the mandatory-byte one */
924 	if (list_mbyte)
925 		writel(reg_mbyte, master->regs + SVC_I3C_IBIRULES);
926 	else
927 		writel(reg_nobyte, master->regs + SVC_I3C_IBIRULES);
928 
929 	return 0;
930 }
931 
932 static int svc_i3c_master_do_daa(struct i3c_master_controller *m)
933 {
934 	struct svc_i3c_master *master = to_svc_i3c_master(m);
935 	u8 addrs[SVC_I3C_MAX_DEVS];
936 	unsigned long flags;
937 	unsigned int dev_nb;
938 	int ret, i;
939 
940 	ret = pm_runtime_resume_and_get(master->dev);
941 	if (ret < 0) {
942 		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
943 		return ret;
944 	}
945 
946 	spin_lock_irqsave(&master->xferqueue.lock, flags);
947 	ret = svc_i3c_master_do_daa_locked(master, addrs, &dev_nb);
948 	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
949 	if (ret) {
950 		svc_i3c_master_emit_stop(master);
951 		svc_i3c_master_clear_merrwarn(master);
952 		goto rpm_out;
953 	}
954 
955 	/* Register all participating devices with the core */
956 	for (i = 0; i < dev_nb; i++) {
957 		ret = i3c_master_add_i3c_dev_locked(m, addrs[i]);
958 		if (ret)
959 			goto rpm_out;
960 	}
961 
962 	/* Configure IBI auto-rules */
963 	ret = svc_i3c_update_ibirules(master);
964 	if (ret)
965 		dev_err(master->dev, "Cannot handle such a list of devices");
966 
967 rpm_out:
968 	pm_runtime_mark_last_busy(master->dev);
969 	pm_runtime_put_autosuspend(master->dev);
970 
971 	return ret;
972 }
973 
974 static int svc_i3c_master_read(struct svc_i3c_master *master,
975 			       u8 *in, unsigned int len)
976 {
977 	int offset = 0, i;
978 	u32 mdctrl, mstatus;
979 	bool completed = false;
980 	unsigned int count;
981 	unsigned long start = jiffies;
982 
983 	while (!completed) {
984 		mstatus = readl(master->regs + SVC_I3C_MSTATUS);
985 		if (SVC_I3C_MSTATUS_COMPLETE(mstatus) != 0)
986 			completed = true;
987 
988 		if (time_after(jiffies, start + msecs_to_jiffies(1000))) {
989 			dev_dbg(master->dev, "I3C read timeout\n");
990 			return -ETIMEDOUT;
991 		}
992 
993 		mdctrl = readl(master->regs + SVC_I3C_MDATACTRL);
994 		count = SVC_I3C_MDATACTRL_RXCOUNT(mdctrl);
995 		if (offset + count > len) {
996 			dev_err(master->dev, "I3C receive length too long!\n");
997 			return -EINVAL;
998 		}
999 		for (i = 0; i < count; i++)
1000 			in[offset + i] = readl(master->regs + SVC_I3C_MRDATAB);
1001 
1002 		offset += count;
1003 	}
1004 
1005 	return offset;
1006 }
1007 
1008 static int svc_i3c_master_write(struct svc_i3c_master *master,
1009 				const u8 *out, unsigned int len)
1010 {
1011 	int offset = 0, ret;
1012 	u32 mdctrl;
1013 
1014 	while (offset < len) {
1015 		ret = readl_poll_timeout(master->regs + SVC_I3C_MDATACTRL,
1016 					 mdctrl,
1017 					 !(mdctrl & SVC_I3C_MDATACTRL_TXFULL),
1018 					 0, 1000);
1019 		if (ret)
1020 			return ret;
1021 
1022 		/*
1023 		 * The last byte to be sent over the bus must either have the
1024 		 * "end" bit set or be written in MWDATABE.
1025 		 */
1026 		if (likely(offset < (len - 1)))
1027 			writel(out[offset++], master->regs + SVC_I3C_MWDATAB);
1028 		else
1029 			writel(out[offset++], master->regs + SVC_I3C_MWDATABE);
1030 	}
1031 
1032 	return 0;
1033 }
1034 
1035 static int svc_i3c_master_xfer(struct svc_i3c_master *master,
1036 			       bool rnw, unsigned int xfer_type, u8 addr,
1037 			       u8 *in, const u8 *out, unsigned int xfer_len,
1038 			       unsigned int *actual_len, bool continued)
1039 {
1040 	u32 reg;
1041 	int ret;
1042 
1043 	/* Clear the SVC_I3C_MINT_IBIWON bit (write-1-to-clear) */
1044 	writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
1045 
1046 	writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
1047 	       xfer_type |
1048 	       SVC_I3C_MCTRL_IBIRESP_NACK |
1049 	       SVC_I3C_MCTRL_DIR(rnw) |
1050 	       SVC_I3C_MCTRL_ADDR(addr) |
1051 	       SVC_I3C_MCTRL_RDTERM(*actual_len),
1052 	       master->regs + SVC_I3C_MCTRL);
1053 
1054 	ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
1055 				 SVC_I3C_MSTATUS_MCTRLDONE(reg), 0, 1000);
1056 	if (ret)
1057 		goto emit_stop;
1058 
1059 	if (readl(master->regs + SVC_I3C_MERRWARN) & SVC_I3C_MERRWARN_NACK) {
1060 		ret = -ENXIO;
1061 		*actual_len = 0;
1062 		goto emit_stop;
1063 	}
1064 
1065 	/*
1066 	 * According to I3C spec ver 1.1.1, 5.1.2.2.3 Consequence of Controller Starting a Frame
1067 	 * with I3C Target Address.
1068 	 *
1069 	 * The I3C Controller normally should start a Frame, the Address may be arbitrated, and so
1070 	 * the Controller shall monitor to see whether an In-Band Interrupt request, a Controller
1071 	 * Role Request (i.e., Secondary Controller requests to become the Active Controller), or
1072 	 * a Hot-Join Request has been made.
1073 	 *
1074 	 * If the IBIWON check is missed, wrong data will be returned. When IBIWON
1075 	 * happens, return a failure and yield to the event handlers above.
1076 	 */
1077 	if (SVC_I3C_MSTATUS_IBIWON(reg)) {
1078 		ret = -EAGAIN;
1079 		*actual_len = 0;
1080 		goto emit_stop;
1081 	}
1082 
1083 	if (rnw)
1084 		ret = svc_i3c_master_read(master, in, xfer_len);
1085 	else
1086 		ret = svc_i3c_master_write(master, out, xfer_len);
1087 	if (ret < 0)
1088 		goto emit_stop;
1089 
1090 	if (rnw)
1091 		*actual_len = ret;
1092 
1093 	ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
1094 				 SVC_I3C_MSTATUS_COMPLETE(reg), 0, 1000);
1095 	if (ret)
1096 		goto emit_stop;
1097 
1098 	writel(SVC_I3C_MINT_COMPLETE, master->regs + SVC_I3C_MSTATUS);
1099 
1100 	if (!continued) {
1101 		svc_i3c_master_emit_stop(master);
1102 
1103 		/* Wait for the idle state once the stop has been sent. */
1104 		readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
1105 				   SVC_I3C_MSTATUS_STATE_IDLE(reg), 0, 1000);
1106 	}
1107 
1108 	return 0;
1109 
1110 emit_stop:
1111 	svc_i3c_master_emit_stop(master);
1112 	svc_i3c_master_clear_merrwarn(master);
1113 
1114 	return ret;
1115 }
1116 
1117 static struct svc_i3c_xfer *
1118 svc_i3c_master_alloc_xfer(struct svc_i3c_master *master, unsigned int ncmds)
1119 {
1120 	struct svc_i3c_xfer *xfer;
1121 
1122 	xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
1123 	if (!xfer)
1124 		return NULL;
1125 
1126 	INIT_LIST_HEAD(&xfer->node);
1127 	xfer->ncmds = ncmds;
1128 	xfer->ret = -ETIMEDOUT;
1129 
1130 	return xfer;
1131 }
1132 
1133 static void svc_i3c_master_free_xfer(struct svc_i3c_xfer *xfer)
1134 {
1135 	kfree(xfer);
1136 }
1137 
1138 static void svc_i3c_master_dequeue_xfer_locked(struct svc_i3c_master *master,
1139 					       struct svc_i3c_xfer *xfer)
1140 {
1141 	if (master->xferqueue.cur == xfer)
1142 		master->xferqueue.cur = NULL;
1143 	else
1144 		list_del_init(&xfer->node);
1145 }
1146 
1147 static void svc_i3c_master_dequeue_xfer(struct svc_i3c_master *master,
1148 					struct svc_i3c_xfer *xfer)
1149 {
1150 	unsigned long flags;
1151 
1152 	spin_lock_irqsave(&master->xferqueue.lock, flags);
1153 	svc_i3c_master_dequeue_xfer_locked(master, xfer);
1154 	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
1155 }
1156 
1157 static void svc_i3c_master_start_xfer_locked(struct svc_i3c_master *master)
1158 {
1159 	struct svc_i3c_xfer *xfer = master->xferqueue.cur;
1160 	int ret, i;
1161 
1162 	if (!xfer)
1163 		return;
1164 
1165 	svc_i3c_master_clear_merrwarn(master);
1166 	svc_i3c_master_flush_fifo(master);
1167 
1168 	for (i = 0; i < xfer->ncmds; i++) {
1169 		struct svc_i3c_cmd *cmd = &xfer->cmds[i];
1170 
1171 		ret = svc_i3c_master_xfer(master, cmd->rnw, xfer->type,
1172 					  cmd->addr, cmd->in, cmd->out,
1173 					  cmd->len, &cmd->actual_len,
1174 					  cmd->continued);
1175 		/* cmd->xfer is NULL for I2C or CCC transfers */
1176 		if (cmd->xfer)
1177 			cmd->xfer->actual_len = cmd->actual_len;
1178 
1179 		if (ret)
1180 			break;
1181 	}
1182 
1183 	xfer->ret = ret;
1184 	complete(&xfer->comp);
1185 
1186 	if (ret < 0)
1187 		svc_i3c_master_dequeue_xfer_locked(master, xfer);
1188 
1189 	xfer = list_first_entry_or_null(&master->xferqueue.list,
1190 					struct svc_i3c_xfer,
1191 					node);
1192 	if (xfer)
1193 		list_del_init(&xfer->node);
1194 
1195 	master->xferqueue.cur = xfer;
1196 	svc_i3c_master_start_xfer_locked(master);
1197 }
1198 
1199 static void svc_i3c_master_enqueue_xfer(struct svc_i3c_master *master,
1200 					struct svc_i3c_xfer *xfer)
1201 {
1202 	unsigned long flags;
1203 	int ret;
1204 
1205 	ret = pm_runtime_resume_and_get(master->dev);
1206 	if (ret < 0) {
1207 		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
1208 		return;
1209 	}
1210 
1211 	init_completion(&xfer->comp);
1212 	spin_lock_irqsave(&master->xferqueue.lock, flags);
1213 	if (master->xferqueue.cur) {
1214 		list_add_tail(&xfer->node, &master->xferqueue.list);
1215 	} else {
1216 		master->xferqueue.cur = xfer;
1217 		svc_i3c_master_start_xfer_locked(master);
1218 	}
1219 	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
1220 
1221 	pm_runtime_mark_last_busy(master->dev);
1222 	pm_runtime_put_autosuspend(master->dev);
1223 }
1224 
1225 static bool
1226 svc_i3c_master_supports_ccc_cmd(struct i3c_master_controller *master,
1227 				const struct i3c_ccc_cmd *cmd)
1228 {
1229 	/* No software support for CCC commands targeting more than one slave */
1230 	return (cmd->ndests == 1);
1231 }
1232 
1233 static int svc_i3c_master_send_bdcast_ccc_cmd(struct svc_i3c_master *master,
1234 					      struct i3c_ccc_cmd *ccc)
1235 {
1236 	unsigned int xfer_len = ccc->dests[0].payload.len + 1;
1237 	struct svc_i3c_xfer *xfer;
1238 	struct svc_i3c_cmd *cmd;
1239 	u8 *buf;
1240 	int ret;
1241 
1242 	xfer = svc_i3c_master_alloc_xfer(master, 1);
1243 	if (!xfer)
1244 		return -ENOMEM;
1245 
1246 	buf = kmalloc(xfer_len, GFP_KERNEL);
1247 	if (!buf) {
1248 		svc_i3c_master_free_xfer(xfer);
1249 		return -ENOMEM;
1250 	}
1251 
1252 	buf[0] = ccc->id;
1253 	memcpy(&buf[1], ccc->dests[0].payload.data, ccc->dests[0].payload.len);
1254 
1255 	xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
1256 
1257 	cmd = &xfer->cmds[0];
1258 	cmd->addr = ccc->dests[0].addr;
1259 	cmd->rnw = ccc->rnw;
1260 	cmd->in = NULL;
1261 	cmd->out = buf;
1262 	cmd->len = xfer_len;
1263 	cmd->actual_len = 0;
1264 	cmd->continued = false;
1265 
1266 	mutex_lock(&master->lock);
1267 	svc_i3c_master_enqueue_xfer(master, xfer);
1268 	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1269 		svc_i3c_master_dequeue_xfer(master, xfer);
1270 	mutex_unlock(&master->lock);
1271 
1272 	ret = xfer->ret;
1273 	kfree(buf);
1274 	svc_i3c_master_free_xfer(xfer);
1275 
1276 	return ret;
1277 }
1278 
1279 static int svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master *master,
1280 					      struct i3c_ccc_cmd *ccc)
1281 {
1282 	unsigned int xfer_len = ccc->dests[0].payload.len;
1283 	unsigned int actual_len = ccc->rnw ? xfer_len : 0;
1284 	struct svc_i3c_xfer *xfer;
1285 	struct svc_i3c_cmd *cmd;
1286 	int ret;
1287 
1288 	xfer = svc_i3c_master_alloc_xfer(master, 2);
1289 	if (!xfer)
1290 		return -ENOMEM;
1291 
1292 	xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
1293 
1294 	/* Broadcast message */
1295 	cmd = &xfer->cmds[0];
1296 	cmd->addr = I3C_BROADCAST_ADDR;
1297 	cmd->rnw = 0;
1298 	cmd->in = NULL;
1299 	cmd->out = &ccc->id;
1300 	cmd->len = 1;
1301 	cmd->actual_len = 0;
1302 	cmd->continued = true;
1303 
1304 	/* Directed message */
1305 	cmd = &xfer->cmds[1];
1306 	cmd->addr = ccc->dests[0].addr;
1307 	cmd->rnw = ccc->rnw;
1308 	cmd->in = ccc->rnw ? ccc->dests[0].payload.data : NULL;
1309 	cmd->out = ccc->rnw ? NULL : ccc->dests[0].payload.data;
1310 	cmd->len = xfer_len;
1311 	cmd->actual_len = actual_len;
1312 	cmd->continued = false;
1313 
1314 	mutex_lock(&master->lock);
1315 	svc_i3c_master_enqueue_xfer(master, xfer);
1316 	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1317 		svc_i3c_master_dequeue_xfer(master, xfer);
1318 	mutex_unlock(&master->lock);
1319 
1320 	if (cmd->actual_len != xfer_len)
1321 		ccc->dests[0].payload.len = cmd->actual_len;
1322 
1323 	ret = xfer->ret;
1324 	svc_i3c_master_free_xfer(xfer);
1325 
1326 	return ret;
1327 }
1328 
1329 static int svc_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
1330 				       struct i3c_ccc_cmd *cmd)
1331 {
1332 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1333 	bool broadcast = cmd->id < 0x80;
1334 	int ret;
1335 
1336 	if (broadcast)
1337 		ret = svc_i3c_master_send_bdcast_ccc_cmd(master, cmd);
1338 	else
1339 		ret = svc_i3c_master_send_direct_ccc_cmd(master, cmd);
1340 
1341 	if (ret)
1342 		cmd->err = I3C_ERROR_M2;
1343 
1344 	return ret;
1345 }
1346 
1347 static int svc_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
1348 				     struct i3c_priv_xfer *xfers,
1349 				     int nxfers)
1350 {
1351 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
1352 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1353 	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1354 	struct svc_i3c_xfer *xfer;
1355 	int ret, i;
1356 
1357 	xfer = svc_i3c_master_alloc_xfer(master, nxfers);
1358 	if (!xfer)
1359 		return -ENOMEM;
1360 
1361 	xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
1362 
1363 	for (i = 0; i < nxfers; i++) {
1364 		struct svc_i3c_cmd *cmd = &xfer->cmds[i];
1365 
1366 		cmd->xfer = &xfers[i];
1367 		cmd->addr = master->addrs[data->index];
1368 		cmd->rnw = xfers[i].rnw;
1369 		cmd->in = xfers[i].rnw ? xfers[i].data.in : NULL;
1370 		cmd->out = xfers[i].rnw ? NULL : xfers[i].data.out;
1371 		cmd->len = xfers[i].len;
1372 		cmd->actual_len = xfers[i].rnw ? xfers[i].len : 0;
1373 		cmd->continued = (i + 1) < nxfers;
1374 	}
1375 
1376 	mutex_lock(&master->lock);
1377 	svc_i3c_master_enqueue_xfer(master, xfer);
1378 	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1379 		svc_i3c_master_dequeue_xfer(master, xfer);
1380 	mutex_unlock(&master->lock);
1381 
1382 	ret = xfer->ret;
1383 	svc_i3c_master_free_xfer(xfer);
1384 
1385 	return ret;
1386 }
1387 
1388 static int svc_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
1389 				    const struct i2c_msg *xfers,
1390 				    int nxfers)
1391 {
1392 	struct i3c_master_controller *m = i2c_dev_get_master(dev);
1393 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1394 	struct svc_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
1395 	struct svc_i3c_xfer *xfer;
1396 	int ret, i;
1397 
1398 	xfer = svc_i3c_master_alloc_xfer(master, nxfers);
1399 	if (!xfer)
1400 		return -ENOMEM;
1401 
1402 	xfer->type = SVC_I3C_MCTRL_TYPE_I2C;
1403 
1404 	for (i = 0; i < nxfers; i++) {
1405 		struct svc_i3c_cmd *cmd = &xfer->cmds[i];
1406 
1407 		cmd->addr = master->addrs[data->index];
1408 		cmd->rnw = xfers[i].flags & I2C_M_RD;
1409 		cmd->in = cmd->rnw ? xfers[i].buf : NULL;
1410 		cmd->out = cmd->rnw ? NULL : xfers[i].buf;
1411 		cmd->len = xfers[i].len;
1412 		cmd->actual_len = cmd->rnw ? xfers[i].len : 0;
1413 		cmd->continued = (i + 1 < nxfers);
1414 	}
1415 
1416 	mutex_lock(&master->lock);
1417 	svc_i3c_master_enqueue_xfer(master, xfer);
1418 	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1419 		svc_i3c_master_dequeue_xfer(master, xfer);
1420 	mutex_unlock(&master->lock);
1421 
1422 	ret = xfer->ret;
1423 	svc_i3c_master_free_xfer(xfer);
1424 
1425 	return ret;
1426 }
1427 
1428 static int svc_i3c_master_request_ibi(struct i3c_dev_desc *dev,
1429 				      const struct i3c_ibi_setup *req)
1430 {
1431 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
1432 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1433 	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1434 	unsigned long flags;
1435 	unsigned int i;
1436 
1437 	if (dev->ibi->max_payload_len > SVC_I3C_FIFO_SIZE) {
1438 		dev_err(master->dev, "IBI max payload %d should be < %d\n",
1439 			dev->ibi->max_payload_len, SVC_I3C_FIFO_SIZE);
1440 		return -ERANGE;
1441 	}
1442 
1443 	data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
1444 	if (IS_ERR(data->ibi_pool))
1445 		return PTR_ERR(data->ibi_pool);
1446 
1447 	spin_lock_irqsave(&master->ibi.lock, flags);
1448 	for (i = 0; i < master->ibi.num_slots; i++) {
1449 		if (!master->ibi.slots[i]) {
1450 			data->ibi = i;
1451 			master->ibi.slots[i] = dev;
1452 			break;
1453 		}
1454 	}
1455 	spin_unlock_irqrestore(&master->ibi.lock, flags);
1456 
1457 	if (i < master->ibi.num_slots)
1458 		return 0;
1459 
1460 	i3c_generic_ibi_free_pool(data->ibi_pool);
1461 	data->ibi_pool = NULL;
1462 
1463 	return -ENOSPC;
1464 }
1465 
1466 static void svc_i3c_master_free_ibi(struct i3c_dev_desc *dev)
1467 {
1468 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
1469 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1470 	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1471 	unsigned long flags;
1472 
1473 	spin_lock_irqsave(&master->ibi.lock, flags);
1474 	master->ibi.slots[data->ibi] = NULL;
1475 	data->ibi = -1;
1476 	spin_unlock_irqrestore(&master->ibi.lock, flags);
1477 
1478 	i3c_generic_ibi_free_pool(data->ibi_pool);
1479 }
1480 
1481 static int svc_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
1482 {
1483 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
1484 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1485 	int ret;
1486 
1487 	ret = pm_runtime_resume_and_get(master->dev);
1488 	if (ret < 0) {
1489 		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
1490 		return ret;
1491 	}
1492 
1493 	svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
1494 
1495 	return i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
1496 }
1497 
1498 static int svc_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
1499 {
1500 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
1501 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1502 	int ret;
1503 
1504 	svc_i3c_master_disable_interrupts(master);
1505 
1506 	ret = i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
1507 
1508 	pm_runtime_mark_last_busy(master->dev);
1509 	pm_runtime_put_autosuspend(master->dev);
1510 
1511 	return ret;
1512 }
1513 
1514 static void svc_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
1515 					    struct i3c_ibi_slot *slot)
1516 {
1517 	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1518 
1519 	i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
1520 }
1521 
1522 static const struct i3c_master_controller_ops svc_i3c_master_ops = {
1523 	.bus_init = svc_i3c_master_bus_init,
1524 	.bus_cleanup = svc_i3c_master_bus_cleanup,
1525 	.attach_i3c_dev = svc_i3c_master_attach_i3c_dev,
1526 	.detach_i3c_dev = svc_i3c_master_detach_i3c_dev,
1527 	.reattach_i3c_dev = svc_i3c_master_reattach_i3c_dev,
1528 	.attach_i2c_dev = svc_i3c_master_attach_i2c_dev,
1529 	.detach_i2c_dev = svc_i3c_master_detach_i2c_dev,
1530 	.do_daa = svc_i3c_master_do_daa,
1531 	.supports_ccc_cmd = svc_i3c_master_supports_ccc_cmd,
1532 	.send_ccc_cmd = svc_i3c_master_send_ccc_cmd,
1533 	.priv_xfers = svc_i3c_master_priv_xfers,
1534 	.i2c_xfers = svc_i3c_master_i2c_xfers,
1535 	.request_ibi = svc_i3c_master_request_ibi,
1536 	.free_ibi = svc_i3c_master_free_ibi,
1537 	.recycle_ibi_slot = svc_i3c_master_recycle_ibi_slot,
1538 	.enable_ibi = svc_i3c_master_enable_ibi,
1539 	.disable_ibi = svc_i3c_master_disable_ibi,
1540 };
1541 
1542 static int svc_i3c_master_prepare_clks(struct svc_i3c_master *master)
1543 {
1544 	int ret = 0;
1545 
1546 	ret = clk_prepare_enable(master->pclk);
1547 	if (ret)
1548 		return ret;
1549 
1550 	ret = clk_prepare_enable(master->fclk);
1551 	if (ret) {
1552 		clk_disable_unprepare(master->pclk);
1553 		return ret;
1554 	}
1555 
1556 	ret = clk_prepare_enable(master->sclk);
1557 	if (ret) {
1558 		clk_disable_unprepare(master->pclk);
1559 		clk_disable_unprepare(master->fclk);
1560 		return ret;
1561 	}
1562 
1563 	return 0;
1564 }
1565 
1566 static void svc_i3c_master_unprepare_clks(struct svc_i3c_master *master)
1567 {
1568 	clk_disable_unprepare(master->pclk);
1569 	clk_disable_unprepare(master->fclk);
1570 	clk_disable_unprepare(master->sclk);
1571 }
1572 
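/*
 * For reference, a minimal and purely illustrative device tree node matching
 * the resources requested in probe() below could look like this (addresses,
 * interrupt specifiers and clock phandles are placeholders; the DT binding
 * document for "silvaco,i3c-master" is the authoritative reference):
 *
 *	i3c-master@a0000000 {
 *		compatible = "silvaco,i3c-master";
 *		reg = <0xa0000000 0x1000>;
 *		interrupts = <0>;
 *		clocks = <&pclk>, <&fclk>, <&sclk>;
 *		clock-names = "pclk", "fast_clk", "slow_clk";
 *	};
 */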
1573 static int svc_i3c_master_probe(struct platform_device *pdev)
1574 {
1575 	struct device *dev = &pdev->dev;
1576 	struct svc_i3c_master *master;
1577 	int ret;
1578 
1579 	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
1580 	if (!master)
1581 		return -ENOMEM;
1582 
1583 	master->regs = devm_platform_ioremap_resource(pdev, 0);
1584 	if (IS_ERR(master->regs))
1585 		return PTR_ERR(master->regs);
1586 
1587 	master->pclk = devm_clk_get(dev, "pclk");
1588 	if (IS_ERR(master->pclk))
1589 		return PTR_ERR(master->pclk);
1590 
1591 	master->fclk = devm_clk_get(dev, "fast_clk");
1592 	if (IS_ERR(master->fclk))
1593 		return PTR_ERR(master->fclk);
1594 
1595 	master->sclk = devm_clk_get(dev, "slow_clk");
1596 	if (IS_ERR(master->sclk))
1597 		return PTR_ERR(master->sclk);
1598 
1599 	master->irq = platform_get_irq(pdev, 0);
1600 	if (master->irq < 0)
1601 		return master->irq;
1602 
1603 	master->dev = dev;
1604 
1605 	ret = svc_i3c_master_prepare_clks(master);
1606 	if (ret)
1607 		return ret;
1608 
1609 	INIT_WORK(&master->hj_work, svc_i3c_master_hj_work);
1610 	INIT_WORK(&master->ibi_work, svc_i3c_master_ibi_work);
1611 	mutex_init(&master->lock);
1612 
1613 	ret = devm_request_irq(dev, master->irq, svc_i3c_master_irq_handler,
1614 			       IRQF_NO_SUSPEND, "svc-i3c-irq", master);
1615 	if (ret)
1616 		goto err_disable_clks;
1617 
1618 	master->free_slots = GENMASK(SVC_I3C_MAX_DEVS - 1, 0);
1619 
1620 	spin_lock_init(&master->xferqueue.lock);
1621 	INIT_LIST_HEAD(&master->xferqueue.list);
1622 
1623 	spin_lock_init(&master->ibi.lock);
1624 	master->ibi.num_slots = SVC_I3C_MAX_DEVS;
1625 	master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
1626 					 sizeof(*master->ibi.slots),
1627 					 GFP_KERNEL);
1628 	if (!master->ibi.slots) {
1629 		ret = -ENOMEM;
1630 		goto err_disable_clks;
1631 	}
1632 
1633 	platform_set_drvdata(pdev, master);
1634 
1635 	pm_runtime_set_autosuspend_delay(&pdev->dev, SVC_I3C_PM_TIMEOUT_MS);
1636 	pm_runtime_use_autosuspend(&pdev->dev);
1637 	pm_runtime_get_noresume(&pdev->dev);
1638 	pm_runtime_set_active(&pdev->dev);
1639 	pm_runtime_enable(&pdev->dev);
1640 
1641 	svc_i3c_master_reset(master);
1642 
1643 	/* Register the master */
1644 	ret = i3c_master_register(&master->base, &pdev->dev,
1645 				  &svc_i3c_master_ops, false);
1646 	if (ret)
1647 		goto rpm_disable;
1648 
1649 	pm_runtime_mark_last_busy(&pdev->dev);
1650 	pm_runtime_put_autosuspend(&pdev->dev);
1651 
1652 	return 0;
1653 
1654 rpm_disable:
1655 	pm_runtime_dont_use_autosuspend(&pdev->dev);
1656 	pm_runtime_put_noidle(&pdev->dev);
1657 	pm_runtime_set_suspended(&pdev->dev);
1658 	pm_runtime_disable(&pdev->dev);
1659 
1660 err_disable_clks:
1661 	svc_i3c_master_unprepare_clks(master);
1662 
1663 	return ret;
1664 }
1665 
1666 static void svc_i3c_master_remove(struct platform_device *pdev)
1667 {
1668 	struct svc_i3c_master *master = platform_get_drvdata(pdev);
1669 
1670 	i3c_master_unregister(&master->base);
1671 
1672 	pm_runtime_dont_use_autosuspend(&pdev->dev);
1673 	pm_runtime_disable(&pdev->dev);
1674 }
1675 
1676 static void svc_i3c_save_regs(struct svc_i3c_master *master)
1677 {
1678 	master->saved_regs.mconfig = readl(master->regs + SVC_I3C_MCONFIG);
1679 	master->saved_regs.mdynaddr = readl(master->regs + SVC_I3C_MDYNADDR);
1680 }
1681 
1682 static void svc_i3c_restore_regs(struct svc_i3c_master *master)
1683 {
1684 	if (readl(master->regs + SVC_I3C_MDYNADDR) !=
1685 	    master->saved_regs.mdynaddr) {
1686 		writel(master->saved_regs.mconfig,
1687 		       master->regs + SVC_I3C_MCONFIG);
1688 		writel(master->saved_regs.mdynaddr,
1689 		       master->regs + SVC_I3C_MDYNADDR);
1690 	}
1691 }
1692 
1693 static int __maybe_unused svc_i3c_runtime_suspend(struct device *dev)
1694 {
1695 	struct svc_i3c_master *master = dev_get_drvdata(dev);
1696 
1697 	svc_i3c_save_regs(master);
1698 	svc_i3c_master_unprepare_clks(master);
1699 	pinctrl_pm_select_sleep_state(dev);
1700 
1701 	return 0;
1702 }
1703 
1704 static int __maybe_unused svc_i3c_runtime_resume(struct device *dev)
1705 {
1706 	struct svc_i3c_master *master = dev_get_drvdata(dev);
1707 
1708 	pinctrl_pm_select_default_state(dev);
1709 	svc_i3c_master_prepare_clks(master);
1710 
1711 	svc_i3c_restore_regs(master);
1712 
1713 	return 0;
1714 }
1715 
1716 static const struct dev_pm_ops svc_i3c_pm_ops = {
1717 	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
1718 				      pm_runtime_force_resume)
1719 	SET_RUNTIME_PM_OPS(svc_i3c_runtime_suspend,
1720 			   svc_i3c_runtime_resume, NULL)
1721 };
1722 
1723 static const struct of_device_id svc_i3c_master_of_match_tbl[] = {
1724 	{ .compatible = "silvaco,i3c-master" },
1725 	{ /* sentinel */ },
1726 };
1727 MODULE_DEVICE_TABLE(of, svc_i3c_master_of_match_tbl);
1728 
1729 static struct platform_driver svc_i3c_master = {
1730 	.probe = svc_i3c_master_probe,
1731 	.remove_new = svc_i3c_master_remove,
1732 	.driver = {
1733 		.name = "silvaco-i3c-master",
1734 		.of_match_table = svc_i3c_master_of_match_tbl,
1735 		.pm = &svc_i3c_pm_ops,
1736 	},
1737 };
1738 module_platform_driver(svc_i3c_master);
1739 
1740 MODULE_AUTHOR("Conor Culhane <conor.culhane@silvaco.com>");
1741 MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
1742 MODULE_DESCRIPTION("Silvaco dual-role I3C master driver");
1743 MODULE_LICENSE("GPL v2");
1744