1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Silvaco dual-role I3C master driver
4  *
5  * Copyright (C) 2020 Silvaco
6  * Author: Miquel RAYNAL <miquel.raynal@bootlin.com>
7  * Based on a work from: Conor Culhane <conor.culhane@silvaco.com>
8  */
9 
10 #include <linux/bitfield.h>
11 #include <linux/clk.h>
12 #include <linux/completion.h>
13 #include <linux/errno.h>
14 #include <linux/i3c/master.h>
15 #include <linux/interrupt.h>
16 #include <linux/iopoll.h>
17 #include <linux/list.h>
18 #include <linux/module.h>
19 #include <linux/of.h>
20 #include <linux/pinctrl/consumer.h>
21 #include <linux/platform_device.h>
22 #include <linux/pm_runtime.h>
23 
24 /* Master Mode Registers */
25 #define SVC_I3C_MCONFIG      0x000
26 #define   SVC_I3C_MCONFIG_MASTER_EN BIT(0)
27 #define   SVC_I3C_MCONFIG_DISTO(x) FIELD_PREP(BIT(3), (x))
28 #define   SVC_I3C_MCONFIG_HKEEP(x) FIELD_PREP(GENMASK(5, 4), (x))
29 #define   SVC_I3C_MCONFIG_ODSTOP(x) FIELD_PREP(BIT(6), (x))
30 #define   SVC_I3C_MCONFIG_PPBAUD(x) FIELD_PREP(GENMASK(11, 8), (x))
31 #define   SVC_I3C_MCONFIG_PPLOW(x) FIELD_PREP(GENMASK(15, 12), (x))
32 #define   SVC_I3C_MCONFIG_ODBAUD(x) FIELD_PREP(GENMASK(23, 16), (x))
33 #define   SVC_I3C_MCONFIG_ODHPP(x) FIELD_PREP(BIT(24), (x))
34 #define   SVC_I3C_MCONFIG_SKEW(x) FIELD_PREP(GENMASK(27, 25), (x))
35 #define   SVC_I3C_MCONFIG_I2CBAUD(x) FIELD_PREP(GENMASK(31, 28), (x))
36 
37 #define SVC_I3C_MCTRL        0x084
38 #define   SVC_I3C_MCTRL_REQUEST_MASK GENMASK(2, 0)
39 #define   SVC_I3C_MCTRL_REQUEST_NONE 0
40 #define   SVC_I3C_MCTRL_REQUEST_START_ADDR 1
41 #define   SVC_I3C_MCTRL_REQUEST_STOP 2
42 #define   SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK 3
43 #define   SVC_I3C_MCTRL_REQUEST_PROC_DAA 4
44 #define   SVC_I3C_MCTRL_REQUEST_AUTO_IBI 7
45 #define   SVC_I3C_MCTRL_TYPE_I3C 0
46 #define   SVC_I3C_MCTRL_TYPE_I2C BIT(4)
47 #define   SVC_I3C_MCTRL_IBIRESP_AUTO 0
48 #define   SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE 0
49 #define   SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE BIT(7)
50 #define   SVC_I3C_MCTRL_IBIRESP_NACK BIT(6)
51 #define   SVC_I3C_MCTRL_IBIRESP_MANUAL GENMASK(7, 6)
52 #define   SVC_I3C_MCTRL_DIR(x) FIELD_PREP(BIT(8), (x))
53 #define   SVC_I3C_MCTRL_DIR_WRITE 0
54 #define   SVC_I3C_MCTRL_DIR_READ 1
55 #define   SVC_I3C_MCTRL_ADDR(x) FIELD_PREP(GENMASK(15, 9), (x))
56 #define   SVC_I3C_MCTRL_RDTERM(x) FIELD_PREP(GENMASK(23, 16), (x))
57 
58 #define SVC_I3C_MSTATUS      0x088
59 #define   SVC_I3C_MSTATUS_STATE(x) FIELD_GET(GENMASK(2, 0), (x))
60 #define   SVC_I3C_MSTATUS_STATE_DAA(x) (SVC_I3C_MSTATUS_STATE(x) == 5)
61 #define   SVC_I3C_MSTATUS_STATE_IDLE(x) (SVC_I3C_MSTATUS_STATE(x) == 0)
62 #define   SVC_I3C_MSTATUS_BETWEEN(x) FIELD_GET(BIT(4), (x))
63 #define   SVC_I3C_MSTATUS_NACKED(x) FIELD_GET(BIT(5), (x))
64 #define   SVC_I3C_MSTATUS_IBITYPE(x) FIELD_GET(GENMASK(7, 6), (x))
65 #define   SVC_I3C_MSTATUS_IBITYPE_IBI 1
66 #define   SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST 2
67 #define   SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN 3
68 #define   SVC_I3C_MINT_SLVSTART BIT(8)
69 #define   SVC_I3C_MINT_MCTRLDONE BIT(9)
70 #define   SVC_I3C_MINT_COMPLETE BIT(10)
71 #define   SVC_I3C_MINT_RXPEND BIT(11)
72 #define   SVC_I3C_MINT_TXNOTFULL BIT(12)
73 #define   SVC_I3C_MINT_IBIWON BIT(13)
74 #define   SVC_I3C_MINT_ERRWARN BIT(15)
75 #define   SVC_I3C_MSTATUS_SLVSTART(x) FIELD_GET(SVC_I3C_MINT_SLVSTART, (x))
76 #define   SVC_I3C_MSTATUS_MCTRLDONE(x) FIELD_GET(SVC_I3C_MINT_MCTRLDONE, (x))
77 #define   SVC_I3C_MSTATUS_COMPLETE(x) FIELD_GET(SVC_I3C_MINT_COMPLETE, (x))
78 #define   SVC_I3C_MSTATUS_RXPEND(x) FIELD_GET(SVC_I3C_MINT_RXPEND, (x))
79 #define   SVC_I3C_MSTATUS_TXNOTFULL(x) FIELD_GET(SVC_I3C_MINT_TXNOTFULL, (x))
80 #define   SVC_I3C_MSTATUS_IBIWON(x) FIELD_GET(SVC_I3C_MINT_IBIWON, (x))
81 #define   SVC_I3C_MSTATUS_ERRWARN(x) FIELD_GET(SVC_I3C_MINT_ERRWARN, (x))
82 #define   SVC_I3C_MSTATUS_IBIADDR(x) FIELD_GET(GENMASK(30, 24), (x))
83 
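/*
 * The IBIRULES register stores the low 6 bits of up to five target addresses
 * (one per 6-bit slot), an MSB0 flag indicating that all listed addresses have
 * their most significant bit cleared, and a NOBYTE flag indicating that the
 * listed devices do not carry a mandatory data byte with their IBIs.
 */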
84 #define SVC_I3C_IBIRULES     0x08C
85 #define   SVC_I3C_IBIRULES_ADDR(slot, addr) FIELD_PREP(GENMASK(29, 0), \
86 						       ((addr) & 0x3F) << ((slot) * 6))
87 #define   SVC_I3C_IBIRULES_ADDRS 5
88 #define   SVC_I3C_IBIRULES_MSB0 BIT(30)
89 #define   SVC_I3C_IBIRULES_NOBYTE BIT(31)
90 #define   SVC_I3C_IBIRULES_MANDBYTE 0
91 #define SVC_I3C_MINTSET      0x090
92 #define SVC_I3C_MINTCLR      0x094
93 #define SVC_I3C_MINTMASKED   0x098
94 #define SVC_I3C_MERRWARN     0x09C
95 #define SVC_I3C_MDMACTRL     0x0A0
96 #define SVC_I3C_MDATACTRL    0x0AC
97 #define   SVC_I3C_MDATACTRL_FLUSHTB BIT(0)
98 #define   SVC_I3C_MDATACTRL_FLUSHRB BIT(1)
99 #define   SVC_I3C_MDATACTRL_UNLOCK_TRIG BIT(3)
100 #define   SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL GENMASK(5, 4)
101 #define   SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY 0
102 #define   SVC_I3C_MDATACTRL_RXCOUNT(x) FIELD_GET(GENMASK(28, 24), (x))
103 #define   SVC_I3C_MDATACTRL_TXFULL BIT(30)
104 #define   SVC_I3C_MDATACTRL_RXEMPTY BIT(31)
105 
106 #define SVC_I3C_MWDATAB      0x0B0
107 #define   SVC_I3C_MWDATAB_END BIT(8)
108 
109 #define SVC_I3C_MWDATABE     0x0B4
110 #define SVC_I3C_MWDATAH      0x0B8
111 #define SVC_I3C_MWDATAHE     0x0BC
112 #define SVC_I3C_MRDATAB      0x0C0
113 #define SVC_I3C_MRDATAH      0x0C8
114 #define SVC_I3C_MWMSG_SDR    0x0D0
115 #define SVC_I3C_MRMSG_SDR    0x0D4
116 #define SVC_I3C_MWMSG_DDR    0x0D8
117 #define SVC_I3C_MRMSG_DDR    0x0DC
118 
119 #define SVC_I3C_MDYNADDR     0x0E4
120 #define   SVC_MDYNADDR_VALID BIT(0)
121 #define   SVC_MDYNADDR_ADDR(x) FIELD_PREP(GENMASK(7, 1), (x))
122 
123 #define SVC_I3C_MAX_DEVS 32
124 #define SVC_I3C_PM_TIMEOUT_MS 1000
125 
126 /* This parameter depends on the implementation and may be tuned */
127 #define SVC_I3C_FIFO_SIZE 16
128 
129 struct svc_i3c_cmd {
130 	u8 addr;
131 	bool rnw;
132 	u8 *in;
133 	const void *out;
134 	unsigned int len;
135 	unsigned int read_len;
136 	bool continued;
137 };
138 
139 struct svc_i3c_xfer {
140 	struct list_head node;
141 	struct completion comp;
142 	int ret;
143 	unsigned int type;
144 	unsigned int ncmds;
145 	struct svc_i3c_cmd cmds[];
146 };
147 
148 /**
149  * struct svc_i3c_master - Silvaco I3C Master structure
150  * @base: I3C master controller
151  * @dev: Corresponding device
 * @regs: Memory-mapped controller registers
153  * @free_slots: Bit array of available slots
154  * @addrs: Array containing the dynamic addresses of each attached device
155  * @descs: Array of descriptors, one per attached device
156  * @hj_work: Hot-join work
157  * @ibi_work: IBI work
158  * @irq: Main interrupt
159  * @pclk: System clock
160  * @fclk: Fast clock (bus)
161  * @sclk: Slow clock (other events)
162  * @xferqueue: Transfer queue structure
163  * @xferqueue.list: List member
164  * @xferqueue.cur: Current ongoing transfer
165  * @xferqueue.lock: Queue lock
166  * @ibi: IBI structure
167  * @ibi.num_slots: Number of slots available in @ibi.slots
168  * @ibi.slots: Available IBI slots
169  * @ibi.tbq_slot: To be queued IBI slot
170  * @ibi.lock: IBI lock
171  */
172 struct svc_i3c_master {
173 	struct i3c_master_controller base;
174 	struct device *dev;
175 	void __iomem *regs;
176 	u32 free_slots;
177 	u8 addrs[SVC_I3C_MAX_DEVS];
178 	struct i3c_dev_desc *descs[SVC_I3C_MAX_DEVS];
179 	struct work_struct hj_work;
180 	struct work_struct ibi_work;
181 	int irq;
182 	struct clk *pclk;
183 	struct clk *fclk;
184 	struct clk *sclk;
185 	struct {
186 		struct list_head list;
187 		struct svc_i3c_xfer *cur;
188 		/* Prevent races between transfers */
189 		spinlock_t lock;
190 	} xferqueue;
191 	struct {
192 		unsigned int num_slots;
193 		struct i3c_dev_desc **slots;
194 		struct i3c_ibi_slot *tbq_slot;
195 		/* Prevent races within IBI handlers */
196 		spinlock_t lock;
197 	} ibi;
198 };
199 
200 /**
201  * struct svc_i3c_i2c_dev_data - Device specific data
202  * @index: Index in the master tables corresponding to this device
203  * @ibi: IBI slot index in the master structure
204  * @ibi_pool: IBI pool associated to this device
205  */
206 struct svc_i3c_i2c_dev_data {
207 	u8 index;
208 	int ibi;
209 	struct i3c_generic_ibi_pool *ibi_pool;
210 };
211 
212 static bool svc_i3c_master_error(struct svc_i3c_master *master)
213 {
214 	u32 mstatus, merrwarn;
215 
216 	mstatus = readl(master->regs + SVC_I3C_MSTATUS);
217 	if (SVC_I3C_MSTATUS_ERRWARN(mstatus)) {
218 		merrwarn = readl(master->regs + SVC_I3C_MERRWARN);
219 		writel(merrwarn, master->regs + SVC_I3C_MERRWARN);
220 		dev_err(master->dev,
221 			"Error condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
222 			mstatus, merrwarn);
223 
224 		return true;
225 	}
226 
227 	return false;
228 }
229 
230 static void svc_i3c_master_enable_interrupts(struct svc_i3c_master *master, u32 mask)
231 {
232 	writel(mask, master->regs + SVC_I3C_MINTSET);
233 }
234 
235 static void svc_i3c_master_disable_interrupts(struct svc_i3c_master *master)
236 {
237 	u32 mask = readl(master->regs + SVC_I3C_MINTSET);
238 
239 	writel(mask, master->regs + SVC_I3C_MINTCLR);
240 }
241 
242 static void svc_i3c_master_clear_merrwarn(struct svc_i3c_master *master)
243 {
244 	/* Clear pending warnings */
245 	writel(readl(master->regs + SVC_I3C_MERRWARN),
246 	       master->regs + SVC_I3C_MERRWARN);
247 }
248 
249 static void svc_i3c_master_flush_fifo(struct svc_i3c_master *master)
250 {
251 	/* Flush FIFOs */
252 	writel(SVC_I3C_MDATACTRL_FLUSHTB | SVC_I3C_MDATACTRL_FLUSHRB,
253 	       master->regs + SVC_I3C_MDATACTRL);
254 }
255 
256 static void svc_i3c_master_reset_fifo_trigger(struct svc_i3c_master *master)
257 {
258 	u32 reg;
259 
	/* Set RX and TX trigger levels, flush FIFOs */
261 	reg = SVC_I3C_MDATACTRL_FLUSHTB |
262 	      SVC_I3C_MDATACTRL_FLUSHRB |
263 	      SVC_I3C_MDATACTRL_UNLOCK_TRIG |
264 	      SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL |
265 	      SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY;
266 	writel(reg, master->regs + SVC_I3C_MDATACTRL);
267 }
268 
269 static void svc_i3c_master_reset(struct svc_i3c_master *master)
270 {
271 	svc_i3c_master_clear_merrwarn(master);
272 	svc_i3c_master_reset_fifo_trigger(master);
273 	svc_i3c_master_disable_interrupts(master);
274 }
275 
276 static inline struct svc_i3c_master *
277 to_svc_i3c_master(struct i3c_master_controller *master)
278 {
279 	return container_of(master, struct svc_i3c_master, base);
280 }
281 
282 static void svc_i3c_master_hj_work(struct work_struct *work)
283 {
284 	struct svc_i3c_master *master;
285 
286 	master = container_of(work, struct svc_i3c_master, hj_work);
287 	i3c_master_do_daa(&master->base);
288 }
289 
290 static struct i3c_dev_desc *
291 svc_i3c_master_dev_from_addr(struct svc_i3c_master *master,
292 			     unsigned int ibiaddr)
293 {
294 	int i;
295 
296 	for (i = 0; i < SVC_I3C_MAX_DEVS; i++)
297 		if (master->addrs[i] == ibiaddr)
298 			break;
299 
300 	if (i == SVC_I3C_MAX_DEVS)
301 		return NULL;
302 
303 	return master->descs[i];
304 }
305 
306 static void svc_i3c_master_emit_stop(struct svc_i3c_master *master)
307 {
308 	writel(SVC_I3C_MCTRL_REQUEST_STOP, master->regs + SVC_I3C_MCTRL);
309 
310 	/*
	 * This delay is necessary after the emission of a stop, otherwise, e.g.,
312 	 * repeating IBIs do not get detected. There is a note in the manual
313 	 * about it, stating that the stop condition might not be settled
314 	 * correctly if a start condition follows too rapidly.
315 	 */
316 	udelay(1);
317 }
318 
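/*
 * Drain the IBI payload from the RX FIFO into a free slot of the device's IBI
 * pool. The slot is only queued to the core later, from the IBI work handler,
 * once the bus error status has been checked.
 */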
319 static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
320 				     struct i3c_dev_desc *dev)
321 {
322 	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
323 	struct i3c_ibi_slot *slot;
324 	unsigned int count;
325 	u32 mdatactrl;
326 	u8 *buf;
327 
328 	slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
329 	if (!slot)
330 		return -ENOSPC;
331 
332 	slot->len = 0;
333 	buf = slot->data;
334 
	while (SVC_I3C_MSTATUS_RXPEND(readl(master->regs + SVC_I3C_MSTATUS)) &&
	       slot->len < SVC_I3C_FIFO_SIZE) {
		mdatactrl = readl(master->regs + SVC_I3C_MDATACTRL);
		count = SVC_I3C_MDATACTRL_RXCOUNT(mdatactrl);
		/* MRDATAB is a byte-wide register, so use byte accesses */
		readsb(master->regs + SVC_I3C_MRDATAB, buf, count);
340 		slot->len += count;
341 		buf += count;
342 	}
343 
344 	master->ibi.tbq_slot = slot;
345 
346 	return 0;
347 }
348 
349 static void svc_i3c_master_ack_ibi(struct svc_i3c_master *master,
350 				   bool mandatory_byte)
351 {
352 	unsigned int ibi_ack_nack;
353 
354 	ibi_ack_nack = SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK;
355 	if (mandatory_byte)
356 		ibi_ack_nack |= SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE;
357 	else
358 		ibi_ack_nack |= SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE;
359 
360 	writel(ibi_ack_nack, master->regs + SVC_I3C_MCTRL);
361 }
362 
363 static void svc_i3c_master_nack_ibi(struct svc_i3c_master *master)
364 {
365 	writel(SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK |
366 	       SVC_I3C_MCTRL_IBIRESP_NACK,
367 	       master->regs + SVC_I3C_MCTRL);
368 }
369 
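/*
 * IBI handling bottom half: the hard IRQ handler only detects SLVSTART and
 * defers to this work item, which issues an AUTOIBI request, retrieves the IBI
 * type and address from MSTATUS, ACKs or NACKs the request and, for regular
 * IBIs, forwards the received payload to the core outside of atomic context.
 */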
370 static void svc_i3c_master_ibi_work(struct work_struct *work)
371 {
372 	struct svc_i3c_master *master = container_of(work, struct svc_i3c_master, ibi_work);
373 	struct svc_i3c_i2c_dev_data *data;
374 	unsigned int ibitype, ibiaddr;
375 	struct i3c_dev_desc *dev;
376 	u32 status, val;
377 	int ret;
378 
379 	/* Acknowledge the incoming interrupt with the AUTOIBI mechanism */
380 	writel(SVC_I3C_MCTRL_REQUEST_AUTO_IBI |
381 	       SVC_I3C_MCTRL_IBIRESP_AUTO,
382 	       master->regs + SVC_I3C_MCTRL);
383 
384 	/* Wait for IBIWON, should take approximately 100us */
385 	ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val,
386 					 SVC_I3C_MSTATUS_IBIWON(val), 0, 1000);
387 	if (ret) {
388 		dev_err(master->dev, "Timeout when polling for IBIWON\n");
389 		goto reenable_ibis;
390 	}
391 
392 	/* Clear the interrupt status */
393 	writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
394 
395 	status = readl(master->regs + SVC_I3C_MSTATUS);
396 	ibitype = SVC_I3C_MSTATUS_IBITYPE(status);
397 	ibiaddr = SVC_I3C_MSTATUS_IBIADDR(status);
398 
	/* Handle the critical responses to IBIs */
400 	switch (ibitype) {
401 	case SVC_I3C_MSTATUS_IBITYPE_IBI:
402 		dev = svc_i3c_master_dev_from_addr(master, ibiaddr);
403 		if (!dev)
404 			svc_i3c_master_nack_ibi(master);
405 		else
406 			svc_i3c_master_handle_ibi(master, dev);
407 		break;
408 	case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
409 		svc_i3c_master_ack_ibi(master, false);
410 		break;
411 	case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
412 		svc_i3c_master_nack_ibi(master);
413 		break;
414 	default:
415 		break;
416 	}
417 
418 	/*
419 	 * If an error happened, we probably got interrupted and the exchange
	 * timed out. In this case we just drop everything, emit a stop and wait
421 	 * for the slave to interrupt again.
422 	 */
423 	if (svc_i3c_master_error(master)) {
424 		if (master->ibi.tbq_slot) {
425 			data = i3c_dev_get_master_data(dev);
426 			i3c_generic_ibi_recycle_slot(data->ibi_pool,
427 						     master->ibi.tbq_slot);
428 			master->ibi.tbq_slot = NULL;
429 		}
430 
431 		svc_i3c_master_emit_stop(master);
432 
433 		goto reenable_ibis;
434 	}
435 
	/* Handle the non-critical tasks */
437 	switch (ibitype) {
438 	case SVC_I3C_MSTATUS_IBITYPE_IBI:
439 		if (dev) {
440 			i3c_master_queue_ibi(dev, master->ibi.tbq_slot);
441 			master->ibi.tbq_slot = NULL;
442 		}
443 		svc_i3c_master_emit_stop(master);
444 		break;
445 	case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
446 		queue_work(master->base.wq, &master->hj_work);
447 		break;
448 	case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
449 	default:
450 		break;
451 	}
452 
453 reenable_ibis:
454 	svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
455 }
456 
457 static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id)
458 {
459 	struct svc_i3c_master *master = (struct svc_i3c_master *)dev_id;
460 	u32 active = readl(master->regs + SVC_I3C_MINTMASKED);
461 
462 	if (!SVC_I3C_MSTATUS_SLVSTART(active))
463 		return IRQ_NONE;
464 
465 	/* Clear the interrupt status */
466 	writel(SVC_I3C_MINT_SLVSTART, master->regs + SVC_I3C_MSTATUS);
467 
468 	svc_i3c_master_disable_interrupts(master);
469 
470 	/* Handle the interrupt in a non atomic context */
471 	queue_work(master->base.wq, &master->ibi_work);
472 
473 	return IRQ_HANDLED;
474 }
475 
476 static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
477 {
478 	struct svc_i3c_master *master = to_svc_i3c_master(m);
479 	struct i3c_bus *bus = i3c_master_get_bus(m);
480 	struct i3c_device_info info = {};
481 	unsigned long fclk_rate, fclk_period_ns;
482 	unsigned int high_period_ns, od_low_period_ns;
483 	u32 ppbaud, pplow, odhpp, odbaud, odstop, i2cbaud, reg;
484 	int ret;
485 
486 	ret = pm_runtime_resume_and_get(master->dev);
487 	if (ret < 0) {
488 		dev_err(master->dev,
489 			"<%s> cannot resume i3c bus master, err: %d\n",
490 			__func__, ret);
491 		return ret;
492 	}
493 
494 	/* Timings derivation */
495 	fclk_rate = clk_get_rate(master->fclk);
496 	if (!fclk_rate) {
497 		ret = -EINVAL;
498 		goto rpm_out;
499 	}
500 
501 	fclk_period_ns = DIV_ROUND_UP(1000000000, fclk_rate);
502 
503 	/*
	 * Using I3C Push-Pull mode, the target is a 12.5MHz/80ns period.
	 * The simplest configuration is a 50% duty-cycle, i.e. 40ns high
	 * and 40ns low.
506 	 */
507 	ppbaud = DIV_ROUND_UP(40, fclk_period_ns) - 1;
508 	pplow = 0;
509 
510 	/*
	 * Using I3C Open-Drain mode, the target is 4.17MHz/240ns with a
	 * duty-cycle tuned so that high levels are filtered out by
	 * the 50ns filter (the target being 40ns).
514 	 */
515 	odhpp = 1;
516 	high_period_ns = (ppbaud + 1) * fclk_period_ns;
517 	odbaud = DIV_ROUND_UP(240 - high_period_ns, high_period_ns) - 1;
518 	od_low_period_ns = (odbaud + 1) * high_period_ns;
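	/*
	 * Worked example, assuming a hypothetical 100MHz fclk (10ns period):
	 * ppbaud = DIV_ROUND_UP(40, 10) - 1 = 3, so the push-pull high time is
	 * (3 + 1) * 10ns = 40ns and the full period is 80ns (12.5MHz);
	 * odbaud = DIV_ROUND_UP(240 - 40, 40) - 1 = 4, so the open-drain low
	 * time is (4 + 1) * 40ns = 200ns, for a 240ns period (~4.17MHz).
	 */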
519 
520 	switch (bus->mode) {
521 	case I3C_BUS_MODE_PURE:
522 		i2cbaud = 0;
523 		odstop = 0;
524 		break;
525 	case I3C_BUS_MODE_MIXED_FAST:
526 	case I3C_BUS_MODE_MIXED_LIMITED:
527 		/*
		 * Using I2C Fm+ mode, the target is 1MHz/1000ns; the split
		 * between the high and low periods does not really matter.
530 		 */
531 		i2cbaud = DIV_ROUND_UP(1000, od_low_period_ns) - 2;
532 		odstop = 1;
533 		break;
534 	case I3C_BUS_MODE_MIXED_SLOW:
535 		/*
		 * Using I2C Fm mode, the target is 0.4MHz/2500ns, with the
		 * same constraints as in Fm+ mode.
538 		 */
539 		i2cbaud = DIV_ROUND_UP(2500, od_low_period_ns) - 2;
540 		odstop = 1;
541 		break;
	default:
		/* An unsupported bus mode must not silently return success */
		ret = -EINVAL;
		goto rpm_out;
544 	}
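
	/*
	 * Continuing the hypothetical 100MHz fclk example above
	 * (od_low_period_ns = 200): Fm+ gives
	 * i2cbaud = DIV_ROUND_UP(1000, 200) - 2 = 3, while Fm gives
	 * i2cbaud = DIV_ROUND_UP(2500, 200) - 2 = 11.
	 */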
545 
546 	reg = SVC_I3C_MCONFIG_MASTER_EN |
547 	      SVC_I3C_MCONFIG_DISTO(0) |
548 	      SVC_I3C_MCONFIG_HKEEP(0) |
549 	      SVC_I3C_MCONFIG_ODSTOP(odstop) |
550 	      SVC_I3C_MCONFIG_PPBAUD(ppbaud) |
551 	      SVC_I3C_MCONFIG_PPLOW(pplow) |
552 	      SVC_I3C_MCONFIG_ODBAUD(odbaud) |
553 	      SVC_I3C_MCONFIG_ODHPP(odhpp) |
554 	      SVC_I3C_MCONFIG_SKEW(0) |
555 	      SVC_I3C_MCONFIG_I2CBAUD(i2cbaud);
556 	writel(reg, master->regs + SVC_I3C_MCONFIG);
557 
558 	/* Master core's registration */
559 	ret = i3c_master_get_free_addr(m, 0);
560 	if (ret < 0)
561 		goto rpm_out;
562 
563 	info.dyn_addr = ret;
564 
565 	writel(SVC_MDYNADDR_VALID | SVC_MDYNADDR_ADDR(info.dyn_addr),
566 	       master->regs + SVC_I3C_MDYNADDR);
567 
568 	ret = i3c_master_set_info(&master->base, &info);
569 	if (ret)
570 		goto rpm_out;
571 
572 rpm_out:
573 	pm_runtime_mark_last_busy(master->dev);
574 	pm_runtime_put_autosuspend(master->dev);
575 
576 	return ret;
577 }
578 
579 static void svc_i3c_master_bus_cleanup(struct i3c_master_controller *m)
580 {
581 	struct svc_i3c_master *master = to_svc_i3c_master(m);
582 	int ret;
583 
584 	ret = pm_runtime_resume_and_get(master->dev);
585 	if (ret < 0) {
586 		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
587 		return;
588 	}
589 
590 	svc_i3c_master_disable_interrupts(master);
591 
592 	/* Disable master */
593 	writel(0, master->regs + SVC_I3C_MCONFIG);
594 
595 	pm_runtime_mark_last_busy(master->dev);
596 	pm_runtime_put_autosuspend(master->dev);
597 }
598 
599 static int svc_i3c_master_reserve_slot(struct svc_i3c_master *master)
600 {
601 	unsigned int slot;
602 
603 	if (!(master->free_slots & GENMASK(SVC_I3C_MAX_DEVS - 1, 0)))
604 		return -ENOSPC;
605 
606 	slot = ffs(master->free_slots) - 1;
607 
608 	master->free_slots &= ~BIT(slot);
609 
610 	return slot;
611 }
612 
613 static void svc_i3c_master_release_slot(struct svc_i3c_master *master,
614 					unsigned int slot)
615 {
616 	master->free_slots |= BIT(slot);
617 }
618 
619 static int svc_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
620 {
621 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
622 	struct svc_i3c_master *master = to_svc_i3c_master(m);
623 	struct svc_i3c_i2c_dev_data *data;
624 	int slot;
625 
626 	slot = svc_i3c_master_reserve_slot(master);
627 	if (slot < 0)
628 		return slot;
629 
630 	data = kzalloc(sizeof(*data), GFP_KERNEL);
631 	if (!data) {
632 		svc_i3c_master_release_slot(master, slot);
633 		return -ENOMEM;
634 	}
635 
636 	data->ibi = -1;
637 	data->index = slot;
638 	master->addrs[slot] = dev->info.dyn_addr ? dev->info.dyn_addr :
639 						   dev->info.static_addr;
640 	master->descs[slot] = dev;
641 
642 	i3c_dev_set_master_data(dev, data);
643 
644 	return 0;
645 }
646 
647 static int svc_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
648 					   u8 old_dyn_addr)
649 {
650 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
651 	struct svc_i3c_master *master = to_svc_i3c_master(m);
652 	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
653 
654 	master->addrs[data->index] = dev->info.dyn_addr ? dev->info.dyn_addr :
655 							  dev->info.static_addr;
656 
657 	return 0;
658 }
659 
660 static void svc_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
661 {
662 	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
663 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
664 	struct svc_i3c_master *master = to_svc_i3c_master(m);
665 
666 	master->addrs[data->index] = 0;
667 	svc_i3c_master_release_slot(master, data->index);
668 
669 	kfree(data);
670 }
671 
672 static int svc_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
673 {
674 	struct i3c_master_controller *m = i2c_dev_get_master(dev);
675 	struct svc_i3c_master *master = to_svc_i3c_master(m);
676 	struct svc_i3c_i2c_dev_data *data;
677 	int slot;
678 
679 	slot = svc_i3c_master_reserve_slot(master);
680 	if (slot < 0)
681 		return slot;
682 
683 	data = kzalloc(sizeof(*data), GFP_KERNEL);
684 	if (!data) {
685 		svc_i3c_master_release_slot(master, slot);
686 		return -ENOMEM;
687 	}
688 
689 	data->index = slot;
690 	master->addrs[slot] = dev->addr;
691 
692 	i2c_dev_set_master_data(dev, data);
693 
694 	return 0;
695 }
696 
697 static void svc_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
698 {
699 	struct svc_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
700 	struct i3c_master_controller *m = i2c_dev_get_master(dev);
701 	struct svc_i3c_master *master = to_svc_i3c_master(m);
702 
703 	svc_i3c_master_release_slot(master, data->index);
704 
705 	kfree(data);
706 }
707 
708 static int svc_i3c_master_readb(struct svc_i3c_master *master, u8 *dst,
709 				unsigned int len)
710 {
711 	int ret, i;
712 	u32 reg;
713 
714 	for (i = 0; i < len; i++) {
715 		ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
716 						reg,
717 						SVC_I3C_MSTATUS_RXPEND(reg),
718 						0, 1000);
719 		if (ret)
720 			return ret;
721 
722 		dst[i] = readl(master->regs + SVC_I3C_MRDATAB);
723 	}
724 
725 	return 0;
726 }
727 
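/*
 * Run the Dynamic Address Assignment procedure: each loop iteration either
 * collects the 48-bit provisional ID (plus BCR/DCR) of a contending device and
 * pushes the dynamic address chosen for it, or detects that the process is
 * complete (bus idle) or must be restarted after a NACK.
 */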
728 static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
729 					u8 *addrs, unsigned int *count)
730 {
731 	u64 prov_id[SVC_I3C_MAX_DEVS] = {}, nacking_prov_id = 0;
732 	unsigned int dev_nb = 0, last_addr = 0;
733 	u32 reg;
734 	int ret, i;
735 
736 	while (true) {
737 		/* Enter/proceed with DAA */
738 		writel(SVC_I3C_MCTRL_REQUEST_PROC_DAA |
739 		       SVC_I3C_MCTRL_TYPE_I3C |
740 		       SVC_I3C_MCTRL_IBIRESP_NACK |
741 		       SVC_I3C_MCTRL_DIR(SVC_I3C_MCTRL_DIR_WRITE),
742 		       master->regs + SVC_I3C_MCTRL);
743 
744 		/*
745 		 * Either one slave will send its ID, or the assignment process
746 		 * is done.
747 		 */
748 		ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
749 						reg,
750 						SVC_I3C_MSTATUS_RXPEND(reg) |
751 						SVC_I3C_MSTATUS_MCTRLDONE(reg),
752 						1, 1000);
753 		if (ret)
754 			return ret;
755 
756 		if (SVC_I3C_MSTATUS_RXPEND(reg)) {
757 			u8 data[6];
758 
			/*
			 * For now we only care about the 48-bit provisional
			 * ID, which lets us check that a device does not nack
			 * an address twice. Otherwise, we would just need to
			 * flush the RX FIFO.
			 */
764 			ret = svc_i3c_master_readb(master, data, 6);
765 			if (ret)
766 				return ret;
767 
768 			for (i = 0; i < 6; i++)
769 				prov_id[dev_nb] |= (u64)(data[i]) << (8 * (5 - i));
770 
771 			/* We do not care about the BCR and DCR yet */
772 			ret = svc_i3c_master_readb(master, data, 2);
773 			if (ret)
774 				return ret;
775 		} else if (SVC_I3C_MSTATUS_MCTRLDONE(reg)) {
776 			if (SVC_I3C_MSTATUS_STATE_IDLE(reg) &&
777 			    SVC_I3C_MSTATUS_COMPLETE(reg)) {
778 				/*
				 * All devices received and acked their dynamic
				 * address; this is the natural end of the DAA
781 				 * procedure.
782 				 */
783 				break;
784 			} else if (SVC_I3C_MSTATUS_NACKED(reg)) {
785 				/*
				 * A slave device nacked the address. This is
				 * only allowed once: DAA will be stopped and
				 * then resumed, and the same device is expected
				 * to answer again immediately and to ack the
				 * address this time.
791 				 */
792 				if (prov_id[dev_nb] == nacking_prov_id)
793 					return -EIO;
794 
795 				dev_nb--;
796 				nacking_prov_id = prov_id[dev_nb];
797 				svc_i3c_master_emit_stop(master);
798 
799 				continue;
800 			} else {
801 				return -EIO;
802 			}
803 		}
804 
805 		/* Wait for the slave to be ready to receive its address */
806 		ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
807 						reg,
808 						SVC_I3C_MSTATUS_MCTRLDONE(reg) &&
809 						SVC_I3C_MSTATUS_STATE_DAA(reg) &&
810 						SVC_I3C_MSTATUS_BETWEEN(reg),
811 						0, 1000);
812 		if (ret)
813 			return ret;
814 
815 		/* Give the slave device a suitable dynamic address */
816 		ret = i3c_master_get_free_addr(&master->base, last_addr + 1);
817 		if (ret < 0)
818 			return ret;
819 
820 		addrs[dev_nb] = ret;
821 		dev_dbg(master->dev, "DAA: device %d assigned to 0x%02x\n",
822 			dev_nb, addrs[dev_nb]);
823 
824 		writel(addrs[dev_nb], master->regs + SVC_I3C_MWDATAB);
825 		last_addr = addrs[dev_nb++];
826 	}
827 
828 	*count = dev_nb;
829 
830 	return 0;
831 }
832 
833 static int svc_i3c_update_ibirules(struct svc_i3c_master *master)
834 {
835 	struct i3c_dev_desc *dev;
836 	u32 reg_mbyte = 0, reg_nobyte = SVC_I3C_IBIRULES_NOBYTE;
837 	unsigned int mbyte_addr_ok = 0, mbyte_addr_ko = 0, nobyte_addr_ok = 0,
838 		nobyte_addr_ko = 0;
839 	bool list_mbyte = false, list_nobyte = false;
840 
841 	/* Create the IBIRULES register for both cases */
842 	i3c_bus_for_each_i3cdev(&master->base.bus, dev) {
843 		if (I3C_BCR_DEVICE_ROLE(dev->info.bcr) == I3C_BCR_I3C_MASTER)
844 			continue;
845 
846 		if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD) {
847 			reg_mbyte |= SVC_I3C_IBIRULES_ADDR(mbyte_addr_ok,
848 							   dev->info.dyn_addr);
849 
850 			/* IBI rules cannot be applied to devices with MSb=1 */
851 			if (dev->info.dyn_addr & BIT(7))
852 				mbyte_addr_ko++;
853 			else
854 				mbyte_addr_ok++;
855 		} else {
856 			reg_nobyte |= SVC_I3C_IBIRULES_ADDR(nobyte_addr_ok,
857 							    dev->info.dyn_addr);
858 
859 			/* IBI rules cannot be applied to devices with MSb=1 */
860 			if (dev->info.dyn_addr & BIT(7))
861 				nobyte_addr_ko++;
862 			else
863 				nobyte_addr_ok++;
864 		}
865 	}
866 
	/* Check whether the device lists can be handled by the hardware */
868 	if (!mbyte_addr_ko && mbyte_addr_ok <= SVC_I3C_IBIRULES_ADDRS)
869 		list_mbyte = true;
870 
871 	if (!nobyte_addr_ko && nobyte_addr_ok <= SVC_I3C_IBIRULES_ADDRS)
872 		list_nobyte = true;
873 
874 	/* No list can be properly handled, return an error */
875 	if (!list_mbyte && !list_nobyte)
876 		return -ERANGE;
877 
	/* Pick a list the hardware can handle, preferring the mandatory-byte one */
879 	if (list_mbyte)
880 		writel(reg_mbyte, master->regs + SVC_I3C_IBIRULES);
881 	else
882 		writel(reg_nobyte, master->regs + SVC_I3C_IBIRULES);
883 
884 	return 0;
885 }
886 
887 static int svc_i3c_master_do_daa(struct i3c_master_controller *m)
888 {
889 	struct svc_i3c_master *master = to_svc_i3c_master(m);
890 	u8 addrs[SVC_I3C_MAX_DEVS];
891 	unsigned long flags;
892 	unsigned int dev_nb;
893 	int ret, i;
894 
895 	ret = pm_runtime_resume_and_get(master->dev);
896 	if (ret < 0) {
897 		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
898 		return ret;
899 	}
900 
901 	spin_lock_irqsave(&master->xferqueue.lock, flags);
902 	ret = svc_i3c_master_do_daa_locked(master, addrs, &dev_nb);
903 	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
904 	if (ret) {
905 		svc_i3c_master_emit_stop(master);
906 		svc_i3c_master_clear_merrwarn(master);
907 		goto rpm_out;
908 	}
909 
	/* Register with the core all devices that took part in the DAA */
911 	for (i = 0; i < dev_nb; i++) {
912 		ret = i3c_master_add_i3c_dev_locked(m, addrs[i]);
913 		if (ret)
914 			goto rpm_out;
915 	}
916 
917 	/* Configure IBI auto-rules */
918 	ret = svc_i3c_update_ibirules(master);
919 	if (ret)
		dev_err(master->dev, "Cannot handle such a list of devices\n");
921 
922 rpm_out:
923 	pm_runtime_mark_last_busy(master->dev);
924 	pm_runtime_put_autosuspend(master->dev);
925 
926 	return ret;
927 }
928 
929 static int svc_i3c_master_read(struct svc_i3c_master *master,
930 			       u8 *in, unsigned int len)
931 {
932 	int offset = 0, i;
933 	u32 mdctrl, mstatus;
934 	bool completed = false;
935 	unsigned int count;
936 	unsigned long start = jiffies;
937 
938 	while (!completed) {
939 		mstatus = readl(master->regs + SVC_I3C_MSTATUS);
940 		if (SVC_I3C_MSTATUS_COMPLETE(mstatus) != 0)
941 			completed = true;
942 
943 		if (time_after(jiffies, start + msecs_to_jiffies(1000))) {
944 			dev_dbg(master->dev, "I3C read timeout\n");
945 			return -ETIMEDOUT;
946 		}
947 
948 		mdctrl = readl(master->regs + SVC_I3C_MDATACTRL);
949 		count = SVC_I3C_MDATACTRL_RXCOUNT(mdctrl);
950 		if (offset + count > len) {
951 			dev_err(master->dev, "I3C receive length too long!\n");
952 			return -EINVAL;
953 		}
954 		for (i = 0; i < count; i++)
955 			in[offset + i] = readl(master->regs + SVC_I3C_MRDATAB);
956 
957 		offset += count;
958 	}
959 
960 	return offset;
961 }
962 
963 static int svc_i3c_master_write(struct svc_i3c_master *master,
964 				const u8 *out, unsigned int len)
965 {
966 	int offset = 0, ret;
967 	u32 mdctrl;
968 
969 	while (offset < len) {
970 		ret = readl_poll_timeout(master->regs + SVC_I3C_MDATACTRL,
971 					 mdctrl,
972 					 !(mdctrl & SVC_I3C_MDATACTRL_TXFULL),
973 					 0, 1000);
974 		if (ret)
975 			return ret;
976 
977 		/*
978 		 * The last byte to be sent over the bus must either have the
979 		 * "end" bit set or be written in MWDATABE.
980 		 */
981 		if (likely(offset < (len - 1)))
982 			writel(out[offset++], master->regs + SVC_I3C_MWDATAB);
983 		else
984 			writel(out[offset++], master->regs + SVC_I3C_MWDATABE);
985 	}
986 
987 	return 0;
988 }
989 
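/*
 * Perform a single (repeated) START + address phase, move the payload through
 * the FIFOs, then either emit a STOP or leave the bus ready for a repeated
 * START depending on @continued. Errors end with a STOP and a MERRWARN clear.
 */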
990 static int svc_i3c_master_xfer(struct svc_i3c_master *master,
991 			       bool rnw, unsigned int xfer_type, u8 addr,
992 			       u8 *in, const u8 *out, unsigned int xfer_len,
993 			       unsigned int *read_len, bool continued)
994 {
995 	u32 reg;
996 	int ret;
997 
998 	writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
999 	       xfer_type |
1000 	       SVC_I3C_MCTRL_IBIRESP_NACK |
1001 	       SVC_I3C_MCTRL_DIR(rnw) |
1002 	       SVC_I3C_MCTRL_ADDR(addr) |
1003 	       SVC_I3C_MCTRL_RDTERM(*read_len),
1004 	       master->regs + SVC_I3C_MCTRL);
1005 
1006 	ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
1007 				 SVC_I3C_MSTATUS_MCTRLDONE(reg), 0, 1000);
1008 	if (ret)
1009 		goto emit_stop;
1010 
1011 	if (rnw)
1012 		ret = svc_i3c_master_read(master, in, xfer_len);
1013 	else
1014 		ret = svc_i3c_master_write(master, out, xfer_len);
1015 	if (ret < 0)
1016 		goto emit_stop;
1017 
1018 	if (rnw)
1019 		*read_len = ret;
1020 
1021 	ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
1022 				 SVC_I3C_MSTATUS_COMPLETE(reg), 0, 1000);
1023 	if (ret)
1024 		goto emit_stop;
1025 
1026 	writel(SVC_I3C_MINT_COMPLETE, master->regs + SVC_I3C_MSTATUS);
1027 
1028 	if (!continued) {
1029 		svc_i3c_master_emit_stop(master);
1030 
		/* Wait for the bus to return to idle once the stop is sent. */
1032 		readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
1033 				   SVC_I3C_MSTATUS_STATE_IDLE(reg), 0, 1000);
1034 	}
1035 
1036 	return 0;
1037 
1038 emit_stop:
1039 	svc_i3c_master_emit_stop(master);
1040 	svc_i3c_master_clear_merrwarn(master);
1041 
1042 	return ret;
1043 }
1044 
1045 static struct svc_i3c_xfer *
1046 svc_i3c_master_alloc_xfer(struct svc_i3c_master *master, unsigned int ncmds)
1047 {
1048 	struct svc_i3c_xfer *xfer;
1049 
1050 	xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
1051 	if (!xfer)
1052 		return NULL;
1053 
1054 	INIT_LIST_HEAD(&xfer->node);
1055 	xfer->ncmds = ncmds;
1056 	xfer->ret = -ETIMEDOUT;
1057 
1058 	return xfer;
1059 }
1060 
1061 static void svc_i3c_master_free_xfer(struct svc_i3c_xfer *xfer)
1062 {
1063 	kfree(xfer);
1064 }
1065 
1066 static void svc_i3c_master_dequeue_xfer_locked(struct svc_i3c_master *master,
1067 					       struct svc_i3c_xfer *xfer)
1068 {
1069 	if (master->xferqueue.cur == xfer)
1070 		master->xferqueue.cur = NULL;
1071 	else
1072 		list_del_init(&xfer->node);
1073 }
1074 
1075 static void svc_i3c_master_dequeue_xfer(struct svc_i3c_master *master,
1076 					struct svc_i3c_xfer *xfer)
1077 {
1078 	unsigned long flags;
1079 
1080 	spin_lock_irqsave(&master->xferqueue.lock, flags);
1081 	svc_i3c_master_dequeue_xfer_locked(master, xfer);
1082 	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
1083 }
1084 
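/*
 * Must be called with the transfer queue lock held: runs the current transfer
 * synchronously, completes it, then recurses into the next queued transfer,
 * if any.
 */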
1085 static void svc_i3c_master_start_xfer_locked(struct svc_i3c_master *master)
1086 {
1087 	struct svc_i3c_xfer *xfer = master->xferqueue.cur;
1088 	int ret, i;
1089 
1090 	if (!xfer)
1091 		return;
1092 
1093 	ret = pm_runtime_resume_and_get(master->dev);
1094 	if (ret < 0) {
1095 		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
1096 		return;
1097 	}
1098 
1099 	svc_i3c_master_clear_merrwarn(master);
1100 	svc_i3c_master_flush_fifo(master);
1101 
1102 	for (i = 0; i < xfer->ncmds; i++) {
1103 		struct svc_i3c_cmd *cmd = &xfer->cmds[i];
1104 
1105 		ret = svc_i3c_master_xfer(master, cmd->rnw, xfer->type,
1106 					  cmd->addr, cmd->in, cmd->out,
1107 					  cmd->len, &cmd->read_len,
1108 					  cmd->continued);
1109 		if (ret)
1110 			break;
1111 	}
1112 
1113 	pm_runtime_mark_last_busy(master->dev);
1114 	pm_runtime_put_autosuspend(master->dev);
1115 
1116 	xfer->ret = ret;
1117 	complete(&xfer->comp);
1118 
1119 	if (ret < 0)
1120 		svc_i3c_master_dequeue_xfer_locked(master, xfer);
1121 
1122 	xfer = list_first_entry_or_null(&master->xferqueue.list,
1123 					struct svc_i3c_xfer,
1124 					node);
1125 	if (xfer)
1126 		list_del_init(&xfer->node);
1127 
1128 	master->xferqueue.cur = xfer;
1129 	svc_i3c_master_start_xfer_locked(master);
1130 }
1131 
1132 static void svc_i3c_master_enqueue_xfer(struct svc_i3c_master *master,
1133 					struct svc_i3c_xfer *xfer)
1134 {
1135 	unsigned long flags;
1136 
1137 	init_completion(&xfer->comp);
1138 	spin_lock_irqsave(&master->xferqueue.lock, flags);
1139 	if (master->xferqueue.cur) {
1140 		list_add_tail(&xfer->node, &master->xferqueue.list);
1141 	} else {
1142 		master->xferqueue.cur = xfer;
1143 		svc_i3c_master_start_xfer_locked(master);
1144 	}
1145 	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
1146 }
1147 
1148 static bool
1149 svc_i3c_master_supports_ccc_cmd(struct i3c_master_controller *master,
1150 				const struct i3c_ccc_cmd *cmd)
1151 {
1152 	/* No software support for CCC commands targeting more than one slave */
1153 	return (cmd->ndests == 1);
1154 }
1155 
1156 static int svc_i3c_master_send_bdcast_ccc_cmd(struct svc_i3c_master *master,
1157 					      struct i3c_ccc_cmd *ccc)
1158 {
1159 	unsigned int xfer_len = ccc->dests[0].payload.len + 1;
1160 	struct svc_i3c_xfer *xfer;
1161 	struct svc_i3c_cmd *cmd;
1162 	u8 *buf;
1163 	int ret;
1164 
1165 	xfer = svc_i3c_master_alloc_xfer(master, 1);
1166 	if (!xfer)
1167 		return -ENOMEM;
1168 
1169 	buf = kmalloc(xfer_len, GFP_KERNEL);
1170 	if (!buf) {
1171 		svc_i3c_master_free_xfer(xfer);
1172 		return -ENOMEM;
1173 	}
1174 
1175 	buf[0] = ccc->id;
1176 	memcpy(&buf[1], ccc->dests[0].payload.data, ccc->dests[0].payload.len);
1177 
1178 	xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
1179 
1180 	cmd = &xfer->cmds[0];
1181 	cmd->addr = ccc->dests[0].addr;
1182 	cmd->rnw = ccc->rnw;
1183 	cmd->in = NULL;
1184 	cmd->out = buf;
1185 	cmd->len = xfer_len;
1186 	cmd->read_len = 0;
1187 	cmd->continued = false;
1188 
1189 	svc_i3c_master_enqueue_xfer(master, xfer);
1190 	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1191 		svc_i3c_master_dequeue_xfer(master, xfer);
1192 
1193 	ret = xfer->ret;
1194 	kfree(buf);
1195 	svc_i3c_master_free_xfer(xfer);
1196 
1197 	return ret;
1198 }
1199 
1200 static int svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master *master,
1201 					      struct i3c_ccc_cmd *ccc)
1202 {
1203 	unsigned int xfer_len = ccc->dests[0].payload.len;
1204 	unsigned int read_len = ccc->rnw ? xfer_len : 0;
1205 	struct svc_i3c_xfer *xfer;
1206 	struct svc_i3c_cmd *cmd;
1207 	int ret;
1208 
1209 	xfer = svc_i3c_master_alloc_xfer(master, 2);
1210 	if (!xfer)
1211 		return -ENOMEM;
1212 
1213 	xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
1214 
	/* Broadcast message */
1216 	cmd = &xfer->cmds[0];
1217 	cmd->addr = I3C_BROADCAST_ADDR;
1218 	cmd->rnw = 0;
1219 	cmd->in = NULL;
1220 	cmd->out = &ccc->id;
1221 	cmd->len = 1;
1222 	cmd->read_len = 0;
1223 	cmd->continued = true;
1224 
1225 	/* Directed message */
1226 	cmd = &xfer->cmds[1];
1227 	cmd->addr = ccc->dests[0].addr;
1228 	cmd->rnw = ccc->rnw;
1229 	cmd->in = ccc->rnw ? ccc->dests[0].payload.data : NULL;
	cmd->out = ccc->rnw ? NULL : ccc->dests[0].payload.data;
1231 	cmd->len = xfer_len;
1232 	cmd->read_len = read_len;
1233 	cmd->continued = false;
1234 
1235 	svc_i3c_master_enqueue_xfer(master, xfer);
1236 	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1237 		svc_i3c_master_dequeue_xfer(master, xfer);
1238 
1239 	if (cmd->read_len != xfer_len)
1240 		ccc->dests[0].payload.len = cmd->read_len;
1241 
1242 	ret = xfer->ret;
1243 	svc_i3c_master_free_xfer(xfer);
1244 
1245 	return ret;
1246 }
1247 
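/*
 * CCC IDs below 0x80 are broadcast commands and fit in a single framed
 * message; direct commands need a broadcast header (0x7E + CCC ID) followed
 * by a repeated START to the target address.
 */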
1248 static int svc_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
1249 				       struct i3c_ccc_cmd *cmd)
1250 {
1251 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1252 	bool broadcast = cmd->id < 0x80;
1253 
1254 	if (broadcast)
1255 		return svc_i3c_master_send_bdcast_ccc_cmd(master, cmd);
1256 	else
1257 		return svc_i3c_master_send_direct_ccc_cmd(master, cmd);
1258 }
1259 
1260 static int svc_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
1261 				     struct i3c_priv_xfer *xfers,
1262 				     int nxfers)
1263 {
1264 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
1265 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1266 	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1267 	struct svc_i3c_xfer *xfer;
1268 	int ret, i;
1269 
1270 	xfer = svc_i3c_master_alloc_xfer(master, nxfers);
1271 	if (!xfer)
1272 		return -ENOMEM;
1273 
1274 	xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
1275 
1276 	for (i = 0; i < nxfers; i++) {
1277 		struct svc_i3c_cmd *cmd = &xfer->cmds[i];
1278 
1279 		cmd->addr = master->addrs[data->index];
1280 		cmd->rnw = xfers[i].rnw;
1281 		cmd->in = xfers[i].rnw ? xfers[i].data.in : NULL;
1282 		cmd->out = xfers[i].rnw ? NULL : xfers[i].data.out;
1283 		cmd->len = xfers[i].len;
1284 		cmd->read_len = xfers[i].rnw ? xfers[i].len : 0;
1285 		cmd->continued = (i + 1) < nxfers;
1286 	}
1287 
1288 	svc_i3c_master_enqueue_xfer(master, xfer);
1289 	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1290 		svc_i3c_master_dequeue_xfer(master, xfer);
1291 
1292 	ret = xfer->ret;
1293 	svc_i3c_master_free_xfer(xfer);
1294 
1295 	return ret;
1296 }
1297 
1298 static int svc_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
1299 				    const struct i2c_msg *xfers,
1300 				    int nxfers)
1301 {
1302 	struct i3c_master_controller *m = i2c_dev_get_master(dev);
1303 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1304 	struct svc_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
1305 	struct svc_i3c_xfer *xfer;
1306 	int ret, i;
1307 
1308 	xfer = svc_i3c_master_alloc_xfer(master, nxfers);
1309 	if (!xfer)
1310 		return -ENOMEM;
1311 
1312 	xfer->type = SVC_I3C_MCTRL_TYPE_I2C;
1313 
1314 	for (i = 0; i < nxfers; i++) {
1315 		struct svc_i3c_cmd *cmd = &xfer->cmds[i];
1316 
1317 		cmd->addr = master->addrs[data->index];
1318 		cmd->rnw = xfers[i].flags & I2C_M_RD;
1319 		cmd->in = cmd->rnw ? xfers[i].buf : NULL;
1320 		cmd->out = cmd->rnw ? NULL : xfers[i].buf;
1321 		cmd->len = xfers[i].len;
1322 		cmd->read_len = cmd->rnw ? xfers[i].len : 0;
1323 		cmd->continued = (i + 1 < nxfers);
1324 	}
1325 
1326 	svc_i3c_master_enqueue_xfer(master, xfer);
1327 	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1328 		svc_i3c_master_dequeue_xfer(master, xfer);
1329 
1330 	ret = xfer->ret;
1331 	svc_i3c_master_free_xfer(xfer);
1332 
1333 	return ret;
1334 }
1335 
1336 static int svc_i3c_master_request_ibi(struct i3c_dev_desc *dev,
1337 				      const struct i3c_ibi_setup *req)
1338 {
1339 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
1340 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1341 	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1342 	unsigned long flags;
1343 	unsigned int i;
1344 
1345 	if (dev->ibi->max_payload_len > SVC_I3C_FIFO_SIZE) {
		dev_err(master->dev, "IBI max payload %d should not exceed %d\n",
			dev->ibi->max_payload_len, SVC_I3C_FIFO_SIZE);
1348 		return -ERANGE;
1349 	}
1350 
1351 	data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
1352 	if (IS_ERR(data->ibi_pool))
1353 		return PTR_ERR(data->ibi_pool);
1354 
1355 	spin_lock_irqsave(&master->ibi.lock, flags);
1356 	for (i = 0; i < master->ibi.num_slots; i++) {
1357 		if (!master->ibi.slots[i]) {
1358 			data->ibi = i;
1359 			master->ibi.slots[i] = dev;
1360 			break;
1361 		}
1362 	}
1363 	spin_unlock_irqrestore(&master->ibi.lock, flags);
1364 
1365 	if (i < master->ibi.num_slots)
1366 		return 0;
1367 
1368 	i3c_generic_ibi_free_pool(data->ibi_pool);
1369 	data->ibi_pool = NULL;
1370 
1371 	return -ENOSPC;
1372 }
1373 
1374 static void svc_i3c_master_free_ibi(struct i3c_dev_desc *dev)
1375 {
1376 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
1377 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1378 	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1379 	unsigned long flags;
1380 
1381 	spin_lock_irqsave(&master->ibi.lock, flags);
1382 	master->ibi.slots[data->ibi] = NULL;
1383 	data->ibi = -1;
1384 	spin_unlock_irqrestore(&master->ibi.lock, flags);
1385 
1386 	i3c_generic_ibi_free_pool(data->ibi_pool);
1387 }
1388 
1389 static int svc_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
1390 {
1391 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
1392 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1393 	int ret;
1394 
1395 	ret = pm_runtime_resume_and_get(master->dev);
1396 	if (ret < 0) {
1397 		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
1398 		return ret;
1399 	}
1400 
1401 	svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
1402 
1403 	return i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
1404 }
1405 
1406 static int svc_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
1407 {
1408 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
1409 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1410 	int ret;
1411 
1412 	svc_i3c_master_disable_interrupts(master);
1413 
1414 	ret = i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
1415 
1416 	pm_runtime_mark_last_busy(master->dev);
1417 	pm_runtime_put_autosuspend(master->dev);
1418 
1419 	return ret;
1420 }
1421 
1422 static void svc_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
1423 					    struct i3c_ibi_slot *slot)
1424 {
1425 	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1426 
1427 	i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
1428 }
1429 
1430 static const struct i3c_master_controller_ops svc_i3c_master_ops = {
1431 	.bus_init = svc_i3c_master_bus_init,
1432 	.bus_cleanup = svc_i3c_master_bus_cleanup,
1433 	.attach_i3c_dev = svc_i3c_master_attach_i3c_dev,
1434 	.detach_i3c_dev = svc_i3c_master_detach_i3c_dev,
1435 	.reattach_i3c_dev = svc_i3c_master_reattach_i3c_dev,
1436 	.attach_i2c_dev = svc_i3c_master_attach_i2c_dev,
1437 	.detach_i2c_dev = svc_i3c_master_detach_i2c_dev,
1438 	.do_daa = svc_i3c_master_do_daa,
1439 	.supports_ccc_cmd = svc_i3c_master_supports_ccc_cmd,
1440 	.send_ccc_cmd = svc_i3c_master_send_ccc_cmd,
1441 	.priv_xfers = svc_i3c_master_priv_xfers,
1442 	.i2c_xfers = svc_i3c_master_i2c_xfers,
1443 	.request_ibi = svc_i3c_master_request_ibi,
1444 	.free_ibi = svc_i3c_master_free_ibi,
1445 	.recycle_ibi_slot = svc_i3c_master_recycle_ibi_slot,
1446 	.enable_ibi = svc_i3c_master_enable_ibi,
1447 	.disable_ibi = svc_i3c_master_disable_ibi,
1448 };
1449 
1450 static int svc_i3c_master_prepare_clks(struct svc_i3c_master *master)
1451 {
1452 	int ret = 0;
1453 
1454 	ret = clk_prepare_enable(master->pclk);
1455 	if (ret)
1456 		return ret;
1457 
1458 	ret = clk_prepare_enable(master->fclk);
1459 	if (ret) {
1460 		clk_disable_unprepare(master->pclk);
1461 		return ret;
1462 	}
1463 
1464 	ret = clk_prepare_enable(master->sclk);
1465 	if (ret) {
1466 		clk_disable_unprepare(master->pclk);
1467 		clk_disable_unprepare(master->fclk);
1468 		return ret;
1469 	}
1470 
1471 	return 0;
1472 }
1473 
1474 static void svc_i3c_master_unprepare_clks(struct svc_i3c_master *master)
1475 {
1476 	clk_disable_unprepare(master->pclk);
1477 	clk_disable_unprepare(master->fclk);
1478 	clk_disable_unprepare(master->sclk);
1479 }
1480 
1481 static int svc_i3c_master_probe(struct platform_device *pdev)
1482 {
1483 	struct device *dev = &pdev->dev;
1484 	struct svc_i3c_master *master;
1485 	int ret;
1486 
1487 	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
1488 	if (!master)
1489 		return -ENOMEM;
1490 
1491 	master->regs = devm_platform_ioremap_resource(pdev, 0);
1492 	if (IS_ERR(master->regs))
1493 		return PTR_ERR(master->regs);
1494 
1495 	master->pclk = devm_clk_get(dev, "pclk");
1496 	if (IS_ERR(master->pclk))
1497 		return PTR_ERR(master->pclk);
1498 
1499 	master->fclk = devm_clk_get(dev, "fast_clk");
1500 	if (IS_ERR(master->fclk))
1501 		return PTR_ERR(master->fclk);
1502 
1503 	master->sclk = devm_clk_get(dev, "slow_clk");
1504 	if (IS_ERR(master->sclk))
1505 		return PTR_ERR(master->sclk);
1506 
1507 	master->irq = platform_get_irq(pdev, 0);
	if (master->irq < 0)
		return master->irq;
1510 
1511 	master->dev = dev;
1512 
1513 	ret = svc_i3c_master_prepare_clks(master);
1514 	if (ret)
1515 		return ret;
1516 
1517 	INIT_WORK(&master->hj_work, svc_i3c_master_hj_work);
1518 	INIT_WORK(&master->ibi_work, svc_i3c_master_ibi_work);
1519 	ret = devm_request_irq(dev, master->irq, svc_i3c_master_irq_handler,
1520 			       IRQF_NO_SUSPEND, "svc-i3c-irq", master);
1521 	if (ret)
1522 		goto err_disable_clks;
1523 
1524 	master->free_slots = GENMASK(SVC_I3C_MAX_DEVS - 1, 0);
1525 
1526 	spin_lock_init(&master->xferqueue.lock);
1527 	INIT_LIST_HEAD(&master->xferqueue.list);
1528 
1529 	spin_lock_init(&master->ibi.lock);
1530 	master->ibi.num_slots = SVC_I3C_MAX_DEVS;
1531 	master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
1532 					 sizeof(*master->ibi.slots),
1533 					 GFP_KERNEL);
1534 	if (!master->ibi.slots) {
1535 		ret = -ENOMEM;
1536 		goto err_disable_clks;
1537 	}
1538 
1539 	platform_set_drvdata(pdev, master);
1540 
1541 	pm_runtime_set_autosuspend_delay(&pdev->dev, SVC_I3C_PM_TIMEOUT_MS);
1542 	pm_runtime_use_autosuspend(&pdev->dev);
1543 	pm_runtime_get_noresume(&pdev->dev);
1544 	pm_runtime_set_active(&pdev->dev);
1545 	pm_runtime_enable(&pdev->dev);
1546 
1547 	svc_i3c_master_reset(master);
1548 
1549 	/* Register the master */
1550 	ret = i3c_master_register(&master->base, &pdev->dev,
1551 				  &svc_i3c_master_ops, false);
1552 	if (ret)
1553 		goto rpm_disable;
1554 
1555 	pm_runtime_mark_last_busy(&pdev->dev);
1556 	pm_runtime_put_autosuspend(&pdev->dev);
1557 
1558 	return 0;
1559 
1560 rpm_disable:
1561 	pm_runtime_dont_use_autosuspend(&pdev->dev);
1562 	pm_runtime_put_noidle(&pdev->dev);
1563 	pm_runtime_set_suspended(&pdev->dev);
1564 	pm_runtime_disable(&pdev->dev);
1565 
1566 err_disable_clks:
1567 	svc_i3c_master_unprepare_clks(master);
1568 
1569 	return ret;
1570 }
1571 
1572 static void svc_i3c_master_remove(struct platform_device *pdev)
1573 {
1574 	struct svc_i3c_master *master = platform_get_drvdata(pdev);
1575 
1576 	i3c_master_unregister(&master->base);
1577 
1578 	pm_runtime_dont_use_autosuspend(&pdev->dev);
1579 	pm_runtime_disable(&pdev->dev);
1580 }
1581 
1582 static int __maybe_unused svc_i3c_runtime_suspend(struct device *dev)
1583 {
1584 	struct svc_i3c_master *master = dev_get_drvdata(dev);
1585 
1586 	svc_i3c_master_unprepare_clks(master);
1587 	pinctrl_pm_select_sleep_state(dev);
1588 
1589 	return 0;
1590 }
1591 
1592 static int __maybe_unused svc_i3c_runtime_resume(struct device *dev)
1593 {
1594 	struct svc_i3c_master *master = dev_get_drvdata(dev);
1595 
1596 	pinctrl_pm_select_default_state(dev);
1597 	svc_i3c_master_prepare_clks(master);
1598 
1599 	return 0;
1600 }
1601 
1602 static const struct dev_pm_ops svc_i3c_pm_ops = {
1603 	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
1604 				      pm_runtime_force_resume)
1605 	SET_RUNTIME_PM_OPS(svc_i3c_runtime_suspend,
1606 			   svc_i3c_runtime_resume, NULL)
1607 };
1608 
1609 static const struct of_device_id svc_i3c_master_of_match_tbl[] = {
1610 	{ .compatible = "silvaco,i3c-master" },
1611 	{ /* sentinel */ },
1612 };
1613 MODULE_DEVICE_TABLE(of, svc_i3c_master_of_match_tbl);
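
/*
 * Minimal, hypothetical device tree usage sketch (compatible and clock names
 * as consumed by this driver; register range and interrupt specifier omitted):
 *
 *	i3c-master@0 {
 *		compatible = "silvaco,i3c-master";
 *		reg = <...>;
 *		interrupts = <...>;
 *		clocks = <&pclk>, <&fclk>, <&sclk>;
 *		clock-names = "pclk", "fast_clk", "slow_clk";
 *		#address-cells = <3>;
 *		#size-cells = <0>;
 *	};
 */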
1614 
1615 static struct platform_driver svc_i3c_master = {
1616 	.probe = svc_i3c_master_probe,
1617 	.remove_new = svc_i3c_master_remove,
1618 	.driver = {
1619 		.name = "silvaco-i3c-master",
1620 		.of_match_table = svc_i3c_master_of_match_tbl,
1621 		.pm = &svc_i3c_pm_ops,
1622 	},
1623 };
1624 module_platform_driver(svc_i3c_master);
1625 
1626 MODULE_AUTHOR("Conor Culhane <conor.culhane@silvaco.com>");
1627 MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
1628 MODULE_DESCRIPTION("Silvaco dual-role I3C master driver");
1629 MODULE_LICENSE("GPL v2");
1630