xref: /openbmc/linux/drivers/i3c/master/svc-i3c-master.c (revision 278002edb19bce2c628fafb0af936e77000f3a5b)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Silvaco dual-role I3C master driver
4  *
5  * Copyright (C) 2020 Silvaco
6  * Author: Miquel RAYNAL <miquel.raynal@bootlin.com>
7  * Based on a work from: Conor Culhane <conor.culhane@silvaco.com>
8  */
9 
10 #include <linux/bitfield.h>
11 #include <linux/clk.h>
12 #include <linux/completion.h>
13 #include <linux/errno.h>
14 #include <linux/i3c/master.h>
15 #include <linux/interrupt.h>
16 #include <linux/iopoll.h>
17 #include <linux/list.h>
18 #include <linux/module.h>
19 #include <linux/of.h>
20 #include <linux/pinctrl/consumer.h>
21 #include <linux/platform_device.h>
22 #include <linux/pm_runtime.h>
23 
24 /* Master Mode Registers */
25 #define SVC_I3C_MCONFIG      0x000
26 #define   SVC_I3C_MCONFIG_MASTER_EN BIT(0)
27 #define   SVC_I3C_MCONFIG_DISTO(x) FIELD_PREP(BIT(3), (x))
28 #define   SVC_I3C_MCONFIG_HKEEP(x) FIELD_PREP(GENMASK(5, 4), (x))
29 #define   SVC_I3C_MCONFIG_ODSTOP(x) FIELD_PREP(BIT(6), (x))
30 #define   SVC_I3C_MCONFIG_PPBAUD(x) FIELD_PREP(GENMASK(11, 8), (x))
31 #define   SVC_I3C_MCONFIG_PPLOW(x) FIELD_PREP(GENMASK(15, 12), (x))
32 #define   SVC_I3C_MCONFIG_ODBAUD(x) FIELD_PREP(GENMASK(23, 16), (x))
33 #define   SVC_I3C_MCONFIG_ODHPP(x) FIELD_PREP(BIT(24), (x))
34 #define   SVC_I3C_MCONFIG_SKEW(x) FIELD_PREP(GENMASK(27, 25), (x))
35 #define   SVC_I3C_MCONFIG_I2CBAUD(x) FIELD_PREP(GENMASK(31, 28), (x))
36 
37 #define SVC_I3C_MCTRL        0x084
38 #define   SVC_I3C_MCTRL_REQUEST_MASK GENMASK(2, 0)
39 #define   SVC_I3C_MCTRL_REQUEST_NONE 0
40 #define   SVC_I3C_MCTRL_REQUEST_START_ADDR 1
41 #define   SVC_I3C_MCTRL_REQUEST_STOP 2
42 #define   SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK 3
43 #define   SVC_I3C_MCTRL_REQUEST_PROC_DAA 4
44 #define   SVC_I3C_MCTRL_REQUEST_AUTO_IBI 7
45 #define   SVC_I3C_MCTRL_TYPE_I3C 0
46 #define   SVC_I3C_MCTRL_TYPE_I2C BIT(4)
47 #define   SVC_I3C_MCTRL_IBIRESP_AUTO 0
48 #define   SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE 0
49 #define   SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE BIT(7)
50 #define   SVC_I3C_MCTRL_IBIRESP_NACK BIT(6)
51 #define   SVC_I3C_MCTRL_IBIRESP_MANUAL GENMASK(7, 6)
52 #define   SVC_I3C_MCTRL_DIR(x) FIELD_PREP(BIT(8), (x))
53 #define   SVC_I3C_MCTRL_DIR_WRITE 0
54 #define   SVC_I3C_MCTRL_DIR_READ 1
55 #define   SVC_I3C_MCTRL_ADDR(x) FIELD_PREP(GENMASK(15, 9), (x))
56 #define   SVC_I3C_MCTRL_RDTERM(x) FIELD_PREP(GENMASK(23, 16), (x))
57 
58 #define SVC_I3C_MSTATUS      0x088
59 #define   SVC_I3C_MSTATUS_STATE(x) FIELD_GET(GENMASK(2, 0), (x))
60 #define   SVC_I3C_MSTATUS_STATE_DAA(x) (SVC_I3C_MSTATUS_STATE(x) == 5)
61 #define   SVC_I3C_MSTATUS_STATE_IDLE(x) (SVC_I3C_MSTATUS_STATE(x) == 0)
62 #define   SVC_I3C_MSTATUS_BETWEEN(x) FIELD_GET(BIT(4), (x))
63 #define   SVC_I3C_MSTATUS_NACKED(x) FIELD_GET(BIT(5), (x))
64 #define   SVC_I3C_MSTATUS_IBITYPE(x) FIELD_GET(GENMASK(7, 6), (x))
65 #define   SVC_I3C_MSTATUS_IBITYPE_IBI 1
66 #define   SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST 2
67 #define   SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN 3
68 #define   SVC_I3C_MINT_SLVSTART BIT(8)
69 #define   SVC_I3C_MINT_MCTRLDONE BIT(9)
70 #define   SVC_I3C_MINT_COMPLETE BIT(10)
71 #define   SVC_I3C_MINT_RXPEND BIT(11)
72 #define   SVC_I3C_MINT_TXNOTFULL BIT(12)
73 #define   SVC_I3C_MINT_IBIWON BIT(13)
74 #define   SVC_I3C_MINT_ERRWARN BIT(15)
75 #define   SVC_I3C_MSTATUS_SLVSTART(x) FIELD_GET(SVC_I3C_MINT_SLVSTART, (x))
76 #define   SVC_I3C_MSTATUS_MCTRLDONE(x) FIELD_GET(SVC_I3C_MINT_MCTRLDONE, (x))
77 #define   SVC_I3C_MSTATUS_COMPLETE(x) FIELD_GET(SVC_I3C_MINT_COMPLETE, (x))
78 #define   SVC_I3C_MSTATUS_RXPEND(x) FIELD_GET(SVC_I3C_MINT_RXPEND, (x))
79 #define   SVC_I3C_MSTATUS_TXNOTFULL(x) FIELD_GET(SVC_I3C_MINT_TXNOTFULL, (x))
80 #define   SVC_I3C_MSTATUS_IBIWON(x) FIELD_GET(SVC_I3C_MINT_IBIWON, (x))
81 #define   SVC_I3C_MSTATUS_ERRWARN(x) FIELD_GET(SVC_I3C_MINT_ERRWARN, (x))
82 #define   SVC_I3C_MSTATUS_IBIADDR(x) FIELD_GET(GENMASK(30, 24), (x))
83 
84 #define SVC_I3C_IBIRULES     0x08C
85 #define   SVC_I3C_IBIRULES_ADDR(slot, addr) FIELD_PREP(GENMASK(29, 0), \
86 						       ((addr) & 0x3F) << ((slot) * 6))
87 #define   SVC_I3C_IBIRULES_ADDRS 5
88 #define   SVC_I3C_IBIRULES_MSB0 BIT(30)
89 #define   SVC_I3C_IBIRULES_NOBYTE BIT(31)
90 #define   SVC_I3C_IBIRULES_MANDBYTE 0
91 #define SVC_I3C_MINTSET      0x090
92 #define SVC_I3C_MINTCLR      0x094
93 #define SVC_I3C_MINTMASKED   0x098
94 #define SVC_I3C_MERRWARN     0x09C
95 #define   SVC_I3C_MERRWARN_NACK BIT(2)
96 #define   SVC_I3C_MERRWARN_TIMEOUT BIT(20)
97 #define SVC_I3C_MDMACTRL     0x0A0
98 #define SVC_I3C_MDATACTRL    0x0AC
99 #define   SVC_I3C_MDATACTRL_FLUSHTB BIT(0)
100 #define   SVC_I3C_MDATACTRL_FLUSHRB BIT(1)
101 #define   SVC_I3C_MDATACTRL_UNLOCK_TRIG BIT(3)
102 #define   SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL GENMASK(5, 4)
103 #define   SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY 0
104 #define   SVC_I3C_MDATACTRL_RXCOUNT(x) FIELD_GET(GENMASK(28, 24), (x))
105 #define   SVC_I3C_MDATACTRL_TXFULL BIT(30)
106 #define   SVC_I3C_MDATACTRL_RXEMPTY BIT(31)
107 
108 #define SVC_I3C_MWDATAB      0x0B0
109 #define   SVC_I3C_MWDATAB_END BIT(8)
110 
111 #define SVC_I3C_MWDATABE     0x0B4
112 #define SVC_I3C_MWDATAH      0x0B8
113 #define SVC_I3C_MWDATAHE     0x0BC
114 #define SVC_I3C_MRDATAB      0x0C0
115 #define SVC_I3C_MRDATAH      0x0C8
116 #define SVC_I3C_MWMSG_SDR    0x0D0
117 #define SVC_I3C_MRMSG_SDR    0x0D4
118 #define SVC_I3C_MWMSG_DDR    0x0D8
119 #define SVC_I3C_MRMSG_DDR    0x0DC
120 
121 #define SVC_I3C_MDYNADDR     0x0E4
122 #define   SVC_MDYNADDR_VALID BIT(0)
123 #define   SVC_MDYNADDR_ADDR(x) FIELD_PREP(GENMASK(7, 1), (x))
124 
125 #define SVC_I3C_MAX_DEVS 32
126 #define SVC_I3C_PM_TIMEOUT_MS 1000
127 
128 /* This parameter depends on the implementation and may be tuned */
129 #define SVC_I3C_FIFO_SIZE 16
130 
131 #define SVC_I3C_EVENT_IBI	GENMASK(7, 0)
132 #define SVC_I3C_EVENT_HOTJOIN	BIT(31)
133 
134 struct svc_i3c_cmd {
135 	u8 addr;
136 	bool rnw;
137 	u8 *in;
138 	const void *out;
139 	unsigned int len;
140 	unsigned int actual_len;
141 	struct i3c_priv_xfer *xfer;
142 	bool continued;
143 };
144 
145 struct svc_i3c_xfer {
146 	struct list_head node;
147 	struct completion comp;
148 	int ret;
149 	unsigned int type;
150 	unsigned int ncmds;
151 	struct svc_i3c_cmd cmds[];
152 };
153 
154 struct svc_i3c_regs_save {
155 	u32 mconfig;
156 	u32 mdynaddr;
157 };
158 
159 /**
160  * struct svc_i3c_master - Silvaco I3C Master structure
161  * @base: I3C master controller
162  * @dev: Corresponding device
163  * @regs: Memory mapping
164  * @saved_regs: Volatile values for PM operations
165  * @free_slots: Bit array of available slots
166  * @addrs: Array containing the dynamic addresses of each attached device
167  * @descs: Array of descriptors, one per attached device
168  * @hj_work: Hot-join work
169  * @ibi_work: IBI work
170  * @irq: Main interrupt
171  * @pclk: System clock
172  * @fclk: Fast clock (bus)
173  * @sclk: Slow clock (other events)
174  * @xferqueue: Transfer queue structure
175  * @xferqueue.list: List member
176  * @xferqueue.cur: Current ongoing transfer
177  * @xferqueue.lock: Queue lock
178  * @ibi: IBI structure
179  * @ibi.num_slots: Number of slots available in @ibi.slots
180  * @ibi.slots: Available IBI slots
181  * @ibi.tbq_slot: To be queued IBI slot
182  * @ibi.lock: IBI lock
183  * @lock: Transfer lock, protect between IBI work thread and callbacks from master
184  * @enabled_events: Bit masks for enable events (IBI, HotJoin).
185  * @mctrl_config: Configuration value in SVC_I3C_MCTRL for setting speed back.
186  */
187 struct svc_i3c_master {
188 	struct i3c_master_controller base;
189 	struct device *dev;
190 	void __iomem *regs;
191 	struct svc_i3c_regs_save saved_regs;
192 	u32 free_slots;
193 	u8 addrs[SVC_I3C_MAX_DEVS];
194 	struct i3c_dev_desc *descs[SVC_I3C_MAX_DEVS];
195 	struct work_struct hj_work;
196 	struct work_struct ibi_work;
197 	int irq;
198 	struct clk *pclk;
199 	struct clk *fclk;
200 	struct clk *sclk;
201 	struct {
202 		struct list_head list;
203 		struct svc_i3c_xfer *cur;
204 		/* Prevent races between transfers */
205 		spinlock_t lock;
206 	} xferqueue;
207 	struct {
208 		unsigned int num_slots;
209 		struct i3c_dev_desc **slots;
210 		struct i3c_ibi_slot *tbq_slot;
211 		/* Prevent races within IBI handlers */
212 		spinlock_t lock;
213 	} ibi;
214 	struct mutex lock;
215 	u32 enabled_events;
216 	u32 mctrl_config;
217 };
218 
219 /**
220  * struct svc_i3c_i2c_dev_data - Device specific data
221  * @index: Index in the master tables corresponding to this device
222  * @ibi: IBI slot index in the master structure
223  * @ibi_pool: IBI pool associated to this device
224  */
225 struct svc_i3c_i2c_dev_data {
226 	u8 index;
227 	int ibi;
228 	struct i3c_generic_ibi_pool *ibi_pool;
229 };
230 
is_events_enabled(struct svc_i3c_master * master,u32 mask)231 static inline bool is_events_enabled(struct svc_i3c_master *master, u32 mask)
232 {
233 	return !!(master->enabled_events & mask);
234 }
235 
svc_i3c_master_error(struct svc_i3c_master * master)236 static bool svc_i3c_master_error(struct svc_i3c_master *master)
237 {
238 	u32 mstatus, merrwarn;
239 
240 	mstatus = readl(master->regs + SVC_I3C_MSTATUS);
241 	if (SVC_I3C_MSTATUS_ERRWARN(mstatus)) {
242 		merrwarn = readl(master->regs + SVC_I3C_MERRWARN);
243 		writel(merrwarn, master->regs + SVC_I3C_MERRWARN);
244 
245 		/* Ignore timeout error */
246 		if (merrwarn & SVC_I3C_MERRWARN_TIMEOUT) {
247 			dev_dbg(master->dev, "Warning condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
248 				mstatus, merrwarn);
249 			return false;
250 		}
251 
252 		dev_err(master->dev,
253 			"Error condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
254 			mstatus, merrwarn);
255 
256 		return true;
257 	}
258 
259 	return false;
260 }
261 
svc_i3c_master_enable_interrupts(struct svc_i3c_master * master,u32 mask)262 static void svc_i3c_master_enable_interrupts(struct svc_i3c_master *master, u32 mask)
263 {
264 	writel(mask, master->regs + SVC_I3C_MINTSET);
265 }
266 
svc_i3c_master_disable_interrupts(struct svc_i3c_master * master)267 static void svc_i3c_master_disable_interrupts(struct svc_i3c_master *master)
268 {
269 	u32 mask = readl(master->regs + SVC_I3C_MINTSET);
270 
271 	writel(mask, master->regs + SVC_I3C_MINTCLR);
272 }
273 
svc_i3c_master_clear_merrwarn(struct svc_i3c_master * master)274 static void svc_i3c_master_clear_merrwarn(struct svc_i3c_master *master)
275 {
276 	/* Clear pending warnings */
277 	writel(readl(master->regs + SVC_I3C_MERRWARN),
278 	       master->regs + SVC_I3C_MERRWARN);
279 }
280 
svc_i3c_master_flush_fifo(struct svc_i3c_master * master)281 static void svc_i3c_master_flush_fifo(struct svc_i3c_master *master)
282 {
283 	/* Flush FIFOs */
284 	writel(SVC_I3C_MDATACTRL_FLUSHTB | SVC_I3C_MDATACTRL_FLUSHRB,
285 	       master->regs + SVC_I3C_MDATACTRL);
286 }
287 
svc_i3c_master_reset_fifo_trigger(struct svc_i3c_master * master)288 static void svc_i3c_master_reset_fifo_trigger(struct svc_i3c_master *master)
289 {
290 	u32 reg;
291 
292 	/* Set RX and TX tigger levels, flush FIFOs */
293 	reg = SVC_I3C_MDATACTRL_FLUSHTB |
294 	      SVC_I3C_MDATACTRL_FLUSHRB |
295 	      SVC_I3C_MDATACTRL_UNLOCK_TRIG |
296 	      SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL |
297 	      SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY;
298 	writel(reg, master->regs + SVC_I3C_MDATACTRL);
299 }
300 
svc_i3c_master_reset(struct svc_i3c_master * master)301 static void svc_i3c_master_reset(struct svc_i3c_master *master)
302 {
303 	svc_i3c_master_clear_merrwarn(master);
304 	svc_i3c_master_reset_fifo_trigger(master);
305 	svc_i3c_master_disable_interrupts(master);
306 }
307 
308 static inline struct svc_i3c_master *
to_svc_i3c_master(struct i3c_master_controller * master)309 to_svc_i3c_master(struct i3c_master_controller *master)
310 {
311 	return container_of(master, struct svc_i3c_master, base);
312 }
313 
svc_i3c_master_hj_work(struct work_struct * work)314 static void svc_i3c_master_hj_work(struct work_struct *work)
315 {
316 	struct svc_i3c_master *master;
317 
318 	master = container_of(work, struct svc_i3c_master, hj_work);
319 	i3c_master_do_daa(&master->base);
320 }
321 
322 static struct i3c_dev_desc *
svc_i3c_master_dev_from_addr(struct svc_i3c_master * master,unsigned int ibiaddr)323 svc_i3c_master_dev_from_addr(struct svc_i3c_master *master,
324 			     unsigned int ibiaddr)
325 {
326 	int i;
327 
328 	for (i = 0; i < SVC_I3C_MAX_DEVS; i++)
329 		if (master->addrs[i] == ibiaddr)
330 			break;
331 
332 	if (i == SVC_I3C_MAX_DEVS)
333 		return NULL;
334 
335 	return master->descs[i];
336 }
337 
svc_i3c_master_emit_stop(struct svc_i3c_master * master)338 static void svc_i3c_master_emit_stop(struct svc_i3c_master *master)
339 {
340 	writel(SVC_I3C_MCTRL_REQUEST_STOP, master->regs + SVC_I3C_MCTRL);
341 
342 	/*
343 	 * This delay is necessary after the emission of a stop, otherwise eg.
344 	 * repeating IBIs do not get detected. There is a note in the manual
345 	 * about it, stating that the stop condition might not be settled
346 	 * correctly if a start condition follows too rapidly.
347 	 */
348 	udelay(1);
349 }
350 
svc_i3c_master_handle_ibi(struct svc_i3c_master * master,struct i3c_dev_desc * dev)351 static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
352 				     struct i3c_dev_desc *dev)
353 {
354 	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
355 	struct i3c_ibi_slot *slot;
356 	unsigned int count;
357 	u32 mdatactrl;
358 	int ret, val;
359 	u8 *buf;
360 
361 	slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
362 	if (!slot)
363 		return -ENOSPC;
364 
365 	slot->len = 0;
366 	buf = slot->data;
367 
368 	ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val,
369 						SVC_I3C_MSTATUS_COMPLETE(val), 0, 1000);
370 	if (ret) {
371 		dev_err(master->dev, "Timeout when polling for COMPLETE\n");
372 		return ret;
373 	}
374 
375 	while (SVC_I3C_MSTATUS_RXPEND(readl(master->regs + SVC_I3C_MSTATUS))  &&
376 	       slot->len < SVC_I3C_FIFO_SIZE) {
377 		mdatactrl = readl(master->regs + SVC_I3C_MDATACTRL);
378 		count = SVC_I3C_MDATACTRL_RXCOUNT(mdatactrl);
379 		readsl(master->regs + SVC_I3C_MRDATAB, buf, count);
380 		slot->len += count;
381 		buf += count;
382 	}
383 
384 	master->ibi.tbq_slot = slot;
385 
386 	return 0;
387 }
388 
svc_i3c_master_ack_ibi(struct svc_i3c_master * master,bool mandatory_byte)389 static void svc_i3c_master_ack_ibi(struct svc_i3c_master *master,
390 				   bool mandatory_byte)
391 {
392 	unsigned int ibi_ack_nack;
393 
394 	ibi_ack_nack = SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK;
395 	if (mandatory_byte)
396 		ibi_ack_nack |= SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE;
397 	else
398 		ibi_ack_nack |= SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE;
399 
400 	writel(ibi_ack_nack, master->regs + SVC_I3C_MCTRL);
401 }
402 
svc_i3c_master_nack_ibi(struct svc_i3c_master * master)403 static void svc_i3c_master_nack_ibi(struct svc_i3c_master *master)
404 {
405 	writel(SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK |
406 	       SVC_I3C_MCTRL_IBIRESP_NACK,
407 	       master->regs + SVC_I3C_MCTRL);
408 }
409 
svc_i3c_master_ibi_work(struct work_struct * work)410 static void svc_i3c_master_ibi_work(struct work_struct *work)
411 {
412 	struct svc_i3c_master *master = container_of(work, struct svc_i3c_master, ibi_work);
413 	struct svc_i3c_i2c_dev_data *data;
414 	unsigned int ibitype, ibiaddr;
415 	struct i3c_dev_desc *dev;
416 	u32 status, val;
417 	int ret;
418 
419 	mutex_lock(&master->lock);
420 	/*
421 	 * IBIWON may be set before SVC_I3C_MCTRL_REQUEST_AUTO_IBI, causing
422 	 * readl_relaxed_poll_timeout() to return immediately. Consequently,
423 	 * ibitype will be 0 since it was last updated only after the 8th SCL
424 	 * cycle, leading to missed client IBI handlers.
425 	 *
426 	 * A typical scenario is when IBIWON occurs and bus arbitration is lost
427 	 * at svc_i3c_master_priv_xfers().
428 	 *
429 	 * Clear SVC_I3C_MINT_IBIWON before sending SVC_I3C_MCTRL_REQUEST_AUTO_IBI.
430 	 */
431 	writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
432 
433 	/* Acknowledge the incoming interrupt with the AUTOIBI mechanism */
434 	writel(SVC_I3C_MCTRL_REQUEST_AUTO_IBI |
435 	       SVC_I3C_MCTRL_IBIRESP_AUTO,
436 	       master->regs + SVC_I3C_MCTRL);
437 
438 	/* Wait for IBIWON, should take approximately 100us */
439 	ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val,
440 					 SVC_I3C_MSTATUS_IBIWON(val), 0, 1000);
441 	if (ret) {
442 		dev_err(master->dev, "Timeout when polling for IBIWON\n");
443 		svc_i3c_master_emit_stop(master);
444 		goto reenable_ibis;
445 	}
446 
447 	status = readl(master->regs + SVC_I3C_MSTATUS);
448 	ibitype = SVC_I3C_MSTATUS_IBITYPE(status);
449 	ibiaddr = SVC_I3C_MSTATUS_IBIADDR(status);
450 
451 	/* Handle the critical responses to IBI's */
452 	switch (ibitype) {
453 	case SVC_I3C_MSTATUS_IBITYPE_IBI:
454 		dev = svc_i3c_master_dev_from_addr(master, ibiaddr);
455 		if (!dev || !is_events_enabled(master, SVC_I3C_EVENT_IBI))
456 			svc_i3c_master_nack_ibi(master);
457 		else
458 			svc_i3c_master_handle_ibi(master, dev);
459 		break;
460 	case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
461 		if (is_events_enabled(master, SVC_I3C_EVENT_HOTJOIN))
462 			svc_i3c_master_ack_ibi(master, false);
463 		else
464 			svc_i3c_master_nack_ibi(master);
465 		break;
466 	case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
467 		svc_i3c_master_nack_ibi(master);
468 		break;
469 	default:
470 		break;
471 	}
472 
473 	/*
474 	 * If an error happened, we probably got interrupted and the exchange
475 	 * timedout. In this case we just drop everything, emit a stop and wait
476 	 * for the slave to interrupt again.
477 	 */
478 	if (svc_i3c_master_error(master)) {
479 		if (master->ibi.tbq_slot) {
480 			data = i3c_dev_get_master_data(dev);
481 			i3c_generic_ibi_recycle_slot(data->ibi_pool,
482 						     master->ibi.tbq_slot);
483 			master->ibi.tbq_slot = NULL;
484 		}
485 
486 		svc_i3c_master_emit_stop(master);
487 
488 		goto reenable_ibis;
489 	}
490 
491 	/* Handle the non critical tasks */
492 	switch (ibitype) {
493 	case SVC_I3C_MSTATUS_IBITYPE_IBI:
494 		if (dev) {
495 			i3c_master_queue_ibi(dev, master->ibi.tbq_slot);
496 			master->ibi.tbq_slot = NULL;
497 		}
498 		svc_i3c_master_emit_stop(master);
499 		break;
500 	case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
501 		svc_i3c_master_emit_stop(master);
502 		if (is_events_enabled(master, SVC_I3C_EVENT_HOTJOIN))
503 			queue_work(master->base.wq, &master->hj_work);
504 		break;
505 	case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
506 	default:
507 		break;
508 	}
509 
510 reenable_ibis:
511 	svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
512 	mutex_unlock(&master->lock);
513 }
514 
svc_i3c_master_irq_handler(int irq,void * dev_id)515 static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id)
516 {
517 	struct svc_i3c_master *master = (struct svc_i3c_master *)dev_id;
518 	u32 active = readl(master->regs + SVC_I3C_MSTATUS);
519 
520 	if (!SVC_I3C_MSTATUS_SLVSTART(active))
521 		return IRQ_NONE;
522 
523 	/* Clear the interrupt status */
524 	writel(SVC_I3C_MINT_SLVSTART, master->regs + SVC_I3C_MSTATUS);
525 
526 	svc_i3c_master_disable_interrupts(master);
527 
528 	/* Handle the interrupt in a non atomic context */
529 	queue_work(master->base.wq, &master->ibi_work);
530 
531 	return IRQ_HANDLED;
532 }
533 
svc_i3c_master_set_speed(struct i3c_master_controller * m,enum i3c_open_drain_speed speed)534 static int svc_i3c_master_set_speed(struct i3c_master_controller *m,
535 				     enum i3c_open_drain_speed speed)
536 {
537 	struct svc_i3c_master *master = to_svc_i3c_master(m);
538 	struct i3c_bus *bus = i3c_master_get_bus(&master->base);
539 	u32 ppbaud, odbaud, odhpp, mconfig;
540 	unsigned long fclk_rate;
541 	int ret;
542 
543 	ret = pm_runtime_resume_and_get(master->dev);
544 	if (ret < 0) {
545 		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
546 		return ret;
547 	}
548 
549 	switch (speed) {
550 	case I3C_OPEN_DRAIN_SLOW_SPEED:
551 		fclk_rate = clk_get_rate(master->fclk);
552 		if (!fclk_rate) {
553 			ret = -EINVAL;
554 			goto rpm_out;
555 		}
556 		/*
557 		 * Set 50% duty-cycle I2C speed to I3C OPEN-DRAIN mode, so the first
558 		 * broadcast address is visible to all I2C/I3C devices on the I3C bus.
559 		 * I3C device working as a I2C device will turn off its 50ns Spike
560 		 * Filter to change to I3C mode.
561 		 */
562 		mconfig = master->mctrl_config;
563 		ppbaud = FIELD_GET(GENMASK(11, 8), mconfig);
564 		odhpp = 0;
565 		odbaud = DIV_ROUND_UP(fclk_rate, bus->scl_rate.i2c * (2 + 2 * ppbaud)) - 1;
566 		mconfig &= ~GENMASK(24, 16);
567 		mconfig |= SVC_I3C_MCONFIG_ODBAUD(odbaud) | SVC_I3C_MCONFIG_ODHPP(odhpp);
568 		writel(mconfig, master->regs + SVC_I3C_MCONFIG);
569 		break;
570 	case I3C_OPEN_DRAIN_NORMAL_SPEED:
571 		writel(master->mctrl_config, master->regs + SVC_I3C_MCONFIG);
572 		break;
573 	}
574 
575 rpm_out:
576 	pm_runtime_mark_last_busy(master->dev);
577 	pm_runtime_put_autosuspend(master->dev);
578 
579 	return ret;
580 }
581 
svc_i3c_master_bus_init(struct i3c_master_controller * m)582 static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
583 {
584 	struct svc_i3c_master *master = to_svc_i3c_master(m);
585 	struct i3c_bus *bus = i3c_master_get_bus(m);
586 	struct i3c_device_info info = {};
587 	unsigned long fclk_rate, fclk_period_ns;
588 	unsigned int high_period_ns, od_low_period_ns;
589 	u32 ppbaud, pplow, odhpp, odbaud, odstop, i2cbaud, reg;
590 	int ret;
591 
592 	ret = pm_runtime_resume_and_get(master->dev);
593 	if (ret < 0) {
594 		dev_err(master->dev,
595 			"<%s> cannot resume i3c bus master, err: %d\n",
596 			__func__, ret);
597 		return ret;
598 	}
599 
600 	/* Timings derivation */
601 	fclk_rate = clk_get_rate(master->fclk);
602 	if (!fclk_rate) {
603 		ret = -EINVAL;
604 		goto rpm_out;
605 	}
606 
607 	fclk_period_ns = DIV_ROUND_UP(1000000000, fclk_rate);
608 
609 	/*
610 	 * Using I3C Push-Pull mode, target is 12.5MHz/80ns period.
611 	 * Simplest configuration is using a 50% duty-cycle of 40ns.
612 	 */
613 	ppbaud = DIV_ROUND_UP(40, fclk_period_ns) - 1;
614 	pplow = 0;
615 
616 	/*
617 	 * Using I3C Open-Drain mode, target is 4.17MHz/240ns with a
618 	 * duty-cycle tuned so that high levels are filetered out by
619 	 * the 50ns filter (target being 40ns).
620 	 */
621 	odhpp = 1;
622 	high_period_ns = (ppbaud + 1) * fclk_period_ns;
623 	odbaud = DIV_ROUND_UP(240 - high_period_ns, high_period_ns) - 1;
624 	od_low_period_ns = (odbaud + 1) * high_period_ns;
625 
626 	switch (bus->mode) {
627 	case I3C_BUS_MODE_PURE:
628 		i2cbaud = 0;
629 		odstop = 0;
630 		break;
631 	case I3C_BUS_MODE_MIXED_FAST:
632 	case I3C_BUS_MODE_MIXED_LIMITED:
633 		/*
634 		 * Using I2C Fm+ mode, target is 1MHz/1000ns, the difference
635 		 * between the high and low period does not really matter.
636 		 */
637 		i2cbaud = DIV_ROUND_UP(1000, od_low_period_ns) - 2;
638 		odstop = 1;
639 		break;
640 	case I3C_BUS_MODE_MIXED_SLOW:
641 		/*
642 		 * Using I2C Fm mode, target is 0.4MHz/2500ns, with the same
643 		 * constraints as the FM+ mode.
644 		 */
645 		i2cbaud = DIV_ROUND_UP(2500, od_low_period_ns) - 2;
646 		odstop = 1;
647 		break;
648 	default:
649 		goto rpm_out;
650 	}
651 
652 	reg = SVC_I3C_MCONFIG_MASTER_EN |
653 	      SVC_I3C_MCONFIG_DISTO(0) |
654 	      SVC_I3C_MCONFIG_HKEEP(0) |
655 	      SVC_I3C_MCONFIG_ODSTOP(odstop) |
656 	      SVC_I3C_MCONFIG_PPBAUD(ppbaud) |
657 	      SVC_I3C_MCONFIG_PPLOW(pplow) |
658 	      SVC_I3C_MCONFIG_ODBAUD(odbaud) |
659 	      SVC_I3C_MCONFIG_ODHPP(odhpp) |
660 	      SVC_I3C_MCONFIG_SKEW(0) |
661 	      SVC_I3C_MCONFIG_I2CBAUD(i2cbaud);
662 	writel(reg, master->regs + SVC_I3C_MCONFIG);
663 
664 	master->mctrl_config = reg;
665 	/* Master core's registration */
666 	ret = i3c_master_get_free_addr(m, 0);
667 	if (ret < 0)
668 		goto rpm_out;
669 
670 	info.dyn_addr = ret;
671 
672 	writel(SVC_MDYNADDR_VALID | SVC_MDYNADDR_ADDR(info.dyn_addr),
673 	       master->regs + SVC_I3C_MDYNADDR);
674 
675 	ret = i3c_master_set_info(&master->base, &info);
676 	if (ret)
677 		goto rpm_out;
678 
679 rpm_out:
680 	pm_runtime_mark_last_busy(master->dev);
681 	pm_runtime_put_autosuspend(master->dev);
682 
683 	return ret;
684 }
685 
svc_i3c_master_bus_cleanup(struct i3c_master_controller * m)686 static void svc_i3c_master_bus_cleanup(struct i3c_master_controller *m)
687 {
688 	struct svc_i3c_master *master = to_svc_i3c_master(m);
689 	int ret;
690 
691 	ret = pm_runtime_resume_and_get(master->dev);
692 	if (ret < 0) {
693 		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
694 		return;
695 	}
696 
697 	svc_i3c_master_disable_interrupts(master);
698 
699 	/* Disable master */
700 	writel(0, master->regs + SVC_I3C_MCONFIG);
701 
702 	pm_runtime_mark_last_busy(master->dev);
703 	pm_runtime_put_autosuspend(master->dev);
704 }
705 
svc_i3c_master_reserve_slot(struct svc_i3c_master * master)706 static int svc_i3c_master_reserve_slot(struct svc_i3c_master *master)
707 {
708 	unsigned int slot;
709 
710 	if (!(master->free_slots & GENMASK(SVC_I3C_MAX_DEVS - 1, 0)))
711 		return -ENOSPC;
712 
713 	slot = ffs(master->free_slots) - 1;
714 
715 	master->free_slots &= ~BIT(slot);
716 
717 	return slot;
718 }
719 
svc_i3c_master_release_slot(struct svc_i3c_master * master,unsigned int slot)720 static void svc_i3c_master_release_slot(struct svc_i3c_master *master,
721 					unsigned int slot)
722 {
723 	master->free_slots |= BIT(slot);
724 }
725 
svc_i3c_master_attach_i3c_dev(struct i3c_dev_desc * dev)726 static int svc_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
727 {
728 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
729 	struct svc_i3c_master *master = to_svc_i3c_master(m);
730 	struct svc_i3c_i2c_dev_data *data;
731 	int slot;
732 
733 	slot = svc_i3c_master_reserve_slot(master);
734 	if (slot < 0)
735 		return slot;
736 
737 	data = kzalloc(sizeof(*data), GFP_KERNEL);
738 	if (!data) {
739 		svc_i3c_master_release_slot(master, slot);
740 		return -ENOMEM;
741 	}
742 
743 	data->ibi = -1;
744 	data->index = slot;
745 	master->addrs[slot] = dev->info.dyn_addr ? dev->info.dyn_addr :
746 						   dev->info.static_addr;
747 	master->descs[slot] = dev;
748 
749 	i3c_dev_set_master_data(dev, data);
750 
751 	return 0;
752 }
753 
svc_i3c_master_reattach_i3c_dev(struct i3c_dev_desc * dev,u8 old_dyn_addr)754 static int svc_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
755 					   u8 old_dyn_addr)
756 {
757 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
758 	struct svc_i3c_master *master = to_svc_i3c_master(m);
759 	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
760 
761 	master->addrs[data->index] = dev->info.dyn_addr ? dev->info.dyn_addr :
762 							  dev->info.static_addr;
763 
764 	return 0;
765 }
766 
svc_i3c_master_detach_i3c_dev(struct i3c_dev_desc * dev)767 static void svc_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
768 {
769 	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
770 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
771 	struct svc_i3c_master *master = to_svc_i3c_master(m);
772 
773 	master->addrs[data->index] = 0;
774 	svc_i3c_master_release_slot(master, data->index);
775 
776 	kfree(data);
777 }
778 
svc_i3c_master_attach_i2c_dev(struct i2c_dev_desc * dev)779 static int svc_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
780 {
781 	struct i3c_master_controller *m = i2c_dev_get_master(dev);
782 	struct svc_i3c_master *master = to_svc_i3c_master(m);
783 	struct svc_i3c_i2c_dev_data *data;
784 	int slot;
785 
786 	slot = svc_i3c_master_reserve_slot(master);
787 	if (slot < 0)
788 		return slot;
789 
790 	data = kzalloc(sizeof(*data), GFP_KERNEL);
791 	if (!data) {
792 		svc_i3c_master_release_slot(master, slot);
793 		return -ENOMEM;
794 	}
795 
796 	data->index = slot;
797 	master->addrs[slot] = dev->addr;
798 
799 	i2c_dev_set_master_data(dev, data);
800 
801 	return 0;
802 }
803 
svc_i3c_master_detach_i2c_dev(struct i2c_dev_desc * dev)804 static void svc_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
805 {
806 	struct svc_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
807 	struct i3c_master_controller *m = i2c_dev_get_master(dev);
808 	struct svc_i3c_master *master = to_svc_i3c_master(m);
809 
810 	svc_i3c_master_release_slot(master, data->index);
811 
812 	kfree(data);
813 }
814 
svc_i3c_master_readb(struct svc_i3c_master * master,u8 * dst,unsigned int len)815 static int svc_i3c_master_readb(struct svc_i3c_master *master, u8 *dst,
816 				unsigned int len)
817 {
818 	int ret, i;
819 	u32 reg;
820 
821 	for (i = 0; i < len; i++) {
822 		ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
823 						reg,
824 						SVC_I3C_MSTATUS_RXPEND(reg),
825 						0, 1000);
826 		if (ret)
827 			return ret;
828 
829 		dst[i] = readl(master->regs + SVC_I3C_MRDATAB);
830 	}
831 
832 	return 0;
833 }
834 
svc_i3c_master_do_daa_locked(struct svc_i3c_master * master,u8 * addrs,unsigned int * count)835 static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
836 					u8 *addrs, unsigned int *count)
837 {
838 	u64 prov_id[SVC_I3C_MAX_DEVS] = {}, nacking_prov_id = 0;
839 	unsigned int dev_nb = 0, last_addr = 0;
840 	u32 reg;
841 	int ret, i;
842 
843 	while (true) {
844 		/* Enter/proceed with DAA */
845 		writel(SVC_I3C_MCTRL_REQUEST_PROC_DAA |
846 		       SVC_I3C_MCTRL_TYPE_I3C |
847 		       SVC_I3C_MCTRL_IBIRESP_NACK |
848 		       SVC_I3C_MCTRL_DIR(SVC_I3C_MCTRL_DIR_WRITE),
849 		       master->regs + SVC_I3C_MCTRL);
850 
851 		/*
852 		 * Either one slave will send its ID, or the assignment process
853 		 * is done.
854 		 */
855 		ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
856 						reg,
857 						SVC_I3C_MSTATUS_RXPEND(reg) |
858 						SVC_I3C_MSTATUS_MCTRLDONE(reg),
859 						1, 1000);
860 		if (ret)
861 			return ret;
862 
863 		if (SVC_I3C_MSTATUS_RXPEND(reg)) {
864 			u8 data[6];
865 
866 			/*
867 			 * We only care about the 48-bit provisional ID yet to
868 			 * be sure a device does not nack an address twice.
869 			 * Otherwise, we would just need to flush the RX FIFO.
870 			 */
871 			ret = svc_i3c_master_readb(master, data, 6);
872 			if (ret)
873 				return ret;
874 
875 			for (i = 0; i < 6; i++)
876 				prov_id[dev_nb] |= (u64)(data[i]) << (8 * (5 - i));
877 
878 			/* We do not care about the BCR and DCR yet */
879 			ret = svc_i3c_master_readb(master, data, 2);
880 			if (ret)
881 				return ret;
882 		} else if (SVC_I3C_MSTATUS_MCTRLDONE(reg)) {
883 			if (SVC_I3C_MSTATUS_STATE_IDLE(reg) &&
884 			    SVC_I3C_MSTATUS_COMPLETE(reg)) {
885 				/*
886 				 * All devices received and acked they dynamic
887 				 * address, this is the natural end of the DAA
888 				 * procedure.
889 				 */
890 				break;
891 			} else if (SVC_I3C_MSTATUS_NACKED(reg)) {
892 				/* No I3C devices attached */
893 				if (dev_nb == 0)
894 					break;
895 
896 				/*
897 				 * A slave device nacked the address, this is
898 				 * allowed only once, DAA will be stopped and
899 				 * then resumed. The same device is supposed to
900 				 * answer again immediately and shall ack the
901 				 * address this time.
902 				 */
903 				if (prov_id[dev_nb] == nacking_prov_id)
904 					return -EIO;
905 
906 				dev_nb--;
907 				nacking_prov_id = prov_id[dev_nb];
908 				svc_i3c_master_emit_stop(master);
909 
910 				continue;
911 			} else {
912 				return -EIO;
913 			}
914 		}
915 
916 		/* Wait for the slave to be ready to receive its address */
917 		ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
918 						reg,
919 						SVC_I3C_MSTATUS_MCTRLDONE(reg) &&
920 						SVC_I3C_MSTATUS_STATE_DAA(reg) &&
921 						SVC_I3C_MSTATUS_BETWEEN(reg),
922 						0, 1000);
923 		if (ret)
924 			return ret;
925 
926 		/* Give the slave device a suitable dynamic address */
927 		ret = i3c_master_get_free_addr(&master->base, last_addr + 1);
928 		if (ret < 0)
929 			return ret;
930 
931 		addrs[dev_nb] = ret;
932 		dev_dbg(master->dev, "DAA: device %d assigned to 0x%02x\n",
933 			dev_nb, addrs[dev_nb]);
934 
935 		writel(addrs[dev_nb], master->regs + SVC_I3C_MWDATAB);
936 		last_addr = addrs[dev_nb++];
937 	}
938 
939 	*count = dev_nb;
940 
941 	return 0;
942 }
943 
svc_i3c_update_ibirules(struct svc_i3c_master * master)944 static int svc_i3c_update_ibirules(struct svc_i3c_master *master)
945 {
946 	struct i3c_dev_desc *dev;
947 	u32 reg_mbyte = 0, reg_nobyte = SVC_I3C_IBIRULES_NOBYTE;
948 	unsigned int mbyte_addr_ok = 0, mbyte_addr_ko = 0, nobyte_addr_ok = 0,
949 		nobyte_addr_ko = 0;
950 	bool list_mbyte = false, list_nobyte = false;
951 
952 	/* Create the IBIRULES register for both cases */
953 	i3c_bus_for_each_i3cdev(&master->base.bus, dev) {
954 		if (I3C_BCR_DEVICE_ROLE(dev->info.bcr) == I3C_BCR_I3C_MASTER)
955 			continue;
956 
957 		if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD) {
958 			reg_mbyte |= SVC_I3C_IBIRULES_ADDR(mbyte_addr_ok,
959 							   dev->info.dyn_addr);
960 
961 			/* IBI rules cannot be applied to devices with MSb=1 */
962 			if (dev->info.dyn_addr & BIT(7))
963 				mbyte_addr_ko++;
964 			else
965 				mbyte_addr_ok++;
966 		} else {
967 			reg_nobyte |= SVC_I3C_IBIRULES_ADDR(nobyte_addr_ok,
968 							    dev->info.dyn_addr);
969 
970 			/* IBI rules cannot be applied to devices with MSb=1 */
971 			if (dev->info.dyn_addr & BIT(7))
972 				nobyte_addr_ko++;
973 			else
974 				nobyte_addr_ok++;
975 		}
976 	}
977 
978 	/* Device list cannot be handled by hardware */
979 	if (!mbyte_addr_ko && mbyte_addr_ok <= SVC_I3C_IBIRULES_ADDRS)
980 		list_mbyte = true;
981 
982 	if (!nobyte_addr_ko && nobyte_addr_ok <= SVC_I3C_IBIRULES_ADDRS)
983 		list_nobyte = true;
984 
985 	/* No list can be properly handled, return an error */
986 	if (!list_mbyte && !list_nobyte)
987 		return -ERANGE;
988 
989 	/* Pick the first list that can be handled by hardware, randomly */
990 	if (list_mbyte)
991 		writel(reg_mbyte, master->regs + SVC_I3C_IBIRULES);
992 	else
993 		writel(reg_nobyte, master->regs + SVC_I3C_IBIRULES);
994 
995 	return 0;
996 }
997 
svc_i3c_master_do_daa(struct i3c_master_controller * m)998 static int svc_i3c_master_do_daa(struct i3c_master_controller *m)
999 {
1000 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1001 	u8 addrs[SVC_I3C_MAX_DEVS];
1002 	unsigned long flags;
1003 	unsigned int dev_nb;
1004 	int ret, i;
1005 
1006 	ret = pm_runtime_resume_and_get(master->dev);
1007 	if (ret < 0) {
1008 		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
1009 		return ret;
1010 	}
1011 
1012 	spin_lock_irqsave(&master->xferqueue.lock, flags);
1013 	ret = svc_i3c_master_do_daa_locked(master, addrs, &dev_nb);
1014 	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
1015 	if (ret) {
1016 		svc_i3c_master_emit_stop(master);
1017 		svc_i3c_master_clear_merrwarn(master);
1018 		goto rpm_out;
1019 	}
1020 
1021 	/*
1022 	 * Register all devices who participated to the core
1023 	 *
1024 	 * If two devices (A and B) are detected in DAA and address 0xa is assigned to
1025 	 * device A and 0xb to device B, a failure in i3c_master_add_i3c_dev_locked()
1026 	 * for device A (addr: 0xa) could prevent device B (addr: 0xb) from being
1027 	 * registered on the bus. The I3C stack might still consider 0xb a free
1028 	 * address. If a subsequent Hotjoin occurs, 0xb might be assigned to Device A,
1029 	 * causing both devices A and B to use the same address 0xb, violating the I3C
1030 	 * specification.
1031 	 *
1032 	 * The return value for i3c_master_add_i3c_dev_locked() should not be checked
1033 	 * because subsequent steps will scan the entire I3C bus, independent of
1034 	 * whether i3c_master_add_i3c_dev_locked() returns success.
1035 	 *
1036 	 * If device A registration fails, there is still a chance to register device
1037 	 * B. i3c_master_add_i3c_dev_locked() can reset DAA if a failure occurs while
1038 	 * retrieving device information.
1039 	 */
1040 	for (i = 0; i < dev_nb; i++)
1041 		i3c_master_add_i3c_dev_locked(m, addrs[i]);
1042 
1043 	/* Configure IBI auto-rules */
1044 	ret = svc_i3c_update_ibirules(master);
1045 	if (ret)
1046 		dev_err(master->dev, "Cannot handle such a list of devices");
1047 
1048 rpm_out:
1049 	pm_runtime_mark_last_busy(master->dev);
1050 	pm_runtime_put_autosuspend(master->dev);
1051 
1052 	return ret;
1053 }
1054 
svc_i3c_master_read(struct svc_i3c_master * master,u8 * in,unsigned int len)1055 static int svc_i3c_master_read(struct svc_i3c_master *master,
1056 			       u8 *in, unsigned int len)
1057 {
1058 	int offset = 0, i;
1059 	u32 mdctrl, mstatus;
1060 	bool completed = false;
1061 	unsigned int count;
1062 	unsigned long start = jiffies;
1063 
1064 	while (!completed) {
1065 		mstatus = readl(master->regs + SVC_I3C_MSTATUS);
1066 		if (SVC_I3C_MSTATUS_COMPLETE(mstatus) != 0)
1067 			completed = true;
1068 
1069 		if (time_after(jiffies, start + msecs_to_jiffies(1000))) {
1070 			dev_dbg(master->dev, "I3C read timeout\n");
1071 			return -ETIMEDOUT;
1072 		}
1073 
1074 		mdctrl = readl(master->regs + SVC_I3C_MDATACTRL);
1075 		count = SVC_I3C_MDATACTRL_RXCOUNT(mdctrl);
1076 		if (offset + count > len) {
1077 			dev_err(master->dev, "I3C receive length too long!\n");
1078 			return -EINVAL;
1079 		}
1080 		for (i = 0; i < count; i++)
1081 			in[offset + i] = readl(master->regs + SVC_I3C_MRDATAB);
1082 
1083 		offset += count;
1084 	}
1085 
1086 	return offset;
1087 }
1088 
svc_i3c_master_write(struct svc_i3c_master * master,const u8 * out,unsigned int len)1089 static int svc_i3c_master_write(struct svc_i3c_master *master,
1090 				const u8 *out, unsigned int len)
1091 {
1092 	int offset = 0, ret;
1093 	u32 mdctrl;
1094 
1095 	while (offset < len) {
1096 		ret = readl_poll_timeout(master->regs + SVC_I3C_MDATACTRL,
1097 					 mdctrl,
1098 					 !(mdctrl & SVC_I3C_MDATACTRL_TXFULL),
1099 					 0, 1000);
1100 		if (ret)
1101 			return ret;
1102 
1103 		/*
1104 		 * The last byte to be sent over the bus must either have the
1105 		 * "end" bit set or be written in MWDATABE.
1106 		 */
1107 		if (likely(offset < (len - 1)))
1108 			writel(out[offset++], master->regs + SVC_I3C_MWDATAB);
1109 		else
1110 			writel(out[offset++], master->regs + SVC_I3C_MWDATABE);
1111 	}
1112 
1113 	return 0;
1114 }
1115 
svc_i3c_master_xfer(struct svc_i3c_master * master,bool rnw,unsigned int xfer_type,u8 addr,u8 * in,const u8 * out,unsigned int xfer_len,unsigned int * actual_len,bool continued)1116 static int svc_i3c_master_xfer(struct svc_i3c_master *master,
1117 			       bool rnw, unsigned int xfer_type, u8 addr,
1118 			       u8 *in, const u8 *out, unsigned int xfer_len,
1119 			       unsigned int *actual_len, bool continued)
1120 {
1121 	int retry = 2;
1122 	u32 reg;
1123 	int ret;
1124 
1125 	/* clean SVC_I3C_MINT_IBIWON w1c bits */
1126 	writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
1127 
1128 
1129 	while (retry--) {
1130 		writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
1131 		       xfer_type |
1132 		       SVC_I3C_MCTRL_IBIRESP_NACK |
1133 		       SVC_I3C_MCTRL_DIR(rnw) |
1134 		       SVC_I3C_MCTRL_ADDR(addr) |
1135 		       SVC_I3C_MCTRL_RDTERM(*actual_len),
1136 		       master->regs + SVC_I3C_MCTRL);
1137 
1138 		ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
1139 				 SVC_I3C_MSTATUS_MCTRLDONE(reg), 0, 1000);
1140 		if (ret)
1141 			goto emit_stop;
1142 
1143 		if (readl(master->regs + SVC_I3C_MERRWARN) & SVC_I3C_MERRWARN_NACK) {
1144 			/*
1145 			 * According to I3C Spec 1.1.1, 11-Jun-2021, section: 5.1.2.2.3.
1146 			 * If the Controller chooses to start an I3C Message with an I3C Dynamic
1147 			 * Address, then special provisions shall be made because that same I3C
1148 			 * Target may be initiating an IBI or a Controller Role Request. So, one of
1149 			 * three things may happen: (skip 1, 2)
1150 			 *
1151 			 * 3. The Addresses match and the RnW bits also match, and so neither
1152 			 * Controller nor Target will ACK since both are expecting the other side to
1153 			 * provide ACK. As a result, each side might think it had "won" arbitration,
1154 			 * but neither side would continue, as each would subsequently see that the
1155 			 * other did not provide ACK.
1156 			 * ...
1157 			 * For either value of RnW: Due to the NACK, the Controller shall defer the
1158 			 * Private Write or Private Read, and should typically transmit the Target
1159 			 * Address again after a Repeated START (i.e., the next one or any one prior
1160 			 * to a STOP in the Frame). Since the Address Header following a Repeated
1161 			 * START is not arbitrated, the Controller will always win (see Section
1162 			 * 5.1.2.2.4).
1163 			 */
1164 			if (retry && addr != 0x7e) {
1165 				writel(SVC_I3C_MERRWARN_NACK, master->regs + SVC_I3C_MERRWARN);
1166 			} else {
1167 				ret = -ENXIO;
1168 				*actual_len = 0;
1169 				goto emit_stop;
1170 			}
1171 		} else {
1172 			break;
1173 		}
1174 	}
1175 
1176 	/*
1177 	 * According to I3C spec ver 1.1.1, 5.1.2.2.3 Consequence of Controller Starting a Frame
1178 	 * with I3C Target Address.
1179 	 *
1180 	 * The I3C Controller normally should start a Frame, the Address may be arbitrated, and so
1181 	 * the Controller shall monitor to see whether an In-Band Interrupt request, a Controller
1182 	 * Role Request (i.e., Secondary Controller requests to become the Active Controller), or
1183 	 * a Hot-Join Request has been made.
1184 	 *
1185 	 * If missed IBIWON check, the wrong data will be return. When IBIWON happen, return failure
1186 	 * and yield the above events handler.
1187 	 */
1188 	if (SVC_I3C_MSTATUS_IBIWON(reg)) {
1189 		ret = -EAGAIN;
1190 		*actual_len = 0;
1191 		goto emit_stop;
1192 	}
1193 
1194 	if (rnw)
1195 		ret = svc_i3c_master_read(master, in, xfer_len);
1196 	else
1197 		ret = svc_i3c_master_write(master, out, xfer_len);
1198 	if (ret < 0)
1199 		goto emit_stop;
1200 
1201 	if (rnw)
1202 		*actual_len = ret;
1203 
1204 	ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
1205 				 SVC_I3C_MSTATUS_COMPLETE(reg), 0, 1000);
1206 	if (ret)
1207 		goto emit_stop;
1208 
1209 	writel(SVC_I3C_MINT_COMPLETE, master->regs + SVC_I3C_MSTATUS);
1210 
1211 	if (!continued) {
1212 		svc_i3c_master_emit_stop(master);
1213 
1214 		/* Wait idle if stop is sent. */
1215 		readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
1216 				   SVC_I3C_MSTATUS_STATE_IDLE(reg), 0, 1000);
1217 	}
1218 
1219 	return 0;
1220 
1221 emit_stop:
1222 	svc_i3c_master_emit_stop(master);
1223 	svc_i3c_master_clear_merrwarn(master);
1224 
1225 	return ret;
1226 }
1227 
1228 static struct svc_i3c_xfer *
svc_i3c_master_alloc_xfer(struct svc_i3c_master * master,unsigned int ncmds)1229 svc_i3c_master_alloc_xfer(struct svc_i3c_master *master, unsigned int ncmds)
1230 {
1231 	struct svc_i3c_xfer *xfer;
1232 
1233 	xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
1234 	if (!xfer)
1235 		return NULL;
1236 
1237 	INIT_LIST_HEAD(&xfer->node);
1238 	xfer->ncmds = ncmds;
1239 	xfer->ret = -ETIMEDOUT;
1240 
1241 	return xfer;
1242 }
1243 
svc_i3c_master_free_xfer(struct svc_i3c_xfer * xfer)1244 static void svc_i3c_master_free_xfer(struct svc_i3c_xfer *xfer)
1245 {
1246 	kfree(xfer);
1247 }
1248 
svc_i3c_master_dequeue_xfer_locked(struct svc_i3c_master * master,struct svc_i3c_xfer * xfer)1249 static void svc_i3c_master_dequeue_xfer_locked(struct svc_i3c_master *master,
1250 					       struct svc_i3c_xfer *xfer)
1251 {
1252 	if (master->xferqueue.cur == xfer)
1253 		master->xferqueue.cur = NULL;
1254 	else
1255 		list_del_init(&xfer->node);
1256 }
1257 
svc_i3c_master_dequeue_xfer(struct svc_i3c_master * master,struct svc_i3c_xfer * xfer)1258 static void svc_i3c_master_dequeue_xfer(struct svc_i3c_master *master,
1259 					struct svc_i3c_xfer *xfer)
1260 {
1261 	unsigned long flags;
1262 
1263 	spin_lock_irqsave(&master->xferqueue.lock, flags);
1264 	svc_i3c_master_dequeue_xfer_locked(master, xfer);
1265 	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
1266 }
1267 
svc_i3c_master_start_xfer_locked(struct svc_i3c_master * master)1268 static void svc_i3c_master_start_xfer_locked(struct svc_i3c_master *master)
1269 {
1270 	struct svc_i3c_xfer *xfer = master->xferqueue.cur;
1271 	int ret, i;
1272 
1273 	if (!xfer)
1274 		return;
1275 
1276 	svc_i3c_master_clear_merrwarn(master);
1277 	svc_i3c_master_flush_fifo(master);
1278 
1279 	for (i = 0; i < xfer->ncmds; i++) {
1280 		struct svc_i3c_cmd *cmd = &xfer->cmds[i];
1281 
1282 		ret = svc_i3c_master_xfer(master, cmd->rnw, xfer->type,
1283 					  cmd->addr, cmd->in, cmd->out,
1284 					  cmd->len, &cmd->actual_len,
1285 					  cmd->continued);
1286 		/* cmd->xfer is NULL if I2C or CCC transfer */
1287 		if (cmd->xfer)
1288 			cmd->xfer->actual_len = cmd->actual_len;
1289 
1290 		if (ret)
1291 			break;
1292 	}
1293 
1294 	xfer->ret = ret;
1295 	complete(&xfer->comp);
1296 
1297 	if (ret < 0)
1298 		svc_i3c_master_dequeue_xfer_locked(master, xfer);
1299 
1300 	xfer = list_first_entry_or_null(&master->xferqueue.list,
1301 					struct svc_i3c_xfer,
1302 					node);
1303 	if (xfer)
1304 		list_del_init(&xfer->node);
1305 
1306 	master->xferqueue.cur = xfer;
1307 	svc_i3c_master_start_xfer_locked(master);
1308 }
1309 
svc_i3c_master_enqueue_xfer(struct svc_i3c_master * master,struct svc_i3c_xfer * xfer)1310 static void svc_i3c_master_enqueue_xfer(struct svc_i3c_master *master,
1311 					struct svc_i3c_xfer *xfer)
1312 {
1313 	unsigned long flags;
1314 	int ret;
1315 
1316 	ret = pm_runtime_resume_and_get(master->dev);
1317 	if (ret < 0) {
1318 		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
1319 		return;
1320 	}
1321 
1322 	init_completion(&xfer->comp);
1323 	spin_lock_irqsave(&master->xferqueue.lock, flags);
1324 	if (master->xferqueue.cur) {
1325 		list_add_tail(&xfer->node, &master->xferqueue.list);
1326 	} else {
1327 		master->xferqueue.cur = xfer;
1328 		svc_i3c_master_start_xfer_locked(master);
1329 	}
1330 	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
1331 
1332 	pm_runtime_mark_last_busy(master->dev);
1333 	pm_runtime_put_autosuspend(master->dev);
1334 }
1335 
1336 static bool
svc_i3c_master_supports_ccc_cmd(struct i3c_master_controller * master,const struct i3c_ccc_cmd * cmd)1337 svc_i3c_master_supports_ccc_cmd(struct i3c_master_controller *master,
1338 				const struct i3c_ccc_cmd *cmd)
1339 {
1340 	/* No software support for CCC commands targeting more than one slave */
1341 	return (cmd->ndests == 1);
1342 }
1343 
svc_i3c_master_send_bdcast_ccc_cmd(struct svc_i3c_master * master,struct i3c_ccc_cmd * ccc)1344 static int svc_i3c_master_send_bdcast_ccc_cmd(struct svc_i3c_master *master,
1345 					      struct i3c_ccc_cmd *ccc)
1346 {
1347 	unsigned int xfer_len = ccc->dests[0].payload.len + 1;
1348 	struct svc_i3c_xfer *xfer;
1349 	struct svc_i3c_cmd *cmd;
1350 	u8 *buf;
1351 	int ret;
1352 
1353 	xfer = svc_i3c_master_alloc_xfer(master, 1);
1354 	if (!xfer)
1355 		return -ENOMEM;
1356 
1357 	buf = kmalloc(xfer_len, GFP_KERNEL);
1358 	if (!buf) {
1359 		svc_i3c_master_free_xfer(xfer);
1360 		return -ENOMEM;
1361 	}
1362 
1363 	buf[0] = ccc->id;
1364 	memcpy(&buf[1], ccc->dests[0].payload.data, ccc->dests[0].payload.len);
1365 
1366 	xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
1367 
1368 	cmd = &xfer->cmds[0];
1369 	cmd->addr = ccc->dests[0].addr;
1370 	cmd->rnw = ccc->rnw;
1371 	cmd->in = NULL;
1372 	cmd->out = buf;
1373 	cmd->len = xfer_len;
1374 	cmd->actual_len = 0;
1375 	cmd->continued = false;
1376 
1377 	mutex_lock(&master->lock);
1378 	svc_i3c_master_enqueue_xfer(master, xfer);
1379 	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1380 		svc_i3c_master_dequeue_xfer(master, xfer);
1381 	mutex_unlock(&master->lock);
1382 
1383 	ret = xfer->ret;
1384 	kfree(buf);
1385 	svc_i3c_master_free_xfer(xfer);
1386 
1387 	return ret;
1388 }
1389 
svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master * master,struct i3c_ccc_cmd * ccc)1390 static int svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master *master,
1391 					      struct i3c_ccc_cmd *ccc)
1392 {
1393 	unsigned int xfer_len = ccc->dests[0].payload.len;
1394 	unsigned int actual_len = ccc->rnw ? xfer_len : 0;
1395 	struct svc_i3c_xfer *xfer;
1396 	struct svc_i3c_cmd *cmd;
1397 	int ret;
1398 
1399 	xfer = svc_i3c_master_alloc_xfer(master, 2);
1400 	if (!xfer)
1401 		return -ENOMEM;
1402 
1403 	xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
1404 
1405 	/* Broadcasted message */
1406 	cmd = &xfer->cmds[0];
1407 	cmd->addr = I3C_BROADCAST_ADDR;
1408 	cmd->rnw = 0;
1409 	cmd->in = NULL;
1410 	cmd->out = &ccc->id;
1411 	cmd->len = 1;
1412 	cmd->actual_len = 0;
1413 	cmd->continued = true;
1414 
1415 	/* Directed message */
1416 	cmd = &xfer->cmds[1];
1417 	cmd->addr = ccc->dests[0].addr;
1418 	cmd->rnw = ccc->rnw;
1419 	cmd->in = ccc->rnw ? ccc->dests[0].payload.data : NULL;
1420 	cmd->out = ccc->rnw ? NULL : ccc->dests[0].payload.data,
1421 	cmd->len = xfer_len;
1422 	cmd->actual_len = actual_len;
1423 	cmd->continued = false;
1424 
1425 	mutex_lock(&master->lock);
1426 	svc_i3c_master_enqueue_xfer(master, xfer);
1427 	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1428 		svc_i3c_master_dequeue_xfer(master, xfer);
1429 	mutex_unlock(&master->lock);
1430 
1431 	if (cmd->actual_len != xfer_len)
1432 		ccc->dests[0].payload.len = cmd->actual_len;
1433 
1434 	ret = xfer->ret;
1435 	svc_i3c_master_free_xfer(xfer);
1436 
1437 	return ret;
1438 }
1439 
svc_i3c_master_send_ccc_cmd(struct i3c_master_controller * m,struct i3c_ccc_cmd * cmd)1440 static int svc_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
1441 				       struct i3c_ccc_cmd *cmd)
1442 {
1443 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1444 	bool broadcast = cmd->id < 0x80;
1445 	int ret;
1446 
1447 	if (broadcast)
1448 		ret = svc_i3c_master_send_bdcast_ccc_cmd(master, cmd);
1449 	else
1450 		ret = svc_i3c_master_send_direct_ccc_cmd(master, cmd);
1451 
1452 	if (ret)
1453 		cmd->err = I3C_ERROR_M2;
1454 
1455 	return ret;
1456 }
1457 
svc_i3c_master_priv_xfers(struct i3c_dev_desc * dev,struct i3c_priv_xfer * xfers,int nxfers)1458 static int svc_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
1459 				     struct i3c_priv_xfer *xfers,
1460 				     int nxfers)
1461 {
1462 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
1463 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1464 	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1465 	struct svc_i3c_xfer *xfer;
1466 	int ret, i;
1467 
1468 	xfer = svc_i3c_master_alloc_xfer(master, nxfers);
1469 	if (!xfer)
1470 		return -ENOMEM;
1471 
1472 	xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
1473 
1474 	for (i = 0; i < nxfers; i++) {
1475 		struct svc_i3c_cmd *cmd = &xfer->cmds[i];
1476 
1477 		cmd->xfer = &xfers[i];
1478 		cmd->addr = master->addrs[data->index];
1479 		cmd->rnw = xfers[i].rnw;
1480 		cmd->in = xfers[i].rnw ? xfers[i].data.in : NULL;
1481 		cmd->out = xfers[i].rnw ? NULL : xfers[i].data.out;
1482 		cmd->len = xfers[i].len;
1483 		cmd->actual_len = xfers[i].rnw ? xfers[i].len : 0;
1484 		cmd->continued = (i + 1) < nxfers;
1485 	}
1486 
1487 	mutex_lock(&master->lock);
1488 	svc_i3c_master_enqueue_xfer(master, xfer);
1489 	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1490 		svc_i3c_master_dequeue_xfer(master, xfer);
1491 	mutex_unlock(&master->lock);
1492 
1493 	ret = xfer->ret;
1494 	svc_i3c_master_free_xfer(xfer);
1495 
1496 	return ret;
1497 }
1498 
svc_i3c_master_i2c_xfers(struct i2c_dev_desc * dev,const struct i2c_msg * xfers,int nxfers)1499 static int svc_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
1500 				    const struct i2c_msg *xfers,
1501 				    int nxfers)
1502 {
1503 	struct i3c_master_controller *m = i2c_dev_get_master(dev);
1504 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1505 	struct svc_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
1506 	struct svc_i3c_xfer *xfer;
1507 	int ret, i;
1508 
1509 	xfer = svc_i3c_master_alloc_xfer(master, nxfers);
1510 	if (!xfer)
1511 		return -ENOMEM;
1512 
1513 	xfer->type = SVC_I3C_MCTRL_TYPE_I2C;
1514 
1515 	for (i = 0; i < nxfers; i++) {
1516 		struct svc_i3c_cmd *cmd = &xfer->cmds[i];
1517 
1518 		cmd->addr = master->addrs[data->index];
1519 		cmd->rnw = xfers[i].flags & I2C_M_RD;
1520 		cmd->in = cmd->rnw ? xfers[i].buf : NULL;
1521 		cmd->out = cmd->rnw ? NULL : xfers[i].buf;
1522 		cmd->len = xfers[i].len;
1523 		cmd->actual_len = cmd->rnw ? xfers[i].len : 0;
1524 		cmd->continued = (i + 1 < nxfers);
1525 	}
1526 
1527 	mutex_lock(&master->lock);
1528 	svc_i3c_master_enqueue_xfer(master, xfer);
1529 	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1530 		svc_i3c_master_dequeue_xfer(master, xfer);
1531 	mutex_unlock(&master->lock);
1532 
1533 	ret = xfer->ret;
1534 	svc_i3c_master_free_xfer(xfer);
1535 
1536 	return ret;
1537 }
1538 
svc_i3c_master_request_ibi(struct i3c_dev_desc * dev,const struct i3c_ibi_setup * req)1539 static int svc_i3c_master_request_ibi(struct i3c_dev_desc *dev,
1540 				      const struct i3c_ibi_setup *req)
1541 {
1542 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
1543 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1544 	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1545 	unsigned long flags;
1546 	unsigned int i;
1547 
1548 	if (dev->ibi->max_payload_len > SVC_I3C_FIFO_SIZE) {
1549 		dev_err(master->dev, "IBI max payload %d should be < %d\n",
1550 			dev->ibi->max_payload_len, SVC_I3C_FIFO_SIZE);
1551 		return -ERANGE;
1552 	}
1553 
1554 	data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
1555 	if (IS_ERR(data->ibi_pool))
1556 		return PTR_ERR(data->ibi_pool);
1557 
1558 	spin_lock_irqsave(&master->ibi.lock, flags);
1559 	for (i = 0; i < master->ibi.num_slots; i++) {
1560 		if (!master->ibi.slots[i]) {
1561 			data->ibi = i;
1562 			master->ibi.slots[i] = dev;
1563 			break;
1564 		}
1565 	}
1566 	spin_unlock_irqrestore(&master->ibi.lock, flags);
1567 
1568 	if (i < master->ibi.num_slots)
1569 		return 0;
1570 
1571 	i3c_generic_ibi_free_pool(data->ibi_pool);
1572 	data->ibi_pool = NULL;
1573 
1574 	return -ENOSPC;
1575 }
1576 
svc_i3c_master_free_ibi(struct i3c_dev_desc * dev)1577 static void svc_i3c_master_free_ibi(struct i3c_dev_desc *dev)
1578 {
1579 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
1580 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1581 	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1582 	unsigned long flags;
1583 
1584 	spin_lock_irqsave(&master->ibi.lock, flags);
1585 	master->ibi.slots[data->ibi] = NULL;
1586 	data->ibi = -1;
1587 	spin_unlock_irqrestore(&master->ibi.lock, flags);
1588 
1589 	i3c_generic_ibi_free_pool(data->ibi_pool);
1590 }
1591 
svc_i3c_master_enable_ibi(struct i3c_dev_desc * dev)1592 static int svc_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
1593 {
1594 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
1595 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1596 	int ret;
1597 
1598 	ret = pm_runtime_resume_and_get(master->dev);
1599 	if (ret < 0) {
1600 		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
1601 		return ret;
1602 	}
1603 
1604 	master->enabled_events++;
1605 	svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
1606 
1607 	return i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
1608 }
1609 
svc_i3c_master_disable_ibi(struct i3c_dev_desc * dev)1610 static int svc_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
1611 {
1612 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
1613 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1614 	int ret;
1615 
1616 	master->enabled_events--;
1617 	if (!master->enabled_events)
1618 		svc_i3c_master_disable_interrupts(master);
1619 
1620 	ret = i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
1621 
1622 	pm_runtime_mark_last_busy(master->dev);
1623 	pm_runtime_put_autosuspend(master->dev);
1624 
1625 	return ret;
1626 }
1627 
svc_i3c_master_enable_hotjoin(struct i3c_master_controller * m)1628 static int svc_i3c_master_enable_hotjoin(struct i3c_master_controller *m)
1629 {
1630 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1631 	int ret;
1632 
1633 	ret = pm_runtime_resume_and_get(master->dev);
1634 	if (ret < 0) {
1635 		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
1636 		return ret;
1637 	}
1638 
1639 	master->enabled_events |= SVC_I3C_EVENT_HOTJOIN;
1640 
1641 	svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
1642 
1643 	return 0;
1644 }
1645 
svc_i3c_master_disable_hotjoin(struct i3c_master_controller * m)1646 static int svc_i3c_master_disable_hotjoin(struct i3c_master_controller *m)
1647 {
1648 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1649 
1650 	master->enabled_events &= ~SVC_I3C_EVENT_HOTJOIN;
1651 
1652 	if (!master->enabled_events)
1653 		svc_i3c_master_disable_interrupts(master);
1654 
1655 	pm_runtime_mark_last_busy(master->dev);
1656 	pm_runtime_put_autosuspend(master->dev);
1657 
1658 	return 0;
1659 }
1660 
svc_i3c_master_recycle_ibi_slot(struct i3c_dev_desc * dev,struct i3c_ibi_slot * slot)1661 static void svc_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
1662 					    struct i3c_ibi_slot *slot)
1663 {
1664 	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1665 
1666 	i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
1667 }
1668 
1669 static const struct i3c_master_controller_ops svc_i3c_master_ops = {
1670 	.bus_init = svc_i3c_master_bus_init,
1671 	.bus_cleanup = svc_i3c_master_bus_cleanup,
1672 	.attach_i3c_dev = svc_i3c_master_attach_i3c_dev,
1673 	.detach_i3c_dev = svc_i3c_master_detach_i3c_dev,
1674 	.reattach_i3c_dev = svc_i3c_master_reattach_i3c_dev,
1675 	.attach_i2c_dev = svc_i3c_master_attach_i2c_dev,
1676 	.detach_i2c_dev = svc_i3c_master_detach_i2c_dev,
1677 	.do_daa = svc_i3c_master_do_daa,
1678 	.supports_ccc_cmd = svc_i3c_master_supports_ccc_cmd,
1679 	.send_ccc_cmd = svc_i3c_master_send_ccc_cmd,
1680 	.priv_xfers = svc_i3c_master_priv_xfers,
1681 	.i2c_xfers = svc_i3c_master_i2c_xfers,
1682 	.request_ibi = svc_i3c_master_request_ibi,
1683 	.free_ibi = svc_i3c_master_free_ibi,
1684 	.recycle_ibi_slot = svc_i3c_master_recycle_ibi_slot,
1685 	.enable_ibi = svc_i3c_master_enable_ibi,
1686 	.disable_ibi = svc_i3c_master_disable_ibi,
1687 	.enable_hotjoin = svc_i3c_master_enable_hotjoin,
1688 	.disable_hotjoin = svc_i3c_master_disable_hotjoin,
1689 	.set_speed = svc_i3c_master_set_speed,
1690 };
1691 
svc_i3c_master_prepare_clks(struct svc_i3c_master * master)1692 static int svc_i3c_master_prepare_clks(struct svc_i3c_master *master)
1693 {
1694 	int ret = 0;
1695 
1696 	ret = clk_prepare_enable(master->pclk);
1697 	if (ret)
1698 		return ret;
1699 
1700 	ret = clk_prepare_enable(master->fclk);
1701 	if (ret) {
1702 		clk_disable_unprepare(master->pclk);
1703 		return ret;
1704 	}
1705 
1706 	ret = clk_prepare_enable(master->sclk);
1707 	if (ret) {
1708 		clk_disable_unprepare(master->pclk);
1709 		clk_disable_unprepare(master->fclk);
1710 		return ret;
1711 	}
1712 
1713 	return 0;
1714 }
1715 
svc_i3c_master_unprepare_clks(struct svc_i3c_master * master)1716 static void svc_i3c_master_unprepare_clks(struct svc_i3c_master *master)
1717 {
1718 	clk_disable_unprepare(master->pclk);
1719 	clk_disable_unprepare(master->fclk);
1720 	clk_disable_unprepare(master->sclk);
1721 }
1722 
svc_i3c_master_probe(struct platform_device * pdev)1723 static int svc_i3c_master_probe(struct platform_device *pdev)
1724 {
1725 	struct device *dev = &pdev->dev;
1726 	struct svc_i3c_master *master;
1727 	int ret;
1728 
1729 	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
1730 	if (!master)
1731 		return -ENOMEM;
1732 
1733 	master->regs = devm_platform_ioremap_resource(pdev, 0);
1734 	if (IS_ERR(master->regs))
1735 		return PTR_ERR(master->regs);
1736 
1737 	master->pclk = devm_clk_get(dev, "pclk");
1738 	if (IS_ERR(master->pclk))
1739 		return PTR_ERR(master->pclk);
1740 
1741 	master->fclk = devm_clk_get(dev, "fast_clk");
1742 	if (IS_ERR(master->fclk))
1743 		return PTR_ERR(master->fclk);
1744 
1745 	master->sclk = devm_clk_get(dev, "slow_clk");
1746 	if (IS_ERR(master->sclk))
1747 		return PTR_ERR(master->sclk);
1748 
1749 	master->irq = platform_get_irq(pdev, 0);
1750 	if (master->irq < 0)
1751 		return master->irq;
1752 
1753 	master->dev = dev;
1754 
1755 	ret = svc_i3c_master_prepare_clks(master);
1756 	if (ret)
1757 		return ret;
1758 
1759 	INIT_WORK(&master->hj_work, svc_i3c_master_hj_work);
1760 	INIT_WORK(&master->ibi_work, svc_i3c_master_ibi_work);
1761 	mutex_init(&master->lock);
1762 
1763 	ret = devm_request_irq(dev, master->irq, svc_i3c_master_irq_handler,
1764 			       IRQF_NO_SUSPEND, "svc-i3c-irq", master);
1765 	if (ret)
1766 		goto err_disable_clks;
1767 
1768 	master->free_slots = GENMASK(SVC_I3C_MAX_DEVS - 1, 0);
1769 
1770 	spin_lock_init(&master->xferqueue.lock);
1771 	INIT_LIST_HEAD(&master->xferqueue.list);
1772 
1773 	spin_lock_init(&master->ibi.lock);
1774 	master->ibi.num_slots = SVC_I3C_MAX_DEVS;
1775 	master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
1776 					 sizeof(*master->ibi.slots),
1777 					 GFP_KERNEL);
1778 	if (!master->ibi.slots) {
1779 		ret = -ENOMEM;
1780 		goto err_disable_clks;
1781 	}
1782 
1783 	platform_set_drvdata(pdev, master);
1784 
1785 	pm_runtime_set_autosuspend_delay(&pdev->dev, SVC_I3C_PM_TIMEOUT_MS);
1786 	pm_runtime_use_autosuspend(&pdev->dev);
1787 	pm_runtime_get_noresume(&pdev->dev);
1788 	pm_runtime_set_active(&pdev->dev);
1789 	pm_runtime_enable(&pdev->dev);
1790 
1791 	svc_i3c_master_reset(master);
1792 
1793 	/* Register the master */
1794 	ret = i3c_master_register(&master->base, &pdev->dev,
1795 				  &svc_i3c_master_ops, false);
1796 	if (ret)
1797 		goto rpm_disable;
1798 
1799 	pm_runtime_mark_last_busy(&pdev->dev);
1800 	pm_runtime_put_autosuspend(&pdev->dev);
1801 
1802 	return 0;
1803 
1804 rpm_disable:
1805 	pm_runtime_dont_use_autosuspend(&pdev->dev);
1806 	pm_runtime_put_noidle(&pdev->dev);
1807 	pm_runtime_disable(&pdev->dev);
1808 	pm_runtime_set_suspended(&pdev->dev);
1809 
1810 err_disable_clks:
1811 	svc_i3c_master_unprepare_clks(master);
1812 
1813 	return ret;
1814 }
1815 
svc_i3c_master_remove(struct platform_device * pdev)1816 static void svc_i3c_master_remove(struct platform_device *pdev)
1817 {
1818 	struct svc_i3c_master *master = platform_get_drvdata(pdev);
1819 
1820 	cancel_work_sync(&master->hj_work);
1821 	i3c_master_unregister(&master->base);
1822 
1823 	pm_runtime_dont_use_autosuspend(&pdev->dev);
1824 	pm_runtime_disable(&pdev->dev);
1825 }
1826 
svc_i3c_save_regs(struct svc_i3c_master * master)1827 static void svc_i3c_save_regs(struct svc_i3c_master *master)
1828 {
1829 	master->saved_regs.mconfig = readl(master->regs + SVC_I3C_MCONFIG);
1830 	master->saved_regs.mdynaddr = readl(master->regs + SVC_I3C_MDYNADDR);
1831 }
1832 
svc_i3c_restore_regs(struct svc_i3c_master * master)1833 static void svc_i3c_restore_regs(struct svc_i3c_master *master)
1834 {
1835 	if (readl(master->regs + SVC_I3C_MDYNADDR) !=
1836 	    master->saved_regs.mdynaddr) {
1837 		writel(master->saved_regs.mconfig,
1838 		       master->regs + SVC_I3C_MCONFIG);
1839 		writel(master->saved_regs.mdynaddr,
1840 		       master->regs + SVC_I3C_MDYNADDR);
1841 	}
1842 }
1843 
svc_i3c_runtime_suspend(struct device * dev)1844 static int __maybe_unused svc_i3c_runtime_suspend(struct device *dev)
1845 {
1846 	struct svc_i3c_master *master = dev_get_drvdata(dev);
1847 
1848 	svc_i3c_save_regs(master);
1849 	svc_i3c_master_unprepare_clks(master);
1850 	pinctrl_pm_select_sleep_state(dev);
1851 
1852 	return 0;
1853 }
1854 
svc_i3c_runtime_resume(struct device * dev)1855 static int __maybe_unused svc_i3c_runtime_resume(struct device *dev)
1856 {
1857 	struct svc_i3c_master *master = dev_get_drvdata(dev);
1858 
1859 	pinctrl_pm_select_default_state(dev);
1860 	svc_i3c_master_prepare_clks(master);
1861 
1862 	svc_i3c_restore_regs(master);
1863 
1864 	return 0;
1865 }
1866 
1867 static const struct dev_pm_ops svc_i3c_pm_ops = {
1868 	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
1869 				      pm_runtime_force_resume)
1870 	SET_RUNTIME_PM_OPS(svc_i3c_runtime_suspend,
1871 			   svc_i3c_runtime_resume, NULL)
1872 };
1873 
1874 static const struct of_device_id svc_i3c_master_of_match_tbl[] = {
1875 	{ .compatible = "silvaco,i3c-master" },
1876 	{ /* sentinel */ },
1877 };
1878 MODULE_DEVICE_TABLE(of, svc_i3c_master_of_match_tbl);
1879 
1880 static struct platform_driver svc_i3c_master = {
1881 	.probe = svc_i3c_master_probe,
1882 	.remove_new = svc_i3c_master_remove,
1883 	.driver = {
1884 		.name = "silvaco-i3c-master",
1885 		.of_match_table = svc_i3c_master_of_match_tbl,
1886 		.pm = &svc_i3c_pm_ops,
1887 	},
1888 };
1889 module_platform_driver(svc_i3c_master);
1890 
1891 MODULE_AUTHOR("Conor Culhane <conor.culhane@silvaco.com>");
1892 MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
1893 MODULE_DESCRIPTION("Silvaco dual-role I3C master driver");
1894 MODULE_LICENSE("GPL v2");
1895