1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Silvaco dual-role I3C master driver
4 *
5 * Copyright (C) 2020 Silvaco
6 * Author: Miquel RAYNAL <miquel.raynal@bootlin.com>
7 * Based on a work from: Conor Culhane <conor.culhane@silvaco.com>
8 */
9
10 #include <linux/bitfield.h>
11 #include <linux/clk.h>
12 #include <linux/completion.h>
13 #include <linux/errno.h>
14 #include <linux/i3c/master.h>
15 #include <linux/interrupt.h>
16 #include <linux/iopoll.h>
17 #include <linux/list.h>
18 #include <linux/module.h>
19 #include <linux/of.h>
20 #include <linux/pinctrl/consumer.h>
21 #include <linux/platform_device.h>
22 #include <linux/pm_runtime.h>
23
24 /* Master Mode Registers */
25 #define SVC_I3C_MCONFIG 0x000
26 #define SVC_I3C_MCONFIG_MASTER_EN BIT(0)
27 #define SVC_I3C_MCONFIG_DISTO(x) FIELD_PREP(BIT(3), (x))
28 #define SVC_I3C_MCONFIG_HKEEP(x) FIELD_PREP(GENMASK(5, 4), (x))
29 #define SVC_I3C_MCONFIG_ODSTOP(x) FIELD_PREP(BIT(6), (x))
30 #define SVC_I3C_MCONFIG_PPBAUD(x) FIELD_PREP(GENMASK(11, 8), (x))
31 #define SVC_I3C_MCONFIG_PPLOW(x) FIELD_PREP(GENMASK(15, 12), (x))
32 #define SVC_I3C_MCONFIG_ODBAUD(x) FIELD_PREP(GENMASK(23, 16), (x))
33 #define SVC_I3C_MCONFIG_ODHPP(x) FIELD_PREP(BIT(24), (x))
34 #define SVC_I3C_MCONFIG_SKEW(x) FIELD_PREP(GENMASK(27, 25), (x))
35 #define SVC_I3C_MCONFIG_I2CBAUD(x) FIELD_PREP(GENMASK(31, 28), (x))
36
37 #define SVC_I3C_MCTRL 0x084
38 #define SVC_I3C_MCTRL_REQUEST_MASK GENMASK(2, 0)
39 #define SVC_I3C_MCTRL_REQUEST_NONE 0
40 #define SVC_I3C_MCTRL_REQUEST_START_ADDR 1
41 #define SVC_I3C_MCTRL_REQUEST_STOP 2
42 #define SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK 3
43 #define SVC_I3C_MCTRL_REQUEST_PROC_DAA 4
44 #define SVC_I3C_MCTRL_REQUEST_AUTO_IBI 7
45 #define SVC_I3C_MCTRL_TYPE_I3C 0
46 #define SVC_I3C_MCTRL_TYPE_I2C BIT(4)
47 #define SVC_I3C_MCTRL_IBIRESP_AUTO 0
48 #define SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE 0
49 #define SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE BIT(7)
50 #define SVC_I3C_MCTRL_IBIRESP_NACK BIT(6)
51 #define SVC_I3C_MCTRL_IBIRESP_MANUAL GENMASK(7, 6)
52 #define SVC_I3C_MCTRL_DIR(x) FIELD_PREP(BIT(8), (x))
53 #define SVC_I3C_MCTRL_DIR_WRITE 0
54 #define SVC_I3C_MCTRL_DIR_READ 1
55 #define SVC_I3C_MCTRL_ADDR(x) FIELD_PREP(GENMASK(15, 9), (x))
56 #define SVC_I3C_MCTRL_RDTERM(x) FIELD_PREP(GENMASK(23, 16), (x))
57
58 #define SVC_I3C_MSTATUS 0x088
59 #define SVC_I3C_MSTATUS_STATE(x) FIELD_GET(GENMASK(2, 0), (x))
60 #define SVC_I3C_MSTATUS_STATE_DAA(x) (SVC_I3C_MSTATUS_STATE(x) == 5)
61 #define SVC_I3C_MSTATUS_STATE_IDLE(x) (SVC_I3C_MSTATUS_STATE(x) == 0)
62 #define SVC_I3C_MSTATUS_BETWEEN(x) FIELD_GET(BIT(4), (x))
63 #define SVC_I3C_MSTATUS_NACKED(x) FIELD_GET(BIT(5), (x))
64 #define SVC_I3C_MSTATUS_IBITYPE(x) FIELD_GET(GENMASK(7, 6), (x))
65 #define SVC_I3C_MSTATUS_IBITYPE_IBI 1
66 #define SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST 2
67 #define SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN 3
68 #define SVC_I3C_MINT_SLVSTART BIT(8)
69 #define SVC_I3C_MINT_MCTRLDONE BIT(9)
70 #define SVC_I3C_MINT_COMPLETE BIT(10)
71 #define SVC_I3C_MINT_RXPEND BIT(11)
72 #define SVC_I3C_MINT_TXNOTFULL BIT(12)
73 #define SVC_I3C_MINT_IBIWON BIT(13)
74 #define SVC_I3C_MINT_ERRWARN BIT(15)
75 #define SVC_I3C_MSTATUS_SLVSTART(x) FIELD_GET(SVC_I3C_MINT_SLVSTART, (x))
76 #define SVC_I3C_MSTATUS_MCTRLDONE(x) FIELD_GET(SVC_I3C_MINT_MCTRLDONE, (x))
77 #define SVC_I3C_MSTATUS_COMPLETE(x) FIELD_GET(SVC_I3C_MINT_COMPLETE, (x))
78 #define SVC_I3C_MSTATUS_RXPEND(x) FIELD_GET(SVC_I3C_MINT_RXPEND, (x))
79 #define SVC_I3C_MSTATUS_TXNOTFULL(x) FIELD_GET(SVC_I3C_MINT_TXNOTFULL, (x))
80 #define SVC_I3C_MSTATUS_IBIWON(x) FIELD_GET(SVC_I3C_MINT_IBIWON, (x))
81 #define SVC_I3C_MSTATUS_ERRWARN(x) FIELD_GET(SVC_I3C_MINT_ERRWARN, (x))
82 #define SVC_I3C_MSTATUS_IBIADDR(x) FIELD_GET(GENMASK(30, 24), (x))
83
84 #define SVC_I3C_IBIRULES 0x08C
85 #define SVC_I3C_IBIRULES_ADDR(slot, addr) FIELD_PREP(GENMASK(29, 0), \
86 ((addr) & 0x3F) << ((slot) * 6))
87 #define SVC_I3C_IBIRULES_ADDRS 5
88 #define SVC_I3C_IBIRULES_MSB0 BIT(30)
89 #define SVC_I3C_IBIRULES_NOBYTE BIT(31)
90 #define SVC_I3C_IBIRULES_MANDBYTE 0
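/*
 * Packing example (illustrative values only): SVC_I3C_IBIRULES_ADDR(2, 0x0a)
 * keeps the 6 LSBs of address 0x0a and shifts them left by 2 * 6 = 12 bits,
 * contributing 0x0000a000 to the 30-bit address field. Up to
 * SVC_I3C_IBIRULES_ADDRS (5) addresses can be packed this way.
 */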
91 #define SVC_I3C_MINTSET 0x090
92 #define SVC_I3C_MINTCLR 0x094
93 #define SVC_I3C_MINTMASKED 0x098
94 #define SVC_I3C_MERRWARN 0x09C
95 #define SVC_I3C_MERRWARN_NACK BIT(2)
96 #define SVC_I3C_MERRWARN_TIMEOUT BIT(20)
97 #define SVC_I3C_MDMACTRL 0x0A0
98 #define SVC_I3C_MDATACTRL 0x0AC
99 #define SVC_I3C_MDATACTRL_FLUSHTB BIT(0)
100 #define SVC_I3C_MDATACTRL_FLUSHRB BIT(1)
101 #define SVC_I3C_MDATACTRL_UNLOCK_TRIG BIT(3)
102 #define SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL GENMASK(5, 4)
103 #define SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY 0
104 #define SVC_I3C_MDATACTRL_RXCOUNT(x) FIELD_GET(GENMASK(28, 24), (x))
105 #define SVC_I3C_MDATACTRL_TXFULL BIT(30)
106 #define SVC_I3C_MDATACTRL_RXEMPTY BIT(31)
107
108 #define SVC_I3C_MWDATAB 0x0B0
109 #define SVC_I3C_MWDATAB_END BIT(8)
110
111 #define SVC_I3C_MWDATABE 0x0B4
112 #define SVC_I3C_MWDATAH 0x0B8
113 #define SVC_I3C_MWDATAHE 0x0BC
114 #define SVC_I3C_MRDATAB 0x0C0
115 #define SVC_I3C_MRDATAH 0x0C8
116 #define SVC_I3C_MWMSG_SDR 0x0D0
117 #define SVC_I3C_MRMSG_SDR 0x0D4
118 #define SVC_I3C_MWMSG_DDR 0x0D8
119 #define SVC_I3C_MRMSG_DDR 0x0DC
120
121 #define SVC_I3C_MDYNADDR 0x0E4
122 #define SVC_MDYNADDR_VALID BIT(0)
123 #define SVC_MDYNADDR_ADDR(x) FIELD_PREP(GENMASK(7, 1), (x))
124
125 #define SVC_I3C_MAX_DEVS 32
126 #define SVC_I3C_PM_TIMEOUT_MS 1000
127
128 /* This parameter depends on the implementation and may be tuned */
129 #define SVC_I3C_FIFO_SIZE 16
130
131 struct svc_i3c_cmd {
132 u8 addr;
133 bool rnw;
134 u8 *in;
135 const void *out;
136 unsigned int len;
137 unsigned int actual_len;
138 struct i3c_priv_xfer *xfer;
139 bool continued;
140 };
141
142 struct svc_i3c_xfer {
143 struct list_head node;
144 struct completion comp;
145 int ret;
146 unsigned int type;
147 unsigned int ncmds;
148 struct svc_i3c_cmd cmds[];
149 };
150
151 struct svc_i3c_regs_save {
152 u32 mconfig;
153 u32 mdynaddr;
154 };
155
156 /**
157 * struct svc_i3c_master - Silvaco I3C Master structure
158 * @base: I3C master controller
159 * @dev: Corresponding device
160 * @regs: Memory mapping
161 * @saved_regs: Volatile values for PM operations
162 * @free_slots: Bit array of available slots
163 * @addrs: Array containing the dynamic addresses of each attached device
164 * @descs: Array of descriptors, one per attached device
165 * @hj_work: Hot-join work
166 * @ibi_work: IBI work
167 * @irq: Main interrupt
168 * @pclk: System clock
169 * @fclk: Fast clock (bus)
170 * @sclk: Slow clock (other events)
171 * @xferqueue: Transfer queue structure
172 * @xferqueue.list: List member
173 * @xferqueue.cur: Current ongoing transfer
174 * @xferqueue.lock: Queue lock
175 * @ibi: IBI structure
176 * @ibi.num_slots: Number of slots available in @ibi.slots
177 * @ibi.slots: Available IBI slots
178 * @ibi.tbq_slot: To be queued IBI slot
179 * @ibi.lock: IBI lock
180 * @lock: Transfer lock, protects against races between the IBI work thread and master callbacks
181 */
182 struct svc_i3c_master {
183 struct i3c_master_controller base;
184 struct device *dev;
185 void __iomem *regs;
186 struct svc_i3c_regs_save saved_regs;
187 u32 free_slots;
188 u8 addrs[SVC_I3C_MAX_DEVS];
189 struct i3c_dev_desc *descs[SVC_I3C_MAX_DEVS];
190 struct work_struct hj_work;
191 struct work_struct ibi_work;
192 int irq;
193 struct clk *pclk;
194 struct clk *fclk;
195 struct clk *sclk;
196 struct {
197 struct list_head list;
198 struct svc_i3c_xfer *cur;
199 /* Prevent races between transfers */
200 spinlock_t lock;
201 } xferqueue;
202 struct {
203 unsigned int num_slots;
204 struct i3c_dev_desc **slots;
205 struct i3c_ibi_slot *tbq_slot;
206 /* Prevent races within IBI handlers */
207 spinlock_t lock;
208 } ibi;
209 struct mutex lock;
210 };
211
212 /**
213 * struct svc_i3c_i2c_dev_data - Device specific data
214 * @index: Index in the master tables corresponding to this device
215 * @ibi: IBI slot index in the master structure
216 * @ibi_pool: IBI pool associated to this device
217 */
218 struct svc_i3c_i2c_dev_data {
219 u8 index;
220 int ibi;
221 struct i3c_generic_ibi_pool *ibi_pool;
222 };
223
224 static bool svc_i3c_master_error(struct svc_i3c_master *master)
225 {
226 u32 mstatus, merrwarn;
227
228 mstatus = readl(master->regs + SVC_I3C_MSTATUS);
229 if (SVC_I3C_MSTATUS_ERRWARN(mstatus)) {
230 merrwarn = readl(master->regs + SVC_I3C_MERRWARN);
231 writel(merrwarn, master->regs + SVC_I3C_MERRWARN);
232
233 /* Ignore timeout error */
234 if (merrwarn & SVC_I3C_MERRWARN_TIMEOUT) {
235 dev_dbg(master->dev, "Warning condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
236 mstatus, merrwarn);
237 return false;
238 }
239
240 dev_err(master->dev,
241 "Error condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
242 mstatus, merrwarn);
243
244 return true;
245 }
246
247 return false;
248 }
249
250 static void svc_i3c_master_enable_interrupts(struct svc_i3c_master *master, u32 mask)
251 {
252 writel(mask, master->regs + SVC_I3C_MINTSET);
253 }
254
255 static void svc_i3c_master_disable_interrupts(struct svc_i3c_master *master)
256 {
257 u32 mask = readl(master->regs + SVC_I3C_MINTSET);
258
259 writel(mask, master->regs + SVC_I3C_MINTCLR);
260 }
261
262 static void svc_i3c_master_clear_merrwarn(struct svc_i3c_master *master)
263 {
264 /* Clear pending warnings */
265 writel(readl(master->regs + SVC_I3C_MERRWARN),
266 master->regs + SVC_I3C_MERRWARN);
267 }
268
269 static void svc_i3c_master_flush_fifo(struct svc_i3c_master *master)
270 {
271 /* Flush FIFOs */
272 writel(SVC_I3C_MDATACTRL_FLUSHTB | SVC_I3C_MDATACTRL_FLUSHRB,
273 master->regs + SVC_I3C_MDATACTRL);
274 }
275
276 static void svc_i3c_master_reset_fifo_trigger(struct svc_i3c_master *master)
277 {
278 u32 reg;
279
280 /* Set RX and TX trigger levels, flush FIFOs */
281 reg = SVC_I3C_MDATACTRL_FLUSHTB |
282 SVC_I3C_MDATACTRL_FLUSHRB |
283 SVC_I3C_MDATACTRL_UNLOCK_TRIG |
284 SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL |
285 SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY;
286 writel(reg, master->regs + SVC_I3C_MDATACTRL);
287 }
288
289 static void svc_i3c_master_reset(struct svc_i3c_master *master)
290 {
291 svc_i3c_master_clear_merrwarn(master);
292 svc_i3c_master_reset_fifo_trigger(master);
293 svc_i3c_master_disable_interrupts(master);
294 }
295
296 static inline struct svc_i3c_master *
297 to_svc_i3c_master(struct i3c_master_controller *master)
298 {
299 return container_of(master, struct svc_i3c_master, base);
300 }
301
302 static void svc_i3c_master_hj_work(struct work_struct *work)
303 {
304 struct svc_i3c_master *master;
305
306 master = container_of(work, struct svc_i3c_master, hj_work);
307 i3c_master_do_daa(&master->base);
308 }
309
310 static struct i3c_dev_desc *
311 svc_i3c_master_dev_from_addr(struct svc_i3c_master *master,
312 unsigned int ibiaddr)
313 {
314 int i;
315
316 for (i = 0; i < SVC_I3C_MAX_DEVS; i++)
317 if (master->addrs[i] == ibiaddr)
318 break;
319
320 if (i == SVC_I3C_MAX_DEVS)
321 return NULL;
322
323 return master->descs[i];
324 }
325
326 static void svc_i3c_master_emit_stop(struct svc_i3c_master *master)
327 {
328 writel(SVC_I3C_MCTRL_REQUEST_STOP, master->regs + SVC_I3C_MCTRL);
329
330 /*
331 * This delay is necessary after the emission of a stop, otherwise, e.g.,
332 * repeating IBIs do not get detected. There is a note in the manual
333 * about it, stating that the stop condition might not be settled
334 * correctly if a start condition follows too rapidly.
335 */
336 udelay(1);
337 }
338
339 static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
340 struct i3c_dev_desc *dev)
341 {
342 struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
343 struct i3c_ibi_slot *slot;
344 unsigned int count;
345 u32 mdatactrl;
346 int ret, val;
347 u8 *buf;
348
349 slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
350 if (!slot)
351 return -ENOSPC;
352
353 slot->len = 0;
354 buf = slot->data;
355
356 ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val,
357 SVC_I3C_MSTATUS_COMPLETE(val), 0, 1000);
358 if (ret) {
359 dev_err(master->dev, "Timeout when polling for COMPLETE\n");
360 return ret;
361 }
362
363 while (SVC_I3C_MSTATUS_RXPEND(readl(master->regs + SVC_I3C_MSTATUS)) &&
364 slot->len < SVC_I3C_FIFO_SIZE) {
365 mdatactrl = readl(master->regs + SVC_I3C_MDATACTRL);
366 count = SVC_I3C_MDATACTRL_RXCOUNT(mdatactrl);
367 readsl(master->regs + SVC_I3C_MRDATAB, buf, count);
368 slot->len += count;
369 buf += count;
370 }
371
372 master->ibi.tbq_slot = slot;
373
374 return 0;
375 }
376
377 static void svc_i3c_master_ack_ibi(struct svc_i3c_master *master,
378 bool mandatory_byte)
379 {
380 unsigned int ibi_ack_nack;
381
382 ibi_ack_nack = SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK;
383 if (mandatory_byte)
384 ibi_ack_nack |= SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE;
385 else
386 ibi_ack_nack |= SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE;
387
388 writel(ibi_ack_nack, master->regs + SVC_I3C_MCTRL);
389 }
390
391 static void svc_i3c_master_nack_ibi(struct svc_i3c_master *master)
392 {
393 writel(SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK |
394 SVC_I3C_MCTRL_IBIRESP_NACK,
395 master->regs + SVC_I3C_MCTRL);
396 }
397
398 static void svc_i3c_master_ibi_work(struct work_struct *work)
399 {
400 struct svc_i3c_master *master = container_of(work, struct svc_i3c_master, ibi_work);
401 struct svc_i3c_i2c_dev_data *data;
402 unsigned int ibitype, ibiaddr;
403 struct i3c_dev_desc *dev;
404 u32 status, val;
405 int ret;
406
407 mutex_lock(&master->lock);
408 /*
409 * IBIWON may be set before SVC_I3C_MCTRL_REQUEST_AUTO_IBI, causing
410 * readl_relaxed_poll_timeout() to return immediately. Consequently,
411 * ibitype will be 0 since it was last updated only after the 8th SCL
412 * cycle, leading to missed client IBI handlers.
413 *
414 * A typical scenario is when IBIWON occurs and bus arbitration is lost
415 * at svc_i3c_master_priv_xfers().
416 *
417 * Clear SVC_I3C_MINT_IBIWON before sending SVC_I3C_MCTRL_REQUEST_AUTO_IBI.
418 */
419 writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
420
421 /* Acknowledge the incoming interrupt with the AUTOIBI mechanism */
422 writel(SVC_I3C_MCTRL_REQUEST_AUTO_IBI |
423 SVC_I3C_MCTRL_IBIRESP_AUTO,
424 master->regs + SVC_I3C_MCTRL);
425
426 /* Wait for IBIWON, should take approximately 100us */
427 ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val,
428 SVC_I3C_MSTATUS_IBIWON(val), 0, 1000);
429 if (ret) {
430 dev_err(master->dev, "Timeout when polling for IBIWON\n");
431 svc_i3c_master_emit_stop(master);
432 goto reenable_ibis;
433 }
434
435 status = readl(master->regs + SVC_I3C_MSTATUS);
436 ibitype = SVC_I3C_MSTATUS_IBITYPE(status);
437 ibiaddr = SVC_I3C_MSTATUS_IBIADDR(status);
438
439 /* Handle the critical responses to IBIs */
440 switch (ibitype) {
441 case SVC_I3C_MSTATUS_IBITYPE_IBI:
442 dev = svc_i3c_master_dev_from_addr(master, ibiaddr);
443 if (!dev)
444 svc_i3c_master_nack_ibi(master);
445 else
446 svc_i3c_master_handle_ibi(master, dev);
447 break;
448 case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
449 svc_i3c_master_ack_ibi(master, false);
450 break;
451 case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
452 svc_i3c_master_nack_ibi(master);
453 break;
454 default:
455 break;
456 }
457
458 /*
459 * If an error happened, we probably got interrupted and the exchange
460 * timed out. In that case, drop everything, emit a stop and wait
461 * for the slave to interrupt again.
462 */
463 if (svc_i3c_master_error(master)) {
464 if (master->ibi.tbq_slot) {
465 data = i3c_dev_get_master_data(dev);
466 i3c_generic_ibi_recycle_slot(data->ibi_pool,
467 master->ibi.tbq_slot);
468 master->ibi.tbq_slot = NULL;
469 }
470
471 svc_i3c_master_emit_stop(master);
472
473 goto reenable_ibis;
474 }
475
476 /* Handle the non-critical tasks */
477 switch (ibitype) {
478 case SVC_I3C_MSTATUS_IBITYPE_IBI:
479 if (dev) {
480 i3c_master_queue_ibi(dev, master->ibi.tbq_slot);
481 master->ibi.tbq_slot = NULL;
482 }
483 svc_i3c_master_emit_stop(master);
484 break;
485 case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
486 queue_work(master->base.wq, &master->hj_work);
487 break;
488 case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
489 default:
490 break;
491 }
492
493 reenable_ibis:
494 svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
495 mutex_unlock(&master->lock);
496 }
497
498 static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id)
499 {
500 struct svc_i3c_master *master = (struct svc_i3c_master *)dev_id;
501 u32 active = readl(master->regs + SVC_I3C_MSTATUS);
502
503 if (!SVC_I3C_MSTATUS_SLVSTART(active))
504 return IRQ_NONE;
505
506 /* Clear the interrupt status */
507 writel(SVC_I3C_MINT_SLVSTART, master->regs + SVC_I3C_MSTATUS);
508
509 svc_i3c_master_disable_interrupts(master);
510
511 /* Handle the interrupt in a non atomic context */
512 queue_work(master->base.wq, &master->ibi_work);
513
514 return IRQ_HANDLED;
515 }
516
517 static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
518 {
519 struct svc_i3c_master *master = to_svc_i3c_master(m);
520 struct i3c_bus *bus = i3c_master_get_bus(m);
521 struct i3c_device_info info = {};
522 unsigned long fclk_rate, fclk_period_ns;
523 unsigned int high_period_ns, od_low_period_ns;
524 u32 ppbaud, pplow, odhpp, odbaud, odstop, i2cbaud, reg;
525 int ret;
526
527 ret = pm_runtime_resume_and_get(master->dev);
528 if (ret < 0) {
529 dev_err(master->dev,
530 "<%s> cannot resume i3c bus master, err: %d\n",
531 __func__, ret);
532 return ret;
533 }
534
535 /* Timings derivation */
536 fclk_rate = clk_get_rate(master->fclk);
537 if (!fclk_rate) {
538 ret = -EINVAL;
539 goto rpm_out;
540 }
541
542 fclk_period_ns = DIV_ROUND_UP(1000000000, fclk_rate);
543
544 /*
545 * Using I3C Push-Pull mode, target is 12.5MHz/80ns period.
546 * Simplest configuration is using a 50% duty-cycle of 40ns.
547 */
548 ppbaud = DIV_ROUND_UP(40, fclk_period_ns) - 1;
549 pplow = 0;
550
551 /*
552 * Using I3C Open-Drain mode, target is 4.17MHz/240ns with a
553 * duty-cycle tuned so that high levels are filtered out by
554 * the 50ns filter (target being 40ns).
555 */
556 odhpp = 1;
557 high_period_ns = (ppbaud + 1) * fclk_period_ns;
558 odbaud = DIV_ROUND_UP(240 - high_period_ns, high_period_ns) - 1;
559 od_low_period_ns = (odbaud + 1) * high_period_ns;
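/*
 * Worked example (hypothetical fclk, not taken from any datasheet):
 * with fclk = 100 MHz, fclk_period_ns = 10, so ppbaud = DIV_ROUND_UP(40, 10)
 * - 1 = 3 and each push-pull phase lasts (3 + 1) * 10 = 40 ns (12.5 MHz).
 * For open-drain, odbaud = DIV_ROUND_UP(240 - 40, 40) - 1 = 4, giving an OD
 * low phase of (4 + 1) * 40 = 200 ns and, with odhpp = 1, a full OD period
 * of 40 + 200 = 240 ns (~4.17 MHz). In the mixed-fast case below this would
 * lead to i2cbaud = DIV_ROUND_UP(1000, 200) - 2 = 3.
 */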
560
561 switch (bus->mode) {
562 case I3C_BUS_MODE_PURE:
563 i2cbaud = 0;
564 odstop = 0;
565 break;
566 case I3C_BUS_MODE_MIXED_FAST:
567 case I3C_BUS_MODE_MIXED_LIMITED:
568 /*
569 * Using I2C Fm+ mode, target is 1MHz/1000ns; the difference
570 * between the high and low period does not really matter.
571 */
572 i2cbaud = DIV_ROUND_UP(1000, od_low_period_ns) - 2;
573 odstop = 1;
574 break;
575 case I3C_BUS_MODE_MIXED_SLOW:
576 /*
577 * Using I2C Fm mode, target is 0.4MHz/2500ns, with the same
578 * constraints as the FM+ mode.
579 */
580 i2cbaud = DIV_ROUND_UP(2500, od_low_period_ns) - 2;
581 odstop = 1;
582 break;
583 default:
584 goto rpm_out;
585 }
586
587 reg = SVC_I3C_MCONFIG_MASTER_EN |
588 SVC_I3C_MCONFIG_DISTO(0) |
589 SVC_I3C_MCONFIG_HKEEP(0) |
590 SVC_I3C_MCONFIG_ODSTOP(odstop) |
591 SVC_I3C_MCONFIG_PPBAUD(ppbaud) |
592 SVC_I3C_MCONFIG_PPLOW(pplow) |
593 SVC_I3C_MCONFIG_ODBAUD(odbaud) |
594 SVC_I3C_MCONFIG_ODHPP(odhpp) |
595 SVC_I3C_MCONFIG_SKEW(0) |
596 SVC_I3C_MCONFIG_I2CBAUD(i2cbaud);
597 writel(reg, master->regs + SVC_I3C_MCONFIG);
598
599 /* Master core's registration */
600 ret = i3c_master_get_free_addr(m, 0);
601 if (ret < 0)
602 goto rpm_out;
603
604 info.dyn_addr = ret;
605
606 writel(SVC_MDYNADDR_VALID | SVC_MDYNADDR_ADDR(info.dyn_addr),
607 master->regs + SVC_I3C_MDYNADDR);
608
609 ret = i3c_master_set_info(&master->base, &info);
610 if (ret)
611 goto rpm_out;
612
613 rpm_out:
614 pm_runtime_mark_last_busy(master->dev);
615 pm_runtime_put_autosuspend(master->dev);
616
617 return ret;
618 }
619
620 static void svc_i3c_master_bus_cleanup(struct i3c_master_controller *m)
621 {
622 struct svc_i3c_master *master = to_svc_i3c_master(m);
623 int ret;
624
625 ret = pm_runtime_resume_and_get(master->dev);
626 if (ret < 0) {
627 dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
628 return;
629 }
630
631 svc_i3c_master_disable_interrupts(master);
632
633 /* Disable master */
634 writel(0, master->regs + SVC_I3C_MCONFIG);
635
636 pm_runtime_mark_last_busy(master->dev);
637 pm_runtime_put_autosuspend(master->dev);
638 }
639
640 static int svc_i3c_master_reserve_slot(struct svc_i3c_master *master)
641 {
642 unsigned int slot;
643
644 if (!(master->free_slots & GENMASK(SVC_I3C_MAX_DEVS - 1, 0)))
645 return -ENOSPC;
646
647 slot = ffs(master->free_slots) - 1;
648
649 master->free_slots &= ~BIT(slot);
650
651 return slot;
652 }
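/*
 * Slot bookkeeping illustration (made-up value): with free_slots = 0xc
 * (slots 2 and 3 free), ffs() returns 3, so slot 2 is handed out and
 * free_slots becomes 0x8; svc_i3c_master_release_slot() simply sets the
 * corresponding bit again.
 */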
653
654 static void svc_i3c_master_release_slot(struct svc_i3c_master *master,
655 unsigned int slot)
656 {
657 master->free_slots |= BIT(slot);
658 }
659
660 static int svc_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
661 {
662 struct i3c_master_controller *m = i3c_dev_get_master(dev);
663 struct svc_i3c_master *master = to_svc_i3c_master(m);
664 struct svc_i3c_i2c_dev_data *data;
665 int slot;
666
667 slot = svc_i3c_master_reserve_slot(master);
668 if (slot < 0)
669 return slot;
670
671 data = kzalloc(sizeof(*data), GFP_KERNEL);
672 if (!data) {
673 svc_i3c_master_release_slot(master, slot);
674 return -ENOMEM;
675 }
676
677 data->ibi = -1;
678 data->index = slot;
679 master->addrs[slot] = dev->info.dyn_addr ? dev->info.dyn_addr :
680 dev->info.static_addr;
681 master->descs[slot] = dev;
682
683 i3c_dev_set_master_data(dev, data);
684
685 return 0;
686 }
687
688 static int svc_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
689 u8 old_dyn_addr)
690 {
691 struct i3c_master_controller *m = i3c_dev_get_master(dev);
692 struct svc_i3c_master *master = to_svc_i3c_master(m);
693 struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
694
695 master->addrs[data->index] = dev->info.dyn_addr ? dev->info.dyn_addr :
696 dev->info.static_addr;
697
698 return 0;
699 }
700
701 static void svc_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
702 {
703 struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
704 struct i3c_master_controller *m = i3c_dev_get_master(dev);
705 struct svc_i3c_master *master = to_svc_i3c_master(m);
706
707 master->addrs[data->index] = 0;
708 svc_i3c_master_release_slot(master, data->index);
709
710 kfree(data);
711 }
712
713 static int svc_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
714 {
715 struct i3c_master_controller *m = i2c_dev_get_master(dev);
716 struct svc_i3c_master *master = to_svc_i3c_master(m);
717 struct svc_i3c_i2c_dev_data *data;
718 int slot;
719
720 slot = svc_i3c_master_reserve_slot(master);
721 if (slot < 0)
722 return slot;
723
724 data = kzalloc(sizeof(*data), GFP_KERNEL);
725 if (!data) {
726 svc_i3c_master_release_slot(master, slot);
727 return -ENOMEM;
728 }
729
730 data->index = slot;
731 master->addrs[slot] = dev->addr;
732
733 i2c_dev_set_master_data(dev, data);
734
735 return 0;
736 }
737
738 static void svc_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
739 {
740 struct svc_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
741 struct i3c_master_controller *m = i2c_dev_get_master(dev);
742 struct svc_i3c_master *master = to_svc_i3c_master(m);
743
744 svc_i3c_master_release_slot(master, data->index);
745
746 kfree(data);
747 }
748
749 static int svc_i3c_master_readb(struct svc_i3c_master *master, u8 *dst,
750 unsigned int len)
751 {
752 int ret, i;
753 u32 reg;
754
755 for (i = 0; i < len; i++) {
756 ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
757 reg,
758 SVC_I3C_MSTATUS_RXPEND(reg),
759 0, 1000);
760 if (ret)
761 return ret;
762
763 dst[i] = readl(master->regs + SVC_I3C_MRDATAB);
764 }
765
766 return 0;
767 }
768
769 static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
770 u8 *addrs, unsigned int *count)
771 {
772 u64 prov_id[SVC_I3C_MAX_DEVS] = {}, nacking_prov_id = 0;
773 unsigned int dev_nb = 0, last_addr = 0;
774 u32 reg;
775 int ret, i;
776
777 while (true) {
778 /* Enter/proceed with DAA */
779 writel(SVC_I3C_MCTRL_REQUEST_PROC_DAA |
780 SVC_I3C_MCTRL_TYPE_I3C |
781 SVC_I3C_MCTRL_IBIRESP_NACK |
782 SVC_I3C_MCTRL_DIR(SVC_I3C_MCTRL_DIR_WRITE),
783 master->regs + SVC_I3C_MCTRL);
784
785 /*
786 * Either one slave will send its ID, or the assignment process
787 * is done.
788 */
789 ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
790 reg,
791 SVC_I3C_MSTATUS_RXPEND(reg) |
792 SVC_I3C_MSTATUS_MCTRLDONE(reg),
793 1, 1000);
794 if (ret)
795 return ret;
796
797 if (SVC_I3C_MSTATUS_RXPEND(reg)) {
798 u8 data[6];
799
800 /*
801 * We only care about the 48-bit provisional ID, and only to
802 * be sure a device does not nack an address twice.
803 * Otherwise, we would just need to flush the RX FIFO.
804 */
805 ret = svc_i3c_master_readb(master, data, 6);
806 if (ret)
807 return ret;
808
809 for (i = 0; i < 6; i++)
810 prov_id[dev_nb] |= (u64)(data[i]) << (8 * (5 - i));
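/*
 * E.g. (made-up bytes): data[] = { 0x04, 0xa5, 0x00, 0x00, 0x12, 0x34 }
 * yields prov_id = 0x04a500001234, i.e. the first byte received lands
 * in the most significant bits of the 48-bit provisional ID.
 */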
811
812 /* We do not care about the BCR and DCR yet */
813 ret = svc_i3c_master_readb(master, data, 2);
814 if (ret)
815 return ret;
816 } else if (SVC_I3C_MSTATUS_MCTRLDONE(reg)) {
817 if (SVC_I3C_MSTATUS_STATE_IDLE(reg) &&
818 SVC_I3C_MSTATUS_COMPLETE(reg)) {
819 /*
820 * All devices received and acked their dynamic
821 * address, this is the natural end of the DAA
822 * procedure.
823 */
824 break;
825 } else if (SVC_I3C_MSTATUS_NACKED(reg)) {
826 /* No I3C devices attached */
827 if (dev_nb == 0)
828 break;
829
830 /*
831 * A slave device nacked the address. This is
832 * allowed only once: DAA will be stopped and
833 * then resumed. The same device is supposed to
834 * answer again immediately and shall ack the
835 * address this time.
836 */
837 if (prov_id[dev_nb] == nacking_prov_id)
838 return -EIO;
839
840 dev_nb--;
841 nacking_prov_id = prov_id[dev_nb];
842 svc_i3c_master_emit_stop(master);
843
844 continue;
845 } else {
846 return -EIO;
847 }
848 }
849
850 /* Wait for the slave to be ready to receive its address */
851 ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
852 reg,
853 SVC_I3C_MSTATUS_MCTRLDONE(reg) &&
854 SVC_I3C_MSTATUS_STATE_DAA(reg) &&
855 SVC_I3C_MSTATUS_BETWEEN(reg),
856 0, 1000);
857 if (ret)
858 return ret;
859
860 /* Give the slave device a suitable dynamic address */
861 ret = i3c_master_get_free_addr(&master->base, last_addr + 1);
862 if (ret < 0)
863 return ret;
864
865 addrs[dev_nb] = ret;
866 dev_dbg(master->dev, "DAA: device %d assigned to 0x%02x\n",
867 dev_nb, addrs[dev_nb]);
868
869 writel(addrs[dev_nb], master->regs + SVC_I3C_MWDATAB);
870 last_addr = addrs[dev_nb++];
871 }
872
873 *count = dev_nb;
874
875 return 0;
876 }
877
878 static int svc_i3c_update_ibirules(struct svc_i3c_master *master)
879 {
880 struct i3c_dev_desc *dev;
881 u32 reg_mbyte = 0, reg_nobyte = SVC_I3C_IBIRULES_NOBYTE;
882 unsigned int mbyte_addr_ok = 0, mbyte_addr_ko = 0, nobyte_addr_ok = 0,
883 nobyte_addr_ko = 0;
884 bool list_mbyte = false, list_nobyte = false;
885
886 /* Create the IBIRULES register for both cases */
887 i3c_bus_for_each_i3cdev(&master->base.bus, dev) {
888 if (I3C_BCR_DEVICE_ROLE(dev->info.bcr) == I3C_BCR_I3C_MASTER)
889 continue;
890
891 if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD) {
892 reg_mbyte |= SVC_I3C_IBIRULES_ADDR(mbyte_addr_ok,
893 dev->info.dyn_addr);
894
895 /* IBI rules cannot be applied to devices with MSb=1 */
896 if (dev->info.dyn_addr & BIT(7))
897 mbyte_addr_ko++;
898 else
899 mbyte_addr_ok++;
900 } else {
901 reg_nobyte |= SVC_I3C_IBIRULES_ADDR(nobyte_addr_ok,
902 dev->info.dyn_addr);
903
904 /* IBI rules cannot be applied to devices with MSb=1 */
905 if (dev->info.dyn_addr & BIT(7))
906 nobyte_addr_ko++;
907 else
908 nobyte_addr_ok++;
909 }
910 }
911
912 /* Check whether each device list can be handled by the hardware */
913 if (!mbyte_addr_ko && mbyte_addr_ok <= SVC_I3C_IBIRULES_ADDRS)
914 list_mbyte = true;
915
916 if (!nobyte_addr_ko && nobyte_addr_ok <= SVC_I3C_IBIRULES_ADDRS)
917 list_nobyte = true;
918
919 /* No list can be properly handled, return an error */
920 if (!list_mbyte && !list_nobyte)
921 return -ERANGE;
922
923 /* Pick a list the hardware can handle, favoring the mandatory-byte one */
924 if (list_mbyte)
925 writel(reg_mbyte, master->regs + SVC_I3C_IBIRULES);
926 else
927 writel(reg_nobyte, master->regs + SVC_I3C_IBIRULES);
928
929 return 0;
930 }
931
932 static int svc_i3c_master_do_daa(struct i3c_master_controller *m)
933 {
934 struct svc_i3c_master *master = to_svc_i3c_master(m);
935 u8 addrs[SVC_I3C_MAX_DEVS];
936 unsigned long flags;
937 unsigned int dev_nb;
938 int ret, i;
939
940 ret = pm_runtime_resume_and_get(master->dev);
941 if (ret < 0) {
942 dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
943 return ret;
944 }
945
946 spin_lock_irqsave(&master->xferqueue.lock, flags);
947 ret = svc_i3c_master_do_daa_locked(master, addrs, &dev_nb);
948 spin_unlock_irqrestore(&master->xferqueue.lock, flags);
949 if (ret) {
950 svc_i3c_master_emit_stop(master);
951 svc_i3c_master_clear_merrwarn(master);
952 goto rpm_out;
953 }
954
955 /* Register with the core all devices that took part in DAA */
956 for (i = 0; i < dev_nb; i++) {
957 ret = i3c_master_add_i3c_dev_locked(m, addrs[i]);
958 if (ret)
959 goto rpm_out;
960 }
961
962 /* Configure IBI auto-rules */
963 ret = svc_i3c_update_ibirules(master);
964 if (ret)
965 dev_err(master->dev, "Cannot handle such a list of devices");
966
967 rpm_out:
968 pm_runtime_mark_last_busy(master->dev);
969 pm_runtime_put_autosuspend(master->dev);
970
971 return ret;
972 }
973
974 static int svc_i3c_master_read(struct svc_i3c_master *master,
975 u8 *in, unsigned int len)
976 {
977 int offset = 0, i;
978 u32 mdctrl, mstatus;
979 bool completed = false;
980 unsigned int count;
981 unsigned long start = jiffies;
982
983 while (!completed) {
984 mstatus = readl(master->regs + SVC_I3C_MSTATUS);
985 if (SVC_I3C_MSTATUS_COMPLETE(mstatus) != 0)
986 completed = true;
987
988 if (time_after(jiffies, start + msecs_to_jiffies(1000))) {
989 dev_dbg(master->dev, "I3C read timeout\n");
990 return -ETIMEDOUT;
991 }
992
993 mdctrl = readl(master->regs + SVC_I3C_MDATACTRL);
994 count = SVC_I3C_MDATACTRL_RXCOUNT(mdctrl);
995 if (offset + count > len) {
996 dev_err(master->dev, "I3C receive length too long!\n");
997 return -EINVAL;
998 }
999 for (i = 0; i < count; i++)
1000 in[offset + i] = readl(master->regs + SVC_I3C_MRDATAB);
1001
1002 offset += count;
1003 }
1004
1005 return offset;
1006 }
1007
1008 static int svc_i3c_master_write(struct svc_i3c_master *master,
1009 const u8 *out, unsigned int len)
1010 {
1011 int offset = 0, ret;
1012 u32 mdctrl;
1013
1014 while (offset < len) {
1015 ret = readl_poll_timeout(master->regs + SVC_I3C_MDATACTRL,
1016 mdctrl,
1017 !(mdctrl & SVC_I3C_MDATACTRL_TXFULL),
1018 0, 1000);
1019 if (ret)
1020 return ret;
1021
1022 /*
1023 * The last byte to be sent over the bus must either have the
1024 * "end" bit set or be written in MWDATABE.
1025 */
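/*
 * Illustration (hypothetical 3-byte payload): for out[] = { 0x11, 0x22,
 * 0x33 }, 0x11 and 0x22 are pushed through MWDATAB while the final 0x33
 * goes through MWDATABE, which marks the end of the message.
 */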
1026 if (likely(offset < (len - 1)))
1027 writel(out[offset++], master->regs + SVC_I3C_MWDATAB);
1028 else
1029 writel(out[offset++], master->regs + SVC_I3C_MWDATABE);
1030 }
1031
1032 return 0;
1033 }
1034
1035 static int svc_i3c_master_xfer(struct svc_i3c_master *master,
1036 bool rnw, unsigned int xfer_type, u8 addr,
1037 u8 *in, const u8 *out, unsigned int xfer_len,
1038 unsigned int *actual_len, bool continued)
1039 {
1040 int retry = 2;
1041 u32 reg;
1042 int ret;
1043
1044 /* Clear the write-1-to-clear SVC_I3C_MINT_IBIWON bit */
1045 writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
1046
1047
1048 while (retry--) {
1049 writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
1050 xfer_type |
1051 SVC_I3C_MCTRL_IBIRESP_NACK |
1052 SVC_I3C_MCTRL_DIR(rnw) |
1053 SVC_I3C_MCTRL_ADDR(addr) |
1054 SVC_I3C_MCTRL_RDTERM(*actual_len),
1055 master->regs + SVC_I3C_MCTRL);
1056
1057 ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
1058 SVC_I3C_MSTATUS_MCTRLDONE(reg), 0, 1000);
1059 if (ret)
1060 goto emit_stop;
1061
1062 if (readl(master->regs + SVC_I3C_MERRWARN) & SVC_I3C_MERRWARN_NACK) {
1063 /*
1064 * According to I3C Spec 1.1.1, 11-Jun-2021, section: 5.1.2.2.3.
1065 * If the Controller chooses to start an I3C Message with an I3C Dynamic
1066 * Address, then special provisions shall be made because that same I3C
1067 * Target may be initiating an IBI or a Controller Role Request. So, one of
1068 * three things may happen: (skip 1, 2)
1069 *
1070 * 3. The Addresses match and the RnW bits also match, and so neither
1071 * Controller nor Target will ACK since both are expecting the other side to
1072 * provide ACK. As a result, each side might think it had "won" arbitration,
1073 * but neither side would continue, as each would subsequently see that the
1074 * other did not provide ACK.
1075 * ...
1076 * For either value of RnW: Due to the NACK, the Controller shall defer the
1077 * Private Write or Private Read, and should typically transmit the Target
1078 * Address again after a Repeated START (i.e., the next one or any one prior
1079 * to a STOP in the Frame). Since the Address Header following a Repeated
1080 * START is not arbitrated, the Controller will always win (see Section
1081 * 5.1.2.2.4).
1082 */
1083 if (retry && addr != 0x7e) {
1084 writel(SVC_I3C_MERRWARN_NACK, master->regs + SVC_I3C_MERRWARN);
1085 } else {
1086 ret = -ENXIO;
1087 *actual_len = 0;
1088 goto emit_stop;
1089 }
1090 } else {
1091 break;
1092 }
1093 }
1094
1095 /*
1096 * According to I3C spec ver 1.1.1, 5.1.2.2.3 Consequence of Controller Starting a Frame
1097 * with I3C Target Address.
1098 *
1099 * The I3C Controller normally should start a Frame, the Address may be arbitrated, and so
1100 * the Controller shall monitor to see whether an In-Band Interrupt request, a Controller
1101 * Role Request (i.e., Secondary Controller requests to become the Active Controller), or
1102 * a Hot-Join Request has been made.
1103 *
1104 * If the IBIWON check is missed, wrong data will be returned. When IBIWON happens, return
1105 * failure and yield to the event handlers above.
1106 */
1107 if (SVC_I3C_MSTATUS_IBIWON(reg)) {
1108 ret = -EAGAIN;
1109 *actual_len = 0;
1110 goto emit_stop;
1111 }
1112
1113 if (rnw)
1114 ret = svc_i3c_master_read(master, in, xfer_len);
1115 else
1116 ret = svc_i3c_master_write(master, out, xfer_len);
1117 if (ret < 0)
1118 goto emit_stop;
1119
1120 if (rnw)
1121 *actual_len = ret;
1122
1123 ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
1124 SVC_I3C_MSTATUS_COMPLETE(reg), 0, 1000);
1125 if (ret)
1126 goto emit_stop;
1127
1128 writel(SVC_I3C_MINT_COMPLETE, master->regs + SVC_I3C_MSTATUS);
1129
1130 if (!continued) {
1131 svc_i3c_master_emit_stop(master);
1132
1133 /* Wait for the bus to become idle since a stop was sent. */
1134 readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
1135 SVC_I3C_MSTATUS_STATE_IDLE(reg), 0, 1000);
1136 }
1137
1138 return 0;
1139
1140 emit_stop:
1141 svc_i3c_master_emit_stop(master);
1142 svc_i3c_master_clear_merrwarn(master);
1143
1144 return ret;
1145 }
1146
1147 static struct svc_i3c_xfer *
1148 svc_i3c_master_alloc_xfer(struct svc_i3c_master *master, unsigned int ncmds)
1149 {
1150 struct svc_i3c_xfer *xfer;
1151
1152 xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
1153 if (!xfer)
1154 return NULL;
1155
1156 INIT_LIST_HEAD(&xfer->node);
1157 xfer->ncmds = ncmds;
1158 xfer->ret = -ETIMEDOUT;
1159
1160 return xfer;
1161 }
1162
1163 static void svc_i3c_master_free_xfer(struct svc_i3c_xfer *xfer)
1164 {
1165 kfree(xfer);
1166 }
1167
1168 static void svc_i3c_master_dequeue_xfer_locked(struct svc_i3c_master *master,
1169 struct svc_i3c_xfer *xfer)
1170 {
1171 if (master->xferqueue.cur == xfer)
1172 master->xferqueue.cur = NULL;
1173 else
1174 list_del_init(&xfer->node);
1175 }
1176
1177 static void svc_i3c_master_dequeue_xfer(struct svc_i3c_master *master,
1178 struct svc_i3c_xfer *xfer)
1179 {
1180 unsigned long flags;
1181
1182 spin_lock_irqsave(&master->xferqueue.lock, flags);
1183 svc_i3c_master_dequeue_xfer_locked(master, xfer);
1184 spin_unlock_irqrestore(&master->xferqueue.lock, flags);
1185 }
1186
1187 static void svc_i3c_master_start_xfer_locked(struct svc_i3c_master *master)
1188 {
1189 struct svc_i3c_xfer *xfer = master->xferqueue.cur;
1190 int ret, i;
1191
1192 if (!xfer)
1193 return;
1194
1195 svc_i3c_master_clear_merrwarn(master);
1196 svc_i3c_master_flush_fifo(master);
1197
1198 for (i = 0; i < xfer->ncmds; i++) {
1199 struct svc_i3c_cmd *cmd = &xfer->cmds[i];
1200
1201 ret = svc_i3c_master_xfer(master, cmd->rnw, xfer->type,
1202 cmd->addr, cmd->in, cmd->out,
1203 cmd->len, &cmd->actual_len,
1204 cmd->continued);
1205 /* cmd->xfer is NULL for I2C and CCC transfers */
1206 if (cmd->xfer)
1207 cmd->xfer->actual_len = cmd->actual_len;
1208
1209 if (ret)
1210 break;
1211 }
1212
1213 xfer->ret = ret;
1214 complete(&xfer->comp);
1215
1216 if (ret < 0)
1217 svc_i3c_master_dequeue_xfer_locked(master, xfer);
1218
1219 xfer = list_first_entry_or_null(&master->xferqueue.list,
1220 struct svc_i3c_xfer,
1221 node);
1222 if (xfer)
1223 list_del_init(&xfer->node);
1224
1225 master->xferqueue.cur = xfer;
1226 svc_i3c_master_start_xfer_locked(master);
1227 }
1228
1229 static void svc_i3c_master_enqueue_xfer(struct svc_i3c_master *master,
1230 struct svc_i3c_xfer *xfer)
1231 {
1232 unsigned long flags;
1233 int ret;
1234
1235 ret = pm_runtime_resume_and_get(master->dev);
1236 if (ret < 0) {
1237 dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
1238 return;
1239 }
1240
1241 init_completion(&xfer->comp);
1242 spin_lock_irqsave(&master->xferqueue.lock, flags);
1243 if (master->xferqueue.cur) {
1244 list_add_tail(&xfer->node, &master->xferqueue.list);
1245 } else {
1246 master->xferqueue.cur = xfer;
1247 svc_i3c_master_start_xfer_locked(master);
1248 }
1249 spin_unlock_irqrestore(&master->xferqueue.lock, flags);
1250
1251 pm_runtime_mark_last_busy(master->dev);
1252 pm_runtime_put_autosuspend(master->dev);
1253 }
1254
1255 static bool
1256 svc_i3c_master_supports_ccc_cmd(struct i3c_master_controller *master,
1257 const struct i3c_ccc_cmd *cmd)
1258 {
1259 /* No software support for CCC commands targeting more than one slave */
1260 return (cmd->ndests == 1);
1261 }
1262
1263 static int svc_i3c_master_send_bdcast_ccc_cmd(struct svc_i3c_master *master,
1264 struct i3c_ccc_cmd *ccc)
1265 {
1266 unsigned int xfer_len = ccc->dests[0].payload.len + 1;
1267 struct svc_i3c_xfer *xfer;
1268 struct svc_i3c_cmd *cmd;
1269 u8 *buf;
1270 int ret;
1271
1272 xfer = svc_i3c_master_alloc_xfer(master, 1);
1273 if (!xfer)
1274 return -ENOMEM;
1275
1276 buf = kmalloc(xfer_len, GFP_KERNEL);
1277 if (!buf) {
1278 svc_i3c_master_free_xfer(xfer);
1279 return -ENOMEM;
1280 }
1281
1282 buf[0] = ccc->id;
1283 memcpy(&buf[1], ccc->dests[0].payload.data, ccc->dests[0].payload.len);
1284
1285 xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
1286
1287 cmd = &xfer->cmds[0];
1288 cmd->addr = ccc->dests[0].addr;
1289 cmd->rnw = ccc->rnw;
1290 cmd->in = NULL;
1291 cmd->out = buf;
1292 cmd->len = xfer_len;
1293 cmd->actual_len = 0;
1294 cmd->continued = false;
1295
1296 mutex_lock(&master->lock);
1297 svc_i3c_master_enqueue_xfer(master, xfer);
1298 if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1299 svc_i3c_master_dequeue_xfer(master, xfer);
1300 mutex_unlock(&master->lock);
1301
1302 ret = xfer->ret;
1303 kfree(buf);
1304 svc_i3c_master_free_xfer(xfer);
1305
1306 return ret;
1307 }
1308
1309 static int svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master *master,
1310 struct i3c_ccc_cmd *ccc)
1311 {
1312 unsigned int xfer_len = ccc->dests[0].payload.len;
1313 unsigned int actual_len = ccc->rnw ? xfer_len : 0;
1314 struct svc_i3c_xfer *xfer;
1315 struct svc_i3c_cmd *cmd;
1316 int ret;
1317
1318 xfer = svc_i3c_master_alloc_xfer(master, 2);
1319 if (!xfer)
1320 return -ENOMEM;
1321
1322 xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
1323
1324 /* Broadcast message */
1325 cmd = &xfer->cmds[0];
1326 cmd->addr = I3C_BROADCAST_ADDR;
1327 cmd->rnw = 0;
1328 cmd->in = NULL;
1329 cmd->out = &ccc->id;
1330 cmd->len = 1;
1331 cmd->actual_len = 0;
1332 cmd->continued = true;
1333
1334 /* Directed message */
1335 cmd = &xfer->cmds[1];
1336 cmd->addr = ccc->dests[0].addr;
1337 cmd->rnw = ccc->rnw;
1338 cmd->in = ccc->rnw ? ccc->dests[0].payload.data : NULL;
1339 cmd->out = ccc->rnw ? NULL : ccc->dests[0].payload.data;
1340 cmd->len = xfer_len;
1341 cmd->actual_len = actual_len;
1342 cmd->continued = false;
1343
1344 mutex_lock(&master->lock);
1345 svc_i3c_master_enqueue_xfer(master, xfer);
1346 if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1347 svc_i3c_master_dequeue_xfer(master, xfer);
1348 mutex_unlock(&master->lock);
1349
1350 if (cmd->actual_len != xfer_len)
1351 ccc->dests[0].payload.len = cmd->actual_len;
1352
1353 ret = xfer->ret;
1354 svc_i3c_master_free_xfer(xfer);
1355
1356 return ret;
1357 }
1358
1359 static int svc_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
1360 struct i3c_ccc_cmd *cmd)
1361 {
1362 struct svc_i3c_master *master = to_svc_i3c_master(m);
1363 bool broadcast = cmd->id < 0x80;
1364 int ret;
1365
1366 if (broadcast)
1367 ret = svc_i3c_master_send_bdcast_ccc_cmd(master, cmd);
1368 else
1369 ret = svc_i3c_master_send_direct_ccc_cmd(master, cmd);
1370
1371 if (ret)
1372 cmd->err = I3C_ERROR_M2;
1373
1374 return ret;
1375 }
1376
1377 static int svc_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
1378 struct i3c_priv_xfer *xfers,
1379 int nxfers)
1380 {
1381 struct i3c_master_controller *m = i3c_dev_get_master(dev);
1382 struct svc_i3c_master *master = to_svc_i3c_master(m);
1383 struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1384 struct svc_i3c_xfer *xfer;
1385 int ret, i;
1386
1387 xfer = svc_i3c_master_alloc_xfer(master, nxfers);
1388 if (!xfer)
1389 return -ENOMEM;
1390
1391 xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
1392
1393 for (i = 0; i < nxfers; i++) {
1394 struct svc_i3c_cmd *cmd = &xfer->cmds[i];
1395
1396 cmd->xfer = &xfers[i];
1397 cmd->addr = master->addrs[data->index];
1398 cmd->rnw = xfers[i].rnw;
1399 cmd->in = xfers[i].rnw ? xfers[i].data.in : NULL;
1400 cmd->out = xfers[i].rnw ? NULL : xfers[i].data.out;
1401 cmd->len = xfers[i].len;
1402 cmd->actual_len = xfers[i].rnw ? xfers[i].len : 0;
1403 cmd->continued = (i + 1) < nxfers;
1404 }
1405
1406 mutex_lock(&master->lock);
1407 svc_i3c_master_enqueue_xfer(master, xfer);
1408 if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1409 svc_i3c_master_dequeue_xfer(master, xfer);
1410 mutex_unlock(&master->lock);
1411
1412 ret = xfer->ret;
1413 svc_i3c_master_free_xfer(xfer);
1414
1415 return ret;
1416 }
1417
1418 static int svc_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
1419 const struct i2c_msg *xfers,
1420 int nxfers)
1421 {
1422 struct i3c_master_controller *m = i2c_dev_get_master(dev);
1423 struct svc_i3c_master *master = to_svc_i3c_master(m);
1424 struct svc_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
1425 struct svc_i3c_xfer *xfer;
1426 int ret, i;
1427
1428 xfer = svc_i3c_master_alloc_xfer(master, nxfers);
1429 if (!xfer)
1430 return -ENOMEM;
1431
1432 xfer->type = SVC_I3C_MCTRL_TYPE_I2C;
1433
1434 for (i = 0; i < nxfers; i++) {
1435 struct svc_i3c_cmd *cmd = &xfer->cmds[i];
1436
1437 cmd->addr = master->addrs[data->index];
1438 cmd->rnw = xfers[i].flags & I2C_M_RD;
1439 cmd->in = cmd->rnw ? xfers[i].buf : NULL;
1440 cmd->out = cmd->rnw ? NULL : xfers[i].buf;
1441 cmd->len = xfers[i].len;
1442 cmd->actual_len = cmd->rnw ? xfers[i].len : 0;
1443 cmd->continued = (i + 1 < nxfers);
1444 }
1445
1446 mutex_lock(&master->lock);
1447 svc_i3c_master_enqueue_xfer(master, xfer);
1448 if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1449 svc_i3c_master_dequeue_xfer(master, xfer);
1450 mutex_unlock(&master->lock);
1451
1452 ret = xfer->ret;
1453 svc_i3c_master_free_xfer(xfer);
1454
1455 return ret;
1456 }
1457
1458 static int svc_i3c_master_request_ibi(struct i3c_dev_desc *dev,
1459 const struct i3c_ibi_setup *req)
1460 {
1461 struct i3c_master_controller *m = i3c_dev_get_master(dev);
1462 struct svc_i3c_master *master = to_svc_i3c_master(m);
1463 struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1464 unsigned long flags;
1465 unsigned int i;
1466
1467 if (dev->ibi->max_payload_len > SVC_I3C_FIFO_SIZE) {
1468 dev_err(master->dev, "IBI max payload %d should be < %d\n",
1469 dev->ibi->max_payload_len, SVC_I3C_FIFO_SIZE);
1470 return -ERANGE;
1471 }
1472
1473 data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
1474 if (IS_ERR(data->ibi_pool))
1475 return PTR_ERR(data->ibi_pool);
1476
1477 spin_lock_irqsave(&master->ibi.lock, flags);
1478 for (i = 0; i < master->ibi.num_slots; i++) {
1479 if (!master->ibi.slots[i]) {
1480 data->ibi = i;
1481 master->ibi.slots[i] = dev;
1482 break;
1483 }
1484 }
1485 spin_unlock_irqrestore(&master->ibi.lock, flags);
1486
1487 if (i < master->ibi.num_slots)
1488 return 0;
1489
1490 i3c_generic_ibi_free_pool(data->ibi_pool);
1491 data->ibi_pool = NULL;
1492
1493 return -ENOSPC;
1494 }
1495
1496 static void svc_i3c_master_free_ibi(struct i3c_dev_desc *dev)
1497 {
1498 struct i3c_master_controller *m = i3c_dev_get_master(dev);
1499 struct svc_i3c_master *master = to_svc_i3c_master(m);
1500 struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1501 unsigned long flags;
1502
1503 spin_lock_irqsave(&master->ibi.lock, flags);
1504 master->ibi.slots[data->ibi] = NULL;
1505 data->ibi = -1;
1506 spin_unlock_irqrestore(&master->ibi.lock, flags);
1507
1508 i3c_generic_ibi_free_pool(data->ibi_pool);
1509 }
1510
1511 static int svc_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
1512 {
1513 struct i3c_master_controller *m = i3c_dev_get_master(dev);
1514 struct svc_i3c_master *master = to_svc_i3c_master(m);
1515 int ret;
1516
1517 ret = pm_runtime_resume_and_get(master->dev);
1518 if (ret < 0) {
1519 dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
1520 return ret;
1521 }
1522
1523 svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
1524
1525 return i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
1526 }
1527
1528 static int svc_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
1529 {
1530 struct i3c_master_controller *m = i3c_dev_get_master(dev);
1531 struct svc_i3c_master *master = to_svc_i3c_master(m);
1532 int ret;
1533
1534 svc_i3c_master_disable_interrupts(master);
1535
1536 ret = i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
1537
1538 pm_runtime_mark_last_busy(master->dev);
1539 pm_runtime_put_autosuspend(master->dev);
1540
1541 return ret;
1542 }
1543
1544 static void svc_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
1545 struct i3c_ibi_slot *slot)
1546 {
1547 struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1548
1549 i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
1550 }
1551
1552 static const struct i3c_master_controller_ops svc_i3c_master_ops = {
1553 .bus_init = svc_i3c_master_bus_init,
1554 .bus_cleanup = svc_i3c_master_bus_cleanup,
1555 .attach_i3c_dev = svc_i3c_master_attach_i3c_dev,
1556 .detach_i3c_dev = svc_i3c_master_detach_i3c_dev,
1557 .reattach_i3c_dev = svc_i3c_master_reattach_i3c_dev,
1558 .attach_i2c_dev = svc_i3c_master_attach_i2c_dev,
1559 .detach_i2c_dev = svc_i3c_master_detach_i2c_dev,
1560 .do_daa = svc_i3c_master_do_daa,
1561 .supports_ccc_cmd = svc_i3c_master_supports_ccc_cmd,
1562 .send_ccc_cmd = svc_i3c_master_send_ccc_cmd,
1563 .priv_xfers = svc_i3c_master_priv_xfers,
1564 .i2c_xfers = svc_i3c_master_i2c_xfers,
1565 .request_ibi = svc_i3c_master_request_ibi,
1566 .free_ibi = svc_i3c_master_free_ibi,
1567 .recycle_ibi_slot = svc_i3c_master_recycle_ibi_slot,
1568 .enable_ibi = svc_i3c_master_enable_ibi,
1569 .disable_ibi = svc_i3c_master_disable_ibi,
1570 };
1571
1572 static int svc_i3c_master_prepare_clks(struct svc_i3c_master *master)
1573 {
1574 int ret = 0;
1575
1576 ret = clk_prepare_enable(master->pclk);
1577 if (ret)
1578 return ret;
1579
1580 ret = clk_prepare_enable(master->fclk);
1581 if (ret) {
1582 clk_disable_unprepare(master->pclk);
1583 return ret;
1584 }
1585
1586 ret = clk_prepare_enable(master->sclk);
1587 if (ret) {
1588 clk_disable_unprepare(master->pclk);
1589 clk_disable_unprepare(master->fclk);
1590 return ret;
1591 }
1592
1593 return 0;
1594 }
1595
1596 static void svc_i3c_master_unprepare_clks(struct svc_i3c_master *master)
1597 {
1598 clk_disable_unprepare(master->pclk);
1599 clk_disable_unprepare(master->fclk);
1600 clk_disable_unprepare(master->sclk);
1601 }
1602
1603 static int svc_i3c_master_probe(struct platform_device *pdev)
1604 {
1605 struct device *dev = &pdev->dev;
1606 struct svc_i3c_master *master;
1607 int ret;
1608
1609 master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
1610 if (!master)
1611 return -ENOMEM;
1612
1613 master->regs = devm_platform_ioremap_resource(pdev, 0);
1614 if (IS_ERR(master->regs))
1615 return PTR_ERR(master->regs);
1616
1617 master->pclk = devm_clk_get(dev, "pclk");
1618 if (IS_ERR(master->pclk))
1619 return PTR_ERR(master->pclk);
1620
1621 master->fclk = devm_clk_get(dev, "fast_clk");
1622 if (IS_ERR(master->fclk))
1623 return PTR_ERR(master->fclk);
1624
1625 master->sclk = devm_clk_get(dev, "slow_clk");
1626 if (IS_ERR(master->sclk))
1627 return PTR_ERR(master->sclk);
1628
1629 master->irq = platform_get_irq(pdev, 0);
1630 if (master->irq < 0)
1631 return master->irq;
1632
1633 master->dev = dev;
1634
1635 ret = svc_i3c_master_prepare_clks(master);
1636 if (ret)
1637 return ret;
1638
1639 INIT_WORK(&master->hj_work, svc_i3c_master_hj_work);
1640 INIT_WORK(&master->ibi_work, svc_i3c_master_ibi_work);
1641 mutex_init(&master->lock);
1642
1643 ret = devm_request_irq(dev, master->irq, svc_i3c_master_irq_handler,
1644 IRQF_NO_SUSPEND, "svc-i3c-irq", master);
1645 if (ret)
1646 goto err_disable_clks;
1647
1648 master->free_slots = GENMASK(SVC_I3C_MAX_DEVS - 1, 0);
1649
1650 spin_lock_init(&master->xferqueue.lock);
1651 INIT_LIST_HEAD(&master->xferqueue.list);
1652
1653 spin_lock_init(&master->ibi.lock);
1654 master->ibi.num_slots = SVC_I3C_MAX_DEVS;
1655 master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
1656 sizeof(*master->ibi.slots),
1657 GFP_KERNEL);
1658 if (!master->ibi.slots) {
1659 ret = -ENOMEM;
1660 goto err_disable_clks;
1661 }
1662
1663 platform_set_drvdata(pdev, master);
1664
1665 pm_runtime_set_autosuspend_delay(&pdev->dev, SVC_I3C_PM_TIMEOUT_MS);
1666 pm_runtime_use_autosuspend(&pdev->dev);
1667 pm_runtime_get_noresume(&pdev->dev);
1668 pm_runtime_set_active(&pdev->dev);
1669 pm_runtime_enable(&pdev->dev);
1670
1671 svc_i3c_master_reset(master);
1672
1673 /* Register the master */
1674 ret = i3c_master_register(&master->base, &pdev->dev,
1675 &svc_i3c_master_ops, false);
1676 if (ret)
1677 goto rpm_disable;
1678
1679 pm_runtime_mark_last_busy(&pdev->dev);
1680 pm_runtime_put_autosuspend(&pdev->dev);
1681
1682 return 0;
1683
1684 rpm_disable:
1685 pm_runtime_dont_use_autosuspend(&pdev->dev);
1686 pm_runtime_put_noidle(&pdev->dev);
1687 pm_runtime_set_suspended(&pdev->dev);
1688 pm_runtime_disable(&pdev->dev);
1689
1690 err_disable_clks:
1691 svc_i3c_master_unprepare_clks(master);
1692
1693 return ret;
1694 }
1695
1696 static void svc_i3c_master_remove(struct platform_device *pdev)
1697 {
1698 struct svc_i3c_master *master = platform_get_drvdata(pdev);
1699
1700 cancel_work_sync(&master->hj_work);
1701 i3c_master_unregister(&master->base);
1702
1703 pm_runtime_dont_use_autosuspend(&pdev->dev);
1704 pm_runtime_disable(&pdev->dev);
1705 }
1706
1707 static void svc_i3c_save_regs(struct svc_i3c_master *master)
1708 {
1709 master->saved_regs.mconfig = readl(master->regs + SVC_I3C_MCONFIG);
1710 master->saved_regs.mdynaddr = readl(master->regs + SVC_I3C_MDYNADDR);
1711 }
1712
1713 static void svc_i3c_restore_regs(struct svc_i3c_master *master)
1714 {
1715 if (readl(master->regs + SVC_I3C_MDYNADDR) !=
1716 master->saved_regs.mdynaddr) {
1717 writel(master->saved_regs.mconfig,
1718 master->regs + SVC_I3C_MCONFIG);
1719 writel(master->saved_regs.mdynaddr,
1720 master->regs + SVC_I3C_MDYNADDR);
1721 }
1722 }
1723
1724 static int __maybe_unused svc_i3c_runtime_suspend(struct device *dev)
1725 {
1726 struct svc_i3c_master *master = dev_get_drvdata(dev);
1727
1728 svc_i3c_save_regs(master);
1729 svc_i3c_master_unprepare_clks(master);
1730 pinctrl_pm_select_sleep_state(dev);
1731
1732 return 0;
1733 }
1734
1735 static int __maybe_unused svc_i3c_runtime_resume(struct device *dev)
1736 {
1737 struct svc_i3c_master *master = dev_get_drvdata(dev);
1738
1739 pinctrl_pm_select_default_state(dev);
1740 svc_i3c_master_prepare_clks(master);
1741
1742 svc_i3c_restore_regs(master);
1743
1744 return 0;
1745 }
1746
1747 static const struct dev_pm_ops svc_i3c_pm_ops = {
1748 SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
1749 pm_runtime_force_resume)
1750 SET_RUNTIME_PM_OPS(svc_i3c_runtime_suspend,
1751 svc_i3c_runtime_resume, NULL)
1752 };
1753
1754 static const struct of_device_id svc_i3c_master_of_match_tbl[] = {
1755 { .compatible = "silvaco,i3c-master" },
1756 { /* sentinel */ },
1757 };
1758 MODULE_DEVICE_TABLE(of, svc_i3c_master_of_match_tbl);
1759
1760 static struct platform_driver svc_i3c_master = {
1761 .probe = svc_i3c_master_probe,
1762 .remove_new = svc_i3c_master_remove,
1763 .driver = {
1764 .name = "silvaco-i3c-master",
1765 .of_match_table = svc_i3c_master_of_match_tbl,
1766 .pm = &svc_i3c_pm_ops,
1767 },
1768 };
1769 module_platform_driver(svc_i3c_master);
1770
1771 MODULE_AUTHOR("Conor Culhane <conor.culhane@silvaco.com>");
1772 MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
1773 MODULE_DESCRIPTION("Silvaco dual-role I3C master driver");
1774 MODULE_LICENSE("GPL v2");
1775