1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Silvaco dual-role I3C master driver
4 *
5 * Copyright (C) 2020 Silvaco
6 * Author: Miquel RAYNAL <miquel.raynal@bootlin.com>
7 * Based on a work from: Conor Culhane <conor.culhane@silvaco.com>
8 */
9
10 #include <linux/bitfield.h>
11 #include <linux/clk.h>
12 #include <linux/completion.h>
13 #include <linux/errno.h>
14 #include <linux/i3c/master.h>
15 #include <linux/interrupt.h>
16 #include <linux/iopoll.h>
17 #include <linux/list.h>
18 #include <linux/module.h>
19 #include <linux/of.h>
20 #include <linux/pinctrl/consumer.h>
21 #include <linux/platform_device.h>
22 #include <linux/pm_runtime.h>
23
24 /* Master Mode Registers */
25 #define SVC_I3C_MCONFIG 0x000
26 #define SVC_I3C_MCONFIG_MASTER_EN BIT(0)
27 #define SVC_I3C_MCONFIG_DISTO(x) FIELD_PREP(BIT(3), (x))
28 #define SVC_I3C_MCONFIG_HKEEP(x) FIELD_PREP(GENMASK(5, 4), (x))
29 #define SVC_I3C_MCONFIG_ODSTOP(x) FIELD_PREP(BIT(6), (x))
30 #define SVC_I3C_MCONFIG_PPBAUD(x) FIELD_PREP(GENMASK(11, 8), (x))
31 #define SVC_I3C_MCONFIG_PPLOW(x) FIELD_PREP(GENMASK(15, 12), (x))
32 #define SVC_I3C_MCONFIG_ODBAUD(x) FIELD_PREP(GENMASK(23, 16), (x))
33 #define SVC_I3C_MCONFIG_ODHPP(x) FIELD_PREP(BIT(24), (x))
34 #define SVC_I3C_MCONFIG_SKEW(x) FIELD_PREP(GENMASK(27, 25), (x))
35 #define SVC_I3C_MCONFIG_I2CBAUD(x) FIELD_PREP(GENMASK(31, 28), (x))
36
37 #define SVC_I3C_MCTRL 0x084
38 #define SVC_I3C_MCTRL_REQUEST_MASK GENMASK(2, 0)
39 #define SVC_I3C_MCTRL_REQUEST_NONE 0
40 #define SVC_I3C_MCTRL_REQUEST_START_ADDR 1
41 #define SVC_I3C_MCTRL_REQUEST_STOP 2
42 #define SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK 3
43 #define SVC_I3C_MCTRL_REQUEST_PROC_DAA 4
44 #define SVC_I3C_MCTRL_REQUEST_AUTO_IBI 7
45 #define SVC_I3C_MCTRL_TYPE_I3C 0
46 #define SVC_I3C_MCTRL_TYPE_I2C BIT(4)
47 #define SVC_I3C_MCTRL_IBIRESP_AUTO 0
48 #define SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE 0
49 #define SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE BIT(7)
50 #define SVC_I3C_MCTRL_IBIRESP_NACK BIT(6)
51 #define SVC_I3C_MCTRL_IBIRESP_MANUAL GENMASK(7, 6)
52 #define SVC_I3C_MCTRL_DIR(x) FIELD_PREP(BIT(8), (x))
53 #define SVC_I3C_MCTRL_DIR_WRITE 0
54 #define SVC_I3C_MCTRL_DIR_READ 1
55 #define SVC_I3C_MCTRL_ADDR(x) FIELD_PREP(GENMASK(15, 9), (x))
56 #define SVC_I3C_MCTRL_RDTERM(x) FIELD_PREP(GENMASK(23, 16), (x))
57
58 #define SVC_I3C_MSTATUS 0x088
59 #define SVC_I3C_MSTATUS_STATE(x) FIELD_GET(GENMASK(2, 0), (x))
60 #define SVC_I3C_MSTATUS_STATE_DAA(x) (SVC_I3C_MSTATUS_STATE(x) == 5)
61 #define SVC_I3C_MSTATUS_STATE_IDLE(x) (SVC_I3C_MSTATUS_STATE(x) == 0)
62 #define SVC_I3C_MSTATUS_BETWEEN(x) FIELD_GET(BIT(4), (x))
63 #define SVC_I3C_MSTATUS_NACKED(x) FIELD_GET(BIT(5), (x))
64 #define SVC_I3C_MSTATUS_IBITYPE(x) FIELD_GET(GENMASK(7, 6), (x))
65 #define SVC_I3C_MSTATUS_IBITYPE_IBI 1
66 #define SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST 2
67 #define SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN 3
68 #define SVC_I3C_MINT_SLVSTART BIT(8)
69 #define SVC_I3C_MINT_MCTRLDONE BIT(9)
70 #define SVC_I3C_MINT_COMPLETE BIT(10)
71 #define SVC_I3C_MINT_RXPEND BIT(11)
72 #define SVC_I3C_MINT_TXNOTFULL BIT(12)
73 #define SVC_I3C_MINT_IBIWON BIT(13)
74 #define SVC_I3C_MINT_ERRWARN BIT(15)
75 #define SVC_I3C_MSTATUS_SLVSTART(x) FIELD_GET(SVC_I3C_MINT_SLVSTART, (x))
76 #define SVC_I3C_MSTATUS_MCTRLDONE(x) FIELD_GET(SVC_I3C_MINT_MCTRLDONE, (x))
77 #define SVC_I3C_MSTATUS_COMPLETE(x) FIELD_GET(SVC_I3C_MINT_COMPLETE, (x))
78 #define SVC_I3C_MSTATUS_RXPEND(x) FIELD_GET(SVC_I3C_MINT_RXPEND, (x))
79 #define SVC_I3C_MSTATUS_TXNOTFULL(x) FIELD_GET(SVC_I3C_MINT_TXNOTFULL, (x))
80 #define SVC_I3C_MSTATUS_IBIWON(x) FIELD_GET(SVC_I3C_MINT_IBIWON, (x))
81 #define SVC_I3C_MSTATUS_ERRWARN(x) FIELD_GET(SVC_I3C_MINT_ERRWARN, (x))
82 #define SVC_I3C_MSTATUS_IBIADDR(x) FIELD_GET(GENMASK(30, 24), (x))
83
84 #define SVC_I3C_IBIRULES 0x08C
85 #define SVC_I3C_IBIRULES_ADDR(slot, addr) FIELD_PREP(GENMASK(29, 0), \
86 ((addr) & 0x3F) << ((slot) * 6))
87 #define SVC_I3C_IBIRULES_ADDRS 5
88 #define SVC_I3C_IBIRULES_MSB0 BIT(30)
89 #define SVC_I3C_IBIRULES_NOBYTE BIT(31)
90 #define SVC_I3C_IBIRULES_MANDBYTE 0
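/*
 * Note on the IBIRULES encoding above: each address slot is 6 bits wide,
 * slot n occupying bits [6n+5:6n], so up to SVC_I3C_IBIRULES_ADDRS (5)
 * dynamic addresses can be matched in hardware. See
 * svc_i3c_update_ibirules() for how the mandatory-byte and no-byte lists
 * are built.
 */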
91 #define SVC_I3C_MINTSET 0x090
92 #define SVC_I3C_MINTCLR 0x094
93 #define SVC_I3C_MINTMASKED 0x098
94 #define SVC_I3C_MERRWARN 0x09C
95 #define SVC_I3C_MERRWARN_NACK BIT(2)
96 #define SVC_I3C_MERRWARN_TIMEOUT BIT(20)
97 #define SVC_I3C_MDMACTRL 0x0A0
98 #define SVC_I3C_MDATACTRL 0x0AC
99 #define SVC_I3C_MDATACTRL_FLUSHTB BIT(0)
100 #define SVC_I3C_MDATACTRL_FLUSHRB BIT(1)
101 #define SVC_I3C_MDATACTRL_UNLOCK_TRIG BIT(3)
102 #define SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL GENMASK(5, 4)
103 #define SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY 0
104 #define SVC_I3C_MDATACTRL_RXCOUNT(x) FIELD_GET(GENMASK(28, 24), (x))
105 #define SVC_I3C_MDATACTRL_TXFULL BIT(30)
106 #define SVC_I3C_MDATACTRL_RXEMPTY BIT(31)
107
108 #define SVC_I3C_MWDATAB 0x0B0
109 #define SVC_I3C_MWDATAB_END BIT(8)
110
111 #define SVC_I3C_MWDATABE 0x0B4
112 #define SVC_I3C_MWDATAH 0x0B8
113 #define SVC_I3C_MWDATAHE 0x0BC
114 #define SVC_I3C_MRDATAB 0x0C0
115 #define SVC_I3C_MRDATAH 0x0C8
116 #define SVC_I3C_MWMSG_SDR 0x0D0
117 #define SVC_I3C_MRMSG_SDR 0x0D4
118 #define SVC_I3C_MWMSG_DDR 0x0D8
119 #define SVC_I3C_MRMSG_DDR 0x0DC
120
121 #define SVC_I3C_MDYNADDR 0x0E4
122 #define SVC_MDYNADDR_VALID BIT(0)
123 #define SVC_MDYNADDR_ADDR(x) FIELD_PREP(GENMASK(7, 1), (x))
124
125 #define SVC_I3C_MAX_DEVS 32
126 #define SVC_I3C_PM_TIMEOUT_MS 1000
127
128 /* This parameter depends on the implementation and may be tuned */
129 #define SVC_I3C_FIFO_SIZE 16
130
131 #define SVC_I3C_EVENT_IBI GENMASK(7, 0)
132 #define SVC_I3C_EVENT_HOTJOIN BIT(31)
133
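/*
 * One svc_i3c_cmd per bus transaction within a transfer:
 * @addr: target address, @rnw: true for a read,
 * @in/@out: RX/TX buffer (only one is used, depending on @rnw),
 * @len: requested length, @actual_len: bytes actually transferred,
 * @xfer: originating i3c_priv_xfer (NULL for I2C and CCC transfers),
 * @continued: emit a repeated START instead of a STOP after this command.
 */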
134 struct svc_i3c_cmd {
135 u8 addr;
136 bool rnw;
137 u8 *in;
138 const void *out;
139 unsigned int len;
140 unsigned int actual_len;
141 struct i3c_priv_xfer *xfer;
142 bool continued;
143 };
144
145 struct svc_i3c_xfer {
146 struct list_head node;
147 struct completion comp;
148 int ret;
149 unsigned int type;
150 unsigned int ncmds;
151 struct svc_i3c_cmd cmds[];
152 };
153
154 struct svc_i3c_regs_save {
155 u32 mconfig;
156 u32 mdynaddr;
157 };
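/*
 * Only MCONFIG and MDYNADDR are saved/restored across runtime suspend,
 * see svc_i3c_save_regs() and svc_i3c_restore_regs().
 */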
158
159 /**
160 * struct svc_i3c_master - Silvaco I3C Master structure
161 * @base: I3C master controller
162 * @dev: Corresponding device
163 * @regs: Memory mapping
164 * @saved_regs: Volatile values for PM operations
165 * @free_slots: Bit array of available slots
166 * @addrs: Array containing the dynamic addresses of each attached device
167 * @descs: Array of descriptors, one per attached device
168 * @hj_work: Hot-join work
169 * @ibi_work: IBI work
170 * @irq: Main interrupt
171 * @pclk: System clock
172 * @fclk: Fast clock (bus)
173 * @sclk: Slow clock (other events)
174 * @xferqueue: Transfer queue structure
175 * @xferqueue.list: List member
176 * @xferqueue.cur: Current ongoing transfer
177 * @xferqueue.lock: Queue lock
178 * @ibi: IBI structure
179 * @ibi.num_slots: Number of slots available in @ibi.slots
180 * @ibi.slots: Available IBI slots
181 * @ibi.tbq_slot: To be queued IBI slot
182 * @ibi.lock: IBI lock
183 * @lock: Transfer lock, protects against races between the IBI work thread and transfer requests from the master
184 * @enabled_events: Enabled events: bits 7:0 count devices with IBI enabled, bit 31 flags Hot-Join.
185 * @mctrl_config: Saved SVC_I3C_MCONFIG value, used to restore the default bus speed.
186 */
187 struct svc_i3c_master {
188 struct i3c_master_controller base;
189 struct device *dev;
190 void __iomem *regs;
191 struct svc_i3c_regs_save saved_regs;
192 u32 free_slots;
193 u8 addrs[SVC_I3C_MAX_DEVS];
194 struct i3c_dev_desc *descs[SVC_I3C_MAX_DEVS];
195 struct work_struct hj_work;
196 struct work_struct ibi_work;
197 int irq;
198 struct clk *pclk;
199 struct clk *fclk;
200 struct clk *sclk;
201 struct {
202 struct list_head list;
203 struct svc_i3c_xfer *cur;
204 /* Prevent races between transfers */
205 spinlock_t lock;
206 } xferqueue;
207 struct {
208 unsigned int num_slots;
209 struct i3c_dev_desc **slots;
210 struct i3c_ibi_slot *tbq_slot;
211 /* Prevent races within IBI handlers */
212 spinlock_t lock;
213 } ibi;
214 struct mutex lock;
215 u32 enabled_events;
216 u32 mctrl_config;
217 };
218
219 /**
220 * struct svc_i3c_i2c_dev_data - Device specific data
221 * @index: Index in the master tables corresponding to this device
222 * @ibi: IBI slot index in the master structure
223 * @ibi_pool: IBI pool associated to this device
224 */
225 struct svc_i3c_i2c_dev_data {
226 u8 index;
227 int ibi;
228 struct i3c_generic_ibi_pool *ibi_pool;
229 };
230
231 static inline bool is_events_enabled(struct svc_i3c_master *master, u32 mask)
232 {
233 return !!(master->enabled_events & mask);
234 }
235
236 static bool svc_i3c_master_error(struct svc_i3c_master *master)
237 {
238 u32 mstatus, merrwarn;
239
240 mstatus = readl(master->regs + SVC_I3C_MSTATUS);
241 if (SVC_I3C_MSTATUS_ERRWARN(mstatus)) {
242 merrwarn = readl(master->regs + SVC_I3C_MERRWARN);
243 writel(merrwarn, master->regs + SVC_I3C_MERRWARN);
244
245 /* Ignore timeout error */
246 if (merrwarn & SVC_I3C_MERRWARN_TIMEOUT) {
247 dev_dbg(master->dev, "Warning condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
248 mstatus, merrwarn);
249 return false;
250 }
251
252 dev_err(master->dev,
253 "Error condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
254 mstatus, merrwarn);
255
256 return true;
257 }
258
259 return false;
260 }
261
262 static void svc_i3c_master_enable_interrupts(struct svc_i3c_master *master, u32 mask)
263 {
264 writel(mask, master->regs + SVC_I3C_MINTSET);
265 }
266
267 static void svc_i3c_master_disable_interrupts(struct svc_i3c_master *master)
268 {
269 u32 mask = readl(master->regs + SVC_I3C_MINTSET);
270
271 writel(mask, master->regs + SVC_I3C_MINTCLR);
272 }
273
274 static void svc_i3c_master_clear_merrwarn(struct svc_i3c_master *master)
275 {
276 /* Clear pending warnings */
277 writel(readl(master->regs + SVC_I3C_MERRWARN),
278 master->regs + SVC_I3C_MERRWARN);
279 }
280
281 static void svc_i3c_master_flush_fifo(struct svc_i3c_master *master)
282 {
283 /* Flush FIFOs */
284 writel(SVC_I3C_MDATACTRL_FLUSHTB | SVC_I3C_MDATACTRL_FLUSHRB,
285 master->regs + SVC_I3C_MDATACTRL);
286 }
287
288 static void svc_i3c_master_reset_fifo_trigger(struct svc_i3c_master *master)
289 {
290 u32 reg;
291
292 /* Set RX and TX trigger levels, flush FIFOs */
293 reg = SVC_I3C_MDATACTRL_FLUSHTB |
294 SVC_I3C_MDATACTRL_FLUSHRB |
295 SVC_I3C_MDATACTRL_UNLOCK_TRIG |
296 SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL |
297 SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY;
298 writel(reg, master->regs + SVC_I3C_MDATACTRL);
299 }
300
301 static void svc_i3c_master_reset(struct svc_i3c_master *master)
302 {
303 svc_i3c_master_clear_merrwarn(master);
304 svc_i3c_master_reset_fifo_trigger(master);
305 svc_i3c_master_disable_interrupts(master);
306 }
307
308 static inline struct svc_i3c_master *
309 to_svc_i3c_master(struct i3c_master_controller *master)
310 {
311 return container_of(master, struct svc_i3c_master, base);
312 }
313
314 static void svc_i3c_master_hj_work(struct work_struct *work)
315 {
316 struct svc_i3c_master *master;
317
318 master = container_of(work, struct svc_i3c_master, hj_work);
319 i3c_master_do_daa(&master->base);
320 }
321
322 static struct i3c_dev_desc *
323 svc_i3c_master_dev_from_addr(struct svc_i3c_master *master,
324 unsigned int ibiaddr)
325 {
326 int i;
327
328 for (i = 0; i < SVC_I3C_MAX_DEVS; i++)
329 if (master->addrs[i] == ibiaddr)
330 break;
331
332 if (i == SVC_I3C_MAX_DEVS)
333 return NULL;
334
335 return master->descs[i];
336 }
337
338 static void svc_i3c_master_emit_stop(struct svc_i3c_master *master)
339 {
340 writel(SVC_I3C_MCTRL_REQUEST_STOP, master->regs + SVC_I3C_MCTRL);
341
342 /*
343 * This delay is necessary after the emission of a stop, otherwise e.g.
344 * repeating IBIs do not get detected. There is a note in the manual
345 * about it, stating that the stop condition might not be settled
346 * correctly if a start condition follows too rapidly.
347 */
348 udelay(1);
349 }
350
351 static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
352 struct i3c_dev_desc *dev)
353 {
354 struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
355 struct i3c_ibi_slot *slot;
356 unsigned int count;
357 u32 mdatactrl;
358 int ret, val;
359 u8 *buf;
360
361 slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
362 if (!slot)
363 return -ENOSPC;
364
365 slot->len = 0;
366 buf = slot->data;
367
368 ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val,
369 SVC_I3C_MSTATUS_COMPLETE(val), 0, 1000);
370 if (ret) {
371 dev_err(master->dev, "Timeout when polling for COMPLETE\n");
372 return ret;
373 }
374
375 while (SVC_I3C_MSTATUS_RXPEND(readl(master->regs + SVC_I3C_MSTATUS)) &&
376 slot->len < SVC_I3C_FIFO_SIZE) {
377 mdatactrl = readl(master->regs + SVC_I3C_MDATACTRL);
378 count = SVC_I3C_MDATACTRL_RXCOUNT(mdatactrl);
379 readsb(master->regs + SVC_I3C_MRDATAB, buf, count);
380 slot->len += count;
381 buf += count;
382 }
383
384 master->ibi.tbq_slot = slot;
385
386 return 0;
387 }
388
389 static void svc_i3c_master_ack_ibi(struct svc_i3c_master *master,
390 bool mandatory_byte)
391 {
392 unsigned int ibi_ack_nack;
393
394 ibi_ack_nack = SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK;
395 if (mandatory_byte)
396 ibi_ack_nack |= SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE;
397 else
398 ibi_ack_nack |= SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE;
399
400 writel(ibi_ack_nack, master->regs + SVC_I3C_MCTRL);
401 }
402
403 static void svc_i3c_master_nack_ibi(struct svc_i3c_master *master)
404 {
405 writel(SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK |
406 SVC_I3C_MCTRL_IBIRESP_NACK,
407 master->regs + SVC_I3C_MCTRL);
408 }
409
410 static void svc_i3c_master_ibi_work(struct work_struct *work)
411 {
412 struct svc_i3c_master *master = container_of(work, struct svc_i3c_master, ibi_work);
413 struct svc_i3c_i2c_dev_data *data;
414 unsigned int ibitype, ibiaddr;
415 struct i3c_dev_desc *dev;
416 u32 status, val;
417 int ret;
418
419 mutex_lock(&master->lock);
420 /*
421 * IBIWON may be set before SVC_I3C_MCTRL_REQUEST_AUTO_IBI, causing
422 * readl_relaxed_poll_timeout() to return immediately. Consequently,
423 * ibitype will be 0 since it was last updated only after the 8th SCL
424 * cycle, leading to missed client IBI handlers.
425 *
426 * A typical scenario is when IBIWON occurs and bus arbitration is lost
427 * at svc_i3c_master_priv_xfers().
428 *
429 * Clear SVC_I3C_MINT_IBIWON before sending SVC_I3C_MCTRL_REQUEST_AUTO_IBI.
430 */
431 writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
432
433 /* Acknowledge the incoming interrupt with the AUTOIBI mechanism */
434 writel(SVC_I3C_MCTRL_REQUEST_AUTO_IBI |
435 SVC_I3C_MCTRL_IBIRESP_AUTO,
436 master->regs + SVC_I3C_MCTRL);
437
438 /* Wait for IBIWON, should take approximately 100us */
439 ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val,
440 SVC_I3C_MSTATUS_IBIWON(val), 0, 1000);
441 if (ret) {
442 dev_err(master->dev, "Timeout when polling for IBIWON\n");
443 svc_i3c_master_emit_stop(master);
444 goto reenable_ibis;
445 }
446
447 status = readl(master->regs + SVC_I3C_MSTATUS);
448 ibitype = SVC_I3C_MSTATUS_IBITYPE(status);
449 ibiaddr = SVC_I3C_MSTATUS_IBIADDR(status);
450
451 /* Handle the critical responses to IBIs */
452 switch (ibitype) {
453 case SVC_I3C_MSTATUS_IBITYPE_IBI:
454 dev = svc_i3c_master_dev_from_addr(master, ibiaddr);
455 if (!dev || !is_events_enabled(master, SVC_I3C_EVENT_IBI))
456 svc_i3c_master_nack_ibi(master);
457 else
458 svc_i3c_master_handle_ibi(master, dev);
459 break;
460 case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
461 if (is_events_enabled(master, SVC_I3C_EVENT_HOTJOIN))
462 svc_i3c_master_ack_ibi(master, false);
463 else
464 svc_i3c_master_nack_ibi(master);
465 break;
466 case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
467 svc_i3c_master_nack_ibi(master);
468 break;
469 default:
470 break;
471 }
472
473 /*
474 * If an error happened, we probably got interrupted and the exchange
475 * timed out. In this case we just drop everything, emit a stop and wait
476 * for the slave to interrupt again.
477 */
478 if (svc_i3c_master_error(master)) {
479 if (master->ibi.tbq_slot) {
480 data = i3c_dev_get_master_data(dev);
481 i3c_generic_ibi_recycle_slot(data->ibi_pool,
482 master->ibi.tbq_slot);
483 master->ibi.tbq_slot = NULL;
484 }
485
486 svc_i3c_master_emit_stop(master);
487
488 goto reenable_ibis;
489 }
490
491 /* Handle the non-critical tasks */
492 switch (ibitype) {
493 case SVC_I3C_MSTATUS_IBITYPE_IBI:
494 if (dev) {
495 i3c_master_queue_ibi(dev, master->ibi.tbq_slot);
496 master->ibi.tbq_slot = NULL;
497 }
498 svc_i3c_master_emit_stop(master);
499 break;
500 case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
501 svc_i3c_master_emit_stop(master);
502 if (is_events_enabled(master, SVC_I3C_EVENT_HOTJOIN))
503 queue_work(master->base.wq, &master->hj_work);
504 break;
505 case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
506 svc_i3c_master_emit_stop(master);
507 break;
508 default:
509 break;
510 }
511
512 reenable_ibis:
513 svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
514 mutex_unlock(&master->lock);
515 }
516
517 static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id)
518 {
519 struct svc_i3c_master *master = (struct svc_i3c_master *)dev_id;
520 u32 active = readl(master->regs + SVC_I3C_MSTATUS);
521
522 if (!SVC_I3C_MSTATUS_SLVSTART(active))
523 return IRQ_NONE;
524
525 /* Clear the interrupt status */
526 writel(SVC_I3C_MINT_SLVSTART, master->regs + SVC_I3C_MSTATUS);
527
528 svc_i3c_master_disable_interrupts(master);
529
530 /* Handle the interrupt in a non atomic context */
531 queue_work(master->base.wq, &master->ibi_work);
532
533 return IRQ_HANDLED;
534 }
535
536 static int svc_i3c_master_set_speed(struct i3c_master_controller *m,
537 enum i3c_open_drain_speed speed)
538 {
539 struct svc_i3c_master *master = to_svc_i3c_master(m);
540 struct i3c_bus *bus = i3c_master_get_bus(&master->base);
541 u32 ppbaud, odbaud, odhpp, mconfig;
542 unsigned long fclk_rate;
543 int ret;
544
545 ret = pm_runtime_resume_and_get(master->dev);
546 if (ret < 0) {
547 dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
548 return ret;
549 }
550
551 switch (speed) {
552 case I3C_OPEN_DRAIN_SLOW_SPEED:
553 fclk_rate = clk_get_rate(master->fclk);
554 if (!fclk_rate) {
555 ret = -EINVAL;
556 goto rpm_out;
557 }
558 /*
559 * Use a 50% duty-cycle I2C speed for I3C open-drain mode, so the first
560 * broadcast address is visible to all I2C/I3C devices on the I3C bus.
561 * An I3C device working as an I2C device will turn off its 50ns spike
562 * filter to change to I3C mode.
563 */
564 mconfig = master->mctrl_config;
565 ppbaud = FIELD_GET(GENMASK(11, 8), mconfig);
566 odhpp = 0;
567 odbaud = DIV_ROUND_UP(fclk_rate, bus->scl_rate.i2c * (2 + 2 * ppbaud)) - 1;
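		/*
		 * For example (hypothetical numbers, not from a real board):
		 * with a 100 MHz fclk, ppbaud = 3 and an I2C rate of 100 kHz,
		 * odbaud = DIV_ROUND_UP(100000000, 100000 * 8) - 1 = 124,
		 * i.e. a ~10 us open-drain period with a 50% duty-cycle.
		 */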
568 mconfig &= ~GENMASK(24, 16);
569 mconfig |= SVC_I3C_MCONFIG_ODBAUD(odbaud) | SVC_I3C_MCONFIG_ODHPP(odhpp);
570 writel(mconfig, master->regs + SVC_I3C_MCONFIG);
571 break;
572 case I3C_OPEN_DRAIN_NORMAL_SPEED:
573 writel(master->mctrl_config, master->regs + SVC_I3C_MCONFIG);
574 break;
575 }
576
577 rpm_out:
578 pm_runtime_mark_last_busy(master->dev);
579 pm_runtime_put_autosuspend(master->dev);
580
581 return ret;
582 }
583
584 static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
585 {
586 struct svc_i3c_master *master = to_svc_i3c_master(m);
587 struct i3c_bus *bus = i3c_master_get_bus(m);
588 struct i3c_device_info info = {};
589 unsigned long fclk_rate, fclk_period_ns;
590 unsigned int high_period_ns, od_low_period_ns;
591 u32 ppbaud, pplow, odhpp, odbaud, odstop, i2cbaud, reg;
592 int ret;
593
594 ret = pm_runtime_resume_and_get(master->dev);
595 if (ret < 0) {
596 dev_err(master->dev,
597 "<%s> cannot resume i3c bus master, err: %d\n",
598 __func__, ret);
599 return ret;
600 }
601
602 /* Timings derivation */
603 fclk_rate = clk_get_rate(master->fclk);
604 if (!fclk_rate) {
605 ret = -EINVAL;
606 goto rpm_out;
607 }
608
609 fclk_period_ns = DIV_ROUND_UP(1000000000, fclk_rate);
610
611 /*
612 * Using I3C Push-Pull mode, target is 12.5MHz/80ns period.
613 * Simplest configuration is using a 50% duty-cycle of 40ns.
614 */
615 ppbaud = DIV_ROUND_UP(40, fclk_period_ns) - 1;
616 pplow = 0;
617
618 /*
619 * Using I3C Open-Drain mode, target is 4.17MHz/240ns with a
620 * duty-cycle tuned so that high levels are filtered out by
621 * the 50ns filter (target being 40ns).
622 */
623 odhpp = 1;
624 high_period_ns = (ppbaud + 1) * fclk_period_ns;
625 odbaud = DIV_ROUND_UP(240 - high_period_ns, high_period_ns) - 1;
626 od_low_period_ns = (odbaud + 1) * high_period_ns;
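	/*
	 * Worked example with a hypothetical 100 MHz fclk (10 ns period):
	 * ppbaud = DIV_ROUND_UP(40, 10) - 1 = 3, so the push-pull high and
	 * low phases are 40 ns each (12.5 MHz). Then odbaud =
	 * DIV_ROUND_UP(240 - 40, 40) - 1 = 4, giving an open-drain low of
	 * 200 ns and a total open-drain period of 240 ns (~4.17 MHz).
	 */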
627
628 switch (bus->mode) {
629 case I3C_BUS_MODE_PURE:
630 i2cbaud = 0;
631 odstop = 0;
632 break;
633 case I3C_BUS_MODE_MIXED_FAST:
634 case I3C_BUS_MODE_MIXED_LIMITED:
635 /*
636 * Using I2C Fm+ mode, target is 1MHz/1000ns, the difference
637 * between the high and low period does not really matter.
638 */
639 i2cbaud = DIV_ROUND_UP(1000, od_low_period_ns) - 2;
640 odstop = 1;
641 break;
642 case I3C_BUS_MODE_MIXED_SLOW:
643 /*
644 * Using I2C Fm mode, target is 0.4MHz/2500ns, with the same
645 * constraints as the FM+ mode.
646 */
647 i2cbaud = DIV_ROUND_UP(2500, od_low_period_ns) - 2;
648 odstop = 1;
649 break;
650 default:
651 goto rpm_out;
652 }
653
654 reg = SVC_I3C_MCONFIG_MASTER_EN |
655 SVC_I3C_MCONFIG_DISTO(0) |
656 SVC_I3C_MCONFIG_HKEEP(0) |
657 SVC_I3C_MCONFIG_ODSTOP(odstop) |
658 SVC_I3C_MCONFIG_PPBAUD(ppbaud) |
659 SVC_I3C_MCONFIG_PPLOW(pplow) |
660 SVC_I3C_MCONFIG_ODBAUD(odbaud) |
661 SVC_I3C_MCONFIG_ODHPP(odhpp) |
662 SVC_I3C_MCONFIG_SKEW(0) |
663 SVC_I3C_MCONFIG_I2CBAUD(i2cbaud);
664 writel(reg, master->regs + SVC_I3C_MCONFIG);
665
666 master->mctrl_config = reg;
667 /* Master core's registration */
668 ret = i3c_master_get_free_addr(m, 0);
669 if (ret < 0)
670 goto rpm_out;
671
672 info.dyn_addr = ret;
673
674 writel(SVC_MDYNADDR_VALID | SVC_MDYNADDR_ADDR(info.dyn_addr),
675 master->regs + SVC_I3C_MDYNADDR);
676
677 ret = i3c_master_set_info(&master->base, &info);
678 if (ret)
679 goto rpm_out;
680
681 rpm_out:
682 pm_runtime_mark_last_busy(master->dev);
683 pm_runtime_put_autosuspend(master->dev);
684
685 return ret;
686 }
687
688 static void svc_i3c_master_bus_cleanup(struct i3c_master_controller *m)
689 {
690 struct svc_i3c_master *master = to_svc_i3c_master(m);
691 int ret;
692
693 ret = pm_runtime_resume_and_get(master->dev);
694 if (ret < 0) {
695 dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
696 return;
697 }
698
699 svc_i3c_master_disable_interrupts(master);
700
701 /* Disable master */
702 writel(0, master->regs + SVC_I3C_MCONFIG);
703
704 pm_runtime_mark_last_busy(master->dev);
705 pm_runtime_put_autosuspend(master->dev);
706 }
707
708 static int svc_i3c_master_reserve_slot(struct svc_i3c_master *master)
709 {
710 unsigned int slot;
711
712 if (!(master->free_slots & GENMASK(SVC_I3C_MAX_DEVS - 1, 0)))
713 return -ENOSPC;
714
715 slot = ffs(master->free_slots) - 1;
716
717 master->free_slots &= ~BIT(slot);
718
719 return slot;
720 }
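/*
 * The free_slots bitmap tracks the SVC_I3C_MAX_DEVS entries of the addrs[]
 * and descs[] device tables: reserving takes the lowest free index
 * (ffs() - 1) and releasing, below, simply sets the bit again.
 */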
721
722 static void svc_i3c_master_release_slot(struct svc_i3c_master *master,
723 unsigned int slot)
724 {
725 master->free_slots |= BIT(slot);
726 }
727
728 static int svc_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
729 {
730 struct i3c_master_controller *m = i3c_dev_get_master(dev);
731 struct svc_i3c_master *master = to_svc_i3c_master(m);
732 struct svc_i3c_i2c_dev_data *data;
733 int slot;
734
735 slot = svc_i3c_master_reserve_slot(master);
736 if (slot < 0)
737 return slot;
738
739 data = kzalloc(sizeof(*data), GFP_KERNEL);
740 if (!data) {
741 svc_i3c_master_release_slot(master, slot);
742 return -ENOMEM;
743 }
744
745 data->ibi = -1;
746 data->index = slot;
747 master->addrs[slot] = dev->info.dyn_addr ? dev->info.dyn_addr :
748 dev->info.static_addr;
749 master->descs[slot] = dev;
750
751 i3c_dev_set_master_data(dev, data);
752
753 return 0;
754 }
755
756 static int svc_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
757 u8 old_dyn_addr)
758 {
759 struct i3c_master_controller *m = i3c_dev_get_master(dev);
760 struct svc_i3c_master *master = to_svc_i3c_master(m);
761 struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
762
763 master->addrs[data->index] = dev->info.dyn_addr ? dev->info.dyn_addr :
764 dev->info.static_addr;
765
766 return 0;
767 }
768
769 static void svc_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
770 {
771 struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
772 struct i3c_master_controller *m = i3c_dev_get_master(dev);
773 struct svc_i3c_master *master = to_svc_i3c_master(m);
774
775 master->addrs[data->index] = 0;
776 svc_i3c_master_release_slot(master, data->index);
777
778 kfree(data);
779 }
780
781 static int svc_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
782 {
783 struct i3c_master_controller *m = i2c_dev_get_master(dev);
784 struct svc_i3c_master *master = to_svc_i3c_master(m);
785 struct svc_i3c_i2c_dev_data *data;
786 int slot;
787
788 slot = svc_i3c_master_reserve_slot(master);
789 if (slot < 0)
790 return slot;
791
792 data = kzalloc(sizeof(*data), GFP_KERNEL);
793 if (!data) {
794 svc_i3c_master_release_slot(master, slot);
795 return -ENOMEM;
796 }
797
798 data->index = slot;
799 master->addrs[slot] = dev->addr;
800
801 i2c_dev_set_master_data(dev, data);
802
803 return 0;
804 }
805
806 static void svc_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
807 {
808 struct svc_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
809 struct i3c_master_controller *m = i2c_dev_get_master(dev);
810 struct svc_i3c_master *master = to_svc_i3c_master(m);
811
812 svc_i3c_master_release_slot(master, data->index);
813
814 kfree(data);
815 }
816
817 static int svc_i3c_master_readb(struct svc_i3c_master *master, u8 *dst,
818 unsigned int len)
819 {
820 int ret, i;
821 u32 reg;
822
823 for (i = 0; i < len; i++) {
824 ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
825 reg,
826 SVC_I3C_MSTATUS_RXPEND(reg),
827 0, 1000);
828 if (ret)
829 return ret;
830
831 dst[i] = readl(master->regs + SVC_I3C_MRDATAB);
832 }
833
834 return 0;
835 }
836
837 static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
838 u8 *addrs, unsigned int *count)
839 {
840 u64 prov_id[SVC_I3C_MAX_DEVS] = {}, nacking_prov_id = 0;
841 unsigned int dev_nb = 0, last_addr = 0;
842 u32 reg;
843 int ret, i;
844
845 svc_i3c_master_flush_fifo(master);
846
847 while (true) {
848 /* Enter/proceed with DAA */
849 writel(SVC_I3C_MCTRL_REQUEST_PROC_DAA |
850 SVC_I3C_MCTRL_TYPE_I3C |
851 SVC_I3C_MCTRL_IBIRESP_NACK |
852 SVC_I3C_MCTRL_DIR(SVC_I3C_MCTRL_DIR_WRITE),
853 master->regs + SVC_I3C_MCTRL);
854
855 /*
856 * Either one slave will send its ID, or the assignment process
857 * is done.
858 */
859 ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
860 reg,
861 SVC_I3C_MSTATUS_RXPEND(reg) |
862 SVC_I3C_MSTATUS_MCTRLDONE(reg),
863 1, 1000);
864 if (ret)
865 return ret;
866
867 if (SVC_I3C_MSTATUS_RXPEND(reg)) {
868 u8 data[6];
869
870 /*
871 * We only care about the 48-bit provisional ID, and only to
872 * make sure a device does not nack an address twice.
873 * Otherwise, we would just need to flush the RX FIFO.
874 */
875 ret = svc_i3c_master_readb(master, data, 6);
876 if (ret)
877 return ret;
878
879 for (i = 0; i < 6; i++)
880 prov_id[dev_nb] |= (u64)(data[i]) << (8 * (5 - i));
881
882 /* We do not care about the BCR and DCR yet */
883 ret = svc_i3c_master_readb(master, data, 2);
884 if (ret)
885 return ret;
886 } else if (SVC_I3C_MSTATUS_MCTRLDONE(reg)) {
887 if (SVC_I3C_MSTATUS_STATE_IDLE(reg) &&
888 SVC_I3C_MSTATUS_COMPLETE(reg)) {
889 /*
890 * All devices received and acked their dynamic
891 * address, this is the natural end of the DAA
892 * procedure.
893 */
894 break;
895 } else if (SVC_I3C_MSTATUS_NACKED(reg)) {
896 /* No I3C devices attached */
897 if (dev_nb == 0)
898 break;
899
900 /*
901 * A slave device nacked the address. This is
902 * allowed only once: DAA will be stopped and
903 * then resumed. The same device is supposed to
904 * answer again immediately and shall ack the
905 * address this time.
906 */
907 if (prov_id[dev_nb] == nacking_prov_id)
908 return -EIO;
909
910 dev_nb--;
911 nacking_prov_id = prov_id[dev_nb];
912 svc_i3c_master_emit_stop(master);
913
914 continue;
915 } else {
916 return -EIO;
917 }
918 }
919
920 /* Wait for the slave to be ready to receive its address */
921 ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
922 reg,
923 SVC_I3C_MSTATUS_MCTRLDONE(reg) &&
924 SVC_I3C_MSTATUS_STATE_DAA(reg) &&
925 SVC_I3C_MSTATUS_BETWEEN(reg),
926 0, 1000);
927 if (ret)
928 return ret;
929
930 /* Give the slave device a suitable dynamic address */
931 ret = i3c_master_get_free_addr(&master->base, last_addr + 1);
932 if (ret < 0)
933 return ret;
934
935 addrs[dev_nb] = ret;
936 dev_dbg(master->dev, "DAA: device %d assigned to 0x%02x\n",
937 dev_nb, addrs[dev_nb]);
938
939 writel(addrs[dev_nb], master->regs + SVC_I3C_MWDATAB);
940 last_addr = addrs[dev_nb++];
941 }
942
943 *count = dev_nb;
944
945 return 0;
946 }
947
948 static int svc_i3c_update_ibirules(struct svc_i3c_master *master)
949 {
950 struct i3c_dev_desc *dev;
951 u32 reg_mbyte = 0, reg_nobyte = SVC_I3C_IBIRULES_NOBYTE;
952 unsigned int mbyte_addr_ok = 0, mbyte_addr_ko = 0, nobyte_addr_ok = 0,
953 nobyte_addr_ko = 0;
954 bool list_mbyte = false, list_nobyte = false;
955
956 /* Create the IBIRULES register for both cases */
957 i3c_bus_for_each_i3cdev(&master->base.bus, dev) {
958 if (!(dev->info.bcr & I3C_BCR_IBI_REQ_CAP))
959 continue;
960
961 if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD) {
962 reg_mbyte |= SVC_I3C_IBIRULES_ADDR(mbyte_addr_ok,
963 dev->info.dyn_addr);
964
965 /* IBI rules cannot be applied to devices with MSb=1 */
966 if (dev->info.dyn_addr & BIT(7))
967 mbyte_addr_ko++;
968 else
969 mbyte_addr_ok++;
970 } else {
971 reg_nobyte |= SVC_I3C_IBIRULES_ADDR(nobyte_addr_ok,
972 dev->info.dyn_addr);
973
974 /* IBI rules cannot be applied to devices with MSb=1 */
975 if (dev->info.dyn_addr & BIT(7))
976 nobyte_addr_ko++;
977 else
978 nobyte_addr_ok++;
979 }
980 }
981
982 /* Device list cannot be handled by hardware */
983 if (!mbyte_addr_ko && mbyte_addr_ok <= SVC_I3C_IBIRULES_ADDRS)
984 list_mbyte = true;
985
986 if (!nobyte_addr_ko && nobyte_addr_ok <= SVC_I3C_IBIRULES_ADDRS)
987 list_nobyte = true;
988
989 /* No list can be properly handled, return an error */
990 if (!list_mbyte && !list_nobyte)
991 return -ERANGE;
992
993 /* Pick the first list that the hardware can handle; the choice is arbitrary */
994 if (list_mbyte)
995 writel(reg_mbyte, master->regs + SVC_I3C_IBIRULES);
996 else
997 writel(reg_nobyte, master->regs + SVC_I3C_IBIRULES);
998
999 return 0;
1000 }
1001
1002 static int svc_i3c_master_do_daa(struct i3c_master_controller *m)
1003 {
1004 struct svc_i3c_master *master = to_svc_i3c_master(m);
1005 u8 addrs[SVC_I3C_MAX_DEVS];
1006 unsigned long flags;
1007 unsigned int dev_nb;
1008 int ret, i;
1009
1010 ret = pm_runtime_resume_and_get(master->dev);
1011 if (ret < 0) {
1012 dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
1013 return ret;
1014 }
1015
1016 spin_lock_irqsave(&master->xferqueue.lock, flags);
1017 ret = svc_i3c_master_do_daa_locked(master, addrs, &dev_nb);
1018 spin_unlock_irqrestore(&master->xferqueue.lock, flags);
1019 if (ret) {
1020 svc_i3c_master_emit_stop(master);
1021 svc_i3c_master_clear_merrwarn(master);
1022 goto rpm_out;
1023 }
1024
1025 /*
1026 * Register all the devices that took part in DAA with the core
1027 *
1028 * If two devices (A and B) are detected in DAA and address 0xa is assigned to
1029 * device A and 0xb to device B, a failure in i3c_master_add_i3c_dev_locked()
1030 * for device A (addr: 0xa) could prevent device B (addr: 0xb) from being
1031 * registered on the bus. The I3C stack might still consider 0xb a free
1032 * address. If a subsequent Hotjoin occurs, 0xb might be assigned to Device A,
1033 * causing both devices A and B to use the same address 0xb, violating the I3C
1034 * specification.
1035 *
1036 * The return value for i3c_master_add_i3c_dev_locked() should not be checked
1037 * because subsequent steps will scan the entire I3C bus, independent of
1038 * whether i3c_master_add_i3c_dev_locked() returns success.
1039 *
1040 * If device A registration fails, there is still a chance to register device
1041 * B. i3c_master_add_i3c_dev_locked() can reset DAA if a failure occurs while
1042 * retrieving device information.
1043 */
1044 for (i = 0; i < dev_nb; i++)
1045 i3c_master_add_i3c_dev_locked(m, addrs[i]);
1046
1047 /* Configure IBI auto-rules */
1048 ret = svc_i3c_update_ibirules(master);
1049 if (ret)
1050 dev_err(master->dev, "Cannot handle such a list of devices");
1051
1052 rpm_out:
1053 pm_runtime_mark_last_busy(master->dev);
1054 pm_runtime_put_autosuspend(master->dev);
1055
1056 return ret;
1057 }
1058
1059 static int svc_i3c_master_read(struct svc_i3c_master *master,
1060 u8 *in, unsigned int len)
1061 {
1062 int offset = 0, i;
1063 u32 mdctrl, mstatus;
1064 bool completed = false;
1065 unsigned int count;
1066 unsigned long start = jiffies;
1067
1068 while (!completed) {
1069 mstatus = readl(master->regs + SVC_I3C_MSTATUS);
1070 if (SVC_I3C_MSTATUS_COMPLETE(mstatus) != 0)
1071 completed = true;
1072
1073 if (time_after(jiffies, start + msecs_to_jiffies(1000))) {
1074 dev_dbg(master->dev, "I3C read timeout\n");
1075 return -ETIMEDOUT;
1076 }
1077
1078 mdctrl = readl(master->regs + SVC_I3C_MDATACTRL);
1079 count = SVC_I3C_MDATACTRL_RXCOUNT(mdctrl);
1080 if (offset + count > len) {
1081 dev_err(master->dev, "I3C receive length too long!\n");
1082 return -EINVAL;
1083 }
1084 for (i = 0; i < count; i++)
1085 in[offset + i] = readl(master->regs + SVC_I3C_MRDATAB);
1086
1087 offset += count;
1088 }
1089
1090 return offset;
1091 }
1092
1093 static int svc_i3c_master_write(struct svc_i3c_master *master,
1094 const u8 *out, unsigned int len)
1095 {
1096 int offset = 0, ret;
1097 u32 mdctrl;
1098
1099 while (offset < len) {
1100 ret = readl_poll_timeout(master->regs + SVC_I3C_MDATACTRL,
1101 mdctrl,
1102 !(mdctrl & SVC_I3C_MDATACTRL_TXFULL),
1103 0, 1000);
1104 if (ret)
1105 return ret;
1106
1107 /*
1108 * The last byte to be sent over the bus must either have the
1109 * "end" bit set or be written in MWDATABE.
1110 */
1111 if (likely(offset < (len - 1)))
1112 writel(out[offset++], master->regs + SVC_I3C_MWDATAB);
1113 else
1114 writel(out[offset++], master->regs + SVC_I3C_MWDATABE);
1115 }
1116
1117 return 0;
1118 }
1119
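/*
 * Core transfer helper: emits a (repeated) START plus address, retries once
 * on a NACK as described by the spec excerpt below, then moves data through
 * the FIFOs and finally emits a STOP unless the command is continued.
 */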
1120 static int svc_i3c_master_xfer(struct svc_i3c_master *master,
1121 bool rnw, unsigned int xfer_type, u8 addr,
1122 u8 *in, const u8 *out, unsigned int xfer_len,
1123 unsigned int *actual_len, bool continued)
1124 {
1125 int retry = 2;
1126 u32 reg;
1127 int ret;
1128
1129 /* Clear the SVC_I3C_MINT_IBIWON write-1-to-clear bit */
1130 writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
1131
1132
1133 while (retry--) {
1134 writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
1135 xfer_type |
1136 SVC_I3C_MCTRL_IBIRESP_NACK |
1137 SVC_I3C_MCTRL_DIR(rnw) |
1138 SVC_I3C_MCTRL_ADDR(addr) |
1139 SVC_I3C_MCTRL_RDTERM(*actual_len),
1140 master->regs + SVC_I3C_MCTRL);
1141
1142 ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
1143 SVC_I3C_MSTATUS_MCTRLDONE(reg), 0, 1000);
1144 if (ret)
1145 goto emit_stop;
1146
1147 if (readl(master->regs + SVC_I3C_MERRWARN) & SVC_I3C_MERRWARN_NACK) {
1148 /*
1149 * According to I3C Spec 1.1.1, 11-Jun-2021, section: 5.1.2.2.3.
1150 * If the Controller chooses to start an I3C Message with an I3C Dynamic
1151 * Address, then special provisions shall be made because that same I3C
1152 * Target may be initiating an IBI or a Controller Role Request. So, one of
1153 * three things may happen: (skip 1, 2)
1154 *
1155 * 3. The Addresses match and the RnW bits also match, and so neither
1156 * Controller nor Target will ACK since both are expecting the other side to
1157 * provide ACK. As a result, each side might think it had "won" arbitration,
1158 * but neither side would continue, as each would subsequently see that the
1159 * other did not provide ACK.
1160 * ...
1161 * For either value of RnW: Due to the NACK, the Controller shall defer the
1162 * Private Write or Private Read, and should typically transmit the Target
1163 * Address again after a Repeated START (i.e., the next one or any one prior
1164 * to a STOP in the Frame). Since the Address Header following a Repeated
1165 * START is not arbitrated, the Controller will always win (see Section
1166 * 5.1.2.2.4).
1167 */
1168 if (retry && addr != 0x7e) {
1169 writel(SVC_I3C_MERRWARN_NACK, master->regs + SVC_I3C_MERRWARN);
1170 } else {
1171 ret = -ENXIO;
1172 *actual_len = 0;
1173 goto emit_stop;
1174 }
1175 } else {
1176 break;
1177 }
1178 }
1179
1180 /*
1181 * According to I3C spec ver 1.1.1, 5.1.2.2.3 Consequence of Controller Starting a Frame
1182 * with I3C Target Address.
1183 *
1184 * The I3C Controller normally should start a Frame, the Address may be arbitrated, and so
1185 * the Controller shall monitor to see whether an In-Band Interrupt request, a Controller
1186 * Role Request (i.e., Secondary Controller requests to become the Active Controller), or
1187 * a Hot-Join Request has been made.
1188 *
1189 * If the IBIWON check is missed, wrong data will be returned. When IBIWON
1190 * happens, return a failure and yield to the event handlers above.
1191 */
1192 if (SVC_I3C_MSTATUS_IBIWON(reg)) {
1193 ret = -EAGAIN;
1194 *actual_len = 0;
1195 goto emit_stop;
1196 }
1197
1198 if (rnw)
1199 ret = svc_i3c_master_read(master, in, xfer_len);
1200 else
1201 ret = svc_i3c_master_write(master, out, xfer_len);
1202 if (ret < 0)
1203 goto emit_stop;
1204
1205 if (rnw)
1206 *actual_len = ret;
1207
1208 ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
1209 SVC_I3C_MSTATUS_COMPLETE(reg), 0, 1000);
1210 if (ret)
1211 goto emit_stop;
1212
1213 writel(SVC_I3C_MINT_COMPLETE, master->regs + SVC_I3C_MSTATUS);
1214
1215 if (!continued) {
1216 svc_i3c_master_emit_stop(master);
1217
1218 /* Wait idle if stop is sent. */
1219 readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
1220 SVC_I3C_MSTATUS_STATE_IDLE(reg), 0, 1000);
1221 }
1222
1223 return 0;
1224
1225 emit_stop:
1226 svc_i3c_master_emit_stop(master);
1227 svc_i3c_master_clear_merrwarn(master);
1228
1229 return ret;
1230 }
1231
1232 static struct svc_i3c_xfer *
1233 svc_i3c_master_alloc_xfer(struct svc_i3c_master *master, unsigned int ncmds)
1234 {
1235 struct svc_i3c_xfer *xfer;
1236
1237 xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
1238 if (!xfer)
1239 return NULL;
1240
1241 INIT_LIST_HEAD(&xfer->node);
1242 xfer->ncmds = ncmds;
1243 xfer->ret = -ETIMEDOUT;
1244
1245 return xfer;
1246 }
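/*
 * Note: xfer->ret is initialized to -ETIMEDOUT above so that a transfer which
 * is dequeued after wait_for_completion_timeout() expires reports a timeout.
 */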
1247
1248 static void svc_i3c_master_free_xfer(struct svc_i3c_xfer *xfer)
1249 {
1250 kfree(xfer);
1251 }
1252
1253 static void svc_i3c_master_dequeue_xfer_locked(struct svc_i3c_master *master,
1254 struct svc_i3c_xfer *xfer)
1255 {
1256 if (master->xferqueue.cur == xfer)
1257 master->xferqueue.cur = NULL;
1258 else
1259 list_del_init(&xfer->node);
1260 }
1261
1262 static void svc_i3c_master_dequeue_xfer(struct svc_i3c_master *master,
1263 struct svc_i3c_xfer *xfer)
1264 {
1265 unsigned long flags;
1266
1267 spin_lock_irqsave(&master->xferqueue.lock, flags);
1268 svc_i3c_master_dequeue_xfer_locked(master, xfer);
1269 spin_unlock_irqrestore(&master->xferqueue.lock, flags);
1270 }
1271
1272 static void svc_i3c_master_start_xfer_locked(struct svc_i3c_master *master)
1273 {
1274 struct svc_i3c_xfer *xfer = master->xferqueue.cur;
1275 int ret, i;
1276
1277 if (!xfer)
1278 return;
1279
1280 svc_i3c_master_clear_merrwarn(master);
1281 svc_i3c_master_flush_fifo(master);
1282
1283 for (i = 0; i < xfer->ncmds; i++) {
1284 struct svc_i3c_cmd *cmd = &xfer->cmds[i];
1285
1286 ret = svc_i3c_master_xfer(master, cmd->rnw, xfer->type,
1287 cmd->addr, cmd->in, cmd->out,
1288 cmd->len, &cmd->actual_len,
1289 cmd->continued);
1290 /* cmd->xfer is NULL if I2C or CCC transfer */
1291 if (cmd->xfer)
1292 cmd->xfer->actual_len = cmd->actual_len;
1293
1294 if (ret)
1295 break;
1296 }
1297
1298 xfer->ret = ret;
1299 complete(&xfer->comp);
1300
1301 if (ret < 0)
1302 svc_i3c_master_dequeue_xfer_locked(master, xfer);
1303
1304 xfer = list_first_entry_or_null(&master->xferqueue.list,
1305 struct svc_i3c_xfer,
1306 node);
1307 if (xfer)
1308 list_del_init(&xfer->node);
1309
1310 master->xferqueue.cur = xfer;
1311 svc_i3c_master_start_xfer_locked(master);
1312 }
1313
1314 static void svc_i3c_master_enqueue_xfer(struct svc_i3c_master *master,
1315 struct svc_i3c_xfer *xfer)
1316 {
1317 unsigned long flags;
1318 int ret;
1319
1320 ret = pm_runtime_resume_and_get(master->dev);
1321 if (ret < 0) {
1322 dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
1323 return;
1324 }
1325
1326 init_completion(&xfer->comp);
1327 spin_lock_irqsave(&master->xferqueue.lock, flags);
1328 if (master->xferqueue.cur) {
1329 list_add_tail(&xfer->node, &master->xferqueue.list);
1330 } else {
1331 master->xferqueue.cur = xfer;
1332 svc_i3c_master_start_xfer_locked(master);
1333 }
1334 spin_unlock_irqrestore(&master->xferqueue.lock, flags);
1335
1336 pm_runtime_mark_last_busy(master->dev);
1337 pm_runtime_put_autosuspend(master->dev);
1338 }
1339
1340 static bool
1341 svc_i3c_master_supports_ccc_cmd(struct i3c_master_controller *master,
1342 const struct i3c_ccc_cmd *cmd)
1343 {
1344 /* No software support for CCC commands targeting more than one slave */
1345 return (cmd->ndests == 1);
1346 }
1347
1348 static int svc_i3c_master_send_bdcast_ccc_cmd(struct svc_i3c_master *master,
1349 struct i3c_ccc_cmd *ccc)
1350 {
1351 unsigned int xfer_len = ccc->dests[0].payload.len + 1;
1352 struct svc_i3c_xfer *xfer;
1353 struct svc_i3c_cmd *cmd;
1354 u8 *buf;
1355 int ret;
1356
1357 xfer = svc_i3c_master_alloc_xfer(master, 1);
1358 if (!xfer)
1359 return -ENOMEM;
1360
1361 buf = kmalloc(xfer_len, GFP_KERNEL);
1362 if (!buf) {
1363 svc_i3c_master_free_xfer(xfer);
1364 return -ENOMEM;
1365 }
1366
1367 buf[0] = ccc->id;
1368 memcpy(&buf[1], ccc->dests[0].payload.data, ccc->dests[0].payload.len);
1369
1370 xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
1371
1372 cmd = &xfer->cmds[0];
1373 cmd->addr = ccc->dests[0].addr;
1374 cmd->rnw = ccc->rnw;
1375 cmd->in = NULL;
1376 cmd->out = buf;
1377 cmd->len = xfer_len;
1378 cmd->actual_len = 0;
1379 cmd->continued = false;
1380
1381 mutex_lock(&master->lock);
1382 svc_i3c_master_enqueue_xfer(master, xfer);
1383 if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1384 svc_i3c_master_dequeue_xfer(master, xfer);
1385 mutex_unlock(&master->lock);
1386
1387 ret = xfer->ret;
1388 kfree(buf);
1389 svc_i3c_master_free_xfer(xfer);
1390
1391 return ret;
1392 }
1393
1394 static int svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master *master,
1395 struct i3c_ccc_cmd *ccc)
1396 {
1397 unsigned int xfer_len = ccc->dests[0].payload.len;
1398 unsigned int actual_len = ccc->rnw ? xfer_len : 0;
1399 struct svc_i3c_xfer *xfer;
1400 struct svc_i3c_cmd *cmd;
1401 int ret;
1402
1403 xfer = svc_i3c_master_alloc_xfer(master, 2);
1404 if (!xfer)
1405 return -ENOMEM;
1406
1407 xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
1408
1409 /* Broadcasted message */
1410 cmd = &xfer->cmds[0];
1411 cmd->addr = I3C_BROADCAST_ADDR;
1412 cmd->rnw = 0;
1413 cmd->in = NULL;
1414 cmd->out = &ccc->id;
1415 cmd->len = 1;
1416 cmd->actual_len = 0;
1417 cmd->continued = true;
1418
1419 /* Directed message */
1420 cmd = &xfer->cmds[1];
1421 cmd->addr = ccc->dests[0].addr;
1422 cmd->rnw = ccc->rnw;
1423 cmd->in = ccc->rnw ? ccc->dests[0].payload.data : NULL;
1424 cmd->out = ccc->rnw ? NULL : ccc->dests[0].payload.data;
1425 cmd->len = xfer_len;
1426 cmd->actual_len = actual_len;
1427 cmd->continued = false;
1428
1429 mutex_lock(&master->lock);
1430 svc_i3c_master_enqueue_xfer(master, xfer);
1431 if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1432 svc_i3c_master_dequeue_xfer(master, xfer);
1433 mutex_unlock(&master->lock);
1434
1435 if (cmd->actual_len != xfer_len)
1436 ccc->dests[0].payload.len = cmd->actual_len;
1437
1438 ret = xfer->ret;
1439 svc_i3c_master_free_xfer(xfer);
1440
1441 return ret;
1442 }
1443
1444 static int svc_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
1445 struct i3c_ccc_cmd *cmd)
1446 {
1447 struct svc_i3c_master *master = to_svc_i3c_master(m);
1448 bool broadcast = cmd->id < 0x80;
1449 int ret;
1450
1451 if (broadcast)
1452 ret = svc_i3c_master_send_bdcast_ccc_cmd(master, cmd);
1453 else
1454 ret = svc_i3c_master_send_direct_ccc_cmd(master, cmd);
1455
1456 if (ret)
1457 cmd->err = I3C_ERROR_M2;
1458
1459 return ret;
1460 }
1461
1462 static int svc_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
1463 struct i3c_priv_xfer *xfers,
1464 int nxfers)
1465 {
1466 struct i3c_master_controller *m = i3c_dev_get_master(dev);
1467 struct svc_i3c_master *master = to_svc_i3c_master(m);
1468 struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1469 struct svc_i3c_xfer *xfer;
1470 int ret, i;
1471
1472 xfer = svc_i3c_master_alloc_xfer(master, nxfers);
1473 if (!xfer)
1474 return -ENOMEM;
1475
1476 xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
1477
1478 for (i = 0; i < nxfers; i++) {
1479 struct svc_i3c_cmd *cmd = &xfer->cmds[i];
1480
1481 cmd->xfer = &xfers[i];
1482 cmd->addr = master->addrs[data->index];
1483 cmd->rnw = xfers[i].rnw;
1484 cmd->in = xfers[i].rnw ? xfers[i].data.in : NULL;
1485 cmd->out = xfers[i].rnw ? NULL : xfers[i].data.out;
1486 cmd->len = xfers[i].len;
1487 cmd->actual_len = xfers[i].rnw ? xfers[i].len : 0;
1488 cmd->continued = (i + 1) < nxfers;
1489 }
1490
1491 mutex_lock(&master->lock);
1492 svc_i3c_master_enqueue_xfer(master, xfer);
1493 if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1494 svc_i3c_master_dequeue_xfer(master, xfer);
1495 mutex_unlock(&master->lock);
1496
1497 ret = xfer->ret;
1498 svc_i3c_master_free_xfer(xfer);
1499
1500 return ret;
1501 }
1502
1503 static int svc_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
1504 const struct i2c_msg *xfers,
1505 int nxfers)
1506 {
1507 struct i3c_master_controller *m = i2c_dev_get_master(dev);
1508 struct svc_i3c_master *master = to_svc_i3c_master(m);
1509 struct svc_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
1510 struct svc_i3c_xfer *xfer;
1511 int ret, i;
1512
1513 xfer = svc_i3c_master_alloc_xfer(master, nxfers);
1514 if (!xfer)
1515 return -ENOMEM;
1516
1517 xfer->type = SVC_I3C_MCTRL_TYPE_I2C;
1518
1519 for (i = 0; i < nxfers; i++) {
1520 struct svc_i3c_cmd *cmd = &xfer->cmds[i];
1521
1522 cmd->addr = master->addrs[data->index];
1523 cmd->rnw = xfers[i].flags & I2C_M_RD;
1524 cmd->in = cmd->rnw ? xfers[i].buf : NULL;
1525 cmd->out = cmd->rnw ? NULL : xfers[i].buf;
1526 cmd->len = xfers[i].len;
1527 cmd->actual_len = cmd->rnw ? xfers[i].len : 0;
1528 cmd->continued = (i + 1 < nxfers);
1529 }
1530
1531 mutex_lock(&master->lock);
1532 svc_i3c_master_enqueue_xfer(master, xfer);
1533 if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1534 svc_i3c_master_dequeue_xfer(master, xfer);
1535 mutex_unlock(&master->lock);
1536
1537 ret = xfer->ret;
1538 svc_i3c_master_free_xfer(xfer);
1539
1540 return ret;
1541 }
1542
1543 static int svc_i3c_master_request_ibi(struct i3c_dev_desc *dev,
1544 const struct i3c_ibi_setup *req)
1545 {
1546 struct i3c_master_controller *m = i3c_dev_get_master(dev);
1547 struct svc_i3c_master *master = to_svc_i3c_master(m);
1548 struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1549 unsigned long flags;
1550 unsigned int i;
1551
1552 if (dev->ibi->max_payload_len > SVC_I3C_FIFO_SIZE) {
1553 dev_err(master->dev, "IBI max payload %d should be < %d\n",
1554 dev->ibi->max_payload_len, SVC_I3C_FIFO_SIZE);
1555 return -ERANGE;
1556 }
1557
1558 data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
1559 if (IS_ERR(data->ibi_pool))
1560 return PTR_ERR(data->ibi_pool);
1561
1562 spin_lock_irqsave(&master->ibi.lock, flags);
1563 for (i = 0; i < master->ibi.num_slots; i++) {
1564 if (!master->ibi.slots[i]) {
1565 data->ibi = i;
1566 master->ibi.slots[i] = dev;
1567 break;
1568 }
1569 }
1570 spin_unlock_irqrestore(&master->ibi.lock, flags);
1571
1572 if (i < master->ibi.num_slots)
1573 return 0;
1574
1575 i3c_generic_ibi_free_pool(data->ibi_pool);
1576 data->ibi_pool = NULL;
1577
1578 return -ENOSPC;
1579 }
1580
1581 static void svc_i3c_master_free_ibi(struct i3c_dev_desc *dev)
1582 {
1583 struct i3c_master_controller *m = i3c_dev_get_master(dev);
1584 struct svc_i3c_master *master = to_svc_i3c_master(m);
1585 struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1586 unsigned long flags;
1587
1588 spin_lock_irqsave(&master->ibi.lock, flags);
1589 master->ibi.slots[data->ibi] = NULL;
1590 data->ibi = -1;
1591 spin_unlock_irqrestore(&master->ibi.lock, flags);
1592
1593 i3c_generic_ibi_free_pool(data->ibi_pool);
1594 }
1595
1596 static int svc_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
1597 {
1598 struct i3c_master_controller *m = i3c_dev_get_master(dev);
1599 struct svc_i3c_master *master = to_svc_i3c_master(m);
1600 int ret;
1601
1602 ret = pm_runtime_resume_and_get(master->dev);
1603 if (ret < 0) {
1604 dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
1605 return ret;
1606 }
1607
1608 master->enabled_events++;
1609 svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
1610
1611 return i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
1612 }
1613
1614 static int svc_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
1615 {
1616 struct i3c_master_controller *m = i3c_dev_get_master(dev);
1617 struct svc_i3c_master *master = to_svc_i3c_master(m);
1618 int ret;
1619
1620 master->enabled_events--;
1621 if (!master->enabled_events)
1622 svc_i3c_master_disable_interrupts(master);
1623
1624 ret = i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
1625
1626 pm_runtime_mark_last_busy(master->dev);
1627 pm_runtime_put_autosuspend(master->dev);
1628
1629 return ret;
1630 }
1631
1632 static int svc_i3c_master_enable_hotjoin(struct i3c_master_controller *m)
1633 {
1634 struct svc_i3c_master *master = to_svc_i3c_master(m);
1635 int ret;
1636
1637 ret = pm_runtime_resume_and_get(master->dev);
1638 if (ret < 0) {
1639 dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
1640 return ret;
1641 }
1642
1643 master->enabled_events |= SVC_I3C_EVENT_HOTJOIN;
1644
1645 svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
1646
1647 return 0;
1648 }
1649
1650 static int svc_i3c_master_disable_hotjoin(struct i3c_master_controller *m)
1651 {
1652 struct svc_i3c_master *master = to_svc_i3c_master(m);
1653
1654 master->enabled_events &= ~SVC_I3C_EVENT_HOTJOIN;
1655
1656 if (!master->enabled_events)
1657 svc_i3c_master_disable_interrupts(master);
1658
1659 pm_runtime_mark_last_busy(master->dev);
1660 pm_runtime_put_autosuspend(master->dev);
1661
1662 return 0;
1663 }
1664
1665 static void svc_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
1666 struct i3c_ibi_slot *slot)
1667 {
1668 struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1669
1670 i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
1671 }
1672
1673 static const struct i3c_master_controller_ops svc_i3c_master_ops = {
1674 .bus_init = svc_i3c_master_bus_init,
1675 .bus_cleanup = svc_i3c_master_bus_cleanup,
1676 .attach_i3c_dev = svc_i3c_master_attach_i3c_dev,
1677 .detach_i3c_dev = svc_i3c_master_detach_i3c_dev,
1678 .reattach_i3c_dev = svc_i3c_master_reattach_i3c_dev,
1679 .attach_i2c_dev = svc_i3c_master_attach_i2c_dev,
1680 .detach_i2c_dev = svc_i3c_master_detach_i2c_dev,
1681 .do_daa = svc_i3c_master_do_daa,
1682 .supports_ccc_cmd = svc_i3c_master_supports_ccc_cmd,
1683 .send_ccc_cmd = svc_i3c_master_send_ccc_cmd,
1684 .priv_xfers = svc_i3c_master_priv_xfers,
1685 .i2c_xfers = svc_i3c_master_i2c_xfers,
1686 .request_ibi = svc_i3c_master_request_ibi,
1687 .free_ibi = svc_i3c_master_free_ibi,
1688 .recycle_ibi_slot = svc_i3c_master_recycle_ibi_slot,
1689 .enable_ibi = svc_i3c_master_enable_ibi,
1690 .disable_ibi = svc_i3c_master_disable_ibi,
1691 .enable_hotjoin = svc_i3c_master_enable_hotjoin,
1692 .disable_hotjoin = svc_i3c_master_disable_hotjoin,
1693 .set_speed = svc_i3c_master_set_speed,
1694 };
1695
1696 static int svc_i3c_master_prepare_clks(struct svc_i3c_master *master)
1697 {
1698 int ret = 0;
1699
1700 ret = clk_prepare_enable(master->pclk);
1701 if (ret)
1702 return ret;
1703
1704 ret = clk_prepare_enable(master->fclk);
1705 if (ret) {
1706 clk_disable_unprepare(master->pclk);
1707 return ret;
1708 }
1709
1710 ret = clk_prepare_enable(master->sclk);
1711 if (ret) {
1712 clk_disable_unprepare(master->pclk);
1713 clk_disable_unprepare(master->fclk);
1714 return ret;
1715 }
1716
1717 return 0;
1718 }
1719
1720 static void svc_i3c_master_unprepare_clks(struct svc_i3c_master *master)
1721 {
1722 clk_disable_unprepare(master->pclk);
1723 clk_disable_unprepare(master->fclk);
1724 clk_disable_unprepare(master->sclk);
1725 }
1726
1727 static int svc_i3c_master_probe(struct platform_device *pdev)
1728 {
1729 struct device *dev = &pdev->dev;
1730 struct svc_i3c_master *master;
1731 int ret;
1732
1733 master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
1734 if (!master)
1735 return -ENOMEM;
1736
1737 master->regs = devm_platform_ioremap_resource(pdev, 0);
1738 if (IS_ERR(master->regs))
1739 return PTR_ERR(master->regs);
1740
1741 master->pclk = devm_clk_get(dev, "pclk");
1742 if (IS_ERR(master->pclk))
1743 return PTR_ERR(master->pclk);
1744
1745 master->fclk = devm_clk_get(dev, "fast_clk");
1746 if (IS_ERR(master->fclk))
1747 return PTR_ERR(master->fclk);
1748
1749 master->sclk = devm_clk_get(dev, "slow_clk");
1750 if (IS_ERR(master->sclk))
1751 return PTR_ERR(master->sclk);
1752
1753 master->irq = platform_get_irq(pdev, 0);
1754 if (master->irq < 0)
1755 return master->irq;
1756
1757 master->dev = dev;
1758
1759 ret = svc_i3c_master_prepare_clks(master);
1760 if (ret)
1761 return ret;
1762
1763 INIT_WORK(&master->hj_work, svc_i3c_master_hj_work);
1764 INIT_WORK(&master->ibi_work, svc_i3c_master_ibi_work);
1765 mutex_init(&master->lock);
1766
1767 ret = devm_request_irq(dev, master->irq, svc_i3c_master_irq_handler,
1768 IRQF_NO_SUSPEND, "svc-i3c-irq", master);
1769 if (ret)
1770 goto err_disable_clks;
1771
1772 master->free_slots = GENMASK(SVC_I3C_MAX_DEVS - 1, 0);
1773
1774 spin_lock_init(&master->xferqueue.lock);
1775 INIT_LIST_HEAD(&master->xferqueue.list);
1776
1777 spin_lock_init(&master->ibi.lock);
1778 master->ibi.num_slots = SVC_I3C_MAX_DEVS;
1779 master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
1780 sizeof(*master->ibi.slots),
1781 GFP_KERNEL);
1782 if (!master->ibi.slots) {
1783 ret = -ENOMEM;
1784 goto err_disable_clks;
1785 }
1786
1787 platform_set_drvdata(pdev, master);
1788
1789 pm_runtime_set_autosuspend_delay(&pdev->dev, SVC_I3C_PM_TIMEOUT_MS);
1790 pm_runtime_use_autosuspend(&pdev->dev);
1791 pm_runtime_get_noresume(&pdev->dev);
1792 pm_runtime_set_active(&pdev->dev);
1793 pm_runtime_enable(&pdev->dev);
1794
1795 svc_i3c_master_reset(master);
1796
1797 /* Register the master */
1798 ret = i3c_master_register(&master->base, &pdev->dev,
1799 &svc_i3c_master_ops, false);
1800 if (ret)
1801 goto rpm_disable;
1802
1803 pm_runtime_mark_last_busy(&pdev->dev);
1804 pm_runtime_put_autosuspend(&pdev->dev);
1805
1806 return 0;
1807
1808 rpm_disable:
1809 pm_runtime_dont_use_autosuspend(&pdev->dev);
1810 pm_runtime_put_noidle(&pdev->dev);
1811 pm_runtime_disable(&pdev->dev);
1812 pm_runtime_set_suspended(&pdev->dev);
1813
1814 err_disable_clks:
1815 svc_i3c_master_unprepare_clks(master);
1816
1817 return ret;
1818 }
1819
1820 static void svc_i3c_master_remove(struct platform_device *pdev)
1821 {
1822 struct svc_i3c_master *master = platform_get_drvdata(pdev);
1823
1824 cancel_work_sync(&master->hj_work);
1825 i3c_master_unregister(&master->base);
1826
1827 pm_runtime_dont_use_autosuspend(&pdev->dev);
1828 pm_runtime_disable(&pdev->dev);
1829 }
1830
1831 static void svc_i3c_save_regs(struct svc_i3c_master *master)
1832 {
1833 master->saved_regs.mconfig = readl(master->regs + SVC_I3C_MCONFIG);
1834 master->saved_regs.mdynaddr = readl(master->regs + SVC_I3C_MDYNADDR);
1835 }
1836
1837 static void svc_i3c_restore_regs(struct svc_i3c_master *master)
1838 {
1839 if (readl(master->regs + SVC_I3C_MDYNADDR) !=
1840 master->saved_regs.mdynaddr) {
1841 writel(master->saved_regs.mconfig,
1842 master->regs + SVC_I3C_MCONFIG);
1843 writel(master->saved_regs.mdynaddr,
1844 master->regs + SVC_I3C_MDYNADDR);
1845 }
1846 }
1847
1848 static int __maybe_unused svc_i3c_runtime_suspend(struct device *dev)
1849 {
1850 struct svc_i3c_master *master = dev_get_drvdata(dev);
1851
1852 svc_i3c_save_regs(master);
1853 svc_i3c_master_unprepare_clks(master);
1854 pinctrl_pm_select_sleep_state(dev);
1855
1856 return 0;
1857 }
1858
1859 static int __maybe_unused svc_i3c_runtime_resume(struct device *dev)
1860 {
1861 struct svc_i3c_master *master = dev_get_drvdata(dev);
1862
1863 pinctrl_pm_select_default_state(dev);
1864 svc_i3c_master_prepare_clks(master);
1865
1866 svc_i3c_restore_regs(master);
1867
1868 return 0;
1869 }
1870
1871 static const struct dev_pm_ops svc_i3c_pm_ops = {
1872 SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
1873 pm_runtime_force_resume)
1874 SET_RUNTIME_PM_OPS(svc_i3c_runtime_suspend,
1875 svc_i3c_runtime_resume, NULL)
1876 };
1877
1878 static const struct of_device_id svc_i3c_master_of_match_tbl[] = {
1879 { .compatible = "silvaco,i3c-master" },
1880 { /* sentinel */ },
1881 };
1882 MODULE_DEVICE_TABLE(of, svc_i3c_master_of_match_tbl);
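/*
 * Minimal device-tree sketch for this driver. The unit address, reg range,
 * interrupt and clock phandles below are placeholders, derived only from the
 * resources requested in svc_i3c_master_probe():
 *
 *	i3c-master@20000000 {
 *		compatible = "silvaco,i3c-master";
 *		reg = <0x20000000 0x1000>;
 *		interrupts = <...>;
 *		clocks = <&pclk>, <&fast_clk>, <&slow_clk>;
 *		clock-names = "pclk", "fast_clk", "slow_clk";
 *	};
 */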
1883
1884 static struct platform_driver svc_i3c_master = {
1885 .probe = svc_i3c_master_probe,
1886 .remove_new = svc_i3c_master_remove,
1887 .driver = {
1888 .name = "silvaco-i3c-master",
1889 .of_match_table = svc_i3c_master_of_match_tbl,
1890 .pm = &svc_i3c_pm_ops,
1891 },
1892 };
1893 module_platform_driver(svc_i3c_master);
1894
1895 MODULE_AUTHOR("Conor Culhane <conor.culhane@silvaco.com>");
1896 MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
1897 MODULE_DESCRIPTION("Silvaco dual-role I3C master driver");
1898 MODULE_LICENSE("GPL v2");
1899