// SPDX-License-Identifier: GPL-2.0
/*
 * Silvaco dual-role I3C master driver
 *
 * Copyright (C) 2020 Silvaco
 * Author: Miquel RAYNAL <miquel.raynal@bootlin.com>
 * Based on a work from: Conor Culhane <conor.culhane@silvaco.com>
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/i3c/master.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

/* Master Mode Registers */
#define SVC_I3C_MCONFIG      0x000
#define   SVC_I3C_MCONFIG_MASTER_EN BIT(0)
#define   SVC_I3C_MCONFIG_DISTO(x) FIELD_PREP(BIT(3), (x))
#define   SVC_I3C_MCONFIG_HKEEP(x) FIELD_PREP(GENMASK(5, 4), (x))
#define   SVC_I3C_MCONFIG_ODSTOP(x) FIELD_PREP(BIT(6), (x))
#define   SVC_I3C_MCONFIG_PPBAUD(x) FIELD_PREP(GENMASK(11, 8), (x))
#define   SVC_I3C_MCONFIG_PPLOW(x) FIELD_PREP(GENMASK(15, 12), (x))
#define   SVC_I3C_MCONFIG_ODBAUD(x) FIELD_PREP(GENMASK(23, 16), (x))
#define   SVC_I3C_MCONFIG_ODHPP(x) FIELD_PREP(BIT(24), (x))
#define   SVC_I3C_MCONFIG_SKEW(x) FIELD_PREP(GENMASK(27, 25), (x))
#define   SVC_I3C_MCONFIG_I2CBAUD(x) FIELD_PREP(GENMASK(31, 28), (x))

#define SVC_I3C_MCTRL        0x084
#define   SVC_I3C_MCTRL_REQUEST_MASK GENMASK(2, 0)
#define   SVC_I3C_MCTRL_REQUEST_NONE 0
#define   SVC_I3C_MCTRL_REQUEST_START_ADDR 1
#define   SVC_I3C_MCTRL_REQUEST_STOP 2
#define   SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK 3
#define   SVC_I3C_MCTRL_REQUEST_PROC_DAA 4
#define   SVC_I3C_MCTRL_REQUEST_AUTO_IBI 7
#define   SVC_I3C_MCTRL_TYPE_I3C 0
#define   SVC_I3C_MCTRL_TYPE_I2C BIT(4)
#define   SVC_I3C_MCTRL_IBIRESP_AUTO 0
#define   SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE 0
#define   SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE BIT(7)
#define   SVC_I3C_MCTRL_IBIRESP_NACK BIT(6)
#define   SVC_I3C_MCTRL_IBIRESP_MANUAL GENMASK(7, 6)
#define   SVC_I3C_MCTRL_DIR(x) FIELD_PREP(BIT(8), (x))
#define   SVC_I3C_MCTRL_DIR_WRITE 0
#define   SVC_I3C_MCTRL_DIR_READ 1
#define   SVC_I3C_MCTRL_ADDR(x) FIELD_PREP(GENMASK(15, 9), (x))
#define   SVC_I3C_MCTRL_RDTERM(x) FIELD_PREP(GENMASK(23, 16), (x))

#define SVC_I3C_MSTATUS      0x088
#define   SVC_I3C_MSTATUS_STATE(x) FIELD_GET(GENMASK(2, 0), (x))
#define   SVC_I3C_MSTATUS_STATE_DAA(x) (SVC_I3C_MSTATUS_STATE(x) == 5)
#define   SVC_I3C_MSTATUS_STATE_IDLE(x) (SVC_I3C_MSTATUS_STATE(x) == 0)
#define   SVC_I3C_MSTATUS_BETWEEN(x) FIELD_GET(BIT(4), (x))
#define   SVC_I3C_MSTATUS_NACKED(x) FIELD_GET(BIT(5), (x))
#define   SVC_I3C_MSTATUS_IBITYPE(x) FIELD_GET(GENMASK(7, 6), (x))
#define     SVC_I3C_MSTATUS_IBITYPE_IBI 1
#define     SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST 2
#define     SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN 3
/* Interrupt bits: shared between MSTATUS flags and MINTSET/MINTCLR masks */
#define   SVC_I3C_MINT_SLVSTART BIT(8)
#define   SVC_I3C_MINT_MCTRLDONE BIT(9)
#define   SVC_I3C_MINT_COMPLETE BIT(10)
#define   SVC_I3C_MINT_RXPEND BIT(11)
#define   SVC_I3C_MINT_TXNOTFULL BIT(12)
#define   SVC_I3C_MINT_IBIWON BIT(13)
#define   SVC_I3C_MINT_ERRWARN BIT(15)
#define   SVC_I3C_MSTATUS_SLVSTART(x) FIELD_GET(SVC_I3C_MINT_SLVSTART, (x))
#define   SVC_I3C_MSTATUS_MCTRLDONE(x) FIELD_GET(SVC_I3C_MINT_MCTRLDONE, (x))
#define   SVC_I3C_MSTATUS_COMPLETE(x) FIELD_GET(SVC_I3C_MINT_COMPLETE, (x))
#define   SVC_I3C_MSTATUS_RXPEND(x) FIELD_GET(SVC_I3C_MINT_RXPEND, (x))
#define   SVC_I3C_MSTATUS_TXNOTFULL(x) FIELD_GET(SVC_I3C_MINT_TXNOTFULL, (x))
#define   SVC_I3C_MSTATUS_IBIWON(x) FIELD_GET(SVC_I3C_MINT_IBIWON, (x))
#define   SVC_I3C_MSTATUS_ERRWARN(x) FIELD_GET(SVC_I3C_MINT_ERRWARN, (x))
#define   SVC_I3C_MSTATUS_IBIADDR(x) FIELD_GET(GENMASK(30, 24), (x))

#define SVC_I3C_IBIRULES     0x08C
/* Each of the 5 slots holds a 6-bit address (low 6 bits only, see MSB0) */
#define   SVC_I3C_IBIRULES_ADDR(slot, addr)	FIELD_PREP(GENMASK(29, 0), \
						((addr) & 0x3F) << ((slot) * 6))
#define   SVC_I3C_IBIRULES_ADDRS 5
#define   SVC_I3C_IBIRULES_MSB0 BIT(30)
#define   SVC_I3C_IBIRULES_NOBYTE BIT(31)
#define   SVC_I3C_IBIRULES_MANDBYTE 0
#define SVC_I3C_MINTSET      0x090
#define SVC_I3C_MINTCLR      0x094
#define SVC_I3C_MINTMASKED   0x098
#define SVC_I3C_MERRWARN     0x09C
#define   SVC_I3C_MERRWARN_NACK BIT(2)
#define   SVC_I3C_MERRWARN_TIMEOUT BIT(20)
#define SVC_I3C_MDMACTRL     0x0A0
#define SVC_I3C_MDATACTRL    0x0AC
#define   SVC_I3C_MDATACTRL_FLUSHTB BIT(0)
#define   SVC_I3C_MDATACTRL_FLUSHRB BIT(1)
#define   SVC_I3C_MDATACTRL_UNLOCK_TRIG BIT(3)
#define   SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL GENMASK(5, 4)
#define   SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY 0
#define   SVC_I3C_MDATACTRL_RXCOUNT(x) FIELD_GET(GENMASK(28, 24), (x))
#define   SVC_I3C_MDATACTRL_TXFULL BIT(30)
#define   SVC_I3C_MDATACTRL_RXEMPTY BIT(31)

#define SVC_I3C_MWDATAB      0x0B0
#define   SVC_I3C_MWDATAB_END BIT(8)

#define SVC_I3C_MWDATABE     0x0B4
#define SVC_I3C_MWDATAH      0x0B8
#define SVC_I3C_MWDATAHE     0x0BC
#define SVC_I3C_MRDATAB      0x0C0
#define SVC_I3C_MRDATAH      0x0C8
#define SVC_I3C_MWMSG_SDR    0x0D0
#define SVC_I3C_MRMSG_SDR    0x0D4
#define SVC_I3C_MWMSG_DDR    0x0D8
#define SVC_I3C_MRMSG_DDR    0x0DC

#define SVC_I3C_MDYNADDR     0x0E4
#define   SVC_MDYNADDR_VALID BIT(0)
#define   SVC_MDYNADDR_ADDR(x) FIELD_PREP(GENMASK(7, 1), (x))

#define SVC_I3C_MAX_DEVS 32
#define SVC_I3C_PM_TIMEOUT_MS 1000

/* This parameter depends on the implementation and may be tuned */
#define SVC_I3C_FIFO_SIZE 16

/* Bits of svc_i3c_master.enabled_events: one per IBI slot, plus Hot-Join */
#define SVC_I3C_EVENT_IBI	GENMASK(7, 0)
#define SVC_I3C_EVENT_HOTJOIN	BIT(31)

/*
 * struct svc_i3c_cmd - one command of a queued transfer
 * @addr: target address on the bus
 * @rnw: true for a read (Read-not-Write), false for a write
 * @in: receive buffer (reads)
 * @out: transmit buffer (writes)
 * @len: requested transfer length in bytes
 * @actual_len: number of bytes actually transferred
 * @xfer: private transfer this command belongs to, if any
 * @continued: true when another command follows without a STOP
 *             (i.e. a repeated START is expected)
 */
struct svc_i3c_cmd {
	u8 addr;
	bool rnw;
	u8 *in;
	const void *out;
	unsigned int len;
	unsigned int actual_len;
	struct i3c_priv_xfer *xfer;
	bool continued;
};

/*
 * struct svc_i3c_xfer - a queued transfer made of one or more commands
 * @node: member of svc_i3c_master.xferqueue.list
 * @comp: completed when the whole transfer is done
 * @ret: transfer status
 * @type: SVC_I3C_MCTRL_TYPE_I3C or SVC_I3C_MCTRL_TYPE_I2C
 * @ncmds: number of entries in @cmds
 * @cmds: flexible array of commands
 */
struct svc_i3c_xfer {
	struct list_head node;
	struct completion comp;
	int ret;
	unsigned int type;
	unsigned int ncmds;
	struct svc_i3c_cmd cmds[];
};

/* Registers saved/restored across runtime PM suspend */
struct svc_i3c_regs_save {
	u32 mconfig;
	u32 mdynaddr;
};

/**
 * struct svc_i3c_master - Silvaco I3C Master structure
 * @base: I3C master controller
 * @dev: Corresponding device
 * @regs: Memory mapping
 * @saved_regs: Volatile values for PM operations
 * @free_slots: Bit array of available slots
 * @addrs: Array containing the dynamic addresses of each attached device
 * @descs: Array of descriptors, one per attached device
 * @hj_work: Hot-join work
 * @ibi_work: IBI work
 * @irq: Main interrupt
 * @pclk: System clock
 * @fclk: Fast clock (bus)
 * @sclk: Slow clock (other events)
 * @xferqueue: Transfer queue structure
 * @xferqueue.list: List member
 * @xferqueue.cur: Current ongoing transfer
 * @xferqueue.lock: Queue lock
 * @ibi: IBI structure
 * @ibi.num_slots: Number of slots available in @ibi.slots
 * @ibi.slots: Available IBI slots
 * @ibi.tbq_slot: To be queued IBI slot
 * @ibi.lock: IBI lock
 * @lock: Transfer lock, protect between IBI work thread and callbacks from master
 * @enabled_events: Bit masks for enable events (IBI, HotJoin).
 * @mctrl_config: Saved SVC_I3C_MCONFIG value, used to restore the bus speed.
 */
struct svc_i3c_master {
	struct i3c_master_controller base;
	struct device *dev;
	void __iomem *regs;
	struct svc_i3c_regs_save saved_regs;
	u32 free_slots;
	u8 addrs[SVC_I3C_MAX_DEVS];
	struct i3c_dev_desc *descs[SVC_I3C_MAX_DEVS];
	struct work_struct hj_work;
	struct work_struct ibi_work;
	int irq;
	struct clk *pclk;
	struct clk *fclk;
	struct clk *sclk;
	struct {
		struct list_head list;
		struct svc_i3c_xfer *cur;
		/* Prevent races between transfers */
		spinlock_t lock;
	} xferqueue;
	struct {
		unsigned int num_slots;
		struct i3c_dev_desc **slots;
		struct i3c_ibi_slot *tbq_slot;
		/* Prevent races within IBI handlers */
		spinlock_t lock;
	} ibi;
	struct mutex lock;
	u32 enabled_events;
	u32 mctrl_config;
};

/**
 * struct svc_i3c_i2c_dev_data - Device specific data
 * @index: Index in the master tables corresponding to this device
 * @ibi: IBI slot index in the master structure
 * @ibi_pool: IBI pool associated to this device
 */
struct svc_i3c_i2c_dev_data {
	u8 index;
	int ibi;
	struct i3c_generic_ibi_pool *ibi_pool;
};

/* True when at least one of the events in @mask is currently enabled */
static inline bool is_events_enabled(struct svc_i3c_master *master, u32 mask)
{
	return !!(master->enabled_events & mask);
}

/*
 * Check MSTATUS/MERRWARN for an error condition and clear it (MERRWARN is
 * write-1-to-clear). Returns true on a real error; TIMEOUT is treated as a
 * warning only and returns false.
 */
static bool svc_i3c_master_error(struct svc_i3c_master *master)
{
	u32 mstatus, merrwarn;

	mstatus = readl(master->regs + SVC_I3C_MSTATUS);
	if (SVC_I3C_MSTATUS_ERRWARN(mstatus)) {
		merrwarn = readl(master->regs + SVC_I3C_MERRWARN);
		writel(merrwarn, master->regs + SVC_I3C_MERRWARN);

		/* Ignore timeout error */
		if (merrwarn & SVC_I3C_MERRWARN_TIMEOUT) {
			dev_dbg(master->dev, "Warning condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
				mstatus, merrwarn);
			return false;
		}

		dev_err(master->dev,
			"Error condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
			mstatus, merrwarn);

		return true;
	}

	return false;
}

static void svc_i3c_master_enable_interrupts(struct svc_i3c_master *master, u32 mask)
{
	writel(mask, master->regs + SVC_I3C_MINTSET);
}

/* Disable every interrupt currently enabled in MINTSET */
static void svc_i3c_master_disable_interrupts(struct svc_i3c_master *master)
{
	u32 mask = readl(master->regs + SVC_I3C_MINTSET);

	writel(mask, master->regs + SVC_I3C_MINTCLR);
}

static void svc_i3c_master_clear_merrwarn(struct svc_i3c_master *master)
{
	/* Clear pending warnings (write-1-to-clear) */
	writel(readl(master->regs + SVC_I3C_MERRWARN),
	       master->regs + SVC_I3C_MERRWARN);
}

static void svc_i3c_master_flush_fifo(struct svc_i3c_master *master)
{
	/* Flush FIFOs */
	writel(SVC_I3C_MDATACTRL_FLUSHTB | SVC_I3C_MDATACTRL_FLUSHRB,
	       master->regs + SVC_I3C_MDATACTRL);
}

static void svc_i3c_master_reset_fifo_trigger(struct svc_i3c_master *master)
{
	u32 reg;

	/* Set RX and TX trigger levels, flush FIFOs */
	reg = SVC_I3C_MDATACTRL_FLUSHTB |
	      SVC_I3C_MDATACTRL_FLUSHRB |
	      SVC_I3C_MDATACTRL_UNLOCK_TRIG |
	      SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL |
	      SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY;
	writel(reg, master->regs + SVC_I3C_MDATACTRL);
}

/* Bring the controller to a known quiescent state */
static void svc_i3c_master_reset(struct svc_i3c_master *master)
{
	svc_i3c_master_clear_merrwarn(master);
	svc_i3c_master_reset_fifo_trigger(master);
	svc_i3c_master_disable_interrupts(master);
}

static inline struct svc_i3c_master *
to_svc_i3c_master(struct i3c_master_controller *master)
{
	return container_of(master, struct svc_i3c_master, base);
}

/* Hot-join handler: re-run DAA so the newly joined device gets an address */
static void svc_i3c_master_hj_work(struct work_struct *work)
{
	struct svc_i3c_master *master;

	master = container_of(work, struct svc_i3c_master, hj_work);
	i3c_master_do_daa(&master->base);
}

/* Look up the device descriptor matching a dynamic address, or NULL */
static struct i3c_dev_desc *
svc_i3c_master_dev_from_addr(struct svc_i3c_master *master,
			     unsigned int ibiaddr)
{
	int i;

	for (i = 0; i < SVC_I3C_MAX_DEVS; i++)
		if (master->addrs[i] == ibiaddr)
			break;

	if (i == SVC_I3C_MAX_DEVS)
		return NULL;

	return master->descs[i];
}

static void svc_i3c_master_emit_stop(struct svc_i3c_master *master)
{
	writel(SVC_I3C_MCTRL_REQUEST_STOP, master->regs + SVC_I3C_MCTRL);

	/*
	 * This delay is necessary after the emission of a stop, otherwise eg.
	 * repeating IBIs do not get detected. There is a note in the manual
	 * about it, stating that the stop condition might not be settled
	 * correctly if a start condition follows too rapidly.
	 */
	udelay(1);
}

/*
 * Drain the IBI payload from the RX FIFO into a free slot of the device's
 * IBI pool. The filled slot is parked in master->ibi.tbq_slot; it is queued
 * to the core (or recycled on error) later in svc_i3c_master_ibi_work().
 * Returns 0 on success, -ENOSPC if no free slot, or a poll-timeout error.
 */
static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
				     struct i3c_dev_desc *dev)
{
	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	struct i3c_ibi_slot *slot;
	unsigned int count;
	u32 mdatactrl;
	int ret, val;
	u8 *buf;

	slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
	if (!slot)
		return -ENOSPC;

	slot->len = 0;
	buf = slot->data;

	ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val,
						SVC_I3C_MSTATUS_COMPLETE(val), 0, 1000);
	if (ret) {
		dev_err(master->dev, "Timeout when polling for COMPLETE\n");
		return ret;
	}

	while (SVC_I3C_MSTATUS_RXPEND(readl(master->regs + SVC_I3C_MSTATUS)) &&
	       slot->len < SVC_I3C_FIFO_SIZE) {
		mdatactrl = readl(master->regs + SVC_I3C_MDATACTRL);
		count = SVC_I3C_MDATACTRL_RXCOUNT(mdatactrl);
		/*
		 * NOTE(review): RXCOUNT looks like a byte count and MRDATAB is
		 * the byte-wide data register, but readsl() transfers `count`
		 * 32-bit words — verify against the datasheet that this does
		 * not over-read into `buf` (readsb() may be intended).
		 */
		readsl(master->regs + SVC_I3C_MRDATAB, buf, count);
		slot->len += count;
		buf += count;
	}

	master->ibi.tbq_slot = slot;

	return 0;
}

/* ACK an in-flight IBI, with or without the mandatory data byte */
static void svc_i3c_master_ack_ibi(struct svc_i3c_master *master,
				   bool mandatory_byte)
{
	unsigned int ibi_ack_nack;

	ibi_ack_nack = SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK;
	if (mandatory_byte)
		ibi_ack_nack |= SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE;
	else
		ibi_ack_nack |= SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE;

	writel(ibi_ack_nack, master->regs + SVC_I3C_MCTRL);
}

static void svc_i3c_master_nack_ibi(struct svc_i3c_master *master)
{
	writel(SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK |
	       SVC_I3C_MCTRL_IBIRESP_NACK,
	       master->regs + SVC_I3C_MCTRL);
}

/*
 * Workqueue handler scheduled from the SLVSTART hard IRQ: identify the
 * in-band event (IBI, Hot-Join or Mastership request), respond to it and
 * hand payloads over to the I3C core. Runs under master->lock to serialize
 * with regular transfers.
 */
static void svc_i3c_master_ibi_work(struct work_struct *work)
{
	struct svc_i3c_master *master = container_of(work, struct svc_i3c_master, ibi_work);
	struct svc_i3c_i2c_dev_data *data;
	unsigned int ibitype, ibiaddr;
	struct i3c_dev_desc *dev;
	u32 status, val;
	int ret;

	mutex_lock(&master->lock);
	/*
	 * IBIWON may be set before SVC_I3C_MCTRL_REQUEST_AUTO_IBI, causing
	 * readl_relaxed_poll_timeout() to return immediately. Consequently,
	 * ibitype will be 0 since it was last updated only after the 8th SCL
	 * cycle, leading to missed client IBI handlers.
	 *
	 * A typical scenario is when IBIWON occurs and bus arbitration is lost
	 * at svc_i3c_master_priv_xfers().
	 *
	 * Clear SVC_I3C_MINT_IBIWON before sending SVC_I3C_MCTRL_REQUEST_AUTO_IBI.
	 */
	writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);

	/* Acknowledge the incoming interrupt with the AUTOIBI mechanism */
	writel(SVC_I3C_MCTRL_REQUEST_AUTO_IBI |
	       SVC_I3C_MCTRL_IBIRESP_AUTO,
	       master->regs + SVC_I3C_MCTRL);

	/* Wait for IBIWON, should take approximately 100us */
	ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val,
					 SVC_I3C_MSTATUS_IBIWON(val), 0, 1000);
	if (ret) {
		dev_err(master->dev, "Timeout when polling for IBIWON\n");
		svc_i3c_master_emit_stop(master);
		goto reenable_ibis;
	}

	status = readl(master->regs + SVC_I3C_MSTATUS);
	ibitype = SVC_I3C_MSTATUS_IBITYPE(status);
	ibiaddr = SVC_I3C_MSTATUS_IBIADDR(status);

	/* Handle the critical responses to IBI's */
	switch (ibitype) {
	case SVC_I3C_MSTATUS_IBITYPE_IBI:
		dev = svc_i3c_master_dev_from_addr(master, ibiaddr);
		if (!dev || !is_events_enabled(master, SVC_I3C_EVENT_IBI))
			svc_i3c_master_nack_ibi(master);
		else
			svc_i3c_master_handle_ibi(master, dev);
		break;
	case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
		if (is_events_enabled(master, SVC_I3C_EVENT_HOTJOIN))
			svc_i3c_master_ack_ibi(master, false);
		else
			svc_i3c_master_nack_ibi(master);
		break;
	case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
		svc_i3c_master_nack_ibi(master);
		break;
	default:
		break;
	}

	/*
	 * If an error happened, we probably got interrupted and the exchange
	 * timed out. In this case we just drop everything, emit a stop and wait
	 * for the slave to interrupt again.
	 */
	if (svc_i3c_master_error(master)) {
		if (master->ibi.tbq_slot) {
			/*
			 * tbq_slot is only set by svc_i3c_master_handle_ibi(),
			 * i.e. in the IBITYPE_IBI case above where `dev` was
			 * looked up, so `dev` is valid here.
			 */
			data = i3c_dev_get_master_data(dev);
			i3c_generic_ibi_recycle_slot(data->ibi_pool,
						     master->ibi.tbq_slot);
			master->ibi.tbq_slot = NULL;
		}

		svc_i3c_master_emit_stop(master);

		goto reenable_ibis;
	}

	/* Handle the non critical tasks */
	switch (ibitype) {
	case SVC_I3C_MSTATUS_IBITYPE_IBI:
		if (dev) {
			i3c_master_queue_ibi(dev, master->ibi.tbq_slot);
			master->ibi.tbq_slot = NULL;
		}
		svc_i3c_master_emit_stop(master);
		break;
	case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
		svc_i3c_master_emit_stop(master);
		if (is_events_enabled(master, SVC_I3C_EVENT_HOTJOIN))
			queue_work(master->base.wq, &master->hj_work);
		break;
	case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
	default:
		break;
	}

reenable_ibis:
	svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
	mutex_unlock(&master->lock);
}

/*
 * Hard IRQ handler: only SLVSTART is handled here; the actual IBI
 * processing is deferred to svc_i3c_master_ibi_work() since it needs to
 * sleep (polling, mutex).
 */
static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id)
{
	struct svc_i3c_master *master = (struct svc_i3c_master *)dev_id;
	u32 active = readl(master->regs + SVC_I3C_MSTATUS);

	if (!SVC_I3C_MSTATUS_SLVSTART(active))
		return IRQ_NONE;

	/* Clear the interrupt status */
	writel(SVC_I3C_MINT_SLVSTART, master->regs + SVC_I3C_MSTATUS);

	svc_i3c_master_disable_interrupts(master);

	/* Handle the interrupt in a non atomic context */
	queue_work(master->base.wq, &master->ibi_work);

	return IRQ_HANDLED;
}

/*
 * Switch the open-drain SCL speed: SLOW for the first broadcast so legacy
 * I2C devices still see it, then back to the normal (cached) configuration.
 */
static int svc_i3c_master_set_speed(struct i3c_master_controller *m,
				    enum i3c_open_drain_speed speed)
{
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	struct i3c_bus *bus = i3c_master_get_bus(&master->base);
	u32 ppbaud, odbaud, odhpp, mconfig;
	unsigned long fclk_rate;
	int ret;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
		return ret;
	}

	switch (speed) {
	case I3C_OPEN_DRAIN_SLOW_SPEED:
		fclk_rate = clk_get_rate(master->fclk);
		if (!fclk_rate) {
			ret = -EINVAL;
			goto rpm_out;
		}
		/*
		 * Set 50% duty-cycle I2C speed to I3C OPEN-DRAIN mode, so the first
		 * broadcast address is visible to all I2C/I3C devices on the I3C bus.
		 * I3C device working as a I2C device will turn off its 50ns Spike
		 * Filter to change to I3C mode.
		 */
		mconfig = master->mctrl_config;
		ppbaud = FIELD_GET(GENMASK(11, 8), mconfig);
		odhpp = 0;
		odbaud = DIV_ROUND_UP(fclk_rate, bus->scl_rate.i2c * (2 + 2 * ppbaud)) - 1;
		mconfig &= ~GENMASK(24, 16);
		mconfig |= SVC_I3C_MCONFIG_ODBAUD(odbaud) | SVC_I3C_MCONFIG_ODHPP(odhpp);
		writel(mconfig, master->regs + SVC_I3C_MCONFIG);
		break;
	case I3C_OPEN_DRAIN_NORMAL_SPEED:
		writel(master->mctrl_config, master->regs + SVC_I3C_MCONFIG);
		break;
	}

rpm_out:
	pm_runtime_mark_last_busy(master->dev);
	pm_runtime_put_autosuspend(master->dev);

	return ret;
}

/*
 * Derive push-pull, open-drain and I2C baud dividers from the bus clock,
 * program MCONFIG, cache it in mctrl_config, then pick and publish the
 * master's own dynamic address.
 */
static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
{
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	struct i3c_bus *bus = i3c_master_get_bus(m);
	struct i3c_device_info info = {};
	unsigned long fclk_rate, fclk_period_ns;
	unsigned int high_period_ns, od_low_period_ns;
	u32 ppbaud, pplow, odhpp, odbaud, odstop, i2cbaud, reg;
	int ret;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev,
			"<%s> cannot resume i3c bus master, err: %d\n",
			__func__, ret);
		return ret;
	}

	/* Timings derivation */
	fclk_rate = clk_get_rate(master->fclk);
	if (!fclk_rate) {
		ret = -EINVAL;
		goto rpm_out;
	}

	fclk_period_ns = DIV_ROUND_UP(1000000000, fclk_rate);

	/*
	 * Using I3C Push-Pull mode, target is 12.5MHz/80ns period.
	 * Simplest configuration is using a 50% duty-cycle of 40ns.
	 */
	ppbaud = DIV_ROUND_UP(40, fclk_period_ns) - 1;
	pplow = 0;

	/*
	 * Using I3C Open-Drain mode, target is 4.17MHz/240ns with a
	 * duty-cycle tuned so that high levels are filtered out by
	 * the 50ns filter (target being 40ns).
	 */
	odhpp = 1;
	high_period_ns = (ppbaud + 1) * fclk_period_ns;
	odbaud = DIV_ROUND_UP(240 - high_period_ns, high_period_ns) - 1;
	od_low_period_ns = (odbaud + 1) * high_period_ns;

	switch (bus->mode) {
	case I3C_BUS_MODE_PURE:
		i2cbaud = 0;
		odstop = 0;
		break;
	case I3C_BUS_MODE_MIXED_FAST:
	case I3C_BUS_MODE_MIXED_LIMITED:
		/*
		 * Using I2C Fm+ mode, target is 1MHz/1000ns, the difference
		 * between the high and low period does not really matter.
		 */
		i2cbaud = DIV_ROUND_UP(1000, od_low_period_ns) - 2;
		odstop = 1;
		break;
	case I3C_BUS_MODE_MIXED_SLOW:
		/*
		 * Using I2C Fm mode, target is 0.4MHz/2500ns, with the same
		 * constraints as the FM+ mode.
		 */
		i2cbaud = DIV_ROUND_UP(2500, od_low_period_ns) - 2;
		odstop = 1;
		break;
	default:
		/*
		 * NOTE(review): ret is still 0 (pm_runtime success) here, so
		 * an unsupported bus mode returns success without programming
		 * MCONFIG — confirm whether this should be an error instead.
		 */
		goto rpm_out;
	}

	reg = SVC_I3C_MCONFIG_MASTER_EN |
	      SVC_I3C_MCONFIG_DISTO(0) |
	      SVC_I3C_MCONFIG_HKEEP(0) |
	      SVC_I3C_MCONFIG_ODSTOP(odstop) |
	      SVC_I3C_MCONFIG_PPBAUD(ppbaud) |
	      SVC_I3C_MCONFIG_PPLOW(pplow) |
	      SVC_I3C_MCONFIG_ODBAUD(odbaud) |
	      SVC_I3C_MCONFIG_ODHPP(odhpp) |
	      SVC_I3C_MCONFIG_SKEW(0) |
	      SVC_I3C_MCONFIG_I2CBAUD(i2cbaud);
	writel(reg, master->regs + SVC_I3C_MCONFIG);

	master->mctrl_config = reg;
	/* Master core's registration */
	ret = i3c_master_get_free_addr(m, 0);
	if (ret < 0)
		goto rpm_out;

	info.dyn_addr = ret;

	writel(SVC_MDYNADDR_VALID | SVC_MDYNADDR_ADDR(info.dyn_addr),
	       master->regs + SVC_I3C_MDYNADDR);

	ret = i3c_master_set_info(&master->base, &info);
	if (ret)
		goto rpm_out;

rpm_out:
	pm_runtime_mark_last_busy(master->dev);
	pm_runtime_put_autosuspend(master->dev);

	return ret;
}

static void svc_i3c_master_bus_cleanup(struct i3c_master_controller *m)
{
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	int ret;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
		return;
	}

	svc_i3c_master_disable_interrupts(master);

	/* Disable master */
	writel(0, master->regs + SVC_I3C_MCONFIG);

	pm_runtime_mark_last_busy(master->dev);
	pm_runtime_put_autosuspend(master->dev);
}

/* Allocate the lowest free device slot; returns its index or -ENOSPC */
static int svc_i3c_master_reserve_slot(struct svc_i3c_master *master)
{
	unsigned int slot;

	if (!(master->free_slots & GENMASK(SVC_I3C_MAX_DEVS - 1, 0)))
		return -ENOSPC;

	slot = ffs(master->free_slots) - 1;

	master->free_slots &= ~BIT(slot);

	return slot;
}

/* Return a slot previously taken by svc_i3c_master_reserve_slot() */
static void svc_i3c_master_release_slot(struct svc_i3c_master *master,
					unsigned int slot)
{
	master->free_slots |= BIT(slot);
}

/*
 * Register an I3C device with the master: reserve a table slot and record
 * its (dynamic, falling back to static) address.
 */
static int svc_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	struct svc_i3c_i2c_dev_data *data;
	int slot;

	slot = svc_i3c_master_reserve_slot(master);
	if (slot < 0)
		return slot;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data) {
		svc_i3c_master_release_slot(master, slot);
		return -ENOMEM;
	}

	data->ibi = -1;
	data->index = slot;
	master->addrs[slot] = dev->info.dyn_addr ? dev->info.dyn_addr :
						   dev->info.static_addr;
	master->descs[slot] = dev;

	i3c_dev_set_master_data(dev, data);

	return 0;
}

/* Refresh the cached address after the device's dynamic address changed */
static int svc_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
					   u8 old_dyn_addr)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);

	master->addrs[data->index] = dev->info.dyn_addr ? dev->info.dyn_addr :
							  dev->info.static_addr;

	return 0;
}

static void svc_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
{
	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);

	master->addrs[data->index] = 0;
	svc_i3c_master_release_slot(master, data->index);

	kfree(data);
}

static int svc_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
{
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	struct svc_i3c_i2c_dev_data *data;
	int slot;

	slot = svc_i3c_master_reserve_slot(master);
	if (slot < 0)
		return slot;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data) {
		svc_i3c_master_release_slot(master, slot);
		return -ENOMEM;
	}

	data->index = slot;
	master->addrs[slot] = dev->addr;

	i2c_dev_set_master_data(dev, data);

	return 0;
}

static void svc_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
{
	struct svc_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);

	svc_i3c_master_release_slot(master, data->index);

	kfree(data);
}

/* Read exactly @len bytes from MRDATAB, polling RXPEND before each byte */
static int svc_i3c_master_readb(struct svc_i3c_master *master, u8 *dst,
				unsigned int len)
{
	int ret, i;
	u32 reg;

	for (i = 0; i < len; i++) {
		ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
						reg,
						SVC_I3C_MSTATUS_RXPEND(reg),
						0, 1000);
		if (ret)
			return ret;

		dst[i] = readl(master->regs + SVC_I3C_MRDATAB);
	}

	return 0;
}

/*
 * Run the hardware-assisted Dynamic Address Assignment procedure. Called
 * with the xferqueue lock held. Assigned addresses are stored in @addrs and
 * the number of discovered devices in @count.
 */
static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
					u8 *addrs, unsigned int *count)
{
	u64 prov_id[SVC_I3C_MAX_DEVS] = {}, nacking_prov_id = 0;
	unsigned int dev_nb = 0, last_addr = 0;
	u32 reg;
	int ret, i;

	while (true) {
		/* Enter/proceed with DAA */
		writel(SVC_I3C_MCTRL_REQUEST_PROC_DAA |
		       SVC_I3C_MCTRL_TYPE_I3C |
		       SVC_I3C_MCTRL_IBIRESP_NACK |
		       SVC_I3C_MCTRL_DIR(SVC_I3C_MCTRL_DIR_WRITE),
		       master->regs + SVC_I3C_MCTRL);

		/*
		 * Either one slave will send its ID, or the assignment process
		 * is done.
		 */
		ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
						reg,
						SVC_I3C_MSTATUS_RXPEND(reg) |
						SVC_I3C_MSTATUS_MCTRLDONE(reg),
						1, 1000);
		if (ret)
			return ret;

		if (SVC_I3C_MSTATUS_RXPEND(reg)) {
			u8 data[6];

			/*
			 * We only care about the 48-bit provisional ID yet to
			 * be sure a device does not nack an address twice.
			 * Otherwise, we would just need to flush the RX FIFO.
			 */
			ret = svc_i3c_master_readb(master, data, 6);
			if (ret)
				return ret;

			for (i = 0; i < 6; i++)
				prov_id[dev_nb] |= (u64)(data[i]) << (8 * (5 - i));

			/* We do not care about the BCR and DCR yet */
			ret = svc_i3c_master_readb(master, data, 2);
			if (ret)
				return ret;
		} else if (SVC_I3C_MSTATUS_MCTRLDONE(reg)) {
			if (SVC_I3C_MSTATUS_STATE_IDLE(reg) &&
			    SVC_I3C_MSTATUS_COMPLETE(reg)) {
				/*
				 * All devices received and acked their dynamic
				 * address, this is the natural end of the DAA
				 * procedure.
				 */
				break;
			} else if (SVC_I3C_MSTATUS_NACKED(reg)) {
				/* No I3C devices attached */
				if (dev_nb == 0)
					break;

				/*
				 * A slave device nacked the address, this is
				 * allowed only once, DAA will be stopped and
				 * then resumed. The same device is supposed to
				 * answer again immediately and shall ack the
				 * address this time.
				 */
				if (prov_id[dev_nb] == nacking_prov_id)
					return -EIO;

				dev_nb--;
				nacking_prov_id = prov_id[dev_nb];
				svc_i3c_master_emit_stop(master);

				continue;
			} else {
				return -EIO;
			}
		}

		/* Wait for the slave to be ready to receive its address */
		ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
						reg,
						SVC_I3C_MSTATUS_MCTRLDONE(reg) &&
						SVC_I3C_MSTATUS_STATE_DAA(reg) &&
						SVC_I3C_MSTATUS_BETWEEN(reg),
						0, 1000);
		if (ret)
			return ret;

		/* Give the slave device a suitable dynamic address */
		ret = i3c_master_get_free_addr(&master->base, last_addr + 1);
		if (ret < 0)
			return ret;

		addrs[dev_nb] = ret;
		dev_dbg(master->dev, "DAA: device %d assigned to 0x%02x\n",
			dev_nb, addrs[dev_nb]);

		writel(addrs[dev_nb], master->regs + SVC_I3C_MWDATAB);
		last_addr = addrs[dev_nb++];
	}

	*count = dev_nb;

	return 0;
}

/*
 * Program the IBIRULES register so the hardware can auto-ACK IBIs. The
 * register can only describe one homogeneous set of up to 5 addresses with
 * MSb=0: either all devices with a mandatory IBI payload byte, or all
 * without. Returns -ERANGE when neither set fits.
 */
static int svc_i3c_update_ibirules(struct svc_i3c_master *master)
{
	struct i3c_dev_desc *dev;
	u32 reg_mbyte = 0, reg_nobyte = SVC_I3C_IBIRULES_NOBYTE;
	unsigned int mbyte_addr_ok = 0, mbyte_addr_ko = 0, nobyte_addr_ok = 0,
		nobyte_addr_ko = 0;
	bool list_mbyte = false, list_nobyte = false;

	/* Create the IBIRULES register for both cases */
	i3c_bus_for_each_i3cdev(&master->base.bus, dev) {
		if (I3C_BCR_DEVICE_ROLE(dev->info.bcr) == I3C_BCR_I3C_MASTER)
			continue;

		if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD) {
			reg_mbyte |= SVC_I3C_IBIRULES_ADDR(mbyte_addr_ok,
							   dev->info.dyn_addr);

			/* IBI rules cannot be applied to devices with MSb=1 */
			if (dev->info.dyn_addr & BIT(7))
				mbyte_addr_ko++;
			else
				mbyte_addr_ok++;
		} else {
			reg_nobyte |= SVC_I3C_IBIRULES_ADDR(nobyte_addr_ok,
							    dev->info.dyn_addr);

			/* IBI rules cannot be applied to devices with MSb=1 */
			if (dev->info.dyn_addr & BIT(7))
				nobyte_addr_ko++;
			else
				nobyte_addr_ok++;
		}
	}

	/* Device list cannot be handled by hardware */
	if (!mbyte_addr_ko && mbyte_addr_ok <= SVC_I3C_IBIRULES_ADDRS)
		list_mbyte = true;

	if (!nobyte_addr_ko && nobyte_addr_ok <= SVC_I3C_IBIRULES_ADDRS)
		list_nobyte = true;

	/* No list can be properly handled, return an error */
	if (!list_mbyte && !list_nobyte)
		return -ERANGE;

	/* Pick the first list that can be handled by hardware, randomly */
	if (list_mbyte)
		writel(reg_mbyte, master->regs + SVC_I3C_IBIRULES);
	else
		writel(reg_nobyte, master->regs + SVC_I3C_IBIRULES);

	return 0;
}

/*
 * i3c_master_controller_ops.do_daa hook: run the locked DAA procedure,
 * register every discovered device with the core and refresh the IBI
 * auto-rules.
 */
static int svc_i3c_master_do_daa(struct i3c_master_controller *m)
{
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	u8 addrs[SVC_I3C_MAX_DEVS];
	unsigned long flags;
	unsigned int dev_nb;
	int ret, i;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
		return ret;
	}

	spin_lock_irqsave(&master->xferqueue.lock, flags);
	ret = svc_i3c_master_do_daa_locked(master, addrs, &dev_nb);
	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
	if (ret) {
		svc_i3c_master_emit_stop(master);
		svc_i3c_master_clear_merrwarn(master);
		goto rpm_out;
	}

	/* Register all devices who participated to the core */
	for (i = 0; i < dev_nb; i++) {
		ret = i3c_master_add_i3c_dev_locked(m, addrs[i]);
		if (ret)
			goto rpm_out;
	}

	/* Configure IBI auto-rules */
	ret = svc_i3c_update_ibirules(master);
	if (ret)
		dev_err(master->dev, "Cannot handle such a list of devices");

rpm_out:
	pm_runtime_mark_last_busy(master->dev);
	pm_runtime_put_autosuspend(master->dev);

	return ret;
}

/*
 * Drain the RX FIFO into @in until the COMPLETE flag is seen, with a 1s
 * overall timeout. Returns the number of bytes read, or a negative errno
 * (-ETIMEDOUT, or -EINVAL if the device sends more than @len bytes).
 */
static int svc_i3c_master_read(struct svc_i3c_master *master,
			       u8 *in, unsigned int len)
{
	int offset = 0, i;
	u32 mdctrl, mstatus;
	bool completed = false;
	unsigned int count;
	unsigned long start = jiffies;

	while (!completed) {
		mstatus = readl(master->regs + SVC_I3C_MSTATUS);
		if (SVC_I3C_MSTATUS_COMPLETE(mstatus) != 0)
			completed = true;

		if (time_after(jiffies, start + msecs_to_jiffies(1000))) {
			dev_dbg(master->dev, "I3C read timeout\n");
			return -ETIMEDOUT;
		}

		mdctrl = readl(master->regs + SVC_I3C_MDATACTRL);
		count = SVC_I3C_MDATACTRL_RXCOUNT(mdctrl);
		if (offset + count > len) {
			dev_err(master->dev, "I3C receive length too long!\n");
			return -EINVAL;
		}
		for (i = 0; i < count; i++)
			in[offset + i] = readl(master->regs + SVC_I3C_MRDATAB);

		offset += count;
	}

	return offset;
}

/*
 * Push @len bytes to the TX FIFO, polling for room before each byte. The
 * final byte goes through MWDATABE to mark the end of the message.
 */
static int svc_i3c_master_write(struct svc_i3c_master *master,
				const u8 *out, unsigned int len)
{
	int offset = 0, ret;
	u32 mdctrl;

	while (offset < len) {
		ret = readl_poll_timeout(master->regs + SVC_I3C_MDATACTRL,
					 mdctrl,
					 !(mdctrl & SVC_I3C_MDATACTRL_TXFULL),
					 0, 1000);
		if (ret)
			return ret;

		/*
		 * The last byte to be sent over the bus must either have the
		 * "end" bit set or be written in MWDATABE.
1091 */ 1092 if (likely(offset < (len - 1))) 1093 writel(out[offset++], master->regs + SVC_I3C_MWDATAB); 1094 else 1095 writel(out[offset++], master->regs + SVC_I3C_MWDATABE); 1096 } 1097 1098 return 0; 1099 } 1100 1101 static int svc_i3c_master_xfer(struct svc_i3c_master *master, 1102 bool rnw, unsigned int xfer_type, u8 addr, 1103 u8 *in, const u8 *out, unsigned int xfer_len, 1104 unsigned int *actual_len, bool continued) 1105 { 1106 int retry = 2; 1107 u32 reg; 1108 int ret; 1109 1110 /* clean SVC_I3C_MINT_IBIWON w1c bits */ 1111 writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS); 1112 1113 1114 while (retry--) { 1115 writel(SVC_I3C_MCTRL_REQUEST_START_ADDR | 1116 xfer_type | 1117 SVC_I3C_MCTRL_IBIRESP_NACK | 1118 SVC_I3C_MCTRL_DIR(rnw) | 1119 SVC_I3C_MCTRL_ADDR(addr) | 1120 SVC_I3C_MCTRL_RDTERM(*actual_len), 1121 master->regs + SVC_I3C_MCTRL); 1122 1123 ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg, 1124 SVC_I3C_MSTATUS_MCTRLDONE(reg), 0, 1000); 1125 if (ret) 1126 goto emit_stop; 1127 1128 if (readl(master->regs + SVC_I3C_MERRWARN) & SVC_I3C_MERRWARN_NACK) { 1129 /* 1130 * According to I3C Spec 1.1.1, 11-Jun-2021, section: 5.1.2.2.3. 1131 * If the Controller chooses to start an I3C Message with an I3C Dynamic 1132 * Address, then special provisions shall be made because that same I3C 1133 * Target may be initiating an IBI or a Controller Role Request. So, one of 1134 * three things may happen: (skip 1, 2) 1135 * 1136 * 3. The Addresses match and the RnW bits also match, and so neither 1137 * Controller nor Target will ACK since both are expecting the other side to 1138 * provide ACK. As a result, each side might think it had "won" arbitration, 1139 * but neither side would continue, as each would subsequently see that the 1140 * other did not provide ACK. 1141 * ... 
1142 * For either value of RnW: Due to the NACK, the Controller shall defer the 1143 * Private Write or Private Read, and should typically transmit the Target 1144 * Address again after a Repeated START (i.e., the next one or any one prior 1145 * to a STOP in the Frame). Since the Address Header following a Repeated 1146 * START is not arbitrated, the Controller will always win (see Section 1147 * 5.1.2.2.4). 1148 */ 1149 if (retry && addr != 0x7e) { 1150 writel(SVC_I3C_MERRWARN_NACK, master->regs + SVC_I3C_MERRWARN); 1151 } else { 1152 ret = -ENXIO; 1153 *actual_len = 0; 1154 goto emit_stop; 1155 } 1156 } else { 1157 break; 1158 } 1159 } 1160 1161 /* 1162 * According to I3C spec ver 1.1.1, 5.1.2.2.3 Consequence of Controller Starting a Frame 1163 * with I3C Target Address. 1164 * 1165 * The I3C Controller normally should start a Frame, the Address may be arbitrated, and so 1166 * the Controller shall monitor to see whether an In-Band Interrupt request, a Controller 1167 * Role Request (i.e., Secondary Controller requests to become the Active Controller), or 1168 * a Hot-Join Request has been made. 1169 * 1170 * If missed IBIWON check, the wrong data will be return. When IBIWON happen, return failure 1171 * and yield the above events handler. 1172 */ 1173 if (SVC_I3C_MSTATUS_IBIWON(reg)) { 1174 ret = -EAGAIN; 1175 *actual_len = 0; 1176 goto emit_stop; 1177 } 1178 1179 if (rnw) 1180 ret = svc_i3c_master_read(master, in, xfer_len); 1181 else 1182 ret = svc_i3c_master_write(master, out, xfer_len); 1183 if (ret < 0) 1184 goto emit_stop; 1185 1186 if (rnw) 1187 *actual_len = ret; 1188 1189 ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg, 1190 SVC_I3C_MSTATUS_COMPLETE(reg), 0, 1000); 1191 if (ret) 1192 goto emit_stop; 1193 1194 writel(SVC_I3C_MINT_COMPLETE, master->regs + SVC_I3C_MSTATUS); 1195 1196 if (!continued) { 1197 svc_i3c_master_emit_stop(master); 1198 1199 /* Wait idle if stop is sent. 
*/ 1200 readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg, 1201 SVC_I3C_MSTATUS_STATE_IDLE(reg), 0, 1000); 1202 } 1203 1204 return 0; 1205 1206 emit_stop: 1207 svc_i3c_master_emit_stop(master); 1208 svc_i3c_master_clear_merrwarn(master); 1209 1210 return ret; 1211 } 1212 1213 static struct svc_i3c_xfer * 1214 svc_i3c_master_alloc_xfer(struct svc_i3c_master *master, unsigned int ncmds) 1215 { 1216 struct svc_i3c_xfer *xfer; 1217 1218 xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL); 1219 if (!xfer) 1220 return NULL; 1221 1222 INIT_LIST_HEAD(&xfer->node); 1223 xfer->ncmds = ncmds; 1224 xfer->ret = -ETIMEDOUT; 1225 1226 return xfer; 1227 } 1228 1229 static void svc_i3c_master_free_xfer(struct svc_i3c_xfer *xfer) 1230 { 1231 kfree(xfer); 1232 } 1233 1234 static void svc_i3c_master_dequeue_xfer_locked(struct svc_i3c_master *master, 1235 struct svc_i3c_xfer *xfer) 1236 { 1237 if (master->xferqueue.cur == xfer) 1238 master->xferqueue.cur = NULL; 1239 else 1240 list_del_init(&xfer->node); 1241 } 1242 1243 static void svc_i3c_master_dequeue_xfer(struct svc_i3c_master *master, 1244 struct svc_i3c_xfer *xfer) 1245 { 1246 unsigned long flags; 1247 1248 spin_lock_irqsave(&master->xferqueue.lock, flags); 1249 svc_i3c_master_dequeue_xfer_locked(master, xfer); 1250 spin_unlock_irqrestore(&master->xferqueue.lock, flags); 1251 } 1252 1253 static void svc_i3c_master_start_xfer_locked(struct svc_i3c_master *master) 1254 { 1255 struct svc_i3c_xfer *xfer = master->xferqueue.cur; 1256 int ret, i; 1257 1258 if (!xfer) 1259 return; 1260 1261 svc_i3c_master_clear_merrwarn(master); 1262 svc_i3c_master_flush_fifo(master); 1263 1264 for (i = 0; i < xfer->ncmds; i++) { 1265 struct svc_i3c_cmd *cmd = &xfer->cmds[i]; 1266 1267 ret = svc_i3c_master_xfer(master, cmd->rnw, xfer->type, 1268 cmd->addr, cmd->in, cmd->out, 1269 cmd->len, &cmd->actual_len, 1270 cmd->continued); 1271 /* cmd->xfer is NULL if I2C or CCC transfer */ 1272 if (cmd->xfer) 1273 cmd->xfer->actual_len = 
cmd->actual_len; 1274 1275 if (ret) 1276 break; 1277 } 1278 1279 xfer->ret = ret; 1280 complete(&xfer->comp); 1281 1282 if (ret < 0) 1283 svc_i3c_master_dequeue_xfer_locked(master, xfer); 1284 1285 xfer = list_first_entry_or_null(&master->xferqueue.list, 1286 struct svc_i3c_xfer, 1287 node); 1288 if (xfer) 1289 list_del_init(&xfer->node); 1290 1291 master->xferqueue.cur = xfer; 1292 svc_i3c_master_start_xfer_locked(master); 1293 } 1294 1295 static void svc_i3c_master_enqueue_xfer(struct svc_i3c_master *master, 1296 struct svc_i3c_xfer *xfer) 1297 { 1298 unsigned long flags; 1299 int ret; 1300 1301 ret = pm_runtime_resume_and_get(master->dev); 1302 if (ret < 0) { 1303 dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__); 1304 return; 1305 } 1306 1307 init_completion(&xfer->comp); 1308 spin_lock_irqsave(&master->xferqueue.lock, flags); 1309 if (master->xferqueue.cur) { 1310 list_add_tail(&xfer->node, &master->xferqueue.list); 1311 } else { 1312 master->xferqueue.cur = xfer; 1313 svc_i3c_master_start_xfer_locked(master); 1314 } 1315 spin_unlock_irqrestore(&master->xferqueue.lock, flags); 1316 1317 pm_runtime_mark_last_busy(master->dev); 1318 pm_runtime_put_autosuspend(master->dev); 1319 } 1320 1321 static bool 1322 svc_i3c_master_supports_ccc_cmd(struct i3c_master_controller *master, 1323 const struct i3c_ccc_cmd *cmd) 1324 { 1325 /* No software support for CCC commands targeting more than one slave */ 1326 return (cmd->ndests == 1); 1327 } 1328 1329 static int svc_i3c_master_send_bdcast_ccc_cmd(struct svc_i3c_master *master, 1330 struct i3c_ccc_cmd *ccc) 1331 { 1332 unsigned int xfer_len = ccc->dests[0].payload.len + 1; 1333 struct svc_i3c_xfer *xfer; 1334 struct svc_i3c_cmd *cmd; 1335 u8 *buf; 1336 int ret; 1337 1338 xfer = svc_i3c_master_alloc_xfer(master, 1); 1339 if (!xfer) 1340 return -ENOMEM; 1341 1342 buf = kmalloc(xfer_len, GFP_KERNEL); 1343 if (!buf) { 1344 svc_i3c_master_free_xfer(xfer); 1345 return -ENOMEM; 1346 } 1347 1348 buf[0] = ccc->id; 
1349 memcpy(&buf[1], ccc->dests[0].payload.data, ccc->dests[0].payload.len); 1350 1351 xfer->type = SVC_I3C_MCTRL_TYPE_I3C; 1352 1353 cmd = &xfer->cmds[0]; 1354 cmd->addr = ccc->dests[0].addr; 1355 cmd->rnw = ccc->rnw; 1356 cmd->in = NULL; 1357 cmd->out = buf; 1358 cmd->len = xfer_len; 1359 cmd->actual_len = 0; 1360 cmd->continued = false; 1361 1362 mutex_lock(&master->lock); 1363 svc_i3c_master_enqueue_xfer(master, xfer); 1364 if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000))) 1365 svc_i3c_master_dequeue_xfer(master, xfer); 1366 mutex_unlock(&master->lock); 1367 1368 ret = xfer->ret; 1369 kfree(buf); 1370 svc_i3c_master_free_xfer(xfer); 1371 1372 return ret; 1373 } 1374 1375 static int svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master *master, 1376 struct i3c_ccc_cmd *ccc) 1377 { 1378 unsigned int xfer_len = ccc->dests[0].payload.len; 1379 unsigned int actual_len = ccc->rnw ? xfer_len : 0; 1380 struct svc_i3c_xfer *xfer; 1381 struct svc_i3c_cmd *cmd; 1382 int ret; 1383 1384 xfer = svc_i3c_master_alloc_xfer(master, 2); 1385 if (!xfer) 1386 return -ENOMEM; 1387 1388 xfer->type = SVC_I3C_MCTRL_TYPE_I3C; 1389 1390 /* Broadcasted message */ 1391 cmd = &xfer->cmds[0]; 1392 cmd->addr = I3C_BROADCAST_ADDR; 1393 cmd->rnw = 0; 1394 cmd->in = NULL; 1395 cmd->out = &ccc->id; 1396 cmd->len = 1; 1397 cmd->actual_len = 0; 1398 cmd->continued = true; 1399 1400 /* Directed message */ 1401 cmd = &xfer->cmds[1]; 1402 cmd->addr = ccc->dests[0].addr; 1403 cmd->rnw = ccc->rnw; 1404 cmd->in = ccc->rnw ? ccc->dests[0].payload.data : NULL; 1405 cmd->out = ccc->rnw ? 
NULL : ccc->dests[0].payload.data, 1406 cmd->len = xfer_len; 1407 cmd->actual_len = actual_len; 1408 cmd->continued = false; 1409 1410 mutex_lock(&master->lock); 1411 svc_i3c_master_enqueue_xfer(master, xfer); 1412 if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000))) 1413 svc_i3c_master_dequeue_xfer(master, xfer); 1414 mutex_unlock(&master->lock); 1415 1416 if (cmd->actual_len != xfer_len) 1417 ccc->dests[0].payload.len = cmd->actual_len; 1418 1419 ret = xfer->ret; 1420 svc_i3c_master_free_xfer(xfer); 1421 1422 return ret; 1423 } 1424 1425 static int svc_i3c_master_send_ccc_cmd(struct i3c_master_controller *m, 1426 struct i3c_ccc_cmd *cmd) 1427 { 1428 struct svc_i3c_master *master = to_svc_i3c_master(m); 1429 bool broadcast = cmd->id < 0x80; 1430 int ret; 1431 1432 if (broadcast) 1433 ret = svc_i3c_master_send_bdcast_ccc_cmd(master, cmd); 1434 else 1435 ret = svc_i3c_master_send_direct_ccc_cmd(master, cmd); 1436 1437 if (ret) 1438 cmd->err = I3C_ERROR_M2; 1439 1440 return ret; 1441 } 1442 1443 static int svc_i3c_master_priv_xfers(struct i3c_dev_desc *dev, 1444 struct i3c_priv_xfer *xfers, 1445 int nxfers) 1446 { 1447 struct i3c_master_controller *m = i3c_dev_get_master(dev); 1448 struct svc_i3c_master *master = to_svc_i3c_master(m); 1449 struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev); 1450 struct svc_i3c_xfer *xfer; 1451 int ret, i; 1452 1453 xfer = svc_i3c_master_alloc_xfer(master, nxfers); 1454 if (!xfer) 1455 return -ENOMEM; 1456 1457 xfer->type = SVC_I3C_MCTRL_TYPE_I3C; 1458 1459 for (i = 0; i < nxfers; i++) { 1460 struct svc_i3c_cmd *cmd = &xfer->cmds[i]; 1461 1462 cmd->xfer = &xfers[i]; 1463 cmd->addr = master->addrs[data->index]; 1464 cmd->rnw = xfers[i].rnw; 1465 cmd->in = xfers[i].rnw ? xfers[i].data.in : NULL; 1466 cmd->out = xfers[i].rnw ? NULL : xfers[i].data.out; 1467 cmd->len = xfers[i].len; 1468 cmd->actual_len = xfers[i].rnw ? 
xfers[i].len : 0; 1469 cmd->continued = (i + 1) < nxfers; 1470 } 1471 1472 mutex_lock(&master->lock); 1473 svc_i3c_master_enqueue_xfer(master, xfer); 1474 if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000))) 1475 svc_i3c_master_dequeue_xfer(master, xfer); 1476 mutex_unlock(&master->lock); 1477 1478 ret = xfer->ret; 1479 svc_i3c_master_free_xfer(xfer); 1480 1481 return ret; 1482 } 1483 1484 static int svc_i3c_master_i2c_xfers(struct i2c_dev_desc *dev, 1485 const struct i2c_msg *xfers, 1486 int nxfers) 1487 { 1488 struct i3c_master_controller *m = i2c_dev_get_master(dev); 1489 struct svc_i3c_master *master = to_svc_i3c_master(m); 1490 struct svc_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev); 1491 struct svc_i3c_xfer *xfer; 1492 int ret, i; 1493 1494 xfer = svc_i3c_master_alloc_xfer(master, nxfers); 1495 if (!xfer) 1496 return -ENOMEM; 1497 1498 xfer->type = SVC_I3C_MCTRL_TYPE_I2C; 1499 1500 for (i = 0; i < nxfers; i++) { 1501 struct svc_i3c_cmd *cmd = &xfer->cmds[i]; 1502 1503 cmd->addr = master->addrs[data->index]; 1504 cmd->rnw = xfers[i].flags & I2C_M_RD; 1505 cmd->in = cmd->rnw ? xfers[i].buf : NULL; 1506 cmd->out = cmd->rnw ? NULL : xfers[i].buf; 1507 cmd->len = xfers[i].len; 1508 cmd->actual_len = cmd->rnw ? 
xfers[i].len : 0; 1509 cmd->continued = (i + 1 < nxfers); 1510 } 1511 1512 mutex_lock(&master->lock); 1513 svc_i3c_master_enqueue_xfer(master, xfer); 1514 if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000))) 1515 svc_i3c_master_dequeue_xfer(master, xfer); 1516 mutex_unlock(&master->lock); 1517 1518 ret = xfer->ret; 1519 svc_i3c_master_free_xfer(xfer); 1520 1521 return ret; 1522 } 1523 1524 static int svc_i3c_master_request_ibi(struct i3c_dev_desc *dev, 1525 const struct i3c_ibi_setup *req) 1526 { 1527 struct i3c_master_controller *m = i3c_dev_get_master(dev); 1528 struct svc_i3c_master *master = to_svc_i3c_master(m); 1529 struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev); 1530 unsigned long flags; 1531 unsigned int i; 1532 1533 if (dev->ibi->max_payload_len > SVC_I3C_FIFO_SIZE) { 1534 dev_err(master->dev, "IBI max payload %d should be < %d\n", 1535 dev->ibi->max_payload_len, SVC_I3C_FIFO_SIZE); 1536 return -ERANGE; 1537 } 1538 1539 data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req); 1540 if (IS_ERR(data->ibi_pool)) 1541 return PTR_ERR(data->ibi_pool); 1542 1543 spin_lock_irqsave(&master->ibi.lock, flags); 1544 for (i = 0; i < master->ibi.num_slots; i++) { 1545 if (!master->ibi.slots[i]) { 1546 data->ibi = i; 1547 master->ibi.slots[i] = dev; 1548 break; 1549 } 1550 } 1551 spin_unlock_irqrestore(&master->ibi.lock, flags); 1552 1553 if (i < master->ibi.num_slots) 1554 return 0; 1555 1556 i3c_generic_ibi_free_pool(data->ibi_pool); 1557 data->ibi_pool = NULL; 1558 1559 return -ENOSPC; 1560 } 1561 1562 static void svc_i3c_master_free_ibi(struct i3c_dev_desc *dev) 1563 { 1564 struct i3c_master_controller *m = i3c_dev_get_master(dev); 1565 struct svc_i3c_master *master = to_svc_i3c_master(m); 1566 struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev); 1567 unsigned long flags; 1568 1569 spin_lock_irqsave(&master->ibi.lock, flags); 1570 master->ibi.slots[data->ibi] = NULL; 1571 data->ibi = -1; 1572 
spin_unlock_irqrestore(&master->ibi.lock, flags); 1573 1574 i3c_generic_ibi_free_pool(data->ibi_pool); 1575 } 1576 1577 static int svc_i3c_master_enable_ibi(struct i3c_dev_desc *dev) 1578 { 1579 struct i3c_master_controller *m = i3c_dev_get_master(dev); 1580 struct svc_i3c_master *master = to_svc_i3c_master(m); 1581 int ret; 1582 1583 ret = pm_runtime_resume_and_get(master->dev); 1584 if (ret < 0) { 1585 dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__); 1586 return ret; 1587 } 1588 1589 master->enabled_events++; 1590 svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART); 1591 1592 return i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR); 1593 } 1594 1595 static int svc_i3c_master_disable_ibi(struct i3c_dev_desc *dev) 1596 { 1597 struct i3c_master_controller *m = i3c_dev_get_master(dev); 1598 struct svc_i3c_master *master = to_svc_i3c_master(m); 1599 int ret; 1600 1601 master->enabled_events--; 1602 if (!master->enabled_events) 1603 svc_i3c_master_disable_interrupts(master); 1604 1605 ret = i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR); 1606 1607 pm_runtime_mark_last_busy(master->dev); 1608 pm_runtime_put_autosuspend(master->dev); 1609 1610 return ret; 1611 } 1612 1613 static int svc_i3c_master_enable_hotjoin(struct i3c_master_controller *m) 1614 { 1615 struct svc_i3c_master *master = to_svc_i3c_master(m); 1616 int ret; 1617 1618 ret = pm_runtime_resume_and_get(master->dev); 1619 if (ret < 0) { 1620 dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__); 1621 return ret; 1622 } 1623 1624 master->enabled_events |= SVC_I3C_EVENT_HOTJOIN; 1625 1626 svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART); 1627 1628 return 0; 1629 } 1630 1631 static int svc_i3c_master_disable_hotjoin(struct i3c_master_controller *m) 1632 { 1633 struct svc_i3c_master *master = to_svc_i3c_master(m); 1634 1635 master->enabled_events &= ~SVC_I3C_EVENT_HOTJOIN; 1636 1637 if (!master->enabled_events) 1638 
svc_i3c_master_disable_interrupts(master); 1639 1640 pm_runtime_mark_last_busy(master->dev); 1641 pm_runtime_put_autosuspend(master->dev); 1642 1643 return 0; 1644 } 1645 1646 static void svc_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev, 1647 struct i3c_ibi_slot *slot) 1648 { 1649 struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev); 1650 1651 i3c_generic_ibi_recycle_slot(data->ibi_pool, slot); 1652 } 1653 1654 static const struct i3c_master_controller_ops svc_i3c_master_ops = { 1655 .bus_init = svc_i3c_master_bus_init, 1656 .bus_cleanup = svc_i3c_master_bus_cleanup, 1657 .attach_i3c_dev = svc_i3c_master_attach_i3c_dev, 1658 .detach_i3c_dev = svc_i3c_master_detach_i3c_dev, 1659 .reattach_i3c_dev = svc_i3c_master_reattach_i3c_dev, 1660 .attach_i2c_dev = svc_i3c_master_attach_i2c_dev, 1661 .detach_i2c_dev = svc_i3c_master_detach_i2c_dev, 1662 .do_daa = svc_i3c_master_do_daa, 1663 .supports_ccc_cmd = svc_i3c_master_supports_ccc_cmd, 1664 .send_ccc_cmd = svc_i3c_master_send_ccc_cmd, 1665 .priv_xfers = svc_i3c_master_priv_xfers, 1666 .i2c_xfers = svc_i3c_master_i2c_xfers, 1667 .request_ibi = svc_i3c_master_request_ibi, 1668 .free_ibi = svc_i3c_master_free_ibi, 1669 .recycle_ibi_slot = svc_i3c_master_recycle_ibi_slot, 1670 .enable_ibi = svc_i3c_master_enable_ibi, 1671 .disable_ibi = svc_i3c_master_disable_ibi, 1672 .enable_hotjoin = svc_i3c_master_enable_hotjoin, 1673 .disable_hotjoin = svc_i3c_master_disable_hotjoin, 1674 .set_speed = svc_i3c_master_set_speed, 1675 }; 1676 1677 static int svc_i3c_master_prepare_clks(struct svc_i3c_master *master) 1678 { 1679 int ret = 0; 1680 1681 ret = clk_prepare_enable(master->pclk); 1682 if (ret) 1683 return ret; 1684 1685 ret = clk_prepare_enable(master->fclk); 1686 if (ret) { 1687 clk_disable_unprepare(master->pclk); 1688 return ret; 1689 } 1690 1691 ret = clk_prepare_enable(master->sclk); 1692 if (ret) { 1693 clk_disable_unprepare(master->pclk); 1694 clk_disable_unprepare(master->fclk); 1695 return ret; 1696 } 
1697 1698 return 0; 1699 } 1700 1701 static void svc_i3c_master_unprepare_clks(struct svc_i3c_master *master) 1702 { 1703 clk_disable_unprepare(master->pclk); 1704 clk_disable_unprepare(master->fclk); 1705 clk_disable_unprepare(master->sclk); 1706 } 1707 1708 static int svc_i3c_master_probe(struct platform_device *pdev) 1709 { 1710 struct device *dev = &pdev->dev; 1711 struct svc_i3c_master *master; 1712 int ret; 1713 1714 master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL); 1715 if (!master) 1716 return -ENOMEM; 1717 1718 master->regs = devm_platform_ioremap_resource(pdev, 0); 1719 if (IS_ERR(master->regs)) 1720 return PTR_ERR(master->regs); 1721 1722 master->pclk = devm_clk_get(dev, "pclk"); 1723 if (IS_ERR(master->pclk)) 1724 return PTR_ERR(master->pclk); 1725 1726 master->fclk = devm_clk_get(dev, "fast_clk"); 1727 if (IS_ERR(master->fclk)) 1728 return PTR_ERR(master->fclk); 1729 1730 master->sclk = devm_clk_get(dev, "slow_clk"); 1731 if (IS_ERR(master->sclk)) 1732 return PTR_ERR(master->sclk); 1733 1734 master->irq = platform_get_irq(pdev, 0); 1735 if (master->irq < 0) 1736 return master->irq; 1737 1738 master->dev = dev; 1739 1740 ret = svc_i3c_master_prepare_clks(master); 1741 if (ret) 1742 return ret; 1743 1744 INIT_WORK(&master->hj_work, svc_i3c_master_hj_work); 1745 INIT_WORK(&master->ibi_work, svc_i3c_master_ibi_work); 1746 mutex_init(&master->lock); 1747 1748 ret = devm_request_irq(dev, master->irq, svc_i3c_master_irq_handler, 1749 IRQF_NO_SUSPEND, "svc-i3c-irq", master); 1750 if (ret) 1751 goto err_disable_clks; 1752 1753 master->free_slots = GENMASK(SVC_I3C_MAX_DEVS - 1, 0); 1754 1755 spin_lock_init(&master->xferqueue.lock); 1756 INIT_LIST_HEAD(&master->xferqueue.list); 1757 1758 spin_lock_init(&master->ibi.lock); 1759 master->ibi.num_slots = SVC_I3C_MAX_DEVS; 1760 master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots, 1761 sizeof(*master->ibi.slots), 1762 GFP_KERNEL); 1763 if (!master->ibi.slots) { 1764 ret = -ENOMEM; 1765 goto 
err_disable_clks; 1766 } 1767 1768 platform_set_drvdata(pdev, master); 1769 1770 pm_runtime_set_autosuspend_delay(&pdev->dev, SVC_I3C_PM_TIMEOUT_MS); 1771 pm_runtime_use_autosuspend(&pdev->dev); 1772 pm_runtime_get_noresume(&pdev->dev); 1773 pm_runtime_set_active(&pdev->dev); 1774 pm_runtime_enable(&pdev->dev); 1775 1776 svc_i3c_master_reset(master); 1777 1778 /* Register the master */ 1779 ret = i3c_master_register(&master->base, &pdev->dev, 1780 &svc_i3c_master_ops, false); 1781 if (ret) 1782 goto rpm_disable; 1783 1784 pm_runtime_mark_last_busy(&pdev->dev); 1785 pm_runtime_put_autosuspend(&pdev->dev); 1786 1787 return 0; 1788 1789 rpm_disable: 1790 pm_runtime_dont_use_autosuspend(&pdev->dev); 1791 pm_runtime_put_noidle(&pdev->dev); 1792 pm_runtime_disable(&pdev->dev); 1793 pm_runtime_set_suspended(&pdev->dev); 1794 1795 err_disable_clks: 1796 svc_i3c_master_unprepare_clks(master); 1797 1798 return ret; 1799 } 1800 1801 static void svc_i3c_master_remove(struct platform_device *pdev) 1802 { 1803 struct svc_i3c_master *master = platform_get_drvdata(pdev); 1804 1805 cancel_work_sync(&master->hj_work); 1806 i3c_master_unregister(&master->base); 1807 1808 pm_runtime_dont_use_autosuspend(&pdev->dev); 1809 pm_runtime_disable(&pdev->dev); 1810 } 1811 1812 static void svc_i3c_save_regs(struct svc_i3c_master *master) 1813 { 1814 master->saved_regs.mconfig = readl(master->regs + SVC_I3C_MCONFIG); 1815 master->saved_regs.mdynaddr = readl(master->regs + SVC_I3C_MDYNADDR); 1816 } 1817 1818 static void svc_i3c_restore_regs(struct svc_i3c_master *master) 1819 { 1820 if (readl(master->regs + SVC_I3C_MDYNADDR) != 1821 master->saved_regs.mdynaddr) { 1822 writel(master->saved_regs.mconfig, 1823 master->regs + SVC_I3C_MCONFIG); 1824 writel(master->saved_regs.mdynaddr, 1825 master->regs + SVC_I3C_MDYNADDR); 1826 } 1827 } 1828 1829 static int __maybe_unused svc_i3c_runtime_suspend(struct device *dev) 1830 { 1831 struct svc_i3c_master *master = dev_get_drvdata(dev); 1832 1833 
svc_i3c_save_regs(master); 1834 svc_i3c_master_unprepare_clks(master); 1835 pinctrl_pm_select_sleep_state(dev); 1836 1837 return 0; 1838 } 1839 1840 static int __maybe_unused svc_i3c_runtime_resume(struct device *dev) 1841 { 1842 struct svc_i3c_master *master = dev_get_drvdata(dev); 1843 1844 pinctrl_pm_select_default_state(dev); 1845 svc_i3c_master_prepare_clks(master); 1846 1847 svc_i3c_restore_regs(master); 1848 1849 return 0; 1850 } 1851 1852 static const struct dev_pm_ops svc_i3c_pm_ops = { 1853 SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, 1854 pm_runtime_force_resume) 1855 SET_RUNTIME_PM_OPS(svc_i3c_runtime_suspend, 1856 svc_i3c_runtime_resume, NULL) 1857 }; 1858 1859 static const struct of_device_id svc_i3c_master_of_match_tbl[] = { 1860 { .compatible = "silvaco,i3c-master" }, 1861 { /* sentinel */ }, 1862 }; 1863 MODULE_DEVICE_TABLE(of, svc_i3c_master_of_match_tbl); 1864 1865 static struct platform_driver svc_i3c_master = { 1866 .probe = svc_i3c_master_probe, 1867 .remove_new = svc_i3c_master_remove, 1868 .driver = { 1869 .name = "silvaco-i3c-master", 1870 .of_match_table = svc_i3c_master_of_match_tbl, 1871 .pm = &svc_i3c_pm_ops, 1872 }, 1873 }; 1874 module_platform_driver(svc_i3c_master); 1875 1876 MODULE_AUTHOR("Conor Culhane <conor.culhane@silvaco.com>"); 1877 MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>"); 1878 MODULE_DESCRIPTION("Silvaco dual-role I3C master driver"); 1879 MODULE_LICENSE("GPL v2"); 1880