// SPDX-License-Identifier: GPL-2.0
/*
 * Silvaco dual-role I3C master driver
 *
 * Copyright (C) 2020 Silvaco
 * Author: Miquel RAYNAL <miquel.raynal@bootlin.com>
 * Based on a work from: Conor Culhane <conor.culhane@silvaco.com>
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/i3c/master.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

/* Master Mode Registers */
#define SVC_I3C_MCONFIG			0x000
#define SVC_I3C_MCONFIG_MASTER_EN	BIT(0)
#define SVC_I3C_MCONFIG_DISTO(x)	FIELD_PREP(BIT(3), (x))
#define SVC_I3C_MCONFIG_HKEEP(x)	FIELD_PREP(GENMASK(5, 4), (x))
#define SVC_I3C_MCONFIG_ODSTOP(x)	FIELD_PREP(BIT(6), (x))
#define SVC_I3C_MCONFIG_PPBAUD(x)	FIELD_PREP(GENMASK(11, 8), (x))
#define SVC_I3C_MCONFIG_PPLOW(x)	FIELD_PREP(GENMASK(15, 12), (x))
#define SVC_I3C_MCONFIG_ODBAUD(x)	FIELD_PREP(GENMASK(23, 16), (x))
#define SVC_I3C_MCONFIG_ODHPP(x)	FIELD_PREP(BIT(24), (x))
#define SVC_I3C_MCONFIG_SKEW(x)		FIELD_PREP(GENMASK(27, 25), (x))
#define SVC_I3C_MCONFIG_I2CBAUD(x)	FIELD_PREP(GENMASK(31, 28), (x))

#define SVC_I3C_MCTRL			0x084
#define SVC_I3C_MCTRL_REQUEST_MASK	GENMASK(2, 0)
#define SVC_I3C_MCTRL_REQUEST_NONE	0
#define SVC_I3C_MCTRL_REQUEST_START_ADDR 1
#define SVC_I3C_MCTRL_REQUEST_STOP	2
#define SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK 3
#define SVC_I3C_MCTRL_REQUEST_PROC_DAA	4
#define SVC_I3C_MCTRL_REQUEST_AUTO_IBI	7
#define SVC_I3C_MCTRL_TYPE_I3C		0
#define SVC_I3C_MCTRL_TYPE_I2C		BIT(4)
#define SVC_I3C_MCTRL_IBIRESP_AUTO	0
#define SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE 0
#define SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE BIT(7)
#define SVC_I3C_MCTRL_IBIRESP_NACK	BIT(6)
#define SVC_I3C_MCTRL_IBIRESP_MANUAL	GENMASK(7, 6)
#define SVC_I3C_MCTRL_DIR(x)		FIELD_PREP(BIT(8), (x))
#define SVC_I3C_MCTRL_DIR_WRITE		0
#define SVC_I3C_MCTRL_DIR_READ		1
#define SVC_I3C_MCTRL_ADDR(x)		FIELD_PREP(GENMASK(15, 9), (x))
#define SVC_I3C_MCTRL_RDTERM(x)		FIELD_PREP(GENMASK(23, 16), (x))

#define SVC_I3C_MSTATUS			0x088
#define SVC_I3C_MSTATUS_STATE(x)	FIELD_GET(GENMASK(2, 0), (x))
#define SVC_I3C_MSTATUS_STATE_DAA(x)	(SVC_I3C_MSTATUS_STATE(x) == 5)
#define SVC_I3C_MSTATUS_STATE_IDLE(x)	(SVC_I3C_MSTATUS_STATE(x) == 0)
#define SVC_I3C_MSTATUS_BETWEEN(x)	FIELD_GET(BIT(4), (x))
#define SVC_I3C_MSTATUS_NACKED(x)	FIELD_GET(BIT(5), (x))
#define SVC_I3C_MSTATUS_IBITYPE(x)	FIELD_GET(GENMASK(7, 6), (x))
#define SVC_I3C_MSTATUS_IBITYPE_IBI	1
#define SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST 2
#define SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN 3
#define SVC_I3C_MINT_SLVSTART		BIT(8)
#define SVC_I3C_MINT_MCTRLDONE		BIT(9)
#define SVC_I3C_MINT_COMPLETE		BIT(10)
#define SVC_I3C_MINT_RXPEND		BIT(11)
#define SVC_I3C_MINT_TXNOTFULL		BIT(12)
#define SVC_I3C_MINT_IBIWON		BIT(13)
#define SVC_I3C_MINT_ERRWARN		BIT(15)
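/*
 * The MSTATUS interrupt flags share the bit positions of the MINTSET/MINTCLR
 * masks, hence the reuse of the SVC_I3C_MINT_* definitions below.
 */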
#define SVC_I3C_MSTATUS_SLVSTART(x)	FIELD_GET(SVC_I3C_MINT_SLVSTART, (x))
#define SVC_I3C_MSTATUS_MCTRLDONE(x)	FIELD_GET(SVC_I3C_MINT_MCTRLDONE, (x))
#define SVC_I3C_MSTATUS_COMPLETE(x)	FIELD_GET(SVC_I3C_MINT_COMPLETE, (x))
#define SVC_I3C_MSTATUS_RXPEND(x)	FIELD_GET(SVC_I3C_MINT_RXPEND, (x))
#define SVC_I3C_MSTATUS_TXNOTFULL(x)	FIELD_GET(SVC_I3C_MINT_TXNOTFULL, (x))
#define SVC_I3C_MSTATUS_IBIWON(x)	FIELD_GET(SVC_I3C_MINT_IBIWON, (x))
#define SVC_I3C_MSTATUS_ERRWARN(x)	FIELD_GET(SVC_I3C_MINT_ERRWARN, (x))
#define SVC_I3C_MSTATUS_IBIADDR(x)	FIELD_GET(GENMASK(30, 24), (x))

#define SVC_I3C_IBIRULES		0x08C
#define SVC_I3C_IBIRULES_ADDR(slot, addr) FIELD_PREP(GENMASK(29, 0), \
						      ((addr) & 0x3F) << ((slot) * 6))
#define SVC_I3C_IBIRULES_ADDRS		5
#define SVC_I3C_IBIRULES_MSB0		BIT(30)
#define SVC_I3C_IBIRULES_NOBYTE		BIT(31)
#define SVC_I3C_IBIRULES_MANDBYTE	0
#define SVC_I3C_MINTSET			0x090
#define SVC_I3C_MINTCLR			0x094
#define SVC_I3C_MINTMASKED		0x098
#define SVC_I3C_MERRWARN		0x09C
#define SVC_I3C_MDMACTRL		0x0A0
#define SVC_I3C_MDATACTRL		0x0AC
#define SVC_I3C_MDATACTRL_FLUSHTB	BIT(0)
#define SVC_I3C_MDATACTRL_FLUSHRB	BIT(1)
#define SVC_I3C_MDATACTRL_UNLOCK_TRIG	BIT(3)
#define SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL GENMASK(5, 4)
#define SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY 0
#define SVC_I3C_MDATACTRL_RXCOUNT(x)	FIELD_GET(GENMASK(28, 24), (x))
#define SVC_I3C_MDATACTRL_TXFULL	BIT(30)
#define SVC_I3C_MDATACTRL_RXEMPTY	BIT(31)

#define SVC_I3C_MWDATAB			0x0B0
#define SVC_I3C_MWDATAB_END		BIT(8)

#define SVC_I3C_MWDATABE		0x0B4
#define SVC_I3C_MWDATAH			0x0B8
#define SVC_I3C_MWDATAHE		0x0BC
#define SVC_I3C_MRDATAB			0x0C0
#define SVC_I3C_MRDATAH			0x0C8
#define SVC_I3C_MWMSG_SDR		0x0D0
#define SVC_I3C_MRMSG_SDR		0x0D4
#define SVC_I3C_MWMSG_DDR		0x0D8
#define SVC_I3C_MRMSG_DDR		0x0DC

#define SVC_I3C_MDYNADDR		0x0E4
#define SVC_MDYNADDR_VALID		BIT(0)
#define SVC_MDYNADDR_ADDR(x)		FIELD_PREP(GENMASK(7, 1), (x))

#define SVC_I3C_MAX_DEVS		32

/* This parameter depends on the implementation and may be tuned */
#define SVC_I3C_FIFO_SIZE		16

struct svc_i3c_cmd {
	u8 addr;
	bool rnw;
	u8 *in;
	const void *out;
	unsigned int len;
	unsigned int read_len;
	bool continued;
};

struct svc_i3c_xfer {
	struct list_head node;
	struct completion comp;
	int ret;
	unsigned int type;
	unsigned int ncmds;
	struct svc_i3c_cmd cmds[];
};

/**
 * struct svc_i3c_master - Silvaco I3C Master structure
 * @base: I3C master controller
 * @dev: Corresponding device
 * @regs: Memory mapping
 * @free_slots: Bit array of available slots
 * @addrs: Array containing the dynamic addresses of each attached device
 * @descs: Array of descriptors, one per attached device
 * @hj_work: Hot-join work
 * @ibi_work: IBI work
 * @irq: Main interrupt
 * @pclk: System clock
 * @fclk: Fast clock (bus)
 * @sclk: Slow clock (other events)
 * @xferqueue: Transfer queue structure
 * @xferqueue.list: List member
 * @xferqueue.cur: Current ongoing transfer
 * @xferqueue.lock: Queue lock
 * @ibi: IBI structure
 * @ibi.num_slots: Number of slots available in @ibi.slots
 * @ibi.slots: Available IBI slots
 * @ibi.tbq_slot: To be queued IBI slot
 * @ibi.lock: IBI lock
 */
struct svc_i3c_master {
	struct i3c_master_controller base;
	struct device *dev;
	void __iomem *regs;
	u32 free_slots;
	u8 addrs[SVC_I3C_MAX_DEVS];
	struct i3c_dev_desc *descs[SVC_I3C_MAX_DEVS];
	struct work_struct hj_work;
	struct work_struct ibi_work;
	int irq;
	struct clk *pclk;
	struct clk *fclk;
	struct clk *sclk;
	struct {
		struct list_head list;
		struct svc_i3c_xfer *cur;
		/* Prevent races between transfers */
		spinlock_t lock;
	} xferqueue;
	struct {
		unsigned int num_slots;
		struct i3c_dev_desc **slots;
		struct i3c_ibi_slot *tbq_slot;
		/* Prevent races within IBI handlers */
		spinlock_t lock;
	} ibi;
};

/**
 * struct svc_i3c_i2c_dev_data - Device specific data
 * @index: Index in the master tables corresponding to this device
 * @ibi: IBI slot index in the master structure
 * @ibi_pool: IBI pool associated to this device
 */
struct svc_i3c_i2c_dev_data {
	u8 index;
	int ibi;
	struct i3c_generic_ibi_pool *ibi_pool;
};

static bool svc_i3c_master_error(struct svc_i3c_master *master)
{
	u32 mstatus, merrwarn;

	mstatus = readl(master->regs + SVC_I3C_MSTATUS);
	if (SVC_I3C_MSTATUS_ERRWARN(mstatus)) {
		merrwarn = readl(master->regs + SVC_I3C_MERRWARN);
		writel(merrwarn, master->regs + SVC_I3C_MERRWARN);
		dev_err(master->dev,
			"Error condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
			mstatus, merrwarn);

		return true;
	}

	return false;
}

static void svc_i3c_master_enable_interrupts(struct svc_i3c_master *master, u32 mask)
{
	writel(mask, master->regs + SVC_I3C_MINTSET);
}

static void svc_i3c_master_disable_interrupts(struct svc_i3c_master *master)
{
	u32 mask = readl(master->regs + SVC_I3C_MINTSET);

	writel(mask, master->regs + SVC_I3C_MINTCLR);
}

static inline struct svc_i3c_master *
to_svc_i3c_master(struct i3c_master_controller *master)
{
	return container_of(master, struct svc_i3c_master, base);
}

static void svc_i3c_master_hj_work(struct work_struct *work)
{
	struct svc_i3c_master *master;

	master = container_of(work, struct svc_i3c_master, hj_work);
	i3c_master_do_daa(&master->base);
}

static struct i3c_dev_desc *
svc_i3c_master_dev_from_addr(struct svc_i3c_master *master,
			     unsigned int ibiaddr)
{
	int i;

	for (i = 0; i < SVC_I3C_MAX_DEVS; i++)
		if (master->addrs[i] == ibiaddr)
			break;

	if (i == SVC_I3C_MAX_DEVS)
		return NULL;

	return master->descs[i];
}

static void svc_i3c_master_emit_stop(struct svc_i3c_master *master)
{
	writel(SVC_I3C_MCTRL_REQUEST_STOP, master->regs + SVC_I3C_MCTRL);

	/*
	 * This delay is necessary after the emission of a stop, otherwise
	 * e.g. repeating IBIs do not get detected. There is a note in the
	 * manual about it, stating that the stop condition might not be
	 * settled correctly if a start condition follows too rapidly.
	 */
	udelay(1);
}

static void svc_i3c_master_clear_merrwarn(struct svc_i3c_master *master)
{
	writel(readl(master->regs + SVC_I3C_MERRWARN),
	       master->regs + SVC_I3C_MERRWARN);
}

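/*
 * Pull the IBI payload out of the RX FIFO into a free slot taken from the
 * device IBI pool. The payload is capped to SVC_I3C_FIFO_SIZE bytes and the
 * slot is parked in @ibi.tbq_slot until the IBI work either queues it to the
 * core or recycles it on error.
 */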
static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
				     struct i3c_dev_desc *dev)
{
	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	struct i3c_ibi_slot *slot;
	unsigned int count;
	u32 mdatactrl;
	u8 *buf;

	slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
	if (!slot)
		return -ENOSPC;

	slot->len = 0;
	buf = slot->data;

	while (SVC_I3C_MSTATUS_RXPEND(readl(master->regs + SVC_I3C_MSTATUS)) &&
	       slot->len < SVC_I3C_FIFO_SIZE) {
		mdatactrl = readl(master->regs + SVC_I3C_MDATACTRL);
		count = SVC_I3C_MDATACTRL_RXCOUNT(mdatactrl);
		/* MRDATAB pops one byte per access, use byte-wide reads */
		readsb(master->regs + SVC_I3C_MRDATAB, buf, count);
		slot->len += count;
		buf += count;
	}

	master->ibi.tbq_slot = slot;

	return 0;
}

static void svc_i3c_master_ack_ibi(struct svc_i3c_master *master,
				   bool mandatory_byte)
{
	unsigned int ibi_ack_nack;

	ibi_ack_nack = SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK;
	if (mandatory_byte)
		ibi_ack_nack |= SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE;
	else
		ibi_ack_nack |= SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE;

	writel(ibi_ack_nack, master->regs + SVC_I3C_MCTRL);
}

static void svc_i3c_master_nack_ibi(struct svc_i3c_master *master)
{
	writel(SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK |
	       SVC_I3C_MCTRL_IBIRESP_NACK,
	       master->regs + SVC_I3C_MCTRL);
}

static void svc_i3c_master_ibi_work(struct work_struct *work)
{
	struct svc_i3c_master *master = container_of(work, struct svc_i3c_master, ibi_work);
	struct svc_i3c_i2c_dev_data *data;
	unsigned int ibitype, ibiaddr;
	struct i3c_dev_desc *dev;
	u32 status, val;
	int ret;

	/* Acknowledge the incoming interrupt with the AUTOIBI mechanism */
	writel(SVC_I3C_MCTRL_REQUEST_AUTO_IBI |
	       SVC_I3C_MCTRL_IBIRESP_AUTO,
	       master->regs + SVC_I3C_MCTRL);

	/* Wait for IBIWON, should take approximately 100us */
	ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val,
					 SVC_I3C_MSTATUS_IBIWON(val), 0, 1000);
	if (ret) {
		dev_err(master->dev, "Timeout when polling for IBIWON\n");
		goto reenable_ibis;
	}

	/* Clear the interrupt status */
	writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);

	status = readl(master->regs + SVC_I3C_MSTATUS);
	ibitype = SVC_I3C_MSTATUS_IBITYPE(status);
	ibiaddr = SVC_I3C_MSTATUS_IBIADDR(status);

	/* Handle the critical responses to IBIs */
	switch (ibitype) {
	case SVC_I3C_MSTATUS_IBITYPE_IBI:
		dev = svc_i3c_master_dev_from_addr(master, ibiaddr);
		if (!dev)
			svc_i3c_master_nack_ibi(master);
		else
			svc_i3c_master_handle_ibi(master, dev);
		break;
	case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
		svc_i3c_master_ack_ibi(master, false);
		break;
	case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
		svc_i3c_master_nack_ibi(master);
		break;
	default:
		break;
	}

	/*
	 * If an error happened, we probably got interrupted and the exchange
	 * timed out. In this case we just drop everything, emit a stop and
	 * wait for the slave to interrupt again.
	 */
	if (svc_i3c_master_error(master)) {
		if (master->ibi.tbq_slot) {
			data = i3c_dev_get_master_data(dev);
			i3c_generic_ibi_recycle_slot(data->ibi_pool,
						     master->ibi.tbq_slot);
			master->ibi.tbq_slot = NULL;
		}

		svc_i3c_master_emit_stop(master);

		goto reenable_ibis;
	}

	/* Handle the non critical tasks */
	switch (ibitype) {
	case SVC_I3C_MSTATUS_IBITYPE_IBI:
		if (dev) {
			i3c_master_queue_ibi(dev, master->ibi.tbq_slot);
			master->ibi.tbq_slot = NULL;
		}
		svc_i3c_master_emit_stop(master);
		break;
	case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
		queue_work(master->base.wq, &master->hj_work);
		break;
	case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
	default:
		break;
	}

reenable_ibis:
	svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
}

static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id)
{
	struct svc_i3c_master *master = (struct svc_i3c_master *)dev_id;
	u32 active = readl(master->regs + SVC_I3C_MINTMASKED);

	if (!SVC_I3C_MSTATUS_SLVSTART(active))
		return IRQ_NONE;

	/* Clear the interrupt status */
	writel(SVC_I3C_MINT_SLVSTART, master->regs + SVC_I3C_MSTATUS);

	svc_i3c_master_disable_interrupts(master);

	/* Handle the interrupt in a non atomic context */
	queue_work(master->base.wq, &master->ibi_work);

	return IRQ_HANDLED;
}

static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
{
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	struct i3c_bus *bus = i3c_master_get_bus(m);
	struct i3c_device_info info = {};
	unsigned long fclk_rate, fclk_period_ns;
	unsigned int high_period_ns, od_low_period_ns;
	u32 ppbaud, pplow, odhpp, odbaud, i2cbaud, reg;
	int ret;

	/* Timings derivation */
	fclk_rate = clk_get_rate(master->fclk);
	if (!fclk_rate)
		return -EINVAL;

	fclk_period_ns = DIV_ROUND_UP(1000000000, fclk_rate);

	/*
	 * Using I3C Push-Pull mode, target is 12.5MHz/80ns period.
	 * Simplest configuration is using a 50% duty-cycle of 40ns.
	 */
	ppbaud = DIV_ROUND_UP(40, fclk_period_ns) - 1;
	pplow = 0;

	/*
	 * Using I3C Open-Drain mode, target is 4.17MHz/240ns with a
	 * duty-cycle tuned so that high levels are filtered out by
	 * the 50ns filter (target being 40ns).
	 */
	odhpp = 1;
	high_period_ns = (ppbaud + 1) * fclk_period_ns;
	odbaud = DIV_ROUND_UP(240 - high_period_ns, high_period_ns) - 1;
	od_low_period_ns = (odbaud + 1) * high_period_ns;

	switch (bus->mode) {
	case I3C_BUS_MODE_PURE:
		i2cbaud = 0;
		break;
	case I3C_BUS_MODE_MIXED_FAST:
	case I3C_BUS_MODE_MIXED_LIMITED:
		/*
		 * Using I2C Fm+ mode, target is 1MHz/1000ns, the difference
		 * between the high and low period does not really matter.
		 */
		i2cbaud = DIV_ROUND_UP(1000, od_low_period_ns) - 2;
		break;
	case I3C_BUS_MODE_MIXED_SLOW:
		/*
		 * Using I2C Fm mode, target is 0.4MHz/2500ns, with the same
		 * constraints as the Fm+ mode.
		 */
		i2cbaud = DIV_ROUND_UP(2500, od_low_period_ns) - 2;
		break;
	default:
		return -EINVAL;
	}

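	/*
	 * Worked example (purely illustrative, assuming fclk = 100MHz):
	 * fclk_period_ns = 10, so ppbaud = DIV_ROUND_UP(40, 10) - 1 = 3 and
	 * both push-pull phases last (3 + 1) * 10 = 40ns (12.5MHz). With
	 * odhpp = 1, odbaud = DIV_ROUND_UP(240 - 40, 40) - 1 = 4, hence an
	 * open-drain low time of (4 + 1) * 40 = 200ns and a 240ns period
	 * (~4.17MHz). In mixed fast mode, i2cbaud = DIV_ROUND_UP(1000, 200)
	 * - 2 = 3.
	 */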
	reg = SVC_I3C_MCONFIG_MASTER_EN |
	      SVC_I3C_MCONFIG_DISTO(0) |
	      SVC_I3C_MCONFIG_HKEEP(0) |
	      SVC_I3C_MCONFIG_ODSTOP(0) |
	      SVC_I3C_MCONFIG_PPBAUD(ppbaud) |
	      SVC_I3C_MCONFIG_PPLOW(pplow) |
	      SVC_I3C_MCONFIG_ODBAUD(odbaud) |
	      SVC_I3C_MCONFIG_ODHPP(odhpp) |
	      SVC_I3C_MCONFIG_SKEW(0) |
	      SVC_I3C_MCONFIG_I2CBAUD(i2cbaud);
	writel(reg, master->regs + SVC_I3C_MCONFIG);

	/* Master core's registration */
	ret = i3c_master_get_free_addr(m, 0);
	if (ret < 0)
		return ret;

	info.dyn_addr = ret;

	writel(SVC_MDYNADDR_VALID | SVC_MDYNADDR_ADDR(info.dyn_addr),
	       master->regs + SVC_I3C_MDYNADDR);

	ret = i3c_master_set_info(&master->base, &info);
	if (ret)
		return ret;

	svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);

	return 0;
}

static void svc_i3c_master_bus_cleanup(struct i3c_master_controller *m)
{
	struct svc_i3c_master *master = to_svc_i3c_master(m);

	svc_i3c_master_disable_interrupts(master);

	/* Disable master */
	writel(0, master->regs + SVC_I3C_MCONFIG);
}

static int svc_i3c_master_reserve_slot(struct svc_i3c_master *master)
{
	unsigned int slot;

	if (!(master->free_slots & GENMASK(SVC_I3C_MAX_DEVS - 1, 0)))
		return -ENOSPC;

	slot = ffs(master->free_slots) - 1;

	master->free_slots &= ~BIT(slot);

	return slot;
}

static void svc_i3c_master_release_slot(struct svc_i3c_master *master,
					unsigned int slot)
{
	master->free_slots |= BIT(slot);
}

static int svc_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	struct svc_i3c_i2c_dev_data *data;
	int slot;

	slot = svc_i3c_master_reserve_slot(master);
	if (slot < 0)
		return slot;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data) {
		svc_i3c_master_release_slot(master, slot);
		return -ENOMEM;
	}

	data->ibi = -1;
	data->index = slot;
	master->addrs[slot] = dev->info.dyn_addr ? dev->info.dyn_addr :
						   dev->info.static_addr;
	master->descs[slot] = dev;

	i3c_dev_set_master_data(dev, data);

	return 0;
}

static int svc_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
					   u8 old_dyn_addr)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);

	master->addrs[data->index] = dev->info.dyn_addr ? dev->info.dyn_addr :
							  dev->info.static_addr;

	return 0;
}

static void svc_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
{
	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);

	master->addrs[data->index] = 0;
	svc_i3c_master_release_slot(master, data->index);

	kfree(data);
}

static int svc_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
{
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	struct svc_i3c_i2c_dev_data *data;
	int slot;

	slot = svc_i3c_master_reserve_slot(master);
	if (slot < 0)
		return slot;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data) {
		svc_i3c_master_release_slot(master, slot);
		return -ENOMEM;
	}

	data->index = slot;
	master->addrs[slot] = dev->addr;

	i2c_dev_set_master_data(dev, data);

	return 0;
}

static void svc_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
{
	struct svc_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);

	svc_i3c_master_release_slot(master, data->index);

	kfree(data);
}

static int svc_i3c_master_readb(struct svc_i3c_master *master, u8 *dst,
				unsigned int len)
{
	int ret, i;
	u32 reg;

	for (i = 0; i < len; i++) {
		ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
					 SVC_I3C_MSTATUS_RXPEND(reg), 0, 1000);
		if (ret)
			return ret;

		dst[i] = readl(master->regs + SVC_I3C_MRDATAB);
	}

	return 0;
}

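/*
 * Dynamic Address Assignment loop: each PROC_DAA request either makes one
 * device send its 48-bit provisional ID plus BCR/DCR (RXPEND raised), in
 * which case the next free dynamic address is pushed to the TX FIFO, or
 * completes with MCTRLDONE, meaning the whole procedure is over (COMPLETE
 * with the bus back to idle) or that a device NACKed its address.
 */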
static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
					u8 *addrs, unsigned int *count)
{
	u64 prov_id[SVC_I3C_MAX_DEVS] = {}, nacking_prov_id = 0;
	unsigned int dev_nb = 0, last_addr = 0;
	u32 reg;
	int ret, i;

	while (true) {
		/* Enter/proceed with DAA */
		writel(SVC_I3C_MCTRL_REQUEST_PROC_DAA |
		       SVC_I3C_MCTRL_TYPE_I3C |
		       SVC_I3C_MCTRL_IBIRESP_NACK |
		       SVC_I3C_MCTRL_DIR(SVC_I3C_MCTRL_DIR_WRITE),
		       master->regs + SVC_I3C_MCTRL);

		/*
		 * Either one slave will send its ID, or the assignment process
		 * is done.
		 */
		ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
					 SVC_I3C_MSTATUS_RXPEND(reg) |
					 SVC_I3C_MSTATUS_MCTRLDONE(reg),
					 1, 1000);
		if (ret)
			return ret;

		if (SVC_I3C_MSTATUS_RXPEND(reg)) {
			u8 data[6];

			/*
			 * We only care about the 48-bit provisional ID here,
			 * as it lets us detect a device that NACKs its
			 * address twice. Otherwise, we would just need to
			 * flush the RX FIFO.
			 */
			ret = svc_i3c_master_readb(master, data, 6);
			if (ret)
				return ret;

			for (i = 0; i < 6; i++)
				prov_id[dev_nb] |= (u64)(data[i]) << (8 * (5 - i));

			/* We do not care about the BCR and DCR yet */
			ret = svc_i3c_master_readb(master, data, 2);
			if (ret)
				return ret;
		} else if (SVC_I3C_MSTATUS_MCTRLDONE(reg)) {
			if (SVC_I3C_MSTATUS_STATE_IDLE(reg) &&
			    SVC_I3C_MSTATUS_COMPLETE(reg)) {
				/*
				 * All devices received and acked their dynamic
				 * address, this is the natural end of the DAA
				 * procedure.
				 */
				break;
			} else if (SVC_I3C_MSTATUS_NACKED(reg)) {
				/*
				 * A slave device nacked the address, which is
				 * allowed only once. DAA is stopped and then
				 * resumed: the same device is supposed to
				 * answer again immediately and shall ack the
				 * address this time.
				 */
				if (prov_id[dev_nb] == nacking_prov_id)
					return -EIO;

				dev_nb--;
				nacking_prov_id = prov_id[dev_nb];
				svc_i3c_master_emit_stop(master);

				continue;
			} else {
				return -EIO;
			}
		}

		/* Wait for the slave to be ready to receive its address */
		ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
					 SVC_I3C_MSTATUS_MCTRLDONE(reg) &&
					 SVC_I3C_MSTATUS_STATE_DAA(reg) &&
					 SVC_I3C_MSTATUS_BETWEEN(reg),
					 0, 1000);
		if (ret)
			return ret;

		/* Give the slave device a suitable dynamic address */
		ret = i3c_master_get_free_addr(&master->base, last_addr + 1);
		if (ret < 0)
			return ret;

		addrs[dev_nb] = ret;
		dev_dbg(master->dev, "DAA: device %d assigned to 0x%02x\n",
			dev_nb, addrs[dev_nb]);

		writel(addrs[dev_nb], master->regs + SVC_I3C_MWDATAB);
		last_addr = addrs[dev_nb++];
	}

	*count = dev_nb;

	return 0;
}

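/*
 * The IBIRULES register describes up to SVC_I3C_IBIRULES_ADDRS devices whose
 * IBIs the controller may acknowledge automatically, each address packed on
 * 6 bits. As a purely illustrative example, addresses 0x0a (slot 0) and 0x1b
 * (slot 1) would be encoded as (0x0a << 0) | (0x1b << 6) within bits 29:0.
 * One register image is built for devices carrying a mandatory byte and one
 * for devices without, but only one of them can be programmed.
 */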
static int svc_i3c_update_ibirules(struct svc_i3c_master *master)
{
	struct i3c_dev_desc *dev;
	u32 reg_mbyte = 0, reg_nobyte = SVC_I3C_IBIRULES_NOBYTE;
	unsigned int mbyte_addr_ok = 0, mbyte_addr_ko = 0, nobyte_addr_ok = 0,
		nobyte_addr_ko = 0;
	bool list_mbyte = false, list_nobyte = false;

	/* Create the IBIRULES register for both cases */
	i3c_bus_for_each_i3cdev(&master->base.bus, dev) {
		if (I3C_BCR_DEVICE_ROLE(dev->info.bcr) == I3C_BCR_I3C_MASTER)
			continue;

		if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD) {
			reg_mbyte |= SVC_I3C_IBIRULES_ADDR(mbyte_addr_ok,
							   dev->info.dyn_addr);

			/* IBI rules cannot be applied to devices with MSb=1 */
			if (dev->info.dyn_addr & BIT(7))
				mbyte_addr_ko++;
			else
				mbyte_addr_ok++;
		} else {
			reg_nobyte |= SVC_I3C_IBIRULES_ADDR(nobyte_addr_ok,
							    dev->info.dyn_addr);

			/* IBI rules cannot be applied to devices with MSb=1 */
			if (dev->info.dyn_addr & BIT(7))
				nobyte_addr_ko++;
			else
				nobyte_addr_ok++;
		}
	}

	/* Check whether each list can be handled by the hardware */
	if (!mbyte_addr_ko && mbyte_addr_ok <= SVC_I3C_IBIRULES_ADDRS)
		list_mbyte = true;

	if (!nobyte_addr_ko && nobyte_addr_ok <= SVC_I3C_IBIRULES_ADDRS)
		list_nobyte = true;

	/* No list can be properly handled, return an error */
	if (!list_mbyte && !list_nobyte)
		return -ERANGE;

	/* Pick a list that the hardware can handle, the mandatory-byte one first */
	if (list_mbyte)
		writel(reg_mbyte, master->regs + SVC_I3C_IBIRULES);
	else
		writel(reg_nobyte, master->regs + SVC_I3C_IBIRULES);

	return 0;
}

static int svc_i3c_master_do_daa(struct i3c_master_controller *m)
{
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	u8 addrs[SVC_I3C_MAX_DEVS];
	unsigned long flags;
	unsigned int dev_nb;
	int ret, i;

	spin_lock_irqsave(&master->xferqueue.lock, flags);
	ret = svc_i3c_master_do_daa_locked(master, addrs, &dev_nb);
	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
	if (ret)
		goto emit_stop;

	/* Register the devices that took part in the DAA to the core */
	for (i = 0; i < dev_nb; i++) {
		ret = i3c_master_add_i3c_dev_locked(m, addrs[i]);
		if (ret)
			return ret;
	}

	/* Configure IBI auto-rules */
	ret = svc_i3c_update_ibirules(master);
	if (ret) {
		dev_err(master->dev, "Cannot handle such a list of devices\n");
		return ret;
	}

	return 0;

emit_stop:
	svc_i3c_master_emit_stop(master);
	svc_i3c_master_clear_merrwarn(master);

	return ret;
}

static int svc_i3c_master_read(struct svc_i3c_master *master,
			       u8 *in, unsigned int len)
{
	int offset = 0, i, ret;
	u32 mdctrl;

	while (offset < len) {
		unsigned int count;

		ret = readl_poll_timeout(master->regs + SVC_I3C_MDATACTRL,
					 mdctrl,
					 !(mdctrl & SVC_I3C_MDATACTRL_RXEMPTY),
					 0, 1000);
		if (ret)
			return ret;

		count = SVC_I3C_MDATACTRL_RXCOUNT(mdctrl);
		for (i = 0; i < count; i++)
			in[offset + i] = readl(master->regs + SVC_I3C_MRDATAB);

		offset += count;
	}

	return 0;
}

static int svc_i3c_master_write(struct svc_i3c_master *master,
				const u8 *out, unsigned int len)
{
	int offset = 0, ret;
	u32 mdctrl;

	while (offset < len) {
		ret = readl_poll_timeout(master->regs + SVC_I3C_MDATACTRL,
					 mdctrl,
					 !(mdctrl & SVC_I3C_MDATACTRL_TXFULL),
					 0, 1000);
		if (ret)
			return ret;

		/*
		 * The last byte to be sent over the bus must either have the
		 * "end" bit set or be written in MWDATABE.
		 */
		if (likely(offset < (len - 1)))
			writel(out[offset++], master->regs + SVC_I3C_MWDATAB);
		else
			writel(out[offset++], master->regs + SVC_I3C_MWDATABE);
	}

	return 0;
}

static int svc_i3c_master_xfer(struct svc_i3c_master *master,
			       bool rnw, unsigned int xfer_type, u8 addr,
			       u8 *in, const u8 *out, unsigned int xfer_len,
			       unsigned int read_len, bool continued)
{
	u32 reg;
	int ret;

	writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
	       xfer_type |
	       SVC_I3C_MCTRL_IBIRESP_NACK |
	       SVC_I3C_MCTRL_DIR(rnw) |
	       SVC_I3C_MCTRL_ADDR(addr) |
	       SVC_I3C_MCTRL_RDTERM(read_len),
	       master->regs + SVC_I3C_MCTRL);

	ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
				 SVC_I3C_MSTATUS_MCTRLDONE(reg), 0, 1000);
	if (ret)
		goto emit_stop;

	if (rnw)
		ret = svc_i3c_master_read(master, in, xfer_len);
	else
		ret = svc_i3c_master_write(master, out, xfer_len);
	if (ret)
		goto emit_stop;

	ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
				 SVC_I3C_MSTATUS_COMPLETE(reg), 0, 1000);
	if (ret)
		goto emit_stop;

	if (!continued)
		svc_i3c_master_emit_stop(master);

	return 0;

emit_stop:
	svc_i3c_master_emit_stop(master);
	svc_i3c_master_clear_merrwarn(master);

	return ret;
}

static struct svc_i3c_xfer *
svc_i3c_master_alloc_xfer(struct svc_i3c_master *master, unsigned int ncmds)
{
	struct svc_i3c_xfer *xfer;

	xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
	if (!xfer)
		return NULL;

	INIT_LIST_HEAD(&xfer->node);
	xfer->ncmds = ncmds;
	xfer->ret = -ETIMEDOUT;

	return xfer;
}

static void svc_i3c_master_free_xfer(struct svc_i3c_xfer *xfer)
{
	kfree(xfer);
}

static void svc_i3c_master_dequeue_xfer_locked(struct svc_i3c_master *master,
					       struct svc_i3c_xfer *xfer)
{
	if (master->xferqueue.cur == xfer)
		master->xferqueue.cur = NULL;
	else
		list_del_init(&xfer->node);
}

static void svc_i3c_master_dequeue_xfer(struct svc_i3c_master *master,
					struct svc_i3c_xfer *xfer)
{
	unsigned long flags;

	spin_lock_irqsave(&master->xferqueue.lock, flags);
	svc_i3c_master_dequeue_xfer_locked(master, xfer);
	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
}

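/*
 * Transfers are serialized through the @xferqueue: the current transfer is
 * run command by command under the queue lock, and once it completes the
 * next pending transfer (if any) is popped from the list and started in turn.
 */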
static void svc_i3c_master_start_xfer_locked(struct svc_i3c_master *master)
{
	struct svc_i3c_xfer *xfer = master->xferqueue.cur;
	int ret, i;

	if (!xfer)
		return;

	for (i = 0; i < xfer->ncmds; i++) {
		struct svc_i3c_cmd *cmd = &xfer->cmds[i];

		ret = svc_i3c_master_xfer(master, cmd->rnw, xfer->type,
					  cmd->addr, cmd->in, cmd->out,
					  cmd->len, cmd->read_len,
					  cmd->continued);
		if (ret)
			break;
	}

	xfer->ret = ret;
	complete(&xfer->comp);

	if (ret < 0)
		svc_i3c_master_dequeue_xfer_locked(master, xfer);

	xfer = list_first_entry_or_null(&master->xferqueue.list,
					struct svc_i3c_xfer,
					node);
	if (xfer)
		list_del_init(&xfer->node);

	master->xferqueue.cur = xfer;
	svc_i3c_master_start_xfer_locked(master);
}

static void svc_i3c_master_enqueue_xfer(struct svc_i3c_master *master,
					struct svc_i3c_xfer *xfer)
{
	unsigned long flags;

	init_completion(&xfer->comp);
	spin_lock_irqsave(&master->xferqueue.lock, flags);
	if (master->xferqueue.cur) {
		list_add_tail(&xfer->node, &master->xferqueue.list);
	} else {
		master->xferqueue.cur = xfer;
		svc_i3c_master_start_xfer_locked(master);
	}
	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
}

static bool
svc_i3c_master_supports_ccc_cmd(struct i3c_master_controller *master,
				const struct i3c_ccc_cmd *cmd)
{
	/* No software support for CCC commands targeting more than one slave */
	return (cmd->ndests == 1);
}

static int svc_i3c_master_send_bdcast_ccc_cmd(struct svc_i3c_master *master,
					      struct i3c_ccc_cmd *ccc)
{
	unsigned int xfer_len = ccc->dests[0].payload.len + 1;
	struct svc_i3c_xfer *xfer;
	struct svc_i3c_cmd *cmd;
	u8 *buf;
	int ret;

	xfer = svc_i3c_master_alloc_xfer(master, 1);
	if (!xfer)
		return -ENOMEM;

	buf = kmalloc(xfer_len, GFP_KERNEL);
	if (!buf) {
		svc_i3c_master_free_xfer(xfer);
		return -ENOMEM;
	}

	buf[0] = ccc->id;
	memcpy(&buf[1], ccc->dests[0].payload.data, ccc->dests[0].payload.len);

	xfer->type = SVC_I3C_MCTRL_TYPE_I3C;

	cmd = &xfer->cmds[0];
	cmd->addr = ccc->dests[0].addr;
	cmd->rnw = ccc->rnw;
	cmd->in = NULL;
	cmd->out = buf;
	cmd->len = xfer_len;
	cmd->read_len = 0;
	cmd->continued = false;

	svc_i3c_master_enqueue_xfer(master, xfer);
	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
		svc_i3c_master_dequeue_xfer(master, xfer);

	ret = xfer->ret;
	kfree(buf);
	svc_i3c_master_free_xfer(xfer);

	return ret;
}

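/*
 * A direct CCC is emitted as two chained commands: a broadcast write of the
 * CCC identifier to I3C_BROADCAST_ADDR first, then a repeated start to the
 * target address carrying the actual payload in the requested direction.
 */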
static int svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master *master,
					      struct i3c_ccc_cmd *ccc)
{
	unsigned int xfer_len = ccc->dests[0].payload.len;
	unsigned int read_len = ccc->rnw ? xfer_len : 0;
	struct svc_i3c_xfer *xfer;
	struct svc_i3c_cmd *cmd;
	int ret;

	xfer = svc_i3c_master_alloc_xfer(master, 2);
	if (!xfer)
		return -ENOMEM;

	xfer->type = SVC_I3C_MCTRL_TYPE_I3C;

	/* Broadcasted message */
	cmd = &xfer->cmds[0];
	cmd->addr = I3C_BROADCAST_ADDR;
	cmd->rnw = 0;
	cmd->in = NULL;
	cmd->out = &ccc->id;
	cmd->len = 1;
	cmd->read_len = 0;
	cmd->continued = true;

	/* Directed message */
	cmd = &xfer->cmds[1];
	cmd->addr = ccc->dests[0].addr;
	cmd->rnw = ccc->rnw;
	cmd->in = ccc->rnw ? ccc->dests[0].payload.data : NULL;
	cmd->out = ccc->rnw ? NULL : ccc->dests[0].payload.data;
	cmd->len = xfer_len;
	cmd->read_len = read_len;
	cmd->continued = false;

	svc_i3c_master_enqueue_xfer(master, xfer);
	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
		svc_i3c_master_dequeue_xfer(master, xfer);

	ret = xfer->ret;
	svc_i3c_master_free_xfer(xfer);

	return ret;
}

static int svc_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
				       struct i3c_ccc_cmd *cmd)
{
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	bool broadcast = cmd->id < 0x80;

	if (broadcast)
		return svc_i3c_master_send_bdcast_ccc_cmd(master, cmd);
	else
		return svc_i3c_master_send_direct_ccc_cmd(master, cmd);
}

static int svc_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
				     struct i3c_priv_xfer *xfers,
				     int nxfers)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	struct svc_i3c_xfer *xfer;
	int ret, i;

	xfer = svc_i3c_master_alloc_xfer(master, nxfers);
	if (!xfer)
		return -ENOMEM;

	xfer->type = SVC_I3C_MCTRL_TYPE_I3C;

	for (i = 0; i < nxfers; i++) {
		struct svc_i3c_cmd *cmd = &xfer->cmds[i];

		cmd->addr = master->addrs[data->index];
		cmd->rnw = xfers[i].rnw;
		cmd->in = xfers[i].rnw ? xfers[i].data.in : NULL;
		cmd->out = xfers[i].rnw ? NULL : xfers[i].data.out;
		cmd->len = xfers[i].len;
		cmd->read_len = xfers[i].rnw ? xfers[i].len : 0;
		cmd->continued = (i + 1) < nxfers;
	}

	svc_i3c_master_enqueue_xfer(master, xfer);
	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
		svc_i3c_master_dequeue_xfer(master, xfer);

	ret = xfer->ret;
	svc_i3c_master_free_xfer(xfer);

	return ret;
}

static int svc_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
				    const struct i2c_msg *xfers,
				    int nxfers)
{
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	struct svc_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
	struct svc_i3c_xfer *xfer;
	int ret, i;

	xfer = svc_i3c_master_alloc_xfer(master, nxfers);
	if (!xfer)
		return -ENOMEM;

	xfer->type = SVC_I3C_MCTRL_TYPE_I2C;

	for (i = 0; i < nxfers; i++) {
		struct svc_i3c_cmd *cmd = &xfer->cmds[i];

		cmd->addr = master->addrs[data->index];
		cmd->rnw = xfers[i].flags & I2C_M_RD;
		cmd->in = cmd->rnw ? xfers[i].buf : NULL;
		cmd->out = cmd->rnw ? NULL : xfers[i].buf;
		cmd->len = xfers[i].len;
		cmd->read_len = cmd->rnw ? xfers[i].len : 0;
		cmd->continued = (i + 1 < nxfers);
	}

	svc_i3c_master_enqueue_xfer(master, xfer);
	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
		svc_i3c_master_dequeue_xfer(master, xfer);

	ret = xfer->ret;
	svc_i3c_master_free_xfer(xfer);

	return ret;
}

static int svc_i3c_master_request_ibi(struct i3c_dev_desc *dev,
				      const struct i3c_ibi_setup *req)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	unsigned long flags;
	unsigned int i;

	if (dev->ibi->max_payload_len > SVC_I3C_FIFO_SIZE) {
		dev_err(master->dev, "IBI max payload %d should be <= %d\n",
			dev->ibi->max_payload_len, SVC_I3C_FIFO_SIZE);
		return -ERANGE;
	}

	data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
	if (IS_ERR(data->ibi_pool))
		return PTR_ERR(data->ibi_pool);

	spin_lock_irqsave(&master->ibi.lock, flags);
	for (i = 0; i < master->ibi.num_slots; i++) {
		if (!master->ibi.slots[i]) {
			data->ibi = i;
			master->ibi.slots[i] = dev;
			break;
		}
	}
	spin_unlock_irqrestore(&master->ibi.lock, flags);

	if (i < master->ibi.num_slots)
		return 0;

	i3c_generic_ibi_free_pool(data->ibi_pool);
	data->ibi_pool = NULL;

	return -ENOSPC;
}

static void svc_i3c_master_free_ibi(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	unsigned long flags;

	spin_lock_irqsave(&master->ibi.lock, flags);
	master->ibi.slots[data->ibi] = NULL;
	data->ibi = -1;
	spin_unlock_irqrestore(&master->ibi.lock, flags);

	i3c_generic_ibi_free_pool(data->ibi_pool);
}

static int svc_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);

	return i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
}

static int svc_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);

	return i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
}

static void svc_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
					    struct i3c_ibi_slot *slot)
{
	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);

	i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
}

static const struct i3c_master_controller_ops svc_i3c_master_ops = {
	.bus_init = svc_i3c_master_bus_init,
	.bus_cleanup = svc_i3c_master_bus_cleanup,
	.attach_i3c_dev = svc_i3c_master_attach_i3c_dev,
	.detach_i3c_dev = svc_i3c_master_detach_i3c_dev,
	.reattach_i3c_dev = svc_i3c_master_reattach_i3c_dev,
	.attach_i2c_dev = svc_i3c_master_attach_i2c_dev,
	.detach_i2c_dev = svc_i3c_master_detach_i2c_dev,
	.do_daa = svc_i3c_master_do_daa,
	.supports_ccc_cmd = svc_i3c_master_supports_ccc_cmd,
	.send_ccc_cmd = svc_i3c_master_send_ccc_cmd,
	.priv_xfers = svc_i3c_master_priv_xfers,
	.i2c_xfers = svc_i3c_master_i2c_xfers,
	.request_ibi = svc_i3c_master_request_ibi,
	.free_ibi = svc_i3c_master_free_ibi,
	.recycle_ibi_slot = svc_i3c_master_recycle_ibi_slot,
	.enable_ibi = svc_i3c_master_enable_ibi,
	.disable_ibi = svc_i3c_master_disable_ibi,
};

static void svc_i3c_master_reset(struct svc_i3c_master *master)
{
	u32 reg;

	/* Clear pending warnings */
	writel(readl(master->regs + SVC_I3C_MERRWARN),
	       master->regs + SVC_I3C_MERRWARN);

	/* Set RX and TX trigger levels, flush FIFOs */
	reg = SVC_I3C_MDATACTRL_FLUSHTB |
	      SVC_I3C_MDATACTRL_FLUSHRB |
	      SVC_I3C_MDATACTRL_UNLOCK_TRIG |
	      SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL |
	      SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY;
	writel(reg, master->regs + SVC_I3C_MDATACTRL);

	svc_i3c_master_disable_interrupts(master);
}

static int svc_i3c_master_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct svc_i3c_master *master;
	int ret;

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(master->regs))
		return PTR_ERR(master->regs);

	master->pclk = devm_clk_get(dev, "pclk");
	if (IS_ERR(master->pclk))
		return PTR_ERR(master->pclk);

	master->fclk = devm_clk_get(dev, "fast_clk");
	if (IS_ERR(master->fclk))
		return PTR_ERR(master->fclk);

	master->sclk = devm_clk_get(dev, "slow_clk");
	if (IS_ERR(master->sclk))
		return PTR_ERR(master->sclk);

	master->irq = platform_get_irq(pdev, 0);
	if (master->irq <= 0)
		return -ENOENT;

	master->dev = dev;

	svc_i3c_master_reset(master);

	ret = clk_prepare_enable(master->pclk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(master->fclk);
	if (ret)
		goto err_disable_pclk;

	ret = clk_prepare_enable(master->sclk);
	if (ret)
		goto err_disable_fclk;

	INIT_WORK(&master->hj_work, svc_i3c_master_hj_work);
	INIT_WORK(&master->ibi_work, svc_i3c_master_ibi_work);
	ret = devm_request_irq(dev, master->irq, svc_i3c_master_irq_handler,
			       IRQF_NO_SUSPEND, "svc-i3c-irq", master);
	if (ret)
		goto err_disable_sclk;

	master->free_slots = GENMASK(SVC_I3C_MAX_DEVS - 1, 0);

	spin_lock_init(&master->xferqueue.lock);
	INIT_LIST_HEAD(&master->xferqueue.list);

	spin_lock_init(&master->ibi.lock);
	master->ibi.num_slots = SVC_I3C_MAX_DEVS;
	master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
					 sizeof(*master->ibi.slots),
					 GFP_KERNEL);
	if (!master->ibi.slots) {
		ret = -ENOMEM;
		goto err_disable_sclk;
	}

	platform_set_drvdata(pdev, master);

	/* Register the master */
	ret = i3c_master_register(&master->base, &pdev->dev,
				  &svc_i3c_master_ops, false);
	if (ret)
		goto err_disable_sclk;

	return 0;

err_disable_sclk:
	clk_disable_unprepare(master->sclk);

err_disable_fclk:
	clk_disable_unprepare(master->fclk);

err_disable_pclk:
	clk_disable_unprepare(master->pclk);

	return ret;
}

static int svc_i3c_master_remove(struct platform_device *pdev)
{
	struct svc_i3c_master *master = platform_get_drvdata(pdev);
	int ret;

	ret = i3c_master_unregister(&master->base);
	if (ret)
		return ret;

	/* The IRQ is devm-managed, only the clocks need to be released here */
	clk_disable_unprepare(master->pclk);
	clk_disable_unprepare(master->fclk);
	clk_disable_unprepare(master->sclk);

	return 0;
}

static const struct of_device_id svc_i3c_master_of_match_tbl[] = {
	{ .compatible = "silvaco,i3c-master" },
	{ /* sentinel */ },
};

static struct platform_driver svc_i3c_master = {
	.probe = svc_i3c_master_probe,
	.remove = svc_i3c_master_remove,
	.driver = {
		.name = "silvaco-i3c-master",
		.of_match_table = svc_i3c_master_of_match_tbl,
	},
};
module_platform_driver(svc_i3c_master);

MODULE_AUTHOR("Conor Culhane <conor.culhane@silvaco.com>");
MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
MODULE_DESCRIPTION("Silvaco dual-role I3C master driver");
MODULE_LICENSE("GPL v2");