// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Cadence Design Systems Inc.
 *
 * Author: Boris Brezillon <boris.brezillon@bootlin.com>
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/i3c/master.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#define DEV_ID 0x0
#define DEV_ID_I3C_MASTER 0x5034

#define CONF_STATUS0 0x4
#define CONF_STATUS0_CMDR_DEPTH(x) (4 << (((x) & GENMASK(31, 29)) >> 29))
#define CONF_STATUS0_ECC_CHK BIT(28)
#define CONF_STATUS0_INTEG_CHK BIT(27)
#define CONF_STATUS0_CSR_DAP_CHK BIT(26)
#define CONF_STATUS0_TRANS_TOUT_CHK BIT(25)
#define CONF_STATUS0_PROT_FAULTS_CHK BIT(24)
#define CONF_STATUS0_GPO_NUM(x) (((x) & GENMASK(23, 16)) >> 16)
#define CONF_STATUS0_GPI_NUM(x) (((x) & GENMASK(15, 8)) >> 8)
#define CONF_STATUS0_IBIR_DEPTH(x) (4 << (((x) & GENMASK(7, 6)) >> 6))
#define CONF_STATUS0_SUPPORTS_DDR BIT(5)
#define CONF_STATUS0_SEC_MASTER BIT(4)
#define CONF_STATUS0_DEVS_NUM(x) ((x) & GENMASK(3, 0))

#define CONF_STATUS1 0x8
#define CONF_STATUS1_IBI_HW_RES(x) ((((x) & GENMASK(31, 28)) >> 28) + 1)
#define CONF_STATUS1_CMD_DEPTH(x) (4 << (((x) & GENMASK(27, 26)) >> 26))
#define CONF_STATUS1_SLVDDR_RX_DEPTH(x) (8 << (((x) & GENMASK(25, 21)) >> 21))
#define CONF_STATUS1_SLVDDR_TX_DEPTH(x) (8 << (((x) & GENMASK(20, 16)) >> 16))
#define CONF_STATUS1_IBI_DEPTH(x) (2 << (((x) & GENMASK(12, 10)) >> 10))
#define CONF_STATUS1_RX_DEPTH(x) (8 << (((x) & GENMASK(9, 5)) >> 5))
#define CONF_STATUS1_TX_DEPTH(x) (8 << ((x) & GENMASK(4, 0)))

#define REV_ID 0xc
#define REV_ID_VID(id) (((id) & GENMASK(31, 20)) >> 20)
#define REV_ID_PID(id) (((id) & GENMASK(19, 8)) >> 8)
#define REV_ID_REV_MAJOR(id) (((id) & GENMASK(7, 4)) >> 4)
#define REV_ID_REV_MINOR(id) ((id) & GENMASK(3, 0))

#define CTRL 0x10
#define CTRL_DEV_EN BIT(31)
#define CTRL_HALT_EN BIT(30)
#define CTRL_MCS BIT(29)
#define CTRL_MCS_EN BIT(28)
#define CTRL_HJ_DISEC BIT(8)
#define CTRL_MST_ACK BIT(7)
#define CTRL_HJ_ACK BIT(6)
#define CTRL_HJ_INIT BIT(5)
#define CTRL_MST_INIT BIT(4)
#define CTRL_AHDR_OPT BIT(3)
#define CTRL_PURE_BUS_MODE 0
#define CTRL_MIXED_FAST_BUS_MODE 2
#define CTRL_MIXED_SLOW_BUS_MODE 3
#define CTRL_BUS_MODE_MASK GENMASK(1, 0)

#define PRESCL_CTRL0 0x14
#define PRESCL_CTRL0_I2C(x) ((x) << 16)
#define PRESCL_CTRL0_I3C(x) (x)
#define PRESCL_CTRL0_MAX GENMASK(9, 0)

#define PRESCL_CTRL1 0x18
#define PRESCL_CTRL1_PP_LOW_MASK GENMASK(15, 8)
#define PRESCL_CTRL1_PP_LOW(x) ((x) << 8)
#define PRESCL_CTRL1_OD_LOW_MASK GENMASK(7, 0)
#define PRESCL_CTRL1_OD_LOW(x) (x)

#define MST_IER 0x20
#define MST_IDR 0x24
#define MST_IMR 0x28
#define MST_ICR 0x2c
#define MST_ISR 0x30
#define MST_INT_HALTED BIT(18)
#define MST_INT_MR_DONE BIT(17)
#define MST_INT_IMM_COMP BIT(16)
#define MST_INT_TX_THR BIT(15)
#define MST_INT_TX_OVF BIT(14)
#define MST_INT_IBID_THR BIT(12)
#define MST_INT_IBID_UNF BIT(11)
#define MST_INT_IBIR_THR BIT(10)
#define MST_INT_IBIR_UNF BIT(9)
#define MST_INT_IBIR_OVF BIT(8)
#define MST_INT_RX_THR BIT(7)
#define MST_INT_RX_UNF BIT(6)
#define MST_INT_CMDD_EMP BIT(5)
#define MST_INT_CMDD_THR BIT(4)
#define MST_INT_CMDD_OVF BIT(3)
#define MST_INT_CMDR_THR BIT(2)
#define MST_INT_CMDR_UNF BIT(1)
#define MST_INT_CMDR_OVF BIT(0)

#define MST_STATUS0 0x34
#define MST_STATUS0_IDLE BIT(18)
#define MST_STATUS0_HALTED BIT(17)
#define MST_STATUS0_MASTER_MODE BIT(16)
#define MST_STATUS0_TX_FULL BIT(13)
#define MST_STATUS0_IBID_FULL BIT(12)
#define MST_STATUS0_IBIR_FULL BIT(11)
#define MST_STATUS0_RX_FULL BIT(10)
#define MST_STATUS0_CMDD_FULL BIT(9)
#define MST_STATUS0_CMDR_FULL BIT(8)
#define MST_STATUS0_TX_EMP BIT(5)
#define MST_STATUS0_IBID_EMP BIT(4)
#define MST_STATUS0_IBIR_EMP BIT(3)
#define MST_STATUS0_RX_EMP BIT(2)
#define MST_STATUS0_CMDD_EMP BIT(1)
#define MST_STATUS0_CMDR_EMP BIT(0)

#define CMDR 0x38
#define CMDR_NO_ERROR 0
#define CMDR_DDR_PREAMBLE_ERROR 1
#define CMDR_DDR_PARITY_ERROR 2
#define CMDR_DDR_RX_FIFO_OVF 3
#define CMDR_DDR_TX_FIFO_UNF 4
#define CMDR_M0_ERROR 5
#define CMDR_M1_ERROR 6
#define CMDR_M2_ERROR 7
#define CMDR_MST_ABORT 8
#define CMDR_NACK_RESP 9
#define CMDR_INVALID_DA 10
#define CMDR_DDR_DROPPED 11
#define CMDR_ERROR(x) (((x) & GENMASK(27, 24)) >> 24)
#define CMDR_XFER_BYTES(x) (((x) & GENMASK(19, 8)) >> 8)
#define CMDR_CMDID_HJACK_DISEC 0xfe
#define CMDR_CMDID_HJACK_ENTDAA 0xff
#define CMDR_CMDID(x) ((x) & GENMASK(7, 0))

#define IBIR 0x3c
#define IBIR_ACKED BIT(12)
#define IBIR_SLVID(x) (((x) & GENMASK(11, 8)) >> 8)
#define IBIR_ERROR BIT(7)
#define IBIR_XFER_BYTES(x) (((x) & GENMASK(6, 2)) >> 2)
#define IBIR_TYPE_IBI 0
#define IBIR_TYPE_HJ 1
#define IBIR_TYPE_MR 2
#define IBIR_TYPE(x) ((x) & GENMASK(1, 0))

#define SLV_IER 0x40
#define SLV_IDR 0x44
#define SLV_IMR 0x48
#define SLV_ICR 0x4c
#define SLV_ISR 0x50
#define SLV_INT_TM BIT(20)
#define SLV_INT_ERROR BIT(19)
#define SLV_INT_EVENT_UP BIT(18)
#define SLV_INT_HJ_DONE BIT(17)
#define SLV_INT_MR_DONE BIT(16)
#define SLV_INT_DA_UPD BIT(15)
#define SLV_INT_SDR_FAIL BIT(14)
#define SLV_INT_DDR_FAIL BIT(13)
#define SLV_INT_M_RD_ABORT BIT(12)
#define SLV_INT_DDR_RX_THR BIT(11)
#define SLV_INT_DDR_TX_THR BIT(10)
#define SLV_INT_SDR_RX_THR BIT(9)
#define SLV_INT_SDR_TX_THR BIT(8)
#define SLV_INT_DDR_RX_UNF BIT(7)
#define SLV_INT_DDR_TX_OVF BIT(6)
#define SLV_INT_SDR_RX_UNF BIT(5)
#define SLV_INT_SDR_TX_OVF BIT(4)
#define SLV_INT_DDR_RD_COMP BIT(3)
#define SLV_INT_DDR_WR_COMP BIT(2)
#define SLV_INT_SDR_RD_COMP BIT(1)
#define SLV_INT_SDR_WR_COMP BIT(0)

#define SLV_STATUS0 0x54
#define SLV_STATUS0_REG_ADDR(s) (((s) & GENMASK(23, 16)) >> 16)
#define SLV_STATUS0_XFRD_BYTES(s) ((s) & GENMASK(15, 0))

#define SLV_STATUS1 0x58
#define SLV_STATUS1_AS(s) (((s) & GENMASK(21, 20)) >> 20)
#define SLV_STATUS1_VEN_TM BIT(19)
#define SLV_STATUS1_HJ_DIS BIT(18)
#define SLV_STATUS1_MR_DIS BIT(17)
#define SLV_STATUS1_PROT_ERR BIT(16)
#define SLV_STATUS1_DA(s) (((s) & GENMASK(15, 9)) >> 9)
#define SLV_STATUS1_HAS_DA BIT(8)
#define SLV_STATUS1_DDR_RX_FULL BIT(7)
#define SLV_STATUS1_DDR_TX_FULL BIT(6)
#define SLV_STATUS1_DDR_RX_EMPTY BIT(5)
#define SLV_STATUS1_DDR_TX_EMPTY BIT(4)
#define SLV_STATUS1_SDR_RX_FULL BIT(3)
#define SLV_STATUS1_SDR_TX_FULL BIT(2)
#define SLV_STATUS1_SDR_RX_EMPTY BIT(1)
#define SLV_STATUS1_SDR_TX_EMPTY BIT(0)

#define CMD0_FIFO 0x60
#define CMD0_FIFO_IS_DDR BIT(31)
#define CMD0_FIFO_IS_CCC BIT(30)
#define CMD0_FIFO_BCH BIT(29)
#define XMIT_BURST_STATIC_SUBADDR 0
#define XMIT_SINGLE_INC_SUBADDR 1
#define XMIT_SINGLE_STATIC_SUBADDR 2
#define XMIT_BURST_WITHOUT_SUBADDR 3
#define CMD0_FIFO_PRIV_XMIT_MODE(m) ((m) << 27)
#define CMD0_FIFO_SBCA BIT(26)
#define CMD0_FIFO_RSBC BIT(25)
#define CMD0_FIFO_IS_10B BIT(24)
#define CMD0_FIFO_PL_LEN(l) ((l) << 12)
#define CMD0_FIFO_PL_LEN_MAX 4095
#define CMD0_FIFO_DEV_ADDR(a) ((a) << 1)
#define CMD0_FIFO_RNW BIT(0)

#define CMD1_FIFO 0x64
#define CMD1_FIFO_CMDID(id) ((id) << 24)
#define CMD1_FIFO_CSRADDR(a) (a)
#define CMD1_FIFO_CCC(id) (id)

#define TX_FIFO 0x68

#define IMD_CMD0 0x70
#define IMD_CMD0_PL_LEN(l) ((l) << 12)
#define IMD_CMD0_DEV_ADDR(a) ((a) << 1)
#define IMD_CMD0_RNW BIT(0)

#define IMD_CMD1 0x74
#define IMD_CMD1_CCC(id) (id)

#define IMD_DATA 0x78
#define RX_FIFO 0x80
#define IBI_DATA_FIFO 0x84
#define SLV_DDR_TX_FIFO 0x88
#define SLV_DDR_RX_FIFO 0x8c

#define CMD_IBI_THR_CTRL 0x90
#define IBIR_THR(t) ((t) << 24)
#define CMDR_THR(t) ((t) << 16)
#define IBI_THR(t) ((t) << 8)
#define CMD_THR(t) (t)

#define TX_RX_THR_CTRL 0x94
#define RX_THR(t) ((t) << 16)
#define TX_THR(t) (t)

#define SLV_DDR_TX_RX_THR_CTRL 0x98
#define SLV_DDR_RX_THR(t) ((t) << 16)
#define SLV_DDR_TX_THR(t) (t)

#define FLUSH_CTRL 0x9c
#define FLUSH_IBI_RESP BIT(23)
#define FLUSH_CMD_RESP BIT(22)
#define FLUSH_SLV_DDR_RX_FIFO BIT(22)
#define FLUSH_SLV_DDR_TX_FIFO BIT(21)
#define FLUSH_IMM_FIFO BIT(20)
#define FLUSH_IBI_FIFO BIT(19)
#define FLUSH_RX_FIFO BIT(18)
#define FLUSH_TX_FIFO BIT(17)
#define FLUSH_CMD_FIFO BIT(16)

#define TTO_PRESCL_CTRL0 0xb0
#define TTO_PRESCL_CTRL0_DIVB(x) ((x) << 16)
#define TTO_PRESCL_CTRL0_DIVA(x) (x)

#define TTO_PRESCL_CTRL1 0xb4
#define TTO_PRESCL_CTRL1_DIVB(x) ((x) << 16)
#define TTO_PRESCL_CTRL1_DIVA(x) (x)

#define DEVS_CTRL 0xb8
#define DEVS_CTRL_DEV_CLR_SHIFT 16
#define DEVS_CTRL_DEV_CLR_ALL GENMASK(31, 16)
#define DEVS_CTRL_DEV_CLR(dev) BIT(16 + (dev))
#define DEVS_CTRL_DEV_ACTIVE(dev) BIT(dev)
#define DEVS_CTRL_DEVS_ACTIVE_MASK GENMASK(15, 0)
#define MAX_DEVS 16

#define DEV_ID_RR0(d) (0xc0 + ((d) * 0x10))
#define DEV_ID_RR0_LVR_EXT_ADDR BIT(11)
#define DEV_ID_RR0_HDR_CAP BIT(10)
#define DEV_ID_RR0_IS_I3C BIT(9)
#define DEV_ID_RR0_DEV_ADDR_MASK (GENMASK(6, 0) | GENMASK(15, 13))
#define DEV_ID_RR0_SET_DEV_ADDR(a) (((a) & GENMASK(6, 0)) | \
				    (((a) & GENMASK(9, 7)) << 6))
#define DEV_ID_RR0_GET_DEV_ADDR(x) ((((x) >> 1) & GENMASK(6, 0)) | \
				    (((x) >> 6) & GENMASK(9, 7)))

#define DEV_ID_RR1(d) (0xc4 + ((d) * 0x10))
#define DEV_ID_RR1_PID_MSB(pid) (pid)

#define DEV_ID_RR2(d) (0xc8 + ((d) * 0x10))
#define DEV_ID_RR2_PID_LSB(pid) ((pid) << 16)
#define DEV_ID_RR2_BCR(bcr) ((bcr) << 8)
#define DEV_ID_RR2_DCR(dcr) (dcr)
#define DEV_ID_RR2_LVR(lvr) (lvr)

#define SIR_MAP(x) (0x180 + ((x) * 4))
#define SIR_MAP_DEV_REG(d) SIR_MAP((d) / 2)
#define SIR_MAP_DEV_SHIFT(d, fs) ((fs) + (((d) % 2) ? 16 : 0))
#define SIR_MAP_DEV_CONF_MASK(d) (GENMASK(15, 0) << (((d) % 2) ? 16 : 0))
#define SIR_MAP_DEV_CONF(d, c) ((c) << (((d) % 2) ? 16 : 0))
#define DEV_ROLE_SLAVE 0
#define DEV_ROLE_MASTER 1
#define SIR_MAP_DEV_ROLE(role) ((role) << 14)
#define SIR_MAP_DEV_SLOW BIT(13)
#define SIR_MAP_DEV_PL(l) ((l) << 8)
#define SIR_MAP_PL_MAX GENMASK(4, 0)
#define SIR_MAP_DEV_DA(a) ((a) << 1)
#define SIR_MAP_DEV_ACK BIT(0)

#define GPIR_WORD(x) (0x200 + ((x) * 4))
#define GPI_REG(val, id) \
	(((val) >> (((id) % 4) * 8)) & GENMASK(7, 0))

#define GPOR_WORD(x) (0x220 + ((x) * 4))
#define GPO_REG(val, id) \
	(((val) >> (((id) % 4) * 8)) & GENMASK(7, 0))

#define ASF_INT_STATUS 0x300
#define ASF_INT_RAW_STATUS 0x304
#define ASF_INT_MASK 0x308
#define ASF_INT_TEST 0x30c
#define ASF_INT_FATAL_SELECT 0x310
#define ASF_INTEGRITY_ERR BIT(6)
#define ASF_PROTOCOL_ERR BIT(5)
#define ASF_TRANS_TIMEOUT_ERR BIT(4)
#define ASF_CSR_ERR BIT(3)
#define ASF_DAP_ERR BIT(2)
#define ASF_SRAM_UNCORR_ERR BIT(1)
#define ASF_SRAM_CORR_ERR BIT(0)

#define ASF_SRAM_CORR_FAULT_STATUS 0x320
#define ASF_SRAM_UNCORR_FAULT_STATUS 0x324
#define ASF_SRAM_CORR_FAULT_INSTANCE(x) ((x) >> 24)
#define ASF_SRAM_CORR_FAULT_ADDR(x) ((x) & GENMASK(23, 0))

#define ASF_SRAM_FAULT_STATS 0x328
#define ASF_SRAM_FAULT_UNCORR_STATS(x) ((x) >> 16)
#define ASF_SRAM_FAULT_CORR_STATS(x) ((x) & GENMASK(15, 0))

#define ASF_TRANS_TOUT_CTRL 0x330
#define ASF_TRANS_TOUT_EN BIT(31)
#define ASF_TRANS_TOUT_VAL(x) (x)

#define ASF_TRANS_TOUT_FAULT_MASK 0x334
#define ASF_TRANS_TOUT_FAULT_STATUS 0x338
#define ASF_TRANS_TOUT_FAULT_APB BIT(3)
#define ASF_TRANS_TOUT_FAULT_SCL_LOW BIT(2)
#define ASF_TRANS_TOUT_FAULT_SCL_HIGH BIT(1)
#define ASF_TRANS_TOUT_FAULT_FSCL_HIGH BIT(0)

#define ASF_PROTO_FAULT_MASK 0x340
#define ASF_PROTO_FAULT_STATUS 0x344
#define ASF_PROTO_FAULT_SLVSDR_RD_ABORT BIT(31)
#define ASF_PROTO_FAULT_SLVDDR_FAIL BIT(30)
#define ASF_PROTO_FAULT_S(x) BIT(16 + (x))
#define ASF_PROTO_FAULT_MSTSDR_RD_ABORT BIT(15)
#define ASF_PROTO_FAULT_MSTDDR_FAIL BIT(14)
#define ASF_PROTO_FAULT_M(x) BIT(x)

struct cdns_i3c_master_caps {
	u32 cmdfifodepth;
	u32 cmdrfifodepth;
	u32 txfifodepth;
	u32 rxfifodepth;
	u32 ibirfifodepth;
};

struct cdns_i3c_cmd {
	u32 cmd0;
	u32 cmd1;
	u32 tx_len;
	const void *tx_buf;
	u32 rx_len;
	void *rx_buf;
	u32 error;
};

struct cdns_i3c_xfer {
	struct list_head node;
	struct completion comp;
	int ret;
	unsigned int ncmds;
	struct cdns_i3c_cmd cmds[0];
};

struct cdns_i3c_master {
	struct work_struct hj_work;
	struct i3c_master_controller base;
	u32 free_rr_slots;
	unsigned int maxdevs;
	struct {
		unsigned int num_slots;
		struct i3c_dev_desc **slots;
		spinlock_t lock;
	} ibi;
	struct {
		struct list_head list;
		struct cdns_i3c_xfer *cur;
		spinlock_t lock;
	} xferqueue;
	void __iomem *regs;
	struct clk *sysclk;
	struct clk *pclk;
	struct cdns_i3c_master_caps caps;
	unsigned long i3c_scl_lim;
};

static inline struct cdns_i3c_master *
to_cdns_i3c_master(struct i3c_master_controller *master)
{
	return container_of(master, struct cdns_i3c_master, base);
}
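
/*
 * Data FIFOs are 32 bits wide: full words go through writesl()/readsl(),
 * and the trailing 1-3 bytes are packed into (or unpacked from) a temporary
 * word so the payload buffer is never accessed out of bounds.
 */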
static void cdns_i3c_master_wr_to_tx_fifo(struct cdns_i3c_master *master,
					  const u8 *bytes, int nbytes)
{
	writesl(master->regs + TX_FIFO, bytes, nbytes / 4);
	if (nbytes & 3) {
		u32 tmp = 0;

		memcpy(&tmp, bytes + (nbytes & ~3), nbytes & 3);
		writesl(master->regs + TX_FIFO, &tmp, 1);
	}
}

static void cdns_i3c_master_rd_from_rx_fifo(struct cdns_i3c_master *master,
					    u8 *bytes, int nbytes)
{
	readsl(master->regs + RX_FIFO, bytes, nbytes / 4);
	if (nbytes & 3) {
		u32 tmp;

		readsl(master->regs + RX_FIFO, &tmp, 1);
		memcpy(bytes + (nbytes & ~3), &tmp, nbytes & 3);
	}
}

static bool cdns_i3c_master_supports_ccc_cmd(struct i3c_master_controller *m,
					     const struct i3c_ccc_cmd *cmd)
{
	if (cmd->ndests > 1)
		return false;

	switch (cmd->id) {
	case I3C_CCC_ENEC(true):
	case I3C_CCC_ENEC(false):
	case I3C_CCC_DISEC(true):
	case I3C_CCC_DISEC(false):
	case I3C_CCC_ENTAS(0, true):
	case I3C_CCC_ENTAS(0, false):
	case I3C_CCC_RSTDAA(true):
	case I3C_CCC_RSTDAA(false):
	case I3C_CCC_ENTDAA:
	case I3C_CCC_SETMWL(true):
	case I3C_CCC_SETMWL(false):
	case I3C_CCC_SETMRL(true):
	case I3C_CCC_SETMRL(false):
	case I3C_CCC_DEFSLVS:
	case I3C_CCC_ENTHDR(0):
	case I3C_CCC_SETDASA:
	case I3C_CCC_SETNEWDA:
	case I3C_CCC_GETMWL:
	case I3C_CCC_GETMRL:
	case I3C_CCC_GETPID:
	case I3C_CCC_GETBCR:
	case I3C_CCC_GETDCR:
	case I3C_CCC_GETSTATUS:
	case I3C_CCC_GETACCMST:
	case I3C_CCC_GETMXDS:
	case I3C_CCC_GETHDRCAP:
		return true;
	default:
		break;
	}

	return false;
}

static int cdns_i3c_master_disable(struct cdns_i3c_master *master)
{
	u32 status;

	writel(readl(master->regs + CTRL) & ~CTRL_DEV_EN, master->regs + CTRL);

	return readl_poll_timeout(master->regs + MST_STATUS0, status,
				  status & MST_STATUS0_IDLE, 10, 1000000);
}

static void cdns_i3c_master_enable(struct cdns_i3c_master *master)
{
	writel(readl(master->regs + CTRL) | CTRL_DEV_EN, master->regs + CTRL);
}

static struct cdns_i3c_xfer *
cdns_i3c_master_alloc_xfer(struct cdns_i3c_master *master, unsigned int ncmds)
{
	struct cdns_i3c_xfer *xfer;

	xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
	if (!xfer)
		return NULL;

	INIT_LIST_HEAD(&xfer->node);
	xfer->ncmds = ncmds;
	xfer->ret = -ETIMEDOUT;

	return xfer;
}

static void cdns_i3c_master_free_xfer(struct cdns_i3c_xfer *xfer)
{
	kfree(xfer);
}

static void cdns_i3c_master_start_xfer_locked(struct cdns_i3c_master *master)
{
	struct cdns_i3c_xfer *xfer = master->xferqueue.cur;
	unsigned int i;

	if (!xfer)
		return;

	writel(MST_INT_CMDD_EMP, master->regs + MST_ICR);
	for (i = 0; i < xfer->ncmds; i++) {
		struct cdns_i3c_cmd *cmd = &xfer->cmds[i];

		cdns_i3c_master_wr_to_tx_fifo(master, cmd->tx_buf,
					      cmd->tx_len);
	}

	for (i = 0; i < xfer->ncmds; i++) {
		struct cdns_i3c_cmd *cmd = &xfer->cmds[i];

		writel(cmd->cmd1 | CMD1_FIFO_CMDID(i),
		       master->regs + CMD1_FIFO);
		writel(cmd->cmd0, master->regs + CMD0_FIFO);
	}

	writel(readl(master->regs + CTRL) | CTRL_MCS,
	       master->regs + CTRL);
	writel(MST_INT_CMDD_EMP, master->regs + MST_IER);
}
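
/*
 * Completion path, called with xferqueue.lock held from the interrupt
 * handler once the command descriptor FIFO is empty: drain the command
 * response (CMDR) FIFO, copy any RX data back into the per-command buffers,
 * translate the per-command error codes, then start the next queued transfer.
 */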
static void cdns_i3c_master_end_xfer_locked(struct cdns_i3c_master *master,
					    u32 isr)
{
	struct cdns_i3c_xfer *xfer = master->xferqueue.cur;
	int i, ret = 0;
	u32 status0;

	if (!xfer)
		return;

	if (!(isr & MST_INT_CMDD_EMP))
		return;

	writel(MST_INT_CMDD_EMP, master->regs + MST_IDR);

	for (status0 = readl(master->regs + MST_STATUS0);
	     !(status0 & MST_STATUS0_CMDR_EMP);
	     status0 = readl(master->regs + MST_STATUS0)) {
		struct cdns_i3c_cmd *cmd;
		u32 cmdr, rx_len, id;

		cmdr = readl(master->regs + CMDR);
		id = CMDR_CMDID(cmdr);
		if (id == CMDR_CMDID_HJACK_DISEC ||
		    id == CMDR_CMDID_HJACK_ENTDAA ||
		    WARN_ON(id >= xfer->ncmds))
			continue;

		cmd = &xfer->cmds[CMDR_CMDID(cmdr)];
		rx_len = min_t(u32, CMDR_XFER_BYTES(cmdr), cmd->rx_len);
		cdns_i3c_master_rd_from_rx_fifo(master, cmd->rx_buf, rx_len);
		cmd->error = CMDR_ERROR(cmdr);
	}

	for (i = 0; i < xfer->ncmds; i++) {
		switch (xfer->cmds[i].error) {
		case CMDR_NO_ERROR:
			break;

		case CMDR_DDR_PREAMBLE_ERROR:
		case CMDR_DDR_PARITY_ERROR:
		case CMDR_M0_ERROR:
		case CMDR_M1_ERROR:
		case CMDR_M2_ERROR:
		case CMDR_MST_ABORT:
		case CMDR_NACK_RESP:
		case CMDR_DDR_DROPPED:
			ret = -EIO;
			break;

		case CMDR_DDR_RX_FIFO_OVF:
		case CMDR_DDR_TX_FIFO_UNF:
			ret = -ENOSPC;
			break;

		case CMDR_INVALID_DA:
		default:
			ret = -EINVAL;
			break;
		}
	}

	xfer->ret = ret;
	complete(&xfer->comp);

	xfer = list_first_entry_or_null(&master->xferqueue.list,
					struct cdns_i3c_xfer, node);
	if (xfer)
		list_del_init(&xfer->node);

	master->xferqueue.cur = xfer;
	cdns_i3c_master_start_xfer_locked(master);
}

static void cdns_i3c_master_queue_xfer(struct cdns_i3c_master *master,
				       struct cdns_i3c_xfer *xfer)
{
	unsigned long flags;

	init_completion(&xfer->comp);
	spin_lock_irqsave(&master->xferqueue.lock, flags);
	if (master->xferqueue.cur) {
		list_add_tail(&xfer->node, &master->xferqueue.list);
	} else {
		master->xferqueue.cur = xfer;
		cdns_i3c_master_start_xfer_locked(master);
	}
	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
}

static void cdns_i3c_master_unqueue_xfer(struct cdns_i3c_master *master,
					 struct cdns_i3c_xfer *xfer)
{
	unsigned long flags;

	spin_lock_irqsave(&master->xferqueue.lock, flags);
	if (master->xferqueue.cur == xfer) {
		u32 status;

		writel(readl(master->regs + CTRL) & ~CTRL_DEV_EN,
		       master->regs + CTRL);
		readl_poll_timeout_atomic(master->regs + MST_STATUS0, status,
					  status & MST_STATUS0_IDLE, 10,
					  1000000);
		master->xferqueue.cur = NULL;
		writel(FLUSH_RX_FIFO | FLUSH_TX_FIFO | FLUSH_CMD_FIFO |
		       FLUSH_CMD_RESP,
		       master->regs + FLUSH_CTRL);
		writel(MST_INT_CMDD_EMP, master->regs + MST_IDR);
		writel(readl(master->regs + CTRL) | CTRL_DEV_EN,
		       master->regs + CTRL);
	} else {
		list_del_init(&xfer->node);
	}
	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
}

static enum i3c_error_code cdns_i3c_cmd_get_err(struct cdns_i3c_cmd *cmd)
{
	switch (cmd->error) {
	case CMDR_M0_ERROR:
		return I3C_ERROR_M0;

	case CMDR_M1_ERROR:
		return I3C_ERROR_M1;

	case CMDR_M2_ERROR:
	case CMDR_NACK_RESP:
		return I3C_ERROR_M2;

	default:
		break;
	}

	return I3C_ERROR_UNKNOWN;
}
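
/*
 * CCC commands are pushed as a single-command transfer: CMD1 carries the
 * CCC id, CMD0 marks the frame as a CCC (plus the destination address for
 * direct CCCs), and the payload goes through the regular TX/RX FIFOs.
 */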
static int cdns_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
					struct i3c_ccc_cmd *cmd)
{
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_xfer *xfer;
	struct cdns_i3c_cmd *ccmd;
	int ret;

	xfer = cdns_i3c_master_alloc_xfer(master, 1);
	if (!xfer)
		return -ENOMEM;

	ccmd = xfer->cmds;
	ccmd->cmd1 = CMD1_FIFO_CCC(cmd->id);
	ccmd->cmd0 = CMD0_FIFO_IS_CCC |
		     CMD0_FIFO_PL_LEN(cmd->dests[0].payload.len);

	if (cmd->id & I3C_CCC_DIRECT)
		ccmd->cmd0 |= CMD0_FIFO_DEV_ADDR(cmd->dests[0].addr);

	if (cmd->rnw) {
		ccmd->cmd0 |= CMD0_FIFO_RNW;
		ccmd->rx_buf = cmd->dests[0].payload.data;
		ccmd->rx_len = cmd->dests[0].payload.len;
	} else {
		ccmd->tx_buf = cmd->dests[0].payload.data;
		ccmd->tx_len = cmd->dests[0].payload.len;
	}

	cdns_i3c_master_queue_xfer(master, xfer);
	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
		cdns_i3c_master_unqueue_xfer(master, xfer);

	ret = xfer->ret;
	cmd->err = cdns_i3c_cmd_get_err(&xfer->cmds[0]);
	cdns_i3c_master_free_xfer(xfer);

	return ret;
}

static int cdns_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
				      struct i3c_priv_xfer *xfers,
				      int nxfers)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	int txslots = 0, rxslots = 0, i, ret;
	struct cdns_i3c_xfer *cdns_xfer;

	for (i = 0; i < nxfers; i++) {
		if (xfers[i].len > CMD0_FIFO_PL_LEN_MAX)
			return -ENOTSUPP;
	}

	if (!nxfers)
		return 0;

	if (nxfers > master->caps.cmdfifodepth ||
	    nxfers > master->caps.cmdrfifodepth)
		return -ENOTSUPP;

	/*
	 * First make sure that all transactions (block of transfers separated
	 * by a STOP marker) fit in the FIFOs.
	 */
	for (i = 0; i < nxfers; i++) {
		if (xfers[i].rnw)
			rxslots += DIV_ROUND_UP(xfers[i].len, 4);
		else
			txslots += DIV_ROUND_UP(xfers[i].len, 4);
	}

	if (rxslots > master->caps.rxfifodepth ||
	    txslots > master->caps.txfifodepth)
		return -ENOTSUPP;

	cdns_xfer = cdns_i3c_master_alloc_xfer(master, nxfers);
	if (!cdns_xfer)
		return -ENOMEM;

	for (i = 0; i < nxfers; i++) {
		struct cdns_i3c_cmd *ccmd = &cdns_xfer->cmds[i];
		u32 pl_len = xfers[i].len;

		ccmd->cmd0 = CMD0_FIFO_DEV_ADDR(dev->info.dyn_addr) |
			CMD0_FIFO_PRIV_XMIT_MODE(XMIT_BURST_WITHOUT_SUBADDR);

		if (xfers[i].rnw) {
			ccmd->cmd0 |= CMD0_FIFO_RNW;
			ccmd->rx_buf = xfers[i].data.in;
			ccmd->rx_len = xfers[i].len;
			pl_len++;
		} else {
			ccmd->tx_buf = xfers[i].data.out;
			ccmd->tx_len = xfers[i].len;
		}

		ccmd->cmd0 |= CMD0_FIFO_PL_LEN(pl_len);

		if (i < nxfers - 1)
			ccmd->cmd0 |= CMD0_FIFO_RSBC;

		if (!i)
			ccmd->cmd0 |= CMD0_FIFO_BCH;
	}

	cdns_i3c_master_queue_xfer(master, cdns_xfer);
	if (!wait_for_completion_timeout(&cdns_xfer->comp,
					 msecs_to_jiffies(1000)))
		cdns_i3c_master_unqueue_xfer(master, cdns_xfer);

	ret = cdns_xfer->ret;

	for (i = 0; i < nxfers; i++)
		xfers[i].err = cdns_i3c_cmd_get_err(&cdns_xfer->cmds[i]);

	cdns_i3c_master_free_xfer(cdns_xfer);

	return ret;
}
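
/*
 * I2C transfers reuse the same command queue: each i2c_msg becomes one
 * command descriptor in burst-without-subaddress mode, and the whole set
 * must fit in the command and data FIFOs since nothing is refilled on the
 * fly.
 */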
static int cdns_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
				     const struct i2c_msg *xfers, int nxfers)
{
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	unsigned int nrxwords = 0, ntxwords = 0;
	struct cdns_i3c_xfer *xfer;
	int i, ret = 0;

	if (nxfers > master->caps.cmdfifodepth)
		return -ENOTSUPP;

	for (i = 0; i < nxfers; i++) {
		if (xfers[i].len > CMD0_FIFO_PL_LEN_MAX)
			return -ENOTSUPP;

		if (xfers[i].flags & I2C_M_RD)
			nrxwords += DIV_ROUND_UP(xfers[i].len, 4);
		else
			ntxwords += DIV_ROUND_UP(xfers[i].len, 4);
	}

	if (ntxwords > master->caps.txfifodepth ||
	    nrxwords > master->caps.rxfifodepth)
		return -ENOTSUPP;

	xfer = cdns_i3c_master_alloc_xfer(master, nxfers);
	if (!xfer)
		return -ENOMEM;

	for (i = 0; i < nxfers; i++) {
		struct cdns_i3c_cmd *ccmd = &xfer->cmds[i];

		ccmd->cmd0 = CMD0_FIFO_DEV_ADDR(xfers[i].addr) |
			CMD0_FIFO_PL_LEN(xfers[i].len) |
			CMD0_FIFO_PRIV_XMIT_MODE(XMIT_BURST_WITHOUT_SUBADDR);

		if (xfers[i].flags & I2C_M_TEN)
			ccmd->cmd0 |= CMD0_FIFO_IS_10B;

		if (xfers[i].flags & I2C_M_RD) {
			ccmd->cmd0 |= CMD0_FIFO_RNW;
			ccmd->rx_buf = xfers[i].buf;
			ccmd->rx_len = xfers[i].len;
		} else {
			ccmd->tx_buf = xfers[i].buf;
			ccmd->tx_len = xfers[i].len;
		}
	}

	cdns_i3c_master_queue_xfer(master, xfer);
	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
		cdns_i3c_master_unqueue_xfer(master, xfer);

	ret = xfer->ret;
	cdns_i3c_master_free_xfer(xfer);

	return ret;
}

static u32 cdns_i3c_master_i2c_funcs(struct i3c_master_controller *m)
{
	return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR;
}

struct cdns_i3c_i2c_dev_data {
	u16 id;
	s16 ibi;
	struct i3c_generic_ibi_pool *ibi_pool;
};

static u32 prepare_rr0_dev_address(u32 addr)
{
	u32 ret = (addr << 1) & 0xff;

	/* RR0[7:1] = addr[6:0] */
	ret |= (addr & GENMASK(6, 0)) << 1;

	/* RR0[15:13] = addr[9:7] */
	ret |= (addr & GENMASK(9, 7)) << 6;

	/* RR0[0] = ~XOR(addr[6:0]) */
	if (!(hweight8(addr & 0x7f) & 1))
		ret |= 1;

	return ret;
}
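
/*
 * Keep the device's retaining register in sync with the address the core is
 * currently using: the dynamic address once one has been assigned, the
 * static address before that.
 */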
static void cdns_i3c_master_upd_i3c_addr(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	u32 rr;

	rr = prepare_rr0_dev_address(dev->info.dyn_addr ?
				     dev->info.dyn_addr :
				     dev->info.static_addr);
	writel(DEV_ID_RR0_IS_I3C | rr, master->regs + DEV_ID_RR0(data->id));
}

static int cdns_i3c_master_get_rr_slot(struct cdns_i3c_master *master,
				       u8 dyn_addr)
{
	u32 activedevs, rr;
	int i;

	if (!dyn_addr) {
		if (!master->free_rr_slots)
			return -ENOSPC;

		return ffs(master->free_rr_slots) - 1;
	}

	activedevs = readl(master->regs + DEVS_CTRL) &
		     DEVS_CTRL_DEVS_ACTIVE_MASK;

	for (i = 1; i <= master->maxdevs; i++) {
		if (!(BIT(i) & activedevs))
			continue;

		rr = readl(master->regs + DEV_ID_RR0(i));
		if (!(rr & DEV_ID_RR0_IS_I3C) ||
		    DEV_ID_RR0_GET_DEV_ADDR(rr) != dyn_addr)
			continue;

		return i;
	}

	return -EINVAL;
}

static int cdns_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
					    u8 old_dyn_addr)
{
	cdns_i3c_master_upd_i3c_addr(dev);

	return 0;
}

static int cdns_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data;
	int slot;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	slot = cdns_i3c_master_get_rr_slot(master, dev->info.dyn_addr);
	if (slot < 0) {
		kfree(data);
		return slot;
	}

	data->ibi = -1;
	data->id = slot;
	i3c_dev_set_master_data(dev, data);
	master->free_rr_slots &= ~BIT(slot);

	if (!dev->info.dyn_addr) {
		cdns_i3c_master_upd_i3c_addr(dev);
		writel(readl(master->regs + DEVS_CTRL) |
		       DEVS_CTRL_DEV_ACTIVE(data->id),
		       master->regs + DEVS_CTRL);
	}

	return 0;
}

static void cdns_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);

	writel(readl(master->regs + DEVS_CTRL) |
	       DEVS_CTRL_DEV_CLR(data->id),
	       master->regs + DEVS_CTRL);

	i3c_dev_set_master_data(dev, NULL);
	master->free_rr_slots |= BIT(data->id);
	kfree(data);
}
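
/*
 * I2C devices never take part in DAA: they only get a retaining-register
 * slot programmed with their static address and LVR, and are activated
 * right away.
 */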
static int cdns_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
{
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data;
	int slot;

	slot = cdns_i3c_master_get_rr_slot(master, 0);
	if (slot < 0)
		return slot;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->id = slot;
	master->free_rr_slots &= ~BIT(slot);
	i2c_dev_set_master_data(dev, data);

	writel(prepare_rr0_dev_address(dev->boardinfo->base.addr) |
	       (dev->boardinfo->base.flags & I2C_CLIENT_TEN ?
		DEV_ID_RR0_LVR_EXT_ADDR : 0),
	       master->regs + DEV_ID_RR0(data->id));
	writel(dev->boardinfo->lvr, master->regs + DEV_ID_RR2(data->id));
	writel(readl(master->regs + DEVS_CTRL) |
	       DEVS_CTRL_DEV_ACTIVE(data->id),
	       master->regs + DEVS_CTRL);

	return 0;
}

static void cdns_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
{
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);

	writel(readl(master->regs + DEVS_CTRL) |
	       DEVS_CTRL_DEV_CLR(data->id),
	       master->regs + DEVS_CTRL);
	master->free_rr_slots |= BIT(data->id);

	i2c_dev_set_master_data(dev, NULL);
	kfree(data);
}

static void cdns_i3c_master_bus_cleanup(struct i3c_master_controller *m)
{
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);

	cdns_i3c_master_disable(master);
}

static void cdns_i3c_master_dev_rr_to_info(struct cdns_i3c_master *master,
					   unsigned int slot,
					   struct i3c_device_info *info)
{
	u32 rr;

	memset(info, 0, sizeof(*info));
	rr = readl(master->regs + DEV_ID_RR0(slot));
	info->dyn_addr = DEV_ID_RR0_GET_DEV_ADDR(rr);
	rr = readl(master->regs + DEV_ID_RR2(slot));
	info->dcr = rr;
	info->bcr = rr >> 8;
	info->pid = rr >> 16;
	info->pid |= (u64)readl(master->regs + DEV_ID_RR1(slot)) << 16;
}

static void cdns_i3c_master_upd_i3c_scl_lim(struct cdns_i3c_master *master)
{
	struct i3c_master_controller *m = &master->base;
	unsigned long i3c_lim_period, pres_step, ncycles;
	struct i3c_bus *bus = i3c_master_get_bus(m);
	unsigned long new_i3c_scl_lim = 0;
	struct i3c_dev_desc *dev;
	u32 prescl1, ctrl;

	i3c_bus_for_each_i3cdev(bus, dev) {
		unsigned long max_fscl;

		max_fscl = max(I3C_CCC_MAX_SDR_FSCL(dev->info.max_read_ds),
			       I3C_CCC_MAX_SDR_FSCL(dev->info.max_write_ds));
		switch (max_fscl) {
		case I3C_SDR1_FSCL_8MHZ:
			max_fscl = 8000000;
			break;
		case I3C_SDR2_FSCL_6MHZ:
			max_fscl = 6000000;
			break;
		case I3C_SDR3_FSCL_4MHZ:
			max_fscl = 4000000;
			break;
		case I3C_SDR4_FSCL_2MHZ:
			max_fscl = 2000000;
			break;
		case I3C_SDR0_FSCL_MAX:
		default:
			max_fscl = 0;
			break;
		}

		if (max_fscl &&
		    (new_i3c_scl_lim > max_fscl || !new_i3c_scl_lim))
			new_i3c_scl_lim = max_fscl;
	}

	/* Only update PRESCL_CTRL1 if the I3C SCL limitation has changed. */
	if (new_i3c_scl_lim == master->i3c_scl_lim)
		return;
	master->i3c_scl_lim = new_i3c_scl_lim;
	if (!new_i3c_scl_lim)
		return;
	pres_step = 1000000000UL / (bus->scl_rate.i3c * 4);

	/* Configure PP_LOW to meet I3C slave limitations. */
	prescl1 = readl(master->regs + PRESCL_CTRL1) &
		  ~PRESCL_CTRL1_PP_LOW_MASK;
	ctrl = readl(master->regs + CTRL);

	i3c_lim_period = DIV_ROUND_UP(1000000000, master->i3c_scl_lim);
	ncycles = DIV_ROUND_UP(i3c_lim_period, pres_step);
	if (ncycles < 4)
		ncycles = 0;
	else
		ncycles -= 4;

	prescl1 |= PRESCL_CTRL1_PP_LOW(ncycles);

	/* Disable I3C master before updating PRESCL_CTRL1. */
	if (ctrl & CTRL_DEV_EN)
		cdns_i3c_master_disable(master);

	writel(prescl1, master->regs + PRESCL_CTRL1);

	if (ctrl & CTRL_DEV_EN)
		cdns_i3c_master_enable(master);
}
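
/*
 * Dynamic Address Assignment: pre-program every unused retaining register
 * with a free address, let the controller run ENTDAA, register the slots
 * that became active, clear the ones that were not used, then broadcast
 * DEFSLVS and re-enable Hot-Join/Mastership-request events.
 */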
static int cdns_i3c_master_do_daa(struct i3c_master_controller *m)
{
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	u32 olddevs, newdevs;
	int ret, slot;
	u8 addrs[MAX_DEVS] = { };
	u8 last_addr = 0;

	olddevs = readl(master->regs + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;

	/* Prepare RR slots before launching DAA. */
	for (slot = 1; slot <= master->maxdevs; slot++) {
		if (olddevs & BIT(slot))
			continue;

		ret = i3c_master_get_free_addr(m, last_addr + 1);
		if (ret < 0)
			return -ENOSPC;

		last_addr = ret;
		addrs[slot] = last_addr;
		writel(prepare_rr0_dev_address(last_addr) | DEV_ID_RR0_IS_I3C,
		       master->regs + DEV_ID_RR0(slot));
		writel(0, master->regs + DEV_ID_RR1(slot));
		writel(0, master->regs + DEV_ID_RR2(slot));
	}

	ret = i3c_master_entdaa_locked(&master->base);
	if (ret && ret != I3C_ERROR_M2)
		return ret;

	newdevs = readl(master->regs + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
	newdevs &= ~olddevs;

	/*
	 * Register the devices that just got a dynamic address. We already
	 * have the addresses assigned to them in the addrs array.
	 */
	for (slot = 1; slot <= master->maxdevs; slot++) {
		if (newdevs & BIT(slot))
			i3c_master_add_i3c_dev_locked(m, addrs[slot]);
	}

	/*
	 * Clear slots that ended up not being used. Can be caused by I3C
	 * device creation failure or when the I3C device was already known
	 * by the system but with a different address (in this case the device
	 * already has a slot and does not need a new one).
	 */
	writel(readl(master->regs + DEVS_CTRL) |
	       master->free_rr_slots << DEVS_CTRL_DEV_CLR_SHIFT,
	       master->regs + DEVS_CTRL);

	i3c_master_defslvs_locked(&master->base);

	cdns_i3c_master_upd_i3c_scl_lim(master);

	/* Unmask Hot-Join and Mastership request interrupts. */
	i3c_master_enec_locked(m, I3C_BROADCAST_ADDR,
			       I3C_CCC_EVENT_HJ | I3C_CCC_EVENT_MR);

	return 0;
}
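
/*
 * Bus initialization: the I3C SCL prescaler divides sysclk by 4 * (pres + 1)
 * and the I2C prescaler by 5 * (pres + 1), so the effective rates are written
 * back into the bus structure after rounding. The master also reserves
 * retaining register 0 for itself and enables Hot-Join handling.
 */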
static int cdns_i3c_master_bus_init(struct i3c_master_controller *m)
{
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	unsigned long pres_step, sysclk_rate, max_i2cfreq;
	struct i3c_bus *bus = i3c_master_get_bus(m);
	u32 ctrl, prescl0, prescl1, pres, low;
	struct i3c_device_info info = { };
	int ret, ncycles;

	switch (bus->mode) {
	case I3C_BUS_MODE_PURE:
		ctrl = CTRL_PURE_BUS_MODE;
		break;

	case I3C_BUS_MODE_MIXED_FAST:
		ctrl = CTRL_MIXED_FAST_BUS_MODE;
		break;

	case I3C_BUS_MODE_MIXED_SLOW:
		ctrl = CTRL_MIXED_SLOW_BUS_MODE;
		break;

	default:
		return -EINVAL;
	}

	sysclk_rate = clk_get_rate(master->sysclk);
	if (!sysclk_rate)
		return -EINVAL;

	pres = DIV_ROUND_UP(sysclk_rate, (bus->scl_rate.i3c * 4)) - 1;
	if (pres > PRESCL_CTRL0_MAX)
		return -ERANGE;

	bus->scl_rate.i3c = sysclk_rate / ((pres + 1) * 4);

	prescl0 = PRESCL_CTRL0_I3C(pres);

	low = ((I3C_BUS_TLOW_OD_MIN_NS * sysclk_rate) / (pres + 1)) - 2;
	prescl1 = PRESCL_CTRL1_OD_LOW(low);

	max_i2cfreq = bus->scl_rate.i2c;

	pres = (sysclk_rate / (max_i2cfreq * 5)) - 1;
	if (pres > PRESCL_CTRL0_MAX)
		return -ERANGE;

	bus->scl_rate.i2c = sysclk_rate / ((pres + 1) * 5);

	prescl0 |= PRESCL_CTRL0_I2C(pres);
	writel(prescl0, master->regs + PRESCL_CTRL0);

	/* Calculate OD and PP low. */
	pres_step = 1000000000 / (bus->scl_rate.i3c * 4);
	ncycles = DIV_ROUND_UP(I3C_BUS_TLOW_OD_MIN_NS, pres_step) - 2;
	if (ncycles < 0)
		ncycles = 0;
	prescl1 = PRESCL_CTRL1_OD_LOW(ncycles);
	writel(prescl1, master->regs + PRESCL_CTRL1);

	/* Get an address for the master. */
	ret = i3c_master_get_free_addr(m, 0);
	if (ret < 0)
		return ret;

	writel(prepare_rr0_dev_address(ret) | DEV_ID_RR0_IS_I3C,
	       master->regs + DEV_ID_RR0(0));

	cdns_i3c_master_dev_rr_to_info(master, 0, &info);
	if (info.bcr & I3C_BCR_HDR_CAP)
		info.hdr_cap = I3C_CCC_HDR_MODE(I3C_HDR_DDR);

	ret = i3c_master_set_info(&master->base, &info);
	if (ret)
		return ret;

	/*
	 * Enable Hot-Join, and, when a Hot-Join request happens, disable all
	 * events coming from this device.
	 *
	 * We will issue ENTDAA afterwards from the threaded IRQ handler.
	 */
	ctrl |= CTRL_HJ_ACK | CTRL_HJ_DISEC | CTRL_HALT_EN | CTRL_MCS_EN;
	writel(ctrl, master->regs + CTRL);

	cdns_i3c_master_enable(master);

	return 0;
}
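
/*
 * Handle one IBI response descriptor: look up the device from its slot id,
 * copy the payload out of the IBI data FIFO into a slot taken from the
 * device's IBI pool and queue it to the core. If anything goes wrong the
 * FIFO still has to be drained so the next IBI starts on a clean boundary.
 */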
static void cdns_i3c_master_handle_ibi(struct cdns_i3c_master *master,
				       u32 ibir)
{
	struct cdns_i3c_i2c_dev_data *data;
	bool data_consumed = false;
	struct i3c_ibi_slot *slot;
	u32 id = IBIR_SLVID(ibir);
	struct i3c_dev_desc *dev;
	size_t nbytes;
	u8 *buf;

	/*
	 * FIXME: maybe we should report the FIFO OVF errors to the upper
	 * layer.
	 */
	if (id >= master->ibi.num_slots || (ibir & IBIR_ERROR))
		goto out;

	dev = master->ibi.slots[id];
	spin_lock(&master->ibi.lock);

	data = i3c_dev_get_master_data(dev);
	slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
	if (!slot)
		goto out_unlock;

	buf = slot->data;

	nbytes = IBIR_XFER_BYTES(ibir);
	readsl(master->regs + IBI_DATA_FIFO, buf, nbytes / 4);
	if (nbytes & 3) {
		u32 tmp = __raw_readl(master->regs + IBI_DATA_FIFO);

		memcpy(buf + (nbytes & ~3), &tmp, nbytes & 3);
	}

	slot->len = min_t(unsigned int, IBIR_XFER_BYTES(ibir),
			  dev->ibi->max_payload_len);
	i3c_master_queue_ibi(dev, slot);
	data_consumed = true;

out_unlock:
	spin_unlock(&master->ibi.lock);

out:
	/* Consume data from the FIFO if it's not been done already. */
	if (!data_consumed) {
		int i;

		for (i = 0; i < IBIR_XFER_BYTES(ibir); i += 4)
			readl(master->regs + IBI_DATA_FIFO);
	}
}

static void cnds_i3c_master_demux_ibis(struct cdns_i3c_master *master)
{
	u32 status0;

	writel(MST_INT_IBIR_THR, master->regs + MST_ICR);

	for (status0 = readl(master->regs + MST_STATUS0);
	     !(status0 & MST_STATUS0_IBIR_EMP);
	     status0 = readl(master->regs + MST_STATUS0)) {
		u32 ibir = readl(master->regs + IBIR);

		switch (IBIR_TYPE(ibir)) {
		case IBIR_TYPE_IBI:
			cdns_i3c_master_handle_ibi(master, ibir);
			break;

		case IBIR_TYPE_HJ:
			WARN_ON(IBIR_XFER_BYTES(ibir) || (ibir & IBIR_ERROR));
			queue_work(master->base.wq, &master->hj_work);
			break;

		case IBIR_TYPE_MR:
			WARN_ON(IBIR_XFER_BYTES(ibir) || (ibir & IBIR_ERROR));
		default:
			break;
		}
	}
}

static irqreturn_t cdns_i3c_master_interrupt(int irq, void *data)
{
	struct cdns_i3c_master *master = data;
	u32 status;

	status = readl(master->regs + MST_ISR);
	if (!(status & readl(master->regs + MST_IMR)))
		return IRQ_NONE;

	spin_lock(&master->xferqueue.lock);
	cdns_i3c_master_end_xfer_locked(master, status);
	spin_unlock(&master->xferqueue.lock);

	if (status & MST_INT_IBIR_THR)
		cnds_i3c_master_demux_ibis(master);

	return IRQ_HANDLED;
}

static int cdns_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	unsigned long flags;
	u32 sirmap;
	int ret;

	ret = i3c_master_disec_locked(m, dev->info.dyn_addr,
				      I3C_CCC_EVENT_SIR);
	if (ret)
		return ret;

	spin_lock_irqsave(&master->ibi.lock, flags);
	sirmap = readl(master->regs + SIR_MAP_DEV_REG(data->ibi));
	sirmap &= ~SIR_MAP_DEV_CONF_MASK(data->ibi);
	sirmap |= SIR_MAP_DEV_CONF(data->ibi,
				   SIR_MAP_DEV_DA(I3C_BROADCAST_ADDR));
	writel(sirmap, master->regs + SIR_MAP_DEV_REG(data->ibi));
	spin_unlock_irqrestore(&master->ibi.lock, flags);

	return ret;
}
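
/*
 * Enabling an IBI maps the device's dynamic address into its SIR_MAP slot
 * with the ACK bit set (plus the payload length and speed limitation taken
 * from the BCR); if the ENEC CCC is then NACKed, the slot is rolled back to
 * the parked broadcast-address configuration used for disabled IBIs.
 */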
static int cdns_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	unsigned long flags;
	u32 sircfg, sirmap;
	int ret;

	spin_lock_irqsave(&master->ibi.lock, flags);
	sirmap = readl(master->regs + SIR_MAP_DEV_REG(data->ibi));
	sirmap &= ~SIR_MAP_DEV_CONF_MASK(data->ibi);
	sircfg = SIR_MAP_DEV_ROLE(dev->info.bcr >> 6) |
		 SIR_MAP_DEV_DA(dev->info.dyn_addr) |
		 SIR_MAP_DEV_PL(dev->info.max_ibi_len) |
		 SIR_MAP_DEV_ACK;

	if (dev->info.bcr & I3C_BCR_MAX_DATA_SPEED_LIM)
		sircfg |= SIR_MAP_DEV_SLOW;

	sirmap |= SIR_MAP_DEV_CONF(data->ibi, sircfg);
	writel(sirmap, master->regs + SIR_MAP_DEV_REG(data->ibi));
	spin_unlock_irqrestore(&master->ibi.lock, flags);

	ret = i3c_master_enec_locked(m, dev->info.dyn_addr,
				     I3C_CCC_EVENT_SIR);
	if (ret) {
		spin_lock_irqsave(&master->ibi.lock, flags);
		sirmap = readl(master->regs + SIR_MAP_DEV_REG(data->ibi));
		sirmap &= ~SIR_MAP_DEV_CONF_MASK(data->ibi);
		sirmap |= SIR_MAP_DEV_CONF(data->ibi,
					   SIR_MAP_DEV_DA(I3C_BROADCAST_ADDR));
		writel(sirmap, master->regs + SIR_MAP_DEV_REG(data->ibi));
		spin_unlock_irqrestore(&master->ibi.lock, flags);
	}

	return ret;
}

static int cdns_i3c_master_request_ibi(struct i3c_dev_desc *dev,
				       const struct i3c_ibi_setup *req)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	unsigned long flags;
	unsigned int i;

	data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
	if (IS_ERR(data->ibi_pool))
		return PTR_ERR(data->ibi_pool);

	spin_lock_irqsave(&master->ibi.lock, flags);
	for (i = 0; i < master->ibi.num_slots; i++) {
		if (!master->ibi.slots[i]) {
			data->ibi = i;
			master->ibi.slots[i] = dev;
			break;
		}
	}
	spin_unlock_irqrestore(&master->ibi.lock, flags);

	if (i < master->ibi.num_slots)
		return 0;

	i3c_generic_ibi_free_pool(data->ibi_pool);
	data->ibi_pool = NULL;

	return -ENOSPC;
}

static void cdns_i3c_master_free_ibi(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	unsigned long flags;

	spin_lock_irqsave(&master->ibi.lock, flags);
	master->ibi.slots[data->ibi] = NULL;
	data->ibi = -1;
	spin_unlock_irqrestore(&master->ibi.lock, flags);

	i3c_generic_ibi_free_pool(data->ibi_pool);
}

static void cdns_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
					     struct i3c_ibi_slot *slot)
{
	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);

	i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
}
static const struct i3c_master_controller_ops cdns_i3c_master_ops = {
	.bus_init = cdns_i3c_master_bus_init,
	.bus_cleanup = cdns_i3c_master_bus_cleanup,
	.do_daa = cdns_i3c_master_do_daa,
	.attach_i3c_dev = cdns_i3c_master_attach_i3c_dev,
	.reattach_i3c_dev = cdns_i3c_master_reattach_i3c_dev,
	.detach_i3c_dev = cdns_i3c_master_detach_i3c_dev,
	.attach_i2c_dev = cdns_i3c_master_attach_i2c_dev,
	.detach_i2c_dev = cdns_i3c_master_detach_i2c_dev,
	.supports_ccc_cmd = cdns_i3c_master_supports_ccc_cmd,
	.send_ccc_cmd = cdns_i3c_master_send_ccc_cmd,
	.priv_xfers = cdns_i3c_master_priv_xfers,
	.i2c_xfers = cdns_i3c_master_i2c_xfers,
	.i2c_funcs = cdns_i3c_master_i2c_funcs,
	.enable_ibi = cdns_i3c_master_enable_ibi,
	.disable_ibi = cdns_i3c_master_disable_ibi,
	.request_ibi = cdns_i3c_master_request_ibi,
	.free_ibi = cdns_i3c_master_free_ibi,
	.recycle_ibi_slot = cdns_i3c_master_recycle_ibi_slot,
};

static void cdns_i3c_master_hj(struct work_struct *work)
{
	struct cdns_i3c_master *master = container_of(work,
						      struct cdns_i3c_master,
						      hj_work);

	i3c_master_do_daa(&master->base);
}

static int cdns_i3c_master_probe(struct platform_device *pdev)
{
	struct cdns_i3c_master *master;
	struct resource *res;
	int ret, irq;
	u32 val;

	master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	master->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(master->regs))
		return PTR_ERR(master->regs);

	master->pclk = devm_clk_get(&pdev->dev, "pclk");
	if (IS_ERR(master->pclk))
		return PTR_ERR(master->pclk);

	master->sysclk = devm_clk_get(&pdev->dev, "sysclk");
	if (IS_ERR(master->sysclk))
		return PTR_ERR(master->sysclk);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = clk_prepare_enable(master->pclk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(master->sysclk);
	if (ret)
		goto err_disable_pclk;

	if (readl(master->regs + DEV_ID) != DEV_ID_I3C_MASTER) {
		ret = -EINVAL;
		goto err_disable_sysclk;
	}

	spin_lock_init(&master->xferqueue.lock);
	INIT_LIST_HEAD(&master->xferqueue.list);

	INIT_WORK(&master->hj_work, cdns_i3c_master_hj);
	writel(0xffffffff, master->regs + MST_IDR);
	writel(0xffffffff, master->regs + SLV_IDR);
	ret = devm_request_irq(&pdev->dev, irq, cdns_i3c_master_interrupt, 0,
			       dev_name(&pdev->dev), master);
	if (ret)
		goto err_disable_sysclk;

	platform_set_drvdata(pdev, master);

	val = readl(master->regs + CONF_STATUS0);
	/* Device ID0 is reserved to describe this master. */
	master->maxdevs = CONF_STATUS0_DEVS_NUM(val);
	master->free_rr_slots = GENMASK(master->maxdevs, 1);

	val = readl(master->regs + CONF_STATUS1);
	master->caps.cmdfifodepth = CONF_STATUS1_CMD_DEPTH(val);
	master->caps.rxfifodepth = CONF_STATUS1_RX_DEPTH(val);
	master->caps.txfifodepth = CONF_STATUS1_TX_DEPTH(val);
	master->caps.ibirfifodepth = CONF_STATUS0_IBIR_DEPTH(val);
	master->caps.cmdrfifodepth = CONF_STATUS0_CMDR_DEPTH(val);

	spin_lock_init(&master->ibi.lock);
	master->ibi.num_slots = CONF_STATUS1_IBI_HW_RES(val);
	master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
					 sizeof(*master->ibi.slots),
					 GFP_KERNEL);
	if (!master->ibi.slots) {
		ret = -ENOMEM;
		goto err_disable_sysclk;
	}

	writel(IBIR_THR(1), master->regs + CMD_IBI_THR_CTRL);
	writel(MST_INT_IBIR_THR, master->regs + MST_IER);
	writel(DEVS_CTRL_DEV_CLR_ALL, master->regs + DEVS_CTRL);

	ret = i3c_master_register(&master->base, &pdev->dev,
				  &cdns_i3c_master_ops, false);
	if (ret)
		goto err_disable_sysclk;

	return 0;

err_disable_sysclk:
	clk_disable_unprepare(master->sysclk);

err_disable_pclk:
	clk_disable_unprepare(master->pclk);

	return ret;
}

static int cdns_i3c_master_remove(struct platform_device *pdev)
{
	struct cdns_i3c_master *master = platform_get_drvdata(pdev);
	int ret;

	ret = i3c_master_unregister(&master->base);
	if (ret)
		return ret;

	clk_disable_unprepare(master->sysclk);
	clk_disable_unprepare(master->pclk);

	return 0;
}

static const struct of_device_id cdns_i3c_master_of_ids[] = {
	{ .compatible = "cdns,i3c-master" },
	{ /* sentinel */ },
};

static struct platform_driver cdns_i3c_master = {
	.probe = cdns_i3c_master_probe,
	.remove = cdns_i3c_master_remove,
	.driver = {
		.name = "cdns-i3c-master",
		.of_match_table = cdns_i3c_master_of_ids,
	},
};
module_platform_driver(cdns_i3c_master);

MODULE_AUTHOR("Boris Brezillon <boris.brezillon@bootlin.com>");
MODULE_DESCRIPTION("Cadence I3C master driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:cdns-i3c-master");