// SPDX-License-Identifier: GPL-2.0-only
/*
 * i2c-xiic.c
 * Copyright (c) 2002-2007 Xilinx Inc.
 * Copyright (c) 2009-2010 Intel Corporation
 *
 * This code was implemented by Mocean Laboratories AB when porting linux
 * to the automotive development board Russellville. The copyright holder
 * as seen in the header is Intel Corporation.
 * Mocean Laboratories forked off the GNU/Linux platform work into a
 * separate company called Pelagicore AB, which committed the code to the
 * kernel.
 */

/* Supports:
 * Xilinx IIC
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/platform_data/i2c-xiic.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>

#define DRIVER_NAME "xiic-i2c"

enum xilinx_i2c_state {
	STATE_DONE,
	STATE_ERROR,
	STATE_START
};

enum xiic_endian {
	LITTLE,
	BIG
};

/**
 * struct xiic_i2c - Internal representation of the XIIC I2C bus
 * @dev: Pointer to the device that registered this adapter
 * @base: Memory base of the HW registers
 * @wait: Wait queue for callers
 * @adap: Kernel adapter representation
 * @tx_msg: Messages from above to be sent
 * @lock: Mutual exclusion
 * @tx_pos: Current pos in TX message
 * @nmsgs: Number of messages in tx_msg
 * @state: See STATE_
 * @rx_msg: Current RX message
 * @rx_pos: Position within current RX message
 * @endianness: big/little-endian byte order
 * @clk: Pointer to input clock
 */
struct xiic_i2c {
	struct device *dev;
	void __iomem *base;
	wait_queue_head_t wait;
	struct i2c_adapter adap;
	struct i2c_msg *tx_msg;
	struct mutex lock;
	unsigned int tx_pos;
	unsigned int nmsgs;
	enum xilinx_i2c_state state;
	struct i2c_msg *rx_msg;
	int rx_pos;
	enum xiic_endian endianness;
	struct clk *clk;
};

#define XIIC_MSB_OFFSET 0
#define XIIC_REG_OFFSET (0x100 + XIIC_MSB_OFFSET)

/*
 * Register offsets in bytes from RegisterBase. Three is added to the
 * base offset to access LSB (IBM style) of the word
 */
#define XIIC_CR_REG_OFFSET	(0x00 + XIIC_REG_OFFSET)	/* Control Register */
#define XIIC_SR_REG_OFFSET	(0x04 + XIIC_REG_OFFSET)	/* Status Register */
#define XIIC_DTR_REG_OFFSET	(0x08 + XIIC_REG_OFFSET)	/* Data Tx Register */
#define XIIC_DRR_REG_OFFSET	(0x0C + XIIC_REG_OFFSET)	/* Data Rx Register */
#define XIIC_ADR_REG_OFFSET	(0x10 + XIIC_REG_OFFSET)	/* Address Register */
#define XIIC_TFO_REG_OFFSET	(0x14 + XIIC_REG_OFFSET)	/* Tx FIFO Occupancy */
#define XIIC_RFO_REG_OFFSET	(0x18 + XIIC_REG_OFFSET)	/* Rx FIFO Occupancy */
#define XIIC_TBA_REG_OFFSET	(0x1C + XIIC_REG_OFFSET)	/* 10 Bit Address reg */
#define XIIC_RFD_REG_OFFSET	(0x20 + XIIC_REG_OFFSET)	/* Rx FIFO Depth reg */
#define XIIC_GPO_REG_OFFSET	(0x24 + XIIC_REG_OFFSET)	/* Output Register */

/* Control Register masks */
#define XIIC_CR_ENABLE_DEVICE_MASK	0x01	/* Device enable = 1 */
#define XIIC_CR_TX_FIFO_RESET_MASK	0x02	/* Transmit FIFO reset = 1 */
#define XIIC_CR_MSMS_MASK		0x04	/* Master starts Txing = 1 */
#define XIIC_CR_DIR_IS_TX_MASK		0x08	/* Dir of tx. Txing = 1 */
#define XIIC_CR_NO_ACK_MASK		0x10	/* Tx Ack. NO ack = 1 */
#define XIIC_CR_REPEATED_START_MASK	0x20	/* Repeated start = 1 */
#define XIIC_CR_GENERAL_CALL_MASK	0x40	/* Gen Call enabled = 1 */

/* Status Register masks */
#define XIIC_SR_GEN_CALL_MASK		0x01	/* 1 = a mstr issued a GC */
#define XIIC_SR_ADDR_AS_SLAVE_MASK	0x02	/* 1 = when addr as slave */
#define XIIC_SR_BUS_BUSY_MASK		0x04	/* 1 = bus is busy */
#define XIIC_SR_MSTR_RDING_SLAVE_MASK	0x08	/* 1 = Dir: mstr <-- slave */
#define XIIC_SR_TX_FIFO_FULL_MASK	0x10	/* 1 = Tx FIFO full */
#define XIIC_SR_RX_FIFO_FULL_MASK	0x20	/* 1 = Rx FIFO full */
#define XIIC_SR_RX_FIFO_EMPTY_MASK	0x40	/* 1 = Rx FIFO empty */
#define XIIC_SR_TX_FIFO_EMPTY_MASK	0x80	/* 1 = Tx FIFO empty */

/* Interrupt Status Register masks - interrupt occurs when... */
#define XIIC_INTR_ARB_LOST_MASK		0x01	/* 1 = arbitration lost */
#define XIIC_INTR_TX_ERROR_MASK		0x02	/* 1 = Tx error/msg complete */
#define XIIC_INTR_TX_EMPTY_MASK		0x04	/* 1 = Tx FIFO/reg empty */
#define XIIC_INTR_RX_FULL_MASK		0x08	/* 1 = Rx FIFO/reg = OCY level */
#define XIIC_INTR_BNB_MASK		0x10	/* 1 = Bus not busy */
#define XIIC_INTR_AAS_MASK		0x20	/* 1 = when addr as slave */
#define XIIC_INTR_NAAS_MASK		0x40	/* 1 = not addr as slave */
#define XIIC_INTR_TX_HALF_MASK		0x80	/* 1 = TX FIFO half empty */

/* The following constants specify the depth of the FIFOs */
#define IIC_RX_FIFO_DEPTH		16	/* Rx fifo capacity */
#define IIC_TX_FIFO_DEPTH		16	/* Tx fifo capacity */

/* The following constants specify groups of interrupts that are typically
 * enabled or disabled at the same time
 */
#define XIIC_TX_INTERRUPTS \
	(XIIC_INTR_TX_ERROR_MASK | XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK)

#define XIIC_TX_RX_INTERRUPTS (XIIC_INTR_RX_FULL_MASK | XIIC_TX_INTERRUPTS)

/*
 * Tx Fifo upper bit masks.
 */
#define XIIC_TX_DYN_START_MASK		0x0100	/* 1 = Set dynamic start */
#define XIIC_TX_DYN_STOP_MASK		0x0200	/* 1 = Set dynamic stop */

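/*
 * Dynamic controller mode, as used by this driver: the slave address is
 * written to the Tx FIFO together with XIIC_TX_DYN_START_MASK to generate a
 * (repeated) start condition, and the word that should end the transaction
 * (the receive byte count for reads, or the final data byte for writes) is
 * written together with XIIC_TX_DYN_STOP_MASK to generate the stop.
 */
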
/*
 * The following constants define the register offsets for the Interrupt
 * registers. There are some holes in the memory map for reserved addresses
 * to allow other registers to be added and still match the memory map of the
 * interrupt controller registers
 */
#define XIIC_DGIER_OFFSET	0x1C	/* Device Global Interrupt Enable Register */
#define XIIC_IISR_OFFSET	0x20	/* Interrupt Status Register */
#define XIIC_IIER_OFFSET	0x28	/* Interrupt Enable Register */
#define XIIC_RESETR_OFFSET	0x40	/* Reset Register */

#define XIIC_RESET_MASK		0xAUL

#define XIIC_PM_TIMEOUT		1000	/* ms */
/*
 * The following constant is used for the device global interrupt enable
 * register, to enable all interrupts for the device; this is the only bit
 * in the register
 */
#define XIIC_GINTR_ENABLE_MASK	0x80000000UL

#define xiic_tx_space(i2c) ((i2c)->tx_msg->len - (i2c)->tx_pos)
#define xiic_rx_space(i2c) ((i2c)->rx_msg->len - (i2c)->rx_pos)

static void xiic_start_xfer(struct xiic_i2c *i2c);
static void __xiic_start_xfer(struct xiic_i2c *i2c);

/*
 * For the register read and write functions, a little-endian and big-endian
 * version are necessary. Endianness is detected during the probe function.
 * Only the least significant byte [doublet] of the register is ever
 * accessed. This requires an offset of 3 [2] from the base address for
 * big-endian systems.
 */

static inline void xiic_setreg8(struct xiic_i2c *i2c, int reg, u8 value)
{
	if (i2c->endianness == LITTLE)
		iowrite8(value, i2c->base + reg);
	else
		iowrite8(value, i2c->base + reg + 3);
}

static inline u8 xiic_getreg8(struct xiic_i2c *i2c, int reg)
{
	u8 ret;

	if (i2c->endianness == LITTLE)
		ret = ioread8(i2c->base + reg);
	else
		ret = ioread8(i2c->base + reg + 3);
	return ret;
}

static inline void xiic_setreg16(struct xiic_i2c *i2c, int reg, u16 value)
{
	if (i2c->endianness == LITTLE)
		iowrite16(value, i2c->base + reg);
	else
		iowrite16be(value, i2c->base + reg + 2);
}

static inline void xiic_setreg32(struct xiic_i2c *i2c, int reg, int value)
{
	if (i2c->endianness == LITTLE)
		iowrite32(value, i2c->base + reg);
	else
		iowrite32be(value, i2c->base + reg);
}

static inline int xiic_getreg32(struct xiic_i2c *i2c, int reg)
{
	u32 ret;

	if (i2c->endianness == LITTLE)
		ret = ioread32(i2c->base + reg);
	else
		ret = ioread32be(i2c->base + reg);
	return ret;
}

static inline void xiic_irq_dis(struct xiic_i2c *i2c, u32 mask)
{
	u32 ier = xiic_getreg32(i2c, XIIC_IIER_OFFSET);

	xiic_setreg32(i2c, XIIC_IIER_OFFSET, ier & ~mask);
}

static inline void xiic_irq_en(struct xiic_i2c *i2c, u32 mask)
{
	u32 ier = xiic_getreg32(i2c, XIIC_IIER_OFFSET);

	xiic_setreg32(i2c, XIIC_IIER_OFFSET, ier | mask);
}

static inline void xiic_irq_clr(struct xiic_i2c *i2c, u32 mask)
{
	u32 isr = xiic_getreg32(i2c, XIIC_IISR_OFFSET);

	xiic_setreg32(i2c, XIIC_IISR_OFFSET, isr & mask);
}

static inline void xiic_irq_clr_en(struct xiic_i2c *i2c, u32 mask)
{
	xiic_irq_clr(i2c, mask);
	xiic_irq_en(i2c, mask);
}

static void xiic_clear_rx_fifo(struct xiic_i2c *i2c)
{
	u8 sr;

	for (sr = xiic_getreg8(i2c, XIIC_SR_REG_OFFSET);
	     !(sr & XIIC_SR_RX_FIFO_EMPTY_MASK);
	     sr = xiic_getreg8(i2c, XIIC_SR_REG_OFFSET))
		xiic_getreg8(i2c, XIIC_DRR_REG_OFFSET);
}

static void xiic_reinit(struct xiic_i2c *i2c)
{
	xiic_setreg32(i2c, XIIC_RESETR_OFFSET, XIIC_RESET_MASK);

	/* Set receive Fifo depth to maximum (zero based). */
	xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, IIC_RX_FIFO_DEPTH - 1);

	/* Reset Tx Fifo. */
	xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, XIIC_CR_TX_FIFO_RESET_MASK);

	/* Enable IIC Device, remove Tx Fifo reset & disable general call. */
	xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, XIIC_CR_ENABLE_DEVICE_MASK);

	/* make sure RX fifo is empty */
	xiic_clear_rx_fifo(i2c);

	/* Enable interrupts */
	xiic_setreg32(i2c, XIIC_DGIER_OFFSET, XIIC_GINTR_ENABLE_MASK);

	xiic_irq_clr_en(i2c, XIIC_INTR_ARB_LOST_MASK);
}

static void xiic_deinit(struct xiic_i2c *i2c)
{
	u8 cr;

	xiic_setreg32(i2c, XIIC_RESETR_OFFSET, XIIC_RESET_MASK);

	/* Disable IIC Device. */
	cr = xiic_getreg8(i2c, XIIC_CR_REG_OFFSET);
	xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, cr & ~XIIC_CR_ENABLE_DEVICE_MASK);
}

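/*
 * Drain the Rx FIFO into the current Rx message, then re-program the
 * zero-based Rx FIFO depth (interrupt watermark) for the bytes that are
 * still outstanding, capped at the FIFO size.
 */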
static void xiic_read_rx(struct xiic_i2c *i2c)
{
	u8 bytes_in_fifo;
	int i;

	bytes_in_fifo = xiic_getreg8(i2c, XIIC_RFO_REG_OFFSET) + 1;

	dev_dbg(i2c->adap.dev.parent,
		"%s entry, bytes in fifo: %d, msg: %d, SR: 0x%x, CR: 0x%x\n",
		__func__, bytes_in_fifo, xiic_rx_space(i2c),
		xiic_getreg8(i2c, XIIC_SR_REG_OFFSET),
		xiic_getreg8(i2c, XIIC_CR_REG_OFFSET));

	if (bytes_in_fifo > xiic_rx_space(i2c))
		bytes_in_fifo = xiic_rx_space(i2c);

	for (i = 0; i < bytes_in_fifo; i++)
		i2c->rx_msg->buf[i2c->rx_pos++] =
			xiic_getreg8(i2c, XIIC_DRR_REG_OFFSET);

	xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET,
		     (xiic_rx_space(i2c) > IIC_RX_FIFO_DEPTH) ?
		     IIC_RX_FIFO_DEPTH - 1 : xiic_rx_space(i2c) - 1);
}

static int xiic_tx_fifo_space(struct xiic_i2c *i2c)
{
	/* return the actual space left in the FIFO */
	return IIC_TX_FIFO_DEPTH - xiic_getreg8(i2c, XIIC_TFO_REG_OFFSET) - 1;
}

static void xiic_fill_tx_fifo(struct xiic_i2c *i2c)
{
	u8 fifo_space = xiic_tx_fifo_space(i2c);
	int len = xiic_tx_space(i2c);

	len = (len > fifo_space) ? fifo_space : len;

	dev_dbg(i2c->adap.dev.parent, "%s entry, len: %d, fifo space: %d\n",
		__func__, len, fifo_space);

	while (len--) {
		u16 data = i2c->tx_msg->buf[i2c->tx_pos++];

		if ((xiic_tx_space(i2c) == 0) && (i2c->nmsgs == 1)) {
			/* last message in transfer -> STOP */
			data |= XIIC_TX_DYN_STOP_MASK;
			dev_dbg(i2c->adap.dev.parent, "%s TX STOP\n", __func__);
		}
		xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data);
	}
}

static void xiic_wakeup(struct xiic_i2c *i2c, int code)
{
	i2c->tx_msg = NULL;
	i2c->rx_msg = NULL;
	i2c->nmsgs = 0;
	i2c->state = code;
	wake_up(&i2c->wait);
}

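/*
 * Threaded half of the interrupt handler: takes i2c->lock, services
 * arbitration loss/Tx error, Rx FIFO full, bus-not-busy and Tx FIFO
 * empty/half-empty events, and acknowledges the handled bits in the IISR
 * on the way out.
 */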
static irqreturn_t xiic_process(int irq, void *dev_id)
{
	struct xiic_i2c *i2c = dev_id;
	u32 pend, isr, ier;
	u32 clr = 0;

	/* Get the interrupt Status from the IPIF. There is no clearing of
	 * interrupts in the IPIF. Interrupts must be cleared at the source.
	 * To find which interrupts are pending, AND the interrupt status
	 * with the interrupts that are enabled.
	 */
	mutex_lock(&i2c->lock);
	isr = xiic_getreg32(i2c, XIIC_IISR_OFFSET);
	ier = xiic_getreg32(i2c, XIIC_IIER_OFFSET);
	pend = isr & ier;

	dev_dbg(i2c->adap.dev.parent, "%s: IER: 0x%x, ISR: 0x%x, pend: 0x%x\n",
		__func__, ier, isr, pend);
	dev_dbg(i2c->adap.dev.parent, "%s: SR: 0x%x, msg: %p, nmsgs: %d\n",
		__func__, xiic_getreg8(i2c, XIIC_SR_REG_OFFSET),
		i2c->tx_msg, i2c->nmsgs);

	/* Service requesting interrupt */
	if ((pend & XIIC_INTR_ARB_LOST_MASK) ||
	    ((pend & XIIC_INTR_TX_ERROR_MASK) &&
	    !(pend & XIIC_INTR_RX_FULL_MASK))) {
		/* bus arbitration lost, or...
		 * Transmit error _OR_ RX completed
		 * if this happens when RX_FULL is not set
		 * this is probably a TX error
		 */

		dev_dbg(i2c->adap.dev.parent, "%s error\n", __func__);

		/* dynamic mode seems to suffer from problems if we just flush
		 * the fifos and the next message is a TX with len 0 (only addr);
		 * reset the IP instead of just flushing the fifos
		 */
		xiic_reinit(i2c);

		if (i2c->rx_msg)
			xiic_wakeup(i2c, STATE_ERROR);
		if (i2c->tx_msg)
			xiic_wakeup(i2c, STATE_ERROR);
	}
	if (pend & XIIC_INTR_RX_FULL_MASK) {
		/* Receive register/FIFO is full */

		clr |= XIIC_INTR_RX_FULL_MASK;
		if (!i2c->rx_msg) {
			dev_dbg(i2c->adap.dev.parent,
				"%s unexpected RX IRQ\n", __func__);
			xiic_clear_rx_fifo(i2c);
			goto out;
		}

		xiic_read_rx(i2c);
		if (xiic_rx_space(i2c) == 0) {
			/* this is the last part of the message */
			i2c->rx_msg = NULL;

			/* also clear TX error if there (RX complete) */
			clr |= (isr & XIIC_INTR_TX_ERROR_MASK);

			dev_dbg(i2c->adap.dev.parent,
				"%s end of message, nmsgs: %d\n",
				__func__, i2c->nmsgs);

			/* send next message if this wasn't the last,
			 * otherwise the transfer will be finalised when
			 * receiving the bus not busy interrupt
			 */
			if (i2c->nmsgs > 1) {
				i2c->nmsgs--;
				i2c->tx_msg++;
				dev_dbg(i2c->adap.dev.parent,
					"%s will start next...\n", __func__);

				__xiic_start_xfer(i2c);
			}
		}
	}
	if (pend & XIIC_INTR_BNB_MASK) {
		/* IIC bus has transitioned to not busy */
		clr |= XIIC_INTR_BNB_MASK;

		/* The bus is not busy, disable BusNotBusy interrupt */
		xiic_irq_dis(i2c, XIIC_INTR_BNB_MASK);

		if (!i2c->tx_msg)
			goto out;

		if ((i2c->nmsgs == 1) && !i2c->rx_msg &&
		    xiic_tx_space(i2c) == 0)
			xiic_wakeup(i2c, STATE_DONE);
		else
			xiic_wakeup(i2c, STATE_ERROR);
	}
	if (pend & (XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK)) {
		/* Transmit register/FIFO is empty or ½ empty */

		clr |= (pend &
			(XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK));

		if (!i2c->tx_msg) {
			dev_dbg(i2c->adap.dev.parent,
				"%s unexpected TX IRQ\n", __func__);
			goto out;
		}

		xiic_fill_tx_fifo(i2c);

		/* current message sent and there is space in the fifo */
		if (!xiic_tx_space(i2c) && xiic_tx_fifo_space(i2c) >= 2) {
			dev_dbg(i2c->adap.dev.parent,
				"%s end of message sent, nmsgs: %d\n",
				__func__, i2c->nmsgs);
			if (i2c->nmsgs > 1) {
				i2c->nmsgs--;
				i2c->tx_msg++;
				__xiic_start_xfer(i2c);
			} else {
				xiic_irq_dis(i2c, XIIC_INTR_TX_HALF_MASK);

				dev_dbg(i2c->adap.dev.parent,
					"%s Got TX IRQ but no more to do...\n",
					__func__);
			}
		} else if (!xiic_tx_space(i2c) && (i2c->nmsgs == 1))
			/* current frame is sent and is last,
			 * make sure to disable tx half
			 */
			xiic_irq_dis(i2c, XIIC_INTR_TX_HALF_MASK);
	}
out:
	dev_dbg(i2c->adap.dev.parent, "%s clr: 0x%x\n", __func__, clr);

	xiic_setreg32(i2c, XIIC_IISR_OFFSET, clr);
	mutex_unlock(&i2c->lock);
	return IRQ_HANDLED;
}

static int xiic_bus_busy(struct xiic_i2c *i2c)
{
	u8 sr = xiic_getreg8(i2c, XIIC_SR_REG_OFFSET);

	return (sr & XIIC_SR_BUS_BUSY_MASK) ? -EBUSY : 0;
}

static int xiic_busy(struct xiic_i2c *i2c)
{
	int tries = 3;
	int err;

	if (i2c->tx_msg)
		return -EBUSY;

	/* for instance if the previous transfer was terminated due to a TX
	 * error it might be that the bus is on its way to becoming
	 * available; give it at most 3 ms to wake
	 */
	err = xiic_bus_busy(i2c);
	while (err && tries--) {
		msleep(1);
		err = xiic_bus_busy(i2c);
	}

	return err;
}

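/*
 * Set up a dynamic-mode read: program the Rx FIFO watermark for the
 * expected length, write the slave address with the dynamic START bit and
 * then the byte count (with the dynamic STOP bit if this is the last
 * message of the transfer).
 */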
static void xiic_start_recv(struct xiic_i2c *i2c)
{
	u8 rx_watermark;
	struct i2c_msg *msg = i2c->rx_msg = i2c->tx_msg;
	unsigned long flags;

	/* Clear and enable Rx full interrupt. */
	xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK | XIIC_INTR_TX_ERROR_MASK);

	/* we want to get all but the last byte, because the TX_ERROR IRQ
	 * is used to indicate error ACK on the address, and negative ack
	 * on the last received byte, so to not mix them receive all but
	 * the last.
	 * In the case where there is only one byte to receive
	 * we can check if ERROR and RX full is set at the same time
	 */
	rx_watermark = msg->len;
	if (rx_watermark > IIC_RX_FIFO_DEPTH)
		rx_watermark = IIC_RX_FIFO_DEPTH;
	xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rx_watermark - 1);

	local_irq_save(flags);
	if (!(msg->flags & I2C_M_NOSTART))
		/* write the address */
		xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
			      i2c_8bit_addr_from_msg(msg) |
			      XIIC_TX_DYN_START_MASK);

	xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK);

	xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
		      msg->len | ((i2c->nmsgs == 1) ? XIIC_TX_DYN_STOP_MASK : 0));
	local_irq_restore(flags);

	if (i2c->nmsgs == 1)
		/* very last, enable bus not busy as well */
		xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK);

	/* the message is considered transmitted */
	i2c->tx_pos = msg->len;
}

static void xiic_start_send(struct xiic_i2c *i2c)
{
	struct i2c_msg *msg = i2c->tx_msg;

	xiic_irq_clr(i2c, XIIC_INTR_TX_ERROR_MASK);

	dev_dbg(i2c->adap.dev.parent, "%s entry, msg: %p, len: %d",
		__func__, msg, msg->len);
	dev_dbg(i2c->adap.dev.parent, "%s entry, ISR: 0x%x, CR: 0x%x\n",
		__func__, xiic_getreg32(i2c, XIIC_IISR_OFFSET),
		xiic_getreg8(i2c, XIIC_CR_REG_OFFSET));

	if (!(msg->flags & I2C_M_NOSTART)) {
		/* write the address */
		u16 data = i2c_8bit_addr_from_msg(msg) |
			XIIC_TX_DYN_START_MASK;

		if ((i2c->nmsgs == 1) && msg->len == 0)
			/* no data and last message -> add STOP */
			data |= XIIC_TX_DYN_STOP_MASK;

		xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data);
	}

	xiic_fill_tx_fifo(i2c);

	/* Clear any pending Tx empty, Tx Error and then enable them. */
	xiic_irq_clr_en(i2c, XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_ERROR_MASK |
			XIIC_INTR_BNB_MASK);
}

static irqreturn_t xiic_isr(int irq, void *dev_id)
{
	struct xiic_i2c *i2c = dev_id;
	u32 pend, isr, ier;
	irqreturn_t ret = IRQ_NONE;

	/* Do not process a device's interrupts if the device has no
	 * interrupts pending
	 */

	dev_dbg(i2c->adap.dev.parent, "%s entry\n", __func__);

	isr = xiic_getreg32(i2c, XIIC_IISR_OFFSET);
	ier = xiic_getreg32(i2c, XIIC_IIER_OFFSET);
	pend = isr & ier;
	if (pend)
		ret = IRQ_WAKE_THREAD;

	return ret;
}

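/*
 * Start (or continue) a transfer. Called with i2c->lock held. Pushes as
 * many consecutive write messages as fit into the Tx FIFO; a read message
 * is handed to xiic_start_recv() and ends the loop. If messages (or part
 * of the current one) remain, the Tx-half-empty interrupt is enabled so
 * the threaded handler can continue the transfer.
 */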
static void __xiic_start_xfer(struct xiic_i2c *i2c)
{
	int first = 1;
	int fifo_space = xiic_tx_fifo_space(i2c);

	dev_dbg(i2c->adap.dev.parent, "%s entry, msg: %p, fifo space: %d\n",
		__func__, i2c->tx_msg, fifo_space);

	if (!i2c->tx_msg)
		return;

	i2c->rx_pos = 0;
	i2c->tx_pos = 0;
	i2c->state = STATE_START;
	while ((fifo_space >= 2) && (first || (i2c->nmsgs > 1))) {
		if (!first) {
			i2c->nmsgs--;
			i2c->tx_msg++;
			i2c->tx_pos = 0;
		} else
			first = 0;

		if (i2c->tx_msg->flags & I2C_M_RD) {
			/* we don't dare putting several reads in the FIFO */
			xiic_start_recv(i2c);
			return;
		} else {
			xiic_start_send(i2c);
			if (xiic_tx_space(i2c) != 0) {
				/* the message could not be completely sent */
				break;
			}
		}

		fifo_space = xiic_tx_fifo_space(i2c);
	}

	/* there are more messages or the current one could not be completely
	 * put into the FIFO, also enable the half empty interrupt
	 */
	if (i2c->nmsgs > 1 || xiic_tx_space(i2c))
		xiic_irq_clr_en(i2c, XIIC_INTR_TX_HALF_MASK);
}

static void xiic_start_xfer(struct xiic_i2c *i2c)
{
	mutex_lock(&i2c->lock);
	xiic_reinit(i2c);
	__xiic_start_xfer(i2c);
	mutex_unlock(&i2c->lock);
}

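/*
 * master_xfer callback: take a runtime PM reference, make sure the bus is
 * idle (waiting up to ~3 ms), start the transfer and then wait up to one
 * second for the interrupt machinery to signal STATE_DONE or STATE_ERROR.
 */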
static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	struct xiic_i2c *i2c = i2c_get_adapdata(adap);
	int err;

	dev_dbg(adap->dev.parent, "%s entry SR: 0x%x\n", __func__,
		xiic_getreg8(i2c, XIIC_SR_REG_OFFSET));

	err = pm_runtime_get_sync(i2c->dev);
	if (err < 0)
		return err;

	err = xiic_busy(i2c);
	if (err)
		goto out;

	i2c->tx_msg = msgs;
	i2c->nmsgs = num;

	xiic_start_xfer(i2c);

	if (wait_event_timeout(i2c->wait, (i2c->state == STATE_ERROR) ||
			       (i2c->state == STATE_DONE), HZ)) {
		err = (i2c->state == STATE_DONE) ? num : -EIO;
		goto out;
	} else {
		i2c->tx_msg = NULL;
		i2c->rx_msg = NULL;
		i2c->nmsgs = 0;
		err = -ETIMEDOUT;
		goto out;
	}
out:
	pm_runtime_mark_last_busy(i2c->dev);
	pm_runtime_put_autosuspend(i2c->dev);
	return err;
}

static u32 xiic_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm xiic_algorithm = {
	.master_xfer = xiic_xfer,
	.functionality = xiic_func,
};

static const struct i2c_adapter_quirks xiic_quirks = {
	.max_read_len = 255,
};

static const struct i2c_adapter xiic_adapter = {
	.owner = THIS_MODULE,
	.name = DRIVER_NAME,
	.class = I2C_CLASS_DEPRECATED,
	.algo = &xiic_algorithm,
	.quirks = &xiic_quirks,
};

static int xiic_i2c_probe(struct platform_device *pdev)
{
	struct xiic_i2c *i2c;
	struct xiic_i2c_platform_data *pdata;
	struct resource *res;
	int ret, irq;
	u8 i;
	u32 sr;

	i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL);
	if (!i2c)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	i2c->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(i2c->base))
		return PTR_ERR(i2c->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	pdata = dev_get_platdata(&pdev->dev);

	/* hook up driver to tree */
	platform_set_drvdata(pdev, i2c);
	i2c->adap = xiic_adapter;
	i2c_set_adapdata(&i2c->adap, i2c);
	i2c->adap.dev.parent = &pdev->dev;
	i2c->adap.dev.of_node = pdev->dev.of_node;

	mutex_init(&i2c->lock);
	init_waitqueue_head(&i2c->wait);

	i2c->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(i2c->clk)) {
		dev_err(&pdev->dev, "input clock not found.\n");
		return PTR_ERR(i2c->clk);
	}
	ret = clk_prepare_enable(i2c->clk);
	if (ret) {
		dev_err(&pdev->dev, "Unable to enable clock.\n");
		return ret;
	}
	i2c->dev = &pdev->dev;
	pm_runtime_enable(i2c->dev);
	pm_runtime_set_autosuspend_delay(i2c->dev, XIIC_PM_TIMEOUT);
	pm_runtime_use_autosuspend(i2c->dev);
	pm_runtime_set_active(i2c->dev);
	ret = devm_request_threaded_irq(&pdev->dev, irq, xiic_isr,
					xiic_process, IRQF_ONESHOT,
					pdev->name, i2c);

	if (ret < 0) {
		dev_err(&pdev->dev, "Cannot claim IRQ\n");
		goto err_clk_dis;
	}

	/*
	 * Detect endianness.
	 * Try to reset the TX FIFO. Then check the EMPTY flag. If it is not
	 * set, assume that the endianness was wrong and swap.
	 */
	i2c->endianness = LITTLE;
	xiic_setreg32(i2c, XIIC_CR_REG_OFFSET, XIIC_CR_TX_FIFO_RESET_MASK);
	/* Reset is cleared in xiic_reinit */
	sr = xiic_getreg32(i2c, XIIC_SR_REG_OFFSET);
	if (!(sr & XIIC_SR_TX_FIFO_EMPTY_MASK))
		i2c->endianness = BIG;

	xiic_reinit(i2c);

	/* add i2c adapter to i2c tree */
	ret = i2c_add_adapter(&i2c->adap);
	if (ret) {
		xiic_deinit(i2c);
		goto err_clk_dis;
	}

	if (pdata) {
		/* add in known devices to the bus */
		for (i = 0; i < pdata->num_devices; i++)
			i2c_new_device(&i2c->adap, pdata->devices + i);
	}

	return 0;

err_clk_dis:
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	clk_disable_unprepare(i2c->clk);
	return ret;
}

static int xiic_i2c_remove(struct platform_device *pdev)
{
	struct xiic_i2c *i2c = platform_get_drvdata(pdev);
	int ret;

	/* remove adapter & data */
	i2c_del_adapter(&i2c->adap);

	ret = clk_prepare_enable(i2c->clk);
	if (ret) {
		dev_err(&pdev->dev, "Unable to enable clock.\n");
		return ret;
	}
	xiic_deinit(i2c);
	clk_disable_unprepare(i2c->clk);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

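/*
 * Illustrative (not normative) device tree node for this controller; the
 * node name, base address, interrupt wiring and clock provider below are
 * placeholders and depend on the platform and the FPGA design:
 *
 *	axi_iic_0: i2c@40800000 {
 *		compatible = "xlnx,xps-iic-2.00.a";
 *		reg = <0x40800000 0x10000>;
 *		interrupts = <2>;
 *		clocks = <&axi_clk>;
 *		#address-cells = <1>;
 *		#size-cells = <0>;
 *	};
 */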
#if defined(CONFIG_OF)
static const struct of_device_id xiic_of_match[] = {
	{ .compatible = "xlnx,xps-iic-2.00.a", },
	{},
};
MODULE_DEVICE_TABLE(of, xiic_of_match);
#endif

static int __maybe_unused xiic_i2c_runtime_suspend(struct device *dev)
{
	struct xiic_i2c *i2c = dev_get_drvdata(dev);

	clk_disable(i2c->clk);

	return 0;
}

static int __maybe_unused xiic_i2c_runtime_resume(struct device *dev)
{
	struct xiic_i2c *i2c = dev_get_drvdata(dev);
	int ret;

	ret = clk_enable(i2c->clk);
	if (ret) {
		dev_err(dev, "Cannot enable clock.\n");
		return ret;
	}

	return 0;
}

static const struct dev_pm_ops xiic_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(xiic_i2c_runtime_suspend,
			   xiic_i2c_runtime_resume, NULL)
};

static struct platform_driver xiic_i2c_driver = {
	.probe = xiic_i2c_probe,
	.remove = xiic_i2c_remove,
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = of_match_ptr(xiic_of_match),
		.pm = &xiic_dev_pm_ops,
	},
};

module_platform_driver(xiic_i2c_driver);

MODULE_AUTHOR("info@mocean-labs.com");
MODULE_DESCRIPTION("Xilinx I2C bus driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);