// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Freescale eSPI controller driver.
 *
 * Copyright 2010 Freescale Semiconductor, Inc.
 */
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/fsl_devices.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/pm_runtime.h>
#include <sysdev/fsl_soc.h>

/* eSPI Controller registers */
#define ESPI_SPMODE	0x00	/* eSPI mode register */
#define ESPI_SPIE	0x04	/* eSPI event register */
#define ESPI_SPIM	0x08	/* eSPI mask register */
#define ESPI_SPCOM	0x0c	/* eSPI command register */
#define ESPI_SPITF	0x10	/* eSPI transmit FIFO access register*/
#define ESPI_SPIRF	0x14	/* eSPI receive FIFO access register*/
#define ESPI_SPMODE0	0x20	/* eSPI cs0 mode register */

#define ESPI_SPMODEx(x)	(ESPI_SPMODE0 + (x) * 4)

/* eSPI Controller mode register definitions */
#define SPMODE_ENABLE		BIT(31)
#define SPMODE_LOOP		BIT(30)
#define SPMODE_TXTHR(x)		((x) << 8)
#define SPMODE_RXTHR(x)		((x) << 0)

/* eSPI Controller CS mode register definitions */
#define CSMODE_CI_INACTIVEHIGH	BIT(31)
#define CSMODE_CP_BEGIN_EDGECLK	BIT(30)
#define CSMODE_REV		BIT(29)
#define CSMODE_DIV16		BIT(28)
#define CSMODE_PM(x)		((x) << 24)
#define CSMODE_POL_1		BIT(20)
#define CSMODE_LEN(x)		((x) << 16)
#define CSMODE_BEF(x)		((x) << 12)
#define CSMODE_AFT(x)		((x) << 8)
#define CSMODE_CG(x)		((x) << 3)

#define FSL_ESPI_FIFO_SIZE	32
#define FSL_ESPI_RXTHR		15

/* Default mode/csmode for eSPI controller */
#define SPMODE_INIT_VAL (SPMODE_TXTHR(4) | SPMODE_RXTHR(FSL_ESPI_RXTHR))
#define CSMODE_INIT_VAL (CSMODE_POL_1 | CSMODE_BEF(0) \
		| CSMODE_AFT(0) | CSMODE_CG(1))

/* SPIE register values */
#define SPIE_RXCNT(reg)	((reg >> 24) & 0x3F)
#define SPIE_TXCNT(reg)	((reg >> 16) & 0x3F)
#define SPIE_TXE	BIT(15)	/* TX FIFO empty */
#define SPIE_DON	BIT(14)	/* TX done */
#define SPIE_RXT	BIT(13)	/* RX FIFO threshold */
#define SPIE_RXF	BIT(12)	/* RX FIFO full */
#define SPIE_TXT	BIT(11)	/* TX FIFO threshold*/
#define SPIE_RNE	BIT(9)	/* RX FIFO not empty */
#define SPIE_TNF	BIT(8)	/* TX FIFO not full */

/* SPIM register values */
#define SPIM_TXE	BIT(15)	/* TX FIFO empty */
#define SPIM_DON	BIT(14)	/* TX done */
#define SPIM_RXT	BIT(13)	/* RX FIFO threshold */
#define SPIM_RXF	BIT(12)	/* RX FIFO full */
#define SPIM_TXT	BIT(11)	/* TX FIFO threshold*/
#define SPIM_RNE	BIT(9)	/* RX FIFO not empty */
#define SPIM_TNF	BIT(8)	/* TX FIFO not full */

/* SPCOM register values */
#define SPCOM_CS(x)		((x) << 30)
#define SPCOM_DO		BIT(28)		/* Dual output */
#define SPCOM_TO		BIT(27)		/* TX only */
#define SPCOM_RXSKIP(x)		((x) << 16)
#define SPCOM_TRANLEN(x)	((x) << 0)

#define SPCOM_TRANLEN_MAX	0x10000	/* Max transaction length */

#define AUTOSUSPEND_TIMEOUT	2000

struct fsl_espi {
	struct device *dev;
	void __iomem *reg_base;

	struct list_head *m_transfers;
	struct spi_transfer *tx_t;
	unsigned int tx_pos;
	bool tx_done;
	struct spi_transfer *rx_t;
	unsigned int rx_pos;
	bool rx_done;

	bool swab;
	unsigned int rxskip;

	spinlock_t lock;

	u32 spibrg;	/* SPIBRG input clock */

	struct completion done;
};

struct fsl_espi_cs {
	u32 hw_mode;
};

static inline u32 fsl_espi_read_reg(struct fsl_espi *espi, int offset)
{
	return ioread32be(espi->reg_base + offset);
}

static inline u16 fsl_espi_read_reg16(struct fsl_espi *espi, int offset)
{
	return ioread16be(espi->reg_base + offset);
}

static inline u8 fsl_espi_read_reg8(struct fsl_espi *espi, int offset)
{
	return ioread8(espi->reg_base + offset);
}

static inline void fsl_espi_write_reg(struct fsl_espi *espi, int offset,
				      u32 val)
{
	iowrite32be(val, espi->reg_base + offset);
}

static inline void fsl_espi_write_reg16(struct fsl_espi *espi, int offset,
					u16 val)
{
	iowrite16be(val, espi->reg_base + offset);
}

static inline void fsl_espi_write_reg8(struct fsl_espi *espi, int offset,
				       u8 val)
{
	iowrite8(val, espi->reg_base + offset);
}

static int fsl_espi_check_message(struct spi_message *m)
{
	struct fsl_espi *espi = spi_master_get_devdata(m->spi->master);
	struct spi_transfer *t, *first;

	if (m->frame_length > SPCOM_TRANLEN_MAX) {
		dev_err(espi->dev, "message too long, size is %u bytes\n",
			m->frame_length);
		return -EMSGSIZE;
	}

	first = list_first_entry(&m->transfers, struct spi_transfer,
				 transfer_list);

	list_for_each_entry(t, &m->transfers, transfer_list) {
		if (first->bits_per_word != t->bits_per_word ||
		    first->speed_hz != t->speed_hz) {
			dev_err(espi->dev, "bits_per_word/speed_hz should be the same for all transfers\n");
			return -EINVAL;
		}
	}

	/* ESPI supports MSB-first transfers for word size 8 / 16 only */
	if (!(m->spi->mode & SPI_LSB_FIRST) && first->bits_per_word != 8 &&
	    first->bits_per_word != 16) {
		dev_err(espi->dev,
			"MSB-first transfer not supported for wordsize %u\n",
			first->bits_per_word);
		return -EINVAL;
	}

	return 0;
}

static unsigned int fsl_espi_check_rxskip_mode(struct spi_message *m)
{
	struct spi_transfer *t;
	unsigned int i = 0, rxskip = 0;

	/*
	 * prerequisites for ESPI rxskip mode:
	 * - message has two transfers
	 * - first transfer is a write and second is a read
	 *
	 * In addition the current low-level transfer mechanism requires
	 * that the rxskip bytes fit into the TX FIFO. Else the transfer
	 * would hang because after the first FSL_ESPI_FIFO_SIZE bytes
	 * the TX FIFO isn't re-filled.
	 */
	list_for_each_entry(t, &m->transfers, transfer_list) {
		if (i == 0) {
			if (!t->tx_buf || t->rx_buf ||
			    t->len > FSL_ESPI_FIFO_SIZE)
				return 0;
			rxskip = t->len;
		} else if (i == 1) {
			if (t->tx_buf || !t->rx_buf)
				return 0;
		}
		i++;
	}

	return i == 2 ? rxskip : 0;
}

static void fsl_espi_fill_tx_fifo(struct fsl_espi *espi, u32 events)
{
	u32 tx_fifo_avail;
	unsigned int tx_left;
	const void *tx_buf;

	/* if events is zero transfer has not started and tx fifo is empty */
	tx_fifo_avail = events ?
			SPIE_TXCNT(events) : FSL_ESPI_FIFO_SIZE;
start:
	tx_left = espi->tx_t->len - espi->tx_pos;
	tx_buf = espi->tx_t->tx_buf;
	while (tx_fifo_avail >= min(4U, tx_left) && tx_left) {
		if (tx_left >= 4) {
			if (!tx_buf)
				fsl_espi_write_reg(espi, ESPI_SPITF, 0);
			else if (espi->swab)
				fsl_espi_write_reg(espi, ESPI_SPITF,
					swahb32p(tx_buf + espi->tx_pos));
			else
				fsl_espi_write_reg(espi, ESPI_SPITF,
					*(u32 *)(tx_buf + espi->tx_pos));
			espi->tx_pos += 4;
			tx_left -= 4;
			tx_fifo_avail -= 4;
		} else if (tx_left >= 2 && tx_buf && espi->swab) {
			fsl_espi_write_reg16(espi, ESPI_SPITF,
				swab16p(tx_buf + espi->tx_pos));
			espi->tx_pos += 2;
			tx_left -= 2;
			tx_fifo_avail -= 2;
		} else {
			if (!tx_buf)
				fsl_espi_write_reg8(espi, ESPI_SPITF, 0);
			else
				fsl_espi_write_reg8(espi, ESPI_SPITF,
					*(u8 *)(tx_buf + espi->tx_pos));
			espi->tx_pos += 1;
			tx_left -= 1;
			tx_fifo_avail -= 1;
		}
	}

	if (!tx_left) {
		/* Last transfer finished, in rxskip mode only one is needed */
		if (list_is_last(&espi->tx_t->transfer_list,
				 espi->m_transfers) || espi->rxskip) {
			espi->tx_done = true;
			return;
		}
		espi->tx_t = list_next_entry(espi->tx_t, transfer_list);
		espi->tx_pos = 0;
		/* continue with next transfer if tx fifo is not full */
		if (tx_fifo_avail)
			goto start;
	}
}

static void fsl_espi_read_rx_fifo(struct fsl_espi *espi, u32 events)
{
	u32 rx_fifo_avail = SPIE_RXCNT(events);
	unsigned int rx_left;
	void *rx_buf;

start:
	rx_left = espi->rx_t->len - espi->rx_pos;
	rx_buf = espi->rx_t->rx_buf;
	while (rx_fifo_avail >= min(4U, rx_left) && rx_left) {
		if (rx_left >= 4) {
			u32 val = fsl_espi_read_reg(espi, ESPI_SPIRF);

			if (rx_buf && espi->swab)
				*(u32 *)(rx_buf + espi->rx_pos) = swahb32(val);
			else if (rx_buf)
				*(u32 *)(rx_buf + espi->rx_pos) = val;
			espi->rx_pos += 4;
			rx_left -= 4;
			rx_fifo_avail -= 4;
		} else if (rx_left >= 2 && rx_buf && espi->swab) {
			u16 val = fsl_espi_read_reg16(espi, ESPI_SPIRF);

			*(u16 *)(rx_buf + espi->rx_pos) = swab16(val);
			espi->rx_pos += 2;
			rx_left -= 2;
			rx_fifo_avail -= 2;
		} else {
			u8 val = fsl_espi_read_reg8(espi, ESPI_SPIRF);

			if (rx_buf)
				*(u8 *)(rx_buf + espi->rx_pos) = val;
			espi->rx_pos += 1;
			rx_left -= 1;
			rx_fifo_avail -= 1;
		}
	}

	if (!rx_left) {
		if (list_is_last(&espi->rx_t->transfer_list,
				 espi->m_transfers)) {
			espi->rx_done = true;
			return;
		}
		espi->rx_t = list_next_entry(espi->rx_t, transfer_list);
		espi->rx_pos = 0;
		/* continue with next transfer if rx fifo is not empty */
		if (rx_fifo_avail)
			goto start;
	}
}

static void fsl_espi_setup_transfer(struct spi_device *spi,
				    struct spi_transfer *t)
{
	struct fsl_espi *espi = spi_master_get_devdata(spi->master);
	int bits_per_word = t ? t->bits_per_word : spi->bits_per_word;
	u32 pm, hz = t ?
			t->speed_hz : spi->max_speed_hz;
	struct fsl_espi_cs *cs = spi_get_ctldata(spi);
	u32 hw_mode_old = cs->hw_mode;

	/* mask out bits we are going to set */
	cs->hw_mode &= ~(CSMODE_LEN(0xF) | CSMODE_DIV16 | CSMODE_PM(0xF));

	cs->hw_mode |= CSMODE_LEN(bits_per_word - 1);

	pm = DIV_ROUND_UP(espi->spibrg, hz * 4) - 1;

	if (pm > 15) {
		cs->hw_mode |= CSMODE_DIV16;
		pm = DIV_ROUND_UP(espi->spibrg, hz * 16 * 4) - 1;
	}

	cs->hw_mode |= CSMODE_PM(pm);

	/* don't write the mode register if the mode doesn't change */
	if (cs->hw_mode != hw_mode_old)
		fsl_espi_write_reg(espi, ESPI_SPMODEx(spi->chip_select),
				   cs->hw_mode);
}

static int fsl_espi_bufs(struct spi_device *spi, struct spi_transfer *t)
{
	struct fsl_espi *espi = spi_master_get_devdata(spi->master);
	unsigned int rx_len = t->len;
	u32 mask, spcom;
	int ret;

	reinit_completion(&espi->done);

	/* Set SPCOM[CS] and SPCOM[TRANLEN] field */
	spcom = SPCOM_CS(spi->chip_select);
	spcom |= SPCOM_TRANLEN(t->len - 1);

	/* configure RXSKIP mode */
	if (espi->rxskip) {
		spcom |= SPCOM_RXSKIP(espi->rxskip);
		rx_len = t->len - espi->rxskip;
		if (t->rx_nbits == SPI_NBITS_DUAL)
			spcom |= SPCOM_DO;
	}

	fsl_espi_write_reg(espi, ESPI_SPCOM, spcom);

	/* enable interrupts */
	mask = SPIM_DON;
	if (rx_len > FSL_ESPI_FIFO_SIZE)
		mask |= SPIM_RXT;
	fsl_espi_write_reg(espi, ESPI_SPIM, mask);

	/* Prevent filling the fifo from getting interrupted */
	spin_lock_irq(&espi->lock);
	fsl_espi_fill_tx_fifo(espi, 0);
	spin_unlock_irq(&espi->lock);

	/* Won't hang up forever, SPI bus sometimes got lost interrupts... */
	ret = wait_for_completion_timeout(&espi->done, 2 * HZ);
	if (ret == 0)
		dev_err(espi->dev, "Transfer timed out!\n");

	/* disable rx ints */
	fsl_espi_write_reg(espi, ESPI_SPIM, 0);

	return ret == 0 ?
		-ETIMEDOUT : 0;
}

static int fsl_espi_trans(struct spi_message *m, struct spi_transfer *trans)
{
	struct fsl_espi *espi = spi_master_get_devdata(m->spi->master);
	struct spi_device *spi = m->spi;
	int ret;

	/* In case of LSB-first and bits_per_word > 8 byte-swap all words */
	espi->swab = spi->mode & SPI_LSB_FIRST && trans->bits_per_word > 8;

	espi->m_transfers = &m->transfers;
	espi->tx_t = list_first_entry(&m->transfers, struct spi_transfer,
				      transfer_list);
	espi->tx_pos = 0;
	espi->tx_done = false;
	espi->rx_t = list_first_entry(&m->transfers, struct spi_transfer,
				      transfer_list);
	espi->rx_pos = 0;
	espi->rx_done = false;

	espi->rxskip = fsl_espi_check_rxskip_mode(m);
	if (trans->rx_nbits == SPI_NBITS_DUAL && !espi->rxskip) {
		dev_err(espi->dev, "Dual output mode requires RXSKIP mode!\n");
		return -EINVAL;
	}

	/* In RXSKIP mode skip first transfer for reads */
	if (espi->rxskip)
		espi->rx_t = list_next_entry(espi->rx_t, transfer_list);

	fsl_espi_setup_transfer(spi, trans);

	ret = fsl_espi_bufs(spi, trans);

	spi_transfer_delay_exec(trans);

	return ret;
}

static int fsl_espi_do_one_msg(struct spi_master *master,
			       struct spi_message *m)
{
	unsigned int delay_usecs = 0, rx_nbits = 0;
	unsigned int delay_nsecs = 0, delay_nsecs1 = 0;
	struct spi_transfer *t, trans = {};
	int ret;

	ret = fsl_espi_check_message(m);
	if (ret)
		goto out;

	list_for_each_entry(t, &m->transfers, transfer_list) {
		if (t->delay_usecs) {
			if (t->delay_usecs > delay_usecs) {
				delay_usecs = t->delay_usecs;
				delay_nsecs = delay_usecs * 1000;
			}
		} else {
			delay_nsecs1 = spi_delay_to_ns(&t->delay, t);
			if (delay_nsecs1 > delay_nsecs)
				delay_nsecs = delay_nsecs1;
		}
		if (t->rx_nbits > rx_nbits)
			rx_nbits = t->rx_nbits;
	}

	t = list_first_entry(&m->transfers, struct spi_transfer,
			     transfer_list);

	trans.len = m->frame_length;
	trans.speed_hz = t->speed_hz;
	trans.bits_per_word = t->bits_per_word;
	trans.delay.value = delay_nsecs;
	trans.delay.unit = SPI_DELAY_UNIT_NSECS;
	trans.rx_nbits = rx_nbits;

	if (trans.len)
		ret = fsl_espi_trans(m, &trans);

	m->actual_length = ret ?
		0 : trans.len;
out:
	if (m->status == -EINPROGRESS)
		m->status = ret;

	spi_finalize_current_message(master);

	return ret;
}

static int fsl_espi_setup(struct spi_device *spi)
{
	struct fsl_espi *espi;
	u32 loop_mode;
	struct fsl_espi_cs *cs = spi_get_ctldata(spi);

	if (!cs) {
		cs = kzalloc(sizeof(*cs), GFP_KERNEL);
		if (!cs)
			return -ENOMEM;
		spi_set_ctldata(spi, cs);
	}

	espi = spi_master_get_devdata(spi->master);

	pm_runtime_get_sync(espi->dev);

	cs->hw_mode = fsl_espi_read_reg(espi, ESPI_SPMODEx(spi->chip_select));
	/* mask out bits we are going to set */
	cs->hw_mode &= ~(CSMODE_CP_BEGIN_EDGECLK | CSMODE_CI_INACTIVEHIGH
			 | CSMODE_REV);

	if (spi->mode & SPI_CPHA)
		cs->hw_mode |= CSMODE_CP_BEGIN_EDGECLK;
	if (spi->mode & SPI_CPOL)
		cs->hw_mode |= CSMODE_CI_INACTIVEHIGH;
	if (!(spi->mode & SPI_LSB_FIRST))
		cs->hw_mode |= CSMODE_REV;

	/* Handle the loop mode */
	loop_mode = fsl_espi_read_reg(espi, ESPI_SPMODE);
	loop_mode &= ~SPMODE_LOOP;
	if (spi->mode & SPI_LOOP)
		loop_mode |= SPMODE_LOOP;
	fsl_espi_write_reg(espi, ESPI_SPMODE, loop_mode);

	fsl_espi_setup_transfer(spi, NULL);

	pm_runtime_mark_last_busy(espi->dev);
	pm_runtime_put_autosuspend(espi->dev);

	return 0;
}

static void fsl_espi_cleanup(struct spi_device *spi)
{
	struct fsl_espi_cs *cs = spi_get_ctldata(spi);

	kfree(cs);
	spi_set_ctldata(spi, NULL);
}

static void fsl_espi_cpu_irq(struct fsl_espi *espi, u32 events)
{
	if (!espi->rx_done)
		fsl_espi_read_rx_fifo(espi, events);

	if (!espi->tx_done)
		fsl_espi_fill_tx_fifo(espi, events);

	if (!espi->tx_done || !espi->rx_done)
		return;

	/* we're done, but check for errors before returning */
	events = fsl_espi_read_reg(espi, ESPI_SPIE);

	if (!(events & SPIE_DON))
		dev_err(espi->dev,
			"Transfer done but SPIE_DON isn't set!\n");

	if (SPIE_RXCNT(events) || SPIE_TXCNT(events) != FSL_ESPI_FIFO_SIZE) {
		dev_err(espi->dev, "Transfer done but rx/tx fifo's aren't empty!\n");
		dev_err(espi->dev, "SPIE_RXCNT = %d, SPIE_TXCNT = %d\n",
			SPIE_RXCNT(events), SPIE_TXCNT(events));
	}

	complete(&espi->done);
}

static irqreturn_t fsl_espi_irq(s32 irq, void *context_data)
{
	struct fsl_espi *espi = context_data;
	u32 events, mask;

	spin_lock(&espi->lock);

	/* Get interrupt events(tx/rx) */
	events = fsl_espi_read_reg(espi, ESPI_SPIE);
	mask = fsl_espi_read_reg(espi, ESPI_SPIM);
	if (!(events & mask)) {
		spin_unlock(&espi->lock);
		return IRQ_NONE;
	}

	dev_vdbg(espi->dev, "%s: events %x\n", __func__, events);

	fsl_espi_cpu_irq(espi, events);

	/* Clear the events */
	fsl_espi_write_reg(espi, ESPI_SPIE, events);

	spin_unlock(&espi->lock);

	return IRQ_HANDLED;
}

#ifdef CONFIG_PM
static int fsl_espi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct fsl_espi *espi = spi_master_get_devdata(master);
	u32 regval;

	regval = fsl_espi_read_reg(espi, ESPI_SPMODE);
	regval &= ~SPMODE_ENABLE;
	fsl_espi_write_reg(espi, ESPI_SPMODE, regval);

	return 0;
}

static int fsl_espi_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct fsl_espi *espi = spi_master_get_devdata(master);
	u32 regval;

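	/* re-enable the controller by setting SPMODE_ENABLE again */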
	regval = fsl_espi_read_reg(espi, ESPI_SPMODE);
	regval |= SPMODE_ENABLE;
	fsl_espi_write_reg(espi, ESPI_SPMODE, regval);

	return 0;
}
#endif

static size_t fsl_espi_max_message_size(struct spi_device *spi)
{
	return SPCOM_TRANLEN_MAX;
}

static void fsl_espi_init_regs(struct device *dev, bool initial)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct fsl_espi *espi = spi_master_get_devdata(master);
	struct device_node *nc;
	u32 csmode, cs, prop;
	int ret;

	/* SPI controller initializations */
	fsl_espi_write_reg(espi, ESPI_SPMODE, 0);
	fsl_espi_write_reg(espi, ESPI_SPIM, 0);
	fsl_espi_write_reg(espi, ESPI_SPCOM, 0);
	fsl_espi_write_reg(espi, ESPI_SPIE, 0xffffffff);

	/* Init eSPI CS mode register */
	for_each_available_child_of_node(master->dev.of_node, nc) {
		/* get chip select */
		ret = of_property_read_u32(nc, "reg", &cs);
		if (ret || cs >= master->num_chipselect)
			continue;

		csmode = CSMODE_INIT_VAL;

		/* check if CSBEF is set in device tree */
		ret = of_property_read_u32(nc, "fsl,csbef", &prop);
		if (!ret) {
			csmode &= ~(CSMODE_BEF(0xf));
			csmode |= CSMODE_BEF(prop);
		}

		/* check if CSAFT is set in device tree */
		ret = of_property_read_u32(nc, "fsl,csaft", &prop);
		if (!ret) {
			csmode &= ~(CSMODE_AFT(0xf));
			csmode |= CSMODE_AFT(prop);
		}

		fsl_espi_write_reg(espi, ESPI_SPMODEx(cs), csmode);

		if (initial)
			dev_info(dev, "cs=%u, init_csmode=0x%x\n", cs, csmode);
	}

	/* Enable SPI interface */
	fsl_espi_write_reg(espi, ESPI_SPMODE, SPMODE_INIT_VAL | SPMODE_ENABLE);
}

static int fsl_espi_probe(struct device *dev, struct resource *mem,
			  unsigned int irq, unsigned int num_cs)
{
	struct spi_master *master;
	struct fsl_espi *espi;
	int ret;

	master = spi_alloc_master(dev, sizeof(struct fsl_espi));
	if (!master)
		return -ENOMEM;

	dev_set_drvdata(dev, master);

	master->mode_bits = SPI_RX_DUAL | SPI_CPOL | SPI_CPHA | SPI_CS_HIGH |
			    SPI_LSB_FIRST | SPI_LOOP;
	master->dev.of_node = dev->of_node;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
	master->setup = fsl_espi_setup;
	master->cleanup = fsl_espi_cleanup;
	master->transfer_one_message = fsl_espi_do_one_msg;
	master->auto_runtime_pm = true;
	master->max_message_size = fsl_espi_max_message_size;
	master->num_chipselect = num_cs;

	espi = spi_master_get_devdata(master);
	spin_lock_init(&espi->lock);

	espi->dev = dev;
	espi->spibrg = fsl_get_sys_freq();
	if (espi->spibrg == -1) {
		dev_err(dev, "Can't get sys frequency!\n");
		ret = -EINVAL;
		goto err_probe;
	}
	/* determined by clock divider fields DIV16/PM in register SPMODEx */
	master->min_speed_hz = DIV_ROUND_UP(espi->spibrg, 4 * 16 * 16);
	master->max_speed_hz = DIV_ROUND_UP(espi->spibrg, 4);

	init_completion(&espi->done);

	espi->reg_base = devm_ioremap_resource(dev, mem);
	if (IS_ERR(espi->reg_base)) {
		ret = PTR_ERR(espi->reg_base);
		goto err_probe;
	}

	/* Register for SPI Interrupt */
	ret = devm_request_irq(dev, irq, fsl_espi_irq, 0, "fsl_espi", espi);
	if (ret)
		goto err_probe;

	fsl_espi_init_regs(dev, true);

	pm_runtime_set_autosuspend_delay(dev, AUTOSUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

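	/* register the master while the controller is still held runtime-active */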
	ret = devm_spi_register_master(dev, master);
	if (ret < 0)
		goto err_pm;

	dev_info(dev, "irq = %u\n", irq);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return 0;

err_pm:
	pm_runtime_put_noidle(dev);
	pm_runtime_disable(dev);
	pm_runtime_set_suspended(dev);
err_probe:
	spi_master_put(master);
	return ret;
}

static int of_fsl_espi_get_chipselects(struct device *dev)
{
	struct device_node *np = dev->of_node;
	u32 num_cs;
	int ret;

	ret = of_property_read_u32(np, "fsl,espi-num-chipselects", &num_cs);
	if (ret) {
		dev_err(dev, "No 'fsl,espi-num-chipselects' property\n");
		return 0;
	}

	return num_cs;
}

static int of_fsl_espi_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct resource mem;
	unsigned int irq, num_cs;
	int ret;

	if (of_property_read_bool(np, "mode")) {
		dev_err(dev, "mode property is not supported on ESPI!\n");
		return -EINVAL;
	}

	num_cs = of_fsl_espi_get_chipselects(dev);
	if (!num_cs)
		return -EINVAL;

	ret = of_address_to_resource(np, 0, &mem);
	if (ret)
		return ret;

	irq = irq_of_parse_and_map(np, 0);
	if (!irq)
		return -EINVAL;

	return fsl_espi_probe(dev, &mem, irq, num_cs);
}

static int of_fsl_espi_remove(struct platform_device *dev)
{
	pm_runtime_disable(&dev->dev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int of_fsl_espi_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	int ret;

	ret = spi_master_suspend(master);
	if (ret)
		return ret;

	return pm_runtime_force_suspend(dev);
}

static int of_fsl_espi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	int ret;

	fsl_espi_init_regs(dev, false);

	ret = pm_runtime_force_resume(dev);
	if (ret < 0)
		return ret;

	return spi_master_resume(master);
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops espi_pm = {
	SET_RUNTIME_PM_OPS(fsl_espi_runtime_suspend,
			   fsl_espi_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(of_fsl_espi_suspend, of_fsl_espi_resume)
};

static const struct of_device_id of_fsl_espi_match[] = {
	{ .compatible = "fsl,mpc8536-espi" },
	{}
};
MODULE_DEVICE_TABLE(of, of_fsl_espi_match);

static struct platform_driver fsl_espi_driver = {
	.driver = {
		.name = "fsl_espi",
		.of_match_table = of_fsl_espi_match,
		.pm = &espi_pm,
	},
	.probe = of_fsl_espi_probe,
	.remove = of_fsl_espi_remove,
};
module_platform_driver(fsl_espi_driver);

MODULE_AUTHOR("Mingkai Hu");
MODULE_DESCRIPTION("Enhanced Freescale SPI Driver");
MODULE_LICENSE("GPL");