/*
 * Freescale eSPI controller driver.
 *
 * Copyright 2010 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/fsl_devices.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/pm_runtime.h>
#include <sysdev/fsl_soc.h>

/* eSPI Controller registers */
#define ESPI_SPMODE	0x00	/* eSPI mode register */
#define ESPI_SPIE	0x04	/* eSPI event register */
#define ESPI_SPIM	0x08	/* eSPI mask register */
#define ESPI_SPCOM	0x0c	/* eSPI command register */
#define ESPI_SPITF	0x10	/* eSPI transmit FIFO access register */
#define ESPI_SPIRF	0x14	/* eSPI receive FIFO access register */
#define ESPI_SPMODE0	0x20	/* eSPI cs0 mode register */

#define ESPI_SPMODEx(x)	(ESPI_SPMODE0 + (x) * 4)

/* eSPI Controller mode register definitions */
#define SPMODE_ENABLE		BIT(31)
#define SPMODE_LOOP		BIT(30)
#define SPMODE_TXTHR(x)		((x) << 8)
#define SPMODE_RXTHR(x)		((x) << 0)

/* eSPI Controller CS mode register definitions */
#define CSMODE_CI_INACTIVEHIGH	BIT(31)
#define CSMODE_CP_BEGIN_EDGECLK	BIT(30)
#define CSMODE_REV		BIT(29)
#define CSMODE_DIV16		BIT(28)
#define CSMODE_PM(x)		((x) << 24)
#define CSMODE_POL_1		BIT(20)
#define CSMODE_LEN(x)		((x) << 16)
#define CSMODE_BEF(x)		((x) << 12)
#define CSMODE_AFT(x)		((x) << 8)
#define CSMODE_CG(x)		((x) << 3)

#define FSL_ESPI_FIFO_SIZE	32
#define FSL_ESPI_RXTHR		15

/* Default mode/csmode for eSPI controller */
#define SPMODE_INIT_VAL (SPMODE_TXTHR(4) | SPMODE_RXTHR(FSL_ESPI_RXTHR))
#define CSMODE_INIT_VAL (CSMODE_POL_1 | CSMODE_BEF(0) \
		| CSMODE_AFT(0) | CSMODE_CG(1))

/* SPIE register values */
#define SPIE_RXCNT(reg)		((reg >> 24) & 0x3F)
#define SPIE_TXCNT(reg)		((reg >> 16) & 0x3F)
#define SPIE_TXE		BIT(15)	/* TX FIFO empty */
#define SPIE_DON		BIT(14)	/* TX done */
#define SPIE_RXT		BIT(13)	/* RX FIFO threshold */
#define SPIE_RXF		BIT(12)	/* RX FIFO full */
#define SPIE_TXT		BIT(11)	/* TX FIFO threshold */
#define SPIE_RNE		BIT(9)	/* RX FIFO not empty */
#define SPIE_TNF		BIT(8)	/* TX FIFO not full */

/* SPIM register values */
#define SPIM_TXE		BIT(15)	/* TX FIFO empty */
#define SPIM_DON		BIT(14)	/* TX done */
#define SPIM_RXT		BIT(13)	/* RX FIFO threshold */
#define SPIM_RXF		BIT(12)	/* RX FIFO full */
#define SPIM_TXT		BIT(11)	/* TX FIFO threshold */
#define SPIM_RNE		BIT(9)	/* RX FIFO not empty */
#define SPIM_TNF		BIT(8)	/* TX FIFO not full */

/* SPCOM register values */
#define SPCOM_CS(x)		((x) << 30)
#define SPCOM_DO		BIT(28)		/* Dual output */
#define SPCOM_TO		BIT(27)		/* TX only */
#define SPCOM_RXSKIP(x)		((x) << 16)
#define SPCOM_TRANLEN(x)	((x) << 0)

#define SPCOM_TRANLEN_MAX	0x10000	/* Max transaction length */

#define AUTOSUSPEND_TIMEOUT	2000

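/*
 * Controller-private data, stored as the SPI master's drvdata.  The
 * tx_t/tx_pos/tx_done and rx_t/rx_pos/rx_done fields track how far the
 * current message's transfer list has been pushed through the FIFOs;
 * they are advanced under "lock" by the initial FIFO fill and by the
 * interrupt handler.
 */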
struct fsl_espi {
	struct device *dev;
	void __iomem *reg_base;

	struct list_head *m_transfers;
	struct spi_transfer *tx_t;
	unsigned int tx_pos;
	bool tx_done;
	struct spi_transfer *rx_t;
	unsigned int rx_pos;
	bool rx_done;

	bool swab;
	unsigned int rxskip;

	spinlock_t lock;

	u32 spibrg;		/* SPIBRG input clock */

	struct completion done;
};

struct fsl_espi_cs {
	u32 hw_mode;
};

static inline u32 fsl_espi_read_reg(struct fsl_espi *espi, int offset)
{
	return ioread32be(espi->reg_base + offset);
}

static inline u16 fsl_espi_read_reg16(struct fsl_espi *espi, int offset)
{
	return ioread16be(espi->reg_base + offset);
}

static inline u8 fsl_espi_read_reg8(struct fsl_espi *espi, int offset)
{
	return ioread8(espi->reg_base + offset);
}

static inline void fsl_espi_write_reg(struct fsl_espi *espi, int offset,
				      u32 val)
{
	iowrite32be(val, espi->reg_base + offset);
}

static inline void fsl_espi_write_reg16(struct fsl_espi *espi, int offset,
					u16 val)
{
	iowrite16be(val, espi->reg_base + offset);
}

static inline void fsl_espi_write_reg8(struct fsl_espi *espi, int offset,
				       u8 val)
{
	iowrite8(val, espi->reg_base + offset);
}

static int fsl_espi_check_message(struct spi_message *m)
{
	struct fsl_espi *espi = spi_master_get_devdata(m->spi->master);
	struct spi_transfer *t, *first;

	if (m->frame_length > SPCOM_TRANLEN_MAX) {
		dev_err(espi->dev, "message too long, size is %u bytes\n",
			m->frame_length);
		return -EMSGSIZE;
	}

	first = list_first_entry(&m->transfers, struct spi_transfer,
				 transfer_list);

	list_for_each_entry(t, &m->transfers, transfer_list) {
		if (first->bits_per_word != t->bits_per_word ||
		    first->speed_hz != t->speed_hz) {
			dev_err(espi->dev, "bits_per_word/speed_hz should be the same for all transfers\n");
			return -EINVAL;
		}
	}

	/* ESPI supports MSB-first transfers for word size 8 / 16 only */
	if (!(m->spi->mode & SPI_LSB_FIRST) && first->bits_per_word != 8 &&
	    first->bits_per_word != 16) {
		dev_err(espi->dev,
			"MSB-first transfer not supported for wordsize %u\n",
			first->bits_per_word);
		return -EINVAL;
	}

	return 0;
}

static unsigned int fsl_espi_check_rxskip_mode(struct spi_message *m)
{
	struct spi_transfer *t;
	unsigned int i = 0, rxskip = 0;

	/*
	 * prerequisites for ESPI rxskip mode:
	 * - message has two transfers
	 * - first transfer is a write and second is a read
	 *
	 * In addition the current low-level transfer mechanism requires
	 * that the rxskip bytes fit into the TX FIFO. Else the transfer
	 * would hang because after the first FSL_ESPI_FIFO_SIZE bytes
	 * the TX FIFO isn't re-filled.
	 */
	list_for_each_entry(t, &m->transfers, transfer_list) {
		if (i == 0) {
			if (!t->tx_buf || t->rx_buf ||
			    t->len > FSL_ESPI_FIFO_SIZE)
				return 0;
			rxskip = t->len;
		} else if (i == 1) {
			if (t->tx_buf || !t->rx_buf)
				return 0;
		}
		i++;
	}

	return i == 2 ? rxskip : 0;
}

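/*
 * Push data for the current TX transfer into the TX FIFO.  Whole 32-bit
 * words are written whenever at least four bytes are left; in swab mode
 * (LSB-first with bits_per_word > 8) the two bytes of each 16-bit word
 * are swapped first.  The remaining tail is written with 16- or 8-bit
 * accesses.  When a transfer is exhausted, filling continues with the
 * next transfer in the message as long as the FIFO still has room.
 */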
static void fsl_espi_fill_tx_fifo(struct fsl_espi *espi, u32 events)
{
	u32 tx_fifo_avail;
	unsigned int tx_left;
	const void *tx_buf;

	/* if events is zero transfer has not started and tx fifo is empty */
	tx_fifo_avail = events ? SPIE_TXCNT(events) : FSL_ESPI_FIFO_SIZE;
start:
	tx_left = espi->tx_t->len - espi->tx_pos;
	tx_buf = espi->tx_t->tx_buf;
	while (tx_fifo_avail >= min(4U, tx_left) && tx_left) {
		if (tx_left >= 4) {
			if (!tx_buf)
				fsl_espi_write_reg(espi, ESPI_SPITF, 0);
			else if (espi->swab)
				fsl_espi_write_reg(espi, ESPI_SPITF,
					swahb32p(tx_buf + espi->tx_pos));
			else
				fsl_espi_write_reg(espi, ESPI_SPITF,
					*(u32 *)(tx_buf + espi->tx_pos));
			espi->tx_pos += 4;
			tx_left -= 4;
			tx_fifo_avail -= 4;
		} else if (tx_left >= 2 && tx_buf && espi->swab) {
			fsl_espi_write_reg16(espi, ESPI_SPITF,
					swab16p(tx_buf + espi->tx_pos));
			espi->tx_pos += 2;
			tx_left -= 2;
			tx_fifo_avail -= 2;
		} else {
			if (!tx_buf)
				fsl_espi_write_reg8(espi, ESPI_SPITF, 0);
			else
				fsl_espi_write_reg8(espi, ESPI_SPITF,
					*(u8 *)(tx_buf + espi->tx_pos));
			espi->tx_pos += 1;
			tx_left -= 1;
			tx_fifo_avail -= 1;
		}
	}

	if (!tx_left) {
		/* Last transfer finished, in rxskip mode only one is needed */
		if (list_is_last(&espi->tx_t->transfer_list,
				 espi->m_transfers) || espi->rxskip) {
			espi->tx_done = true;
			return;
		}
		espi->tx_t = list_next_entry(espi->tx_t, transfer_list);
		espi->tx_pos = 0;
		/* continue with next transfer if tx fifo is not full */
		if (tx_fifo_avail)
			goto start;
	}
}

static void fsl_espi_read_rx_fifo(struct fsl_espi *espi, u32 events)
{
	u32 rx_fifo_avail = SPIE_RXCNT(events);
	unsigned int rx_left;
	void *rx_buf;

start:
	rx_left = espi->rx_t->len - espi->rx_pos;
	rx_buf = espi->rx_t->rx_buf;
	while (rx_fifo_avail >= min(4U, rx_left) && rx_left) {
		if (rx_left >= 4) {
			u32 val = fsl_espi_read_reg(espi, ESPI_SPIRF);

			if (rx_buf && espi->swab)
				*(u32 *)(rx_buf + espi->rx_pos) = swahb32(val);
			else if (rx_buf)
				*(u32 *)(rx_buf + espi->rx_pos) = val;
			espi->rx_pos += 4;
			rx_left -= 4;
			rx_fifo_avail -= 4;
		} else if (rx_left >= 2 && rx_buf && espi->swab) {
			u16 val = fsl_espi_read_reg16(espi, ESPI_SPIRF);

			*(u16 *)(rx_buf + espi->rx_pos) = swab16(val);
			espi->rx_pos += 2;
			rx_left -= 2;
			rx_fifo_avail -= 2;
		} else {
			u8 val = fsl_espi_read_reg8(espi, ESPI_SPIRF);

			if (rx_buf)
				*(u8 *)(rx_buf + espi->rx_pos) = val;
			espi->rx_pos += 1;
			rx_left -= 1;
			rx_fifo_avail -= 1;
		}
	}

	if (!rx_left) {
		if (list_is_last(&espi->rx_t->transfer_list,
				 espi->m_transfers)) {
			espi->rx_done = true;
			return;
		}
		espi->rx_t = list_next_entry(espi->rx_t, transfer_list);
		espi->rx_pos = 0;
		/* continue with next transfer if rx fifo is not empty */
		if (rx_fifo_avail)
			goto start;
	}
}

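/*
 * Program the per-chip-select mode register for a transfer.  The driver
 * models the SPI clock as spibrg / (4 * (PM + 1)), divided by a further
 * 16 when CSMODE_DIV16 is set (this matches the min/max_speed_hz values
 * set up in probe), and picks the smallest PM that keeps the resulting
 * clock at or below the requested speed_hz.
 */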
static void fsl_espi_setup_transfer(struct spi_device *spi,
				    struct spi_transfer *t)
{
	struct fsl_espi *espi = spi_master_get_devdata(spi->master);
	int bits_per_word = t ? t->bits_per_word : spi->bits_per_word;
	u32 pm, hz = t ? t->speed_hz : spi->max_speed_hz;
	struct fsl_espi_cs *cs = spi_get_ctldata(spi);
	u32 hw_mode_old = cs->hw_mode;

	/* mask out bits we are going to set */
	cs->hw_mode &= ~(CSMODE_LEN(0xF) | CSMODE_DIV16 | CSMODE_PM(0xF));

	cs->hw_mode |= CSMODE_LEN(bits_per_word - 1);

	pm = DIV_ROUND_UP(espi->spibrg, hz * 4) - 1;

	if (pm > 15) {
		cs->hw_mode |= CSMODE_DIV16;
		pm = DIV_ROUND_UP(espi->spibrg, hz * 16 * 4) - 1;
	}

	cs->hw_mode |= CSMODE_PM(pm);

	/* don't write the mode register if the mode doesn't change */
	if (cs->hw_mode != hw_mode_old)
		fsl_espi_write_reg(espi, ESPI_SPMODEx(spi->chip_select),
				   cs->hw_mode);
}

static int fsl_espi_bufs(struct spi_device *spi, struct spi_transfer *t)
{
	struct fsl_espi *espi = spi_master_get_devdata(spi->master);
	unsigned int rx_len = t->len;
	u32 mask, spcom;
	int ret;

	reinit_completion(&espi->done);

	/* Set SPCOM[CS] and SPCOM[TRANLEN] field */
	spcom = SPCOM_CS(spi->chip_select);
	spcom |= SPCOM_TRANLEN(t->len - 1);

	/* configure RXSKIP mode */
	if (espi->rxskip) {
		spcom |= SPCOM_RXSKIP(espi->rxskip);
		rx_len = t->len - espi->rxskip;
		if (t->rx_nbits == SPI_NBITS_DUAL)
			spcom |= SPCOM_DO;
	}

	fsl_espi_write_reg(espi, ESPI_SPCOM, spcom);

	/* enable interrupts */
	mask = SPIM_DON;
	if (rx_len > FSL_ESPI_FIFO_SIZE)
		mask |= SPIM_RXT;
	fsl_espi_write_reg(espi, ESPI_SPIM, mask);

	/* Prevent filling the fifo from getting interrupted */
	spin_lock_irq(&espi->lock);
	fsl_espi_fill_tx_fifo(espi, 0);
	spin_unlock_irq(&espi->lock);

	/* Won't hang forever; the SPI bus sometimes loses interrupts... */
	ret = wait_for_completion_timeout(&espi->done, 2 * HZ);
	if (ret == 0)
		dev_err(espi->dev, "Transfer timed out!\n");

	/* disable rx ints */
	fsl_espi_write_reg(espi, ESPI_SPIM, 0);

	return ret == 0 ? -ETIMEDOUT : 0;
}

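/*
 * Run one message as a single hardware transaction.  If the message
 * matches the RXSKIP pattern (a single write followed by a single read),
 * reception of the first rxskip bytes is skipped by the controller;
 * dual-output (SPI_NBITS_DUAL) reads are only supported in this mode.
 */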
static int fsl_espi_trans(struct spi_message *m, struct spi_transfer *trans)
{
	struct fsl_espi *espi = spi_master_get_devdata(m->spi->master);
	struct spi_device *spi = m->spi;
	int ret;

	/* In case of LSB-first and bits_per_word > 8 byte-swap all words */
	espi->swab = spi->mode & SPI_LSB_FIRST && trans->bits_per_word > 8;

	espi->m_transfers = &m->transfers;
	espi->tx_t = list_first_entry(&m->transfers, struct spi_transfer,
				      transfer_list);
	espi->tx_pos = 0;
	espi->tx_done = false;
	espi->rx_t = list_first_entry(&m->transfers, struct spi_transfer,
				      transfer_list);
	espi->rx_pos = 0;
	espi->rx_done = false;

	espi->rxskip = fsl_espi_check_rxskip_mode(m);
	if (trans->rx_nbits == SPI_NBITS_DUAL && !espi->rxskip) {
		dev_err(espi->dev, "Dual output mode requires RXSKIP mode!\n");
		return -EINVAL;
	}

	/* In RXSKIP mode skip first transfer for reads */
	if (espi->rxskip)
		espi->rx_t = list_next_entry(espi->rx_t, transfer_list);

	fsl_espi_setup_transfer(spi, trans);

	ret = fsl_espi_bufs(spi, trans);

	if (trans->delay_usecs)
		udelay(trans->delay_usecs);

	return ret;
}

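/*
 * The driver handles a whole message as one hardware transaction: all
 * transfers are collapsed into a single synthetic spi_transfer covering
 * frame_length bytes, which is why fsl_espi_check_message() insists on
 * identical bits_per_word and speed_hz across the message.
 */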
static int fsl_espi_do_one_msg(struct spi_master *master,
			       struct spi_message *m)
{
	unsigned int delay_usecs = 0, rx_nbits = 0;
	struct spi_transfer *t, trans = {};
	int ret;

	ret = fsl_espi_check_message(m);
	if (ret)
		goto out;

	list_for_each_entry(t, &m->transfers, transfer_list) {
		if (t->delay_usecs > delay_usecs)
			delay_usecs = t->delay_usecs;
		if (t->rx_nbits > rx_nbits)
			rx_nbits = t->rx_nbits;
	}

	t = list_first_entry(&m->transfers, struct spi_transfer,
			     transfer_list);

	trans.len = m->frame_length;
	trans.speed_hz = t->speed_hz;
	trans.bits_per_word = t->bits_per_word;
	trans.delay_usecs = delay_usecs;
	trans.rx_nbits = rx_nbits;

	if (trans.len)
		ret = fsl_espi_trans(m, &trans);

	m->actual_length = ret ? 0 : trans.len;
out:
	if (m->status == -EINPROGRESS)
		m->status = ret;

	spi_finalize_current_message(master);

	return ret;
}

static int fsl_espi_setup(struct spi_device *spi)
{
	struct fsl_espi *espi;
	u32 loop_mode;
	struct fsl_espi_cs *cs = spi_get_ctldata(spi);

	if (!cs) {
		cs = kzalloc(sizeof(*cs), GFP_KERNEL);
		if (!cs)
			return -ENOMEM;
		spi_set_ctldata(spi, cs);
	}

	espi = spi_master_get_devdata(spi->master);

	pm_runtime_get_sync(espi->dev);

	cs->hw_mode = fsl_espi_read_reg(espi, ESPI_SPMODEx(spi->chip_select));
	/* mask out bits we are going to set */
	cs->hw_mode &= ~(CSMODE_CP_BEGIN_EDGECLK | CSMODE_CI_INACTIVEHIGH
			 | CSMODE_REV);

	if (spi->mode & SPI_CPHA)
		cs->hw_mode |= CSMODE_CP_BEGIN_EDGECLK;
	if (spi->mode & SPI_CPOL)
		cs->hw_mode |= CSMODE_CI_INACTIVEHIGH;
	if (!(spi->mode & SPI_LSB_FIRST))
		cs->hw_mode |= CSMODE_REV;

	/* Handle the loop mode */
	loop_mode = fsl_espi_read_reg(espi, ESPI_SPMODE);
	loop_mode &= ~SPMODE_LOOP;
	if (spi->mode & SPI_LOOP)
		loop_mode |= SPMODE_LOOP;
	fsl_espi_write_reg(espi, ESPI_SPMODE, loop_mode);

	fsl_espi_setup_transfer(spi, NULL);

	pm_runtime_mark_last_busy(espi->dev);
	pm_runtime_put_autosuspend(espi->dev);

	return 0;
}

static void fsl_espi_cleanup(struct spi_device *spi)
{
	struct fsl_espi_cs *cs = spi_get_ctldata(spi);

	kfree(cs);
	spi_set_ctldata(spi, NULL);
}

static void fsl_espi_cpu_irq(struct fsl_espi *espi, u32 events)
{
	if (!espi->rx_done)
		fsl_espi_read_rx_fifo(espi, events);

	if (!espi->tx_done)
		fsl_espi_fill_tx_fifo(espi, events);

	if (!espi->tx_done || !espi->rx_done)
		return;

	/* we're done, but check for errors before returning */
	events = fsl_espi_read_reg(espi, ESPI_SPIE);

	if (!(events & SPIE_DON))
		dev_err(espi->dev,
			"Transfer done but SPIE_DON isn't set!\n");

	if (SPIE_RXCNT(events) || SPIE_TXCNT(events) != FSL_ESPI_FIFO_SIZE) {
		dev_err(espi->dev, "Transfer done but rx/tx fifo's aren't empty!\n");
		dev_err(espi->dev, "SPIE_RXCNT = %d, SPIE_TXCNT = %d\n",
			SPIE_RXCNT(events), SPIE_TXCNT(events));
	}

	complete(&espi->done);
}

static irqreturn_t fsl_espi_irq(s32 irq, void *context_data)
{
	struct fsl_espi *espi = context_data;
	u32 events;

	spin_lock(&espi->lock);

	/* Get interrupt events (tx/rx) */
	events = fsl_espi_read_reg(espi, ESPI_SPIE);
	if (!events) {
		spin_unlock(&espi->lock);
		return IRQ_NONE;
	}

	dev_vdbg(espi->dev, "%s: events %x\n", __func__, events);

	fsl_espi_cpu_irq(espi, events);

	/* Clear the events */
	fsl_espi_write_reg(espi, ESPI_SPIE, events);

	spin_unlock(&espi->lock);

	return IRQ_HANDLED;
}

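/*
 * Runtime PM simply gates the controller: suspend clears SPMODE_ENABLE,
 * resume sets it again.
 */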
#ifdef CONFIG_PM
static int fsl_espi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct fsl_espi *espi = spi_master_get_devdata(master);
	u32 regval;

	regval = fsl_espi_read_reg(espi, ESPI_SPMODE);
	regval &= ~SPMODE_ENABLE;
	fsl_espi_write_reg(espi, ESPI_SPMODE, regval);

	return 0;
}

static int fsl_espi_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct fsl_espi *espi = spi_master_get_devdata(master);
	u32 regval;

	regval = fsl_espi_read_reg(espi, ESPI_SPMODE);
	regval |= SPMODE_ENABLE;
	fsl_espi_write_reg(espi, ESPI_SPMODE, regval);

	return 0;
}
#endif

static size_t fsl_espi_max_message_size(struct spi_device *spi)
{
	return SPCOM_TRANLEN_MAX;
}

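/*
 * (Re)program the controller registers.  Called from probe and again on
 * system resume; "initial" only controls whether the per-CS mode values
 * are logged.
 */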
static void fsl_espi_init_regs(struct device *dev, bool initial)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct fsl_espi *espi = spi_master_get_devdata(master);
	struct device_node *nc;
	u32 csmode, cs, prop;
	int ret;

	/* SPI controller initializations */
	fsl_espi_write_reg(espi, ESPI_SPMODE, 0);
	fsl_espi_write_reg(espi, ESPI_SPIM, 0);
	fsl_espi_write_reg(espi, ESPI_SPCOM, 0);
	fsl_espi_write_reg(espi, ESPI_SPIE, 0xffffffff);

	/* Init eSPI CS mode register */
	for_each_available_child_of_node(master->dev.of_node, nc) {
		/* get chip select */
		ret = of_property_read_u32(nc, "reg", &cs);
		if (ret || cs >= master->num_chipselect)
			continue;

		csmode = CSMODE_INIT_VAL;

		/* check if CSBEF is set in device tree */
		ret = of_property_read_u32(nc, "fsl,csbef", &prop);
		if (!ret) {
			csmode &= ~(CSMODE_BEF(0xf));
			csmode |= CSMODE_BEF(prop);
		}

		/* check if CSAFT is set in device tree */
		ret = of_property_read_u32(nc, "fsl,csaft", &prop);
		if (!ret) {
			csmode &= ~(CSMODE_AFT(0xf));
			csmode |= CSMODE_AFT(prop);
		}

		fsl_espi_write_reg(espi, ESPI_SPMODEx(cs), csmode);

		if (initial)
			dev_info(dev, "cs=%u, init_csmode=0x%x\n", cs, csmode);
	}

	/* Enable SPI interface */
	fsl_espi_write_reg(espi, ESPI_SPMODE, SPMODE_INIT_VAL | SPMODE_ENABLE);
}

static int fsl_espi_probe(struct device *dev, struct resource *mem,
			  unsigned int irq, unsigned int num_cs)
{
	struct spi_master *master;
	struct fsl_espi *espi;
	int ret;

	master = spi_alloc_master(dev, sizeof(struct fsl_espi));
	if (!master)
		return -ENOMEM;

	dev_set_drvdata(dev, master);

	master->mode_bits = SPI_RX_DUAL | SPI_CPOL | SPI_CPHA | SPI_CS_HIGH |
			    SPI_LSB_FIRST | SPI_LOOP;
	master->dev.of_node = dev->of_node;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
	master->setup = fsl_espi_setup;
	master->cleanup = fsl_espi_cleanup;
	master->transfer_one_message = fsl_espi_do_one_msg;
	master->auto_runtime_pm = true;
	master->max_message_size = fsl_espi_max_message_size;
	master->num_chipselect = num_cs;

	espi = spi_master_get_devdata(master);
	spin_lock_init(&espi->lock);

	espi->dev = dev;
	espi->spibrg = fsl_get_sys_freq();
	if (espi->spibrg == -1) {
		dev_err(dev, "Can't get sys frequency!\n");
		ret = -EINVAL;
		goto err_probe;
	}
	/* determined by clock divider fields DIV16/PM in register SPMODEx */
	master->min_speed_hz = DIV_ROUND_UP(espi->spibrg, 4 * 16 * 16);
	master->max_speed_hz = DIV_ROUND_UP(espi->spibrg, 4);

	init_completion(&espi->done);

	espi->reg_base = devm_ioremap_resource(dev, mem);
	if (IS_ERR(espi->reg_base)) {
		ret = PTR_ERR(espi->reg_base);
		goto err_probe;
	}

	/* Register for SPI Interrupt */
	ret = devm_request_irq(dev, irq, fsl_espi_irq, 0, "fsl_espi", espi);
	if (ret)
		goto err_probe;

	fsl_espi_init_regs(dev, true);

	pm_runtime_set_autosuspend_delay(dev, AUTOSUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	ret = devm_spi_register_master(dev, master);
	if (ret < 0)
		goto err_pm;

	dev_info(dev, "at 0x%p (irq = %u)\n", espi->reg_base, irq);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return 0;

err_pm:
	pm_runtime_put_noidle(dev);
	pm_runtime_disable(dev);
	pm_runtime_set_suspended(dev);
err_probe:
	spi_master_put(master);
	return ret;
}

static int of_fsl_espi_get_chipselects(struct device *dev)
{
	struct device_node *np = dev->of_node;
	u32 num_cs;
	int ret;

	ret = of_property_read_u32(np, "fsl,espi-num-chipselects", &num_cs);
	if (ret) {
		dev_err(dev, "No 'fsl,espi-num-chipselects' property\n");
		return 0;
	}

	return num_cs;
}

static int of_fsl_espi_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct resource mem;
	unsigned int irq, num_cs;
	int ret;

	if (of_property_read_bool(np, "mode")) {
		dev_err(dev, "mode property is not supported on ESPI!\n");
		return -EINVAL;
	}

	num_cs = of_fsl_espi_get_chipselects(dev);
	if (!num_cs)
		return -EINVAL;

	ret = of_address_to_resource(np, 0, &mem);
	if (ret)
		return ret;

	irq = irq_of_parse_and_map(np, 0);
	if (!irq)
		return -EINVAL;

	return fsl_espi_probe(dev, &mem, irq, num_cs);
}

static int of_fsl_espi_remove(struct platform_device *dev)
{
	pm_runtime_disable(&dev->dev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int of_fsl_espi_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	int ret;

	ret = spi_master_suspend(master);
	if (ret)
		return ret;

	return pm_runtime_force_suspend(dev);
}

static int of_fsl_espi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	int ret;

	fsl_espi_init_regs(dev, false);

	ret = pm_runtime_force_resume(dev);
	if (ret < 0)
		return ret;

	return spi_master_resume(master);
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops espi_pm = {
	SET_RUNTIME_PM_OPS(fsl_espi_runtime_suspend,
			   fsl_espi_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(of_fsl_espi_suspend, of_fsl_espi_resume)
};

static const struct of_device_id of_fsl_espi_match[] = {
	{ .compatible = "fsl,mpc8536-espi" },
	{}
};
MODULE_DEVICE_TABLE(of, of_fsl_espi_match);

static struct platform_driver fsl_espi_driver = {
	.driver = {
		.name = "fsl_espi",
		.of_match_table = of_fsl_espi_match,
		.pm = &espi_pm,
	},
	.probe = of_fsl_espi_probe,
	.remove = of_fsl_espi_remove,
};
module_platform_driver(fsl_espi_driver);

MODULE_AUTHOR("Mingkai Hu");
MODULE_DESCRIPTION("Enhanced Freescale SPI Driver");
MODULE_LICENSE("GPL");