/*
 * SPI driver for NVIDIA's Tegra114 SPI Controller.
 *
 * Copyright (c) 2013, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/reset.h>
#include <linux/spi/spi.h>

#define SPI_COMMAND1				0x000
#define SPI_BIT_LENGTH(x)			(((x) & 0x1f) << 0)
#define SPI_PACKED				(1 << 5)
#define SPI_TX_EN				(1 << 11)
#define SPI_RX_EN				(1 << 12)
#define SPI_BOTH_EN_BYTE			(1 << 13)
#define SPI_BOTH_EN_BIT				(1 << 14)
#define SPI_LSBYTE_FE				(1 << 15)
#define SPI_LSBIT_FE				(1 << 16)
#define SPI_BIDIROE				(1 << 17)
#define SPI_IDLE_SDA_DRIVE_LOW			(0 << 18)
#define SPI_IDLE_SDA_DRIVE_HIGH			(1 << 18)
#define SPI_IDLE_SDA_PULL_LOW			(2 << 18)
#define SPI_IDLE_SDA_PULL_HIGH			(3 << 18)
#define SPI_IDLE_SDA_MASK			(3 << 18)
#define SPI_CS_SS_VAL				(1 << 20)
#define SPI_CS_SW_HW				(1 << 21)
/* SPI_CS_POL_INACTIVE bits are default high */
						/* n from 0 to 3 */
#define SPI_CS_POL_INACTIVE(n)			(1 << (22 + (n)))
#define SPI_CS_POL_INACTIVE_MASK		(0xF << 22)

#define SPI_CS_SEL_0				(0 << 26)
#define SPI_CS_SEL_1				(1 << 26)
#define SPI_CS_SEL_2				(2 << 26)
#define SPI_CS_SEL_3				(3 << 26)
#define SPI_CS_SEL_MASK				(3 << 26)
#define SPI_CS_SEL(x)				(((x) & 0x3) << 26)
#define SPI_CONTROL_MODE_0			(0 << 28)
#define SPI_CONTROL_MODE_1			(1 << 28)
#define SPI_CONTROL_MODE_2			(2 << 28)
#define SPI_CONTROL_MODE_3			(3 << 28)
#define SPI_CONTROL_MODE_MASK			(3 << 28)
#define SPI_MODE_SEL(x)				(((x) & 0x3) << 28)
#define SPI_M_S					(1 << 30)
#define SPI_PIO					(1 << 31)

#define SPI_COMMAND2				0x004
#define SPI_TX_TAP_DELAY(x)			(((x) & 0x3F) << 6)
#define SPI_RX_TAP_DELAY(x)			(((x) & 0x3F) << 0)

#define SPI_CS_TIMING1				0x008
#define SPI_SETUP_HOLD(setup, hold)		(((setup) << 4) | (hold))
#define SPI_CS_SETUP_HOLD(reg, cs, val)			\
		((((val) & 0xFFu) << ((cs) * 8)) |	\
		((reg) & ~(0xFFu << ((cs) * 8))))

#define SPI_CS_TIMING2				0x00C
#define CYCLES_BETWEEN_PACKETS_0(x)		(((x) & 0x1F) << 0)
#define CS_ACTIVE_BETWEEN_PACKETS_0		(1 << 5)
#define CYCLES_BETWEEN_PACKETS_1(x)		(((x) & 0x1F) << 8)
#define CS_ACTIVE_BETWEEN_PACKETS_1		(1 << 13)
#define CYCLES_BETWEEN_PACKETS_2(x)		(((x) & 0x1F) << 16)
#define CS_ACTIVE_BETWEEN_PACKETS_2		(1 << 21)
#define CYCLES_BETWEEN_PACKETS_3(x)		(((x) & 0x1F) << 24)
#define CS_ACTIVE_BETWEEN_PACKETS_3		(1 << 29)
#define SPI_SET_CS_ACTIVE_BETWEEN_PACKETS(reg, cs, val)		\
		(reg = (((val) & 0x1) << ((cs) * 8 + 5)) |	\
			((reg) & ~(1 << ((cs) * 8 + 5))))
#define SPI_SET_CYCLES_BETWEEN_PACKETS(reg, cs, val)		\
		(reg = (((val) & 0xF) << ((cs) * 8)) |		\
			((reg) & ~(0xF << ((cs) * 8))))

#define SPI_TRANS_STATUS			0x010
#define SPI_BLK_CNT(val)			(((val) >> 0) & 0xFFFF)
#define SPI_SLV_IDLE_COUNT(val)			(((val) >> 16) & 0xFF)
#define SPI_RDY					(1 << 30)

#define SPI_FIFO_STATUS				0x014
#define SPI_RX_FIFO_EMPTY			(1 << 0)
#define SPI_RX_FIFO_FULL			(1 << 1)
#define SPI_TX_FIFO_EMPTY			(1 << 2)
#define SPI_TX_FIFO_FULL			(1 << 3)
#define SPI_RX_FIFO_UNF				(1 << 4)
#define SPI_RX_FIFO_OVF				(1 << 5)
#define SPI_TX_FIFO_UNF				(1 << 6)
#define SPI_TX_FIFO_OVF				(1 << 7)
#define SPI_ERR					(1 << 8)
#define SPI_TX_FIFO_FLUSH			(1 << 14)
#define SPI_RX_FIFO_FLUSH			(1 << 15)
#define SPI_TX_FIFO_EMPTY_COUNT(val)		(((val) >> 16) & 0x7F)
#define SPI_RX_FIFO_FULL_COUNT(val)		(((val) >> 23) & 0x7F)
#define SPI_FRAME_END				(1 << 30)
#define SPI_CS_INACTIVE				(1 << 31)

#define SPI_FIFO_ERROR				(SPI_RX_FIFO_UNF | \
			SPI_RX_FIFO_OVF | SPI_TX_FIFO_UNF | SPI_TX_FIFO_OVF)
#define SPI_FIFO_EMPTY			(SPI_RX_FIFO_EMPTY | SPI_TX_FIFO_EMPTY)

#define SPI_TX_DATA				0x018
#define SPI_RX_DATA				0x01C

#define SPI_DMA_CTL				0x020
#define SPI_TX_TRIG_1				(0 << 15)
#define SPI_TX_TRIG_4				(1 << 15)
#define SPI_TX_TRIG_8				(2 << 15)
#define SPI_TX_TRIG_16				(3 << 15)
#define SPI_TX_TRIG_MASK			(3 << 15)
#define SPI_RX_TRIG_1				(0 << 19)
#define SPI_RX_TRIG_4				(1 << 19)
#define SPI_RX_TRIG_8				(2 << 19)
#define SPI_RX_TRIG_16				(3 << 19)
#define SPI_RX_TRIG_MASK			(3 << 19)
#define SPI_IE_TX				(1 << 28)
#define SPI_IE_RX				(1 << 29)
#define SPI_CONT				(1 << 30)
#define SPI_DMA					(1 << 31)
#define SPI_DMA_EN				SPI_DMA

#define SPI_DMA_BLK				0x024
#define SPI_DMA_BLK_SET(x)			(((x) & 0xFFFF) << 0)

#define SPI_TX_FIFO				0x108
#define SPI_RX_FIFO				0x188
#define MAX_CHIP_SELECT				4
#define SPI_FIFO_DEPTH				64
#define DATA_DIR_TX				(1 << 0)
#define DATA_DIR_RX				(1 << 1)

#define SPI_DMA_TIMEOUT				(msecs_to_jiffies(1000))
#define DEFAULT_SPI_DMA_BUF_LEN			(16*1024)
#define TX_FIFO_EMPTY_COUNT_MAX			SPI_TX_FIFO_EMPTY_COUNT(0x40)
#define RX_FIFO_FULL_COUNT_ZERO			SPI_RX_FIFO_FULL_COUNT(0)
#define MAX_HOLD_CYCLES				16
#define SPI_DEFAULT_SPEED			25000000
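
/*
 * Per-controller driver state: register base and clocks, bookkeeping for
 * the transfer currently in flight, and the DMA channels and bounce
 * buffers used for transfers larger than the FIFO.
 */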
struct tegra_spi_data {
	struct device				*dev;
	struct spi_master			*master;
	spinlock_t				lock;

	struct clk				*clk;
	struct reset_control			*rst;
	void __iomem				*base;
	phys_addr_t				phys;
	unsigned				irq;
	u32					spi_max_frequency;
	u32					cur_speed;

	struct spi_device			*cur_spi;
	struct spi_device			*cs_control;
	unsigned				cur_pos;
	unsigned				words_per_32bit;
	unsigned				bytes_per_word;
	unsigned				curr_dma_words;
	unsigned				cur_direction;

	unsigned				cur_rx_pos;
	unsigned				cur_tx_pos;

	unsigned				dma_buf_size;
	unsigned				max_buf_size;
	bool					is_curr_dma_xfer;

	struct completion			rx_dma_complete;
	struct completion			tx_dma_complete;

	u32					tx_status;
	u32					rx_status;
	u32					status_reg;
	bool					is_packed;

	u32					command1_reg;
	u32					dma_control_reg;
	u32					def_command1_reg;

	struct completion			xfer_completion;
	struct spi_transfer			*curr_xfer;
	struct dma_chan				*rx_dma_chan;
	u32					*rx_dma_buf;
	dma_addr_t				rx_dma_phys;
	struct dma_async_tx_descriptor		*rx_dma_desc;

	struct dma_chan				*tx_dma_chan;
	u32					*tx_dma_buf;
	dma_addr_t				tx_dma_phys;
	struct dma_async_tx_descriptor		*tx_dma_desc;
};

static int tegra_spi_runtime_suspend(struct device *dev);
static int tegra_spi_runtime_resume(struct device *dev);

static inline u32 tegra_spi_readl(struct tegra_spi_data *tspi,
		unsigned long reg)
{
	return readl(tspi->base + reg);
}

static inline void tegra_spi_writel(struct tegra_spi_data *tspi,
		u32 val, unsigned long reg)
{
	writel(val, tspi->base + reg);

	/* Read back the register to make sure the write has completed */
	if (reg != SPI_TX_FIFO)
		readl(tspi->base + SPI_COMMAND1);
}

static void tegra_spi_clear_status(struct tegra_spi_data *tspi)
{
	u32 val;

	/* Write 1 to clear the status bits */
	val = tegra_spi_readl(tspi, SPI_TRANS_STATUS);
	tegra_spi_writel(tspi, val, SPI_TRANS_STATUS);

	/* Clear the FIFO error status, if any */
	val = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
	if (val & SPI_ERR)
		tegra_spi_writel(tspi, SPI_ERR | SPI_FIFO_ERROR,
				SPI_FIFO_STATUS);
}

static unsigned tegra_spi_calculate_curr_xfer_param(
	struct spi_device *spi, struct tegra_spi_data *tspi,
	struct spi_transfer *t)
{
	unsigned remain_len = t->len - tspi->cur_pos;
	unsigned max_word;
	unsigned bits_per_word = t->bits_per_word;
	unsigned max_len;
	unsigned total_fifo_words;

	tspi->bytes_per_word = DIV_ROUND_UP(bits_per_word, 8);

	if (bits_per_word == 8 || bits_per_word == 16) {
		tspi->is_packed = 1;
		tspi->words_per_32bit = 32 / bits_per_word;
	} else {
		tspi->is_packed = 0;
		tspi->words_per_32bit = 1;
	}

	if (tspi->is_packed) {
		max_len = min(remain_len, tspi->max_buf_size);
		tspi->curr_dma_words = max_len / tspi->bytes_per_word;
		total_fifo_words = (max_len + 3) / 4;
	} else {
		max_word = (remain_len - 1) / tspi->bytes_per_word + 1;
		max_word = min(max_word, tspi->max_buf_size / 4);
		tspi->curr_dma_words = max_word;
		total_fifo_words = max_word;
	}
	return total_fifo_words;
}

static unsigned tegra_spi_fill_tx_fifo_from_client_txbuf(
	struct tegra_spi_data *tspi, struct spi_transfer *t)
{
	unsigned nbytes;
	unsigned tx_empty_count;
	u32 fifo_status;
	unsigned max_n_32bit;
	unsigned i, count;
	unsigned int written_words;
	unsigned fifo_words_left;
	u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;

	fifo_status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
	tx_empty_count = SPI_TX_FIFO_EMPTY_COUNT(fifo_status);

	if (tspi->is_packed) {
		fifo_words_left = tx_empty_count * tspi->words_per_32bit;
		written_words = min(fifo_words_left, tspi->curr_dma_words);
		nbytes = written_words * tspi->bytes_per_word;
		max_n_32bit = DIV_ROUND_UP(nbytes, 4);
		for (count = 0; count < max_n_32bit; count++) {
			u32 x = 0;
			for (i = 0; (i < 4) && nbytes; i++, nbytes--)
				x |= (u32)(*tx_buf++) << (i * 8);
			tegra_spi_writel(tspi, x, SPI_TX_FIFO);
		}
	} else {
		max_n_32bit = min(tspi->curr_dma_words, tx_empty_count);
		written_words = max_n_32bit;
		nbytes = written_words * tspi->bytes_per_word;
		for (count = 0; count < max_n_32bit; count++) {
			u32 x = 0;
			for (i = 0; nbytes && (i < tspi->bytes_per_word);
							i++, nbytes--)
				x |= (u32)(*tx_buf++) << (i * 8);
			tegra_spi_writel(tspi, x, SPI_TX_FIFO);
		}
	}
	tspi->cur_tx_pos += written_words * tspi->bytes_per_word;
	return written_words;
}
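
/*
 * Drain the RX FIFO into the client's rx_buf, handling both packed and
 * unpacked layouts, and advance cur_rx_pos by the number of bytes read.
 */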
static unsigned int tegra_spi_read_rx_fifo_to_client_rxbuf(
		struct tegra_spi_data *tspi, struct spi_transfer *t)
{
	unsigned rx_full_count;
	u32 fifo_status;
	unsigned i, count;
	unsigned int read_words = 0;
	unsigned len;
	u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_rx_pos;

	fifo_status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
	rx_full_count = SPI_RX_FIFO_FULL_COUNT(fifo_status);
	if (tspi->is_packed) {
		len = tspi->curr_dma_words * tspi->bytes_per_word;
		for (count = 0; count < rx_full_count; count++) {
			u32 x = tegra_spi_readl(tspi, SPI_RX_FIFO);
			for (i = 0; len && (i < 4); i++, len--)
				*rx_buf++ = (x >> i*8) & 0xFF;
		}
		tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
		read_words += tspi->curr_dma_words;
	} else {
		u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
		for (count = 0; count < rx_full_count; count++) {
			u32 x = tegra_spi_readl(tspi, SPI_RX_FIFO) & rx_mask;
			for (i = 0; (i < tspi->bytes_per_word); i++)
				*rx_buf++ = (x >> (i*8)) & 0xFF;
		}
		tspi->cur_rx_pos += rx_full_count * tspi->bytes_per_word;
		read_words += rx_full_count;
	}
	return read_words;
}

static void tegra_spi_copy_client_txbuf_to_spi_txbuf(
		struct tegra_spi_data *tspi, struct spi_transfer *t)
{
	/* Make the DMA buffer readable by the CPU */
	dma_sync_single_for_cpu(tspi->dev, tspi->tx_dma_phys,
				tspi->dma_buf_size, DMA_TO_DEVICE);

	if (tspi->is_packed) {
		unsigned len = tspi->curr_dma_words * tspi->bytes_per_word;
		memcpy(tspi->tx_dma_buf, t->tx_buf + tspi->cur_pos, len);
	} else {
		unsigned int i;
		unsigned int count;
		u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
		unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word;

		for (count = 0; count < tspi->curr_dma_words; count++) {
			u32 x = 0;
			for (i = 0; consume && (i < tspi->bytes_per_word);
							i++, consume--)
				x |= (u32)(*tx_buf++) << (i * 8);
			tspi->tx_dma_buf[count] = x;
		}
	}
	tspi->cur_tx_pos += tspi->curr_dma_words * tspi->bytes_per_word;

	/* Hand the DMA buffer back to the device */
	dma_sync_single_for_device(tspi->dev, tspi->tx_dma_phys,
				tspi->dma_buf_size, DMA_TO_DEVICE);
}

static void tegra_spi_copy_spi_rxbuf_to_client_rxbuf(
		struct tegra_spi_data *tspi, struct spi_transfer *t)
{
	/* Make the DMA buffer readable by the CPU */
	dma_sync_single_for_cpu(tspi->dev, tspi->rx_dma_phys,
		tspi->dma_buf_size, DMA_FROM_DEVICE);

	if (tspi->is_packed) {
		unsigned len = tspi->curr_dma_words * tspi->bytes_per_word;
		memcpy(t->rx_buf + tspi->cur_rx_pos, tspi->rx_dma_buf, len);
	} else {
		unsigned int i;
		unsigned int count;
		unsigned char *rx_buf = t->rx_buf + tspi->cur_rx_pos;
		u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;

		for (count = 0; count < tspi->curr_dma_words; count++) {
			u32 x = tspi->rx_dma_buf[count] & rx_mask;
			for (i = 0; (i < tspi->bytes_per_word); i++)
				*rx_buf++ = (x >> (i*8)) & 0xFF;
		}
	}
	tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;

	/* Hand the DMA buffer back to the device */
	dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
		tspi->dma_buf_size, DMA_FROM_DEVICE);
}

static void tegra_spi_dma_complete(void *args)
{
	struct completion *dma_complete = args;

	complete(dma_complete);
}
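
/*
 * Queue a memory-to-device slave DMA descriptor for the TX bounce buffer;
 * completion is signalled through tx_dma_complete.
 */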
static int tegra_spi_start_tx_dma(struct tegra_spi_data *tspi, int len)
{
	reinit_completion(&tspi->tx_dma_complete);
	tspi->tx_dma_desc = dmaengine_prep_slave_single(tspi->tx_dma_chan,
				tspi->tx_dma_phys, len, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tspi->tx_dma_desc) {
		dev_err(tspi->dev, "Unable to get a TX DMA descriptor\n");
		return -EIO;
	}

	tspi->tx_dma_desc->callback = tegra_spi_dma_complete;
	tspi->tx_dma_desc->callback_param = &tspi->tx_dma_complete;

	dmaengine_submit(tspi->tx_dma_desc);
	dma_async_issue_pending(tspi->tx_dma_chan);
	return 0;
}

static int tegra_spi_start_rx_dma(struct tegra_spi_data *tspi, int len)
{
	reinit_completion(&tspi->rx_dma_complete);
	tspi->rx_dma_desc = dmaengine_prep_slave_single(tspi->rx_dma_chan,
				tspi->rx_dma_phys, len, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tspi->rx_dma_desc) {
		dev_err(tspi->dev, "Unable to get an RX DMA descriptor\n");
		return -EIO;
	}

	tspi->rx_dma_desc->callback = tegra_spi_dma_complete;
	tspi->rx_dma_desc->callback_param = &tspi->rx_dma_complete;

	dmaengine_submit(tspi->rx_dma_desc);
	dma_async_issue_pending(tspi->rx_dma_chan);
	return 0;
}

static int tegra_spi_start_dma_based_transfer(
		struct tegra_spi_data *tspi, struct spi_transfer *t)
{
	u32 val;
	unsigned int len;
	int ret = 0;
	u32 status;

	/* Make sure that the RX and TX FIFOs are empty */
	status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
	if ((status & SPI_FIFO_EMPTY) != SPI_FIFO_EMPTY) {
		dev_err(tspi->dev, "RX/TX FIFOs are not empty, status 0x%08x\n",
			(unsigned)status);
		return -EIO;
	}

	val = SPI_DMA_BLK_SET(tspi->curr_dma_words - 1);
	tegra_spi_writel(tspi, val, SPI_DMA_BLK);

	if (tspi->is_packed)
		len = DIV_ROUND_UP(tspi->curr_dma_words * tspi->bytes_per_word,
					4) * 4;
	else
		len = tspi->curr_dma_words * 4;

	/* Set attention level based on length of transfer */
	if (len & 0xF)
		val |= SPI_TX_TRIG_1 | SPI_RX_TRIG_1;
	else if (((len) >> 4) & 0x1)
		val |= SPI_TX_TRIG_4 | SPI_RX_TRIG_4;
	else
		val |= SPI_TX_TRIG_8 | SPI_RX_TRIG_8;

	if (tspi->cur_direction & DATA_DIR_TX)
		val |= SPI_IE_TX;

	if (tspi->cur_direction & DATA_DIR_RX)
		val |= SPI_IE_RX;

	tegra_spi_writel(tspi, val, SPI_DMA_CTL);
	tspi->dma_control_reg = val;

	if (tspi->cur_direction & DATA_DIR_TX) {
		tegra_spi_copy_client_txbuf_to_spi_txbuf(tspi, t);
		ret = tegra_spi_start_tx_dma(tspi, len);
		if (ret < 0) {
			dev_err(tspi->dev,
				"Starting TX DMA failed, err %d\n", ret);
			return ret;
		}
	}

	if (tspi->cur_direction & DATA_DIR_RX) {
		/* Hand the DMA buffer to the device before starting RX DMA */
		dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
				tspi->dma_buf_size, DMA_FROM_DEVICE);

		ret = tegra_spi_start_rx_dma(tspi, len);
		if (ret < 0) {
			dev_err(tspi->dev,
				"Starting RX DMA failed, err %d\n", ret);
			if (tspi->cur_direction & DATA_DIR_TX)
				dmaengine_terminate_all(tspi->tx_dma_chan);
			return ret;
		}
	}
	tspi->is_curr_dma_xfer = true;

	val |= SPI_DMA_EN;
	tegra_spi_writel(tspi, val, SPI_DMA_CTL);
	return ret;
}
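
/*
 * Start a PIO (interrupt-driven) transfer: prime the TX FIFO from the
 * client buffer, program the block count and interrupt enables, then
 * enable the transfer.
 */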
static int tegra_spi_start_cpu_based_transfer(
		struct tegra_spi_data *tspi, struct spi_transfer *t)
{
	u32 val;
	unsigned cur_words;

	if (tspi->cur_direction & DATA_DIR_TX)
		cur_words = tegra_spi_fill_tx_fifo_from_client_txbuf(tspi, t);
	else
		cur_words = tspi->curr_dma_words;

	val = SPI_DMA_BLK_SET(cur_words - 1);
	tegra_spi_writel(tspi, val, SPI_DMA_BLK);

	val = 0;
	if (tspi->cur_direction & DATA_DIR_TX)
		val |= SPI_IE_TX;

	if (tspi->cur_direction & DATA_DIR_RX)
		val |= SPI_IE_RX;

	tegra_spi_writel(tspi, val, SPI_DMA_CTL);
	tspi->dma_control_reg = val;

	tspi->is_curr_dma_xfer = false;

	val |= SPI_DMA_EN;
	tegra_spi_writel(tspi, val, SPI_DMA_CTL);
	return 0;
}

static int tegra_spi_init_dma_param(struct tegra_spi_data *tspi,
			bool dma_to_memory)
{
	struct dma_chan *dma_chan;
	u32 *dma_buf;
	dma_addr_t dma_phys;
	int ret;
	struct dma_slave_config dma_sconfig;

	dma_chan = dma_request_slave_channel_reason(tspi->dev,
					dma_to_memory ? "rx" : "tx");
	if (IS_ERR(dma_chan)) {
		ret = PTR_ERR(dma_chan);
		if (ret != -EPROBE_DEFER)
			dev_err(tspi->dev,
				"DMA channel is not available: %d\n", ret);
		return ret;
	}

	dma_buf = dma_alloc_coherent(tspi->dev, tspi->dma_buf_size,
				&dma_phys, GFP_KERNEL);
	if (!dma_buf) {
		dev_err(tspi->dev, "Unable to allocate the DMA buffer\n");
		dma_release_channel(dma_chan);
		return -ENOMEM;
	}

	if (dma_to_memory) {
		dma_sconfig.src_addr = tspi->phys + SPI_RX_FIFO;
		dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		dma_sconfig.src_maxburst = 0;
	} else {
		dma_sconfig.dst_addr = tspi->phys + SPI_TX_FIFO;
		dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		dma_sconfig.dst_maxburst = 0;
	}

	ret = dmaengine_slave_config(dma_chan, &dma_sconfig);
	if (ret)
		goto scrub;
	if (dma_to_memory) {
		tspi->rx_dma_chan = dma_chan;
		tspi->rx_dma_buf = dma_buf;
		tspi->rx_dma_phys = dma_phys;
	} else {
		tspi->tx_dma_chan = dma_chan;
		tspi->tx_dma_buf = dma_buf;
		tspi->tx_dma_phys = dma_phys;
	}
	return 0;

scrub:
	dma_free_coherent(tspi->dev, tspi->dma_buf_size, dma_buf, dma_phys);
	dma_release_channel(dma_chan);
	return ret;
}

static void tegra_spi_deinit_dma_param(struct tegra_spi_data *tspi,
	bool dma_to_memory)
{
	u32 *dma_buf;
	dma_addr_t dma_phys;
	struct dma_chan *dma_chan;

	if (dma_to_memory) {
		dma_buf = tspi->rx_dma_buf;
		dma_chan = tspi->rx_dma_chan;
		dma_phys = tspi->rx_dma_phys;
		tspi->rx_dma_chan = NULL;
		tspi->rx_dma_buf = NULL;
	} else {
		dma_buf = tspi->tx_dma_buf;
		dma_chan = tspi->tx_dma_chan;
		dma_phys = tspi->tx_dma_phys;
		tspi->tx_dma_buf = NULL;
		tspi->tx_dma_chan = NULL;
	}
	if (!dma_chan)
		return;

	dma_free_coherent(tspi->dev, tspi->dma_buf_size, dma_buf, dma_phys);
	dma_release_channel(dma_chan);
}
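
/*
 * Build the COMMAND1 value for one transfer: bus clock, bit length and
 * SPI mode, plus chip-select handling for the first transfer of a message.
 */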
static u32 tegra_spi_setup_transfer_one(struct spi_device *spi,
		struct spi_transfer *t, bool is_first_of_msg)
{
	struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
	u32 speed = t->speed_hz;
	u8 bits_per_word = t->bits_per_word;
	u32 command1;
	int req_mode;

	if (speed != tspi->cur_speed) {
		clk_set_rate(tspi->clk, speed);
		tspi->cur_speed = speed;
	}

	tspi->cur_spi = spi;
	tspi->cur_pos = 0;
	tspi->cur_rx_pos = 0;
	tspi->cur_tx_pos = 0;
	tspi->curr_xfer = t;

	if (is_first_of_msg) {
		tegra_spi_clear_status(tspi);

		command1 = tspi->def_command1_reg;
		command1 |= SPI_BIT_LENGTH(bits_per_word - 1);

		command1 &= ~SPI_CONTROL_MODE_MASK;
		req_mode = spi->mode & 0x3;
		if (req_mode == SPI_MODE_0)
			command1 |= SPI_CONTROL_MODE_0;
		else if (req_mode == SPI_MODE_1)
			command1 |= SPI_CONTROL_MODE_1;
		else if (req_mode == SPI_MODE_2)
			command1 |= SPI_CONTROL_MODE_2;
		else if (req_mode == SPI_MODE_3)
			command1 |= SPI_CONTROL_MODE_3;

		if (tspi->cs_control) {
			if (tspi->cs_control != spi)
				tegra_spi_writel(tspi, command1, SPI_COMMAND1);
			tspi->cs_control = NULL;
		} else
			tegra_spi_writel(tspi, command1, SPI_COMMAND1);

		command1 |= SPI_CS_SW_HW;
		if (spi->mode & SPI_CS_HIGH)
			command1 |= SPI_CS_SS_VAL;
		else
			command1 &= ~SPI_CS_SS_VAL;

		tegra_spi_writel(tspi, 0, SPI_COMMAND2);
	} else {
		command1 = tspi->command1_reg;
		command1 &= ~SPI_BIT_LENGTH(~0);
		command1 |= SPI_BIT_LENGTH(bits_per_word - 1);
	}

	return command1;
}

static int tegra_spi_start_transfer_one(struct spi_device *spi,
		struct spi_transfer *t, u32 command1)
{
	struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
	unsigned total_fifo_words;
	int ret;

	total_fifo_words = tegra_spi_calculate_curr_xfer_param(spi, tspi, t);

	if (tspi->is_packed)
		command1 |= SPI_PACKED;

	command1 &= ~(SPI_CS_SEL_MASK | SPI_TX_EN | SPI_RX_EN);
	tspi->cur_direction = 0;
	if (t->rx_buf) {
		command1 |= SPI_RX_EN;
		tspi->cur_direction |= DATA_DIR_RX;
	}
	if (t->tx_buf) {
		command1 |= SPI_TX_EN;
		tspi->cur_direction |= DATA_DIR_TX;
	}
	command1 |= SPI_CS_SEL(spi->chip_select);
	tegra_spi_writel(tspi, command1, SPI_COMMAND1);
	tspi->command1_reg = command1;

	dev_dbg(tspi->dev, "The def 0x%x and written 0x%x\n",
		tspi->def_command1_reg, (unsigned)command1);

	if (total_fifo_words > SPI_FIFO_DEPTH)
		ret = tegra_spi_start_dma_based_transfer(tspi, t);
	else
		ret = tegra_spi_start_cpu_based_transfer(tspi, t);
	return ret;
}
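
/*
 * Per-device setup: record the chip-select inactive polarity in the
 * default COMMAND1 value so the line idles inactive for this device.
 */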
static int tegra_spi_setup(struct spi_device *spi)
{
	struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
	u32 val;
	unsigned long flags;
	int ret;

	dev_dbg(&spi->dev, "setup %d bpw, %scpol, %scpha, %dHz\n",
		spi->bits_per_word,
		spi->mode & SPI_CPOL ? "" : "~",
		spi->mode & SPI_CPHA ? "" : "~",
		spi->max_speed_hz);

	BUG_ON(spi->chip_select >= MAX_CHIP_SELECT);

	/* Default to the controller's maximum frequency if the device set none */
	spi->max_speed_hz = spi->max_speed_hz ? : tspi->spi_max_frequency;

	ret = pm_runtime_get_sync(tspi->dev);
	if (ret < 0) {
		dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret);
		return ret;
	}

	spin_lock_irqsave(&tspi->lock, flags);
	val = tspi->def_command1_reg;
	if (spi->mode & SPI_CS_HIGH)
		val &= ~SPI_CS_POL_INACTIVE(spi->chip_select);
	else
		val |= SPI_CS_POL_INACTIVE(spi->chip_select);
	tspi->def_command1_reg = val;
	tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
	spin_unlock_irqrestore(&tspi->lock, flags);

	pm_runtime_put(tspi->dev);
	return 0;
}

static void tegra_spi_transfer_delay(int delay)
{
	if (!delay)
		return;

	if (delay >= 1000)
		mdelay(delay / 1000);

	udelay(delay % 1000);
}

static int tegra_spi_transfer_one_message(struct spi_master *master,
			struct spi_message *msg)
{
	bool is_first_msg = true;
	struct tegra_spi_data *tspi = spi_master_get_devdata(master);
	struct spi_transfer *xfer;
	struct spi_device *spi = msg->spi;
	int ret;
	bool skip = false;

	msg->status = 0;
	msg->actual_length = 0;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		u32 cmd1;

		reinit_completion(&tspi->xfer_completion);

		cmd1 = tegra_spi_setup_transfer_one(spi, xfer, is_first_msg);

		if (!xfer->len) {
			ret = 0;
			skip = true;
			goto complete_xfer;
		}

		ret = tegra_spi_start_transfer_one(spi, xfer, cmd1);
		if (ret < 0) {
			dev_err(tspi->dev,
				"spi cannot start transfer, err %d\n", ret);
			goto complete_xfer;
		}

		is_first_msg = false;
		ret = wait_for_completion_timeout(&tspi->xfer_completion,
						SPI_DMA_TIMEOUT);
		if (WARN_ON(ret == 0)) {
			dev_err(tspi->dev,
				"spi transfer timeout, err %d\n", ret);
			ret = -EIO;
			goto complete_xfer;
		}

		if (tspi->tx_status || tspi->rx_status) {
			dev_err(tspi->dev, "Error in transfer\n");
			ret = -EIO;
			goto complete_xfer;
		}
		msg->actual_length += xfer->len;

complete_xfer:
		if (ret < 0 || skip) {
			tegra_spi_writel(tspi, tspi->def_command1_reg,
					SPI_COMMAND1);
			tegra_spi_transfer_delay(xfer->delay_usecs);
			goto exit;
		} else if (msg->transfers.prev == &xfer->transfer_list) {
			/* This is the last transfer in message */
			if (xfer->cs_change)
				tspi->cs_control = spi;
			else {
				tegra_spi_writel(tspi, tspi->def_command1_reg,
						SPI_COMMAND1);
				tegra_spi_transfer_delay(xfer->delay_usecs);
			}
		} else if (xfer->cs_change) {
			tegra_spi_writel(tspi, tspi->def_command1_reg,
					SPI_COMMAND1);
			tegra_spi_transfer_delay(xfer->delay_usecs);
		}

	}
	ret = 0;
exit:
	msg->status = ret;
	spi_finalize_current_message(master);
	return ret;
}
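
/*
 * Threaded-IRQ path for PIO transfers: on a FIFO error, reset the
 * controller; otherwise drain the RX FIFO and either complete the
 * transfer or start the next chunk.
 */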
static irqreturn_t handle_cpu_based_xfer(struct tegra_spi_data *tspi)
{
	struct spi_transfer *t = tspi->curr_xfer;
	unsigned long flags;

	spin_lock_irqsave(&tspi->lock, flags);
	if (tspi->tx_status || tspi->rx_status) {
		dev_err(tspi->dev, "CpuXfer ERROR bit set 0x%x\n",
			tspi->status_reg);
		dev_err(tspi->dev, "CpuXfer 0x%08x:0x%08x\n",
			tspi->command1_reg, tspi->dma_control_reg);
		reset_control_assert(tspi->rst);
		udelay(2);
		reset_control_deassert(tspi->rst);
		complete(&tspi->xfer_completion);
		goto exit;
	}

	if (tspi->cur_direction & DATA_DIR_RX)
		tegra_spi_read_rx_fifo_to_client_rxbuf(tspi, t);

	if (tspi->cur_direction & DATA_DIR_TX)
		tspi->cur_pos = tspi->cur_tx_pos;
	else
		tspi->cur_pos = tspi->cur_rx_pos;

	if (tspi->cur_pos == t->len) {
		complete(&tspi->xfer_completion);
		goto exit;
	}

	tegra_spi_calculate_curr_xfer_param(tspi->cur_spi, tspi, t);
	tegra_spi_start_cpu_based_transfer(tspi, t);
exit:
	spin_unlock_irqrestore(&tspi->lock, flags);
	return IRQ_HANDLED;
}

static irqreturn_t handle_dma_based_xfer(struct tegra_spi_data *tspi)
{
	struct spi_transfer *t = tspi->curr_xfer;
	long wait_status;
	int err = 0;
	unsigned total_fifo_words;
	unsigned long flags;

	/* Abort the DMA channels if any error was flagged */
	if (tspi->cur_direction & DATA_DIR_TX) {
		if (tspi->tx_status) {
			dmaengine_terminate_all(tspi->tx_dma_chan);
			err += 1;
		} else {
			wait_status = wait_for_completion_interruptible_timeout(
				&tspi->tx_dma_complete, SPI_DMA_TIMEOUT);
			if (wait_status <= 0) {
				dmaengine_terminate_all(tspi->tx_dma_chan);
				dev_err(tspi->dev, "TxDma Xfer failed\n");
				err += 1;
			}
		}
	}

	if (tspi->cur_direction & DATA_DIR_RX) {
		if (tspi->rx_status) {
			dmaengine_terminate_all(tspi->rx_dma_chan);
			err += 2;
		} else {
			wait_status = wait_for_completion_interruptible_timeout(
				&tspi->rx_dma_complete, SPI_DMA_TIMEOUT);
			if (wait_status <= 0) {
				dmaengine_terminate_all(tspi->rx_dma_chan);
				dev_err(tspi->dev, "RxDma Xfer failed\n");
				err += 2;
			}
		}
	}

	spin_lock_irqsave(&tspi->lock, flags);
	if (err) {
		dev_err(tspi->dev, "DmaXfer: ERROR bit set 0x%x\n",
			tspi->status_reg);
		dev_err(tspi->dev, "DmaXfer 0x%08x:0x%08x\n",
			tspi->command1_reg, tspi->dma_control_reg);
		reset_control_assert(tspi->rst);
		udelay(2);
		reset_control_deassert(tspi->rst);
		complete(&tspi->xfer_completion);
		spin_unlock_irqrestore(&tspi->lock, flags);
		return IRQ_HANDLED;
	}

	if (tspi->cur_direction & DATA_DIR_RX)
		tegra_spi_copy_spi_rxbuf_to_client_rxbuf(tspi, t);

	if (tspi->cur_direction & DATA_DIR_TX)
		tspi->cur_pos = tspi->cur_tx_pos;
	else
		tspi->cur_pos = tspi->cur_rx_pos;

	if (tspi->cur_pos == t->len) {
		complete(&tspi->xfer_completion);
		goto exit;
	}

	/* Continue transfer in current message */
	total_fifo_words = tegra_spi_calculate_curr_xfer_param(tspi->cur_spi,
							tspi, t);
	if (total_fifo_words > SPI_FIFO_DEPTH)
		err = tegra_spi_start_dma_based_transfer(tspi, t);
	else
		err = tegra_spi_start_cpu_based_transfer(tspi, t);

exit:
	spin_unlock_irqrestore(&tspi->lock, flags);
	return IRQ_HANDLED;
}

static irqreturn_t tegra_spi_isr_thread(int irq, void *context_data)
{
	struct tegra_spi_data *tspi = context_data;

	if (!tspi->is_curr_dma_xfer)
		return handle_cpu_based_xfer(tspi);
	return handle_dma_based_xfer(tspi);
}

static irqreturn_t tegra_spi_isr(int irq, void *context_data)
{
	struct tegra_spi_data *tspi = context_data;

	tspi->status_reg = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
	if (tspi->cur_direction & DATA_DIR_TX)
		tspi->tx_status = tspi->status_reg &
					(SPI_TX_FIFO_UNF | SPI_TX_FIFO_OVF);

	if (tspi->cur_direction & DATA_DIR_RX)
		tspi->rx_status = tspi->status_reg &
					(SPI_RX_FIFO_OVF | SPI_RX_FIFO_UNF);
	tegra_spi_clear_status(tspi);

	return IRQ_WAKE_THREAD;
}
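
/*
 * Read optional properties from the device tree; fall back to
 * SPI_DEFAULT_SPEED if "spi-max-frequency" is absent.
 */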
static void tegra_spi_parse_dt(struct platform_device *pdev,
			struct tegra_spi_data *tspi)
{
	struct device_node *np = pdev->dev.of_node;

	if (of_property_read_u32(np, "spi-max-frequency",
				&tspi->spi_max_frequency))
		tspi->spi_max_frequency = SPI_DEFAULT_SPEED; /* 25 MHz */
}

static const struct of_device_id tegra_spi_of_match[] = {
	{ .compatible = "nvidia,tegra114-spi", },
	{}
};
MODULE_DEVICE_TABLE(of, tegra_spi_of_match);
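
/*
 * Probe: map registers, request the IRQ, clock, reset and DMA channels,
 * then register the SPI master with runtime PM enabled.
 */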
static int tegra_spi_probe(struct platform_device *pdev)
{
	struct spi_master	*master;
	struct tegra_spi_data	*tspi;
	struct resource		*r;
	int ret, spi_irq;

	master = spi_alloc_master(&pdev->dev, sizeof(*tspi));
	if (!master) {
		dev_err(&pdev->dev, "master allocation failed\n");
		return -ENOMEM;
	}
	platform_set_drvdata(pdev, master);
	tspi = spi_master_get_devdata(master);

	/* Parse DT */
	tegra_spi_parse_dt(pdev, tspi);

	/* the spi->mode bits understood by this driver: */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	master->setup = tegra_spi_setup;
	master->transfer_one_message = tegra_spi_transfer_one_message;
	master->num_chipselect = MAX_CHIP_SELECT;
	master->bus_num = -1;
	master->auto_runtime_pm = true;

	tspi->master = master;
	tspi->dev = &pdev->dev;
	spin_lock_init(&tspi->lock);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	tspi->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(tspi->base)) {
		ret = PTR_ERR(tspi->base);
		goto exit_free_master;
	}
	tspi->phys = r->start;

	spi_irq = platform_get_irq(pdev, 0);
	tspi->irq = spi_irq;
	ret = request_threaded_irq(tspi->irq, tegra_spi_isr,
			tegra_spi_isr_thread, IRQF_ONESHOT,
			dev_name(&pdev->dev), tspi);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
					tspi->irq);
		goto exit_free_master;
	}

	tspi->clk = devm_clk_get(&pdev->dev, "spi");
	if (IS_ERR(tspi->clk)) {
		dev_err(&pdev->dev, "cannot get clock\n");
		ret = PTR_ERR(tspi->clk);
		goto exit_free_irq;
	}

	tspi->rst = devm_reset_control_get(&pdev->dev, "spi");
	if (IS_ERR(tspi->rst)) {
		dev_err(&pdev->dev, "cannot get reset\n");
		ret = PTR_ERR(tspi->rst);
		goto exit_free_irq;
	}

	tspi->max_buf_size = SPI_FIFO_DEPTH << 2;
	tspi->dma_buf_size = DEFAULT_SPI_DMA_BUF_LEN;

	ret = tegra_spi_init_dma_param(tspi, true);
	if (ret < 0)
		goto exit_free_irq;
	ret = tegra_spi_init_dma_param(tspi, false);
	if (ret < 0)
		goto exit_rx_dma_free;
	tspi->max_buf_size = tspi->dma_buf_size;
	init_completion(&tspi->tx_dma_complete);
	init_completion(&tspi->rx_dma_complete);

	init_completion(&tspi->xfer_completion);

	pm_runtime_enable(&pdev->dev);
	if (!pm_runtime_enabled(&pdev->dev)) {
		ret = tegra_spi_runtime_resume(&pdev->dev);
		if (ret)
			goto exit_pm_disable;
	}

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret);
		goto exit_pm_disable;
	}
	tspi->def_command1_reg = SPI_M_S;
	tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
	pm_runtime_put(&pdev->dev);

	master->dev.of_node = pdev->dev.of_node;
	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret < 0) {
		dev_err(&pdev->dev, "cannot register SPI master, err %d\n",
					ret);
		goto exit_pm_disable;
	}
	return ret;

exit_pm_disable:
	pm_runtime_disable(&pdev->dev);
	if (!pm_runtime_status_suspended(&pdev->dev))
		tegra_spi_runtime_suspend(&pdev->dev);
	tegra_spi_deinit_dma_param(tspi, false);
exit_rx_dma_free:
	tegra_spi_deinit_dma_param(tspi, true);
exit_free_irq:
	free_irq(spi_irq, tspi);
exit_free_master:
	spi_master_put(master);
	return ret;
}

static int tegra_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct tegra_spi_data	*tspi = spi_master_get_devdata(master);

	free_irq(tspi->irq, tspi);

	if (tspi->tx_dma_chan)
		tegra_spi_deinit_dma_param(tspi, false);

	if (tspi->rx_dma_chan)
		tegra_spi_deinit_dma_param(tspi, true);

	pm_runtime_disable(&pdev->dev);
	if (!pm_runtime_status_suspended(&pdev->dev))
		tegra_spi_runtime_suspend(&pdev->dev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int tegra_spi_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);

	return spi_master_suspend(master);
}

static int tegra_spi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct tegra_spi_data *tspi = spi_master_get_devdata(master);
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "pm runtime failed, e = %d\n", ret);
		return ret;
	}
	tegra_spi_writel(tspi, tspi->command1_reg, SPI_COMMAND1);
	pm_runtime_put(dev);

	return spi_master_resume(master);
}
#endif

static int tegra_spi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct tegra_spi_data *tspi = spi_master_get_devdata(master);

	/* Flush all writes still queued in the PPSB by reading back */
	tegra_spi_readl(tspi, SPI_COMMAND1);

	clk_disable_unprepare(tspi->clk);
	return 0;
}

static int tegra_spi_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct tegra_spi_data *tspi = spi_master_get_devdata(master);
	int ret;

	ret = clk_prepare_enable(tspi->clk);
	if (ret < 0) {
		dev_err(tspi->dev, "clk_prepare_enable failed: %d\n", ret);
		return ret;
	}
	return 0;
}

static const struct dev_pm_ops tegra_spi_pm_ops = {
	SET_RUNTIME_PM_OPS(tegra_spi_runtime_suspend,
		tegra_spi_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(tegra_spi_suspend, tegra_spi_resume)
};
static struct platform_driver tegra_spi_driver = {
	.driver = {
		.name		= "spi-tegra114",
		.owner		= THIS_MODULE,
		.pm		= &tegra_spi_pm_ops,
		.of_match_table	= tegra_spi_of_match,
	},
	.probe =	tegra_spi_probe,
	.remove =	tegra_spi_remove,
};
module_platform_driver(tegra_spi_driver);

MODULE_ALIAS("platform:spi-tegra114");
MODULE_DESCRIPTION("NVIDIA Tegra114 SPI Controller Driver");
MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
MODULE_LICENSE("GPL v2");