// SPDX-License-Identifier: GPL-2.0
//
// Synquacer HSSPI controller driver
//
// Copyright (c) 2015-2018 Socionext Inc.
// Copyright (c) 2018-2019 Linaro Ltd.
//

#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spinlock.h>
#include <linux/clk.h>

/* HSSPI register address definitions */
#define SYNQUACER_HSSPI_REG_MCTRL	0x00
#define SYNQUACER_HSSPI_REG_PCC0	0x04
#define SYNQUACER_HSSPI_REG_PCC(n)	(SYNQUACER_HSSPI_REG_PCC0 + (n) * 4)
#define SYNQUACER_HSSPI_REG_TXF	0x14
#define SYNQUACER_HSSPI_REG_TXE	0x18
#define SYNQUACER_HSSPI_REG_TXC	0x1C
#define SYNQUACER_HSSPI_REG_RXF	0x20
#define SYNQUACER_HSSPI_REG_RXE	0x24
#define SYNQUACER_HSSPI_REG_RXC	0x28
#define SYNQUACER_HSSPI_REG_FAULTF	0x2C
#define SYNQUACER_HSSPI_REG_FAULTC	0x30
#define SYNQUACER_HSSPI_REG_DMCFG	0x34
#define SYNQUACER_HSSPI_REG_DMSTART	0x38
#define SYNQUACER_HSSPI_REG_DMBCC	0x3C
#define SYNQUACER_HSSPI_REG_DMSTATUS	0x40
#define SYNQUACER_HSSPI_REG_FIFOCFG	0x4C
#define SYNQUACER_HSSPI_REG_TX_FIFO	0x50
#define SYNQUACER_HSSPI_REG_RX_FIFO	0x90
#define SYNQUACER_HSSPI_REG_MID	0xFC

/* HSSPI register bit definitions */
#define SYNQUACER_HSSPI_MCTRL_MEN	BIT(0)
#define SYNQUACER_HSSPI_MCTRL_COMMAND_SEQUENCE_EN	BIT(1)
#define SYNQUACER_HSSPI_MCTRL_CDSS	BIT(3)
#define SYNQUACER_HSSPI_MCTRL_MES	BIT(4)
#define SYNQUACER_HSSPI_MCTRL_SYNCON	BIT(5)

#define SYNQUACER_HSSPI_PCC_CPHA	BIT(0)
#define SYNQUACER_HSSPI_PCC_CPOL	BIT(1)
#define SYNQUACER_HSSPI_PCC_ACES	BIT(2)
#define SYNQUACER_HSSPI_PCC_RTM	BIT(3)
#define SYNQUACER_HSSPI_PCC_SSPOL	BIT(4)
#define SYNQUACER_HSSPI_PCC_SDIR	BIT(7)
#define SYNQUACER_HSSPI_PCC_SENDIAN	BIT(8)
#define SYNQUACER_HSSPI_PCC_SAFESYNC	BIT(16)
#define SYNQUACER_HSSPI_PCC_SS2CD_SHIFT	5U
#define SYNQUACER_HSSPI_PCC_CDRS_MASK	0x7f
#define SYNQUACER_HSSPI_PCC_CDRS_SHIFT	9U

#define SYNQUACER_HSSPI_TXF_FIFO_FULL	BIT(0)
#define SYNQUACER_HSSPI_TXF_FIFO_EMPTY	BIT(1)
#define SYNQUACER_HSSPI_TXF_SLAVE_RELEASED	BIT(6)

#define SYNQUACER_HSSPI_TXE_FIFO_FULL	BIT(0)
#define SYNQUACER_HSSPI_TXE_FIFO_EMPTY	BIT(1)
#define SYNQUACER_HSSPI_TXE_SLAVE_RELEASED	BIT(6)

#define SYNQUACER_HSSPI_RXF_FIFO_MORE_THAN_THRESHOLD	BIT(5)
#define SYNQUACER_HSSPI_RXF_SLAVE_RELEASED	BIT(6)

#define SYNQUACER_HSSPI_RXE_FIFO_MORE_THAN_THRESHOLD	BIT(5)
#define SYNQUACER_HSSPI_RXE_SLAVE_RELEASED	BIT(6)

#define SYNQUACER_HSSPI_DMCFG_SSDC	BIT(1)
#define SYNQUACER_HSSPI_DMCFG_MSTARTEN	BIT(2)

#define SYNQUACER_HSSPI_DMSTART_START	BIT(0)
#define SYNQUACER_HSSPI_DMSTOP_STOP	BIT(8)
#define SYNQUACER_HSSPI_DMPSEL_CS_MASK	0x3
#define SYNQUACER_HSSPI_DMPSEL_CS_SHIFT	16U
#define SYNQUACER_HSSPI_DMTRP_BUS_WIDTH_SHIFT	24U
#define SYNQUACER_HSSPI_DMTRP_DATA_MASK	0x3
#define SYNQUACER_HSSPI_DMTRP_DATA_SHIFT	26U
#define SYNQUACER_HSSPI_DMTRP_DATA_TXRX	0
#define SYNQUACER_HSSPI_DMTRP_DATA_RX	1
#define SYNQUACER_HSSPI_DMTRP_DATA_TX	2

#define SYNQUACER_HSSPI_DMSTATUS_RX_DATA_MASK	0x1f
#define SYNQUACER_HSSPI_DMSTATUS_RX_DATA_SHIFT	8U
#define SYNQUACER_HSSPI_DMSTATUS_TX_DATA_MASK	0x1f
#define SYNQUACER_HSSPI_DMSTATUS_TX_DATA_SHIFT	16U

#define SYNQUACER_HSSPI_FIFOCFG_RX_THRESHOLD_MASK	0xf
#define SYNQUACER_HSSPI_FIFOCFG_RX_THRESHOLD_SHIFT	0U
#define SYNQUACER_HSSPI_FIFOCFG_TX_THRESHOLD_MASK	0xf
#define SYNQUACER_HSSPI_FIFOCFG_TX_THRESHOLD_SHIFT	4U
#define SYNQUACER_HSSPI_FIFOCFG_FIFO_WIDTH_MASK	0x3
#define SYNQUACER_HSSPI_FIFOCFG_FIFO_WIDTH_SHIFT	8U
#define SYNQUACER_HSSPI_FIFOCFG_RX_FLUSH	BIT(11)
#define SYNQUACER_HSSPI_FIFOCFG_TX_FLUSH	BIT(12)

#define SYNQUACER_HSSPI_FIFO_DEPTH	16U
#define SYNQUACER_HSSPI_FIFO_TX_THRESHOLD	4U
#define SYNQUACER_HSSPI_FIFO_RX_THRESHOLD \
	(SYNQUACER_HSSPI_FIFO_DEPTH - SYNQUACER_HSSPI_FIFO_TX_THRESHOLD)

#define SYNQUACER_HSSPI_TRANSFER_MODE_TX	BIT(1)
#define SYNQUACER_HSSPI_TRANSFER_MODE_RX	BIT(2)
#define SYNQUACER_HSSPI_TRANSFER_TMOUT_MSEC	2000U
#define SYNQUACER_HSSPI_ENABLE_TMOUT_MSEC	1000U

#define SYNQUACER_HSSPI_CLOCK_SRC_IHCLK	0
#define SYNQUACER_HSSPI_CLOCK_SRC_IPCLK	1

#define SYNQUACER_HSSPI_NUM_CHIP_SELECT	4U
#define SYNQUACER_HSSPI_IRQ_NAME_MAX	32U

struct synquacer_spi {
	struct device *dev;
	struct completion transfer_done;
	unsigned int cs;
	unsigned int bpw;
	unsigned int mode;
	unsigned int speed;
	bool aces, rtm;
	void *rx_buf;
	const void *tx_buf;
	struct clk *clk;
	int clk_src_type;
	void __iomem *regs;
	u32 tx_words, rx_words;
	unsigned int bus_width;
	unsigned int transfer_mode;
	char rx_irq_name[SYNQUACER_HSSPI_IRQ_NAME_MAX];
	char tx_irq_name[SYNQUACER_HSSPI_IRQ_NAME_MAX];
};

static int read_fifo(struct synquacer_spi *sspi)
{
	u32 len = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTATUS);

	len = (len >> SYNQUACER_HSSPI_DMSTATUS_RX_DATA_SHIFT) &
	      SYNQUACER_HSSPI_DMSTATUS_RX_DATA_MASK;
	len = min(len, sspi->rx_words);

	switch (sspi->bpw) {
	case 8: {
		u8 *buf = sspi->rx_buf;

		ioread8_rep(sspi->regs + SYNQUACER_HSSPI_REG_RX_FIFO,
			    buf, len);
		sspi->rx_buf = buf + len;
		break;
	}
	case 16: {
		u16 *buf = sspi->rx_buf;

		ioread16_rep(sspi->regs + SYNQUACER_HSSPI_REG_RX_FIFO,
			     buf, len);
		sspi->rx_buf = buf + len;
		break;
	}
	case 24:
		/* fallthrough, should use 32-bit access */
	case 32: {
		u32 *buf = sspi->rx_buf;

		ioread32_rep(sspi->regs + SYNQUACER_HSSPI_REG_RX_FIFO,
			     buf, len);
		sspi->rx_buf = buf + len;
		break;
	}
	default:
		return -EINVAL;
	}

	sspi->rx_words -= len;
	return 0;
}

static int write_fifo(struct synquacer_spi *sspi)
{
	u32 len = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTATUS);

	len = (len >> SYNQUACER_HSSPI_DMSTATUS_TX_DATA_SHIFT) &
	      SYNQUACER_HSSPI_DMSTATUS_TX_DATA_MASK;
	len = min(SYNQUACER_HSSPI_FIFO_DEPTH - len,
		  sspi->tx_words);

	switch (sspi->bpw) {
	case 8: {
		const u8 *buf = sspi->tx_buf;

		iowrite8_rep(sspi->regs + SYNQUACER_HSSPI_REG_TX_FIFO,
			     buf, len);
		sspi->tx_buf = buf + len;
		break;
	}
	case 16: {
		const u16 *buf = sspi->tx_buf;

		iowrite16_rep(sspi->regs + SYNQUACER_HSSPI_REG_TX_FIFO,
			      buf, len);
		sspi->tx_buf = buf + len;
		break;
	}
	case 24:
		/* fallthrough, should use 32-bit access */
	case 32: {
		const u32 *buf = sspi->tx_buf;

		iowrite32_rep(sspi->regs + SYNQUACER_HSSPI_REG_TX_FIFO,
			      buf, len);
		sspi->tx_buf = buf + len;
		break;
	}
	default:
		return -EINVAL;
	}

	sspi->tx_words -= len;
	return 0;
}

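/*
 * Program the per-CS PCC register (SPI mode, polarities, clock divider),
 * the FIFO word width and the DMSTART direction/bus-width fields for the
 * upcoming transfer. Returns early if nothing changed since the last call.
 */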
static int synquacer_spi_config(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	struct synquacer_spi *sspi = spi_master_get_devdata(master);
	unsigned int speed, mode, bpw, cs, bus_width, transfer_mode;
	u32 rate, val, div;

	/* Full Duplex only on 1-bit wide bus */
	if (xfer->rx_buf && xfer->tx_buf &&
	    (xfer->rx_nbits != 1 || xfer->tx_nbits != 1)) {
		dev_err(sspi->dev,
			"RX and TX bus widths must be 1-bit for Full-Duplex!\n");
		return -EINVAL;
	}

	if (xfer->tx_buf) {
		bus_width = xfer->tx_nbits;
		transfer_mode = SYNQUACER_HSSPI_TRANSFER_MODE_TX;
	} else {
		bus_width = xfer->rx_nbits;
		transfer_mode = SYNQUACER_HSSPI_TRANSFER_MODE_RX;
	}

	mode = spi->mode;
	cs = spi->chip_select;
	speed = xfer->speed_hz;
	bpw = xfer->bits_per_word;

	/* return if nothing to change */
	if (speed == sspi->speed &&
	    bus_width == sspi->bus_width && bpw == sspi->bpw &&
	    mode == sspi->mode && cs == sspi->cs &&
	    transfer_mode == sspi->transfer_mode) {
		return 0;
	}

	sspi->transfer_mode = transfer_mode;
	rate = master->max_speed_hz;

	div = DIV_ROUND_UP(rate, speed);
	if (div > 254) {
		dev_err(sspi->dev, "Requested rate too low (%u)\n", speed);
		return -EINVAL;
	}

	val = readl(sspi->regs + SYNQUACER_HSSPI_REG_PCC(cs));
	val &= ~SYNQUACER_HSSPI_PCC_SAFESYNC;
	if (bpw == 8 && (mode & (SPI_TX_DUAL | SPI_RX_DUAL)) && div < 3)
		val |= SYNQUACER_HSSPI_PCC_SAFESYNC;
	if (bpw == 8 && (mode & (SPI_TX_QUAD | SPI_RX_QUAD)) && div < 6)
		val |= SYNQUACER_HSSPI_PCC_SAFESYNC;
	if (bpw == 16 && (mode & (SPI_TX_QUAD | SPI_RX_QUAD)) && div < 3)
		val |= SYNQUACER_HSSPI_PCC_SAFESYNC;

	if (mode & SPI_CPHA)
		val |= SYNQUACER_HSSPI_PCC_CPHA;
	else
		val &= ~SYNQUACER_HSSPI_PCC_CPHA;

	if (mode & SPI_CPOL)
		val |= SYNQUACER_HSSPI_PCC_CPOL;
	else
		val &= ~SYNQUACER_HSSPI_PCC_CPOL;

	if (mode & SPI_CS_HIGH)
		val |= SYNQUACER_HSSPI_PCC_SSPOL;
	else
		val &= ~SYNQUACER_HSSPI_PCC_SSPOL;

	if (mode & SPI_LSB_FIRST)
		val |= SYNQUACER_HSSPI_PCC_SDIR;
	else
		val &= ~SYNQUACER_HSSPI_PCC_SDIR;

	if (sspi->aces)
		val |= SYNQUACER_HSSPI_PCC_ACES;
	else
		val &= ~SYNQUACER_HSSPI_PCC_ACES;

	if (sspi->rtm)
		val |= SYNQUACER_HSSPI_PCC_RTM;
	else
		val &= ~SYNQUACER_HSSPI_PCC_RTM;

	val |= (3 << SYNQUACER_HSSPI_PCC_SS2CD_SHIFT);
	val |= SYNQUACER_HSSPI_PCC_SENDIAN;

	val &= ~(SYNQUACER_HSSPI_PCC_CDRS_MASK <<
		 SYNQUACER_HSSPI_PCC_CDRS_SHIFT);
	val |= ((div >> 1) << SYNQUACER_HSSPI_PCC_CDRS_SHIFT);

	writel(val, sspi->regs + SYNQUACER_HSSPI_REG_PCC(cs));

	val = readl(sspi->regs + SYNQUACER_HSSPI_REG_FIFOCFG);
	val &= ~(SYNQUACER_HSSPI_FIFOCFG_FIFO_WIDTH_MASK <<
		 SYNQUACER_HSSPI_FIFOCFG_FIFO_WIDTH_SHIFT);
	val |= ((bpw / 8 - 1) << SYNQUACER_HSSPI_FIFOCFG_FIFO_WIDTH_SHIFT);
	writel(val, sspi->regs + SYNQUACER_HSSPI_REG_FIFOCFG);

	val = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
	val &= ~(SYNQUACER_HSSPI_DMTRP_DATA_MASK <<
		 SYNQUACER_HSSPI_DMTRP_DATA_SHIFT);

	if (xfer->rx_buf)
		val |= (SYNQUACER_HSSPI_DMTRP_DATA_RX <<
			SYNQUACER_HSSPI_DMTRP_DATA_SHIFT);
	else
		val |= (SYNQUACER_HSSPI_DMTRP_DATA_TX <<
			SYNQUACER_HSSPI_DMTRP_DATA_SHIFT);

	val &= ~(3 << SYNQUACER_HSSPI_DMTRP_BUS_WIDTH_SHIFT);
	val |= ((bus_width >> 1) << SYNQUACER_HSSPI_DMTRP_BUS_WIDTH_SHIFT);
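	/* commit the transfer direction and bus width to DMSTART */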
	writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);

	sspi->bpw = bpw;
	sspi->mode = mode;
	sspi->speed = speed;
	sspi->cs = spi->chip_select;
	sspi->bus_width = bus_width;

	return 0;
}

static int synquacer_spi_transfer_one(struct spi_master *master,
				      struct spi_device *spi,
				      struct spi_transfer *xfer)
{
	struct synquacer_spi *sspi = spi_master_get_devdata(master);
	int ret;
	int status = 0;
	u32 words;
	u8 bpw;
	u32 val;

	val = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
	val &= ~SYNQUACER_HSSPI_DMSTOP_STOP;
	writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);

	val = readl(sspi->regs + SYNQUACER_HSSPI_REG_FIFOCFG);
	val |= SYNQUACER_HSSPI_FIFOCFG_RX_FLUSH;
	val |= SYNQUACER_HSSPI_FIFOCFG_TX_FLUSH;
	writel(val, sspi->regs + SYNQUACER_HSSPI_REG_FIFOCFG);

	/*
	 * See if we can transfer 4 bytes as 1 word
	 * to maximize the FIFO buffer efficiency.
	 */
	bpw = xfer->bits_per_word;
	if (bpw == 8 && !(xfer->len % 4) && !(spi->mode & SPI_LSB_FIRST))
		xfer->bits_per_word = 32;

	ret = synquacer_spi_config(master, spi, xfer);

	/* restore */
	xfer->bits_per_word = bpw;

	if (ret)
		return ret;

	reinit_completion(&sspi->transfer_done);

	sspi->tx_buf = xfer->tx_buf;
	sspi->rx_buf = xfer->rx_buf;

	switch (sspi->bpw) {
	case 8:
		words = xfer->len;
		break;
	case 16:
		words = xfer->len / 2;
		break;
	case 24:
		/* fallthrough, should use 32-bit access */
	case 32:
		words = xfer->len / 4;
		break;
	default:
		dev_err(sspi->dev, "unsupported bpw: %d\n", sspi->bpw);
		return -EINVAL;
	}

	if (xfer->tx_buf)
		sspi->tx_words = words;
	else
		sspi->tx_words = 0;

	if (xfer->rx_buf)
		sspi->rx_words = words;
	else
		sspi->rx_words = 0;

	if (xfer->tx_buf) {
		status = write_fifo(sspi);
		if (status < 0) {
			dev_err(sspi->dev, "failed write_fifo. status: 0x%x\n",
				status);
			return status;
		}
	}

	if (xfer->rx_buf) {
		val = readl(sspi->regs + SYNQUACER_HSSPI_REG_FIFOCFG);
		val &= ~(SYNQUACER_HSSPI_FIFOCFG_RX_THRESHOLD_MASK <<
			 SYNQUACER_HSSPI_FIFOCFG_RX_THRESHOLD_SHIFT);
		val |= ((sspi->rx_words > SYNQUACER_HSSPI_FIFO_DEPTH ?
			SYNQUACER_HSSPI_FIFO_RX_THRESHOLD : sspi->rx_words) <<
			SYNQUACER_HSSPI_FIFOCFG_RX_THRESHOLD_SHIFT);
		writel(val, sspi->regs + SYNQUACER_HSSPI_REG_FIFOCFG);
	}

	writel(~0, sspi->regs + SYNQUACER_HSSPI_REG_TXC);
	writel(~0, sspi->regs + SYNQUACER_HSSPI_REG_RXC);

	/* Trigger */
	val = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
	val |= SYNQUACER_HSSPI_DMSTART_START;
	writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);

	if (xfer->tx_buf) {
		val = SYNQUACER_HSSPI_TXE_FIFO_EMPTY;
		writel(val, sspi->regs + SYNQUACER_HSSPI_REG_TXE);
		status = wait_for_completion_timeout(&sspi->transfer_done,
			msecs_to_jiffies(SYNQUACER_HSSPI_TRANSFER_TMOUT_MSEC));
		writel(0, sspi->regs + SYNQUACER_HSSPI_REG_TXE);
	}

	if (xfer->rx_buf) {
		u32 buf[SYNQUACER_HSSPI_FIFO_DEPTH];

		val = SYNQUACER_HSSPI_RXE_FIFO_MORE_THAN_THRESHOLD |
		      SYNQUACER_HSSPI_RXE_SLAVE_RELEASED;
		writel(val, sspi->regs + SYNQUACER_HSSPI_REG_RXE);
		status = wait_for_completion_timeout(&sspi->transfer_done,
			msecs_to_jiffies(SYNQUACER_HSSPI_TRANSFER_TMOUT_MSEC));
		writel(0, sspi->regs + SYNQUACER_HSSPI_REG_RXE);

		/* stop RX and clean RXFIFO */
		val = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
		val |= SYNQUACER_HSSPI_DMSTOP_STOP;
		writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
		sspi->rx_buf = buf;
		sspi->rx_words = SYNQUACER_HSSPI_FIFO_DEPTH;
		read_fifo(sspi);
	}

	if (status < 0) {
		dev_err(sspi->dev, "failed to transfer. status: 0x%x\n",
			status);
		return status;
	}

	return 0;
}

static void synquacer_spi_set_cs(struct spi_device *spi, bool enable)
{
	struct synquacer_spi *sspi = spi_master_get_devdata(spi->master);
	u32 val;

	val = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
	val &= ~(SYNQUACER_HSSPI_DMPSEL_CS_MASK <<
		 SYNQUACER_HSSPI_DMPSEL_CS_SHIFT);
	val |= spi->chip_select << SYNQUACER_HSSPI_DMPSEL_CS_SHIFT;
	writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
}

static int synquacer_spi_wait_status_update(struct synquacer_spi *sspi,
					    bool enable)
{
	u32 val;
	unsigned long timeout = jiffies +
		msecs_to_jiffies(SYNQUACER_HSSPI_ENABLE_TMOUT_MSEC);

	/* wait until MES (Module Enable Status) is updated */
	do {
		val = readl(sspi->regs + SYNQUACER_HSSPI_REG_MCTRL) &
		      SYNQUACER_HSSPI_MCTRL_MES;
		if (enable && val)
			return 0;
		if (!enable && !val)
			return 0;
	} while (time_before(jiffies, timeout));

	dev_err(sspi->dev, "timeout occurs in updating Module Enable Status\n");
	return -EBUSY;
}

static int synquacer_spi_enable(struct spi_master *master)
{
	u32 val;
	int status;
	struct synquacer_spi *sspi = spi_master_get_devdata(master);

	/* Disable module */
	writel(0, sspi->regs + SYNQUACER_HSSPI_REG_MCTRL);
	status = synquacer_spi_wait_status_update(sspi, false);
	if (status < 0)
		return status;

	writel(0, sspi->regs + SYNQUACER_HSSPI_REG_TXE);
	writel(0, sspi->regs + SYNQUACER_HSSPI_REG_RXE);
	writel(~0, sspi->regs + SYNQUACER_HSSPI_REG_TXC);
	writel(~0, sspi->regs + SYNQUACER_HSSPI_REG_RXC);
	writel(~0, sspi->regs + SYNQUACER_HSSPI_REG_FAULTC);

	val = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMCFG);
	val &= ~SYNQUACER_HSSPI_DMCFG_SSDC;
	val &= ~SYNQUACER_HSSPI_DMCFG_MSTARTEN;
	writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMCFG);

	val = readl(sspi->regs + SYNQUACER_HSSPI_REG_MCTRL);
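	/* CDSS selects iPCLK as the clock source; otherwise iHCLK is used */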
	if (sspi->clk_src_type == SYNQUACER_HSSPI_CLOCK_SRC_IPCLK)
		val |= SYNQUACER_HSSPI_MCTRL_CDSS;
	else
		val &= ~SYNQUACER_HSSPI_MCTRL_CDSS;

	val &= ~SYNQUACER_HSSPI_MCTRL_COMMAND_SEQUENCE_EN;
	val |= SYNQUACER_HSSPI_MCTRL_MEN;
	val |= SYNQUACER_HSSPI_MCTRL_SYNCON;

	/* Enable module */
	writel(val, sspi->regs + SYNQUACER_HSSPI_REG_MCTRL);
	status = synquacer_spi_wait_status_update(sspi, true);
	if (status < 0)
		return status;

	return 0;
}

static irqreturn_t sq_spi_rx_handler(int irq, void *priv)
{
	uint32_t val;
	struct synquacer_spi *sspi = priv;

	val = readl(sspi->regs + SYNQUACER_HSSPI_REG_RXF);
	if ((val & SYNQUACER_HSSPI_RXF_SLAVE_RELEASED) ||
	    (val & SYNQUACER_HSSPI_RXF_FIFO_MORE_THAN_THRESHOLD)) {
		read_fifo(sspi);

		if (sspi->rx_words == 0) {
			writel(0, sspi->regs + SYNQUACER_HSSPI_REG_RXE);
			complete(&sspi->transfer_done);
		}
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static irqreturn_t sq_spi_tx_handler(int irq, void *priv)
{
	uint32_t val;
	struct synquacer_spi *sspi = priv;

	val = readl(sspi->regs + SYNQUACER_HSSPI_REG_TXF);
	if (val & SYNQUACER_HSSPI_TXF_FIFO_EMPTY) {
		if (sspi->tx_words == 0) {
			writel(0, sspi->regs + SYNQUACER_HSSPI_REG_TXE);
			complete(&sspi->transfer_done);
		} else {
			write_fifo(sspi);
		}
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static int synquacer_spi_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct spi_master *master;
	struct synquacer_spi *sspi;
	int ret;
	int rx_irq, tx_irq;

	master = spi_alloc_master(&pdev->dev, sizeof(*sspi));
	if (!master)
		return -ENOMEM;

	platform_set_drvdata(pdev, master);

	sspi = spi_master_get_devdata(master);
	sspi->dev = &pdev->dev;

	init_completion(&sspi->transfer_done);

	sspi->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(sspi->regs)) {
		ret = PTR_ERR(sspi->regs);
		goto put_spi;
	}

	sspi->clk_src_type = SYNQUACER_HSSPI_CLOCK_SRC_IHCLK; /* Default */
	device_property_read_u32(&pdev->dev, "socionext,ihclk-rate",
				 &master->max_speed_hz); /* for ACPI */

	if (dev_of_node(&pdev->dev)) {
		if (device_property_match_string(&pdev->dev,
						 "clock-names", "iHCLK") >= 0) {
			sspi->clk_src_type = SYNQUACER_HSSPI_CLOCK_SRC_IHCLK;
			sspi->clk = devm_clk_get(sspi->dev, "iHCLK");
		} else if (device_property_match_string(&pdev->dev,
							"clock-names", "iPCLK") >= 0) {
			sspi->clk_src_type = SYNQUACER_HSSPI_CLOCK_SRC_IPCLK;
			sspi->clk = devm_clk_get(sspi->dev, "iPCLK");
		} else {
			dev_err(&pdev->dev, "specified wrong clock source\n");
			ret = -EINVAL;
			goto put_spi;
		}

		if (IS_ERR(sspi->clk)) {
			if (!(PTR_ERR(sspi->clk) == -EPROBE_DEFER))
				dev_err(&pdev->dev, "clock not found\n");
			ret = PTR_ERR(sspi->clk);
			goto put_spi;
		}

		ret = clk_prepare_enable(sspi->clk);
		if (ret) {
			dev_err(&pdev->dev, "failed to enable clock (%d)\n",
				ret);
			goto put_spi;
		}

		master->max_speed_hz = clk_get_rate(sspi->clk);
	}

	if (!master->max_speed_hz) {
		dev_err(&pdev->dev, "missing clock source\n");
		ret = -EINVAL;
		goto fail_enable;
	}
	master->min_speed_hz = master->max_speed_hz / 254;

	sspi->aces = device_property_read_bool(&pdev->dev,
					       "socionext,set-aces");
	sspi->rtm = device_property_read_bool(&pdev->dev, "socionext,use-rtm");

	master->num_chipselect = SYNQUACER_HSSPI_NUM_CHIP_SELECT;

	rx_irq = platform_get_irq(pdev, 0);
	if (rx_irq <= 0) {
		dev_err(&pdev->dev, "get rx_irq failed (%d)\n", rx_irq);
		ret = rx_irq;
		goto put_spi;
	}
	snprintf(sspi->rx_irq_name, SYNQUACER_HSSPI_IRQ_NAME_MAX, "%s-rx",
		 dev_name(&pdev->dev));
	ret = devm_request_irq(&pdev->dev, rx_irq, sq_spi_rx_handler,
			       0, sspi->rx_irq_name, sspi);
	if (ret) {
		dev_err(&pdev->dev, "request rx_irq failed (%d)\n", ret);
		goto put_spi;
	}

	tx_irq = platform_get_irq(pdev, 1);
	if (tx_irq <= 0) {
		dev_err(&pdev->dev, "get tx_irq failed (%d)\n", tx_irq);
		ret = tx_irq;
		goto put_spi;
	}
	snprintf(sspi->tx_irq_name, SYNQUACER_HSSPI_IRQ_NAME_MAX, "%s-tx",
		 dev_name(&pdev->dev));
	ret = devm_request_irq(&pdev->dev, tx_irq, sq_spi_tx_handler,
			       0, sspi->tx_irq_name, sspi);
	if (ret) {
		dev_err(&pdev->dev, "request tx_irq failed (%d)\n", ret);
		goto put_spi;
	}

	master->dev.of_node = np;
	master->dev.fwnode = pdev->dev.fwnode;
	master->auto_runtime_pm = true;
	master->bus_num = pdev->id;

	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_DUAL | SPI_RX_DUAL |
			    SPI_TX_QUAD | SPI_RX_QUAD;
	master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(24) |
				     SPI_BPW_MASK(16) | SPI_BPW_MASK(8);

	master->set_cs = synquacer_spi_set_cs;
	master->transfer_one = synquacer_spi_transfer_one;

	ret = synquacer_spi_enable(master);
	if (ret)
		goto fail_enable;

	pm_runtime_set_active(sspi->dev);
	pm_runtime_enable(sspi->dev);

	ret = devm_spi_register_master(sspi->dev, master);
	if (ret)
		goto disable_pm;

	return 0;

disable_pm:
	pm_runtime_disable(sspi->dev);
fail_enable:
	clk_disable_unprepare(sspi->clk);
put_spi:
	spi_master_put(master);

	return ret;
}

static int synquacer_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct synquacer_spi *sspi = spi_master_get_devdata(master);

	pm_runtime_disable(sspi->dev);

	clk_disable_unprepare(sspi->clk);

	return 0;
}

static int __maybe_unused synquacer_spi_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct synquacer_spi *sspi = spi_master_get_devdata(master);
	int ret;

	ret = spi_master_suspend(master);
	if (ret)
		return ret;

	if (!pm_runtime_suspended(dev))
		clk_disable_unprepare(sspi->clk);

	return ret;
}

static int __maybe_unused synquacer_spi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct synquacer_spi *sspi = spi_master_get_devdata(master);
	int ret;

	if (!pm_runtime_suspended(dev)) {
		/* Ensure reconfigure during next xfer */
		sspi->speed = 0;

		ret = clk_prepare_enable(sspi->clk);
		if (ret < 0) {
			dev_err(dev, "failed to enable clk (%d)\n", ret);
			return ret;
		}

		ret = synquacer_spi_enable(master);
		if (ret) {
			dev_err(dev, "failed to enable spi (%d)\n", ret);
			return ret;
		}
	}

	ret = spi_master_resume(master);
	if (ret < 0)
		clk_disable_unprepare(sspi->clk);

	return ret;
}

static SIMPLE_DEV_PM_OPS(synquacer_spi_pm_ops, synquacer_spi_suspend,
			 synquacer_spi_resume);

static const struct of_device_id synquacer_spi_of_match[] = {
	{.compatible = "socionext,synquacer-spi"},
	{}
};
MODULE_DEVICE_TABLE(of, synquacer_spi_of_match);

#ifdef CONFIG_ACPI
static const struct acpi_device_id synquacer_hsspi_acpi_ids[] = {
	{ "SCX0004" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(acpi, synquacer_hsspi_acpi_ids);
#endif

static struct platform_driver synquacer_spi_driver = {
	.driver = {
		.name = "synquacer-spi",
		.pm = &synquacer_spi_pm_ops,
		.of_match_table = synquacer_spi_of_match,
		.acpi_match_table = ACPI_PTR(synquacer_hsspi_acpi_ids),
	},
	.probe = synquacer_spi_probe,
	.remove = synquacer_spi_remove,
};
module_platform_driver(synquacer_spi_driver);

MODULE_DESCRIPTION("Socionext Synquacer HS-SPI controller driver");
MODULE_AUTHOR("Masahisa Kojima <masahisa.kojima@linaro.org>");
MODULE_AUTHOR("Jassi Brar <jaswinder.singh@linaro.org>");
MODULE_LICENSE("GPL v2");