// SPDX-License-Identifier: GPL-2.0+
/*
 * Freescale i.MX28 SPI driver
 *
 * Copyright (C) 2011 Marek Vasut <marek.vasut@gmail.com>
 * on behalf of DENX Software Engineering GmbH
 *
 * NOTE: This driver only supports the SPI-controller chip selects;
 * GPIO-driven chip selects are not supported.
 */

#include <common.h>
#include <malloc.h>
#include <memalign.h>
#include <spi.h>
#include <linux/errno.h>
#include <asm/io.h>
#include <asm/arch/clock.h>
#include <asm/arch/imx-regs.h>
#include <asm/arch/sys_proto.h>
#include <asm/mach-imx/dma.h>

#define MXS_SPI_MAX_TIMEOUT		1000000
#define MXS_SPI_PORT_OFFSET		0x2000
#define MXS_SSP_CHIPSELECT_MASK		0x00300000
#define MXS_SSP_CHIPSELECT_SHIFT	20

#define MXSSSP_SMALL_TRANSFER		512

struct mxs_spi_slave {
	struct spi_slave	slave;
	uint32_t		max_khz;
	uint32_t		mode;
	struct mxs_ssp_regs	*regs;
};

static inline struct mxs_spi_slave *to_mxs_slave(struct spi_slave *slave)
{
	return container_of(slave, struct mxs_spi_slave, slave);
}

int spi_cs_is_valid(unsigned int bus, unsigned int cs)
{
	/* MXS SPI: 4 ports and 3 chip selects maximum */
	if (!mxs_ssp_bus_id_valid(bus) || cs > 2)
		return 0;
	else
		return 1;
}

struct spi_slave *spi_setup_slave(unsigned int bus, unsigned int cs,
				  unsigned int max_hz, unsigned int mode)
{
	struct mxs_spi_slave *mxs_slave;

	if (!spi_cs_is_valid(bus, cs)) {
		printf("mxs_spi: invalid bus %d / chip select %d\n", bus, cs);
		return NULL;
	}

	mxs_slave = spi_alloc_slave(struct mxs_spi_slave, bus, cs);
	if (!mxs_slave)
		return NULL;

	if (mxs_dma_init_channel(MXS_DMA_CHANNEL_AHB_APBH_SSP0 + bus))
		goto err_init;

	mxs_slave->max_khz = max_hz / 1000;
	mxs_slave->mode = mode;
	mxs_slave->regs = mxs_ssp_regs_by_bus(bus);

	return &mxs_slave->slave;

err_init:
	free(mxs_slave);
	return NULL;
}

void spi_free_slave(struct spi_slave *slave)
{
	struct mxs_spi_slave *mxs_slave = to_mxs_slave(slave);

	free(mxs_slave);
}

int spi_claim_bus(struct spi_slave *slave)
{
	struct mxs_spi_slave *mxs_slave = to_mxs_slave(slave);
	struct mxs_ssp_regs *ssp_regs = mxs_slave->regs;
	uint32_t reg = 0;

	mxs_reset_block(&ssp_regs->hw_ssp_ctrl0_reg);

	writel((slave->cs << MXS_SSP_CHIPSELECT_SHIFT) |
	       SSP_CTRL0_BUS_WIDTH_ONE_BIT,
	       &ssp_regs->hw_ssp_ctrl0);

	reg = SSP_CTRL1_SSP_MODE_SPI | SSP_CTRL1_WORD_LENGTH_EIGHT_BITS;
	reg |= (mxs_slave->mode & SPI_CPOL) ? SSP_CTRL1_POLARITY : 0;
	reg |= (mxs_slave->mode & SPI_CPHA) ? SSP_CTRL1_PHASE : 0;
	writel(reg, &ssp_regs->hw_ssp_ctrl1);

	writel(0, &ssp_regs->hw_ssp_cmd0);

	mxs_set_ssp_busclock(slave->bus, mxs_slave->max_khz);

	return 0;
}

void spi_release_bus(struct spi_slave *slave)
{
}

static void mxs_spi_start_xfer(struct mxs_ssp_regs *ssp_regs)
{
	writel(SSP_CTRL0_LOCK_CS, &ssp_regs->hw_ssp_ctrl0_set);
	writel(SSP_CTRL0_IGNORE_CRC, &ssp_regs->hw_ssp_ctrl0_clr);
}

static void mxs_spi_end_xfer(struct mxs_ssp_regs *ssp_regs)
{
	writel(SSP_CTRL0_LOCK_CS, &ssp_regs->hw_ssp_ctrl0_clr);
	writel(SSP_CTRL0_IGNORE_CRC, &ssp_regs->hw_ssp_ctrl0_set);
}

static int mxs_spi_xfer_pio(struct mxs_spi_slave *slave,
			char *data, int length, int write, unsigned long flags)
{
	struct mxs_ssp_regs *ssp_regs = slave->regs;

	if (flags & SPI_XFER_BEGIN)
		mxs_spi_start_xfer(ssp_regs);

	while (length--) {
		/* We transfer 1 byte */
#if defined(CONFIG_MX23)
		writel(SSP_CTRL0_XFER_COUNT_MASK, &ssp_regs->hw_ssp_ctrl0_clr);
		writel(1, &ssp_regs->hw_ssp_ctrl0_set);
#elif defined(CONFIG_MX28)
		writel(1, &ssp_regs->hw_ssp_xfer_size);
#endif

		if ((flags & SPI_XFER_END) && !length)
			mxs_spi_end_xfer(ssp_regs);

		if (write)
			writel(SSP_CTRL0_READ, &ssp_regs->hw_ssp_ctrl0_clr);
		else
			writel(SSP_CTRL0_READ, &ssp_regs->hw_ssp_ctrl0_set);

		writel(SSP_CTRL0_RUN, &ssp_regs->hw_ssp_ctrl0_set);

		if (mxs_wait_mask_set(&ssp_regs->hw_ssp_ctrl0_reg,
			SSP_CTRL0_RUN, MXS_SPI_MAX_TIMEOUT)) {
			printf("MXS SPI: Timeout waiting for start\n");
			return -ETIMEDOUT;
		}

		if (write)
			writel(*data++, &ssp_regs->hw_ssp_data);

		writel(SSP_CTRL0_DATA_XFER, &ssp_regs->hw_ssp_ctrl0_set);

		if (!write) {
			if (mxs_wait_mask_clr(&ssp_regs->hw_ssp_status_reg,
				SSP_STATUS_FIFO_EMPTY, MXS_SPI_MAX_TIMEOUT)) {
				printf("MXS SPI: Timeout waiting for data\n");
				return -ETIMEDOUT;
			}

			*data = readl(&ssp_regs->hw_ssp_data);
			data++;
		}

		if (mxs_wait_mask_clr(&ssp_regs->hw_ssp_ctrl0_reg,
			SSP_CTRL0_RUN, MXS_SPI_MAX_TIMEOUT)) {
			printf("MXS SPI: Timeout waiting for finish\n");
			return -ETIMEDOUT;
		}
	}

	return 0;
}

static int mxs_spi_xfer_dma(struct mxs_spi_slave *slave,
			char *data, int length, int write, unsigned long flags)
{
	const int xfer_max_sz = 0xff00;
	const int desc_count = DIV_ROUND_UP(length, xfer_max_sz) + 1;
	struct mxs_ssp_regs *ssp_regs = slave->regs;
	struct mxs_dma_desc *dp;
	uint32_t ctrl0;
	uint32_t cache_data_count;
	const uint32_t dstart = (uint32_t)data;
	int dmach;
	int tl;
	int ret = 0;

#if defined(CONFIG_MX23)
	const int mxs_spi_pio_words = 1;
#elif defined(CONFIG_MX28)
	const int mxs_spi_pio_words = 4;
#endif

	ALLOC_CACHE_ALIGN_BUFFER(struct mxs_dma_desc, desc, desc_count);

	memset(desc, 0, sizeof(struct mxs_dma_desc) * desc_count);

	ctrl0 = readl(&ssp_regs->hw_ssp_ctrl0);
	ctrl0 |= SSP_CTRL0_DATA_XFER;

	if (flags & SPI_XFER_BEGIN)
		ctrl0 |= SSP_CTRL0_LOCK_CS;
	if (!write)
		ctrl0 |= SSP_CTRL0_READ;

	if (length % ARCH_DMA_MINALIGN)
		cache_data_count = roundup(length, ARCH_DMA_MINALIGN);
	else
		cache_data_count = length;

	/* Flush the data to DRAM so the DMA controller can pick it up */
	if (write)
		flush_dcache_range(dstart, dstart + cache_data_count);

	/* Invalidate the area, so no writeback into the RAM races with DMA */
	invalidate_dcache_range(dstart, dstart + cache_data_count);

	dmach = MXS_DMA_CHANNEL_AHB_APBH_SSP0 + slave->slave.bus;

	dp = desc;
	while (length) {
		dp->address = (dma_addr_t)dp;
		dp->cmd.address = (dma_addr_t)data;

		/*
		 * This is correct, even though it does indeed look insane.
		 * I hereby have to, wholeheartedly, thank Freescale Inc.,
		 * for always inventing insane hardware and keeping me busy
		 * and employed ;-)
		 */
		if (write)
			dp->cmd.data = MXS_DMA_DESC_COMMAND_DMA_READ;
		else
			dp->cmd.data = MXS_DMA_DESC_COMMAND_DMA_WRITE;

		/*
		 * The DMA controller can transfer large chunks (64kB) at
		 * a time by setting the transfer length to 0. Setting tl to
		 * 0x10000 will overflow below and make .data contain 0.
		 * Otherwise, 0xff00 is the transfer maximum.
		 */
		if (length >= 0x10000)
			tl = 0x10000;
		else
			tl = min(length, xfer_max_sz);

		dp->cmd.data |=
			((tl & 0xffff) << MXS_DMA_DESC_BYTES_OFFSET) |
			(mxs_spi_pio_words << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
			MXS_DMA_DESC_HALT_ON_TERMINATE |
			MXS_DMA_DESC_TERMINATE_FLUSH;

		data += tl;
		length -= tl;

		if (!length) {
			dp->cmd.data |= MXS_DMA_DESC_IRQ | MXS_DMA_DESC_DEC_SEM;

			if (flags & SPI_XFER_END) {
				ctrl0 &= ~SSP_CTRL0_LOCK_CS;
				ctrl0 |= SSP_CTRL0_IGNORE_CRC;
			}
		}

		/*
		 * Write CTRL0, CMD0, CMD1 and XFER_SIZE registers in
		 * case of MX28, write only CTRL0 in case of MX23 due
		 * to the difference in register layout. It is utterly
		 * essential that the XFER_SIZE register is written on
		 * a per-descriptor basis with the same size as is the
		 * descriptor!
		 */
		dp->cmd.pio_words[0] = ctrl0;
#ifdef CONFIG_MX28
		dp->cmd.pio_words[1] = 0;
		dp->cmd.pio_words[2] = 0;
		dp->cmd.pio_words[3] = tl;
#endif

		mxs_dma_desc_append(dmach, dp);

		dp++;
	}

	if (mxs_dma_go(dmach))
		ret = -EINVAL;

	/* The data arrived in DRAM, invalidate the cache over it */
	if (!write)
		invalidate_dcache_range(dstart, dstart + cache_data_count);

	return ret;
}

int spi_xfer(struct spi_slave *slave, unsigned int bitlen,
	     const void *dout, void *din, unsigned long flags)
{
	struct mxs_spi_slave *mxs_slave = to_mxs_slave(slave);
	struct mxs_ssp_regs *ssp_regs = mxs_slave->regs;
	int len = bitlen / 8;
	char dummy;
	int write = 0;
	char *data = NULL;
	int dma = 1;

	if (bitlen == 0) {
		if (flags & SPI_XFER_END) {
			/*
			 * Zero-length transfer that ends the message:
			 * read one dummy byte so the chip select gets
			 * deasserted.
			 */
			din = (void *)&dummy;
			len = 1;
		} else
			return 0;
	}

	/* Half-duplex only */
	if (din && dout)
		return -EINVAL;
	/* No data */
	if (!din && !dout)
		return 0;

	if (dout) {
		data = (char *)dout;
		write = 1;
	} else if (din) {
		data = (char *)din;
		write = 0;
	}

	/*
	 * Check the alignment: if the buffer and length are DMA-aligned,
	 * do a DMA transfer, otherwise use PIO. This is a temporary
	 * workaround until a proper bounce buffer is in place.
	 */
	if (dma) {
		if (((uint32_t)data) & (ARCH_DMA_MINALIGN - 1))
			dma = 0;
		if (((uint32_t)len) & (ARCH_DMA_MINALIGN - 1))
			dma = 0;
	}

	if (!dma || (len < MXSSSP_SMALL_TRANSFER)) {
		writel(SSP_CTRL1_DMA_ENABLE, &ssp_regs->hw_ssp_ctrl1_clr);
		return mxs_spi_xfer_pio(mxs_slave, data, len, write, flags);
	} else {
		writel(SSP_CTRL1_DMA_ENABLE, &ssp_regs->hw_ssp_ctrl1_set);
		return mxs_spi_xfer_dma(mxs_slave, data, len, write, flags);
	}
}