// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2018 Xilinx
 *
 * Xilinx ZynqMP Generic Quad-SPI (QSPI) controller driver (master mode only)
 */

#include <common.h>
#include <asm/arch/clk.h>
#include <asm/arch/hardware.h>
#include <asm/arch/sys_proto.h>
#include <asm/io.h>
#include <clk.h>
#include <dm.h>
#include <malloc.h>
#include <memalign.h>
#include <spi.h>
#include <ubi_uboot.h>
#include <wait_bit.h>

#define GQSPI_GFIFO_STRT_MODE_MASK	BIT(29)
#define GQSPI_CONFIG_MODE_EN_MASK	(3 << 30)
#define GQSPI_CONFIG_DMA_MODE		(2 << 30)
#define GQSPI_CONFIG_CPHA_MASK		BIT(2)
#define GQSPI_CONFIG_CPOL_MASK		BIT(1)

/*
 * QSPI Interrupt Registers bit Masks
 *
 * All the four interrupt registers (Status/Mask/Enable/Disable) have the same
 * bit definitions.
 */
#define GQSPI_IXR_TXNFULL_MASK		0x00000004 /* QSPI TX FIFO Not Full */
#define GQSPI_IXR_TXFULL_MASK		0x00000008 /* QSPI TX FIFO is full */
#define GQSPI_IXR_RXNEMTY_MASK		0x00000010 /* QSPI RX FIFO Not Empty */
#define GQSPI_IXR_GFEMTY_MASK		0x00000080 /* QSPI Generic FIFO Empty */
#define GQSPI_IXR_ALL_MASK		(GQSPI_IXR_TXNFULL_MASK | \
					 GQSPI_IXR_RXNEMTY_MASK)

/*
 * QSPI Enable Register bit Masks
 *
 * This register is used to enable or disable the QSPI controller
 */
#define GQSPI_ENABLE_ENABLE_MASK	0x00000001 /* QSPI Enable Bit Mask */

#define GQSPI_GFIFO_LOW_BUS		BIT(14)
#define GQSPI_GFIFO_CS_LOWER		BIT(12)
#define GQSPI_GFIFO_UP_BUS		BIT(15)
#define GQSPI_GFIFO_CS_UPPER		BIT(13)
#define GQSPI_SPI_MODE_QSPI		(3 << 10)
#define GQSPI_SPI_MODE_SPI		BIT(10)
#define GQSPI_SPI_MODE_DUAL_SPI		(2 << 10)
#define GQSPI_IMD_DATA_CS_ASSERT	5
#define GQSPI_IMD_DATA_CS_DEASSERT	5
#define GQSPI_GFIFO_TX			BIT(16)
#define GQSPI_GFIFO_RX			BIT(17)
#define GQSPI_GFIFO_STRIPE_MASK		BIT(18)
#define GQSPI_GFIFO_IMD_MASK		0xFF
#define GQSPI_GFIFO_EXP_MASK		BIT(9)
#define GQSPI_GFIFO_DATA_XFR_MASK	BIT(8)
#define GQSPI_STRT_GEN_FIFO		BIT(28)
#define GQSPI_GEN_FIFO_STRT_MOD		BIT(29)
#define GQSPI_GFIFO_WP_HOLD		BIT(19)
#define GQSPI_BAUD_DIV_MASK		(7 << 3)
#define GQSPI_DFLT_BAUD_RATE_DIV	BIT(3)
#define GQSPI_GFIFO_ALL_INT_MASK	0xFBE
#define GQSPI_DMA_DST_I_STS_DONE	BIT(1)
#define GQSPI_DMA_DST_I_STS_MASK	0xFE
#define MODEBITS			0x6

#define GQSPI_GFIFO_SELECT		BIT(0)
#define GQSPI_FIFO_THRESHOLD		1

#define SPI_XFER_ON_BOTH		0
#define SPI_XFER_ON_LOWER		1
#define SPI_XFER_ON_UPPER		2

#define GQSPI_DMA_ALIGN			0x4
#define GQSPI_MAX_BAUD_RATE_VAL		7
#define GQSPI_DFLT_BAUD_RATE_VAL	2

#define GQSPI_TIMEOUT			100000000

#define GQSPI_BAUD_DIV_SHIFT		2
#define GQSPI_LPBK_DLY_ADJ_LPBK_SHIFT	5
#define GQSPI_LPBK_DLY_ADJ_DLY_1	0x2
#define GQSPI_LPBK_DLY_ADJ_DLY_1_SHIFT	3
#define GQSPI_LPBK_DLY_ADJ_DLY_0	0x3
#define GQSPI_USE_DATA_DLY		0x1
#define GQSPI_USE_DATA_DLY_SHIFT	31
#define GQSPI_DATA_DLY_ADJ_VALUE	0x2
#define GQSPI_DATA_DLY_ADJ_SHIFT	28
#define TAP_DLY_BYPASS_LQSPI_RX_VALUE	0x1
#define TAP_DLY_BYPASS_LQSPI_RX_SHIFT	2
#define GQSPI_DATA_DLY_ADJ_OFST		0x000001F8
#define IOU_TAPDLY_BYPASS_OFST		0xFF180390
#define GQSPI_LPBK_DLY_ADJ_LPBK_MASK	0x00000020
#define GQSPI_FREQ_40MHZ		40000000
#define GQSPI_FREQ_100MHZ		100000000
#define GQSPI_FREQ_150MHZ		150000000
#define IOU_TAPDLY_BYPASS_MASK		0x7

#define GQSPI_REG_OFFSET		0x100
#define GQSPI_DMA_REG_OFFSET		0x800

/* QSPI register offsets */
struct zynqmp_qspi_regs {
	u32 confr;	/* 0x00 */
	u32 isr;	/* 0x04 */
	u32 ier;	/* 0x08 */
	u32 idisr;	/* 0x0C */
	u32 imaskr;	/* 0x10 */
	u32 enbr;	/* 0x14 */
	u32 dr;		/* 0x18 */
	u32 txd0r;	/* 0x1C */
	u32 drxr;	/* 0x20 */
	u32 sicr;	/* 0x24 */
	u32 txftr;	/* 0x28 */
	u32 rxftr;	/* 0x2C */
	u32 gpior;	/* 0x30 */
	u32 reserved0;	/* 0x34 */
	u32 lpbkdly;	/* 0x38 */
	u32 reserved1;	/* 0x3C */
	u32 genfifo;	/* 0x40 */
	u32 gqspisel;	/* 0x44 */
	u32 reserved2;	/* 0x48 */
	u32 gqfifoctrl;	/* 0x4C */
	u32 gqfthr;	/* 0x50 */
	u32 gqpollcfg;	/* 0x54 */
	u32 gqpollto;	/* 0x58 */
	u32 gqxfersts;	/* 0x5C */
	u32 gqfifosnap;	/* 0x60 */
	u32 gqrxcpy;	/* 0x64 */
	u32 reserved3[36];	/* 0x68 */
	u32 gqspidlyadj;	/* 0xF8 */
};

struct zynqmp_qspi_dma_regs {
	u32 dmadst;	/* 0x00 */
	u32 dmasize;	/* 0x04 */
	u32 dmasts;	/* 0x08 */
	u32 dmactrl;	/* 0x0C */
	u32 reserved0;	/* 0x10 */
	u32 dmaisr;	/* 0x14 */
	u32 dmaier;	/* 0x18 */
	u32 dmaidr;	/* 0x1C */
	u32 dmaimr;	/* 0x20 */
	u32 dmactrl2;	/* 0x24 */
	u32 dmadstmsb;	/* 0x28 */
};

DECLARE_GLOBAL_DATA_PTR;

struct zynqmp_qspi_platdata {
	struct zynqmp_qspi_regs *regs;
	struct zynqmp_qspi_dma_regs *dma_regs;
	u32 frequency;
	u32 speed_hz;
};

struct zynqmp_qspi_priv {
	struct zynqmp_qspi_regs *regs;
	struct zynqmp_qspi_dma_regs *dma_regs;
	const void *tx_buf;
	void *rx_buf;
	unsigned int len;
	int bytes_to_transfer;
	int bytes_to_receive;
	unsigned int is_inst;
	unsigned int cs_change:1;
};

static int zynqmp_qspi_ofdata_to_platdata(struct udevice *bus)
{
	struct zynqmp_qspi_platdata *plat = bus->platdata;

	debug("%s\n", __func__);

	plat->regs = (struct zynqmp_qspi_regs *)(devfdt_get_addr(bus) +
						 GQSPI_REG_OFFSET);
	plat->dma_regs = (struct zynqmp_qspi_dma_regs *)
			  (devfdt_get_addr(bus) + GQSPI_DMA_REG_OFFSET);

	return 0;
}

static void zynqmp_qspi_init_hw(struct zynqmp_qspi_priv *priv)
{
	u32 config_reg;
	struct zynqmp_qspi_regs *regs = priv->regs;

	writel(GQSPI_GFIFO_SELECT, &regs->gqspisel);
	writel(GQSPI_GFIFO_ALL_INT_MASK, &regs->idisr);
	writel(GQSPI_FIFO_THRESHOLD, &regs->txftr);
	writel(GQSPI_FIFO_THRESHOLD, &regs->rxftr);
	writel(GQSPI_GFIFO_ALL_INT_MASK, &regs->isr);

	config_reg = readl(&regs->confr);
	config_reg &= ~(GQSPI_GFIFO_STRT_MODE_MASK |
			GQSPI_CONFIG_MODE_EN_MASK);
	config_reg |= GQSPI_CONFIG_DMA_MODE |
		      GQSPI_GFIFO_WP_HOLD |
		      GQSPI_DFLT_BAUD_RATE_DIV;
	writel(config_reg, &regs->confr);

	writel(GQSPI_ENABLE_ENABLE_MASK, &regs->enbr);
}

static u32 zynqmp_qspi_bus_select(struct zynqmp_qspi_priv *priv)
{
	u32 gqspi_fifo_reg = 0;

	gqspi_fifo_reg = GQSPI_GFIFO_LOW_BUS |
			 GQSPI_GFIFO_CS_LOWER;

	return gqspi_fifo_reg;
}

static void zynqmp_qspi_fill_gen_fifo(struct zynqmp_qspi_priv *priv,
				      u32 gqspi_fifo_reg)
{
	struct zynqmp_qspi_regs *regs = priv->regs;
	int ret = 0;

	ret = wait_for_bit_le32(&regs->isr, GQSPI_IXR_GFEMTY_MASK, 1,
				GQSPI_TIMEOUT, 1);
	if (ret)
		printf("%s Timeout\n", __func__);

	writel(gqspi_fifo_reg, &regs->genfifo);
}

static void zynqmp_qspi_chipselect(struct zynqmp_qspi_priv *priv, int is_on)
{
	u32 gqspi_fifo_reg = 0;

	if (is_on) {
		gqspi_fifo_reg = zynqmp_qspi_bus_select(priv);
		gqspi_fifo_reg |= GQSPI_SPI_MODE_SPI |
				  GQSPI_IMD_DATA_CS_ASSERT;
	} else {
		gqspi_fifo_reg = GQSPI_GFIFO_LOW_BUS;
		gqspi_fifo_reg |= GQSPI_IMD_DATA_CS_DEASSERT;
	}

	debug("GFIFO_CMD_CS: 0x%x\n", gqspi_fifo_reg);

	zynqmp_qspi_fill_gen_fifo(priv, gqspi_fifo_reg);
}

void zynqmp_qspi_set_tapdelay(struct udevice *bus, u32 baudrateval)
{
	struct zynqmp_qspi_platdata *plat = bus->platdata;
	struct zynqmp_qspi_priv *priv = dev_get_priv(bus);
	struct zynqmp_qspi_regs *regs = priv->regs;
	u32 tapdlybypass = 0, lpbkdlyadj = 0, datadlyadj = 0, clk_rate;
	u32 reqhz = 0;

	clk_rate = plat->frequency;
	reqhz = (clk_rate / (GQSPI_BAUD_DIV_SHIFT << baudrateval));

	debug("%s, req_hz:%d, clk_rate:%d, baudrateval:%d\n",
	      __func__, reqhz, clk_rate, baudrateval);

	if (reqhz < GQSPI_FREQ_40MHZ) {
		zynqmp_mmio_read(IOU_TAPDLY_BYPASS_OFST, &tapdlybypass);
		tapdlybypass |= (TAP_DLY_BYPASS_LQSPI_RX_VALUE <<
				 TAP_DLY_BYPASS_LQSPI_RX_SHIFT);
	} else if (reqhz < GQSPI_FREQ_100MHZ) {
		zynqmp_mmio_read(IOU_TAPDLY_BYPASS_OFST, &tapdlybypass);
		tapdlybypass |= (TAP_DLY_BYPASS_LQSPI_RX_VALUE <<
				 TAP_DLY_BYPASS_LQSPI_RX_SHIFT);
		lpbkdlyadj = readl(&regs->lpbkdly);
		lpbkdlyadj |= (GQSPI_LPBK_DLY_ADJ_LPBK_MASK);
		datadlyadj = readl(&regs->gqspidlyadj);
		datadlyadj |= ((GQSPI_USE_DATA_DLY << GQSPI_USE_DATA_DLY_SHIFT)
				| (GQSPI_DATA_DLY_ADJ_VALUE <<
				   GQSPI_DATA_DLY_ADJ_SHIFT));
	} else if (reqhz < GQSPI_FREQ_150MHZ) {
		lpbkdlyadj = readl(&regs->lpbkdly);
		lpbkdlyadj |= ((GQSPI_LPBK_DLY_ADJ_LPBK_MASK) |
				GQSPI_LPBK_DLY_ADJ_DLY_0);
	}

	zynqmp_mmio_write(IOU_TAPDLY_BYPASS_OFST, IOU_TAPDLY_BYPASS_MASK,
			  tapdlybypass);
	writel(lpbkdlyadj, &regs->lpbkdly);
	writel(datadlyadj, &regs->gqspidlyadj);
}

static int zynqmp_qspi_set_speed(struct udevice *bus, uint speed)
{
	struct zynqmp_qspi_platdata *plat = bus->platdata;
	struct zynqmp_qspi_priv *priv = dev_get_priv(bus);
	struct zynqmp_qspi_regs *regs = priv->regs;
	u32 confr;
	u8 baud_rate_val = 0;

	debug("%s\n", __func__);
	if (speed > plat->frequency)
		speed = plat->frequency;

	/* Set the clock frequency */
	confr = readl(&regs->confr);
	if (speed == 0) {
		/* Use the default baud rate divisor (/8) if the requested speed is 0 */
		baud_rate_val = GQSPI_DFLT_BAUD_RATE_VAL;
	} else if (plat->speed_hz != speed) {
		while ((baud_rate_val < 8) &&
		       ((plat->frequency /
			 (2 << baud_rate_val)) > speed))
			baud_rate_val++;

		if (baud_rate_val > GQSPI_MAX_BAUD_RATE_VAL)
			baud_rate_val = GQSPI_DFLT_BAUD_RATE_VAL;

		plat->speed_hz = plat->frequency / (2 << baud_rate_val);
	}
	confr &= ~GQSPI_BAUD_DIV_MASK;
	confr |= (baud_rate_val << 3);
	writel(confr, &regs->confr);

	zynqmp_qspi_set_tapdelay(bus, baud_rate_val);
	debug("regs=%p, speed=%d\n", priv->regs, plat->speed_hz);

	return 0;
}

static int zynqmp_qspi_probe(struct udevice *bus)
{
	struct zynqmp_qspi_platdata *plat = dev_get_platdata(bus);
	struct zynqmp_qspi_priv *priv = dev_get_priv(bus);
	struct clk clk;
	unsigned long clock;
	int ret;

	debug("%s: bus:%p, priv:%p\n", __func__, bus, priv);

	priv->regs = plat->regs;
	priv->dma_regs = plat->dma_regs;

	ret = clk_get_by_index(bus, 0, &clk);
	if (ret < 0) {
		dev_err(bus, "failed to get clock\n");
		return ret;
	}

	clock = clk_get_rate(&clk);
	if (IS_ERR_VALUE(clock)) {
		dev_err(bus, "failed to get rate\n");
		return clock;
	}
	debug("%s: CLK %ld\n", __func__, clock);

	ret = clk_enable(&clk);
	if (ret && ret != -ENOSYS) {
		dev_err(bus, "failed to enable clock\n");
		return ret;
	}
	plat->frequency = clock;
	plat->speed_hz = plat->frequency / 2;

	/* Init the ZynqMP QSPI controller */
	zynqmp_qspi_init_hw(priv);

	return 0;
}

static int zynqmp_qspi_set_mode(struct udevice *bus, uint mode)
{
	struct zynqmp_qspi_priv *priv = dev_get_priv(bus);
	struct zynqmp_qspi_regs *regs = priv->regs;
	u32 confr;

	debug("%s\n", __func__);
	/* Set the SPI Clock phase and polarities */
	confr = readl(&regs->confr);
	confr &= ~(GQSPI_CONFIG_CPHA_MASK |
		   GQSPI_CONFIG_CPOL_MASK);

	if (mode & SPI_CPHA)
		confr |= GQSPI_CONFIG_CPHA_MASK;
	if (mode & SPI_CPOL)
		confr |= GQSPI_CONFIG_CPOL_MASK;

	writel(confr, &regs->confr);

	return 0;
}

static int zynqmp_qspi_fill_tx_fifo(struct zynqmp_qspi_priv *priv, u32 size)
{
	u32 data;
	int ret = 0;
	struct zynqmp_qspi_regs *regs = priv->regs;
	u32 *buf = (u32 *)priv->tx_buf;
	u32 len = size;

	debug("TxFIFO: 0x%x, size: 0x%x\n", readl(&regs->isr),
	      size);

	while (size) {
		ret = wait_for_bit_le32(&regs->isr, GQSPI_IXR_TXNFULL_MASK, 1,
					GQSPI_TIMEOUT, 1);
		if (ret) {
			printf("%s: Timeout\n", __func__);
			return ret;
		}

		if (size >= 4) {
			writel(*buf, &regs->txd0r);
			buf++;
			size -= 4;
		} else {
			/*
			 * Pack the remaining 1-3 bytes into one word and pad
			 * the unused upper bytes with ones.
			 */
			switch (size) {
			case 1:
				data = *((u8 *)buf);
				data |= GENMASK(31, 8);
				break;
			case 2:
				data = *((u16 *)buf);
				data |= GENMASK(31, 16);
				break;
			case 3:
				data = *((u16 *)buf);
				data |= (*((u8 *)buf + 2) << 16);
				data |= GENMASK(31, 24);
				break;
			}
			writel(data, &regs->txd0r);
			size = 0;
		}
	}

	priv->tx_buf += len;
	return 0;
}

static void zynqmp_qspi_genfifo_cmd(struct zynqmp_qspi_priv *priv)
{
	u32 gen_fifo_cmd;
	u32 bytecount = 0;

	while (priv->len) {
		gen_fifo_cmd = zynqmp_qspi_bus_select(priv);
		gen_fifo_cmd |= GQSPI_GFIFO_TX | GQSPI_SPI_MODE_SPI;
		gen_fifo_cmd |= *(u8 *)priv->tx_buf;
		bytecount++;
		priv->len--;
		priv->tx_buf = (u8 *)priv->tx_buf + 1;

		debug("GFIFO_CMD_Cmd = 0x%x\n", gen_fifo_cmd);

		zynqmp_qspi_fill_gen_fifo(priv, gen_fifo_cmd);
	}
}

static u32 zynqmp_qspi_calc_exp(struct zynqmp_qspi_priv *priv,
				u32 *gen_fifo_cmd)
{
	u32 expval = 8;
	u32 len;

	while (1) {
		if (priv->len > 255) {
			if (priv->len & (1 << expval)) {
				*gen_fifo_cmd &= ~GQSPI_GFIFO_IMD_MASK;
				*gen_fifo_cmd |= GQSPI_GFIFO_EXP_MASK;
				*gen_fifo_cmd |= expval;
				priv->len -= (1 << expval);
				return expval;
			}
			expval++;
		} else {
			*gen_fifo_cmd &= ~(GQSPI_GFIFO_IMD_MASK |
					   GQSPI_GFIFO_EXP_MASK);
			*gen_fifo_cmd |= (u8)priv->len;
			len = (u8)priv->len;
			priv->len = 0;
			return len;
		}
	}
}

static int zynqmp_qspi_genfifo_fill_tx(struct zynqmp_qspi_priv *priv)
{
	u32 gen_fifo_cmd;
	u32 len;
	int ret = 0;

	gen_fifo_cmd = zynqmp_qspi_bus_select(priv);
	gen_fifo_cmd |= GQSPI_GFIFO_TX |
			GQSPI_GFIFO_DATA_XFR_MASK;

	gen_fifo_cmd |= GQSPI_SPI_MODE_SPI;

	while (priv->len) {
		len = zynqmp_qspi_calc_exp(priv, &gen_fifo_cmd);
		zynqmp_qspi_fill_gen_fifo(priv, gen_fifo_cmd);

		debug("GFIFO_CMD_TX:0x%x\n", gen_fifo_cmd);

		/* In exponent mode the transfer length is 2^len bytes */
		if (gen_fifo_cmd & GQSPI_GFIFO_EXP_MASK)
			ret = zynqmp_qspi_fill_tx_fifo(priv,
						       1 << len);
		else
			ret = zynqmp_qspi_fill_tx_fifo(priv,
						       len);

		if (ret)
			return ret;
	}
	return ret;
}

static int zynqmp_qspi_start_dma(struct zynqmp_qspi_priv *priv,
				 u32 gen_fifo_cmd, u32 *buf)
{
	u32 addr;
	u32 size, len;
	u32 actuallen = priv->len;
	int ret = 0;
	struct zynqmp_qspi_dma_regs *dma_regs = priv->dma_regs;

	writel((unsigned long)buf, &dma_regs->dmadst);
	writel(roundup(priv->len, ARCH_DMA_MINALIGN), &dma_regs->dmasize);
	writel(GQSPI_DMA_DST_I_STS_MASK, &dma_regs->dmaier);
	addr = (unsigned long)buf;
	size = roundup(priv->len, ARCH_DMA_MINALIGN);
	flush_dcache_range(addr, addr + size);

	while (priv->len) {
		len = zynqmp_qspi_calc_exp(priv, &gen_fifo_cmd);
		if (!(gen_fifo_cmd & GQSPI_GFIFO_EXP_MASK) &&
		    (len % ARCH_DMA_MINALIGN)) {
			gen_fifo_cmd &= ~GENMASK(7, 0);
			gen_fifo_cmd |= roundup(len, ARCH_DMA_MINALIGN);
		}
		zynqmp_qspi_fill_gen_fifo(priv, gen_fifo_cmd);

		debug("GFIFO_CMD_RX:0x%x\n", gen_fifo_cmd);
	}

	ret = wait_for_bit_le32(&dma_regs->dmaisr, GQSPI_DMA_DST_I_STS_DONE,
				1, GQSPI_TIMEOUT, 1);
	if (ret) {
		printf("DMA Timeout:0x%x\n", readl(&dma_regs->dmaisr));
		return -ETIMEDOUT;
	}

	writel(GQSPI_DMA_DST_I_STS_DONE, &dma_regs->dmaisr);

	debug("buf:0x%lx, rxbuf:0x%lx, *buf:0x%x len: 0x%x\n",
	      (unsigned long)buf, (unsigned long)priv->rx_buf, *buf,
	      actuallen);

	if (buf != priv->rx_buf)
		memcpy(priv->rx_buf, buf, actuallen);

	return 0;
}

static int zynqmp_qspi_genfifo_fill_rx(struct zynqmp_qspi_priv *priv)
{
	u32 gen_fifo_cmd;
	u32 *buf;
	u32 actuallen = priv->len;

	gen_fifo_cmd = zynqmp_qspi_bus_select(priv);
	gen_fifo_cmd |= GQSPI_GFIFO_RX |
			GQSPI_GFIFO_DATA_XFR_MASK;

	gen_fifo_cmd |= GQSPI_SPI_MODE_SPI;

	/*
	 * Check if the receive buffer is aligned to 4 bytes and the length
	 * is a multiple of four bytes, as we are using DMA to receive.
	 */
	if (!((unsigned long)priv->rx_buf & (GQSPI_DMA_ALIGN - 1)) &&
	    !(actuallen % GQSPI_DMA_ALIGN)) {
		buf = (u32 *)priv->rx_buf;
		return zynqmp_qspi_start_dma(priv, gen_fifo_cmd, buf);
	}

	ALLOC_CACHE_ALIGN_BUFFER(u8, tmp, roundup(priv->len,
						  GQSPI_DMA_ALIGN));
	buf = (u32 *)tmp;
	return zynqmp_qspi_start_dma(priv, gen_fifo_cmd, buf);
}

static int zynqmp_qspi_start_transfer(struct zynqmp_qspi_priv *priv)
{
	int ret = 0;

	if (priv->is_inst) {
		if (priv->tx_buf)
			zynqmp_qspi_genfifo_cmd(priv);
		else
			return -EINVAL;
	} else {
		if (priv->tx_buf)
			ret = zynqmp_qspi_genfifo_fill_tx(priv);
		else if (priv->rx_buf)
			ret = zynqmp_qspi_genfifo_fill_rx(priv);
		else
			return -EINVAL;
	}
	return ret;
}

static int zynqmp_qspi_transfer(struct zynqmp_qspi_priv *priv)
{
	static unsigned int cs_change = 1;
	int status = 0;

	debug("%s\n", __func__);

	while (1) {
		/* Select the chip if required */
		if (cs_change)
			zynqmp_qspi_chipselect(priv, 1);

		cs_change = priv->cs_change;

		if (!priv->tx_buf && !priv->rx_buf && priv->len) {
			status = -EINVAL;
			break;
		}

		/* Request the transfer */
		if (priv->len) {
			status = zynqmp_qspi_start_transfer(priv);
			priv->is_inst = 0;
			if (status < 0)
				break;
		}

		if (cs_change)
			/* Deselect the chip */
			zynqmp_qspi_chipselect(priv, 0);
		break;
	}

	return status;
}

static int zynqmp_qspi_claim_bus(struct udevice *dev)
{
	struct udevice *bus = dev->parent;
	struct zynqmp_qspi_priv *priv = dev_get_priv(bus);
	struct zynqmp_qspi_regs *regs = priv->regs;

	writel(GQSPI_ENABLE_ENABLE_MASK, &regs->enbr);

	return 0;
}

static int zynqmp_qspi_release_bus(struct udevice *dev)
{
	struct udevice *bus = dev->parent;
	struct zynqmp_qspi_priv *priv = dev_get_priv(bus);
	struct zynqmp_qspi_regs *regs = priv->regs;

	writel(~GQSPI_ENABLE_ENABLE_MASK, &regs->enbr);

	return 0;
}

int zynqmp_qspi_xfer(struct udevice *dev, unsigned int bitlen, const void *dout,
		     void *din, unsigned long flags)
{
	struct udevice *bus = dev->parent;
	struct zynqmp_qspi_priv *priv = dev_get_priv(bus);

	debug("%s: priv: 0x%08lx bitlen: %d dout: 0x%08lx ", __func__,
	      (unsigned long)priv, bitlen, (unsigned long)dout);
	debug("din: 0x%08lx flags: 0x%lx\n", (unsigned long)din, flags);

	priv->tx_buf = dout;
	priv->rx_buf = din;
	priv->len = bitlen / 8;

	/*
	 * Assume that the beginning of a transfer with bits to
	 * transmit must contain a device command.
	 */
	if (dout && flags & SPI_XFER_BEGIN)
		priv->is_inst = 1;
	else
		priv->is_inst = 0;

	if (flags & SPI_XFER_END)
		priv->cs_change = 1;
	else
		priv->cs_change = 0;

	return zynqmp_qspi_transfer(priv);
}

static const struct dm_spi_ops zynqmp_qspi_ops = {
	.claim_bus = zynqmp_qspi_claim_bus,
	.release_bus = zynqmp_qspi_release_bus,
	.xfer = zynqmp_qspi_xfer,
	.set_speed = zynqmp_qspi_set_speed,
	.set_mode = zynqmp_qspi_set_mode,
};

static const struct udevice_id zynqmp_qspi_ids[] = {
	{ .compatible = "xlnx,zynqmp-qspi-1.0" },
	{ }
};

U_BOOT_DRIVER(zynqmp_qspi) = {
	.name = "zynqmp_qspi",
	.id = UCLASS_SPI,
	.of_match = zynqmp_qspi_ids,
	.ops = &zynqmp_qspi_ops,
	.ofdata_to_platdata = zynqmp_qspi_ofdata_to_platdata,
	.platdata_auto_alloc_size = sizeof(struct zynqmp_qspi_platdata),
	.priv_auto_alloc_size = sizeof(struct zynqmp_qspi_priv),
	.probe = zynqmp_qspi_probe,
};