1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * ASPEED AST2500 FMC/SPI Controller driver 4 * 5 * Copyright (c) 2015-2018, IBM Corporation. 6 */ 7 8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 9 10 #include <common.h> 11 #include <clk.h> 12 #include <dm.h> 13 #include <spi.h> 14 #include <spi_flash.h> 15 #include <asm/io.h> 16 #include <linux/ioport.h> 17 18 #define ASPEED_SPI_MAX_CS 3 19 #define FLASH_CALIBRATION_LEN 0x400 20 21 struct aspeed_spi_regs { 22 u32 conf; /* 0x00 CE Type Setting */ 23 u32 ctrl; /* 0x04 Control */ 24 u32 intr_ctrl; /* 0x08 Interrupt Control and Status */ 25 u32 cmd_ctrl; /* 0x0c Command Control */ 26 u32 ce_ctrl[ASPEED_SPI_MAX_CS]; /* 0x10 .. 0x18 CEx Control */ 27 u32 _reserved0[5]; /* .. */ 28 u32 segment_addr[ASPEED_SPI_MAX_CS]; 29 /* 0x30 .. 0x38 Segment Address */ 30 u32 _reserved1[5]; /* .. */ 31 u32 soft_rst_cmd_ctrl; /* 0x50 Auto Soft-Reset Command Control */ 32 u32 _reserved2[11]; /* .. */ 33 u32 dma_ctrl; /* 0x80 DMA Control/Status */ 34 u32 dma_flash_addr; /* 0x84 DMA Flash Side Address */ 35 u32 dma_dram_addr; /* 0x88 DMA DRAM Side Address */ 36 u32 dma_len; /* 0x8c DMA Length Register */ 37 u32 dma_checksum; /* 0x90 Checksum Calculation Result */ 38 u32 timings; /* 0x94 Read Timing Compensation */ 39 40 /* not used */ 41 u32 soft_strap_status; /* 0x9c Software Strap Status */ 42 u32 write_cmd_filter_ctrl; /* 0xa0 Write Command Filter Control */ 43 u32 write_addr_filter_ctrl; /* 0xa4 Write Address Filter Control */ 44 u32 lock_ctrl_reset; /* 0xa8 Lock Control (SRST#) */ 45 u32 lock_ctrl_wdt; /* 0xac Lock Control (Watchdog) */ 46 u32 write_addr_filter[5]; /* 0xb0 Write Address Filter */ 47 }; 48 49 /* CE Type Setting Register */ 50 #define CONF_ENABLE_W2 BIT(18) 51 #define CONF_ENABLE_W1 BIT(17) 52 #define CONF_ENABLE_W0 BIT(16) 53 #define CONF_FLASH_TYPE2 4 54 #define CONF_FLASH_TYPE1 2 /* Hardwired to SPI */ 55 #define CONF_FLASH_TYPE0 0 /* Hardwired to SPI */ 56 #define CONF_FLASH_TYPE_NOR 0x0 57 #define CONF_FLASH_TYPE_SPI 0x2 58 59 /* CE Control Register */ 60 #define CTRL_EXTENDED2 BIT(2) /* 32 bit addressing for SPI */ 61 #define CTRL_EXTENDED1 BIT(1) /* 32 bit addressing for SPI */ 62 #define CTRL_EXTENDED0 BIT(0) /* 32 bit addressing for SPI */ 63 64 /* Interrupt Control and Status Register */ 65 #define INTR_CTRL_DMA_STATUS BIT(11) 66 #define INTR_CTRL_CMD_ABORT_STATUS BIT(10) 67 #define INTR_CTRL_WRITE_PROTECT_STATUS BIT(9) 68 #define INTR_CTRL_DMA_EN BIT(3) 69 #define INTR_CTRL_CMD_ABORT_EN BIT(2) 70 #define INTR_CTRL_WRITE_PROTECT_EN BIT(1) 71 72 /* CEx Control Register */ 73 #define CE_CTRL_IO_MODE_MASK GENMASK(31, 28) 74 #define CE_CTRL_IO_QPI_DATA BIT(31) 75 #define CE_CTRL_IO_DUAL_DATA BIT(29) 76 #define CE_CTRL_IO_DUAL_ADDR_DATA (BIT(29) | BIT(28)) 77 #define CE_CTRL_IO_QUAD_DATA BIT(30) 78 #define CE_CTRL_IO_QUAD_ADDR_DATA (BIT(30) | BIT(28)) 79 #define CE_CTRL_CMD_SHIFT 16 80 #define CE_CTRL_CMD_MASK 0xff 81 #define CE_CTRL_CMD(cmd) \ 82 (((cmd) & CE_CTRL_CMD_MASK) << CE_CTRL_CMD_SHIFT) 83 #define CE_CTRL_DUMMY_HIGH_SHIFT 14 84 #define CE_CTRL_DUMMY_HIGH_MASK 0x1 85 #define CE_CTRL_CLOCK_FREQ_SHIFT 8 86 #define CE_CTRL_CLOCK_FREQ_MASK 0xf 87 #define CE_CTRL_CLOCK_FREQ(div) \ 88 (((div) & CE_CTRL_CLOCK_FREQ_MASK) << CE_CTRL_CLOCK_FREQ_SHIFT) 89 #define CE_G6_CTRL_CLOCK_FREQ(div) \ 90 ((((div) & CE_CTRL_CLOCK_FREQ_MASK) << CE_CTRL_CLOCK_FREQ_SHIFT) | (((div) & 0xf0) << 20)) 91 #define CE_CTRL_DUMMY_LOW_SHIFT 6 /* 2 bits [7:6] */ 92 #define CE_CTRL_DUMMY_LOW_MASK 0x3 93 #define CE_CTRL_DUMMY(dummy) \ 94 (((((dummy) >> 2) & CE_CTRL_DUMMY_HIGH_MASK) \ 95 << CE_CTRL_DUMMY_HIGH_SHIFT) | \ 96 (((dummy) & CE_CTRL_DUMMY_LOW_MASK) << CE_CTRL_DUMMY_LOW_SHIFT)) 97 #define CE_CTRL_STOP_ACTIVE BIT(2) 98 #define CE_CTRL_MODE_MASK 0x3 99 #define CE_CTRL_READMODE 0x0 100 #define CE_CTRL_FREADMODE 0x1 101 #define CE_CTRL_WRITEMODE 0x2 102 #define CE_CTRL_USERMODE 0x3 103 104 /* Auto Soft-Reset Command Control */ 105 #define SOFT_RST_CMD_EN GENMASK(1, 0) 106 107 /* 108 * The Segment Register uses a 8MB unit to encode the start address 109 * and the end address of the AHB window of a SPI flash device. 110 * Default segment addresses are : 111 * 112 * CE0 0x20000000 - 0x2fffffff 128MB 113 * CE1 0x28000000 - 0x29ffffff 32MB 114 * CE2 0x2a000000 - 0x2bffffff 32MB 115 * 116 * The full address space of the AHB window of the controller is 117 * covered and CE0 start address and CE2 end addresses are read-only. 118 */ 119 #define SEGMENT_ADDR_START(reg) ((((reg) >> 16) & 0xff) << 23) 120 #define SEGMENT_ADDR_END(reg) ((((reg) >> 24) & 0xff) << 23) 121 #define SEGMENT_ADDR_VALUE(start, end) \ 122 (((((start) >> 23) & 0xff) << 16) | ((((end) >> 23) & 0xff) << 24)) 123 124 #define G6_SEGMENT_ADDR_START(reg) (reg & 0xffff) 125 #define G6_SEGMENT_ADDR_END(reg) ((reg >> 16) & 0xffff) 126 #define G6_SEGMENT_ADDR_VALUE(start, end) \ 127 ((((start) >> 16) & 0xffff) | (((end) - 0x100000) & 0xffff0000)) 128 129 /* DMA Control/Status Register */ 130 #define DMA_CTRL_DELAY_SHIFT 8 131 #define DMA_CTRL_DELAY_MASK 0xf 132 #define G6_DMA_CTRL_DELAY_MASK 0xff 133 #define DMA_CTRL_FREQ_SHIFT 4 134 #define G6_DMA_CTRL_FREQ_SHIFT 16 135 136 #define DMA_CTRL_FREQ_MASK 0xf 137 #define TIMING_MASK(div, delay) \ 138 (((delay & DMA_CTRL_DELAY_MASK) << DMA_CTRL_DELAY_SHIFT) | \ 139 ((div & DMA_CTRL_FREQ_MASK) << DMA_CTRL_FREQ_SHIFT)) 140 #define G6_TIMING_MASK(div, delay) \ 141 (((delay & G6_DMA_CTRL_DELAY_MASK) << DMA_CTRL_DELAY_SHIFT) | \ 142 ((div & DMA_CTRL_FREQ_MASK) << G6_DMA_CTRL_FREQ_SHIFT)) 143 #define DMA_CTRL_CALIB BIT(3) 144 #define DMA_CTRL_CKSUM BIT(2) 145 #define DMA_CTRL_WRITE BIT(1) 146 #define DMA_CTRL_ENABLE BIT(0) 147 148 /* for ast2600 setting */ 149 #define SPI_3B_AUTO_CLR_REG 0x1e6e2510 150 #define SPI_3B_AUTO_CLR BIT(9) 151 152 153 /* 154 * flash related info 155 */ 156 struct aspeed_spi_flash { 157 u8 cs; 158 bool init; /* Initialized when the SPI bus is 159 * first claimed 160 */ 161 void __iomem *ahb_base; /* AHB Window for this device */ 162 u32 ahb_size; /* AHB Window segment size */ 163 u32 ce_ctrl_user; /* CE Control Register for USER mode */ 164 u32 ce_ctrl_fread; /* CE Control Register for FREAD mode */ 165 u32 iomode; 166 167 struct spi_flash *spi; /* Associated SPI Flash device */ 168 }; 169 170 struct aspeed_spi_priv { 171 struct aspeed_spi_regs *regs; 172 void __iomem *ahb_base; /* AHB Window for all flash devices */ 173 int new_ver; 174 u32 ahb_size; /* AHB Window segments size */ 175 176 ulong hclk_rate; /* AHB clock rate */ 177 u32 max_hz; 178 u8 num_cs; 179 bool is_fmc; 180 181 struct aspeed_spi_flash flashes[ASPEED_SPI_MAX_CS]; 182 u32 flash_count; 183 184 u8 cmd_buf[16]; /* SPI command in progress */ 185 size_t cmd_len; 186 }; 187 188 static struct aspeed_spi_flash *aspeed_spi_get_flash(struct udevice *dev) 189 { 190 struct dm_spi_slave_platdata *slave_plat = dev_get_parent_platdata(dev); 191 struct aspeed_spi_priv *priv = dev_get_priv(dev->parent); 192 u8 cs = slave_plat->cs; 193 194 if (cs >= priv->flash_count) { 195 pr_err("invalid CS %u\n", cs); 196 return NULL; 197 } 198 199 return &priv->flashes[cs]; 200 } 201 202 static u32 aspeed_g6_spi_hclk_divisor(struct aspeed_spi_priv *priv, u32 max_hz) 203 { 204 u32 hclk_rate = priv->hclk_rate; 205 /* HCLK/1 .. HCLK/16 */ 206 const u8 hclk_masks[] = { 207 15, 7, 14, 6, 13, 5, 12, 4, 11, 3, 10, 2, 9, 1, 8, 0 208 }; 209 u8 base_div = 0; 210 int done = 0; 211 u32 i, j = 0; 212 u32 hclk_div_setting = 0; 213 214 for (j = 0; j < 0xf; i++) { 215 for (i = 0; i < ARRAY_SIZE(hclk_masks); i++) { 216 base_div = j * 16; 217 if (max_hz >= (hclk_rate / ((i + 1) + base_div))) { 218 219 done = 1; 220 break; 221 } 222 } 223 if (done) 224 break; 225 } 226 227 debug("hclk=%d required=%d h_div %d, divisor is %d (mask %x) speed=%d\n", 228 hclk_rate, max_hz, j, i + 1, hclk_masks[i], hclk_rate / (i + 1 + base_div)); 229 230 hclk_div_setting = ((j << 4) | hclk_masks[i]); 231 232 return hclk_div_setting; 233 234 } 235 236 static u32 aspeed_spi_hclk_divisor(struct aspeed_spi_priv *priv, u32 max_hz) 237 { 238 u32 hclk_rate = priv->hclk_rate; 239 /* HCLK/1 .. HCLK/16 */ 240 const u8 hclk_masks[] = { 241 15, 7, 14, 6, 13, 5, 12, 4, 11, 3, 10, 2, 9, 1, 8, 0 242 }; 243 u32 i; 244 u32 hclk_div_setting = 0; 245 246 for (i = 0; i < ARRAY_SIZE(hclk_masks); i++) { 247 if (max_hz >= (hclk_rate / (i + 1))) 248 break; 249 } 250 debug("hclk=%d required=%d divisor is %d (mask %x) speed=%d\n", 251 hclk_rate, max_hz, i + 1, hclk_masks[i], hclk_rate / (i + 1)); 252 253 hclk_div_setting = hclk_masks[i]; 254 255 return hclk_div_setting; 256 } 257 258 /* 259 * Use some address/size under the first flash device CE0 260 */ 261 static u32 aspeed_spi_fmc_checksum(struct aspeed_spi_priv *priv, u8 div, 262 u8 delay) 263 { 264 u32 flash_addr = (u32)priv->ahb_base + 0x10000; 265 u32 flash_len = FLASH_CALIBRATION_LEN; 266 u32 dma_ctrl; 267 u32 checksum; 268 269 writel(flash_addr, &priv->regs->dma_flash_addr); 270 writel(flash_len, &priv->regs->dma_len); 271 272 /* 273 * When doing calibration, the SPI clock rate in the CE0 274 * Control Register and the data input delay cycles in the 275 * Read Timing Compensation Register are replaced by bit[11:4]. 276 */ 277 if(priv->new_ver) 278 dma_ctrl = DMA_CTRL_ENABLE | DMA_CTRL_CKSUM | DMA_CTRL_CALIB | 279 G6_TIMING_MASK(div, delay); 280 else 281 dma_ctrl = DMA_CTRL_ENABLE | DMA_CTRL_CKSUM | DMA_CTRL_CALIB | 282 TIMING_MASK(div, delay); 283 writel(dma_ctrl, &priv->regs->dma_ctrl); 284 while (!(readl(&priv->regs->intr_ctrl) & INTR_CTRL_DMA_STATUS)) 285 ; 286 287 writel(0x0, &priv->regs->intr_ctrl); 288 289 checksum = readl(&priv->regs->dma_checksum); 290 291 writel(0x0, &priv->regs->dma_ctrl); 292 return checksum; 293 } 294 295 static u32 aspeed_spi_read_checksum(struct aspeed_spi_priv *priv, u8 div, 296 u8 delay) 297 { 298 /* TODO(clg@kaod.org): the SPI controllers do not have the DMA 299 * registers. The algorithm is the same. 300 */ 301 if (!priv->is_fmc) { 302 pr_warn("No timing calibration support for SPI controllers"); 303 return 0xbadc0de; 304 } 305 306 return aspeed_spi_fmc_checksum(priv, div, delay); 307 } 308 309 #define TIMING_DELAY_DI_4NS BIT(3) 310 #define TIMING_DELAY_HCYCLE_MAX 5 311 312 static int aspeed_spi_timing_calibration(struct aspeed_spi_priv *priv) 313 { 314 /* HCLK/5 .. HCLK/1 */ 315 const u8 hclk_masks[] = {13, 6, 14, 7, 15}; 316 u32 timing_reg; 317 u32 checksum, gold_checksum; 318 int i, hcycle, delay_ns; 319 320 /* Use the ctrl setting in aspeed_spi_flash_init() to 321 * implement calibration process. 322 */ 323 timing_reg = readl(&priv->regs->timings); 324 if (timing_reg != 0) 325 return 0; 326 327 debug("Read timing calibration :\n"); 328 329 /* Compute reference checksum at lowest freq HCLK/16 */ 330 gold_checksum = aspeed_spi_read_checksum(priv, 0, 0); 331 332 /* Increase HCLK freq */ 333 if (priv->new_ver) { 334 for (i = 0; i < ARRAY_SIZE(hclk_masks) - 1; i++) { 335 u32 hdiv = 5 - i; 336 u32 hshift = (hdiv - 2) * 8; 337 bool pass = false; 338 u8 delay; 339 u16 first_delay = 0; 340 u16 end_delay = 0; 341 u32 cal_tmp; 342 u32 max_window_sz = 0; 343 u32 cur_window_sz = 0; 344 u32 tmp_delay; 345 346 debug("hdiv %d, hshift %d\n", hdiv, hshift); 347 if (priv->hclk_rate / hdiv > priv->max_hz) { 348 debug("skipping freq %ld\n", priv->hclk_rate / hdiv); 349 continue; 350 } 351 352 /* Try without the 4ns DI delay */ 353 hcycle = delay = 0; 354 debug("** Dealy Disable **\n"); 355 checksum = aspeed_spi_read_checksum(priv, hclk_masks[i], delay); 356 pass = (checksum == gold_checksum); 357 debug("HCLK/%d, no DI delay, %d HCLK cycle : %s\n", 358 hdiv, hcycle, pass ? "PASS" : "FAIL"); 359 360 /* All good for this freq */ 361 if (pass) 362 goto next_div; 363 364 /* Try each hcycle delay */ 365 for (hcycle = 0; hcycle <= TIMING_DELAY_HCYCLE_MAX; hcycle++) { 366 /* Increase DI delay by the step of 0.5ns */ 367 debug("** Delay Enable : hcycle %x ** \n", hcycle); 368 for (delay_ns = 0; delay_ns < 0xf; delay_ns++) { 369 tmp_delay = TIMING_DELAY_DI_4NS | hcycle | (delay_ns << 4); 370 checksum = aspeed_spi_read_checksum(priv, hclk_masks[i], 371 tmp_delay); 372 pass = (checksum == gold_checksum); 373 debug("HCLK/%d, DI delay, %d HCLK cycle, %d delay_ns : %s\n", 374 hdiv, hcycle, delay_ns, pass ? "PASS" : "FAIL"); 375 376 if (!pass) { 377 if (!first_delay) 378 continue; 379 else { 380 end_delay = (hcycle << 4) | (delay_ns); 381 end_delay = end_delay - 1; 382 /* Larger window size is found */ 383 if (cur_window_sz > max_window_sz) { 384 max_window_sz = cur_window_sz; 385 cal_tmp = (first_delay + end_delay) / 2; 386 delay = TIMING_DELAY_DI_4NS | 387 ((cal_tmp & 0xf) << 4) | 388 (cal_tmp >> 4); 389 } 390 debug("find end_delay %x %d %d\n", end_delay, 391 hcycle, delay_ns); 392 393 first_delay = 0; 394 end_delay = 0; 395 cur_window_sz = 0; 396 397 break; 398 } 399 } else { 400 if (!first_delay) { 401 first_delay = (hcycle << 4) | delay_ns; 402 debug("find first_delay %x %d %d\n", first_delay, hcycle, delay_ns); 403 } 404 /* Record current pass window size */ 405 cur_window_sz++; 406 } 407 } 408 } 409 410 if (pass) { 411 if (cur_window_sz > max_window_sz) { 412 max_window_sz = cur_window_sz; 413 end_delay = ((hcycle - 1) << 4) | (delay_ns - 1); 414 cal_tmp = (first_delay + end_delay) / 2; 415 delay = TIMING_DELAY_DI_4NS | 416 ((cal_tmp & 0xf) << 4) | 417 (cal_tmp >> 4); 418 } 419 } 420 next_div: 421 timing_reg &= ~(0xfu << hshift); 422 timing_reg |= delay << hshift; 423 debug("timing_reg %x, delay %x, hshift bit %d\n",timing_reg, delay, hshift); 424 } 425 } else { 426 for (i = 0; i < ARRAY_SIZE(hclk_masks); i++) { 427 u32 hdiv = 5 - i; 428 u32 hshift = (hdiv - 1) << 2; 429 bool pass = false; 430 u8 delay; 431 432 if (priv->hclk_rate / hdiv > priv->max_hz) { 433 debug("skipping freq %ld\n", priv->hclk_rate / hdiv); 434 continue; 435 } 436 437 /* Increase HCLK cycles until read succeeds */ 438 for (hcycle = 0; hcycle <= TIMING_DELAY_HCYCLE_MAX; hcycle++) { 439 /* Try first with a 4ns DI delay */ 440 delay = TIMING_DELAY_DI_4NS | hcycle; 441 checksum = aspeed_spi_read_checksum(priv, hclk_masks[i], 442 delay); 443 pass = (checksum == gold_checksum); 444 debug(" HCLK/%d, 4ns DI delay, %d HCLK cycle : %s\n", 445 hdiv, hcycle, pass ? "PASS" : "FAIL"); 446 447 /* Try again with more HCLK cycles */ 448 if (!pass) 449 continue; 450 451 /* Try without the 4ns DI delay */ 452 delay = hcycle; 453 checksum = aspeed_spi_read_checksum(priv, hclk_masks[i], 454 delay); 455 pass = (checksum == gold_checksum); 456 debug(" HCLK/%d, no DI delay, %d HCLK cycle : %s\n", 457 hdiv, hcycle, pass ? "PASS" : "FAIL"); 458 459 /* All good for this freq */ 460 if (pass) 461 break; 462 } 463 464 if (pass) { 465 timing_reg &= ~(0xfu << hshift); 466 timing_reg |= delay << hshift; 467 } 468 } 469 } 470 471 debug("Read Timing Compensation set to 0x%08x\n", timing_reg); 472 writel(timing_reg, &priv->regs->timings); 473 474 return 0; 475 } 476 477 static int aspeed_spi_controller_init(struct aspeed_spi_priv *priv) 478 { 479 int cs; 480 481 /* 482 * Enable write on all flash devices as USER command mode 483 * requires it. 484 */ 485 setbits_le32(&priv->regs->conf, 486 CONF_ENABLE_W2 | CONF_ENABLE_W1 | CONF_ENABLE_W0); 487 488 /* 489 * Set safe default settings for each device. These will be 490 * tuned after the SPI flash devices are probed. 491 */ 492 if (priv->new_ver) { 493 for (cs = 0; cs < priv->flash_count; cs++) { 494 struct aspeed_spi_flash *flash = &priv->flashes[cs]; 495 u32 seg_addr = readl(&priv->regs->segment_addr[cs]); 496 u32 addr_config = 0; 497 switch(cs) { 498 case 0: 499 flash->ahb_base = cs ? (void *)G6_SEGMENT_ADDR_START(seg_addr) : 500 priv->ahb_base; 501 debug("cs0 mem-map : %x \n", (u32)flash->ahb_base); 502 break; 503 case 1: 504 flash->ahb_base = priv->flashes[0].ahb_base + 0x8000000; //cs0 + 128Mb : use 64MB 505 debug("cs1 mem-map : %x end %x \n", (u32)flash->ahb_base, (u32)flash->ahb_base + 0x4000000); 506 addr_config = G6_SEGMENT_ADDR_VALUE((u32)flash->ahb_base, (u32)flash->ahb_base + 0x4000000); //add 512Mb 507 writel(addr_config, &priv->regs->segment_addr[cs]); 508 break; 509 case 2: 510 flash->ahb_base = priv->flashes[0].ahb_base + 0xc000000; //cs0 + 192Mb : use 64MB 511 debug("cs2 mem-map : %x end %x \n", (u32)flash->ahb_base, (u32)flash->ahb_base + 0x4000000); 512 addr_config = G6_SEGMENT_ADDR_VALUE((u32)flash->ahb_base, (u32)flash->ahb_base + 0x4000000); //add 512Mb 513 writel(addr_config, &priv->regs->segment_addr[cs]); 514 break; 515 } 516 flash->cs = cs; 517 flash->ce_ctrl_user = CE_CTRL_USERMODE; 518 flash->ce_ctrl_fread = CE_CTRL_READMODE; 519 } 520 } else { 521 for (cs = 0; cs < priv->flash_count; cs++) { 522 struct aspeed_spi_flash *flash = &priv->flashes[cs]; 523 u32 seg_addr = readl(&priv->regs->segment_addr[cs]); 524 /* 525 * The start address of the AHB window of CE0 is 526 * read-only and is the same as the address of the 527 * overall AHB window of the controller for all flash 528 * devices. 529 */ 530 flash->ahb_base = cs ? (void *)SEGMENT_ADDR_START(seg_addr) : 531 priv->ahb_base; 532 533 flash->cs = cs; 534 flash->ce_ctrl_user = CE_CTRL_USERMODE; 535 flash->ce_ctrl_fread = CE_CTRL_READMODE; 536 } 537 } 538 return 0; 539 } 540 541 static int aspeed_spi_read_from_ahb(void __iomem *ahb_base, void *buf, 542 size_t len) 543 { 544 size_t offset = 0; 545 546 if (!((uintptr_t)buf % 4)) { 547 readsl(ahb_base, buf, len >> 2); 548 offset = len & ~0x3; 549 len -= offset; 550 } 551 readsb(ahb_base, (u8 *)buf + offset, len); 552 553 return 0; 554 } 555 556 static int aspeed_spi_write_to_ahb(void __iomem *ahb_base, const void *buf, 557 size_t len) 558 { 559 size_t offset = 0; 560 561 if (!((uintptr_t)buf % 4)) { 562 writesl(ahb_base, buf, len >> 2); 563 offset = len & ~0x3; 564 len -= offset; 565 } 566 writesb(ahb_base, (u8 *)buf + offset, len); 567 568 return 0; 569 } 570 571 static void aspeed_spi_start_user(struct aspeed_spi_priv *priv, 572 struct aspeed_spi_flash *flash) 573 { 574 u32 ctrl_reg = flash->ce_ctrl_user | CE_CTRL_STOP_ACTIVE; 575 576 /* Deselect CS and set USER command mode */ 577 writel(ctrl_reg, &priv->regs->ce_ctrl[flash->cs]); 578 579 /* Select CS */ 580 clrbits_le32(&priv->regs->ce_ctrl[flash->cs], CE_CTRL_STOP_ACTIVE); 581 } 582 583 static void aspeed_spi_stop_user(struct aspeed_spi_priv *priv, 584 struct aspeed_spi_flash *flash) 585 { 586 /* Deselect CS first */ 587 setbits_le32(&priv->regs->ce_ctrl[flash->cs], CE_CTRL_STOP_ACTIVE); 588 589 /* Restore default command mode */ 590 writel(flash->ce_ctrl_fread, &priv->regs->ce_ctrl[flash->cs]); 591 } 592 593 static int aspeed_spi_read_reg(struct aspeed_spi_priv *priv, 594 struct aspeed_spi_flash *flash, 595 u8 opcode, u8 *read_buf, int len) 596 { 597 aspeed_spi_start_user(priv, flash); 598 aspeed_spi_write_to_ahb(flash->ahb_base, &opcode, 1); 599 aspeed_spi_read_from_ahb(flash->ahb_base, read_buf, len); 600 aspeed_spi_stop_user(priv, flash); 601 602 return 0; 603 } 604 605 static int aspeed_spi_write_reg(struct aspeed_spi_priv *priv, 606 struct aspeed_spi_flash *flash, 607 u8 opcode, const u8 *write_buf, int len) 608 { 609 aspeed_spi_start_user(priv, flash); 610 aspeed_spi_write_to_ahb(flash->ahb_base, &opcode, 1); 611 aspeed_spi_write_to_ahb(flash->ahb_base, write_buf, len); 612 aspeed_spi_stop_user(priv, flash); 613 614 debug("=== write opcode [%x] ==== \n", opcode); 615 switch(opcode) { 616 case SPINOR_OP_EN4B: 617 /* For ast2600, if 2 chips ABR mode is enabled, 618 * turn on 3B mode auto clear in order to avoid 619 * the scenario where spi controller is at 4B mode 620 * and flash site is at 3B mode after 3rd switch. 621 */ 622 if (priv->new_ver == 1 && (readl(SPI_3B_AUTO_CLR_REG) & SPI_3B_AUTO_CLR)) 623 writel(readl(&priv->regs->soft_rst_cmd_ctrl) | SOFT_RST_CMD_EN, 624 &priv->regs->soft_rst_cmd_ctrl); 625 626 writel(readl(&priv->regs->ctrl) | (0x11 << flash->cs), &priv->regs->ctrl); 627 break; 628 case SPINOR_OP_EX4B: 629 writel(readl(&priv->regs->ctrl) & ~(0x11 << flash->cs), &priv->regs->ctrl); 630 break; 631 } 632 return 0; 633 } 634 635 static void aspeed_spi_send_cmd_addr(struct aspeed_spi_priv *priv, 636 struct aspeed_spi_flash *flash, 637 const u8 *cmdbuf, unsigned int cmdlen) 638 { 639 int i; 640 u8 byte0 = 0x0; 641 u8 addrlen = cmdlen - 1; 642 643 /* First, send the opcode */ 644 aspeed_spi_write_to_ahb(flash->ahb_base, &cmdbuf[0], 1); 645 646 if(flash->iomode == CE_CTRL_IO_QUAD_ADDR_DATA) 647 writel(flash->ce_ctrl_user | flash->iomode, &priv->regs->ce_ctrl[flash->cs]); 648 649 /* 650 * The controller is configured for 4BYTE address mode. Fix 651 * the address width and send an extra byte if the SPI Flash 652 * layer uses 3 bytes addresses. 653 */ 654 if (addrlen == 3 && readl(&priv->regs->ctrl) & BIT(flash->cs)) 655 aspeed_spi_write_to_ahb(flash->ahb_base, &byte0, 1); 656 657 /* Then the address */ 658 for (i = 1 ; i < cmdlen; i++) 659 aspeed_spi_write_to_ahb(flash->ahb_base, &cmdbuf[i], 1); 660 } 661 662 static ssize_t aspeed_spi_read_user(struct aspeed_spi_priv *priv, 663 struct aspeed_spi_flash *flash, 664 unsigned int cmdlen, const u8 *cmdbuf, 665 unsigned int len, u8 *read_buf) 666 { 667 u8 dummy = 0xff; 668 int i; 669 670 aspeed_spi_start_user(priv, flash); 671 672 /* cmd buffer = cmd + addr + dummies */ 673 aspeed_spi_send_cmd_addr(priv, flash, cmdbuf, 674 cmdlen - (flash->spi->read_dummy/8)); 675 676 for (i = 0 ; i < (flash->spi->read_dummy/8); i++) 677 aspeed_spi_write_to_ahb(flash->ahb_base, &dummy, 1); 678 679 if (flash->iomode) { 680 clrbits_le32(&priv->regs->ce_ctrl[flash->cs], 681 CE_CTRL_IO_MODE_MASK); 682 setbits_le32(&priv->regs->ce_ctrl[flash->cs], flash->iomode); 683 } 684 685 aspeed_spi_read_from_ahb(flash->ahb_base, read_buf, len); 686 aspeed_spi_stop_user(priv, flash); 687 688 return 0; 689 } 690 691 static ssize_t aspeed_spi_write_user(struct aspeed_spi_priv *priv, 692 struct aspeed_spi_flash *flash, 693 unsigned int cmdlen, const u8 *cmdbuf, 694 unsigned int len, const u8 *write_buf) 695 { 696 aspeed_spi_start_user(priv, flash); 697 698 /* cmd buffer = cmd + addr : normally cmd is use signle mode*/ 699 aspeed_spi_send_cmd_addr(priv, flash, cmdbuf, cmdlen); 700 701 /* data will use io mode */ 702 if(flash->iomode == CE_CTRL_IO_QUAD_DATA) 703 writel(flash->ce_ctrl_user | flash->iomode, &priv->regs->ce_ctrl[flash->cs]); 704 705 aspeed_spi_write_to_ahb(flash->ahb_base, write_buf, len); 706 707 aspeed_spi_stop_user(priv, flash); 708 709 return 0; 710 } 711 712 static u32 aspeed_spi_flash_to_addr(struct aspeed_spi_flash *flash, 713 const u8 *cmdbuf, unsigned int cmdlen) 714 { 715 u8 addrlen = cmdlen - 1; 716 u32 addr = (cmdbuf[1] << 16) | (cmdbuf[2] << 8) | cmdbuf[3]; 717 718 /* 719 * U-Boot SPI Flash layer uses 3 bytes addresses, but it might 720 * change one day 721 */ 722 if (addrlen == 4) 723 addr = (addr << 8) | cmdbuf[4]; 724 725 return addr; 726 } 727 728 /* TODO(clg@kaod.org): add support for XFER_MMAP instead ? */ 729 static ssize_t aspeed_spi_read(struct aspeed_spi_priv *priv, 730 struct aspeed_spi_flash *flash, 731 unsigned int cmdlen, const u8 *cmdbuf, 732 unsigned int len, u8 *read_buf) 733 { 734 /* cmd buffer = cmd + addr + dummies */ 735 u32 offset = aspeed_spi_flash_to_addr(flash, cmdbuf, 736 cmdlen - (flash->spi->read_dummy/8)); 737 738 /* 739 * Switch to USER command mode: 740 * - if the AHB window configured for the device is 741 * too small for the read operation 742 * - if read offset is smaller than the decoded start address 743 * and the decoded range is not multiple of flash size 744 */ 745 if ((offset + len >= flash->ahb_size) || \ 746 (offset < ((int)flash->ahb_base & 0x0FFFFFFF) && \ 747 (((int)flash->ahb_base & 0x0FFFFFFF) % flash->spi->size) != 0)) { 748 return aspeed_spi_read_user(priv, flash, cmdlen, cmdbuf, 749 len, read_buf); 750 } 751 752 memcpy_fromio(read_buf, flash->ahb_base + offset, len); 753 754 return 0; 755 } 756 757 static int aspeed_spi_xfer(struct udevice *dev, unsigned int bitlen, 758 const void *dout, void *din, unsigned long flags) 759 { 760 struct udevice *bus = dev->parent; 761 struct aspeed_spi_priv *priv = dev_get_priv(bus); 762 struct aspeed_spi_flash *flash; 763 u8 *cmd_buf = priv->cmd_buf; 764 size_t data_bytes; 765 int err = 0; 766 767 flash = aspeed_spi_get_flash(dev); 768 if (!flash) 769 return -ENXIO; 770 771 if (flags & SPI_XFER_BEGIN) { 772 /* save command in progress */ 773 priv->cmd_len = bitlen / 8; 774 memcpy(cmd_buf, dout, priv->cmd_len); 775 } 776 777 if (flags == (SPI_XFER_BEGIN | SPI_XFER_END)) { 778 /* if start and end bit are set, the data bytes is 0. */ 779 data_bytes = 0; 780 } else { 781 data_bytes = bitlen / 8; 782 } 783 784 debug("CS%u: %s cmd %zu bytes data %zu bytes\n", flash->cs, 785 din ? "read" : "write", priv->cmd_len, data_bytes); 786 787 if ((flags & SPI_XFER_END) || flags == 0) { 788 if (priv->cmd_len == 0) { 789 pr_err("No command is progress !\n"); 790 return -1; 791 } 792 793 if (din && data_bytes) { 794 if (priv->cmd_len == 1) 795 err = aspeed_spi_read_reg(priv, flash, 796 cmd_buf[0], 797 din, data_bytes); 798 else 799 err = aspeed_spi_read(priv, flash, 800 priv->cmd_len, 801 cmd_buf, data_bytes, 802 din); 803 } else if (dout) { 804 if (priv->cmd_len == 1) 805 err = aspeed_spi_write_reg(priv, flash, 806 cmd_buf[0], 807 dout, data_bytes); 808 else 809 err = aspeed_spi_write_user(priv, flash, 810 priv->cmd_len, 811 cmd_buf, data_bytes, 812 dout); 813 } 814 815 if (flags & SPI_XFER_END) { 816 /* clear command */ 817 memset(cmd_buf, 0, sizeof(priv->cmd_buf)); 818 priv->cmd_len = 0; 819 } 820 } 821 822 return err; 823 } 824 825 static int aspeed_spi_child_pre_probe(struct udevice *dev) 826 { 827 struct dm_spi_slave_platdata *slave_plat = dev_get_parent_platdata(dev); 828 829 debug("pre_probe slave device on CS%u, max_hz %u, mode 0x%x.\n", 830 slave_plat->cs, slave_plat->max_hz, slave_plat->mode); 831 832 if (!aspeed_spi_get_flash(dev)) 833 return -ENXIO; 834 835 return 0; 836 } 837 838 /* 839 * It is possible to automatically define a contiguous address space 840 * on top of all CEs in the AHB window of the controller but it would 841 * require much more work. Let's start with a simple mapping scheme 842 * which should work fine for a single flash device. 843 * 844 * More complex schemes should probably be defined with the device 845 * tree. 846 */ 847 static int aspeed_spi_flash_set_segment(struct aspeed_spi_priv *priv, 848 struct aspeed_spi_flash *flash) 849 { 850 u32 seg_addr; 851 852 /* could be configured through the device tree */ 853 flash->ahb_size = flash->spi->size; 854 855 if (priv->new_ver) { 856 seg_addr = G6_SEGMENT_ADDR_VALUE((u32)flash->ahb_base, 857 (u32)flash->ahb_base + flash->ahb_size); 858 } else { 859 seg_addr = SEGMENT_ADDR_VALUE((u32)flash->ahb_base, 860 (u32)flash->ahb_base + flash->ahb_size); 861 } 862 writel(seg_addr, &priv->regs->segment_addr[flash->cs]); 863 864 return 0; 865 } 866 867 static int aspeed_spi_flash_init(struct aspeed_spi_priv *priv, 868 struct aspeed_spi_flash *flash, 869 struct udevice *dev) 870 { 871 int ret; 872 struct spi_flash *spi_flash = dev_get_uclass_priv(dev); 873 struct spi_slave *slave = dev_get_parent_priv(dev); 874 u32 read_hclk; 875 876 877 /* 878 * The SPI flash device slave should not change, so initialize 879 * it only once. 880 */ 881 if (flash->init) 882 return 0; 883 884 /* 885 * The flash device has not been probed yet. Initial transfers 886 * to read the JEDEC of the device will use the initial 887 * default settings of the registers. 888 */ 889 if (!spi_flash->name) 890 return 0; 891 892 debug("CS%u: init %s flags:%x size:%d page:%d sector:%d erase:%d " 893 "cmds [ erase:%x read=%x write:%x ] dummy:%d\n", 894 flash->cs, 895 spi_flash->name, spi_flash->flags, spi_flash->size, 896 spi_flash->page_size, spi_flash->sector_size, 897 spi_flash->erase_size, spi_flash->erase_opcode, 898 spi_flash->read_opcode, spi_flash->program_opcode, 899 spi_flash->read_dummy); 900 901 flash->spi = spi_flash; 902 903 flash->ce_ctrl_user = CE_CTRL_USERMODE; 904 905 if(priv->new_ver) 906 read_hclk = aspeed_g6_spi_hclk_divisor(priv, slave->speed); 907 else 908 read_hclk = aspeed_spi_hclk_divisor(priv, slave->speed); 909 910 switch(flash->spi->read_opcode) { 911 case SPINOR_OP_READ_1_1_2: 912 case SPINOR_OP_READ_1_1_2_4B: 913 flash->iomode = CE_CTRL_IO_DUAL_DATA; 914 break; 915 case SPINOR_OP_READ_1_1_4: 916 case SPINOR_OP_READ_1_1_4_4B: 917 flash->iomode = CE_CTRL_IO_QUAD_DATA; 918 break; 919 case SPINOR_OP_READ_1_4_4: 920 case SPINOR_OP_READ_1_4_4_4B: 921 flash->iomode = CE_CTRL_IO_QUAD_ADDR_DATA; 922 printf("need modify dummy for 3 bytes"); 923 break; 924 } 925 926 if(priv->new_ver) { 927 flash->ce_ctrl_fread = CE_G6_CTRL_CLOCK_FREQ(read_hclk) | 928 flash->iomode | 929 CE_CTRL_CMD(flash->spi->read_opcode) | 930 CE_CTRL_DUMMY((flash->spi->read_dummy/8)) | 931 CE_CTRL_FREADMODE; 932 } else { 933 flash->ce_ctrl_fread = CE_CTRL_CLOCK_FREQ(read_hclk) | 934 flash->iomode | 935 CE_CTRL_CMD(flash->spi->read_opcode) | 936 CE_CTRL_DUMMY((flash->spi->read_dummy/8)) | 937 CE_CTRL_FREADMODE; 938 } 939 940 if (flash->spi->addr_width == 4) 941 writel(readl(&priv->regs->ctrl) | 0x11 << flash->cs, &priv->regs->ctrl); 942 943 debug("CS%u: USER mode 0x%08x FREAD mode 0x%08x\n", flash->cs, 944 flash->ce_ctrl_user, flash->ce_ctrl_fread); 945 946 /* Set the CE Control Register default (FAST READ) */ 947 writel(flash->ce_ctrl_fread, &priv->regs->ce_ctrl[flash->cs]); 948 949 /* Set Address Segment Register for direct AHB accesses */ 950 aspeed_spi_flash_set_segment(priv, flash); 951 952 /* 953 * Set the Read Timing Compensation Register. This setting 954 * applies to all devices. 955 */ 956 ret = aspeed_spi_timing_calibration(priv); 957 if (ret != 0) 958 return ret; 959 960 /* All done */ 961 flash->init = true; 962 963 return 0; 964 } 965 966 static int aspeed_spi_claim_bus(struct udevice *dev) 967 { 968 struct udevice *bus = dev->parent; 969 struct aspeed_spi_priv *priv = dev_get_priv(bus); 970 struct dm_spi_slave_platdata *slave_plat = dev_get_parent_platdata(dev); 971 struct aspeed_spi_flash *flash; 972 973 debug("%s: claim bus CS%u\n", bus->name, slave_plat->cs); 974 975 flash = aspeed_spi_get_flash(dev); 976 if (!flash) 977 return -ENODEV; 978 979 return aspeed_spi_flash_init(priv, flash, dev); 980 } 981 982 static int aspeed_spi_release_bus(struct udevice *dev) 983 { 984 struct udevice *bus = dev->parent; 985 struct dm_spi_slave_platdata *slave_plat = dev_get_parent_platdata(dev); 986 987 debug("%s: release bus CS%u\n", bus->name, slave_plat->cs); 988 989 if (!aspeed_spi_get_flash(dev)) 990 return -ENODEV; 991 992 return 0; 993 } 994 995 static int aspeed_spi_set_mode(struct udevice *bus, uint mode) 996 { 997 debug("%s: setting mode to %x\n", bus->name, mode); 998 999 if (mode & (SPI_RX_QUAD | SPI_TX_QUAD)) { 1000 #ifndef CONFIG_ASPEED_AST2600 1001 pr_err("%s invalid QUAD IO mode\n", bus->name); 1002 return -EINVAL; 1003 #endif 1004 } 1005 1006 /* The CE Control Register is set in claim_bus() */ 1007 return 0; 1008 } 1009 1010 static int aspeed_spi_set_speed(struct udevice *bus, uint hz) 1011 { 1012 debug("%s: setting speed to %u\n", bus->name, hz); 1013 1014 /* The CE Control Register is set in claim_bus() */ 1015 return 0; 1016 } 1017 1018 static int aspeed_spi_count_flash_devices(struct udevice *bus) 1019 { 1020 ofnode node; 1021 int count = 0; 1022 1023 dev_for_each_subnode(node, bus) { 1024 if (ofnode_is_available(node) && 1025 ofnode_device_is_compatible(node, "spi-flash")) 1026 count++; 1027 } 1028 1029 return count; 1030 } 1031 1032 static int aspeed_spi_bind(struct udevice *bus) 1033 { 1034 debug("%s assigned req_seq=%d seq=%d\n", bus->name, bus->req_seq, 1035 bus->seq); 1036 1037 return 0; 1038 } 1039 1040 static int aspeed_spi_probe(struct udevice *bus) 1041 { 1042 struct resource res_regs, res_ahb; 1043 struct aspeed_spi_priv *priv = dev_get_priv(bus); 1044 struct clk hclk; 1045 int ret; 1046 1047 ret = dev_read_resource(bus, 0, &res_regs); 1048 if (ret < 0) 1049 return ret; 1050 1051 priv->regs = (void __iomem *)res_regs.start; 1052 1053 ret = dev_read_resource(bus, 1, &res_ahb); 1054 if (ret < 0) 1055 return ret; 1056 1057 priv->ahb_base = (void __iomem *)res_ahb.start; 1058 priv->ahb_size = res_ahb.end - res_ahb.start; 1059 1060 ret = clk_get_by_index(bus, 0, &hclk); 1061 if (ret < 0) { 1062 pr_err("%s could not get clock: %d\n", bus->name, ret); 1063 return ret; 1064 } 1065 1066 priv->hclk_rate = clk_get_rate(&hclk); 1067 clk_free(&hclk); 1068 1069 priv->max_hz = dev_read_u32_default(bus, "spi-max-frequency", 1070 100000000); 1071 1072 priv->num_cs = dev_read_u32_default(bus, "num-cs", ASPEED_SPI_MAX_CS); 1073 1074 priv->flash_count = aspeed_spi_count_flash_devices(bus); 1075 if (priv->flash_count > priv->num_cs) { 1076 pr_err("%s has too many flash devices: %d\n", bus->name, 1077 priv->flash_count); 1078 return -EINVAL; 1079 } 1080 1081 if (!priv->flash_count) { 1082 pr_err("%s has no flash devices ?!\n", bus->name); 1083 return -ENODEV; 1084 } 1085 1086 if (device_is_compatible(bus, "aspeed,ast2600-fmc") || 1087 device_is_compatible(bus, "aspeed,ast2600-spi")) { 1088 priv->new_ver = 1; 1089 } 1090 1091 /* 1092 * There are some slight differences between the FMC and the 1093 * SPI controllers 1094 */ 1095 priv->is_fmc = dev_get_driver_data(bus); 1096 1097 ret = aspeed_spi_controller_init(priv); 1098 if (ret) 1099 return ret; 1100 1101 debug("%s probed regs=%p ahb_base=%p max-hz=%d cs=%d seq=%d\n", 1102 bus->name, priv->regs, priv->ahb_base, priv->max_hz, 1103 priv->flash_count, bus->seq); 1104 1105 return 0; 1106 } 1107 1108 static const struct dm_spi_ops aspeed_spi_ops = { 1109 .claim_bus = aspeed_spi_claim_bus, 1110 .release_bus = aspeed_spi_release_bus, 1111 .set_mode = aspeed_spi_set_mode, 1112 .set_speed = aspeed_spi_set_speed, 1113 .xfer = aspeed_spi_xfer, 1114 }; 1115 1116 static const struct udevice_id aspeed_spi_ids[] = { 1117 { .compatible = "aspeed,ast2600-fmc", .data = 1 }, 1118 { .compatible = "aspeed,ast2600-spi", .data = 0 }, 1119 { .compatible = "aspeed,ast2500-fmc", .data = 1 }, 1120 { .compatible = "aspeed,ast2500-spi", .data = 0 }, 1121 { .compatible = "aspeed,ast2400-fmc", .data = 1 }, 1122 { } 1123 }; 1124 1125 U_BOOT_DRIVER(aspeed_spi) = { 1126 .name = "aspeed_spi", 1127 .id = UCLASS_SPI, 1128 .of_match = aspeed_spi_ids, 1129 .ops = &aspeed_spi_ops, 1130 .priv_auto_alloc_size = sizeof(struct aspeed_spi_priv), 1131 .child_pre_probe = aspeed_spi_child_pre_probe, 1132 .bind = aspeed_spi_bind, 1133 .probe = aspeed_spi_probe, 1134 }; 1135