1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * ASPEED AST2400/AST2500/AST2600 FMC/SPI Controller driver
4 *
5 * Copyright (c) 2015-2018, IBM Corporation.
6 */
7
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9
10 #include <common.h>
11 #include <clk.h>
12 #include <dm.h>
13 #include <spi.h>
14 #include <spi_flash.h>
15 #include <asm/io.h>
16 #include <linux/ioport.h>
17 #include <malloc.h>
18
19 #define ASPEED_SPI_MAX_CS 3
20 #define FLASH_CALIBRATION_LEN 0x400
21
22 struct aspeed_spi_regs {
23 u32 conf; /* 0x00 CE Type Setting */
24 u32 ctrl; /* 0x04 Control */
25 u32 intr_ctrl; /* 0x08 Interrupt Control and Status */
26 u32 cmd_ctrl; /* 0x0c Command Control */
27 u32 ce_ctrl[ASPEED_SPI_MAX_CS]; /* 0x10 .. 0x18 CEx Control */
28 u32 _reserved0[5]; /* .. */
29 u32 segment_addr[ASPEED_SPI_MAX_CS];
30 /* 0x30 .. 0x38 Segment Address */
31 u32 _reserved1[5]; /* .. */
32 u32 soft_rst_cmd_ctrl; /* 0x50 Auto Soft-Reset Command Control */
33 u32 _reserved2[11]; /* .. */
34 u32 dma_ctrl; /* 0x80 DMA Control/Status */
35 u32 dma_flash_addr; /* 0x84 DMA Flash Side Address */
36 u32 dma_dram_addr; /* 0x88 DMA DRAM Side Address */
37 u32 dma_len; /* 0x8c DMA Length Register */
38 u32 dma_checksum; /* 0x90 Checksum Calculation Result */
39 u32 timings; /* 0x94 Read Timing Compensation */
40 u32 _reserved3[1]; /* 0x98 not used */
42 u32 soft_strap_status; /* 0x9c Software Strap Status */
43 u32 write_cmd_filter_ctrl; /* 0xa0 Write Command Filter Control */
44 u32 write_addr_filter_ctrl; /* 0xa4 Write Address Filter Control */
45 u32 lock_ctrl_reset; /* 0xa8 Lock Control (SRST#) */
46 u32 lock_ctrl_wdt; /* 0xac Lock Control (Watchdog) */
47 u32 write_addr_filter[8]; /* 0xb0 Write Address Filter */
48 u32 _reserved4[12];
49 u32 fully_qualified_cmd[20]; /* 0x100 Fully Qualified Command */
50 u32 addr_qualified_cmd[12]; /* 0x150 Address Qualified Command */
51 };
52
53 /* CE Type Setting Register */
54 #define CONF_ENABLE_W2 BIT(18)
55 #define CONF_ENABLE_W1 BIT(17)
56 #define CONF_ENABLE_W0 BIT(16)
57 #define CONF_FLASH_TYPE2 4
58 #define CONF_FLASH_TYPE1 2 /* Hardwired to SPI */
59 #define CONF_FLASH_TYPE0 0 /* Hardwired to SPI */
60 #define CONF_FLASH_TYPE_NOR 0x0
61 #define CONF_FLASH_TYPE_SPI 0x2
62
63 /* CE Control Register */
64 #define CTRL_EXTENDED2 BIT(2) /* 32 bit addressing for SPI */
65 #define CTRL_EXTENDED1 BIT(1) /* 32 bit addressing for SPI */
66 #define CTRL_EXTENDED0 BIT(0) /* 32 bit addressing for SPI */
67
68 /* Interrupt Control and Status Register */
69 #define INTR_CTRL_DMA_STATUS BIT(11)
70 #define INTR_CTRL_CMD_ABORT_STATUS BIT(10)
71 #define INTR_CTRL_WRITE_PROTECT_STATUS BIT(9)
72 #define INTR_CTRL_DMA_EN BIT(3)
73 #define INTR_CTRL_CMD_ABORT_EN BIT(2)
74 #define INTR_CTRL_WRITE_PROTECT_EN BIT(1)
75
76 /* CEx Control Register */
77 #define CE_CTRL_IO_MODE_MASK GENMASK(31, 28)
78 #define CE_CTRL_IO_QPI_DATA BIT(31)
79 #define CE_CTRL_IO_DUAL_DATA BIT(29)
80 #define CE_CTRL_IO_SINGLE 0
81 #define CE_CTRL_IO_DUAL_ADDR_DATA (BIT(29) | BIT(28))
82 #define CE_CTRL_IO_QUAD_DATA BIT(30)
83 #define CE_CTRL_IO_QUAD_ADDR_DATA (BIT(30) | BIT(28))
84 #define CE_CTRL_CMD_SHIFT 16
85 #define CE_CTRL_CMD_MASK 0xff
86 #define CE_CTRL_CMD(cmd) \
87 (((cmd) & CE_CTRL_CMD_MASK) << CE_CTRL_CMD_SHIFT)
88 #define CE_CTRL_DUMMY_HIGH_SHIFT 14
89 #define CE_CTRL_DUMMY_HIGH_MASK 0x1
90 #define CE_CTRL_CLOCK_FREQ_SHIFT 8
91 #define CE_CTRL_CLOCK_FREQ_MASK 0xf
92 #define CE_CTRL_CLOCK_FREQ(div) \
93 (((div) & CE_CTRL_CLOCK_FREQ_MASK) << CE_CTRL_CLOCK_FREQ_SHIFT)
94 #define CE_G6_CTRL_CLOCK_FREQ(div) \
95 ((((div) & CE_CTRL_CLOCK_FREQ_MASK) << CE_CTRL_CLOCK_FREQ_SHIFT) | (((div) & 0xf0) << 20))
96 #define CE_CTRL_DUMMY_LOW_SHIFT 6 /* 2 bits [7:6] */
97 #define CE_CTRL_DUMMY_LOW_MASK 0x3
98 #define CE_CTRL_DUMMY(dummy) \
99 (((((dummy) >> 2) & CE_CTRL_DUMMY_HIGH_MASK) \
100 << CE_CTRL_DUMMY_HIGH_SHIFT) | \
101 (((dummy) & CE_CTRL_DUMMY_LOW_MASK) << CE_CTRL_DUMMY_LOW_SHIFT))
102 #define CE_CTRL_STOP_ACTIVE BIT(2)
103 #define CE_CTRL_MODE_MASK 0x3
104 #define CE_CTRL_READMODE 0x0
105 #define CE_CTRL_FREADMODE 0x1
106 #define CE_CTRL_WRITEMODE 0x2
107 #define CE_CTRL_USERMODE 0x3
108 #define CE_CTRL_FREQ_MASK 0xf0fff0ff
109
110 #define SPI_READ_FROM_FLASH 0x00000001
111 #define SPI_WRITE_TO_FLASH 0x00000002
112
113 /* Auto Soft-Reset Command Control */
114 #define SOFT_RST_CMD_EN GENMASK(1, 0)
115
116 /*
117 * The Segment Register uses an 8MB unit to encode the start address
118 * and the end address of the AHB window of a SPI flash device.
119 * Default segment addresses are:
120 *
121 * CE0 0x20000000 - 0x2fffffff 128MB
122 * CE1 0x28000000 - 0x29ffffff 32MB
123 * CE2 0x2a000000 - 0x2bffffff 32MB
124 *
125 * The full address space of the AHB window of the controller is
126 * covered and CE0 start address and CE2 end addresses are read-only.
127 */
128 #define SEGMENT_ADDR_START(reg) ((((reg) >> 16) & 0xff) << 23)
129 #define SEGMENT_ADDR_END(reg) ((((reg) >> 24) & 0xff) << 23)
130 #define SEGMENT_ADDR_VALUE(start, end) \
131 (((((start) >> 23) & 0xff) << 16) | ((((end) >> 23) & 0xff) << 24))
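/*
 * Example: a 32MB window at 0x28000000 (end 0x2a000000) encodes as
 * SEGMENT_ADDR_VALUE(0x28000000, 0x2a000000) = 0x54500000, from which
 * SEGMENT_ADDR_START() and SEGMENT_ADDR_END() recover 0x28000000 and
 * 0x2a000000 respectively.
 */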
132
133 #define G6_SEGMENT_ADDR_START(reg) (((reg) << 16) & 0x0ff00000)
134 #define G6_SEGMENT_ADDR_END(reg) (((reg) & 0x0ff00000) + 0x100000)
135 #define G6_SEGMENT_ADDR_VALUE(start, end) \
136 ((((start) & 0x0ff00000) >> 16) | (((end) - 0x100000) & 0xffff0000))
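/*
 * The AST2600 variant uses a 1MB unit: bits [27:20] of the start
 * address are stored in reg[11:4] and bits [31:16] of (end - 1MB) in
 * reg[31:16], as the macros above encode and decode.
 */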
137
138 /* DMA Control/Status Register */
139 #define DMA_CTRL_DELAY_SHIFT 8
140 #define DMA_CTRL_DELAY_MASK 0xf
141 #define G6_DMA_CTRL_DELAY_MASK 0xff
142 #define DMA_CTRL_FREQ_SHIFT 4
143 #define G6_DMA_CTRL_FREQ_SHIFT 16
144
145 #define DMA_CTRL_FREQ_MASK 0xf
146 #define TIMING_MASK(div, delay) \
147 (((delay & DMA_CTRL_DELAY_MASK) << DMA_CTRL_DELAY_SHIFT) | \
148 ((div & DMA_CTRL_FREQ_MASK) << DMA_CTRL_FREQ_SHIFT))
149 #define G6_TIMING_MASK(div, delay) \
150 (((delay & G6_DMA_CTRL_DELAY_MASK) << DMA_CTRL_DELAY_SHIFT) | \
151 ((div & DMA_CTRL_FREQ_MASK) << G6_DMA_CTRL_FREQ_SHIFT))
152 #define DAM_CTRL_REQUEST BIT(31)
153 #define DAM_CTRL_GRANT BIT(30)
154 #define DMA_CTRL_CALIB BIT(3)
155 #define DMA_CTRL_CKSUM BIT(2)
156 #define DMA_CTRL_WRITE BIT(1)
157 #define DMA_CTRL_ENABLE BIT(0)
158
159 #define DMA_GET_REQ_MAGIC 0xaeed0000
160 #define DMA_DISCARD_REQ_MAGIC 0xdeea0000
161
162 /* AST2600 specific setting */
163 #define SPI_3B_AUTO_CLR_REG 0x1e6e2510
164 #define SPI_3B_AUTO_CLR BIT(9)
165
166
167 /*
168 * flash related info
169 */
170 struct aspeed_spi_flash {
171 u8 cs;
172 bool init; /* Initialized when the SPI bus is first claimed */
176 void __iomem *ahb_base; /* AHB Window for this device */
177 u32 ahb_size; /* AHB Window segment size */
178 u32 ce_ctrl_user; /* CE Control Register for USER mode */
179 u32 ce_ctrl_fread; /* CE Control Register for FREAD mode */
180 u32 read_iomode;
181 u32 write_iomode;
182 u32 max_freq;
183 struct spi_flash *spi; /* Associated SPI Flash device */
184 };
185
186 enum aspeed_spi_dir {
187 ASPEED_SPI_DIR_IN,
188 ASPEED_SPI_DIR_OUT,
189 };
190
191 #define ASPEED_SPI_OP_CMD(__opcode) \
192 { \
193 .opcode = __opcode, \
194 }
195
196 #define ASPEED_SPI_OP_ADDR(__nbytes, __val) \
197 { \
198 .nbytes = __nbytes, \
199 .val = __val, \
200 }
201
202 #define ASPEED_SPI_OP_NO_ADDR { }
203
204 #define ASPEED_SPI_OP_DUMMY(__nbytes) \
205 { \
206 .nbytes = __nbytes, \
207 }
208
209 #define ASPEED_SPI_OP_NO_DUMMY { }
210
211 #define ASPEED_SPI_OP_DATA_IN(__nbytes, __buf) \
212 { \
213 .dir = ASPEED_SPI_DIR_IN, \
214 .nbytes = __nbytes, \
215 .buf.in = __buf, \
216 }
217
218 #define ASPEED_SPI_OP_DATA_OUT(__nbytes, __buf) \
219 { \
220 .dir = ASPEED_SPI_DIR_OUT, \
221 .nbytes = __nbytes, \
222 .buf.out = __buf, \
223 }
224
225 #define ASPEED_SPI_OP_NO_DATA { }
226
227 #define ASPEED_SPI_OP(__io_mode, __cmd, __addr, __dummy, __data) \
228 { \
229 .io_mode = __io_mode, \
230 .cmd = __cmd, \
231 .addr = __addr, \
232 .dummy = __dummy, \
233 .data = __data, \
234 }
235
236 struct aspeed_spi_op {
237 u32 io_mode;
238
239 struct {
240 u16 opcode;
241 } cmd;
242
243 struct {
244 u8 nbytes;
245 u32 val;
246 } addr;
247
248 struct {
249 u8 nbytes;
250 } dummy;
251
252 struct {
253 enum aspeed_spi_dir dir;
254 unsigned int nbytes;
255 union {
256 void *in;
257 const void *out;
258 } buf;
259 } data;
260 };
261
262 struct aspeed_spi_priv {
263 struct aspeed_spi_regs *regs;
264 void __iomem *ahb_base; /* AHB Window for all flash devices */
265 int new_ver;
266 u32 ahb_size; /* AHB Window segments size */
267 ulong hclk_rate; /* AHB clock rate */
268 u8 num_cs;
269 bool is_fmc;
270
271 struct aspeed_spi_flash flashes[ASPEED_SPI_MAX_CS];
272 u32 flash_count;
273
274 u8 cmd_buf[16]; /* SPI command in progress */
275 size_t cmd_len;
276 u8 *tmp_buf;
277 int (*spi_exec_op_cmd)(struct aspeed_spi_priv *priv,
278 struct aspeed_spi_flash *flash,
279 struct aspeed_spi_op *op);
280 };
281
282 static u32 aspeed_spi_flash_to_addr(struct aspeed_spi_flash *flash,
283 const u8 *cmdbuf, unsigned int cmdlen);
284
285 static struct aspeed_spi_flash *aspeed_spi_get_flash(struct udevice *dev)
286 {
287 struct dm_spi_slave_platdata *slave_plat = dev_get_parent_platdata(dev);
288 struct aspeed_spi_priv *priv = dev_get_priv(dev->parent);
289 u8 cs = slave_plat->cs;
290
291 if (cs >= priv->flash_count) {
292 pr_err("invalid CS %u\n", cs);
293 return NULL;
294 }
295
296 return &priv->flashes[cs];
297 }
298
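/*
 * On the AST2600 the SPI clock is HCLK / (i + 1 + j * 16): hclk_masks[i]
 * provides the low nibble of the divisor setting (CEx Control [11:8])
 * and the base multiplier j goes into the high nibble, which
 * CE_G6_CTRL_CLOCK_FREQ() places in CEx Control [27:24].
 */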
299 static u32 aspeed_g6_spi_hclk_divisor(struct aspeed_spi_priv *priv, u32 max_hz)
300 {
301 u32 hclk_rate = priv->hclk_rate;
302 /* HCLK/1 .. HCLK/16 */
303 const u8 hclk_masks[] = {
304 15, 7, 14, 6, 13, 5, 12, 4, 11, 3, 10, 2, 9, 1, 8, 0
305 };
306 u8 hclk_div = 0x4; /* default value */
307 bool found = false;
308 u32 i, j = 0;
309
310 /* FMC/SPIR10[27:24] */
311 for (j = 0; j < 0xf; j++) {
312 for (i = 0; i < ARRAY_SIZE(hclk_masks); i++) {
313 if (i == 0 && j == 0)
314 continue;
315
316 if ((hclk_rate / ((i + 1) + j * 16)) <= max_hz) {
317 found = true;
318 break;
319 }
320 }
321
322 if (found)
323 break;
324 }
325
326 debug("hclk=%d required=%d h_div %d, divisor is %d (mask %x) speed=%d\n",
327 hclk_rate, max_hz, j, i + 1, hclk_masks[i], hclk_rate / (i + 1 + j * 16));
328
329 hclk_div = ((j << 4) | hclk_masks[i]);
330
331 return hclk_div;
332 }
333
334 static u32 aspeed_spi_hclk_divisor(struct aspeed_spi_priv *priv, u32 max_hz)
335 {
336 u32 hclk_rate = priv->hclk_rate;
337 /* HCLK/1 .. HCLK/16 */
338 const u8 hclk_masks[] = {
339 15, 7, 14, 6, 13, 5, 12, 4, 11, 3, 10, 2, 9, 1, 8, 0
340 };
341 u32 i;
342 u32 hclk_div_setting = 0;
343
344 for (i = 0; i < ARRAY_SIZE(hclk_masks); i++) {
345 if (max_hz >= (hclk_rate / (i + 1)))
346 break;
347 }
348 debug("hclk=%d required=%d divisor is %d (mask %x) speed=%d\n",
349 hclk_rate, max_hz, i + 1, hclk_masks[i], hclk_rate / (i + 1));
350
351 hclk_div_setting = hclk_masks[i];
352
353 return hclk_div_setting;
354 }
355
356 /*
357 * Use some address/size under the first flash device CE0
358 */
359 static u32 aspeed_spi_fmc_checksum(struct aspeed_spi_priv *priv,
360 struct aspeed_spi_flash *flash,
361 u8 div, u8 delay)
362 {
363 u32 flash_addr = (u32)flash->ahb_base + 0x10000;
364 u32 dma_ctrl;
365 u32 checksum;
366
367 writel(flash_addr, &priv->regs->dma_flash_addr);
368 writel(FLASH_CALIBRATION_LEN, &priv->regs->dma_len);
369
370 /*
371 * When doing calibration, the SPI clock rate in the CE0
372 * Control Register and the data input delay cycles in the
373 * Read Timing Compensation Register are replaced by bit[11:4].
374 */
375 dma_ctrl = DMA_CTRL_ENABLE | DMA_CTRL_CKSUM | DMA_CTRL_CALIB |
376 TIMING_MASK(div, delay);
377
378 writel(dma_ctrl, &priv->regs->dma_ctrl);
379 while (!(readl(&priv->regs->intr_ctrl) & INTR_CTRL_DMA_STATUS))
380 ;
381
382 writel(0x0, &priv->regs->intr_ctrl);
383
384 checksum = readl(&priv->regs->dma_checksum);
385
386 writel(0x0, &priv->regs->dma_ctrl);
387 return checksum;
388 }
389
390 /*
391 * Use some address/size under the first flash device CE0
392 */
393 static u32 aspeed_g6_spi_fmc_checksum(struct aspeed_spi_priv *priv,
394 struct aspeed_spi_flash *flash,
395 u8 div, u8 delay)
396 {
397 u32 flash_addr = (u32)flash->ahb_base;
398 u32 dma_ctrl;
399 u32 checksum;
400
401 writel(DMA_GET_REQ_MAGIC, &priv->regs->dma_ctrl);
402 if (readl(&priv->regs->dma_ctrl) & DAM_CTRL_REQUEST) {
403 while (!(readl(&priv->regs->dma_ctrl) & DAM_CTRL_GRANT))
404 ;
405 }
406
407 writel(flash_addr, &priv->regs->dma_flash_addr);
408 writel(FLASH_CALIBRATION_LEN, &priv->regs->dma_len);
409
410 /*
411 * When doing calibration, the SPI clock rate in the control
412 * register and the data input delay cycles in the
413 * read timing compensation register are replaced by bit[11:4].
414 */
415 dma_ctrl = DMA_CTRL_ENABLE | DMA_CTRL_CKSUM | DMA_CTRL_CALIB |
416 G6_TIMING_MASK(div, delay);
417
418 writel(dma_ctrl, &priv->regs->dma_ctrl);
419 while (!(readl(&priv->regs->intr_ctrl) & INTR_CTRL_DMA_STATUS))
420 ;
421
422 checksum = readl(&priv->regs->dma_checksum);
423
424 writel(0x0, &priv->regs->intr_ctrl);
425 writel(0x0, &priv->regs->dma_ctrl);
426 writel(DMA_DISCARD_REQ_MAGIC, &priv->regs->dma_ctrl);
427
428 return checksum;
429 }
430
431 static u32 aspeed_spi_read_checksum(struct aspeed_spi_priv *priv,
432 struct aspeed_spi_flash *flash,
433 u8 div, u8 delay)
434 {
435 if (priv->new_ver)
436 return aspeed_g6_spi_fmc_checksum(priv, flash, div, delay);
437
438 /* On the AST2500, only the FMC controller supports timing calibration. */
439 if (!priv->is_fmc) {
440 pr_warn("No timing calibration support for SPI controllers\n");
441 return 0xbadc0de;
442 }
443
444 return aspeed_spi_fmc_checksum(priv, flash, div, delay);
445 }
446
447 #define TIMING_DELAY_DI_4NS BIT(3)
448 #define TIMING_DELAY_HCYCLE_MAX 5
449
450 /*
451 * Check that the data is not all 0s or all 1s, in order to avoid
452 * calibrating against an unmounted or erased SPI flash.
453 */
454 static bool aspeed_spi_calibriation_enable(const u8 *buf, u32 sz)
455 {
456 const u32 *buf_32 = (const u32 *)buf;
457 u32 i;
458 u32 valid_count = 0;
459
460 for (i = 0; i < (sz / 4); i++) {
461 if (buf_32[i] != 0 && buf_32[i] != 0xffffffff)
462 valid_count++;
463 if (valid_count > 100)
464 return true;
465 }
466
467 return false;
468 }
469
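/*
 * Scan a pass(1)/fail(0) array produced by the calibration sweep and
 * return the index of the middle of the longest run of passing points,
 * or -1 if that run is too short to be trusted.
 */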
470 static int get_mid_point_of_longest_one(u8 *buf, u32 len)
471 {
472 int i;
473 int start = 0, mid_point = 0;
474 int max_cnt = 0, cnt = 0;
475
476 for (i = 0; i < len; i++) {
477 if (buf[i] == 1) {
478 cnt++;
479 } else {
480 cnt = 0;
481 start = i;
482 }
483
484 if (max_cnt < cnt) {
485 max_cnt = cnt;
486 mid_point = start + (cnt / 2);
487 }
488 }
489
490 /*
491 * In order to get a stable SPI read timing,
492 * abandon the result if the length of longest
493 * consecutive good points is too short.
494 */
495 if (max_cnt < 4)
496 return -1;
497
498 return mid_point;
499 }
500
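/*
 * Timing calibration: compute a golden checksum of the calibration area
 * at the lowest clock rate, then sweep the read timing compensation
 * settings for the candidate HCLK divisors and compare checksums against
 * it. The AST2600 path keeps the midpoint of the longest passing window
 * for one divisor; the older path records a working delay per divisor in
 * the Read Timing Compensation register.
 */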
501 static int aspeed_spi_timing_calibration(struct aspeed_spi_priv *priv,
502 struct aspeed_spi_flash *flash)
503 {
504 u32 cs = flash->cs;
505 /* HCLK/5 .. HCLK/1 */
506 const u8 hclk_masks[] = {13, 6, 14, 7, 15};
507 u32 timing_reg;
508 u32 checksum, gold_checksum;
509 int i;
510 u32 hcycle, delay_ns;
511 u32 final_delay = 0;
512 u32 hclk_div = 0;
513 u32 max_freq = flash->max_freq;
514 u32 reg_val;
515 u8 *tmp_buf = NULL;
516 u8 *calib_res = NULL;
517 int calib_point;
518 bool pass;
519
520 if (priv->new_ver) {
521 timing_reg = readl(&priv->regs->timings + cs);
522 if (timing_reg != 0)
523 return 0;
524
525 /*
526 * Use the lowest frequency first to check the calibration data
527 * and to obtain the golden (reference) checksum.
528 */
529 reg_val = flash->ce_ctrl_fread & CE_CTRL_FREQ_MASK;
530 writel(reg_val, &priv->regs->ce_ctrl[cs]);
531 tmp_buf = (u8 *)malloc(FLASH_CALIBRATION_LEN);
532 if (!tmp_buf)
533 return -ENOMEM;
534
535 memcpy_fromio(tmp_buf, flash->ahb_base, FLASH_CALIBRATION_LEN);
536 if (!aspeed_spi_calibriation_enable(tmp_buf, FLASH_CALIBRATION_LEN)) {
537 debug("flash data is monotonous, skip calibration.\n");
538 goto no_calib;
539 }
540
541 /* Compute reference checksum at lowest freq HCLK/16 */
542 gold_checksum = aspeed_spi_read_checksum(priv, flash, 0, 0);
543
544 /*
545 * Allocate a buffer to record the calibration result of each
546 * timing compensation setting for a fixed HCLK divisor.
548 */
549 calib_res = (u8 *)malloc(6 * 17);
550 if (!calib_res) {
551 free(tmp_buf);
552 return -ENOMEM;
553 }
554
555 /* from HCLK/2 to HCLK/5 */
556 for (i = 0; i < ARRAY_SIZE(hclk_masks) - 1; i++) {
557 if (priv->hclk_rate / (i + 2) > max_freq) {
558 debug("skipping freq %ld\n", priv->hclk_rate / (i + 2));
559 continue;
560 }
561
562 max_freq = (u32)priv->hclk_rate / (i + 2);
563
564 memset(calib_res, 0x0, 6 * 17);
565 for (hcycle = 0; hcycle <= 5; hcycle++) {
566 /* increase the DI delay in 0.5ns steps */
567 debug("Delay Enable : hcycle %x\n", hcycle);
568 for (delay_ns = 0; delay_ns <= 0xf; delay_ns++) {
569 checksum = aspeed_g6_spi_fmc_checksum(priv, flash,
570 hclk_masks[3 - i],
571 TIMING_DELAY_DI_4NS | hcycle | (delay_ns << 4));
572 pass = (checksum == gold_checksum);
573 calib_res[hcycle * 17 + delay_ns] = pass;
574 debug("HCLK/%d, %d HCLK cycle, %d delay_ns : %s\n",
575 i + 2, hcycle, delay_ns, pass ? "PASS" : "FAIL");
576 }
577 }
578
579 calib_point = get_mid_point_of_longest_one(calib_res, 6 * 17);
580 if (calib_point < 0) {
581 debug("cannot get good calibration point.\n");
582 continue;
583 }
584
585 hcycle = calib_point / 17;
586 delay_ns = calib_point % 17;
587 debug("final hcycle: %d, delay_ns: %d\n", hcycle, delay_ns);
588
589 final_delay = (TIMING_DELAY_DI_4NS | hcycle | (delay_ns << 4)) << (i * 8);
590 writel(final_delay, &priv->regs->timings + cs);
591 break;
592 }
593
594 no_calib:
595 hclk_div = aspeed_g6_spi_hclk_divisor(priv, max_freq);
596 /* configure SPI clock frequency */
597 reg_val = readl(&priv->regs->ce_ctrl[cs]);
598 reg_val = (reg_val & CE_CTRL_FREQ_MASK) | CE_G6_CTRL_CLOCK_FREQ(hclk_div);
599 writel(reg_val, &priv->regs->ce_ctrl[cs]);
600
601 /* record the clock setting in the CE control values */
602 flash->ce_ctrl_user =
603 (flash->ce_ctrl_user & CE_CTRL_FREQ_MASK) | CE_G6_CTRL_CLOCK_FREQ(hclk_div);
604 flash->ce_ctrl_fread =
605 (flash->ce_ctrl_fread & CE_CTRL_FREQ_MASK) | CE_G6_CTRL_CLOCK_FREQ(hclk_div);
606
607 debug("cs: %d, freq: %dMHz\n", cs, max_freq / 1000000);
608
609 if (tmp_buf)
610 free(tmp_buf);
611 if (calib_res)
612 free(calib_res);
613 } else {
614 /* Use the control settings from aspeed_spi_flash_init() to
615 * drive the calibration process.
616 */
617 timing_reg = readl(&priv->regs->timings);
618 if (timing_reg != 0)
619 return 0;
620
621 /* Compute reference checksum at lowest freq HCLK/16 */
622 gold_checksum = aspeed_spi_read_checksum(priv, flash, 0, 0);
623
624 for (i = 0; i < ARRAY_SIZE(hclk_masks); i++) {
625 u32 hdiv = 5 - i;
626 u32 hshift = (hdiv - 1) << 2;
627 bool pass = false;
628 u8 delay;
629
630 if (priv->hclk_rate / hdiv > flash->max_freq) {
631 debug("skipping freq %ld\n", priv->hclk_rate / hdiv);
632 continue;
633 }
634
635 /* Increase HCLK cycles until read succeeds */
636 for (hcycle = 0; hcycle <= TIMING_DELAY_HCYCLE_MAX; hcycle++) {
637 /* Try first with a 4ns DI delay */
638 delay = TIMING_DELAY_DI_4NS | hcycle;
639 checksum = aspeed_spi_read_checksum(priv, flash, hclk_masks[i],
640 delay);
641 pass = (checksum == gold_checksum);
642 debug(" HCLK/%d, 4ns DI delay, %d HCLK cycle : %s\n",
643 hdiv, hcycle, pass ? "PASS" : "FAIL");
644
645 /* Try again with more HCLK cycles */
646 if (!pass)
647 continue;
648
649 /* Try without the 4ns DI delay */
650 delay = hcycle;
651 checksum = aspeed_spi_read_checksum(priv, flash, hclk_masks[i],
652 delay);
653 pass = (checksum == gold_checksum);
654 debug(" HCLK/%d, no DI delay, %d HCLK cycle : %s\n",
655 hdiv, hcycle, pass ? "PASS" : "FAIL");
656
657 /* All good for this freq */
658 if (pass)
659 break;
660 }
661
662 if (pass) {
663 timing_reg &= ~(0xfu << hshift);
664 timing_reg |= delay << hshift;
665 }
666 }
667
668 debug("Read Timing Compensation set to 0x%08x\n", timing_reg);
669 writel(timing_reg, &priv->regs->timings);
670 }
671
672 return 0;
673 }
674
675 static int aspeed_spi_controller_init(struct aspeed_spi_priv *priv)
676 {
677 int cs;
678
679 /*
680 * Enable write on all flash devices as USER command mode
681 * requires it.
682 */
683 setbits_le32(&priv->regs->conf,
684 CONF_ENABLE_W2 | CONF_ENABLE_W1 | CONF_ENABLE_W0);
685
686 /*
687 * Set safe default settings for each device. These will be
688 * tuned after the SPI flash devices are probed.
689 */
690 if (priv->new_ver) {
691 for (cs = 0; cs < priv->flash_count; cs++) {
692 struct aspeed_spi_flash *flash = &priv->flashes[cs];
693 u32 addr_config = 0;
694 switch (cs) {
695 case 0:
696 flash->ahb_base = priv->ahb_base;
697 debug("cs0 mem-map : %x\n", (u32)flash->ahb_base);
698 break;
699 case 1:
700 flash->ahb_base = priv->flashes[0].ahb_base + 0x4000000; /* cs0 + 64MB */
701 debug("cs1 mem-map : %x end %x\n",
702 (u32)flash->ahb_base, (u32)flash->ahb_base + 0x4000000);
703 break;
704 case 2:
705 flash->ahb_base = priv->flashes[0].ahb_base + 0x4000000 * 2; /* cs0 + 128MB : use 64MB */
706 debug("cs2 mem-map : %x end %x\n",
707 (u32)flash->ahb_base, (u32)flash->ahb_base + 0x4000000);
708 break;
709 }
710 addr_config =
711 G6_SEGMENT_ADDR_VALUE((u32)flash->ahb_base, (u32)flash->ahb_base + 0x4000000);
712 writel(addr_config, &priv->regs->segment_addr[cs]);
713 flash->cs = cs;
714 flash->ce_ctrl_user = CE_CTRL_USERMODE;
715 flash->ce_ctrl_fread = CE_CTRL_READMODE;
716 }
717 } else {
718 for (cs = 0; cs < priv->flash_count; cs++) {
719 struct aspeed_spi_flash *flash = &priv->flashes[cs];
720 u32 seg_addr = readl(&priv->regs->segment_addr[cs]);
721 /*
722 * The start address of the AHB window of CE0 is
723 * read-only and is the same as the address of the
724 * overall AHB window of the controller for all flash
725 * devices.
726 */
727 flash->ahb_base = cs ? (void *)SEGMENT_ADDR_START(seg_addr) :
728 priv->ahb_base;
729
730 flash->cs = cs;
731 flash->ce_ctrl_user = CE_CTRL_USERMODE;
732 flash->ce_ctrl_fread = CE_CTRL_READMODE;
733 }
734 }
735 return 0;
736 }
737
738 static int aspeed_spi_read_from_ahb(void __iomem *ahb_base, void *buf,
739 size_t len)
740 {
741 size_t offset = 0;
742
743 if (!((uintptr_t)buf % 4)) {
744 readsl(ahb_base, buf, len >> 2);
745 offset = len & ~0x3;
746 len -= offset;
747 }
748 readsb(ahb_base, (u8 *)buf + offset, len);
749
750 return 0;
751 }
752
753 static int aspeed_spi_write_to_ahb(void __iomem *ahb_base, const void *buf,
754 size_t len)
755 {
756 size_t offset = 0;
757
758 if (!((uintptr_t)buf % 4)) {
759 writesl(ahb_base, buf, len >> 2);
760 offset = len & ~0x3;
761 len -= offset;
762 }
763 writesb(ahb_base, (u8 *)buf + offset, len);
764
765 return 0;
766 }
767
768 static void aspeed_spi_start_user(struct aspeed_spi_priv *priv,
769 struct aspeed_spi_flash *flash)
770 {
771 u32 ctrl_reg = flash->ce_ctrl_user | CE_CTRL_STOP_ACTIVE;
772
773 /* Deselect CS and set USER command mode */
774 writel(ctrl_reg, &priv->regs->ce_ctrl[flash->cs]);
775
776 /* Select CS */
777 clrbits_le32(&priv->regs->ce_ctrl[flash->cs], CE_CTRL_STOP_ACTIVE);
778 }
779
780 static void aspeed_spi_stop_user(struct aspeed_spi_priv *priv,
781 struct aspeed_spi_flash *flash)
782 {
783 /* Deselect CS first */
784 setbits_le32(&priv->regs->ce_ctrl[flash->cs], CE_CTRL_STOP_ACTIVE);
785
786 /* Restore default command mode */
787 writel(flash->ce_ctrl_fread, &priv->regs->ce_ctrl[flash->cs]);
788 }
789
790 static int aspeed_spi_read_reg(struct aspeed_spi_priv *priv,
791 struct aspeed_spi_flash *flash,
792 u8 opcode, u8 *read_buf, int len)
793 {
794 struct aspeed_spi_op op =
795 ASPEED_SPI_OP(0,
796 ASPEED_SPI_OP_CMD(opcode),
797 ASPEED_SPI_OP_ADDR(0, 0),
798 ASPEED_SPI_OP_DUMMY(0),
799 ASPEED_SPI_OP_DATA_IN(len, read_buf));
800
801 if (priv->spi_exec_op_cmd) {
802 priv->spi_exec_op_cmd(priv, flash, &op);
803 return 0;
804 }
805
806 aspeed_spi_start_user(priv, flash);
807 aspeed_spi_write_to_ahb(flash->ahb_base, &opcode, 1);
808 aspeed_spi_read_from_ahb(flash->ahb_base, read_buf, len);
809 aspeed_spi_stop_user(priv, flash);
810
811 return 0;
812 }
813
814 static int aspeed_spi_write_reg(struct aspeed_spi_priv *priv,
815 struct aspeed_spi_flash *flash,
816 u8 opcode, const u8 *write_buf, int len)
817 {
818 int i;
819 struct aspeed_spi_op op =
820 ASPEED_SPI_OP(0,
821 ASPEED_SPI_OP_CMD(opcode),
822 ASPEED_SPI_OP_ADDR(0, 0),
823 ASPEED_SPI_OP_DUMMY(0),
824 ASPEED_SPI_OP_DATA_OUT(len, write_buf));
825
826 if (priv->spi_exec_op_cmd) {
827 if (opcode == SPINOR_OP_BE_4K || opcode == SPINOR_OP_BE_4K_4B ||
828 opcode == SPINOR_OP_BE_32K || opcode == SPINOR_OP_BE_32K_4B ||
829 opcode == SPINOR_OP_SE || opcode == SPINOR_OP_SE_4B) {
830 op.addr.nbytes = len;
831 for (i = 0; i < len; i++) {
832 op.addr.val <<= 8;
833 op.addr.val |= (u32)write_buf[i];
834 }
835 op.data.nbytes = 0;
836 }
837
838 priv->spi_exec_op_cmd(priv, flash, &op);
839 return 0;
840 }
841
842 aspeed_spi_start_user(priv, flash);
843 aspeed_spi_write_to_ahb(flash->ahb_base, &opcode, 1);
844 aspeed_spi_write_to_ahb(flash->ahb_base, write_buf, len);
845 aspeed_spi_stop_user(priv, flash);
846
847 debug("=== write opcode [%x] ==== \n", opcode);
848 switch(opcode) {
849 case SPINOR_OP_EN4B:
850 /* On the AST2600, if two-chip ABR mode is enabled,
851 * turn on 3-byte mode auto-clear in order to avoid
852 * the scenario where the SPI controller is in 4-byte mode
853 * while the flash side is in 3-byte mode after the third switch.
854 */
855 if (priv->new_ver == 1 && (readl(SPI_3B_AUTO_CLR_REG) & SPI_3B_AUTO_CLR))
856 writel(readl(&priv->regs->soft_rst_cmd_ctrl) | SOFT_RST_CMD_EN,
857 &priv->regs->soft_rst_cmd_ctrl);
858
859 writel(readl(&priv->regs->ctrl) | (0x11 << flash->cs), &priv->regs->ctrl);
860 break;
861 case SPINOR_OP_EX4B:
862 writel(readl(&priv->regs->ctrl) & ~(0x11 << flash->cs), &priv->regs->ctrl);
863 break;
864 }
865 return 0;
866 }
867
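/*
 * Send the opcode followed by the address bytes in user mode. For 1-4-4
 * (quad address/data) operations the CE control register is switched to
 * the quad IO mode before the address bytes are sent.
 */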
868 static void aspeed_spi_send_cmd_addr(struct aspeed_spi_priv *priv,
869 struct aspeed_spi_flash *flash,
870 const u8 *cmdbuf, unsigned int cmdlen, uint32_t flag)
871 {
872 int i;
873
874 /* First, send the opcode */
875 aspeed_spi_write_to_ahb(flash->ahb_base, &cmdbuf[0], 1);
876
877 if (flash->write_iomode == CE_CTRL_IO_QUAD_ADDR_DATA && (flag & SPI_WRITE_TO_FLASH))
878 writel(flash->ce_ctrl_user | flash->write_iomode, &priv->regs->ce_ctrl[flash->cs]);
879 else if (flash->read_iomode == CE_CTRL_IO_QUAD_ADDR_DATA && (flag & SPI_READ_FROM_FLASH))
880 writel(flash->ce_ctrl_user | flash->read_iomode, &priv->regs->ce_ctrl[flash->cs]);
881
882 /* Then the address */
883 for (i = 1; i < cmdlen; i++)
884 aspeed_spi_write_to_ahb(flash->ahb_base, &cmdbuf[i], 1);
885 }
886
887 static ssize_t aspeed_spi_read_user(struct aspeed_spi_priv *priv,
888 struct aspeed_spi_flash *flash,
889 unsigned int cmdlen, const u8 *cmdbuf,
890 unsigned int len, u8 *read_buf)
891 {
892 u8 dummy = 0x00;
893 int i;
894 struct aspeed_spi_op op =
895 ASPEED_SPI_OP(flash->read_iomode,
896 ASPEED_SPI_OP_CMD(cmdbuf[0]),
897 ASPEED_SPI_OP_ADDR(0, 0),
898 ASPEED_SPI_OP_DUMMY(flash->spi->read_dummy / 8),
899 ASPEED_SPI_OP_DATA_IN(len, read_buf));
900
901 if (priv->spi_exec_op_cmd) {
902 op.addr.nbytes = cmdlen - 1 - op.dummy.nbytes;
903 op.addr.val = aspeed_spi_flash_to_addr(flash, cmdbuf, op.addr.nbytes + 1);
904 priv->spi_exec_op_cmd(priv, flash, &op);
905 return 0;
906 }
907
908 aspeed_spi_start_user(priv, flash);
909
910 /* cmd buffer = cmd + addr + dummies */
911 aspeed_spi_send_cmd_addr(priv, flash, cmdbuf,
912 cmdlen - (flash->spi->read_dummy / 8), SPI_READ_FROM_FLASH);
913
914 for (i = 0; i < (flash->spi->read_dummy / 8); i++)
915 aspeed_spi_write_to_ahb(flash->ahb_base, &dummy, 1);
916
917 if (flash->read_iomode) {
918 clrbits_le32(&priv->regs->ce_ctrl[flash->cs],
919 CE_CTRL_IO_MODE_MASK);
920 setbits_le32(&priv->regs->ce_ctrl[flash->cs], flash->read_iomode);
921 }
922
923 aspeed_spi_read_from_ahb(flash->ahb_base, read_buf, len);
924 aspeed_spi_stop_user(priv, flash);
925
926 return 0;
927 }
928
929 static ssize_t aspeed_spi_read_sfdp(struct aspeed_spi_priv *priv,
930 struct aspeed_spi_flash *flash,
931 unsigned int cmdlen, const u8 *cmdbuf,
932 unsigned int len, u8 *read_buf)
933 {
934 u8 dummy = 0x00;
935 int i;
936 struct aspeed_spi_op op =
937 ASPEED_SPI_OP(flash->read_iomode,
938 ASPEED_SPI_OP_CMD(cmdbuf[0]),
939 ASPEED_SPI_OP_ADDR(0, 3),
940 ASPEED_SPI_OP_DUMMY(flash->spi->read_dummy / 8),
941 ASPEED_SPI_OP_DATA_IN(len, read_buf));
942
943 if (priv->spi_exec_op_cmd) {
944 op.addr.val = aspeed_spi_flash_to_addr(flash, cmdbuf, op.addr.nbytes + 1);
945 priv->spi_exec_op_cmd(priv, flash, &op);
946 return 0;
947 }
948
949 /* only 1-1-1 mode is used to read SFDP */
950 aspeed_spi_start_user(priv, flash);
951
952 /* cmd buffer = cmd + addr + dummies */
953 aspeed_spi_send_cmd_addr(priv, flash, cmdbuf,
954 cmdlen - (flash->spi->read_dummy / 8), 0);
955
956 for (i = 0; i < (flash->spi->read_dummy / 8); i++)
957 aspeed_spi_write_to_ahb(flash->ahb_base, &dummy, 1);
958
959 aspeed_spi_read_from_ahb(flash->ahb_base, read_buf, len);
960 aspeed_spi_stop_user(priv, flash);
961
962 return 0;
963 }
964
965 static ssize_t aspeed_spi_write_user(struct aspeed_spi_priv *priv,
966 struct aspeed_spi_flash *flash,
967 unsigned int cmdlen, const u8 *cmdbuf,
968 unsigned int len, const u8 *write_buf)
969 {
970 struct aspeed_spi_op op =
971 ASPEED_SPI_OP(flash->write_iomode,
972 ASPEED_SPI_OP_CMD(cmdbuf[0]),
973 ASPEED_SPI_OP_ADDR(0, 0),
974 ASPEED_SPI_OP_DUMMY(0),
975 ASPEED_SPI_OP_DATA_OUT(len, write_buf));
976
977 if (priv->spi_exec_op_cmd) {
978 op.addr.nbytes = cmdlen - 1;
979 op.addr.val = aspeed_spi_flash_to_addr(flash, cmdbuf, op.addr.nbytes + 1);
980 priv->spi_exec_op_cmd(priv, flash, &op);
981 return 0;
982 }
983
984 aspeed_spi_start_user(priv, flash);
985
986 /* cmd buffer = cmd + addr : the command itself is normally sent in single-bit mode */
987 aspeed_spi_send_cmd_addr(priv, flash, cmdbuf, cmdlen, SPI_WRITE_TO_FLASH);
988
989 /* the data phase uses the configured IO mode */
990 if (flash->write_iomode == CE_CTRL_IO_QUAD_DATA)
991 writel(flash->ce_ctrl_user | flash->write_iomode, &priv->regs->ce_ctrl[flash->cs]);
992
993 aspeed_spi_write_to_ahb(flash->ahb_base, write_buf, len);
994
995 aspeed_spi_stop_user(priv, flash);
996
997 return 0;
998 }
999
1000 static u32 aspeed_spi_flash_to_addr(struct aspeed_spi_flash *flash,
1001 const u8 *cmdbuf, unsigned int cmdlen)
1002 {
1003 u8 addrlen = cmdlen - 1;
1004 u32 addr = (cmdbuf[1] << 16) | (cmdbuf[2] << 8) | cmdbuf[3];
1005
1006 /*
1007 * The U-Boot SPI flash layer uses 3-byte addresses, but that might
1008 * change one day
1009 */
1010 if (addrlen == 4)
1011 addr = (addr << 8) | cmdbuf[4];
1012
1013 return addr;
1014 }
1015
1016 /* TODO(clg@kaod.org): add support for XFER_MMAP instead ? */
1017 static ssize_t aspeed_spi_read(struct aspeed_spi_priv *priv,
1018 struct aspeed_spi_flash *flash,
1019 unsigned int cmdlen, const u8 *cmdbuf,
1020 unsigned int len, u8 *read_buf)
1021 {
1022 /* cmd buffer = cmd + addr + dummies */
1023 u32 offset = aspeed_spi_flash_to_addr(flash, cmdbuf,
1024 cmdlen - (flash->spi->read_dummy/8));
1025 struct aspeed_spi_op op =
1026 ASPEED_SPI_OP(flash->read_iomode,
1027 ASPEED_SPI_OP_CMD(cmdbuf[0]),
1028 ASPEED_SPI_OP_ADDR(0, 0),
1029 ASPEED_SPI_OP_DUMMY(flash->spi->read_dummy / 8),
1030 ASPEED_SPI_OP_DATA_IN(len, read_buf));
1031
1032 if (priv->spi_exec_op_cmd) {
1033 op.addr.nbytes = cmdlen - 1 - op.dummy.nbytes;
1034 op.addr.val = aspeed_spi_flash_to_addr(flash, cmdbuf, op.addr.nbytes + 1);
1035 priv->spi_exec_op_cmd(priv, flash, &op);
1036 return 0;
1037 }
1038
1039 /*
1040 * Switch to USER command mode:
1041 * - when reading SFDP content,
1042 * - when the AHB window configured for the device is
1043 * too small for the read operation,
1044 * - when the read offset is smaller than the decoded start address
1045 * and the decoded range is not a multiple of the flash size.
1046 */
1047 if ((offset + len >= flash->ahb_size) || \
1048 (offset < ((int)flash->ahb_base & 0x0FFFFFFF) && \
1049 (((int)flash->ahb_base & 0x0FFFFFFF) % flash->spi->size) != 0)) {
1050 return aspeed_spi_read_user(priv, flash, cmdlen, cmdbuf,
1051 len, read_buf);
1052 }
1053
1054 memcpy_fromio(read_buf, flash->ahb_base + offset, len);
1055
1056 return 0;
1057 }
1058
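/*
 * Execute a SPI operation in command mode: build a CE control value from
 * the op (IO mode, opcode, dummy cycles, fast-read or write command
 * mode), program the 3B/4B address mode and the address/data mask in the
 * Command Control register, then trigger the transfer with a memcpy
 * to/from the decoded AHB window. The previous register settings are
 * restored afterwards.
 */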
1059 int aspeed_spi_exec_op_cmd_mode(struct aspeed_spi_priv *priv,
1060 struct aspeed_spi_flash *flash,
1061 struct aspeed_spi_op *op)
1062 {
1063 uint32_t cs = flash->cs;
1064 uint32_t ctrl_val;
1065 uint32_t addr_mode_reg, addr_mode_reg_backup;
1066 uint32_t addr_data_mask = 0;
1067 void __iomem *op_addr;
1068 const void *data_buf;
1069 uint32_t data_byte = 0;
1070 uint32_t dummy_data = 0;
1071
1072 debug("iomode: %08x, cmd:%02x, addr:%08x, dummy:%d, data_len:%x, dir: %s\n",
1073 op->io_mode, op->cmd.opcode, op->addr.val, op->dummy.nbytes,
1074 op->data.nbytes, op->data.dir == ASPEED_SPI_DIR_IN ? "in" : "out");
1075
1076 addr_mode_reg = readl(&priv->regs->ctrl);
1077 addr_mode_reg_backup = addr_mode_reg;
1078 addr_data_mask = readl(&priv->regs->cmd_ctrl);
1079
1080 ctrl_val = flash->ce_ctrl_fread & (~0xf0ff40c7);
1081 ctrl_val |= op->io_mode;
1082 /* configure opcode */
1083 ctrl_val |= op->cmd.opcode << 16;
1084
1085 /* configure operation address, address length and address mask */
1086 if (op->addr.nbytes != 0) {
1087 if (op->addr.nbytes == 3)
1088 addr_mode_reg &= ~(0x11 << cs);
1089 else
1090 addr_mode_reg |= (0x11 << cs);
1091
1092 addr_data_mask &= 0x0f;
1093 op_addr = flash->ahb_base + op->addr.val;
1094 } else {
1095 addr_data_mask |= 0xf0;
1096 op_addr = flash->ahb_base;
1097 }
1098
1099 if (op->dummy.nbytes != 0) {
1100 ctrl_val |= ((op->dummy.nbytes & 0x3) << 6 |
1101 ((op->dummy.nbytes & 0x4) >> 2) << 14);
1102 }
1103
1104 /* configure data io mode and data mask */
1105 if (op->data.nbytes != 0) {
1106 addr_data_mask &= 0xF0;
1107 if (op->data.nbytes < 4)
1108 addr_data_mask |= ~((1 << op->data.nbytes) - 1);
1109
1110 data_byte = op->data.nbytes;
1111 if (op->data.dir == ASPEED_SPI_DIR_OUT) {
1112 if (data_byte % 4 != 0) {
1113 memset(priv->tmp_buf, 0xff, ((data_byte / 4) + 1) * 4);
1114 memcpy(priv->tmp_buf, op->data.buf.out, data_byte);
1115 data_buf = priv->tmp_buf;
1116 data_byte = ((data_byte / 4) + 1) * 4;
1117 } else {
1118 data_buf = op->data.buf.out;
1119 }
1120 } else {
1121 data_buf = op->data.buf.in;
1122 }
1123 } else {
1124 addr_data_mask |= 0x0f;
1125 data_byte = 1;
1126 data_buf = &dummy_data;
1127 }
1128
1129 /* configure command mode */
1130 if (op->data.dir == ASPEED_SPI_DIR_OUT)
1131 ctrl_val |= CE_CTRL_WRITEMODE;
1132 else
1133 ctrl_val |= CE_CTRL_FREADMODE;
1134
1135 /* set controller registers */
1136 writel(ctrl_val, &priv->regs->ce_ctrl[cs]);
1137 writel(addr_mode_reg, &priv->regs->ctrl);
1138 writel(addr_data_mask, &priv->regs->cmd_ctrl);
1139
1140 debug("ctrl: 0x%08x, addr_mode: 0x%x, mask: 0x%x, addr:0x%08x\n",
1141 ctrl_val, addr_mode_reg, addr_data_mask, (uint32_t)op_addr);
1142
1143 /* trigger spi transmission or reception sequence */
1144 if (op->data.dir == ASPEED_SPI_DIR_OUT)
1145 memcpy_toio(op_addr, data_buf, data_byte);
1146 else
1147 memcpy_fromio((void *)data_buf, op_addr, data_byte);
1148
1149 /* restore controller setting */
1150 writel(flash->ce_ctrl_fread, &priv->regs->ce_ctrl[cs]);
1151 writel(addr_mode_reg_backup, &priv->regs->ctrl);
1152 writel(0x0, &priv->regs->cmd_ctrl);
1153
1154 return 0;
1155 }
1156
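/*
 * U-Boot SPI transfers arrive in two phases: SPI_XFER_BEGIN caches the
 * opcode and address bytes in priv->cmd_buf, and a later SPI_XFER_END
 * (or middle) transfer performs the actual register access, SFDP read,
 * user-mode access or memory-mapped read using the cached command.
 */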
1157 static int aspeed_spi_xfer(struct udevice *dev, unsigned int bitlen,
1158 const void *dout, void *din, unsigned long flags)
1159 {
1160 struct udevice *bus = dev->parent;
1161 struct aspeed_spi_priv *priv = dev_get_priv(bus);
1162 struct aspeed_spi_flash *flash;
1163 u8 *cmd_buf = priv->cmd_buf;
1164 size_t data_bytes;
1165 int err = 0;
1166 u32 iomode;
1167
1168 flash = aspeed_spi_get_flash(dev);
1169 if (!flash)
1170 return -ENXIO;
1171
1172 if (flags & SPI_XFER_BEGIN) {
1173 /* save command in progress */
1174 priv->cmd_len = bitlen / 8;
1175 memcpy(cmd_buf, dout, priv->cmd_len);
1176 }
1177
1178 if (flags == (SPI_XFER_BEGIN | SPI_XFER_END)) {
1179 /* if both the begin and end flags are set, there are no data bytes. */
1180 data_bytes = 0;
1181 } else {
1182 data_bytes = bitlen / 8;
1183 }
1184
1185 debug("CS%u: %s cmd %zu bytes data %zu bytes\n", flash->cs,
1186 din ? "read" : "write", priv->cmd_len, data_bytes);
1187
1188 if ((flags & SPI_XFER_END) || flags == 0) {
1189 if (priv->cmd_len == 0) {
1190 pr_err("No command is progress !\n");
1191 return -1;
1192 }
1193
1194 if (din && data_bytes) {
1195 if (priv->cmd_len == 1) {
1196 err = aspeed_spi_read_reg(priv, flash,
1197 cmd_buf[0],
1198 din, data_bytes);
1199 } else if (cmd_buf[0] == SPINOR_OP_RDSFDP) {
1200 err = aspeed_spi_read_sfdp(priv, flash,
1201 priv->cmd_len,
1202 cmd_buf, data_bytes,
1203 din);
1204 } else if (cmd_buf[0] == SPINOR_OP_RDAR) {
1205 /* only for Cypress flash */
1206 iomode = flash->read_iomode;
1207 flash->read_iomode = 0;
1208 err = aspeed_spi_read_user(priv, flash,
1209 priv->cmd_len,
1210 cmd_buf, data_bytes,
1211 din);
1212 flash->read_iomode = iomode;
1213 } else {
1214 err = aspeed_spi_read(priv, flash,
1215 priv->cmd_len,
1216 cmd_buf, data_bytes,
1217 din);
1218 }
1219 } else if (dout) {
1220 if (priv->cmd_len == 1) {
1221 err = aspeed_spi_write_reg(priv, flash,
1222 cmd_buf[0],
1223 dout, data_bytes);
1224 } else {
1225 err = aspeed_spi_write_user(priv, flash,
1226 priv->cmd_len,
1227 cmd_buf, data_bytes,
1228 dout);
1229 }
1230 }
1231
1232 if (flags & SPI_XFER_END) {
1233 /* clear command */
1234 memset(cmd_buf, 0, sizeof(priv->cmd_buf));
1235 priv->cmd_len = 0;
1236 }
1237 }
1238
1239 return err;
1240 }
1241
1242 #ifdef CONFIG_ASPEED_SPI_FLASH_WRITE_PROTECTION
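/*
 * Write protection relies on the command filter: the flash read opcode
 * is placed in a free Fully Qualified Command slot, the erase and
 * program opcodes in Address Qualified Command slots, the filter is
 * enabled for the chip select, and 4KB-granular write address filter
 * ranges are programmed. The intent is that only the registered
 * commands (and, for the address-qualified ones, only outside the
 * protected ranges) are accepted by the controller.
 */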
1243 static void aspeed_spi_fill_FQCD(struct aspeed_spi_priv *priv, u8 cmd)
1244 {
1245 u32 reg_val;
1246 u32 i;
1247
1248 for (i = 0; i < 20; i++) {
1249 reg_val = readl(&priv->regs->fully_qualified_cmd[i]);
1250 if ((u8)(reg_val & 0xff) == cmd ||
1251 (u8)((reg_val & 0xff00) >> 8) == cmd) {
1252 if ((reg_val & 0x80000000) == 0x80000000) {
1253 debug("cmd: %02x already exists in FQCD.\n", cmd);
1254 return;
1255 }
1256 }
1257 }
1258
1259 for (i = 0; i < 20; i++) {
1260 reg_val = readl(&priv->regs->fully_qualified_cmd[i]);
1261 if ((reg_val & 0x80000000) == 0x80000000) {
1262 if ((u8)(reg_val & 0xff) == 0x0) {
1263 reg_val |= (u32)cmd;
1264 debug("[%d]fill %02x cmd in FQCD%02d.\n", __LINE__, cmd, i);
1265 writel(reg_val, &priv->regs->fully_qualified_cmd[i]);
1266 return;
1267 } else if ((u8)((reg_val & 0xff00) >> 8) == 0x0) {
1268 reg_val |= ((u32)cmd) << 8;
1269 debug("[%d]fill %02x cmd in FQCD%02d.\n", __LINE__, cmd, i);
1270 writel(reg_val, &priv->regs->fully_qualified_cmd[i]);
1271 return;
1272 }
1273 }
1274 }
1275
1276 for (i = 0; i < 20; i++) {
1277 reg_val = readl(&priv->regs->fully_qualified_cmd[i]);
1278 if (reg_val == 0) {
1279 reg_val = 0x80000000 | (u32)cmd;
1280 debug("[%d]fill %02x cmd in FQCD%02d.\n", __LINE__, cmd, i);
1281 writel(reg_val, &priv->regs->fully_qualified_cmd[i]);
1282 return;
1283 }
1284 }
1285 }
1286
1287 static void aspeed_spi_fill_AQCD(struct aspeed_spi_priv *priv, u8 cmd, u8 addr_width)
1288 {
1289 u32 reg_val;
1290 u32 i;
1291 u32 bit_offset;
1292
1293 if (addr_width != 3 && addr_width != 4) {
1294 printf("wrong address width: %d.\n", addr_width);
1295 return;
1296 }
1297
1298 bit_offset = (addr_width - 3) * 8;
1299
1300 for (i = 0; i < 12; i++) {
1301 reg_val = readl(&priv->regs->addr_qualified_cmd[i]);
1302 if ((reg_val & 0x80000000) == 0x80000000) {
1303 if ((u8)((reg_val & (0xff << bit_offset)) >> bit_offset) == cmd) {
1304 debug("cmd: %02x already exists in AQCD.\n", cmd);
1305 return;
1306 }
1307 }
1308 }
1309
1310 for (i = 0; i < 12; i++) {
1311 reg_val = readl(&priv->regs->addr_qualified_cmd[i]);
1312 if ((reg_val & 0x80000000) == 0x80000000) {
1313 if ((u8)((reg_val & (0xff << bit_offset)) >> bit_offset) == 0x0) {
1314 reg_val |= ((u32)cmd << bit_offset);
1315 debug("fill %02x cmd in AQCD%02d.\n", cmd, i);
1316 writel(reg_val, &priv->regs->addr_qualified_cmd[i]);
1317 return;
1318 }
1319 }
1320
1321 if (reg_val == 0) {
1322 reg_val = 0x80000000 | ((u32)cmd << bit_offset);
1323 debug("fill %02x cmd in AQCD%02d.\n", cmd, i);
1324 writel(reg_val, &priv->regs->addr_qualified_cmd[i]);
1325 return;
1326 }
1327 }
1328 }
1329
1330 static void aspeed_spi_cmd_filter_config(struct aspeed_spi_priv *priv,
1331 u32 cs, bool enable)
1332 {
1333 u32 reg_val;
1334
1335 reg_val = readl(&priv->regs->write_cmd_filter_ctrl);
1336
1337 if (enable)
1338 reg_val |= BIT(cs);
1339 else
1340 reg_val &= ~BIT(cs);
1341
1342 writel(reg_val, &priv->regs->write_cmd_filter_ctrl);
1343 }
1344
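/*
 * Each write address filter register describes a protected range with
 * 4KB granularity: bits [15:0] hold (start >> 12) and bits [31:16] hold
 * (end >> 12). The sanity check below rejects a new range that overlaps
 * any already-enabled entry.
 */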
1345 static int aspeed_spi_write_addr_ftr_sanity(struct aspeed_spi_priv *priv,
1346 u32 offset, size_t len)
1347 {
1348 u32 addr_ftr_ctrl;
1349 u32 reg_val;
1350 u32 start;
1351 u32 end;
1352 u32 i;
1353
1354 addr_ftr_ctrl = readl(&priv->regs->write_addr_filter_ctrl);
1355 for (i = 0; i < 8; i++) {
1356 if ((addr_ftr_ctrl & (0x3 << (i * 2))) == 0)
1357 continue;
1358 reg_val = readl(&priv->regs->write_addr_filter[i]);
1359 start = (reg_val & 0xffff) << 12;
1360 end = (((reg_val & 0xffff0000) >> 16) << 12) | 0xFFF;
1361
1362 if (offset >= start && offset < end)
1363 return -1;
1364 else if ((offset + len) > start && (offset + len) < end)
1365 return -1;
1366 }
1367
1368 return 0;
1369 }
1370
1371 static int aspeed_add_write_addr_ftr(struct aspeed_spi_priv *priv,
1372 u32 offset, size_t len)
1373 {
1374 u32 addr_ftr_ctrl;
1375 u32 reg_val;
1376 u32 start;
1377 u32 end;
1378 u32 i;
1379
1380 if ((offset & 0xfff) != 0) {
1381 offset &= 0xfffff000;
1382 printf("protected start address will be entend to 0x%08x.\n",
1383 offset);
1384 }
1385
1386 if ((len & 0xfff) != 0) {
1387 len &= 0xfffff000;
1388 printf("protected len will be trimmed to 0x%x.\n", (u32)len);
1389 }
1390
1391 if (len == 0) {
1392 printf("invalid protect len: 0x%x.\n", len);
1393 return -1;
1394 }
1395
1396 addr_ftr_ctrl = readl(&priv->regs->write_addr_filter_ctrl);
1397 for (i = 0; i < 8; i++) {
1398 if ((addr_ftr_ctrl & (0x3 << (i * 2))) == 0) {
1399 start = offset;
1400 end = offset + len - 1;
1401
1402 reg_val = (start >> 12) | ((end >> 12) << 16);
1403
1404 debug("start: 0x%08x, end: 0x%08x, val: 0x%08x.\n",
1405 start, end, reg_val);
1406
1407 writel(reg_val, &priv->regs->write_addr_filter[i]);
1408 addr_ftr_ctrl |= 0x3 << (i * 2);
1409 writel(addr_ftr_ctrl, &priv->regs->write_addr_filter_ctrl);
1410
1411 printf("apply write lock from offset, 0x%08x, with len, 0x%08x.\n",
1412 offset, (u32)len);
1413
1414 break;
1415 }
1416 }
1417
1418 if (i == 8) {
1419 printf("insufficient write address filter register.\n");
1420 return -1;
1421 }
1422
1423 return 0;
1424 }
1425
1426 static int aspeed_remove_write_addr_ftr(struct aspeed_spi_priv *priv,
1427 u32 offset, size_t len)
1428 {
1429 u32 addr_ftr_ctrl;
1430 u32 reg_val;
1431 u32 bit_mask;
1432 u32 start;
1433 u32 end;
1434 u32 i;
1435
1436 if ((offset & 0xfff) != 0) {
1437 printf("start address should be aligned to 0x1000.\n");
1438 return -1;
1439 }
1440
1441 if ((len & 0xfff) != 0) {
1442 printf("removed length should be aligned to 0x1000.\n");
1443 return -1;
1444 }
1445
1446 if (len == 0) {
1447 printf("invalid removed length!\n");
1448 return -1;
1449 }
1450
1451 addr_ftr_ctrl = readl(&priv->regs->write_addr_filter_ctrl);
1452 for (i = 0; i < 8; i++) {
1453 bit_mask = 0x3 << (i * 2);
1454 if ((addr_ftr_ctrl & bit_mask) != bit_mask)
1455 continue;
1456
1457 reg_val = readl(&priv->regs->write_addr_filter[i]);
1458 start = (reg_val & 0xffff) << 12;
1459 end = (((reg_val & 0xffff0000) >> 16) << 12) + 0x1000;
1460
1461 if (offset != start || offset + len != end)
1462 continue;
1463
1464 addr_ftr_ctrl &= ~(0x3 << (i * 2));
1465 writel(addr_ftr_ctrl, &priv->regs->write_addr_filter_ctrl);
1466 writel(0x0, &priv->regs->write_addr_filter[i]);
1467 printf("remove write lock from offset, 0x%08x, with len, 0x%08x.\n",
1468 offset, (u32)len);
1469 break;
1470 }
1471
1472 if (i == 8) {
1473 printf("cannot find expected removed region.\n");
1474 return -1;
1475 }
1476
1477 return 0;
1478 }
1479
1480 static int aspeed_spi_mem_wlock(struct udevice *dev, u32 offset, size_t len)
1481 {
1482 struct udevice *bus = dev->parent;
1483 struct aspeed_spi_priv *priv = dev_get_priv(bus);
1484 struct aspeed_spi_flash *flash;
1485 struct spi_nor *nor;
1486 int ret;
1487
1488 debug("%s offset: 0x%08x, len: 0x%08x.\n", __func__, offset, (u32)len);
1489
1490 flash = aspeed_spi_get_flash(dev);
1491 if (!flash)
1492 return -ENXIO;
1493
1494 nor = flash->spi;
1495
1496 debug("name: %s, read cmd: %02x, erase cmd: %02x, write cmd: %02x.\n",
1497 nor->name, nor->read_opcode, nor->erase_opcode, nor->program_opcode);
1498
1499 /* enable address filter */
1500 aspeed_spi_fill_FQCD(priv, nor->read_opcode);
1501 aspeed_spi_fill_AQCD(priv, nor->erase_opcode, nor->addr_width);
1502 aspeed_spi_fill_AQCD(priv, nor->program_opcode, nor->addr_width);
1503 aspeed_spi_cmd_filter_config(priv, flash->cs, true);
1504
1505 ret = aspeed_spi_write_addr_ftr_sanity(priv, offset, len);
1506 if (ret < 0) {
1507 printf("The expected protect region overlays with the existed regions!\n");
1508 return ret;
1509 }
1510
1511 ret = aspeed_add_write_addr_ftr(priv, offset, len);
1512 if (ret < 0)
1513 return -1;
1514
1515 return 0;
1516 }
1517
1518 static int aspeed_spi_mem_wunlock(struct udevice *dev, u32 offset, size_t len)
1519 {
1520 struct udevice *bus = dev->parent;
1521 struct aspeed_spi_priv *priv = dev_get_priv(bus);
1522 int ret;
1523
1524 ret = aspeed_remove_write_addr_ftr(priv, offset, len);
1525 if (ret < 0)
1526 return -1;
1527
1528 return 0;
1529 }
1530 #endif
1531
1532 static int aspeed_spi_child_pre_probe(struct udevice *dev)
1533 {
1534 struct dm_spi_slave_platdata *slave_plat = dev_get_parent_platdata(dev);
1535
1536 debug("pre_probe slave device on CS%u, max_hz %u, mode 0x%x.\n",
1537 slave_plat->cs, slave_plat->max_hz, slave_plat->mode);
1538
1539 if (!aspeed_spi_get_flash(dev))
1540 return -ENXIO;
1541
1542 return 0;
1543 }
1544
1545 /*
1546 * AST2600 SPI memory controllers support multiple chip selects.
1547 * The start address of a decode range should be a multiple
1548 * of its flash size. Namely, the total decoded size
1549 * from flash 0 to flash N should be a multiple of the size of flash (N + 1).
1550 */
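/*
 * For example, with decode_sz_arr = { 32MB, 64MB }, CE1 would start at
 * offset 32MB, which is not a multiple of its 64MB window, so the CE0
 * window is grown by 32MB and both windows become 64MB.
 */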
1551 void aspeed_g6_adjust_decode_sz(u32 decode_sz_arr[], int len)
1552 {
1553 int cs, j;
1554 u32 sz;
1555
1556 for (cs = len - 1; cs >= 0; cs--) {
1557 sz = 0;
1558 for (j = 0; j < cs; j++)
1559 sz += decode_sz_arr[j];
1560
1561 if (sz % decode_sz_arr[cs] != 0)
1562 decode_sz_arr[0] += (sz % decode_sz_arr[cs]);
1563 }
1564 }
1565
1566 /*
1567 * It is possible to automatically define a contiguous address space
1568 * on top of all CEs in the AHB window of the controller but it would
1569 * require much more work. Let's start with a simple mapping scheme
1570 * which should work fine for a single flash device.
1571 *
1572 * More complex schemes should probably be defined with the device
1573 * tree.
1574 */
1575 static int aspeed_spi_flash_set_segment(struct aspeed_spi_priv *priv,
1576 struct aspeed_spi_flash *flash)
1577 {
1578 u32 seg_addr;
1579 u32 decode_sz_arr[ASPEED_SPI_MAX_CS];
1580 u32 reg_val;
1581 u32 cs;
1582 u32 total_decode_sz = 0;
1583 u32 cur_offset = 0;
1584
1585 /* could be configured through the device tree */
1586 flash->ahb_size = flash->spi->size;
1587
1588 if (priv->new_ver) {
1589 for (cs = 0; cs < ASPEED_SPI_MAX_CS; cs++) {
1590 reg_val = readl(&priv->regs->segment_addr[cs]);
1591 if (reg_val != 0 &&
1592 G6_SEGMENT_ADDR_END(reg_val) > G6_SEGMENT_ADDR_START(reg_val)) {
1593 decode_sz_arr[cs] =
1594 G6_SEGMENT_ADDR_END(reg_val) - G6_SEGMENT_ADDR_START(reg_val);
1595 } else {
1596 decode_sz_arr[cs] = 0;
1597 }
1598 }
1599
1600 decode_sz_arr[flash->cs] = flash->ahb_size;
1601 aspeed_g6_adjust_decode_sz(decode_sz_arr, flash->cs + 1);
1602
1603 for (cs = 0; cs < ASPEED_SPI_MAX_CS; cs++)
1604 total_decode_sz += decode_sz_arr[cs];
1605
1606 if (total_decode_sz > priv->ahb_size) {
1607 printf("err: Total decoded size, 0x%x, is too large.\n", total_decode_sz);
1608 return -ENOMEM;
1609 }
1610
1611 for (cs = 0; cs < ASPEED_SPI_MAX_CS; cs++) {
1612 struct aspeed_spi_flash *flash = &priv->flashes[cs];
1613
1614 flash->ahb_base = (void __iomem *)((u32)priv->ahb_base + cur_offset);
1615
1616 if (decode_sz_arr[cs] != 0) {
1617 seg_addr = G6_SEGMENT_ADDR_VALUE((u32)flash->ahb_base,
1618 (u32)flash->ahb_base + decode_sz_arr[cs]);
1619 } else {
1620 seg_addr = 0;
1621 }
1622
1623 writel(seg_addr, &priv->regs->segment_addr[cs]);
1624 flash->ahb_size = decode_sz_arr[cs];
1625 cur_offset += decode_sz_arr[cs];
1626 }
1627 } else {
1628 seg_addr = SEGMENT_ADDR_VALUE((u32)flash->ahb_base,
1629 (u32)flash->ahb_base + flash->ahb_size);
1630 writel(seg_addr, &priv->regs->segment_addr[flash->cs]);
1631 }
1632
1633 return 0;
1634 }
1635
1636 static int aspeed_spi_flash_init(struct aspeed_spi_priv *priv,
1637 struct aspeed_spi_flash *flash,
1638 struct udevice *dev)
1639 {
1640 int ret;
1641 struct spi_flash *spi_flash = dev_get_uclass_priv(dev);
1642 struct spi_slave *slave = dev_get_parent_priv(dev);
1643 struct udevice *bus = dev->parent;
1644 u32 read_hclk;
1645
1646 flash->spi = spi_flash;
1647
1648 /*
1649 * The flash device has not been probed yet. Initial transfers
1650 * to read the JEDEC of the device will use the initial
1651 * default settings of the registers.
1652 */
1653 if (!spi_flash->name)
1654 return 0;
1655
1656 /*
1657 * The SPI flash device slave should not change, so initialize
1658 * it only once.
1659 */
1660 if (flash->init)
1661 return 0;
1662
1663 debug("CS%u: init %s flags:%x size:%d page:%d sector:%d erase:%d",
1664 flash->cs,
1665 spi_flash->name, spi_flash->flags, spi_flash->size,
1666 spi_flash->page_size, spi_flash->sector_size,
1667 spi_flash->erase_size);
1668 debug(" cmds [ erase:%x read=%x write:%x ] dummy:%d, speed:%d\n",
1669 spi_flash->erase_opcode,
1670 spi_flash->read_opcode, spi_flash->program_opcode,
1671 spi_flash->read_dummy, slave->speed);
1672
1673 flash->ce_ctrl_user = CE_CTRL_USERMODE;
1674 flash->max_freq = slave->speed;
1675
1676 if (priv->new_ver)
1677 read_hclk = aspeed_g6_spi_hclk_divisor(priv, slave->speed);
1678 else
1679 read_hclk = aspeed_spi_hclk_divisor(priv, slave->speed);
1680
1681 switch (flash->spi->read_opcode) {
1682 case SPINOR_OP_READ:
1683 case SPINOR_OP_READ_4B:
1684 flash->read_iomode = CE_CTRL_IO_SINGLE;
1685 break;
1686 case SPINOR_OP_READ_1_1_2:
1687 case SPINOR_OP_READ_1_1_2_4B:
1688 flash->read_iomode = CE_CTRL_IO_DUAL_DATA;
1689 break;
1690 case SPINOR_OP_READ_1_1_4:
1691 case SPINOR_OP_READ_1_1_4_4B:
1692 flash->read_iomode = CE_CTRL_IO_QUAD_DATA;
1693 break;
1694 case SPINOR_OP_READ_1_4_4:
1695 case SPINOR_OP_READ_1_4_4_4B:
1696 flash->read_iomode = CE_CTRL_IO_QUAD_ADDR_DATA;
1697 printf("need modify dummy for 3 bytes\n");
1698 break;
1699 }
1700
1701 switch (flash->spi->program_opcode) {
1702 case SPINOR_OP_PP:
1703 case SPINOR_OP_PP_4B:
1704 flash->write_iomode = CE_CTRL_IO_SINGLE;
1705 break;
1706 case SPINOR_OP_PP_1_1_4:
1707 case SPINOR_OP_PP_1_1_4_4B:
1708 flash->write_iomode = CE_CTRL_IO_QUAD_DATA;
1709 break;
1710 case SPINOR_OP_PP_1_4_4:
1711 case SPINOR_OP_PP_1_4_4_4B:
1712 flash->write_iomode = CE_CTRL_IO_QUAD_ADDR_DATA;
1713 printf("need modify dummy for 3 bytes");
1714 break;
1715 }
1716
1717 if (priv->new_ver) {
1718 flash->ce_ctrl_fread = CE_G6_CTRL_CLOCK_FREQ(read_hclk) |
1719 flash->read_iomode |
1720 CE_CTRL_CMD(flash->spi->read_opcode) |
1721 CE_CTRL_DUMMY((flash->spi->read_dummy/8)) |
1722 CE_CTRL_FREADMODE;
1723 flash->ce_ctrl_user |= CE_G6_CTRL_CLOCK_FREQ(read_hclk);
1724 } else {
1725 flash->ce_ctrl_fread = CE_CTRL_CLOCK_FREQ(read_hclk) |
1726 flash->read_iomode |
1727 CE_CTRL_CMD(flash->spi->read_opcode) |
1728 CE_CTRL_DUMMY((flash->spi->read_dummy/8)) |
1729 CE_CTRL_FREADMODE;
1730 }
1731
1732 if (flash->spi->addr_width == 4)
1733 writel(readl(&priv->regs->ctrl) | 0x11 << flash->cs, &priv->regs->ctrl);
1734
1735 debug("CS%u: USER mode 0x%08x FREAD mode 0x%08x\n", flash->cs,
1736 flash->ce_ctrl_user, flash->ce_ctrl_fread);
1737
1738 /* Set the CE Control Register default (FAST READ) */
1739 writel(flash->ce_ctrl_fread, &priv->regs->ce_ctrl[flash->cs]);
1740
1741 /* Set Address Segment Register for direct AHB accesses */
1742 ret = aspeed_spi_flash_set_segment(priv, flash);
1743 if (ret != 0)
1744 return ret;
1745
1746 /*
1747 * Set the Read Timing Compensation Register. This setting
1748 * applies to all devices.
1749 */
1750 if (!dev_read_bool(bus, "timing-calibration-disabled")) {
1751 ret = aspeed_spi_timing_calibration(priv, flash);
1752 if (ret != 0)
1753 return ret;
1754 }
1755
1756 /* All done */
1757 flash->init = true;
1758
1759 return 0;
1760 }
1761
1762 static int aspeed_spi_claim_bus(struct udevice *dev)
1763 {
1764 struct udevice *bus = dev->parent;
1765 struct aspeed_spi_priv *priv = dev_get_priv(bus);
1766 struct dm_spi_slave_platdata *slave_plat = dev_get_parent_platdata(dev);
1767 struct aspeed_spi_flash *flash;
1768 struct spi_slave *slave = dev_get_parent_priv(dev);
1769 u32 read_hclk;
1770
1771 debug("%s: claim bus CS%u\n", bus->name, slave_plat->cs);
1772
1773 flash = aspeed_spi_get_flash(dev);
1774 if (!flash)
1775 return -ENODEV;
1776
1777 if (priv->new_ver) {
1778 if (dev_read_bool(bus, "timing-calibration-disabled")) {
1779 read_hclk = aspeed_g6_spi_hclk_divisor(priv, slave->speed);
1780 flash->ce_ctrl_user &= CE_CTRL_FREQ_MASK;
1781 flash->ce_ctrl_user |= CE_G6_CTRL_CLOCK_FREQ(read_hclk);
1782 flash->ce_ctrl_fread &= CE_CTRL_FREQ_MASK;
1783 flash->ce_ctrl_fread |= CE_G6_CTRL_CLOCK_FREQ(read_hclk);
1784 }
1785 }
1786
1787 return aspeed_spi_flash_init(priv, flash, dev);
1788 }
1789
1790 static int aspeed_spi_release_bus(struct udevice *dev)
1791 {
1792 struct udevice *bus = dev->parent;
1793 struct dm_spi_slave_platdata *slave_plat = dev_get_parent_platdata(dev);
1794
1795 debug("%s: release bus CS%u\n", bus->name, slave_plat->cs);
1796
1797 if (!aspeed_spi_get_flash(dev))
1798 return -ENODEV;
1799
1800 return 0;
1801 }
1802
1803 static int aspeed_spi_set_mode(struct udevice *bus, uint mode)
1804 {
1805 debug("%s: setting mode to %x\n", bus->name, mode);
1806
1807 if (mode & (SPI_RX_QUAD | SPI_TX_QUAD)) {
1808 #ifndef CONFIG_ASPEED_AST2600
1809 pr_err("%s invalid QUAD IO mode\n", bus->name);
1810 return -EINVAL;
1811 #endif
1812 }
1813
1814 /* The CE Control Register is set in claim_bus() */
1815 return 0;
1816 }
1817
1818 static int aspeed_spi_set_speed(struct udevice *bus, uint hz)
1819 {
1820 debug("%s: setting speed to %u\n", bus->name, hz);
1821
1822 /* The CE Control Register is set in claim_bus() */
1823 return 0;
1824 }
1825
1826 static int aspeed_spi_count_flash_devices(struct udevice *bus)
1827 {
1828 ofnode node;
1829 int count = 0;
1830
1831 dev_for_each_subnode(node, bus) {
1832 if (ofnode_is_available(node) &&
1833 (ofnode_device_is_compatible(node, "spi-flash") ||
1834 ofnode_device_is_compatible(node, "jedec,spi-nor")))
1835 count++;
1836 }
1837
1838 return count;
1839 }
1840
1841 static int aspeed_spi_bind(struct udevice *bus)
1842 {
1843 debug("%s assigned req_seq=%d seq=%d\n", bus->name, bus->req_seq,
1844 bus->seq);
1845
1846 return 0;
1847 }
1848
1849 static int aspeed_spi_probe(struct udevice *bus)
1850 {
1851 struct resource res_regs, res_ahb;
1852 struct aspeed_spi_priv *priv = dev_get_priv(bus);
1853 struct clk hclk;
1854 int ret;
1855
1856 ret = dev_read_resource(bus, 0, &res_regs);
1857 if (ret < 0)
1858 return ret;
1859
1860 priv->regs = (void __iomem *)res_regs.start;
1861
1862 ret = dev_read_resource(bus, 1, &res_ahb);
1863 if (ret < 0)
1864 return ret;
1865
1866 priv->ahb_base = (void __iomem *)res_ahb.start;
1867 priv->ahb_size = res_ahb.end - res_ahb.start + 1;
1868
1869 ret = clk_get_by_index(bus, 0, &hclk);
1870 if (ret < 0) {
1871 pr_err("%s could not get clock: %d\n", bus->name, ret);
1872 return ret;
1873 }
1874
1875 priv->hclk_rate = clk_get_rate(&hclk);
1876 clk_free(&hclk);
1877
1878 priv->num_cs = dev_read_u32_default(bus, "num-cs", ASPEED_SPI_MAX_CS);
1879
1880 priv->flash_count = aspeed_spi_count_flash_devices(bus);
1881 if (priv->flash_count > priv->num_cs) {
1882 pr_err("%s has too many flash devices: %d\n", bus->name,
1883 priv->flash_count);
1884 return -EINVAL;
1885 }
1886
1887 if (!priv->flash_count) {
1888 pr_err("%s has no flash devices ?!\n", bus->name);
1889 return -ENODEV;
1890 }
1891
1892 if (device_is_compatible(bus, "aspeed,ast2600-fmc") ||
1893 device_is_compatible(bus, "aspeed,ast2600-spi")) {
1894 priv->new_ver = 1;
1895 }
1896
1897 if (dev_read_bool(bus, "aspeed-spi-command-mode")) {
1898 debug("adopt command mode\n");
1899 priv->tmp_buf = memalign(4, 512);
1900 priv->spi_exec_op_cmd = aspeed_spi_exec_op_cmd_mode;
1901 } else {
1902 priv->spi_exec_op_cmd = NULL;
1903 }
1904
1905 /*
1906 * There are some slight differences between the FMC and the
1907 * SPI controllers
1908 */
1909 priv->is_fmc = dev_get_driver_data(bus);
1910
1911 ret = aspeed_spi_controller_init(priv);
1912 if (ret)
1913 return ret;
1914
1915 debug("%s probed regs=%p ahb_base=%p cs_num=%d seq=%d\n",
1916 bus->name, priv->regs, priv->ahb_base, priv->flash_count, bus->seq);
1917
1918 return 0;
1919 }
1920
1921 static const struct dm_spi_ops aspeed_spi_ops = {
1922 .claim_bus = aspeed_spi_claim_bus,
1923 .release_bus = aspeed_spi_release_bus,
1924 .set_mode = aspeed_spi_set_mode,
1925 .set_speed = aspeed_spi_set_speed,
1926 .xfer = aspeed_spi_xfer,
1927 #ifdef CONFIG_ASPEED_SPI_FLASH_WRITE_PROTECTION
1928 .mem_ctrl_wlock = aspeed_spi_mem_wlock,
1929 .mem_ctrl_wunlock = aspeed_spi_mem_wunlock,
1930 #endif
1931 };
1932
1933 static const struct udevice_id aspeed_spi_ids[] = {
1934 { .compatible = "aspeed,ast2600-fmc", .data = 1 },
1935 { .compatible = "aspeed,ast2600-spi", .data = 0 },
1936 { .compatible = "aspeed,ast2500-fmc", .data = 1 },
1937 { .compatible = "aspeed,ast2500-spi", .data = 0 },
1938 { .compatible = "aspeed,ast2400-fmc", .data = 1 },
1939 { }
1940 };
1941
1942 U_BOOT_DRIVER(aspeed_spi) = {
1943 .name = "aspeed_spi",
1944 .id = UCLASS_SPI,
1945 .of_match = aspeed_spi_ids,
1946 .ops = &aspeed_spi_ops,
1947 .priv_auto_alloc_size = sizeof(struct aspeed_spi_priv),
1948 .child_pre_probe = aspeed_spi_child_pre_probe,
1949 .bind = aspeed_spi_bind,
1950 .probe = aspeed_spi_probe,
1951 };
1952