// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * ASPEED FMC/SPI Memory Controller Driver
 *
 * Copyright (c) 2015-2022, IBM Corporation.
 * Copyright (c) 2020, ASPEED Corporation.
 */

#include <linux/clk.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

#define DEVICE_NAME "spi-aspeed-smc"

/* Type setting Register */
#define CONFIG_REG			0x0
#define   CONFIG_TYPE_SPI		0x2

/* CE Control Register */
#define CE_CTRL_REG			0x4

/* CEx Control Register */
#define CE0_CTRL_REG			0x10
#define   CTRL_IO_MODE_MASK		GENMASK(30, 28)
#define   CTRL_IO_SINGLE_DATA		0x0
#define   CTRL_IO_DUAL_DATA		BIT(29)
#define   CTRL_IO_QUAD_DATA		BIT(30)
#define   CTRL_COMMAND_SHIFT		16
#define   CTRL_IO_ADDRESS_4B		BIT(13)	/* AST2400 SPI only */
#define   CTRL_IO_DUMMY_SET(dummy)					\
	(((((dummy) >> 2) & 0x1) << 14) | (((dummy) & 0x3) << 6))
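/*
 * CTRL_IO_DUMMY_SET() splits the dummy byte count across the control
 * register: bits [7:6] hold bits [1:0] of the count and bit 14 holds
 * bit [2], which allows up to 7 dummy bytes.
 */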
#define   CTRL_FREQ_SEL_SHIFT		8
#define   CTRL_FREQ_SEL_MASK		GENMASK(11, CTRL_FREQ_SEL_SHIFT)
#define   CTRL_CE_STOP_ACTIVE		BIT(2)
#define   CTRL_IO_MODE_CMD_MASK		GENMASK(1, 0)
#define   CTRL_IO_MODE_NORMAL		0x0
#define   CTRL_IO_MODE_READ		0x1
#define   CTRL_IO_MODE_WRITE		0x2
#define   CTRL_IO_MODE_USER		0x3

#define   CTRL_IO_CMD_MASK		0xf0ff40c3
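/*
 * CTRL_IO_CMD_MASK covers the fields rewritten when building a command:
 * IO mode (bits [31:28]), command opcode (bits [23:16]), dummy byte count
 * (bits 14, [7:6]) and the command mode (bits [1:0]).
 */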

/* CEx Address Decoding Range Register */
#define CE0_SEGMENT_ADDR_REG		0x30

/* CEx Read timing compensation register */
#define CE0_TIMING_COMPENSATION_REG	0x94

enum aspeed_spi_ctl_reg_value {
	ASPEED_SPI_BASE,
	ASPEED_SPI_READ,
	ASPEED_SPI_WRITE,
	ASPEED_SPI_MAX,
};

struct aspeed_spi;

struct aspeed_spi_chip {
	struct aspeed_spi	*aspi;
	u32			 cs;
	void __iomem		*ctl;
	void __iomem		*ahb_base;
	u32			 ahb_window_size;
	u32			 ctl_val[ASPEED_SPI_MAX];
	u32			 clk_freq;
};

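/*
 * Per-SoC parameters:
 *   ctl0       offset of the CE0 control register (CEx at ctl0 + cs * 4)
 *   max_cs     number of chip selects
 *   hastype    the config register carries flash type bits to program
 *   mode_bits  extra SPI mode bits advertised to the SPI core
 *   we0        bit position of the CE0 write enable in the config register
 *   timing     offset of the read timing compensation register
 *   hclk_mask  mask clearing the HCLK divider field(s) of a control value
 *   hdiv_max   lowest HCLK divider the calibration sequence will try
 *
 * The segment_start/end/reg hooks decode and encode the CEx decoding
 * range registers, and calibrate runs the SoC read timing calibration.
 */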
struct aspeed_spi_data {
	u32	ctl0;
	u32	max_cs;
	bool	hastype;
	u32	mode_bits;
	u32	we0;
	u32	timing;
	u32	hclk_mask;
	u32	hdiv_max;

	u32 (*segment_start)(struct aspeed_spi *aspi, u32 reg);
	u32 (*segment_end)(struct aspeed_spi *aspi, u32 reg);
	u32 (*segment_reg)(struct aspeed_spi *aspi, u32 start, u32 end);
	int (*calibrate)(struct aspeed_spi_chip *chip, u32 hdiv,
			 const u8 *golden_buf, u8 *test_buf);
};

#define ASPEED_SPI_MAX_NUM_CS	5

struct aspeed_spi {
	const struct aspeed_spi_data	*data;

	void __iomem		*regs;
	void __iomem		*ahb_base;
	u32			 ahb_base_phy;
	u32			 ahb_window_size;
	struct device		*dev;

	struct clk		*clk;
	u32			 clk_freq;

	struct aspeed_spi_chip	 chips[ASPEED_SPI_MAX_NUM_CS];
};

static u32 aspeed_spi_get_io_mode(const struct spi_mem_op *op)
{
	switch (op->data.buswidth) {
	case 1:
		return CTRL_IO_SINGLE_DATA;
	case 2:
		return CTRL_IO_DUAL_DATA;
	case 4:
		return CTRL_IO_QUAD_DATA;
	default:
		return CTRL_IO_SINGLE_DATA;
	}
}

static void aspeed_spi_set_io_mode(struct aspeed_spi_chip *chip, u32 io_mode)
{
	u32 ctl;

	if (io_mode > 0) {
		ctl = readl(chip->ctl) & ~CTRL_IO_MODE_MASK;
		ctl |= io_mode;
		writel(ctl, chip->ctl);
	}
}

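/*
 * User command mode: aspeed_spi_start_user() switches the CE control
 * register to USER mode with CTRL_CE_STOP_ACTIVE set, then clears the bit
 * to activate the chip select. aspeed_spi_stop_user() deactivates it and
 * restores the default read settings.
 */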
static void aspeed_spi_start_user(struct aspeed_spi_chip *chip)
{
	u32 ctl = chip->ctl_val[ASPEED_SPI_BASE];

	ctl |= CTRL_IO_MODE_USER | CTRL_CE_STOP_ACTIVE;
	writel(ctl, chip->ctl);

	ctl &= ~CTRL_CE_STOP_ACTIVE;
	writel(ctl, chip->ctl);
}

static void aspeed_spi_stop_user(struct aspeed_spi_chip *chip)
{
	u32 ctl = chip->ctl_val[ASPEED_SPI_READ] |
		CTRL_IO_MODE_USER | CTRL_CE_STOP_ACTIVE;

	writel(ctl, chip->ctl);

	/* Restore defaults */
	writel(chip->ctl_val[ASPEED_SPI_READ], chip->ctl);
}

static int aspeed_spi_read_from_ahb(void *buf, void __iomem *src, size_t len)
{
	size_t offset = 0;

	if (IS_ALIGNED((uintptr_t)src, sizeof(uintptr_t)) &&
	    IS_ALIGNED((uintptr_t)buf, sizeof(uintptr_t))) {
		ioread32_rep(src, buf, len >> 2);
		offset = len & ~0x3;
		len -= offset;
	}
	ioread8_rep(src, (u8 *)buf + offset, len);
	return 0;
}

static int aspeed_spi_write_to_ahb(void __iomem *dst, const void *buf, size_t len)
{
	size_t offset = 0;

	if (IS_ALIGNED((uintptr_t)dst, sizeof(uintptr_t)) &&
	    IS_ALIGNED((uintptr_t)buf, sizeof(uintptr_t))) {
		iowrite32_rep(dst, buf, len >> 2);
		offset = len & ~0x3;
		len -= offset;
	}
	iowrite8_rep(dst, (const u8 *)buf + offset, len);
	return 0;
}

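/*
 * In user mode, the command and address bytes are shifted out by writing
 * them to the chip AHB window. For 3-byte addressing, the opcode and
 * address are packed into a single big-endian word so the opcode goes out
 * first; for 4-byte addressing, the opcode is written on its own, followed
 * by the big-endian address.
 */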
static int aspeed_spi_send_cmd_addr(struct aspeed_spi_chip *chip, u8 addr_nbytes,
				    u64 offset, u32 opcode)
{
	__be32 temp;
	u32 cmdaddr;

	switch (addr_nbytes) {
	case 3:
		cmdaddr = offset & 0xFFFFFF;
		cmdaddr |= opcode << 24;

		temp = cpu_to_be32(cmdaddr);
		aspeed_spi_write_to_ahb(chip->ahb_base, &temp, 4);
		break;
	case 4:
		temp = cpu_to_be32(offset);
		aspeed_spi_write_to_ahb(chip->ahb_base, &opcode, 1);
		aspeed_spi_write_to_ahb(chip->ahb_base, &temp, 4);
		break;
	default:
		WARN_ONCE(1, "Unexpected address width %u", addr_nbytes);
		return -EOPNOTSUPP;
	}
	return 0;
}

static int aspeed_spi_read_reg(struct aspeed_spi_chip *chip,
			       const struct spi_mem_op *op)
{
	aspeed_spi_start_user(chip);
	aspeed_spi_write_to_ahb(chip->ahb_base, &op->cmd.opcode, 1);
	aspeed_spi_read_from_ahb(op->data.buf.in,
				 chip->ahb_base, op->data.nbytes);
	aspeed_spi_stop_user(chip);
	return 0;
}

static int aspeed_spi_write_reg(struct aspeed_spi_chip *chip,
				const struct spi_mem_op *op)
{
	aspeed_spi_start_user(chip);
	aspeed_spi_write_to_ahb(chip->ahb_base, &op->cmd.opcode, 1);
	aspeed_spi_write_to_ahb(chip->ahb_base, op->data.buf.out,
				op->data.nbytes);
	aspeed_spi_stop_user(chip);
	return 0;
}

static ssize_t aspeed_spi_read_user(struct aspeed_spi_chip *chip,
				    const struct spi_mem_op *op,
				    u64 offset, size_t len, void *buf)
{
	int io_mode = aspeed_spi_get_io_mode(op);
	u8 dummy = 0xFF;
	int i;
	int ret;

	aspeed_spi_start_user(chip);

	ret = aspeed_spi_send_cmd_addr(chip, op->addr.nbytes, offset, op->cmd.opcode);
	if (ret < 0) {
		aspeed_spi_stop_user(chip);
		return ret;
	}

	if (op->dummy.buswidth && op->dummy.nbytes) {
		for (i = 0; i < op->dummy.nbytes / op->dummy.buswidth; i++)
			aspeed_spi_write_to_ahb(chip->ahb_base, &dummy, sizeof(dummy));
	}

	aspeed_spi_set_io_mode(chip, io_mode);

	aspeed_spi_read_from_ahb(buf, chip->ahb_base, len);
	aspeed_spi_stop_user(chip);
	return 0;
}

static ssize_t aspeed_spi_write_user(struct aspeed_spi_chip *chip,
				     const struct spi_mem_op *op)
{
	int ret;

	aspeed_spi_start_user(chip);
	ret = aspeed_spi_send_cmd_addr(chip, op->addr.nbytes, op->addr.val, op->cmd.opcode);
	if (ret < 0) {
		aspeed_spi_stop_user(chip);
		return ret;
	}
	aspeed_spi_write_to_ahb(chip->ahb_base, op->data.buf.out, op->data.nbytes);
	aspeed_spi_stop_user(chip);
	return 0;
}

/* support for 1-1-1, 1-1-2 or 1-1-4 */
static bool aspeed_spi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	if (op->cmd.buswidth > 1)
		return false;

	if (op->addr.nbytes != 0) {
		if (op->addr.buswidth > 1)
			return false;
		if (op->addr.nbytes < 3 || op->addr.nbytes > 4)
			return false;
	}

	if (op->dummy.nbytes != 0) {
		if (op->dummy.buswidth > 1 || op->dummy.nbytes > 7)
			return false;
	}

	if (op->data.nbytes != 0 && op->data.buswidth > 4)
		return false;

	return spi_mem_default_supports_op(mem, op);
}

static const struct aspeed_spi_data ast2400_spi_data;

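/*
 * Build a one-shot control value for the operation (opcode, IO mode,
 * dummy cycles, read or write command mode), adjust the 4-byte address
 * mode if needed, run the transfer through the register or user mode
 * helpers, then restore the default read settings.
 */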
static int do_aspeed_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct aspeed_spi *aspi = spi_controller_get_devdata(mem->spi->controller);
	struct aspeed_spi_chip *chip = &aspi->chips[spi_get_chipselect(mem->spi, 0)];
	u32 addr_mode, addr_mode_backup;
	u32 ctl_val;
	int ret = 0;

	dev_dbg(aspi->dev,
		"CE%d %s OP %#x mode:%d.%d.%d.%d naddr:%#x ndummies:%#x len:%#x",
		chip->cs, op->data.dir == SPI_MEM_DATA_IN ? "read" : "write",
		op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
		op->dummy.buswidth, op->data.buswidth,
		op->addr.nbytes, op->dummy.nbytes, op->data.nbytes);

	addr_mode = readl(aspi->regs + CE_CTRL_REG);
	addr_mode_backup = addr_mode;

	ctl_val = chip->ctl_val[ASPEED_SPI_BASE];
	ctl_val &= ~CTRL_IO_CMD_MASK;

	ctl_val |= op->cmd.opcode << CTRL_COMMAND_SHIFT;

	/* 4BYTE address mode */
	if (op->addr.nbytes) {
		if (op->addr.nbytes == 4)
			addr_mode |= (0x11 << chip->cs);
		else
			addr_mode &= ~(0x11 << chip->cs);

		if (op->addr.nbytes == 4 && chip->aspi->data == &ast2400_spi_data)
			ctl_val |= CTRL_IO_ADDRESS_4B;
	}

	if (op->dummy.nbytes)
		ctl_val |= CTRL_IO_DUMMY_SET(op->dummy.nbytes / op->dummy.buswidth);

	if (op->data.nbytes)
		ctl_val |= aspeed_spi_get_io_mode(op);

	if (op->data.dir == SPI_MEM_DATA_OUT)
		ctl_val |= CTRL_IO_MODE_WRITE;
	else
		ctl_val |= CTRL_IO_MODE_READ;

	if (addr_mode != addr_mode_backup)
		writel(addr_mode, aspi->regs + CE_CTRL_REG);
	writel(ctl_val, chip->ctl);

	if (op->data.dir == SPI_MEM_DATA_IN) {
		if (!op->addr.nbytes)
			ret = aspeed_spi_read_reg(chip, op);
		else
			ret = aspeed_spi_read_user(chip, op, op->addr.val,
						   op->data.nbytes, op->data.buf.in);
	} else {
		if (!op->addr.nbytes)
			ret = aspeed_spi_write_reg(chip, op);
		else
			ret = aspeed_spi_write_user(chip, op);
	}

	/* Restore defaults */
	if (addr_mode != addr_mode_backup)
		writel(addr_mode_backup, aspi->regs + CE_CTRL_REG);
	writel(chip->ctl_val[ASPEED_SPI_READ], chip->ctl);
	return ret;
}

static int aspeed_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	int ret;

	ret = do_aspeed_spi_exec_op(mem, op);
	if (ret)
		dev_err(&mem->spi->dev, "operation failed: %d\n", ret);
	return ret;
}

static const char *aspeed_spi_get_name(struct spi_mem *mem)
{
	struct aspeed_spi *aspi = spi_controller_get_devdata(mem->spi->controller);
	struct device *dev = aspi->dev;

	return devm_kasprintf(dev, GFP_KERNEL, "%s.%d", dev_name(dev),
			      spi_get_chipselect(mem->spi, 0));
}

struct aspeed_spi_window {
	u32 cs;
	u32 offset;
	u32 size;
};

static void aspeed_spi_get_windows(struct aspeed_spi *aspi,
				   struct aspeed_spi_window windows[ASPEED_SPI_MAX_NUM_CS])
{
	const struct aspeed_spi_data *data = aspi->data;
	u32 reg_val;
	u32 cs;

	for (cs = 0; cs < aspi->data->max_cs; cs++) {
		reg_val = readl(aspi->regs + CE0_SEGMENT_ADDR_REG + cs * 4);
		windows[cs].cs = cs;
		windows[cs].size = data->segment_end(aspi, reg_val) -
			data->segment_start(aspi, reg_val);
		windows[cs].offset = data->segment_start(aspi, reg_val) - aspi->ahb_base_phy;
		dev_vdbg(aspi->dev, "CE%d offset=0x%.8x size=0x%x\n", cs,
			 windows[cs].offset, windows[cs].size);
	}
}

/*
 * On the AST2600, some CE windows are closed by default at reset but
 * U-Boot should have opened all of them.
 */
static int aspeed_spi_chip_set_default_window(struct aspeed_spi_chip *chip)
{
	struct aspeed_spi *aspi = chip->aspi;
	struct aspeed_spi_window windows[ASPEED_SPI_MAX_NUM_CS] = { 0 };
	struct aspeed_spi_window *win = &windows[chip->cs];

	/* No segment registers for the AST2400 SPI controller */
	if (aspi->data == &ast2400_spi_data) {
		win->offset = 0;
		win->size = aspi->ahb_window_size;
	} else {
		aspeed_spi_get_windows(aspi, windows);
	}

	chip->ahb_base = aspi->ahb_base + win->offset;
	chip->ahb_window_size = win->size;

	dev_dbg(aspi->dev, "CE%d default window [ 0x%.8x - 0x%.8x ] %dMB",
		chip->cs, aspi->ahb_base_phy + win->offset,
		aspi->ahb_base_phy + win->offset + win->size - 1,
		win->size >> 20);

	return chip->ahb_window_size ? 0 : -1;
}

static int aspeed_spi_set_window(struct aspeed_spi *aspi,
				 const struct aspeed_spi_window *win)
{
	u32 start = aspi->ahb_base_phy + win->offset;
	u32 end = start + win->size;
	void __iomem *seg_reg = aspi->regs + CE0_SEGMENT_ADDR_REG + win->cs * 4;
	u32 seg_val_backup = readl(seg_reg);
	u32 seg_val = aspi->data->segment_reg(aspi, start, end);

	if (seg_val == seg_val_backup)
		return 0;

	writel(seg_val, seg_reg);

	/*
	 * Restore the initial value if something goes wrong, otherwise we
	 * could lose access to the chip.
	 */
	if (seg_val != readl(seg_reg)) {
		dev_err(aspi->dev, "CE%d invalid window [ 0x%.8x - 0x%.8x ] %dMB",
			win->cs, start, end - 1, win->size >> 20);
		writel(seg_val_backup, seg_reg);
		return -EIO;
	}

	if (win->size)
		dev_dbg(aspi->dev, "CE%d new window [ 0x%.8x - 0x%.8x ] %dMB",
			win->cs, start, end - 1, win->size >> 20);
	else
		dev_dbg(aspi->dev, "CE%d window closed", win->cs);

	return 0;
}

/*
 * Yet to be done when possible:
 * - Align mappings on flash size (we don't have the info)
 * - ioremap each window, not strictly necessary since the overall window
 *   is correct.
 */
static const struct aspeed_spi_data ast2500_spi_data;
static const struct aspeed_spi_data ast2600_spi_data;
static const struct aspeed_spi_data ast2600_fmc_data;

static int aspeed_spi_chip_adjust_window(struct aspeed_spi_chip *chip,
					 u32 local_offset, u32 size)
{
	struct aspeed_spi *aspi = chip->aspi;
	struct aspeed_spi_window windows[ASPEED_SPI_MAX_NUM_CS] = { 0 };
	struct aspeed_spi_window *win = &windows[chip->cs];
	int ret;

	/* No segment registers for the AST2400 SPI controller */
	if (aspi->data == &ast2400_spi_data)
		return 0;

	/*
	 * Due to an HW issue on the AST2500 SPI controller, the CE0
	 * window size should be smaller than the maximum 128MB.
	 */
	if (aspi->data == &ast2500_spi_data && chip->cs == 0 && size == SZ_128M) {
		size = 120 << 20;
		dev_info(aspi->dev, "CE%d window resized to %dMB (AST2500 HW quirk)",
			 chip->cs, size >> 20);
	}

	/*
	 * The decoding range of the AST2600 SPI controller should be set
	 * to at least 2MB.
	 */
	if ((aspi->data == &ast2600_spi_data || aspi->data == &ast2600_fmc_data) &&
	    size < SZ_2M) {
		size = SZ_2M;
		dev_info(aspi->dev, "CE%d window resized to %dMB (AST2600 Decoding)",
			 chip->cs, size >> 20);
	}

	aspeed_spi_get_windows(aspi, windows);

	/* Adjust this chip window */
	win->offset += local_offset;
	win->size = size;

	if (win->offset + win->size > aspi->ahb_window_size) {
		win->size = aspi->ahb_window_size - win->offset;
		dev_warn(aspi->dev, "CE%d window resized to %dMB", chip->cs, win->size >> 20);
	}

	ret = aspeed_spi_set_window(aspi, win);
	if (ret)
		return ret;

	/* Update chip mapping info */
	chip->ahb_base = aspi->ahb_base + win->offset;
	chip->ahb_window_size = win->size;

	/*
	 * Also adjust the next chip window to make sure that it does not
	 * overlap with the current window.
	 */
	if (chip->cs < aspi->data->max_cs - 1) {
		struct aspeed_spi_window *next = &windows[chip->cs + 1];

		/* Change offset and size to keep the same end address */
		if ((next->offset + next->size) > (win->offset + win->size))
			next->size = (next->offset + next->size) - (win->offset + win->size);
		else
			next->size = 0;
		next->offset = win->offset + win->size;

		aspeed_spi_set_window(aspi, next);
	}
	return 0;
}

static int aspeed_spi_do_calibration(struct aspeed_spi_chip *chip);

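/*
 * Direct mapping reads go through the controller AHB window. Creating the
 * dirmap descriptor adjusts the CE decoding range, programs the default
 * READ control value for the operation and runs the read timing
 * calibration at the requested frequency.
 */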
static int aspeed_spi_dirmap_create(struct spi_mem_dirmap_desc *desc)
{
	struct aspeed_spi *aspi = spi_controller_get_devdata(desc->mem->spi->controller);
	struct aspeed_spi_chip *chip = &aspi->chips[spi_get_chipselect(desc->mem->spi, 0)];
	struct spi_mem_op *op = &desc->info.op_tmpl;
	u32 ctl_val;
	int ret = 0;

	dev_dbg(aspi->dev,
		"CE%d %s dirmap [ 0x%.8llx - 0x%.8llx ] OP %#x mode:%d.%d.%d.%d naddr:%#x ndummies:%#x\n",
		chip->cs, op->data.dir == SPI_MEM_DATA_IN ? "read" : "write",
		desc->info.offset, desc->info.offset + desc->info.length,
		op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
		op->dummy.buswidth, op->data.buswidth,
		op->addr.nbytes, op->dummy.nbytes);

	chip->clk_freq = desc->mem->spi->max_speed_hz;

	/* Only for reads */
	if (op->data.dir != SPI_MEM_DATA_IN)
		return -EOPNOTSUPP;

	aspeed_spi_chip_adjust_window(chip, desc->info.offset, desc->info.length);

	if (desc->info.length > chip->ahb_window_size)
		dev_warn(aspi->dev, "CE%d window (%dMB) too small for mapping",
			 chip->cs, chip->ahb_window_size >> 20);

	/* Define the default IO read settings */
	ctl_val = readl(chip->ctl) & ~CTRL_IO_CMD_MASK;
	ctl_val |= aspeed_spi_get_io_mode(op) |
		op->cmd.opcode << CTRL_COMMAND_SHIFT |
		CTRL_IO_MODE_READ;

	if (op->dummy.nbytes)
		ctl_val |= CTRL_IO_DUMMY_SET(op->dummy.nbytes / op->dummy.buswidth);

	/* Tune 4BYTE address mode */
	if (op->addr.nbytes) {
		u32 addr_mode = readl(aspi->regs + CE_CTRL_REG);

		if (op->addr.nbytes == 4)
			addr_mode |= (0x11 << chip->cs);
		else
			addr_mode &= ~(0x11 << chip->cs);
		writel(addr_mode, aspi->regs + CE_CTRL_REG);

		/*
		 * AST2400 SPI controller sets 4BYTE address mode in
		 * CE0 Control Register
		 */
		if (op->addr.nbytes == 4 && chip->aspi->data == &ast2400_spi_data)
			ctl_val |= CTRL_IO_ADDRESS_4B;
	}

	/* READ mode is the controller default setting */
	chip->ctl_val[ASPEED_SPI_READ] = ctl_val;
	writel(chip->ctl_val[ASPEED_SPI_READ], chip->ctl);

	ret = aspeed_spi_do_calibration(chip);

	dev_info(aspi->dev, "CE%d read buswidth:%d [0x%08x]\n",
		 chip->cs, op->data.buswidth, chip->ctl_val[ASPEED_SPI_READ]);

	return ret;
}

static ssize_t aspeed_spi_dirmap_read(struct spi_mem_dirmap_desc *desc,
				      u64 offset, size_t len, void *buf)
{
	struct aspeed_spi *aspi = spi_controller_get_devdata(desc->mem->spi->controller);
	struct aspeed_spi_chip *chip = &aspi->chips[spi_get_chipselect(desc->mem->spi, 0)];

	/* Switch to USER command mode if mapping window is too small */
	if (chip->ahb_window_size < offset + len) {
		int ret;

		ret = aspeed_spi_read_user(chip, &desc->info.op_tmpl, offset, len, buf);
		if (ret < 0)
			return ret;
	} else {
		memcpy_fromio(buf, chip->ahb_base + offset, len);
	}

	return len;
}

static const struct spi_controller_mem_ops aspeed_spi_mem_ops = {
	.supports_op = aspeed_spi_supports_op,
	.exec_op = aspeed_spi_exec_op,
	.get_name = aspeed_spi_get_name,
	.dirmap_create = aspeed_spi_dirmap_create,
	.dirmap_read = aspeed_spi_dirmap_read,
};

static void aspeed_spi_chip_set_type(struct aspeed_spi *aspi, unsigned int cs, int type)
{
	u32 reg;

	reg = readl(aspi->regs + CONFIG_REG);
	reg &= ~(0x3 << (cs * 2));
	reg |= type << (cs * 2);
	writel(reg, aspi->regs + CONFIG_REG);
}

static void aspeed_spi_chip_enable(struct aspeed_spi *aspi, unsigned int cs, bool enable)
{
	u32 we_bit = BIT(aspi->data->we0 + cs);
	u32 reg = readl(aspi->regs + CONFIG_REG);

	if (enable)
		reg |= we_bit;
	else
		reg &= ~we_bit;
	writel(reg, aspi->regs + CONFIG_REG);
}

static int aspeed_spi_setup(struct spi_device *spi)
{
	struct aspeed_spi *aspi = spi_controller_get_devdata(spi->controller);
	const struct aspeed_spi_data *data = aspi->data;
	unsigned int cs = spi_get_chipselect(spi, 0);
	struct aspeed_spi_chip *chip = &aspi->chips[cs];

	chip->aspi = aspi;
	chip->cs = cs;
	chip->ctl = aspi->regs + data->ctl0 + cs * 4;

	/* The driver only supports SPI type flash */
	if (data->hastype)
		aspeed_spi_chip_set_type(aspi, cs, CONFIG_TYPE_SPI);

	if (aspeed_spi_chip_set_default_window(chip) < 0) {
		dev_warn(aspi->dev, "CE%d window invalid", cs);
		return -EINVAL;
	}

	aspeed_spi_chip_enable(aspi, cs, true);

	chip->ctl_val[ASPEED_SPI_BASE] = CTRL_CE_STOP_ACTIVE | CTRL_IO_MODE_USER;

	dev_dbg(aspi->dev, "CE%d setup done\n", cs);
	return 0;
}

static void aspeed_spi_cleanup(struct spi_device *spi)
{
	struct aspeed_spi *aspi = spi_controller_get_devdata(spi->controller);
	unsigned int cs = spi_get_chipselect(spi, 0);

	aspeed_spi_chip_enable(aspi, cs, false);

	dev_dbg(aspi->dev, "CE%d cleanup done\n", cs);
}

static void aspeed_spi_enable(struct aspeed_spi *aspi, bool enable)
{
	int cs;

	for (cs = 0; cs < aspi->data->max_cs; cs++)
		aspeed_spi_chip_enable(aspi, cs, enable);
}

static int aspeed_spi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct aspeed_spi_data *data;
	struct spi_controller *ctlr;
	struct aspeed_spi *aspi;
	struct resource *res;
	int ret;

	data = of_device_get_match_data(&pdev->dev);
	if (!data)
		return -ENODEV;

	ctlr = devm_spi_alloc_host(dev, sizeof(*aspi));
	if (!ctlr)
		return -ENOMEM;

	aspi = spi_controller_get_devdata(ctlr);
	platform_set_drvdata(pdev, aspi);
	aspi->data = data;
	aspi->dev = dev;

	aspi->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(aspi->regs))
		return PTR_ERR(aspi->regs);

	aspi->ahb_base = devm_platform_get_and_ioremap_resource(pdev, 1, &res);
	if (IS_ERR(aspi->ahb_base)) {
		dev_err(dev, "missing AHB mapping window\n");
		return PTR_ERR(aspi->ahb_base);
	}

	aspi->ahb_window_size = resource_size(res);
	aspi->ahb_base_phy = res->start;

	aspi->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(aspi->clk)) {
		dev_err(dev, "missing clock\n");
		return PTR_ERR(aspi->clk);
	}

	aspi->clk_freq = clk_get_rate(aspi->clk);
	if (!aspi->clk_freq) {
		dev_err(dev, "invalid clock\n");
		return -EINVAL;
	}

	ret = clk_prepare_enable(aspi->clk);
	if (ret) {
		dev_err(dev, "can not enable the clock\n");
		return ret;
	}

	/* IRQ is for DMA, which the driver doesn't support yet */

	ctlr->mode_bits = SPI_RX_DUAL | SPI_TX_DUAL | data->mode_bits;
	ctlr->bus_num = pdev->id;
	ctlr->mem_ops = &aspeed_spi_mem_ops;
	ctlr->setup = aspeed_spi_setup;
	ctlr->cleanup = aspeed_spi_cleanup;
	ctlr->num_chipselect = data->max_cs;
	ctlr->dev.of_node = dev->of_node;

	ret = devm_spi_register_controller(dev, ctlr);
	if (ret) {
		dev_err(&pdev->dev, "spi_register_controller failed\n");
		goto disable_clk;
	}
	return 0;

disable_clk:
	clk_disable_unprepare(aspi->clk);
	return ret;
}

static void aspeed_spi_remove(struct platform_device *pdev)
{
	struct aspeed_spi *aspi = platform_get_drvdata(pdev);

	aspeed_spi_enable(aspi, false);
	clk_disable_unprepare(aspi->clk);
}

/*
 * AHB mappings
 */

/*
 * The Segment Registers of the AST2400 and AST2500 use an 8MB unit.
 * The address range is encoded with absolute addresses in the overall
 * mapping window.
 */
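/*
 * For example, a 32MB window at 0x20000000 - 0x21FFFFFF is encoded as
 * (0x20000000 >> 23) = 0x40 in bits [23:16] and (0x22000000 >> 23) = 0x44
 * in bits [31:24], i.e. a segment register value of 0x44400000.
 */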
static u32 aspeed_spi_segment_start(struct aspeed_spi *aspi, u32 reg)
{
	return ((reg >> 16) & 0xFF) << 23;
}

static u32 aspeed_spi_segment_end(struct aspeed_spi *aspi, u32 reg)
{
	return ((reg >> 24) & 0xFF) << 23;
}

static u32 aspeed_spi_segment_reg(struct aspeed_spi *aspi, u32 start, u32 end)
{
	return (((start >> 23) & 0xFF) << 16) | (((end >> 23) & 0xFF) << 24);
}

/*
 * The Segment Registers of the AST2600 use a 1MB unit. The address
 * range is encoded with offsets in the overall mapping window.
 */

#define AST2600_SEG_ADDR_MASK 0x0ff00000
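/*
 * The start offset (bits [27:20] of the address) is stored in register
 * bits [11:4] and the offset of the last MB of the window in register
 * bits [27:20]. For example, a 2MB window at offset 0 of the mapping is
 * encoded as 0x00100000.
 */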

static u32 aspeed_spi_segment_ast2600_start(struct aspeed_spi *aspi,
					    u32 reg)
{
	u32 start_offset = (reg << 16) & AST2600_SEG_ADDR_MASK;

	return aspi->ahb_base_phy + start_offset;
}

static u32 aspeed_spi_segment_ast2600_end(struct aspeed_spi *aspi,
					  u32 reg)
{
	u32 end_offset = reg & AST2600_SEG_ADDR_MASK;

	/* segment is disabled */
	if (!end_offset)
		return aspi->ahb_base_phy;

	return aspi->ahb_base_phy + end_offset + 0x100000;
}

static u32 aspeed_spi_segment_ast2600_reg(struct aspeed_spi *aspi,
					  u32 start, u32 end)
{
	/* disable zero size segments */
	if (start == end)
		return 0;

	return ((start & AST2600_SEG_ADDR_MASK) >> 16) |
		((end - 1) & AST2600_SEG_ADDR_MASK);
}

/*
 * Read timing compensation sequences
 */

#define CALIBRATE_BUF_SIZE SZ_16K

static bool aspeed_spi_check_reads(struct aspeed_spi_chip *chip,
				   const u8 *golden_buf, u8 *test_buf)
{
	int i;

	for (i = 0; i < 10; i++) {
		memcpy_fromio(test_buf, chip->ahb_base, CALIBRATE_BUF_SIZE);
		if (memcmp(test_buf, golden_buf, CALIBRATE_BUF_SIZE) != 0) {
#if defined(VERBOSE_DEBUG)
			print_hex_dump_bytes(DEVICE_NAME "  fail: ", DUMP_PREFIX_NONE,
					     test_buf, 0x100);
#endif
			return false;
		}
	}
	return true;
}

#define FREAD_TPASS(i)	(((i) / 2) | (((i) & 1) ? 0 : 8))
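/*
 * FREAD_TPASS() builds the 4-bit timing setting for pass 'i': the low
 * bits select the HCLK cycle delay (i / 2) and bit 3 enables the extra
 * 4ns input delay used on even passes.
 */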

/*
 * The timing register is shared by all devices. Only update for CE0.
 */
static int aspeed_spi_calibrate(struct aspeed_spi_chip *chip, u32 hdiv,
				const u8 *golden_buf, u8 *test_buf)
{
	struct aspeed_spi *aspi = chip->aspi;
	const struct aspeed_spi_data *data = aspi->data;
	int i;
	int good_pass = -1, pass_count = 0;
	u32 shift = (hdiv - 1) << 2;
	u32 mask = ~(0xfu << shift);
	u32 fread_timing_val = 0;

	/*
	 * Try HCLK delays 0..5, each with and without the extra DI delay,
	 * and look for a good pair.
	 */
	for (i = 0; i < 12; i++) {
		bool pass;

		if (chip->cs == 0) {
			fread_timing_val &= mask;
			fread_timing_val |= FREAD_TPASS(i) << shift;
			writel(fread_timing_val, aspi->regs + data->timing);
		}
		pass = aspeed_spi_check_reads(chip, golden_buf, test_buf);
		dev_dbg(aspi->dev,
			"  * [%08x] %d HCLK delay, %dns DI delay : %s",
			fread_timing_val, i / 2, (i & 1) ? 0 : 4,
			pass ? "PASS" : "FAIL");
		if (pass) {
			pass_count++;
			if (pass_count == 3) {
				good_pass = i - 1;
				break;
			}
		} else {
			pass_count = 0;
		}
	}

	/* No good setting for this frequency */
	if (good_pass < 0)
		return -1;

	/* We have at least one pass of margin, let's use the first pass */
	if (chip->cs == 0) {
		fread_timing_val &= mask;
		fread_timing_val |= FREAD_TPASS(good_pass) << shift;
		writel(fread_timing_val, aspi->regs + data->timing);
	}
	dev_dbg(aspi->dev, " * -> good is pass %d [0x%08x]",
		good_pass, fread_timing_val);
	return 0;
}

static bool aspeed_spi_check_calib_data(const u8 *test_buf, u32 size)
{
	const u32 *tb32 = (const u32 *)test_buf;
	u32 i, cnt = 0;

	/*
	 * Check that the area contains enough words that are neither all
	 * 0 nor all 1 for the calibration to be meaningful. An arbitrary
	 * threshold of 64 words is used for now.
	 */
	size >>= 2;
	for (i = 0; i < size; i++) {
		if (tb32[i] != 0 && tb32[i] != 0xffffffff)
			cnt++;
	}
	return cnt >= 64;
}

static const u32 aspeed_spi_hclk_divs[] = {
	0xf, /* HCLK */
	0x7, /* HCLK/2 */
	0xe, /* HCLK/3 */
	0x6, /* HCLK/4 */
	0xd, /* HCLK/5 */
};

#define ASPEED_SPI_HCLK_DIV(i) \
	(aspeed_spi_hclk_divs[(i) - 1] << CTRL_FREQ_SEL_SHIFT)
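/*
 * aspeed_spi_hclk_divs[] holds the 4-bit hardware encodings of the HCLK
 * dividers, indexed by divisor - 1. ASPEED_SPI_HCLK_DIV() shifts the
 * encoding into the frequency selection field of the control register.
 */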

static int aspeed_spi_do_calibration(struct aspeed_spi_chip *chip)
{
	struct aspeed_spi *aspi = chip->aspi;
	const struct aspeed_spi_data *data = aspi->data;
	u32 ahb_freq = aspi->clk_freq;
	u32 max_freq = chip->clk_freq;
	u32 ctl_val;
	u8 *golden_buf = NULL;
	u8 *test_buf = NULL;
	int i, rc, best_div = -1;

	dev_dbg(aspi->dev, "calculate timing compensation - AHB freq: %d MHz",
		ahb_freq / 1000000);

	/*
	 * Use a low frequency to read the calibration area and capture
	 * the golden buffer.
	 */
	ctl_val = chip->ctl_val[ASPEED_SPI_READ] & data->hclk_mask;
	writel(ctl_val, chip->ctl);

	test_buf = kzalloc(CALIBRATE_BUF_SIZE * 2, GFP_KERNEL);
	if (!test_buf)
		return -ENOMEM;

	golden_buf = test_buf + CALIBRATE_BUF_SIZE;

	memcpy_fromio(golden_buf, chip->ahb_base, CALIBRATE_BUF_SIZE);
	if (!aspeed_spi_check_calib_data(golden_buf, CALIBRATE_BUF_SIZE)) {
		dev_info(aspi->dev, "Calibration area too uniform, using low speed");
		goto no_calib;
	}

#if defined(VERBOSE_DEBUG)
	print_hex_dump_bytes(DEVICE_NAME "  good: ", DUMP_PREFIX_NONE,
			     golden_buf, 0x100);
#endif

	/* Now we iterate the HCLK dividers until we find our breaking point */
	for (i = ARRAY_SIZE(aspeed_spi_hclk_divs); i > data->hdiv_max - 1; i--) {
		u32 tv, freq;

		freq = ahb_freq / i;
		if (freq > max_freq)
			continue;

		/* Set the timing */
		tv = chip->ctl_val[ASPEED_SPI_READ] | ASPEED_SPI_HCLK_DIV(i);
		writel(tv, chip->ctl);
		dev_dbg(aspi->dev, "Trying HCLK/%d [%08x] ...", i, tv);
		rc = data->calibrate(chip, i, golden_buf, test_buf);
		if (rc == 0)
			best_div = i;
	}

	/* Nothing found ? */
	if (best_div < 0) {
		dev_warn(aspi->dev, "No good frequency, using dumb slow");
	} else {
		dev_dbg(aspi->dev, "Found good read timings at HCLK/%d", best_div);

		/* Record the freq */
		for (i = 0; i < ASPEED_SPI_MAX; i++)
			chip->ctl_val[i] = (chip->ctl_val[i] & data->hclk_mask) |
				ASPEED_SPI_HCLK_DIV(best_div);
	}

no_calib:
	writel(chip->ctl_val[ASPEED_SPI_READ], chip->ctl);
	kfree(test_buf);
	return 0;
}

#define TIMING_DELAY_DI		BIT(3)
#define TIMING_DELAY_HCYCLE_MAX	5
#define TIMING_REG_AST2600(chip)				\
	((chip)->aspi->regs + (chip)->aspi->data->timing +	\
	 (chip)->cs * 4)
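/*
 * On the AST2600, each CE has its own timing compensation register and
 * each HCLK divider gets an 8-bit field: the low nibble holds the HCLK
 * cycle delay plus the TIMING_DELAY_DI enable bit, the high nibble the
 * DI delay value.
 */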

static int aspeed_spi_ast2600_calibrate(struct aspeed_spi_chip *chip, u32 hdiv,
					const u8 *golden_buf, u8 *test_buf)
{
	struct aspeed_spi *aspi = chip->aspi;
	int hcycle;
	u32 shift = (hdiv - 2) << 3;
	u32 mask = ~(0xfu << shift);
	u32 fread_timing_val = 0;

	for (hcycle = 0; hcycle <= TIMING_DELAY_HCYCLE_MAX; hcycle++) {
		int delay_ns;
		bool pass = false;

		fread_timing_val &= mask;
		fread_timing_val |= hcycle << shift;

		/* no DI input delay first */
		writel(fread_timing_val, TIMING_REG_AST2600(chip));
		pass = aspeed_spi_check_reads(chip, golden_buf, test_buf);
		dev_dbg(aspi->dev,
			"  * [%08x] %d HCLK delay, DI delay none : %s",
			fread_timing_val, hcycle, pass ? "PASS" : "FAIL");
		if (pass)
			return 0;

		/* Add DI input delays */
		fread_timing_val &= mask;
		fread_timing_val |= (TIMING_DELAY_DI | hcycle) << shift;

		for (delay_ns = 0; delay_ns < 0x10; delay_ns++) {
			fread_timing_val &= ~(0xf << (4 + shift));
			fread_timing_val |= delay_ns << (4 + shift);

			writel(fread_timing_val, TIMING_REG_AST2600(chip));
			pass = aspeed_spi_check_reads(chip, golden_buf, test_buf);
			dev_dbg(aspi->dev,
				"  * [%08x] %d HCLK delay, DI delay %d.%dns : %s",
				fread_timing_val, hcycle, (delay_ns + 1) / 2,
				(delay_ns + 1) & 1 ? 5 : 0, pass ? "PASS" : "FAIL");
			/*
			 * TODO: This is optimistic. We should look
			 * for a working interval and save the middle
			 * value in the read timing register.
			 */
			if (pass)
				return 0;
		}
	}

	/* No good setting for this frequency */
	return -1;
}

/*
 * Platform definitions
 */
static const struct aspeed_spi_data ast2400_fmc_data = {
	.max_cs	       = 5,
	.hastype       = true,
	.we0	       = 16,
	.ctl0	       = CE0_CTRL_REG,
	.timing	       = CE0_TIMING_COMPENSATION_REG,
	.hclk_mask     = 0xfffff0ff,
	.hdiv_max      = 1,
	.calibrate     = aspeed_spi_calibrate,
	.segment_start = aspeed_spi_segment_start,
	.segment_end   = aspeed_spi_segment_end,
	.segment_reg   = aspeed_spi_segment_reg,
};

static const struct aspeed_spi_data ast2400_spi_data = {
	.max_cs	       = 1,
	.hastype       = false,
	.we0	       = 0,
	.ctl0	       = 0x04,
	.timing	       = 0x14,
	.hclk_mask     = 0xfffff0ff,
	.hdiv_max      = 1,
	.calibrate     = aspeed_spi_calibrate,
	/* No segment registers */
};

static const struct aspeed_spi_data ast2500_fmc_data = {
	.max_cs	       = 3,
	.hastype       = true,
	.we0	       = 16,
	.ctl0	       = CE0_CTRL_REG,
	.timing	       = CE0_TIMING_COMPENSATION_REG,
	.hclk_mask     = 0xffffd0ff,
	.hdiv_max      = 1,
	.calibrate     = aspeed_spi_calibrate,
	.segment_start = aspeed_spi_segment_start,
	.segment_end   = aspeed_spi_segment_end,
	.segment_reg   = aspeed_spi_segment_reg,
};

static const struct aspeed_spi_data ast2500_spi_data = {
	.max_cs	       = 2,
	.hastype       = false,
	.we0	       = 16,
	.ctl0	       = CE0_CTRL_REG,
	.timing	       = CE0_TIMING_COMPENSATION_REG,
	.hclk_mask     = 0xffffd0ff,
	.hdiv_max      = 1,
	.calibrate     = aspeed_spi_calibrate,
	.segment_start = aspeed_spi_segment_start,
	.segment_end   = aspeed_spi_segment_end,
	.segment_reg   = aspeed_spi_segment_reg,
};

static const struct aspeed_spi_data ast2600_fmc_data = {
	.max_cs	       = 3,
	.hastype       = false,
	.mode_bits     = SPI_RX_QUAD | SPI_TX_QUAD,
	.we0	       = 16,
	.ctl0	       = CE0_CTRL_REG,
	.timing	       = CE0_TIMING_COMPENSATION_REG,
	.hclk_mask     = 0xf0fff0ff,
	.hdiv_max      = 2,
	.calibrate     = aspeed_spi_ast2600_calibrate,
	.segment_start = aspeed_spi_segment_ast2600_start,
	.segment_end   = aspeed_spi_segment_ast2600_end,
	.segment_reg   = aspeed_spi_segment_ast2600_reg,
};

static const struct aspeed_spi_data ast2600_spi_data = {
	.max_cs	       = 2,
	.hastype       = false,
	.mode_bits     = SPI_RX_QUAD | SPI_TX_QUAD,
	.we0	       = 16,
	.ctl0	       = CE0_CTRL_REG,
	.timing	       = CE0_TIMING_COMPENSATION_REG,
	.hclk_mask     = 0xf0fff0ff,
	.hdiv_max      = 2,
	.calibrate     = aspeed_spi_ast2600_calibrate,
	.segment_start = aspeed_spi_segment_ast2600_start,
	.segment_end   = aspeed_spi_segment_ast2600_end,
	.segment_reg   = aspeed_spi_segment_ast2600_reg,
};

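/*
 * A minimal sketch of a matching devicetree node (register and window
 * addresses, clock phandle and flash frequency below are placeholders):
 *
 *	fmc: spi@1e620000 {
 *		compatible = "aspeed,ast2600-fmc";
 *		reg = <0x1e620000 0xc4>, <0x20000000 0x10000000>;
 *		clocks = <&syscon ASPEED_CLK_AHB>;
 *		#address-cells = <1>;
 *		#size-cells = <0>;
 *
 *		flash@0 {
 *			compatible = "jedec,spi-nor";
 *			reg = <0>;
 *			spi-max-frequency = <50000000>;
 *			spi-rx-bus-width = <4>;
 *		};
 *	};
 */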
static const struct of_device_id aspeed_spi_matches[] = {
	{ .compatible = "aspeed,ast2400-fmc", .data = &ast2400_fmc_data },
	{ .compatible = "aspeed,ast2400-spi", .data = &ast2400_spi_data },
	{ .compatible = "aspeed,ast2500-fmc", .data = &ast2500_fmc_data },
	{ .compatible = "aspeed,ast2500-spi", .data = &ast2500_spi_data },
	{ .compatible = "aspeed,ast2600-fmc", .data = &ast2600_fmc_data },
	{ .compatible = "aspeed,ast2600-spi", .data = &ast2600_spi_data },
	{ }
};
MODULE_DEVICE_TABLE(of, aspeed_spi_matches);

static struct platform_driver aspeed_spi_driver = {
	.probe			= aspeed_spi_probe,
	.remove_new		= aspeed_spi_remove,
	.driver	= {
		.name		= DEVICE_NAME,
		.of_match_table = aspeed_spi_matches,
	}
};

module_platform_driver(aspeed_spi_driver);

MODULE_DESCRIPTION("ASPEED Static Memory Controller Driver");
MODULE_AUTHOR("Chin-Ting Kuo <chin-ting_kuo@aspeedtech.com>");
MODULE_AUTHOR("Cedric Le Goater <clg@kaod.org>");
MODULE_LICENSE("GPL v2");