1  // SPDX-License-Identifier: GPL-2.0-or-later
2  // Copyright IBM Corp 2019
3  
4  #include <linux/aspeed-xdma.h>
5  #include <linux/bitfield.h>
6  #include <linux/clk.h>
7  #include <linux/delay.h>
8  #include <linux/device.h>
9  #include <linux/dma-mapping.h>
10  #include <linux/fs.h>
11  #include <linux/genalloc.h>
12  #include <linux/interrupt.h>
13  #include <linux/io.h>
14  #include <linux/jiffies.h>
15  #include <linux/mfd/syscon.h>
16  #include <linux/miscdevice.h>
17  #include <linux/module.h>
18  #include <linux/mutex.h>
19  #include <linux/of_device.h>
20  #include <linux/of_reserved_mem.h>
21  #include <linux/platform_device.h>
22  #include <linux/poll.h>
23  #include <linux/regmap.h>
24  #include <linux/reset.h>
25  #include <linux/slab.h>
26  #include <linux/spinlock.h>
27  #include <linux/string.h>
28  #include <linux/uaccess.h>
29  #include <linux/wait.h>
30  #include <linux/workqueue.h>
31  
32  #define DEVICE_NAME				"aspeed-xdma"
33  
34  #define SCU_AST2600_MISC_CTRL			0x0c0
35  #define  SCU_AST2600_MISC_CTRL_XDMA_BMC		 BIT(8)
36  
37  #define SCU_AST2600_DEBUG_CTRL			0x0c8
38  #define  DEBUG_CTRL_XDMA_DISABLE		 BIT(2)
39  
40  #define SCU_AST2500_PCIE_CONF			0x180
41  #define SCU_AST2600_PCIE_CONF			0xc20
42  #define  SCU_PCIE_CONF_VGA_EN			 BIT(0)
43  #define  SCU_PCIE_CONF_VGA_EN_MMIO		 BIT(1)
44  #define  SCU_PCIE_CONF_VGA_EN_LPC		 BIT(2)
45  #define  SCU_PCIE_CONF_VGA_EN_MSI		 BIT(3)
46  #define  SCU_PCIE_CONF_VGA_EN_MCTP		 BIT(4)
47  #define  SCU_PCIE_CONF_VGA_EN_IRQ		 BIT(5)
48  #define  SCU_PCIE_CONF_VGA_EN_DMA		 BIT(6)
49  #define  SCU_PCIE_CONF_BMC_EN			 BIT(8)
50  #define  SCU_PCIE_CONF_BMC_EN_MMIO		 BIT(9)
51  #define  SCU_PCIE_CONF_BMC_EN_MSI		 BIT(11)
52  #define  SCU_PCIE_CONF_BMC_EN_MCTP		 BIT(12)
53  #define  SCU_PCIE_CONF_BMC_EN_IRQ		 BIT(13)
54  #define  SCU_PCIE_CONF_BMC_EN_DMA		 BIT(14)
55  
56  #define SCU_AST2500_BMC_CLASS_REV		0x19c
57  #define SCU_AST2600_BMC_CLASS_REV		0xc68
58  #define  SCU_BMC_CLASS_REV_XDMA			 0xff000001
59  
60  #define XDMA_CMDQ_SIZE				PAGE_SIZE
61  #define XDMA_NUM_CMDS				\
62  	(XDMA_CMDQ_SIZE / sizeof(struct aspeed_xdma_cmd))
63  
64  /* Aspeed specification requires 100us after disabling the reset */
65  #define XDMA_ENGINE_SETUP_TIME_MAX_US          1000
66  #define XDMA_ENGINE_SETUP_TIME_MIN_US          100
67  
68  #define XDMA_CMD_AST2500_PITCH_SHIFT		3
69  #define XDMA_CMD_AST2500_PITCH_BMC		GENMASK_ULL(62, 51)
70  #define XDMA_CMD_AST2500_PITCH_HOST		GENMASK_ULL(46, 35)
71  #define XDMA_CMD_AST2500_PITCH_UPSTREAM		BIT_ULL(31)
72  #define XDMA_CMD_AST2500_PITCH_ADDR		GENMASK_ULL(29, 4)
73  #define XDMA_CMD_AST2500_PITCH_ID		BIT_ULL(0)
74  #define XDMA_CMD_AST2500_CMD_IRQ_EN		BIT_ULL(31)
75  #define XDMA_CMD_AST2500_CMD_LINE_NO		GENMASK_ULL(27, 16)
76  #define XDMA_CMD_AST2500_CMD_IRQ_BMC		BIT_ULL(15)
77  #define XDMA_CMD_AST2500_CMD_LINE_SIZE_SHIFT	4
78  #define XDMA_CMD_AST2500_CMD_LINE_SIZE		\
79  	GENMASK_ULL(14, XDMA_CMD_AST2500_CMD_LINE_SIZE_SHIFT)
80  #define XDMA_CMD_AST2500_CMD_ID			BIT_ULL(1)
81  
82  #define XDMA_CMD_AST2600_PITCH_BMC		GENMASK_ULL(62, 48)
83  #define XDMA_CMD_AST2600_PITCH_HOST		GENMASK_ULL(46, 32)
84  #define XDMA_CMD_AST2600_PITCH_ADDR		GENMASK_ULL(30, 0)
85  #define XDMA_CMD_AST2600_CMD_64_EN		BIT_ULL(40)
86  #define XDMA_CMD_AST2600_CMD_IRQ_BMC		BIT_ULL(37)
87  #define XDMA_CMD_AST2600_CMD_IRQ_HOST		BIT_ULL(36)
88  #define XDMA_CMD_AST2600_CMD_UPSTREAM		BIT_ULL(32)
89  #define XDMA_CMD_AST2600_CMD_LINE_NO		GENMASK_ULL(27, 16)
90  #define XDMA_CMD_AST2600_CMD_LINE_SIZE		GENMASK_ULL(14, 0)
91  #define XDMA_CMD_AST2600_CMD_MULTILINE_SIZE	GENMASK_ULL(14, 12)
92  
93  #define XDMA_AST2500_QUEUE_ENTRY_SIZE		4
94  #define XDMA_AST2500_HOST_CMDQ_ADDR0		0x00
95  #define XDMA_AST2500_HOST_CMDQ_ENDP		0x04
96  #define XDMA_AST2500_HOST_CMDQ_WRITEP		0x08
97  #define XDMA_AST2500_HOST_CMDQ_READP		0x0c
98  #define XDMA_AST2500_BMC_CMDQ_ADDR		0x10
99  #define XDMA_AST2500_BMC_CMDQ_ENDP		0x14
100  #define XDMA_AST2500_BMC_CMDQ_WRITEP		0x18
101  #define XDMA_AST2500_BMC_CMDQ_READP		0x1c
102  #define  XDMA_BMC_CMDQ_READP_RESET		 0xee882266
103  #define XDMA_AST2500_CTRL			0x20
104  #define  XDMA_AST2500_CTRL_US_COMP		 BIT(4)
105  #define  XDMA_AST2500_CTRL_DS_COMP		 BIT(5)
106  #define  XDMA_AST2500_CTRL_DS_DIRTY		 BIT(6)
107  #define  XDMA_AST2500_CTRL_DS_SIZE_256		 BIT(17)
108  #define  XDMA_AST2500_CTRL_DS_TIMEOUT		 BIT(28)
109  #define  XDMA_AST2500_CTRL_DS_CHECK_ID		 BIT(29)
110  #define XDMA_AST2500_STATUS			0x24
111  #define  XDMA_AST2500_STATUS_US_COMP		 BIT(4)
112  #define  XDMA_AST2500_STATUS_DS_COMP		 BIT(5)
113  #define  XDMA_AST2500_STATUS_DS_DIRTY		 BIT(6)
114  #define XDMA_AST2500_INPRG_DS_CMD1		0x38
115  #define XDMA_AST2500_INPRG_DS_CMD2		0x3c
116  #define XDMA_AST2500_INPRG_US_CMD00		0x40
117  #define XDMA_AST2500_INPRG_US_CMD01		0x44
118  #define XDMA_AST2500_INPRG_US_CMD10		0x48
119  #define XDMA_AST2500_INPRG_US_CMD11		0x4c
120  #define XDMA_AST2500_INPRG_US_CMD20		0x50
121  #define XDMA_AST2500_INPRG_US_CMD21		0x54
122  #define XDMA_AST2500_HOST_CMDQ_ADDR1		0x60
123  #define XDMA_AST2500_VGA_CMDQ_ADDR0		0x64
124  #define XDMA_AST2500_VGA_CMDQ_ENDP		0x68
125  #define XDMA_AST2500_VGA_CMDQ_WRITEP		0x6c
126  #define XDMA_AST2500_VGA_CMDQ_READP		0x70
127  #define XDMA_AST2500_VGA_CMD_STATUS		0x74
128  #define XDMA_AST2500_VGA_CMDQ_ADDR1		0x78
129  
130  #define XDMA_AST2600_QUEUE_ENTRY_SIZE		2
131  #define XDMA_AST2600_HOST_CMDQ_ADDR0		0x00
132  #define XDMA_AST2600_HOST_CMDQ_ADDR1		0x04
133  #define XDMA_AST2600_HOST_CMDQ_ENDP		0x08
134  #define XDMA_AST2600_HOST_CMDQ_WRITEP		0x0c
135  #define XDMA_AST2600_HOST_CMDQ_READP		0x10
136  #define XDMA_AST2600_BMC_CMDQ_ADDR		0x14
137  #define XDMA_AST2600_BMC_CMDQ_ENDP		0x18
138  #define XDMA_AST2600_BMC_CMDQ_WRITEP		0x1c
139  #define XDMA_AST2600_BMC_CMDQ_READP		0x20
140  #define XDMA_AST2600_VGA_CMDQ_ADDR0		0x24
141  #define XDMA_AST2600_VGA_CMDQ_ADDR1		0x28
142  #define XDMA_AST2600_VGA_CMDQ_ENDP		0x2c
143  #define XDMA_AST2600_VGA_CMDQ_WRITEP		0x30
144  #define XDMA_AST2600_VGA_CMDQ_READP		0x34
145  #define XDMA_AST2600_CTRL			0x38
146  #define  XDMA_AST2600_CTRL_US_COMP		 BIT(16)
147  #define  XDMA_AST2600_CTRL_DS_COMP		 BIT(17)
148  #define  XDMA_AST2600_CTRL_DS_DIRTY		 BIT(18)
149  #define  XDMA_AST2600_CTRL_DS_SIZE_256		 BIT(20)
150  #define XDMA_AST2600_STATUS			0x3c
151  #define  XDMA_AST2600_STATUS_US_COMP		 BIT(16)
152  #define  XDMA_AST2600_STATUS_DS_COMP		 BIT(17)
153  #define  XDMA_AST2600_STATUS_DS_DIRTY		 BIT(18)
154  #define XDMA_AST2600_INPRG_DS_CMD00		0x40
155  #define XDMA_AST2600_INPRG_DS_CMD01		0x44
156  #define XDMA_AST2600_INPRG_DS_CMD10		0x48
157  #define XDMA_AST2600_INPRG_DS_CMD11		0x4c
158  #define XDMA_AST2600_INPRG_DS_CMD20		0x50
159  #define XDMA_AST2600_INPRG_DS_CMD21		0x54
160  #define XDMA_AST2600_INPRG_US_CMD00		0x60
161  #define XDMA_AST2600_INPRG_US_CMD01		0x64
162  #define XDMA_AST2600_INPRG_US_CMD10		0x68
163  #define XDMA_AST2600_INPRG_US_CMD11		0x6c
164  #define XDMA_AST2600_INPRG_US_CMD20		0x70
165  #define XDMA_AST2600_INPRG_US_CMD21		0x74
166  
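/* Command descriptor format copied into the command queue for the engine. */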
167  struct aspeed_xdma_cmd {
168  	u64 host_addr;
169  	u64 pitch;
170  	u64 cmd;
171  	u64 reserved;
172  };
173  
174  struct aspeed_xdma_regs {
175  	u8 bmc_cmdq_addr;
176  	u8 bmc_cmdq_endp;
177  	u8 bmc_cmdq_writep;
178  	u8 bmc_cmdq_readp;
179  	u8 control;
180  	u8 status;
181  };
182  
183  struct aspeed_xdma_status_bits {
184  	u32 us_comp;
185  	u32 ds_comp;
186  	u32 ds_dirty;
187  };
188  
189  struct aspeed_xdma;
190  
191  struct aspeed_xdma_chip {
192  	u32 control;
193  	u32 scu_bmc_class;
194  	u32 scu_misc_ctrl;
195  	u32 scu_pcie_conf;
196  	unsigned int queue_entry_size;
197  	struct aspeed_xdma_regs regs;
198  	struct aspeed_xdma_status_bits status_bits;
199  	unsigned int (*set_cmd)(struct aspeed_xdma *ctx,
200  				struct aspeed_xdma_cmd cmds[2],
201  				struct aspeed_xdma_op *op, u32 bmc_addr);
202  };
203  
204  struct aspeed_xdma_client;
205  
206  struct aspeed_xdma {
207  	struct kobject kobj;
208  	const struct aspeed_xdma_chip *chip;
209  
210  	int irq;
211  	int pcie_irq;
212  	struct clk *clock;
213  	struct device *dev;
214  	void __iomem *base;
215  	resource_size_t res_size;
216  	resource_size_t res_start;
217  	struct reset_control *reset;
218  	struct reset_control *reset_rc;
219  
220  	/* Protects current_client */
221  	spinlock_t client_lock;
222  	struct aspeed_xdma_client *current_client;
223  
224  	/* Protects engine configuration */
225  	spinlock_t engine_lock;
226  	struct aspeed_xdma_cmd *cmdq;
227  	unsigned int cmd_idx;
228  	bool in_reset;
229  	bool upstream;
230  
231  	/* Queue waiters for idle engine */
232  	wait_queue_head_t wait;
233  
234  	struct work_struct reset_work;
235  
236  	u32 mem_phys;
237  	u32 mem_size;
238  	void *mem_virt;
239  	dma_addr_t mem_coherent;
240  	dma_addr_t cmdq_phys;
241  	struct gen_pool *pool;
242  
243  	struct miscdevice misc;
244  };
245  
246  struct aspeed_xdma_client {
247  	struct aspeed_xdma *ctx;
248  
249  	bool error;
250  	bool in_progress;
251  	void *virt;
252  	dma_addr_t phys;
253  	u32 size;
254  };
255  
256  #define CREATE_TRACE_POINTS
257  #include <trace/events/xdma.h>
258  
259  static u32 aspeed_xdma_readl(struct aspeed_xdma *ctx, u8 reg)
260  {
261  	u32 v = readl(ctx->base + reg);
262  
263  	dev_dbg(ctx->dev, "read %02x[%08x]\n", reg, v);
264  	return v;
265  }
266  
267  static void aspeed_xdma_writel(struct aspeed_xdma *ctx, u8 reg, u32 val)
268  {
269  	writel(val, ctx->base + reg);
270  	dev_dbg(ctx->dev, "write %02x[%08x]\n", reg, val);
271  }
272  
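/*
 * Program the BMC command queue registers (end, read and write pointers,
 * control and queue base address) and reset the driver's command index.
 * Called at probe time and again after every engine reset.
 */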
273  static void aspeed_xdma_init_eng(struct aspeed_xdma *ctx)
274  {
275  	unsigned long flags;
276  
277  	spin_lock_irqsave(&ctx->engine_lock, flags);
278  	aspeed_xdma_writel(ctx, ctx->chip->regs.bmc_cmdq_endp,
279  			   ctx->chip->queue_entry_size * XDMA_NUM_CMDS);
280  	aspeed_xdma_writel(ctx, ctx->chip->regs.bmc_cmdq_readp,
281  			   XDMA_BMC_CMDQ_READP_RESET);
282  	aspeed_xdma_writel(ctx, ctx->chip->regs.bmc_cmdq_writep, 0);
283  	aspeed_xdma_writel(ctx, ctx->chip->regs.control, ctx->chip->control);
284  	aspeed_xdma_writel(ctx, ctx->chip->regs.bmc_cmdq_addr, ctx->cmdq_phys);
285  
286  	ctx->cmd_idx = 0;
287  	spin_unlock_irqrestore(&ctx->engine_lock, flags);
288  }
289  
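/*
 * Build one or two AST2500 command descriptors for the requested operation.
 * Transfers larger than the maximum line size are split into a multi-line
 * command plus an optional remainder command. Returns the number of
 * descriptors written (1 or 2).
 */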
290  static unsigned int aspeed_xdma_ast2500_set_cmd(struct aspeed_xdma *ctx,
291  						struct aspeed_xdma_cmd cmds[2],
292  						struct aspeed_xdma_op *op,
293  						u32 bmc_addr)
294  {
295  	unsigned int rc = 1;
296  	unsigned int pitch = 1;
297  	unsigned int line_no = 1;
298  	unsigned int line_size = op->len >>
299  		XDMA_CMD_AST2500_CMD_LINE_SIZE_SHIFT;
300  	u64 cmd = XDMA_CMD_AST2500_CMD_IRQ_EN | XDMA_CMD_AST2500_CMD_IRQ_BMC |
301  		XDMA_CMD_AST2500_CMD_ID;
302  	u64 cmd_pitch = (op->direction ? XDMA_CMD_AST2500_PITCH_UPSTREAM : 0) |
303  		XDMA_CMD_AST2500_PITCH_ID;
304  
305  	dev_dbg(ctx->dev, "xdma %s ast2500: bmc[%08x] len[%08x] host[%08x]\n",
306  		op->direction ? "upstream" : "downstream", bmc_addr, op->len,
307  		(u32)op->host_addr);
308  
309  	if (op->len > XDMA_CMD_AST2500_CMD_LINE_SIZE) {
310  		unsigned int rem;
311  		unsigned int total;
312  
313  		line_no = op->len / XDMA_CMD_AST2500_CMD_LINE_SIZE;
314  		total = XDMA_CMD_AST2500_CMD_LINE_SIZE * line_no;
315  		rem = (op->len - total) >>
316  			XDMA_CMD_AST2500_CMD_LINE_SIZE_SHIFT;
317  		line_size = XDMA_CMD_AST2500_CMD_LINE_SIZE;
318  		pitch = line_size >> XDMA_CMD_AST2500_PITCH_SHIFT;
319  		line_size >>= XDMA_CMD_AST2500_CMD_LINE_SIZE_SHIFT;
320  
321  		if (rem) {
322  			u32 rbmc = bmc_addr + total;
323  
324  			cmds[1].host_addr = op->host_addr + (u64)total;
325  			cmds[1].pitch = cmd_pitch |
326  				((u64)rbmc & XDMA_CMD_AST2500_PITCH_ADDR) |
327  				FIELD_PREP(XDMA_CMD_AST2500_PITCH_HOST, 1) |
328  				FIELD_PREP(XDMA_CMD_AST2500_PITCH_BMC, 1);
329  			cmds[1].cmd = cmd |
330  				FIELD_PREP(XDMA_CMD_AST2500_CMD_LINE_NO, 1) |
331  				FIELD_PREP(XDMA_CMD_AST2500_CMD_LINE_SIZE,
332  					   rem);
333  			cmds[1].reserved = 0ULL;
334  
335  			print_hex_dump_debug("xdma rem ", DUMP_PREFIX_OFFSET,
336  					     16, 1, &cmds[1], sizeof(*cmds),
337  					     true);
338  
339  			cmd &= ~(XDMA_CMD_AST2500_CMD_IRQ_EN |
340  				 XDMA_CMD_AST2500_CMD_IRQ_BMC);
341  
342  			rc++;
343  		}
344  	}
345  
346  	cmds[0].host_addr = op->host_addr;
347  	cmds[0].pitch = cmd_pitch |
348  		((u64)bmc_addr & XDMA_CMD_AST2500_PITCH_ADDR) |
349  		FIELD_PREP(XDMA_CMD_AST2500_PITCH_HOST, pitch) |
350  		FIELD_PREP(XDMA_CMD_AST2500_PITCH_BMC, pitch);
351  	cmds[0].cmd = cmd | FIELD_PREP(XDMA_CMD_AST2500_CMD_LINE_NO, line_no) |
352  		FIELD_PREP(XDMA_CMD_AST2500_CMD_LINE_SIZE, line_size);
353  	cmds[0].reserved = 0ULL;
354  
355  	print_hex_dump_debug("xdma cmd ", DUMP_PREFIX_OFFSET, 16, 1, cmds,
356  			     sizeof(*cmds), true);
357  
358  	return rc;
359  }
360  
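/*
 * Build one or two AST2600 command descriptors. Large transfers are split
 * into a multi-line command plus an optional remainder command, and 64-bit
 * host addressing is enabled when the transfer extends above 4GB. Returns
 * the number of descriptors written (1 or 2).
 */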
361  static unsigned int aspeed_xdma_ast2600_set_cmd(struct aspeed_xdma *ctx,
362  						struct aspeed_xdma_cmd cmds[2],
363  						struct aspeed_xdma_op *op,
364  						u32 bmc_addr)
365  {
366  	unsigned int rc = 1;
367  	unsigned int pitch = 1;
368  	unsigned int line_no = 1;
369  	unsigned int line_size = op->len;
370  	u64 cmd = XDMA_CMD_AST2600_CMD_IRQ_BMC |
371  		(op->direction ? XDMA_CMD_AST2600_CMD_UPSTREAM : 0);
372  
373  	if (op->host_addr & 0xffffffff00000000ULL ||
374  	    (op->host_addr + (u64)op->len) & 0xffffffff00000000ULL)
375  		cmd |= XDMA_CMD_AST2600_CMD_64_EN;
376  
377  	dev_dbg(ctx->dev, "xdma %s ast2600: bmc[%08x] len[%08x] "
378  		"host[%016llx]\n", op->direction ? "upstream" : "downstream",
379  		bmc_addr, op->len, op->host_addr);
380  
381  	if (op->len > XDMA_CMD_AST2600_CMD_LINE_SIZE) {
382  		unsigned int rem;
383  		unsigned int total;
384  
385  		line_no = op->len / XDMA_CMD_AST2600_CMD_MULTILINE_SIZE;
386  		total = XDMA_CMD_AST2600_CMD_MULTILINE_SIZE * line_no;
387  		rem = op->len - total;
388  		line_size = XDMA_CMD_AST2600_CMD_MULTILINE_SIZE;
389  		pitch = line_size;
390  
391  		if (rem) {
392  			u32 rbmc = bmc_addr + total;
393  
394  			cmds[1].host_addr = op->host_addr + (u64)total;
395  			cmds[1].pitch =
396  				((u64)rbmc & XDMA_CMD_AST2600_PITCH_ADDR) |
397  				FIELD_PREP(XDMA_CMD_AST2600_PITCH_HOST, 1) |
398  				FIELD_PREP(XDMA_CMD_AST2600_PITCH_BMC, 1);
399  			cmds[1].cmd = cmd |
400  				FIELD_PREP(XDMA_CMD_AST2600_CMD_LINE_NO, 1) |
401  				FIELD_PREP(XDMA_CMD_AST2600_CMD_LINE_SIZE,
402  					   rem);
403  			cmds[1].reserved = 0ULL;
404  
405  			print_hex_dump_debug("xdma rem ", DUMP_PREFIX_OFFSET,
406  					     16, 1, &cmds[1], sizeof(*cmds),
407  					     true);
408  
409  			cmd &= ~XDMA_CMD_AST2600_CMD_IRQ_BMC;
410  
411  			rc++;
412  		}
413  	}
414  
415  	cmds[0].host_addr = op->host_addr;
416  	cmds[0].pitch = ((u64)bmc_addr & XDMA_CMD_AST2600_PITCH_ADDR) |
417  		FIELD_PREP(XDMA_CMD_AST2600_PITCH_HOST, pitch) |
418  		FIELD_PREP(XDMA_CMD_AST2600_PITCH_BMC, pitch);
419  	cmds[0].cmd = cmd | FIELD_PREP(XDMA_CMD_AST2600_CMD_LINE_NO, line_no) |
420  		FIELD_PREP(XDMA_CMD_AST2600_CMD_LINE_SIZE, line_size);
421  	cmds[0].reserved = 0ULL;
422  
423  	print_hex_dump_debug("xdma cmd ", DUMP_PREFIX_OFFSET, 16, 1, cmds,
424  			     sizeof(*cmds), true);
425  
426  	return rc;
427  }
428  
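/*
 * Claim the engine for the client, copy the command descriptors into the
 * command queue and advance the hardware write pointer to start the
 * transfer. Returns -EBUSY if the engine is in reset or already owned by
 * another client.
 */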
429  static int aspeed_xdma_start(struct aspeed_xdma *ctx, unsigned int num_cmds,
430  			     struct aspeed_xdma_cmd cmds[2], bool upstream,
431  			     struct aspeed_xdma_client *client)
432  {
433  	unsigned int i;
434  	int rc = -EBUSY;
435  	unsigned long flags;
436  
437  	spin_lock_irqsave(&ctx->engine_lock, flags);
438  	if (ctx->in_reset)
439  		goto unlock;
440  
441  	spin_lock(&ctx->client_lock);
442  	if (ctx->current_client) {
443  		spin_unlock(&ctx->client_lock);
444  		goto unlock;
445  	}
446  
447  	client->error = false;
448  	client->in_progress = true;
449  	ctx->current_client = client;
450  	spin_unlock(&ctx->client_lock);
451  
452  	ctx->upstream = upstream;
453  	for (i = 0; i < num_cmds; ++i) {
454  		trace_xdma_start(ctx, &cmds[i]);
455  		/*
456  		 * Use memcpy_toio here to get some barriers before starting
457  		 * the operation. The command(s) need to be in physical memory
458  		 * before the XDMA engine starts.
459  		 */
460  		memcpy_toio(&ctx->cmdq[ctx->cmd_idx], &cmds[i],
461  			    sizeof(struct aspeed_xdma_cmd));
462  		ctx->cmd_idx = (ctx->cmd_idx + 1) % XDMA_NUM_CMDS;
463  	}
464  
465  	aspeed_xdma_writel(ctx, ctx->chip->regs.bmc_cmdq_writep,
466  			   ctx->cmd_idx * ctx->chip->queue_entry_size);
467  	rc = 0;
468  
469  unlock:
470  	spin_unlock_irqrestore(&ctx->engine_lock, flags);
471  	return rc;
472  }
473  
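/*
 * Complete the current transfer: record the error state for the owning
 * client, release the engine and wake any waiters.
 */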
474  static void aspeed_xdma_done(struct aspeed_xdma *ctx, bool error)
475  {
476  	unsigned long flags;
477  
478  	spin_lock_irqsave(&ctx->client_lock, flags);
479  	if (ctx->current_client) {
480  		ctx->current_client->error = error;
481  		ctx->current_client->in_progress = false;
482  		ctx->current_client = NULL;
483  	}
484  	spin_unlock_irqrestore(&ctx->client_lock, flags);
485  
486  	wake_up_interruptible_all(&ctx->wait);
487  }
488  
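/*
 * Engine interrupt: read and acknowledge the status register, and complete
 * the current transfer on a downstream-dirty error or on the completion bit
 * matching the transfer direction.
 */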
489  static irqreturn_t aspeed_xdma_irq(int irq, void *arg)
490  {
491  	struct aspeed_xdma *ctx = arg;
492  	u32 status;
493  
494  	spin_lock(&ctx->engine_lock);
495  	status = aspeed_xdma_readl(ctx, ctx->chip->regs.status);
496  
497  	trace_xdma_irq(status);
498  
499  	if (status & ctx->chip->status_bits.ds_dirty) {
500  		aspeed_xdma_done(ctx, true);
501  	} else {
502  		if (status & ctx->chip->status_bits.us_comp) {
503  			if (ctx->upstream)
504  				aspeed_xdma_done(ctx, false);
505  		}
506  
507  		if (status & ctx->chip->status_bits.ds_comp) {
508  			if (!ctx->upstream)
509  				aspeed_xdma_done(ctx, false);
510  		}
511  	}
512  
513  	aspeed_xdma_writel(ctx, ctx->chip->regs.status, status);
514  	spin_unlock(&ctx->engine_lock);
515  
516  	return IRQ_HANDLED;
517  }
518  
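/*
 * Pulse the engine reset, reprogram the command queue registers and fail
 * any transfer that was in flight, then clear in_reset and wake waiters.
 */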
519  static void aspeed_xdma_reset(struct aspeed_xdma *ctx)
520  {
521  	unsigned long flags;
522  
523  	trace_xdma_reset(ctx);
524  
525  	reset_control_assert(ctx->reset);
526  	usleep_range(XDMA_ENGINE_SETUP_TIME_MIN_US,
527  		     XDMA_ENGINE_SETUP_TIME_MAX_US);
528  	reset_control_deassert(ctx->reset);
529  	usleep_range(XDMA_ENGINE_SETUP_TIME_MIN_US,
530  		     XDMA_ENGINE_SETUP_TIME_MAX_US);
531  
532  	aspeed_xdma_init_eng(ctx);
533  
534  	aspeed_xdma_done(ctx, true);
535  
536  	spin_lock_irqsave(&ctx->engine_lock, flags);
537  	ctx->in_reset = false;
538  	spin_unlock_irqrestore(&ctx->engine_lock, flags);
539  
540  	wake_up_interruptible(&ctx->wait);
541  }
542  
543  static void aspeed_xdma_reset_work(struct work_struct *work)
544  {
545  	struct aspeed_xdma *ctx = container_of(work, struct aspeed_xdma,
546  					       reset_work);
547  
548  	aspeed_xdma_reset(ctx);
549  }
550  
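/*
 * PCIe reset (PERST) interrupt: the host has reset the PCIe link, so
 * schedule an engine reset unless one is already in progress.
 */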
551  static irqreturn_t aspeed_xdma_pcie_irq(int irq, void *arg)
552  {
553  	struct aspeed_xdma *ctx = arg;
554  
555  	trace_xdma_perst(ctx);
556  
557  	spin_lock(&ctx->engine_lock);
558  	if (ctx->in_reset) {
559  		spin_unlock(&ctx->engine_lock);
560  		return IRQ_HANDLED;
561  	}
562  
563  	ctx->in_reset = true;
564  	spin_unlock(&ctx->engine_lock);
565  
566  	schedule_work(&ctx->reset_work);
567  	return IRQ_HANDLED;
568  }
569  
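/*
 * write() starts a transfer: copy an aspeed_xdma_op from userspace, build
 * the command descriptors for the client's mapped buffer and submit them.
 * Blocking files wait for the engine to become free and for the transfer to
 * complete; O_NONBLOCK files return -EBUSY instead of waiting.
 */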
570  static ssize_t aspeed_xdma_write(struct file *file, const char __user *buf,
571  				 size_t len, loff_t *offset)
572  {
573  	int rc;
574  	unsigned int num_cmds;
575  	struct aspeed_xdma_op op;
576  	struct aspeed_xdma_cmd cmds[2];
577  	struct aspeed_xdma_client *client = file->private_data;
578  	struct aspeed_xdma *ctx = client->ctx;
579  
580  	if (len != sizeof(op))
581  		return -EINVAL;
582  
583  	if (copy_from_user(&op, buf, len))
584  		return -EFAULT;
585  
586  	if (!op.len || op.len > client->size ||
587  	    op.direction > ASPEED_XDMA_DIRECTION_UPSTREAM)
588  		return -EINVAL;
589  
590  	num_cmds = ctx->chip->set_cmd(ctx, cmds, &op, client->phys);
591  	do {
592  		rc = aspeed_xdma_start(ctx, num_cmds, cmds, !!op.direction,
593  				       client);
594  		if (!rc)
595  			break;
596  
597  		if ((file->f_flags & O_NONBLOCK) || rc != -EBUSY)
598  			return rc;
599  
600  		rc = wait_event_interruptible(ctx->wait,
601  					      !(ctx->current_client ||
602  						ctx->in_reset));
603  	} while (!rc);
604  
605  	if (rc)
606  		return -EINTR;
607  
608  	if (!(file->f_flags & O_NONBLOCK)) {
609  		rc = wait_event_interruptible(ctx->wait, !client->in_progress);
610  		if (rc)
611  			return -EINTR;
612  
613  		if (client->error)
614  			return -EIO;
615  	}
616  
617  	return len;
618  }
619  
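/*
 * poll() reports EPOLLIN once the client's transfer has completed (EPOLLERR
 * if it failed) and EPOLLOUT while no client owns the engine.
 */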
620  static __poll_t aspeed_xdma_poll(struct file *file,
621  				 struct poll_table_struct *wait)
622  {
623  	__poll_t mask = 0;
624  	__poll_t req = poll_requested_events(wait);
625  	struct aspeed_xdma_client *client = file->private_data;
626  	struct aspeed_xdma *ctx = client->ctx;
627  
628  	if (req & (EPOLLIN | EPOLLRDNORM)) {
629  		if (READ_ONCE(client->in_progress))
630  			poll_wait(file, &ctx->wait, wait);
631  
632  		if (!READ_ONCE(client->in_progress)) {
633  			if (READ_ONCE(client->error))
634  				mask |= EPOLLERR;
635  			else
636  				mask |= EPOLLIN | EPOLLRDNORM;
637  		}
638  	}
639  
640  	if (req & (EPOLLOUT | EPOLLWRNORM)) {
641  		if (READ_ONCE(ctx->current_client))
642  			poll_wait(file, &ctx->wait, wait);
643  
644  		if (!READ_ONCE(ctx->current_client))
645  			mask |= EPOLLOUT | EPOLLWRNORM;
646  	}
647  
648  	return mask;
649  }
650  
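/*
 * The only ioctl, ASPEED_XDMA_IOCTL_RESET, forces an engine reset; warn if a
 * transfer was still in progress when the user requested it.
 */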
651  static long aspeed_xdma_ioctl(struct file *file, unsigned int cmd,
652  			      unsigned long param)
653  {
654  	unsigned long flags;
655  	struct aspeed_xdma_client *client = file->private_data;
656  	struct aspeed_xdma *ctx = client->ctx;
657  
658  	switch (cmd) {
659  	case ASPEED_XDMA_IOCTL_RESET:
660  		spin_lock_irqsave(&ctx->engine_lock, flags);
661  		if (ctx->in_reset) {
662  			spin_unlock_irqrestore(&ctx->engine_lock, flags);
663  			return 0;
664  		}
665  
666  		ctx->in_reset = true;
667  		spin_unlock_irqrestore(&ctx->engine_lock, flags);
668  
669  		if (READ_ONCE(ctx->current_client))
670  			dev_warn(ctx->dev,
671  				 "User reset with transfer in progress.\n");
672  
673  		aspeed_xdma_reset(ctx);
674  		break;
675  	default:
676  		return -EINVAL;
677  	}
678  
679  	return 0;
680  }
681  
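/*
 * On unmap, wait for any in-flight transfer to finish and then return the
 * client's buffer to the reserved-memory pool.
 */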
682  static void aspeed_xdma_vma_close(struct vm_area_struct *vma)
683  {
684  	int rc;
685  	struct aspeed_xdma_client *client = vma->vm_private_data;
686  
687  	rc = wait_event_interruptible(client->ctx->wait, !client->in_progress);
688  	if (rc)
689  		return;
690  
691  	gen_pool_free(client->ctx->pool, (unsigned long)client->virt,
692  		      client->size);
693  	trace_xdma_unmap(client);
694  
695  	client->virt = NULL;
696  	client->phys = 0;
697  	client->size = 0;
698  }
699  
700  static const struct vm_operations_struct aspeed_xdma_vm_ops = {
701  	.close =	aspeed_xdma_vma_close,
702  };
703  
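/*
 * Allocate a buffer from the reserved-memory pool and map it into the
 * client's address space; only one mapping per file descriptor is allowed.
 * This buffer is the BMC-side source or destination of later transfers.
 */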
704  static int aspeed_xdma_mmap(struct file *file, struct vm_area_struct *vma)
705  {
706  	int rc;
707  	struct aspeed_xdma_client *client = file->private_data;
708  	struct aspeed_xdma *ctx = client->ctx;
709  
710  	/* restrict file to one mapping */
711  	if (client->size)
712  		return -EBUSY;
713  
714  	client->size = vma->vm_end - vma->vm_start;
715  	client->virt = gen_pool_dma_alloc(ctx->pool, client->size,
716  					  &client->phys);
717  	if (!client->virt) {
718  		trace_xdma_mmap_error(client, 0UL);
719  		client->phys = 0;
720  		client->size = 0;
721  		return -ENOMEM;
722  	}
723  
724  	vma->vm_pgoff = (client->phys - ctx->mem_phys) >> PAGE_SHIFT;
725  	vma->vm_ops = &aspeed_xdma_vm_ops;
726  	vma->vm_private_data = client;
727  	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
728  
729  	rc = io_remap_pfn_range(vma, vma->vm_start, client->phys >> PAGE_SHIFT,
730  				client->size, vma->vm_page_prot);
731  	if (rc) {
732  		dev_warn(ctx->dev, "mmap err: v[%08lx] to p[%08x], s[%08x]\n",
733  			 vma->vm_start, (u32)client->phys, client->size);
734  
735  		gen_pool_free(ctx->pool, (unsigned long)client->virt,
736  			      client->size);
737  
738  		trace_xdma_mmap_error(client, vma->vm_start);
739  		client->virt = NULL;
740  		client->phys = 0;
741  		client->size = 0;
742  		return rc;
743  	}
744  
745  	trace_xdma_mmap(client);
746  	dev_dbg(ctx->dev, "mmap: v[%08lx] to p[%08x], s[%08x]\n",
747  		vma->vm_start, (u32)client->phys, client->size);
748  
749  	return 0;
750  }
751  
752  static int aspeed_xdma_open(struct inode *inode, struct file *file)
753  {
754  	struct miscdevice *misc = file->private_data;
755  	struct aspeed_xdma *ctx = container_of(misc, struct aspeed_xdma, misc);
756  	struct aspeed_xdma_client *client = kzalloc(sizeof(*client),
757  						    GFP_KERNEL);
758  
759  	if (!client)
760  		return -ENOMEM;
761  
762  	kobject_get(&ctx->kobj);
763  	client->ctx = ctx;
764  	file->private_data = client;
765  	return 0;
766  }
767  
768  static int aspeed_xdma_release(struct inode *inode, struct file *file)
769  {
770  	bool reset = false;
771  	unsigned long flags;
772  	struct aspeed_xdma_client *client = file->private_data;
773  	struct aspeed_xdma *ctx = client->ctx;
774  
775  	spin_lock_irqsave(&ctx->client_lock, flags);
776  	if (client == ctx->current_client) {
777  		spin_lock(&ctx->engine_lock);
778  		if (ctx->in_reset) {
779  			ctx->current_client = NULL;
780  		} else {
781  			ctx->in_reset = true;
782  			reset = true;
783  		}
784  		spin_unlock(&ctx->engine_lock);
785  	}
786  	spin_unlock_irqrestore(&ctx->client_lock, flags);
787  
788  	if (reset)
789  		aspeed_xdma_reset(ctx);
790  
791  	if (client->virt) {
792  		gen_pool_free(ctx->pool, (unsigned long)client->virt,
793  			      client->size);
794  		trace_xdma_unmap(client);
795  	}
796  
797  	kfree(client);
798  	kobject_put(&ctx->kobj);
799  	return 0;
800  }
801  
802  static const struct file_operations aspeed_xdma_fops = {
803  	.owner			= THIS_MODULE,
804  	.write			= aspeed_xdma_write,
805  	.poll			= aspeed_xdma_poll,
806  	.unlocked_ioctl		= aspeed_xdma_ioctl,
807  	.mmap			= aspeed_xdma_mmap,
808  	.open			= aspeed_xdma_open,
809  	.release		= aspeed_xdma_release,
810  };
811  
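/*
 * Configure the SCU: select the PCIe device (BMC or VGA, per the optional
 * aspeed,pcie-device property) used by the engine, program the BMC PCI
 * class code when the BMC device is selected, and on the AST2600 clear the
 * debug control bit that disables XDMA.
 */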
812  static int aspeed_xdma_init_scu(struct aspeed_xdma *ctx, struct device *dev)
813  {
814  	struct regmap *scu = syscon_regmap_lookup_by_phandle(dev->of_node,
815  							     "aspeed,scu");
816  
817  	if (!IS_ERR(scu)) {
818  		u32 selection;
819  		bool pcie_device_bmc = true;
820  		const u32 bmc = SCU_PCIE_CONF_BMC_EN |
821  			SCU_PCIE_CONF_BMC_EN_MSI | SCU_PCIE_CONF_BMC_EN_IRQ |
822  			SCU_PCIE_CONF_BMC_EN_DMA;
823  		const u32 vga = SCU_PCIE_CONF_VGA_EN |
824  			SCU_PCIE_CONF_VGA_EN_MSI | SCU_PCIE_CONF_VGA_EN_IRQ |
825  			SCU_PCIE_CONF_VGA_EN_DMA;
826  		const char *pcie = NULL;
827  
828  		if (!of_property_read_string(dev->of_node,
829  					     "aspeed,pcie-device", &pcie)) {
830  			if (!strcmp(pcie, "vga")) {
831  				pcie_device_bmc = false;
832  			} else if (strcmp(pcie, "bmc")) {
833  				dev_err(dev,
834  					"Invalid pcie-device property %s.\n",
835  					pcie);
836  				return -EINVAL;
837  			}
838  		}
839  
840  		if (pcie_device_bmc) {
841  			selection = bmc;
842  			regmap_write(scu, ctx->chip->scu_bmc_class,
843  				     SCU_BMC_CLASS_REV_XDMA);
844  		} else {
845  			selection = vga;
846  		}
847  
848  		regmap_update_bits(scu, ctx->chip->scu_pcie_conf, bmc | vga,
849  				   selection);
850  
851  		if (ctx->chip->scu_misc_ctrl) {
852  			regmap_update_bits(scu, ctx->chip->scu_misc_ctrl,
853  					   SCU_AST2600_MISC_CTRL_XDMA_BMC,
854  					   SCU_AST2600_MISC_CTRL_XDMA_BMC);
855  
856  			/* Allow XDMA to be used on AST2600 */
857  			regmap_update_bits(scu, SCU_AST2600_DEBUG_CTRL,
858  					   DEBUG_CTRL_XDMA_DISABLE, 0);
859  		}
860  	} else {
861  		dev_warn(dev, "Unable to configure PCIe: %ld; continuing.\n",
862  			 PTR_ERR(scu));
863  	}
864  
865  	return 0;
866  }
867  
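/*
 * Final teardown, run when the last reference (including open file
 * descriptors) is dropped: free the IRQs, command queue, pool, DMA memory,
 * reset and clock handles, and the MMIO mapping.
 */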
868  static void aspeed_xdma_kobject_release(struct kobject *kobj)
869  {
870  	struct aspeed_xdma *ctx = container_of(kobj, struct aspeed_xdma, kobj);
871  
872  	if (ctx->pcie_irq >= 0)
873  		free_irq(ctx->pcie_irq, ctx);
874  
875  	gen_pool_free(ctx->pool, (unsigned long)ctx->cmdq, XDMA_CMDQ_SIZE);
876  
877  	gen_pool_destroy(ctx->pool);
878  
879  	dma_free_coherent(ctx->dev, ctx->mem_size, ctx->mem_virt,
880  			  ctx->mem_coherent);
881  
882  	if (ctx->reset_rc)
883  		reset_control_put(ctx->reset_rc);
884  	reset_control_put(ctx->reset);
885  
886  	clk_put(ctx->clock);
887  
888  	free_irq(ctx->irq, ctx);
889  
890  	iounmap(ctx->base);
891  	release_mem_region(ctx->res_start, ctx->res_size);
892  
893  	kfree(ctx);
894  }
895  
896  static struct kobj_type aspeed_xdma_kobject_type = {
897  	.release = aspeed_xdma_kobject_release,
898  };
899  
900  static int aspeed_xdma_iomap(struct aspeed_xdma *ctx,
901  			     struct platform_device *pdev)
902  {
903  	resource_size_t size;
904  	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
905  
906  	if (!res)
907  		return -ENOMEM;
908  
909  	size = resource_size(res);
910  	if (!request_mem_region(res->start, size, dev_name(ctx->dev)))
911  		return -ENOMEM;
912  
913  	ctx->base = ioremap(res->start, size);
914  	if (!ctx->base) {
915  		release_mem_region(res->start, size);
916  		return -ENOMEM;
917  	}
918  
919  	ctx->res_start = res->start;
920  	ctx->res_size = size;
921  
922  	return 0;
923  }
924  
925  static int aspeed_xdma_probe(struct platform_device *pdev)
926  {
927  	int rc;
928  	struct aspeed_xdma *ctx;
929  	struct reserved_mem *mem;
930  	struct device *dev = &pdev->dev;
931  	struct device_node *memory_region;
932  	const void *md = of_device_get_match_data(dev);
933  
934  	if (!md)
935  		return -ENODEV;
936  
937  	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
938  	if (!ctx)
939  		return -ENOMEM;
940  
941  	ctx->chip = md;
942  	ctx->dev = dev;
943  	platform_set_drvdata(pdev, ctx);
944  	spin_lock_init(&ctx->client_lock);
945  	spin_lock_init(&ctx->engine_lock);
946  	INIT_WORK(&ctx->reset_work, aspeed_xdma_reset_work);
947  	init_waitqueue_head(&ctx->wait);
948  
949  	rc = aspeed_xdma_iomap(ctx, pdev);
950  	if (rc) {
951  		dev_err(dev, "Failed to map registers.\n");
952  		goto err_nomap;
953  	}
954  
955  	ctx->irq = platform_get_irq(pdev, 0);
956  	if (ctx->irq < 0) {
957  		dev_err(dev, "Failed to find IRQ.\n");
958  		rc = ctx->irq;
959  		goto err_noirq;
960  	}
961  
962  	rc = request_irq(ctx->irq, aspeed_xdma_irq, 0, DEVICE_NAME, ctx);
963  	if (rc < 0) {
964  		dev_err(dev, "Failed to request IRQ %d.\n", ctx->irq);
965  		goto err_noirq;
966  	}
967  
968  	ctx->clock = clk_get(dev, NULL);
969  	if (IS_ERR(ctx->clock)) {
970  		dev_err(dev, "Failed to request clock.\n");
971  		rc = PTR_ERR(ctx->clock);
972  		goto err_noclk;
973  	}
974  
975  	ctx->reset = reset_control_get_exclusive(dev, NULL);
976  	if (IS_ERR(ctx->reset)) {
977  		dev_err(dev, "Failed to request reset control.\n");
978  		rc = PTR_ERR(ctx->reset);
979  		goto err_noreset;
980  	}
981  
982  	ctx->reset_rc = reset_control_get_exclusive(dev, "root-complex");
983  	if (IS_ERR(ctx->reset_rc)) {
984  		dev_dbg(dev, "Failed to request reset RC control.\n");
985  		ctx->reset_rc = NULL;
986  	}
987  
988  	memory_region = of_parse_phandle(dev->of_node, "memory-region", 0);
989  	if (!memory_region) {
990  		dev_err(dev, "Failed to find memory-region.\n");
991  		rc = -ENOMEM;
992  		goto err_nomem;
993  	}
994  
995  	mem = of_reserved_mem_lookup(memory_region);
996  	of_node_put(memory_region);
997  	if (!mem) {
998  		dev_err(dev, "Failed to find reserved memory.\n");
999  		rc = -ENOMEM;
1000  		goto err_nomem;
1001  	}
1002  
1003  	ctx->mem_phys = mem->base;
1004  	ctx->mem_size = mem->size;
1005  
1006  	rc = of_reserved_mem_device_init(dev);
1007  	if (rc) {
1008  		dev_err(dev, "Failed to init reserved memory.\n");
1009  		goto err_nomem;
1010  	}
1011  
1012  	rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
1013  	if (rc) {
1014  		dev_err(dev, "Failed to mask DMA.\n");
1015  		goto err_nomem;
1016  	}
1017  
1018  	ctx->mem_virt = dma_alloc_coherent(dev, ctx->mem_size,
1019  					   &ctx->mem_coherent, 0);
1020  	if (!ctx->mem_virt) {
1021  		dev_err(dev, "Failed to allocate reserved memory.\n");
1022  		rc = -ENOMEM;
1023  		goto err_nomem;
1024  	}
1025  
1026  	ctx->pool = gen_pool_create(ilog2(PAGE_SIZE), -1);
1027  	if (!ctx->pool) {
1028  		dev_err(dev, "Failed to setup genalloc pool.\n");
1029  		rc = -ENOMEM;
1030  		goto err_nopool;
1031  	}
1032  
1033  	rc = gen_pool_add_virt(ctx->pool, (unsigned long)ctx->mem_virt,
1034  			       ctx->mem_phys, ctx->mem_size, -1);
1035  	if (rc) {
1036  		dev_err(ctx->dev, "Failed to add memory to genalloc pool.\n");
1037  		goto err_pool_scu_clk;
1038  	}
1039  
1040  	rc = aspeed_xdma_init_scu(ctx, dev);
1041  	if (rc)
1042  		goto err_pool_scu_clk;
1043  
1044  	rc = clk_prepare_enable(ctx->clock);
1045  	if (rc) {
1046  		dev_err(dev, "Failed to enable the clock.\n");
1047  		goto err_pool_scu_clk;
1048  	}
1049  
1050  	if (ctx->reset_rc) {
1051  		rc = reset_control_deassert(ctx->reset_rc);
1052  		if (rc) {
1053  			dev_err(dev, "Failed to clear the RC reset.\n");
1054  			goto err_reset_rc;
1055  		}
1056  		usleep_range(XDMA_ENGINE_SETUP_TIME_MIN_US,
1057  			     XDMA_ENGINE_SETUP_TIME_MAX_US);
1058  	}
1059  
1060  	rc = reset_control_deassert(ctx->reset);
1061  	if (rc) {
1062  		dev_err(dev, "Failed to clear the reset.\n");
1063  		goto err_reset;
1064  	}
1065  	usleep_range(XDMA_ENGINE_SETUP_TIME_MIN_US,
1066  		     XDMA_ENGINE_SETUP_TIME_MAX_US);
1067  
1068  	ctx->cmdq = gen_pool_dma_alloc(ctx->pool, XDMA_CMDQ_SIZE,
1069  				       &ctx->cmdq_phys);
1070  	if (!ctx->cmdq) {
1071  		dev_err(ctx->dev, "Failed to genalloc cmdq.\n");
1072  		rc = -ENOMEM;
1073  		goto err_pool;
1074  	}
1075  
1076  	aspeed_xdma_init_eng(ctx);
1077  
1078  	ctx->misc.minor = MISC_DYNAMIC_MINOR;
1079  	ctx->misc.fops = &aspeed_xdma_fops;
1080  	ctx->misc.name = "aspeed-xdma";
1081  	ctx->misc.parent = dev;
1082  	rc = misc_register(&ctx->misc);
1083  	if (rc) {
1084  		dev_err(dev, "Failed to register xdma miscdevice.\n");
1085  		goto err_misc;
1086  	}
1087  
1088  	/*
1089  	 * This interrupt could fire immediately so only request it once the
1090  	 * engine and driver are initialized.
1091  	 */
1092  	ctx->pcie_irq = platform_get_irq(pdev, 1);
1093  	if (ctx->pcie_irq < 0) {
1094  		dev_warn(dev, "Failed to find PCI-E IRQ.\n");
1095  	} else {
1096  		rc = request_irq(ctx->pcie_irq, aspeed_xdma_pcie_irq,
1097  				 IRQF_SHARED, DEVICE_NAME, ctx);
1098  		if (rc < 0) {
1099  			dev_warn(dev, "Failed to request PCI-E IRQ %d.\n", rc);
1100  			ctx->pcie_irq = -1;
1101  		}
1102  	}
1103  
1104  	kobject_init(&ctx->kobj, &aspeed_xdma_kobject_type);
1105  	return 0;
1106  
1107  err_misc:
1108  	gen_pool_free(ctx->pool, (unsigned long)ctx->cmdq, XDMA_CMDQ_SIZE);
1109  err_pool:
1110  	reset_control_assert(ctx->reset);
1111  err_reset:
1112  	if (ctx->reset_rc)
1113  		reset_control_assert(ctx->reset_rc);
1114  err_reset_rc:
1115  	clk_disable_unprepare(ctx->clock);
1116  err_pool_scu_clk:
1117  	gen_pool_destroy(ctx->pool);
1118  err_nopool:
1119  	dma_free_coherent(ctx->dev, ctx->mem_size, ctx->mem_virt,
1120  			  ctx->mem_coherent);
1121  err_nomem:
1122  	if (ctx->reset_rc)
1123  		reset_control_put(ctx->reset_rc);
1124  	reset_control_put(ctx->reset);
1125  err_noreset:
1126  	clk_put(ctx->clock);
1127  err_noclk:
1128  	free_irq(ctx->irq, ctx);
1129  err_noirq:
1130  	iounmap(ctx->base);
1131  	release_mem_region(ctx->res_start, ctx->res_size);
1132  err_nomap:
1133  	kfree(ctx);
1134  	return rc;
1135  }
1136  
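/*
 * Hold the engine in reset, fail any outstanding transfer and unregister
 * the misc device; remaining resources are released by the kobject release
 * once the last file handle is closed.
 */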
1137  static int aspeed_xdma_remove(struct platform_device *pdev)
1138  {
1139  	struct aspeed_xdma *ctx = platform_get_drvdata(pdev);
1140  
1141  	reset_control_assert(ctx->reset);
1142  	if (ctx->reset_rc)
1143  		reset_control_assert(ctx->reset_rc);
1144  	clk_disable_unprepare(ctx->clock);
1145  
1146  	aspeed_xdma_done(ctx, true);
1147  
1148  	misc_deregister(&ctx->misc);
1149  	kobject_put(&ctx->kobj);
1150  
1151  	return 0;
1152  }
1153  
1154  static const struct aspeed_xdma_chip aspeed_ast2500_xdma_chip = {
1155  	.control = XDMA_AST2500_CTRL_US_COMP | XDMA_AST2500_CTRL_DS_COMP |
1156  		XDMA_AST2500_CTRL_DS_DIRTY | XDMA_AST2500_CTRL_DS_SIZE_256 |
1157  		XDMA_AST2500_CTRL_DS_TIMEOUT | XDMA_AST2500_CTRL_DS_CHECK_ID,
1158  	.scu_bmc_class = SCU_AST2500_BMC_CLASS_REV,
1159  	.scu_misc_ctrl = 0,
1160  	.scu_pcie_conf = SCU_AST2500_PCIE_CONF,
1161  	.queue_entry_size = XDMA_AST2500_QUEUE_ENTRY_SIZE,
1162  	.regs = {
1163  		.bmc_cmdq_addr = XDMA_AST2500_BMC_CMDQ_ADDR,
1164  		.bmc_cmdq_endp = XDMA_AST2500_BMC_CMDQ_ENDP,
1165  		.bmc_cmdq_writep = XDMA_AST2500_BMC_CMDQ_WRITEP,
1166  		.bmc_cmdq_readp = XDMA_AST2500_BMC_CMDQ_READP,
1167  		.control = XDMA_AST2500_CTRL,
1168  		.status = XDMA_AST2500_STATUS,
1169  	},
1170  	.status_bits = {
1171  		.us_comp = XDMA_AST2500_STATUS_US_COMP,
1172  		.ds_comp = XDMA_AST2500_STATUS_DS_COMP,
1173  		.ds_dirty = XDMA_AST2500_STATUS_DS_DIRTY,
1174  	},
1175  	.set_cmd = aspeed_xdma_ast2500_set_cmd,
1176  };
1177  
1178  static const struct aspeed_xdma_chip aspeed_ast2600_xdma_chip = {
1179  	.control = XDMA_AST2600_CTRL_US_COMP | XDMA_AST2600_CTRL_DS_COMP |
1180  		XDMA_AST2600_CTRL_DS_DIRTY | XDMA_AST2600_CTRL_DS_SIZE_256,
1181  	.scu_bmc_class = SCU_AST2600_BMC_CLASS_REV,
1182  	.scu_misc_ctrl = SCU_AST2600_MISC_CTRL,
1183  	.scu_pcie_conf = SCU_AST2600_PCIE_CONF,
1184  	.queue_entry_size = XDMA_AST2600_QUEUE_ENTRY_SIZE,
1185  	.regs = {
1186  		.bmc_cmdq_addr = XDMA_AST2600_BMC_CMDQ_ADDR,
1187  		.bmc_cmdq_endp = XDMA_AST2600_BMC_CMDQ_ENDP,
1188  		.bmc_cmdq_writep = XDMA_AST2600_BMC_CMDQ_WRITEP,
1189  		.bmc_cmdq_readp = XDMA_AST2600_BMC_CMDQ_READP,
1190  		.control = XDMA_AST2600_CTRL,
1191  		.status = XDMA_AST2600_STATUS,
1192  	},
1193  	.status_bits = {
1194  		.us_comp = XDMA_AST2600_STATUS_US_COMP,
1195  		.ds_comp = XDMA_AST2600_STATUS_DS_COMP,
1196  		.ds_dirty = XDMA_AST2600_STATUS_DS_DIRTY,
1197  	},
1198  	.set_cmd = aspeed_xdma_ast2600_set_cmd,
1199  };
1200  
1201  static const struct of_device_id aspeed_xdma_match[] = {
1202  	{
1203  		.compatible = "aspeed,ast2500-xdma",
1204  		.data = &aspeed_ast2500_xdma_chip,
1205  	},
1206  	{
1207  		.compatible = "aspeed,ast2600-xdma",
1208  		.data = &aspeed_ast2600_xdma_chip,
1209  	},
1210  	{ },
1211  };
1212  
1213  static struct platform_driver aspeed_xdma_driver = {
1214  	.probe = aspeed_xdma_probe,
1215  	.remove = aspeed_xdma_remove,
1216  	.driver = {
1217  		.name = DEVICE_NAME,
1218  		.of_match_table = aspeed_xdma_match,
1219  	},
1220  };
1221  
1222  module_platform_driver(aspeed_xdma_driver);
1223  
1224  MODULE_AUTHOR("Eddie James");
1225  MODULE_DESCRIPTION("ASPEED XDMA Engine Driver");
1226  MODULE_LICENSE("GPL v2");
1227