xref: /openbmc/linux/drivers/usb/gadget/udc/pch_udc.c (revision afba8b0a)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
4  */
5 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
6 #include <linux/kernel.h>
7 #include <linux/module.h>
8 #include <linux/pci.h>
9 #include <linux/delay.h>
10 #include <linux/errno.h>
11 #include <linux/list.h>
12 #include <linux/interrupt.h>
13 #include <linux/usb/ch9.h>
14 #include <linux/usb/gadget.h>
15 #include <linux/gpio/consumer.h>
16 #include <linux/irq.h>
17 
18 #define PCH_VBUS_PERIOD		3000	/* VBUS polling period (msec) */
19 #define PCH_VBUS_INTERVAL	10	/* VBUS polling interval (msec) */
20 
21 /* Address offset of Registers */
22 #define UDC_EP_REG_SHIFT	0x20	/* Offset to next EP */
23 
24 #define UDC_EPCTL_ADDR		0x00	/* Endpoint control */
25 #define UDC_EPSTS_ADDR		0x04	/* Endpoint status */
26 #define UDC_BUFIN_FRAMENUM_ADDR	0x08	/* buffer size in / frame number out */
27 #define UDC_BUFOUT_MAXPKT_ADDR	0x0C	/* buffer size out / maxpkt in */
28 #define UDC_SUBPTR_ADDR		0x10	/* setup buffer pointer */
29 #define UDC_DESPTR_ADDR		0x14	/* Data descriptor pointer */
30 #define UDC_CONFIRM_ADDR	0x18	/* Write/Read confirmation */
31 
32 #define UDC_DEVCFG_ADDR		0x400	/* Device configuration */
33 #define UDC_DEVCTL_ADDR		0x404	/* Device control */
34 #define UDC_DEVSTS_ADDR		0x408	/* Device status */
35 #define UDC_DEVIRQSTS_ADDR	0x40C	/* Device irq status */
36 #define UDC_DEVIRQMSK_ADDR	0x410	/* Device irq mask */
37 #define UDC_EPIRQSTS_ADDR	0x414	/* Endpoint irq status */
38 #define UDC_EPIRQMSK_ADDR	0x418	/* Endpoint irq mask */
39 #define UDC_DEVLPM_ADDR		0x41C	/* LPM control / status */
40 #define UDC_CSR_BUSY_ADDR	0x4f0	/* UDC_CSR_BUSY Status register */
41 #define UDC_SRST_ADDR		0x4fc	/* SOFT RESET register */
42 #define UDC_CSR_ADDR		0x500	/* USB_DEVICE endpoint register */
43 
44 /* Endpoint control register */
45 /* Bit position */
46 #define UDC_EPCTL_MRXFLUSH		(1 << 12)
47 #define UDC_EPCTL_RRDY			(1 << 9)
48 #define UDC_EPCTL_CNAK			(1 << 8)
49 #define UDC_EPCTL_SNAK			(1 << 7)
50 #define UDC_EPCTL_NAK			(1 << 6)
51 #define UDC_EPCTL_P			(1 << 3)
52 #define UDC_EPCTL_F			(1 << 1)
53 #define UDC_EPCTL_S			(1 << 0)
54 #define UDC_EPCTL_ET_SHIFT		4
55 /* Mask pattern */
56 #define UDC_EPCTL_ET_MASK		0x00000030
57 /* Value for ET field */
58 #define UDC_EPCTL_ET_CONTROL		0
59 #define UDC_EPCTL_ET_ISO		1
60 #define UDC_EPCTL_ET_BULK		2
61 #define UDC_EPCTL_ET_INTERRUPT		3
62 
63 /* Endpoint status register */
64 /* Bit position */
65 #define UDC_EPSTS_XFERDONE		(1 << 27)
66 #define UDC_EPSTS_RSS			(1 << 26)
67 #define UDC_EPSTS_RCS			(1 << 25)
68 #define UDC_EPSTS_TXEMPTY		(1 << 24)
69 #define UDC_EPSTS_TDC			(1 << 10)
70 #define UDC_EPSTS_HE			(1 << 9)
71 #define UDC_EPSTS_MRXFIFO_EMP		(1 << 8)
72 #define UDC_EPSTS_BNA			(1 << 7)
73 #define UDC_EPSTS_IN			(1 << 6)
74 #define UDC_EPSTS_OUT_SHIFT		4
75 /* Mask pattern */
76 #define UDC_EPSTS_OUT_MASK		0x00000030
77 #define UDC_EPSTS_ALL_CLR_MASK		0x1F0006F0
78 /* Value for OUT field */
79 #define UDC_EPSTS_OUT_SETUP		2
80 #define UDC_EPSTS_OUT_DATA		1
81 
82 /* Device configuration register */
83 /* Bit position */
84 #define UDC_DEVCFG_CSR_PRG		(1 << 17)
85 #define UDC_DEVCFG_SP			(1 << 3)
86 /* SPD Value */
87 #define UDC_DEVCFG_SPD_HS		0x0
88 #define UDC_DEVCFG_SPD_FS		0x1
89 #define UDC_DEVCFG_SPD_LS		0x2
90 
91 /* Device control register */
92 /* Bit position */
93 #define UDC_DEVCTL_THLEN_SHIFT		24
94 #define UDC_DEVCTL_BRLEN_SHIFT		16
95 #define UDC_DEVCTL_CSR_DONE		(1 << 13)
96 #define UDC_DEVCTL_SD			(1 << 10)
97 #define UDC_DEVCTL_MODE			(1 << 9)
98 #define UDC_DEVCTL_BREN			(1 << 8)
99 #define UDC_DEVCTL_THE			(1 << 7)
100 #define UDC_DEVCTL_DU			(1 << 4)
101 #define UDC_DEVCTL_TDE			(1 << 3)
102 #define UDC_DEVCTL_RDE			(1 << 2)
103 #define UDC_DEVCTL_RES			(1 << 0)
104 
105 /* Device status register */
106 /* Bit position */
107 #define UDC_DEVSTS_TS_SHIFT		18
108 #define UDC_DEVSTS_ENUM_SPEED_SHIFT	13
109 #define UDC_DEVSTS_ALT_SHIFT		8
110 #define UDC_DEVSTS_INTF_SHIFT		4
111 #define UDC_DEVSTS_CFG_SHIFT		0
112 /* Mask pattern */
113 #define UDC_DEVSTS_TS_MASK		0xfffc0000
114 #define UDC_DEVSTS_ENUM_SPEED_MASK	0x00006000
115 #define UDC_DEVSTS_ALT_MASK		0x00000f00
116 #define UDC_DEVSTS_INTF_MASK		0x000000f0
117 #define UDC_DEVSTS_CFG_MASK		0x0000000f
118 /* Values of the ENUM_SPEED field */
119 #define UDC_DEVSTS_ENUM_SPEED_FULL	1
120 #define UDC_DEVSTS_ENUM_SPEED_HIGH	0
121 #define UDC_DEVSTS_ENUM_SPEED_LOW	2
122 #define UDC_DEVSTS_ENUM_SPEED_FULLX	3
123 
124 /* Device irq register */
125 /* Bit position */
126 #define UDC_DEVINT_RWKP			(1 << 7)
127 #define UDC_DEVINT_ENUM			(1 << 6)
128 #define UDC_DEVINT_SOF			(1 << 5)
129 #define UDC_DEVINT_US			(1 << 4)
130 #define UDC_DEVINT_UR			(1 << 3)
131 #define UDC_DEVINT_ES			(1 << 2)
132 #define UDC_DEVINT_SI			(1 << 1)
133 #define UDC_DEVINT_SC			(1 << 0)
134 /* Mask pattern */
135 #define UDC_DEVINT_MSK			0x7f
136 
137 /* Endpoint irq register */
138 /* Bit position */
139 #define UDC_EPINT_IN_SHIFT		0
140 #define UDC_EPINT_OUT_SHIFT		16
141 #define UDC_EPINT_IN_EP0		(1 << 0)
142 #define UDC_EPINT_OUT_EP0		(1 << 16)
143 /* Mask pattern */
144 #define UDC_EPINT_MSK_DISABLE_ALL	0xffffffff
145 
146 /* UDC_CSR_BUSY Status register */
147 /* Bit position */
148 #define UDC_CSR_BUSY			(1 << 0)
149 
150 /* SOFT RESET register */
151 /* Bit position */
152 #define UDC_PSRST			(1 << 1)
153 #define UDC_SRST			(1 << 0)
154 
155 /* USB_DEVICE endpoint register */
156 /* Bit position */
157 #define UDC_CSR_NE_NUM_SHIFT		0
158 #define UDC_CSR_NE_DIR_SHIFT		4
159 #define UDC_CSR_NE_TYPE_SHIFT		5
160 #define UDC_CSR_NE_CFG_SHIFT		7
161 #define UDC_CSR_NE_INTF_SHIFT		11
162 #define UDC_CSR_NE_ALT_SHIFT		15
163 #define UDC_CSR_NE_MAX_PKT_SHIFT	19
164 /* Mask pattern */
165 #define UDC_CSR_NE_NUM_MASK		0x0000000f
166 #define UDC_CSR_NE_DIR_MASK		0x00000010
167 #define UDC_CSR_NE_TYPE_MASK		0x00000060
168 #define UDC_CSR_NE_CFG_MASK		0x00000780
169 #define UDC_CSR_NE_INTF_MASK		0x00007800
170 #define UDC_CSR_NE_ALT_MASK		0x00078000
171 #define UDC_CSR_NE_MAX_PKT_MASK		0x3ff80000
172 
173 #define PCH_UDC_CSR(ep)	(UDC_CSR_ADDR + ep*4)
174 #define PCH_UDC_EPINT(in, num)\
175 		(1 << (num + (in ? UDC_EPINT_IN_SHIFT : UDC_EPINT_OUT_SHIFT)))
176 
177 /* Index of endpoint */
178 #define UDC_EP0IN_IDX		0
179 #define UDC_EP0OUT_IDX		1
180 #define UDC_EPIN_IDX(ep)	(ep * 2)
181 #define UDC_EPOUT_IDX(ep)	(ep * 2 + 1)
182 #define PCH_UDC_EP0		0
183 #define PCH_UDC_EP1		1
184 #define PCH_UDC_EP2		2
185 #define PCH_UDC_EP3		3
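/*
 * Illustrative expansion (editor's note, not in the original source):
 * for physical endpoint 2, PCH_UDC_EPINT(1, 2) evaluates to (1 << 2),
 * the IN interrupt bit, and PCH_UDC_EPINT(0, 2) to (1 << 18), the OUT
 * interrupt bit; the IN endpoint's CSR entry sits at
 * PCH_UDC_CSR(UDC_EPIN_IDX(2)) == 0x500 + 4 * 4 == 0x510.
 */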
186 
187 /* Number of endpoints */
188 #define PCH_UDC_EP_NUM		32	/* Total number of EPs (16 IN,16 OUT) */
189 #define PCH_UDC_USED_EP_NUM	4	/* Number of EPs actually used */
190 /* Length Value */
191 #define PCH_UDC_BRLEN		0x0F	/* Burst length */
192 #define PCH_UDC_THLEN		0x1F	/* Threshold length */
193 /* Value of EP Buffer Size */
194 #define UDC_EP0IN_BUFF_SIZE	16
195 #define UDC_EPIN_BUFF_SIZE	256
196 #define UDC_EP0OUT_BUFF_SIZE	16
197 #define UDC_EPOUT_BUFF_SIZE	256
198 /* Value of EP maximum packet size */
199 #define UDC_EP0IN_MAX_PKT_SIZE	64
200 #define UDC_EP0OUT_MAX_PKT_SIZE	64
201 #define UDC_BULK_MAX_PKT_SIZE	512
202 
203 /* DMA */
204 #define DMA_DIR_RX		1	/* DMA for data receive */
205 #define DMA_DIR_TX		2	/* DMA for data transmit */
206 #define DMA_ADDR_INVALID	(~(dma_addr_t)0)
207 #define UDC_DMA_MAXPACKET	65536	/* maximum packet size for DMA */
208 
209 /**
210  * struct pch_udc_data_dma_desc - Structure to hold DMA descriptor information
211  *				  for data
212  * @status:		Status quadlet
213  * @reserved:		Reserved
214  * @dataptr:		Buffer descriptor
215  * @next:		Next descriptor
216  */
217 struct pch_udc_data_dma_desc {
218 	u32 status;
219 	u32 reserved;
220 	u32 dataptr;
221 	u32 next;
222 };
223 
224 /**
225  * struct pch_udc_stp_dma_desc - Structure to hold DMA descriptor information
226  *				 for control data
227  * @status:	Status
228  * @reserved:	Reserved
229  * @request:	Control Request
230  */
231 struct pch_udc_stp_dma_desc {
232 	u32 status;
233 	u32 reserved;
234 	struct usb_ctrlrequest request;
235 } __attribute((packed));
236 
237 /* DMA status definitions */
238 /* Buffer status */
239 #define PCH_UDC_BUFF_STS	0xC0000000
240 #define PCH_UDC_BS_HST_RDY	0x00000000
241 #define PCH_UDC_BS_DMA_BSY	0x40000000
242 #define PCH_UDC_BS_DMA_DONE	0x80000000
243 #define PCH_UDC_BS_HST_BSY	0xC0000000
244 /*  Rx/Tx Status */
245 #define PCH_UDC_RXTX_STS	0x30000000
246 #define PCH_UDC_RTS_SUCC	0x00000000
247 #define PCH_UDC_RTS_DESERR	0x10000000
248 #define PCH_UDC_RTS_BUFERR	0x30000000
249 /* Last Descriptor Indication */
250 #define PCH_UDC_DMA_LAST	0x08000000
251 /* Number of Rx/Tx Bytes Mask */
252 #define PCH_UDC_RXTX_BYTES	0x0000ffff
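/*
 * Worked example (editor's note): a completed OUT descriptor with status
 * 0x80000040 decodes as PCH_UDC_BS_DMA_DONE (buffer handed back by the
 * DMA engine), PCH_UDC_RTS_SUCC (no receive error) and a transferred
 * byte count of 0x40 (64 bytes).
 */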
253 
254 /**
255  * struct pch_udc_cfg_data - Structure to hold current configuration
256  *			     and interface information
257  * @cur_cfg:	current configuration in use
258  * @cur_intf:	current interface in use
259  * @cur_alt:	current alt interface in use
260  */
261 struct pch_udc_cfg_data {
262 	u16 cur_cfg;
263 	u16 cur_intf;
264 	u16 cur_alt;
265 };
266 
267 /**
268  * struct pch_udc_ep - Structure holding a PCH USB device Endpoint information
269  * @ep:			embedded ep request
270  * @td_stp_phys:	for setup request
271  * @td_data_phys:	for data request
272  * @td_stp:		for setup request
273  * @td_data:		for data request
274  * @dev:		reference to device struct
275  * @offset_addr:	offset address of ep register
276  * @desc:		for this ep
277  * @queue:		queue for requests
278  * @num:		endpoint number
279  * @in:			endpoint is IN
280  * @halted:		endpoint halted?
281  * @epsts:		Endpoint status
282  */
283 struct pch_udc_ep {
284 	struct usb_ep			ep;
285 	dma_addr_t			td_stp_phys;
286 	dma_addr_t			td_data_phys;
287 	struct pch_udc_stp_dma_desc	*td_stp;
288 	struct pch_udc_data_dma_desc	*td_data;
289 	struct pch_udc_dev		*dev;
290 	unsigned long			offset_addr;
291 	struct list_head		queue;
292 	unsigned			num:5,
293 					in:1,
294 					halted:1;
295 	unsigned long			epsts;
296 };
297 
298 /**
299  * struct pch_vbus_gpio_data - Structure holding GPIO information
300  *					for detecting VBUS
301  * @port:		gpio descriptor for the VBUS GPIO
302  * @intr:		gpio interrupt number
303  * @irq_work_fall:	Structure for WorkQueue
304  * @irq_work_rise:	Structure for WorkQueue
305  */
306 struct pch_vbus_gpio_data {
307 	struct gpio_desc	*port;
308 	int			intr;
309 	struct work_struct	irq_work_fall;
310 	struct work_struct	irq_work_rise;
311 };
312 
313 /**
314  * struct pch_udc_dev - Structure holding complete information
315  *			of the PCH USB device
316  * @gadget:		gadget driver data
317  * @driver:		reference to gadget driver bound
318  * @pdev:		reference to the PCI device
319  * @ep:			array of endpoints
320  * @lock:		protects all state
321  * @stall:		stall requested
322  * @prot_stall:		protocol stall requested
323  * @registered:		driver registered with system
324  * @suspended:		driver in suspended state
325  * @connected:		gadget driver associated
326  * @vbus_session:	required vbus_session state
327  * @set_cfg_not_acked:	pending acknowledgement for setup
328  * @waiting_zlp_ack:	pending acknowledgement for ZLP
329  * @data_requests:	DMA pool for data requests
330  * @stp_requests:	DMA pool for setup requests
331  * @dma_addr:		DMA address of the receive buffer
332  * @setup_data:		Received setup data
333  * @base_addr:		for mapped device memory
334  * @cfg_data:		current cfg, intf, and alt in use
335  * @vbus_gpio:		GPIO information for detecting VBUS
336  */
337 struct pch_udc_dev {
338 	struct usb_gadget		gadget;
339 	struct usb_gadget_driver	*driver;
340 	struct pci_dev			*pdev;
341 	struct pch_udc_ep		ep[PCH_UDC_EP_NUM];
342 	spinlock_t			lock; /* protects all state */
343 	unsigned
344 			stall:1,
345 			prot_stall:1,
346 			suspended:1,
347 			connected:1,
348 			vbus_session:1,
349 			set_cfg_not_acked:1,
350 			waiting_zlp_ack:1;
351 	struct dma_pool		*data_requests;
352 	struct dma_pool		*stp_requests;
353 	dma_addr_t			dma_addr;
354 	struct usb_ctrlrequest		setup_data;
355 	void __iomem			*base_addr;
356 	struct pch_udc_cfg_data		cfg_data;
357 	struct pch_vbus_gpio_data	vbus_gpio;
358 };
359 #define to_pch_udc(g)	(container_of((g), struct pch_udc_dev, gadget))
360 
361 #define PCH_UDC_PCI_BAR_QUARK_X1000	0
362 #define PCH_UDC_PCI_BAR			1
363 
364 #define PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC	0x0939
365 #define PCI_DEVICE_ID_INTEL_EG20T_UDC		0x8808
366 
367 #define PCI_DEVICE_ID_ML7213_IOH_UDC	0x801D
368 #define PCI_DEVICE_ID_ML7831_IOH_UDC	0x8808
369 
370 static const char	ep0_string[] = "ep0in";
371 static DEFINE_SPINLOCK(udc_stall_spinlock);	/* stall spin lock */
372 static bool speed_fs;
373 module_param_named(speed_fs, speed_fs, bool, S_IRUGO);
374 MODULE_PARM_DESC(speed_fs, "true for Full speed operation");
375 
376 /**
377  * struct pch_udc_request - Structure holding a PCH USB device request packet
378  * @req:		embedded ep request
379  * @td_data_phys:	phys. address
380  * @td_data:		first dma desc. of chain
381  * @td_data_last:	last dma desc. of chain
382  * @queue:		associated queue
383  * @dma_going:		DMA in progress for request
384  * @dma_mapped:		DMA memory mapped for request
385  * @dma_done:		DMA completed for request
386  * @chain_len:		chain length
387  * @buf:		Buffer memory for align adjustment
388  * @dma:		DMA memory for align adjustment
389  */
390 struct pch_udc_request {
391 	struct usb_request		req;
392 	dma_addr_t			td_data_phys;
393 	struct pch_udc_data_dma_desc	*td_data;
394 	struct pch_udc_data_dma_desc	*td_data_last;
395 	struct list_head		queue;
396 	unsigned			dma_going:1,
397 					dma_mapped:1,
398 					dma_done:1;
399 	unsigned			chain_len;
400 	void				*buf;
401 	dma_addr_t			dma;
402 };
403 
404 static inline u32 pch_udc_readl(struct pch_udc_dev *dev, unsigned long reg)
405 {
406 	return ioread32(dev->base_addr + reg);
407 }
408 
409 static inline void pch_udc_writel(struct pch_udc_dev *dev,
410 				    unsigned long val, unsigned long reg)
411 {
412 	iowrite32(val, dev->base_addr + reg);
413 }
414 
415 static inline void pch_udc_bit_set(struct pch_udc_dev *dev,
416 				     unsigned long reg,
417 				     unsigned long bitmask)
418 {
419 	pch_udc_writel(dev, pch_udc_readl(dev, reg) | bitmask, reg);
420 }
421 
422 static inline void pch_udc_bit_clr(struct pch_udc_dev *dev,
423 				     unsigned long reg,
424 				     unsigned long bitmask)
425 {
426 	pch_udc_writel(dev, pch_udc_readl(dev, reg) & ~(bitmask), reg);
427 }
428 
429 static inline u32 pch_udc_ep_readl(struct pch_udc_ep *ep, unsigned long reg)
430 {
431 	return ioread32(ep->dev->base_addr + ep->offset_addr + reg);
432 }
433 
434 static inline void pch_udc_ep_writel(struct pch_udc_ep *ep,
435 				    unsigned long val, unsigned long reg)
436 {
437 	iowrite32(val, ep->dev->base_addr + ep->offset_addr + reg);
438 }
439 
440 static inline void pch_udc_ep_bit_set(struct pch_udc_ep *ep,
441 				     unsigned long reg,
442 				     unsigned long bitmask)
443 {
444 	pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) | bitmask, reg);
445 }
446 
447 static inline void pch_udc_ep_bit_clr(struct pch_udc_ep *ep,
448 				     unsigned long reg,
449 				     unsigned long bitmask)
450 {
451 	pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) & ~(bitmask), reg);
452 }
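/*
 * Usage note (editor's addition): the helpers above are plain MMIO
 * read-modify-write accessors, e.g. pch_udc_bit_set(dev, UDC_DEVCTL_ADDR,
 * UDC_DEVCTL_SD) sets the soft-disconnect bit.  They provide no atomicity
 * of their own, so callers serialize register updates themselves where
 * needed (typically under dev->lock).
 */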
453 
454 /**
455  * pch_udc_csr_busy() - Wait till idle.
456  * @dev:	Reference to pch_udc_dev structure
457  */
458 static void pch_udc_csr_busy(struct pch_udc_dev *dev)
459 {
460 	unsigned int count = 200;
461 
462 	/* Wait till idle */
463 	while ((pch_udc_readl(dev, UDC_CSR_BUSY_ADDR) & UDC_CSR_BUSY)
464 		&& --count)
465 		cpu_relax();
466 	if (!count)
467 		dev_err(&dev->pdev->dev, "%s: wait error\n", __func__);
468 }
469 
470 /**
471  * pch_udc_write_csr() - Write the command and status registers.
472  * @dev:	Reference to pch_udc_dev structure
473  * @val:	value to be written to CSR register
474  * @ep:		end-point number
475  */
476 static void pch_udc_write_csr(struct pch_udc_dev *dev, unsigned long val,
477 			       unsigned int ep)
478 {
479 	unsigned long reg = PCH_UDC_CSR(ep);
480 
481 	pch_udc_csr_busy(dev);		/* Wait till idle */
482 	pch_udc_writel(dev, val, reg);
483 	pch_udc_csr_busy(dev);		/* Wait till idle */
484 }
485 
486 /**
487  * pch_udc_read_csr() - Read the command and status registers.
488  * @dev:	Reference to pch_udc_dev structure
489  * @ep:		end-point number
490  *
491  * Return codes:	content of CSR register
492  */
493 static u32 pch_udc_read_csr(struct pch_udc_dev *dev, unsigned int ep)
494 {
495 	unsigned long reg = PCH_UDC_CSR(ep);
496 
497 	pch_udc_csr_busy(dev);		/* Wait till idle */
498 	pch_udc_readl(dev, reg);	/* Dummy read */
499 	pch_udc_csr_busy(dev);		/* Wait till idle */
500 	return pch_udc_readl(dev, reg);
501 }
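/*
 * Editor's note: the UDC_CSR window appears to be an indirect, slow
 * interface; both accessors bracket the access with pch_udc_csr_busy(),
 * and the read path issues a dummy read first, presumably to latch the
 * value before the real read returns it.
 */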
502 
503 /**
504  * pch_udc_rmt_wakeup() - Initiate for remote wakeup
505  * @dev:	Reference to pch_udc_dev structure
506  */
507 static inline void pch_udc_rmt_wakeup(struct pch_udc_dev *dev)
508 {
509 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
510 	mdelay(1);
511 	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
512 }
513 
514 /**
515  * pch_udc_get_frame() - Get the current frame from device status register
516  * @dev:	Reference to pch_udc_dev structure
517  * Return: the current frame number
518  */
519 static inline int pch_udc_get_frame(struct pch_udc_dev *dev)
520 {
521 	u32 frame = pch_udc_readl(dev, UDC_DEVSTS_ADDR);
522 	return (frame & UDC_DEVSTS_TS_MASK) >> UDC_DEVSTS_TS_SHIFT;
523 }
524 
525 /**
526  * pch_udc_clear_selfpowered() - Clear the self power control
527  * @dev:	Reference to pch_udc_regs structure
528  */
529 static inline void pch_udc_clear_selfpowered(struct pch_udc_dev *dev)
530 {
531 	pch_udc_bit_clr(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
532 }
533 
534 /**
535  * pch_udc_set_selfpowered() - Set the self power control
536  * @dev:	Reference to pch_udc_regs structure
537  */
538 static inline void pch_udc_set_selfpowered(struct pch_udc_dev *dev)
539 {
540 	pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
541 }
542 
543 /**
544  * pch_udc_set_disconnect() - Set the disconnect status.
545  * @dev:	Reference to pch_udc_regs structure
546  */
547 static inline void pch_udc_set_disconnect(struct pch_udc_dev *dev)
548 {
549 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
550 }
551 
552 /**
553  * pch_udc_clear_disconnect() - Clear the disconnect status.
554  * @dev:	Reference to pch_udc_regs structure
555  */
556 static void pch_udc_clear_disconnect(struct pch_udc_dev *dev)
557 {
558 	/* Clear the disconnect */
559 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
560 	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
561 	mdelay(1);
562 	/* Resume USB signalling */
563 	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
564 }
565 
566 /**
567  * pch_udc_reconnect() - This API initializes usb device controller,
568  *						and clears the disconnect status.
569  * @dev:		Reference to pch_udc_regs structure
570  */
571 static void pch_udc_init(struct pch_udc_dev *dev);
572 static void pch_udc_reconnect(struct pch_udc_dev *dev)
573 {
574 	pch_udc_init(dev);
575 
576 	/* enable device interrupts */
577 	/* pch_udc_enable_interrupts() */
578 	pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR,
579 			UDC_DEVINT_UR | UDC_DEVINT_ENUM);
580 
581 	/* Clear the disconnect */
582 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
583 	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
584 	mdelay(1);
585 	/* Resume USB signalling */
586 	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
587 }
588 
589 /**
590  * pch_udc_vbus_session() - set or clear the disconnect status.
591  * @dev:	Reference to pch_udc_regs structure
592  * @is_active:	Parameter specifying the action
593  *		  0:   indicating VBUS power is ending
594  *		  !0:  indicating VBUS power is starting
595  */
596 static inline void pch_udc_vbus_session(struct pch_udc_dev *dev,
597 					  int is_active)
598 {
599 	if (is_active) {
600 		pch_udc_reconnect(dev);
601 		dev->vbus_session = 1;
602 	} else {
603 		if (dev->driver && dev->driver->disconnect) {
604 			spin_lock(&dev->lock);
605 			dev->driver->disconnect(&dev->gadget);
606 			spin_unlock(&dev->lock);
607 		}
608 		pch_udc_set_disconnect(dev);
609 		dev->vbus_session = 0;
610 	}
611 }
612 
613 /**
614  * pch_udc_ep_set_stall() - Set the stall of endpoint
615  * @ep:		Reference to structure of type pch_udc_ep_regs
616  */
617 static void pch_udc_ep_set_stall(struct pch_udc_ep *ep)
618 {
619 	if (ep->in) {
620 		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
621 		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
622 	} else {
623 		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
624 	}
625 }
626 
627 /**
628  * pch_udc_ep_clear_stall() - Clear the stall of endpoint
629  * @ep:		Reference to structure of type pch_udc_ep_regs
630  */
631 static inline void pch_udc_ep_clear_stall(struct pch_udc_ep *ep)
632 {
633 	/* Clear the stall */
634 	pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
635 	/* Clear NAK by writing CNAK */
636 	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
637 }
638 
639 /**
640  * pch_udc_ep_set_trfr_type() - Set the transfer type of endpoint
641  * @ep:		Reference to structure of type pch_udc_ep_regs
642  * @type:	Type of endpoint
643  */
644 static inline void pch_udc_ep_set_trfr_type(struct pch_udc_ep *ep,
645 					u8 type)
646 {
647 	pch_udc_ep_writel(ep, ((type << UDC_EPCTL_ET_SHIFT) &
648 				UDC_EPCTL_ET_MASK), UDC_EPCTL_ADDR);
649 }
650 
651 /**
652  * pch_udc_ep_set_bufsz() - Set the buffer size for the endpoint
653  * @ep:		Reference to structure of type pch_udc_ep_regs
654  * @buf_size:	The buffer word size
655  * @ep_in:	EP is IN
656  */
657 static void pch_udc_ep_set_bufsz(struct pch_udc_ep *ep,
658 						 u32 buf_size, u32 ep_in)
659 {
660 	u32 data;
661 	if (ep_in) {
662 		data = pch_udc_ep_readl(ep, UDC_BUFIN_FRAMENUM_ADDR);
663 		data = (data & 0xffff0000) | (buf_size & 0xffff);
664 		pch_udc_ep_writel(ep, data, UDC_BUFIN_FRAMENUM_ADDR);
665 	} else {
666 		data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
667 		data = (buf_size << 16) | (data & 0xffff);
668 		pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
669 	}
670 }
671 
672 /**
673  * pch_udc_ep_set_maxpkt() - Set the Max packet size for the endpoint
674  * @ep:		Reference to structure of type pch_udc_ep_regs
675  * @pkt_size:	The packet byte size
676  */
677 static void pch_udc_ep_set_maxpkt(struct pch_udc_ep *ep, u32 pkt_size)
678 {
679 	u32 data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
680 	data = (data & 0xffff0000) | (pkt_size & 0xffff);
681 	pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
682 }
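/*
 * Register layout implied by the two helpers above: for IN endpoints the
 * buffer size occupies the low 16 bits of UDC_BUFIN_FRAMENUM, for OUT
 * endpoints it occupies the high 16 bits of UDC_BUFOUT_MAXPKT, and the
 * maximum packet size always lives in the low 16 bits of UDC_BUFOUT_MAXPKT.
 */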
683 
684 /**
685  * pch_udc_ep_set_subptr() - Set the Setup buffer pointer for the endpoint
686  * @ep:		Reference to structure of type pch_udc_ep_regs
687  * @addr:	DMA address of the setup buffer
688  */
689 static inline void pch_udc_ep_set_subptr(struct pch_udc_ep *ep, u32 addr)
690 {
691 	pch_udc_ep_writel(ep, addr, UDC_SUBPTR_ADDR);
692 }
693 
694 /**
695  * pch_udc_ep_set_ddptr() - Set the Data descriptor pointer for the endpoint
696  * @ep:		Reference to structure of type pch_udc_ep_regs
697  * @addr:	DMA address of the data descriptor chain
698  */
699 static inline void pch_udc_ep_set_ddptr(struct pch_udc_ep *ep, u32 addr)
700 {
701 	pch_udc_ep_writel(ep, addr, UDC_DESPTR_ADDR);
702 }
703 
704 /**
705  * pch_udc_ep_set_pd() - Set the poll demand bit for the endpoint
706  * @ep:		Reference to structure of type pch_udc_ep_regs
707  */
708 static inline void pch_udc_ep_set_pd(struct pch_udc_ep *ep)
709 {
710 	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_P);
711 }
712 
713 /**
714  * pch_udc_ep_set_rrdy() - Set the receive ready bit for the endpoint
715  * @ep:		Reference to structure of type pch_udc_ep_regs
716  */
717 static inline void pch_udc_ep_set_rrdy(struct pch_udc_ep *ep)
718 {
719 	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
720 }
721 
722 /**
723  * pch_udc_ep_clear_rrdy() - Clear the receive ready bit for the endpoint
724  * @ep:		Reference to structure of type pch_udc_ep_regs
725  */
726 static inline void pch_udc_ep_clear_rrdy(struct pch_udc_ep *ep)
727 {
728 	pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
729 }
730 
731 /**
732  * pch_udc_set_dma() - Set the TDE or RDE bit of the device control
733  *			register depending on the direction specified
734  * @dev:	Reference to structure of type pch_udc_regs
735  * @dir:	whether Tx or Rx
736  *		  DMA_DIR_RX: Receive
737  *		  DMA_DIR_TX: Transmit
738  */
739 static inline void pch_udc_set_dma(struct pch_udc_dev *dev, int dir)
740 {
741 	if (dir == DMA_DIR_RX)
742 		pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
743 	else if (dir == DMA_DIR_TX)
744 		pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
745 }
746 
747 /**
748  * pch_udc_clear_dma() - Clear the TDE or RDE bit of the device control
749  *				 register depending on the direction specified
750  * @dev:	Reference to structure of type pch_udc_regs
751  * @dir:	Whether Tx or Rx
752  *		  DMA_DIR_RX: Receive
753  *		  DMA_DIR_TX: Transmit
754  */
755 static inline void pch_udc_clear_dma(struct pch_udc_dev *dev, int dir)
756 {
757 	if (dir == DMA_DIR_RX)
758 		pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
759 	else if (dir == DMA_DIR_TX)
760 		pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
761 }
762 
763 /**
764  * pch_udc_set_csr_done() - Set the device control register
765  *				CSR done field (bit 13)
766  * @dev:	reference to structure of type pch_udc_regs
767  */
768 static inline void pch_udc_set_csr_done(struct pch_udc_dev *dev)
769 {
770 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_CSR_DONE);
771 }
772 
773 /**
774  * pch_udc_disable_interrupts() - Disables the specified interrupts
775  * @dev:	Reference to structure of type pch_udc_regs
776  * @mask:	Mask to disable interrupts
777  */
778 static inline void pch_udc_disable_interrupts(struct pch_udc_dev *dev,
779 					    u32 mask)
780 {
781 	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, mask);
782 }
783 
784 /**
785  * pch_udc_enable_interrupts() - Enable the specified interrupts
786  * @dev:	Reference to structure of type pch_udc_regs
787  * @mask:	Mask to enable interrupts
788  */
789 static inline void pch_udc_enable_interrupts(struct pch_udc_dev *dev,
790 					   u32 mask)
791 {
792 	pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR, mask);
793 }
794 
795 /**
796  * pch_udc_disable_ep_interrupts() - Disable endpoint interrupts
797  * @dev:	Reference to structure of type pch_udc_regs
798  * @mask:	Mask to disable interrupts
799  */
800 static inline void pch_udc_disable_ep_interrupts(struct pch_udc_dev *dev,
801 						u32 mask)
802 {
803 	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, mask);
804 }
805 
806 /**
807  * pch_udc_enable_ep_interrupts() - Enable endpoint interrupts
808  * @dev:	Reference to structure of type pch_udc_regs
809  * @mask:	Mask to enable interrupts
810  */
811 static inline void pch_udc_enable_ep_interrupts(struct pch_udc_dev *dev,
812 					      u32 mask)
813 {
814 	pch_udc_bit_clr(dev, UDC_EPIRQMSK_ADDR, mask);
815 }
816 
817 /**
818  * pch_udc_read_device_interrupts() - Read the device interrupts
819  * @dev:	Reference to structure of type pch_udc_regs
820  * Return: the device interrupt status
821  */
822 static inline u32 pch_udc_read_device_interrupts(struct pch_udc_dev *dev)
823 {
824 	return pch_udc_readl(dev, UDC_DEVIRQSTS_ADDR);
825 }
826 
827 /**
828  * pch_udc_write_device_interrupts() - Write device interrupts
829  * @dev:	Reference to structure of type pch_udc_regs
830  * @val:	The value to be written to interrupt register
831  */
832 static inline void pch_udc_write_device_interrupts(struct pch_udc_dev *dev,
833 						     u32 val)
834 {
835 	pch_udc_writel(dev, val, UDC_DEVIRQSTS_ADDR);
836 }
837 
838 /**
839  * pch_udc_read_ep_interrupts() - Read the endpoint interrupts
840  * @dev:	Reference to structure of type pch_udc_regs
841  * Return: the endpoint interrupt status
842  */
843 static inline u32 pch_udc_read_ep_interrupts(struct pch_udc_dev *dev)
844 {
845 	return pch_udc_readl(dev, UDC_EPIRQSTS_ADDR);
846 }
847 
848 /**
849  * pch_udc_write_ep_interrupts() - Clear endpoint interrupts
850  * @dev:	Reference to structure of type pch_udc_regs
851  * @val:	The value to be written to interrupt register
852  */
853 static inline void pch_udc_write_ep_interrupts(struct pch_udc_dev *dev,
854 					     u32 val)
855 {
856 	pch_udc_writel(dev, val, UDC_EPIRQSTS_ADDR);
857 }
858 
859 /**
860  * pch_udc_read_device_status() - Read the device status
861  * @dev:	Reference to structure of type pch_udc_regs
862  * Return: the device status
863  */
864 static inline u32 pch_udc_read_device_status(struct pch_udc_dev *dev)
865 {
866 	return pch_udc_readl(dev, UDC_DEVSTS_ADDR);
867 }
868 
869 /**
870  * pch_udc_read_ep_control() - Read the endpoint control
871  * @ep:		Reference to structure of type pch_udc_ep_regs
872  * Return: the endpoint control register value
873  */
874 static inline u32 pch_udc_read_ep_control(struct pch_udc_ep *ep)
875 {
876 	return pch_udc_ep_readl(ep, UDC_EPCTL_ADDR);
877 }
878 
879 /**
880  * pch_udc_clear_ep_control() - Clear the endpoint control register
881  * @ep:		Reference to structure of type pch_udc_ep_regs
883  */
884 static inline void pch_udc_clear_ep_control(struct pch_udc_ep *ep)
885 {
886 	return pch_udc_ep_writel(ep, 0, UDC_EPCTL_ADDR);
887 }
888 
889 /**
890  * pch_udc_read_ep_status() - Read the endpoint status
891  * @ep:		Reference to structure of type pch_udc_ep_regs
892  * Return: the endpoint status
893  */
894 static inline u32 pch_udc_read_ep_status(struct pch_udc_ep *ep)
895 {
896 	return pch_udc_ep_readl(ep, UDC_EPSTS_ADDR);
897 }
898 
899 /**
900  * pch_udc_clear_ep_status() - Clear the endpoint status
901  * @ep:		Reference to structure of type pch_udc_ep_regs
902  * @stat:	Endpoint status
903  */
904 static inline void pch_udc_clear_ep_status(struct pch_udc_ep *ep,
905 					 u32 stat)
906 {
907 	return pch_udc_ep_writel(ep, stat, UDC_EPSTS_ADDR);
908 }
909 
910 /**
911  * pch_udc_ep_set_nak() - Set the bit 7 (SNAK field)
912  *				of the endpoint control register
913  * @ep:		Reference to structure of type pch_udc_ep_regs
914  */
915 static inline void pch_udc_ep_set_nak(struct pch_udc_ep *ep)
916 {
917 	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_SNAK);
918 }
919 
920 /**
921  * pch_udc_ep_clear_nak() - Set the bit 8 (CNAK field)
922  *				of the endpoint control register
923  * @ep:		reference to structure of type pch_udc_ep_regs
924  */
925 static void pch_udc_ep_clear_nak(struct pch_udc_ep *ep)
926 {
927 	unsigned int loopcnt = 0;
928 	struct pch_udc_dev *dev = ep->dev;
929 
930 	if (!(pch_udc_ep_readl(ep, UDC_EPCTL_ADDR) & UDC_EPCTL_NAK))
931 		return;
932 	if (!ep->in) {
933 		loopcnt = 10000;
934 		while (!(pch_udc_read_ep_status(ep) & UDC_EPSTS_MRXFIFO_EMP) &&
935 			--loopcnt)
936 			udelay(5);
937 		if (!loopcnt)
938 			dev_err(&dev->pdev->dev, "%s: RxFIFO not Empty\n",
939 				__func__);
940 	}
941 	loopcnt = 10000;
942 	while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_NAK) && --loopcnt) {
943 		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
944 		udelay(5);
945 	}
946 	if (!loopcnt)
947 		dev_err(&dev->pdev->dev, "%s: Clear NAK not set for ep%d%s\n",
948 			__func__, ep->num, (ep->in ? "in" : "out"));
949 }
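/*
 * Note (editor's addition): for OUT endpoints the receive FIFO must report
 * empty (UDC_EPSTS_MRXFIFO_EMP) before NAK may be cleared; the CNAK write
 * is then repeated until the NAK bit in the control register actually
 * drops, with an error logged if either wait times out.
 */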
950 
951 /**
952  * pch_udc_ep_fifo_flush() - Flush the endpoint fifo
953  * @ep:	reference to structure of type pch_udc_ep_regs
954  * @dir:	direction of endpoint
955  *		  0:  endpoint is OUT
956  *		  !0: endpoint is IN
957  */
958 static void pch_udc_ep_fifo_flush(struct pch_udc_ep *ep, int dir)
959 {
960 	if (dir) {	/* IN ep */
961 		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
962 		return;
963 	}
964 }
965 
966 /**
967  * pch_udc_ep_enable() - This API enables the endpoint
968  * @ep:		reference to structure of type pch_udc_ep_regs
969  * @cfg:	current configuration information
970  * @desc:	endpoint descriptor
971  */
972 static void pch_udc_ep_enable(struct pch_udc_ep *ep,
973 			       struct pch_udc_cfg_data *cfg,
974 			       const struct usb_endpoint_descriptor *desc)
975 {
976 	u32 val = 0;
977 	u32 buff_size = 0;
978 
979 	pch_udc_ep_set_trfr_type(ep, desc->bmAttributes);
980 	if (ep->in)
981 		buff_size = UDC_EPIN_BUFF_SIZE;
982 	else
983 		buff_size = UDC_EPOUT_BUFF_SIZE;
984 	pch_udc_ep_set_bufsz(ep, buff_size, ep->in);
985 	pch_udc_ep_set_maxpkt(ep, usb_endpoint_maxp(desc));
986 	pch_udc_ep_set_nak(ep);
987 	pch_udc_ep_fifo_flush(ep, ep->in);
988 	/* Configure the endpoint */
989 	val = ep->num << UDC_CSR_NE_NUM_SHIFT | ep->in << UDC_CSR_NE_DIR_SHIFT |
990 	      ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) <<
991 		UDC_CSR_NE_TYPE_SHIFT) |
992 	      (cfg->cur_cfg << UDC_CSR_NE_CFG_SHIFT) |
993 	      (cfg->cur_intf << UDC_CSR_NE_INTF_SHIFT) |
994 	      (cfg->cur_alt << UDC_CSR_NE_ALT_SHIFT) |
995 	      usb_endpoint_maxp(desc) << UDC_CSR_NE_MAX_PKT_SHIFT;
996 
997 	if (ep->in)
998 		pch_udc_write_csr(ep->dev, val, UDC_EPIN_IDX(ep->num));
999 	else
1000 		pch_udc_write_csr(ep->dev, val, UDC_EPOUT_IDX(ep->num));
1001 }
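/*
 * Illustrative example (editor's note, not from the original source): a
 * bulk IN endpoint 1 with a 512-byte max packet in configuration 1,
 * interface 0, alt setting 0 would program
 * val = (1 << 0) | (1 << 4) | (2 << 5) | (1 << 7) | (512 << 19)
 * into its UDC_CSR entry.
 */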
1002 
1003 /**
1004  * pch_udc_ep_disable() - This API disables the endpoint
1005  * @ep:		reference to structure of type pch_udc_ep_regs
1006  */
1007 static void pch_udc_ep_disable(struct pch_udc_ep *ep)
1008 {
1009 	if (ep->in) {
1010 		/* flush the fifo */
1011 		pch_udc_ep_writel(ep, UDC_EPCTL_F, UDC_EPCTL_ADDR);
1012 		/* set NAK */
1013 		pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
1014 		pch_udc_ep_bit_set(ep, UDC_EPSTS_ADDR, UDC_EPSTS_IN);
1015 	} else {
1016 		/* set NAK */
1017 		pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
1018 	}
1019 	/* reset desc pointer */
1020 	pch_udc_ep_writel(ep, 0, UDC_DESPTR_ADDR);
1021 }
1022 
1023 /**
1024  * pch_udc_wait_ep_stall() - Wait EP stall.
1025  * @ep:		reference to structure of type pch_udc_ep_regs
1026  */
1027 static void pch_udc_wait_ep_stall(struct pch_udc_ep *ep)
1028 {
1029 	unsigned int count = 10000;
1030 
1031 	/* Wait till idle */
1032 	while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_S) && --count)
1033 		udelay(5);
1034 	if (!count)
1035 		dev_err(&ep->dev->pdev->dev, "%s: wait error\n", __func__);
1036 }
1037 
1038 /**
1039  * pch_udc_init() - This API initializes usb device controller
1040  * @dev:	Reference to pch_udc_regs structure
1041  */
1042 static void pch_udc_init(struct pch_udc_dev *dev)
1043 {
1044 	if (NULL == dev) {
1045 		pr_err("%s: Invalid address\n", __func__);
1046 		return;
1047 	}
1048 	/* Soft Reset and Reset PHY */
1049 	pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
1050 	pch_udc_writel(dev, UDC_SRST | UDC_PSRST, UDC_SRST_ADDR);
1051 	mdelay(1);
1052 	pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
1053 	pch_udc_writel(dev, 0x00, UDC_SRST_ADDR);
1054 	mdelay(1);
1055 	/* mask and clear all device interrupts */
1056 	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
1057 	pch_udc_bit_set(dev, UDC_DEVIRQSTS_ADDR, UDC_DEVINT_MSK);
1058 
1059 	/* mask and clear all ep interrupts */
1060 	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1061 	pch_udc_bit_set(dev, UDC_EPIRQSTS_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1062 
1063 	/* enable dynamic CSR programming, self powered and device speed */
1064 	if (speed_fs)
1065 		pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
1066 				UDC_DEVCFG_SP | UDC_DEVCFG_SPD_FS);
1067 	else /* default high speed */
1068 		pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
1069 				UDC_DEVCFG_SP | UDC_DEVCFG_SPD_HS);
1070 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR,
1071 			(PCH_UDC_THLEN << UDC_DEVCTL_THLEN_SHIFT) |
1072 			(PCH_UDC_BRLEN << UDC_DEVCTL_BRLEN_SHIFT) |
1073 			UDC_DEVCTL_MODE | UDC_DEVCTL_BREN |
1074 			UDC_DEVCTL_THE);
1075 }
1076 
1077 /**
1078  * pch_udc_exit() - This API shuts down the usb device controller
1079  * @dev:	Reference to pch_udc_regs structure
1080  */
1081 static void pch_udc_exit(struct pch_udc_dev *dev)
1082 {
1083 	/* mask all device interrupts */
1084 	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
1085 	/* mask all ep interrupts */
1086 	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1087 	/* put device in disconnected state */
1088 	pch_udc_set_disconnect(dev);
1089 }
1090 
1091 /**
1092  * pch_udc_pcd_get_frame() - This API is invoked to get the current frame number
1093  * @gadget:	Reference to the gadget driver
1094  *
1095  * Return codes:
1096  *	0:		Success
1097  *	-EINVAL:	If the gadget passed is NULL
1098  */
1099 static int pch_udc_pcd_get_frame(struct usb_gadget *gadget)
1100 {
1101 	struct pch_udc_dev	*dev;
1102 
1103 	if (!gadget)
1104 		return -EINVAL;
1105 	dev = container_of(gadget, struct pch_udc_dev, gadget);
1106 	return pch_udc_get_frame(dev);
1107 }
1108 
1109 /**
1110  * pch_udc_pcd_wakeup() - This API is invoked to initiate a remote wakeup
1111  * @gadget:	Reference to the gadget driver
1112  *
1113  * Return codes:
1114  *	0:		Success
1115  *	-EINVAL:	If the gadget passed is NULL
1116  */
1117 static int pch_udc_pcd_wakeup(struct usb_gadget *gadget)
1118 {
1119 	struct pch_udc_dev	*dev;
1120 	unsigned long		flags;
1121 
1122 	if (!gadget)
1123 		return -EINVAL;
1124 	dev = container_of(gadget, struct pch_udc_dev, gadget);
1125 	spin_lock_irqsave(&dev->lock, flags);
1126 	pch_udc_rmt_wakeup(dev);
1127 	spin_unlock_irqrestore(&dev->lock, flags);
1128 	return 0;
1129 }
1130 
1131 /**
1132  * pch_udc_pcd_selfpowered() - This API is invoked to specify whether the device
1133  *				is self powered or not
1134  * @gadget:	Reference to the gadget driver
1135  * @value:	Specifies self powered or not
1136  *
1137  * Return codes:
1138  *	0:		Success
1139  *	-EINVAL:	If the gadget passed is NULL
1140  */
1141 static int pch_udc_pcd_selfpowered(struct usb_gadget *gadget, int value)
1142 {
1143 	struct pch_udc_dev	*dev;
1144 
1145 	if (!gadget)
1146 		return -EINVAL;
1147 	gadget->is_selfpowered = (value != 0);
1148 	dev = container_of(gadget, struct pch_udc_dev, gadget);
1149 	if (value)
1150 		pch_udc_set_selfpowered(dev);
1151 	else
1152 		pch_udc_clear_selfpowered(dev);
1153 	return 0;
1154 }
1155 
1156 /**
1157  * pch_udc_pcd_pullup() - This API is invoked to make the device
1158  *				visible/invisible to the host
1159  * @gadget:	Reference to the gadget driver
1160  * @is_on:	Specifies whether the pull up is made active or inactive
1161  *
1162  * Return codes:
1163  *	0:		Success
1164  *	-EINVAL:	If the gadget passed is NULL
1165  */
1166 static int pch_udc_pcd_pullup(struct usb_gadget *gadget, int is_on)
1167 {
1168 	struct pch_udc_dev	*dev;
1169 
1170 	if (!gadget)
1171 		return -EINVAL;
1172 	dev = container_of(gadget, struct pch_udc_dev, gadget);
1173 	if (is_on) {
1174 		pch_udc_reconnect(dev);
1175 	} else {
1176 		if (dev->driver && dev->driver->disconnect) {
1177 			spin_lock(&dev->lock);
1178 			dev->driver->disconnect(&dev->gadget);
1179 			spin_unlock(&dev->lock);
1180 		}
1181 		pch_udc_set_disconnect(dev);
1182 	}
1183 
1184 	return 0;
1185 }
1186 
1187 /**
1188  * pch_udc_pcd_vbus_session() - This API is used by a driver for an external
1189  *				transceiver (or GPIO) that
1190  *				detects a VBUS power session starting/ending
1191  * @gadget:	Reference to the gadget driver
1192  * @is_active:	specifies whether the session is starting or ending
1193  *
1194  * Return codes:
1195  *	0:		Success
1196  *	-EINVAL:	If the gadget passed is NULL
1197  */
1198 static int pch_udc_pcd_vbus_session(struct usb_gadget *gadget, int is_active)
1199 {
1200 	struct pch_udc_dev	*dev;
1201 
1202 	if (!gadget)
1203 		return -EINVAL;
1204 	dev = container_of(gadget, struct pch_udc_dev, gadget);
1205 	pch_udc_vbus_session(dev, is_active);
1206 	return 0;
1207 }
1208 
1209 /**
1210  * pch_udc_pcd_vbus_draw() - This API is used by gadget drivers during
1211  *				SET_CONFIGURATION calls to
1212  *				specify how much power the device can consume
1213  * @gadget:	Reference to the gadget driver
1214  * @mA:		specifies the current limit in 2mA unit
1215  *
1216  * Return codes:
1217  *	-EOPNOTSUPP:	this operation is not supported
1219  */
1220 static int pch_udc_pcd_vbus_draw(struct usb_gadget *gadget, unsigned int mA)
1221 {
1222 	return -EOPNOTSUPP;
1223 }
1224 
1225 static int pch_udc_start(struct usb_gadget *g,
1226 		struct usb_gadget_driver *driver);
1227 static int pch_udc_stop(struct usb_gadget *g);
1228 
1229 static const struct usb_gadget_ops pch_udc_ops = {
1230 	.get_frame = pch_udc_pcd_get_frame,
1231 	.wakeup = pch_udc_pcd_wakeup,
1232 	.set_selfpowered = pch_udc_pcd_selfpowered,
1233 	.pullup = pch_udc_pcd_pullup,
1234 	.vbus_session = pch_udc_pcd_vbus_session,
1235 	.vbus_draw = pch_udc_pcd_vbus_draw,
1236 	.udc_start = pch_udc_start,
1237 	.udc_stop = pch_udc_stop,
1238 };
1239 
1240 /**
1241  * pch_vbus_gpio_get_value() - This API gets value of GPIO port as VBUS status.
1242  * @dev:	Reference to the driver structure
1243  *
1244  * Return value:
1245  *	1: VBUS is high
1246  *	0: VBUS is low
1247  *     -1: VBUS detection using GPIO is not enabled
1248  */
1249 static int pch_vbus_gpio_get_value(struct pch_udc_dev *dev)
1250 {
1251 	int vbus = 0;
1252 
1253 	if (dev->vbus_gpio.port)
1254 		vbus = gpiod_get_value(dev->vbus_gpio.port) ? 1 : 0;
1255 	else
1256 		vbus = -1;
1257 
1258 	return vbus;
1259 }
1260 
1261 /**
1262  * pch_vbus_gpio_work_fall() - This API keeps watch on VBUS becoming Low.
1263  *                             If VBUS is Low, disconnect is processed
1264  * @irq_work:	Structure for WorkQueue
1265  *
1266  */
1267 static void pch_vbus_gpio_work_fall(struct work_struct *irq_work)
1268 {
1269 	struct pch_vbus_gpio_data *vbus_gpio = container_of(irq_work,
1270 		struct pch_vbus_gpio_data, irq_work_fall);
1271 	struct pch_udc_dev *dev =
1272 		container_of(vbus_gpio, struct pch_udc_dev, vbus_gpio);
1273 	int vbus_saved = -1;
1274 	int vbus;
1275 	int count;
1276 
1277 	if (!dev->vbus_gpio.port)
1278 		return;
1279 
1280 	for (count = 0; count < (PCH_VBUS_PERIOD / PCH_VBUS_INTERVAL);
1281 		count++) {
1282 		vbus = pch_vbus_gpio_get_value(dev);
1283 
1284 		if ((vbus_saved == vbus) && (vbus == 0)) {
1285 			dev_dbg(&dev->pdev->dev, "VBUS fell");
1286 			if (dev->driver
1287 				&& dev->driver->disconnect) {
1288 				dev->driver->disconnect(
1289 					&dev->gadget);
1290 			}
1291 			if (dev->vbus_gpio.intr)
1292 				pch_udc_init(dev);
1293 			else
1294 				pch_udc_reconnect(dev);
1295 			return;
1296 		}
1297 		vbus_saved = vbus;
1298 		mdelay(PCH_VBUS_INTERVAL);
1299 	}
1300 }
1301 
1302 /**
1303  * pch_vbus_gpio_work_rise() - This API checks whether VBUS is High.
1304  *                             If VBUS is High, connect is processed
1305  * @irq_work:	Structure for WorkQueue
1306  *
1307  */
1308 static void pch_vbus_gpio_work_rise(struct work_struct *irq_work)
1309 {
1310 	struct pch_vbus_gpio_data *vbus_gpio = container_of(irq_work,
1311 		struct pch_vbus_gpio_data, irq_work_rise);
1312 	struct pch_udc_dev *dev =
1313 		container_of(vbus_gpio, struct pch_udc_dev, vbus_gpio);
1314 	int vbus;
1315 
1316 	if (!dev->vbus_gpio.port)
1317 		return;
1318 
1319 	mdelay(PCH_VBUS_INTERVAL);
1320 	vbus = pch_vbus_gpio_get_value(dev);
1321 
1322 	if (vbus == 1) {
1323 		dev_dbg(&dev->pdev->dev, "VBUS rose");
1324 		pch_udc_reconnect(dev);
1325 		return;
1326 	}
1327 }
1328 
1329 /**
1330  * pch_vbus_gpio_irq() - IRQ handler for GPIO interrupt for changing VBUS
1331  * @irq:	Interrupt request number
1332  * @data:	Reference to the device structure
1333  *
1334  * Return codes:
1335  *	IRQ_HANDLED:	the VBUS change was queued for processing
1336  *	IRQ_NONE:	GPIO-based VBUS detection is not in use
1337  */
1338 static irqreturn_t pch_vbus_gpio_irq(int irq, void *data)
1339 {
1340 	struct pch_udc_dev *dev = (struct pch_udc_dev *)data;
1341 
1342 	if (!dev->vbus_gpio.port || !dev->vbus_gpio.intr)
1343 		return IRQ_NONE;
1344 
1345 	if (pch_vbus_gpio_get_value(dev))
1346 		schedule_work(&dev->vbus_gpio.irq_work_rise);
1347 	else
1348 		schedule_work(&dev->vbus_gpio.irq_work_fall);
1349 
1350 	return IRQ_HANDLED;
1351 }
1352 
1353 /**
1354  * pch_vbus_gpio_init() - This API initializes GPIO port detecting VBUS.
1355  * @dev:		Reference to the driver structure
1356  *
1357  * Return codes:
1358  *	0: Success
1359  *	-EINVAL: GPIO port is invalid or can't be initialized.
1360  */
1361 static int pch_vbus_gpio_init(struct pch_udc_dev *dev)
1362 {
1363 	int err;
1364 	int irq_num = 0;
1365 	struct gpio_desc *gpiod;
1366 
1367 	dev->vbus_gpio.port = NULL;
1368 	dev->vbus_gpio.intr = 0;
1369 
1370 	/* Retrieve the GPIO line from the USB gadget device */
1371 	gpiod = devm_gpiod_get(dev->gadget.dev.parent, NULL, GPIOD_IN);
1372 	if (IS_ERR(gpiod))
1373 		return PTR_ERR(gpiod);
1374 	gpiod_set_consumer_name(gpiod, "pch_vbus");
1375 
1376 	dev->vbus_gpio.port = gpiod;
1377 	INIT_WORK(&dev->vbus_gpio.irq_work_fall, pch_vbus_gpio_work_fall);
1378 
1379 	irq_num = gpiod_to_irq(gpiod);
1380 	if (irq_num > 0) {
1381 		irq_set_irq_type(irq_num, IRQ_TYPE_EDGE_BOTH);
1382 		err = request_irq(irq_num, pch_vbus_gpio_irq, 0,
1383 			"vbus_detect", dev);
1384 		if (!err) {
1385 			dev->vbus_gpio.intr = irq_num;
1386 			INIT_WORK(&dev->vbus_gpio.irq_work_rise,
1387 				pch_vbus_gpio_work_rise);
1388 		} else {
1389 			pr_err("%s: can't request irq %d, err: %d\n",
1390 				__func__, irq_num, err);
1391 		}
1392 	}
1393 
1394 	return 0;
1395 }
1396 
1397 /**
1398  * pch_vbus_gpio_free() - This API frees resources of GPIO port
1399  * @dev:	Reference to the driver structure
1400  */
1401 static void pch_vbus_gpio_free(struct pch_udc_dev *dev)
1402 {
1403 	if (dev->vbus_gpio.intr)
1404 		free_irq(dev->vbus_gpio.intr, dev);
1405 }
1406 
1407 /**
1408  * complete_req() - This API is invoked from the driver when processing
1409  *			of a request is complete
1410  * @ep:		Reference to the endpoint structure
1411  * @req:	Reference to the request structure
1412  * @status:	Indicates the success/failure of completion
1413  */
1414 static void complete_req(struct pch_udc_ep *ep, struct pch_udc_request *req,
1415 								 int status)
1416 	__releases(&dev->lock)
1417 	__acquires(&dev->lock)
1418 {
1419 	struct pch_udc_dev	*dev;
1420 	unsigned halted = ep->halted;
1421 
1422 	list_del_init(&req->queue);
1423 
1424 	/* set new status if pending */
1425 	if (req->req.status == -EINPROGRESS)
1426 		req->req.status = status;
1427 	else
1428 		status = req->req.status;
1429 
1430 	dev = ep->dev;
1431 	if (req->dma_mapped) {
1432 		if (req->dma == DMA_ADDR_INVALID) {
1433 			if (ep->in)
1434 				dma_unmap_single(&dev->pdev->dev, req->req.dma,
1435 						 req->req.length,
1436 						 DMA_TO_DEVICE);
1437 			else
1438 				dma_unmap_single(&dev->pdev->dev, req->req.dma,
1439 						 req->req.length,
1440 						 DMA_FROM_DEVICE);
1441 			req->req.dma = DMA_ADDR_INVALID;
1442 		} else {
1443 			if (ep->in)
1444 				dma_unmap_single(&dev->pdev->dev, req->dma,
1445 						 req->req.length,
1446 						 DMA_TO_DEVICE);
1447 			else {
1448 				dma_unmap_single(&dev->pdev->dev, req->dma,
1449 						 req->req.length,
1450 						 DMA_FROM_DEVICE);
1451 				memcpy(req->req.buf, req->buf, req->req.length);
1452 			}
1453 			kfree(req->buf);
1454 			req->dma = DMA_ADDR_INVALID;
1455 		}
1456 		req->dma_mapped = 0;
1457 	}
1458 	ep->halted = 1;
1459 	spin_unlock(&dev->lock);
1460 	if (!ep->in)
1461 		pch_udc_ep_clear_rrdy(ep);
1462 	usb_gadget_giveback_request(&ep->ep, &req->req);
1463 	spin_lock(&dev->lock);
1464 	ep->halted = halted;
1465 }
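/*
 * Note on the two unmap paths above (editor's addition): when req->dma is
 * DMA_ADDR_INVALID the gadget's own buffer was mapped directly via
 * req->req.dma; otherwise a driver-allocated bounce buffer (req->buf /
 * req->dma), set up in pch_udc_pcd_queue() for buffers that are not
 * 32-bit aligned, was used, and its contents are copied back to the
 * gadget buffer for OUT transfers before the bounce buffer is freed.
 */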
1466 
1467 /**
1468  * empty_req_queue() - This API empties the request queue of an endpoint
1469  * @ep:		Reference to the endpoint structure
1470  */
1471 static void empty_req_queue(struct pch_udc_ep *ep)
1472 {
1473 	struct pch_udc_request	*req;
1474 
1475 	ep->halted = 1;
1476 	while (!list_empty(&ep->queue)) {
1477 		req = list_entry(ep->queue.next, struct pch_udc_request, queue);
1478 		complete_req(ep, req, -ESHUTDOWN);	/* Remove from list */
1479 	}
1480 }
1481 
1482 /**
1483  * pch_udc_free_dma_chain() - This function frees the DMA chain created
1484  *				for the request
1485  * @dev:	Reference to the driver structure
1486  * @req:	Reference to the request to be freed
1490  */
1491 static void pch_udc_free_dma_chain(struct pch_udc_dev *dev,
1492 				   struct pch_udc_request *req)
1493 {
1494 	struct pch_udc_data_dma_desc *td = req->td_data;
1495 	unsigned i = req->chain_len;
1496 
1497 	dma_addr_t addr2;
1498 	dma_addr_t addr = (dma_addr_t)td->next;
1499 	td->next = 0x00;
1500 	for (; i > 1; --i) {
1501 		/* do not free first desc., will be done by free for request */
1502 		td = phys_to_virt(addr);
1503 		addr2 = (dma_addr_t)td->next;
1504 		dma_pool_free(dev->data_requests, td, addr);
1505 		addr = addr2;
1506 	}
1507 	req->chain_len = 1;
1508 }
1509 
1510 /**
1511  * pch_udc_create_dma_chain() - This function creates or reinitializes
1512  *				a DMA chain
1513  * @ep:		Reference to the endpoint structure
1514  * @req:	Reference to the request
1515  * @buf_len:	The buffer length
1516  * @gfp_flags:	Flags to be used while mapping the data buffer
1517  *
1518  * Return codes:
1519  *	0:		success,
1520  *	-ENOMEM:	dma_pool_alloc invocation fails
1521  */
1522 static int pch_udc_create_dma_chain(struct pch_udc_ep *ep,
1523 				    struct pch_udc_request *req,
1524 				    unsigned long buf_len,
1525 				    gfp_t gfp_flags)
1526 {
1527 	struct pch_udc_data_dma_desc *td = req->td_data, *last;
1528 	unsigned long bytes = req->req.length, i = 0;
1529 	dma_addr_t dma_addr;
1530 	unsigned len = 1;
1531 
1532 	if (req->chain_len > 1)
1533 		pch_udc_free_dma_chain(ep->dev, req);
1534 
1535 	if (req->dma == DMA_ADDR_INVALID)
1536 		td->dataptr = req->req.dma;
1537 	else
1538 		td->dataptr = req->dma;
1539 
1540 	td->status = PCH_UDC_BS_HST_BSY;
1541 	for (; ; bytes -= buf_len, ++len) {
1542 		td->status = PCH_UDC_BS_HST_BSY | min(buf_len, bytes);
1543 		if (bytes <= buf_len)
1544 			break;
1545 		last = td;
1546 		td = dma_pool_alloc(ep->dev->data_requests, gfp_flags,
1547 				    &dma_addr);
1548 		if (!td)
1549 			goto nomem;
1550 		i += buf_len;
1551 		td->dataptr = req->td_data->dataptr + i;
1552 		last->next = dma_addr;
1553 	}
1554 
1555 	req->td_data_last = td;
1556 	td->status |= PCH_UDC_DMA_LAST;
1557 	td->next = req->td_data_phys;
1558 	req->chain_len = len;
1559 	return 0;
1560 
1561 nomem:
1562 	if (len > 1) {
1563 		req->chain_len = len;
1564 		pch_udc_free_dma_chain(ep->dev, req);
1565 	}
1566 	req->chain_len = 1;
1567 	return -ENOMEM;
1568 }
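/*
 * Chain layout (editor's note): each descriptor covers at most buf_len
 * bytes (the endpoint's max packet size when called from prepare_dma());
 * the final descriptor is flagged PCH_UDC_DMA_LAST and its next pointer
 * wraps back to req->td_data_phys, the physical address of the first
 * descriptor.
 */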
1569 
1570 /**
1571  * prepare_dma() - This function creates and initializes the DMA chain
1572  *			for the request
1573  * @ep:		Reference to the endpoint structure
1574  * @req:	Reference to the request
1575  * @gfp:	Flag to be used while mapping the data buffer
1576  *
1577  * Return codes:
1578  *	0:		Success
1579  *	Non-zero:	linux error number on failure
1580  */
1581 static int prepare_dma(struct pch_udc_ep *ep, struct pch_udc_request *req,
1582 			  gfp_t gfp)
1583 {
1584 	int	retval;
1585 
1586 	/* Allocate and create a DMA chain */
1587 	retval = pch_udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
1588 	if (retval) {
1589 		pr_err("%s: could not create DMA chain:%d\n", __func__, retval);
1590 		return retval;
1591 	}
1592 	if (ep->in)
1593 		req->td_data->status = (req->td_data->status &
1594 				~PCH_UDC_BUFF_STS) | PCH_UDC_BS_HST_RDY;
1595 	return 0;
1596 }
1597 
1598 /**
1599  * process_zlp() - This function processes zero-length packets
1600  *			from the gadget driver
1601  * @ep:		Reference to the endpoint structure
1602  * @req:	Reference to the request
1603  */
1604 static void process_zlp(struct pch_udc_ep *ep, struct pch_udc_request *req)
1605 {
1606 	struct pch_udc_dev	*dev = ep->dev;
1607 
1608 	/* IN zlp's are handled by hardware */
1609 	complete_req(ep, req, 0);
1610 
1611 	/* if set_config or set_intf is waiting for ack by zlp
1612 	 * then set CSR_DONE
1613 	 */
1614 	if (dev->set_cfg_not_acked) {
1615 		pch_udc_set_csr_done(dev);
1616 		dev->set_cfg_not_acked = 0;
1617 	}
1618 	/* setup command is ACK'ed now by zlp */
1619 	if (!dev->stall && dev->waiting_zlp_ack) {
1620 		pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
1621 		dev->waiting_zlp_ack = 0;
1622 	}
1623 }
1624 
1625 /**
1626  * pch_udc_start_rxrequest() - This function starts a receive request.
1627  * @ep:		Reference to the endpoint structure
1628  * @req:	Reference to the request structure
1629  */
1630 static void pch_udc_start_rxrequest(struct pch_udc_ep *ep,
1631 					 struct pch_udc_request *req)
1632 {
1633 	struct pch_udc_data_dma_desc *td_data;
1634 
1635 	pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
1636 	td_data = req->td_data;
1637 	/* Set the status bits for all descriptors */
1638 	while (1) {
1639 		td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
1640 				    PCH_UDC_BS_HST_RDY;
1641 		if ((td_data->status & PCH_UDC_DMA_LAST) ==  PCH_UDC_DMA_LAST)
1642 			break;
1643 		td_data = phys_to_virt(td_data->next);
1644 	}
1645 	/* Write the descriptor pointer */
1646 	pch_udc_ep_set_ddptr(ep, req->td_data_phys);
1647 	req->dma_going = 1;
1648 	pch_udc_enable_ep_interrupts(ep->dev, UDC_EPINT_OUT_EP0 << ep->num);
1649 	pch_udc_set_dma(ep->dev, DMA_DIR_RX);
1650 	pch_udc_ep_clear_nak(ep);
1651 	pch_udc_ep_set_rrdy(ep);
1652 }
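/*
 * Sequence summary (editor's addition): receive DMA is stopped first, all
 * descriptors in the chain are handed to the controller (PCH_UDC_BS_HST_RDY)
 * up to and including the PCH_UDC_DMA_LAST one, the descriptor pointer is
 * programmed, and only then are the OUT endpoint interrupt, receive DMA
 * (RDE), CNAK and RRDY enabled to kick off reception.
 */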
1653 
1654 /**
1655  * pch_udc_pcd_ep_enable() - This API enables the endpoint. It is called
1656  *				from gadget driver
1657  * @usbep:	Reference to the USB endpoint structure
1658  * @desc:	Reference to the USB endpoint descriptor structure
1659  *
1660  * Return codes:
1661  *	0:		Success
1662  *	-EINVAL:	invalid endpoint, descriptor, or max packet size
1663  *	-ESHUTDOWN:	no gadget driver bound or device speed unknown
1664  */
1665 static int pch_udc_pcd_ep_enable(struct usb_ep *usbep,
1666 				    const struct usb_endpoint_descriptor *desc)
1667 {
1668 	struct pch_udc_ep	*ep;
1669 	struct pch_udc_dev	*dev;
1670 	unsigned long		iflags;
1671 
1672 	if (!usbep || (usbep->name == ep0_string) || !desc ||
1673 	    (desc->bDescriptorType != USB_DT_ENDPOINT) || !desc->wMaxPacketSize)
1674 		return -EINVAL;
1675 
1676 	ep = container_of(usbep, struct pch_udc_ep, ep);
1677 	dev = ep->dev;
1678 	if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
1679 		return -ESHUTDOWN;
1680 	spin_lock_irqsave(&dev->lock, iflags);
1681 	ep->ep.desc = desc;
1682 	ep->halted = 0;
1683 	pch_udc_ep_enable(ep, &ep->dev->cfg_data, desc);
1684 	ep->ep.maxpacket = usb_endpoint_maxp(desc);
1685 	pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1686 	spin_unlock_irqrestore(&dev->lock, iflags);
1687 	return 0;
1688 }
1689 
1690 /**
1691  * pch_udc_pcd_ep_disable() - This API disables endpoint and is called
1692  *				from gadget driver
1693  * @usbep:	Reference to the USB endpoint structure
1694  *
1695  * Return codes:
1696  *	0:		Success
1697  *	-EINVAL:	invalid endpoint, ep0, or endpoint not enabled
1698  */
1699 static int pch_udc_pcd_ep_disable(struct usb_ep *usbep)
1700 {
1701 	struct pch_udc_ep	*ep;
1702 	unsigned long	iflags;
1703 
1704 	if (!usbep)
1705 		return -EINVAL;
1706 
1707 	ep = container_of(usbep, struct pch_udc_ep, ep);
1708 	if ((usbep->name == ep0_string) || !ep->ep.desc)
1709 		return -EINVAL;
1710 
1711 	spin_lock_irqsave(&ep->dev->lock, iflags);
1712 	empty_req_queue(ep);
1713 	ep->halted = 1;
1714 	pch_udc_ep_disable(ep);
1715 	pch_udc_disable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1716 	ep->ep.desc = NULL;
1717 	INIT_LIST_HEAD(&ep->queue);
1718 	spin_unlock_irqrestore(&ep->dev->lock, iflags);
1719 	return 0;
1720 }
1721 
1722 /**
1723  * pch_udc_alloc_request() - This function allocates request structure.
1724  *				It is called by gadget driver
1725  * @usbep:	Reference to the USB endpoint structure
1726  * @gfp:	Flag to be used while allocating memory
1727  *
1728  * Return codes:
1729  *	NULL:			Failure
1730  *	Allocated address:	Success
1731  */
1732 static struct usb_request *pch_udc_alloc_request(struct usb_ep *usbep,
1733 						  gfp_t gfp)
1734 {
1735 	struct pch_udc_request		*req;
1736 	struct pch_udc_ep		*ep;
1737 	struct pch_udc_data_dma_desc	*dma_desc;
1738 
1739 	if (!usbep)
1740 		return NULL;
1741 	ep = container_of(usbep, struct pch_udc_ep, ep);
1742 	req = kzalloc(sizeof *req, gfp);
1743 	if (!req)
1744 		return NULL;
1745 	req->req.dma = DMA_ADDR_INVALID;
1746 	req->dma = DMA_ADDR_INVALID;
1747 	INIT_LIST_HEAD(&req->queue);
1748 	if (!ep->dev->dma_addr)
1749 		return &req->req;
1750 	/* ep0 in requests are allocated from data pool here */
1751 	dma_desc = dma_pool_alloc(ep->dev->data_requests, gfp,
1752 				  &req->td_data_phys);
1753 	if (NULL == dma_desc) {
1754 		kfree(req);
1755 		return NULL;
1756 	}
1757 	/* prevent from using desc. - set HOST BUSY */
1758 	dma_desc->status |= PCH_UDC_BS_HST_BSY;
1759 	dma_desc->dataptr = cpu_to_le32(DMA_ADDR_INVALID);
1760 	req->td_data = dma_desc;
1761 	req->td_data_last = dma_desc;
1762 	req->chain_len = 1;
1763 	return &req->req;
1764 }
1765 
1766 /**
1767  * pch_udc_free_request() - This function frees request structure.
1768  *				It is called by the gadget driver
1769  * @usbep:	Reference to the USB endpoint structure
1770  * @usbreq:	Reference to the USB request
1771  */
1772 static void pch_udc_free_request(struct usb_ep *usbep,
1773 				  struct usb_request *usbreq)
1774 {
1775 	struct pch_udc_ep	*ep;
1776 	struct pch_udc_request	*req;
1777 	struct pch_udc_dev	*dev;
1778 
1779 	if (!usbep || !usbreq)
1780 		return;
1781 	ep = container_of(usbep, struct pch_udc_ep, ep);
1782 	req = container_of(usbreq, struct pch_udc_request, req);
1783 	dev = ep->dev;
1784 	if (!list_empty(&req->queue))
1785 		dev_err(&dev->pdev->dev, "%s: %s req=0x%p queue not empty\n",
1786 			__func__, usbep->name, req);
1787 	if (req->td_data != NULL) {
1788 		if (req->chain_len > 1)
1789 			pch_udc_free_dma_chain(ep->dev, req);
1790 		dma_pool_free(ep->dev->data_requests, req->td_data,
1791 			      req->td_data_phys);
1792 	}
1793 	kfree(req);
1794 }
1795 
1796 /**
1797  * pch_udc_pcd_queue() - This function queues a request packet. It is called
1798  *			by the gadget driver
1799  * @usbep:	Reference to the USB endpoint structure
1800  * @usbreq:	Reference to the USB request
1801  * @gfp:	Flag to be used while mapping the data buffer
1802  *
1803  * Return codes:
1804  *	0:			Success
1805  *	linux error number:	Failure
1806  */
1807 static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
1808 								 gfp_t gfp)
1809 {
1810 	int retval = 0;
1811 	struct pch_udc_ep	*ep;
1812 	struct pch_udc_dev	*dev;
1813 	struct pch_udc_request	*req;
1814 	unsigned long	iflags;
1815 
1816 	if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf)
1817 		return -EINVAL;
1818 	ep = container_of(usbep, struct pch_udc_ep, ep);
1819 	dev = ep->dev;
1820 	if (!ep->ep.desc && ep->num)
1821 		return -EINVAL;
1822 	req = container_of(usbreq, struct pch_udc_request, req);
1823 	if (!list_empty(&req->queue))
1824 		return -EINVAL;
1825 	if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
1826 		return -ESHUTDOWN;
1827 	spin_lock_irqsave(&dev->lock, iflags);
1828 	/* map the buffer for dma */
1829 	if (usbreq->length &&
1830 	    ((usbreq->dma == DMA_ADDR_INVALID) || !usbreq->dma)) {
1831 		if (!((unsigned long)(usbreq->buf) & 0x03)) {
1832 			if (ep->in)
1833 				usbreq->dma = dma_map_single(&dev->pdev->dev,
1834 							     usbreq->buf,
1835 							     usbreq->length,
1836 							     DMA_TO_DEVICE);
1837 			else
1838 				usbreq->dma = dma_map_single(&dev->pdev->dev,
1839 							     usbreq->buf,
1840 							     usbreq->length,
1841 							     DMA_FROM_DEVICE);
1842 		} else {
1843 			req->buf = kzalloc(usbreq->length, GFP_ATOMIC);
1844 			if (!req->buf) {
1845 				retval = -ENOMEM;
1846 				goto probe_end;
1847 			}
1848 			if (ep->in) {
1849 				memcpy(req->buf, usbreq->buf, usbreq->length);
1850 				req->dma = dma_map_single(&dev->pdev->dev,
1851 							  req->buf,
1852 							  usbreq->length,
1853 							  DMA_TO_DEVICE);
1854 			} else
1855 				req->dma = dma_map_single(&dev->pdev->dev,
1856 							  req->buf,
1857 							  usbreq->length,
1858 							  DMA_FROM_DEVICE);
1859 		}
1860 		req->dma_mapped = 1;
1861 	}
1862 	if (usbreq->length > 0) {
1863 		retval = prepare_dma(ep, req, GFP_ATOMIC);
1864 		if (retval)
1865 			goto probe_end;
1866 	}
1867 	usbreq->actual = 0;
1868 	usbreq->status = -EINPROGRESS;
1869 	req->dma_done = 0;
1870 	if (list_empty(&ep->queue) && !ep->halted) {
1871 		/* no pending transfer, so start this req */
1872 		if (!usbreq->length) {
1873 			process_zlp(ep, req);
1874 			retval = 0;
1875 			goto probe_end;
1876 		}
1877 		if (!ep->in) {
1878 			pch_udc_start_rxrequest(ep, req);
1879 		} else {
1880 			/*
1881 			 * For IN transfers the descriptors will be programmed
1882 			 * and the P bit will be set when
1883 			 * we get an IN token
1884 			 */
1885 			pch_udc_wait_ep_stall(ep);
1886 			pch_udc_ep_clear_nak(ep);
1887 			pch_udc_enable_ep_interrupts(ep->dev, (1 << ep->num));
1888 		}
1889 	}
1890 	/* Now add this request to the ep's pending requests */
1891 	if (req != NULL)
1892 		list_add_tail(&req->queue, &ep->queue);
1893 
1894 probe_end:
1895 	spin_unlock_irqrestore(&dev->lock, iflags);
1896 	return retval;
1897 }
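
/*
 * Note on DMA buffer alignment (a sketch derived from the mapping code
 * above, not a statement of the gadget API): buffers whose address is not
 * 4-byte aligned are bounced through a kzalloc'd copy before being
 * DMA-mapped, so a function driver avoids the extra copy by submitting
 * aligned buffers, e.g.:
 *
 *	req->buf = kmalloc(len, GFP_KERNEL);	(kmalloc gives >= 4-byte alignment)
 *	req->length = len;
 *	req->complete = my_complete;
 *	status = usb_ep_queue(ep, req, GFP_ATOMIC);
 *
 * my_complete is a placeholder for the caller's completion callback.
 */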
1898 
1899 /**
1900  * pch_udc_pcd_dequeue() - This function de-queues a request packet.
1901  *				It is called by the gadget driver
1902  * @usbep:	Reference to the USB endpoint structure
1903  * @usbreq:	Reference to the USB request
1904  *
1905  * Return codes:
1906  *	0:			Success
1907  *	linux error number:	Failure
1908  */
1909 static int pch_udc_pcd_dequeue(struct usb_ep *usbep,
1910 				struct usb_request *usbreq)
1911 {
1912 	struct pch_udc_ep	*ep;
1913 	struct pch_udc_request	*req;
1914 	unsigned long		flags;
1915 	int ret = -EINVAL;
1916 
1917 	ep = container_of(usbep, struct pch_udc_ep, ep);
1918 	if (!usbep || !usbreq || (!ep->ep.desc && ep->num))
1919 		return ret;
1920 	req = container_of(usbreq, struct pch_udc_request, req);
1921 	spin_lock_irqsave(&ep->dev->lock, flags);
1922 	/* make sure it's still queued on this endpoint */
1923 	list_for_each_entry(req, &ep->queue, queue) {
1924 		if (&req->req == usbreq) {
1925 			pch_udc_ep_set_nak(ep);
1926 			if (!list_empty(&req->queue))
1927 				complete_req(ep, req, -ECONNRESET);
1928 			ret = 0;
1929 			break;
1930 		}
1931 	}
1932 	spin_unlock_irqrestore(&ep->dev->lock, flags);
1933 	return ret;
1934 }
1935 
1936 /**
1937  * pch_udc_pcd_set_halt() - This function sets or clears the endpoint halt
1938  *			    feature
1939  * @usbep:	Reference to the USB endpoint structure
1940  * @halt:	Specifies whether to set or clear the feature
1941  *
1942  * Return codes:
1943  *	0:			Success
1944  *	linux error number:	Failure
1945  */
1946 static int pch_udc_pcd_set_halt(struct usb_ep *usbep, int halt)
1947 {
1948 	struct pch_udc_ep	*ep;
1949 	unsigned long iflags;
1950 	int ret;
1951 
1952 	if (!usbep)
1953 		return -EINVAL;
1954 	ep = container_of(usbep, struct pch_udc_ep, ep);
1955 	if (!ep->ep.desc && !ep->num)
1956 		return -EINVAL;
1957 	if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
1958 		return -ESHUTDOWN;
1959 	spin_lock_irqsave(&udc_stall_spinlock, iflags);
1960 	if (list_empty(&ep->queue)) {
1961 		if (halt) {
1962 			if (ep->num == PCH_UDC_EP0)
1963 				ep->dev->stall = 1;
1964 			pch_udc_ep_set_stall(ep);
1965 			pch_udc_enable_ep_interrupts(
1966 				ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1967 		} else {
1968 			pch_udc_ep_clear_stall(ep);
1969 		}
1970 		ret = 0;
1971 	} else {
1972 		ret = -EAGAIN;
1973 	}
1974 	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
1975 	return ret;
1976 }
1977 
1978 /**
1979  * pch_udc_pcd_set_wedge() - This function sets the endpoint wedge
1980  *				(halt) feature; the host cannot clear it
1981  * @usbep:	Reference to the USB endpoint structure
1982  *
1983  * Return codes:
1984  *	0:			Success
1985  *	linux error number:	Failure
1986  */
1987 static int pch_udc_pcd_set_wedge(struct usb_ep *usbep)
1988 {
1989 	struct pch_udc_ep	*ep;
1990 	unsigned long iflags;
1991 	int ret;
1992 
1993 	if (!usbep)
1994 		return -EINVAL;
1995 	ep = container_of(usbep, struct pch_udc_ep, ep);
1996 	if (!ep->ep.desc && !ep->num)
1997 		return -EINVAL;
1998 	if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
1999 		return -ESHUTDOWN;
2000 	spin_lock_irqsave(&udc_stall_spinlock, iflags);
2001 	if (!list_empty(&ep->queue)) {
2002 		ret = -EAGAIN;
2003 	} else {
2004 		if (ep->num == PCH_UDC_EP0)
2005 			ep->dev->stall = 1;
2006 		pch_udc_ep_set_stall(ep);
2007 		pch_udc_enable_ep_interrupts(ep->dev,
2008 					     PCH_UDC_EPINT(ep->in, ep->num));
2009 		ep->dev->prot_stall = 1;
2010 		ret = 0;
2011 	}
2012 	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
2013 	return ret;
2014 }
2015 
2016 /**
2017  * pch_udc_pcd_fifo_flush() - This function flushes the FIFO of the specified endpoint
2018  * @usbep:	Reference to the USB endpoint structure
2019  */
2020 static void pch_udc_pcd_fifo_flush(struct usb_ep *usbep)
2021 {
2022 	struct pch_udc_ep  *ep;
2023 
2024 	if (!usbep)
2025 		return;
2026 
2027 	ep = container_of(usbep, struct pch_udc_ep, ep);
2028 	if (ep->ep.desc || !ep->num)
2029 		pch_udc_ep_fifo_flush(ep, ep->in);
2030 }
2031 
2032 static const struct usb_ep_ops pch_udc_ep_ops = {
2033 	.enable		= pch_udc_pcd_ep_enable,
2034 	.disable	= pch_udc_pcd_ep_disable,
2035 	.alloc_request	= pch_udc_alloc_request,
2036 	.free_request	= pch_udc_free_request,
2037 	.queue		= pch_udc_pcd_queue,
2038 	.dequeue	= pch_udc_pcd_dequeue,
2039 	.set_halt	= pch_udc_pcd_set_halt,
2040 	.set_wedge	= pch_udc_pcd_set_wedge,
2041 	.fifo_status	= NULL,
2042 	.fifo_flush	= pch_udc_pcd_fifo_flush,
2043 };
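
/*
 * Illustrative mapping (not part of this driver) of the gadget-core
 * wrappers onto the ops table above; function drivers go through these
 * wrappers rather than calling the pch_udc_pcd_* routines directly:
 *
 *	req = usb_ep_alloc_request(ep, GFP_KERNEL);	-> .alloc_request
 *	status = usb_ep_queue(ep, req, GFP_ATOMIC);	-> .queue
 *	usb_ep_dequeue(ep, req);			-> .dequeue
 *	usb_ep_set_halt(ep);				-> .set_halt
 *	usb_ep_set_wedge(ep);				-> .set_wedge
 *	usb_ep_fifo_flush(ep);				-> .fifo_flush
 *	usb_ep_free_request(ep, req);			-> .free_request
 */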
2044 
2045 /**
2046  * pch_udc_init_setup_buff() - This function initializes the SETUP buffer
2047  * @td_stp:	Reference to the SETP buffer structure
2048  * @td_stp:	Reference to the SETUP buffer structure
2049 static void pch_udc_init_setup_buff(struct pch_udc_stp_dma_desc *td_stp)
2050 {
2051 	static u32	pky_marker;
2052 
2053 	if (!td_stp)
2054 		return;
2055 	td_stp->reserved = ++pky_marker;
2056 	memset(&td_stp->request, 0xFF, sizeof td_stp->request);
2057 	td_stp->status = PCH_UDC_BS_HST_RDY;
2058 }
2059 
2060 /**
2061  * pch_udc_start_next_txrequest() - This function starts
2062  *					the next transmission request
2063  * @ep:	Reference to the endpoint structure
2064  */
2065 static void pch_udc_start_next_txrequest(struct pch_udc_ep *ep)
2066 {
2067 	struct pch_udc_request *req;
2068 	struct pch_udc_data_dma_desc *td_data;
2069 
2070 	if (pch_udc_read_ep_control(ep) & UDC_EPCTL_P)
2071 		return;
2072 
2073 	if (list_empty(&ep->queue))
2074 		return;
2075 
2076 	/* next request */
2077 	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2078 	if (req->dma_going)
2079 		return;
2080 	if (!req->td_data)
2081 		return;
2082 	pch_udc_wait_ep_stall(ep);
2083 	req->dma_going = 1;
2084 	pch_udc_ep_set_ddptr(ep, 0);
2085 	td_data = req->td_data;
2086 	while (1) {
2087 		td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
2088 				   PCH_UDC_BS_HST_RDY;
2089 		if ((td_data->status & PCH_UDC_DMA_LAST) == PCH_UDC_DMA_LAST)
2090 			break;
2091 		td_data = phys_to_virt(td_data->next);
2092 	}
2093 	pch_udc_ep_set_ddptr(ep, req->td_data_phys);
2094 	pch_udc_set_dma(ep->dev, DMA_DIR_TX);
2095 	pch_udc_ep_set_pd(ep);
2096 	pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
2097 	pch_udc_ep_clear_nak(ep);
2098 }
2099 
2100 /**
2101  * pch_udc_complete_transfer() - This function completes a transfer
2102  * @ep:		Reference to the endpoint structure
2103  */
2104 static void pch_udc_complete_transfer(struct pch_udc_ep *ep)
2105 {
2106 	struct pch_udc_request *req;
2107 	struct pch_udc_dev *dev = ep->dev;
2108 
2109 	if (list_empty(&ep->queue))
2110 		return;
2111 	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2112 	if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
2113 	    PCH_UDC_BS_DMA_DONE)
2114 		return;
2115 	if ((req->td_data_last->status & PCH_UDC_RXTX_STS) !=
2116 	     PCH_UDC_RTS_SUCC) {
2117 		dev_err(&dev->pdev->dev, "Invalid RXTX status (0x%08x) "
2118 			"epstatus=0x%08x\n",
2119 		       (req->td_data_last->status & PCH_UDC_RXTX_STS),
2120 		       (int)(ep->epsts));
2121 		return;
2122 	}
2123 
2124 	req->req.actual = req->req.length;
2125 	req->td_data_last->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
2126 	req->td_data->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
2127 	complete_req(ep, req, 0);
2128 	req->dma_going = 0;
2129 	if (!list_empty(&ep->queue)) {
2130 		pch_udc_wait_ep_stall(ep);
2131 		pch_udc_ep_clear_nak(ep);
2132 		pch_udc_enable_ep_interrupts(ep->dev,
2133 					     PCH_UDC_EPINT(ep->in, ep->num));
2134 	} else {
2135 		pch_udc_disable_ep_interrupts(ep->dev,
2136 					      PCH_UDC_EPINT(ep->in, ep->num));
2137 	}
2138 }
2139 
2140 /**
2141  * pch_udc_complete_receiver() - This function completes a receive (OUT) transfer
2142  * @ep:		Reference to the endpoint structure
2143  */
2144 static void pch_udc_complete_receiver(struct pch_udc_ep *ep)
2145 {
2146 	struct pch_udc_request *req;
2147 	struct pch_udc_dev *dev = ep->dev;
2148 	unsigned int count;
2149 	struct pch_udc_data_dma_desc *td;
2150 	dma_addr_t addr;
2151 
2152 	if (list_empty(&ep->queue))
2153 		return;
2154 	/* next request */
2155 	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2156 	pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
2157 	pch_udc_ep_set_ddptr(ep, 0);
2158 	if ((req->td_data_last->status & PCH_UDC_BUFF_STS) ==
2159 	    PCH_UDC_BS_DMA_DONE)
2160 		td = req->td_data_last;
2161 	else
2162 		td = req->td_data;
2163 
2164 	while (1) {
2165 		if ((td->status & PCH_UDC_RXTX_STS) != PCH_UDC_RTS_SUCC) {
2166 			dev_err(&dev->pdev->dev, "Invalid RXTX status=0x%08x "
2167 				"epstatus=0x%08x\n",
2168 				(td->status & PCH_UDC_RXTX_STS),
2169 				(int)(ep->epsts));
2170 			return;
2171 		}
2172 		if ((td->status & PCH_UDC_BUFF_STS) == PCH_UDC_BS_DMA_DONE)
2173 			if (td->status & PCH_UDC_DMA_LAST) {
2174 				count = td->status & PCH_UDC_RXTX_BYTES;
2175 				break;
2176 			}
2177 		if (td == req->td_data_last) {
2178 			dev_err(&dev->pdev->dev, "Incomplete RX descriptor\n");
2179 			return;
2180 		}
2181 		addr = (dma_addr_t)td->next;
2182 		td = phys_to_virt(addr);
2183 	}
2184 	/* on 64k packets the RXBYTES field is zero */
2185 	if (!count && (req->req.length == UDC_DMA_MAXPACKET))
2186 		count = UDC_DMA_MAXPACKET;
2187 	req->td_data->status |= PCH_UDC_DMA_LAST;
2188 	td->status |= PCH_UDC_BS_HST_BSY;
2189 
2190 	req->dma_going = 0;
2191 	req->req.actual = count;
2192 	complete_req(ep, req, 0);
2193 	/* If there are new/failed requests, try them now */
2194 	if (!list_empty(&ep->queue)) {
2195 		req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2196 		pch_udc_start_rxrequest(ep, req);
2197 	}
2198 }
2199 
2200 /**
2201  * pch_udc_svc_data_in() - This function processes endpoint interrupts
2202  *				for IN endpoints
2203  * @dev:	Reference to the device structure
2204  * @ep_num:	Endpoint that generated the interrupt
2205  */
2206 static void pch_udc_svc_data_in(struct pch_udc_dev *dev, int ep_num)
2207 {
2208 	u32	epsts;
2209 	struct pch_udc_ep	*ep;
2210 
2211 	ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
2212 	epsts = ep->epsts;
2213 	ep->epsts = 0;
2214 
2215 	if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA  | UDC_EPSTS_HE |
2216 		       UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
2217 		       UDC_EPSTS_RSS | UDC_EPSTS_XFERDONE)))
2218 		return;
2219 	if ((epsts & UDC_EPSTS_BNA))
2220 		return;
2221 	if (epsts & UDC_EPSTS_HE)
2222 		return;
2223 	if (epsts & UDC_EPSTS_RSS) {
2224 		pch_udc_ep_set_stall(ep);
2225 		pch_udc_enable_ep_interrupts(ep->dev,
2226 					     PCH_UDC_EPINT(ep->in, ep->num));
2227 	}
2228 	if (epsts & UDC_EPSTS_RCS) {
2229 		if (!dev->prot_stall) {
2230 			pch_udc_ep_clear_stall(ep);
2231 		} else {
2232 			pch_udc_ep_set_stall(ep);
2233 			pch_udc_enable_ep_interrupts(ep->dev,
2234 						PCH_UDC_EPINT(ep->in, ep->num));
2235 		}
2236 	}
2237 	if (epsts & UDC_EPSTS_TDC)
2238 		pch_udc_complete_transfer(ep);
2239 	/* On IN interrupt, provide data if we have any */
2240 	if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_RSS) &&
2241 	    !(epsts & UDC_EPSTS_TDC) && !(epsts & UDC_EPSTS_TXEMPTY))
2242 		pch_udc_start_next_txrequest(ep);
2243 }
2244 
2245 /**
2246  * pch_udc_svc_data_out() - Handles interrupts from an OUT endpoint
2247  * @dev:	Reference to the device structure
2248  * @ep_num:	Endpoint that generated the interrupt
2249  */
2250 static void pch_udc_svc_data_out(struct pch_udc_dev *dev, int ep_num)
2251 {
2252 	u32			epsts;
2253 	struct pch_udc_ep		*ep;
2254 	struct pch_udc_request		*req = NULL;
2255 
2256 	ep = &dev->ep[UDC_EPOUT_IDX(ep_num)];
2257 	epsts = ep->epsts;
2258 	ep->epsts = 0;
2259 
2260 	if ((epsts & UDC_EPSTS_BNA) && (!list_empty(&ep->queue))) {
2261 		/* next request */
2262 		req = list_entry(ep->queue.next, struct pch_udc_request,
2263 				 queue);
2264 		if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
2265 		     PCH_UDC_BS_DMA_DONE) {
2266 			if (!req->dma_going)
2267 				pch_udc_start_rxrequest(ep, req);
2268 			return;
2269 		}
2270 	}
2271 	if (epsts & UDC_EPSTS_HE)
2272 		return;
2273 	if (epsts & UDC_EPSTS_RSS) {
2274 		pch_udc_ep_set_stall(ep);
2275 		pch_udc_enable_ep_interrupts(ep->dev,
2276 					     PCH_UDC_EPINT(ep->in, ep->num));
2277 	}
2278 	if (epsts & UDC_EPSTS_RCS) {
2279 		if (!dev->prot_stall) {
2280 			pch_udc_ep_clear_stall(ep);
2281 		} else {
2282 			pch_udc_ep_set_stall(ep);
2283 			pch_udc_enable_ep_interrupts(ep->dev,
2284 						PCH_UDC_EPINT(ep->in, ep->num));
2285 		}
2286 	}
2287 	if (((epsts & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2288 	    UDC_EPSTS_OUT_DATA) {
2289 		if (ep->dev->prot_stall == 1) {
2290 			pch_udc_ep_set_stall(ep);
2291 			pch_udc_enable_ep_interrupts(ep->dev,
2292 						PCH_UDC_EPINT(ep->in, ep->num));
2293 		} else {
2294 			pch_udc_complete_receiver(ep);
2295 		}
2296 	}
2297 	if (list_empty(&ep->queue))
2298 		pch_udc_set_dma(dev, DMA_DIR_RX);
2299 }
2300 
2301 /**
2302  * pch_udc_svc_control_in() - Handle Control IN endpoint interrupts
2303  * @dev:	Reference to the device structure
2304  */
2305 static void pch_udc_svc_control_in(struct pch_udc_dev *dev)
2306 {
2307 	u32	epsts;
2308 	struct pch_udc_ep	*ep;
2309 	struct pch_udc_ep	*ep_out;
2310 
2311 	ep = &dev->ep[UDC_EP0IN_IDX];
2312 	ep_out = &dev->ep[UDC_EP0OUT_IDX];
2313 	epsts = ep->epsts;
2314 	ep->epsts = 0;
2315 
2316 	if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA | UDC_EPSTS_HE |
2317 		       UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
2318 		       UDC_EPSTS_XFERDONE)))
2319 		return;
2320 	if ((epsts & UDC_EPSTS_BNA))
2321 		return;
2322 	if (epsts & UDC_EPSTS_HE)
2323 		return;
2324 	if ((epsts & UDC_EPSTS_TDC) && (!dev->stall)) {
2325 		pch_udc_complete_transfer(ep);
2326 		pch_udc_clear_dma(dev, DMA_DIR_RX);
2327 		ep_out->td_data->status = (ep_out->td_data->status &
2328 					~PCH_UDC_BUFF_STS) |
2329 					PCH_UDC_BS_HST_RDY;
2330 		pch_udc_ep_clear_nak(ep_out);
2331 		pch_udc_set_dma(dev, DMA_DIR_RX);
2332 		pch_udc_ep_set_rrdy(ep_out);
2333 	}
2334 	/* On IN interrupt, provide data if we have any */
2335 	if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_TDC) &&
2336 	     !(epsts & UDC_EPSTS_TXEMPTY))
2337 		pch_udc_start_next_txrequest(ep);
2338 }
2339 
2340 /**
2341  * pch_udc_svc_control_out() - Routine that handles Control
2342  *					OUT endpoint interrupts
2343  * @dev:	Reference to the device structure
2344  */
2345 static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
2346 	__releases(&dev->lock)
2347 	__acquires(&dev->lock)
2348 {
2349 	u32	stat;
2350 	int setup_supported;
2351 	struct pch_udc_ep	*ep;
2352 
2353 	ep = &dev->ep[UDC_EP0OUT_IDX];
2354 	stat = ep->epsts;
2355 	ep->epsts = 0;
2356 
2357 	/* If setup data */
2358 	if (((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2359 	    UDC_EPSTS_OUT_SETUP) {
2360 		dev->stall = 0;
2361 		dev->ep[UDC_EP0IN_IDX].halted = 0;
2362 		dev->ep[UDC_EP0OUT_IDX].halted = 0;
2363 		dev->setup_data = ep->td_stp->request;
2364 		pch_udc_init_setup_buff(ep->td_stp);
2365 		pch_udc_clear_dma(dev, DMA_DIR_RX);
2366 		pch_udc_ep_fifo_flush(&(dev->ep[UDC_EP0IN_IDX]),
2367 				      dev->ep[UDC_EP0IN_IDX].in);
2368 		if ((dev->setup_data.bRequestType & USB_DIR_IN))
2369 			dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
2370 		else /* OUT */
2371 			dev->gadget.ep0 = &ep->ep;
2372 		spin_lock(&dev->lock);
2373 		/* If Mass storage Reset */
2374 		if ((dev->setup_data.bRequestType == 0x21) &&
2375 		    (dev->setup_data.bRequest == 0xFF))
2376 			dev->prot_stall = 0;
2377 		/* call gadget with setup data received */
2378 		setup_supported = dev->driver->setup(&dev->gadget,
2379 						     &dev->setup_data);
2380 		spin_unlock(&dev->lock);
2381 
2382 		if (dev->setup_data.bRequestType & USB_DIR_IN) {
2383 			ep->td_data->status = (ep->td_data->status &
2384 						~PCH_UDC_BUFF_STS) |
2385 						PCH_UDC_BS_HST_RDY;
2386 			pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
2387 		}
2388 		/* ep0 in returns data on IN phase */
2389 		if (setup_supported >= 0 && setup_supported <
2390 					    UDC_EP0IN_MAX_PKT_SIZE) {
2391 			pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
2392 			/* Gadget would have queued a request when
2393 			 * we called the setup */
2394 			if (!(dev->setup_data.bRequestType & USB_DIR_IN)) {
2395 				pch_udc_set_dma(dev, DMA_DIR_RX);
2396 				pch_udc_ep_clear_nak(ep);
2397 			}
2398 		} else if (setup_supported < 0) {
2399 			/* if unsupported request, then stall */
2400 			pch_udc_ep_set_stall(&(dev->ep[UDC_EP0IN_IDX]));
2401 			pch_udc_enable_ep_interrupts(ep->dev,
2402 						PCH_UDC_EPINT(ep->in, ep->num));
2403 			dev->stall = 0;
2404 			pch_udc_set_dma(dev, DMA_DIR_RX);
2405 		} else {
2406 			dev->waiting_zlp_ack = 1;
2407 		}
2408 	} else if ((((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2409 		     UDC_EPSTS_OUT_DATA) && !dev->stall) {
2410 		pch_udc_clear_dma(dev, DMA_DIR_RX);
2411 		pch_udc_ep_set_ddptr(ep, 0);
2412 		if (!list_empty(&ep->queue)) {
2413 			ep->epsts = stat;
2414 			pch_udc_svc_data_out(dev, PCH_UDC_EP0);
2415 		}
2416 		pch_udc_set_dma(dev, DMA_DIR_RX);
2417 	}
2418 	pch_udc_ep_set_rrdy(ep);
2419 }
2420 
2421 
2422 /**
2423  * pch_udc_postsvc_epinters() - This function enables endpoint interrupts
2424  *				and clears NAK status
2425  * @dev:	Reference to the device structure
2426  * @ep_num:	End point number
2427  */
2428 static void pch_udc_postsvc_epinters(struct pch_udc_dev *dev, int ep_num)
2429 {
2430 	struct pch_udc_ep	*ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
2431 	if (list_empty(&ep->queue))
2432 		return;
2433 	pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
2434 	pch_udc_ep_clear_nak(ep);
2435 }
2436 
2437 /**
2438  * pch_udc_read_all_epstatus() - This function reads the status of all endpoints
2439  * @dev:	Reference to the device structure
2440  * @ep_intr:	Status of endpoint interrupt
2441  */
2442 static void pch_udc_read_all_epstatus(struct pch_udc_dev *dev, u32 ep_intr)
2443 {
2444 	int i;
2445 	struct pch_udc_ep	*ep;
2446 
2447 	for (i = 0; i < PCH_UDC_USED_EP_NUM; i++) {
2448 		/* IN */
2449 		if (ep_intr & (0x1 << i)) {
2450 			ep = &dev->ep[UDC_EPIN_IDX(i)];
2451 			ep->epsts = pch_udc_read_ep_status(ep);
2452 			pch_udc_clear_ep_status(ep, ep->epsts);
2453 		}
2454 		/* OUT */
2455 		if (ep_intr & (0x10000 << i)) {
2456 			ep = &dev->ep[UDC_EPOUT_IDX(i)];
2457 			ep->epsts = pch_udc_read_ep_status(ep);
2458 			pch_udc_clear_ep_status(ep, ep->epsts);
2459 		}
2460 	}
2461 }
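
/*
 * Bit layout of the ep_intr mask handled above (derived from the shifts in
 * the loop): bit i (0..15) flags IN endpoint i, and bit 16+i flags OUT
 * endpoint i.  For example, ep_intr == 0x00020001 means IN ep0 and OUT ep1
 * both need servicing.
 */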
2462 
2463 /**
2464  * pch_udc_activate_control_ep() - This function enables the control endpoints
2465  *					for traffic after a reset
2466  * @dev:	Reference to the device structure
2467  */
2468 static void pch_udc_activate_control_ep(struct pch_udc_dev *dev)
2469 {
2470 	struct pch_udc_ep	*ep;
2471 	u32 val;
2472 
2473 	/* Setup the IN endpoint */
2474 	ep = &dev->ep[UDC_EP0IN_IDX];
2475 	pch_udc_clear_ep_control(ep);
2476 	pch_udc_ep_fifo_flush(ep, ep->in);
2477 	pch_udc_ep_set_bufsz(ep, UDC_EP0IN_BUFF_SIZE, ep->in);
2478 	pch_udc_ep_set_maxpkt(ep, UDC_EP0IN_MAX_PKT_SIZE);
2479 	/* Initialize the IN EP Descriptor */
2480 	ep->td_data      = NULL;
2481 	ep->td_stp       = NULL;
2482 	ep->td_data_phys = 0;
2483 	ep->td_stp_phys  = 0;
2484 
2485 	/* Setup the OUT endpoint */
2486 	ep = &dev->ep[UDC_EP0OUT_IDX];
2487 	pch_udc_clear_ep_control(ep);
2488 	pch_udc_ep_fifo_flush(ep, ep->in);
2489 	pch_udc_ep_set_bufsz(ep, UDC_EP0OUT_BUFF_SIZE, ep->in);
2490 	pch_udc_ep_set_maxpkt(ep, UDC_EP0OUT_MAX_PKT_SIZE);
2491 	val = UDC_EP0OUT_MAX_PKT_SIZE << UDC_CSR_NE_MAX_PKT_SHIFT;
2492 	pch_udc_write_csr(ep->dev, val, UDC_EP0OUT_IDX);
2493 
2494 	/* Initialize the SETUP buffer */
2495 	pch_udc_init_setup_buff(ep->td_stp);
2496 	/* Write the pointer address of the setup buffer descriptor */
2497 	pch_udc_ep_set_subptr(ep, ep->td_stp_phys);
2498 	/* Write the pointer address of the data descriptor */
2499 	pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
2500 
2501 	/* Initialize the dma descriptor */
2502 	ep->td_data->status  = PCH_UDC_DMA_LAST;
2503 	ep->td_data->dataptr = dev->dma_addr;
2504 	ep->td_data->next    = ep->td_data_phys;
2505 
2506 	pch_udc_ep_clear_nak(ep);
2507 }
2508 
2509 
2510 /**
2511  * pch_udc_svc_ur_interrupt() - This function handles a USB reset interrupt
2512  * @dev:	Reference to driver structure
2513  */
2514 static void pch_udc_svc_ur_interrupt(struct pch_udc_dev *dev)
2515 {
2516 	struct pch_udc_ep	*ep;
2517 	int i;
2518 
2519 	pch_udc_clear_dma(dev, DMA_DIR_TX);
2520 	pch_udc_clear_dma(dev, DMA_DIR_RX);
2521 	/* Mask all endpoint interrupts */
2522 	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
2523 	/* clear all endpoint interrupts */
2524 	pch_udc_write_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
2525 
2526 	for (i = 0; i < PCH_UDC_EP_NUM; i++) {
2527 		ep = &dev->ep[i];
2528 		pch_udc_clear_ep_status(ep, UDC_EPSTS_ALL_CLR_MASK);
2529 		pch_udc_clear_ep_control(ep);
2530 		pch_udc_ep_set_ddptr(ep, 0);
2531 		pch_udc_write_csr(ep->dev, 0x00, i);
2532 	}
2533 	dev->stall = 0;
2534 	dev->prot_stall = 0;
2535 	dev->waiting_zlp_ack = 0;
2536 	dev->set_cfg_not_acked = 0;
2537 
2538 	/* disable ep to empty req queue. Skip the control EP's */
2539 	for (i = 0; i < (PCH_UDC_USED_EP_NUM*2); i++) {
2540 		ep = &dev->ep[i];
2541 		pch_udc_ep_set_nak(ep);
2542 		pch_udc_ep_fifo_flush(ep, ep->in);
2543 		/* Complete request queue */
2544 		empty_req_queue(ep);
2545 	}
2546 	if (dev->driver) {
2547 		spin_unlock(&dev->lock);
2548 		usb_gadget_udc_reset(&dev->gadget, dev->driver);
2549 		spin_lock(&dev->lock);
2550 	}
2551 }
2552 
2553 /**
2554  * pch_udc_svc_enum_interrupt() - This function handles a USB speed enumeration
2555  *				done interrupt
2556  * @dev:	Reference to driver structure
2557  */
2558 static void pch_udc_svc_enum_interrupt(struct pch_udc_dev *dev)
2559 {
2560 	u32 dev_stat, dev_speed;
2561 	u32 speed = USB_SPEED_FULL;
2562 
2563 	dev_stat = pch_udc_read_device_status(dev);
2564 	dev_speed = (dev_stat & UDC_DEVSTS_ENUM_SPEED_MASK) >>
2565 						 UDC_DEVSTS_ENUM_SPEED_SHIFT;
2566 	switch (dev_speed) {
2567 	case UDC_DEVSTS_ENUM_SPEED_HIGH:
2568 		speed = USB_SPEED_HIGH;
2569 		break;
2570 	case  UDC_DEVSTS_ENUM_SPEED_FULL:
2571 		speed = USB_SPEED_FULL;
2572 		break;
2573 	case  UDC_DEVSTS_ENUM_SPEED_LOW:
2574 		speed = USB_SPEED_LOW;
2575 		break;
2576 	default:
2577 		BUG();
2578 	}
2579 	dev->gadget.speed = speed;
2580 	pch_udc_activate_control_ep(dev);
2581 	pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 | UDC_EPINT_OUT_EP0);
2582 	pch_udc_set_dma(dev, DMA_DIR_TX);
2583 	pch_udc_set_dma(dev, DMA_DIR_RX);
2584 	pch_udc_ep_set_rrdy(&(dev->ep[UDC_EP0OUT_IDX]));
2585 
2586 	/* enable device interrupts */
2587 	pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
2588 					UDC_DEVINT_ES | UDC_DEVINT_ENUM |
2589 					UDC_DEVINT_SI | UDC_DEVINT_SC);
2590 }
2591 
2592 /**
2593  * pch_udc_svc_intf_interrupt() - This function handles a set interface
2594  *				  interrupt
2595  * @dev:	Reference to driver structure
2596  */
2597 static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev)
2598 {
2599 	u32 reg, dev_stat = 0;
2600 	int i;
2601 
2602 	dev_stat = pch_udc_read_device_status(dev);
2603 	dev->cfg_data.cur_intf = (dev_stat & UDC_DEVSTS_INTF_MASK) >>
2604 							 UDC_DEVSTS_INTF_SHIFT;
2605 	dev->cfg_data.cur_alt = (dev_stat & UDC_DEVSTS_ALT_MASK) >>
2606 							 UDC_DEVSTS_ALT_SHIFT;
2607 	dev->set_cfg_not_acked = 1;
2608 	/* Construct the usb request for gadget driver and inform it */
2609 	memset(&dev->setup_data, 0 , sizeof dev->setup_data);
2610 	dev->setup_data.bRequest = USB_REQ_SET_INTERFACE;
2611 	dev->setup_data.bRequestType = USB_RECIP_INTERFACE;
2612 	dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_alt);
2613 	dev->setup_data.wIndex = cpu_to_le16(dev->cfg_data.cur_intf);
2614 	/* program the Endpoint Cfg registers */
2615 	/* Only one endpoint cfg register */
2616 	reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
2617 	reg = (reg & ~UDC_CSR_NE_INTF_MASK) |
2618 	      (dev->cfg_data.cur_intf << UDC_CSR_NE_INTF_SHIFT);
2619 	reg = (reg & ~UDC_CSR_NE_ALT_MASK) |
2620 	      (dev->cfg_data.cur_alt << UDC_CSR_NE_ALT_SHIFT);
2621 	pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
2622 	for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
2623 		/* clear stall bits */
2624 		pch_udc_ep_clear_stall(&(dev->ep[i]));
2625 		dev->ep[i].halted = 0;
2626 	}
2627 	dev->stall = 0;
2628 	spin_unlock(&dev->lock);
2629 	dev->driver->setup(&dev->gadget, &dev->setup_data);
2630 	spin_lock(&dev->lock);
2631 }
2632 
2633 /**
2634  * pch_udc_svc_cfg_interrupt() - This function handles a set configuration
2635  *				interrupt
2636  * @dev:	Reference to driver structure
2637  */
2638 static void pch_udc_svc_cfg_interrupt(struct pch_udc_dev *dev)
2639 {
2640 	int i;
2641 	u32 reg, dev_stat = 0;
2642 
2643 	dev_stat = pch_udc_read_device_status(dev);
2644 	dev->set_cfg_not_acked = 1;
2645 	dev->cfg_data.cur_cfg = (dev_stat & UDC_DEVSTS_CFG_MASK) >>
2646 				UDC_DEVSTS_CFG_SHIFT;
2647 	/* make usb request for gadget driver */
2648 	memset(&dev->setup_data, 0 , sizeof dev->setup_data);
2649 	dev->setup_data.bRequest = USB_REQ_SET_CONFIGURATION;
2650 	dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_cfg);
2651 	/* program the NE registers */
2652 	/* Only one endpoint cfg register */
2653 	reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
2654 	reg = (reg & ~UDC_CSR_NE_CFG_MASK) |
2655 	      (dev->cfg_data.cur_cfg << UDC_CSR_NE_CFG_SHIFT);
2656 	pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
2657 	for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
2658 		/* clear stall bits */
2659 		pch_udc_ep_clear_stall(&(dev->ep[i]));
2660 		dev->ep[i].halted = 0;
2661 	}
2662 	dev->stall = 0;
2663 
2664 	/* call gadget zero with setup data received */
2665 	spin_unlock(&dev->lock);
2666 	dev->driver->setup(&dev->gadget, &dev->setup_data);
2667 	spin_lock(&dev->lock);
2668 }
2669 
2670 /**
2671  * pch_udc_dev_isr() - This function services device interrupts
2672  *			by invoking appropriate routines.
2673  * @dev:	Reference to the device structure
2674  * @dev_intr:	The Device interrupt status.
2675  */
2676 static void pch_udc_dev_isr(struct pch_udc_dev *dev, u32 dev_intr)
2677 {
2678 	int vbus;
2679 
2680 	/* USB Reset Interrupt */
2681 	if (dev_intr & UDC_DEVINT_UR) {
2682 		pch_udc_svc_ur_interrupt(dev);
2683 		dev_dbg(&dev->pdev->dev, "USB_RESET\n");
2684 	}
2685 	/* Enumeration Done Interrupt */
2686 	if (dev_intr & UDC_DEVINT_ENUM) {
2687 		pch_udc_svc_enum_interrupt(dev);
2688 		dev_dbg(&dev->pdev->dev, "USB_ENUM\n");
2689 	}
2690 	/* Set Interface Interrupt */
2691 	if (dev_intr & UDC_DEVINT_SI)
2692 		pch_udc_svc_intf_interrupt(dev);
2693 	/* Set Config Interrupt */
2694 	if (dev_intr & UDC_DEVINT_SC)
2695 		pch_udc_svc_cfg_interrupt(dev);
2696 	/* USB Suspend interrupt */
2697 	if (dev_intr & UDC_DEVINT_US) {
2698 		if (dev->driver
2699 			&& dev->driver->suspend) {
2700 			spin_unlock(&dev->lock);
2701 			dev->driver->suspend(&dev->gadget);
2702 			spin_lock(&dev->lock);
2703 		}
2704 
2705 		vbus = pch_vbus_gpio_get_value(dev);
2706 		if ((dev->vbus_session == 0)
2707 			&& (vbus != 1)) {
2708 			if (dev->driver && dev->driver->disconnect) {
2709 				spin_unlock(&dev->lock);
2710 				dev->driver->disconnect(&dev->gadget);
2711 				spin_lock(&dev->lock);
2712 			}
2713 			pch_udc_reconnect(dev);
2714 		} else if ((dev->vbus_session == 0)
2715 			&& (vbus == 1)
2716 			&& !dev->vbus_gpio.intr)
2717 			schedule_work(&dev->vbus_gpio.irq_work_fall);
2718 
2719 		dev_dbg(&dev->pdev->dev, "USB_SUSPEND\n");
2720 	}
2721 	/* Clear the SOF interrupt, if enabled */
2722 	if (dev_intr & UDC_DEVINT_SOF)
2723 		dev_dbg(&dev->pdev->dev, "SOF\n");
2724 	/* ES interrupt, IDLE > 3ms on the USB */
2725 	if (dev_intr & UDC_DEVINT_ES)
2726 		dev_dbg(&dev->pdev->dev, "ES\n");
2727 	/* RWKP interrupt */
2728 	if (dev_intr & UDC_DEVINT_RWKP)
2729 		dev_dbg(&dev->pdev->dev, "RWKP\n");
2730 }
2731 
2732 /**
2733  * pch_udc_isr() - This function handles interrupts from the PCH USB Device
2734  * @irq:	Interrupt request number
2735  * @pdev:	Reference to the device structure
2736  */
2737 static irqreturn_t pch_udc_isr(int irq, void *pdev)
2738 {
2739 	struct pch_udc_dev *dev = (struct pch_udc_dev *) pdev;
2740 	u32 dev_intr, ep_intr;
2741 	int i;
2742 
2743 	dev_intr = pch_udc_read_device_interrupts(dev);
2744 	ep_intr = pch_udc_read_ep_interrupts(dev);
2745 
2746 	/* On hot plug, detect whether the controller is hung up. */
2747 	if (dev_intr == ep_intr)
2748 		if (dev_intr == pch_udc_readl(dev, UDC_DEVCFG_ADDR)) {
2749 			dev_dbg(&dev->pdev->dev, "UDC: Hung up\n");
2750 			/* The controller is reset */
2751 			pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
2752 			return IRQ_HANDLED;
2753 		}
2754 	if (dev_intr)
2755 		/* Clear device interrupts */
2756 		pch_udc_write_device_interrupts(dev, dev_intr);
2757 	if (ep_intr)
2758 		/* Clear ep interrupts */
2759 		pch_udc_write_ep_interrupts(dev, ep_intr);
2760 	if (!dev_intr && !ep_intr)
2761 		return IRQ_NONE;
2762 	spin_lock(&dev->lock);
2763 	if (dev_intr)
2764 		pch_udc_dev_isr(dev, dev_intr);
2765 	if (ep_intr) {
2766 		pch_udc_read_all_epstatus(dev, ep_intr);
2767 		/* Process Control In interrupts, if present */
2768 		if (ep_intr & UDC_EPINT_IN_EP0) {
2769 			pch_udc_svc_control_in(dev);
2770 			pch_udc_postsvc_epinters(dev, 0);
2771 		}
2772 		/* Process Control Out interrupts, if present */
2773 		if (ep_intr & UDC_EPINT_OUT_EP0)
2774 			pch_udc_svc_control_out(dev);
2775 		/* Process data in end point interrupts */
2776 		for (i = 1; i < PCH_UDC_USED_EP_NUM; i++) {
2777 			if (ep_intr & (1 <<  i)) {
2778 				pch_udc_svc_data_in(dev, i);
2779 				pch_udc_postsvc_epinters(dev, i);
2780 			}
2781 		}
2782 		/* Process data out end point interrupts */
2783 		for (i = UDC_EPINT_OUT_SHIFT + 1; i < (UDC_EPINT_OUT_SHIFT +
2784 						 PCH_UDC_USED_EP_NUM); i++)
2785 			if (ep_intr & (1 <<  i))
2786 				pch_udc_svc_data_out(dev, i -
2787 							 UDC_EPINT_OUT_SHIFT);
2788 	}
2789 	spin_unlock(&dev->lock);
2790 	return IRQ_HANDLED;
2791 }
2792 
2793 /**
2794  * pch_udc_setup_ep0() - This function enables the control endpoint for traffic
2795  * @dev:	Reference to the device structure
2796  */
2797 static void pch_udc_setup_ep0(struct pch_udc_dev *dev)
2798 {
2799 	/* enable ep0 interrupts */
2800 	pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 |
2801 						UDC_EPINT_OUT_EP0);
2802 	/* enable device interrupts */
2803 	pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
2804 				       UDC_DEVINT_ES | UDC_DEVINT_ENUM |
2805 				       UDC_DEVINT_SI | UDC_DEVINT_SC);
2806 }
2807 
2808 /**
2809  * pch_udc_pcd_reinit() - This API initializes the endpoint structures
2810  * @dev:	Reference to the driver structure
2811  */
2812 static void pch_udc_pcd_reinit(struct pch_udc_dev *dev)
2813 {
2814 	const char *const ep_string[] = {
2815 		ep0_string, "ep0out", "ep1in", "ep1out", "ep2in", "ep2out",
2816 		"ep3in", "ep3out", "ep4in", "ep4out", "ep5in", "ep5out",
2817 		"ep6in", "ep6out", "ep7in", "ep7out", "ep8in", "ep8out",
2818 		"ep9in", "ep9out", "ep10in", "ep10out", "ep11in", "ep11out",
2819 		"ep12in", "ep12out", "ep13in", "ep13out", "ep14in", "ep14out",
2820 		"ep15in", "ep15out",
2821 	};
2822 	int i;
2823 
2824 	dev->gadget.speed = USB_SPEED_UNKNOWN;
2825 	INIT_LIST_HEAD(&dev->gadget.ep_list);
2826 
2827 	/* Initialize the endpoints structures */
2828 	memset(dev->ep, 0, sizeof dev->ep);
2829 	for (i = 0; i < PCH_UDC_EP_NUM; i++) {
2830 		struct pch_udc_ep *ep = &dev->ep[i];
2831 		ep->dev = dev;
2832 		ep->halted = 1;
2833 		ep->num = i / 2;
2834 		ep->in = ~i & 1;
2835 		ep->ep.name = ep_string[i];
2836 		ep->ep.ops = &pch_udc_ep_ops;
2837 		if (ep->in) {
2838 			ep->offset_addr = ep->num * UDC_EP_REG_SHIFT;
2839 			ep->ep.caps.dir_in = true;
2840 		} else {
2841 			ep->offset_addr = (UDC_EPINT_OUT_SHIFT + ep->num) *
2842 					  UDC_EP_REG_SHIFT;
2843 			ep->ep.caps.dir_out = true;
2844 		}
2845 		if (i == UDC_EP0IN_IDX || i == UDC_EP0OUT_IDX) {
2846 			ep->ep.caps.type_control = true;
2847 		} else {
2848 			ep->ep.caps.type_iso = true;
2849 			ep->ep.caps.type_bulk = true;
2850 			ep->ep.caps.type_int = true;
2851 		}
2852 		/* need to set ep->ep.maxpacket and set Default Configuration? */
2853 		usb_ep_set_maxpacket_limit(&ep->ep, UDC_BULK_MAX_PKT_SIZE);
2854 		list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
2855 		INIT_LIST_HEAD(&ep->queue);
2856 	}
2857 	usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IDX].ep, UDC_EP0IN_MAX_PKT_SIZE);
2858 	usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IDX].ep, UDC_EP0OUT_MAX_PKT_SIZE);
2859 
2860 	/* remove ep0 in and out from the list; they have their own pointers */
2861 	list_del_init(&dev->ep[UDC_EP0IN_IDX].ep.ep_list);
2862 	list_del_init(&dev->ep[UDC_EP0OUT_IDX].ep.ep_list);
2863 
2864 	dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
2865 	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
2866 }
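
/*
 * Resulting dev->ep[] indexing (follows from ep->num = i / 2 and
 * ep->in = ~i & 1 above): even indexes are IN endpoints and odd indexes are
 * OUT endpoints, e.g. dev->ep[1] is "ep1out"-style "ep0out" and dev->ep[3]
 * is "ep1out", while dev->ep[0] is the ep0 IN endpoint (ep0_string).
 */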
2867 
2868 /**
2869  * pch_udc_pcd_init() - This API initializes the driver structure
2870  * @dev:	Reference to the driver structure
2871  *
2872  * Return codes:
2873  *	0: Success
2874  */
2875 static int pch_udc_pcd_init(struct pch_udc_dev *dev)
2876 {
2877 	pch_udc_init(dev);
2878 	pch_udc_pcd_reinit(dev);
2879 	pch_vbus_gpio_init(dev);
2880 	return 0;
2881 }
2882 
2883 /**
2884  * init_dma_pools() - create dma pools during initialization
2885  * @dev:	reference to struct pch_udc_dev
2886  */
2887 static int init_dma_pools(struct pch_udc_dev *dev)
2888 {
2889 	struct pch_udc_stp_dma_desc	*td_stp;
2890 	struct pch_udc_data_dma_desc	*td_data;
2891 	void				*ep0out_buf;
2892 
2893 	/* DMA setup */
2894 	dev->data_requests = dma_pool_create("data_requests", &dev->pdev->dev,
2895 		sizeof(struct pch_udc_data_dma_desc), 0, 0);
2896 	if (!dev->data_requests) {
2897 		dev_err(&dev->pdev->dev, "%s: can't get request data pool\n",
2898 			__func__);
2899 		return -ENOMEM;
2900 	}
2901 
2902 	/* dma desc for setup data */
2903 	dev->stp_requests = dma_pool_create("setup requests", &dev->pdev->dev,
2904 		sizeof(struct pch_udc_stp_dma_desc), 0, 0);
2905 	if (!dev->stp_requests) {
2906 		dev_err(&dev->pdev->dev, "%s: can't get setup request pool\n",
2907 			__func__);
2908 		return -ENOMEM;
2909 	}
2910 	/* setup */
2911 	td_stp = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
2912 				&dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
2913 	if (!td_stp) {
2914 		dev_err(&dev->pdev->dev,
2915 			"%s: can't allocate setup dma descriptor\n", __func__);
2916 		return -ENOMEM;
2917 	}
2918 	dev->ep[UDC_EP0OUT_IDX].td_stp = td_stp;
2919 
2920 	/* data: 0 packets !? */
2921 	td_data = dma_pool_alloc(dev->data_requests, GFP_KERNEL,
2922 				&dev->ep[UDC_EP0OUT_IDX].td_data_phys);
2923 	if (!td_data) {
2924 		dev_err(&dev->pdev->dev,
2925 			"%s: can't allocate data dma descriptor\n", __func__);
2926 		return -ENOMEM;
2927 	}
2928 	dev->ep[UDC_EP0OUT_IDX].td_data = td_data;
2929 	dev->ep[UDC_EP0IN_IDX].td_stp = NULL;
2930 	dev->ep[UDC_EP0IN_IDX].td_stp_phys = 0;
2931 	dev->ep[UDC_EP0IN_IDX].td_data = NULL;
2932 	dev->ep[UDC_EP0IN_IDX].td_data_phys = 0;
2933 
2934 	ep0out_buf = devm_kzalloc(&dev->pdev->dev, UDC_EP0OUT_BUFF_SIZE * 4,
2935 				  GFP_KERNEL);
2936 	if (!ep0out_buf)
2937 		return -ENOMEM;
2938 	dev->dma_addr = dma_map_single(&dev->pdev->dev, ep0out_buf,
2939 				       UDC_EP0OUT_BUFF_SIZE * 4,
2940 				       DMA_FROM_DEVICE);
2941 	return 0;
2942 }
2943 
2944 static int pch_udc_start(struct usb_gadget *g,
2945 		struct usb_gadget_driver *driver)
2946 {
2947 	struct pch_udc_dev	*dev = to_pch_udc(g);
2948 
2949 	driver->driver.bus = NULL;
2950 	dev->driver = driver;
2951 
2952 	/* get ready for ep0 traffic */
2953 	pch_udc_setup_ep0(dev);
2954 
2955 	/* clear SD */
2956 	if ((pch_vbus_gpio_get_value(dev) != 0) || !dev->vbus_gpio.intr)
2957 		pch_udc_clear_disconnect(dev);
2958 
2959 	dev->connected = 1;
2960 	return 0;
2961 }
2962 
2963 static int pch_udc_stop(struct usb_gadget *g)
2964 {
2965 	struct pch_udc_dev	*dev = to_pch_udc(g);
2966 
2967 	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
2968 
2969 	/* Assures that there are no pending requests with this driver */
2970 	dev->driver = NULL;
2971 	dev->connected = 0;
2972 
2973 	/* set SD */
2974 	pch_udc_set_disconnect(dev);
2975 
2976 	return 0;
2977 }
2978 
2979 static void pch_udc_shutdown(struct pci_dev *pdev)
2980 {
2981 	struct pch_udc_dev *dev = pci_get_drvdata(pdev);
2982 
2983 	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
2984 	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
2985 
2986 	/* disable the pullup so the host will think we're gone */
2987 	pch_udc_set_disconnect(dev);
2988 }
2989 
2990 static void pch_udc_remove(struct pci_dev *pdev)
2991 {
2992 	struct pch_udc_dev	*dev = pci_get_drvdata(pdev);
2993 
2994 	usb_del_gadget_udc(&dev->gadget);
2995 
2996 	/* gadget driver must not be registered */
2997 	if (dev->driver)
2998 		dev_err(&pdev->dev,
2999 			"%s: gadget driver still bound!!!\n", __func__);
3000 	/* dma pool cleanup */
3001 	dma_pool_destroy(dev->data_requests);
3002 
3003 	if (dev->stp_requests) {
3004 		/* cleanup DMA descriptors for ep0 out */
3005 		if (dev->ep[UDC_EP0OUT_IDX].td_stp) {
3006 			dma_pool_free(dev->stp_requests,
3007 				dev->ep[UDC_EP0OUT_IDX].td_stp,
3008 				dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
3009 		}
3010 		if (dev->ep[UDC_EP0OUT_IDX].td_data) {
3011 			dma_pool_free(dev->stp_requests,
3012 				dev->ep[UDC_EP0OUT_IDX].td_data,
3013 				dev->ep[UDC_EP0OUT_IDX].td_data_phys);
3014 		}
3015 		dma_pool_destroy(dev->stp_requests);
3016 	}
3017 
3018 	if (dev->dma_addr)
3019 		dma_unmap_single(&dev->pdev->dev, dev->dma_addr,
3020 				 UDC_EP0OUT_BUFF_SIZE * 4, DMA_FROM_DEVICE);
3021 
3022 	pch_vbus_gpio_free(dev);
3023 
3024 	pch_udc_exit(dev);
3025 }
3026 
3027 #ifdef CONFIG_PM_SLEEP
3028 static int pch_udc_suspend(struct device *d)
3029 {
3030 	struct pch_udc_dev *dev = dev_get_drvdata(d);
3031 
3032 	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
3033 	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
3034 
3035 	return 0;
3036 }
3037 
3038 static int pch_udc_resume(struct device *d)
3039 {
3040 	return 0;
3041 }
3042 
3043 static SIMPLE_DEV_PM_OPS(pch_udc_pm, pch_udc_suspend, pch_udc_resume);
3044 #define PCH_UDC_PM_OPS		(&pch_udc_pm)
3045 #else
3046 #define PCH_UDC_PM_OPS		NULL
3047 #endif /* CONFIG_PM_SLEEP */
3048 
3049 static int pch_udc_probe(struct pci_dev *pdev,
3050 			  const struct pci_device_id *id)
3051 {
3052 	int			bar;
3053 	int			retval;
3054 	struct pch_udc_dev	*dev;
3055 
3056 	/* init */
3057 	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
3058 	if (!dev)
3059 		return -ENOMEM;
3060 
3061 	/* pci setup */
3062 	retval = pcim_enable_device(pdev);
3063 	if (retval)
3064 		return retval;
3065 
3066 	pci_set_drvdata(pdev, dev);
3067 
3068 	/* Determine BAR based on PCI ID */
3069 	if (id->device == PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC)
3070 		bar = PCH_UDC_PCI_BAR_QUARK_X1000;
3071 	else
3072 		bar = PCH_UDC_PCI_BAR;
3073 
3074 	/* PCI resource allocation */
3075 	retval = pcim_iomap_regions(pdev, 1 << bar, pci_name(pdev));
3076 	if (retval)
3077 		return retval;
3078 
3079 	dev->base_addr = pcim_iomap_table(pdev)[bar];
3080 
3081 	/*
3082 	 * FIXME: add a GPIO descriptor table to pdev.dev using
3083 	 * gpiod_add_descriptor_table() from <linux/gpio/machine.h> based on
3084 	 * the PCI subsystem ID. The system-dependent GPIO is necessary for
3085 	 * VBUS operation.
3086 	 */
3087 
3088 	/* initialize the hardware */
3089 	if (pch_udc_pcd_init(dev))
3090 		return -ENODEV;
3091 
3092 	pci_enable_msi(pdev);
3093 
3094 	retval = devm_request_irq(&pdev->dev, pdev->irq, pch_udc_isr,
3095 				  IRQF_SHARED, KBUILD_MODNAME, dev);
3096 	if (retval) {
3097 		dev_err(&pdev->dev, "%s: request_irq(%d) fail\n", __func__,
3098 			pdev->irq);
3099 		goto finished;
3100 	}
3101 
3102 	pci_set_master(pdev);
3103 	pci_try_set_mwi(pdev);
3104 
3105 	/* device struct setup */
3106 	spin_lock_init(&dev->lock);
3107 	dev->pdev = pdev;
3108 	dev->gadget.ops = &pch_udc_ops;
3109 
3110 	retval = init_dma_pools(dev);
3111 	if (retval)
3112 		goto finished;
3113 
3114 	dev->gadget.name = KBUILD_MODNAME;
3115 	dev->gadget.max_speed = USB_SPEED_HIGH;
3116 
3117 	/* Put the device in disconnected state till a driver is bound */
3118 	pch_udc_set_disconnect(dev);
3119 	retval = usb_add_gadget_udc(&pdev->dev, &dev->gadget);
3120 	if (retval)
3121 		goto finished;
3122 	return 0;
3123 
3124 finished:
3125 	pch_udc_remove(pdev);
3126 	return retval;
3127 }
3128 
3129 static const struct pci_device_id pch_udc_pcidev_id[] = {
3130 	{
3131 		PCI_DEVICE(PCI_VENDOR_ID_INTEL,
3132 			   PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC),
3133 		.class = PCI_CLASS_SERIAL_USB_DEVICE,
3134 		.class_mask = 0xffffffff,
3135 	},
3136 	{
3137 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EG20T_UDC),
3138 		.class = PCI_CLASS_SERIAL_USB_DEVICE,
3139 		.class_mask = 0xffffffff,
3140 	},
3141 	{
3142 		PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7213_IOH_UDC),
3143 		.class = PCI_CLASS_SERIAL_USB_DEVICE,
3144 		.class_mask = 0xffffffff,
3145 	},
3146 	{
3147 		PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7831_IOH_UDC),
3148 		.class = PCI_CLASS_SERIAL_USB_DEVICE,
3149 		.class_mask = 0xffffffff,
3150 	},
3151 	{ 0 },
3152 };
3153 
3154 MODULE_DEVICE_TABLE(pci, pch_udc_pcidev_id);
3155 
3156 static struct pci_driver pch_udc_driver = {
3157 	.name =	KBUILD_MODNAME,
3158 	.id_table =	pch_udc_pcidev_id,
3159 	.probe =	pch_udc_probe,
3160 	.remove =	pch_udc_remove,
3161 	.shutdown =	pch_udc_shutdown,
3162 	.driver = {
3163 		.pm = PCH_UDC_PM_OPS,
3164 	},
3165 };
3166 
3167 module_pci_driver(pch_udc_driver);
3168 
3169 MODULE_DESCRIPTION("Intel EG20T USB Device Controller");
3170 MODULE_AUTHOR("LAPIS Semiconductor, <tomoya-linux@dsn.lapis-semi.com>");
3171 MODULE_LICENSE("GPL");
3172