xref: /openbmc/linux/drivers/usb/gadget/udc/pch_udc.c (revision 151f4e2b)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
4  */
5 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
6 #include <linux/kernel.h>
7 #include <linux/module.h>
8 #include <linux/pci.h>
9 #include <linux/delay.h>
10 #include <linux/errno.h>
11 #include <linux/list.h>
12 #include <linux/interrupt.h>
13 #include <linux/usb/ch9.h>
14 #include <linux/usb/gadget.h>
15 #include <linux/gpio.h>
16 #include <linux/irq.h>
17 
18 /* GPIO port for VBUS detecting */
19 static int vbus_gpio_port = -1;		/* GPIO port number (-1:Not used) */
20 
21 #define PCH_VBUS_PERIOD		3000	/* VBUS polling period (msec) */
22 #define PCH_VBUS_INTERVAL	10	/* VBUS polling interval (msec) */
23 
24 /* Address offset of Registers */
25 #define UDC_EP_REG_SHIFT	0x20	/* Offset to next EP */
26 
27 #define UDC_EPCTL_ADDR		0x00	/* Endpoint control */
28 #define UDC_EPSTS_ADDR		0x04	/* Endpoint status */
29 #define UDC_BUFIN_FRAMENUM_ADDR	0x08	/* buffer size in / frame number out */
30 #define UDC_BUFOUT_MAXPKT_ADDR	0x0C	/* buffer size out / maxpkt in */
31 #define UDC_SUBPTR_ADDR		0x10	/* setup buffer pointer */
32 #define UDC_DESPTR_ADDR		0x14	/* Data descriptor pointer */
33 #define UDC_CONFIRM_ADDR	0x18	/* Write/Read confirmation */
34 
35 #define UDC_DEVCFG_ADDR		0x400	/* Device configuration */
36 #define UDC_DEVCTL_ADDR		0x404	/* Device control */
37 #define UDC_DEVSTS_ADDR		0x408	/* Device status */
38 #define UDC_DEVIRQSTS_ADDR	0x40C	/* Device irq status */
39 #define UDC_DEVIRQMSK_ADDR	0x410	/* Device irq mask */
40 #define UDC_EPIRQSTS_ADDR	0x414	/* Endpoint irq status */
41 #define UDC_EPIRQMSK_ADDR	0x418	/* Endpoint irq mask */
42 #define UDC_DEVLPM_ADDR		0x41C	/* LPM control / status */
43 #define UDC_CSR_BUSY_ADDR	0x4f0	/* UDC_CSR_BUSY Status register */
44 #define UDC_SRST_ADDR		0x4fc	/* SOFT RESET register */
45 #define UDC_CSR_ADDR		0x500	/* USB_DEVICE endpoint register */
46 
47 /* Endpoint control register */
48 /* Bit position */
49 #define UDC_EPCTL_MRXFLUSH		(1 << 12)
50 #define UDC_EPCTL_RRDY			(1 << 9)
51 #define UDC_EPCTL_CNAK			(1 << 8)
52 #define UDC_EPCTL_SNAK			(1 << 7)
53 #define UDC_EPCTL_NAK			(1 << 6)
54 #define UDC_EPCTL_P			(1 << 3)
55 #define UDC_EPCTL_F			(1 << 1)
56 #define UDC_EPCTL_S			(1 << 0)
57 #define UDC_EPCTL_ET_SHIFT		4
58 /* Mask pattern */
59 #define UDC_EPCTL_ET_MASK		0x00000030
60 /* Value for ET field */
61 #define UDC_EPCTL_ET_CONTROL		0
62 #define UDC_EPCTL_ET_ISO		1
63 #define UDC_EPCTL_ET_BULK		2
64 #define UDC_EPCTL_ET_INTERRUPT		3
65 
66 /* Endpoint status register */
67 /* Bit position */
68 #define UDC_EPSTS_XFERDONE		(1 << 27)
69 #define UDC_EPSTS_RSS			(1 << 26)
70 #define UDC_EPSTS_RCS			(1 << 25)
71 #define UDC_EPSTS_TXEMPTY		(1 << 24)
72 #define UDC_EPSTS_TDC			(1 << 10)
73 #define UDC_EPSTS_HE			(1 << 9)
74 #define UDC_EPSTS_MRXFIFO_EMP		(1 << 8)
75 #define UDC_EPSTS_BNA			(1 << 7)
76 #define UDC_EPSTS_IN			(1 << 6)
77 #define UDC_EPSTS_OUT_SHIFT		4
78 /* Mask pattern */
79 #define UDC_EPSTS_OUT_MASK		0x00000030
80 #define UDC_EPSTS_ALL_CLR_MASK		0x1F0006F0
81 /* Value for OUT field */
82 #define UDC_EPSTS_OUT_SETUP		2
83 #define UDC_EPSTS_OUT_DATA		1
84 
85 /* Device configuration register */
86 /* Bit position */
87 #define UDC_DEVCFG_CSR_PRG		(1 << 17)
88 #define UDC_DEVCFG_SP			(1 << 3)
89 /* SPD Value */
90 #define UDC_DEVCFG_SPD_HS		0x0
91 #define UDC_DEVCFG_SPD_FS		0x1
92 #define UDC_DEVCFG_SPD_LS		0x2
93 
94 /* Device control register */
95 /* Bit position */
96 #define UDC_DEVCTL_THLEN_SHIFT		24
97 #define UDC_DEVCTL_BRLEN_SHIFT		16
98 #define UDC_DEVCTL_CSR_DONE		(1 << 13)
99 #define UDC_DEVCTL_SD			(1 << 10)
100 #define UDC_DEVCTL_MODE			(1 << 9)
101 #define UDC_DEVCTL_BREN			(1 << 8)
102 #define UDC_DEVCTL_THE			(1 << 7)
103 #define UDC_DEVCTL_DU			(1 << 4)
104 #define UDC_DEVCTL_TDE			(1 << 3)
105 #define UDC_DEVCTL_RDE			(1 << 2)
106 #define UDC_DEVCTL_RES			(1 << 0)
107 
108 /* Device status register */
109 /* Bit position */
110 #define UDC_DEVSTS_TS_SHIFT		18
111 #define UDC_DEVSTS_ENUM_SPEED_SHIFT	13
112 #define UDC_DEVSTS_ALT_SHIFT		8
113 #define UDC_DEVSTS_INTF_SHIFT		4
114 #define UDC_DEVSTS_CFG_SHIFT		0
115 /* Mask pattern */
116 #define UDC_DEVSTS_TS_MASK		0xfffc0000
117 #define UDC_DEVSTS_ENUM_SPEED_MASK	0x00006000
118 #define UDC_DEVSTS_ALT_MASK		0x00000f00
119 #define UDC_DEVSTS_INTF_MASK		0x000000f0
120 #define UDC_DEVSTS_CFG_MASK		0x0000000f
121 /* Values of the SPEED field (enumerated speed) */
122 #define UDC_DEVSTS_ENUM_SPEED_FULL	1
123 #define UDC_DEVSTS_ENUM_SPEED_HIGH	0
124 #define UDC_DEVSTS_ENUM_SPEED_LOW	2
125 #define UDC_DEVSTS_ENUM_SPEED_FULLX	3
126 
127 /* Device irq register */
128 /* Bit position */
129 #define UDC_DEVINT_RWKP			(1 << 7)
130 #define UDC_DEVINT_ENUM			(1 << 6)
131 #define UDC_DEVINT_SOF			(1 << 5)
132 #define UDC_DEVINT_US			(1 << 4)
133 #define UDC_DEVINT_UR			(1 << 3)
134 #define UDC_DEVINT_ES			(1 << 2)
135 #define UDC_DEVINT_SI			(1 << 1)
136 #define UDC_DEVINT_SC			(1 << 0)
137 /* Mask pattern */
138 #define UDC_DEVINT_MSK			0x7f
139 
140 /* Endpoint irq register */
141 /* Bit position */
142 #define UDC_EPINT_IN_SHIFT		0
143 #define UDC_EPINT_OUT_SHIFT		16
144 #define UDC_EPINT_IN_EP0		(1 << 0)
145 #define UDC_EPINT_OUT_EP0		(1 << 16)
146 /* Mask pattern */
147 #define UDC_EPINT_MSK_DISABLE_ALL	0xffffffff
148 
149 /* UDC_CSR_BUSY Status register */
150 /* Bit position */
151 #define UDC_CSR_BUSY			(1 << 0)
152 
153 /* SOFT RESET register */
154 /* Bit position */
155 #define UDC_PSRST			(1 << 1)
156 #define UDC_SRST			(1 << 0)
157 
158 /* USB_DEVICE endpoint register */
159 /* Bit position */
160 #define UDC_CSR_NE_NUM_SHIFT		0
161 #define UDC_CSR_NE_DIR_SHIFT		4
162 #define UDC_CSR_NE_TYPE_SHIFT		5
163 #define UDC_CSR_NE_CFG_SHIFT		7
164 #define UDC_CSR_NE_INTF_SHIFT		11
165 #define UDC_CSR_NE_ALT_SHIFT		15
166 #define UDC_CSR_NE_MAX_PKT_SHIFT	19
167 /* Mask pattern */
168 #define UDC_CSR_NE_NUM_MASK		0x0000000f
169 #define UDC_CSR_NE_DIR_MASK		0x00000010
170 #define UDC_CSR_NE_TYPE_MASK		0x00000060
171 #define UDC_CSR_NE_CFG_MASK		0x00000780
172 #define UDC_CSR_NE_INTF_MASK		0x00007800
173 #define UDC_CSR_NE_ALT_MASK		0x00078000
174 #define UDC_CSR_NE_MAX_PKT_MASK		0x3ff80000
175 
176 #define PCH_UDC_CSR(ep)	(UDC_CSR_ADDR + ep*4)
177 #define PCH_UDC_EPINT(in, num)\
178 		(1 << (num + (in ? UDC_EPINT_IN_SHIFT : UDC_EPINT_OUT_SHIFT)))
179 
180 /* Index of endpoint */
181 #define UDC_EP0IN_IDX		0
182 #define UDC_EP0OUT_IDX		1
183 #define UDC_EPIN_IDX(ep)	(ep * 2)
184 #define UDC_EPOUT_IDX(ep)	(ep * 2 + 1)
185 #define PCH_UDC_EP0		0
186 #define PCH_UDC_EP1		1
187 #define PCH_UDC_EP2		2
188 #define PCH_UDC_EP3		3
189 
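/*
 * Worked example (added for clarity; not part of the original register
 * map): configurable endpoint 1 is indexed as UDC_EPIN_IDX(1) == 2 for
 * its IN side and UDC_EPOUT_IDX(1) == 3 for its OUT side, so the
 * corresponding UDC_CSR registers live at PCH_UDC_CSR(2) == 0x508 and
 * PCH_UDC_CSR(3) == 0x50C.  Its interrupt bits are
 * PCH_UDC_EPINT(1, 1) == (1 << 1) for IN and
 * PCH_UDC_EPINT(0, 1) == (1 << 17) for OUT.
 */
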
190 /* Number of endpoint */
191 #define PCH_UDC_EP_NUM		32	/* Total number of EPs (16 IN,16 OUT) */
192 #define PCH_UDC_USED_EP_NUM	4	/* Number of EPs actually used */
193 /* Length Value */
194 #define PCH_UDC_BRLEN		0x0F	/* Burst length */
195 #define PCH_UDC_THLEN		0x1F	/* Threshold length */
196 /* Value of EP Buffer Size */
197 #define UDC_EP0IN_BUFF_SIZE	16
198 #define UDC_EPIN_BUFF_SIZE	256
199 #define UDC_EP0OUT_BUFF_SIZE	16
200 #define UDC_EPOUT_BUFF_SIZE	256
201 /* Value of EP maximum packet size */
202 #define UDC_EP0IN_MAX_PKT_SIZE	64
203 #define UDC_EP0OUT_MAX_PKT_SIZE	64
204 #define UDC_BULK_MAX_PKT_SIZE	512
205 
206 /* DMA */
207 #define DMA_DIR_RX		1	/* DMA for data receive */
208 #define DMA_DIR_TX		2	/* DMA for data transmit */
209 #define DMA_ADDR_INVALID	(~(dma_addr_t)0)
210 #define UDC_DMA_MAXPACKET	65536	/* maximum packet size for DMA */
211 
212 /**
213  * struct pch_udc_data_dma_desc - Structure to hold DMA descriptor information
214  *				  for data
215  * @status:		Status quadlet
216  * @reserved:		Reserved
217  * @dataptr:	DMA address of the data buffer
218  * @next:		Next descriptor
219  */
220 struct pch_udc_data_dma_desc {
221 	u32 status;
222 	u32 reserved;
223 	u32 dataptr;
224 	u32 next;
225 };
226 
227 /**
228  * struct pch_udc_stp_dma_desc - Structure to hold DMA descriptor information
229  *				 for control data
230  * @status:	Status
231  * @reserved:	Reserved
232  * @request:	The received setup packet (the two setup
233  *		words), stored as a struct usb_ctrlrequest
234  */
235 struct pch_udc_stp_dma_desc {
236 	u32 status;
237 	u32 reserved;
238 	struct usb_ctrlrequest request;
239 } __packed;
240 
241 /* DMA status definitions */
242 /* Buffer status */
243 #define PCH_UDC_BUFF_STS	0xC0000000
244 #define PCH_UDC_BS_HST_RDY	0x00000000
245 #define PCH_UDC_BS_DMA_BSY	0x40000000
246 #define PCH_UDC_BS_DMA_DONE	0x80000000
247 #define PCH_UDC_BS_HST_BSY	0xC0000000
248 /*  Rx/Tx Status */
249 #define PCH_UDC_RXTX_STS	0x30000000
250 #define PCH_UDC_RTS_SUCC	0x00000000
251 #define PCH_UDC_RTS_DESERR	0x10000000
252 #define PCH_UDC_RTS_BUFERR	0x30000000
253 /* Last Descriptor Indication */
254 #define PCH_UDC_DMA_LAST	0x08000000
255 /* Number of Rx/Tx Bytes Mask */
256 #define PCH_UDC_RXTX_BYTES	0x0000ffff
257 
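/*
 * Illustrative sketch (not part of the original driver): how one data
 * descriptor is typically prepared before it is handed to the DMA
 * engine.  The transfer length sits in the low 16 bits of @status, the
 * buffer-state field in the top two bits, and PCH_UDC_DMA_LAST marks
 * the final descriptor of a chain.  The helper name is made up for the
 * example only and is not used by the driver.
 */
static inline void pch_udc_example_fill_desc(struct pch_udc_data_dma_desc *td,
					     u32 buf_dma, u32 len, bool last)
{
	td->dataptr = buf_dma;		/* DMA address of the data buffer */
	td->status = PCH_UDC_BS_HST_RDY | (len & PCH_UDC_RXTX_BYTES);
	if (last)
		td->status |= PCH_UDC_DMA_LAST;	/* end of the chain */
}
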
258 /**
259  * struct pch_udc_cfg_data - Structure to hold current configuration
260  *			     and interface information
261  * @cur_cfg:	current configuration in use
262  * @cur_intf:	current interface in use
263  * @cur_alt:	current alt interface in use
264  */
265 struct pch_udc_cfg_data {
266 	u16 cur_cfg;
267 	u16 cur_intf;
268 	u16 cur_alt;
269 };
270 
271 /**
272  * struct pch_udc_ep - Structure holding a PCH USB device Endpoint information
273  * @ep:			embedded ep request
274  * @td_stp_phys:	physical (DMA) address of the setup descriptor
275  * @td_data_phys:	physical (DMA) address of the data descriptor
276  * @td_stp:		virtual address of the setup descriptor
277  * @td_data:		virtual address of the data descriptor
278  * @dev:		reference to device struct
279  * @offset_addr:	offset of this endpoint's register block from the
280  *			mapped device register base
281  * @queue:		queue for requests
282  * @num:		endpoint number
283  * @in:			endpoint is IN
284  * @halted:		endpoint halted?
285  * @epsts:		Endpoint status
286  */
287 struct pch_udc_ep {
288 	struct usb_ep			ep;
289 	dma_addr_t			td_stp_phys;
290 	dma_addr_t			td_data_phys;
291 	struct pch_udc_stp_dma_desc	*td_stp;
292 	struct pch_udc_data_dma_desc	*td_data;
293 	struct pch_udc_dev		*dev;
294 	unsigned long			offset_addr;
295 	struct list_head		queue;
296 	unsigned			num:5,
297 					in:1,
298 					halted:1;
299 	unsigned long			epsts;
300 };
301 
302 /**
303  * struct pch_vbus_gpio_data - Structure holding GPIO information
304  *					for detecting VBUS
305  * @port:		gpio port number
306  * @intr:		gpio interrupt number
307  * @irq_work_fall:	Work item scheduled when VBUS falls
308  * @irq_work_rise:	Work item scheduled when VBUS rises
309  */
310 struct pch_vbus_gpio_data {
311 	int			port;
312 	int			intr;
313 	struct work_struct	irq_work_fall;
314 	struct work_struct	irq_work_rise;
315 };
316 
317 /**
318  * struct pch_udc_dev - Structure holding complete information
319  *			of the PCH USB device
320  * @gadget:		gadget driver data
321  * @driver:		reference to gadget driver bound
322  * @pdev:		reference to the PCI device
323  * @ep:			array of endpoints
324  * @lock:		protects all state
325  * @stall:		stall requested
326  * @prot_stall:		protocol stall requested
328  * @suspended:		driver in suspended state
329  * @connected:		gadget driver associated
330  * @vbus_session:	required vbus_session state
331  * @set_cfg_not_acked:	pending acknowledgement for setup
332  * @waiting_zlp_ack:	pending acknowledgement for ZLP
333  * @data_requests:	DMA pool for data requests
334  * @stp_requests:	DMA pool for setup requests
335  * @dma_addr:		DMA address for received data
336  * @setup_data:		Received setup data
337  * @base_addr:		for mapped device memory
338  * @cfg_data:		current cfg, intf, and alt in use
339  * @vbus_gpio:		GPIO information for detecting VBUS
340  */
341 struct pch_udc_dev {
342 	struct usb_gadget		gadget;
343 	struct usb_gadget_driver	*driver;
344 	struct pci_dev			*pdev;
345 	struct pch_udc_ep		ep[PCH_UDC_EP_NUM];
346 	spinlock_t			lock; /* protects all state */
347 	unsigned
348 			stall:1,
349 			prot_stall:1,
350 			suspended:1,
351 			connected:1,
352 			vbus_session:1,
353 			set_cfg_not_acked:1,
354 			waiting_zlp_ack:1;
355 	struct dma_pool		*data_requests;
356 	struct dma_pool		*stp_requests;
357 	dma_addr_t			dma_addr;
358 	struct usb_ctrlrequest		setup_data;
359 	void __iomem			*base_addr;
360 	struct pch_udc_cfg_data		cfg_data;
361 	struct pch_vbus_gpio_data	vbus_gpio;
362 };
363 #define to_pch_udc(g)	(container_of((g), struct pch_udc_dev, gadget))
364 
365 #define PCH_UDC_PCI_BAR_QUARK_X1000	0
366 #define PCH_UDC_PCI_BAR			1
367 
368 #define PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC	0x0939
369 #define PCI_DEVICE_ID_INTEL_EG20T_UDC		0x8808
370 
371 #define PCI_DEVICE_ID_ML7213_IOH_UDC	0x801D
372 #define PCI_DEVICE_ID_ML7831_IOH_UDC	0x8808
373 
374 static const char	ep0_string[] = "ep0in";
375 static DEFINE_SPINLOCK(udc_stall_spinlock);	/* stall spin lock */
376 static bool speed_fs;
377 module_param_named(speed_fs, speed_fs, bool, S_IRUGO);
378 MODULE_PARM_DESC(speed_fs, "true for Full speed operation");
379 
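/*
 * Usage note (illustrative, assuming the driver is built as the
 * "pch_udc" module): loading it with "modprobe pch_udc speed_fs=1"
 * makes pch_udc_init() program UDC_DEVCFG_SPD_FS instead of the
 * default UDC_DEVCFG_SPD_HS, restricting the device to full speed.
 */
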
380 /**
381  * struct pch_udc_request - Structure holding a PCH USB device request packet
382  * @req:		embedded ep request
383  * @td_data_phys:	phys. address
384  * @td_data:		first dma desc. of chain
385  * @td_data_last:	last dma desc. of chain
386  * @queue:		associated queue
387  * @dma_going:		DMA in progress for request
388  * @dma_mapped:		DMA memory mapped for request
389  * @dma_done:		DMA completed for request
390  * @chain_len:		chain length
391  * @buf:		Buffer memory for align adjustment
392  * @dma:		DMA memory for align adjustment
393  */
394 struct pch_udc_request {
395 	struct usb_request		req;
396 	dma_addr_t			td_data_phys;
397 	struct pch_udc_data_dma_desc	*td_data;
398 	struct pch_udc_data_dma_desc	*td_data_last;
399 	struct list_head		queue;
400 	unsigned			dma_going:1,
401 					dma_mapped:1,
402 					dma_done:1;
403 	unsigned			chain_len;
404 	void				*buf;
405 	dma_addr_t			dma;
406 };
407 
408 static inline u32 pch_udc_readl(struct pch_udc_dev *dev, unsigned long reg)
409 {
410 	return ioread32(dev->base_addr + reg);
411 }
412 
413 static inline void pch_udc_writel(struct pch_udc_dev *dev,
414 				    unsigned long val, unsigned long reg)
415 {
416 	iowrite32(val, dev->base_addr + reg);
417 }
418 
419 static inline void pch_udc_bit_set(struct pch_udc_dev *dev,
420 				     unsigned long reg,
421 				     unsigned long bitmask)
422 {
423 	pch_udc_writel(dev, pch_udc_readl(dev, reg) | bitmask, reg);
424 }
425 
426 static inline void pch_udc_bit_clr(struct pch_udc_dev *dev,
427 				     unsigned long reg,
428 				     unsigned long bitmask)
429 {
430 	pch_udc_writel(dev, pch_udc_readl(dev, reg) & ~(bitmask), reg);
431 }
432 
433 static inline u32 pch_udc_ep_readl(struct pch_udc_ep *ep, unsigned long reg)
434 {
435 	return ioread32(ep->dev->base_addr + ep->offset_addr + reg);
436 }
437 
438 static inline void pch_udc_ep_writel(struct pch_udc_ep *ep,
439 				    unsigned long val, unsigned long reg)
440 {
441 	iowrite32(val, ep->dev->base_addr + ep->offset_addr + reg);
442 }
443 
444 static inline void pch_udc_ep_bit_set(struct pch_udc_ep *ep,
445 				     unsigned long reg,
446 				     unsigned long bitmask)
447 {
448 	pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) | bitmask, reg);
449 }
450 
451 static inline void pch_udc_ep_bit_clr(struct pch_udc_ep *ep,
452 				     unsigned long reg,
453 				     unsigned long bitmask)
454 {
455 	pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) & ~(bitmask), reg);
456 }
457 
458 /**
459  * pch_udc_csr_busy() - Wait till idle.
460  * @dev:	Reference to pch_udc_dev structure
461  */
462 static void pch_udc_csr_busy(struct pch_udc_dev *dev)
463 {
464 	unsigned int count = 200;
465 
466 	/* Wait till idle */
467 	while ((pch_udc_readl(dev, UDC_CSR_BUSY_ADDR) & UDC_CSR_BUSY)
468 		&& --count)
469 		cpu_relax();
470 	if (!count)
471 		dev_err(&dev->pdev->dev, "%s: wait error\n", __func__);
472 }
473 
474 /**
475  * pch_udc_write_csr() - Write the command and status registers.
476  * @dev:	Reference to pch_udc_dev structure
477  * @val:	value to be written to CSR register
478  * @ep:		index of the endpoint whose CSR register is written
479  */
480 static void pch_udc_write_csr(struct pch_udc_dev *dev, unsigned long val,
481 			       unsigned int ep)
482 {
483 	unsigned long reg = PCH_UDC_CSR(ep);
484 
485 	pch_udc_csr_busy(dev);		/* Wait till idle */
486 	pch_udc_writel(dev, val, reg);
487 	pch_udc_csr_busy(dev);		/* Wait till idle */
488 }
489 
490 /**
491  * pch_udc_read_csr() - Read the command and status registers.
492  * @dev:	Reference to pch_udc_dev structure
493  * @ep:		index of the endpoint whose CSR register is read
494  *
495  * Return codes:	content of CSR register
496  */
497 static u32 pch_udc_read_csr(struct pch_udc_dev *dev, unsigned int ep)
498 {
499 	unsigned long reg = PCH_UDC_CSR(ep);
500 
501 	pch_udc_csr_busy(dev);		/* Wait till idle */
502 	pch_udc_readl(dev, reg);	/* Dummy read */
503 	pch_udc_csr_busy(dev);		/* Wait till idle */
504 	return pch_udc_readl(dev, reg);
505 }
506 
507 /**
508  * pch_udc_rmt_wakeup() - Initiate a remote wakeup
509  * @dev:	Reference to pch_udc_dev structure
510  */
511 static inline void pch_udc_rmt_wakeup(struct pch_udc_dev *dev)
512 {
513 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
514 	mdelay(1);
515 	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
516 }
517 
518 /**
519  * pch_udc_get_frame() - Get the current frame from device status register
520  * @dev:	Reference to pch_udc_dev structure
521  * Return: the current frame number
522  */
523 static inline int pch_udc_get_frame(struct pch_udc_dev *dev)
524 {
525 	u32 frame = pch_udc_readl(dev, UDC_DEVSTS_ADDR);
526 	return (frame & UDC_DEVSTS_TS_MASK) >> UDC_DEVSTS_TS_SHIFT;
527 }
528 
529 /**
530  * pch_udc_clear_selfpowered() - Clear the self power control
531  * @dev:	Reference to pch_udc_regs structure
532  */
533 static inline void pch_udc_clear_selfpowered(struct pch_udc_dev *dev)
534 {
535 	pch_udc_bit_clr(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
536 }
537 
538 /**
539  * pch_udc_set_selfpowered() - Set the self power control
540  * @dev:	Reference to pch_udc_regs structure
541  */
542 static inline void pch_udc_set_selfpowered(struct pch_udc_dev *dev)
543 {
544 	pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
545 }
546 
547 /**
548  * pch_udc_set_disconnect() - Set the disconnect status.
549  * @dev:	Reference to pch_udc_regs structure
550  */
551 static inline void pch_udc_set_disconnect(struct pch_udc_dev *dev)
552 {
553 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
554 }
555 
556 /**
557  * pch_udc_clear_disconnect() - Clear the disconnect status.
558  * @dev:	Reference to pch_udc_regs structure
559  */
560 static void pch_udc_clear_disconnect(struct pch_udc_dev *dev)
561 {
562 	/* Clear the disconnect */
563 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
564 	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
565 	mdelay(1);
566 	/* Resume USB signalling */
567 	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
568 }
569 
570 /**
571  * pch_udc_reconnect() - This API initializes the usb device controller,
572  *						and clears the disconnect status.
573  * @dev:		Reference to pch_udc_regs structure
574  */
575 static void pch_udc_init(struct pch_udc_dev *dev);
576 static void pch_udc_reconnect(struct pch_udc_dev *dev)
577 {
578 	pch_udc_init(dev);
579 
580 	/* enable device interrupts */
581 	/* pch_udc_enable_interrupts() */
582 	pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR,
583 			UDC_DEVINT_UR | UDC_DEVINT_ENUM);
584 
585 	/* Clear the disconnect */
586 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
587 	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
588 	mdelay(1);
589 	/* Resume USB signalling */
590 	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
591 }
592 
593 /**
594  * pch_udc_vbus_session() - Set or clear the disconnect status.
595  * @dev:	Reference to pch_udc_regs structure
596  * @is_active:	Parameter specifying the action
597  *		  0:   indicating VBUS power is ending
598  *		  !0:  indicating VBUS power is starting
599  */
600 static inline void pch_udc_vbus_session(struct pch_udc_dev *dev,
601 					  int is_active)
602 {
603 	if (is_active) {
604 		pch_udc_reconnect(dev);
605 		dev->vbus_session = 1;
606 	} else {
607 		if (dev->driver && dev->driver->disconnect) {
608 			spin_lock(&dev->lock);
609 			dev->driver->disconnect(&dev->gadget);
610 			spin_unlock(&dev->lock);
611 		}
612 		pch_udc_set_disconnect(dev);
613 		dev->vbus_session = 0;
614 	}
615 }
616 
617 /**
618  * pch_udc_ep_set_stall() - Set the stall of endpoint
619  * @ep:		Reference to structure of type pch_udc_ep_regs
620  */
621 static void pch_udc_ep_set_stall(struct pch_udc_ep *ep)
622 {
623 	if (ep->in) {
624 		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
625 		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
626 	} else {
627 		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
628 	}
629 }
630 
631 /**
632  * pch_udc_ep_clear_stall() - Clear the stall of endpoint
633  * @ep:		Reference to structure of type pch_udc_ep_regs
634  */
635 static inline void pch_udc_ep_clear_stall(struct pch_udc_ep *ep)
636 {
637 	/* Clear the stall */
638 	pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
639 	/* Clear NAK by writing CNAK */
640 	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
641 }
642 
643 /**
644  * pch_udc_ep_set_trfr_type() - Set the transfer type of endpoint
645  * @ep:		Reference to structure of type pch_udc_ep_regs
646  * @type:	Type of endpoint
647  */
648 static inline void pch_udc_ep_set_trfr_type(struct pch_udc_ep *ep,
649 					u8 type)
650 {
651 	pch_udc_ep_writel(ep, ((type << UDC_EPCTL_ET_SHIFT) &
652 				UDC_EPCTL_ET_MASK), UDC_EPCTL_ADDR);
653 }
654 
655 /**
656  * pch_udc_ep_set_bufsz() - Set the buffer size for the endpoint
657  * @ep:		Reference to structure of type pch_udc_ep_regs
658  * @buf_size:	The buffer word size
659  */
660 static void pch_udc_ep_set_bufsz(struct pch_udc_ep *ep,
661 						 u32 buf_size, u32 ep_in)
662 {
663 	u32 data;
664 	if (ep_in) {
665 		data = pch_udc_ep_readl(ep, UDC_BUFIN_FRAMENUM_ADDR);
666 		data = (data & 0xffff0000) | (buf_size & 0xffff);
667 		pch_udc_ep_writel(ep, data, UDC_BUFIN_FRAMENUM_ADDR);
668 	} else {
669 		data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
670 		data = (buf_size << 16) | (data & 0xffff);
671 		pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
672 	}
673 }
674 
675 /**
676  * pch_udc_ep_set_maxpkt() - Set the Max packet size for the endpoint
677  * @ep:		Reference to structure of type pch_udc_ep_regs
678  * @pkt_size:	The packet byte size
679  */
680 static void pch_udc_ep_set_maxpkt(struct pch_udc_ep *ep, u32 pkt_size)
681 {
682 	u32 data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
683 	data = (data & 0xffff0000) | (pkt_size & 0xffff);
684 	pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
685 }
686 
687 /**
688  * pch_udc_ep_set_subptr() - Set the Setup buffer pointer for the endpoint
689  * @ep:		Reference to structure of type pch_udc_ep_regs
690  * @addr:	DMA address of the setup buffer
691  */
692 static inline void pch_udc_ep_set_subptr(struct pch_udc_ep *ep, u32 addr)
693 {
694 	pch_udc_ep_writel(ep, addr, UDC_SUBPTR_ADDR);
695 }
696 
697 /**
698  * pch_udc_ep_set_ddptr() - Set the Data descriptor pointer for the endpoint
699  * @ep:		Reference to structure of type pch_udc_ep_regs
700  * @addr:	DMA address of the data descriptor
701  */
702 static inline void pch_udc_ep_set_ddptr(struct pch_udc_ep *ep, u32 addr)
703 {
704 	pch_udc_ep_writel(ep, addr, UDC_DESPTR_ADDR);
705 }
706 
707 /**
708  * pch_udc_ep_set_pd() - Set the poll demand bit for the endpoint
709  * @ep:		Reference to structure of type pch_udc_ep_regs
710  */
711 static inline void pch_udc_ep_set_pd(struct pch_udc_ep *ep)
712 {
713 	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_P);
714 }
715 
716 /**
717  * pch_udc_ep_set_rrdy() - Set the receive ready bit for the endpoint
718  * @ep:		Reference to structure of type pch_udc_ep_regs
719  */
720 static inline void pch_udc_ep_set_rrdy(struct pch_udc_ep *ep)
721 {
722 	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
723 }
724 
725 /**
726  * pch_udc_ep_clear_rrdy() - Clear the receive ready bit for the endpoint
727  * @ep:		Reference to structure of type pch_udc_ep_regs
728  */
729 static inline void pch_udc_ep_clear_rrdy(struct pch_udc_ep *ep)
730 {
731 	pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
732 }
733 
734 /**
735  * pch_udc_set_dma() - Set the TDE or RDE bit of the device control
736  *			register depending on the direction specified
737  * @dev:	Reference to structure of type pch_udc_regs
738  * @dir:	whether Tx or Rx
739  *		  DMA_DIR_RX: Receive
740  *		  DMA_DIR_TX: Transmit
741  */
742 static inline void pch_udc_set_dma(struct pch_udc_dev *dev, int dir)
743 {
744 	if (dir == DMA_DIR_RX)
745 		pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
746 	else if (dir == DMA_DIR_TX)
747 		pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
748 }
749 
750 /**
751  * pch_udc_clear_dma() - Clear the TDE or RDE bit of the device control
752  *				 register depending on the direction specified
753  * @dev:	Reference to structure of type pch_udc_regs
754  * @dir:	Whether Tx or Rx
755  *		  DMA_DIR_RX: Receive
756  *		  DMA_DIR_TX: Transmit
757  */
758 static inline void pch_udc_clear_dma(struct pch_udc_dev *dev, int dir)
759 {
760 	if (dir == DMA_DIR_RX)
761 		pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
762 	else if (dir == DMA_DIR_TX)
763 		pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
764 }
765 
766 /**
767  * pch_udc_set_csr_done() - Set the device control register
768  *				CSR done field (bit 13)
769  * @dev:	reference to structure of type pch_udc_regs
770  */
771 static inline void pch_udc_set_csr_done(struct pch_udc_dev *dev)
772 {
773 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_CSR_DONE);
774 }
775 
776 /**
777  * pch_udc_disable_interrupts() - Disables the specified interrupts
778  * @dev:	Reference to structure of type pch_udc_regs
779  * @mask:	Mask to disable interrupts
780  */
781 static inline void pch_udc_disable_interrupts(struct pch_udc_dev *dev,
782 					    u32 mask)
783 {
784 	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, mask);
785 }
786 
787 /**
788  * pch_udc_enable_interrupts() - Enable the specified interrupts
789  * @dev:	Reference to structure of type pch_udc_regs
790  * @mask:	Mask to enable interrupts
791  */
792 static inline void pch_udc_enable_interrupts(struct pch_udc_dev *dev,
793 					   u32 mask)
794 {
795 	pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR, mask);
796 }
797 
798 /**
799  * pch_udc_disable_ep_interrupts() - Disable endpoint interrupts
800  * @dev:	Reference to structure of type pch_udc_regs
801  * @mask:	Mask to disable interrupts
802  */
803 static inline void pch_udc_disable_ep_interrupts(struct pch_udc_dev *dev,
804 						u32 mask)
805 {
806 	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, mask);
807 }
808 
809 /**
810  * pch_udc_enable_ep_interrupts() - Enable endpoint interrupts
811  * @dev:	Reference to structure of type pch_udc_regs
812  * @mask:	Mask to enable interrupts
813  */
814 static inline void pch_udc_enable_ep_interrupts(struct pch_udc_dev *dev,
815 					      u32 mask)
816 {
817 	pch_udc_bit_clr(dev, UDC_EPIRQMSK_ADDR, mask);
818 }
819 
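/*
 * Usage sketch (illustrative only): the mask passed to the two helpers
 * above uses the same bit layout as UDC_EPIRQSTS/UDC_EPIRQMSK, so ep0
 * is typically enabled with
 *
 *	pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 |
 *					  UDC_EPINT_OUT_EP0);
 *
 * and an arbitrary endpoint with PCH_UDC_EPINT(ep->in, ep->num).
 */
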
820 /**
821  * pch_udc_read_device_interrupts() - Read the device interrupts
822  * @dev:	Reference to structure of type pch_udc_regs
823  * Return: The device interrupt status
824  */
825 static inline u32 pch_udc_read_device_interrupts(struct pch_udc_dev *dev)
826 {
827 	return pch_udc_readl(dev, UDC_DEVIRQSTS_ADDR);
828 }
829 
830 /**
831  * pch_udc_write_device_interrupts() - Write device interrupts
832  * @dev:	Reference to structure of type pch_udc_regs
833  * @val:	The value to be written to interrupt register
834  */
835 static inline void pch_udc_write_device_interrupts(struct pch_udc_dev *dev,
836 						     u32 val)
837 {
838 	pch_udc_writel(dev, val, UDC_DEVIRQSTS_ADDR);
839 }
840 
841 /**
842  * pch_udc_read_ep_interrupts() - Read the endpoint interrupts
843  * @dev:	Reference to structure of type pch_udc_regs
844  * Return: The endpoint interrupt status
845  */
846 static inline u32 pch_udc_read_ep_interrupts(struct pch_udc_dev *dev)
847 {
848 	return pch_udc_readl(dev, UDC_EPIRQSTS_ADDR);
849 }
850 
851 /**
852  * pch_udc_write_ep_interrupts() - Clear endpoint interupts
853  * @dev:	Reference to structure of type pch_udc_regs
854  * @val:	The value to be written to interrupt register
855  */
856 static inline void pch_udc_write_ep_interrupts(struct pch_udc_dev *dev,
857 					     u32 val)
858 {
859 	pch_udc_writel(dev, val, UDC_EPIRQSTS_ADDR);
860 }
861 
862 /**
863  * pch_udc_read_device_status() - Read the device status
864  * @dev:	Reference to structure of type pch_udc_regs
865  * Return: The device status
866  */
867 static inline u32 pch_udc_read_device_status(struct pch_udc_dev *dev)
868 {
869 	return pch_udc_readl(dev, UDC_DEVSTS_ADDR);
870 }
871 
872 /**
873  * pch_udc_read_ep_control() - Read the endpoint control
874  * @ep:		Reference to structure of type pch_udc_ep_regs
875  * Return: The endpoint control register value
876  */
877 static inline u32 pch_udc_read_ep_control(struct pch_udc_ep *ep)
878 {
879 	return pch_udc_ep_readl(ep, UDC_EPCTL_ADDR);
880 }
881 
882 /**
883  * pch_udc_clear_ep_control() - Clear the endpoint control register
884  * @ep:		Reference to structure of type pch_udc_ep_regs
886  */
887 static inline void pch_udc_clear_ep_control(struct pch_udc_ep *ep)
888 {
889 	return pch_udc_ep_writel(ep, 0, UDC_EPCTL_ADDR);
890 }
891 
892 /**
893  * pch_udc_read_ep_status() - Read the endpoint status
894  * @ep:		Reference to structure of type pch_udc_ep_regs
895  * Return: The endpoint status
896  */
897 static inline u32 pch_udc_read_ep_status(struct pch_udc_ep *ep)
898 {
899 	return pch_udc_ep_readl(ep, UDC_EPSTS_ADDR);
900 }
901 
902 /**
903  * pch_udc_clear_ep_status() - Clear the endpoint status
904  * @ep:		Reference to structure of type pch_udc_ep_regs
905  * @stat:	Endpoint status
906  */
907 static inline void pch_udc_clear_ep_status(struct pch_udc_ep *ep,
908 					 u32 stat)
909 {
910 	return pch_udc_ep_writel(ep, stat, UDC_EPSTS_ADDR);
911 }
912 
913 /**
914  * pch_udc_ep_set_nak() - Set the bit 7 (SNAK field)
915  *				of the endpoint control register
916  * @ep:		Reference to structure of type pch_udc_ep_regs
917  */
918 static inline void pch_udc_ep_set_nak(struct pch_udc_ep *ep)
919 {
920 	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_SNAK);
921 }
922 
923 /**
924  * pch_udc_ep_clear_nak() - Set the bit 8 (CNAK field)
925  *				of the endpoint control register
926  * @ep:		reference to structure of type pch_udc_ep_regs
927  */
928 static void pch_udc_ep_clear_nak(struct pch_udc_ep *ep)
929 {
930 	unsigned int loopcnt = 0;
931 	struct pch_udc_dev *dev = ep->dev;
932 
933 	if (!(pch_udc_ep_readl(ep, UDC_EPCTL_ADDR) & UDC_EPCTL_NAK))
934 		return;
935 	if (!ep->in) {
936 		loopcnt = 10000;
937 		while (!(pch_udc_read_ep_status(ep) & UDC_EPSTS_MRXFIFO_EMP) &&
938 			--loopcnt)
939 			udelay(5);
940 		if (!loopcnt)
941 			dev_err(&dev->pdev->dev, "%s: RxFIFO not Empty\n",
942 				__func__);
943 	}
944 	loopcnt = 10000;
945 	while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_NAK) && --loopcnt) {
946 		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
947 		udelay(5);
948 	}
949 	if (!loopcnt)
950 		dev_err(&dev->pdev->dev, "%s: Clear NAK not set for ep%d%s\n",
951 			__func__, ep->num, (ep->in ? "in" : "out"));
952 }
953 
954 /**
955  * pch_udc_ep_fifo_flush() - Flush the endpoint fifo
956  * @ep:	reference to structure of type pch_udc_ep_regs
957  * @dir:	direction of endpoint
958  *		  0:  endpoint is OUT
959  *		  !0: endpoint is IN
960  */
961 static void pch_udc_ep_fifo_flush(struct pch_udc_ep *ep, int dir)
962 {
963 	if (dir) {	/* IN ep */
964 		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
965 		return;
966 	}
967 }
968 
969 /**
970  * pch_udc_ep_enable() - This API enables the endpoint
971  * @ep:		Reference to the endpoint structure
972  * @desc:	endpoint descriptor
973  */
974 static void pch_udc_ep_enable(struct pch_udc_ep *ep,
975 			       struct pch_udc_cfg_data *cfg,
976 			       const struct usb_endpoint_descriptor *desc)
977 {
978 	u32 val = 0;
979 	u32 buff_size = 0;
980 
981 	pch_udc_ep_set_trfr_type(ep, desc->bmAttributes);
982 	if (ep->in)
983 		buff_size = UDC_EPIN_BUFF_SIZE;
984 	else
985 		buff_size = UDC_EPOUT_BUFF_SIZE;
986 	pch_udc_ep_set_bufsz(ep, buff_size, ep->in);
987 	pch_udc_ep_set_maxpkt(ep, usb_endpoint_maxp(desc));
988 	pch_udc_ep_set_nak(ep);
989 	pch_udc_ep_fifo_flush(ep, ep->in);
990 	/* Configure the endpoint */
991 	val = ep->num << UDC_CSR_NE_NUM_SHIFT | ep->in << UDC_CSR_NE_DIR_SHIFT |
992 	      ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) <<
993 		UDC_CSR_NE_TYPE_SHIFT) |
994 	      (cfg->cur_cfg << UDC_CSR_NE_CFG_SHIFT) |
995 	      (cfg->cur_intf << UDC_CSR_NE_INTF_SHIFT) |
996 	      (cfg->cur_alt << UDC_CSR_NE_ALT_SHIFT) |
997 	      usb_endpoint_maxp(desc) << UDC_CSR_NE_MAX_PKT_SHIFT;
998 
999 	if (ep->in)
1000 		pch_udc_write_csr(ep->dev, val, UDC_EPIN_IDX(ep->num));
1001 	else
1002 		pch_udc_write_csr(ep->dev, val, UDC_EPOUT_IDX(ep->num));
1003 }
1004 
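/*
 * Worked example (added for clarity): for a bulk IN endpoint with
 * ep->num == 1, wMaxPacketSize == 512, configuration 1, interface 0 and
 * alternate setting 0, the value composed above is
 *
 *	(1 << UDC_CSR_NE_NUM_SHIFT)  | (1 << UDC_CSR_NE_DIR_SHIFT) |
 *	(2 << UDC_CSR_NE_TYPE_SHIFT) | (1 << UDC_CSR_NE_CFG_SHIFT) |
 *	(512 << UDC_CSR_NE_MAX_PKT_SHIFT) == 0x100000d1
 *
 * and it is written to CSR slot UDC_EPIN_IDX(1) == 2.
 */
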
1005 /**
1006  * pch_udc_ep_disable() - This API disables the endpoint
1007  * @ep:		Reference to the endpoint structure
1008  */
1009 static void pch_udc_ep_disable(struct pch_udc_ep *ep)
1010 {
1011 	if (ep->in) {
1012 		/* flush the fifo */
1013 		pch_udc_ep_writel(ep, UDC_EPCTL_F, UDC_EPCTL_ADDR);
1014 		/* set NAK */
1015 		pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
1016 		pch_udc_ep_bit_set(ep, UDC_EPSTS_ADDR, UDC_EPSTS_IN);
1017 	} else {
1018 		/* set NAK */
1019 		pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
1020 	}
1021 	/* reset desc pointer */
1022 	pch_udc_ep_writel(ep, 0, UDC_DESPTR_ADDR);
1023 }
1024 
1025 /**
1026  * pch_udc_wait_ep_stall() - Wait until the endpoint STALL bit clears.
1027  * @ep:		Reference to the endpoint structure
1028  */
1029 static void pch_udc_wait_ep_stall(struct pch_udc_ep *ep)
1030 {
1031 	unsigned int count = 10000;
1032 
1033 	/* Wait till idle */
1034 	while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_S) && --count)
1035 		udelay(5);
1036 	if (!count)
1037 		dev_err(&ep->dev->pdev->dev, "%s: wait error\n", __func__);
1038 }
1039 
1040 /**
1041  * pch_udc_init() - This API initializes the usb device controller
1042  * @dev:	Reference to pch_udc_regs structure
1043  */
1044 static void pch_udc_init(struct pch_udc_dev *dev)
1045 {
1046 	if (NULL == dev) {
1047 		pr_err("%s: Invalid address\n", __func__);
1048 		return;
1049 	}
1050 	/* Soft Reset and Reset PHY */
1051 	pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
1052 	pch_udc_writel(dev, UDC_SRST | UDC_PSRST, UDC_SRST_ADDR);
1053 	mdelay(1);
1054 	pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
1055 	pch_udc_writel(dev, 0x00, UDC_SRST_ADDR);
1056 	mdelay(1);
1057 	/* mask and clear all device interrupts */
1058 	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
1059 	pch_udc_bit_set(dev, UDC_DEVIRQSTS_ADDR, UDC_DEVINT_MSK);
1060 
1061 	/* mask and clear all ep interrupts */
1062 	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1063 	pch_udc_bit_set(dev, UDC_EPIRQSTS_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1064 
1065 	/* enable dynamic CSR programming, self powered and device speed */
1066 	if (speed_fs)
1067 		pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
1068 				UDC_DEVCFG_SP | UDC_DEVCFG_SPD_FS);
1069 	else /* default high speed */
1070 		pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
1071 				UDC_DEVCFG_SP | UDC_DEVCFG_SPD_HS);
1072 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR,
1073 			(PCH_UDC_THLEN << UDC_DEVCTL_THLEN_SHIFT) |
1074 			(PCH_UDC_BRLEN << UDC_DEVCTL_BRLEN_SHIFT) |
1075 			UDC_DEVCTL_MODE | UDC_DEVCTL_BREN |
1076 			UDC_DEVCTL_THE);
1077 }
1078 
1079 /**
1080  * pch_udc_exit() - This API exits the usb device controller
1081  * @dev:	Reference to pch_udc_regs structure
1082  */
1083 static void pch_udc_exit(struct pch_udc_dev *dev)
1084 {
1085 	/* mask all device interrupts */
1086 	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
1087 	/* mask all ep interrupts */
1088 	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1089 	/* put device in disconnected state */
1090 	pch_udc_set_disconnect(dev);
1091 }
1092 
1093 /**
1094  * pch_udc_pcd_get_frame() - This API is invoked to get the current frame number
1095  * @gadget:	Reference to the gadget driver
1096  *
1097  * Return codes:
1098  *	>= 0:		The current frame number
1099  *	-EINVAL:	If the gadget passed is NULL
1100  */
1101 static int pch_udc_pcd_get_frame(struct usb_gadget *gadget)
1102 {
1103 	struct pch_udc_dev	*dev;
1104 
1105 	if (!gadget)
1106 		return -EINVAL;
1107 	dev = container_of(gadget, struct pch_udc_dev, gadget);
1108 	return pch_udc_get_frame(dev);
1109 }
1110 
1111 /**
1112  * pch_udc_pcd_wakeup() - This API is invoked to initiate a remote wakeup
1113  * @gadget:	Reference to the gadget driver
1114  *
1115  * Return codes:
1116  *	0:		Success
1117  *	-EINVAL:	If the gadget passed is NULL
1118  */
1119 static int pch_udc_pcd_wakeup(struct usb_gadget *gadget)
1120 {
1121 	struct pch_udc_dev	*dev;
1122 	unsigned long		flags;
1123 
1124 	if (!gadget)
1125 		return -EINVAL;
1126 	dev = container_of(gadget, struct pch_udc_dev, gadget);
1127 	spin_lock_irqsave(&dev->lock, flags);
1128 	pch_udc_rmt_wakeup(dev);
1129 	spin_unlock_irqrestore(&dev->lock, flags);
1130 	return 0;
1131 }
1132 
1133 /**
1134  * pch_udc_pcd_selfpowered() - This API is invoked to specify whether the device
1135  *				is self powered or not
1136  * @gadget:	Reference to the gadget driver
1137  * @value:	Specifies self powered or not
1138  *
1139  * Return codes:
1140  *	0:		Success
1141  *	-EINVAL:	If the gadget passed is NULL
1142  */
1143 static int pch_udc_pcd_selfpowered(struct usb_gadget *gadget, int value)
1144 {
1145 	struct pch_udc_dev	*dev;
1146 
1147 	if (!gadget)
1148 		return -EINVAL;
1149 	gadget->is_selfpowered = (value != 0);
1150 	dev = container_of(gadget, struct pch_udc_dev, gadget);
1151 	if (value)
1152 		pch_udc_set_selfpowered(dev);
1153 	else
1154 		pch_udc_clear_selfpowered(dev);
1155 	return 0;
1156 }
1157 
1158 /**
1159  * pch_udc_pcd_pullup() - This API is invoked to make the device
1160  *				visible/invisible to the host
1161  * @gadget:	Reference to the gadget driver
1162  * @is_on:	Specifies whether the pull up is made active or inactive
1163  *
1164  * Return codes:
1165  *	0:		Success
1166  *	-EINVAL:	If the gadget passed is NULL
1167  */
1168 static int pch_udc_pcd_pullup(struct usb_gadget *gadget, int is_on)
1169 {
1170 	struct pch_udc_dev	*dev;
1171 
1172 	if (!gadget)
1173 		return -EINVAL;
1174 	dev = container_of(gadget, struct pch_udc_dev, gadget);
1175 	if (is_on) {
1176 		pch_udc_reconnect(dev);
1177 	} else {
1178 		if (dev->driver && dev->driver->disconnect) {
1179 			spin_lock(&dev->lock);
1180 			dev->driver->disconnect(&dev->gadget);
1181 			spin_unlock(&dev->lock);
1182 		}
1183 		pch_udc_set_disconnect(dev);
1184 	}
1185 
1186 	return 0;
1187 }
1188 
1189 /**
1190  * pch_udc_pcd_vbus_session() - This API is used by a driver for an external
1191  *				transceiver (or GPIO) that
1192  *				detects a VBUS power session starting/ending
1193  * @gadget:	Reference to the gadget driver
1194  * @is_active:	specifies whether the session is starting or ending
1195  *
1196  * Return codes:
1197  *	0:		Success
1198  *	-EINVAL:	If the gadget passed is NULL
1199  */
1200 static int pch_udc_pcd_vbus_session(struct usb_gadget *gadget, int is_active)
1201 {
1202 	struct pch_udc_dev	*dev;
1203 
1204 	if (!gadget)
1205 		return -EINVAL;
1206 	dev = container_of(gadget, struct pch_udc_dev, gadget);
1207 	pch_udc_vbus_session(dev, is_active);
1208 	return 0;
1209 }
1210 
1211 /**
1212  * pch_udc_pcd_vbus_draw() - This API is used by gadget drivers during
1213  *				SET_CONFIGURATION calls to
1214  *				specify how much power the device can consume
1215  * @gadget:	Reference to the gadget driver
1216  * @mA:		specifies the current limit in mA
1217  *
1218  * Return codes:
1219  *	-EOPNOTSUPP:	Always returned; limiting the VBUS current
1220  *			draw is not supported by this driver
1221  */
1222 static int pch_udc_pcd_vbus_draw(struct usb_gadget *gadget, unsigned int mA)
1223 {
1224 	return -EOPNOTSUPP;
1225 }
1226 
1227 static int pch_udc_start(struct usb_gadget *g,
1228 		struct usb_gadget_driver *driver);
1229 static int pch_udc_stop(struct usb_gadget *g);
1230 
1231 static const struct usb_gadget_ops pch_udc_ops = {
1232 	.get_frame = pch_udc_pcd_get_frame,
1233 	.wakeup = pch_udc_pcd_wakeup,
1234 	.set_selfpowered = pch_udc_pcd_selfpowered,
1235 	.pullup = pch_udc_pcd_pullup,
1236 	.vbus_session = pch_udc_pcd_vbus_session,
1237 	.vbus_draw = pch_udc_pcd_vbus_draw,
1238 	.udc_start = pch_udc_start,
1239 	.udc_stop = pch_udc_stop,
1240 };
1241 
1242 /**
1243  * pch_vbus_gpio_get_value() - This API gets value of GPIO port as VBUS status.
1244  * @dev:	Reference to the driver structure
1245  *
1246  * Return value:
1247  *	1: VBUS is high
1248  *	0: VBUS is low
1249  *     -1: VBUS detection using the GPIO is not enabled
1250  */
1251 static int pch_vbus_gpio_get_value(struct pch_udc_dev *dev)
1252 {
1253 	int vbus = 0;
1254 
1255 	if (dev->vbus_gpio.port)
1256 		vbus = gpio_get_value(dev->vbus_gpio.port) ? 1 : 0;
1257 	else
1258 		vbus = -1;
1259 
1260 	return vbus;
1261 }
1262 
1263 /**
1264  * pch_vbus_gpio_work_fall() - This API keeps watch on VBUS becoming Low.
1265  *                             If VBUS is Low, disconnect is processed
1266  * @irq_work:	Structure for WorkQueue
1267  *
1268  */
1269 static void pch_vbus_gpio_work_fall(struct work_struct *irq_work)
1270 {
1271 	struct pch_vbus_gpio_data *vbus_gpio = container_of(irq_work,
1272 		struct pch_vbus_gpio_data, irq_work_fall);
1273 	struct pch_udc_dev *dev =
1274 		container_of(vbus_gpio, struct pch_udc_dev, vbus_gpio);
1275 	int vbus_saved = -1;
1276 	int vbus;
1277 	int count;
1278 
1279 	if (!dev->vbus_gpio.port)
1280 		return;
1281 
1282 	for (count = 0; count < (PCH_VBUS_PERIOD / PCH_VBUS_INTERVAL);
1283 		count++) {
1284 		vbus = pch_vbus_gpio_get_value(dev);
1285 
1286 		if ((vbus_saved == vbus) && (vbus == 0)) {
1287 			dev_dbg(&dev->pdev->dev, "VBUS fell");
1288 			if (dev->driver
1289 				&& dev->driver->disconnect) {
1290 				dev->driver->disconnect(
1291 					&dev->gadget);
1292 			}
1293 			if (dev->vbus_gpio.intr)
1294 				pch_udc_init(dev);
1295 			else
1296 				pch_udc_reconnect(dev);
1297 			return;
1298 		}
1299 		vbus_saved = vbus;
1300 		mdelay(PCH_VBUS_INTERVAL);
1301 	}
1302 }
1303 
1304 /**
1305  * pch_vbus_gpio_work_rise() - This API checks whether VBUS is High.
1306  *                             If VBUS is High, connect is processed
1307  * @irq_work:	Structure for WorkQueue
1308  *
1309  */
1310 static void pch_vbus_gpio_work_rise(struct work_struct *irq_work)
1311 {
1312 	struct pch_vbus_gpio_data *vbus_gpio = container_of(irq_work,
1313 		struct pch_vbus_gpio_data, irq_work_rise);
1314 	struct pch_udc_dev *dev =
1315 		container_of(vbus_gpio, struct pch_udc_dev, vbus_gpio);
1316 	int vbus;
1317 
1318 	if (!dev->vbus_gpio.port)
1319 		return;
1320 
1321 	mdelay(PCH_VBUS_INTERVAL);
1322 	vbus = pch_vbus_gpio_get_value(dev);
1323 
1324 	if (vbus == 1) {
1325 		dev_dbg(&dev->pdev->dev, "VBUS rose");
1326 		pch_udc_reconnect(dev);
1327 		return;
1328 	}
1329 }
1330 
1331 /**
1332  * pch_vbus_gpio_irq() - IRQ handler for GPIO interrupt for changing VBUS
1333  * @irq:	Interrupt request number
1334  * @data:	Reference to the device structure, passed as a void pointer
1335  *
1336  * Return codes:
1337  *	IRQ_HANDLED:	The interrupt was handled and work was scheduled
1338  *	IRQ_NONE:	VBUS detection via this GPIO interrupt is not in use
1339  */
1340 static irqreturn_t pch_vbus_gpio_irq(int irq, void *data)
1341 {
1342 	struct pch_udc_dev *dev = (struct pch_udc_dev *)data;
1343 
1344 	if (!dev->vbus_gpio.port || !dev->vbus_gpio.intr)
1345 		return IRQ_NONE;
1346 
1347 	if (pch_vbus_gpio_get_value(dev))
1348 		schedule_work(&dev->vbus_gpio.irq_work_rise);
1349 	else
1350 		schedule_work(&dev->vbus_gpio.irq_work_fall);
1351 
1352 	return IRQ_HANDLED;
1353 }
1354 
1355 /**
1356  * pch_vbus_gpio_init() - This API initializes GPIO port detecting VBUS.
1357  * @dev:	Reference to the driver structure
1358  * @vbus_gpio_port:	GPIO port number used for VBUS detection
1359  *
1360  * Return codes:
1361  *	0: Success
1362  *	-EINVAL: GPIO port is invalid or can't be initialized.
1363  */
1364 static int pch_vbus_gpio_init(struct pch_udc_dev *dev, int vbus_gpio_port)
1365 {
1366 	int err;
1367 	int irq_num = 0;
1368 
1369 	dev->vbus_gpio.port = 0;
1370 	dev->vbus_gpio.intr = 0;
1371 
1372 	if (vbus_gpio_port <= -1)
1373 		return -EINVAL;
1374 
1375 	err = gpio_is_valid(vbus_gpio_port);
1376 	if (!err) {
1377 		pr_err("%s: gpio port %d is invalid\n",
1378 			__func__, vbus_gpio_port);
1379 		return -EINVAL;
1380 	}
1381 
1382 	err = gpio_request(vbus_gpio_port, "pch_vbus");
1383 	if (err) {
1384 		pr_err("%s: can't request gpio port %d, err: %d\n",
1385 			__func__, vbus_gpio_port, err);
1386 		return -EINVAL;
1387 	}
1388 
1389 	dev->vbus_gpio.port = vbus_gpio_port;
1390 	gpio_direction_input(vbus_gpio_port);
1391 	INIT_WORK(&dev->vbus_gpio.irq_work_fall, pch_vbus_gpio_work_fall);
1392 
1393 	irq_num = gpio_to_irq(vbus_gpio_port);
1394 	if (irq_num > 0) {
1395 		irq_set_irq_type(irq_num, IRQ_TYPE_EDGE_BOTH);
1396 		err = request_irq(irq_num, pch_vbus_gpio_irq, 0,
1397 			"vbus_detect", dev);
1398 		if (!err) {
1399 			dev->vbus_gpio.intr = irq_num;
1400 			INIT_WORK(&dev->vbus_gpio.irq_work_rise,
1401 				pch_vbus_gpio_work_rise);
1402 		} else {
1403 			pr_err("%s: can't request irq %d, err: %d\n",
1404 				__func__, irq_num, err);
1405 		}
1406 	}
1407 
1408 	return 0;
1409 }
1410 
1411 /**
1412  * pch_vbus_gpio_free() - This API frees resources of GPIO port
1413  * @dev:	Reference to the driver structure
1414  */
1415 static void pch_vbus_gpio_free(struct pch_udc_dev *dev)
1416 {
1417 	if (dev->vbus_gpio.intr)
1418 		free_irq(dev->vbus_gpio.intr, dev);
1419 
1420 	if (dev->vbus_gpio.port)
1421 		gpio_free(dev->vbus_gpio.port);
1422 }
1423 
1424 /**
1425  * complete_req() - This API is invoked from the driver when processing
1426  *			of a request is complete
1427  * @ep:		Reference to the endpoint structure
1428  * @req:	Reference to the request structure
1429  * @status:	Indicates the success/failure of completion
1430  */
1431 static void complete_req(struct pch_udc_ep *ep, struct pch_udc_request *req,
1432 								 int status)
1433 	__releases(&dev->lock)
1434 	__acquires(&dev->lock)
1435 {
1436 	struct pch_udc_dev	*dev;
1437 	unsigned halted = ep->halted;
1438 
1439 	list_del_init(&req->queue);
1440 
1441 	/* set new status if pending */
1442 	if (req->req.status == -EINPROGRESS)
1443 		req->req.status = status;
1444 	else
1445 		status = req->req.status;
1446 
1447 	dev = ep->dev;
1448 	if (req->dma_mapped) {
1449 		if (req->dma == DMA_ADDR_INVALID) {
1450 			if (ep->in)
1451 				dma_unmap_single(&dev->pdev->dev, req->req.dma,
1452 						 req->req.length,
1453 						 DMA_TO_DEVICE);
1454 			else
1455 				dma_unmap_single(&dev->pdev->dev, req->req.dma,
1456 						 req->req.length,
1457 						 DMA_FROM_DEVICE);
1458 			req->req.dma = DMA_ADDR_INVALID;
1459 		} else {
1460 			if (ep->in)
1461 				dma_unmap_single(&dev->pdev->dev, req->dma,
1462 						 req->req.length,
1463 						 DMA_TO_DEVICE);
1464 			else {
1465 				dma_unmap_single(&dev->pdev->dev, req->dma,
1466 						 req->req.length,
1467 						 DMA_FROM_DEVICE);
1468 				memcpy(req->req.buf, req->buf, req->req.length);
1469 			}
1470 			kfree(req->buf);
1471 			req->dma = DMA_ADDR_INVALID;
1472 		}
1473 		req->dma_mapped = 0;
1474 	}
1475 	ep->halted = 1;
1476 	spin_unlock(&dev->lock);
1477 	if (!ep->in)
1478 		pch_udc_ep_clear_rrdy(ep);
1479 	usb_gadget_giveback_request(&ep->ep, &req->req);
1480 	spin_lock(&dev->lock);
1481 	ep->halted = halted;
1482 }
1483 
1484 /**
1485  * empty_req_queue() - This API empties the request queue of an endpoint
1486  * @ep:		Reference to the endpoint structure
1487  */
1488 static void empty_req_queue(struct pch_udc_ep *ep)
1489 {
1490 	struct pch_udc_request	*req;
1491 
1492 	ep->halted = 1;
1493 	while (!list_empty(&ep->queue)) {
1494 		req = list_entry(ep->queue.next, struct pch_udc_request, queue);
1495 		complete_req(ep, req, -ESHUTDOWN);	/* Remove from list */
1496 	}
1497 }
1498 
1499 /**
1500  * pch_udc_free_dma_chain() - This function frees the DMA chain created
1501  *				for the request
1502  * @dev:	Reference to the driver structure
1503  * @req:	Reference to the request to be freed
1507  */
1508 static void pch_udc_free_dma_chain(struct pch_udc_dev *dev,
1509 				   struct pch_udc_request *req)
1510 {
1511 	struct pch_udc_data_dma_desc *td = req->td_data;
1512 	unsigned i = req->chain_len;
1513 
1514 	dma_addr_t addr2;
1515 	dma_addr_t addr = (dma_addr_t)td->next;
1516 	td->next = 0x00;
1517 	for (; i > 1; --i) {
1518 		/* do not free first desc., will be done by free for request */
1519 		td = phys_to_virt(addr);
1520 		addr2 = (dma_addr_t)td->next;
1521 		dma_pool_free(dev->data_requests, td, addr);
1522 		td->next = 0x00;
1523 		addr = addr2;
1524 	}
1525 	req->chain_len = 1;
1526 }
1527 
1528 /**
1529  * pch_udc_create_dma_chain() - This function creates or reinitializes
1530  *				a DMA chain
1531  * @ep:		Reference to the endpoint structure
1532  * @req:	Reference to the request
1533  * @buf_len:	The buffer length
1534  * @gfp_flags:	Flags to be used while mapping the data buffer
1535  *
1536  * Return codes:
1537  *	0:		success,
1538  *	-ENOMEM:	dma_pool_alloc invocation fails
1539  */
1540 static int pch_udc_create_dma_chain(struct pch_udc_ep *ep,
1541 				    struct pch_udc_request *req,
1542 				    unsigned long buf_len,
1543 				    gfp_t gfp_flags)
1544 {
1545 	struct pch_udc_data_dma_desc *td = req->td_data, *last;
1546 	unsigned long bytes = req->req.length, i = 0;
1547 	dma_addr_t dma_addr;
1548 	unsigned len = 1;
1549 
1550 	if (req->chain_len > 1)
1551 		pch_udc_free_dma_chain(ep->dev, req);
1552 
1553 	if (req->dma == DMA_ADDR_INVALID)
1554 		td->dataptr = req->req.dma;
1555 	else
1556 		td->dataptr = req->dma;
1557 
1558 	td->status = PCH_UDC_BS_HST_BSY;
1559 	for (; ; bytes -= buf_len, ++len) {
1560 		td->status = PCH_UDC_BS_HST_BSY | min(buf_len, bytes);
1561 		if (bytes <= buf_len)
1562 			break;
1563 		last = td;
1564 		td = dma_pool_alloc(ep->dev->data_requests, gfp_flags,
1565 				    &dma_addr);
1566 		if (!td)
1567 			goto nomem;
1568 		i += buf_len;
1569 		td->dataptr = req->td_data->dataptr + i;
1570 		last->next = dma_addr;
1571 	}
1572 
1573 	req->td_data_last = td;
1574 	td->status |= PCH_UDC_DMA_LAST;
1575 	td->next = req->td_data_phys;
1576 	req->chain_len = len;
1577 	return 0;
1578 
1579 nomem:
1580 	if (len > 1) {
1581 		req->chain_len = len;
1582 		pch_udc_free_dma_chain(ep->dev, req);
1583 	}
1584 	req->chain_len = 1;
1585 	return -ENOMEM;
1586 }
1587 
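/*
 * Worked example (added for clarity): queueing a 1500 byte request on a
 * bulk endpoint with a 512 byte ep.maxpacket produces a chain of three
 * descriptors carrying 512, 512 and 476 bytes (chain_len == 3).  Only
 * the last descriptor gets PCH_UDC_DMA_LAST set, and its @next field is
 * pointed back at req->td_data_phys.
 */
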
1588 /**
1589  * prepare_dma() - This function creates and initializes the DMA chain
1590  *			for the request
1591  * @ep:		Reference to the endpoint structure
1592  * @req:	Reference to the request
1593  * @gfp:	Flag to be used while mapping the data buffer
1594  *
1595  * Return codes:
1596  *	0:		Success
1597  *	Nonzero:	linux error number on failure
1598  */
1599 static int prepare_dma(struct pch_udc_ep *ep, struct pch_udc_request *req,
1600 			  gfp_t gfp)
1601 {
1602 	int	retval;
1603 
1604 	/* Allocate and create a DMA chain */
1605 	retval = pch_udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
1606 	if (retval) {
1607 		pr_err("%s: could not create DMA chain:%d\n", __func__, retval);
1608 		return retval;
1609 	}
1610 	if (ep->in)
1611 		req->td_data->status = (req->td_data->status &
1612 				~PCH_UDC_BUFF_STS) | PCH_UDC_BS_HST_RDY;
1613 	return 0;
1614 }
1615 
1616 /**
1617  * process_zlp() - This function processes zero length packets
1618  *			from the gadget driver
1619  * @ep:		Reference to the endpoint structure
1620  * @req:	Reference to the request
1621  */
1622 static void process_zlp(struct pch_udc_ep *ep, struct pch_udc_request *req)
1623 {
1624 	struct pch_udc_dev	*dev = ep->dev;
1625 
1626 	/* IN zlp's are handled by hardware */
1627 	complete_req(ep, req, 0);
1628 
1629 	/* if set_config or set_intf is waiting for ack by zlp
1630 	 * then set CSR_DONE
1631 	 */
1632 	if (dev->set_cfg_not_acked) {
1633 		pch_udc_set_csr_done(dev);
1634 		dev->set_cfg_not_acked = 0;
1635 	}
1636 	/* setup command is ACK'ed now by zlp */
1637 	if (!dev->stall && dev->waiting_zlp_ack) {
1638 		pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
1639 		dev->waiting_zlp_ack = 0;
1640 	}
1641 }
1642 
1643 /**
1644  * pch_udc_start_rxrequest() - This function starts a receive request.
1645  * @ep:		Reference to the endpoint structure
1646  * @req:	Reference to the request structure
1647  */
1648 static void pch_udc_start_rxrequest(struct pch_udc_ep *ep,
1649 					 struct pch_udc_request *req)
1650 {
1651 	struct pch_udc_data_dma_desc *td_data;
1652 
1653 	pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
1654 	td_data = req->td_data;
1655 	/* Set the status bits for all descriptors */
1656 	while (1) {
1657 		td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
1658 				    PCH_UDC_BS_HST_RDY;
1659 		if ((td_data->status & PCH_UDC_DMA_LAST) ==  PCH_UDC_DMA_LAST)
1660 			break;
1661 		td_data = phys_to_virt(td_data->next);
1662 	}
1663 	/* Write the descriptor pointer */
1664 	pch_udc_ep_set_ddptr(ep, req->td_data_phys);
1665 	req->dma_going = 1;
1666 	pch_udc_enable_ep_interrupts(ep->dev, UDC_EPINT_OUT_EP0 << ep->num);
1667 	pch_udc_set_dma(ep->dev, DMA_DIR_RX);
1668 	pch_udc_ep_clear_nak(ep);
1669 	pch_udc_ep_set_rrdy(ep);
1670 }
1671 
1672 /**
1673  * pch_udc_pcd_ep_enable() - This API enables the endpoint. It is called
1674  *				from gadget driver
1675  * @usbep:	Reference to the USB endpoint structure
1676  * @desc:	Reference to the USB endpoint descriptor structure
1677  *
1678  * Return codes:
1679  *	0:		Success
1680  *	-EINVAL:	Invalid endpoint or descriptor
1681  *	-ESHUTDOWN:	No gadget driver bound or the device speed is unknown
1682  */
1683 static int pch_udc_pcd_ep_enable(struct usb_ep *usbep,
1684 				    const struct usb_endpoint_descriptor *desc)
1685 {
1686 	struct pch_udc_ep	*ep;
1687 	struct pch_udc_dev	*dev;
1688 	unsigned long		iflags;
1689 
1690 	if (!usbep || (usbep->name == ep0_string) || !desc ||
1691 	    (desc->bDescriptorType != USB_DT_ENDPOINT) || !desc->wMaxPacketSize)
1692 		return -EINVAL;
1693 
1694 	ep = container_of(usbep, struct pch_udc_ep, ep);
1695 	dev = ep->dev;
1696 	if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
1697 		return -ESHUTDOWN;
1698 	spin_lock_irqsave(&dev->lock, iflags);
1699 	ep->ep.desc = desc;
1700 	ep->halted = 0;
1701 	pch_udc_ep_enable(ep, &ep->dev->cfg_data, desc);
1702 	ep->ep.maxpacket = usb_endpoint_maxp(desc);
1703 	pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1704 	spin_unlock_irqrestore(&dev->lock, iflags);
1705 	return 0;
1706 }
1707 
1708 /**
1709  * pch_udc_pcd_ep_disable() - This API disables endpoint and is called
1710  *				from gadget driver
1711  * @usbep:	Reference to the USB endpoint structure
1712  *
1713  * Return codes:
1714  *	0:		Success
1715  *	-EINVAL:	Invalid endpoint (NULL, ep0, or not enabled)
1716  */
1717 static int pch_udc_pcd_ep_disable(struct usb_ep *usbep)
1718 {
1719 	struct pch_udc_ep	*ep;
1720 	unsigned long	iflags;
1721 
1722 	if (!usbep)
1723 		return -EINVAL;
1724 
1725 	ep = container_of(usbep, struct pch_udc_ep, ep);
1726 	if ((usbep->name == ep0_string) || !ep->ep.desc)
1727 		return -EINVAL;
1728 
1729 	spin_lock_irqsave(&ep->dev->lock, iflags);
1730 	empty_req_queue(ep);
1731 	ep->halted = 1;
1732 	pch_udc_ep_disable(ep);
1733 	pch_udc_disable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1734 	ep->ep.desc = NULL;
1735 	INIT_LIST_HEAD(&ep->queue);
1736 	spin_unlock_irqrestore(&ep->dev->lock, iflags);
1737 	return 0;
1738 }
1739 
1740 /**
1741  * pch_udc_alloc_request() - This function allocates request structure.
1742  *				It is called by gadget driver
1743  * @usbep:	Reference to the USB endpoint structure
1744  * @gfp:	Flag to be used while allocating memory
1745  *
1746  * Return codes:
1747  *	NULL:			Failure
1748  *	Allocated address:	Success
1749  */
1750 static struct usb_request *pch_udc_alloc_request(struct usb_ep *usbep,
1751 						  gfp_t gfp)
1752 {
1753 	struct pch_udc_request		*req;
1754 	struct pch_udc_ep		*ep;
1755 	struct pch_udc_data_dma_desc	*dma_desc;
1756 
1757 	if (!usbep)
1758 		return NULL;
1759 	ep = container_of(usbep, struct pch_udc_ep, ep);
1760 	req = kzalloc(sizeof *req, gfp);
1761 	if (!req)
1762 		return NULL;
1763 	req->req.dma = DMA_ADDR_INVALID;
1764 	req->dma = DMA_ADDR_INVALID;
1765 	INIT_LIST_HEAD(&req->queue);
1766 	if (!ep->dev->dma_addr)
1767 		return &req->req;
1768 	/* ep0 in requests are allocated from data pool here */
1769 	dma_desc = dma_pool_alloc(ep->dev->data_requests, gfp,
1770 				  &req->td_data_phys);
1771 	if (NULL == dma_desc) {
1772 		kfree(req);
1773 		return NULL;
1774 	}
1775 	/* prevent from using desc. - set HOST BUSY */
1776 	dma_desc->status |= PCH_UDC_BS_HST_BSY;
1777 	dma_desc->dataptr = cpu_to_le32(DMA_ADDR_INVALID);
1778 	req->td_data = dma_desc;
1779 	req->td_data_last = dma_desc;
1780 	req->chain_len = 1;
1781 	return &req->req;
1782 }
1783 
1784 /**
1785  * pch_udc_free_request() - This function frees a request structure.
1786  *				It is called by the gadget driver
1787  * @usbep:	Reference to the USB endpoint structure
1788  * @usbreq:	Reference to the USB request
1789  */
1790 static void pch_udc_free_request(struct usb_ep *usbep,
1791 				  struct usb_request *usbreq)
1792 {
1793 	struct pch_udc_ep	*ep;
1794 	struct pch_udc_request	*req;
1795 	struct pch_udc_dev	*dev;
1796 
1797 	if (!usbep || !usbreq)
1798 		return;
1799 	ep = container_of(usbep, struct pch_udc_ep, ep);
1800 	req = container_of(usbreq, struct pch_udc_request, req);
1801 	dev = ep->dev;
1802 	if (!list_empty(&req->queue))
1803 		dev_err(&dev->pdev->dev, "%s: %s req=0x%p queue not empty\n",
1804 			__func__, usbep->name, req);
1805 	if (req->td_data != NULL) {
1806 		if (req->chain_len > 1)
1807 			pch_udc_free_dma_chain(ep->dev, req);
1808 		dma_pool_free(ep->dev->data_requests, req->td_data,
1809 			      req->td_data_phys);
1810 	}
1811 	kfree(req);
1812 }
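
/*
 * Example (illustrative only, not part of this driver): gadget drivers
 * obtain and release requests through the usb_ep API, which dispatches to
 * the two functions above.  A minimal sketch, assuming "ep" is an endpoint
 * owned by the caller and the 512-byte buffer size is arbitrary:
 *
 *	struct usb_request *req;
 *
 *	req = usb_ep_alloc_request(ep, GFP_KERNEL);
 *	if (!req)
 *		return -ENOMEM;
 *	req->buf = kmalloc(512, GFP_KERNEL);
 *	if (!req->buf) {
 *		usb_ep_free_request(ep, req);
 *		return -ENOMEM;
 *	}
 *	req->length = 512;
 *	...
 *	kfree(req->buf);
 *	usb_ep_free_request(ep, req);
 */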
1813 
1814 /**
1815  * pch_udc_pcd_queue() - This function queues a request packet. It is called
1816  *			by the gadget driver
1817  * @usbep:	Reference to the USB endpoint structure
1818  * @usbreq:	Reference to the USB request
1819  * @gfp:	Flag to be used while mapping the data buffer
1820  *
1821  * Return codes:
1822  *	0:			Success
1823  *	linux error number:	Failure
1824  */
1825 static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
1826 								 gfp_t gfp)
1827 {
1828 	int retval = 0;
1829 	struct pch_udc_ep	*ep;
1830 	struct pch_udc_dev	*dev;
1831 	struct pch_udc_request	*req;
1832 	unsigned long	iflags;
1833 
1834 	if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf)
1835 		return -EINVAL;
1836 	ep = container_of(usbep, struct pch_udc_ep, ep);
1837 	dev = ep->dev;
1838 	if (!ep->ep.desc && ep->num)
1839 		return -EINVAL;
1840 	req = container_of(usbreq, struct pch_udc_request, req);
1841 	if (!list_empty(&req->queue))
1842 		return -EINVAL;
1843 	if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
1844 		return -ESHUTDOWN;
1845 	spin_lock_irqsave(&dev->lock, iflags);
1846 	/* map the buffer for dma */
1847 	if (usbreq->length &&
1848 	    ((usbreq->dma == DMA_ADDR_INVALID) || !usbreq->dma)) {
1849 		if (!((unsigned long)(usbreq->buf) & 0x03)) {
1850 			if (ep->in)
1851 				usbreq->dma = dma_map_single(&dev->pdev->dev,
1852 							     usbreq->buf,
1853 							     usbreq->length,
1854 							     DMA_TO_DEVICE);
1855 			else
1856 				usbreq->dma = dma_map_single(&dev->pdev->dev,
1857 							     usbreq->buf,
1858 							     usbreq->length,
1859 							     DMA_FROM_DEVICE);
1860 		} else {
1861 			req->buf = kzalloc(usbreq->length, GFP_ATOMIC);
1862 			if (!req->buf) {
1863 				retval = -ENOMEM;
1864 				goto probe_end;
1865 			}
1866 			if (ep->in) {
1867 				memcpy(req->buf, usbreq->buf, usbreq->length);
1868 				req->dma = dma_map_single(&dev->pdev->dev,
1869 							  req->buf,
1870 							  usbreq->length,
1871 							  DMA_TO_DEVICE);
1872 			} else
1873 				req->dma = dma_map_single(&dev->pdev->dev,
1874 							  req->buf,
1875 							  usbreq->length,
1876 							  DMA_FROM_DEVICE);
1877 		}
1878 		req->dma_mapped = 1;
1879 	}
1880 	if (usbreq->length > 0) {
1881 		retval = prepare_dma(ep, req, GFP_ATOMIC);
1882 		if (retval)
1883 			goto probe_end;
1884 	}
1885 	usbreq->actual = 0;
1886 	usbreq->status = -EINPROGRESS;
1887 	req->dma_done = 0;
1888 	if (list_empty(&ep->queue) && !ep->halted) {
1889 		/* no pending transfer, so start this req */
1890 		if (!usbreq->length) {
1891 			process_zlp(ep, req);
1892 			retval = 0;
1893 			goto probe_end;
1894 		}
1895 		if (!ep->in) {
1896 			pch_udc_start_rxrequest(ep, req);
1897 		} else {
1898 			/*
1899 			 * For IN transfers the descriptors will be
1900 			 * programmed and the P bit will be set when
1901 			 * we get an IN token.
1902 			 */
1903 			pch_udc_wait_ep_stall(ep);
1904 			pch_udc_ep_clear_nak(ep);
1905 			pch_udc_enable_ep_interrupts(ep->dev, (1 << ep->num));
1906 		}
1907 	}
1908 	/* Now add this request to the ep's pending requests */
1909 	if (req != NULL)
1910 		list_add_tail(&req->queue, &ep->queue);
1911 
1912 probe_end:
1913 	spin_unlock_irqrestore(&dev->lock, iflags);
1914 	return retval;
1915 }
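
/*
 * Example (illustrative only, not part of this driver): submitting a
 * transfer from a gadget driver; usb_ep_queue() dispatches to
 * pch_udc_pcd_queue() above.  Note that this driver maps req->buf for DMA
 * itself and bounces buffers that are not 4-byte aligned through an
 * internal copy.  A sketch, assuming "ep" and "req" were set up as in the
 * allocation example above and "example_complete" is a hypothetical
 * completion handler:
 *
 *	static void example_complete(struct usb_ep *ep,
 *				     struct usb_request *req)
 *	{
 *		if (req->status == 0)
 *			pr_debug("transferred %u bytes\n", req->actual);
 *	}
 *
 *	req->complete = example_complete;
 *	if (usb_ep_queue(ep, req, GFP_ATOMIC))
 *		pr_err("usb_ep_queue failed\n");
 */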
1916 
1917 /**
1918  * pch_udc_pcd_dequeue() - This function dequeues a request packet.
1919  *				It is called by the gadget driver
1920  * @usbep:	Reference to the USB endpoint structure
1921  * @usbreq:	Reference to the USB request
1922  *
1923  * Return codes:
1924  *	0:			Success
1925  *	linux error number:	Failure
1926  */
1927 static int pch_udc_pcd_dequeue(struct usb_ep *usbep,
1928 				struct usb_request *usbreq)
1929 {
1930 	struct pch_udc_ep	*ep;
1931 	struct pch_udc_request	*req;
1932 	unsigned long		flags;
1933 	int ret = -EINVAL;
1934 
1935 	ep = container_of(usbep, struct pch_udc_ep, ep);
1936 	if (!usbep || !usbreq || (!ep->ep.desc && ep->num))
1937 		return ret;
1938 	req = container_of(usbreq, struct pch_udc_request, req);
1939 	spin_lock_irqsave(&ep->dev->lock, flags);
1940 	/* make sure it's still queued on this endpoint */
1941 	list_for_each_entry(req, &ep->queue, queue) {
1942 		if (&req->req == usbreq) {
1943 			pch_udc_ep_set_nak(ep);
1944 			if (!list_empty(&req->queue))
1945 				complete_req(ep, req, -ECONNRESET);
1946 			ret = 0;
1947 			break;
1948 		}
1949 	}
1950 	spin_unlock_irqrestore(&ep->dev->lock, flags);
1951 	return ret;
1952 }
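
/*
 * Example (illustrative only, not part of this driver): cancelling a
 * request that was submitted with usb_ep_queue(); the request completes
 * with -ECONNRESET, as implemented above.  A sketch with the same "ep" and
 * "req" as in the earlier examples:
 *
 *	usb_ep_dequeue(ep, req);
 */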
1953 
1954 /**
1955  * pch_udc_pcd_set_halt() - This function sets or clears the endpoint halt
1956  *			    feature
1957  * @usbep:	Reference to the USB endpoint structure
1958  * @halt:	Specifies whether to set or clear the feature
1959  *
1960  * Return codes:
1961  *	0:			Success
1962  *	linux error number:	Failure
1963  */
1964 static int pch_udc_pcd_set_halt(struct usb_ep *usbep, int halt)
1965 {
1966 	struct pch_udc_ep	*ep;
1967 	unsigned long iflags;
1968 	int ret;
1969 
1970 	if (!usbep)
1971 		return -EINVAL;
1972 	ep = container_of(usbep, struct pch_udc_ep, ep);
1973 	if (!ep->ep.desc && !ep->num)
1974 		return -EINVAL;
1975 	if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
1976 		return -ESHUTDOWN;
1977 	spin_lock_irqsave(&udc_stall_spinlock, iflags);
1978 	if (list_empty(&ep->queue)) {
1979 		if (halt) {
1980 			if (ep->num == PCH_UDC_EP0)
1981 				ep->dev->stall = 1;
1982 			pch_udc_ep_set_stall(ep);
1983 			pch_udc_enable_ep_interrupts(
1984 				ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1985 		} else {
1986 			pch_udc_ep_clear_stall(ep);
1987 		}
1988 		ret = 0;
1989 	} else {
1990 		ret = -EAGAIN;
1991 	}
1992 	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
1993 	return ret;
1994 }
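
/*
 * Example (illustrative only, not part of this driver): halting an
 * endpoint from a gadget driver, e.g. to signal a protocol error to the
 * host.  usb_ep_set_halt()/usb_ep_clear_halt() dispatch to
 * pch_udc_pcd_set_halt() above, which returns -EAGAIN while requests are
 * still queued.  A sketch with "ep" as before:
 *
 *	usb_ep_set_halt(ep);
 *	...
 *	usb_ep_clear_halt(ep);
 */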
1995 
1996 /**
1997  * pch_udc_pcd_set_wedge() - This function sets the endpoint halt feature
1998  *				and protects it from being cleared by the
1999  *				host (wedge)
2000  * @usbep:	Reference to the USB endpoint structure
2001  *
2002  * Return codes:
2003  *	0:			Success
2004  *	linux error number:	Failure
2005  */
2006 static int pch_udc_pcd_set_wedge(struct usb_ep *usbep)
2007 {
2008 	struct pch_udc_ep	*ep;
2009 	unsigned long iflags;
2010 	int ret;
2011 
2012 	if (!usbep)
2013 		return -EINVAL;
2014 	ep = container_of(usbep, struct pch_udc_ep, ep);
2015 	if (!ep->ep.desc && !ep->num)
2016 		return -EINVAL;
2017 	if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
2018 		return -ESHUTDOWN;
2019 	spin_lock_irqsave(&udc_stall_spinlock, iflags);
2020 	if (!list_empty(&ep->queue)) {
2021 		ret = -EAGAIN;
2022 	} else {
2023 		if (ep->num == PCH_UDC_EP0)
2024 			ep->dev->stall = 1;
2025 		pch_udc_ep_set_stall(ep);
2026 		pch_udc_enable_ep_interrupts(ep->dev,
2027 					     PCH_UDC_EPINT(ep->in, ep->num));
2028 		ep->dev->prot_stall = 1;
2029 		ret = 0;
2030 	}
2031 	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
2032 	return ret;
2033 }
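
/*
 * Example (illustrative only, not part of this driver): wedging an
 * endpoint so the halt persists until the function driver clears it
 * (the mass-storage function does this on certain errors).
 * usb_ep_set_wedge() dispatches to pch_udc_pcd_set_wedge() above:
 *
 *	usb_ep_set_wedge(ep);
 */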
2034 
2035 /**
2036  * pch_udc_pcd_fifo_flush() - This function flushes the FIFO of the specified endpoint
2037  * @usbep:	Reference to the USB endpoint structure
2038  */
2039 static void pch_udc_pcd_fifo_flush(struct usb_ep *usbep)
2040 {
2041 	struct pch_udc_ep  *ep;
2042 
2043 	if (!usbep)
2044 		return;
2045 
2046 	ep = container_of(usbep, struct pch_udc_ep, ep);
2047 	if (ep->ep.desc || !ep->num)
2048 		pch_udc_ep_fifo_flush(ep, ep->in);
2049 }
2050 
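/*
 * Endpoint operation table handed to the gadget core; the usb_ep_*()
 * wrappers used in the examples above dispatch through these callbacks.
 */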
2051 static const struct usb_ep_ops pch_udc_ep_ops = {
2052 	.enable		= pch_udc_pcd_ep_enable,
2053 	.disable	= pch_udc_pcd_ep_disable,
2054 	.alloc_request	= pch_udc_alloc_request,
2055 	.free_request	= pch_udc_free_request,
2056 	.queue		= pch_udc_pcd_queue,
2057 	.dequeue	= pch_udc_pcd_dequeue,
2058 	.set_halt	= pch_udc_pcd_set_halt,
2059 	.set_wedge	= pch_udc_pcd_set_wedge,
2060 	.fifo_status	= NULL,
2061 	.fifo_flush	= pch_udc_pcd_fifo_flush,
2062 };
2063 
2064 /**
2065  * pch_udc_init_setup_buff() - This function initializes the SETUP buffer
2066  * @td_stp:	Reference to the SETUP buffer structure
2067  */
2068 static void pch_udc_init_setup_buff(struct pch_udc_stp_dma_desc *td_stp)
2069 {
2070 	static u32	pky_marker;
2071 
2072 	if (!td_stp)
2073 		return;
2074 	td_stp->reserved = ++pky_marker;
2075 	memset(&td_stp->request, 0xFF, sizeof td_stp->request);
2076 	td_stp->status = PCH_UDC_BS_HST_RDY;
2077 }
2078 
2079 /**
2080  * pch_udc_start_next_txrequest() - This function starts
2081  *					the next transmit request
2082  * @ep:	Reference to the endpoint structure
2083  */
2084 static void pch_udc_start_next_txrequest(struct pch_udc_ep *ep)
2085 {
2086 	struct pch_udc_request *req;
2087 	struct pch_udc_data_dma_desc *td_data;
2088 
2089 	if (pch_udc_read_ep_control(ep) & UDC_EPCTL_P)
2090 		return;
2091 
2092 	if (list_empty(&ep->queue))
2093 		return;
2094 
2095 	/* next request */
2096 	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2097 	if (req->dma_going)
2098 		return;
2099 	if (!req->td_data)
2100 		return;
2101 	pch_udc_wait_ep_stall(ep);
2102 	req->dma_going = 1;
2103 	pch_udc_ep_set_ddptr(ep, 0);
2104 	td_data = req->td_data;
2105 	while (1) {
2106 		td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
2107 				   PCH_UDC_BS_HST_RDY;
2108 		if ((td_data->status & PCH_UDC_DMA_LAST) == PCH_UDC_DMA_LAST)
2109 			break;
2110 		td_data = phys_to_virt(td_data->next);
2111 	}
2112 	pch_udc_ep_set_ddptr(ep, req->td_data_phys);
2113 	pch_udc_set_dma(ep->dev, DMA_DIR_TX);
2114 	pch_udc_ep_set_pd(ep);
2115 	pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
2116 	pch_udc_ep_clear_nak(ep);
2117 }
2118 
2119 /**
2120  * pch_udc_complete_transfer() - This function completes a transfer
2121  * @ep:		Reference to the endpoint structure
2122  */
2123 static void pch_udc_complete_transfer(struct pch_udc_ep *ep)
2124 {
2125 	struct pch_udc_request *req;
2126 	struct pch_udc_dev *dev = ep->dev;
2127 
2128 	if (list_empty(&ep->queue))
2129 		return;
2130 	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2131 	if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
2132 	    PCH_UDC_BS_DMA_DONE)
2133 		return;
2134 	if ((req->td_data_last->status & PCH_UDC_RXTX_STS) !=
2135 	     PCH_UDC_RTS_SUCC) {
2136 		dev_err(&dev->pdev->dev, "Invalid RXTX status (0x%08x) "
2137 			"epstatus=0x%08x\n",
2138 		       (req->td_data_last->status & PCH_UDC_RXTX_STS),
2139 		       (int)(ep->epsts));
2140 		return;
2141 	}
2142 
2143 	req->req.actual = req->req.length;
2144 	req->td_data_last->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
2145 	req->td_data->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
2146 	complete_req(ep, req, 0);
2147 	req->dma_going = 0;
2148 	if (!list_empty(&ep->queue)) {
2149 		pch_udc_wait_ep_stall(ep);
2150 		pch_udc_ep_clear_nak(ep);
2151 		pch_udc_enable_ep_interrupts(ep->dev,
2152 					     PCH_UDC_EPINT(ep->in, ep->num));
2153 	} else {
2154 		pch_udc_disable_ep_interrupts(ep->dev,
2155 					      PCH_UDC_EPINT(ep->in, ep->num));
2156 	}
2157 }
2158 
2159 /**
2160  * pch_udc_complete_receiver() - This function completes a receive (OUT) transfer
2161  * @ep:		Reference to the endpoint structure
2162  */
2163 static void pch_udc_complete_receiver(struct pch_udc_ep *ep)
2164 {
2165 	struct pch_udc_request *req;
2166 	struct pch_udc_dev *dev = ep->dev;
2167 	unsigned int count;
2168 	struct pch_udc_data_dma_desc *td;
2169 	dma_addr_t addr;
2170 
2171 	if (list_empty(&ep->queue))
2172 		return;
2173 	/* next request */
2174 	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2175 	pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
2176 	pch_udc_ep_set_ddptr(ep, 0);
2177 	if ((req->td_data_last->status & PCH_UDC_BUFF_STS) ==
2178 	    PCH_UDC_BS_DMA_DONE)
2179 		td = req->td_data_last;
2180 	else
2181 		td = req->td_data;
2182 
2183 	while (1) {
2184 		if ((td->status & PCH_UDC_RXTX_STS) != PCH_UDC_RTS_SUCC) {
2185 			dev_err(&dev->pdev->dev, "Invalid RXTX status=0x%08x "
2186 				"epstatus=0x%08x\n",
2187 				(req->td_data->status & PCH_UDC_RXTX_STS),
2188 				(int)(ep->epsts));
2189 			return;
2190 		}
2191 		if ((td->status & PCH_UDC_BUFF_STS) == PCH_UDC_BS_DMA_DONE)
2192 			if (td->status & PCH_UDC_DMA_LAST) {
2193 				count = td->status & PCH_UDC_RXTX_BYTES;
2194 				break;
2195 			}
2196 		if (td == req->td_data_last) {
2197 			dev_err(&dev->pdev->dev, "Incomplete RX descriptor\n");
2198 			return;
2199 		}
2200 		addr = (dma_addr_t)td->next;
2201 		td = phys_to_virt(addr);
2202 	}
2203 	/* on 64k packets the RXBYTES field is zero */
2204 	if (!count && (req->req.length == UDC_DMA_MAXPACKET))
2205 		count = UDC_DMA_MAXPACKET;
2206 	req->td_data->status |= PCH_UDC_DMA_LAST;
2207 	td->status |= PCH_UDC_BS_HST_BSY;
2208 
2209 	req->dma_going = 0;
2210 	req->req.actual = count;
2211 	complete_req(ep, req, 0);
2212 	/* If there is a new or a failed request, start it now */
2213 	if (!list_empty(&ep->queue)) {
2214 		req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2215 		pch_udc_start_rxrequest(ep, req);
2216 	}
2217 }
2218 
2219 /**
2220  * pch_udc_svc_data_in() - This function processes endpoint interrupts
2221  *				for IN endpoints
2222  * @dev:	Reference to the device structure
2223  * @ep_num:	Endpoint that generated the interrupt
2224  */
2225 static void pch_udc_svc_data_in(struct pch_udc_dev *dev, int ep_num)
2226 {
2227 	u32	epsts;
2228 	struct pch_udc_ep	*ep;
2229 
2230 	ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
2231 	epsts = ep->epsts;
2232 	ep->epsts = 0;
2233 
2234 	if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA  | UDC_EPSTS_HE |
2235 		       UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
2236 		       UDC_EPSTS_RSS | UDC_EPSTS_XFERDONE)))
2237 		return;
2238 	if ((epsts & UDC_EPSTS_BNA))
2239 		return;
2240 	if (epsts & UDC_EPSTS_HE)
2241 		return;
2242 	if (epsts & UDC_EPSTS_RSS) {
2243 		pch_udc_ep_set_stall(ep);
2244 		pch_udc_enable_ep_interrupts(ep->dev,
2245 					     PCH_UDC_EPINT(ep->in, ep->num));
2246 	}
2247 	if (epsts & UDC_EPSTS_RCS) {
2248 		if (!dev->prot_stall) {
2249 			pch_udc_ep_clear_stall(ep);
2250 		} else {
2251 			pch_udc_ep_set_stall(ep);
2252 			pch_udc_enable_ep_interrupts(ep->dev,
2253 						PCH_UDC_EPINT(ep->in, ep->num));
2254 		}
2255 	}
2256 	if (epsts & UDC_EPSTS_TDC)
2257 		pch_udc_complete_transfer(ep);
2258 	/* On IN interrupt, provide data if we have any */
2259 	if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_RSS) &&
2260 	    !(epsts & UDC_EPSTS_TDC) && !(epsts & UDC_EPSTS_TXEMPTY))
2261 		pch_udc_start_next_txrequest(ep);
2262 }
2263 
2264 /**
2265  * pch_udc_svc_data_out() - Handles interrupts from OUT endpoint
2266  * @dev:	Reference to the device structure
2267  * @ep_num:	Endpoint that generated the interrupt
2268  */
2269 static void pch_udc_svc_data_out(struct pch_udc_dev *dev, int ep_num)
2270 {
2271 	u32			epsts;
2272 	struct pch_udc_ep		*ep;
2273 	struct pch_udc_request		*req = NULL;
2274 
2275 	ep = &dev->ep[UDC_EPOUT_IDX(ep_num)];
2276 	epsts = ep->epsts;
2277 	ep->epsts = 0;
2278 
2279 	if ((epsts & UDC_EPSTS_BNA) && (!list_empty(&ep->queue))) {
2280 		/* next request */
2281 		req = list_entry(ep->queue.next, struct pch_udc_request,
2282 				 queue);
2283 		if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
2284 		     PCH_UDC_BS_DMA_DONE) {
2285 			if (!req->dma_going)
2286 				pch_udc_start_rxrequest(ep, req);
2287 			return;
2288 		}
2289 	}
2290 	if (epsts & UDC_EPSTS_HE)
2291 		return;
2292 	if (epsts & UDC_EPSTS_RSS) {
2293 		pch_udc_ep_set_stall(ep);
2294 		pch_udc_enable_ep_interrupts(ep->dev,
2295 					     PCH_UDC_EPINT(ep->in, ep->num));
2296 	}
2297 	if (epsts & UDC_EPSTS_RCS) {
2298 		if (!dev->prot_stall) {
2299 			pch_udc_ep_clear_stall(ep);
2300 		} else {
2301 			pch_udc_ep_set_stall(ep);
2302 			pch_udc_enable_ep_interrupts(ep->dev,
2303 						PCH_UDC_EPINT(ep->in, ep->num));
2304 		}
2305 	}
2306 	if (((epsts & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2307 	    UDC_EPSTS_OUT_DATA) {
2308 		if (ep->dev->prot_stall == 1) {
2309 			pch_udc_ep_set_stall(ep);
2310 			pch_udc_enable_ep_interrupts(ep->dev,
2311 						PCH_UDC_EPINT(ep->in, ep->num));
2312 		} else {
2313 			pch_udc_complete_receiver(ep);
2314 		}
2315 	}
2316 	if (list_empty(&ep->queue))
2317 		pch_udc_set_dma(dev, DMA_DIR_RX);
2318 }
2319 
2320 /**
2321  * pch_udc_svc_control_in() - Handle Control IN endpoint interrupts
2322  * @dev:	Reference to the device structure
2323  */
2324 static void pch_udc_svc_control_in(struct pch_udc_dev *dev)
2325 {
2326 	u32	epsts;
2327 	struct pch_udc_ep	*ep;
2328 	struct pch_udc_ep	*ep_out;
2329 
2330 	ep = &dev->ep[UDC_EP0IN_IDX];
2331 	ep_out = &dev->ep[UDC_EP0OUT_IDX];
2332 	epsts = ep->epsts;
2333 	ep->epsts = 0;
2334 
2335 	if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA | UDC_EPSTS_HE |
2336 		       UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
2337 		       UDC_EPSTS_XFERDONE)))
2338 		return;
2339 	if ((epsts & UDC_EPSTS_BNA))
2340 		return;
2341 	if (epsts & UDC_EPSTS_HE)
2342 		return;
2343 	if ((epsts & UDC_EPSTS_TDC) && (!dev->stall)) {
2344 		pch_udc_complete_transfer(ep);
2345 		pch_udc_clear_dma(dev, DMA_DIR_RX);
2346 		ep_out->td_data->status = (ep_out->td_data->status &
2347 					~PCH_UDC_BUFF_STS) |
2348 					PCH_UDC_BS_HST_RDY;
2349 		pch_udc_ep_clear_nak(ep_out);
2350 		pch_udc_set_dma(dev, DMA_DIR_RX);
2351 		pch_udc_ep_set_rrdy(ep_out);
2352 	}
2353 	/* On IN interrupt, provide data if we have any */
2354 	if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_TDC) &&
2355 	     !(epsts & UDC_EPSTS_TXEMPTY))
2356 		pch_udc_start_next_txrequest(ep);
2357 }
2358 
2359 /**
2360  * pch_udc_svc_control_out() - Routine that handles Control
2361  *					OUT endpoint interrupts
2362  * @dev:	Reference to the device structure
2363  */
2364 static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
2365 	__releases(&dev->lock)
2366 	__acquires(&dev->lock)
2367 {
2368 	u32	stat;
2369 	int setup_supported;
2370 	struct pch_udc_ep	*ep;
2371 
2372 	ep = &dev->ep[UDC_EP0OUT_IDX];
2373 	stat = ep->epsts;
2374 	ep->epsts = 0;
2375 
2376 	/* If setup data */
2377 	if (((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2378 	    UDC_EPSTS_OUT_SETUP) {
2379 		dev->stall = 0;
2380 		dev->ep[UDC_EP0IN_IDX].halted = 0;
2381 		dev->ep[UDC_EP0OUT_IDX].halted = 0;
2382 		dev->setup_data = ep->td_stp->request;
2383 		pch_udc_init_setup_buff(ep->td_stp);
2384 		pch_udc_clear_dma(dev, DMA_DIR_RX);
2385 		pch_udc_ep_fifo_flush(&(dev->ep[UDC_EP0IN_IDX]),
2386 				      dev->ep[UDC_EP0IN_IDX].in);
2387 		if ((dev->setup_data.bRequestType & USB_DIR_IN))
2388 			dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
2389 		else /* OUT */
2390 			dev->gadget.ep0 = &ep->ep;
2391 		spin_lock(&dev->lock);
2392 		/* If Mass Storage class Bulk-Only Reset */
2393 		if ((dev->setup_data.bRequestType == 0x21) &&
2394 		    (dev->setup_data.bRequest == 0xFF))
2395 			dev->prot_stall = 0;
2396 		/* call gadget with setup data received */
2397 		setup_supported = dev->driver->setup(&dev->gadget,
2398 						     &dev->setup_data);
2399 		spin_unlock(&dev->lock);
2400 
2401 		if (dev->setup_data.bRequestType & USB_DIR_IN) {
2402 			ep->td_data->status = (ep->td_data->status &
2403 						~PCH_UDC_BUFF_STS) |
2404 						PCH_UDC_BS_HST_RDY;
2405 			pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
2406 		}
2407 		/* ep0 in returns data on IN phase */
2408 		if (setup_supported >= 0 && setup_supported <
2409 					    UDC_EP0IN_MAX_PKT_SIZE) {
2410 			pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
2411 			/* Gadget would have queued a request when
2412 			 * we called the setup */
2413 			if (!(dev->setup_data.bRequestType & USB_DIR_IN)) {
2414 				pch_udc_set_dma(dev, DMA_DIR_RX);
2415 				pch_udc_ep_clear_nak(ep);
2416 			}
2417 		} else if (setup_supported < 0) {
2418 			/* if unsupported request, then stall */
2419 			pch_udc_ep_set_stall(&(dev->ep[UDC_EP0IN_IDX]));
2420 			pch_udc_enable_ep_interrupts(ep->dev,
2421 						PCH_UDC_EPINT(ep->in, ep->num));
2422 			dev->stall = 0;
2423 			pch_udc_set_dma(dev, DMA_DIR_RX);
2424 		} else {
2425 			dev->waiting_zlp_ack = 1;
2426 		}
2427 	} else if ((((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2428 		     UDC_EPSTS_OUT_DATA) && !dev->stall) {
2429 		pch_udc_clear_dma(dev, DMA_DIR_RX);
2430 		pch_udc_ep_set_ddptr(ep, 0);
2431 		if (!list_empty(&ep->queue)) {
2432 			ep->epsts = stat;
2433 			pch_udc_svc_data_out(dev, PCH_UDC_EP0);
2434 		}
2435 		pch_udc_set_dma(dev, DMA_DIR_RX);
2436 	}
2437 	pch_udc_ep_set_rrdy(ep);
2438 }
2439 
2440 
2441 /**
2442  * pch_udc_postsvc_epinters() - This function enables endpoint interrupts
2443  *				and clears NAK status
2444  * @dev:	Reference to the device structure
2445  * @ep_num:	End point number
2446  */
2447 static void pch_udc_postsvc_epinters(struct pch_udc_dev *dev, int ep_num)
2448 {
2449 	struct pch_udc_ep	*ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
2450 	if (list_empty(&ep->queue))
2451 		return;
2452 	pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
2453 	pch_udc_ep_clear_nak(ep);
2454 }
2455 
2456 /**
2457  * pch_udc_read_all_epstatus() - This function reads the status of all endpoints
2458  * @dev:	Reference to the device structure
2459  * @ep_intr:	Status of endpoint interrupt
2460  */
2461 static void pch_udc_read_all_epstatus(struct pch_udc_dev *dev, u32 ep_intr)
2462 {
2463 	int i;
2464 	struct pch_udc_ep	*ep;
2465 
2466 	for (i = 0; i < PCH_UDC_USED_EP_NUM; i++) {
2467 		/* IN */
2468 		if (ep_intr & (0x1 << i)) {
2469 			ep = &dev->ep[UDC_EPIN_IDX(i)];
2470 			ep->epsts = pch_udc_read_ep_status(ep);
2471 			pch_udc_clear_ep_status(ep, ep->epsts);
2472 		}
2473 		/* OUT */
2474 		if (ep_intr & (0x10000 << i)) {
2475 			ep = &dev->ep[UDC_EPOUT_IDX(i)];
2476 			ep->epsts = pch_udc_read_ep_status(ep);
2477 			pch_udc_clear_ep_status(ep, ep->epsts);
2478 		}
2479 	}
2480 }
2481 
2482 /**
2483  * pch_udc_activate_control_ep() - This function enables the control endpoints
2484  *					for traffic after a reset
2485  * @dev:	Reference to the device structure
2486  */
2487 static void pch_udc_activate_control_ep(struct pch_udc_dev *dev)
2488 {
2489 	struct pch_udc_ep	*ep;
2490 	u32 val;
2491 
2492 	/* Setup the IN endpoint */
2493 	ep = &dev->ep[UDC_EP0IN_IDX];
2494 	pch_udc_clear_ep_control(ep);
2495 	pch_udc_ep_fifo_flush(ep, ep->in);
2496 	pch_udc_ep_set_bufsz(ep, UDC_EP0IN_BUFF_SIZE, ep->in);
2497 	pch_udc_ep_set_maxpkt(ep, UDC_EP0IN_MAX_PKT_SIZE);
2498 	/* Initialize the IN EP Descriptor */
2499 	ep->td_data      = NULL;
2500 	ep->td_stp       = NULL;
2501 	ep->td_data_phys = 0;
2502 	ep->td_stp_phys  = 0;
2503 
2504 	/* Setup the OUT endpoint */
2505 	ep = &dev->ep[UDC_EP0OUT_IDX];
2506 	pch_udc_clear_ep_control(ep);
2507 	pch_udc_ep_fifo_flush(ep, ep->in);
2508 	pch_udc_ep_set_bufsz(ep, UDC_EP0OUT_BUFF_SIZE, ep->in);
2509 	pch_udc_ep_set_maxpkt(ep, UDC_EP0OUT_MAX_PKT_SIZE);
2510 	val = UDC_EP0OUT_MAX_PKT_SIZE << UDC_CSR_NE_MAX_PKT_SHIFT;
2511 	pch_udc_write_csr(ep->dev, val, UDC_EP0OUT_IDX);
2512 
2513 	/* Initialize the SETUP buffer */
2514 	pch_udc_init_setup_buff(ep->td_stp);
2515 	/* Write the pointer address of the SETUP descriptor */
2516 	pch_udc_ep_set_subptr(ep, ep->td_stp_phys);
2517 	/* Write the pointer address of the data DMA descriptor */
2518 	pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
2519 
2520 	/* Initialize the dma descriptor */
2521 	ep->td_data->status  = PCH_UDC_DMA_LAST;
2522 	ep->td_data->dataptr = dev->dma_addr;
2523 	ep->td_data->next    = ep->td_data_phys;
2524 
2525 	pch_udc_ep_clear_nak(ep);
2526 }
2527 
2528 
2529 /**
2530  * pch_udc_svc_ur_interrupt() - This function handles a USB reset interrupt
2531  * @dev:	Reference to driver structure
2532  */
2533 static void pch_udc_svc_ur_interrupt(struct pch_udc_dev *dev)
2534 {
2535 	struct pch_udc_ep	*ep;
2536 	int i;
2537 
2538 	pch_udc_clear_dma(dev, DMA_DIR_TX);
2539 	pch_udc_clear_dma(dev, DMA_DIR_RX);
2540 	/* Mask all endpoint interrupts */
2541 	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
2542 	/* clear all endpoint interrupts */
2543 	pch_udc_write_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
2544 
2545 	for (i = 0; i < PCH_UDC_EP_NUM; i++) {
2546 		ep = &dev->ep[i];
2547 		pch_udc_clear_ep_status(ep, UDC_EPSTS_ALL_CLR_MASK);
2548 		pch_udc_clear_ep_control(ep);
2549 		pch_udc_ep_set_ddptr(ep, 0);
2550 		pch_udc_write_csr(ep->dev, 0x00, i);
2551 	}
2552 	dev->stall = 0;
2553 	dev->prot_stall = 0;
2554 	dev->waiting_zlp_ack = 0;
2555 	dev->set_cfg_not_acked = 0;
2556 
2557 	/* disable ep to empty req queue. Skip the control EP's */
2558 	for (i = 0; i < (PCH_UDC_USED_EP_NUM*2); i++) {
2559 		ep = &dev->ep[i];
2560 		pch_udc_ep_set_nak(ep);
2561 		pch_udc_ep_fifo_flush(ep, ep->in);
2562 		/* Complete request queue */
2563 		empty_req_queue(ep);
2564 	}
2565 	if (dev->driver) {
2566 		spin_unlock(&dev->lock);
2567 		usb_gadget_udc_reset(&dev->gadget, dev->driver);
2568 		spin_lock(&dev->lock);
2569 	}
2570 }
2571 
2572 /**
2573  * pch_udc_svc_enum_interrupt() - This function handles a USB speed enumeration
2574  *				done interrupt
2575  * @dev:	Reference to driver structure
2576  */
2577 static void pch_udc_svc_enum_interrupt(struct pch_udc_dev *dev)
2578 {
2579 	u32 dev_stat, dev_speed;
2580 	u32 speed = USB_SPEED_FULL;
2581 
2582 	dev_stat = pch_udc_read_device_status(dev);
2583 	dev_speed = (dev_stat & UDC_DEVSTS_ENUM_SPEED_MASK) >>
2584 						 UDC_DEVSTS_ENUM_SPEED_SHIFT;
2585 	switch (dev_speed) {
2586 	case UDC_DEVSTS_ENUM_SPEED_HIGH:
2587 		speed = USB_SPEED_HIGH;
2588 		break;
2589 	case  UDC_DEVSTS_ENUM_SPEED_FULL:
2590 		speed = USB_SPEED_FULL;
2591 		break;
2592 	case  UDC_DEVSTS_ENUM_SPEED_LOW:
2593 		speed = USB_SPEED_LOW;
2594 		break;
2595 	default:
2596 		BUG();
2597 	}
2598 	dev->gadget.speed = speed;
2599 	pch_udc_activate_control_ep(dev);
2600 	pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 | UDC_EPINT_OUT_EP0);
2601 	pch_udc_set_dma(dev, DMA_DIR_TX);
2602 	pch_udc_set_dma(dev, DMA_DIR_RX);
2603 	pch_udc_ep_set_rrdy(&(dev->ep[UDC_EP0OUT_IDX]));
2604 
2605 	/* enable device interrupts */
2606 	pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
2607 					UDC_DEVINT_ES | UDC_DEVINT_ENUM |
2608 					UDC_DEVINT_SI | UDC_DEVINT_SC);
2609 }
2610 
2611 /**
2612  * pch_udc_svc_intf_interrupt() - This function handles a set interface
2613  *				  interrupt
2614  * @dev:	Reference to driver structure
2615  */
2616 static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev)
2617 {
2618 	u32 reg, dev_stat = 0;
2619 	int i;
2620 
2621 	dev_stat = pch_udc_read_device_status(dev);
2622 	dev->cfg_data.cur_intf = (dev_stat & UDC_DEVSTS_INTF_MASK) >>
2623 							 UDC_DEVSTS_INTF_SHIFT;
2624 	dev->cfg_data.cur_alt = (dev_stat & UDC_DEVSTS_ALT_MASK) >>
2625 							 UDC_DEVSTS_ALT_SHIFT;
2626 	dev->set_cfg_not_acked = 1;
2627 	/* Construct the usb request for gadget driver and inform it */
2628 	memset(&dev->setup_data, 0 , sizeof dev->setup_data);
2629 	dev->setup_data.bRequest = USB_REQ_SET_INTERFACE;
2630 	dev->setup_data.bRequestType = USB_RECIP_INTERFACE;
2631 	dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_alt);
2632 	dev->setup_data.wIndex = cpu_to_le16(dev->cfg_data.cur_intf);
2633 	/* program the Endpoint Cfg registers */
2634 	/* Only one end point cfg register */
2635 	reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
2636 	reg = (reg & ~UDC_CSR_NE_INTF_MASK) |
2637 	      (dev->cfg_data.cur_intf << UDC_CSR_NE_INTF_SHIFT);
2638 	reg = (reg & ~UDC_CSR_NE_ALT_MASK) |
2639 	      (dev->cfg_data.cur_alt << UDC_CSR_NE_ALT_SHIFT);
2640 	pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
2641 	for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
2642 		/* clear stall bits */
2643 		pch_udc_ep_clear_stall(&(dev->ep[i]));
2644 		dev->ep[i].halted = 0;
2645 	}
2646 	dev->stall = 0;
2647 	spin_unlock(&dev->lock);
2648 	dev->driver->setup(&dev->gadget, &dev->setup_data);
2649 	spin_lock(&dev->lock);
2650 }
2651 
2652 /**
2653  * pch_udc_svc_cfg_interrupt() - This function handles a set configuration
2654  *				interrupt
2655  * @dev:	Reference to driver structure
2656  */
2657 static void pch_udc_svc_cfg_interrupt(struct pch_udc_dev *dev)
2658 {
2659 	int i;
2660 	u32 reg, dev_stat = 0;
2661 
2662 	dev_stat = pch_udc_read_device_status(dev);
2663 	dev->set_cfg_not_acked = 1;
2664 	dev->cfg_data.cur_cfg = (dev_stat & UDC_DEVSTS_CFG_MASK) >>
2665 				UDC_DEVSTS_CFG_SHIFT;
2666 	/* make usb request for gadget driver */
2667 	memset(&dev->setup_data, 0 , sizeof dev->setup_data);
2668 	dev->setup_data.bRequest = USB_REQ_SET_CONFIGURATION;
2669 	dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_cfg);
2670 	/* program the NE registers */
2671 	/* Only one end point cfg register */
2672 	reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
2673 	reg = (reg & ~UDC_CSR_NE_CFG_MASK) |
2674 	      (dev->cfg_data.cur_cfg << UDC_CSR_NE_CFG_SHIFT);
2675 	pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
2676 	for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
2677 		/* clear stall bits */
2678 		pch_udc_ep_clear_stall(&(dev->ep[i]));
2679 		dev->ep[i].halted = 0;
2680 	}
2681 	dev->stall = 0;
2682 
2683 	/* call gadget zero with setup data received */
2684 	spin_unlock(&dev->lock);
2685 	dev->driver->setup(&dev->gadget, &dev->setup_data);
2686 	spin_lock(&dev->lock);
2687 }
2688 
2689 /**
2690  * pch_udc_dev_isr() - This function services device interrupts
2691  *			by invoking appropriate routines.
2692  * @dev:	Reference to the device structure
2693  * @dev_intr:	The Device interrupt status.
2694  */
2695 static void pch_udc_dev_isr(struct pch_udc_dev *dev, u32 dev_intr)
2696 {
2697 	int vbus;
2698 
2699 	/* USB Reset Interrupt */
2700 	if (dev_intr & UDC_DEVINT_UR) {
2701 		pch_udc_svc_ur_interrupt(dev);
2702 		dev_dbg(&dev->pdev->dev, "USB_RESET\n");
2703 	}
2704 	/* Enumeration Done Interrupt */
2705 	if (dev_intr & UDC_DEVINT_ENUM) {
2706 		pch_udc_svc_enum_interrupt(dev);
2707 		dev_dbg(&dev->pdev->dev, "USB_ENUM\n");
2708 	}
2709 	/* Set Interface Interrupt */
2710 	if (dev_intr & UDC_DEVINT_SI)
2711 		pch_udc_svc_intf_interrupt(dev);
2712 	/* Set Config Interrupt */
2713 	if (dev_intr & UDC_DEVINT_SC)
2714 		pch_udc_svc_cfg_interrupt(dev);
2715 	/* USB Suspend interrupt */
2716 	if (dev_intr & UDC_DEVINT_US) {
2717 		if (dev->driver
2718 			&& dev->driver->suspend) {
2719 			spin_unlock(&dev->lock);
2720 			dev->driver->suspend(&dev->gadget);
2721 			spin_lock(&dev->lock);
2722 		}
2723 
2724 		vbus = pch_vbus_gpio_get_value(dev);
2725 		if ((dev->vbus_session == 0)
2726 			&& (vbus != 1)) {
2727 			if (dev->driver && dev->driver->disconnect) {
2728 				spin_unlock(&dev->lock);
2729 				dev->driver->disconnect(&dev->gadget);
2730 				spin_lock(&dev->lock);
2731 			}
2732 			pch_udc_reconnect(dev);
2733 		} else if ((dev->vbus_session == 0)
2734 			&& (vbus == 1)
2735 			&& !dev->vbus_gpio.intr)
2736 			schedule_work(&dev->vbus_gpio.irq_work_fall);
2737 
2738 		dev_dbg(&dev->pdev->dev, "USB_SUSPEND\n");
2739 	}
2740 	/* Clear the SOF interrupt, if enabled */
2741 	if (dev_intr & UDC_DEVINT_SOF)
2742 		dev_dbg(&dev->pdev->dev, "SOF\n");
2743 	/* ES interrupt, IDLE > 3ms on the USB */
2744 	if (dev_intr & UDC_DEVINT_ES)
2745 		dev_dbg(&dev->pdev->dev, "ES\n");
2746 	/* RWKP interrupt */
2747 	if (dev_intr & UDC_DEVINT_RWKP)
2748 		dev_dbg(&dev->pdev->dev, "RWKP\n");
2749 }
2750 
2751 /**
2752  * pch_udc_isr() - This function handles interrupts from the PCH USB Device
2753  * @irq:	Interrupt request number
2754  * @pdev:	Reference to the device structure
2755  */
2756 static irqreturn_t pch_udc_isr(int irq, void *pdev)
2757 {
2758 	struct pch_udc_dev *dev = (struct pch_udc_dev *) pdev;
2759 	u32 dev_intr, ep_intr;
2760 	int i;
2761 
2762 	dev_intr = pch_udc_read_device_interrupts(dev);
2763 	ep_intr = pch_udc_read_ep_interrupts(dev);
2764 
2765 	/* On a hot plug, detect whether the controller is hung up. */
2766 	if (dev_intr == ep_intr)
2767 		if (dev_intr == pch_udc_readl(dev, UDC_DEVCFG_ADDR)) {
2768 			dev_dbg(&dev->pdev->dev, "UDC: Hung up\n");
2769 			/* The controller is reset */
2770 			pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
2771 			return IRQ_HANDLED;
2772 		}
2773 	if (dev_intr)
2774 		/* Clear device interrupts */
2775 		pch_udc_write_device_interrupts(dev, dev_intr);
2776 	if (ep_intr)
2777 		/* Clear ep interrupts */
2778 		pch_udc_write_ep_interrupts(dev, ep_intr);
2779 	if (!dev_intr && !ep_intr)
2780 		return IRQ_NONE;
2781 	spin_lock(&dev->lock);
2782 	if (dev_intr)
2783 		pch_udc_dev_isr(dev, dev_intr);
2784 	if (ep_intr) {
2785 		pch_udc_read_all_epstatus(dev, ep_intr);
2786 		/* Process Control In interrupts, if present */
2787 		if (ep_intr & UDC_EPINT_IN_EP0) {
2788 			pch_udc_svc_control_in(dev);
2789 			pch_udc_postsvc_epinters(dev, 0);
2790 		}
2791 		/* Process Control Out interrupts, if present */
2792 		if (ep_intr & UDC_EPINT_OUT_EP0)
2793 			pch_udc_svc_control_out(dev);
2794 		/* Process data in end point interrupts */
2795 		for (i = 1; i < PCH_UDC_USED_EP_NUM; i++) {
2796 			if (ep_intr & (1 <<  i)) {
2797 				pch_udc_svc_data_in(dev, i);
2798 				pch_udc_postsvc_epinters(dev, i);
2799 			}
2800 		}
2801 		/* Process data out end point interrupts */
2802 		for (i = UDC_EPINT_OUT_SHIFT + 1; i < (UDC_EPINT_OUT_SHIFT +
2803 						 PCH_UDC_USED_EP_NUM); i++)
2804 			if (ep_intr & (1 <<  i))
2805 				pch_udc_svc_data_out(dev, i -
2806 							 UDC_EPINT_OUT_SHIFT);
2807 	}
2808 	spin_unlock(&dev->lock);
2809 	return IRQ_HANDLED;
2810 }
2811 
2812 /**
2813  * pch_udc_setup_ep0() - This function enables control endpoint for traffic
2814  * @dev:	Reference to the device structure
2815  */
2816 static void pch_udc_setup_ep0(struct pch_udc_dev *dev)
2817 {
2818 	/* enable ep0 interrupts */
2819 	pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 |
2820 						UDC_EPINT_OUT_EP0);
2821 	/* enable device interrupts */
2822 	pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
2823 				       UDC_DEVINT_ES | UDC_DEVINT_ENUM |
2824 				       UDC_DEVINT_SI | UDC_DEVINT_SC);
2825 }
2826 
2827 /**
2828  * pch_udc_pcd_reinit() - This API initializes the endpoint structures
2829  * @dev:	Reference to the driver structure
2830  */
2831 static void pch_udc_pcd_reinit(struct pch_udc_dev *dev)
2832 {
2833 	const char *const ep_string[] = {
2834 		ep0_string, "ep0out", "ep1in", "ep1out", "ep2in", "ep2out",
2835 		"ep3in", "ep3out", "ep4in", "ep4out", "ep5in", "ep5out",
2836 		"ep6in", "ep6out", "ep7in", "ep7out", "ep8in", "ep8out",
2837 		"ep9in", "ep9out", "ep10in", "ep10out", "ep11in", "ep11out",
2838 		"ep12in", "ep12out", "ep13in", "ep13out", "ep14in", "ep14out",
2839 		"ep15in", "ep15out",
2840 	};
2841 	int i;
2842 
2843 	dev->gadget.speed = USB_SPEED_UNKNOWN;
2844 	INIT_LIST_HEAD(&dev->gadget.ep_list);
2845 
2846 	/* Initialize the endpoints structures */
2847 	memset(dev->ep, 0, sizeof dev->ep);
2848 	for (i = 0; i < PCH_UDC_EP_NUM; i++) {
2849 		struct pch_udc_ep *ep = &dev->ep[i];
2850 		ep->dev = dev;
2851 		ep->halted = 1;
2852 		ep->num = i / 2;
2853 		ep->in = ~i & 1;
2854 		ep->ep.name = ep_string[i];
2855 		ep->ep.ops = &pch_udc_ep_ops;
2856 		if (ep->in) {
2857 			ep->offset_addr = ep->num * UDC_EP_REG_SHIFT;
2858 			ep->ep.caps.dir_in = true;
2859 		} else {
2860 			ep->offset_addr = (UDC_EPINT_OUT_SHIFT + ep->num) *
2861 					  UDC_EP_REG_SHIFT;
2862 			ep->ep.caps.dir_out = true;
2863 		}
2864 		if (i == UDC_EP0IN_IDX || i == UDC_EP0OUT_IDX) {
2865 			ep->ep.caps.type_control = true;
2866 		} else {
2867 			ep->ep.caps.type_iso = true;
2868 			ep->ep.caps.type_bulk = true;
2869 			ep->ep.caps.type_int = true;
2870 		}
2871 		/* need to set ep->ep.maxpacket and set Default Configuration?*/
2872 		usb_ep_set_maxpacket_limit(&ep->ep, UDC_BULK_MAX_PKT_SIZE);
2873 		list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
2874 		INIT_LIST_HEAD(&ep->queue);
2875 	}
2876 	usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IDX].ep, UDC_EP0IN_MAX_PKT_SIZE);
2877 	usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IDX].ep, UDC_EP0OUT_MAX_PKT_SIZE);
2878 
2879 	/* remove ep0 in and out from the list; they have their own pointers */
2880 	list_del_init(&dev->ep[UDC_EP0IN_IDX].ep.ep_list);
2881 	list_del_init(&dev->ep[UDC_EP0OUT_IDX].ep.ep_list);
2882 
2883 	dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
2884 	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
2885 }
2886 
2887 /**
2888  * pch_udc_pcd_init() - This API initializes the driver structure
2889  * @dev:	Reference to the driver structure
2890  *
2891  * Return codes:
2892  *	0: Success
2893  */
2894 static int pch_udc_pcd_init(struct pch_udc_dev *dev)
2895 {
2896 	pch_udc_init(dev);
2897 	pch_udc_pcd_reinit(dev);
2898 	pch_vbus_gpio_init(dev, vbus_gpio_port);
2899 	return 0;
2900 }
2901 
2902 /**
2903  * init_dma_pools() - create dma pools during initialization
2904  * @dev:	Reference to the driver structure
2905  */
2906 static int init_dma_pools(struct pch_udc_dev *dev)
2907 {
2908 	struct pch_udc_stp_dma_desc	*td_stp;
2909 	struct pch_udc_data_dma_desc	*td_data;
2910 	void				*ep0out_buf;
2911 
2912 	/* DMA setup */
2913 	dev->data_requests = dma_pool_create("data_requests", &dev->pdev->dev,
2914 		sizeof(struct pch_udc_data_dma_desc), 0, 0);
2915 	if (!dev->data_requests) {
2916 		dev_err(&dev->pdev->dev, "%s: can't get request data pool\n",
2917 			__func__);
2918 		return -ENOMEM;
2919 	}
2920 
2921 	/* dma desc for setup data */
2922 	dev->stp_requests = dma_pool_create("setup requests", &dev->pdev->dev,
2923 		sizeof(struct pch_udc_stp_dma_desc), 0, 0);
2924 	if (!dev->stp_requests) {
2925 		dev_err(&dev->pdev->dev, "%s: can't get setup request pool\n",
2926 			__func__);
2927 		return -ENOMEM;
2928 	}
2929 	/* setup */
2930 	td_stp = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
2931 				&dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
2932 	if (!td_stp) {
2933 		dev_err(&dev->pdev->dev,
2934 			"%s: can't allocate setup dma descriptor\n", __func__);
2935 		return -ENOMEM;
2936 	}
2937 	dev->ep[UDC_EP0OUT_IDX].td_stp = td_stp;
2938 
2939 	/* data: 0 packets !? */
2940 	td_data = dma_pool_alloc(dev->data_requests, GFP_KERNEL,
2941 				&dev->ep[UDC_EP0OUT_IDX].td_data_phys);
2942 	if (!td_data) {
2943 		dev_err(&dev->pdev->dev,
2944 			"%s: can't allocate data dma descriptor\n", __func__);
2945 		return -ENOMEM;
2946 	}
2947 	dev->ep[UDC_EP0OUT_IDX].td_data = td_data;
2948 	dev->ep[UDC_EP0IN_IDX].td_stp = NULL;
2949 	dev->ep[UDC_EP0IN_IDX].td_stp_phys = 0;
2950 	dev->ep[UDC_EP0IN_IDX].td_data = NULL;
2951 	dev->ep[UDC_EP0IN_IDX].td_data_phys = 0;
2952 
2953 	ep0out_buf = devm_kzalloc(&dev->pdev->dev, UDC_EP0OUT_BUFF_SIZE * 4,
2954 				  GFP_KERNEL);
2955 	if (!ep0out_buf)
2956 		return -ENOMEM;
2957 	dev->dma_addr = dma_map_single(&dev->pdev->dev, ep0out_buf,
2958 				       UDC_EP0OUT_BUFF_SIZE * 4,
2959 				       DMA_FROM_DEVICE);
2960 	return 0;
2961 }
2962 
2963 static int pch_udc_start(struct usb_gadget *g,
2964 		struct usb_gadget_driver *driver)
2965 {
2966 	struct pch_udc_dev	*dev = to_pch_udc(g);
2967 
2968 	driver->driver.bus = NULL;
2969 	dev->driver = driver;
2970 
2971 	/* get ready for ep0 traffic */
2972 	pch_udc_setup_ep0(dev);
2973 
2974 	/* clear SD */
2975 	if ((pch_vbus_gpio_get_value(dev) != 0) || !dev->vbus_gpio.intr)
2976 		pch_udc_clear_disconnect(dev);
2977 
2978 	dev->connected = 1;
2979 	return 0;
2980 }
2981 
2982 static int pch_udc_stop(struct usb_gadget *g)
2983 {
2984 	struct pch_udc_dev	*dev = to_pch_udc(g);
2985 
2986 	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
2987 
2988 	/* Ensures that there are no pending requests with this driver */
2989 	dev->driver = NULL;
2990 	dev->connected = 0;
2991 
2992 	/* set SD */
2993 	pch_udc_set_disconnect(dev);
2994 
2995 	return 0;
2996 }
2997 
2998 static void pch_udc_shutdown(struct pci_dev *pdev)
2999 {
3000 	struct pch_udc_dev *dev = pci_get_drvdata(pdev);
3001 
3002 	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
3003 	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
3004 
3005 	/* disable the pullup so the host will think we're gone */
3006 	pch_udc_set_disconnect(dev);
3007 }
3008 
3009 static void pch_udc_remove(struct pci_dev *pdev)
3010 {
3011 	struct pch_udc_dev	*dev = pci_get_drvdata(pdev);
3012 
3013 	usb_del_gadget_udc(&dev->gadget);
3014 
3015 	/* gadget driver must not be registered */
3016 	if (dev->driver)
3017 		dev_err(&pdev->dev,
3018 			"%s: gadget driver still bound!!!\n", __func__);
3019 	/* dma pool cleanup */
3020 	dma_pool_destroy(dev->data_requests);
3021 
3022 	if (dev->stp_requests) {
3023 		/* cleanup DMA descriptors for ep0out */
3024 		if (dev->ep[UDC_EP0OUT_IDX].td_stp) {
3025 			dma_pool_free(dev->stp_requests,
3026 				dev->ep[UDC_EP0OUT_IDX].td_stp,
3027 				dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
3028 		}
3029 		if (dev->ep[UDC_EP0OUT_IDX].td_data) {
3030 			dma_pool_free(dev->stp_requests,
3031 				dev->ep[UDC_EP0OUT_IDX].td_data,
3032 				dev->ep[UDC_EP0OUT_IDX].td_data_phys);
3033 		}
3034 		dma_pool_destroy(dev->stp_requests);
3035 	}
3036 
3037 	if (dev->dma_addr)
3038 		dma_unmap_single(&dev->pdev->dev, dev->dma_addr,
3039 				 UDC_EP0OUT_BUFF_SIZE * 4, DMA_FROM_DEVICE);
3040 
3041 	pch_vbus_gpio_free(dev);
3042 
3043 	pch_udc_exit(dev);
3044 }
3045 
3046 #ifdef CONFIG_PM_SLEEP
3047 static int pch_udc_suspend(struct device *d)
3048 {
3049 	struct pci_dev *pdev = to_pci_dev(d);
3050 	struct pch_udc_dev *dev = pci_get_drvdata(pdev);
3051 
3052 	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
3053 	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
3054 
3055 	return 0;
3056 }
3057 
3058 static int pch_udc_resume(struct device *d)
3059 {
3060 	return 0;
3061 }
3062 
3063 static SIMPLE_DEV_PM_OPS(pch_udc_pm, pch_udc_suspend, pch_udc_resume);
3064 #define PCH_UDC_PM_OPS		(&pch_udc_pm)
3065 #else
3066 #define PCH_UDC_PM_OPS		NULL
3067 #endif /* CONFIG_PM_SLEEP */
3068 
3069 static int pch_udc_probe(struct pci_dev *pdev,
3070 			  const struct pci_device_id *id)
3071 {
3072 	int			bar;
3073 	int			retval;
3074 	struct pch_udc_dev	*dev;
3075 
3076 	/* init */
3077 	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
3078 	if (!dev)
3079 		return -ENOMEM;
3080 
3081 	/* pci setup */
3082 	retval = pcim_enable_device(pdev);
3083 	if (retval)
3084 		return retval;
3085 
3086 	pci_set_drvdata(pdev, dev);
3087 
3088 	/* Determine BAR based on PCI ID */
3089 	if (id->device == PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC)
3090 		bar = PCH_UDC_PCI_BAR_QUARK_X1000;
3091 	else
3092 		bar = PCH_UDC_PCI_BAR;
3093 
3094 	/* PCI resource allocation */
3095 	retval = pcim_iomap_regions(pdev, 1 << bar, pci_name(pdev));
3096 	if (retval)
3097 		return retval;
3098 
3099 	dev->base_addr = pcim_iomap_table(pdev)[bar];
3100 
3101 	/* initialize the hardware */
3102 	if (pch_udc_pcd_init(dev))
3103 		return -ENODEV;
3104 
3105 	pci_enable_msi(pdev);
3106 
3107 	retval = devm_request_irq(&pdev->dev, pdev->irq, pch_udc_isr,
3108 				  IRQF_SHARED, KBUILD_MODNAME, dev);
3109 	if (retval) {
3110 		dev_err(&pdev->dev, "%s: request_irq(%d) fail\n", __func__,
3111 			pdev->irq);
3112 		goto finished;
3113 	}
3114 
3115 	pci_set_master(pdev);
3116 	pci_try_set_mwi(pdev);
3117 
3118 	/* device struct setup */
3119 	spin_lock_init(&dev->lock);
3120 	dev->pdev = pdev;
3121 	dev->gadget.ops = &pch_udc_ops;
3122 
3123 	retval = init_dma_pools(dev);
3124 	if (retval)
3125 		goto finished;
3126 
3127 	dev->gadget.name = KBUILD_MODNAME;
3128 	dev->gadget.max_speed = USB_SPEED_HIGH;
3129 
3130 	/* Put the device in disconnected state till a driver is bound */
3131 	pch_udc_set_disconnect(dev);
3132 	retval = usb_add_gadget_udc(&pdev->dev, &dev->gadget);
3133 	if (retval)
3134 		goto finished;
3135 	return 0;
3136 
3137 finished:
3138 	pch_udc_remove(pdev);
3139 	return retval;
3140 }
3141 
3142 static const struct pci_device_id pch_udc_pcidev_id[] = {
3143 	{
3144 		PCI_DEVICE(PCI_VENDOR_ID_INTEL,
3145 			   PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC),
3146 		.class = PCI_CLASS_SERIAL_USB_DEVICE,
3147 		.class_mask = 0xffffffff,
3148 	},
3149 	{
3150 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EG20T_UDC),
3151 		.class = PCI_CLASS_SERIAL_USB_DEVICE,
3152 		.class_mask = 0xffffffff,
3153 	},
3154 	{
3155 		PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7213_IOH_UDC),
3156 		.class = PCI_CLASS_SERIAL_USB_DEVICE,
3157 		.class_mask = 0xffffffff,
3158 	},
3159 	{
3160 		PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7831_IOH_UDC),
3161 		.class = PCI_CLASS_SERIAL_USB_DEVICE,
3162 		.class_mask = 0xffffffff,
3163 	},
3164 	{ 0 },
3165 };
3166 
3167 MODULE_DEVICE_TABLE(pci, pch_udc_pcidev_id);
3168 
3169 static struct pci_driver pch_udc_driver = {
3170 	.name =	KBUILD_MODNAME,
3171 	.id_table =	pch_udc_pcidev_id,
3172 	.probe =	pch_udc_probe,
3173 	.remove =	pch_udc_remove,
3174 	.shutdown =	pch_udc_shutdown,
3175 	.driver = {
3176 		.pm = PCH_UDC_PM_OPS,
3177 	},
3178 };
3179 
3180 module_pci_driver(pch_udc_driver);
3181 
3182 MODULE_DESCRIPTION("Intel EG20T USB Device Controller");
3183 MODULE_AUTHOR("LAPIS Semiconductor, <tomoya-linux@dsn.lapis-semi.com>");
3184 MODULE_LICENSE("GPL");
3185