xref: /openbmc/linux/drivers/usb/gadget/udc/pch_udc.c (revision 8c749ce9)
1 /*
2  * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; version 2 of the License.
7  */
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/pci.h>
12 #include <linux/delay.h>
13 #include <linux/errno.h>
14 #include <linux/list.h>
15 #include <linux/interrupt.h>
16 #include <linux/usb/ch9.h>
17 #include <linux/usb/gadget.h>
18 #include <linux/gpio.h>
19 #include <linux/irq.h>
20 
21 /* GPIO port for VBUS detection */
22 static int vbus_gpio_port = -1;		/* GPIO port number (-1:Not used) */
23 
24 #define PCH_VBUS_PERIOD		3000	/* VBUS polling period (msec) */
25 #define PCH_VBUS_INTERVAL	10	/* VBUS polling interval (msec) */
26 
27 /* Address offset of Registers */
28 #define UDC_EP_REG_SHIFT	0x20	/* Offset to next EP */
29 
30 #define UDC_EPCTL_ADDR		0x00	/* Endpoint control */
31 #define UDC_EPSTS_ADDR		0x04	/* Endpoint status */
32 #define UDC_BUFIN_FRAMENUM_ADDR	0x08	/* buffer size in / frame number out */
33 #define UDC_BUFOUT_MAXPKT_ADDR	0x0C	/* buffer size out / maxpkt in */
34 #define UDC_SUBPTR_ADDR		0x10	/* setup buffer pointer */
35 #define UDC_DESPTR_ADDR		0x14	/* Data descriptor pointer */
36 #define UDC_CONFIRM_ADDR	0x18	/* Write/Read confirmation */
37 
38 #define UDC_DEVCFG_ADDR		0x400	/* Device configuration */
39 #define UDC_DEVCTL_ADDR		0x404	/* Device control */
40 #define UDC_DEVSTS_ADDR		0x408	/* Device status */
41 #define UDC_DEVIRQSTS_ADDR	0x40C	/* Device irq status */
42 #define UDC_DEVIRQMSK_ADDR	0x410	/* Device irq mask */
43 #define UDC_EPIRQSTS_ADDR	0x414	/* Endpoint irq status */
44 #define UDC_EPIRQMSK_ADDR	0x418	/* Endpoint irq mask */
45 #define UDC_DEVLPM_ADDR		0x41C	/* LPM control / status */
46 #define UDC_CSR_BUSY_ADDR	0x4f0	/* UDC_CSR_BUSY Status register */
47 #define UDC_SRST_ADDR		0x4fc	/* SOFT RESET register */
48 #define UDC_CSR_ADDR		0x500	/* USB_DEVICE endpoint register */
49 
50 /* Endpoint control register */
51 /* Bit position */
52 #define UDC_EPCTL_MRXFLUSH		(1 << 12)
53 #define UDC_EPCTL_RRDY			(1 << 9)
54 #define UDC_EPCTL_CNAK			(1 << 8)
55 #define UDC_EPCTL_SNAK			(1 << 7)
56 #define UDC_EPCTL_NAK			(1 << 6)
57 #define UDC_EPCTL_P			(1 << 3)
58 #define UDC_EPCTL_F			(1 << 1)
59 #define UDC_EPCTL_S			(1 << 0)
60 #define UDC_EPCTL_ET_SHIFT		4
61 /* Mask pattern */
62 #define UDC_EPCTL_ET_MASK		0x00000030
63 /* Value for ET field */
64 #define UDC_EPCTL_ET_CONTROL		0
65 #define UDC_EPCTL_ET_ISO		1
66 #define UDC_EPCTL_ET_BULK		2
67 #define UDC_EPCTL_ET_INTERRUPT		3
68 
69 /* Endpoint status register */
70 /* Bit position */
71 #define UDC_EPSTS_XFERDONE		(1 << 27)
72 #define UDC_EPSTS_RSS			(1 << 26)
73 #define UDC_EPSTS_RCS			(1 << 25)
74 #define UDC_EPSTS_TXEMPTY		(1 << 24)
75 #define UDC_EPSTS_TDC			(1 << 10)
76 #define UDC_EPSTS_HE			(1 << 9)
77 #define UDC_EPSTS_MRXFIFO_EMP		(1 << 8)
78 #define UDC_EPSTS_BNA			(1 << 7)
79 #define UDC_EPSTS_IN			(1 << 6)
80 #define UDC_EPSTS_OUT_SHIFT		4
81 /* Mask pattern */
82 #define UDC_EPSTS_OUT_MASK		0x00000030
83 #define UDC_EPSTS_ALL_CLR_MASK		0x1F0006F0
84 /* Value for OUT field */
85 #define UDC_EPSTS_OUT_SETUP		2
86 #define UDC_EPSTS_OUT_DATA		1
87 
88 /* Device configuration register */
89 /* Bit position */
90 #define UDC_DEVCFG_CSR_PRG		(1 << 17)
91 #define UDC_DEVCFG_SP			(1 << 3)
92 /* SPD Value */
93 #define UDC_DEVCFG_SPD_HS		0x0
94 #define UDC_DEVCFG_SPD_FS		0x1
95 #define UDC_DEVCFG_SPD_LS		0x2
96 
97 /* Device control register */
98 /* Bit position */
99 #define UDC_DEVCTL_THLEN_SHIFT		24
100 #define UDC_DEVCTL_BRLEN_SHIFT		16
101 #define UDC_DEVCTL_CSR_DONE		(1 << 13)
102 #define UDC_DEVCTL_SD			(1 << 10)
103 #define UDC_DEVCTL_MODE			(1 << 9)
104 #define UDC_DEVCTL_BREN			(1 << 8)
105 #define UDC_DEVCTL_THE			(1 << 7)
106 #define UDC_DEVCTL_DU			(1 << 4)
107 #define UDC_DEVCTL_TDE			(1 << 3)
108 #define UDC_DEVCTL_RDE			(1 << 2)
109 #define UDC_DEVCTL_RES			(1 << 0)
110 
111 /* Device status register */
112 /* Bit position */
113 #define UDC_DEVSTS_TS_SHIFT		18
114 #define UDC_DEVSTS_ENUM_SPEED_SHIFT	13
115 #define UDC_DEVSTS_ALT_SHIFT		8
116 #define UDC_DEVSTS_INTF_SHIFT		4
117 #define UDC_DEVSTS_CFG_SHIFT		0
118 /* Mask pattern */
119 #define UDC_DEVSTS_TS_MASK		0xfffc0000
120 #define UDC_DEVSTS_ENUM_SPEED_MASK	0x00006000
121 #define UDC_DEVSTS_ALT_MASK		0x00000f00
122 #define UDC_DEVSTS_INTF_MASK		0x000000f0
123 #define UDC_DEVSTS_CFG_MASK		0x0000000f
124 /* value for maximum speed for SPEED field */
125 #define UDC_DEVSTS_ENUM_SPEED_FULL	1
126 #define UDC_DEVSTS_ENUM_SPEED_HIGH	0
127 #define UDC_DEVSTS_ENUM_SPEED_LOW	2
128 #define UDC_DEVSTS_ENUM_SPEED_FULLX	3
129 
130 /* Device irq register */
131 /* Bit position */
132 #define UDC_DEVINT_RWKP			(1 << 7)
133 #define UDC_DEVINT_ENUM			(1 << 6)
134 #define UDC_DEVINT_SOF			(1 << 5)
135 #define UDC_DEVINT_US			(1 << 4)
136 #define UDC_DEVINT_UR			(1 << 3)
137 #define UDC_DEVINT_ES			(1 << 2)
138 #define UDC_DEVINT_SI			(1 << 1)
139 #define UDC_DEVINT_SC			(1 << 0)
140 /* Mask pattern */
141 #define UDC_DEVINT_MSK			0x7f
142 
143 /* Endpoint irq register */
144 /* Bit position */
145 #define UDC_EPINT_IN_SHIFT		0
146 #define UDC_EPINT_OUT_SHIFT		16
147 #define UDC_EPINT_IN_EP0		(1 << 0)
148 #define UDC_EPINT_OUT_EP0		(1 << 16)
149 /* Mask pattern */
150 #define UDC_EPINT_MSK_DISABLE_ALL	0xffffffff
151 
152 /* UDC_CSR_BUSY Status register */
153 /* Bit position */
154 #define UDC_CSR_BUSY			(1 << 0)
155 
156 /* SOFT RESET register */
157 /* Bit position */
158 #define UDC_PSRST			(1 << 1)
159 #define UDC_SRST			(1 << 0)
160 
161 /* USB_DEVICE endpoint register */
162 /* Bit position */
163 #define UDC_CSR_NE_NUM_SHIFT		0
164 #define UDC_CSR_NE_DIR_SHIFT		4
165 #define UDC_CSR_NE_TYPE_SHIFT		5
166 #define UDC_CSR_NE_CFG_SHIFT		7
167 #define UDC_CSR_NE_INTF_SHIFT		11
168 #define UDC_CSR_NE_ALT_SHIFT		15
169 #define UDC_CSR_NE_MAX_PKT_SHIFT	19
170 /* Mask pattern */
171 #define UDC_CSR_NE_NUM_MASK		0x0000000f
172 #define UDC_CSR_NE_DIR_MASK		0x00000010
173 #define UDC_CSR_NE_TYPE_MASK		0x00000060
174 #define UDC_CSR_NE_CFG_MASK		0x00000780
175 #define UDC_CSR_NE_INTF_MASK		0x00007800
176 #define UDC_CSR_NE_ALT_MASK		0x00078000
177 #define UDC_CSR_NE_MAX_PKT_MASK		0x3ff80000
178 
179 #define PCH_UDC_CSR(ep)	(UDC_CSR_ADDR + ep*4)
180 #define PCH_UDC_EPINT(in, num)\
181 		(1 << (num + (in ? UDC_EPINT_IN_SHIFT : UDC_EPINT_OUT_SHIFT)))
182 
183 /* Index of endpoint */
184 #define UDC_EP0IN_IDX		0
185 #define UDC_EP0OUT_IDX		1
186 #define UDC_EPIN_IDX(ep)	(ep * 2)
187 #define UDC_EPOUT_IDX(ep)	(ep * 2 + 1)
188 #define PCH_UDC_EP0		0
189 #define PCH_UDC_EP1		1
190 #define PCH_UDC_EP2		2
191 #define PCH_UDC_EP3		3
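
/*
 * Worked example (editor's illustration, not part of the original driver):
 * how the macros above map a logical endpoint to its array slot, CSR
 * register and interrupt bit.  The endpoint choice (EP1) is only an
 * assumption made for the example; the numbers follow directly from the
 * definitions in this file.
 *
 *	UDC_EPIN_IDX(1)     = 2                  dev->ep[2] is EP1 IN
 *	UDC_EPOUT_IDX(1)    = 3                  dev->ep[3] is EP1 OUT
 *	PCH_UDC_CSR(2)      = 0x500 + 2*4 = 0x508
 *	PCH_UDC_EPINT(1, 1) = 1 << (1 + 0)  = 0x00000002   EP1 IN irq bit
 *	PCH_UDC_EPINT(0, 1) = 1 << (1 + 16) = 0x00020000   EP1 OUT irq bit
 */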
192 
193 /* Number of endpoint */
194 #define PCH_UDC_EP_NUM		32	/* Total number of EPs (16 IN,16 OUT) */
195 #define PCH_UDC_USED_EP_NUM	4	/* EP number of EP's really used */
196 /* Length Value */
197 #define PCH_UDC_BRLEN		0x0F	/* Burst length */
198 #define PCH_UDC_THLEN		0x1F	/* Threshold length */
199 /* Value of EP Buffer Size */
200 #define UDC_EP0IN_BUFF_SIZE	16
201 #define UDC_EPIN_BUFF_SIZE	256
202 #define UDC_EP0OUT_BUFF_SIZE	16
203 #define UDC_EPOUT_BUFF_SIZE	256
204 /* Value of EP maximum packet size */
205 #define UDC_EP0IN_MAX_PKT_SIZE	64
206 #define UDC_EP0OUT_MAX_PKT_SIZE	64
207 #define UDC_BULK_MAX_PKT_SIZE	512
208 
209 /* DMA */
210 #define DMA_DIR_RX		1	/* DMA for data receive */
211 #define DMA_DIR_TX		2	/* DMA for data transmit */
212 #define DMA_ADDR_INVALID	(~(dma_addr_t)0)
213 #define UDC_DMA_MAXPACKET	65536	/* maximum packet size for DMA */
214 
215 /**
216  * struct pch_udc_data_dma_desc - Structure to hold DMA descriptor information
217  *				  for data
218  * @status:		Status quadlet
219  * @reserved:		Reserved
220  * @dataptr:		Buffer descriptor
221  * @next:		Next descriptor
222  */
223 struct pch_udc_data_dma_desc {
224 	u32 status;
225 	u32 reserved;
226 	u32 dataptr;
227 	u32 next;
228 };
229 
230 /**
231  * struct pch_udc_stp_dma_desc - Structure to hold DMA descriptor information
232  *				 for control data
233  * @status:	Status
234  * @reserved:	Reserved
235  * @request:	embedded setup request
236  *		(holds the two setup data words received from the host)
237  */
238 struct pch_udc_stp_dma_desc {
239 	u32 status;
240 	u32 reserved;
241 	struct usb_ctrlrequest request;
242 } __packed;
243 
244 /* DMA status definitions */
245 /* Buffer status */
246 #define PCH_UDC_BUFF_STS	0xC0000000
247 #define PCH_UDC_BS_HST_RDY	0x00000000
248 #define PCH_UDC_BS_DMA_BSY	0x40000000
249 #define PCH_UDC_BS_DMA_DONE	0x80000000
250 #define PCH_UDC_BS_HST_BSY	0xC0000000
251 /*  Rx/Tx Status */
252 #define PCH_UDC_RXTX_STS	0x30000000
253 #define PCH_UDC_RTS_SUCC	0x00000000
254 #define PCH_UDC_RTS_DESERR	0x10000000
255 #define PCH_UDC_RTS_BUFERR	0x30000000
256 /* Last Descriptor Indication */
257 #define PCH_UDC_DMA_LAST	0x08000000
258 /* Number of Rx/Tx Bytes Mask */
259 #define PCH_UDC_RXTX_BYTES	0x0000ffff
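
/*
 * Worked example (editor's illustration, not part of the original driver):
 * decoding a completed data descriptor status quadlet with the masks
 * above.  The status value is made up for the example.
 *
 *	u32 status = 0x88000040;
 *	(status & PCH_UDC_BUFF_STS)   == PCH_UDC_BS_DMA_DONE   DMA finished
 *	(status & PCH_UDC_RXTX_STS)   == PCH_UDC_RTS_SUCC      no Rx/Tx error
 *	(status & PCH_UDC_DMA_LAST)   != 0                     last descriptor
 *	(status & PCH_UDC_RXTX_BYTES) == 64                    bytes transferred
 */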
260 
261 /**
262  * struct pch_udc_cfg_data - Structure to hold current configuration
263  *			     and interface information
264  * @cur_cfg:	current configuration in use
265  * @cur_intf:	current interface in use
266  * @cur_alt:	current alt interface in use
267  */
268 struct pch_udc_cfg_data {
269 	u16 cur_cfg;
270 	u16 cur_intf;
271 	u16 cur_alt;
272 };
273 
274 /**
275  * struct pch_udc_ep - Structure holding a PCH USB device Endpoint information
276  * @ep:			embedded ep request
277  * @td_stp_phys:	for setup request
278  * @td_data_phys:	for data request
279  * @td_stp:		for setup request
280  * @td_data:		for data request
281  * @dev:		reference to device struct
282  * @offset_addr:	offset address of ep register
283  * @queue:		queue of requests pending on this
284  *			endpoint
285  * @num:		endpoint number
286  * @in:			endpoint is IN
287  * @halted:		endpoint halted?
288  * @epsts:		Endpoint status
289  */
290 struct pch_udc_ep {
291 	struct usb_ep			ep;
292 	dma_addr_t			td_stp_phys;
293 	dma_addr_t			td_data_phys;
294 	struct pch_udc_stp_dma_desc	*td_stp;
295 	struct pch_udc_data_dma_desc	*td_data;
296 	struct pch_udc_dev		*dev;
297 	unsigned long			offset_addr;
298 	struct list_head		queue;
299 	unsigned			num:5,
300 					in:1,
301 					halted:1;
302 	unsigned long			epsts;
303 };
304 
305 /**
306  * struct pch_vbus_gpio_data - Structure holding GPIO information
307  *					for detecting VBUS
308  * @port:		gpio port number
309  * @intr:		gpio interrupt number
310  * @irq_work_fall:	Structure for WorkQueue
311  * @irq_work_rise:	Structure for WorkQueue
312  */
313 struct pch_vbus_gpio_data {
314 	int			port;
315 	int			intr;
316 	struct work_struct	irq_work_fall;
317 	struct work_struct	irq_work_rise;
318 };
319 
320 /**
321  * struct pch_udc_dev - Structure holding complete information
322  *			of the PCH USB device
323  * @gadget:		gadget driver data
324  * @driver:		reference to gadget driver bound
325  * @pdev:		reference to the PCI device
326  * @ep:			array of endpoints
327  * @lock:		protects all state
328  * @active:		enabled the PCI device
329  * @stall:		stall requested
330  * @prot_stall:		protocol stall requested
331  * @irq_registered:	irq registered with system
332  * @mem_region:		device memory mapped
333  * @registered:		driver registered with system
334  * @suspended:		driver in suspended state
335  * @connected:		gadget driver associated
336  * @vbus_session:	required vbus_session state
337  * @set_cfg_not_acked:	pending acknowledgement for setup
338  * @waiting_zlp_ack:	pending acknowledgement for ZLP
339  * @data_requests:	DMA pool for data requests
340  * @stp_requests:	DMA pool for setup requests
341  * @dma_addr:		DMA mapping of the receive buffer (@ep0out_buf)
342  * @ep0out_buf:		Buffer for DMA
343  * @setup_data:		Received setup data
344  * @phys_addr:		physical address of device memory
345  * @base_addr:		base of ioremapped device memory
346  * @bar:		Indicates which PCI BAR for USB regs
347  * @irq:		IRQ line for the device
348  * @cfg_data:		current cfg, intf, and alt in use
349  * @vbus_gpio:		GPIO information for detecting VBUS
350  */
351 struct pch_udc_dev {
352 	struct usb_gadget		gadget;
353 	struct usb_gadget_driver	*driver;
354 	struct pci_dev			*pdev;
355 	struct pch_udc_ep		ep[PCH_UDC_EP_NUM];
356 	spinlock_t			lock; /* protects all state */
357 	unsigned	active:1,
358 			stall:1,
359 			prot_stall:1,
360 			irq_registered:1,
361 			mem_region:1,
362 			suspended:1,
363 			connected:1,
364 			vbus_session:1,
365 			set_cfg_not_acked:1,
366 			waiting_zlp_ack:1;
367 	struct pci_pool		*data_requests;
368 	struct pci_pool		*stp_requests;
369 	dma_addr_t			dma_addr;
370 	void				*ep0out_buf;
371 	struct usb_ctrlrequest		setup_data;
372 	unsigned long			phys_addr;
373 	void __iomem			*base_addr;
374 	unsigned			bar;
375 	unsigned			irq;
376 	struct pch_udc_cfg_data		cfg_data;
377 	struct pch_vbus_gpio_data	vbus_gpio;
378 };
379 #define to_pch_udc(g)	(container_of((g), struct pch_udc_dev, gadget))
380 
381 #define PCH_UDC_PCI_BAR_QUARK_X1000	0
382 #define PCH_UDC_PCI_BAR			1
383 #define PCI_DEVICE_ID_INTEL_EG20T_UDC	0x8808
384 #define PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC	0x0939
385 #define PCI_VENDOR_ID_ROHM		0x10DB
386 #define PCI_DEVICE_ID_ML7213_IOH_UDC	0x801D
387 #define PCI_DEVICE_ID_ML7831_IOH_UDC	0x8808
388 
389 static const char	ep0_string[] = "ep0in";
390 static DEFINE_SPINLOCK(udc_stall_spinlock);	/* stall spin lock */
391 static bool speed_fs;
392 module_param_named(speed_fs, speed_fs, bool, S_IRUGO);
393 MODULE_PARM_DESC(speed_fs, "true for Full speed operation");
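
/*
 * Usage note (editor's illustration, not part of the original source):
 * the module parameter above forces full-speed operation at load time,
 * e.g. "modprobe pch_udc speed_fs=1"; without it pch_udc_init() leaves the
 * controller configured for high speed.
 */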
394 
395 /**
396  * struct pch_udc_request - Structure holding a PCH USB device request packet
397  * @req:		embedded ep request
398  * @td_data_phys:	phys. address
399  * @td_data:		first dma desc. of chain
400  * @td_data_last:	last dma desc. of chain
401  * @queue:		associated queue
402  * @dma_going:		DMA in progress for request
403  * @dma_mapped:		DMA memory mapped for request
404  * @dma_done:		DMA completed for request
405  * @chain_len:		chain length
406  * @buf:		Buffer memory for align adjustment
407  * @dma:		DMA memory for align adjustment
408  */
409 struct pch_udc_request {
410 	struct usb_request		req;
411 	dma_addr_t			td_data_phys;
412 	struct pch_udc_data_dma_desc	*td_data;
413 	struct pch_udc_data_dma_desc	*td_data_last;
414 	struct list_head		queue;
415 	unsigned			dma_going:1,
416 					dma_mapped:1,
417 					dma_done:1;
418 	unsigned			chain_len;
419 	void				*buf;
420 	dma_addr_t			dma;
421 };
422 
423 static inline u32 pch_udc_readl(struct pch_udc_dev *dev, unsigned long reg)
424 {
425 	return ioread32(dev->base_addr + reg);
426 }
427 
428 static inline void pch_udc_writel(struct pch_udc_dev *dev,
429 				    unsigned long val, unsigned long reg)
430 {
431 	iowrite32(val, dev->base_addr + reg);
432 }
433 
434 static inline void pch_udc_bit_set(struct pch_udc_dev *dev,
435 				     unsigned long reg,
436 				     unsigned long bitmask)
437 {
438 	pch_udc_writel(dev, pch_udc_readl(dev, reg) | bitmask, reg);
439 }
440 
441 static inline void pch_udc_bit_clr(struct pch_udc_dev *dev,
442 				     unsigned long reg,
443 				     unsigned long bitmask)
444 {
445 	pch_udc_writel(dev, pch_udc_readl(dev, reg) & ~(bitmask), reg);
446 }
447 
448 static inline u32 pch_udc_ep_readl(struct pch_udc_ep *ep, unsigned long reg)
449 {
450 	return ioread32(ep->dev->base_addr + ep->offset_addr + reg);
451 }
452 
453 static inline void pch_udc_ep_writel(struct pch_udc_ep *ep,
454 				    unsigned long val, unsigned long reg)
455 {
456 	iowrite32(val, ep->dev->base_addr + ep->offset_addr + reg);
457 }
458 
459 static inline void pch_udc_ep_bit_set(struct pch_udc_ep *ep,
460 				     unsigned long reg,
461 				     unsigned long bitmask)
462 {
463 	pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) | bitmask, reg);
464 }
465 
466 static inline void pch_udc_ep_bit_clr(struct pch_udc_ep *ep,
467 				     unsigned long reg,
468 				     unsigned long bitmask)
469 {
470 	pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) & ~(bitmask), reg);
471 }
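
/*
 * Illustrative sketch (editor's addition, not part of the original driver):
 * the helpers above wrap read-modify-write accesses to the device and
 * per-endpoint register blocks.  Assuming valid dev/ep pointers, putting an
 * endpoint into NAK and resuming USB signalling would read:
 *
 *	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_SNAK);
 *	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
 *
 * each call being an ioread32(), mask OR/AND-NOT, iowrite32() sequence.
 */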
472 
473 /**
474  * pch_udc_csr_busy() - Wait till idle.
475  * @dev:	Reference to pch_udc_dev structure
476  */
477 static void pch_udc_csr_busy(struct pch_udc_dev *dev)
478 {
479 	unsigned int count = 200;
480 
481 	/* Wait till idle */
482 	while ((pch_udc_readl(dev, UDC_CSR_BUSY_ADDR) & UDC_CSR_BUSY)
483 		&& --count)
484 		cpu_relax();
485 	if (!count)
486 		dev_err(&dev->pdev->dev, "%s: wait error\n", __func__);
487 }
488 
489 /**
490  * pch_udc_write_csr() - Write the command and status registers.
491  * @dev:	Reference to pch_udc_dev structure
492  * @val:	value to be written to CSR register
493  * @ep:	endpoint number (index into the CSR register block)
494  */
495 static void pch_udc_write_csr(struct pch_udc_dev *dev, unsigned long val,
496 			       unsigned int ep)
497 {
498 	unsigned long reg = PCH_UDC_CSR(ep);
499 
500 	pch_udc_csr_busy(dev);		/* Wait till idle */
501 	pch_udc_writel(dev, val, reg);
502 	pch_udc_csr_busy(dev);		/* Wait till idle */
503 }
504 
505 /**
506  * pch_udc_read_csr() - Read the command and status registers.
507  * @dev:	Reference to pch_udc_dev structure
508  * @ep:	endpoint number (index into the CSR register block)
509  *
510  * Return codes:	content of CSR register
511  */
512 static u32 pch_udc_read_csr(struct pch_udc_dev *dev, unsigned int ep)
513 {
514 	unsigned long reg = PCH_UDC_CSR(ep);
515 
516 	pch_udc_csr_busy(dev);		/* Wait till idle */
517 	pch_udc_readl(dev, reg);	/* Dummy read */
518 	pch_udc_csr_busy(dev);		/* Wait till idle */
519 	return pch_udc_readl(dev, reg);
520 }
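
/*
 * Illustrative sketch (editor's addition, not part of the original driver):
 * every CSR access is bracketed by a busy-wait on UDC_CSR_BUSY.  Updating
 * and reading back the EP0 IN slot (val being an assumed, pre-computed NE
 * value) would therefore be:
 *
 *	pch_udc_write_csr(dev, val, UDC_EP0IN_IDX);	CSR at 0x500 + 0*4
 *	val = pch_udc_read_csr(dev, UDC_EP0IN_IDX);	dummy read, then read
 */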
521 
522 /**
523  * pch_udc_rmt_wakeup() - Initiate remote wakeup signalling
524  * @dev:	Reference to pch_udc_dev structure
525  */
526 static inline void pch_udc_rmt_wakeup(struct pch_udc_dev *dev)
527 {
528 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
529 	mdelay(1);
530 	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
531 }
532 
533 /**
534  * pch_udc_get_frame() - Get the current frame from device status register
535  * @dev:	Reference to pch_udc_dev structure
536  * Return:	the current frame number
537  */
538 static inline int pch_udc_get_frame(struct pch_udc_dev *dev)
539 {
540 	u32 frame = pch_udc_readl(dev, UDC_DEVSTS_ADDR);
541 	return (frame & UDC_DEVSTS_TS_MASK) >> UDC_DEVSTS_TS_SHIFT;
542 }
543 
544 /**
545  * pch_udc_clear_selfpowered() - Clear the self power control
546  * @dev:	Reference to pch_udc_regs structure
547  */
548 static inline void pch_udc_clear_selfpowered(struct pch_udc_dev *dev)
549 {
550 	pch_udc_bit_clr(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
551 }
552 
553 /**
554  * pch_udc_set_selfpowered() - Set the self power control
555  * @dev:	Reference to pch_udc_regs structure
556  */
557 static inline void pch_udc_set_selfpowered(struct pch_udc_dev *dev)
558 {
559 	pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
560 }
561 
562 /**
563  * pch_udc_set_disconnect() - Set the disconnect status.
564  * @dev:	Reference to pch_udc_regs structure
565  */
566 static inline void pch_udc_set_disconnect(struct pch_udc_dev *dev)
567 {
568 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
569 }
570 
571 /**
572  * pch_udc_clear_disconnect() - Clear the disconnect status.
573  * @dev:	Reference to pch_udc_regs structure
574  */
575 static void pch_udc_clear_disconnect(struct pch_udc_dev *dev)
576 {
577 	/* Clear the disconnect */
578 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
579 	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
580 	mdelay(1);
581 	/* Resume USB signalling */
582 	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
583 }
584 
585 /**
586  * pch_udc_reconnect() - This API initializes the USB device controller
587  *						and clears the disconnect status.
588  * @dev:		Reference to pch_udc_regs structure
589  */
590 static void pch_udc_init(struct pch_udc_dev *dev);
591 static void pch_udc_reconnect(struct pch_udc_dev *dev)
592 {
593 	pch_udc_init(dev);
594 
595 	/* enable device interrupts */
596 	/* pch_udc_enable_interrupts() */
597 	pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR,
598 			UDC_DEVINT_UR | UDC_DEVINT_ENUM);
599 
600 	/* Clear the disconnect */
601 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
602 	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
603 	mdelay(1);
604 	/* Resume USB signalling */
605 	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
606 }
607 
608 /**
609  * pch_udc_vbus_session() - Set or clear the disconnect status.
610  * @dev:	Reference to pch_udc_regs structure
611  * @is_active:	Parameter specifying the action
612  *		  0:   indicating VBUS power is ending
613  *		  !0:  indicating VBUS power is starting
614  */
615 static inline void pch_udc_vbus_session(struct pch_udc_dev *dev,
616 					  int is_active)
617 {
618 	if (is_active) {
619 		pch_udc_reconnect(dev);
620 		dev->vbus_session = 1;
621 	} else {
622 		if (dev->driver && dev->driver->disconnect) {
623 			spin_lock(&dev->lock);
624 			dev->driver->disconnect(&dev->gadget);
625 			spin_unlock(&dev->lock);
626 		}
627 		pch_udc_set_disconnect(dev);
628 		dev->vbus_session = 0;
629 	}
630 }
631 
632 /**
633  * pch_udc_ep_set_stall() - Set the stall of endpoint
634  * @ep:		Reference to structure of type pch_udc_ep_regs
635  */
636 static void pch_udc_ep_set_stall(struct pch_udc_ep *ep)
637 {
638 	if (ep->in) {
639 		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
640 		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
641 	} else {
642 		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
643 	}
644 }
645 
646 /**
647  * pch_udc_ep_clear_stall() - Clear the stall of endpoint
648  * @ep:		Reference to structure of type pch_udc_ep_regs
649  */
650 static inline void pch_udc_ep_clear_stall(struct pch_udc_ep *ep)
651 {
652 	/* Clear the stall */
653 	pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
654 	/* Clear NAK by writing CNAK */
655 	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
656 }
657 
658 /**
659  * pch_udc_ep_set_trfr_type() - Set the transfer type of endpoint
660  * @ep:		Reference to structure of type pch_udc_ep_regs
661  * @type:	Type of endpoint
662  */
663 static inline void pch_udc_ep_set_trfr_type(struct pch_udc_ep *ep,
664 					u8 type)
665 {
666 	pch_udc_ep_writel(ep, ((type << UDC_EPCTL_ET_SHIFT) &
667 				UDC_EPCTL_ET_MASK), UDC_EPCTL_ADDR);
668 }
669 
670 /**
671  * pch_udc_ep_set_bufsz() - Set the buffer size for the endpoint
672  * @ep:		Reference to structure of type pch_udc_ep_regs
673  * @buf_size:	The buffer word size
674  */
675 static void pch_udc_ep_set_bufsz(struct pch_udc_ep *ep,
676 						 u32 buf_size, u32 ep_in)
677 {
678 	u32 data;
679 	if (ep_in) {
680 		data = pch_udc_ep_readl(ep, UDC_BUFIN_FRAMENUM_ADDR);
681 		data = (data & 0xffff0000) | (buf_size & 0xffff);
682 		pch_udc_ep_writel(ep, data, UDC_BUFIN_FRAMENUM_ADDR);
683 	} else {
684 		data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
685 		data = (buf_size << 16) | (data & 0xffff);
686 		pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
687 	}
688 }
689 
690 /**
691  * pch_udc_ep_set_maxpkt() - Set the Max packet size for the endpoint
692  * @ep:		Reference to structure of type pch_udc_ep_regs
693  * @pkt_size:	The packet byte size
694  */
695 static void pch_udc_ep_set_maxpkt(struct pch_udc_ep *ep, u32 pkt_size)
696 {
697 	u32 data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
698 	data = (data & 0xffff0000) | (pkt_size & 0xffff);
699 	pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
700 }
701 
702 /**
703  * pch_udc_ep_set_subptr() - Set the Setup buffer pointer for the endpoint
704  * @ep:		Reference to structure of type pch_udc_ep_regs
705  * @addr:	DMA address of the setup buffer
706  */
707 static inline void pch_udc_ep_set_subptr(struct pch_udc_ep *ep, u32 addr)
708 {
709 	pch_udc_ep_writel(ep, addr, UDC_SUBPTR_ADDR);
710 }
711 
712 /**
713  * pch_udc_ep_set_ddptr() - Set the Data descriptor pointer for the endpoint
714  * @ep:		Reference to structure of type pch_udc_ep_regs
715  * @addr:	DMA address of the data descriptor
716  */
717 static inline void pch_udc_ep_set_ddptr(struct pch_udc_ep *ep, u32 addr)
718 {
719 	pch_udc_ep_writel(ep, addr, UDC_DESPTR_ADDR);
720 }
721 
722 /**
723  * pch_udc_ep_set_pd() - Set the poll demand bit for the endpoint
724  * @ep:		Reference to structure of type pch_udc_ep_regs
725  */
726 static inline void pch_udc_ep_set_pd(struct pch_udc_ep *ep)
727 {
728 	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_P);
729 }
730 
731 /**
732  * pch_udc_ep_set_rrdy() - Set the receive ready bit for the endpoint
733  * @ep:		Reference to structure of type pch_udc_ep_regs
734  */
735 static inline void pch_udc_ep_set_rrdy(struct pch_udc_ep *ep)
736 {
737 	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
738 }
739 
740 /**
741  * pch_udc_ep_clear_rrdy() - Clear the receive ready bit for the endpoint
742  * @ep:		Reference to structure of type pch_udc_ep_regs
743  */
744 static inline void pch_udc_ep_clear_rrdy(struct pch_udc_ep *ep)
745 {
746 	pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
747 }
748 
749 /**
750  * pch_udc_set_dma() - Set the 'TDE' or RDE bit of device control
751  *			register depending on the direction specified
752  * @dev:	Reference to structure of type pch_udc_regs
753  * @dir:	whether Tx or Rx
754  *		  DMA_DIR_RX: Receive
755  *		  DMA_DIR_TX: Transmit
756  */
757 static inline void pch_udc_set_dma(struct pch_udc_dev *dev, int dir)
758 {
759 	if (dir == DMA_DIR_RX)
760 		pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
761 	else if (dir == DMA_DIR_TX)
762 		pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
763 }
764 
765 /**
766  * pch_udc_clear_dma() - Clear the 'TDE' or RDE bit of device control
767  *				 register depending on the direction specified
768  * @dev:	Reference to structure of type pch_udc_regs
769  * @dir:	Whether Tx or Rx
770  *		  DMA_DIR_RX: Receive
771  *		  DMA_DIR_TX: Transmit
772  */
773 static inline void pch_udc_clear_dma(struct pch_udc_dev *dev, int dir)
774 {
775 	if (dir == DMA_DIR_RX)
776 		pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
777 	else if (dir == DMA_DIR_TX)
778 		pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
779 }
780 
781 /**
782  * pch_udc_set_csr_done() - Set the device control register
783  *				CSR done field (bit 13)
784  * @dev:	reference to structure of type pch_udc_regs
785  */
786 static inline void pch_udc_set_csr_done(struct pch_udc_dev *dev)
787 {
788 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_CSR_DONE);
789 }
790 
791 /**
792  * pch_udc_disable_interrupts() - Disables the specified interrupts
793  * @dev:	Reference to structure of type pch_udc_regs
794  * @mask:	Mask to disable interrupts
795  */
796 static inline void pch_udc_disable_interrupts(struct pch_udc_dev *dev,
797 					    u32 mask)
798 {
799 	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, mask);
800 }
801 
802 /**
803  * pch_udc_enable_interrupts() - Enable the specified interrupts
804  * @dev:	Reference to structure of type pch_udc_regs
805  * @mask:	Mask to enable interrupts
806  */
807 static inline void pch_udc_enable_interrupts(struct pch_udc_dev *dev,
808 					   u32 mask)
809 {
810 	pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR, mask);
811 }
812 
813 /**
814  * pch_udc_disable_ep_interrupts() - Disable endpoint interrupts
815  * @dev:	Reference to structure of type pch_udc_regs
816  * @mask:	Mask to disable interrupts
817  */
818 static inline void pch_udc_disable_ep_interrupts(struct pch_udc_dev *dev,
819 						u32 mask)
820 {
821 	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, mask);
822 }
823 
824 /**
825  * pch_udc_enable_ep_interrupts() - Enable endpoint interrupts
826  * @dev:	Reference to structure of type pch_udc_regs
827  * @mask:	Mask to enable interrupts
828  */
829 static inline void pch_udc_enable_ep_interrupts(struct pch_udc_dev *dev,
830 					      u32 mask)
831 {
832 	pch_udc_bit_clr(dev, UDC_EPIRQMSK_ADDR, mask);
833 }
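
/*
 * Illustrative note (editor's addition, not part of the original driver):
 * the IRQ mask registers are active-high masks, so "enable" clears bits
 * and "disable" sets them.  Unmasking both EP0 directions would be:
 *
 *	pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 | UDC_EPINT_OUT_EP0);
 */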
834 
835 /**
836  * pch_udc_read_device_interrupts() - Read the device interrupts
837  * @dev:	Reference to structure of type pch_udc_regs
838  * Return:	The device interrupt status
839  */
840 static inline u32 pch_udc_read_device_interrupts(struct pch_udc_dev *dev)
841 {
842 	return pch_udc_readl(dev, UDC_DEVIRQSTS_ADDR);
843 }
844 
845 /**
846  * pch_udc_write_device_interrupts() - Write device interrupts
847  * @dev:	Reference to structure of type pch_udc_regs
848  * @val:	The value to be written to interrupt register
849  */
850 static inline void pch_udc_write_device_interrupts(struct pch_udc_dev *dev,
851 						     u32 val)
852 {
853 	pch_udc_writel(dev, val, UDC_DEVIRQSTS_ADDR);
854 }
855 
856 /**
857  * pch_udc_read_ep_interrupts() - Read the endpoint interrupts
858  * @dev:	Reference to structure of type pch_udc_regs
859  * Return:	The endpoint interrupt status
860  */
861 static inline u32 pch_udc_read_ep_interrupts(struct pch_udc_dev *dev)
862 {
863 	return pch_udc_readl(dev, UDC_EPIRQSTS_ADDR);
864 }
865 
866 /**
867  * pch_udc_write_ep_interrupts() - Clear endpoint interrupts
868  * @dev:	Reference to structure of type pch_udc_regs
869  * @val:	The value to be written to interrupt register
870  */
871 static inline void pch_udc_write_ep_interrupts(struct pch_udc_dev *dev,
872 					     u32 val)
873 {
874 	pch_udc_writel(dev, val, UDC_EPIRQSTS_ADDR);
875 }
876 
877 /**
878  * pch_udc_read_device_status() - Read the device status
879  * @dev:	Reference to structure of type pch_udc_regs
880  * Return:	The device status
881  */
882 static inline u32 pch_udc_read_device_status(struct pch_udc_dev *dev)
883 {
884 	return pch_udc_readl(dev, UDC_DEVSTS_ADDR);
885 }
886 
887 /**
888  * pch_udc_read_ep_control() - Read the endpoint control
889  * @ep:		Reference to structure of type pch_udc_ep_regs
890  * Return:	The endpoint control register value
891  */
892 static inline u32 pch_udc_read_ep_control(struct pch_udc_ep *ep)
893 {
894 	return pch_udc_ep_readl(ep, UDC_EPCTL_ADDR);
895 }
896 
897 /**
898  * pch_udc_clear_ep_control() - Clear the endpoint control register
899  * @ep:		Reference to structure of type pch_udc_ep_regs
901  */
902 static inline void pch_udc_clear_ep_control(struct pch_udc_ep *ep)
903 {
904 	pch_udc_ep_writel(ep, 0, UDC_EPCTL_ADDR);
905 }
906 
907 /**
908  * pch_udc_read_ep_status() - Read the endpoint status
909  * @ep:		Reference to structure of type pch_udc_ep_regs
910  * Return:	The endpoint status
911  */
912 static inline u32 pch_udc_read_ep_status(struct pch_udc_ep *ep)
913 {
914 	return pch_udc_ep_readl(ep, UDC_EPSTS_ADDR);
915 }
916 
917 /**
918  * pch_udc_clear_ep_status() - Clear the endpoint status
919  * @ep:		Reference to structure of type pch_udc_ep_regs
920  * @stat:	Endpoint status
921  */
922 static inline void pch_udc_clear_ep_status(struct pch_udc_ep *ep,
923 					 u32 stat)
924 {
925 	pch_udc_ep_writel(ep, stat, UDC_EPSTS_ADDR);
926 }
927 
928 /**
929  * pch_udc_ep_set_nak() - Set the bit 7 (SNAK field)
930  *				of the endpoint control register
931  * @ep:		Reference to structure of type pch_udc_ep_regs
932  */
933 static inline void pch_udc_ep_set_nak(struct pch_udc_ep *ep)
934 {
935 	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_SNAK);
936 }
937 
938 /**
939  * pch_udc_ep_clear_nak() - Set the bit 8 (CNAK field)
940  *				of the endpoint control register
941  * @ep:		reference to structure of type pch_udc_ep_regs
942  */
943 static void pch_udc_ep_clear_nak(struct pch_udc_ep *ep)
944 {
945 	unsigned int loopcnt = 0;
946 	struct pch_udc_dev *dev = ep->dev;
947 
948 	if (!(pch_udc_ep_readl(ep, UDC_EPCTL_ADDR) & UDC_EPCTL_NAK))
949 		return;
950 	if (!ep->in) {
951 		loopcnt = 10000;
952 		while (!(pch_udc_read_ep_status(ep) & UDC_EPSTS_MRXFIFO_EMP) &&
953 			--loopcnt)
954 			udelay(5);
955 		if (!loopcnt)
956 			dev_err(&dev->pdev->dev, "%s: RxFIFO not Empty\n",
957 				__func__);
958 	}
959 	loopcnt = 10000;
960 	while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_NAK) && --loopcnt) {
961 		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
962 		udelay(5);
963 	}
964 	if (!loopcnt)
965 		dev_err(&dev->pdev->dev, "%s: Clear NAK not set for ep%d%s\n",
966 			__func__, ep->num, (ep->in ? "in" : "out"));
967 }
968 
969 /**
970  * pch_udc_ep_fifo_flush() - Flush the endpoint fifo
971  * @ep:	reference to structure of type pch_udc_ep_regs
972  * @dir:	direction of endpoint
973  *		  0:  endpoint is OUT
974  *		  !0: endpoint is IN
975  */
976 static void pch_udc_ep_fifo_flush(struct pch_udc_ep *ep, int dir)
977 {
978 	if (dir) {	/* IN ep */
979 		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
980 		return;
981 	}
982 }
983 
984 /**
985  * pch_udc_ep_enable() - This API enables the endpoint
986  * @ep:		Reference to the endpoint structure
987  * @desc:	endpoint descriptor
988  */
989 static void pch_udc_ep_enable(struct pch_udc_ep *ep,
990 			       struct pch_udc_cfg_data *cfg,
991 			       const struct usb_endpoint_descriptor *desc)
992 {
993 	u32 val = 0;
994 	u32 buff_size = 0;
995 
996 	pch_udc_ep_set_trfr_type(ep, desc->bmAttributes);
997 	if (ep->in)
998 		buff_size = UDC_EPIN_BUFF_SIZE;
999 	else
1000 		buff_size = UDC_EPOUT_BUFF_SIZE;
1001 	pch_udc_ep_set_bufsz(ep, buff_size, ep->in);
1002 	pch_udc_ep_set_maxpkt(ep, usb_endpoint_maxp(desc));
1003 	pch_udc_ep_set_nak(ep);
1004 	pch_udc_ep_fifo_flush(ep, ep->in);
1005 	/* Configure the endpoint */
1006 	val = ep->num << UDC_CSR_NE_NUM_SHIFT | ep->in << UDC_CSR_NE_DIR_SHIFT |
1007 	      ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) <<
1008 		UDC_CSR_NE_TYPE_SHIFT) |
1009 	      (cfg->cur_cfg << UDC_CSR_NE_CFG_SHIFT) |
1010 	      (cfg->cur_intf << UDC_CSR_NE_INTF_SHIFT) |
1011 	      (cfg->cur_alt << UDC_CSR_NE_ALT_SHIFT) |
1012 	      usb_endpoint_maxp(desc) << UDC_CSR_NE_MAX_PKT_SHIFT;
1013 
1014 	if (ep->in)
1015 		pch_udc_write_csr(ep->dev, val, UDC_EPIN_IDX(ep->num));
1016 	else
1017 		pch_udc_write_csr(ep->dev, val, UDC_EPOUT_IDX(ep->num));
1018 }
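
/*
 * Worked example (editor's illustration, not part of the original driver):
 * for an assumed bulk IN endpoint 1 with wMaxPacketSize 512 in
 * configuration 1, interface 0, alternate setting 0, the value built above
 * is
 *
 *	val = (1 << UDC_CSR_NE_NUM_SHIFT)	0x00000001
 *	    | (1 << UDC_CSR_NE_DIR_SHIFT)	0x00000010
 *	    | (2 << UDC_CSR_NE_TYPE_SHIFT)	0x00000040  bulk
 *	    | (1 << UDC_CSR_NE_CFG_SHIFT)	0x00000080
 *	    | (512 << UDC_CSR_NE_MAX_PKT_SHIFT)	0x10000000
 *	    = 0x100000d1
 *
 * and is written to CSR slot UDC_EPIN_IDX(1) == 2.
 */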
1019 
1020 /**
1021  * pch_udc_ep_disable() - This API disables the endpoint
1022  * @ep:		Reference to the endpoint structure
1023  */
1024 static void pch_udc_ep_disable(struct pch_udc_ep *ep)
1025 {
1026 	if (ep->in) {
1027 		/* flush the fifo */
1028 		pch_udc_ep_writel(ep, UDC_EPCTL_F, UDC_EPCTL_ADDR);
1029 		/* set NAK */
1030 		pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
1031 		pch_udc_ep_bit_set(ep, UDC_EPSTS_ADDR, UDC_EPSTS_IN);
1032 	} else {
1033 		/* set NAK */
1034 		pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
1035 	}
1036 	/* reset desc pointer */
1037 	pch_udc_ep_writel(ep, 0, UDC_DESPTR_ADDR);
1038 }
1039 
1040 /**
1041  * pch_udc_wait_ep_stall() - Wait until the endpoint STALL bit is cleared.
1042  * @ep:	Reference to pch_udc_ep structure
1043  */
1044 static void pch_udc_wait_ep_stall(struct pch_udc_ep *ep)
1045 {
1046 	unsigned int count = 10000;
1047 
1048 	/* Wait till idle */
1049 	while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_S) && --count)
1050 		udelay(5);
1051 	if (!count)
1052 		dev_err(&ep->dev->pdev->dev, "%s: wait error\n", __func__);
1053 }
1054 
1055 /**
1056  * pch_udc_init() - This API initializes the USB device controller
1057  * @dev:	Reference to pch_udc_regs structure
1058  */
1059 static void pch_udc_init(struct pch_udc_dev *dev)
1060 {
1061 	if (NULL == dev) {
1062 		pr_err("%s: Invalid address\n", __func__);
1063 		return;
1064 	}
1065 	/* Soft Reset and Reset PHY */
1066 	pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
1067 	pch_udc_writel(dev, UDC_SRST | UDC_PSRST, UDC_SRST_ADDR);
1068 	mdelay(1);
1069 	pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
1070 	pch_udc_writel(dev, 0x00, UDC_SRST_ADDR);
1071 	mdelay(1);
1072 	/* mask and clear all device interrupts */
1073 	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
1074 	pch_udc_bit_set(dev, UDC_DEVIRQSTS_ADDR, UDC_DEVINT_MSK);
1075 
1076 	/* mask and clear all ep interrupts */
1077 	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1078 	pch_udc_bit_set(dev, UDC_EPIRQSTS_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1079 
1080 	/* enable dynamic CSR programming, self powered and device speed */
1081 	if (speed_fs)
1082 		pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
1083 				UDC_DEVCFG_SP | UDC_DEVCFG_SPD_FS);
1084 	else /* default: high speed */
1085 		pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
1086 				UDC_DEVCFG_SP | UDC_DEVCFG_SPD_HS);
1087 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR,
1088 			(PCH_UDC_THLEN << UDC_DEVCTL_THLEN_SHIFT) |
1089 			(PCH_UDC_BRLEN << UDC_DEVCTL_BRLEN_SHIFT) |
1090 			UDC_DEVCTL_MODE | UDC_DEVCTL_BREN |
1091 			UDC_DEVCTL_THE);
1092 }
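
/*
 * Worked example (editor's illustration, not part of the original driver):
 * with the constants above, the DEVCTL bits set at the end of
 * pch_udc_init() evaluate to
 *
 *	(PCH_UDC_THLEN << UDC_DEVCTL_THLEN_SHIFT)		0x1f000000
 *	| (PCH_UDC_BRLEN << UDC_DEVCTL_BRLEN_SHIFT)		0x000f0000
 *	| UDC_DEVCTL_MODE | UDC_DEVCTL_BREN | UDC_DEVCTL_THE	0x00000380
 *	= 0x1f0f0380
 *
 * i.e. device mode with burst and threshold DMA enabled.
 */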
1093 
1094 /**
1095  * pch_udc_exit() - This API shuts down the USB device controller
1096  * @dev:	Reference to pch_udc_regs structure
1097  */
1098 static void pch_udc_exit(struct pch_udc_dev *dev)
1099 {
1100 	/* mask all device interrupts */
1101 	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
1102 	/* mask all ep interrupts */
1103 	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1104 	/* put device in disconnected state */
1105 	pch_udc_set_disconnect(dev);
1106 }
1107 
1108 /**
1109  * pch_udc_pcd_get_frame() - This API is invoked to get the current frame number
1110  * @gadget:	Reference to the gadget driver
1111  *
1112  * Return codes:
1113  *	>= 0:		The current frame number
1114  *	-EINVAL:	If the gadget passed is NULL
1115  */
1116 static int pch_udc_pcd_get_frame(struct usb_gadget *gadget)
1117 {
1118 	struct pch_udc_dev	*dev;
1119 
1120 	if (!gadget)
1121 		return -EINVAL;
1122 	dev = container_of(gadget, struct pch_udc_dev, gadget);
1123 	return pch_udc_get_frame(dev);
1124 }
1125 
1126 /**
1127  * pch_udc_pcd_wakeup() - This API is invoked to initiate a remote wakeup
1128  * @gadget:	Reference to the gadget driver
1129  *
1130  * Return codes:
1131  *	0:		Success
1132  *	-EINVAL:	If the gadget passed is NULL
1133  */
1134 static int pch_udc_pcd_wakeup(struct usb_gadget *gadget)
1135 {
1136 	struct pch_udc_dev	*dev;
1137 	unsigned long		flags;
1138 
1139 	if (!gadget)
1140 		return -EINVAL;
1141 	dev = container_of(gadget, struct pch_udc_dev, gadget);
1142 	spin_lock_irqsave(&dev->lock, flags);
1143 	pch_udc_rmt_wakeup(dev);
1144 	spin_unlock_irqrestore(&dev->lock, flags);
1145 	return 0;
1146 }
1147 
1148 /**
1149  * pch_udc_pcd_selfpowered() - This API is invoked to specify whether the device
1150  *				is self powered or not
1151  * @gadget:	Reference to the gadget driver
1152  * @value:	Specifies self powered or not
1153  *
1154  * Return codes:
1155  *	0:		Success
1156  *	-EINVAL:	If the gadget passed is NULL
1157  */
1158 static int pch_udc_pcd_selfpowered(struct usb_gadget *gadget, int value)
1159 {
1160 	struct pch_udc_dev	*dev;
1161 
1162 	if (!gadget)
1163 		return -EINVAL;
1164 	gadget->is_selfpowered = (value != 0);
1165 	dev = container_of(gadget, struct pch_udc_dev, gadget);
1166 	if (value)
1167 		pch_udc_set_selfpowered(dev);
1168 	else
1169 		pch_udc_clear_selfpowered(dev);
1170 	return 0;
1171 }
1172 
1173 /**
1174  * pch_udc_pcd_pullup() - This API is invoked to make the device
1175  *				visible/invisible to the host
1176  * @gadget:	Reference to the gadget driver
1177  * @is_on:	Specifies whether the pull up is made active or inactive
1178  *
1179  * Return codes:
1180  *	0:		Success
1181  *	-EINVAL:	If the gadget passed is NULL
1182  */
1183 static int pch_udc_pcd_pullup(struct usb_gadget *gadget, int is_on)
1184 {
1185 	struct pch_udc_dev	*dev;
1186 
1187 	if (!gadget)
1188 		return -EINVAL;
1189 	dev = container_of(gadget, struct pch_udc_dev, gadget);
1190 	if (is_on) {
1191 		pch_udc_reconnect(dev);
1192 	} else {
1193 		if (dev->driver && dev->driver->disconnect) {
1194 			spin_lock(&dev->lock);
1195 			dev->driver->disconnect(&dev->gadget);
1196 			spin_unlock(&dev->lock);
1197 		}
1198 		pch_udc_set_disconnect(dev);
1199 	}
1200 
1201 	return 0;
1202 }
1203 
1204 /**
1205  * pch_udc_pcd_vbus_session() - This API is used by a driver for an external
1206  *				transceiver (or GPIO) that
1207  *				detects a VBUS power session starting/ending
1208  * @gadget:	Reference to the gadget driver
1209  * @is_active:	specifies whether the session is starting or ending
1210  *
1211  * Return codes:
1212  *	0:		Success
1213  *	-EINVAL:	If the gadget passed is NULL
1214  */
1215 static int pch_udc_pcd_vbus_session(struct usb_gadget *gadget, int is_active)
1216 {
1217 	struct pch_udc_dev	*dev;
1218 
1219 	if (!gadget)
1220 		return -EINVAL;
1221 	dev = container_of(gadget, struct pch_udc_dev, gadget);
1222 	pch_udc_vbus_session(dev, is_active);
1223 	return 0;
1224 }
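
/*
 * Illustrative sketch (editor's addition, not part of the original driver):
 * an external transceiver or board-level driver reports VBUS changes
 * through the generic gadget helpers, which invoke the op above:
 *
 *	usb_gadget_vbus_connect(&dev->gadget);		vbus_session(g, 1)
 *	usb_gadget_vbus_disconnect(&dev->gadget);	vbus_session(g, 0)
 */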
1225 
1226 /**
1227  * pch_udc_pcd_vbus_draw() - This API is used by gadget drivers during
1228  *				SET_CONFIGURATION calls to
1229  *				specify how much power the device can consume
1230  * @gadget:	Reference to the gadget driver
1231  * @mA:		specifies the current limit in mA
1232  *
1233  * Return codes:
1234  *	-EOPNOTSUPP:	Always returned; this controller does not
1235  *			support setting the VBUS current limit
1236  */
1237 static int pch_udc_pcd_vbus_draw(struct usb_gadget *gadget, unsigned int mA)
1238 {
1239 	return -EOPNOTSUPP;
1240 }
1241 
1242 static int pch_udc_start(struct usb_gadget *g,
1243 		struct usb_gadget_driver *driver);
1244 static int pch_udc_stop(struct usb_gadget *g);
1245 
1246 static const struct usb_gadget_ops pch_udc_ops = {
1247 	.get_frame = pch_udc_pcd_get_frame,
1248 	.wakeup = pch_udc_pcd_wakeup,
1249 	.set_selfpowered = pch_udc_pcd_selfpowered,
1250 	.pullup = pch_udc_pcd_pullup,
1251 	.vbus_session = pch_udc_pcd_vbus_session,
1252 	.vbus_draw = pch_udc_pcd_vbus_draw,
1253 	.udc_start = pch_udc_start,
1254 	.udc_stop = pch_udc_stop,
1255 };
1256 
1257 /**
1258  * pch_vbus_gpio_get_value() - This API gets value of GPIO port as VBUS status.
1259  * @dev:	Reference to the driver structure
1260  *
1261  * Return value:
1262  *	1: VBUS is high
1263  *	0: VBUS is low
1264  *     -1: VBUS detection via GPIO is not enabled
1265  */
1266 static int pch_vbus_gpio_get_value(struct pch_udc_dev *dev)
1267 {
1268 	int vbus = 0;
1269 
1270 	if (dev->vbus_gpio.port)
1271 		vbus = gpio_get_value(dev->vbus_gpio.port) ? 1 : 0;
1272 	else
1273 		vbus = -1;
1274 
1275 	return vbus;
1276 }
1277 
1278 /**
1279  * pch_vbus_gpio_work_fall() - This API keeps watch on VBUS becoming Low.
1280  *                             If VBUS is Low, disconnect is processed
1281  * @irq_work:	Structure for WorkQueue
1282  *
1283  */
1284 static void pch_vbus_gpio_work_fall(struct work_struct *irq_work)
1285 {
1286 	struct pch_vbus_gpio_data *vbus_gpio = container_of(irq_work,
1287 		struct pch_vbus_gpio_data, irq_work_fall);
1288 	struct pch_udc_dev *dev =
1289 		container_of(vbus_gpio, struct pch_udc_dev, vbus_gpio);
1290 	int vbus_saved = -1;
1291 	int vbus;
1292 	int count;
1293 
1294 	if (!dev->vbus_gpio.port)
1295 		return;
1296 
1297 	for (count = 0; count < (PCH_VBUS_PERIOD / PCH_VBUS_INTERVAL);
1298 		count++) {
1299 		vbus = pch_vbus_gpio_get_value(dev);
1300 
1301 		if ((vbus_saved == vbus) && (vbus == 0)) {
1302 			dev_dbg(&dev->pdev->dev, "VBUS fell");
1303 			if (dev->driver
1304 				&& dev->driver->disconnect) {
1305 				dev->driver->disconnect(
1306 					&dev->gadget);
1307 			}
1308 			if (dev->vbus_gpio.intr)
1309 				pch_udc_init(dev);
1310 			else
1311 				pch_udc_reconnect(dev);
1312 			return;
1313 		}
1314 		vbus_saved = vbus;
1315 		mdelay(PCH_VBUS_INTERVAL);
1316 	}
1317 }
1318 
1319 /**
1320  * pch_vbus_gpio_work_rise() - This API checks whether VBUS is High.
1321  *                             If VBUS is High, connect is processed
1322  * @irq_work:	Structure for WorkQueue
1323  *
1324  */
1325 static void pch_vbus_gpio_work_rise(struct work_struct *irq_work)
1326 {
1327 	struct pch_vbus_gpio_data *vbus_gpio = container_of(irq_work,
1328 		struct pch_vbus_gpio_data, irq_work_rise);
1329 	struct pch_udc_dev *dev =
1330 		container_of(vbus_gpio, struct pch_udc_dev, vbus_gpio);
1331 	int vbus;
1332 
1333 	if (!dev->vbus_gpio.port)
1334 		return;
1335 
1336 	mdelay(PCH_VBUS_INTERVAL);
1337 	vbus = pch_vbus_gpio_get_value(dev);
1338 
1339 	if (vbus == 1) {
1340 		dev_dbg(&dev->pdev->dev, "VBUS rose");
1341 		pch_udc_reconnect(dev);
1342 		return;
1343 	}
1344 }
1345 
1346 /**
1347  * pch_vbus_gpio_irq() - IRQ handler for the VBUS GPIO interrupt
1348  * @irq:	Interrupt request number
1349  * @data:	Pointer to the pch_udc_dev structure
1350  *
1351  * Return codes:
1352  *	IRQ_HANDLED: the VBUS change was handled (work item scheduled)
1353  *	IRQ_NONE: GPIO based VBUS detection is not in use
1354  */
1355 static irqreturn_t pch_vbus_gpio_irq(int irq, void *data)
1356 {
1357 	struct pch_udc_dev *dev = (struct pch_udc_dev *)data;
1358 
1359 	if (!dev->vbus_gpio.port || !dev->vbus_gpio.intr)
1360 		return IRQ_NONE;
1361 
1362 	if (pch_vbus_gpio_get_value(dev))
1363 		schedule_work(&dev->vbus_gpio.irq_work_rise);
1364 	else
1365 		schedule_work(&dev->vbus_gpio.irq_work_fall);
1366 
1367 	return IRQ_HANDLED;
1368 }
1369 
1370 /**
1371  * pch_vbus_gpio_init() - This API initializes the GPIO port used for VBUS detection.
1372  * @dev:	Reference to the driver structure
1373  * @vbus_gpio_port:	GPIO port number used for VBUS detection
1374  *
1375  * Return codes:
1376  *	0: Success
1377  *	-EINVAL: GPIO port is invalid or can't be initialized.
1378  */
1379 static int pch_vbus_gpio_init(struct pch_udc_dev *dev, int vbus_gpio_port)
1380 {
1381 	int err;
1382 	int irq_num = 0;
1383 
1384 	dev->vbus_gpio.port = 0;
1385 	dev->vbus_gpio.intr = 0;
1386 
1387 	if (vbus_gpio_port <= -1)
1388 		return -EINVAL;
1389 
1390 	err = gpio_is_valid(vbus_gpio_port);
1391 	if (!err) {
1392 		pr_err("%s: gpio port %d is invalid\n",
1393 			__func__, vbus_gpio_port);
1394 		return -EINVAL;
1395 	}
1396 
1397 	err = gpio_request(vbus_gpio_port, "pch_vbus");
1398 	if (err) {
1399 		pr_err("%s: can't request gpio port %d, err: %d\n",
1400 			__func__, vbus_gpio_port, err);
1401 		return -EINVAL;
1402 	}
1403 
1404 	dev->vbus_gpio.port = vbus_gpio_port;
1405 	gpio_direction_input(vbus_gpio_port);
1406 	INIT_WORK(&dev->vbus_gpio.irq_work_fall, pch_vbus_gpio_work_fall);
1407 
1408 	irq_num = gpio_to_irq(vbus_gpio_port);
1409 	if (irq_num > 0) {
1410 		irq_set_irq_type(irq_num, IRQ_TYPE_EDGE_BOTH);
1411 		err = request_irq(irq_num, pch_vbus_gpio_irq, 0,
1412 			"vbus_detect", dev);
1413 		if (!err) {
1414 			dev->vbus_gpio.intr = irq_num;
1415 			INIT_WORK(&dev->vbus_gpio.irq_work_rise,
1416 				pch_vbus_gpio_work_rise);
1417 		} else {
1418 			pr_err("%s: can't request irq %d, err: %d\n",
1419 				__func__, irq_num, err);
1420 		}
1421 	}
1422 
1423 	return 0;
1424 }
1425 
1426 /**
1427  * pch_vbus_gpio_free() - This API frees resources of GPIO port
1428  * @dev:	Reference to the driver structure
1429  */
1430 static void pch_vbus_gpio_free(struct pch_udc_dev *dev)
1431 {
1432 	if (dev->vbus_gpio.intr)
1433 		free_irq(dev->vbus_gpio.intr, dev);
1434 
1435 	if (dev->vbus_gpio.port)
1436 		gpio_free(dev->vbus_gpio.port);
1437 }
1438 
1439 /**
1440  * complete_req() - This API is invoked from the driver when processing
1441  *			of a request is complete
1442  * @ep:		Reference to the endpoint structure
1443  * @req:	Reference to the request structure
1444  * @status:	Indicates the success/failure of completion
1445  */
1446 static void complete_req(struct pch_udc_ep *ep, struct pch_udc_request *req,
1447 								 int status)
1448 	__releases(&dev->lock)
1449 	__acquires(&dev->lock)
1450 {
1451 	struct pch_udc_dev	*dev;
1452 	unsigned halted = ep->halted;
1453 
1454 	list_del_init(&req->queue);
1455 
1456 	/* set new status if pending */
1457 	if (req->req.status == -EINPROGRESS)
1458 		req->req.status = status;
1459 	else
1460 		status = req->req.status;
1461 
1462 	dev = ep->dev;
1463 	if (req->dma_mapped) {
1464 		if (req->dma == DMA_ADDR_INVALID) {
1465 			if (ep->in)
1466 				dma_unmap_single(&dev->pdev->dev, req->req.dma,
1467 						 req->req.length,
1468 						 DMA_TO_DEVICE);
1469 			else
1470 				dma_unmap_single(&dev->pdev->dev, req->req.dma,
1471 						 req->req.length,
1472 						 DMA_FROM_DEVICE);
1473 			req->req.dma = DMA_ADDR_INVALID;
1474 		} else {
1475 			if (ep->in)
1476 				dma_unmap_single(&dev->pdev->dev, req->dma,
1477 						 req->req.length,
1478 						 DMA_TO_DEVICE);
1479 			else {
1480 				dma_unmap_single(&dev->pdev->dev, req->dma,
1481 						 req->req.length,
1482 						 DMA_FROM_DEVICE);
1483 				memcpy(req->req.buf, req->buf, req->req.length);
1484 			}
1485 			kfree(req->buf);
1486 			req->dma = DMA_ADDR_INVALID;
1487 		}
1488 		req->dma_mapped = 0;
1489 	}
1490 	ep->halted = 1;
1491 	spin_lock(&dev->lock);
1492 	if (!ep->in)
1493 		pch_udc_ep_clear_rrdy(ep);
1494 	usb_gadget_giveback_request(&ep->ep, &req->req);
1495 	spin_unlock(&dev->lock);
1496 	ep->halted = halted;
1497 }
1498 
1499 /**
1500  * empty_req_queue() - This API empties the request queue of an endpoint
1501  * @ep:		Reference to the endpoint structure
1502  */
1503 static void empty_req_queue(struct pch_udc_ep *ep)
1504 {
1505 	struct pch_udc_request	*req;
1506 
1507 	ep->halted = 1;
1508 	while (!list_empty(&ep->queue)) {
1509 		req = list_entry(ep->queue.next, struct pch_udc_request, queue);
1510 		complete_req(ep, req, -ESHUTDOWN);	/* Remove from list */
1511 	}
1512 }
1513 
1514 /**
1515  * pch_udc_free_dma_chain() - This function frees the DMA chain created
1516  *				for the request
1517  * @dev:	Reference to the driver structure
1518  * @req:	Reference to the request whose chain is to be freed
1519  *
1520  * Frees every chained descriptor except the first one, which is
1521  * released when the request itself is freed.
1522  */
1523 static void pch_udc_free_dma_chain(struct pch_udc_dev *dev,
1524 				   struct pch_udc_request *req)
1525 {
1526 	struct pch_udc_data_dma_desc *td = req->td_data;
1527 	unsigned i = req->chain_len;
1528 
1529 	dma_addr_t addr2;
1530 	dma_addr_t addr = (dma_addr_t)td->next;
1531 	td->next = 0x00;
1532 	for (; i > 1; --i) {
1533 		/* do not free first desc., will be done by free for request */
1534 		td = phys_to_virt(addr);
1535 		addr2 = (dma_addr_t)td->next;
1536 		pci_pool_free(dev->data_requests, td, addr);
1537 		td->next = 0x00;
1538 		addr = addr2;
1539 	}
1540 	req->chain_len = 1;
1541 }
1542 
1543 /**
1544  * pch_udc_create_dma_chain() - This function creates or reinitializes
1545  *				a DMA chain
1546  * @ep:		Reference to the endpoint structure
1547  * @req:	Reference to the request
1548  * @buf_len:	The buffer length
1549  * @gfp_flags:	Flags to be used while mapping the data buffer
1550  *
1551  * Return codes:
1552  *	0:		success,
1553  *	-ENOMEM:	pci_pool_alloc invocation fails
1554  */
1555 static int pch_udc_create_dma_chain(struct pch_udc_ep *ep,
1556 				    struct pch_udc_request *req,
1557 				    unsigned long buf_len,
1558 				    gfp_t gfp_flags)
1559 {
1560 	struct pch_udc_data_dma_desc *td = req->td_data, *last;
1561 	unsigned long bytes = req->req.length, i = 0;
1562 	dma_addr_t dma_addr;
1563 	unsigned len = 1;
1564 
1565 	if (req->chain_len > 1)
1566 		pch_udc_free_dma_chain(ep->dev, req);
1567 
1568 	if (req->dma == DMA_ADDR_INVALID)
1569 		td->dataptr = req->req.dma;
1570 	else
1571 		td->dataptr = req->dma;
1572 
1573 	td->status = PCH_UDC_BS_HST_BSY;
1574 	for (; ; bytes -= buf_len, ++len) {
1575 		td->status = PCH_UDC_BS_HST_BSY | min(buf_len, bytes);
1576 		if (bytes <= buf_len)
1577 			break;
1578 		last = td;
1579 		td = pci_pool_alloc(ep->dev->data_requests, gfp_flags,
1580 				    &dma_addr);
1581 		if (!td)
1582 			goto nomem;
1583 		i += buf_len;
1584 		td->dataptr = req->td_data->dataptr + i;
1585 		last->next = dma_addr;
1586 	}
1587 
1588 	req->td_data_last = td;
1589 	td->status |= PCH_UDC_DMA_LAST;
1590 	td->next = req->td_data_phys;
1591 	req->chain_len = len;
1592 	return 0;
1593 
1594 nomem:
1595 	if (len > 1) {
1596 		req->chain_len = len;
1597 		pch_udc_free_dma_chain(ep->dev, req);
1598 	}
1599 	req->chain_len = 1;
1600 	return -ENOMEM;
1601 }
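
/*
 * Worked example (editor's illustration, not part of the original driver):
 * for an assumed 1000 byte request on an endpoint whose buffer length
 * (ep->ep.maxpacket) is 256, the loop above builds a chain of four
 * descriptors carrying 256, 256, 256 and 232 bytes.  The last descriptor
 * gets PCH_UDC_DMA_LAST, its next pointer wraps back to req->td_data_phys
 * and req->chain_len becomes 4.  The first descriptor is the one allocated
 * with the request (freed in pch_udc_free_request()); the other three come
 * from the data_requests pool and are returned by pch_udc_free_dma_chain().
 */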
1602 
1603 /**
1604  * prepare_dma() - This function creates and initializes the DMA chain
1605  *			for the request
1606  * @ep:		Reference to the endpoint structure
1607  * @req:	Reference to the request
1608  * @gfp:	Flag to be used while mapping the data buffer
1609  *
1610  * Return codes:
1611  *	0:		Success
1612  *	Nonzero:	linux error number on failure
1613  */
1614 static int prepare_dma(struct pch_udc_ep *ep, struct pch_udc_request *req,
1615 			  gfp_t gfp)
1616 {
1617 	int	retval;
1618 
1619 	/* Allocate and create a DMA chain */
1620 	retval = pch_udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
1621 	if (retval) {
1622 		pr_err("%s: could not create DMA chain:%d\n", __func__, retval);
1623 		return retval;
1624 	}
1625 	if (ep->in)
1626 		req->td_data->status = (req->td_data->status &
1627 				~PCH_UDC_BUFF_STS) | PCH_UDC_BS_HST_RDY;
1628 	return 0;
1629 }
1630 
1631 /**
1632  * process_zlp() - This function processes zero length packets
1633  *			from the gadget driver
1634  * @ep:		Reference to the endpoint structure
1635  * @req:	Reference to the request
1636  */
1637 static void process_zlp(struct pch_udc_ep *ep, struct pch_udc_request *req)
1638 {
1639 	struct pch_udc_dev	*dev = ep->dev;
1640 
1641 	/* IN zlp's are handled by hardware */
1642 	complete_req(ep, req, 0);
1643 
1644 	/* if set_config or set_intf is waiting for ack by zlp
1645 	 * then set CSR_DONE
1646 	 */
1647 	if (dev->set_cfg_not_acked) {
1648 		pch_udc_set_csr_done(dev);
1649 		dev->set_cfg_not_acked = 0;
1650 	}
1651 	/* setup command is ACK'ed now by zlp */
1652 	if (!dev->stall && dev->waiting_zlp_ack) {
1653 		pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
1654 		dev->waiting_zlp_ack = 0;
1655 	}
1656 }
1657 
1658 /**
1659  * pch_udc_start_rxrequest() - This function starts a receive (OUT) request.
1660  * @ep:		Reference to the endpoint structure
1661  * @req:	Reference to the request structure
1662  */
1663 static void pch_udc_start_rxrequest(struct pch_udc_ep *ep,
1664 					 struct pch_udc_request *req)
1665 {
1666 	struct pch_udc_data_dma_desc *td_data;
1667 
1668 	pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
1669 	td_data = req->td_data;
1670 	/* Set the status bits for all descriptors */
1671 	while (1) {
1672 		td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
1673 				    PCH_UDC_BS_HST_RDY;
1674 		if ((td_data->status & PCH_UDC_DMA_LAST) ==  PCH_UDC_DMA_LAST)
1675 			break;
1676 		td_data = phys_to_virt(td_data->next);
1677 	}
1678 	/* Write the descriptor pointer */
1679 	pch_udc_ep_set_ddptr(ep, req->td_data_phys);
1680 	req->dma_going = 1;
1681 	pch_udc_enable_ep_interrupts(ep->dev, UDC_EPINT_OUT_EP0 << ep->num);
1682 	pch_udc_set_dma(ep->dev, DMA_DIR_RX);
1683 	pch_udc_ep_clear_nak(ep);
1684 	pch_udc_ep_set_rrdy(ep);
1685 }
1686 
1687 /**
1688  * pch_udc_pcd_ep_enable() - This API enables the endpoint. It is called
1689  *				from gadget driver
1690  * @usbep:	Reference to the USB endpoint structure
1691  * @desc:	Reference to the USB endpoint descriptor structure
1692  *
1693  * Return codes:
1694  *	0:		Success
1695  *	-EINVAL:	invalid endpoint or descriptor
1696  *	-ESHUTDOWN:	no gadget driver bound or device not connected
1697  */
1698 static int pch_udc_pcd_ep_enable(struct usb_ep *usbep,
1699 				    const struct usb_endpoint_descriptor *desc)
1700 {
1701 	struct pch_udc_ep	*ep;
1702 	struct pch_udc_dev	*dev;
1703 	unsigned long		iflags;
1704 
1705 	if (!usbep || (usbep->name == ep0_string) || !desc ||
1706 	    (desc->bDescriptorType != USB_DT_ENDPOINT) || !desc->wMaxPacketSize)
1707 		return -EINVAL;
1708 
1709 	ep = container_of(usbep, struct pch_udc_ep, ep);
1710 	dev = ep->dev;
1711 	if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
1712 		return -ESHUTDOWN;
1713 	spin_lock_irqsave(&dev->lock, iflags);
1714 	ep->ep.desc = desc;
1715 	ep->halted = 0;
1716 	pch_udc_ep_enable(ep, &ep->dev->cfg_data, desc);
1717 	ep->ep.maxpacket = usb_endpoint_maxp(desc);
1718 	pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1719 	spin_unlock_irqrestore(&dev->lock, iflags);
1720 	return 0;
1721 }
1722 
1723 /**
1724  * pch_udc_pcd_ep_disable() - This API disables the endpoint and is called
1725  *				from gadget driver
1726  * @usbep:	Reference to the USB endpoint structure
1727  *
1728  * Return codes:
1729  *	0:		Success
1730  *	-EINVAL:	invalid endpoint or endpoint not enabled
1731  */
1732 static int pch_udc_pcd_ep_disable(struct usb_ep *usbep)
1733 {
1734 	struct pch_udc_ep	*ep;
1735 	struct pch_udc_dev	*dev;
1736 	unsigned long	iflags;
1737 
1738 	if (!usbep)
1739 		return -EINVAL;
1740 
1741 	ep = container_of(usbep, struct pch_udc_ep, ep);
1742 	dev = ep->dev;
1743 	if ((usbep->name == ep0_string) || !ep->ep.desc)
1744 		return -EINVAL;
1745 
1746 	spin_lock_irqsave(&ep->dev->lock, iflags);
1747 	empty_req_queue(ep);
1748 	ep->halted = 1;
1749 	pch_udc_ep_disable(ep);
1750 	pch_udc_disable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1751 	ep->ep.desc = NULL;
1752 	INIT_LIST_HEAD(&ep->queue);
1753 	spin_unlock_irqrestore(&ep->dev->lock, iflags);
1754 	return 0;
1755 }
1756 
1757 /**
1758  * pch_udc_alloc_request() - This function allocates a request structure.
1759  *				It is called by gadget driver
1760  * @usbep:	Reference to the USB endpoint structure
1761  * @gfp:	Flag to be used while allocating memory
1762  *
1763  * Return codes:
1764  *	NULL:			Failure
1765  *	Allocated address:	Success
1766  */
1767 static struct usb_request *pch_udc_alloc_request(struct usb_ep *usbep,
1768 						  gfp_t gfp)
1769 {
1770 	struct pch_udc_request		*req;
1771 	struct pch_udc_ep		*ep;
1772 	struct pch_udc_data_dma_desc	*dma_desc;
1773 	struct pch_udc_dev		*dev;
1774 
1775 	if (!usbep)
1776 		return NULL;
1777 	ep = container_of(usbep, struct pch_udc_ep, ep);
1778 	dev = ep->dev;
1779 	req = kzalloc(sizeof *req, gfp);
1780 	if (!req)
1781 		return NULL;
1782 	req->req.dma = DMA_ADDR_INVALID;
1783 	req->dma = DMA_ADDR_INVALID;
1784 	INIT_LIST_HEAD(&req->queue);
1785 	if (!ep->dev->dma_addr)
1786 		return &req->req;
1787 	/* ep0 in requests are allocated from data pool here */
1788 	dma_desc = pci_pool_alloc(ep->dev->data_requests, gfp,
1789 				  &req->td_data_phys);
1790 	if (NULL == dma_desc) {
1791 		kfree(req);
1792 		return NULL;
1793 	}
1794 	/* prevent the controller from using the descriptor yet - set HOST BUSY */
1795 	dma_desc->status |= PCH_UDC_BS_HST_BSY;
1796 	dma_desc->dataptr = cpu_to_le32(DMA_ADDR_INVALID);
1797 	req->td_data = dma_desc;
1798 	req->td_data_last = dma_desc;
1799 	req->chain_len = 1;
1800 	return &req->req;
1801 }
1802 
1803 /**
1804  * pch_udc_free_request() - This function frees a request structure.
1805  *				It is called by the gadget driver
1806  * @usbep:	Reference to the USB endpoint structure
1807  * @usbreq:	Reference to the USB request
1808  */
1809 static void pch_udc_free_request(struct usb_ep *usbep,
1810 				  struct usb_request *usbreq)
1811 {
1812 	struct pch_udc_ep	*ep;
1813 	struct pch_udc_request	*req;
1814 	struct pch_udc_dev	*dev;
1815 
1816 	if (!usbep || !usbreq)
1817 		return;
1818 	ep = container_of(usbep, struct pch_udc_ep, ep);
1819 	req = container_of(usbreq, struct pch_udc_request, req);
1820 	dev = ep->dev;
1821 	if (!list_empty(&req->queue))
1822 		dev_err(&dev->pdev->dev, "%s: %s req=0x%p queue not empty\n",
1823 			__func__, usbep->name, req);
1824 	if (req->td_data != NULL) {
1825 		if (req->chain_len > 1)
1826 			pch_udc_free_dma_chain(ep->dev, req);
1827 		pci_pool_free(ep->dev->data_requests, req->td_data,
1828 			      req->td_data_phys);
1829 	}
1830 	kfree(req);
1831 }
1832 
1833 /**
1834  * pch_udc_pcd_queue() - This function queues a request packet. It is called
1835  *			by the gadget driver
1836  * @usbep:	Reference to the USB endpoint structure
1837  * @usbreq:	Reference to the USB request
1838  * @gfp:	Flag to be used while mapping the data buffer
1839  *
1840  * Return codes:
1841  *	0:			Success
1842  *	linux error number:	Failure
1843  */
1844 static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
1845 								 gfp_t gfp)
1846 {
1847 	int retval = 0;
1848 	struct pch_udc_ep	*ep;
1849 	struct pch_udc_dev	*dev;
1850 	struct pch_udc_request	*req;
1851 	unsigned long	iflags;
1852 
1853 	if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf)
1854 		return -EINVAL;
1855 	ep = container_of(usbep, struct pch_udc_ep, ep);
1856 	dev = ep->dev;
1857 	if (!ep->ep.desc && ep->num)
1858 		return -EINVAL;
1859 	req = container_of(usbreq, struct pch_udc_request, req);
1860 	if (!list_empty(&req->queue))
1861 		return -EINVAL;
1862 	if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
1863 		return -ESHUTDOWN;
1864 	spin_lock_irqsave(&dev->lock, iflags);
1865 	/* map the buffer for dma */
1866 	if (usbreq->length &&
1867 	    ((usbreq->dma == DMA_ADDR_INVALID) || !usbreq->dma)) {
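		/* A 4-byte aligned buffer is mapped for DMA directly; otherwise
		 * the data goes through a separately allocated bounce buffer
		 * (req->buf). */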
1868 		if (!((unsigned long)(usbreq->buf) & 0x03)) {
1869 			if (ep->in)
1870 				usbreq->dma = dma_map_single(&dev->pdev->dev,
1871 							     usbreq->buf,
1872 							     usbreq->length,
1873 							     DMA_TO_DEVICE);
1874 			else
1875 				usbreq->dma = dma_map_single(&dev->pdev->dev,
1876 							     usbreq->buf,
1877 							     usbreq->length,
1878 							     DMA_FROM_DEVICE);
1879 		} else {
1880 			req->buf = kzalloc(usbreq->length, GFP_ATOMIC);
1881 			if (!req->buf) {
1882 				retval = -ENOMEM;
1883 				goto probe_end;
1884 			}
1885 			if (ep->in) {
1886 				memcpy(req->buf, usbreq->buf, usbreq->length);
1887 				req->dma = dma_map_single(&dev->pdev->dev,
1888 							  req->buf,
1889 							  usbreq->length,
1890 							  DMA_TO_DEVICE);
1891 			} else
1892 				req->dma = dma_map_single(&dev->pdev->dev,
1893 							  req->buf,
1894 							  usbreq->length,
1895 							  DMA_FROM_DEVICE);
1896 		}
1897 		req->dma_mapped = 1;
1898 	}
1899 	if (usbreq->length > 0) {
1900 		retval = prepare_dma(ep, req, GFP_ATOMIC);
1901 		if (retval)
1902 			goto probe_end;
1903 	}
1904 	usbreq->actual = 0;
1905 	usbreq->status = -EINPROGRESS;
1906 	req->dma_done = 0;
1907 	if (list_empty(&ep->queue) && !ep->halted) {
1908 		/* no pending transfer, so start this req */
1909 		if (!usbreq->length) {
1910 			process_zlp(ep, req);
1911 			retval = 0;
1912 			goto probe_end;
1913 		}
1914 		if (!ep->in) {
1915 			pch_udc_start_rxrequest(ep, req);
1916 		} else {
1917 			/*
1918 			 * For IN transfers the descriptors will be programmed
1919 			 * and the P bit will be set when an IN token
1920 			 * is received.
1921 			 */
1922 			pch_udc_wait_ep_stall(ep);
1923 			pch_udc_ep_clear_nak(ep);
1924 			pch_udc_enable_ep_interrupts(ep->dev, (1 << ep->num));
1925 		}
1926 	}
1927 	/* Now add this request to the ep's pending requests */
1928 	if (req != NULL)
1929 		list_add_tail(&req->queue, &ep->queue);
1930 
1931 probe_end:
1932 	spin_unlock_irqrestore(&dev->lock, iflags);
1933 	return retval;
1934 }
1935 
1936 /**
1937  * pch_udc_pcd_dequeue() - This function de-queues a request packet.
1938  *				It is called by the gadget driver
1939  * @usbep:	Reference to the USB endpoint structure
1940  * @usbreq:	Reference to the USB request
1941  *
1942  * Return codes:
1943  *	0:			Success
1944  *	linux error number:	Failure
1945  */
1946 static int pch_udc_pcd_dequeue(struct usb_ep *usbep,
1947 				struct usb_request *usbreq)
1948 {
1949 	struct pch_udc_ep	*ep;
1950 	struct pch_udc_request	*req;
1951 	struct pch_udc_dev	*dev;
1952 	unsigned long		flags;
1953 	int ret = -EINVAL;
1954 
1955 	ep = container_of(usbep, struct pch_udc_ep, ep);
1956 	dev = ep->dev;
1957 	if (!usbep || !usbreq || (!ep->ep.desc && ep->num))
1958 		return ret;
1959 	req = container_of(usbreq, struct pch_udc_request, req);
1960 	spin_lock_irqsave(&ep->dev->lock, flags);
1961 	/* make sure it's still queued on this endpoint */
1962 	list_for_each_entry(req, &ep->queue, queue) {
1963 		if (&req->req == usbreq) {
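			/* Found it: NAK the endpoint, then complete the
			 * request with -ECONNRESET. */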
1964 			pch_udc_ep_set_nak(ep);
1965 			if (!list_empty(&req->queue))
1966 				complete_req(ep, req, -ECONNRESET);
1967 			ret = 0;
1968 			break;
1969 		}
1970 	}
1971 	spin_unlock_irqrestore(&ep->dev->lock, flags);
1972 	return ret;
1973 }
1974 
1975 /**
1976  * pch_udc_pcd_set_halt() - This function sets or clears the endpoint halt
1977  *			    feature
1978  * @usbep:	Reference to the USB endpoint structure
1979  * @halt:	Specifies whether to set or clear the feature
1980  *
1981  * Return codes:
1982  *	0:			Success
1983  *	linux error number:	Failure
1984  */
1985 static int pch_udc_pcd_set_halt(struct usb_ep *usbep, int halt)
1986 {
1987 	struct pch_udc_ep	*ep;
1988 	struct pch_udc_dev	*dev;
1989 	unsigned long iflags;
1990 	int ret;
1991 
1992 	if (!usbep)
1993 		return -EINVAL;
1994 	ep = container_of(usbep, struct pch_udc_ep, ep);
1995 	dev = ep->dev;
1996 	if (!ep->ep.desc && !ep->num)
1997 		return -EINVAL;
1998 	if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
1999 		return -ESHUTDOWN;
2000 	spin_lock_irqsave(&udc_stall_spinlock, iflags);
2001 	if (list_empty(&ep->queue)) {
2002 		if (halt) {
2003 			if (ep->num == PCH_UDC_EP0)
2004 				ep->dev->stall = 1;
2005 			pch_udc_ep_set_stall(ep);
2006 			pch_udc_enable_ep_interrupts(ep->dev,
2007 						     PCH_UDC_EPINT(ep->in,
2008 								   ep->num));
2009 		} else {
2010 			pch_udc_ep_clear_stall(ep);
2011 		}
2012 		ret = 0;
2013 	} else {
2014 		ret = -EAGAIN;
2015 	}
2016 	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
2017 	return ret;
2018 }
2019 
2020 /**
2021  * pch_udc_pcd_set_wedge() - This function wedges the endpoint, i.e. sets
2022  *				the halt feature such that only the gadget
2023  *				driver can clear it
2024  * @usbep:	Reference to the USB endpoint structure
2025  *
2026  * Return codes:
2027  *	0:			Success
2028  *	linux error number:	Failure
2029  */
2030 static int pch_udc_pcd_set_wedge(struct usb_ep *usbep)
2031 {
2032 	struct pch_udc_ep	*ep;
2033 	struct pch_udc_dev	*dev;
2034 	unsigned long iflags;
2035 	int ret;
2036 
2037 	if (!usbep)
2038 		return -EINVAL;
2039 	ep = container_of(usbep, struct pch_udc_ep, ep);
2040 	dev = ep->dev;
2041 	if (!ep->ep.desc && !ep->num)
2042 		return -EINVAL;
2043 	if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
2044 		return -ESHUTDOWN;
2045 	spin_lock_irqsave(&udc_stall_spinlock, iflags);
2046 	if (!list_empty(&ep->queue)) {
2047 		ret = -EAGAIN;
2048 	} else {
2049 		if (ep->num == PCH_UDC_EP0)
2050 			ep->dev->stall = 1;
2051 		pch_udc_ep_set_stall(ep);
2052 		pch_udc_enable_ep_interrupts(ep->dev,
2053 					     PCH_UDC_EPINT(ep->in, ep->num));
2054 		ep->dev->prot_stall = 1;
2055 		ret = 0;
2056 	}
2057 	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
2058 	return ret;
2059 }
2060 
2061 /**
2062  * pch_udc_pcd_fifo_flush() - This function flushes the FIFO of the specified endpoint
2063  * @usbep:	Reference to the USB endpoint structure
2064  */
2065 static void pch_udc_pcd_fifo_flush(struct usb_ep *usbep)
2066 {
2067 	struct pch_udc_ep  *ep;
2068 
2069 	if (!usbep)
2070 		return;
2071 
2072 	ep = container_of(usbep, struct pch_udc_ep, ep);
2073 	if (ep->ep.desc || !ep->num)
2074 		pch_udc_ep_fifo_flush(ep, ep->in);
2075 }
2076 
2077 static const struct usb_ep_ops pch_udc_ep_ops = {
2078 	.enable		= pch_udc_pcd_ep_enable,
2079 	.disable	= pch_udc_pcd_ep_disable,
2080 	.alloc_request	= pch_udc_alloc_request,
2081 	.free_request	= pch_udc_free_request,
2082 	.queue		= pch_udc_pcd_queue,
2083 	.dequeue	= pch_udc_pcd_dequeue,
2084 	.set_halt	= pch_udc_pcd_set_halt,
2085 	.set_wedge	= pch_udc_pcd_set_wedge,
2086 	.fifo_status	= NULL,
2087 	.fifo_flush	= pch_udc_pcd_fifo_flush,
2088 };
2089 
2090 /**
2091  * pch_udc_init_setup_buff() - This function initializes the SETUP buffer
2092  * @td_stp:	Reference to the SETUP buffer structure
2093  */
2094 static void pch_udc_init_setup_buff(struct pch_udc_stp_dma_desc *td_stp)
2095 {
2096 	static u32	pky_marker;
2097 
2098 	if (!td_stp)
2099 		return;
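	/* Tag the reserved word with an incrementing marker (apparently a
	 * debug aid), fill the request bytes with 0xFF and mark the buffer
	 * host-ready. */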
2100 	td_stp->reserved = ++pky_marker;
2101 	memset(&td_stp->request, 0xFF, sizeof td_stp->request);
2102 	td_stp->status = PCH_UDC_BS_HST_RDY;
2103 }
2104 
2105 /**
2106  * pch_udc_start_next_txrequest() - This function starts
2107  *					the next transmit (IN) request
2108  * @ep:	Reference to the endpoint structure
2109  */
2110 static void pch_udc_start_next_txrequest(struct pch_udc_ep *ep)
2111 {
2112 	struct pch_udc_request *req;
2113 	struct pch_udc_data_dma_desc *td_data;
2114 
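	/* If the P bit (presumably poll demand) is still set, a transfer is
	 * already in progress, so do nothing. */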
2115 	if (pch_udc_read_ep_control(ep) & UDC_EPCTL_P)
2116 		return;
2117 
2118 	if (list_empty(&ep->queue))
2119 		return;
2120 
2121 	/* next request */
2122 	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2123 	if (req->dma_going)
2124 		return;
2125 	if (!req->td_data)
2126 		return;
2127 	pch_udc_wait_ep_stall(ep);
2128 	req->dma_going = 1;
2129 	pch_udc_ep_set_ddptr(ep, 0);
2130 	td_data = req->td_data;
2131 	while (1) {
2132 		td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
2133 				   PCH_UDC_BS_HST_RDY;
2134 		if ((td_data->status & PCH_UDC_DMA_LAST) == PCH_UDC_DMA_LAST)
2135 			break;
2136 		td_data = phys_to_virt(td_data->next);
2137 	}
2138 	pch_udc_ep_set_ddptr(ep, req->td_data_phys);
2139 	pch_udc_set_dma(ep->dev, DMA_DIR_TX);
2140 	pch_udc_ep_set_pd(ep);
2141 	pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
2142 	pch_udc_ep_clear_nak(ep);
2143 }
2144 
2145 /**
2146  * pch_udc_complete_transfer() - This function completes a transfer
2147  * @ep:		Reference to the endpoint structure
2148  */
2149 static void pch_udc_complete_transfer(struct pch_udc_ep *ep)
2150 {
2151 	struct pch_udc_request *req;
2152 	struct pch_udc_dev *dev = ep->dev;
2153 
2154 	if (list_empty(&ep->queue))
2155 		return;
2156 	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2157 	if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
2158 	    PCH_UDC_BS_DMA_DONE)
2159 		return;
2160 	if ((req->td_data_last->status & PCH_UDC_RXTX_STS) !=
2161 	     PCH_UDC_RTS_SUCC) {
2162 		dev_err(&dev->pdev->dev, "Invalid RXTX status (0x%08x) "
2163 			"epstatus=0x%08x\n",
2164 		       (req->td_data_last->status & PCH_UDC_RXTX_STS),
2165 		       (int)(ep->epsts));
2166 		return;
2167 	}
2168 
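	/* Transfer completed successfully: report the full length, hand the
	 * descriptors back to software (HOST BUSY) and complete the request. */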
2169 	req->req.actual = req->req.length;
2170 	req->td_data_last->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
2171 	req->td_data->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
2172 	complete_req(ep, req, 0);
2173 	req->dma_going = 0;
2174 	if (!list_empty(&ep->queue)) {
2175 		pch_udc_wait_ep_stall(ep);
2176 		pch_udc_ep_clear_nak(ep);
2177 		pch_udc_enable_ep_interrupts(ep->dev,
2178 					     PCH_UDC_EPINT(ep->in, ep->num));
2179 	} else {
2180 		pch_udc_disable_ep_interrupts(ep->dev,
2181 					      PCH_UDC_EPINT(ep->in, ep->num));
2182 	}
2183 }
2184 
2185 /**
2186  * pch_udc_complete_receiver() - This function completes a receiver
2187  * @ep:		Reference to the endpoint structure
2188  */
2189 static void pch_udc_complete_receiver(struct pch_udc_ep *ep)
2190 {
2191 	struct pch_udc_request *req;
2192 	struct pch_udc_dev *dev = ep->dev;
2193 	unsigned int count;
2194 	struct pch_udc_data_dma_desc *td;
2195 	dma_addr_t addr;
2196 
2197 	if (list_empty(&ep->queue))
2198 		return;
2199 	/* next request */
2200 	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2201 	pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
2202 	pch_udc_ep_set_ddptr(ep, 0);
2203 	if ((req->td_data_last->status & PCH_UDC_BUFF_STS) ==
2204 	    PCH_UDC_BS_DMA_DONE)
2205 		td = req->td_data_last;
2206 	else
2207 		td = req->td_data;
2208 
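	/* Walk the descriptor chain until the descriptor flagged both DMA done
	 * and DMA_LAST, and take the received byte count from it. */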
2209 	while (1) {
2210 		if ((td->status & PCH_UDC_RXTX_STS) != PCH_UDC_RTS_SUCC) {
2211 			dev_err(&dev->pdev->dev, "Invalid RXTX status=0x%08x "
2212 				"epstatus=0x%08x\n",
2213 				(req->td_data->status & PCH_UDC_RXTX_STS),
2214 				(int)(ep->epsts));
2215 			return;
2216 		}
2217 		if ((td->status & PCH_UDC_BUFF_STS) == PCH_UDC_BS_DMA_DONE)
2218 			if (td->status & PCH_UDC_DMA_LAST) {
2219 				count = td->status & PCH_UDC_RXTX_BYTES;
2220 				break;
2221 			}
2222 		if (td == req->td_data_last) {
2223 			dev_err(&dev->pdev->dev, "Incomplete RX descriptor\n");
2224 			return;
2225 		}
2226 		addr = (dma_addr_t)td->next;
2227 		td = phys_to_virt(addr);
2228 	}
2229 	/* on 64k packets the RXBYTES field is zero */
2230 	if (!count && (req->req.length == UDC_DMA_MAXPACKET))
2231 		count = UDC_DMA_MAXPACKET;
2232 	req->td_data->status |= PCH_UDC_DMA_LAST;
2233 	td->status |= PCH_UDC_BS_HST_BSY;
2234 
2235 	req->dma_going = 0;
2236 	req->req.actual = count;
2237 	complete_req(ep, req, 0);
2238 	/* If there is a new or failed request, try to start it now */
2239 	if (!list_empty(&ep->queue)) {
2240 		req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2241 		pch_udc_start_rxrequest(ep, req);
2242 	}
2243 }
2244 
2245 /**
2246  * pch_udc_svc_data_in() - This function process endpoint interrupts
2247  *				for IN endpoints
2248  * @dev:	Reference to the device structure
2249  * @ep_num:	Endpoint that generated the interrupt
2250  */
2251 static void pch_udc_svc_data_in(struct pch_udc_dev *dev, int ep_num)
2252 {
2253 	u32	epsts;
2254 	struct pch_udc_ep	*ep;
2255 
2256 	ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
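	/* Consume the endpoint status latched by pch_udc_read_all_epstatus() */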
2257 	epsts = ep->epsts;
2258 	ep->epsts = 0;
2259 
2260 	if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA  | UDC_EPSTS_HE |
2261 		       UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
2262 		       UDC_EPSTS_RSS | UDC_EPSTS_XFERDONE)))
2263 		return;
2264 	if ((epsts & UDC_EPSTS_BNA))
2265 		return;
2266 	if (epsts & UDC_EPSTS_HE)
2267 		return;
2268 	if (epsts & UDC_EPSTS_RSS) {
2269 		pch_udc_ep_set_stall(ep);
2270 		pch_udc_enable_ep_interrupts(ep->dev,
2271 					     PCH_UDC_EPINT(ep->in, ep->num));
2272 	}
2273 	if (epsts & UDC_EPSTS_RCS) {
2274 		if (!dev->prot_stall) {
2275 			pch_udc_ep_clear_stall(ep);
2276 		} else {
2277 			pch_udc_ep_set_stall(ep);
2278 			pch_udc_enable_ep_interrupts(ep->dev,
2279 						PCH_UDC_EPINT(ep->in, ep->num));
2280 		}
2281 	}
2282 	if (epsts & UDC_EPSTS_TDC)
2283 		pch_udc_complete_transfer(ep);
2284 	/* On IN interrupt, provide data if we have any */
2285 	if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_RSS) &&
2286 	    !(epsts & UDC_EPSTS_TDC) && !(epsts & UDC_EPSTS_TXEMPTY))
2287 		pch_udc_start_next_txrequest(ep);
2288 }
2289 
2290 /**
2291  * pch_udc_svc_data_out() - Handles interrupts from OUT endpoint
2292  * @dev:	Reference to the device structure
2293  * @ep_num:	Endpoint that generated the interrupt
2294  */
2295 static void pch_udc_svc_data_out(struct pch_udc_dev *dev, int ep_num)
2296 {
2297 	u32			epsts;
2298 	struct pch_udc_ep		*ep;
2299 	struct pch_udc_request		*req = NULL;
2300 
2301 	ep = &dev->ep[UDC_EPOUT_IDX(ep_num)];
2302 	epsts = ep->epsts;
2303 	ep->epsts = 0;
2304 
2305 	if ((epsts & UDC_EPSTS_BNA) && (!list_empty(&ep->queue))) {
2306 		/* next request */
2307 		req = list_entry(ep->queue.next, struct pch_udc_request,
2308 				 queue);
2309 		if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
2310 		     PCH_UDC_BS_DMA_DONE) {
2311 			if (!req->dma_going)
2312 				pch_udc_start_rxrequest(ep, req);
2313 			return;
2314 		}
2315 	}
2316 	if (epsts & UDC_EPSTS_HE)
2317 		return;
2318 	if (epsts & UDC_EPSTS_RSS) {
2319 		pch_udc_ep_set_stall(ep);
2320 		pch_udc_enable_ep_interrupts(ep->dev,
2321 					     PCH_UDC_EPINT(ep->in, ep->num));
2322 	}
2323 	if (epsts & UDC_EPSTS_RCS) {
2324 		if (!dev->prot_stall) {
2325 			pch_udc_ep_clear_stall(ep);
2326 		} else {
2327 			pch_udc_ep_set_stall(ep);
2328 			pch_udc_enable_ep_interrupts(ep->dev,
2329 						PCH_UDC_EPINT(ep->in, ep->num));
2330 		}
2331 	}
2332 	if (((epsts & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2333 	    UDC_EPSTS_OUT_DATA) {
2334 		if (ep->dev->prot_stall == 1) {
2335 			pch_udc_ep_set_stall(ep);
2336 			pch_udc_enable_ep_interrupts(ep->dev,
2337 						PCH_UDC_EPINT(ep->in, ep->num));
2338 		} else {
2339 			pch_udc_complete_receiver(ep);
2340 		}
2341 	}
2342 	if (list_empty(&ep->queue))
2343 		pch_udc_set_dma(dev, DMA_DIR_RX);
2344 }
2345 
2346 /**
2347  * pch_udc_svc_control_in() - Handle Control IN endpoint interrupts
2348  * @dev:	Reference to the device structure
2349  */
2350 static void pch_udc_svc_control_in(struct pch_udc_dev *dev)
2351 {
2352 	u32	epsts;
2353 	struct pch_udc_ep	*ep;
2354 	struct pch_udc_ep	*ep_out;
2355 
2356 	ep = &dev->ep[UDC_EP0IN_IDX];
2357 	ep_out = &dev->ep[UDC_EP0OUT_IDX];
2358 	epsts = ep->epsts;
2359 	ep->epsts = 0;
2360 
2361 	if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA | UDC_EPSTS_HE |
2362 		       UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
2363 		       UDC_EPSTS_XFERDONE)))
2364 		return;
2365 	if ((epsts & UDC_EPSTS_BNA))
2366 		return;
2367 	if (epsts & UDC_EPSTS_HE)
2368 		return;
2369 	if ((epsts & UDC_EPSTS_TDC) && (!dev->stall)) {
2370 		pch_udc_complete_transfer(ep);
2371 		pch_udc_clear_dma(dev, DMA_DIR_RX);
2372 		ep_out->td_data->status = (ep_out->td_data->status &
2373 					~PCH_UDC_BUFF_STS) |
2374 					PCH_UDC_BS_HST_RDY;
2375 		pch_udc_ep_clear_nak(ep_out);
2376 		pch_udc_set_dma(dev, DMA_DIR_RX);
2377 		pch_udc_ep_set_rrdy(ep_out);
2378 	}
2379 	/* On IN interrupt, provide data if we have any */
2380 	if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_TDC) &&
2381 	     !(epsts & UDC_EPSTS_TXEMPTY))
2382 		pch_udc_start_next_txrequest(ep);
2383 }
2384 
2385 /**
2386  * pch_udc_svc_control_out() - Routine that handles Control
2387  *					OUT endpoint interrupts
2388  * @dev:	Reference to the device structure
2389  */
2390 static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
2391 	__releases(&dev->lock)
2392 	__acquires(&dev->lock)
2393 {
2394 	u32	stat;
2395 	int setup_supported;
2396 	struct pch_udc_ep	*ep;
2397 
2398 	ep = &dev->ep[UDC_EP0OUT_IDX];
2399 	stat = ep->epsts;
2400 	ep->epsts = 0;
2401 
2402 	/* If setup data */
2403 	if (((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2404 	    UDC_EPSTS_OUT_SETUP) {
2405 		dev->stall = 0;
2406 		dev->ep[UDC_EP0IN_IDX].halted = 0;
2407 		dev->ep[UDC_EP0OUT_IDX].halted = 0;
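		/* Copy the 8-byte SETUP packet out of the DMA setup buffer
		 * before re-initialising the buffer for the next SETUP. */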
2408 		dev->setup_data = ep->td_stp->request;
2409 		pch_udc_init_setup_buff(ep->td_stp);
2410 		pch_udc_clear_dma(dev, DMA_DIR_RX);
2411 		pch_udc_ep_fifo_flush(&(dev->ep[UDC_EP0IN_IDX]),
2412 				      dev->ep[UDC_EP0IN_IDX].in);
2413 		if ((dev->setup_data.bRequestType & USB_DIR_IN))
2414 			dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
2415 		else /* OUT */
2416 			dev->gadget.ep0 = &ep->ep;
2417 		spin_lock(&dev->lock);
2418 		/* Mass Storage (Bulk-Only) Reset: bRequestType 0x21, bRequest 0xFF */
2419 		if ((dev->setup_data.bRequestType == 0x21) &&
2420 		    (dev->setup_data.bRequest == 0xFF))
2421 			dev->prot_stall = 0;
2422 		/* call gadget with setup data received */
2423 		setup_supported = dev->driver->setup(&dev->gadget,
2424 						     &dev->setup_data);
2425 		spin_unlock(&dev->lock);
2426 
2427 		if (dev->setup_data.bRequestType & USB_DIR_IN) {
2428 			ep->td_data->status = (ep->td_data->status &
2429 						~PCH_UDC_BUFF_STS) |
2430 						PCH_UDC_BS_HST_RDY;
2431 			pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
2432 		}
2433 		/* ep0 in returns data on IN phase */
2434 		if (setup_supported >= 0 && setup_supported <
2435 					    UDC_EP0IN_MAX_PKT_SIZE) {
2436 			pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
2437 			/* Gadget would have queued a request when
2438 			 * we called the setup */
2439 			if (!(dev->setup_data.bRequestType & USB_DIR_IN)) {
2440 				pch_udc_set_dma(dev, DMA_DIR_RX);
2441 				pch_udc_ep_clear_nak(ep);
2442 			}
2443 		} else if (setup_supported < 0) {
2444 			/* if unsupported request, then stall */
2445 			pch_udc_ep_set_stall(&(dev->ep[UDC_EP0IN_IDX]));
2446 			pch_udc_enable_ep_interrupts(ep->dev,
2447 						PCH_UDC_EPINT(ep->in, ep->num));
2448 			dev->stall = 0;
2449 			pch_udc_set_dma(dev, DMA_DIR_RX);
2450 		} else {
2451 			dev->waiting_zlp_ack = 1;
2452 		}
2453 	} else if ((((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2454 		     UDC_EPSTS_OUT_DATA) && !dev->stall) {
2455 		pch_udc_clear_dma(dev, DMA_DIR_RX);
2456 		pch_udc_ep_set_ddptr(ep, 0);
2457 		if (!list_empty(&ep->queue)) {
2458 			ep->epsts = stat;
2459 			pch_udc_svc_data_out(dev, PCH_UDC_EP0);
2460 		}
2461 		pch_udc_set_dma(dev, DMA_DIR_RX);
2462 	}
2463 	pch_udc_ep_set_rrdy(ep);
2464 }
2465 
2466 
2467 /**
2468  * pch_udc_postsvc_epinters() - This function enables endpoint interrupts
2469  *				and clears NAK status
2470  * @dev:	Reference to the device structure
2471  * @ep_num:	End point number
2472  */
2473 static void pch_udc_postsvc_epinters(struct pch_udc_dev *dev, int ep_num)
2474 {
2475 	struct pch_udc_ep	*ep;
2476 	struct pch_udc_request *req;
2477 
2478 	ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
2479 	if (!list_empty(&ep->queue)) {
2480 		req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2481 		pch_udc_enable_ep_interrupts(ep->dev,
2482 					     PCH_UDC_EPINT(ep->in, ep->num));
2483 		pch_udc_ep_clear_nak(ep);
2484 	}
2485 }
2486 
2487 /**
2488  * pch_udc_read_all_epstatus() - This function reads the status of all endpoints
2489  * @dev:	Reference to the device structure
2490  * @ep_intr:	Status of endpoint interrupt
2491  */
2492 static void pch_udc_read_all_epstatus(struct pch_udc_dev *dev, u32 ep_intr)
2493 {
2494 	int i;
2495 	struct pch_udc_ep	*ep;
2496 
2497 	for (i = 0; i < PCH_UDC_USED_EP_NUM; i++) {
2498 		/* IN */
2499 		if (ep_intr & (0x1 << i)) {
2500 			ep = &dev->ep[UDC_EPIN_IDX(i)];
2501 			ep->epsts = pch_udc_read_ep_status(ep);
2502 			pch_udc_clear_ep_status(ep, ep->epsts);
2503 		}
2504 		/* OUT */
2505 		if (ep_intr & (0x10000 << i)) {
2506 			ep = &dev->ep[UDC_EPOUT_IDX(i)];
2507 			ep->epsts = pch_udc_read_ep_status(ep);
2508 			pch_udc_clear_ep_status(ep, ep->epsts);
2509 		}
2510 	}
2511 }
2512 
2513 /**
2514  * pch_udc_activate_control_ep() - This function enables the control endpoints
2515  *					for traffic after a reset
2516  * @dev:	Reference to the device structure
2517  */
2518 static void pch_udc_activate_control_ep(struct pch_udc_dev *dev)
2519 {
2520 	struct pch_udc_ep	*ep;
2521 	u32 val;
2522 
2523 	/* Setup the IN endpoint */
2524 	ep = &dev->ep[UDC_EP0IN_IDX];
2525 	pch_udc_clear_ep_control(ep);
2526 	pch_udc_ep_fifo_flush(ep, ep->in);
2527 	pch_udc_ep_set_bufsz(ep, UDC_EP0IN_BUFF_SIZE, ep->in);
2528 	pch_udc_ep_set_maxpkt(ep, UDC_EP0IN_MAX_PKT_SIZE);
2529 	/* Initialize the IN EP Descriptor */
2530 	ep->td_data      = NULL;
2531 	ep->td_stp       = NULL;
2532 	ep->td_data_phys = 0;
2533 	ep->td_stp_phys  = 0;
2534 
2535 	/* Setup the OUT endpoint */
2536 	ep = &dev->ep[UDC_EP0OUT_IDX];
2537 	pch_udc_clear_ep_control(ep);
2538 	pch_udc_ep_fifo_flush(ep, ep->in);
2539 	pch_udc_ep_set_bufsz(ep, UDC_EP0OUT_BUFF_SIZE, ep->in);
2540 	pch_udc_ep_set_maxpkt(ep, UDC_EP0OUT_MAX_PKT_SIZE);
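	/* Mirror the EP0 OUT maximum packet size into the endpoint's UDC CSR entry */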
2541 	val = UDC_EP0OUT_MAX_PKT_SIZE << UDC_CSR_NE_MAX_PKT_SHIFT;
2542 	pch_udc_write_csr(ep->dev, val, UDC_EP0OUT_IDX);
2543 
2544 	/* Initialize the SETUP buffer */
2545 	pch_udc_init_setup_buff(ep->td_stp);
2546 	/* Write the address of the SETUP buffer descriptor */
2547 	pch_udc_ep_set_subptr(ep, ep->td_stp_phys);
2548 	/* Write the address of the data DMA descriptor */
2549 	pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
2550 
2551 	/* Initialize the dma descriptor */
2552 	ep->td_data->status  = PCH_UDC_DMA_LAST;
2553 	ep->td_data->dataptr = dev->dma_addr;
2554 	ep->td_data->next    = ep->td_data_phys;
2555 
2556 	pch_udc_ep_clear_nak(ep);
2557 }
2558 
2559 
2560 /**
2561  * pch_udc_svc_ur_interrupt() - This function handles a USB reset interrupt
2562  * @dev:	Reference to driver structure
2563  */
2564 static void pch_udc_svc_ur_interrupt(struct pch_udc_dev *dev)
2565 {
2566 	struct pch_udc_ep	*ep;
2567 	int i;
2568 
2569 	pch_udc_clear_dma(dev, DMA_DIR_TX);
2570 	pch_udc_clear_dma(dev, DMA_DIR_RX);
2571 	/* Mask all endpoint interrupts */
2572 	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
2573 	/* clear all endpoint interrupts */
2574 	pch_udc_write_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
2575 
2576 	for (i = 0; i < PCH_UDC_EP_NUM; i++) {
2577 		ep = &dev->ep[i];
2578 		pch_udc_clear_ep_status(ep, UDC_EPSTS_ALL_CLR_MASK);
2579 		pch_udc_clear_ep_control(ep);
2580 		pch_udc_ep_set_ddptr(ep, 0);
2581 		pch_udc_write_csr(ep->dev, 0x00, i);
2582 	}
2583 	dev->stall = 0;
2584 	dev->prot_stall = 0;
2585 	dev->waiting_zlp_ack = 0;
2586 	dev->set_cfg_not_acked = 0;
2587 
2588 	/* disable endpoints to empty the request queues; skip the control EPs */
2589 	for (i = 0; i < (PCH_UDC_USED_EP_NUM*2); i++) {
2590 		ep = &dev->ep[i];
2591 		pch_udc_ep_set_nak(ep);
2592 		pch_udc_ep_fifo_flush(ep, ep->in);
2593 		/* Complete request queue */
2594 		empty_req_queue(ep);
2595 	}
2596 	if (dev->driver) {
2597 		spin_lock(&dev->lock);
2598 		usb_gadget_udc_reset(&dev->gadget, dev->driver);
2599 		spin_unlock(&dev->lock);
2600 	}
2601 }
2602 
2603 /**
2604  * pch_udc_svc_enum_interrupt() - This function handles a USB speed enumeration
2605  *				done interrupt
2606  * @dev:	Reference to driver structure
2607  */
2608 static void pch_udc_svc_enum_interrupt(struct pch_udc_dev *dev)
2609 {
2610 	u32 dev_stat, dev_speed;
2611 	u32 speed = USB_SPEED_FULL;
2612 
2613 	dev_stat = pch_udc_read_device_status(dev);
2614 	dev_speed = (dev_stat & UDC_DEVSTS_ENUM_SPEED_MASK) >>
2615 						 UDC_DEVSTS_ENUM_SPEED_SHIFT;
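	/* Translate the hardware-reported enumeration speed into a gadget
	 * framework speed value */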
2616 	switch (dev_speed) {
2617 	case UDC_DEVSTS_ENUM_SPEED_HIGH:
2618 		speed = USB_SPEED_HIGH;
2619 		break;
2620 	case  UDC_DEVSTS_ENUM_SPEED_FULL:
2621 		speed = USB_SPEED_FULL;
2622 		break;
2623 	case  UDC_DEVSTS_ENUM_SPEED_LOW:
2624 		speed = USB_SPEED_LOW;
2625 		break;
2626 	default:
2627 		BUG();
2628 	}
2629 	dev->gadget.speed = speed;
2630 	pch_udc_activate_control_ep(dev);
2631 	pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 | UDC_EPINT_OUT_EP0);
2632 	pch_udc_set_dma(dev, DMA_DIR_TX);
2633 	pch_udc_set_dma(dev, DMA_DIR_RX);
2634 	pch_udc_ep_set_rrdy(&(dev->ep[UDC_EP0OUT_IDX]));
2635 
2636 	/* enable device interrupts */
2637 	pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
2638 					UDC_DEVINT_ES | UDC_DEVINT_ENUM |
2639 					UDC_DEVINT_SI | UDC_DEVINT_SC);
2640 }
2641 
2642 /**
2643  * pch_udc_svc_intf_interrupt() - This function handles a set interface
2644  *				  interrupt
2645  * @dev:	Reference to driver structure
2646  */
2647 static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev)
2648 {
2649 	u32 reg, dev_stat = 0;
2650 	int i, ret;
2651 
2652 	dev_stat = pch_udc_read_device_status(dev);
2653 	dev->cfg_data.cur_intf = (dev_stat & UDC_DEVSTS_INTF_MASK) >>
2654 							 UDC_DEVSTS_INTF_SHIFT;
2655 	dev->cfg_data.cur_alt = (dev_stat & UDC_DEVSTS_ALT_MASK) >>
2656 							 UDC_DEVSTS_ALT_SHIFT;
2657 	dev->set_cfg_not_acked = 1;
2658 	/* Construct the usb request for gadget driver and inform it */
2659 	memset(&dev->setup_data, 0 , sizeof dev->setup_data);
2660 	dev->setup_data.bRequest = USB_REQ_SET_INTERFACE;
2661 	dev->setup_data.bRequestType = USB_RECIP_INTERFACE;
2662 	dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_alt);
2663 	dev->setup_data.wIndex = cpu_to_le16(dev->cfg_data.cur_intf);
2664 	/* program the Endpoint Cfg registers */
2665 	/* Only one endpoint cfg register */
2666 	reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
2667 	reg = (reg & ~UDC_CSR_NE_INTF_MASK) |
2668 	      (dev->cfg_data.cur_intf << UDC_CSR_NE_INTF_SHIFT);
2669 	reg = (reg & ~UDC_CSR_NE_ALT_MASK) |
2670 	      (dev->cfg_data.cur_alt << UDC_CSR_NE_ALT_SHIFT);
2671 	pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
2672 	for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
2673 		/* clear stall bits */
2674 		pch_udc_ep_clear_stall(&(dev->ep[i]));
2675 		dev->ep[i].halted = 0;
2676 	}
2677 	dev->stall = 0;
2678 	spin_lock(&dev->lock);
2679 	ret = dev->driver->setup(&dev->gadget, &dev->setup_data);
2680 	spin_unlock(&dev->lock);
2681 }
2682 
2683 /**
2684  * pch_udc_svc_cfg_interrupt() - This function handles a set configuration
2685  *				interrupt
2686  * @dev:	Reference to driver structure
2687  */
2688 static void pch_udc_svc_cfg_interrupt(struct pch_udc_dev *dev)
2689 {
2690 	int i, ret;
2691 	u32 reg, dev_stat = 0;
2692 
2693 	dev_stat = pch_udc_read_device_status(dev);
2694 	dev->set_cfg_not_acked = 1;
2695 	dev->cfg_data.cur_cfg = (dev_stat & UDC_DEVSTS_CFG_MASK) >>
2696 				UDC_DEVSTS_CFG_SHIFT;
2697 	/* make usb request for gadget driver */
2698 	memset(&dev->setup_data, 0 , sizeof dev->setup_data);
2699 	dev->setup_data.bRequest = USB_REQ_SET_CONFIGURATION;
2700 	dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_cfg);
2701 	/* program the NE registers */
2702 	/* Only one endpoint cfg register */
2703 	reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
2704 	reg = (reg & ~UDC_CSR_NE_CFG_MASK) |
2705 	      (dev->cfg_data.cur_cfg << UDC_CSR_NE_CFG_SHIFT);
2706 	pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
2707 	for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
2708 		/* clear stall bits */
2709 		pch_udc_ep_clear_stall(&(dev->ep[i]));
2710 		dev->ep[i].halted = 0;
2711 	}
2712 	dev->stall = 0;
2713 
2714 	/* call gadget with the setup data received */
2715 	spin_lock(&dev->lock);
2716 	ret = dev->driver->setup(&dev->gadget, &dev->setup_data);
2717 	spin_unlock(&dev->lock);
2718 }
2719 
2720 /**
2721  * pch_udc_dev_isr() - This function services device interrupts
2722  *			by invoking appropriate routines.
2723  * @dev:	Reference to the device structure
2724  * @dev_intr:	The Device interrupt status.
2725  */
2726 static void pch_udc_dev_isr(struct pch_udc_dev *dev, u32 dev_intr)
2727 {
2728 	int vbus;
2729 
2730 	/* USB Reset Interrupt */
2731 	if (dev_intr & UDC_DEVINT_UR) {
2732 		pch_udc_svc_ur_interrupt(dev);
2733 		dev_dbg(&dev->pdev->dev, "USB_RESET\n");
2734 	}
2735 	/* Enumeration Done Interrupt */
2736 	if (dev_intr & UDC_DEVINT_ENUM) {
2737 		pch_udc_svc_enum_interrupt(dev);
2738 		dev_dbg(&dev->pdev->dev, "USB_ENUM\n");
2739 	}
2740 	/* Set Interface Interrupt */
2741 	if (dev_intr & UDC_DEVINT_SI)
2742 		pch_udc_svc_intf_interrupt(dev);
2743 	/* Set Config Interrupt */
2744 	if (dev_intr & UDC_DEVINT_SC)
2745 		pch_udc_svc_cfg_interrupt(dev);
2746 	/* USB Suspend interrupt */
2747 	if (dev_intr & UDC_DEVINT_US) {
2748 		if (dev->driver
2749 			&& dev->driver->suspend) {
2750 			spin_unlock(&dev->lock);
2751 			dev->driver->suspend(&dev->gadget);
2752 			spin_lock(&dev->lock);
2753 		}
2754 
2755 		vbus = pch_vbus_gpio_get_value(dev);
2756 		if ((dev->vbus_session == 0)
2757 			&& (vbus != 1)) {
2758 			if (dev->driver && dev->driver->disconnect) {
2759 				spin_unlock(&dev->lock);
2760 				dev->driver->disconnect(&dev->gadget);
2761 				spin_lock(&dev->lock);
2762 			}
2763 			pch_udc_reconnect(dev);
2764 		} else if ((dev->vbus_session == 0)
2765 			&& (vbus == 1)
2766 			&& !dev->vbus_gpio.intr)
2767 			schedule_work(&dev->vbus_gpio.irq_work_fall);
2768 
2769 		dev_dbg(&dev->pdev->dev, "USB_SUSPEND\n");
2770 	}
2771 	/* Clear the SOF interrupt, if enabled */
2772 	if (dev_intr & UDC_DEVINT_SOF)
2773 		dev_dbg(&dev->pdev->dev, "SOF\n");
2774 	/* ES interrupt, IDLE > 3ms on the USB */
2775 	if (dev_intr & UDC_DEVINT_ES)
2776 		dev_dbg(&dev->pdev->dev, "ES\n");
2777 	/* RWKP interrupt */
2778 	if (dev_intr & UDC_DEVINT_RWKP)
2779 		dev_dbg(&dev->pdev->dev, "RWKP\n");
2780 }
2781 
2782 /**
2783  * pch_udc_isr() - This function handles interrupts from the PCH USB Device
2784  * @irq:	Interrupt request number
2785  * @pdev:	Reference to the driver structure (struct pch_udc_dev passed as void *)
2786  */
2787 static irqreturn_t pch_udc_isr(int irq, void *pdev)
2788 {
2789 	struct pch_udc_dev *dev = (struct pch_udc_dev *) pdev;
2790 	u32 dev_intr, ep_intr;
2791 	int i;
2792 
2793 	dev_intr = pch_udc_read_device_interrupts(dev);
2794 	ep_intr = pch_udc_read_ep_interrupts(dev);
2795 
2796 	/* On a hot plug, identical register readings indicate that the controller has hung up. */
2797 	if (dev_intr == ep_intr)
2798 		if (dev_intr == pch_udc_readl(dev, UDC_DEVCFG_ADDR)) {
2799 			dev_dbg(&dev->pdev->dev, "UDC: Hung up\n");
2800 			/* The controller is reset */
2801 			pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
2802 			return IRQ_HANDLED;
2803 		}
2804 	if (dev_intr)
2805 		/* Clear device interrupts */
2806 		pch_udc_write_device_interrupts(dev, dev_intr);
2807 	if (ep_intr)
2808 		/* Clear ep interrupts */
2809 		pch_udc_write_ep_interrupts(dev, ep_intr);
2810 	if (!dev_intr && !ep_intr)
2811 		return IRQ_NONE;
2812 	spin_lock(&dev->lock);
2813 	if (dev_intr)
2814 		pch_udc_dev_isr(dev, dev_intr);
2815 	if (ep_intr) {
2816 		pch_udc_read_all_epstatus(dev, ep_intr);
2817 		/* Process Control In interrupts, if present */
2818 		if (ep_intr & UDC_EPINT_IN_EP0) {
2819 			pch_udc_svc_control_in(dev);
2820 			pch_udc_postsvc_epinters(dev, 0);
2821 		}
2822 		/* Process Control Out interrupts, if present */
2823 		if (ep_intr & UDC_EPINT_OUT_EP0)
2824 			pch_udc_svc_control_out(dev);
2825 		/* Process data IN endpoint interrupts */
2826 		for (i = 1; i < PCH_UDC_USED_EP_NUM; i++) {
2827 			if (ep_intr & (1 <<  i)) {
2828 				pch_udc_svc_data_in(dev, i);
2829 				pch_udc_postsvc_epinters(dev, i);
2830 			}
2831 		}
2832 		/* Process data OUT endpoint interrupts */
2833 		for (i = UDC_EPINT_OUT_SHIFT + 1; i < (UDC_EPINT_OUT_SHIFT +
2834 						 PCH_UDC_USED_EP_NUM); i++)
2835 			if (ep_intr & (1 <<  i))
2836 				pch_udc_svc_data_out(dev, i -
2837 							 UDC_EPINT_OUT_SHIFT);
2838 	}
2839 	spin_unlock(&dev->lock);
2840 	return IRQ_HANDLED;
2841 }
2842 
2843 /**
2844  * pch_udc_setup_ep0() - This function enables control endpoint for traffic
2845  * @dev:	Reference to the device structure
2846  */
2847 static void pch_udc_setup_ep0(struct pch_udc_dev *dev)
2848 {
2849 	/* enable ep0 interrupts */
2850 	pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 |
2851 						UDC_EPINT_OUT_EP0);
2852 	/* enable device interrupts */
2853 	pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
2854 				       UDC_DEVINT_ES | UDC_DEVINT_ENUM |
2855 				       UDC_DEVINT_SI | UDC_DEVINT_SC);
2856 }
2857 
2858 /**
2859  * gadget_release() - Free the gadget driver private data
2860  * @pdev:	reference to struct pci_dev
2861  */
2862 static void gadget_release(struct device *pdev)
2863 {
2864 	struct pch_udc_dev *dev = dev_get_drvdata(pdev);
2865 
2866 	kfree(dev);
2867 }
2868 
2869 /**
2870  * pch_udc_pcd_reinit() - This API initializes the endpoint structures
2871  * @dev:	Reference to the driver structure
2872  */
2873 static void pch_udc_pcd_reinit(struct pch_udc_dev *dev)
2874 {
2875 	const char *const ep_string[] = {
2876 		ep0_string, "ep0out", "ep1in", "ep1out", "ep2in", "ep2out",
2877 		"ep3in", "ep3out", "ep4in", "ep4out", "ep5in", "ep5out",
2878 		"ep6in", "ep6out", "ep7in", "ep7out", "ep8in", "ep8out",
2879 		"ep9in", "ep9out", "ep10in", "ep10out", "ep11in", "ep11out",
2880 		"ep12in", "ep12out", "ep13in", "ep13out", "ep14in", "ep14out",
2881 		"ep15in", "ep15out",
2882 	};
2883 	int i;
2884 
2885 	dev->gadget.speed = USB_SPEED_UNKNOWN;
2886 	INIT_LIST_HEAD(&dev->gadget.ep_list);
2887 
2888 	/* Initialize the endpoints structures */
2889 	memset(dev->ep, 0, sizeof dev->ep);
2890 	for (i = 0; i < PCH_UDC_EP_NUM; i++) {
2891 		struct pch_udc_ep *ep = &dev->ep[i];
2892 		ep->dev = dev;
2893 		ep->halted = 1;
2894 		ep->num = i / 2;
2895 		ep->in = ~i & 1;
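		/* Even indices are IN endpoints, odd indices are OUT (matches
		 * ep_string[] above) */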
2896 		ep->ep.name = ep_string[i];
2897 		ep->ep.ops = &pch_udc_ep_ops;
2898 		if (ep->in) {
2899 			ep->offset_addr = ep->num * UDC_EP_REG_SHIFT;
2900 			ep->ep.caps.dir_in = true;
2901 		} else {
2902 			ep->offset_addr = (UDC_EPINT_OUT_SHIFT + ep->num) *
2903 					  UDC_EP_REG_SHIFT;
2904 			ep->ep.caps.dir_out = true;
2905 		}
2906 		if (i == UDC_EP0IN_IDX || i == UDC_EP0OUT_IDX) {
2907 			ep->ep.caps.type_control = true;
2908 		} else {
2909 			ep->ep.caps.type_iso = true;
2910 			ep->ep.caps.type_bulk = true;
2911 			ep->ep.caps.type_int = true;
2912 		}
2913 		/* need to set ep->ep.maxpacket and set Default Configuration? */
2914 		usb_ep_set_maxpacket_limit(&ep->ep, UDC_BULK_MAX_PKT_SIZE);
2915 		list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
2916 		INIT_LIST_HEAD(&ep->queue);
2917 	}
2918 	usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IDX].ep, UDC_EP0IN_MAX_PKT_SIZE);
2919 	usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IDX].ep, UDC_EP0OUT_MAX_PKT_SIZE);
2920 
2921 	/* remove ep0 in and out from the list; they have their own pointers */
2922 	list_del_init(&dev->ep[UDC_EP0IN_IDX].ep.ep_list);
2923 	list_del_init(&dev->ep[UDC_EP0OUT_IDX].ep.ep_list);
2924 
2925 	dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
2926 	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
2927 }
2928 
2929 /**
2930  * pch_udc_pcd_init() - This API initializes the driver structure
2931  * @dev:	Reference to the driver structure
2932  *
2933  * Return codes:
2934  *	0: Success
2935  */
2936 static int pch_udc_pcd_init(struct pch_udc_dev *dev)
2937 {
2938 	pch_udc_init(dev);
2939 	pch_udc_pcd_reinit(dev);
2940 	pch_vbus_gpio_init(dev, vbus_gpio_port);
2941 	return 0;
2942 }
2943 
2944 /**
2945  * init_dma_pools() - create dma pools during initialization
2946  * @dev:	Reference to the driver structure
2947  */
2948 static int init_dma_pools(struct pch_udc_dev *dev)
2949 {
2950 	struct pch_udc_stp_dma_desc	*td_stp;
2951 	struct pch_udc_data_dma_desc	*td_data;
2952 
2953 	/* DMA setup */
2954 	dev->data_requests = pci_pool_create("data_requests", dev->pdev,
2955 		sizeof(struct pch_udc_data_dma_desc), 0, 0);
2956 	if (!dev->data_requests) {
2957 		dev_err(&dev->pdev->dev, "%s: can't get request data pool\n",
2958 			__func__);
2959 		return -ENOMEM;
2960 	}
2961 
2962 	/* dma desc for setup data */
2963 	dev->stp_requests = pci_pool_create("setup requests", dev->pdev,
2964 		sizeof(struct pch_udc_stp_dma_desc), 0, 0);
2965 	if (!dev->stp_requests) {
2966 		dev_err(&dev->pdev->dev, "%s: can't get setup request pool\n",
2967 			__func__);
2968 		return -ENOMEM;
2969 	}
2970 	/* setup */
2971 	td_stp = pci_pool_alloc(dev->stp_requests, GFP_KERNEL,
2972 				&dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
2973 	if (!td_stp) {
2974 		dev_err(&dev->pdev->dev,
2975 			"%s: can't allocate setup dma descriptor\n", __func__);
2976 		return -ENOMEM;
2977 	}
2978 	dev->ep[UDC_EP0OUT_IDX].td_stp = td_stp;
2979 
2980 	/* data: 0 packets !? */
2981 	td_data = pci_pool_alloc(dev->data_requests, GFP_KERNEL,
2982 				&dev->ep[UDC_EP0OUT_IDX].td_data_phys);
2983 	if (!td_data) {
2984 		dev_err(&dev->pdev->dev,
2985 			"%s: can't allocate data dma descriptor\n", __func__);
2986 		return -ENOMEM;
2987 	}
2988 	dev->ep[UDC_EP0OUT_IDX].td_data = td_data;
2989 	dev->ep[UDC_EP0IN_IDX].td_stp = NULL;
2990 	dev->ep[UDC_EP0IN_IDX].td_stp_phys = 0;
2991 	dev->ep[UDC_EP0IN_IDX].td_data = NULL;
2992 	dev->ep[UDC_EP0IN_IDX].td_data_phys = 0;
2993 
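	/* Buffer for EP0 OUT data (UDC_EP0OUT_BUFF_SIZE * 4 bytes), mapped
	 * once for device-to-memory DMA */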
2994 	dev->ep0out_buf = kzalloc(UDC_EP0OUT_BUFF_SIZE * 4, GFP_KERNEL);
2995 	if (!dev->ep0out_buf)
2996 		return -ENOMEM;
2997 	dev->dma_addr = dma_map_single(&dev->pdev->dev, dev->ep0out_buf,
2998 				       UDC_EP0OUT_BUFF_SIZE * 4,
2999 				       DMA_FROM_DEVICE);
3000 	return 0;
3001 }
3002 
3003 static int pch_udc_start(struct usb_gadget *g,
3004 		struct usb_gadget_driver *driver)
3005 {
3006 	struct pch_udc_dev	*dev = to_pch_udc(g);
3007 
3008 	driver->driver.bus = NULL;
3009 	dev->driver = driver;
3010 
3011 	/* get ready for ep0 traffic */
3012 	pch_udc_setup_ep0(dev);
3013 
3014 	/* clear SD (soft disconnect) so the host can see the device */
3015 	if ((pch_vbus_gpio_get_value(dev) != 0) || !dev->vbus_gpio.intr)
3016 		pch_udc_clear_disconnect(dev);
3017 
3018 	dev->connected = 1;
3019 	return 0;
3020 }
3021 
3022 static int pch_udc_stop(struct usb_gadget *g)
3023 {
3024 	struct pch_udc_dev	*dev = to_pch_udc(g);
3025 
3026 	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
3027 
3028 	/* Assure that there are no pending requests with this driver */
3029 	dev->driver = NULL;
3030 	dev->connected = 0;
3031 
3032 	/* set SD (soft disconnect) */
3033 	pch_udc_set_disconnect(dev);
3034 
3035 	return 0;
3036 }
3037 
3038 static void pch_udc_shutdown(struct pci_dev *pdev)
3039 {
3040 	struct pch_udc_dev *dev = pci_get_drvdata(pdev);
3041 
3042 	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
3043 	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
3044 
3045 	/* disable the pullup so the host will think we're gone */
3046 	pch_udc_set_disconnect(dev);
3047 }
3048 
3049 static void pch_udc_remove(struct pci_dev *pdev)
3050 {
3051 	struct pch_udc_dev	*dev = pci_get_drvdata(pdev);
3052 
3053 	usb_del_gadget_udc(&dev->gadget);
3054 
3055 	/* gadget driver must not be registered */
3056 	if (dev->driver)
3057 		dev_err(&pdev->dev,
3058 			"%s: gadget driver still bound!!!\n", __func__);
3059 	/* dma pool cleanup */
3060 	if (dev->data_requests)
3061 		pci_pool_destroy(dev->data_requests);
3062 
3063 	if (dev->stp_requests) {
3064 		/* cleanup DMA desc's for ep0in */
3065 		if (dev->ep[UDC_EP0OUT_IDX].td_stp) {
3066 			pci_pool_free(dev->stp_requests,
3067 				dev->ep[UDC_EP0OUT_IDX].td_stp,
3068 				dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
3069 		}
3070 		if (dev->ep[UDC_EP0OUT_IDX].td_data) {
3071 			pci_pool_free(dev->stp_requests,
3072 				dev->ep[UDC_EP0OUT_IDX].td_data,
3073 				dev->ep[UDC_EP0OUT_IDX].td_data_phys);
3074 		}
3075 		pci_pool_destroy(dev->stp_requests);
3076 	}
3077 
3078 	if (dev->dma_addr)
3079 		dma_unmap_single(&dev->pdev->dev, dev->dma_addr,
3080 				 UDC_EP0OUT_BUFF_SIZE * 4, DMA_FROM_DEVICE);
3081 	kfree(dev->ep0out_buf);
3082 
3083 	pch_vbus_gpio_free(dev);
3084 
3085 	pch_udc_exit(dev);
3086 
3087 	if (dev->irq_registered)
3088 		free_irq(pdev->irq, dev);
3089 	if (dev->base_addr)
3090 		iounmap(dev->base_addr);
3091 	if (dev->mem_region)
3092 		release_mem_region(dev->phys_addr,
3093 				   pci_resource_len(pdev, dev->bar));
3094 	if (dev->active)
3095 		pci_disable_device(pdev);
3096 	kfree(dev);
3097 }
3098 
3099 #ifdef CONFIG_PM
3100 static int pch_udc_suspend(struct pci_dev *pdev, pm_message_t state)
3101 {
3102 	struct pch_udc_dev *dev = pci_get_drvdata(pdev);
3103 
3104 	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
3105 	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
3106 
3107 	pci_disable_device(pdev);
3108 	pci_enable_wake(pdev, PCI_D3hot, 0);
3109 
3110 	if (pci_save_state(pdev)) {
3111 		dev_err(&pdev->dev,
3112 			"%s: could not save PCI config state\n", __func__);
3113 		return -ENOMEM;
3114 	}
3115 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
3116 	return 0;
3117 }
3118 
3119 static int pch_udc_resume(struct pci_dev *pdev)
3120 {
3121 	int ret;
3122 
3123 	pci_set_power_state(pdev, PCI_D0);
3124 	pci_restore_state(pdev);
3125 	ret = pci_enable_device(pdev);
3126 	if (ret) {
3127 		dev_err(&pdev->dev, "%s: pci_enable_device failed\n", __func__);
3128 		return ret;
3129 	}
3130 	pci_enable_wake(pdev, PCI_D3hot, 0);
3131 	return 0;
3132 }
3133 #else
3134 #define pch_udc_suspend	NULL
3135 #define pch_udc_resume	NULL
3136 #endif /* CONFIG_PM */
3137 
3138 static int pch_udc_probe(struct pci_dev *pdev,
3139 			  const struct pci_device_id *id)
3140 {
3141 	unsigned long		resource;
3142 	unsigned long		len;
3143 	int			retval;
3144 	struct pch_udc_dev	*dev;
3145 
3146 	/* init */
3147 	dev = kzalloc(sizeof *dev, GFP_KERNEL);
3148 	if (!dev) {
3149 		pr_err("%s: no memory for device structure\n", __func__);
3150 		return -ENOMEM;
3151 	}
3152 	/* pci setup */
3153 	if (pci_enable_device(pdev) < 0) {
3154 		kfree(dev);
3155 		pr_err("%s: pci_enable_device failed\n", __func__);
3156 		return -ENODEV;
3157 	}
3158 	dev->active = 1;
3159 	pci_set_drvdata(pdev, dev);
3160 
3161 	/* Determine BAR based on PCI ID */
3162 	if (id->device == PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC)
3163 		dev->bar = PCH_UDC_PCI_BAR_QUARK_X1000;
3164 	else
3165 		dev->bar = PCH_UDC_PCI_BAR;
3166 
3167 	/* PCI resource allocation */
3168 	resource = pci_resource_start(pdev, dev->bar);
3169 	len = pci_resource_len(pdev, dev->bar);
3170 
3171 	if (!request_mem_region(resource, len, KBUILD_MODNAME)) {
3172 		dev_err(&pdev->dev, "%s: pci device used already\n", __func__);
3173 		retval = -EBUSY;
3174 		goto finished;
3175 	}
3176 	dev->phys_addr = resource;
3177 	dev->mem_region = 1;
3178 
3179 	dev->base_addr = ioremap_nocache(resource, len);
3180 	if (!dev->base_addr) {
3181 		pr_err("%s: device memory cannot be mapped\n", __func__);
3182 		retval = -ENOMEM;
3183 		goto finished;
3184 	}
3185 	if (!pdev->irq) {
3186 		dev_err(&pdev->dev, "%s: irq not set\n", __func__);
3187 		retval = -ENODEV;
3188 		goto finished;
3189 	}
3190 	/* initialize the hardware */
3191 	if (pch_udc_pcd_init(dev)) {
3192 		retval = -ENODEV;
3193 		goto finished;
3194 	}
3195 	if (request_irq(pdev->irq, pch_udc_isr, IRQF_SHARED, KBUILD_MODNAME,
3196 			dev)) {
3197 		dev_err(&pdev->dev, "%s: request_irq(%d) fail\n", __func__,
3198 			pdev->irq);
3199 		retval = -ENODEV;
3200 		goto finished;
3201 	}
3202 	dev->irq = pdev->irq;
3203 	dev->irq_registered = 1;
3204 
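	/* Enable PCI bus mastering for DMA; Memory-Write-Invalidate is a
	 * best-effort optimisation */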
3205 	pci_set_master(pdev);
3206 	pci_try_set_mwi(pdev);
3207 
3208 	/* device struct setup */
3209 	spin_lock_init(&dev->lock);
3210 	dev->pdev = pdev;
3211 	dev->gadget.ops = &pch_udc_ops;
3212 
3213 	retval = init_dma_pools(dev);
3214 	if (retval)
3215 		goto finished;
3216 
3217 	dev->gadget.name = KBUILD_MODNAME;
3218 	dev->gadget.max_speed = USB_SPEED_HIGH;
3219 
3220 	/* Put the device in disconnected state till a driver is bound */
3221 	pch_udc_set_disconnect(dev);
3222 	retval = usb_add_gadget_udc_release(&pdev->dev, &dev->gadget,
3223 			gadget_release);
3224 	if (retval)
3225 		goto finished;
3226 	return 0;
3227 
3228 finished:
3229 	pch_udc_remove(pdev);
3230 	return retval;
3231 }
3232 
3233 static const struct pci_device_id pch_udc_pcidev_id[] = {
3234 	{
3235 		PCI_DEVICE(PCI_VENDOR_ID_INTEL,
3236 			   PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC),
3237 		.class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
3238 		.class_mask = 0xffffffff,
3239 	},
3240 	{
3241 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EG20T_UDC),
3242 		.class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
3243 		.class_mask = 0xffffffff,
3244 	},
3245 	{
3246 		PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7213_IOH_UDC),
3247 		.class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
3248 		.class_mask = 0xffffffff,
3249 	},
3250 	{
3251 		PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7831_IOH_UDC),
3252 		.class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
3253 		.class_mask = 0xffffffff,
3254 	},
3255 	{ 0 },
3256 };
3257 
3258 MODULE_DEVICE_TABLE(pci, pch_udc_pcidev_id);
3259 
3260 static struct pci_driver pch_udc_driver = {
3261 	.name =	KBUILD_MODNAME,
3262 	.id_table =	pch_udc_pcidev_id,
3263 	.probe =	pch_udc_probe,
3264 	.remove =	pch_udc_remove,
3265 	.suspend =	pch_udc_suspend,
3266 	.resume =	pch_udc_resume,
3267 	.shutdown =	pch_udc_shutdown,
3268 };
3269 
3270 module_pci_driver(pch_udc_driver);
3271 
3272 MODULE_DESCRIPTION("Intel EG20T USB Device Controller");
3273 MODULE_AUTHOR("LAPIS Semiconductor, <tomoya-linux@dsn.lapis-semi.com>");
3274 MODULE_LICENSE("GPL");
3275