xref: /openbmc/linux/drivers/usb/gadget/udc/pch_udc.c (revision 33ac9dba)
1 /*
2  * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; version 2 of the License.
7  */
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/pci.h>
12 #include <linux/delay.h>
13 #include <linux/errno.h>
14 #include <linux/list.h>
15 #include <linux/interrupt.h>
16 #include <linux/usb/ch9.h>
17 #include <linux/usb/gadget.h>
18 #include <linux/gpio.h>
19 #include <linux/irq.h>
20 
21 /* GPIO port for VBUS detection */
22 static int vbus_gpio_port = -1;		/* GPIO port number (-1:Not used) */
23 
24 #define PCH_VBUS_PERIOD		3000	/* VBUS polling period (msec) */
25 #define PCH_VBUS_INTERVAL	10	/* VBUS polling interval (msec) */
26 
27 /* Address offset of Registers */
28 #define UDC_EP_REG_SHIFT	0x20	/* Offset to next EP */
29 
30 #define UDC_EPCTL_ADDR		0x00	/* Endpoint control */
31 #define UDC_EPSTS_ADDR		0x04	/* Endpoint status */
32 #define UDC_BUFIN_FRAMENUM_ADDR	0x08	/* buffer size in / frame number out */
33 #define UDC_BUFOUT_MAXPKT_ADDR	0x0C	/* buffer size out / maxpkt in */
34 #define UDC_SUBPTR_ADDR		0x10	/* setup buffer pointer */
35 #define UDC_DESPTR_ADDR		0x14	/* Data descriptor pointer */
36 #define UDC_CONFIRM_ADDR	0x18	/* Write/Read confirmation */
37 
38 #define UDC_DEVCFG_ADDR		0x400	/* Device configuration */
39 #define UDC_DEVCTL_ADDR		0x404	/* Device control */
40 #define UDC_DEVSTS_ADDR		0x408	/* Device status */
41 #define UDC_DEVIRQSTS_ADDR	0x40C	/* Device irq status */
42 #define UDC_DEVIRQMSK_ADDR	0x410	/* Device irq mask */
43 #define UDC_EPIRQSTS_ADDR	0x414	/* Endpoint irq status */
44 #define UDC_EPIRQMSK_ADDR	0x418	/* Endpoint irq mask */
45 #define UDC_DEVLPM_ADDR		0x41C	/* LPM control / status */
46 #define UDC_CSR_BUSY_ADDR	0x4f0	/* UDC_CSR_BUSY Status register */
47 #define UDC_SRST_ADDR		0x4fc	/* SOFT RESET register */
48 #define UDC_CSR_ADDR		0x500	/* USB_DEVICE endpoint register */
49 
50 /* Endpoint control register */
51 /* Bit position */
52 #define UDC_EPCTL_MRXFLUSH		(1 << 12)
53 #define UDC_EPCTL_RRDY			(1 << 9)
54 #define UDC_EPCTL_CNAK			(1 << 8)
55 #define UDC_EPCTL_SNAK			(1 << 7)
56 #define UDC_EPCTL_NAK			(1 << 6)
57 #define UDC_EPCTL_P			(1 << 3)
58 #define UDC_EPCTL_F			(1 << 1)
59 #define UDC_EPCTL_S			(1 << 0)
60 #define UDC_EPCTL_ET_SHIFT		4
61 /* Mask pattern */
62 #define UDC_EPCTL_ET_MASK		0x00000030
63 /* Value for ET field */
64 #define UDC_EPCTL_ET_CONTROL		0
65 #define UDC_EPCTL_ET_ISO		1
66 #define UDC_EPCTL_ET_BULK		2
67 #define UDC_EPCTL_ET_INTERRUPT		3
68 
69 /* Endpoint status register */
70 /* Bit position */
71 #define UDC_EPSTS_XFERDONE		(1 << 27)
72 #define UDC_EPSTS_RSS			(1 << 26)
73 #define UDC_EPSTS_RCS			(1 << 25)
74 #define UDC_EPSTS_TXEMPTY		(1 << 24)
75 #define UDC_EPSTS_TDC			(1 << 10)
76 #define UDC_EPSTS_HE			(1 << 9)
77 #define UDC_EPSTS_MRXFIFO_EMP		(1 << 8)
78 #define UDC_EPSTS_BNA			(1 << 7)
79 #define UDC_EPSTS_IN			(1 << 6)
80 #define UDC_EPSTS_OUT_SHIFT		4
81 /* Mask pattern */
82 #define UDC_EPSTS_OUT_MASK		0x00000030
83 #define UDC_EPSTS_ALL_CLR_MASK		0x1F0006F0
84 /* Value for OUT field */
85 #define UDC_EPSTS_OUT_SETUP		2
86 #define UDC_EPSTS_OUT_DATA		1
87 
88 /* Device configuration register */
89 /* Bit position */
90 #define UDC_DEVCFG_CSR_PRG		(1 << 17)
91 #define UDC_DEVCFG_SP			(1 << 3)
92 /* SPD Value */
93 #define UDC_DEVCFG_SPD_HS		0x0
94 #define UDC_DEVCFG_SPD_FS		0x1
95 #define UDC_DEVCFG_SPD_LS		0x2
96 
97 /* Device control register */
98 /* Bit position */
99 #define UDC_DEVCTL_THLEN_SHIFT		24
100 #define UDC_DEVCTL_BRLEN_SHIFT		16
101 #define UDC_DEVCTL_CSR_DONE		(1 << 13)
102 #define UDC_DEVCTL_SD			(1 << 10)
103 #define UDC_DEVCTL_MODE			(1 << 9)
104 #define UDC_DEVCTL_BREN			(1 << 8)
105 #define UDC_DEVCTL_THE			(1 << 7)
106 #define UDC_DEVCTL_DU			(1 << 4)
107 #define UDC_DEVCTL_TDE			(1 << 3)
108 #define UDC_DEVCTL_RDE			(1 << 2)
109 #define UDC_DEVCTL_RES			(1 << 0)
110 
111 /* Device status register */
112 /* Bit position */
113 #define UDC_DEVSTS_TS_SHIFT		18
114 #define UDC_DEVSTS_ENUM_SPEED_SHIFT	13
115 #define UDC_DEVSTS_ALT_SHIFT		8
116 #define UDC_DEVSTS_INTF_SHIFT		4
117 #define UDC_DEVSTS_CFG_SHIFT		0
118 /* Mask pattern */
119 #define UDC_DEVSTS_TS_MASK		0xfffc0000
120 #define UDC_DEVSTS_ENUM_SPEED_MASK	0x00006000
121 #define UDC_DEVSTS_ALT_MASK		0x00000f00
122 #define UDC_DEVSTS_INTF_MASK		0x000000f0
123 #define UDC_DEVSTS_CFG_MASK		0x0000000f
124 /* value for maximum speed for SPEED field */
125 #define UDC_DEVSTS_ENUM_SPEED_FULL	1
126 #define UDC_DEVSTS_ENUM_SPEED_HIGH	0
127 #define UDC_DEVSTS_ENUM_SPEED_LOW	2
128 #define UDC_DEVSTS_ENUM_SPEED_FULLX	3
129 
130 /* Device irq register */
131 /* Bit position */
132 #define UDC_DEVINT_RWKP			(1 << 7)
133 #define UDC_DEVINT_ENUM			(1 << 6)
134 #define UDC_DEVINT_SOF			(1 << 5)
135 #define UDC_DEVINT_US			(1 << 4)
136 #define UDC_DEVINT_UR			(1 << 3)
137 #define UDC_DEVINT_ES			(1 << 2)
138 #define UDC_DEVINT_SI			(1 << 1)
139 #define UDC_DEVINT_SC			(1 << 0)
140 /* Mask pattern */
141 #define UDC_DEVINT_MSK			0x7f
142 
143 /* Endpoint irq register */
144 /* Bit position */
145 #define UDC_EPINT_IN_SHIFT		0
146 #define UDC_EPINT_OUT_SHIFT		16
147 #define UDC_EPINT_IN_EP0		(1 << 0)
148 #define UDC_EPINT_OUT_EP0		(1 << 16)
149 /* Mask pattern */
150 #define UDC_EPINT_MSK_DISABLE_ALL	0xffffffff
151 
152 /* UDC_CSR_BUSY Status register */
153 /* Bit position */
154 #define UDC_CSR_BUSY			(1 << 0)
155 
156 /* SOFT RESET register */
157 /* Bit position */
158 #define UDC_PSRST			(1 << 1)
159 #define UDC_SRST			(1 << 0)
160 
161 /* USB_DEVICE endpoint register */
162 /* Bit position */
163 #define UDC_CSR_NE_NUM_SHIFT		0
164 #define UDC_CSR_NE_DIR_SHIFT		4
165 #define UDC_CSR_NE_TYPE_SHIFT		5
166 #define UDC_CSR_NE_CFG_SHIFT		7
167 #define UDC_CSR_NE_INTF_SHIFT		11
168 #define UDC_CSR_NE_ALT_SHIFT		15
169 #define UDC_CSR_NE_MAX_PKT_SHIFT	19
170 /* Mask pattern */
171 #define UDC_CSR_NE_NUM_MASK		0x0000000f
172 #define UDC_CSR_NE_DIR_MASK		0x00000010
173 #define UDC_CSR_NE_TYPE_MASK		0x00000060
174 #define UDC_CSR_NE_CFG_MASK		0x00000780
175 #define UDC_CSR_NE_INTF_MASK		0x00007800
176 #define UDC_CSR_NE_ALT_MASK		0x00078000
177 #define UDC_CSR_NE_MAX_PKT_MASK		0x3ff80000
178 
179 #define PCH_UDC_CSR(ep)	(UDC_CSR_ADDR + ep*4)
180 #define PCH_UDC_EPINT(in, num)\
181 		(1 << (num + (in ? UDC_EPINT_IN_SHIFT : UDC_EPINT_OUT_SHIFT)))
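/*
 * Illustrative expansions of the two helper macros above (not part of the
 * original driver), using the register map defined earlier in this file:
 *
 *	PCH_UDC_CSR(3)      -> 0x500 + 3 * 4 = 0x50C
 *	PCH_UDC_EPINT(1, 2) -> 1 << (2 + 0)  = 0x00000004  (IN  EP2)
 *	PCH_UDC_EPINT(0, 2) -> 1 << (2 + 16) = 0x00040000  (OUT EP2)
 */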
182 
183 /* Index of endpoint */
184 #define UDC_EP0IN_IDX		0
185 #define UDC_EP0OUT_IDX		1
186 #define UDC_EPIN_IDX(ep)	(ep * 2)
187 #define UDC_EPOUT_IDX(ep)	(ep * 2 + 1)
188 #define PCH_UDC_EP0		0
189 #define PCH_UDC_EP1		1
190 #define PCH_UDC_EP2		2
191 #define PCH_UDC_EP3		3
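/*
 * Note (illustrative, not part of the original driver): with the index
 * macros above, IN endpoints occupy the even slots of the dev->ep[] array
 * and OUT endpoints the odd slots, e.g. UDC_EPIN_IDX(1) == 2 and
 * UDC_EPOUT_IDX(1) == 3, matching UDC_EP0IN_IDX == 0 / UDC_EP0OUT_IDX == 1.
 */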
192 
193 /* Number of endpoints */
194 #define PCH_UDC_EP_NUM		32	/* Total number of EPs (16 IN,16 OUT) */
195 #define PCH_UDC_USED_EP_NUM	4	/* Number of EPs actually used */
196 /* Length Value */
197 #define PCH_UDC_BRLEN		0x0F	/* Burst length */
198 #define PCH_UDC_THLEN		0x1F	/* Threshold length */
199 /* Value of EP Buffer Size */
200 #define UDC_EP0IN_BUFF_SIZE	16
201 #define UDC_EPIN_BUFF_SIZE	256
202 #define UDC_EP0OUT_BUFF_SIZE	16
203 #define UDC_EPOUT_BUFF_SIZE	256
204 /* Value of EP maximum packet size */
205 #define UDC_EP0IN_MAX_PKT_SIZE	64
206 #define UDC_EP0OUT_MAX_PKT_SIZE	64
207 #define UDC_BULK_MAX_PKT_SIZE	512
208 
209 /* DMA */
210 #define DMA_DIR_RX		1	/* DMA for data receive */
211 #define DMA_DIR_TX		2	/* DMA for data transmit */
212 #define DMA_ADDR_INVALID	(~(dma_addr_t)0)
213 #define UDC_DMA_MAXPACKET	65536	/* maximum packet size for DMA */
214 
215 /**
216  * struct pch_udc_data_dma_desc - Structure to hold DMA descriptor information
217  *				  for data
218  * @status:		Status quadlet
219  * @reserved:		Reserved
220  * @dataptr:		Buffer descriptor
221  * @next:		Next descriptor
222  */
223 struct pch_udc_data_dma_desc {
224 	u32 status;
225 	u32 reserved;
226 	u32 dataptr;
227 	u32 next;
228 };
229 
230 /**
231  * struct pch_udc_stp_dma_desc - Structure to hold DMA descriptor information
232  *				 for control data
233  * @status:	Status
234  * @reserved:	Reserved
235  * @request:	embedded usb_ctrlrequest
236  *		(the two setup words received from the host)
237  */
238 struct pch_udc_stp_dma_desc {
239 	u32 status;
240 	u32 reserved;
241 	struct usb_ctrlrequest request;
242 } __attribute((packed));
243 
244 /* DMA status definitions */
245 /* Buffer status */
246 #define PCH_UDC_BUFF_STS	0xC0000000
247 #define PCH_UDC_BS_HST_RDY	0x00000000
248 #define PCH_UDC_BS_DMA_BSY	0x40000000
249 #define PCH_UDC_BS_DMA_DONE	0x80000000
250 #define PCH_UDC_BS_HST_BSY	0xC0000000
251 /*  Rx/Tx Status */
252 #define PCH_UDC_RXTX_STS	0x30000000
253 #define PCH_UDC_RTS_SUCC	0x00000000
254 #define PCH_UDC_RTS_DESERR	0x10000000
255 #define PCH_UDC_RTS_BUFERR	0x30000000
256 /* Last Descriptor Indication */
257 #define PCH_UDC_DMA_LAST	0x08000000
258 /* Number of Rx/Tx Bytes Mask */
259 #define PCH_UDC_RXTX_BYTES	0x0000ffff
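/*
 * Sketch (illustrative only, not part of the original driver) of how a
 * completed data descriptor's status word can be decoded with the masks
 * above; 'td' and 'bytes_done' are placeholders:
 *
 *	u32 status = td->status;
 *
 *	if ((status & PCH_UDC_BUFF_STS) == PCH_UDC_BS_DMA_DONE &&
 *	    (status & PCH_UDC_RXTX_STS) == PCH_UDC_RTS_SUCC)
 *		bytes_done = status & PCH_UDC_RXTX_BYTES;
 */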
260 
261 /**
262  * struct pch_udc_cfg_data - Structure to hold current configuration
263  *			     and interface information
264  * @cur_cfg:	current configuration in use
265  * @cur_intf:	current interface in use
266  * @cur_alt:	current alt interface in use
267  */
268 struct pch_udc_cfg_data {
269 	u16 cur_cfg;
270 	u16 cur_intf;
271 	u16 cur_alt;
272 };
273 
274 /**
275  * struct pch_udc_ep - Structure holding a PCH USB device Endpoint information
276  * @ep:			embedded ep request
277  * @td_stp_phys:	for setup request
278  * @td_data_phys:	for data request
279  * @td_stp:		for setup request
280  * @td_data:		for data request
281  * @dev:		reference to device struct
282  * @offset_addr:	offset address of ep register
283  * @desc:		for this ep
284  * @queue:		queue for requests
285  * @num:		endpoint number
286  * @in:			endpoint is IN
287  * @halted:		endpoint halted?
288  * @epsts:		Endpoint status
289  */
290 struct pch_udc_ep {
291 	struct usb_ep			ep;
292 	dma_addr_t			td_stp_phys;
293 	dma_addr_t			td_data_phys;
294 	struct pch_udc_stp_dma_desc	*td_stp;
295 	struct pch_udc_data_dma_desc	*td_data;
296 	struct pch_udc_dev		*dev;
297 	unsigned long			offset_addr;
298 	struct list_head		queue;
299 	unsigned			num:5,
300 					in:1,
301 					halted:1;
302 	unsigned long			epsts;
303 };
304 
305 /**
306  * struct pch_vbus_gpio_data - Structure holding GPIO information
307  *					for detecting VBUS
308  * @port:		gpio port number
309  * @intr:		gpio interrupt number
310  * @irq_work_fall:	Structure for WorkQueue
311  * @irq_work_rise:	Structure for WorkQueue
312  */
313 struct pch_vbus_gpio_data {
314 	int			port;
315 	int			intr;
316 	struct work_struct	irq_work_fall;
317 	struct work_struct	irq_work_rise;
318 };
319 
320 /**
321  * struct pch_udc_dev - Structure holding complete information
322  *			of the PCH USB device
323  * @gadget:		gadget driver data
324  * @driver:		reference to gadget driver bound
325  * @pdev:		reference to the PCI device
326  * @ep:			array of endpoints
327  * @lock:		protects all state
328  * @active:		enabled the PCI device
329  * @stall:		stall requested
330  * @prot_stall:		protocol stall requested
331  * @irq_registered:	irq registered with system
332  * @mem_region:		device memory mapped
333  * @registered:		driver registered with system
334  * @suspended:		driver in suspended state
335  * @connected:		gadget driver associated
336  * @vbus_session:	required vbus_session state
337  * @set_cfg_not_acked:	pending acknowledgement for setup
338  * @waiting_zlp_ack:	pending acknowledgement for ZLP
339  * @data_requests:	DMA pool for data requests
340  * @stp_requests:	DMA pool for setup requests
341  * @dma_addr:		DMA address of the receive buffer (@ep0out_buf)
342  * @ep0out_buf:		Buffer for DMA
343  * @setup_data:		Received setup data
344  * @phys_addr:		physical address of device memory
345  * @base_addr:		base address of mapped device memory
346  * @irq:		IRQ line for the device
347  * @cfg_data:		current cfg, intf, and alt in use
348  * @vbus_gpio:		GPIO information for detecting VBUS
349  */
350 struct pch_udc_dev {
351 	struct usb_gadget		gadget;
352 	struct usb_gadget_driver	*driver;
353 	struct pci_dev			*pdev;
354 	struct pch_udc_ep		ep[PCH_UDC_EP_NUM];
355 	spinlock_t			lock; /* protects all state */
356 	unsigned	active:1,
357 			stall:1,
358 			prot_stall:1,
359 			irq_registered:1,
360 			mem_region:1,
361 			suspended:1,
362 			connected:1,
363 			vbus_session:1,
364 			set_cfg_not_acked:1,
365 			waiting_zlp_ack:1;
366 	struct pci_pool		*data_requests;
367 	struct pci_pool		*stp_requests;
368 	dma_addr_t			dma_addr;
369 	void				*ep0out_buf;
370 	struct usb_ctrlrequest		setup_data;
371 	unsigned long			phys_addr;
372 	void __iomem			*base_addr;
373 	unsigned			irq;
374 	struct pch_udc_cfg_data		cfg_data;
375 	struct pch_vbus_gpio_data	vbus_gpio;
376 };
377 #define to_pch_udc(g)	(container_of((g), struct pch_udc_dev, gadget))
378 
379 #define PCH_UDC_PCI_BAR			1
380 #define PCI_DEVICE_ID_INTEL_EG20T_UDC	0x8808
381 #define PCI_VENDOR_ID_ROHM		0x10DB
382 #define PCI_DEVICE_ID_ML7213_IOH_UDC	0x801D
383 #define PCI_DEVICE_ID_ML7831_IOH_UDC	0x8808
384 
385 static const char	ep0_string[] = "ep0in";
386 static DEFINE_SPINLOCK(udc_stall_spinlock);	/* stall spin lock */
387 static bool speed_fs;
388 module_param_named(speed_fs, speed_fs, bool, S_IRUGO);
389 MODULE_PARM_DESC(speed_fs, "true for Full speed operation");
390 
391 /**
392  * struct pch_udc_request - Structure holding a PCH USB device request packet
393  * @req:		embedded ep request
394  * @td_data_phys:	phys. address
395  * @td_data:		first dma desc. of chain
396  * @td_data_last:	last dma desc. of chain
397  * @queue:		associated queue
398  * @dma_going:		DMA in progress for request
399  * @dma_mapped:		DMA memory mapped for request
400  * @dma_done:		DMA completed for request
401  * @chain_len:		chain length
402  * @buf:		Buffer memory for align adjustment
403  * @dma:		DMA memory for align adjustment
404  */
405 struct pch_udc_request {
406 	struct usb_request		req;
407 	dma_addr_t			td_data_phys;
408 	struct pch_udc_data_dma_desc	*td_data;
409 	struct pch_udc_data_dma_desc	*td_data_last;
410 	struct list_head		queue;
411 	unsigned			dma_going:1,
412 					dma_mapped:1,
413 					dma_done:1;
414 	unsigned			chain_len;
415 	void				*buf;
416 	dma_addr_t			dma;
417 };
418 
419 static inline u32 pch_udc_readl(struct pch_udc_dev *dev, unsigned long reg)
420 {
421 	return ioread32(dev->base_addr + reg);
422 }
423 
424 static inline void pch_udc_writel(struct pch_udc_dev *dev,
425 				    unsigned long val, unsigned long reg)
426 {
427 	iowrite32(val, dev->base_addr + reg);
428 }
429 
430 static inline void pch_udc_bit_set(struct pch_udc_dev *dev,
431 				     unsigned long reg,
432 				     unsigned long bitmask)
433 {
434 	pch_udc_writel(dev, pch_udc_readl(dev, reg) | bitmask, reg);
435 }
436 
437 static inline void pch_udc_bit_clr(struct pch_udc_dev *dev,
438 				     unsigned long reg,
439 				     unsigned long bitmask)
440 {
441 	pch_udc_writel(dev, pch_udc_readl(dev, reg) & ~(bitmask), reg);
442 }
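/*
 * Usage sketch (illustrative, not part of the original driver): the bit
 * helpers above are read-modify-write wrappers, so
 *
 *	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
 *
 * is equivalent to
 *
 *	pch_udc_writel(dev, pch_udc_readl(dev, UDC_DEVCTL_ADDR) |
 *			UDC_DEVCTL_RDE, UDC_DEVCTL_ADDR);
 */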
443 
444 static inline u32 pch_udc_ep_readl(struct pch_udc_ep *ep, unsigned long reg)
445 {
446 	return ioread32(ep->dev->base_addr + ep->offset_addr + reg);
447 }
448 
449 static inline void pch_udc_ep_writel(struct pch_udc_ep *ep,
450 				    unsigned long val, unsigned long reg)
451 {
452 	iowrite32(val, ep->dev->base_addr + ep->offset_addr + reg);
453 }
454 
455 static inline void pch_udc_ep_bit_set(struct pch_udc_ep *ep,
456 				     unsigned long reg,
457 				     unsigned long bitmask)
458 {
459 	pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) | bitmask, reg);
460 }
461 
462 static inline void pch_udc_ep_bit_clr(struct pch_udc_ep *ep,
463 				     unsigned long reg,
464 				     unsigned long bitmask)
465 {
466 	pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) & ~(bitmask), reg);
467 }
468 
469 /**
470  * pch_udc_csr_busy() - Wait till idle.
471  * @dev:	Reference to pch_udc_dev structure
472  */
473 static void pch_udc_csr_busy(struct pch_udc_dev *dev)
474 {
475 	unsigned int count = 200;
476 
477 	/* Wait till idle */
478 	while ((pch_udc_readl(dev, UDC_CSR_BUSY_ADDR) & UDC_CSR_BUSY)
479 		&& --count)
480 		cpu_relax();
481 	if (!count)
482 		dev_err(&dev->pdev->dev, "%s: wait error\n", __func__);
483 }
484 
485 /**
486  * pch_udc_write_csr() - Write the command and status registers.
487  * @dev:	Reference to pch_udc_dev structure
488  * @val:	value to be written to CSR register
489  * @addr:	address of CSR register
490  * @ep:	endpoint number whose CSR register is written
491 static void pch_udc_write_csr(struct pch_udc_dev *dev, unsigned long val,
492 			       unsigned int ep)
493 {
494 	unsigned long reg = PCH_UDC_CSR(ep);
495 
496 	pch_udc_csr_busy(dev);		/* Wait till idle */
497 	pch_udc_writel(dev, val, reg);
498 	pch_udc_csr_busy(dev);		/* Wait till idle */
499 }
500 
501 /**
502  * pch_udc_read_csr() - Read the command and status registers.
503  * @dev:	Reference to pch_udc_dev structure
504  * @ep:	endpoint number whose CSR register is read
505  *
506  * Return codes:	content of CSR register
507  */
508 static u32 pch_udc_read_csr(struct pch_udc_dev *dev, unsigned int ep)
509 {
510 	unsigned long reg = PCH_UDC_CSR(ep);
511 
512 	pch_udc_csr_busy(dev);		/* Wait till idle */
513 	pch_udc_readl(dev, reg);	/* Dummy read */
514 	pch_udc_csr_busy(dev);		/* Wait till idle */
515 	return pch_udc_readl(dev, reg);
516 }
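/*
 * Illustrative example (not part of the original driver): reading back the
 * NE register of IN endpoint 1 through the accessor above.  Each access is
 * bracketed by the CSR busy-wait, and the first read is only a dummy read.
 *
 *	u32 ne = pch_udc_read_csr(dev, UDC_EPIN_IDX(1));
 *	u32 maxpkt = (ne & UDC_CSR_NE_MAX_PKT_MASK) >>
 *			UDC_CSR_NE_MAX_PKT_SHIFT;
 */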
517 
518 /**
519  * pch_udc_rmt_wakeup() - Initiate a remote wakeup
520  * @dev:	Reference to pch_udc_dev structure
521  */
522 static inline void pch_udc_rmt_wakeup(struct pch_udc_dev *dev)
523 {
524 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
525 	mdelay(1);
526 	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
527 }
528 
529 /**
530  * pch_udc_get_frame() - Get the current frame from device status register
531  * @dev:	Reference to pch_udc_dev structure
532  * Return:	current frame
533  */
534 static inline int pch_udc_get_frame(struct pch_udc_dev *dev)
535 {
536 	u32 frame = pch_udc_readl(dev, UDC_DEVSTS_ADDR);
537 	return (frame & UDC_DEVSTS_TS_MASK) >> UDC_DEVSTS_TS_SHIFT;
538 }
539 
540 /**
541  * pch_udc_clear_selfpowered() - Clear the self power control
542  * @dev:	Reference to pch_udc_regs structure
543  */
544 static inline void pch_udc_clear_selfpowered(struct pch_udc_dev *dev)
545 {
546 	pch_udc_bit_clr(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
547 }
548 
549 /**
550  * pch_udc_set_selfpowered() - Set the self power control
551  * @dev:	Reference to pch_udc_regs structure
552  */
553 static inline void pch_udc_set_selfpowered(struct pch_udc_dev *dev)
554 {
555 	pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
556 }
557 
558 /**
559  * pch_udc_set_disconnect() - Set the disconnect status.
560  * @dev:	Reference to pch_udc_regs structure
561  */
562 static inline void pch_udc_set_disconnect(struct pch_udc_dev *dev)
563 {
564 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
565 }
566 
567 /**
568  * pch_udc_clear_disconnect() - Clear the disconnect status.
569  * @dev:	Reference to pch_udc_regs structure
570  */
571 static void pch_udc_clear_disconnect(struct pch_udc_dev *dev)
572 {
573 	/* Clear the disconnect */
574 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
575 	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
576 	mdelay(1);
577 	/* Resume USB signalling */
578 	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
579 }
580 
581 /**
582  * pch_udc_reconnect() - This API initializes the usb device controller
583  *						and clears the disconnect status.
584  * @dev:		Reference to pch_udc_regs structure
585  */
586 static void pch_udc_init(struct pch_udc_dev *dev);
587 static void pch_udc_reconnect(struct pch_udc_dev *dev)
588 {
589 	pch_udc_init(dev);
590 
591 	/* enable device interrupts */
592 	/* pch_udc_enable_interrupts() */
593 	pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR,
594 			UDC_DEVINT_UR | UDC_DEVINT_ENUM);
595 
596 	/* Clear the disconnect */
597 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
598 	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
599 	mdelay(1);
600 	/* Resume USB signalling */
601 	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
602 }
603 
604 /**
605  * pch_udc_vbus_session() - Set or clear the disconnect status.
606  * @dev:	Reference to pch_udc_regs structure
607  * @is_active:	Parameter specifying the action
608  *		  0:   indicating VBUS power is ending
609  *		  !0:  indicating VBUS power is starting
610  */
611 static inline void pch_udc_vbus_session(struct pch_udc_dev *dev,
612 					  int is_active)
613 {
614 	if (is_active) {
615 		pch_udc_reconnect(dev);
616 		dev->vbus_session = 1;
617 	} else {
618 		if (dev->driver && dev->driver->disconnect) {
619 			spin_unlock(&dev->lock);
620 			dev->driver->disconnect(&dev->gadget);
621 			spin_lock(&dev->lock);
622 		}
623 		pch_udc_set_disconnect(dev);
624 		dev->vbus_session = 0;
625 	}
626 }
627 
628 /**
629  * pch_udc_ep_set_stall() - Set the stall of endpoint
630  * @ep:		Reference to structure of type pch_udc_ep_regs
631  */
632 static void pch_udc_ep_set_stall(struct pch_udc_ep *ep)
633 {
634 	if (ep->in) {
635 		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
636 		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
637 	} else {
638 		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
639 	}
640 }
641 
642 /**
643  * pch_udc_ep_clear_stall() - Clear the stall of endpoint
644  * @ep:		Reference to structure of type pch_udc_ep_regs
645  */
646 static inline void pch_udc_ep_clear_stall(struct pch_udc_ep *ep)
647 {
648 	/* Clear the stall */
649 	pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
650 	/* Clear NAK by writing CNAK */
651 	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
652 }
653 
654 /**
655  * pch_udc_ep_set_trfr_type() - Set the transfer type of endpoint
656  * @ep:		Reference to structure of type pch_udc_ep_regs
657  * @type:	Type of endpoint
658  */
659 static inline void pch_udc_ep_set_trfr_type(struct pch_udc_ep *ep,
660 					u8 type)
661 {
662 	pch_udc_ep_writel(ep, ((type << UDC_EPCTL_ET_SHIFT) &
663 				UDC_EPCTL_ET_MASK), UDC_EPCTL_ADDR);
664 }
665 
666 /**
667  * pch_udc_ep_set_bufsz() - Set the buffer size for the endpoint
668  * @ep:		Reference to structure of type pch_udc_ep_regs
669  * @buf_size:	The buffer word size
670  */
671 static void pch_udc_ep_set_bufsz(struct pch_udc_ep *ep,
672 						 u32 buf_size, u32 ep_in)
673 {
674 	u32 data;
675 	if (ep_in) {
676 		data = pch_udc_ep_readl(ep, UDC_BUFIN_FRAMENUM_ADDR);
677 		data = (data & 0xffff0000) | (buf_size & 0xffff);
678 		pch_udc_ep_writel(ep, data, UDC_BUFIN_FRAMENUM_ADDR);
679 	} else {
680 		data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
681 		data = (buf_size << 16) | (data & 0xffff);
682 		pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
683 	}
684 }
685 
686 /**
687  * pch_udc_ep_set_maxpkt() - Set the Max packet size for the endpoint
688  * @ep:		Reference to structure of type pch_udc_ep_regs
689  * @pkt_size:	The packet byte size
690  */
691 static void pch_udc_ep_set_maxpkt(struct pch_udc_ep *ep, u32 pkt_size)
692 {
693 	u32 data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
694 	data = (data & 0xffff0000) | (pkt_size & 0xffff);
695 	pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
696 }
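/*
 * Layout note with a worked example (illustrative, not part of the original
 * driver): for an OUT endpoint the two helpers above share
 * UDC_BUFOUT_MAXPKT_ADDR - the buffer size lands in bits 31:16 and the max
 * packet size in bits 15:0.  So
 *
 *	pch_udc_ep_set_bufsz(ep, UDC_EPOUT_BUFF_SIZE, 0);
 *	pch_udc_ep_set_maxpkt(ep, UDC_BULK_MAX_PKT_SIZE);
 *
 * leaves that register holding 0x01000200 (buffer size 256, max packet 512).
 */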
697 
698 /**
699  * pch_udc_ep_set_subptr() - Set the Setup buffer pointer for the endpoint
700  * @ep:		Reference to structure of type pch_udc_ep_regs
701  * @addr:	Address of the setup buffer
702  */
703 static inline void pch_udc_ep_set_subptr(struct pch_udc_ep *ep, u32 addr)
704 {
705 	pch_udc_ep_writel(ep, addr, UDC_SUBPTR_ADDR);
706 }
707 
708 /**
709  * pch_udc_ep_set_ddptr() - Set the Data descriptor pointer for the endpoint
710  * @ep:		Reference to structure of type pch_udc_ep_regs
711  * @addr:	Address of the data descriptor
712  */
713 static inline void pch_udc_ep_set_ddptr(struct pch_udc_ep *ep, u32 addr)
714 {
715 	pch_udc_ep_writel(ep, addr, UDC_DESPTR_ADDR);
716 }
717 
718 /**
719  * pch_udc_ep_set_pd() - Set the poll demand bit for the endpoint
720  * @ep:		Reference to structure of type pch_udc_ep_regs
721  */
722 static inline void pch_udc_ep_set_pd(struct pch_udc_ep *ep)
723 {
724 	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_P);
725 }
726 
727 /**
728  * pch_udc_ep_set_rrdy() - Set the receive ready bit for the endpoint
729  * @ep:		Reference to structure of type pch_udc_ep_regs
730  */
731 static inline void pch_udc_ep_set_rrdy(struct pch_udc_ep *ep)
732 {
733 	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
734 }
735 
736 /**
737  * pch_udc_ep_clear_rrdy() - Clear the receive ready bit for the endpoint
738  * @ep:		Reference to structure of type pch_udc_ep_regs
739  */
740 static inline void pch_udc_ep_clear_rrdy(struct pch_udc_ep *ep)
741 {
742 	pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
743 }
744 
745 /**
746  * pch_udc_set_dma() - Set the 'TDE' or RDE bit of device control
747  *			register depending on the direction specified
748  * @dev:	Reference to structure of type pch_udc_regs
749  * @dir:	whether Tx or Rx
750  *		  DMA_DIR_RX: Receive
751  *		  DMA_DIR_TX: Transmit
752  */
753 static inline void pch_udc_set_dma(struct pch_udc_dev *dev, int dir)
754 {
755 	if (dir == DMA_DIR_RX)
756 		pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
757 	else if (dir == DMA_DIR_TX)
758 		pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
759 }
760 
761 /**
762  * pch_udc_clear_dma() - Clear the 'TDE' or RDE bit of device control
763  *				 register depending on the direction specified
764  * @dev:	Reference to structure of type pch_udc_regs
765  * @dir:	Whether Tx or Rx
766  *		  DMA_DIR_RX: Receive
767  *		  DMA_DIR_TX: Transmit
768  */
769 static inline void pch_udc_clear_dma(struct pch_udc_dev *dev, int dir)
770 {
771 	if (dir == DMA_DIR_RX)
772 		pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
773 	else if (dir == DMA_DIR_TX)
774 		pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
775 }
776 
777 /**
778  * pch_udc_set_csr_done() - Set the device control register
779  *				CSR done field (bit 13)
780  * @dev:	reference to structure of type pch_udc_regs
781  */
782 static inline void pch_udc_set_csr_done(struct pch_udc_dev *dev)
783 {
784 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_CSR_DONE);
785 }
786 
787 /**
788  * pch_udc_disable_interrupts() - Disables the specified interrupts
789  * @dev:	Reference to structure of type pch_udc_regs
790  * @mask:	Mask to disable interrupts
791  */
792 static inline void pch_udc_disable_interrupts(struct pch_udc_dev *dev,
793 					    u32 mask)
794 {
795 	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, mask);
796 }
797 
798 /**
799  * pch_udc_enable_interrupts() - Enable the specified interrupts
800  * @dev:	Reference to structure of type pch_udc_regs
801  * @mask:	Mask to enable interrupts
802  */
803 static inline void pch_udc_enable_interrupts(struct pch_udc_dev *dev,
804 					   u32 mask)
805 {
806 	pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR, mask);
807 }
808 
809 /**
810  * pch_udc_disable_ep_interrupts() - Disable endpoint interrupts
811  * @dev:	Reference to structure of type pch_udc_regs
812  * @mask:	Mask to disable interrupts
813  */
814 static inline void pch_udc_disable_ep_interrupts(struct pch_udc_dev *dev,
815 						u32 mask)
816 {
817 	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, mask);
818 }
819 
820 /**
821  * pch_udc_enable_ep_interrupts() - Enable endpoint interrupts
822  * @dev:	Reference to structure of type pch_udc_regs
823  * @mask:	Mask to enable interrupts
824  */
825 static inline void pch_udc_enable_ep_interrupts(struct pch_udc_dev *dev,
826 					      u32 mask)
827 {
828 	pch_udc_bit_clr(dev, UDC_EPIRQMSK_ADDR, mask);
829 }
830 
831 /**
832  * pch_udc_read_device_interrupts() - Read the device interrupts
833  * @dev:	Reference to structure of type pch_udc_regs
834  * Return:	The device interrupts
835  */
836 static inline u32 pch_udc_read_device_interrupts(struct pch_udc_dev *dev)
837 {
838 	return pch_udc_readl(dev, UDC_DEVIRQSTS_ADDR);
839 }
840 
841 /**
842  * pch_udc_write_device_interrupts() - Write device interrupts
843  * @dev:	Reference to structure of type pch_udc_regs
844  * @val:	The value to be written to interrupt register
845  */
846 static inline void pch_udc_write_device_interrupts(struct pch_udc_dev *dev,
847 						     u32 val)
848 {
849 	pch_udc_writel(dev, val, UDC_DEVIRQSTS_ADDR);
850 }
851 
852 /**
853  * pch_udc_read_ep_interrupts() - Read the endpoint interrupts
854  * @dev:	Reference to structure of type pch_udc_regs
855  * Return:	The endpoint interrupts
856  */
857 static inline u32 pch_udc_read_ep_interrupts(struct pch_udc_dev *dev)
858 {
859 	return pch_udc_readl(dev, UDC_EPIRQSTS_ADDR);
860 }
861 
862 /**
863  * pch_udc_write_ep_interrupts() - Clear endpoint interrupts
864  * @dev:	Reference to structure of type pch_udc_regs
865  * @val:	The value to be written to interrupt register
866  */
867 static inline void pch_udc_write_ep_interrupts(struct pch_udc_dev *dev,
868 					     u32 val)
869 {
870 	pch_udc_writel(dev, val, UDC_EPIRQSTS_ADDR);
871 }
872 
873 /**
874  * pch_udc_read_device_status() - Read the device status
875  * @dev:	Reference to structure of type pch_udc_regs
876  * Return:	The device status
877  */
878 static inline u32 pch_udc_read_device_status(struct pch_udc_dev *dev)
879 {
880 	return pch_udc_readl(dev, UDC_DEVSTS_ADDR);
881 }
882 
883 /**
884  * pch_udc_read_ep_control() - Read the endpoint control
885  * @ep:		Reference to structure of type pch_udc_ep_regs
886  * Return:	The endpoint control register value
887  */
888 static inline u32 pch_udc_read_ep_control(struct pch_udc_ep *ep)
889 {
890 	return pch_udc_ep_readl(ep, UDC_EPCTL_ADDR);
891 }
892 
893 /**
894  * pch_udc_clear_ep_control() - Clear the endpoint control register
895  * @ep:		Reference to structure of type pch_udc_ep_regs
896  * Return:	None (this helper just clears the register)
897  */
898 static inline void pch_udc_clear_ep_control(struct pch_udc_ep *ep)
899 {
900 	return pch_udc_ep_writel(ep, 0, UDC_EPCTL_ADDR);
901 }
902 
903 /**
904  * pch_udc_read_ep_status() - Read the endpoint status
905  * @ep:		Reference to structure of type pch_udc_ep_regs
906  * Return:	The endpoint status
907  */
908 static inline u32 pch_udc_read_ep_status(struct pch_udc_ep *ep)
909 {
910 	return pch_udc_ep_readl(ep, UDC_EPSTS_ADDR);
911 }
912 
913 /**
914  * pch_udc_clear_ep_status() - Clear the endpoint status
915  * @ep:		Reference to structure of type pch_udc_ep_regs
916  * @stat:	Endpoint status
917  */
918 static inline void pch_udc_clear_ep_status(struct pch_udc_ep *ep,
919 					 u32 stat)
920 {
921 	return pch_udc_ep_writel(ep, stat, UDC_EPSTS_ADDR);
922 }
923 
924 /**
925  * pch_udc_ep_set_nak() - Set the bit 7 (SNAK field)
926  *				of the endpoint control register
927  * @ep:		Reference to structure of type pch_udc_ep_regs
928  */
929 static inline void pch_udc_ep_set_nak(struct pch_udc_ep *ep)
930 {
931 	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_SNAK);
932 }
933 
934 /**
935  * pch_udc_ep_clear_nak() - Set the bit 8 (CNAK field)
936  *				of the endpoint control register
937  * @ep:		reference to structure of type pch_udc_ep_regs
938  */
939 static void pch_udc_ep_clear_nak(struct pch_udc_ep *ep)
940 {
941 	unsigned int loopcnt = 0;
942 	struct pch_udc_dev *dev = ep->dev;
943 
944 	if (!(pch_udc_ep_readl(ep, UDC_EPCTL_ADDR) & UDC_EPCTL_NAK))
945 		return;
946 	if (!ep->in) {
947 		loopcnt = 10000;
948 		while (!(pch_udc_read_ep_status(ep) & UDC_EPSTS_MRXFIFO_EMP) &&
949 			--loopcnt)
950 			udelay(5);
951 		if (!loopcnt)
952 			dev_err(&dev->pdev->dev, "%s: RxFIFO not Empty\n",
953 				__func__);
954 	}
955 	loopcnt = 10000;
956 	while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_NAK) && --loopcnt) {
957 		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
958 		udelay(5);
959 	}
960 	if (!loopcnt)
961 		dev_err(&dev->pdev->dev, "%s: Clear NAK not set for ep%d%s\n",
962 			__func__, ep->num, (ep->in ? "in" : "out"));
963 }
964 
965 /**
966  * pch_udc_ep_fifo_flush() - Flush the endpoint fifo
967  * @ep:	reference to structure of type pch_udc_ep_regs
968  * @dir:	direction of endpoint
969  *		  0:  endpoint is OUT
970  *		  !0: endpoint is IN
971  */
972 static void pch_udc_ep_fifo_flush(struct pch_udc_ep *ep, int dir)
973 {
974 	if (dir) {	/* IN ep */
975 		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
976 		return;
977 	}
978 }
979 
980 /**
981  * pch_udc_ep_enable() - This API enables the endpoint
982  * @ep:		Reference to structure of type pch_udc_ep
983  * @desc:	endpoint descriptor
984  */
985 static void pch_udc_ep_enable(struct pch_udc_ep *ep,
986 			       struct pch_udc_cfg_data *cfg,
987 			       const struct usb_endpoint_descriptor *desc)
988 {
989 	u32 val = 0;
990 	u32 buff_size = 0;
991 
992 	pch_udc_ep_set_trfr_type(ep, desc->bmAttributes);
993 	if (ep->in)
994 		buff_size = UDC_EPIN_BUFF_SIZE;
995 	else
996 		buff_size = UDC_EPOUT_BUFF_SIZE;
997 	pch_udc_ep_set_bufsz(ep, buff_size, ep->in);
998 	pch_udc_ep_set_maxpkt(ep, usb_endpoint_maxp(desc));
999 	pch_udc_ep_set_nak(ep);
1000 	pch_udc_ep_fifo_flush(ep, ep->in);
1001 	/* Configure the endpoint */
1002 	val = ep->num << UDC_CSR_NE_NUM_SHIFT | ep->in << UDC_CSR_NE_DIR_SHIFT |
1003 	      ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) <<
1004 		UDC_CSR_NE_TYPE_SHIFT) |
1005 	      (cfg->cur_cfg << UDC_CSR_NE_CFG_SHIFT) |
1006 	      (cfg->cur_intf << UDC_CSR_NE_INTF_SHIFT) |
1007 	      (cfg->cur_alt << UDC_CSR_NE_ALT_SHIFT) |
1008 	      usb_endpoint_maxp(desc) << UDC_CSR_NE_MAX_PKT_SHIFT;
1009 
1010 	if (ep->in)
1011 		pch_udc_write_csr(ep->dev, val, UDC_EPIN_IDX(ep->num));
1012 	else
1013 		pch_udc_write_csr(ep->dev, val, UDC_EPOUT_IDX(ep->num));
1014 }
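/*
 * Worked example of the NE encoding above (illustrative only; assumes
 * configuration 1, interface 0, alternate setting 0): a bulk IN endpoint 1
 * with a 512-byte max packet size yields
 *
 *	val = (1 << UDC_CSR_NE_NUM_SHIFT)	 // 0x00000001
 *	    | (1 << UDC_CSR_NE_DIR_SHIFT)	 // 0x00000010
 *	    | (2 << UDC_CSR_NE_TYPE_SHIFT)	 // 0x00000040 (bulk)
 *	    | (1 << UDC_CSR_NE_CFG_SHIFT)	 // 0x00000080
 *	    | (512 << UDC_CSR_NE_MAX_PKT_SHIFT); // 0x10000000
 *
 * i.e. 0x100000D1, written to CSR slot UDC_EPIN_IDX(1) == 2.
 */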
1015 
1016 /**
1017  * pch_udc_ep_disable() - This API disables the endpoint
1018  * @ep:	Reference to structure of type pch_udc_ep
1019  */
1020 static void pch_udc_ep_disable(struct pch_udc_ep *ep)
1021 {
1022 	if (ep->in) {
1023 		/* flush the fifo */
1024 		pch_udc_ep_writel(ep, UDC_EPCTL_F, UDC_EPCTL_ADDR);
1025 		/* set NAK */
1026 		pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
1027 		pch_udc_ep_bit_set(ep, UDC_EPSTS_ADDR, UDC_EPSTS_IN);
1028 	} else {
1029 		/* set NAK */
1030 		pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
1031 	}
1032 	/* reset desc pointer */
1033 	pch_udc_ep_writel(ep, 0, UDC_DESPTR_ADDR);
1034 }
1035 
1036 /**
1037  * pch_udc_wait_ep_stall() - Wait for the endpoint stall bit to clear.
1038  * @ep:	Reference to structure of type pch_udc_ep
1039  */
1040 static void pch_udc_wait_ep_stall(struct pch_udc_ep *ep)
1041 {
1042 	unsigned int count = 10000;
1043 
1044 	/* Wait till idle */
1045 	while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_S) && --count)
1046 		udelay(5);
1047 	if (!count)
1048 		dev_err(&ep->dev->pdev->dev, "%s: wait error\n", __func__);
1049 }
1050 
1051 /**
1052  * pch_udc_init() - This API initializes usb device controller
1053  * @dev:	Reference to pch_udc_regs structure
1054  */
1055 static void pch_udc_init(struct pch_udc_dev *dev)
1056 {
1057 	if (NULL == dev) {
1058 		pr_err("%s: Invalid address\n", __func__);
1059 		return;
1060 	}
1061 	/* Soft Reset and Reset PHY */
1062 	pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
1063 	pch_udc_writel(dev, UDC_SRST | UDC_PSRST, UDC_SRST_ADDR);
1064 	mdelay(1);
1065 	pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
1066 	pch_udc_writel(dev, 0x00, UDC_SRST_ADDR);
1067 	mdelay(1);
1068 	/* mask and clear all device interrupts */
1069 	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
1070 	pch_udc_bit_set(dev, UDC_DEVIRQSTS_ADDR, UDC_DEVINT_MSK);
1071 
1072 	/* mask and clear all ep interrupts */
1073 	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1074 	pch_udc_bit_set(dev, UDC_EPIRQSTS_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1075 
1076 	/* enable dynamic CSR programming, self powered and device speed */
1077 	if (speed_fs)
1078 		pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
1079 				UDC_DEVCFG_SP | UDC_DEVCFG_SPD_FS);
1080 	else /* default high speed */
1081 		pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
1082 				UDC_DEVCFG_SP | UDC_DEVCFG_SPD_HS);
1083 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR,
1084 			(PCH_UDC_THLEN << UDC_DEVCTL_THLEN_SHIFT) |
1085 			(PCH_UDC_BRLEN << UDC_DEVCTL_BRLEN_SHIFT) |
1086 			UDC_DEVCTL_MODE | UDC_DEVCTL_BREN |
1087 			UDC_DEVCTL_THE);
1088 }
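/*
 * Worked example (illustrative, not part of the original driver): with the
 * defaults above, the bitmask OR-ed into UDC_DEVCTL_ADDR is
 *
 *	(0x1F << 24) | (0x0F << 16) | UDC_DEVCTL_MODE | UDC_DEVCTL_BREN |
 *	UDC_DEVCTL_THE  ==  0x1F0F0380
 *
 * i.e. threshold length 0x1F, burst length 0x0F, device mode, with burst
 * and threshold enabled.
 */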
1089 
1090 /**
1091  * pch_udc_exit() - This API exits the usb device controller
1092  * @dev:	Reference to pch_udc_regs structure
1093  */
1094 static void pch_udc_exit(struct pch_udc_dev *dev)
1095 {
1096 	/* mask all device interrupts */
1097 	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
1098 	/* mask all ep interrupts */
1099 	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1100 	/* put device in disconnected state */
1101 	pch_udc_set_disconnect(dev);
1102 }
1103 
1104 /**
1105  * pch_udc_pcd_get_frame() - This API is invoked to get the current frame number
1106  * @gadget:	Reference to the gadget driver
1107  *
1108  * Return codes:
1109  *	0:		Success
1110  *	-EINVAL:	If the gadget passed is NULL
1111  */
1112 static int pch_udc_pcd_get_frame(struct usb_gadget *gadget)
1113 {
1114 	struct pch_udc_dev	*dev;
1115 
1116 	if (!gadget)
1117 		return -EINVAL;
1118 	dev = container_of(gadget, struct pch_udc_dev, gadget);
1119 	return pch_udc_get_frame(dev);
1120 }
1121 
1122 /**
1123  * pch_udc_pcd_wakeup() - This API is invoked to initiate a remote wakeup
1124  * @gadget:	Reference to the gadget driver
1125  *
1126  * Return codes:
1127  *	0:		Success
1128  *	-EINVAL:	If the gadget passed is NULL
1129  */
1130 static int pch_udc_pcd_wakeup(struct usb_gadget *gadget)
1131 {
1132 	struct pch_udc_dev	*dev;
1133 	unsigned long		flags;
1134 
1135 	if (!gadget)
1136 		return -EINVAL;
1137 	dev = container_of(gadget, struct pch_udc_dev, gadget);
1138 	spin_lock_irqsave(&dev->lock, flags);
1139 	pch_udc_rmt_wakeup(dev);
1140 	spin_unlock_irqrestore(&dev->lock, flags);
1141 	return 0;
1142 }
1143 
1144 /**
1145  * pch_udc_pcd_selfpowered() - This API is invoked to specify whether the device
1146  *				is self powered or not
1147  * @gadget:	Reference to the gadget driver
1148  * @value:	Specifies self powered or not
1149  *
1150  * Return codes:
1151  *	0:		Success
1152  *	-EINVAL:	If the gadget passed is NULL
1153  */
1154 static int pch_udc_pcd_selfpowered(struct usb_gadget *gadget, int value)
1155 {
1156 	struct pch_udc_dev	*dev;
1157 
1158 	if (!gadget)
1159 		return -EINVAL;
1160 	dev = container_of(gadget, struct pch_udc_dev, gadget);
1161 	if (value)
1162 		pch_udc_set_selfpowered(dev);
1163 	else
1164 		pch_udc_clear_selfpowered(dev);
1165 	return 0;
1166 }
1167 
1168 /**
1169  * pch_udc_pcd_pullup() - This API is invoked to make the device
1170  *				visible/invisible to the host
1171  * @gadget:	Reference to the gadget driver
1172  * @is_on:	Specifies whether the pull up is made active or inactive
1173  *
1174  * Return codes:
1175  *	0:		Success
1176  *	-EINVAL:	If the gadget passed is NULL
1177  */
1178 static int pch_udc_pcd_pullup(struct usb_gadget *gadget, int is_on)
1179 {
1180 	struct pch_udc_dev	*dev;
1181 
1182 	if (!gadget)
1183 		return -EINVAL;
1184 	dev = container_of(gadget, struct pch_udc_dev, gadget);
1185 	if (is_on) {
1186 		pch_udc_reconnect(dev);
1187 	} else {
1188 		if (dev->driver && dev->driver->disconnect) {
1189 			spin_unlock(&dev->lock);
1190 			dev->driver->disconnect(&dev->gadget);
1191 			spin_lock(&dev->lock);
1192 		}
1193 		pch_udc_set_disconnect(dev);
1194 	}
1195 
1196 	return 0;
1197 }
1198 
1199 /**
1200  * pch_udc_pcd_vbus_session() - This API is used by a driver for an external
1201  *				transceiver (or GPIO) that
1202  *				detects a VBUS power session starting/ending
1203  * @gadget:	Reference to the gadget driver
1204  * @is_active:	specifies whether the session is starting or ending
1205  *
1206  * Return codes:
1207  *	0:		Success
1208  *	-EINVAL:	If the gadget passed is NULL
1209  */
1210 static int pch_udc_pcd_vbus_session(struct usb_gadget *gadget, int is_active)
1211 {
1212 	struct pch_udc_dev	*dev;
1213 
1214 	if (!gadget)
1215 		return -EINVAL;
1216 	dev = container_of(gadget, struct pch_udc_dev, gadget);
1217 	pch_udc_vbus_session(dev, is_active);
1218 	return 0;
1219 }
1220 
1221 /**
1222  * pch_udc_pcd_vbus_draw() - This API is used by gadget drivers during
1223  *				SET_CONFIGURATION calls to
1224  *				specify how much power the device can consume
1225  * @gadget:	Reference to the gadget driver
1226  * @mA:		specifies the current limit in mA
1227  *
1228  * Return codes:
1229  *	-EINVAL:	If the gadget passed is NULL
1230  *	-EOPNOTSUPP:
1231  */
1232 static int pch_udc_pcd_vbus_draw(struct usb_gadget *gadget, unsigned int mA)
1233 {
1234 	return -EOPNOTSUPP;
1235 }
1236 
1237 static int pch_udc_start(struct usb_gadget *g,
1238 		struct usb_gadget_driver *driver);
1239 static int pch_udc_stop(struct usb_gadget *g,
1240 		struct usb_gadget_driver *driver);
1241 static const struct usb_gadget_ops pch_udc_ops = {
1242 	.get_frame = pch_udc_pcd_get_frame,
1243 	.wakeup = pch_udc_pcd_wakeup,
1244 	.set_selfpowered = pch_udc_pcd_selfpowered,
1245 	.pullup = pch_udc_pcd_pullup,
1246 	.vbus_session = pch_udc_pcd_vbus_session,
1247 	.vbus_draw = pch_udc_pcd_vbus_draw,
1248 	.udc_start = pch_udc_start,
1249 	.udc_stop = pch_udc_stop,
1250 };
1251 
1252 /**
1253  * pch_vbus_gpio_get_value() - This API gets value of GPIO port as VBUS status.
1254  * @dev:	Reference to the driver structure
1255  *
1256  * Return value:
1257  *	1: VBUS is high
1258  *	0: VBUS is low
1259  *     -1: VBUS detection via GPIO is not enabled
1260  */
1261 static int pch_vbus_gpio_get_value(struct pch_udc_dev *dev)
1262 {
1263 	int vbus = 0;
1264 
1265 	if (dev->vbus_gpio.port)
1266 		vbus = gpio_get_value(dev->vbus_gpio.port) ? 1 : 0;
1267 	else
1268 		vbus = -1;
1269 
1270 	return vbus;
1271 }
1272 
1273 /**
1274  * pch_vbus_gpio_work_fall() - This API keeps watch on VBUS becoming Low.
1275  *                             If VBUS is Low, disconnect is processed
1276  * @irq_work:	Structure for WorkQueue
1277  *
1278  */
1279 static void pch_vbus_gpio_work_fall(struct work_struct *irq_work)
1280 {
1281 	struct pch_vbus_gpio_data *vbus_gpio = container_of(irq_work,
1282 		struct pch_vbus_gpio_data, irq_work_fall);
1283 	struct pch_udc_dev *dev =
1284 		container_of(vbus_gpio, struct pch_udc_dev, vbus_gpio);
1285 	int vbus_saved = -1;
1286 	int vbus;
1287 	int count;
1288 
1289 	if (!dev->vbus_gpio.port)
1290 		return;
1291 
1292 	for (count = 0; count < (PCH_VBUS_PERIOD / PCH_VBUS_INTERVAL);
1293 		count++) {
1294 		vbus = pch_vbus_gpio_get_value(dev);
1295 
1296 		if ((vbus_saved == vbus) && (vbus == 0)) {
1297 			dev_dbg(&dev->pdev->dev, "VBUS fell");
1298 			if (dev->driver
1299 				&& dev->driver->disconnect) {
1300 				dev->driver->disconnect(
1301 					&dev->gadget);
1302 			}
1303 			if (dev->vbus_gpio.intr)
1304 				pch_udc_init(dev);
1305 			else
1306 				pch_udc_reconnect(dev);
1307 			return;
1308 		}
1309 		vbus_saved = vbus;
1310 		mdelay(PCH_VBUS_INTERVAL);
1311 	}
1312 }
1313 
1314 /**
1315  * pch_vbus_gpio_work_rise() - This API checks VBUS is High.
1316  *                             If VBUS is High, connect is processed
1317  * @irq_work:	Structure for WorkQueue
1318  *
1319  */
1320 static void pch_vbus_gpio_work_rise(struct work_struct *irq_work)
1321 {
1322 	struct pch_vbus_gpio_data *vbus_gpio = container_of(irq_work,
1323 		struct pch_vbus_gpio_data, irq_work_rise);
1324 	struct pch_udc_dev *dev =
1325 		container_of(vbus_gpio, struct pch_udc_dev, vbus_gpio);
1326 	int vbus;
1327 
1328 	if (!dev->vbus_gpio.port)
1329 		return;
1330 
1331 	mdelay(PCH_VBUS_INTERVAL);
1332 	vbus = pch_vbus_gpio_get_value(dev);
1333 
1334 	if (vbus == 1) {
1335 		dev_dbg(&dev->pdev->dev, "VBUS rose");
1336 		pch_udc_reconnect(dev);
1337 		return;
1338 	}
1339 }
1340 
1341 /**
1342  * pch_vbus_gpio_irq() - IRQ handler for the GPIO interrupt signalling a VBUS change
1343  * @irq:	Interrupt request number
1344  * @dev:	Reference to the device structure
1345  *
1346  * Return codes:
1347  *	IRQ_HANDLED: the VBUS change was handled (work scheduled)
1348  *	IRQ_NONE: GPIO-based VBUS detection is not in use
1349  */
1350 static irqreturn_t pch_vbus_gpio_irq(int irq, void *data)
1351 {
1352 	struct pch_udc_dev *dev = (struct pch_udc_dev *)data;
1353 
1354 	if (!dev->vbus_gpio.port || !dev->vbus_gpio.intr)
1355 		return IRQ_NONE;
1356 
1357 	if (pch_vbus_gpio_get_value(dev))
1358 		schedule_work(&dev->vbus_gpio.irq_work_rise);
1359 	else
1360 		schedule_work(&dev->vbus_gpio.irq_work_fall);
1361 
1362 	return IRQ_HANDLED;
1363 }
1364 
1365 /**
1366  * pch_vbus_gpio_init() - This API initializes the GPIO port used to detect VBUS.
1367  * @dev:	Reference to the driver structure
1368  * @vbus_gpio_port:	GPIO port number used for VBUS detection
1369  *
1370  * Return codes:
1371  *	0: Success
1372  *	-EINVAL: GPIO port is invalid or can't be initialized.
1373  */
1374 static int pch_vbus_gpio_init(struct pch_udc_dev *dev, int vbus_gpio_port)
1375 {
1376 	int err;
1377 	int irq_num = 0;
1378 
1379 	dev->vbus_gpio.port = 0;
1380 	dev->vbus_gpio.intr = 0;
1381 
1382 	if (vbus_gpio_port <= -1)
1383 		return -EINVAL;
1384 
1385 	err = gpio_is_valid(vbus_gpio_port);
1386 	if (!err) {
1387 		pr_err("%s: gpio port %d is invalid\n",
1388 			__func__, vbus_gpio_port);
1389 		return -EINVAL;
1390 	}
1391 
1392 	err = gpio_request(vbus_gpio_port, "pch_vbus");
1393 	if (err) {
1394 		pr_err("%s: can't request gpio port %d, err: %d\n",
1395 			__func__, vbus_gpio_port, err);
1396 		return -EINVAL;
1397 	}
1398 
1399 	dev->vbus_gpio.port = vbus_gpio_port;
1400 	gpio_direction_input(vbus_gpio_port);
1401 	INIT_WORK(&dev->vbus_gpio.irq_work_fall, pch_vbus_gpio_work_fall);
1402 
1403 	irq_num = gpio_to_irq(vbus_gpio_port);
1404 	if (irq_num > 0) {
1405 		irq_set_irq_type(irq_num, IRQ_TYPE_EDGE_BOTH);
1406 		err = request_irq(irq_num, pch_vbus_gpio_irq, 0,
1407 			"vbus_detect", dev);
1408 		if (!err) {
1409 			dev->vbus_gpio.intr = irq_num;
1410 			INIT_WORK(&dev->vbus_gpio.irq_work_rise,
1411 				pch_vbus_gpio_work_rise);
1412 		} else {
1413 			pr_err("%s: can't request irq %d, err: %d\n",
1414 				__func__, irq_num, err);
1415 		}
1416 	}
1417 
1418 	return 0;
1419 }
1420 
1421 /**
1422  * pch_vbus_gpio_free() - This API frees resources of GPIO port
1423  * @dev:	Reference to the driver structure
1424  */
1425 static void pch_vbus_gpio_free(struct pch_udc_dev *dev)
1426 {
1427 	if (dev->vbus_gpio.intr)
1428 		free_irq(dev->vbus_gpio.intr, dev);
1429 
1430 	if (dev->vbus_gpio.port)
1431 		gpio_free(dev->vbus_gpio.port);
1432 }
1433 
1434 /**
1435  * complete_req() - This API is invoked from the driver when processing
1436  *			of a request is complete
1437  * @ep:		Reference to the endpoint structure
1438  * @req:	Reference to the request structure
1439  * @status:	Indicates the success/failure of completion
1440  */
1441 static void complete_req(struct pch_udc_ep *ep, struct pch_udc_request *req,
1442 								 int status)
1443 	__releases(&dev->lock)
1444 	__acquires(&dev->lock)
1445 {
1446 	struct pch_udc_dev	*dev;
1447 	unsigned halted = ep->halted;
1448 
1449 	list_del_init(&req->queue);
1450 
1451 	/* set new status if pending */
1452 	if (req->req.status == -EINPROGRESS)
1453 		req->req.status = status;
1454 	else
1455 		status = req->req.status;
1456 
1457 	dev = ep->dev;
1458 	if (req->dma_mapped) {
1459 		if (req->dma == DMA_ADDR_INVALID) {
1460 			if (ep->in)
1461 				dma_unmap_single(&dev->pdev->dev, req->req.dma,
1462 						 req->req.length,
1463 						 DMA_TO_DEVICE);
1464 			else
1465 				dma_unmap_single(&dev->pdev->dev, req->req.dma,
1466 						 req->req.length,
1467 						 DMA_FROM_DEVICE);
1468 			req->req.dma = DMA_ADDR_INVALID;
1469 		} else {
1470 			if (ep->in)
1471 				dma_unmap_single(&dev->pdev->dev, req->dma,
1472 						 req->req.length,
1473 						 DMA_TO_DEVICE);
1474 			else {
1475 				dma_unmap_single(&dev->pdev->dev, req->dma,
1476 						 req->req.length,
1477 						 DMA_FROM_DEVICE);
1478 				memcpy(req->req.buf, req->buf, req->req.length);
1479 			}
1480 			kfree(req->buf);
1481 			req->dma = DMA_ADDR_INVALID;
1482 		}
1483 		req->dma_mapped = 0;
1484 	}
1485 	ep->halted = 1;
1486 	spin_unlock(&dev->lock);
1487 	if (!ep->in)
1488 		pch_udc_ep_clear_rrdy(ep);
1489 	req->req.complete(&ep->ep, &req->req);
1490 	spin_lock(&dev->lock);
1491 	ep->halted = halted;
1492 }
1493 
1494 /**
1495  * empty_req_queue() - This API empties the request queue of an endpoint
1496  * @ep:		Reference to the endpoint structure
1497  */
1498 static void empty_req_queue(struct pch_udc_ep *ep)
1499 {
1500 	struct pch_udc_request	*req;
1501 
1502 	ep->halted = 1;
1503 	while (!list_empty(&ep->queue)) {
1504 		req = list_entry(ep->queue.next, struct pch_udc_request, queue);
1505 		complete_req(ep, req, -ESHUTDOWN);	/* Remove from list */
1506 	}
1507 }
1508 
1509 /**
1510  * pch_udc_free_dma_chain() - This function frees the DMA chain created
1511  *				for the request
1512  * @dev:	Reference to the driver structure
1513  * @req:	Reference to the request to be freed
1514  *
1515  * Return codes:
1516  *	0: Success
1517  */
1518 static void pch_udc_free_dma_chain(struct pch_udc_dev *dev,
1519 				   struct pch_udc_request *req)
1520 {
1521 	struct pch_udc_data_dma_desc *td = req->td_data;
1522 	unsigned i = req->chain_len;
1523 
1524 	dma_addr_t addr2;
1525 	dma_addr_t addr = (dma_addr_t)td->next;
1526 	td->next = 0x00;
1527 	for (; i > 1; --i) {
1528 		/* do not free first desc., will be done by free for request */
1529 		td = phys_to_virt(addr);
1530 		addr2 = (dma_addr_t)td->next;
1531 		pci_pool_free(dev->data_requests, td, addr);
1532 		td->next = 0x00;
1533 		addr = addr2;
1534 	}
1535 	req->chain_len = 1;
1536 }
1537 
1538 /**
1539  * pch_udc_create_dma_chain() - This function creates or reinitializes
1540  *				a DMA chain
1541  * @ep:		Reference to the endpoint structure
1542  * @req:	Reference to the request
1543  * @buf_len:	The buffer length
1544  * @gfp_flags:	Flags to be used while mapping the data buffer
1545  *
1546  * Return codes:
1547  *	0:		success,
1548  *	-ENOMEM:	pci_pool_alloc invocation fails
1549  */
1550 static int pch_udc_create_dma_chain(struct pch_udc_ep *ep,
1551 				    struct pch_udc_request *req,
1552 				    unsigned long buf_len,
1553 				    gfp_t gfp_flags)
1554 {
1555 	struct pch_udc_data_dma_desc *td = req->td_data, *last;
1556 	unsigned long bytes = req->req.length, i = 0;
1557 	dma_addr_t dma_addr;
1558 	unsigned len = 1;
1559 
1560 	if (req->chain_len > 1)
1561 		pch_udc_free_dma_chain(ep->dev, req);
1562 
1563 	if (req->dma == DMA_ADDR_INVALID)
1564 		td->dataptr = req->req.dma;
1565 	else
1566 		td->dataptr = req->dma;
1567 
1568 	td->status = PCH_UDC_BS_HST_BSY;
1569 	for (; ; bytes -= buf_len, ++len) {
1570 		td->status = PCH_UDC_BS_HST_BSY | min(buf_len, bytes);
1571 		if (bytes <= buf_len)
1572 			break;
1573 		last = td;
1574 		td = pci_pool_alloc(ep->dev->data_requests, gfp_flags,
1575 				    &dma_addr);
1576 		if (!td)
1577 			goto nomem;
1578 		i += buf_len;
1579 		td->dataptr = req->td_data->dataptr + i;
1580 		last->next = dma_addr;
1581 	}
1582 
1583 	req->td_data_last = td;
1584 	td->status |= PCH_UDC_DMA_LAST;
1585 	td->next = req->td_data_phys;
1586 	req->chain_len = len;
1587 	return 0;
1588 
1589 nomem:
1590 	if (len > 1) {
1591 		req->chain_len = len;
1592 		pch_udc_free_dma_chain(ep->dev, req);
1593 	}
1594 	req->chain_len = 1;
1595 	return -ENOMEM;
1596 }
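/*
 * Illustrative example (not part of the original driver): for an OUT
 * request of 1000 bytes with buf_len == 64 (the endpoint's maxpacket), the
 * loop above builds a chain of 16 descriptors.  The first 15 carry
 * PCH_UDC_BS_HST_BSY | 64 with dataptr offsets 0, 64, ..., 960, the last
 * carries PCH_UDC_BS_HST_BSY | 40, is flagged PCH_UDC_DMA_LAST, and its
 * next pointer wraps back to req->td_data_phys.
 */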
1597 
1598 /**
1599  * prepare_dma() - This function creates and initializes the DMA chain
1600  *			for the request
1601  * @ep:		Reference to the endpoint structure
1602  * @req:	Reference to the request
1603  * @gfp:	Flag to be used while mapping the data buffer
1604  *
1605  * Return codes:
1606  *	0:		Success
1607  *	Nonzero:	linux error number on failure
1608  */
1609 static int prepare_dma(struct pch_udc_ep *ep, struct pch_udc_request *req,
1610 			  gfp_t gfp)
1611 {
1612 	int	retval;
1613 
1614 	/* Allocate and create a DMA chain */
1615 	retval = pch_udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
1616 	if (retval) {
1617 		pr_err("%s: could not create DMA chain:%d\n", __func__, retval);
1618 		return retval;
1619 	}
1620 	if (ep->in)
1621 		req->td_data->status = (req->td_data->status &
1622 				~PCH_UDC_BUFF_STS) | PCH_UDC_BS_HST_RDY;
1623 	return 0;
1624 }
1625 
1626 /**
1627  * process_zlp() - This function processes zero length packets
1628  *			from the gadget driver
1629  * @ep:		Reference to the endpoint structure
1630  * @req:	Reference to the request
1631  */
1632 static void process_zlp(struct pch_udc_ep *ep, struct pch_udc_request *req)
1633 {
1634 	struct pch_udc_dev	*dev = ep->dev;
1635 
1636 	/* IN zlp's are handled by hardware */
1637 	complete_req(ep, req, 0);
1638 
1639 	/* if set_config or set_intf is waiting for ack by zlp
1640 	 * then set CSR_DONE
1641 	 */
1642 	if (dev->set_cfg_not_acked) {
1643 		pch_udc_set_csr_done(dev);
1644 		dev->set_cfg_not_acked = 0;
1645 	}
1646 	/* setup command is ACK'ed now by zlp */
1647 	if (!dev->stall && dev->waiting_zlp_ack) {
1648 		pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
1649 		dev->waiting_zlp_ack = 0;
1650 	}
1651 }
1652 
1653 /**
1654  * pch_udc_start_rxrequest() - This function starts a receive request.
1655  * @ep:		Reference to the endpoint structure
1656  * @req:	Reference to the request structure
1657  */
1658 static void pch_udc_start_rxrequest(struct pch_udc_ep *ep,
1659 					 struct pch_udc_request *req)
1660 {
1661 	struct pch_udc_data_dma_desc *td_data;
1662 
1663 	pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
1664 	td_data = req->td_data;
1665 	/* Set the status bits for all descriptors */
1666 	while (1) {
1667 		td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
1668 				    PCH_UDC_BS_HST_RDY;
1669 		if ((td_data->status & PCH_UDC_DMA_LAST) ==  PCH_UDC_DMA_LAST)
1670 			break;
1671 		td_data = phys_to_virt(td_data->next);
1672 	}
1673 	/* Write the descriptor pointer */
1674 	pch_udc_ep_set_ddptr(ep, req->td_data_phys);
1675 	req->dma_going = 1;
1676 	pch_udc_enable_ep_interrupts(ep->dev, UDC_EPINT_OUT_EP0 << ep->num);
1677 	pch_udc_set_dma(ep->dev, DMA_DIR_RX);
1678 	pch_udc_ep_clear_nak(ep);
1679 	pch_udc_ep_set_rrdy(ep);
1680 }
1681 
1682 /**
1683  * pch_udc_pcd_ep_enable() - This API enables the endpoint. It is called
1684  *				from gadget driver
1685  * @usbep:	Reference to the USB endpoint structure
1686  * @desc:	Reference to the USB endpoint descriptor structure
1687  *
1688  * Return codes:
1689  *	0:		Success
1690  *	-EINVAL:
1691  *	-ESHUTDOWN:
1692  */
1693 static int pch_udc_pcd_ep_enable(struct usb_ep *usbep,
1694 				    const struct usb_endpoint_descriptor *desc)
1695 {
1696 	struct pch_udc_ep	*ep;
1697 	struct pch_udc_dev	*dev;
1698 	unsigned long		iflags;
1699 
1700 	if (!usbep || (usbep->name == ep0_string) || !desc ||
1701 	    (desc->bDescriptorType != USB_DT_ENDPOINT) || !desc->wMaxPacketSize)
1702 		return -EINVAL;
1703 
1704 	ep = container_of(usbep, struct pch_udc_ep, ep);
1705 	dev = ep->dev;
1706 	if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
1707 		return -ESHUTDOWN;
1708 	spin_lock_irqsave(&dev->lock, iflags);
1709 	ep->ep.desc = desc;
1710 	ep->halted = 0;
1711 	pch_udc_ep_enable(ep, &ep->dev->cfg_data, desc);
1712 	ep->ep.maxpacket = usb_endpoint_maxp(desc);
1713 	pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1714 	spin_unlock_irqrestore(&dev->lock, iflags);
1715 	return 0;
1716 }
1717 
1718 /**
1719  * pch_udc_pcd_ep_disable() - This API disables endpoint and is called
1720  *				from gadget driver
1721  * @usbep:	Reference to the USB endpoint structure
1722  *
1723  * Return codes:
1724  *	0:		Success
1725  *	-EINVAL:
1726  */
1727 static int pch_udc_pcd_ep_disable(struct usb_ep *usbep)
1728 {
1729 	struct pch_udc_ep	*ep;
1730 	struct pch_udc_dev	*dev;
1731 	unsigned long	iflags;
1732 
1733 	if (!usbep)
1734 		return -EINVAL;
1735 
1736 	ep = container_of(usbep, struct pch_udc_ep, ep);
1737 	dev = ep->dev;
1738 	if ((usbep->name == ep0_string) || !ep->ep.desc)
1739 		return -EINVAL;
1740 
1741 	spin_lock_irqsave(&ep->dev->lock, iflags);
1742 	empty_req_queue(ep);
1743 	ep->halted = 1;
1744 	pch_udc_ep_disable(ep);
1745 	pch_udc_disable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1746 	ep->ep.desc = NULL;
1747 	INIT_LIST_HEAD(&ep->queue);
1748 	spin_unlock_irqrestore(&ep->dev->lock, iflags);
1749 	return 0;
1750 }
1751 
1752 /**
1753  * pch_udc_alloc_request() - This function allocates request structure.
1754  *				It is called by gadget driver
1755  * @usbep:	Reference to the USB endpoint structure
1756  * @gfp:	Flag to be used while allocating memory
1757  *
1758  * Return codes:
1759  *	NULL:			Failure
1760  *	Allocated address:	Success
1761  */
1762 static struct usb_request *pch_udc_alloc_request(struct usb_ep *usbep,
1763 						  gfp_t gfp)
1764 {
1765 	struct pch_udc_request		*req;
1766 	struct pch_udc_ep		*ep;
1767 	struct pch_udc_data_dma_desc	*dma_desc;
1768 	struct pch_udc_dev		*dev;
1769 
1770 	if (!usbep)
1771 		return NULL;
1772 	ep = container_of(usbep, struct pch_udc_ep, ep);
1773 	dev = ep->dev;
1774 	req = kzalloc(sizeof *req, gfp);
1775 	if (!req)
1776 		return NULL;
1777 	req->req.dma = DMA_ADDR_INVALID;
1778 	req->dma = DMA_ADDR_INVALID;
1779 	INIT_LIST_HEAD(&req->queue);
1780 	if (!ep->dev->dma_addr)
1781 		return &req->req;
1782 	/* ep0 in requests are allocated from data pool here */
1783 	dma_desc = pci_pool_alloc(ep->dev->data_requests, gfp,
1784 				  &req->td_data_phys);
1785 	if (!dma_desc) {
1786 		kfree(req);
1787 		return NULL;
1788 	}
1789 	/* prevent from using desc. - set HOST BUSY */
1790 	dma_desc->status |= PCH_UDC_BS_HST_BSY;
1791 	dma_desc->dataptr = cpu_to_le32(DMA_ADDR_INVALID);
1792 	req->td_data = dma_desc;
1793 	req->td_data_last = dma_desc;
1794 	req->chain_len = 1;
1795 	return &req->req;
1796 }
1797 
1798 /**
1799  * pch_udc_free_request() - This function frees a request structure.
1800  *				It is called by the gadget driver
1801  * @usbep:	Reference to the USB endpoint structure
1802  * @usbreq:	Reference to the USB request
1803  */
1804 static void pch_udc_free_request(struct usb_ep *usbep,
1805 				  struct usb_request *usbreq)
1806 {
1807 	struct pch_udc_ep	*ep;
1808 	struct pch_udc_request	*req;
1809 	struct pch_udc_dev	*dev;
1810 
1811 	if (!usbep || !usbreq)
1812 		return;
1813 	ep = container_of(usbep, struct pch_udc_ep, ep);
1814 	req = container_of(usbreq, struct pch_udc_request, req);
1815 	dev = ep->dev;
1816 	if (!list_empty(&req->queue))
1817 		dev_err(&dev->pdev->dev, "%s: %s req=0x%p queue not empty\n",
1818 			__func__, usbep->name, req);
1819 	if (req->td_data != NULL) {
1820 		if (req->chain_len > 1)
1821 			pch_udc_free_dma_chain(ep->dev, req);
1822 		pci_pool_free(ep->dev->data_requests, req->td_data,
1823 			      req->td_data_phys);
1824 	}
1825 	kfree(req);
1826 }
1827 
1828 /**
1829  * pch_udc_pcd_queue() - This function queues a request packet. It is called
1830  *			by the gadget driver
1831  * @usbep:	Reference to the USB endpoint structure
1832  * @usbreq:	Reference to the USB request
1833  * @gfp:	Flag to be used while mapping the data buffer
1834  *
1835  * Return codes:
1836  *	0:			Success
1837  *	linux error number:	Failure
1838  */
1839 static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
1840 								 gfp_t gfp)
1841 {
1842 	int retval = 0;
1843 	struct pch_udc_ep	*ep;
1844 	struct pch_udc_dev	*dev;
1845 	struct pch_udc_request	*req;
1846 	unsigned long	iflags;
1847 
1848 	if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf)
1849 		return -EINVAL;
1850 	ep = container_of(usbep, struct pch_udc_ep, ep);
1851 	dev = ep->dev;
1852 	if (!ep->ep.desc && ep->num)
1853 		return -EINVAL;
1854 	req = container_of(usbreq, struct pch_udc_request, req);
1855 	if (!list_empty(&req->queue))
1856 		return -EINVAL;
1857 	if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
1858 		return -ESHUTDOWN;
1859 	spin_lock_irqsave(&dev->lock, iflags);
1860 	/* map the buffer for dma */
1861 	if (usbreq->length &&
1862 	    ((usbreq->dma == DMA_ADDR_INVALID) || !usbreq->dma)) {
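		/*
		 * A brief note, assuming the controller's DMA engine needs
		 * 4-byte aligned buffers: aligned buffers are mapped in
		 * place, while unaligned ones are copied into a kzalloc'ed
		 * bounce buffer (req->buf) and that copy is mapped instead.
		 */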
1863 		if (!((unsigned long)(usbreq->buf) & 0x03)) {
1864 			if (ep->in)
1865 				usbreq->dma = dma_map_single(&dev->pdev->dev,
1866 							     usbreq->buf,
1867 							     usbreq->length,
1868 							     DMA_TO_DEVICE);
1869 			else
1870 				usbreq->dma = dma_map_single(&dev->pdev->dev,
1871 							     usbreq->buf,
1872 							     usbreq->length,
1873 							     DMA_FROM_DEVICE);
1874 		} else {
1875 			req->buf = kzalloc(usbreq->length, GFP_ATOMIC);
1876 			if (!req->buf) {
1877 				retval = -ENOMEM;
1878 				goto probe_end;
1879 			}
1880 			if (ep->in) {
1881 				memcpy(req->buf, usbreq->buf, usbreq->length);
1882 				req->dma = dma_map_single(&dev->pdev->dev,
1883 							  req->buf,
1884 							  usbreq->length,
1885 							  DMA_TO_DEVICE);
1886 			} else
1887 				req->dma = dma_map_single(&dev->pdev->dev,
1888 							  req->buf,
1889 							  usbreq->length,
1890 							  DMA_FROM_DEVICE);
1891 		}
1892 		req->dma_mapped = 1;
1893 	}
1894 	if (usbreq->length > 0) {
1895 		retval = prepare_dma(ep, req, GFP_ATOMIC);
1896 		if (retval)
1897 			goto probe_end;
1898 	}
1899 	usbreq->actual = 0;
1900 	usbreq->status = -EINPROGRESS;
1901 	req->dma_done = 0;
1902 	if (list_empty(&ep->queue) && !ep->halted) {
1903 		/* no pending transfer, so start this req */
1904 		if (!usbreq->length) {
1905 			process_zlp(ep, req);
1906 			retval = 0;
1907 			goto probe_end;
1908 		}
1909 		if (!ep->in) {
1910 			pch_udc_start_rxrequest(ep, req);
1911 		} else {
1912 			/*
1913 			 * For IN transfers the descriptors will be programmed
1914 			 * and the P bit will be set when
1915 			 * we get an IN token.
1916 			 */
1917 			pch_udc_wait_ep_stall(ep);
1918 			pch_udc_ep_clear_nak(ep);
1919 			pch_udc_enable_ep_interrupts(ep->dev, (1 << ep->num));
1920 		}
1921 	}
1922 	/* Now add this request to the ep's pending requests */
1923 	if (req != NULL)
1924 		list_add_tail(&req->queue, &ep->queue);
1925 
1926 probe_end:
1927 	spin_unlock_irqrestore(&dev->lock, iflags);
1928 	return retval;
1929 }
1930 
1931 /**
1932  * pch_udc_pcd_dequeue() - This function de-queues a request packet.
1933  *				It is called by the gadget driver
1934  * @usbep:	Reference to the USB endpoint structure
1935  * @usbreq:	Reference to the USB request
1936  *
1937  * Return codes:
1938  *	0:			Success
1939  *	linux error number:	Failure
1940  */
1941 static int pch_udc_pcd_dequeue(struct usb_ep *usbep,
1942 				struct usb_request *usbreq)
1943 {
1944 	struct pch_udc_ep	*ep;
1945 	struct pch_udc_request	*req;
1946 	unsigned long		flags;
1947 	int ret = -EINVAL;
1948 
1949 	/* Validate the arguments before dereferencing the endpoint */
1950 	if (!usbep || !usbreq)
1951 		return ret;
1952 	ep = container_of(usbep, struct pch_udc_ep, ep);
1953 	if (!ep->ep.desc && ep->num)
1954 		return ret;
1955 	spin_lock_irqsave(&ep->dev->lock, flags);
1956 	/* make sure it's still queued on this endpoint */
1957 	list_for_each_entry(req, &ep->queue, queue) {
1958 		if (&req->req == usbreq) {
1959 			pch_udc_ep_set_nak(ep);
1960 			if (!list_empty(&req->queue))
1961 				complete_req(ep, req, -ECONNRESET);
1962 			ret = 0;
1963 			break;
1964 		}
1965 	}
1966 	spin_unlock_irqrestore(&ep->dev->lock, flags);
1967 	return ret;
1968 }
1969 
1970 /**
1971  * pch_udc_pcd_set_halt() - This function sets or clears the endpoint
1972  *			    halt feature
1973  * @usbep:	Reference to the USB endpoint structure
1974  * @halt:	Specifies whether to set or clear the feature
1975  *
1976  * Return codes:
1977  *	0:			Success
1978  *	linux error number:	Failure
1979  */
1980 static int pch_udc_pcd_set_halt(struct usb_ep *usbep, int halt)
1981 {
1982 	struct pch_udc_ep	*ep;
1983 	struct pch_udc_dev	*dev;
1984 	unsigned long iflags;
1985 	int ret;
1986 
1987 	if (!usbep)
1988 		return -EINVAL;
1989 	ep = container_of(usbep, struct pch_udc_ep, ep);
1990 	dev = ep->dev;
1991 	if (!ep->ep.desc && !ep->num)
1992 		return -EINVAL;
1993 	if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
1994 		return -ESHUTDOWN;
1995 	spin_lock_irqsave(&udc_stall_spinlock, iflags);
1996 	if (list_empty(&ep->queue)) {
1997 		if (halt) {
1998 			if (ep->num == PCH_UDC_EP0)
1999 				ep->dev->stall = 1;
2000 			pch_udc_ep_set_stall(ep);
2001 			pch_udc_enable_ep_interrupts(ep->dev,
2002 						     PCH_UDC_EPINT(ep->in,
2003 								   ep->num));
2004 		} else {
2005 			pch_udc_ep_clear_stall(ep);
2006 		}
2007 		ret = 0;
2008 	} else {
2009 		ret = -EAGAIN;
2010 	}
2011 	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
2012 	return ret;
2013 }
2014 
2015 /**
2016  * pch_udc_pcd_set_wedge() - This function sets the endpoint wedge feature
2017  *				(a halt that the host cannot clear with a
2018  *				Clear Feature request)
2019  * @usbep:	Reference to the USB endpoint structure
2020  *
2021  * Return codes:
2022  *	0:			Success
2023  *	linux error number:	Failure
2024  */
2025 static int pch_udc_pcd_set_wedge(struct usb_ep *usbep)
2026 {
2027 	struct pch_udc_ep	*ep;
2028 	struct pch_udc_dev	*dev;
2029 	unsigned long iflags;
2030 	int ret;
2031 
2032 	if (!usbep)
2033 		return -EINVAL;
2034 	ep = container_of(usbep, struct pch_udc_ep, ep);
2035 	dev = ep->dev;
2036 	if (!ep->ep.desc && !ep->num)
2037 		return -EINVAL;
2038 	if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
2039 		return -ESHUTDOWN;
2040 	spin_lock_irqsave(&udc_stall_spinlock, iflags);
2041 	if (!list_empty(&ep->queue)) {
2042 		ret = -EAGAIN;
2043 	} else {
2044 		if (ep->num == PCH_UDC_EP0)
2045 			ep->dev->stall = 1;
2046 		pch_udc_ep_set_stall(ep);
2047 		pch_udc_enable_ep_interrupts(ep->dev,
2048 					     PCH_UDC_EPINT(ep->in, ep->num));
2049 		ep->dev->prot_stall = 1;
2050 		ret = 0;
2051 	}
2052 	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
2053 	return ret;
2054 }
2055 
2056 /**
2057  * pch_udc_pcd_fifo_flush() - This function flushes the FIFO of the specified endpoint
2058  * @usbep:	Reference to the USB endpoint structure
2059  */
2060 static void pch_udc_pcd_fifo_flush(struct usb_ep *usbep)
2061 {
2062 	struct pch_udc_ep  *ep;
2063 
2064 	if (!usbep)
2065 		return;
2066 
2067 	ep = container_of(usbep, struct pch_udc_ep, ep);
2068 	if (ep->ep.desc || !ep->num)
2069 		pch_udc_ep_fifo_flush(ep, ep->in);
2070 }
2071 
2072 static const struct usb_ep_ops pch_udc_ep_ops = {
2073 	.enable		= pch_udc_pcd_ep_enable,
2074 	.disable	= pch_udc_pcd_ep_disable,
2075 	.alloc_request	= pch_udc_alloc_request,
2076 	.free_request	= pch_udc_free_request,
2077 	.queue		= pch_udc_pcd_queue,
2078 	.dequeue	= pch_udc_pcd_dequeue,
2079 	.set_halt	= pch_udc_pcd_set_halt,
2080 	.set_wedge	= pch_udc_pcd_set_wedge,
2081 	.fifo_status	= NULL,
2082 	.fifo_flush	= pch_udc_pcd_fifo_flush,
2083 };
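
/*
 * Usage sketch (not part of this driver): a gadget driver reaches the
 * per-endpoint operations above through the generic usb_ep_*() helpers,
 * e.g. usb_ep_alloc_request() invokes .alloc_request, usb_ep_queue()
 * invokes .queue and usb_ep_free_request() invokes .free_request.  The
 * names "my_ep", "my_complete", "buf" and "len" below are hypothetical
 * and used only for illustration:
 *
 *	struct usb_request *req;
 *
 *	req = usb_ep_alloc_request(my_ep, GFP_KERNEL);
 *	if (!req)
 *		return -ENOMEM;
 *	req->buf = buf;
 *	req->length = len;
 *	req->complete = my_complete;
 *	if (usb_ep_queue(my_ep, req, GFP_ATOMIC))
 *		usb_ep_free_request(my_ep, req);
 */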
2084 
2085 /**
2086  * pch_udc_init_setup_buff() - This function initializes the SETUP buffer
2087  * @td_stp:	Reference to the SETUP buffer structure
2088  */
2089 static void pch_udc_init_setup_buff(struct pch_udc_stp_dma_desc *td_stp)
2090 {
2091 	static u32	pky_marker;
2092 
2093 	if (!td_stp)
2094 		return;
2095 	td_stp->reserved = ++pky_marker;
2096 	memset(&td_stp->request, 0xFF, sizeof td_stp->request);
2097 	td_stp->status = PCH_UDC_BS_HST_RDY;
2098 }
2099 
2100 /**
2101  * pch_udc_start_next_txrequest() - This function starts
2102  *					the next transmit request
2103  * @ep:	Reference to the endpoint structure
2104  */
2105 static void pch_udc_start_next_txrequest(struct pch_udc_ep *ep)
2106 {
2107 	struct pch_udc_request *req;
2108 	struct pch_udc_data_dma_desc *td_data;
2109 
2110 	if (pch_udc_read_ep_control(ep) & UDC_EPCTL_P)
2111 		return;
2112 
2113 	if (list_empty(&ep->queue))
2114 		return;
2115 
2116 	/* next request */
2117 	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2118 	if (req->dma_going)
2119 		return;
2120 	if (!req->td_data)
2121 		return;
2122 	pch_udc_wait_ep_stall(ep);
2123 	req->dma_going = 1;
2124 	pch_udc_ep_set_ddptr(ep, 0);
2125 	td_data = req->td_data;
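	/* Hand every descriptor in the chain back to the hardware (HOST_RDY) */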
2126 	while (1) {
2127 		td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
2128 				   PCH_UDC_BS_HST_RDY;
2129 		if ((td_data->status & PCH_UDC_DMA_LAST) == PCH_UDC_DMA_LAST)
2130 			break;
2131 		td_data = phys_to_virt(td_data->next);
2132 	}
2133 	pch_udc_ep_set_ddptr(ep, req->td_data_phys);
2134 	pch_udc_set_dma(ep->dev, DMA_DIR_TX);
2135 	pch_udc_ep_set_pd(ep);
2136 	pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
2137 	pch_udc_ep_clear_nak(ep);
2138 }
2139 
2140 /**
2141  * pch_udc_complete_transfer() - This function completes a transfer
2142  * @ep:		Reference to the endpoint structure
2143  */
2144 static void pch_udc_complete_transfer(struct pch_udc_ep *ep)
2145 {
2146 	struct pch_udc_request *req;
2147 	struct pch_udc_dev *dev = ep->dev;
2148 
2149 	if (list_empty(&ep->queue))
2150 		return;
2151 	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2152 	if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
2153 	    PCH_UDC_BS_DMA_DONE)
2154 		return;
2155 	if ((req->td_data_last->status & PCH_UDC_RXTX_STS) !=
2156 	     PCH_UDC_RTS_SUCC) {
2157 		dev_err(&dev->pdev->dev, "Invalid RXTX status (0x%08x) "
2158 			"epstatus=0x%08x\n",
2159 		       (req->td_data_last->status & PCH_UDC_RXTX_STS),
2160 		       (int)(ep->epsts));
2161 		return;
2162 	}
2163 
2164 	req->req.actual = req->req.length;
2165 	req->td_data_last->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
2166 	req->td_data->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
2167 	complete_req(ep, req, 0);
2168 	req->dma_going = 0;
2169 	if (!list_empty(&ep->queue)) {
2170 		pch_udc_wait_ep_stall(ep);
2171 		pch_udc_ep_clear_nak(ep);
2172 		pch_udc_enable_ep_interrupts(ep->dev,
2173 					     PCH_UDC_EPINT(ep->in, ep->num));
2174 	} else {
2175 		pch_udc_disable_ep_interrupts(ep->dev,
2176 					      PCH_UDC_EPINT(ep->in, ep->num));
2177 	}
2178 }
2179 
2180 /**
2181  * pch_udc_complete_receiver() - This function completes a receive (OUT) transfer
2182  * @ep:		Reference to the endpoint structure
2183  */
2184 static void pch_udc_complete_receiver(struct pch_udc_ep *ep)
2185 {
2186 	struct pch_udc_request *req;
2187 	struct pch_udc_dev *dev = ep->dev;
2188 	unsigned int count;
2189 	struct pch_udc_data_dma_desc *td;
2190 	dma_addr_t addr;
2191 
2192 	if (list_empty(&ep->queue))
2193 		return;
2194 	/* next request */
2195 	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2196 	pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
2197 	pch_udc_ep_set_ddptr(ep, 0);
2198 	if ((req->td_data_last->status & PCH_UDC_BUFF_STS) ==
2199 	    PCH_UDC_BS_DMA_DONE)
2200 		td = req->td_data_last;
2201 	else
2202 		td = req->td_data;
2203 
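	/*
	 * Walk the descriptor chain until the descriptor flagged DMA_LAST
	 * reports DMA done; its RXTX_BYTES field holds the received length.
	 */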
2204 	while (1) {
2205 		if ((td->status & PCH_UDC_RXTX_STS) != PCH_UDC_RTS_SUCC) {
2206 			dev_err(&dev->pdev->dev, "Invalid RXTX status=0x%08x "
2207 				"epstatus=0x%08x\n",
2208 				(req->td_data->status & PCH_UDC_RXTX_STS),
2209 				(int)(ep->epsts));
2210 			return;
2211 		}
2212 		if ((td->status & PCH_UDC_BUFF_STS) == PCH_UDC_BS_DMA_DONE)
2213 			if (td->status & PCH_UDC_DMA_LAST) {
2214 				count = td->status & PCH_UDC_RXTX_BYTES;
2215 				break;
2216 			}
2217 		if (td == req->td_data_last) {
2218 			dev_err(&dev->pdev->dev, "Not complete RX descriptor");
2219 			return;
2220 		}
2221 		addr = (dma_addr_t)td->next;
2222 		td = phys_to_virt(addr);
2223 	}
2224 	/* on 64k packets the RXBYTES field is zero */
2225 	if (!count && (req->req.length == UDC_DMA_MAXPACKET))
2226 		count = UDC_DMA_MAXPACKET;
2227 	req->td_data->status |= PCH_UDC_DMA_LAST;
2228 	td->status |= PCH_UDC_BS_HST_BSY;
2229 
2230 	req->dma_going = 0;
2231 	req->req.actual = count;
2232 	complete_req(ep, req, 0);
2233 	/* If there is a new or failed request, try that now */
2234 	if (!list_empty(&ep->queue)) {
2235 		req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2236 		pch_udc_start_rxrequest(ep, req);
2237 	}
2238 }
2239 
2240 /**
2241  * pch_udc_svc_data_in() - This function processes endpoint interrupts
2242  *				for IN endpoints
2243  * @dev:	Reference to the device structure
2244  * @ep_num:	Endpoint that generated the interrupt
2245  */
2246 static void pch_udc_svc_data_in(struct pch_udc_dev *dev, int ep_num)
2247 {
2248 	u32	epsts;
2249 	struct pch_udc_ep	*ep;
2250 
2251 	ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
2252 	epsts = ep->epsts;
2253 	ep->epsts = 0;
2254 
2255 	if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA  | UDC_EPSTS_HE |
2256 		       UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
2257 		       UDC_EPSTS_RSS | UDC_EPSTS_XFERDONE)))
2258 		return;
2259 	if ((epsts & UDC_EPSTS_BNA))
2260 		return;
2261 	if (epsts & UDC_EPSTS_HE)
2262 		return;
2263 	if (epsts & UDC_EPSTS_RSS) {
2264 		pch_udc_ep_set_stall(ep);
2265 		pch_udc_enable_ep_interrupts(ep->dev,
2266 					     PCH_UDC_EPINT(ep->in, ep->num));
2267 	}
2268 	if (epsts & UDC_EPSTS_RCS) {
2269 		if (!dev->prot_stall) {
2270 			pch_udc_ep_clear_stall(ep);
2271 		} else {
2272 			pch_udc_ep_set_stall(ep);
2273 			pch_udc_enable_ep_interrupts(ep->dev,
2274 						PCH_UDC_EPINT(ep->in, ep->num));
2275 		}
2276 	}
2277 	if (epsts & UDC_EPSTS_TDC)
2278 		pch_udc_complete_transfer(ep);
2279 	/* On IN interrupt, provide data if we have any */
2280 	if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_RSS) &&
2281 	    !(epsts & UDC_EPSTS_TDC) && !(epsts & UDC_EPSTS_TXEMPTY))
2282 		pch_udc_start_next_txrequest(ep);
2283 }
2284 
2285 /**
2286  * pch_udc_svc_data_out() - Handles interrupts from OUT endpoint
2287  * @dev:	Reference to the device structure
2288  * @ep_num:	Endpoint that generated the interrupt
2289  */
2290 static void pch_udc_svc_data_out(struct pch_udc_dev *dev, int ep_num)
2291 {
2292 	u32			epsts;
2293 	struct pch_udc_ep		*ep;
2294 	struct pch_udc_request		*req = NULL;
2295 
2296 	ep = &dev->ep[UDC_EPOUT_IDX(ep_num)];
2297 	epsts = ep->epsts;
2298 	ep->epsts = 0;
2299 
2300 	if ((epsts & UDC_EPSTS_BNA) && (!list_empty(&ep->queue))) {
2301 		/* next request */
2302 		req = list_entry(ep->queue.next, struct pch_udc_request,
2303 				 queue);
2304 		if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
2305 		     PCH_UDC_BS_DMA_DONE) {
2306 			if (!req->dma_going)
2307 				pch_udc_start_rxrequest(ep, req);
2308 			return;
2309 		}
2310 	}
2311 	if (epsts & UDC_EPSTS_HE)
2312 		return;
2313 	if (epsts & UDC_EPSTS_RSS) {
2314 		pch_udc_ep_set_stall(ep);
2315 		pch_udc_enable_ep_interrupts(ep->dev,
2316 					     PCH_UDC_EPINT(ep->in, ep->num));
2317 	}
2318 	if (epsts & UDC_EPSTS_RCS) {
2319 		if (!dev->prot_stall) {
2320 			pch_udc_ep_clear_stall(ep);
2321 		} else {
2322 			pch_udc_ep_set_stall(ep);
2323 			pch_udc_enable_ep_interrupts(ep->dev,
2324 						PCH_UDC_EPINT(ep->in, ep->num));
2325 		}
2326 	}
2327 	if (((epsts & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2328 	    UDC_EPSTS_OUT_DATA) {
2329 		if (ep->dev->prot_stall == 1) {
2330 			pch_udc_ep_set_stall(ep);
2331 			pch_udc_enable_ep_interrupts(ep->dev,
2332 						PCH_UDC_EPINT(ep->in, ep->num));
2333 		} else {
2334 			pch_udc_complete_receiver(ep);
2335 		}
2336 	}
2337 	if (list_empty(&ep->queue))
2338 		pch_udc_set_dma(dev, DMA_DIR_RX);
2339 }
2340 
2341 /**
2342  * pch_udc_svc_control_in() - Handle Control IN endpoint interrupts
2343  * @dev:	Reference to the device structure
2344  */
2345 static void pch_udc_svc_control_in(struct pch_udc_dev *dev)
2346 {
2347 	u32	epsts;
2348 	struct pch_udc_ep	*ep;
2349 	struct pch_udc_ep	*ep_out;
2350 
2351 	ep = &dev->ep[UDC_EP0IN_IDX];
2352 	ep_out = &dev->ep[UDC_EP0OUT_IDX];
2353 	epsts = ep->epsts;
2354 	ep->epsts = 0;
2355 
2356 	if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA | UDC_EPSTS_HE |
2357 		       UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
2358 		       UDC_EPSTS_XFERDONE)))
2359 		return;
2360 	if ((epsts & UDC_EPSTS_BNA))
2361 		return;
2362 	if (epsts & UDC_EPSTS_HE)
2363 		return;
2364 	if ((epsts & UDC_EPSTS_TDC) && (!dev->stall)) {
2365 		pch_udc_complete_transfer(ep);
2366 		pch_udc_clear_dma(dev, DMA_DIR_RX);
2367 		ep_out->td_data->status = (ep_out->td_data->status &
2368 					~PCH_UDC_BUFF_STS) |
2369 					PCH_UDC_BS_HST_RDY;
2370 		pch_udc_ep_clear_nak(ep_out);
2371 		pch_udc_set_dma(dev, DMA_DIR_RX);
2372 		pch_udc_ep_set_rrdy(ep_out);
2373 	}
2374 	/* On IN interrupt, provide data if we have any */
2375 	if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_TDC) &&
2376 	     !(epsts & UDC_EPSTS_TXEMPTY))
2377 		pch_udc_start_next_txrequest(ep);
2378 }
2379 
2380 /**
2381  * pch_udc_svc_control_out() - Routine that handles Control
2382  *					OUT endpoint interrupts
2383  * @dev:	Reference to the device structure
2384  */
2385 static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
2386 	__releases(&dev->lock)
2387 	__acquires(&dev->lock)
2388 {
2389 	u32	stat;
2390 	int setup_supported;
2391 	struct pch_udc_ep	*ep;
2392 
2393 	ep = &dev->ep[UDC_EP0OUT_IDX];
2394 	stat = ep->epsts;
2395 	ep->epsts = 0;
2396 
2397 	/* If setup data */
2398 	if (((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2399 	    UDC_EPSTS_OUT_SETUP) {
2400 		dev->stall = 0;
2401 		dev->ep[UDC_EP0IN_IDX].halted = 0;
2402 		dev->ep[UDC_EP0OUT_IDX].halted = 0;
2403 		dev->setup_data = ep->td_stp->request;
2404 		pch_udc_init_setup_buff(ep->td_stp);
2405 		pch_udc_clear_dma(dev, DMA_DIR_RX);
2406 		pch_udc_ep_fifo_flush(&(dev->ep[UDC_EP0IN_IDX]),
2407 				      dev->ep[UDC_EP0IN_IDX].in);
2408 		if ((dev->setup_data.bRequestType & USB_DIR_IN))
2409 			dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
2410 		else /* OUT */
2411 			dev->gadget.ep0 = &ep->ep;
2412 		spin_unlock(&dev->lock);
2413 		/* If Mass storage Reset */
2414 		if ((dev->setup_data.bRequestType == 0x21) &&
2415 		    (dev->setup_data.bRequest == 0xFF))
2416 			dev->prot_stall = 0;
2417 		/* call gadget with setup data received */
2418 		setup_supported = dev->driver->setup(&dev->gadget,
2419 						     &dev->setup_data);
2420 		spin_lock(&dev->lock);
2421 
2422 		if (dev->setup_data.bRequestType & USB_DIR_IN) {
2423 			ep->td_data->status = (ep->td_data->status &
2424 						~PCH_UDC_BUFF_STS) |
2425 						PCH_UDC_BS_HST_RDY;
2426 			pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
2427 		}
2428 		/* ep0 in returns data on IN phase */
2429 		if (setup_supported >= 0 && setup_supported <
2430 					    UDC_EP0IN_MAX_PKT_SIZE) {
2431 			pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
2432 			/* Gadget would have queued a request when
2433 			 * we called the setup */
2434 			if (!(dev->setup_data.bRequestType & USB_DIR_IN)) {
2435 				pch_udc_set_dma(dev, DMA_DIR_RX);
2436 				pch_udc_ep_clear_nak(ep);
2437 			}
2438 		} else if (setup_supported < 0) {
2439 			/* if unsupported request, then stall */
2440 			pch_udc_ep_set_stall(&(dev->ep[UDC_EP0IN_IDX]));
2441 			pch_udc_enable_ep_interrupts(ep->dev,
2442 						PCH_UDC_EPINT(ep->in, ep->num));
2443 			dev->stall = 0;
2444 			pch_udc_set_dma(dev, DMA_DIR_RX);
2445 		} else {
2446 			dev->waiting_zlp_ack = 1;
2447 		}
2448 	} else if ((((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2449 		     UDC_EPSTS_OUT_DATA) && !dev->stall) {
2450 		pch_udc_clear_dma(dev, DMA_DIR_RX);
2451 		pch_udc_ep_set_ddptr(ep, 0);
2452 		if (!list_empty(&ep->queue)) {
2453 			ep->epsts = stat;
2454 			pch_udc_svc_data_out(dev, PCH_UDC_EP0);
2455 		}
2456 		pch_udc_set_dma(dev, DMA_DIR_RX);
2457 	}
2458 	pch_udc_ep_set_rrdy(ep);
2459 }
2460 
2461 
2462 /**
2463  * pch_udc_postsvc_epinters() - This function enables endpoint interrupts
2464  *				and clears NAK status
2465  * @dev:	Reference to the device structure
2466  * @ep_num:	End point number
2467  */
2468 static void pch_udc_postsvc_epinters(struct pch_udc_dev *dev, int ep_num)
2469 {
2470 	struct pch_udc_ep	*ep;
2471 	struct pch_udc_request *req;
2472 
2473 	ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
2474 	if (!list_empty(&ep->queue)) {
2475 		req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2476 		pch_udc_enable_ep_interrupts(ep->dev,
2477 					     PCH_UDC_EPINT(ep->in, ep->num));
2478 		pch_udc_ep_clear_nak(ep);
2479 	}
2480 }
2481 
2482 /**
2483  * pch_udc_read_all_epstatus() - This function reads all endpoint status
2484  * @dev:	Reference to the device structure
2485  * @ep_intr:	Status of endpoint interrupt
2486  */
2487 static void pch_udc_read_all_epstatus(struct pch_udc_dev *dev, u32 ep_intr)
2488 {
2489 	int i;
2490 	struct pch_udc_ep	*ep;
2491 
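	/*
	 * In the endpoint interrupt status word the low 16 bits flag the IN
	 * endpoints and the upper 16 bits flag the corresponding OUT
	 * endpoints, hence the 0x1 and 0x10000 masks below.
	 */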
2492 	for (i = 0; i < PCH_UDC_USED_EP_NUM; i++) {
2493 		/* IN */
2494 		if (ep_intr & (0x1 << i)) {
2495 			ep = &dev->ep[UDC_EPIN_IDX(i)];
2496 			ep->epsts = pch_udc_read_ep_status(ep);
2497 			pch_udc_clear_ep_status(ep, ep->epsts);
2498 		}
2499 		/* OUT */
2500 		if (ep_intr & (0x10000 << i)) {
2501 			ep = &dev->ep[UDC_EPOUT_IDX(i)];
2502 			ep->epsts = pch_udc_read_ep_status(ep);
2503 			pch_udc_clear_ep_status(ep, ep->epsts);
2504 		}
2505 	}
2506 }
2507 
2508 /**
2509  * pch_udc_activate_control_ep() - This function enables the control endpoints
2510  *					for traffic after a reset
2511  * @dev:	Reference to the device structure
2512  */
2513 static void pch_udc_activate_control_ep(struct pch_udc_dev *dev)
2514 {
2515 	struct pch_udc_ep	*ep;
2516 	u32 val;
2517 
2518 	/* Setup the IN endpoint */
2519 	ep = &dev->ep[UDC_EP0IN_IDX];
2520 	pch_udc_clear_ep_control(ep);
2521 	pch_udc_ep_fifo_flush(ep, ep->in);
2522 	pch_udc_ep_set_bufsz(ep, UDC_EP0IN_BUFF_SIZE, ep->in);
2523 	pch_udc_ep_set_maxpkt(ep, UDC_EP0IN_MAX_PKT_SIZE);
2524 	/* Initialize the IN EP Descriptor */
2525 	ep->td_data      = NULL;
2526 	ep->td_stp       = NULL;
2527 	ep->td_data_phys = 0;
2528 	ep->td_stp_phys  = 0;
2529 
2530 	/* Setup the OUT endpoint */
2531 	ep = &dev->ep[UDC_EP0OUT_IDX];
2532 	pch_udc_clear_ep_control(ep);
2533 	pch_udc_ep_fifo_flush(ep, ep->in);
2534 	pch_udc_ep_set_bufsz(ep, UDC_EP0OUT_BUFF_SIZE, ep->in);
2535 	pch_udc_ep_set_maxpkt(ep, UDC_EP0OUT_MAX_PKT_SIZE);
2536 	val = UDC_EP0OUT_MAX_PKT_SIZE << UDC_CSR_NE_MAX_PKT_SHIFT;
2537 	pch_udc_write_csr(ep->dev, val, UDC_EP0OUT_IDX);
2538 
2539 	/* Initialize the SETUP buffer */
2540 	pch_udc_init_setup_buff(ep->td_stp);
2541 	/* Write the pointer address of dma descriptor */
2542 	pch_udc_ep_set_subptr(ep, ep->td_stp_phys);
2543 	/* Write the pointer address of Setup descriptor */
2544 	pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
2545 
2546 	/* Initialize the dma descriptor */
2547 	ep->td_data->status  = PCH_UDC_DMA_LAST;
2548 	ep->td_data->dataptr = dev->dma_addr;
2549 	ep->td_data->next    = ep->td_data_phys;
2550 
2551 	pch_udc_ep_clear_nak(ep);
2552 }
2553 
2554 
2555 /**
2556  * pch_udc_svc_ur_interrupt() - This function handles a USB reset interrupt
2557  * @dev:	Reference to driver structure
2558  */
2559 static void pch_udc_svc_ur_interrupt(struct pch_udc_dev *dev)
2560 {
2561 	struct pch_udc_ep	*ep;
2562 	int i;
2563 
2564 	pch_udc_clear_dma(dev, DMA_DIR_TX);
2565 	pch_udc_clear_dma(dev, DMA_DIR_RX);
2566 	/* Mask all endpoint interrupts */
2567 	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
2568 	/* clear all endpoint interrupts */
2569 	pch_udc_write_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
2570 
2571 	for (i = 0; i < PCH_UDC_EP_NUM; i++) {
2572 		ep = &dev->ep[i];
2573 		pch_udc_clear_ep_status(ep, UDC_EPSTS_ALL_CLR_MASK);
2574 		pch_udc_clear_ep_control(ep);
2575 		pch_udc_ep_set_ddptr(ep, 0);
2576 		pch_udc_write_csr(ep->dev, 0x00, i);
2577 	}
2578 	dev->stall = 0;
2579 	dev->prot_stall = 0;
2580 	dev->waiting_zlp_ack = 0;
2581 	dev->set_cfg_not_acked = 0;
2582 
2583 	/* disable ep to empty req queue. Skip the control EP's */
2584 	for (i = 0; i < (PCH_UDC_USED_EP_NUM*2); i++) {
2585 		ep = &dev->ep[i];
2586 		pch_udc_ep_set_nak(ep);
2587 		pch_udc_ep_fifo_flush(ep, ep->in);
2588 		/* Complete request queue */
2589 		empty_req_queue(ep);
2590 	}
2591 	if (dev->driver && dev->driver->disconnect) {
2592 		spin_unlock(&dev->lock);
2593 		dev->driver->disconnect(&dev->gadget);
2594 		spin_lock(&dev->lock);
2595 	}
2596 }
2597 
2598 /**
2599  * pch_udc_svc_enum_interrupt() - This function handles a USB speed enumeration
2600  *				done interrupt
2601  * @dev:	Reference to driver structure
2602  */
2603 static void pch_udc_svc_enum_interrupt(struct pch_udc_dev *dev)
2604 {
2605 	u32 dev_stat, dev_speed;
2606 	u32 speed = USB_SPEED_FULL;
2607 
2608 	dev_stat = pch_udc_read_device_status(dev);
2609 	dev_speed = (dev_stat & UDC_DEVSTS_ENUM_SPEED_MASK) >>
2610 						 UDC_DEVSTS_ENUM_SPEED_SHIFT;
2611 	switch (dev_speed) {
2612 	case UDC_DEVSTS_ENUM_SPEED_HIGH:
2613 		speed = USB_SPEED_HIGH;
2614 		break;
2615 	case  UDC_DEVSTS_ENUM_SPEED_FULL:
2616 		speed = USB_SPEED_FULL;
2617 		break;
2618 	case  UDC_DEVSTS_ENUM_SPEED_LOW:
2619 		speed = USB_SPEED_LOW;
2620 		break;
2621 	default:
2622 		BUG();
2623 	}
2624 	dev->gadget.speed = speed;
2625 	pch_udc_activate_control_ep(dev);
2626 	pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 | UDC_EPINT_OUT_EP0);
2627 	pch_udc_set_dma(dev, DMA_DIR_TX);
2628 	pch_udc_set_dma(dev, DMA_DIR_RX);
2629 	pch_udc_ep_set_rrdy(&(dev->ep[UDC_EP0OUT_IDX]));
2630 
2631 	/* enable device interrupts */
2632 	pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
2633 					UDC_DEVINT_ES | UDC_DEVINT_ENUM |
2634 					UDC_DEVINT_SI | UDC_DEVINT_SC);
2635 }
2636 
2637 /**
2638  * pch_udc_svc_intf_interrupt() - This function handles a set interface
2639  *				  interrupt
2640  * @dev:	Reference to driver structure
2641  */
2642 static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev)
2643 {
2644 	u32 reg, dev_stat = 0;
2645 	int i, ret;
2646 
2647 	dev_stat = pch_udc_read_device_status(dev);
2648 	dev->cfg_data.cur_intf = (dev_stat & UDC_DEVSTS_INTF_MASK) >>
2649 							 UDC_DEVSTS_INTF_SHIFT;
2650 	dev->cfg_data.cur_alt = (dev_stat & UDC_DEVSTS_ALT_MASK) >>
2651 							 UDC_DEVSTS_ALT_SHIFT;
2652 	dev->set_cfg_not_acked = 1;
2653 	/* Construct the usb request for gadget driver and inform it */
2654 	memset(&dev->setup_data, 0 , sizeof dev->setup_data);
2655 	dev->setup_data.bRequest = USB_REQ_SET_INTERFACE;
2656 	dev->setup_data.bRequestType = USB_RECIP_INTERFACE;
2657 	dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_alt);
2658 	dev->setup_data.wIndex = cpu_to_le16(dev->cfg_data.cur_intf);
2659 	/* program the Endpoint Cfg registers */
2660 	/* Only one end point cfg register */
2661 	reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
2662 	reg = (reg & ~UDC_CSR_NE_INTF_MASK) |
2663 	      (dev->cfg_data.cur_intf << UDC_CSR_NE_INTF_SHIFT);
2664 	reg = (reg & ~UDC_CSR_NE_ALT_MASK) |
2665 	      (dev->cfg_data.cur_alt << UDC_CSR_NE_ALT_SHIFT);
2666 	pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
2667 	for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
2668 		/* clear stall bits */
2669 		pch_udc_ep_clear_stall(&(dev->ep[i]));
2670 		dev->ep[i].halted = 0;
2671 	}
2672 	dev->stall = 0;
2673 	spin_unlock(&dev->lock);
2674 	ret = dev->driver->setup(&dev->gadget, &dev->setup_data);
2675 	spin_lock(&dev->lock);
2676 }
2677 
2678 /**
2679  * pch_udc_svc_cfg_interrupt() - This function handles a set configuration
2680  *				interrupt
2681  * @dev:	Reference to driver structure
2682  */
2683 static void pch_udc_svc_cfg_interrupt(struct pch_udc_dev *dev)
2684 {
2685 	int i, ret;
2686 	u32 reg, dev_stat = 0;
2687 
2688 	dev_stat = pch_udc_read_device_status(dev);
2689 	dev->set_cfg_not_acked = 1;
2690 	dev->cfg_data.cur_cfg = (dev_stat & UDC_DEVSTS_CFG_MASK) >>
2691 				UDC_DEVSTS_CFG_SHIFT;
2692 	/* make usb request for gadget driver */
2693 	memset(&dev->setup_data, 0 , sizeof dev->setup_data);
2694 	dev->setup_data.bRequest = USB_REQ_SET_CONFIGURATION;
2695 	dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_cfg);
2696 	/* program the NE registers */
2697 	/* Only one end point cfg register */
2698 	reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
2699 	reg = (reg & ~UDC_CSR_NE_CFG_MASK) |
2700 	      (dev->cfg_data.cur_cfg << UDC_CSR_NE_CFG_SHIFT);
2701 	pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
2702 	for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
2703 		/* clear stall bits */
2704 		pch_udc_ep_clear_stall(&(dev->ep[i]));
2705 		dev->ep[i].halted = 0;
2706 	}
2707 	dev->stall = 0;
2708 
2709 	/* call gadget zero with setup data received */
2710 	spin_unlock(&dev->lock);
2711 	ret = dev->driver->setup(&dev->gadget, &dev->setup_data);
2712 	spin_lock(&dev->lock);
2713 }
2714 
2715 /**
2716  * pch_udc_dev_isr() - This function services device interrupts
2717  *			by invoking appropriate routines.
2718  * @dev:	Reference to the device structure
2719  * @dev_intr:	The Device interrupt status.
2720  */
2721 static void pch_udc_dev_isr(struct pch_udc_dev *dev, u32 dev_intr)
2722 {
2723 	int vbus;
2724 
2725 	/* USB Reset Interrupt */
2726 	if (dev_intr & UDC_DEVINT_UR) {
2727 		pch_udc_svc_ur_interrupt(dev);
2728 		dev_dbg(&dev->pdev->dev, "USB_RESET\n");
2729 	}
2730 	/* Enumeration Done Interrupt */
2731 	if (dev_intr & UDC_DEVINT_ENUM) {
2732 		pch_udc_svc_enum_interrupt(dev);
2733 		dev_dbg(&dev->pdev->dev, "USB_ENUM\n");
2734 	}
2735 	/* Set Interface Interrupt */
2736 	if (dev_intr & UDC_DEVINT_SI)
2737 		pch_udc_svc_intf_interrupt(dev);
2738 	/* Set Config Interrupt */
2739 	if (dev_intr & UDC_DEVINT_SC)
2740 		pch_udc_svc_cfg_interrupt(dev);
2741 	/* USB Suspend interrupt */
2742 	if (dev_intr & UDC_DEVINT_US) {
2743 		if (dev->driver
2744 			&& dev->driver->suspend) {
2745 			spin_unlock(&dev->lock);
2746 			dev->driver->suspend(&dev->gadget);
2747 			spin_lock(&dev->lock);
2748 		}
2749 
2750 		vbus = pch_vbus_gpio_get_value(dev);
2751 		if ((dev->vbus_session == 0)
2752 			&& (vbus != 1)) {
2753 			if (dev->driver && dev->driver->disconnect) {
2754 				spin_unlock(&dev->lock);
2755 				dev->driver->disconnect(&dev->gadget);
2756 				spin_lock(&dev->lock);
2757 			}
2758 			pch_udc_reconnect(dev);
2759 		} else if ((dev->vbus_session == 0)
2760 			&& (vbus == 1)
2761 			&& !dev->vbus_gpio.intr)
2762 			schedule_work(&dev->vbus_gpio.irq_work_fall);
2763 
2764 		dev_dbg(&dev->pdev->dev, "USB_SUSPEND\n");
2765 	}
2766 	/* Clear the SOF interrupt, if enabled */
2767 	if (dev_intr & UDC_DEVINT_SOF)
2768 		dev_dbg(&dev->pdev->dev, "SOF\n");
2769 	/* ES interrupt, IDLE > 3ms on the USB */
2770 	if (dev_intr & UDC_DEVINT_ES)
2771 		dev_dbg(&dev->pdev->dev, "ES\n");
2772 	/* RWKP interrupt */
2773 	if (dev_intr & UDC_DEVINT_RWKP)
2774 		dev_dbg(&dev->pdev->dev, "RWKP\n");
2775 }
2776 
2777 /**
2778  * pch_udc_isr() - This function handles interrupts from the PCH USB Device
2779  * @irq:	Interrupt request number
2780  * @pdev:	Reference to the device structure
2781  */
2782 static irqreturn_t pch_udc_isr(int irq, void *pdev)
2783 {
2784 	struct pch_udc_dev *dev = (struct pch_udc_dev *) pdev;
2785 	u32 dev_intr, ep_intr;
2786 	int i;
2787 
2788 	dev_intr = pch_udc_read_device_interrupts(dev);
2789 	ep_intr = pch_udc_read_ep_interrupts(dev);
2790 
2791 	/* For a hot plug, this detects that the controller is hung up. */
2792 	if (dev_intr == ep_intr)
2793 		if (dev_intr == pch_udc_readl(dev, UDC_DEVCFG_ADDR)) {
2794 			dev_dbg(&dev->pdev->dev, "UDC: Hung up\n");
2795 			/* The controller is reset */
2796 			pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
2797 			return IRQ_HANDLED;
2798 		}
2799 	if (dev_intr)
2800 		/* Clear device interrupts */
2801 		pch_udc_write_device_interrupts(dev, dev_intr);
2802 	if (ep_intr)
2803 		/* Clear ep interrupts */
2804 		pch_udc_write_ep_interrupts(dev, ep_intr);
2805 	if (!dev_intr && !ep_intr)
2806 		return IRQ_NONE;
2807 	spin_lock(&dev->lock);
2808 	if (dev_intr)
2809 		pch_udc_dev_isr(dev, dev_intr);
2810 	if (ep_intr) {
2811 		pch_udc_read_all_epstatus(dev, ep_intr);
2812 		/* Process Control In interrupts, if present */
2813 		if (ep_intr & UDC_EPINT_IN_EP0) {
2814 			pch_udc_svc_control_in(dev);
2815 			pch_udc_postsvc_epinters(dev, 0);
2816 		}
2817 		/* Process Control Out interrupts, if present */
2818 		if (ep_intr & UDC_EPINT_OUT_EP0)
2819 			pch_udc_svc_control_out(dev);
2820 		/* Process data in end point interrupts */
2821 		for (i = 1; i < PCH_UDC_USED_EP_NUM; i++) {
2822 			if (ep_intr & (1 <<  i)) {
2823 				pch_udc_svc_data_in(dev, i);
2824 				pch_udc_postsvc_epinters(dev, i);
2825 			}
2826 		}
2827 		/* Process data out end point interrupts */
2828 		for (i = UDC_EPINT_OUT_SHIFT + 1; i < (UDC_EPINT_OUT_SHIFT +
2829 						 PCH_UDC_USED_EP_NUM); i++)
2830 			if (ep_intr & (1 <<  i))
2831 				pch_udc_svc_data_out(dev, i -
2832 							 UDC_EPINT_OUT_SHIFT);
2833 	}
2834 	spin_unlock(&dev->lock);
2835 	return IRQ_HANDLED;
2836 }
2837 
2838 /**
2839  * pch_udc_setup_ep0() - This function enables control endpoint for traffic
2840  * @dev:	Reference to the device structure
2841  */
2842 static void pch_udc_setup_ep0(struct pch_udc_dev *dev)
2843 {
2844 	/* enable ep0 interrupts */
2845 	pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 |
2846 						UDC_EPINT_OUT_EP0);
2847 	/* enable device interrupts */
2848 	pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
2849 				       UDC_DEVINT_ES | UDC_DEVINT_ENUM |
2850 				       UDC_DEVINT_SI | UDC_DEVINT_SC);
2851 }
2852 
2853 /**
2854  * gadget_release() - Free the gadget driver private data
2855  * @pdev:	reference to struct pci_dev
2856  */
2857 static void gadget_release(struct device *pdev)
2858 {
2859 	struct pch_udc_dev *dev = dev_get_drvdata(pdev);
2860 
2861 	kfree(dev);
2862 }
2863 
2864 /**
2865  * pch_udc_pcd_reinit() - This API initializes the endpoint structures
2866  * @dev:	Reference to the driver structure
2867  */
2868 static void pch_udc_pcd_reinit(struct pch_udc_dev *dev)
2869 {
2870 	const char *const ep_string[] = {
2871 		ep0_string, "ep0out", "ep1in", "ep1out", "ep2in", "ep2out",
2872 		"ep3in", "ep3out", "ep4in", "ep4out", "ep5in", "ep5out",
2873 		"ep6in", "ep6out", "ep7in", "ep7out", "ep8in", "ep8out",
2874 		"ep9in", "ep9out", "ep10in", "ep10out", "ep11in", "ep11out",
2875 		"ep12in", "ep12out", "ep13in", "ep13out", "ep14in", "ep14out",
2876 		"ep15in", "ep15out",
2877 	};
2878 	int i;
2879 
2880 	dev->gadget.speed = USB_SPEED_UNKNOWN;
2881 	INIT_LIST_HEAD(&dev->gadget.ep_list);
2882 
2883 	/* Initialize the endpoints structures */
2884 	memset(dev->ep, 0, sizeof dev->ep);
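	/*
	 * dev->ep[] alternates directions: even indices are IN endpoints and
	 * odd indices are OUT endpoints (cf. ep_string[] above and the
	 * "ep->in = ~i & 1" assignment below).
	 */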
2885 	for (i = 0; i < PCH_UDC_EP_NUM; i++) {
2886 		struct pch_udc_ep *ep = &dev->ep[i];
2887 		ep->dev = dev;
2888 		ep->halted = 1;
2889 		ep->num = i / 2;
2890 		ep->in = ~i & 1;
2891 		ep->ep.name = ep_string[i];
2892 		ep->ep.ops = &pch_udc_ep_ops;
2893 		if (ep->in)
2894 			ep->offset_addr = ep->num * UDC_EP_REG_SHIFT;
2895 		else
2896 			ep->offset_addr = (UDC_EPINT_OUT_SHIFT + ep->num) *
2897 					  UDC_EP_REG_SHIFT;
2898 		/* need to set ep->ep.maxpacket and set Default Configuration?*/
2899 		usb_ep_set_maxpacket_limit(&ep->ep, UDC_BULK_MAX_PKT_SIZE);
2900 		list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
2901 		INIT_LIST_HEAD(&ep->queue);
2902 	}
2903 	usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IDX].ep, UDC_EP0IN_MAX_PKT_SIZE);
2904 	usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IDX].ep, UDC_EP0OUT_MAX_PKT_SIZE);
2905 
2906 	/* remove ep0 in and out from the list.  They have their own pointers */
2907 	list_del_init(&dev->ep[UDC_EP0IN_IDX].ep.ep_list);
2908 	list_del_init(&dev->ep[UDC_EP0OUT_IDX].ep.ep_list);
2909 
2910 	dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
2911 	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
2912 }
2913 
2914 /**
2915  * pch_udc_pcd_init() - This API initializes the driver structure
2916  * @dev:	Reference to the driver structure
2917  *
2918  * Return codes:
2919  *	0: Success
2920  */
2921 static int pch_udc_pcd_init(struct pch_udc_dev *dev)
2922 {
2923 	pch_udc_init(dev);
2924 	pch_udc_pcd_reinit(dev);
2925 	pch_vbus_gpio_init(dev, vbus_gpio_port);
2926 	return 0;
2927 }
2928 
2929 /**
2930  * init_dma_pools() - create dma pools during initialization
2931  * @dev:	Reference to the driver structure
2932  */
2933 static int init_dma_pools(struct pch_udc_dev *dev)
2934 {
2935 	struct pch_udc_stp_dma_desc	*td_stp;
2936 	struct pch_udc_data_dma_desc	*td_data;
2937 
2938 	/* DMA setup */
2939 	dev->data_requests = pci_pool_create("data_requests", dev->pdev,
2940 		sizeof(struct pch_udc_data_dma_desc), 0, 0);
2941 	if (!dev->data_requests) {
2942 		dev_err(&dev->pdev->dev, "%s: can't get request data pool\n",
2943 			__func__);
2944 		return -ENOMEM;
2945 	}
2946 
2947 	/* dma desc for setup data */
2948 	dev->stp_requests = pci_pool_create("setup requests", dev->pdev,
2949 		sizeof(struct pch_udc_stp_dma_desc), 0, 0);
2950 	if (!dev->stp_requests) {
2951 		dev_err(&dev->pdev->dev, "%s: can't get setup request pool\n",
2952 			__func__);
2953 		return -ENOMEM;
2954 	}
2955 	/* setup */
2956 	td_stp = pci_pool_alloc(dev->stp_requests, GFP_KERNEL,
2957 				&dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
2958 	if (!td_stp) {
2959 		dev_err(&dev->pdev->dev,
2960 			"%s: can't allocate setup dma descriptor\n", __func__);
2961 		return -ENOMEM;
2962 	}
2963 	dev->ep[UDC_EP0OUT_IDX].td_stp = td_stp;
2964 
2965 	/* data: 0 packets !? */
2966 	td_data = pci_pool_alloc(dev->data_requests, GFP_KERNEL,
2967 				&dev->ep[UDC_EP0OUT_IDX].td_data_phys);
2968 	if (!td_data) {
2969 		dev_err(&dev->pdev->dev,
2970 			"%s: can't allocate data dma descriptor\n", __func__);
2971 		return -ENOMEM;
2972 	}
2973 	dev->ep[UDC_EP0OUT_IDX].td_data = td_data;
2974 	dev->ep[UDC_EP0IN_IDX].td_stp = NULL;
2975 	dev->ep[UDC_EP0IN_IDX].td_stp_phys = 0;
2976 	dev->ep[UDC_EP0IN_IDX].td_data = NULL;
2977 	dev->ep[UDC_EP0IN_IDX].td_data_phys = 0;
2978 
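	/*
	 * Presumably a bounce buffer for ep0 OUT data (4 * UDC_EP0OUT_BUFF_SIZE
	 * bytes); its mapped address, dev->dma_addr, is what the ep0 OUT data
	 * descriptor points at (see pch_udc_activate_control_ep()).
	 */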
2979 	dev->ep0out_buf = kzalloc(UDC_EP0OUT_BUFF_SIZE * 4, GFP_KERNEL);
2980 	if (!dev->ep0out_buf)
2981 		return -ENOMEM;
2982 	dev->dma_addr = dma_map_single(&dev->pdev->dev, dev->ep0out_buf,
2983 				       UDC_EP0OUT_BUFF_SIZE * 4,
2984 				       DMA_FROM_DEVICE);
2985 	return 0;
2986 }
2987 
2988 static int pch_udc_start(struct usb_gadget *g,
2989 		struct usb_gadget_driver *driver)
2990 {
2991 	struct pch_udc_dev	*dev = to_pch_udc(g);
2992 
2993 	driver->driver.bus = NULL;
2994 	dev->driver = driver;
2995 
2996 	/* get ready for ep0 traffic */
2997 	pch_udc_setup_ep0(dev);
2998 
2999 	/* clear SD */
3000 	if ((pch_vbus_gpio_get_value(dev) != 0) || !dev->vbus_gpio.intr)
3001 		pch_udc_clear_disconnect(dev);
3002 
3003 	dev->connected = 1;
3004 	return 0;
3005 }
3006 
3007 static int pch_udc_stop(struct usb_gadget *g,
3008 		struct usb_gadget_driver *driver)
3009 {
3010 	struct pch_udc_dev	*dev = to_pch_udc(g);
3011 
3012 	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
3013 
3014 	/* Ensures that there are no pending requests with this driver */
3015 	dev->driver = NULL;
3016 	dev->connected = 0;
3017 
3018 	/* set SD */
3019 	pch_udc_set_disconnect(dev);
3020 
3021 	return 0;
3022 }
3023 
3024 static void pch_udc_shutdown(struct pci_dev *pdev)
3025 {
3026 	struct pch_udc_dev *dev = pci_get_drvdata(pdev);
3027 
3028 	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
3029 	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
3030 
3031 	/* disable the pullup so the host will think we're gone */
3032 	pch_udc_set_disconnect(dev);
3033 }
3034 
3035 static void pch_udc_remove(struct pci_dev *pdev)
3036 {
3037 	struct pch_udc_dev	*dev = pci_get_drvdata(pdev);
3038 
3039 	usb_del_gadget_udc(&dev->gadget);
3040 
3041 	/* gadget driver must not be registered */
3042 	if (dev->driver)
3043 		dev_err(&pdev->dev,
3044 			"%s: gadget driver still bound!!!\n", __func__);
3045 	/* dma pool cleanup */
3046 	if (dev->data_requests)
3047 		pci_pool_destroy(dev->data_requests);
3048 
3049 	if (dev->stp_requests) {
3050 		/* clean up DMA descriptors for ep0out */
3051 		if (dev->ep[UDC_EP0OUT_IDX].td_stp) {
3052 			pci_pool_free(dev->stp_requests,
3053 				dev->ep[UDC_EP0OUT_IDX].td_stp,
3054 				dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
3055 		}
3056 		if (dev->ep[UDC_EP0OUT_IDX].td_data) {
3057 			pci_pool_free(dev->stp_requests,
3058 				dev->ep[UDC_EP0OUT_IDX].td_data,
3059 				dev->ep[UDC_EP0OUT_IDX].td_data_phys);
3060 		}
3061 		pci_pool_destroy(dev->stp_requests);
3062 	}
3063 
3064 	if (dev->dma_addr)
3065 		dma_unmap_single(&dev->pdev->dev, dev->dma_addr,
3066 				 UDC_EP0OUT_BUFF_SIZE * 4, DMA_FROM_DEVICE);
3067 	kfree(dev->ep0out_buf);
3068 
3069 	pch_vbus_gpio_free(dev);
3070 
3071 	pch_udc_exit(dev);
3072 
3073 	if (dev->irq_registered)
3074 		free_irq(pdev->irq, dev);
3075 	if (dev->base_addr)
3076 		iounmap(dev->base_addr);
3077 	if (dev->mem_region)
3078 		release_mem_region(dev->phys_addr,
3079 				   pci_resource_len(pdev, PCH_UDC_PCI_BAR));
3080 	if (dev->active)
3081 		pci_disable_device(pdev);
3082 	kfree(dev);
3083 }
3084 
3085 #ifdef CONFIG_PM
3086 static int pch_udc_suspend(struct pci_dev *pdev, pm_message_t state)
3087 {
3088 	struct pch_udc_dev *dev = pci_get_drvdata(pdev);
3089 
3090 	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
3091 	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
3092 
3093 	pci_disable_device(pdev);
3094 	pci_enable_wake(pdev, PCI_D3hot, 0);
3095 
3096 	if (pci_save_state(pdev)) {
3097 		dev_err(&pdev->dev,
3098 			"%s: could not save PCI config state\n", __func__);
3099 		return -ENOMEM;
3100 	}
3101 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
3102 	return 0;
3103 }
3104 
3105 static int pch_udc_resume(struct pci_dev *pdev)
3106 {
3107 	int ret;
3108 
3109 	pci_set_power_state(pdev, PCI_D0);
3110 	pci_restore_state(pdev);
3111 	ret = pci_enable_device(pdev);
3112 	if (ret) {
3113 		dev_err(&pdev->dev, "%s: pci_enable_device failed\n", __func__);
3114 		return ret;
3115 	}
3116 	pci_enable_wake(pdev, PCI_D3hot, 0);
3117 	return 0;
3118 }
3119 #else
3120 #define pch_udc_suspend	NULL
3121 #define pch_udc_resume	NULL
3122 #endif /* CONFIG_PM */
3123 
3124 static int pch_udc_probe(struct pci_dev *pdev,
3125 			  const struct pci_device_id *id)
3126 {
3127 	unsigned long		resource;
3128 	unsigned long		len;
3129 	int			retval;
3130 	struct pch_udc_dev	*dev;
3131 
3132 	/* init */
3133 	dev = kzalloc(sizeof *dev, GFP_KERNEL);
3134 	if (!dev) {
3135 		pr_err("%s: no memory for device structure\n", __func__);
3136 		return -ENOMEM;
3137 	}
3138 	/* pci setup */
3139 	if (pci_enable_device(pdev) < 0) {
3140 		kfree(dev);
3141 		pr_err("%s: pci_enable_device failed\n", __func__);
3142 		return -ENODEV;
3143 	}
3144 	dev->active = 1;
3145 	pci_set_drvdata(pdev, dev);
3146 
3147 	/* PCI resource allocation */
3148 	resource = pci_resource_start(pdev, 1);
3149 	len = pci_resource_len(pdev, 1);
3150 
3151 	if (!request_mem_region(resource, len, KBUILD_MODNAME)) {
3152 		dev_err(&pdev->dev, "%s: pci device used already\n", __func__);
3153 		retval = -EBUSY;
3154 		goto finished;
3155 	}
3156 	dev->phys_addr = resource;
3157 	dev->mem_region = 1;
3158 
3159 	dev->base_addr = ioremap_nocache(resource, len);
3160 	if (!dev->base_addr) {
3161 		pr_err("%s: device memory cannot be mapped\n", __func__);
3162 		retval = -ENOMEM;
3163 		goto finished;
3164 	}
3165 	if (!pdev->irq) {
3166 		dev_err(&pdev->dev, "%s: irq not set\n", __func__);
3167 		retval = -ENODEV;
3168 		goto finished;
3169 	}
3170 	/* initialize the hardware */
3171 	if (pch_udc_pcd_init(dev)) {
3172 		retval = -ENODEV;
3173 		goto finished;
3174 	}
3175 	if (request_irq(pdev->irq, pch_udc_isr, IRQF_SHARED, KBUILD_MODNAME,
3176 			dev)) {
3177 		dev_err(&pdev->dev, "%s: request_irq(%d) fail\n", __func__,
3178 			pdev->irq);
3179 		retval = -ENODEV;
3180 		goto finished;
3181 	}
3182 	dev->irq = pdev->irq;
3183 	dev->irq_registered = 1;
3184 
3185 	pci_set_master(pdev);
3186 	pci_try_set_mwi(pdev);
3187 
3188 	/* device struct setup */
3189 	spin_lock_init(&dev->lock);
3190 	dev->pdev = pdev;
3191 	dev->gadget.ops = &pch_udc_ops;
3192 
3193 	retval = init_dma_pools(dev);
3194 	if (retval)
3195 		goto finished;
3196 
3197 	dev->gadget.name = KBUILD_MODNAME;
3198 	dev->gadget.max_speed = USB_SPEED_HIGH;
3199 
3200 	/* Put the device in disconnected state till a driver is bound */
3201 	pch_udc_set_disconnect(dev);
3202 	retval = usb_add_gadget_udc_release(&pdev->dev, &dev->gadget,
3203 			gadget_release);
3204 	if (retval)
3205 		goto finished;
3206 	return 0;
3207 
3208 finished:
3209 	pch_udc_remove(pdev);
3210 	return retval;
3211 }
3212 
3213 static const struct pci_device_id pch_udc_pcidev_id[] = {
3214 	{
3215 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EG20T_UDC),
3216 		.class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
3217 		.class_mask = 0xffffffff,
3218 	},
3219 	{
3220 		PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7213_IOH_UDC),
3221 		.class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
3222 		.class_mask = 0xffffffff,
3223 	},
3224 	{
3225 		PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7831_IOH_UDC),
3226 		.class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
3227 		.class_mask = 0xffffffff,
3228 	},
3229 	{ 0 },
3230 };
3231 
3232 MODULE_DEVICE_TABLE(pci, pch_udc_pcidev_id);
3233 
3234 static struct pci_driver pch_udc_driver = {
3235 	.name =	KBUILD_MODNAME,
3236 	.id_table =	pch_udc_pcidev_id,
3237 	.probe =	pch_udc_probe,
3238 	.remove =	pch_udc_remove,
3239 	.suspend =	pch_udc_suspend,
3240 	.resume =	pch_udc_resume,
3241 	.shutdown =	pch_udc_shutdown,
3242 };
3243 
3244 module_pci_driver(pch_udc_driver);
3245 
3246 MODULE_DESCRIPTION("Intel EG20T USB Device Controller");
3247 MODULE_AUTHOR("LAPIS Semiconductor, <tomoya-linux@dsn.lapis-semi.com>");
3248 MODULE_LICENSE("GPL");
3249