xref: /openbmc/linux/drivers/usb/gadget/udc/pch_udc.c (revision c67e8ec0)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
4  */
5 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
6 #include <linux/kernel.h>
7 #include <linux/module.h>
8 #include <linux/pci.h>
9 #include <linux/delay.h>
10 #include <linux/errno.h>
11 #include <linux/list.h>
12 #include <linux/interrupt.h>
13 #include <linux/usb/ch9.h>
14 #include <linux/usb/gadget.h>
15 #include <linux/gpio.h>
16 #include <linux/irq.h>
17 
18 /* GPIO port for VBUS detection */
19 static int vbus_gpio_port = -1;		/* GPIO port number (-1: not used) */
20 
21 #define PCH_VBUS_PERIOD		3000	/* VBUS polling period (msec) */
22 #define PCH_VBUS_INTERVAL	10	/* VBUS polling interval (msec) */
23 
24 /* Address offset of Registers */
25 #define UDC_EP_REG_SHIFT	0x20	/* Offset to next EP */
26 
27 #define UDC_EPCTL_ADDR		0x00	/* Endpoint control */
28 #define UDC_EPSTS_ADDR		0x04	/* Endpoint status */
29 #define UDC_BUFIN_FRAMENUM_ADDR	0x08	/* buffer size in / frame number out */
30 #define UDC_BUFOUT_MAXPKT_ADDR	0x0C	/* buffer size out / maxpkt in */
31 #define UDC_SUBPTR_ADDR		0x10	/* setup buffer pointer */
32 #define UDC_DESPTR_ADDR		0x14	/* Data descriptor pointer */
33 #define UDC_CONFIRM_ADDR	0x18	/* Write/Read confirmation */
34 
35 #define UDC_DEVCFG_ADDR		0x400	/* Device configuration */
36 #define UDC_DEVCTL_ADDR		0x404	/* Device control */
37 #define UDC_DEVSTS_ADDR		0x408	/* Device status */
38 #define UDC_DEVIRQSTS_ADDR	0x40C	/* Device irq status */
39 #define UDC_DEVIRQMSK_ADDR	0x410	/* Device irq mask */
40 #define UDC_EPIRQSTS_ADDR	0x414	/* Endpoint irq status */
41 #define UDC_EPIRQMSK_ADDR	0x418	/* Endpoint irq mask */
42 #define UDC_DEVLPM_ADDR		0x41C	/* LPM control / status */
43 #define UDC_CSR_BUSY_ADDR	0x4f0	/* UDC_CSR_BUSY Status register */
44 #define UDC_SRST_ADDR		0x4fc	/* SOFT RESET register */
45 #define UDC_CSR_ADDR		0x500	/* USB_DEVICE endpoint register */
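
/*
 * Register map summary (derived from the offsets above): offsets 0x00-0x18
 * are per-endpoint registers, repeated every UDC_EP_REG_SHIFT (0x20) bytes
 * and reached through ep->offset_addr by the pch_udc_ep_* accessors further
 * down; 0x400 and above are device-global registers (device config/control/
 * status, interrupt, CSR_BUSY and SRST); the UDC_CSR array of per-endpoint
 * configuration words starts at 0x500 and is indexed by PCH_UDC_CSR(ep).
 */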
46 
47 /* Endpoint control register */
48 /* Bit position */
49 #define UDC_EPCTL_MRXFLUSH		(1 << 12)
50 #define UDC_EPCTL_RRDY			(1 << 9)
51 #define UDC_EPCTL_CNAK			(1 << 8)
52 #define UDC_EPCTL_SNAK			(1 << 7)
53 #define UDC_EPCTL_NAK			(1 << 6)
54 #define UDC_EPCTL_P			(1 << 3)
55 #define UDC_EPCTL_F			(1 << 1)
56 #define UDC_EPCTL_S			(1 << 0)
57 #define UDC_EPCTL_ET_SHIFT		4
58 /* Mask pattern */
59 #define UDC_EPCTL_ET_MASK		0x00000030
60 /* Value for ET field */
61 #define UDC_EPCTL_ET_CONTROL		0
62 #define UDC_EPCTL_ET_ISO		1
63 #define UDC_EPCTL_ET_BULK		2
64 #define UDC_EPCTL_ET_INTERRUPT		3
65 
66 /* Endpoint status register */
67 /* Bit position */
68 #define UDC_EPSTS_XFERDONE		(1 << 27)
69 #define UDC_EPSTS_RSS			(1 << 26)
70 #define UDC_EPSTS_RCS			(1 << 25)
71 #define UDC_EPSTS_TXEMPTY		(1 << 24)
72 #define UDC_EPSTS_TDC			(1 << 10)
73 #define UDC_EPSTS_HE			(1 << 9)
74 #define UDC_EPSTS_MRXFIFO_EMP		(1 << 8)
75 #define UDC_EPSTS_BNA			(1 << 7)
76 #define UDC_EPSTS_IN			(1 << 6)
77 #define UDC_EPSTS_OUT_SHIFT		4
78 /* Mask pattern */
79 #define UDC_EPSTS_OUT_MASK		0x00000030
80 #define UDC_EPSTS_ALL_CLR_MASK		0x1F0006F0
81 /* Value for OUT field */
82 #define UDC_EPSTS_OUT_SETUP		2
83 #define UDC_EPSTS_OUT_DATA		1
84 
85 /* Device configuration register */
86 /* Bit position */
87 #define UDC_DEVCFG_CSR_PRG		(1 << 17)
88 #define UDC_DEVCFG_SP			(1 << 3)
89 /* SPD Value */
90 #define UDC_DEVCFG_SPD_HS		0x0
91 #define UDC_DEVCFG_SPD_FS		0x1
92 #define UDC_DEVCFG_SPD_LS		0x2
93 
94 /* Device control register */
95 /* Bit position */
96 #define UDC_DEVCTL_THLEN_SHIFT		24
97 #define UDC_DEVCTL_BRLEN_SHIFT		16
98 #define UDC_DEVCTL_CSR_DONE		(1 << 13)
99 #define UDC_DEVCTL_SD			(1 << 10)
100 #define UDC_DEVCTL_MODE			(1 << 9)
101 #define UDC_DEVCTL_BREN			(1 << 8)
102 #define UDC_DEVCTL_THE			(1 << 7)
103 #define UDC_DEVCTL_DU			(1 << 4)
104 #define UDC_DEVCTL_TDE			(1 << 3)
105 #define UDC_DEVCTL_RDE			(1 << 2)
106 #define UDC_DEVCTL_RES			(1 << 0)
107 
108 /* Device status register */
109 /* Bit position */
110 #define UDC_DEVSTS_TS_SHIFT		18
111 #define UDC_DEVSTS_ENUM_SPEED_SHIFT	13
112 #define UDC_DEVSTS_ALT_SHIFT		8
113 #define UDC_DEVSTS_INTF_SHIFT		4
114 #define UDC_DEVSTS_CFG_SHIFT		0
115 /* Mask pattern */
116 #define UDC_DEVSTS_TS_MASK		0xfffc0000
117 #define UDC_DEVSTS_ENUM_SPEED_MASK	0x00006000
118 #define UDC_DEVSTS_ALT_MASK		0x00000f00
119 #define UDC_DEVSTS_INTF_MASK		0x000000f0
120 #define UDC_DEVSTS_CFG_MASK		0x0000000f
121 /* value for maximum speed for SPEED field */
122 #define UDC_DEVSTS_ENUM_SPEED_FULL	1
123 #define UDC_DEVSTS_ENUM_SPEED_HIGH	0
124 #define UDC_DEVSTS_ENUM_SPEED_LOW	2
125 #define UDC_DEVSTS_ENUM_SPEED_FULLX	3
126 
127 /* Device irq register */
128 /* Bit position */
129 #define UDC_DEVINT_RWKP			(1 << 7)
130 #define UDC_DEVINT_ENUM			(1 << 6)
131 #define UDC_DEVINT_SOF			(1 << 5)
132 #define UDC_DEVINT_US			(1 << 4)
133 #define UDC_DEVINT_UR			(1 << 3)
134 #define UDC_DEVINT_ES			(1 << 2)
135 #define UDC_DEVINT_SI			(1 << 1)
136 #define UDC_DEVINT_SC			(1 << 0)
137 /* Mask pattern */
138 #define UDC_DEVINT_MSK			0x7f
139 
140 /* Endpoint irq register */
141 /* Bit position */
142 #define UDC_EPINT_IN_SHIFT		0
143 #define UDC_EPINT_OUT_SHIFT		16
144 #define UDC_EPINT_IN_EP0		(1 << 0)
145 #define UDC_EPINT_OUT_EP0		(1 << 16)
146 /* Mask pattern */
147 #define UDC_EPINT_MSK_DISABLE_ALL	0xffffffff
148 
149 /* UDC_CSR_BUSY Status register */
150 /* Bit position */
151 #define UDC_CSR_BUSY			(1 << 0)
152 
153 /* SOFT RESET register */
154 /* Bit position */
155 #define UDC_PSRST			(1 << 1)
156 #define UDC_SRST			(1 << 0)
157 
158 /* USB_DEVICE endpoint register */
159 /* Bit position */
160 #define UDC_CSR_NE_NUM_SHIFT		0
161 #define UDC_CSR_NE_DIR_SHIFT		4
162 #define UDC_CSR_NE_TYPE_SHIFT		5
163 #define UDC_CSR_NE_CFG_SHIFT		7
164 #define UDC_CSR_NE_INTF_SHIFT		11
165 #define UDC_CSR_NE_ALT_SHIFT		15
166 #define UDC_CSR_NE_MAX_PKT_SHIFT	19
167 /* Mask pattern */
168 #define UDC_CSR_NE_NUM_MASK		0x0000000f
169 #define UDC_CSR_NE_DIR_MASK		0x00000010
170 #define UDC_CSR_NE_TYPE_MASK		0x00000060
171 #define UDC_CSR_NE_CFG_MASK		0x00000780
172 #define UDC_CSR_NE_INTF_MASK		0x00007800
173 #define UDC_CSR_NE_ALT_MASK		0x00078000
174 #define UDC_CSR_NE_MAX_PKT_MASK		0x3ff80000
175 
176 #define PCH_UDC_CSR(ep)	(UDC_CSR_ADDR + ep*4)
177 #define PCH_UDC_EPINT(in, num)\
178 		(1 << (num + (in ? UDC_EPINT_IN_SHIFT : UDC_EPINT_OUT_SHIFT)))
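
/*
 * Example (illustration only): PCH_UDC_EPINT(1, 1) evaluates to (1 << 1),
 * the IN interrupt bit of EP1, while PCH_UDC_EPINT(0, 1) evaluates to
 * (1 << 17), the OUT interrupt bit of EP1; for num == 0 the results match
 * UDC_EPINT_IN_EP0 and UDC_EPINT_OUT_EP0 above.
 */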
179 
180 /* Index of endpoint */
181 #define UDC_EP0IN_IDX		0
182 #define UDC_EP0OUT_IDX		1
183 #define UDC_EPIN_IDX(ep)	(ep * 2)
184 #define UDC_EPOUT_IDX(ep)	(ep * 2 + 1)
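
/*
 * Example: UDC_EPIN_IDX(1) == 2 and UDC_EPOUT_IDX(1) == 3, i.e. the IN and
 * OUT halves of one physical endpoint occupy adjacent slots of the dev->ep[]
 * array, with EP0 IN/OUT at indices 0 and 1.
 */
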
185 #define PCH_UDC_EP0		0
186 #define PCH_UDC_EP1		1
187 #define PCH_UDC_EP2		2
188 #define PCH_UDC_EP3		3
189 
190 /* Number of endpoints */
191 #define PCH_UDC_EP_NUM		32	/* Total number of EPs (16 IN,16 OUT) */
192 #define PCH_UDC_USED_EP_NUM	4	/* Number of EPs actually used */
193 /* Length Value */
194 #define PCH_UDC_BRLEN		0x0F	/* Burst length */
195 #define PCH_UDC_THLEN		0x1F	/* Threshold length */
196 /* Value of EP Buffer Size */
197 #define UDC_EP0IN_BUFF_SIZE	16
198 #define UDC_EPIN_BUFF_SIZE	256
199 #define UDC_EP0OUT_BUFF_SIZE	16
200 #define UDC_EPOUT_BUFF_SIZE	256
201 /* Value of EP maximum packet size */
202 #define UDC_EP0IN_MAX_PKT_SIZE	64
203 #define UDC_EP0OUT_MAX_PKT_SIZE	64
204 #define UDC_BULK_MAX_PKT_SIZE	512
205 
206 /* DMA */
207 #define DMA_DIR_RX		1	/* DMA for data receive */
208 #define DMA_DIR_TX		2	/* DMA for data transmit */
209 #define DMA_ADDR_INVALID	(~(dma_addr_t)0)
210 #define UDC_DMA_MAXPACKET	65536	/* maximum packet size for DMA */
211 
212 /**
213  * struct pch_udc_data_dma_desc - Structure to hold DMA descriptor information
214  *				  for data
215  * @status:		Status quadlet
216  * @reserved:		Reserved
217  * @dataptr:		Buffer descriptor
218  * @next:		Next descriptor
219  */
220 struct pch_udc_data_dma_desc {
221 	u32 status;
222 	u32 reserved;
223 	u32 dataptr;
224 	u32 next;
225 };
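
/*
 * Descriptors are linked through @next, which holds the bus address of the
 * following descriptor in a chain; the low 16 bits of @status carry the
 * transfer byte count (PCH_UDC_RXTX_BYTES) and the upper bits hold the
 * buffer/RxTx state flags listed under "DMA status definitions" below.
 */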
226 
227 /**
228  * struct pch_udc_stp_dma_desc - Structure to hold DMA descriptor information
229  *				 for control data
230  * @status:	Status
231  * @reserved:	Reserved
232  * @request:	embedded SETUP request (struct usb_ctrlrequest)
234  */
235 struct pch_udc_stp_dma_desc {
236 	u32 status;
237 	u32 reserved;
238 	struct usb_ctrlrequest request;
239 } __packed;
240 
241 /* DMA status definitions */
242 /* Buffer status */
243 #define PCH_UDC_BUFF_STS	0xC0000000
244 #define PCH_UDC_BS_HST_RDY	0x00000000
245 #define PCH_UDC_BS_DMA_BSY	0x40000000
246 #define PCH_UDC_BS_DMA_DONE	0x80000000
247 #define PCH_UDC_BS_HST_BSY	0xC0000000
248 /*  Rx/Tx Status */
249 #define PCH_UDC_RXTX_STS	0x30000000
250 #define PCH_UDC_RTS_SUCC	0x00000000
251 #define PCH_UDC_RTS_DESERR	0x10000000
252 #define PCH_UDC_RTS_BUFERR	0x30000000
253 /* Last Descriptor Indication */
254 #define PCH_UDC_DMA_LAST	0x08000000
255 /* Number of Rx/Tx Bytes Mask */
256 #define PCH_UDC_RXTX_BYTES	0x0000ffff
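
/*
 * Illustration only (not part of the original driver): a completed data
 * descriptor can be decoded with the masks above roughly as follows:
 *
 *	bool done  = (td->status & PCH_UDC_BUFF_STS) == PCH_UDC_BS_DMA_DONE;
 *	bool ok    = (td->status & PCH_UDC_RXTX_STS) == PCH_UDC_RTS_SUCC;
 *	u32  count = td->status & PCH_UDC_RXTX_BYTES;
 */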
257 
258 /**
259  * struct pch_udc_cfg_data - Structure to hold current configuration
260  *			     and interface information
261  * @cur_cfg:	current configuration in use
262  * @cur_intf:	current interface in use
263  * @cur_alt:	current alt interface in use
264  */
265 struct pch_udc_cfg_data {
266 	u16 cur_cfg;
267 	u16 cur_intf;
268 	u16 cur_alt;
269 };
270 
271 /**
272  * struct pch_udc_ep - Structure holding a PCH USB device Endpoint information
273  * @ep:			embedded ep request
274  * @td_stp_phys:	DMA address of the setup-request descriptor
275  * @td_data_phys:	DMA address of the data-request descriptor
276  * @td_stp:		setup-request descriptor (virtual address)
277  * @td_data:		data-request descriptor (virtual address)
278  * @dev:		reference to device struct
279  * @offset_addr:	offset address of this endpoint's register window
281  * @queue:		queue for requests
282  * @num:		endpoint number
283  * @in:			endpoint is IN
284  * @halted:		endpoint halted?
285  * @epsts:		Endpoint status
286  */
287 struct pch_udc_ep {
288 	struct usb_ep			ep;
289 	dma_addr_t			td_stp_phys;
290 	dma_addr_t			td_data_phys;
291 	struct pch_udc_stp_dma_desc	*td_stp;
292 	struct pch_udc_data_dma_desc	*td_data;
293 	struct pch_udc_dev		*dev;
294 	unsigned long			offset_addr;
295 	struct list_head		queue;
296 	unsigned			num:5,
297 					in:1,
298 					halted:1;
299 	unsigned long			epsts;
300 };
301 
302 /**
303  * struct pch_vbus_gpio_data - Structure holding GPIO information
304  *					for detecting VBUS
305  * @port:		gpio port number
306  * @intr:		gpio interrupt number
307  * @irq_work_fall:	work item scheduled when VBUS falls
308  * @irq_work_rise:	work item scheduled when VBUS rises
309  */
310 struct pch_vbus_gpio_data {
311 	int			port;
312 	int			intr;
313 	struct work_struct	irq_work_fall;
314 	struct work_struct	irq_work_rise;
315 };
316 
317 /**
318  * struct pch_udc_dev - Structure holding complete information
319  *			of the PCH USB device
320  * @gadget:		gadget driver data
321  * @driver:		reference to gadget driver bound
322  * @pdev:		reference to the PCI device
323  * @ep:			array of endpoints
324  * @lock:		protects all state
325  * @stall:		stall requested
326  * @prot_stall:		protocol stall requested
328  * @suspended:		driver in suspended state
329  * @connected:		gadget driver associated
330  * @vbus_session:	required vbus_session state
331  * @set_cfg_not_acked:	pending acknowledgement for setup
332  * @waiting_zlp_ack:	pending acknowledgement for ZLP
333  * @data_requests:	DMA pool for data requests
334  * @stp_requests:	DMA pool for setup requests
335  * @dma_addr:		DMA address of the receive buffer
336  * @setup_data:		Received setup data
337  * @base_addr:		for mapped device memory
338  * @cfg_data:		current cfg, intf, and alt in use
339  * @vbus_gpio:		GPIO information for detecting VBUS
340  */
341 struct pch_udc_dev {
342 	struct usb_gadget		gadget;
343 	struct usb_gadget_driver	*driver;
344 	struct pci_dev			*pdev;
345 	struct pch_udc_ep		ep[PCH_UDC_EP_NUM];
346 	spinlock_t			lock; /* protects all state */
347 	unsigned
348 			stall:1,
349 			prot_stall:1,
350 			suspended:1,
351 			connected:1,
352 			vbus_session:1,
353 			set_cfg_not_acked:1,
354 			waiting_zlp_ack:1;
355 	struct dma_pool		*data_requests;
356 	struct dma_pool		*stp_requests;
357 	dma_addr_t			dma_addr;
358 	struct usb_ctrlrequest		setup_data;
359 	void __iomem			*base_addr;
360 	struct pch_udc_cfg_data		cfg_data;
361 	struct pch_vbus_gpio_data	vbus_gpio;
362 };
363 #define to_pch_udc(g)	(container_of((g), struct pch_udc_dev, gadget))
364 
365 #define PCH_UDC_PCI_BAR_QUARK_X1000	0
366 #define PCH_UDC_PCI_BAR			1
367 
368 #define PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC	0x0939
369 #define PCI_DEVICE_ID_INTEL_EG20T_UDC		0x8808
370 
371 #define PCI_VENDOR_ID_ROHM		0x10DB
372 #define PCI_DEVICE_ID_ML7213_IOH_UDC	0x801D
373 #define PCI_DEVICE_ID_ML7831_IOH_UDC	0x8808
374 
375 static const char	ep0_string[] = "ep0in";
376 static DEFINE_SPINLOCK(udc_stall_spinlock);	/* stall spin lock */
377 static bool speed_fs;
378 module_param_named(speed_fs, speed_fs, bool, S_IRUGO);
379 MODULE_PARM_DESC(speed_fs, "true for Full speed operation");
380 
381 /**
382  * struct pch_udc_request - Structure holding a PCH USB device request packet
383  * @req:		embedded ep request
384  * @td_data_phys:	phys. address
385  * @td_data:		first dma desc. of chain
386  * @td_data_last:	last dma desc. of chain
387  * @queue:		associated queue
388  * @dma_going:		DMA in progress for request
389  * @dma_mapped:		DMA memory mapped for request
390  * @dma_done:		DMA completed for request
391  * @chain_len:		chain length
392  * @buf:		Buffer memory for align adjustment
393  * @dma:		DMA memory for align adjustment
394  */
395 struct pch_udc_request {
396 	struct usb_request		req;
397 	dma_addr_t			td_data_phys;
398 	struct pch_udc_data_dma_desc	*td_data;
399 	struct pch_udc_data_dma_desc	*td_data_last;
400 	struct list_head		queue;
401 	unsigned			dma_going:1,
402 					dma_mapped:1,
403 					dma_done:1;
404 	unsigned			chain_len;
405 	void				*buf;
406 	dma_addr_t			dma;
407 };
408 
409 static inline u32 pch_udc_readl(struct pch_udc_dev *dev, unsigned long reg)
410 {
411 	return ioread32(dev->base_addr + reg);
412 }
413 
414 static inline void pch_udc_writel(struct pch_udc_dev *dev,
415 				    unsigned long val, unsigned long reg)
416 {
417 	iowrite32(val, dev->base_addr + reg);
418 }
419 
420 static inline void pch_udc_bit_set(struct pch_udc_dev *dev,
421 				     unsigned long reg,
422 				     unsigned long bitmask)
423 {
424 	pch_udc_writel(dev, pch_udc_readl(dev, reg) | bitmask, reg);
425 }
426 
427 static inline void pch_udc_bit_clr(struct pch_udc_dev *dev,
428 				     unsigned long reg,
429 				     unsigned long bitmask)
430 {
431 	pch_udc_writel(dev, pch_udc_readl(dev, reg) & ~(bitmask), reg);
432 }
433 
434 static inline u32 pch_udc_ep_readl(struct pch_udc_ep *ep, unsigned long reg)
435 {
436 	return ioread32(ep->dev->base_addr + ep->offset_addr + reg);
437 }
438 
439 static inline void pch_udc_ep_writel(struct pch_udc_ep *ep,
440 				    unsigned long val, unsigned long reg)
441 {
442 	iowrite32(val, ep->dev->base_addr + ep->offset_addr + reg);
443 }
444 
445 static inline void pch_udc_ep_bit_set(struct pch_udc_ep *ep,
446 				     unsigned long reg,
447 				     unsigned long bitmask)
448 {
449 	pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) | bitmask, reg);
450 }
451 
452 static inline void pch_udc_ep_bit_clr(struct pch_udc_ep *ep,
453 				     unsigned long reg,
454 				     unsigned long bitmask)
455 {
456 	pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) & ~(bitmask), reg);
457 }
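
/*
 * Accessor layering: pch_udc_readl()/pch_udc_writel() access the device-
 * global registers relative to dev->base_addr, the pch_udc_ep_* variants add
 * ep->offset_addr to reach an endpoint's own register window, and the
 * *_bit_set()/*_bit_clr() helpers perform a read-modify-write of a single
 * register.
 */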
458 
459 /**
460  * pch_udc_csr_busy() - Wait till idle.
461  * @dev:	Reference to pch_udc_dev structure
462  */
463 static void pch_udc_csr_busy(struct pch_udc_dev *dev)
464 {
465 	unsigned int count = 200;
466 
467 	/* Wait till idle */
468 	while ((pch_udc_readl(dev, UDC_CSR_BUSY_ADDR) & UDC_CSR_BUSY)
469 		&& --count)
470 		cpu_relax();
471 	if (!count)
472 		dev_err(&dev->pdev->dev, "%s: wait error\n", __func__);
473 }
474 
475 /**
476  * pch_udc_write_csr() - Write the command and status registers.
477  * @dev:	Reference to pch_udc_dev structure
478  * @val:	value to be written to CSR register
479  * @ep:	index of the CSR register to write (endpoint index)
480  */
481 static void pch_udc_write_csr(struct pch_udc_dev *dev, unsigned long val,
482 			       unsigned int ep)
483 {
484 	unsigned long reg = PCH_UDC_CSR(ep);
485 
486 	pch_udc_csr_busy(dev);		/* Wait till idle */
487 	pch_udc_writel(dev, val, reg);
488 	pch_udc_csr_busy(dev);		/* Wait till idle */
489 }
490 
491 /**
492  * pch_udc_read_csr() - Read the command and status registers.
493  * @dev:	Reference to pch_udc_dev structure
494  * @ep:	index of the CSR register to read (endpoint index)
495  *
496  * Return: content of the CSR register
497  */
498 static u32 pch_udc_read_csr(struct pch_udc_dev *dev, unsigned int ep)
499 {
500 	unsigned long reg = PCH_UDC_CSR(ep);
501 
502 	pch_udc_csr_busy(dev);		/* Wait till idle */
503 	pch_udc_readl(dev, reg);	/* Dummy read */
504 	pch_udc_csr_busy(dev);		/* Wait till idle */
505 	return pch_udc_readl(dev, reg);
506 }
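
/*
 * CSR access protocol, as implemented above: poll UDC_CSR_BUSY until the
 * controller reports idle before touching a UDC_CSR slot; reads additionally
 * issue a dummy read and wait for idle again before fetching the value that
 * is returned to the caller.
 */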
507 
508 /**
509  * pch_udc_rmt_wakeup() - Initiate a remote wakeup
510  * @dev:	Reference to pch_udc_dev structure
511  */
512 static inline void pch_udc_rmt_wakeup(struct pch_udc_dev *dev)
513 {
514 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
515 	mdelay(1);
516 	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
517 }
518 
519 /**
520  * pch_udc_get_frame() - Get the current frame from device status register
521  * @dev:	Reference to pch_udc_dev structure
522  * Return:	the current frame number
523  */
524 static inline int pch_udc_get_frame(struct pch_udc_dev *dev)
525 {
526 	u32 frame = pch_udc_readl(dev, UDC_DEVSTS_ADDR);
527 	return (frame & UDC_DEVSTS_TS_MASK) >> UDC_DEVSTS_TS_SHIFT;
528 }
529 
530 /**
531  * pch_udc_clear_selfpowered() - Clear the self power control
532  * @dev:	Reference to pch_udc_regs structure
533  */
534 static inline void pch_udc_clear_selfpowered(struct pch_udc_dev *dev)
535 {
536 	pch_udc_bit_clr(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
537 }
538 
539 /**
540  * pch_udc_set_selfpowered() - Set the self power control
541  * @dev:	Reference to pch_udc_regs structure
542  */
543 static inline void pch_udc_set_selfpowered(struct pch_udc_dev *dev)
544 {
545 	pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
546 }
547 
548 /**
549  * pch_udc_set_disconnect() - Set the disconnect status.
550  * @dev:	Reference to pch_udc_regs structure
551  */
552 static inline void pch_udc_set_disconnect(struct pch_udc_dev *dev)
553 {
554 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
555 }
556 
557 /**
558  * pch_udc_clear_disconnect() - Clear the disconnect status.
559  * @dev:	Reference to pch_udc_regs structure
560  */
561 static void pch_udc_clear_disconnect(struct pch_udc_dev *dev)
562 {
563 	/* Clear the disconnect */
564 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
565 	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
566 	mdelay(1);
567 	/* Resume USB signalling */
568 	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
569 }
570 
571 /**
572  * pch_udc_reconnect() - This API initializes the USB device controller
573  *						and clears the disconnect status.
574  * @dev:		Reference to pch_udc_regs structure
575  */
576 static void pch_udc_init(struct pch_udc_dev *dev);
577 static void pch_udc_reconnect(struct pch_udc_dev *dev)
578 {
579 	pch_udc_init(dev);
580 
581 	/* enable device interrupts */
582 	/* pch_udc_enable_interrupts() */
583 	pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR,
584 			UDC_DEVINT_UR | UDC_DEVINT_ENUM);
585 
586 	/* Clear the disconnect */
587 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
588 	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
589 	mdelay(1);
590 	/* Resume USB signalling */
591 	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
592 }
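
/*
 * Note: pch_udc_clear_disconnect() and pch_udc_reconnect() end with the same
 * sequence: assert RES (resume signalling), clear the soft-disconnect bit,
 * wait about 1 ms, then deassert RES. Reconnect additionally re-runs
 * pch_udc_init() and unmasks the reset/enumeration interrupts beforehand.
 */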
593 
594 /**
595  * pch_udc_vbus_session() - Set or clear the disconnect status.
596  * @dev:	Reference to pch_udc_regs structure
597  * @is_active:	Parameter specifying the action
598  *		  0:   indicating VBUS power is ending
599  *		  !0:  indicating VBUS power is starting
600  */
601 static inline void pch_udc_vbus_session(struct pch_udc_dev *dev,
602 					  int is_active)
603 {
604 	if (is_active) {
605 		pch_udc_reconnect(dev);
606 		dev->vbus_session = 1;
607 	} else {
608 		if (dev->driver && dev->driver->disconnect) {
609 			spin_lock(&dev->lock);
610 			dev->driver->disconnect(&dev->gadget);
611 			spin_unlock(&dev->lock);
612 		}
613 		pch_udc_set_disconnect(dev);
614 		dev->vbus_session = 0;
615 	}
616 }
617 
618 /**
619  * pch_udc_ep_set_stall() - Set the stall of endpoint
620  * @ep:		Reference to structure of type pch_udc_ep_regs
621  */
622 static void pch_udc_ep_set_stall(struct pch_udc_ep *ep)
623 {
624 	if (ep->in) {
625 		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
626 		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
627 	} else {
628 		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
629 	}
630 }
631 
632 /**
633  * pch_udc_ep_clear_stall() - Clear the stall of endpoint
634  * @ep:		Reference to structure of type pch_udc_ep_regs
635  */
636 static inline void pch_udc_ep_clear_stall(struct pch_udc_ep *ep)
637 {
638 	/* Clear the stall */
639 	pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
640 	/* Clear NAK by writing CNAK */
641 	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
642 }
643 
644 /**
645  * pch_udc_ep_set_trfr_type() - Set the transfer type of endpoint
646  * @ep:		Reference to structure of type pch_udc_ep_regs
647  * @type:	Type of endpoint
648  */
649 static inline void pch_udc_ep_set_trfr_type(struct pch_udc_ep *ep,
650 					u8 type)
651 {
652 	pch_udc_ep_writel(ep, ((type << UDC_EPCTL_ET_SHIFT) &
653 				UDC_EPCTL_ET_MASK), UDC_EPCTL_ADDR);
654 }
655 
656 /**
657  * pch_udc_ep_set_bufsz() - Set the buffer size for the endpoint
658  * @ep:		Reference to structure of type pch_udc_ep_regs
659  * @buf_size:	The buffer word size
660  * @ep_in:	Non-zero for an IN endpoint, zero for an OUT endpoint
660  */
661 static void pch_udc_ep_set_bufsz(struct pch_udc_ep *ep,
662 						 u32 buf_size, u32 ep_in)
663 {
664 	u32 data;
665 	if (ep_in) {
666 		data = pch_udc_ep_readl(ep, UDC_BUFIN_FRAMENUM_ADDR);
667 		data = (data & 0xffff0000) | (buf_size & 0xffff);
668 		pch_udc_ep_writel(ep, data, UDC_BUFIN_FRAMENUM_ADDR);
669 	} else {
670 		data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
671 		data = (buf_size << 16) | (data & 0xffff);
672 		pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
673 	}
674 }
675 
676 /**
677  * pch_udc_ep_set_maxpkt() - Set the Max packet size for the endpoint
678  * @ep:		Reference to structure of type pch_udc_ep_regs
679  * @pkt_size:	The packet byte size
680  */
681 static void pch_udc_ep_set_maxpkt(struct pch_udc_ep *ep, u32 pkt_size)
682 {
683 	u32 data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
684 	data = (data & 0xffff0000) | (pkt_size & 0xffff);
685 	pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
686 }
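
/*
 * Register packing used by the two helpers above: the IN buffer size goes in
 * the low 16 bits of UDC_BUFIN_FRAMENUM, the OUT buffer size in the upper 16
 * bits of UDC_BUFOUT_MAXPKT, and the maximum packet size in the low 16 bits
 * of UDC_BUFOUT_MAXPKT.
 */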
687 
688 /**
689  * pch_udc_ep_set_subptr() - Set the Setup buffer pointer for the endpoint
690  * @ep:		Reference to structure of type pch_udc_ep_regs
691  * @addr:	DMA address of the setup buffer
692  */
693 static inline void pch_udc_ep_set_subptr(struct pch_udc_ep *ep, u32 addr)
694 {
695 	pch_udc_ep_writel(ep, addr, UDC_SUBPTR_ADDR);
696 }
697 
698 /**
699  * pch_udc_ep_set_ddptr() - Set the Data descriptor pointer for the endpoint
700  * @ep:		Reference to structure of type pch_udc_ep_regs
701  * @addr:	DMA address of the data descriptor
702  */
703 static inline void pch_udc_ep_set_ddptr(struct pch_udc_ep *ep, u32 addr)
704 {
705 	pch_udc_ep_writel(ep, addr, UDC_DESPTR_ADDR);
706 }
707 
708 /**
709  * pch_udc_ep_set_pd() - Set the poll demand bit for the endpoint
710  * @ep:		Reference to structure of type pch_udc_ep_regs
711  */
712 static inline void pch_udc_ep_set_pd(struct pch_udc_ep *ep)
713 {
714 	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_P);
715 }
716 
717 /**
718  * pch_udc_ep_set_rrdy() - Set the receive ready bit for the endpoint
719  * @ep:		Reference to structure of type pch_udc_ep_regs
720  */
721 static inline void pch_udc_ep_set_rrdy(struct pch_udc_ep *ep)
722 {
723 	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
724 }
725 
726 /**
727  * pch_udc_ep_clear_rrdy() - Clear the receive ready bit for the endpoint
728  * @ep:		Reference to structure of type pch_udc_ep_regs
729  */
730 static inline void pch_udc_ep_clear_rrdy(struct pch_udc_ep *ep)
731 {
732 	pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
733 }
734 
735 /**
736  * pch_udc_set_dma() - Set the 'TDE' or RDE bit of device control
737  *			register depending on the direction specified
738  * @dev:	Reference to structure of type pch_udc_regs
739  * @dir:	whether Tx or Rx
740  *		  DMA_DIR_RX: Receive
741  *		  DMA_DIR_TX: Transmit
742  */
743 static inline void pch_udc_set_dma(struct pch_udc_dev *dev, int dir)
744 {
745 	if (dir == DMA_DIR_RX)
746 		pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
747 	else if (dir == DMA_DIR_TX)
748 		pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
749 }
750 
751 /**
752  * pch_udc_clear_dma() - Clear the 'TDE' or RDE bit of device control
753  *				 register depending on the direction specified
754  * @dev:	Reference to structure of type pch_udc_regs
755  * @dir:	Whether Tx or Rx
756  *		  DMA_DIR_RX: Receive
757  *		  DMA_DIR_TX: Transmit
758  */
759 static inline void pch_udc_clear_dma(struct pch_udc_dev *dev, int dir)
760 {
761 	if (dir == DMA_DIR_RX)
762 		pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
763 	else if (dir == DMA_DIR_TX)
764 		pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
765 }
766 
767 /**
768  * pch_udc_set_csr_done() - Set the device control register
769  *				CSR done field (bit 13)
770  * @dev:	reference to structure of type pch_udc_regs
771  */
772 static inline void pch_udc_set_csr_done(struct pch_udc_dev *dev)
773 {
774 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_CSR_DONE);
775 }
776 
777 /**
778  * pch_udc_disable_interrupts() - Disables the specified interrupts
779  * @dev:	Reference to structure of type pch_udc_regs
780  * @mask:	Mask to disable interrupts
781  */
782 static inline void pch_udc_disable_interrupts(struct pch_udc_dev *dev,
783 					    u32 mask)
784 {
785 	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, mask);
786 }
787 
788 /**
789  * pch_udc_enable_interrupts() - Enable the specified interrupts
790  * @dev:	Reference to structure of type pch_udc_regs
791  * @mask:	Mask to enable interrupts
792  */
793 static inline void pch_udc_enable_interrupts(struct pch_udc_dev *dev,
794 					   u32 mask)
795 {
796 	pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR, mask);
797 }
798 
799 /**
800  * pch_udc_disable_ep_interrupts() - Disable endpoint interrupts
801  * @dev:	Reference to structure of type pch_udc_regs
802  * @mask:	Mask to disable interrupts
803  */
804 static inline void pch_udc_disable_ep_interrupts(struct pch_udc_dev *dev,
805 						u32 mask)
806 {
807 	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, mask);
808 }
809 
810 /**
811  * pch_udc_enable_ep_interrupts() - Enable endpoint interrupts
812  * @dev:	Reference to structure of type pch_udc_regs
813  * @mask:	Mask to enable interrupts
814  */
815 static inline void pch_udc_enable_ep_interrupts(struct pch_udc_dev *dev,
816 					      u32 mask)
817 {
818 	pch_udc_bit_clr(dev, UDC_EPIRQMSK_ADDR, mask);
819 }
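
/*
 * Both interrupt mask registers are "1 = masked": disabling an interrupt
 * sets its bit in UDC_DEVIRQMSK/UDC_EPIRQMSK and enabling it clears the bit,
 * which is why the enable helpers above use pch_udc_bit_clr().
 */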
820 
821 /**
822  * pch_udc_read_device_interrupts() - Read the device interrupts
823  * @dev:	Reference to structure of type pch_udc_regs
824  * Return:	the device interrupt status
825  */
826 static inline u32 pch_udc_read_device_interrupts(struct pch_udc_dev *dev)
827 {
828 	return pch_udc_readl(dev, UDC_DEVIRQSTS_ADDR);
829 }
830 
831 /**
832  * pch_udc_write_device_interrupts() - Write device interrupts
833  * @dev:	Reference to structure of type pch_udc_regs
834  * @val:	The value to be written to interrupt register
835  */
836 static inline void pch_udc_write_device_interrupts(struct pch_udc_dev *dev,
837 						     u32 val)
838 {
839 	pch_udc_writel(dev, val, UDC_DEVIRQSTS_ADDR);
840 }
841 
842 /**
843  * pch_udc_read_ep_interrupts() - Read the endpoint interrupts
844  * @dev:	Reference to structure of type pch_udc_regs
845  * Return:	the endpoint interrupt status
846  */
847 static inline u32 pch_udc_read_ep_interrupts(struct pch_udc_dev *dev)
848 {
849 	return pch_udc_readl(dev, UDC_EPIRQSTS_ADDR);
850 }
851 
852 /**
853  * pch_udc_write_ep_interrupts() - Clear endpoint interrupts
854  * @dev:	Reference to structure of type pch_udc_regs
855  * @val:	The value to be written to interrupt register
856  */
857 static inline void pch_udc_write_ep_interrupts(struct pch_udc_dev *dev,
858 					     u32 val)
859 {
860 	pch_udc_writel(dev, val, UDC_EPIRQSTS_ADDR);
861 }
862 
863 /**
864  * pch_udc_read_device_status() - Read the device status
865  * @dev:	Reference to structure of type pch_udc_regs
866  * Return:	the device status
867  */
868 static inline u32 pch_udc_read_device_status(struct pch_udc_dev *dev)
869 {
870 	return pch_udc_readl(dev, UDC_DEVSTS_ADDR);
871 }
872 
873 /**
874  * pch_udc_read_ep_control() - Read the endpoint control
875  * @ep:		Reference to structure of type pch_udc_ep_regs
876  * Return:	the endpoint control register value
877  */
878 static inline u32 pch_udc_read_ep_control(struct pch_udc_ep *ep)
879 {
880 	return pch_udc_ep_readl(ep, UDC_EPCTL_ADDR);
881 }
882 
883 /**
884  * pch_udc_clear_ep_control() - Clear the endpoint control register
885  * @ep:		Reference to structure of type pch_udc_ep_regs
887  */
888 static inline void pch_udc_clear_ep_control(struct pch_udc_ep *ep)
889 {
890 	pch_udc_ep_writel(ep, 0, UDC_EPCTL_ADDR);
891 }
892 
893 /**
894  * pch_udc_read_ep_status() - Read the endpoint status
895  * @ep:		Reference to structure of type pch_udc_ep_regs
896  * Return:	the endpoint status
897  */
898 static inline u32 pch_udc_read_ep_status(struct pch_udc_ep *ep)
899 {
900 	return pch_udc_ep_readl(ep, UDC_EPSTS_ADDR);
901 }
902 
903 /**
904  * pch_udc_clear_ep_status() - Clear the endpoint status
905  * @ep:		Reference to structure of type pch_udc_ep_regs
906  * @stat:	Endpoint status
907  */
908 static inline void pch_udc_clear_ep_status(struct pch_udc_ep *ep,
909 					 u32 stat)
910 {
911 	pch_udc_ep_writel(ep, stat, UDC_EPSTS_ADDR);
912 }
913 
914 /**
915  * pch_udc_ep_set_nak() - Set the bit 7 (SNAK field)
916  *				of the endpoint control register
917  * @ep:		Reference to structure of type pch_udc_ep_regs
918  */
919 static inline void pch_udc_ep_set_nak(struct pch_udc_ep *ep)
920 {
921 	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_SNAK);
922 }
923 
924 /**
925  * pch_udc_ep_clear_nak() - Set the bit 8 (CNAK field)
926  *				of the endpoint control register
927  * @ep:		reference to structure of type pch_udc_ep_regs
928  */
929 static void pch_udc_ep_clear_nak(struct pch_udc_ep *ep)
930 {
931 	unsigned int loopcnt = 0;
932 	struct pch_udc_dev *dev = ep->dev;
933 
934 	if (!(pch_udc_ep_readl(ep, UDC_EPCTL_ADDR) & UDC_EPCTL_NAK))
935 		return;
936 	if (!ep->in) {
937 		loopcnt = 10000;
938 		while (!(pch_udc_read_ep_status(ep) & UDC_EPSTS_MRXFIFO_EMP) &&
939 			--loopcnt)
940 			udelay(5);
941 		if (!loopcnt)
942 			dev_err(&dev->pdev->dev, "%s: RxFIFO not Empty\n",
943 				__func__);
944 	}
945 	loopcnt = 10000;
946 	while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_NAK) && --loopcnt) {
947 		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
948 		udelay(5);
949 	}
950 	if (!loopcnt)
951 		dev_err(&dev->pdev->dev, "%s: Clear NAK not set for ep%d%s\n",
952 			__func__, ep->num, (ep->in ? "in" : "out"));
953 }
954 
955 /**
956  * pch_udc_ep_fifo_flush() - Flush the endpoint fifo
957  * @ep:	reference to structure of type pch_udc_ep_regs
958  * @dir:	direction of endpoint
959  *		  0:  endpoint is OUT
960  *		  !0: endpoint is IN
961  */
962 static void pch_udc_ep_fifo_flush(struct pch_udc_ep *ep, int dir)
963 {
964 	if (dir) {	/* IN ep */
965 		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
966 		return;
967 	}
968 }
969 
970 /**
971  * pch_udc_ep_enable() - This API enables the endpoint
972  * @ep:		Reference to the endpoint structure
973  * @cfg:	Current configuration information
974  * @desc:	endpoint descriptor
974  */
975 static void pch_udc_ep_enable(struct pch_udc_ep *ep,
976 			       struct pch_udc_cfg_data *cfg,
977 			       const struct usb_endpoint_descriptor *desc)
978 {
979 	u32 val = 0;
980 	u32 buff_size = 0;
981 
982 	pch_udc_ep_set_trfr_type(ep, desc->bmAttributes);
983 	if (ep->in)
984 		buff_size = UDC_EPIN_BUFF_SIZE;
985 	else
986 		buff_size = UDC_EPOUT_BUFF_SIZE;
987 	pch_udc_ep_set_bufsz(ep, buff_size, ep->in);
988 	pch_udc_ep_set_maxpkt(ep, usb_endpoint_maxp(desc));
989 	pch_udc_ep_set_nak(ep);
990 	pch_udc_ep_fifo_flush(ep, ep->in);
991 	/* Configure the endpoint */
992 	val = ep->num << UDC_CSR_NE_NUM_SHIFT | ep->in << UDC_CSR_NE_DIR_SHIFT |
993 	      ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) <<
994 		UDC_CSR_NE_TYPE_SHIFT) |
995 	      (cfg->cur_cfg << UDC_CSR_NE_CFG_SHIFT) |
996 	      (cfg->cur_intf << UDC_CSR_NE_INTF_SHIFT) |
997 	      (cfg->cur_alt << UDC_CSR_NE_ALT_SHIFT) |
998 	      usb_endpoint_maxp(desc) << UDC_CSR_NE_MAX_PKT_SHIFT;
999 
1000 	if (ep->in)
1001 		pch_udc_write_csr(ep->dev, val, UDC_EPIN_IDX(ep->num));
1002 	else
1003 		pch_udc_write_csr(ep->dev, val, UDC_EPOUT_IDX(ep->num));
1004 }
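
/*
 * Illustration only (values are hypothetical): with cur_cfg = 1 and
 * cur_intf = cur_alt = 0, a bulk IN endpoint 1 with a 512-byte max packet
 * would be described by
 *
 *	(1 << UDC_CSR_NE_NUM_SHIFT) | (1 << UDC_CSR_NE_DIR_SHIFT) |
 *	(USB_ENDPOINT_XFER_BULK << UDC_CSR_NE_TYPE_SHIFT) |
 *	(1 << UDC_CSR_NE_CFG_SHIFT) | (512 << UDC_CSR_NE_MAX_PKT_SHIFT)
 *
 * (the zero intf/alt fields contribute nothing) and written to the UDC_CSR
 * slot selected by UDC_EPIN_IDX(1).
 */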
1005 
1006 /**
1007  * pch_udc_ep_disable() - This API disables the endpoint
1008  * @ep:		Reference to the endpoint structure
1009  */
1010 static void pch_udc_ep_disable(struct pch_udc_ep *ep)
1011 {
1012 	if (ep->in) {
1013 		/* flush the fifo */
1014 		pch_udc_ep_writel(ep, UDC_EPCTL_F, UDC_EPCTL_ADDR);
1015 		/* set NAK */
1016 		pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
1017 		pch_udc_ep_bit_set(ep, UDC_EPSTS_ADDR, UDC_EPSTS_IN);
1018 	} else {
1019 		/* set NAK */
1020 		pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
1021 	}
1022 	/* reset desc pointer */
1023 	pch_udc_ep_writel(ep, 0, UDC_DESPTR_ADDR);
1024 }
1025 
1026 /**
1027  * pch_udc_wait_ep_stall() - Wait for the endpoint stall bit to clear.
1028  * @ep:		Reference to the endpoint structure
1029  */
1030 static void pch_udc_wait_ep_stall(struct pch_udc_ep *ep)
1031 {
1032 	unsigned int count = 10000;
1033 
1034 	/* Wait till idle */
1035 	while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_S) && --count)
1036 		udelay(5);
1037 	if (!count)
1038 		dev_err(&ep->dev->pdev->dev, "%s: wait error\n", __func__);
1039 }
1040 
1041 /**
1042  * pch_udc_init() - This API initializes usb device controller
1043  * @dev:	Reference to pch_udc_regs structure
1044  */
1045 static void pch_udc_init(struct pch_udc_dev *dev)
1046 {
1047 	if (NULL == dev) {
1048 		pr_err("%s: Invalid address\n", __func__);
1049 		return;
1050 	}
1051 	/* Soft Reset and Reset PHY */
1052 	pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
1053 	pch_udc_writel(dev, UDC_SRST | UDC_PSRST, UDC_SRST_ADDR);
1054 	mdelay(1);
1055 	pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
1056 	pch_udc_writel(dev, 0x00, UDC_SRST_ADDR);
1057 	mdelay(1);
1058 	/* mask and clear all device interrupts */
1059 	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
1060 	pch_udc_bit_set(dev, UDC_DEVIRQSTS_ADDR, UDC_DEVINT_MSK);
1061 
1062 	/* mask and clear all ep interrupts */
1063 	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1064 	pch_udc_bit_set(dev, UDC_EPIRQSTS_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1065 
1066 	/* enable dynamic CSR programming, self powered and device speed */
1067 	if (speed_fs)
1068 		pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
1069 				UDC_DEVCFG_SP | UDC_DEVCFG_SPD_FS);
1070 	else /* default high speed */
1071 		pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
1072 				UDC_DEVCFG_SP | UDC_DEVCFG_SPD_HS);
1073 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR,
1074 			(PCH_UDC_THLEN << UDC_DEVCTL_THLEN_SHIFT) |
1075 			(PCH_UDC_BRLEN << UDC_DEVCTL_BRLEN_SHIFT) |
1076 			UDC_DEVCTL_MODE | UDC_DEVCTL_BREN |
1077 			UDC_DEVCTL_THE);
1078 }
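
/*
 * Summary of the init sequence above: soft-reset the core and the PHY, mask
 * and acknowledge all device and endpoint interrupts, select the configured
 * speed with dynamic CSR programming and self-powered set, then program the
 * burst and threshold lengths and enable device mode, burst and threshold
 * handling in DEVCTL.
 */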
1079 
1080 /**
1081  * pch_udc_exit() - This API shuts down the USB device controller
1082  * @dev:	Reference to pch_udc_regs structure
1083  */
1084 static void pch_udc_exit(struct pch_udc_dev *dev)
1085 {
1086 	/* mask all device interrupts */
1087 	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
1088 	/* mask all ep interrupts */
1089 	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1090 	/* put device in disconnected state */
1091 	pch_udc_set_disconnect(dev);
1092 }
1093 
1094 /**
1095  * pch_udc_pcd_get_frame() - This API is invoked to get the current frame number
1096  * @gadget:	Reference to the gadget driver
1097  *
1098  * Return codes:
1099  *	frame number:	Success
1100  *	-EINVAL:	If the gadget passed is NULL
1101  */
1102 static int pch_udc_pcd_get_frame(struct usb_gadget *gadget)
1103 {
1104 	struct pch_udc_dev	*dev;
1105 
1106 	if (!gadget)
1107 		return -EINVAL;
1108 	dev = container_of(gadget, struct pch_udc_dev, gadget);
1109 	return pch_udc_get_frame(dev);
1110 }
1111 
1112 /**
1113  * pch_udc_pcd_wakeup() - This API is invoked to initiate a remote wakeup
1114  * @gadget:	Reference to the gadget driver
1115  *
1116  * Return codes:
1117  *	0:		Success
1118  *	-EINVAL:	If the gadget passed is NULL
1119  */
1120 static int pch_udc_pcd_wakeup(struct usb_gadget *gadget)
1121 {
1122 	struct pch_udc_dev	*dev;
1123 	unsigned long		flags;
1124 
1125 	if (!gadget)
1126 		return -EINVAL;
1127 	dev = container_of(gadget, struct pch_udc_dev, gadget);
1128 	spin_lock_irqsave(&dev->lock, flags);
1129 	pch_udc_rmt_wakeup(dev);
1130 	spin_unlock_irqrestore(&dev->lock, flags);
1131 	return 0;
1132 }
1133 
1134 /**
1135  * pch_udc_pcd_selfpowered() - This API is invoked to specify whether the device
1136  *				is self powered or not
1137  * @gadget:	Reference to the gadget driver
1138  * @value:	Specifies self powered or not
1139  *
1140  * Return codes:
1141  *	0:		Success
1142  *	-EINVAL:	If the gadget passed is NULL
1143  */
1144 static int pch_udc_pcd_selfpowered(struct usb_gadget *gadget, int value)
1145 {
1146 	struct pch_udc_dev	*dev;
1147 
1148 	if (!gadget)
1149 		return -EINVAL;
1150 	gadget->is_selfpowered = (value != 0);
1151 	dev = container_of(gadget, struct pch_udc_dev, gadget);
1152 	if (value)
1153 		pch_udc_set_selfpowered(dev);
1154 	else
1155 		pch_udc_clear_selfpowered(dev);
1156 	return 0;
1157 }
1158 
1159 /**
1160  * pch_udc_pcd_pullup() - This API is invoked to make the device
1161  *				visible/invisible to the host
1162  * @gadget:	Reference to the gadget driver
1163  * @is_on:	Specifies whether the pull up is made active or inactive
1164  *
1165  * Return codes:
1166  *	0:		Success
1167  *	-EINVAL:	If the gadget passed is NULL
1168  */
1169 static int pch_udc_pcd_pullup(struct usb_gadget *gadget, int is_on)
1170 {
1171 	struct pch_udc_dev	*dev;
1172 
1173 	if (!gadget)
1174 		return -EINVAL;
1175 	dev = container_of(gadget, struct pch_udc_dev, gadget);
1176 	if (is_on) {
1177 		pch_udc_reconnect(dev);
1178 	} else {
1179 		if (dev->driver && dev->driver->disconnect) {
1180 			spin_lock(&dev->lock);
1181 			dev->driver->disconnect(&dev->gadget);
1182 			spin_unlock(&dev->lock);
1183 		}
1184 		pch_udc_set_disconnect(dev);
1185 	}
1186 
1187 	return 0;
1188 }
1189 
1190 /**
1191  * pch_udc_pcd_vbus_session() - This API is used by a driver for an external
1192  *				transceiver (or GPIO) that
1193  *				detects a VBUS power session starting/ending
1194  * @gadget:	Reference to the gadget driver
1195  * @is_active:	specifies whether the session is starting or ending
1196  *
1197  * Return codes:
1198  *	0:		Success
1199  *	-EINVAL:	If the gadget passed is NULL
1200  */
1201 static int pch_udc_pcd_vbus_session(struct usb_gadget *gadget, int is_active)
1202 {
1203 	struct pch_udc_dev	*dev;
1204 
1205 	if (!gadget)
1206 		return -EINVAL;
1207 	dev = container_of(gadget, struct pch_udc_dev, gadget);
1208 	pch_udc_vbus_session(dev, is_active);
1209 	return 0;
1210 }
1211 
1212 /**
1213  * pch_udc_pcd_vbus_draw() - This API is used by gadget drivers during
1214  *				SET_CONFIGURATION calls to
1215  *				specify how much power the device can consume
1216  * @gadget:	Reference to the gadget driver
1217  * @mA:		specifies the current limit in mA
1218  *
1219  * Return codes:
1220  *	-EINVAL:	If the gadget passed is NULL
1221  *	-EOPNOTSUPP:
1222  */
1223 static int pch_udc_pcd_vbus_draw(struct usb_gadget *gadget, unsigned int mA)
1224 {
1225 	return -EOPNOTSUPP;
1226 }
1227 
1228 static int pch_udc_start(struct usb_gadget *g,
1229 		struct usb_gadget_driver *driver);
1230 static int pch_udc_stop(struct usb_gadget *g);
1231 
1232 static const struct usb_gadget_ops pch_udc_ops = {
1233 	.get_frame = pch_udc_pcd_get_frame,
1234 	.wakeup = pch_udc_pcd_wakeup,
1235 	.set_selfpowered = pch_udc_pcd_selfpowered,
1236 	.pullup = pch_udc_pcd_pullup,
1237 	.vbus_session = pch_udc_pcd_vbus_session,
1238 	.vbus_draw = pch_udc_pcd_vbus_draw,
1239 	.udc_start = pch_udc_start,
1240 	.udc_stop = pch_udc_stop,
1241 };
1242 
1243 /**
1244  * pch_vbus_gpio_get_value() - This API gets value of GPIO port as VBUS status.
1245  * @dev:	Reference to the driver structure
1246  *
1247  * Return value:
1248  *	1: VBUS is high
1249  *	0: VBUS is low
1250  *     -1: VBUS detection via GPIO is not enabled
1251  */
1252 static int pch_vbus_gpio_get_value(struct pch_udc_dev *dev)
1253 {
1254 	int vbus = 0;
1255 
1256 	if (dev->vbus_gpio.port)
1257 		vbus = gpio_get_value(dev->vbus_gpio.port) ? 1 : 0;
1258 	else
1259 		vbus = -1;
1260 
1261 	return vbus;
1262 }
1263 
1264 /**
1265  * pch_vbus_gpio_work_fall() - This API keeps watch on VBUS becoming Low.
1266  *                             If VBUS is Low, disconnect is processed
1267  * @irq_work:	Structure for WorkQueue
1268  *
1269  */
1270 static void pch_vbus_gpio_work_fall(struct work_struct *irq_work)
1271 {
1272 	struct pch_vbus_gpio_data *vbus_gpio = container_of(irq_work,
1273 		struct pch_vbus_gpio_data, irq_work_fall);
1274 	struct pch_udc_dev *dev =
1275 		container_of(vbus_gpio, struct pch_udc_dev, vbus_gpio);
1276 	int vbus_saved = -1;
1277 	int vbus;
1278 	int count;
1279 
1280 	if (!dev->vbus_gpio.port)
1281 		return;
1282 
1283 	for (count = 0; count < (PCH_VBUS_PERIOD / PCH_VBUS_INTERVAL);
1284 		count++) {
1285 		vbus = pch_vbus_gpio_get_value(dev);
1286 
1287 		if ((vbus_saved == vbus) && (vbus == 0)) {
1288 			dev_dbg(&dev->pdev->dev, "VBUS fell\n");
1289 			if (dev->driver
1290 				&& dev->driver->disconnect) {
1291 				dev->driver->disconnect(
1292 					&dev->gadget);
1293 			}
1294 			if (dev->vbus_gpio.intr)
1295 				pch_udc_init(dev);
1296 			else
1297 				pch_udc_reconnect(dev);
1298 			return;
1299 		}
1300 		vbus_saved = vbus;
1301 		mdelay(PCH_VBUS_INTERVAL);
1302 	}
1303 }
1304 
1305 /**
1306  * pch_vbus_gpio_work_rise() - This API checks VBUS is High.
1307  *                             If VBUS is High, connect is processed
1308  * @irq_work:	Structure for WorkQueue
1309  *
1310  */
1311 static void pch_vbus_gpio_work_rise(struct work_struct *irq_work)
1312 {
1313 	struct pch_vbus_gpio_data *vbus_gpio = container_of(irq_work,
1314 		struct pch_vbus_gpio_data, irq_work_rise);
1315 	struct pch_udc_dev *dev =
1316 		container_of(vbus_gpio, struct pch_udc_dev, vbus_gpio);
1317 	int vbus;
1318 
1319 	if (!dev->vbus_gpio.port)
1320 		return;
1321 
1322 	mdelay(PCH_VBUS_INTERVAL);
1323 	vbus = pch_vbus_gpio_get_value(dev);
1324 
1325 	if (vbus == 1) {
1326 		dev_dbg(&dev->pdev->dev, "VBUS rose\n");
1327 		pch_udc_reconnect(dev);
1328 		return;
1329 	}
1330 }
1331 
1332 /**
1333  * pch_vbus_gpio_irq() - IRQ handler for GPIO interrupt for changing VBUS
1334  * @irq:	Interrupt request number
1335  * @data:	Reference to the device structure
1336  *
1337  * Return codes:
1338  *	IRQ_HANDLED:	the VBUS change was handed off to a work item
1339  *	IRQ_NONE:	VBUS detection via this GPIO interrupt is not enabled
1340  */
1341 static irqreturn_t pch_vbus_gpio_irq(int irq, void *data)
1342 {
1343 	struct pch_udc_dev *dev = (struct pch_udc_dev *)data;
1344 
1345 	if (!dev->vbus_gpio.port || !dev->vbus_gpio.intr)
1346 		return IRQ_NONE;
1347 
1348 	if (pch_vbus_gpio_get_value(dev))
1349 		schedule_work(&dev->vbus_gpio.irq_work_rise);
1350 	else
1351 		schedule_work(&dev->vbus_gpio.irq_work_fall);
1352 
1353 	return IRQ_HANDLED;
1354 }
1355 
1356 /**
1357  * pch_vbus_gpio_init() - This API initializes GPIO port detecting VBUS.
1358  * @dev:	Reference to the driver structure
1359  * @vbus_gpio_port:	GPIO port number used to detect VBUS
1360  *
1361  * Return codes:
1362  *	0: Success
1363  *	-EINVAL: GPIO port is invalid or can't be initialized.
1364  */
1365 static int pch_vbus_gpio_init(struct pch_udc_dev *dev, int vbus_gpio_port)
1366 {
1367 	int err;
1368 	int irq_num = 0;
1369 
1370 	dev->vbus_gpio.port = 0;
1371 	dev->vbus_gpio.intr = 0;
1372 
1373 	if (vbus_gpio_port <= -1)
1374 		return -EINVAL;
1375 
1376 	err = gpio_is_valid(vbus_gpio_port);
1377 	if (!err) {
1378 		pr_err("%s: gpio port %d is invalid\n",
1379 			__func__, vbus_gpio_port);
1380 		return -EINVAL;
1381 	}
1382 
1383 	err = gpio_request(vbus_gpio_port, "pch_vbus");
1384 	if (err) {
1385 		pr_err("%s: can't request gpio port %d, err: %d\n",
1386 			__func__, vbus_gpio_port, err);
1387 		return -EINVAL;
1388 	}
1389 
1390 	dev->vbus_gpio.port = vbus_gpio_port;
1391 	gpio_direction_input(vbus_gpio_port);
1392 	INIT_WORK(&dev->vbus_gpio.irq_work_fall, pch_vbus_gpio_work_fall);
1393 
1394 	irq_num = gpio_to_irq(vbus_gpio_port);
1395 	if (irq_num > 0) {
1396 		irq_set_irq_type(irq_num, IRQ_TYPE_EDGE_BOTH);
1397 		err = request_irq(irq_num, pch_vbus_gpio_irq, 0,
1398 			"vbus_detect", dev);
1399 		if (!err) {
1400 			dev->vbus_gpio.intr = irq_num;
1401 			INIT_WORK(&dev->vbus_gpio.irq_work_rise,
1402 				pch_vbus_gpio_work_rise);
1403 		} else {
1404 			pr_err("%s: can't request irq %d, err: %d\n",
1405 				__func__, irq_num, err);
1406 		}
1407 	}
1408 
1409 	return 0;
1410 }
1411 
1412 /**
1413  * pch_vbus_gpio_free() - This API frees resources of GPIO port
1414  * @dev:	Reference to the driver structure
1415  */
1416 static void pch_vbus_gpio_free(struct pch_udc_dev *dev)
1417 {
1418 	if (dev->vbus_gpio.intr)
1419 		free_irq(dev->vbus_gpio.intr, dev);
1420 
1421 	if (dev->vbus_gpio.port)
1422 		gpio_free(dev->vbus_gpio.port);
1423 }
1424 
1425 /**
1426  * complete_req() - This API is invoked from the driver when processing
1427  *			of a request is complete
1428  * @ep:		Reference to the endpoint structure
1429  * @req:	Reference to the request structure
1430  * @status:	Indicates the success/failure of completion
1431  */
1432 static void complete_req(struct pch_udc_ep *ep, struct pch_udc_request *req,
1433 								 int status)
1434 	__releases(&dev->lock)
1435 	__acquires(&dev->lock)
1436 {
1437 	struct pch_udc_dev	*dev;
1438 	unsigned halted = ep->halted;
1439 
1440 	list_del_init(&req->queue);
1441 
1442 	/* set new status if pending */
1443 	if (req->req.status == -EINPROGRESS)
1444 		req->req.status = status;
1445 	else
1446 		status = req->req.status;
1447 
1448 	dev = ep->dev;
1449 	if (req->dma_mapped) {
1450 		if (req->dma == DMA_ADDR_INVALID) {
1451 			if (ep->in)
1452 				dma_unmap_single(&dev->pdev->dev, req->req.dma,
1453 						 req->req.length,
1454 						 DMA_TO_DEVICE);
1455 			else
1456 				dma_unmap_single(&dev->pdev->dev, req->req.dma,
1457 						 req->req.length,
1458 						 DMA_FROM_DEVICE);
1459 			req->req.dma = DMA_ADDR_INVALID;
1460 		} else {
1461 			if (ep->in)
1462 				dma_unmap_single(&dev->pdev->dev, req->dma,
1463 						 req->req.length,
1464 						 DMA_TO_DEVICE);
1465 			else {
1466 				dma_unmap_single(&dev->pdev->dev, req->dma,
1467 						 req->req.length,
1468 						 DMA_FROM_DEVICE);
1469 				memcpy(req->req.buf, req->buf, req->req.length);
1470 			}
1471 			kfree(req->buf);
1472 			req->dma = DMA_ADDR_INVALID;
1473 		}
1474 		req->dma_mapped = 0;
1475 	}
1476 	ep->halted = 1;
1477 	spin_unlock(&dev->lock);
1478 	if (!ep->in)
1479 		pch_udc_ep_clear_rrdy(ep);
1480 	usb_gadget_giveback_request(&ep->ep, &req->req);
1481 	spin_lock(&dev->lock);
1482 	ep->halted = halted;
1483 }
1484 
1485 /**
1486  * empty_req_queue() - This API empties the request queue of an endpoint
1487  * @ep:		Reference to the endpoint structure
1488  */
1489 static void empty_req_queue(struct pch_udc_ep *ep)
1490 {
1491 	struct pch_udc_request	*req;
1492 
1493 	ep->halted = 1;
1494 	while (!list_empty(&ep->queue)) {
1495 		req = list_entry(ep->queue.next, struct pch_udc_request, queue);
1496 		complete_req(ep, req, -ESHUTDOWN);	/* Remove from list */
1497 	}
1498 }
1499 
1500 /**
1501  * pch_udc_free_dma_chain() - This function frees the DMA chain created
1502  *				for the request
1503  * @dev:	Reference to the driver structure
1504  * @req:	Reference to the request whose chain is to be freed
1508  */
1509 static void pch_udc_free_dma_chain(struct pch_udc_dev *dev,
1510 				   struct pch_udc_request *req)
1511 {
1512 	struct pch_udc_data_dma_desc *td = req->td_data;
1513 	unsigned i = req->chain_len;
1514 
1515 	dma_addr_t addr2;
1516 	dma_addr_t addr = (dma_addr_t)td->next;
1517 	td->next = 0x00;
1518 	for (; i > 1; --i) {
1519 		/* do not free first desc., will be done by free for request */
1520 		td = phys_to_virt(addr);
1521 		addr2 = (dma_addr_t)td->next;
1522 		dma_pool_free(dev->data_requests, td, addr);
1523 		td->next = 0x00;
1524 		addr = addr2;
1525 	}
1526 	req->chain_len = 1;
1527 }
1528 
1529 /**
1530  * pch_udc_create_dma_chain() - This function creates or reinitializes
1531  *				a DMA chain
1532  * @ep:		Reference to the endpoint structure
1533  * @req:	Reference to the request
1534  * @buf_len:	The buffer length
1535  * @gfp_flags:	Flags to be used while mapping the data buffer
1536  *
1537  * Return codes:
1538  *	0:		success,
1539  *	-ENOMEM:	dma_pool_alloc invocation fails
1540  */
1541 static int pch_udc_create_dma_chain(struct pch_udc_ep *ep,
1542 				    struct pch_udc_request *req,
1543 				    unsigned long buf_len,
1544 				    gfp_t gfp_flags)
1545 {
1546 	struct pch_udc_data_dma_desc *td = req->td_data, *last;
1547 	unsigned long bytes = req->req.length, i = 0;
1548 	dma_addr_t dma_addr;
1549 	unsigned len = 1;
1550 
1551 	if (req->chain_len > 1)
1552 		pch_udc_free_dma_chain(ep->dev, req);
1553 
1554 	if (req->dma == DMA_ADDR_INVALID)
1555 		td->dataptr = req->req.dma;
1556 	else
1557 		td->dataptr = req->dma;
1558 
1559 	td->status = PCH_UDC_BS_HST_BSY;
1560 	for (; ; bytes -= buf_len, ++len) {
1561 		td->status = PCH_UDC_BS_HST_BSY | min(buf_len, bytes);
1562 		if (bytes <= buf_len)
1563 			break;
1564 		last = td;
1565 		td = dma_pool_alloc(ep->dev->data_requests, gfp_flags,
1566 				    &dma_addr);
1567 		if (!td)
1568 			goto nomem;
1569 		i += buf_len;
1570 		td->dataptr = req->td_data->dataptr + i;
1571 		last->next = dma_addr;
1572 	}
1573 
1574 	req->td_data_last = td;
1575 	td->status |= PCH_UDC_DMA_LAST;
1576 	td->next = req->td_data_phys;
1577 	req->chain_len = len;
1578 	return 0;
1579 
1580 nomem:
1581 	if (len > 1) {
1582 		req->chain_len = len;
1583 		pch_udc_free_dma_chain(ep->dev, req);
1584 	}
1585 	req->chain_len = 1;
1586 	return -ENOMEM;
1587 }
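
/*
 * Chain layout produced above: the first descriptor is the one embedded in
 * the request (req->td_data), additional descriptors come from the
 * data_requests DMA pool, each covers at most @buf_len bytes of the request
 * buffer, and the final descriptor is flagged PCH_UDC_DMA_LAST with its
 * @next pointing back at req->td_data_phys.
 */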
1588 
1589 /**
1590  * prepare_dma() - This function creates and initializes the DMA chain
1591  *			for the request
1592  * @ep:		Reference to the endpoint structure
1593  * @req:	Reference to the request
1594  * @gfp:	Flag to be used while mapping the data buffer
1595  *
1596  * Return codes:
1597  *	0:		Success
1598  *	Other than 0:	linux error number on failure
1599  */
1600 static int prepare_dma(struct pch_udc_ep *ep, struct pch_udc_request *req,
1601 			  gfp_t gfp)
1602 {
1603 	int	retval;
1604 
1605 	/* Allocate and create a DMA chain */
1606 	retval = pch_udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
1607 	if (retval) {
1608 		pr_err("%s: could not create DMA chain:%d\n", __func__, retval);
1609 		return retval;
1610 	}
1611 	if (ep->in)
1612 		req->td_data->status = (req->td_data->status &
1613 				~PCH_UDC_BUFF_STS) | PCH_UDC_BS_HST_RDY;
1614 	return 0;
1615 }
1616 
1617 /**
1618  * process_zlp() - This function process zero length packets
1619  *			from the gadget driver
1620  * @ep:		Reference to the endpoint structure
1621  * @req:	Reference to the request
1622  */
1623 static void process_zlp(struct pch_udc_ep *ep, struct pch_udc_request *req)
1624 {
1625 	struct pch_udc_dev	*dev = ep->dev;
1626 
1627 	/* IN zlp's are handled by hardware */
1628 	complete_req(ep, req, 0);
1629 
1630 	/* if set_config or set_intf is waiting for ack by zlp
1631 	 * then set CSR_DONE
1632 	 */
1633 	if (dev->set_cfg_not_acked) {
1634 		pch_udc_set_csr_done(dev);
1635 		dev->set_cfg_not_acked = 0;
1636 	}
1637 	/* setup command is ACK'ed now by zlp */
1638 	if (!dev->stall && dev->waiting_zlp_ack) {
1639 		pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
1640 		dev->waiting_zlp_ack = 0;
1641 	}
1642 }
1643 
1644 /**
1645  * pch_udc_start_rxrequest() - This function starts a receive request.
1646  * @ep:		Reference to the endpoint structure
1647  * @req:	Reference to the request structure
1648  */
1649 static void pch_udc_start_rxrequest(struct pch_udc_ep *ep,
1650 					 struct pch_udc_request *req)
1651 {
1652 	struct pch_udc_data_dma_desc *td_data;
1653 
1654 	pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
1655 	td_data = req->td_data;
1656 	/* Set the status bits for all descriptors */
1657 	while (1) {
1658 		td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
1659 				    PCH_UDC_BS_HST_RDY;
1660 		if ((td_data->status & PCH_UDC_DMA_LAST) ==  PCH_UDC_DMA_LAST)
1661 			break;
1662 		td_data = phys_to_virt(td_data->next);
1663 	}
1664 	/* Write the descriptor pointer */
1665 	pch_udc_ep_set_ddptr(ep, req->td_data_phys);
1666 	req->dma_going = 1;
1667 	pch_udc_enable_ep_interrupts(ep->dev, UDC_EPINT_OUT_EP0 << ep->num);
1668 	pch_udc_set_dma(ep->dev, DMA_DIR_RX);
1669 	pch_udc_ep_clear_nak(ep);
1670 	pch_udc_ep_set_rrdy(ep);
1671 }
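
/*
 * Receive start sequence above: pause RX DMA, hand every descriptor of the
 * chain back to the controller (HOST_RDY), program the descriptor pointer,
 * re-enable the endpoint's OUT interrupt and RX DMA, then clear NAK and set
 * RRDY so the hardware can start filling buffers.
 */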
1672 
1673 /**
1674  * pch_udc_pcd_ep_enable() - This API enables the endpoint. It is called
1675  *				from gadget driver
1676  * @usbep:	Reference to the USB endpoint structure
1677  * @desc:	Reference to the USB endpoint descriptor structure
1678  *
1679  * Return codes:
1680  *	0:		Success
1681  *	-EINVAL:
1682  *	-ESHUTDOWN:
1683  */
1684 static int pch_udc_pcd_ep_enable(struct usb_ep *usbep,
1685 				    const struct usb_endpoint_descriptor *desc)
1686 {
1687 	struct pch_udc_ep	*ep;
1688 	struct pch_udc_dev	*dev;
1689 	unsigned long		iflags;
1690 
1691 	if (!usbep || (usbep->name == ep0_string) || !desc ||
1692 	    (desc->bDescriptorType != USB_DT_ENDPOINT) || !desc->wMaxPacketSize)
1693 		return -EINVAL;
1694 
1695 	ep = container_of(usbep, struct pch_udc_ep, ep);
1696 	dev = ep->dev;
1697 	if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
1698 		return -ESHUTDOWN;
1699 	spin_lock_irqsave(&dev->lock, iflags);
1700 	ep->ep.desc = desc;
1701 	ep->halted = 0;
1702 	pch_udc_ep_enable(ep, &ep->dev->cfg_data, desc);
1703 	ep->ep.maxpacket = usb_endpoint_maxp(desc);
1704 	pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1705 	spin_unlock_irqrestore(&dev->lock, iflags);
1706 	return 0;
1707 }
1708 
1709 /**
1710  * pch_udc_pcd_ep_disable() - This API disables endpoint and is called
1711  *				from gadget driver
1712  * @usbep:	Reference to the USB endpoint structure
1713  *
1714  * Return codes:
1715  *	0:		Success
1716  *	-EINVAL:
1717  */
1718 static int pch_udc_pcd_ep_disable(struct usb_ep *usbep)
1719 {
1720 	struct pch_udc_ep	*ep;
1721 	unsigned long	iflags;
1722 
1723 	if (!usbep)
1724 		return -EINVAL;
1725 
1726 	ep = container_of(usbep, struct pch_udc_ep, ep);
1727 	if ((usbep->name == ep0_string) || !ep->ep.desc)
1728 		return -EINVAL;
1729 
1730 	spin_lock_irqsave(&ep->dev->lock, iflags);
1731 	empty_req_queue(ep);
1732 	ep->halted = 1;
1733 	pch_udc_ep_disable(ep);
1734 	pch_udc_disable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1735 	ep->ep.desc = NULL;
1736 	INIT_LIST_HEAD(&ep->queue);
1737 	spin_unlock_irqrestore(&ep->dev->lock, iflags);
1738 	return 0;
1739 }
1740 
1741 /**
1742  * pch_udc_alloc_request() - This function allocates request structure.
1743  *				It is called by gadget driver
1744  * @usbep:	Reference to the USB endpoint structure
1745  * @gfp:	Flag to be used while allocating memory
1746  *
1747  * Return codes:
1748  *	NULL:			Failure
1749  *	Allocated address:	Success
1750  */
1751 static struct usb_request *pch_udc_alloc_request(struct usb_ep *usbep,
1752 						  gfp_t gfp)
1753 {
1754 	struct pch_udc_request		*req;
1755 	struct pch_udc_ep		*ep;
1756 	struct pch_udc_data_dma_desc	*dma_desc;
1757 
1758 	if (!usbep)
1759 		return NULL;
1760 	ep = container_of(usbep, struct pch_udc_ep, ep);
1761 	req = kzalloc(sizeof *req, gfp);
1762 	if (!req)
1763 		return NULL;
1764 	req->req.dma = DMA_ADDR_INVALID;
1765 	req->dma = DMA_ADDR_INVALID;
1766 	INIT_LIST_HEAD(&req->queue);
1767 	if (!ep->dev->dma_addr)
1768 		return &req->req;
1769 	/* ep0 in requests are allocated from data pool here */
1770 	dma_desc = dma_pool_alloc(ep->dev->data_requests, gfp,
1771 				  &req->td_data_phys);
	if (!dma_desc) {
1773 		kfree(req);
1774 		return NULL;
1775 	}
1776 	/* prevent from using desc. - set HOST BUSY */
1777 	dma_desc->status |= PCH_UDC_BS_HST_BSY;
1778 	dma_desc->dataptr = cpu_to_le32(DMA_ADDR_INVALID);
1779 	req->td_data = dma_desc;
1780 	req->td_data_last = dma_desc;
1781 	req->chain_len = 1;
1782 	return &req->req;
1783 }
1784 
1785 /**
 * pch_udc_free_request() - This function frees a request structure.
 *				It is called by the gadget driver
1788  * @usbep:	Reference to the USB endpoint structure
1789  * @usbreq:	Reference to the USB request
1790  */
1791 static void pch_udc_free_request(struct usb_ep *usbep,
1792 				  struct usb_request *usbreq)
1793 {
1794 	struct pch_udc_ep	*ep;
1795 	struct pch_udc_request	*req;
1796 	struct pch_udc_dev	*dev;
1797 
1798 	if (!usbep || !usbreq)
1799 		return;
1800 	ep = container_of(usbep, struct pch_udc_ep, ep);
1801 	req = container_of(usbreq, struct pch_udc_request, req);
1802 	dev = ep->dev;
1803 	if (!list_empty(&req->queue))
1804 		dev_err(&dev->pdev->dev, "%s: %s req=0x%p queue not empty\n",
1805 			__func__, usbep->name, req);
1806 	if (req->td_data != NULL) {
1807 		if (req->chain_len > 1)
1808 			pch_udc_free_dma_chain(ep->dev, req);
1809 		dma_pool_free(ep->dev->data_requests, req->td_data,
1810 			      req->td_data_phys);
1811 	}
1812 	kfree(req);
1813 }
1814 
1815 /**
 * pch_udc_pcd_queue() - This function queues a request packet. It is called
 *			by the gadget driver
1818  * @usbep:	Reference to the USB endpoint structure
1819  * @usbreq:	Reference to the USB request
1820  * @gfp:	Flag to be used while mapping the data buffer
1821  *
1822  * Return codes:
1823  *	0:			Success
1824  *	linux error number:	Failure
1825  */
1826 static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
1827 								 gfp_t gfp)
1828 {
1829 	int retval = 0;
1830 	struct pch_udc_ep	*ep;
1831 	struct pch_udc_dev	*dev;
1832 	struct pch_udc_request	*req;
1833 	unsigned long	iflags;
1834 
1835 	if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf)
1836 		return -EINVAL;
1837 	ep = container_of(usbep, struct pch_udc_ep, ep);
1838 	dev = ep->dev;
1839 	if (!ep->ep.desc && ep->num)
1840 		return -EINVAL;
1841 	req = container_of(usbreq, struct pch_udc_request, req);
1842 	if (!list_empty(&req->queue))
1843 		return -EINVAL;
1844 	if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
1845 		return -ESHUTDOWN;
1846 	spin_lock_irqsave(&dev->lock, iflags);
1847 	/* map the buffer for dma */
1848 	if (usbreq->length &&
1849 	    ((usbreq->dma == DMA_ADDR_INVALID) || !usbreq->dma)) {
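		/*
		 * Map the buffer directly only when it is 32-bit aligned;
		 * otherwise copy the data through a bounce buffer
		 * allocated below.
		 */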
1850 		if (!((unsigned long)(usbreq->buf) & 0x03)) {
1851 			if (ep->in)
1852 				usbreq->dma = dma_map_single(&dev->pdev->dev,
1853 							     usbreq->buf,
1854 							     usbreq->length,
1855 							     DMA_TO_DEVICE);
1856 			else
1857 				usbreq->dma = dma_map_single(&dev->pdev->dev,
1858 							     usbreq->buf,
1859 							     usbreq->length,
1860 							     DMA_FROM_DEVICE);
1861 		} else {
1862 			req->buf = kzalloc(usbreq->length, GFP_ATOMIC);
1863 			if (!req->buf) {
1864 				retval = -ENOMEM;
1865 				goto probe_end;
1866 			}
1867 			if (ep->in) {
1868 				memcpy(req->buf, usbreq->buf, usbreq->length);
1869 				req->dma = dma_map_single(&dev->pdev->dev,
1870 							  req->buf,
1871 							  usbreq->length,
1872 							  DMA_TO_DEVICE);
1873 			} else
1874 				req->dma = dma_map_single(&dev->pdev->dev,
1875 							  req->buf,
1876 							  usbreq->length,
1877 							  DMA_FROM_DEVICE);
1878 		}
1879 		req->dma_mapped = 1;
1880 	}
1881 	if (usbreq->length > 0) {
1882 		retval = prepare_dma(ep, req, GFP_ATOMIC);
1883 		if (retval)
1884 			goto probe_end;
1885 	}
1886 	usbreq->actual = 0;
1887 	usbreq->status = -EINPROGRESS;
1888 	req->dma_done = 0;
1889 	if (list_empty(&ep->queue) && !ep->halted) {
1890 		/* no pending transfer, so start this req */
1891 		if (!usbreq->length) {
1892 			process_zlp(ep, req);
1893 			retval = 0;
1894 			goto probe_end;
1895 		}
1896 		if (!ep->in) {
1897 			pch_udc_start_rxrequest(ep, req);
1898 		} else {
			/*
			 * For IN transfers the descriptors will be
			 * programmed and the P bit will be set when we
			 * get an IN token.
			 */
1904 			pch_udc_wait_ep_stall(ep);
1905 			pch_udc_ep_clear_nak(ep);
1906 			pch_udc_enable_ep_interrupts(ep->dev, (1 << ep->num));
1907 		}
1908 	}
1909 	/* Now add this request to the ep's pending requests */
1910 	if (req != NULL)
1911 		list_add_tail(&req->queue, &ep->queue);
1912 
1913 probe_end:
1914 	spin_unlock_irqrestore(&dev->lock, iflags);
1915 	return retval;
1916 }
1917 
1918 /**
 * pch_udc_pcd_dequeue() - This function de-queues a request packet.
 *				It is called by the gadget driver
1921  * @usbep:	Reference to the USB endpoint structure
1922  * @usbreq:	Reference to the USB request
1923  *
1924  * Return codes:
1925  *	0:			Success
1926  *	linux error number:	Failure
1927  */
1928 static int pch_udc_pcd_dequeue(struct usb_ep *usbep,
1929 				struct usb_request *usbreq)
1930 {
1931 	struct pch_udc_ep	*ep;
1932 	struct pch_udc_request	*req;
1933 	unsigned long		flags;
1934 	int ret = -EINVAL;
1935 
	if (!usbep || !usbreq)
		return ret;
	ep = container_of(usbep, struct pch_udc_ep, ep);
	if (!ep->ep.desc && ep->num)
		return ret;
1939 	req = container_of(usbreq, struct pch_udc_request, req);
1940 	spin_lock_irqsave(&ep->dev->lock, flags);
1941 	/* make sure it's still queued on this endpoint */
1942 	list_for_each_entry(req, &ep->queue, queue) {
1943 		if (&req->req == usbreq) {
1944 			pch_udc_ep_set_nak(ep);
1945 			if (!list_empty(&req->queue))
1946 				complete_req(ep, req, -ECONNRESET);
1947 			ret = 0;
1948 			break;
1949 		}
1950 	}
1951 	spin_unlock_irqrestore(&ep->dev->lock, flags);
1952 	return ret;
1953 }
1954 
1955 /**
 * pch_udc_pcd_set_halt() - This function sets or clears the endpoint halt
 *			    feature
1958  * @usbep:	Reference to the USB endpoint structure
1959  * @halt:	Specifies whether to set or clear the feature
1960  *
1961  * Return codes:
1962  *	0:			Success
1963  *	linux error number:	Failure
1964  */
1965 static int pch_udc_pcd_set_halt(struct usb_ep *usbep, int halt)
1966 {
1967 	struct pch_udc_ep	*ep;
1968 	unsigned long iflags;
1969 	int ret;
1970 
1971 	if (!usbep)
1972 		return -EINVAL;
1973 	ep = container_of(usbep, struct pch_udc_ep, ep);
1974 	if (!ep->ep.desc && !ep->num)
1975 		return -EINVAL;
1976 	if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
1977 		return -ESHUTDOWN;
1978 	spin_lock_irqsave(&udc_stall_spinlock, iflags);
1979 	if (list_empty(&ep->queue)) {
1980 		if (halt) {
1981 			if (ep->num == PCH_UDC_EP0)
1982 				ep->dev->stall = 1;
1983 			pch_udc_ep_set_stall(ep);
1984 			pch_udc_enable_ep_interrupts(
1985 				ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1986 		} else {
1987 			pch_udc_ep_clear_stall(ep);
1988 		}
1989 		ret = 0;
1990 	} else {
1991 		ret = -EAGAIN;
1992 	}
1993 	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
1994 	return ret;
1995 }
1996 
1997 /**
 * pch_udc_pcd_set_wedge() - This function sets the endpoint wedge feature
 *				(a halt that ignores clear requests from the
 *				host)
 * @usbep:	Reference to the USB endpoint structure
2002  *
2003  * Return codes:
2004  *	0:			Success
2005  *	linux error number:	Failure
2006  */
2007 static int pch_udc_pcd_set_wedge(struct usb_ep *usbep)
2008 {
2009 	struct pch_udc_ep	*ep;
2010 	unsigned long iflags;
2011 	int ret;
2012 
2013 	if (!usbep)
2014 		return -EINVAL;
2015 	ep = container_of(usbep, struct pch_udc_ep, ep);
2016 	if (!ep->ep.desc && !ep->num)
2017 		return -EINVAL;
2018 	if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
2019 		return -ESHUTDOWN;
2020 	spin_lock_irqsave(&udc_stall_spinlock, iflags);
2021 	if (!list_empty(&ep->queue)) {
2022 		ret = -EAGAIN;
2023 	} else {
2024 		if (ep->num == PCH_UDC_EP0)
2025 			ep->dev->stall = 1;
2026 		pch_udc_ep_set_stall(ep);
2027 		pch_udc_enable_ep_interrupts(ep->dev,
2028 					     PCH_UDC_EPINT(ep->in, ep->num));
2029 		ep->dev->prot_stall = 1;
2030 		ret = 0;
2031 	}
2032 	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
2033 	return ret;
2034 }
2035 
2036 /**
 * pch_udc_pcd_fifo_flush() - This function flushes the FIFO of the
 *				specified endpoint
2038  * @usbep:	Reference to the USB endpoint structure
2039  */
2040 static void pch_udc_pcd_fifo_flush(struct usb_ep *usbep)
2041 {
2042 	struct pch_udc_ep  *ep;
2043 
2044 	if (!usbep)
2045 		return;
2046 
2047 	ep = container_of(usbep, struct pch_udc_ep, ep);
2048 	if (ep->ep.desc || !ep->num)
2049 		pch_udc_ep_fifo_flush(ep, ep->in);
2050 }
2051 
2052 static const struct usb_ep_ops pch_udc_ep_ops = {
2053 	.enable		= pch_udc_pcd_ep_enable,
2054 	.disable	= pch_udc_pcd_ep_disable,
2055 	.alloc_request	= pch_udc_alloc_request,
2056 	.free_request	= pch_udc_free_request,
2057 	.queue		= pch_udc_pcd_queue,
2058 	.dequeue	= pch_udc_pcd_dequeue,
2059 	.set_halt	= pch_udc_pcd_set_halt,
2060 	.set_wedge	= pch_udc_pcd_set_wedge,
2061 	.fifo_status	= NULL,
2062 	.fifo_flush	= pch_udc_pcd_fifo_flush,
2063 };
2064 
2065 /**
2066  * pch_udc_init_setup_buff() - This function initializes the SETUP buffer
 * @td_stp:	Reference to the SETUP buffer structure
2068  */
2069 static void pch_udc_init_setup_buff(struct pch_udc_stp_dma_desc *td_stp)
2070 {
2071 	static u32	pky_marker;
2072 
2073 	if (!td_stp)
2074 		return;
2075 	td_stp->reserved = ++pky_marker;
2076 	memset(&td_stp->request, 0xFF, sizeof td_stp->request);
2077 	td_stp->status = PCH_UDC_BS_HST_RDY;
2078 }
2079 
2080 /**
 * pch_udc_start_next_txrequest() - This function starts
 *					the next transmit request
2083  * @ep:	Reference to the endpoint structure
2084  */
2085 static void pch_udc_start_next_txrequest(struct pch_udc_ep *ep)
2086 {
2087 	struct pch_udc_request *req;
2088 	struct pch_udc_data_dma_desc *td_data;
2089 
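	/*
	 * If a transfer is already in progress on this endpoint (P bit
	 * still set), do not start another request yet.
	 */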
2090 	if (pch_udc_read_ep_control(ep) & UDC_EPCTL_P)
2091 		return;
2092 
2093 	if (list_empty(&ep->queue))
2094 		return;
2095 
2096 	/* next request */
2097 	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2098 	if (req->dma_going)
2099 		return;
2100 	if (!req->td_data)
2101 		return;
2102 	pch_udc_wait_ep_stall(ep);
2103 	req->dma_going = 1;
2104 	pch_udc_ep_set_ddptr(ep, 0);
2105 	td_data = req->td_data;
2106 	while (1) {
2107 		td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
2108 				   PCH_UDC_BS_HST_RDY;
2109 		if ((td_data->status & PCH_UDC_DMA_LAST) == PCH_UDC_DMA_LAST)
2110 			break;
2111 		td_data = phys_to_virt(td_data->next);
2112 	}
2113 	pch_udc_ep_set_ddptr(ep, req->td_data_phys);
2114 	pch_udc_set_dma(ep->dev, DMA_DIR_TX);
2115 	pch_udc_ep_set_pd(ep);
2116 	pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
2117 	pch_udc_ep_clear_nak(ep);
2118 }
2119 
2120 /**
2121  * pch_udc_complete_transfer() - This function completes a transfer
2122  * @ep:		Reference to the endpoint structure
2123  */
2124 static void pch_udc_complete_transfer(struct pch_udc_ep *ep)
2125 {
2126 	struct pch_udc_request *req;
2127 	struct pch_udc_dev *dev = ep->dev;
2128 
2129 	if (list_empty(&ep->queue))
2130 		return;
2131 	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2132 	if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
2133 	    PCH_UDC_BS_DMA_DONE)
2134 		return;
2135 	if ((req->td_data_last->status & PCH_UDC_RXTX_STS) !=
2136 	     PCH_UDC_RTS_SUCC) {
2137 		dev_err(&dev->pdev->dev, "Invalid RXTX status (0x%08x) "
2138 			"epstatus=0x%08x\n",
2139 		       (req->td_data_last->status & PCH_UDC_RXTX_STS),
2140 		       (int)(ep->epsts));
2141 		return;
2142 	}
2143 
2144 	req->req.actual = req->req.length;
2145 	req->td_data_last->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
2146 	req->td_data->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
2147 	complete_req(ep, req, 0);
2148 	req->dma_going = 0;
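	/*
	 * Re-arm the endpoint if more requests are queued, otherwise
	 * mask its interrupts.
	 */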
2149 	if (!list_empty(&ep->queue)) {
2150 		pch_udc_wait_ep_stall(ep);
2151 		pch_udc_ep_clear_nak(ep);
2152 		pch_udc_enable_ep_interrupts(ep->dev,
2153 					     PCH_UDC_EPINT(ep->in, ep->num));
2154 	} else {
2155 		pch_udc_disable_ep_interrupts(ep->dev,
2156 					      PCH_UDC_EPINT(ep->in, ep->num));
2157 	}
2158 }
2159 
2160 /**
 * pch_udc_complete_receiver() - This function completes a receive (OUT)
 *				 transfer
2162  * @ep:		Reference to the endpoint structure
2163  */
2164 static void pch_udc_complete_receiver(struct pch_udc_ep *ep)
2165 {
2166 	struct pch_udc_request *req;
2167 	struct pch_udc_dev *dev = ep->dev;
2168 	unsigned int count;
2169 	struct pch_udc_data_dma_desc *td;
2170 	dma_addr_t addr;
2171 
2172 	if (list_empty(&ep->queue))
2173 		return;
2174 	/* next request */
2175 	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2176 	pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
2177 	pch_udc_ep_set_ddptr(ep, 0);
2178 	if ((req->td_data_last->status & PCH_UDC_BUFF_STS) ==
2179 	    PCH_UDC_BS_DMA_DONE)
2180 		td = req->td_data_last;
2181 	else
2182 		td = req->td_data;
2183 
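	/*
	 * Walk the descriptor chain until the completed descriptor that
	 * carries the DMA_LAST flag, which holds the received byte count.
	 */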
2184 	while (1) {
2185 		if ((td->status & PCH_UDC_RXTX_STS) != PCH_UDC_RTS_SUCC) {
			dev_err(&dev->pdev->dev, "Invalid RXTX status=0x%08x "
				"epstatus=0x%08x\n",
				(td->status & PCH_UDC_RXTX_STS),
				(int)(ep->epsts));
2190 			return;
2191 		}
2192 		if ((td->status & PCH_UDC_BUFF_STS) == PCH_UDC_BS_DMA_DONE)
2193 			if (td->status & PCH_UDC_DMA_LAST) {
2194 				count = td->status & PCH_UDC_RXTX_BYTES;
2195 				break;
2196 			}
2197 		if (td == req->td_data_last) {
			dev_err(&dev->pdev->dev, "Incomplete RX descriptor\n");
2199 			return;
2200 		}
2201 		addr = (dma_addr_t)td->next;
2202 		td = phys_to_virt(addr);
2203 	}
2204 	/* on 64k packets the RXBYTES field is zero */
2205 	if (!count && (req->req.length == UDC_DMA_MAXPACKET))
2206 		count = UDC_DMA_MAXPACKET;
2207 	req->td_data->status |= PCH_UDC_DMA_LAST;
2208 	td->status |= PCH_UDC_BS_HST_BSY;
2209 
2210 	req->dma_going = 0;
2211 	req->req.actual = count;
2212 	complete_req(ep, req, 0);
	/* If there are new or failed requests, start the next one now */
2214 	if (!list_empty(&ep->queue)) {
2215 		req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2216 		pch_udc_start_rxrequest(ep, req);
2217 	}
2218 }
2219 
2220 /**
 * pch_udc_svc_data_in() - This function processes endpoint interrupts
 *				for IN endpoints
2223  * @dev:	Reference to the device structure
2224  * @ep_num:	Endpoint that generated the interrupt
2225  */
2226 static void pch_udc_svc_data_in(struct pch_udc_dev *dev, int ep_num)
2227 {
2228 	u32	epsts;
2229 	struct pch_udc_ep	*ep;
2230 
2231 	ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
2232 	epsts = ep->epsts;
2233 	ep->epsts = 0;
2234 
2235 	if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA  | UDC_EPSTS_HE |
2236 		       UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
2237 		       UDC_EPSTS_RSS | UDC_EPSTS_XFERDONE)))
2238 		return;
2239 	if ((epsts & UDC_EPSTS_BNA))
2240 		return;
2241 	if (epsts & UDC_EPSTS_HE)
2242 		return;
2243 	if (epsts & UDC_EPSTS_RSS) {
2244 		pch_udc_ep_set_stall(ep);
2245 		pch_udc_enable_ep_interrupts(ep->dev,
2246 					     PCH_UDC_EPINT(ep->in, ep->num));
2247 	}
2248 	if (epsts & UDC_EPSTS_RCS) {
2249 		if (!dev->prot_stall) {
2250 			pch_udc_ep_clear_stall(ep);
2251 		} else {
2252 			pch_udc_ep_set_stall(ep);
2253 			pch_udc_enable_ep_interrupts(ep->dev,
2254 						PCH_UDC_EPINT(ep->in, ep->num));
2255 		}
2256 	}
2257 	if (epsts & UDC_EPSTS_TDC)
2258 		pch_udc_complete_transfer(ep);
2259 	/* On IN interrupt, provide data if we have any */
2260 	if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_RSS) &&
2261 	    !(epsts & UDC_EPSTS_TDC) && !(epsts & UDC_EPSTS_TXEMPTY))
2262 		pch_udc_start_next_txrequest(ep);
2263 }
2264 
2265 /**
2266  * pch_udc_svc_data_out() - Handles interrupts from OUT endpoint
2267  * @dev:	Reference to the device structure
2268  * @ep_num:	Endpoint that generated the interrupt
2269  */
2270 static void pch_udc_svc_data_out(struct pch_udc_dev *dev, int ep_num)
2271 {
2272 	u32			epsts;
2273 	struct pch_udc_ep		*ep;
2274 	struct pch_udc_request		*req = NULL;
2275 
2276 	ep = &dev->ep[UDC_EPOUT_IDX(ep_num)];
2277 	epsts = ep->epsts;
2278 	ep->epsts = 0;
2279 
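	/*
	 * Buffer Not Available: if the current request's DMA has not
	 * completed yet, (re)start its receive request and wait for the
	 * next interrupt.
	 */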
2280 	if ((epsts & UDC_EPSTS_BNA) && (!list_empty(&ep->queue))) {
2281 		/* next request */
2282 		req = list_entry(ep->queue.next, struct pch_udc_request,
2283 				 queue);
2284 		if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
2285 		     PCH_UDC_BS_DMA_DONE) {
2286 			if (!req->dma_going)
2287 				pch_udc_start_rxrequest(ep, req);
2288 			return;
2289 		}
2290 	}
2291 	if (epsts & UDC_EPSTS_HE)
2292 		return;
2293 	if (epsts & UDC_EPSTS_RSS) {
2294 		pch_udc_ep_set_stall(ep);
2295 		pch_udc_enable_ep_interrupts(ep->dev,
2296 					     PCH_UDC_EPINT(ep->in, ep->num));
2297 	}
2298 	if (epsts & UDC_EPSTS_RCS) {
2299 		if (!dev->prot_stall) {
2300 			pch_udc_ep_clear_stall(ep);
2301 		} else {
2302 			pch_udc_ep_set_stall(ep);
2303 			pch_udc_enable_ep_interrupts(ep->dev,
2304 						PCH_UDC_EPINT(ep->in, ep->num));
2305 		}
2306 	}
2307 	if (((epsts & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2308 	    UDC_EPSTS_OUT_DATA) {
2309 		if (ep->dev->prot_stall == 1) {
2310 			pch_udc_ep_set_stall(ep);
2311 			pch_udc_enable_ep_interrupts(ep->dev,
2312 						PCH_UDC_EPINT(ep->in, ep->num));
2313 		} else {
2314 			pch_udc_complete_receiver(ep);
2315 		}
2316 	}
2317 	if (list_empty(&ep->queue))
2318 		pch_udc_set_dma(dev, DMA_DIR_RX);
2319 }
2320 
2321 /**
2322  * pch_udc_svc_control_in() - Handle Control IN endpoint interrupts
2323  * @dev:	Reference to the device structure
2324  */
2325 static void pch_udc_svc_control_in(struct pch_udc_dev *dev)
2326 {
2327 	u32	epsts;
2328 	struct pch_udc_ep	*ep;
2329 	struct pch_udc_ep	*ep_out;
2330 
2331 	ep = &dev->ep[UDC_EP0IN_IDX];
2332 	ep_out = &dev->ep[UDC_EP0OUT_IDX];
2333 	epsts = ep->epsts;
2334 	ep->epsts = 0;
2335 
2336 	if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA | UDC_EPSTS_HE |
2337 		       UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
2338 		       UDC_EPSTS_XFERDONE)))
2339 		return;
2340 	if ((epsts & UDC_EPSTS_BNA))
2341 		return;
2342 	if (epsts & UDC_EPSTS_HE)
2343 		return;
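	/*
	 * Transmit DMA complete on ep0-in: complete the request and hand
	 * the ep0-out data descriptor back to the controller so the
	 * status stage (or the next SETUP) can be received.
	 */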
2344 	if ((epsts & UDC_EPSTS_TDC) && (!dev->stall)) {
2345 		pch_udc_complete_transfer(ep);
2346 		pch_udc_clear_dma(dev, DMA_DIR_RX);
2347 		ep_out->td_data->status = (ep_out->td_data->status &
2348 					~PCH_UDC_BUFF_STS) |
2349 					PCH_UDC_BS_HST_RDY;
2350 		pch_udc_ep_clear_nak(ep_out);
2351 		pch_udc_set_dma(dev, DMA_DIR_RX);
2352 		pch_udc_ep_set_rrdy(ep_out);
2353 	}
2354 	/* On IN interrupt, provide data if we have any */
2355 	if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_TDC) &&
2356 	     !(epsts & UDC_EPSTS_TXEMPTY))
2357 		pch_udc_start_next_txrequest(ep);
2358 }
2359 
2360 /**
 * pch_udc_svc_control_out() - Routine that handles Control
 *					OUT endpoint interrupts
2363  * @dev:	Reference to the device structure
2364  */
2365 static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
2366 	__releases(&dev->lock)
2367 	__acquires(&dev->lock)
2368 {
2369 	u32	stat;
2370 	int setup_supported;
2371 	struct pch_udc_ep	*ep;
2372 
2373 	ep = &dev->ep[UDC_EP0OUT_IDX];
2374 	stat = ep->epsts;
2375 	ep->epsts = 0;
2376 
2377 	/* If setup data */
2378 	if (((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2379 	    UDC_EPSTS_OUT_SETUP) {
2380 		dev->stall = 0;
2381 		dev->ep[UDC_EP0IN_IDX].halted = 0;
2382 		dev->ep[UDC_EP0OUT_IDX].halted = 0;
2383 		dev->setup_data = ep->td_stp->request;
2384 		pch_udc_init_setup_buff(ep->td_stp);
2385 		pch_udc_clear_dma(dev, DMA_DIR_RX);
2386 		pch_udc_ep_fifo_flush(&(dev->ep[UDC_EP0IN_IDX]),
2387 				      dev->ep[UDC_EP0IN_IDX].in);
2388 		if ((dev->setup_data.bRequestType & USB_DIR_IN))
2389 			dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
2390 		else /* OUT */
2391 			dev->gadget.ep0 = &ep->ep;
2392 		spin_lock(&dev->lock);
		/* Bulk-Only Mass Storage Reset request */
2394 		if ((dev->setup_data.bRequestType == 0x21) &&
2395 		    (dev->setup_data.bRequest == 0xFF))
2396 			dev->prot_stall = 0;
2397 		/* call gadget with setup data received */
2398 		setup_supported = dev->driver->setup(&dev->gadget,
2399 						     &dev->setup_data);
2400 		spin_unlock(&dev->lock);
2401 
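		/*
		 * For control-IN requests, hand the ep0-out data descriptor
		 * back to the controller so that the status-OUT stage can
		 * be received after the IN data phase.
		 */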
2402 		if (dev->setup_data.bRequestType & USB_DIR_IN) {
2403 			ep->td_data->status = (ep->td_data->status &
2404 						~PCH_UDC_BUFF_STS) |
2405 						PCH_UDC_BS_HST_RDY;
2406 			pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
2407 		}
2408 		/* ep0 in returns data on IN phase */
2409 		if (setup_supported >= 0 && setup_supported <
2410 					    UDC_EP0IN_MAX_PKT_SIZE) {
2411 			pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
2412 			/* Gadget would have queued a request when
2413 			 * we called the setup */
2414 			if (!(dev->setup_data.bRequestType & USB_DIR_IN)) {
2415 				pch_udc_set_dma(dev, DMA_DIR_RX);
2416 				pch_udc_ep_clear_nak(ep);
2417 			}
2418 		} else if (setup_supported < 0) {
2419 			/* if unsupported request, then stall */
2420 			pch_udc_ep_set_stall(&(dev->ep[UDC_EP0IN_IDX]));
2421 			pch_udc_enable_ep_interrupts(ep->dev,
2422 						PCH_UDC_EPINT(ep->in, ep->num));
2423 			dev->stall = 0;
2424 			pch_udc_set_dma(dev, DMA_DIR_RX);
2425 		} else {
2426 			dev->waiting_zlp_ack = 1;
2427 		}
2428 	} else if ((((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2429 		     UDC_EPSTS_OUT_DATA) && !dev->stall) {
2430 		pch_udc_clear_dma(dev, DMA_DIR_RX);
2431 		pch_udc_ep_set_ddptr(ep, 0);
2432 		if (!list_empty(&ep->queue)) {
2433 			ep->epsts = stat;
2434 			pch_udc_svc_data_out(dev, PCH_UDC_EP0);
2435 		}
2436 		pch_udc_set_dma(dev, DMA_DIR_RX);
2437 	}
2438 	pch_udc_ep_set_rrdy(ep);
2439 }
2440 
2441 
2442 /**
 * pch_udc_postsvc_epinters() - This function enables endpoint interrupts
 *				and clears the NAK status
2445  * @dev:	Reference to the device structure
2446  * @ep_num:	End point number
2447  */
2448 static void pch_udc_postsvc_epinters(struct pch_udc_dev *dev, int ep_num)
2449 {
2450 	struct pch_udc_ep	*ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
2451 	if (list_empty(&ep->queue))
2452 		return;
2453 	pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
2454 	pch_udc_ep_clear_nak(ep);
2455 }
2456 
2457 /**
 * pch_udc_read_all_epstatus() - This function reads all endpoint status
 *				registers
 * @dev:	Reference to the device structure
 * @ep_intr:	Endpoint interrupt status bitmap
2461  */
2462 static void pch_udc_read_all_epstatus(struct pch_udc_dev *dev, u32 ep_intr)
2463 {
2464 	int i;
2465 	struct pch_udc_ep	*ep;
2466 
2467 	for (i = 0; i < PCH_UDC_USED_EP_NUM; i++) {
2468 		/* IN */
2469 		if (ep_intr & (0x1 << i)) {
2470 			ep = &dev->ep[UDC_EPIN_IDX(i)];
2471 			ep->epsts = pch_udc_read_ep_status(ep);
2472 			pch_udc_clear_ep_status(ep, ep->epsts);
2473 		}
2474 		/* OUT */
2475 		if (ep_intr & (0x10000 << i)) {
2476 			ep = &dev->ep[UDC_EPOUT_IDX(i)];
2477 			ep->epsts = pch_udc_read_ep_status(ep);
2478 			pch_udc_clear_ep_status(ep, ep->epsts);
2479 		}
2480 	}
2481 }
2482 
2483 /**
2484  * pch_udc_activate_control_ep() - This function enables the control endpoints
2485  *					for traffic after a reset
2486  * @dev:	Reference to the device structure
2487  */
2488 static void pch_udc_activate_control_ep(struct pch_udc_dev *dev)
2489 {
2490 	struct pch_udc_ep	*ep;
2491 	u32 val;
2492 
2493 	/* Setup the IN endpoint */
2494 	ep = &dev->ep[UDC_EP0IN_IDX];
2495 	pch_udc_clear_ep_control(ep);
2496 	pch_udc_ep_fifo_flush(ep, ep->in);
2497 	pch_udc_ep_set_bufsz(ep, UDC_EP0IN_BUFF_SIZE, ep->in);
2498 	pch_udc_ep_set_maxpkt(ep, UDC_EP0IN_MAX_PKT_SIZE);
2499 	/* Initialize the IN EP Descriptor */
2500 	ep->td_data      = NULL;
2501 	ep->td_stp       = NULL;
2502 	ep->td_data_phys = 0;
2503 	ep->td_stp_phys  = 0;
2504 
2505 	/* Setup the OUT endpoint */
2506 	ep = &dev->ep[UDC_EP0OUT_IDX];
2507 	pch_udc_clear_ep_control(ep);
2508 	pch_udc_ep_fifo_flush(ep, ep->in);
2509 	pch_udc_ep_set_bufsz(ep, UDC_EP0OUT_BUFF_SIZE, ep->in);
2510 	pch_udc_ep_set_maxpkt(ep, UDC_EP0OUT_MAX_PKT_SIZE);
2511 	val = UDC_EP0OUT_MAX_PKT_SIZE << UDC_CSR_NE_MAX_PKT_SHIFT;
2512 	pch_udc_write_csr(ep->dev, val, UDC_EP0OUT_IDX);
2513 
2514 	/* Initialize the SETUP buffer */
2515 	pch_udc_init_setup_buff(ep->td_stp);
2516 	/* Write the pointer address of dma descriptor */
2517 	pch_udc_ep_set_subptr(ep, ep->td_stp_phys);
2518 	/* Write the pointer address of Setup descriptor */
2519 	pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
2520 
2521 	/* Initialize the dma descriptor */
2522 	ep->td_data->status  = PCH_UDC_DMA_LAST;
2523 	ep->td_data->dataptr = dev->dma_addr;
2524 	ep->td_data->next    = ep->td_data_phys;
2525 
2526 	pch_udc_ep_clear_nak(ep);
2527 }
2528 
2529 
2530 /**
2531  * pch_udc_svc_ur_interrupt() - This function handles a USB reset interrupt
2532  * @dev:	Reference to driver structure
2533  */
2534 static void pch_udc_svc_ur_interrupt(struct pch_udc_dev *dev)
2535 {
2536 	struct pch_udc_ep	*ep;
2537 	int i;
2538 
2539 	pch_udc_clear_dma(dev, DMA_DIR_TX);
2540 	pch_udc_clear_dma(dev, DMA_DIR_RX);
2541 	/* Mask all endpoint interrupts */
2542 	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
2543 	/* clear all endpoint interrupts */
2544 	pch_udc_write_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
2545 
2546 	for (i = 0; i < PCH_UDC_EP_NUM; i++) {
2547 		ep = &dev->ep[i];
2548 		pch_udc_clear_ep_status(ep, UDC_EPSTS_ALL_CLR_MASK);
2549 		pch_udc_clear_ep_control(ep);
2550 		pch_udc_ep_set_ddptr(ep, 0);
2551 		pch_udc_write_csr(ep->dev, 0x00, i);
2552 	}
2553 	dev->stall = 0;
2554 	dev->prot_stall = 0;
2555 	dev->waiting_zlp_ack = 0;
2556 	dev->set_cfg_not_acked = 0;
2557 
	/* NAK and flush each endpoint and complete its queued requests */
2559 	for (i = 0; i < (PCH_UDC_USED_EP_NUM*2); i++) {
2560 		ep = &dev->ep[i];
2561 		pch_udc_ep_set_nak(ep);
2562 		pch_udc_ep_fifo_flush(ep, ep->in);
2563 		/* Complete request queue */
2564 		empty_req_queue(ep);
2565 	}
2566 	if (dev->driver) {
2567 		spin_unlock(&dev->lock);
2568 		usb_gadget_udc_reset(&dev->gadget, dev->driver);
2569 		spin_lock(&dev->lock);
2570 	}
2571 }
2572 
2573 /**
2574  * pch_udc_svc_enum_interrupt() - This function handles a USB speed enumeration
2575  *				done interrupt
2576  * @dev:	Reference to driver structure
2577  */
2578 static void pch_udc_svc_enum_interrupt(struct pch_udc_dev *dev)
2579 {
2580 	u32 dev_stat, dev_speed;
2581 	u32 speed = USB_SPEED_FULL;
2582 
2583 	dev_stat = pch_udc_read_device_status(dev);
2584 	dev_speed = (dev_stat & UDC_DEVSTS_ENUM_SPEED_MASK) >>
2585 						 UDC_DEVSTS_ENUM_SPEED_SHIFT;
2586 	switch (dev_speed) {
2587 	case UDC_DEVSTS_ENUM_SPEED_HIGH:
2588 		speed = USB_SPEED_HIGH;
2589 		break;
2590 	case  UDC_DEVSTS_ENUM_SPEED_FULL:
2591 		speed = USB_SPEED_FULL;
2592 		break;
2593 	case  UDC_DEVSTS_ENUM_SPEED_LOW:
2594 		speed = USB_SPEED_LOW;
2595 		break;
2596 	default:
2597 		BUG();
2598 	}
2599 	dev->gadget.speed = speed;
2600 	pch_udc_activate_control_ep(dev);
2601 	pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 | UDC_EPINT_OUT_EP0);
2602 	pch_udc_set_dma(dev, DMA_DIR_TX);
2603 	pch_udc_set_dma(dev, DMA_DIR_RX);
2604 	pch_udc_ep_set_rrdy(&(dev->ep[UDC_EP0OUT_IDX]));
2605 
2606 	/* enable device interrupts */
2607 	pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
2608 					UDC_DEVINT_ES | UDC_DEVINT_ENUM |
2609 					UDC_DEVINT_SI | UDC_DEVINT_SC);
2610 }
2611 
2612 /**
2613  * pch_udc_svc_intf_interrupt() - This function handles a set interface
2614  *				  interrupt
2615  * @dev:	Reference to driver structure
2616  */
2617 static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev)
2618 {
2619 	u32 reg, dev_stat = 0;
2620 	int i;
2621 
2622 	dev_stat = pch_udc_read_device_status(dev);
2623 	dev->cfg_data.cur_intf = (dev_stat & UDC_DEVSTS_INTF_MASK) >>
2624 							 UDC_DEVSTS_INTF_SHIFT;
2625 	dev->cfg_data.cur_alt = (dev_stat & UDC_DEVSTS_ALT_MASK) >>
2626 							 UDC_DEVSTS_ALT_SHIFT;
2627 	dev->set_cfg_not_acked = 1;
2628 	/* Construct the usb request for gadget driver and inform it */
	memset(&dev->setup_data, 0, sizeof dev->setup_data);
2630 	dev->setup_data.bRequest = USB_REQ_SET_INTERFACE;
2631 	dev->setup_data.bRequestType = USB_RECIP_INTERFACE;
2632 	dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_alt);
2633 	dev->setup_data.wIndex = cpu_to_le16(dev->cfg_data.cur_intf);
	/* program the Endpoint Cfg registers */
2635 	/* Only one end point cfg register */
2636 	reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
2637 	reg = (reg & ~UDC_CSR_NE_INTF_MASK) |
2638 	      (dev->cfg_data.cur_intf << UDC_CSR_NE_INTF_SHIFT);
2639 	reg = (reg & ~UDC_CSR_NE_ALT_MASK) |
2640 	      (dev->cfg_data.cur_alt << UDC_CSR_NE_ALT_SHIFT);
2641 	pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
2642 	for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
2643 		/* clear stall bits */
2644 		pch_udc_ep_clear_stall(&(dev->ep[i]));
2645 		dev->ep[i].halted = 0;
2646 	}
2647 	dev->stall = 0;
2648 	spin_unlock(&dev->lock);
2649 	dev->driver->setup(&dev->gadget, &dev->setup_data);
2650 	spin_lock(&dev->lock);
2651 }
2652 
2653 /**
2654  * pch_udc_svc_cfg_interrupt() - This function handles a set configuration
2655  *				interrupt
2656  * @dev:	Reference to driver structure
2657  */
2658 static void pch_udc_svc_cfg_interrupt(struct pch_udc_dev *dev)
2659 {
2660 	int i;
2661 	u32 reg, dev_stat = 0;
2662 
2663 	dev_stat = pch_udc_read_device_status(dev);
2664 	dev->set_cfg_not_acked = 1;
2665 	dev->cfg_data.cur_cfg = (dev_stat & UDC_DEVSTS_CFG_MASK) >>
2666 				UDC_DEVSTS_CFG_SHIFT;
2667 	/* make usb request for gadget driver */
	memset(&dev->setup_data, 0, sizeof dev->setup_data);
2669 	dev->setup_data.bRequest = USB_REQ_SET_CONFIGURATION;
2670 	dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_cfg);
2671 	/* program the NE registers */
2672 	/* Only one end point cfg register */
2673 	reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
2674 	reg = (reg & ~UDC_CSR_NE_CFG_MASK) |
2675 	      (dev->cfg_data.cur_cfg << UDC_CSR_NE_CFG_SHIFT);
2676 	pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
2677 	for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
2678 		/* clear stall bits */
2679 		pch_udc_ep_clear_stall(&(dev->ep[i]));
2680 		dev->ep[i].halted = 0;
2681 	}
2682 	dev->stall = 0;
2683 
2684 	/* call gadget zero with setup data received */
2685 	spin_unlock(&dev->lock);
2686 	dev->driver->setup(&dev->gadget, &dev->setup_data);
2687 	spin_lock(&dev->lock);
2688 }
2689 
2690 /**
2691  * pch_udc_dev_isr() - This function services device interrupts
2692  *			by invoking appropriate routines.
2693  * @dev:	Reference to the device structure
2694  * @dev_intr:	The Device interrupt status.
2695  */
2696 static void pch_udc_dev_isr(struct pch_udc_dev *dev, u32 dev_intr)
2697 {
2698 	int vbus;
2699 
2700 	/* USB Reset Interrupt */
2701 	if (dev_intr & UDC_DEVINT_UR) {
2702 		pch_udc_svc_ur_interrupt(dev);
2703 		dev_dbg(&dev->pdev->dev, "USB_RESET\n");
2704 	}
2705 	/* Enumeration Done Interrupt */
2706 	if (dev_intr & UDC_DEVINT_ENUM) {
2707 		pch_udc_svc_enum_interrupt(dev);
2708 		dev_dbg(&dev->pdev->dev, "USB_ENUM\n");
2709 	}
2710 	/* Set Interface Interrupt */
2711 	if (dev_intr & UDC_DEVINT_SI)
2712 		pch_udc_svc_intf_interrupt(dev);
2713 	/* Set Config Interrupt */
2714 	if (dev_intr & UDC_DEVINT_SC)
2715 		pch_udc_svc_cfg_interrupt(dev);
2716 	/* USB Suspend interrupt */
2717 	if (dev_intr & UDC_DEVINT_US) {
2718 		if (dev->driver
2719 			&& dev->driver->suspend) {
2720 			spin_unlock(&dev->lock);
2721 			dev->driver->suspend(&dev->gadget);
2722 			spin_lock(&dev->lock);
2723 		}
2724 
2725 		vbus = pch_vbus_gpio_get_value(dev);
2726 		if ((dev->vbus_session == 0)
2727 			&& (vbus != 1)) {
2728 			if (dev->driver && dev->driver->disconnect) {
2729 				spin_unlock(&dev->lock);
2730 				dev->driver->disconnect(&dev->gadget);
2731 				spin_lock(&dev->lock);
2732 			}
2733 			pch_udc_reconnect(dev);
2734 		} else if ((dev->vbus_session == 0)
2735 			&& (vbus == 1)
2736 			&& !dev->vbus_gpio.intr)
2737 			schedule_work(&dev->vbus_gpio.irq_work_fall);
2738 
2739 		dev_dbg(&dev->pdev->dev, "USB_SUSPEND\n");
2740 	}
2741 	/* Clear the SOF interrupt, if enabled */
2742 	if (dev_intr & UDC_DEVINT_SOF)
2743 		dev_dbg(&dev->pdev->dev, "SOF\n");
2744 	/* ES interrupt, IDLE > 3ms on the USB */
2745 	if (dev_intr & UDC_DEVINT_ES)
2746 		dev_dbg(&dev->pdev->dev, "ES\n");
2747 	/* RWKP interrupt */
2748 	if (dev_intr & UDC_DEVINT_RWKP)
2749 		dev_dbg(&dev->pdev->dev, "RWKP\n");
2750 }
2751 
2752 /**
2753  * pch_udc_isr() - This function handles interrupts from the PCH USB Device
2754  * @irq:	Interrupt request number
 * @pdev:	Reference to the device structure
2756  */
2757 static irqreturn_t pch_udc_isr(int irq, void *pdev)
2758 {
2759 	struct pch_udc_dev *dev = (struct pch_udc_dev *) pdev;
2760 	u32 dev_intr, ep_intr;
2761 	int i;
2762 
2763 	dev_intr = pch_udc_read_device_interrupts(dev);
2764 	ep_intr = pch_udc_read_ep_interrupts(dev);
2765 
	/* A hot plug can leave the controller hung up; detect that case. */
2767 	if (dev_intr == ep_intr)
2768 		if (dev_intr == pch_udc_readl(dev, UDC_DEVCFG_ADDR)) {
2769 			dev_dbg(&dev->pdev->dev, "UDC: Hung up\n");
			/* Reset the controller */
2771 			pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
2772 			return IRQ_HANDLED;
2773 		}
2774 	if (dev_intr)
2775 		/* Clear device interrupts */
2776 		pch_udc_write_device_interrupts(dev, dev_intr);
2777 	if (ep_intr)
2778 		/* Clear ep interrupts */
2779 		pch_udc_write_ep_interrupts(dev, ep_intr);
2780 	if (!dev_intr && !ep_intr)
2781 		return IRQ_NONE;
2782 	spin_lock(&dev->lock);
2783 	if (dev_intr)
2784 		pch_udc_dev_isr(dev, dev_intr);
2785 	if (ep_intr) {
2786 		pch_udc_read_all_epstatus(dev, ep_intr);
2787 		/* Process Control In interrupts, if present */
2788 		if (ep_intr & UDC_EPINT_IN_EP0) {
2789 			pch_udc_svc_control_in(dev);
2790 			pch_udc_postsvc_epinters(dev, 0);
2791 		}
2792 		/* Process Control Out interrupts, if present */
2793 		if (ep_intr & UDC_EPINT_OUT_EP0)
2794 			pch_udc_svc_control_out(dev);
2795 		/* Process data in end point interrupts */
2796 		for (i = 1; i < PCH_UDC_USED_EP_NUM; i++) {
2797 			if (ep_intr & (1 <<  i)) {
2798 				pch_udc_svc_data_in(dev, i);
2799 				pch_udc_postsvc_epinters(dev, i);
2800 			}
2801 		}
2802 		/* Process data out end point interrupts */
2803 		for (i = UDC_EPINT_OUT_SHIFT + 1; i < (UDC_EPINT_OUT_SHIFT +
2804 						 PCH_UDC_USED_EP_NUM); i++)
2805 			if (ep_intr & (1 <<  i))
2806 				pch_udc_svc_data_out(dev, i -
2807 							 UDC_EPINT_OUT_SHIFT);
2808 	}
2809 	spin_unlock(&dev->lock);
2810 	return IRQ_HANDLED;
2811 }
2812 
2813 /**
 * pch_udc_setup_ep0() - This function enables the control endpoint for
 *			 traffic
2815  * @dev:	Reference to the device structure
2816  */
2817 static void pch_udc_setup_ep0(struct pch_udc_dev *dev)
2818 {
2819 	/* enable ep0 interrupts */
2820 	pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 |
2821 						UDC_EPINT_OUT_EP0);
2822 	/* enable device interrupts */
2823 	pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
2824 				       UDC_DEVINT_ES | UDC_DEVINT_ENUM |
2825 				       UDC_DEVINT_SI | UDC_DEVINT_SC);
2826 }
2827 
2828 /**
2829  * pch_udc_pcd_reinit() - This API initializes the endpoint structures
2830  * @dev:	Reference to the driver structure
2831  */
2832 static void pch_udc_pcd_reinit(struct pch_udc_dev *dev)
2833 {
2834 	const char *const ep_string[] = {
2835 		ep0_string, "ep0out", "ep1in", "ep1out", "ep2in", "ep2out",
2836 		"ep3in", "ep3out", "ep4in", "ep4out", "ep5in", "ep5out",
2837 		"ep6in", "ep6out", "ep7in", "ep7out", "ep8in", "ep8out",
2838 		"ep9in", "ep9out", "ep10in", "ep10out", "ep11in", "ep11out",
2839 		"ep12in", "ep12out", "ep13in", "ep13out", "ep14in", "ep14out",
2840 		"ep15in", "ep15out",
2841 	};
2842 	int i;
2843 
2844 	dev->gadget.speed = USB_SPEED_UNKNOWN;
2845 	INIT_LIST_HEAD(&dev->gadget.ep_list);
2846 
2847 	/* Initialize the endpoints structures */
2848 	memset(dev->ep, 0, sizeof dev->ep);
2849 	for (i = 0; i < PCH_UDC_EP_NUM; i++) {
2850 		struct pch_udc_ep *ep = &dev->ep[i];
2851 		ep->dev = dev;
2852 		ep->halted = 1;
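		/*
		 * Endpoints are laid out in pairs: even indices are the
		 * IN endpoints, odd indices the OUT endpoints.
		 */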
2853 		ep->num = i / 2;
2854 		ep->in = ~i & 1;
2855 		ep->ep.name = ep_string[i];
2856 		ep->ep.ops = &pch_udc_ep_ops;
2857 		if (ep->in) {
2858 			ep->offset_addr = ep->num * UDC_EP_REG_SHIFT;
2859 			ep->ep.caps.dir_in = true;
2860 		} else {
2861 			ep->offset_addr = (UDC_EPINT_OUT_SHIFT + ep->num) *
2862 					  UDC_EP_REG_SHIFT;
2863 			ep->ep.caps.dir_out = true;
2864 		}
2865 		if (i == UDC_EP0IN_IDX || i == UDC_EP0OUT_IDX) {
2866 			ep->ep.caps.type_control = true;
2867 		} else {
2868 			ep->ep.caps.type_iso = true;
2869 			ep->ep.caps.type_bulk = true;
2870 			ep->ep.caps.type_int = true;
2871 		}
2872 		/* need to set ep->ep.maxpacket and set Default Configuration?*/
2873 		usb_ep_set_maxpacket_limit(&ep->ep, UDC_BULK_MAX_PKT_SIZE);
2874 		list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
2875 		INIT_LIST_HEAD(&ep->queue);
2876 	}
2877 	usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IDX].ep, UDC_EP0IN_MAX_PKT_SIZE);
2878 	usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IDX].ep, UDC_EP0OUT_MAX_PKT_SIZE);
2879 
2880 	/* remove ep0 in and out from the list.  They have own pointer */
2881 	list_del_init(&dev->ep[UDC_EP0IN_IDX].ep.ep_list);
2882 	list_del_init(&dev->ep[UDC_EP0OUT_IDX].ep.ep_list);
2883 
2884 	dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
2885 	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
2886 }
2887 
2888 /**
2889  * pch_udc_pcd_init() - This API initializes the driver structure
2890  * @dev:	Reference to the driver structure
2891  *
2892  * Return codes:
2893  *	0: Success
2894  */
2895 static int pch_udc_pcd_init(struct pch_udc_dev *dev)
2896 {
2897 	pch_udc_init(dev);
2898 	pch_udc_pcd_reinit(dev);
2899 	pch_vbus_gpio_init(dev, vbus_gpio_port);
2900 	return 0;
2901 }
2902 
2903 /**
2904  * init_dma_pools() - create dma pools during initialization
 * @dev:	Reference to the driver structure
2906  */
2907 static int init_dma_pools(struct pch_udc_dev *dev)
2908 {
2909 	struct pch_udc_stp_dma_desc	*td_stp;
2910 	struct pch_udc_data_dma_desc	*td_data;
2911 	void				*ep0out_buf;
2912 
2913 	/* DMA setup */
2914 	dev->data_requests = dma_pool_create("data_requests", &dev->pdev->dev,
2915 		sizeof(struct pch_udc_data_dma_desc), 0, 0);
2916 	if (!dev->data_requests) {
2917 		dev_err(&dev->pdev->dev, "%s: can't get request data pool\n",
2918 			__func__);
2919 		return -ENOMEM;
2920 	}
2921 
2922 	/* dma desc for setup data */
2923 	dev->stp_requests = dma_pool_create("setup requests", &dev->pdev->dev,
2924 		sizeof(struct pch_udc_stp_dma_desc), 0, 0);
2925 	if (!dev->stp_requests) {
2926 		dev_err(&dev->pdev->dev, "%s: can't get setup request pool\n",
2927 			__func__);
2928 		return -ENOMEM;
2929 	}
2930 	/* setup */
2931 	td_stp = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
2932 				&dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
2933 	if (!td_stp) {
2934 		dev_err(&dev->pdev->dev,
2935 			"%s: can't allocate setup dma descriptor\n", __func__);
2936 		return -ENOMEM;
2937 	}
2938 	dev->ep[UDC_EP0OUT_IDX].td_stp = td_stp;
2939 
	/* DMA descriptor for ep0 OUT data */
2941 	td_data = dma_pool_alloc(dev->data_requests, GFP_KERNEL,
2942 				&dev->ep[UDC_EP0OUT_IDX].td_data_phys);
2943 	if (!td_data) {
2944 		dev_err(&dev->pdev->dev,
2945 			"%s: can't allocate data dma descriptor\n", __func__);
2946 		return -ENOMEM;
2947 	}
2948 	dev->ep[UDC_EP0OUT_IDX].td_data = td_data;
2949 	dev->ep[UDC_EP0IN_IDX].td_stp = NULL;
2950 	dev->ep[UDC_EP0IN_IDX].td_stp_phys = 0;
2951 	dev->ep[UDC_EP0IN_IDX].td_data = NULL;
2952 	dev->ep[UDC_EP0IN_IDX].td_data_phys = 0;
2953 
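	/*
	 * Data buffer referenced by the ep0 OUT DMA descriptor
	 * (see pch_udc_activate_control_ep()).
	 */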
2954 	ep0out_buf = devm_kzalloc(&dev->pdev->dev, UDC_EP0OUT_BUFF_SIZE * 4,
2955 				  GFP_KERNEL);
2956 	if (!ep0out_buf)
2957 		return -ENOMEM;
2958 	dev->dma_addr = dma_map_single(&dev->pdev->dev, ep0out_buf,
2959 				       UDC_EP0OUT_BUFF_SIZE * 4,
2960 				       DMA_FROM_DEVICE);
2961 	return 0;
2962 }
2963 
2964 static int pch_udc_start(struct usb_gadget *g,
2965 		struct usb_gadget_driver *driver)
2966 {
2967 	struct pch_udc_dev	*dev = to_pch_udc(g);
2968 
2969 	driver->driver.bus = NULL;
2970 	dev->driver = driver;
2971 
2972 	/* get ready for ep0 traffic */
2973 	pch_udc_setup_ep0(dev);
2974 
2975 	/* clear SD */
2976 	if ((pch_vbus_gpio_get_value(dev) != 0) || !dev->vbus_gpio.intr)
2977 		pch_udc_clear_disconnect(dev);
2978 
2979 	dev->connected = 1;
2980 	return 0;
2981 }
2982 
2983 static int pch_udc_stop(struct usb_gadget *g)
2984 {
2985 	struct pch_udc_dev	*dev = to_pch_udc(g);
2986 
2987 	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
2988 
	/* Ensures that there are no pending requests with this driver */
2990 	dev->driver = NULL;
2991 	dev->connected = 0;
2992 
2993 	/* set SD */
2994 	pch_udc_set_disconnect(dev);
2995 
2996 	return 0;
2997 }
2998 
2999 static void pch_udc_shutdown(struct pci_dev *pdev)
3000 {
3001 	struct pch_udc_dev *dev = pci_get_drvdata(pdev);
3002 
3003 	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
3004 	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
3005 
3006 	/* disable the pullup so the host will think we're gone */
3007 	pch_udc_set_disconnect(dev);
3008 }
3009 
3010 static void pch_udc_remove(struct pci_dev *pdev)
3011 {
3012 	struct pch_udc_dev	*dev = pci_get_drvdata(pdev);
3013 
3014 	usb_del_gadget_udc(&dev->gadget);
3015 
3016 	/* gadget driver must not be registered */
3017 	if (dev->driver)
3018 		dev_err(&pdev->dev,
3019 			"%s: gadget driver still bound!!!\n", __func__);
3020 	/* dma pool cleanup */
3021 	dma_pool_destroy(dev->data_requests);
3022 
3023 	if (dev->stp_requests) {
3024 		/* cleanup DMA desc's for ep0in */
3025 		if (dev->ep[UDC_EP0OUT_IDX].td_stp) {
3026 			dma_pool_free(dev->stp_requests,
3027 				dev->ep[UDC_EP0OUT_IDX].td_stp,
3028 				dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
3029 		}
3030 		if (dev->ep[UDC_EP0OUT_IDX].td_data) {
3031 			dma_pool_free(dev->stp_requests,
3032 				dev->ep[UDC_EP0OUT_IDX].td_data,
3033 				dev->ep[UDC_EP0OUT_IDX].td_data_phys);
3034 		}
3035 		dma_pool_destroy(dev->stp_requests);
3036 	}
3037 
3038 	if (dev->dma_addr)
3039 		dma_unmap_single(&dev->pdev->dev, dev->dma_addr,
3040 				 UDC_EP0OUT_BUFF_SIZE * 4, DMA_FROM_DEVICE);
3041 
3042 	pch_vbus_gpio_free(dev);
3043 
3044 	pch_udc_exit(dev);
3045 }
3046 
3047 #ifdef CONFIG_PM_SLEEP
3048 static int pch_udc_suspend(struct device *d)
3049 {
3050 	struct pci_dev *pdev = to_pci_dev(d);
3051 	struct pch_udc_dev *dev = pci_get_drvdata(pdev);
3052 
3053 	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
3054 	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
3055 
3056 	return 0;
3057 }
3058 
3059 static int pch_udc_resume(struct device *d)
3060 {
3061 	return 0;
3062 }
3063 
3064 static SIMPLE_DEV_PM_OPS(pch_udc_pm, pch_udc_suspend, pch_udc_resume);
3065 #define PCH_UDC_PM_OPS		(&pch_udc_pm)
3066 #else
3067 #define PCH_UDC_PM_OPS		NULL
3068 #endif /* CONFIG_PM_SLEEP */
3069 
3070 static int pch_udc_probe(struct pci_dev *pdev,
3071 			  const struct pci_device_id *id)
3072 {
3073 	int			bar;
3074 	int			retval;
3075 	struct pch_udc_dev	*dev;
3076 
3077 	/* init */
3078 	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
3079 	if (!dev)
3080 		return -ENOMEM;
3081 
3082 	/* pci setup */
3083 	retval = pcim_enable_device(pdev);
3084 	if (retval)
3085 		return retval;
3086 
3087 	pci_set_drvdata(pdev, dev);
3088 
3089 	/* Determine BAR based on PCI ID */
3090 	if (id->device == PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC)
3091 		bar = PCH_UDC_PCI_BAR_QUARK_X1000;
3092 	else
3093 		bar = PCH_UDC_PCI_BAR;
3094 
3095 	/* PCI resource allocation */
3096 	retval = pcim_iomap_regions(pdev, 1 << bar, pci_name(pdev));
3097 	if (retval)
3098 		return retval;
3099 
3100 	dev->base_addr = pcim_iomap_table(pdev)[bar];
3101 
3102 	/* initialize the hardware */
3103 	if (pch_udc_pcd_init(dev))
3104 		return -ENODEV;
3105 
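	/* MSI is optional; fall back to the legacy interrupt if unavailable */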
3106 	pci_enable_msi(pdev);
3107 
3108 	retval = devm_request_irq(&pdev->dev, pdev->irq, pch_udc_isr,
3109 				  IRQF_SHARED, KBUILD_MODNAME, dev);
3110 	if (retval) {
3111 		dev_err(&pdev->dev, "%s: request_irq(%d) fail\n", __func__,
3112 			pdev->irq);
3113 		goto finished;
3114 	}
3115 
3116 	pci_set_master(pdev);
3117 	pci_try_set_mwi(pdev);
3118 
3119 	/* device struct setup */
3120 	spin_lock_init(&dev->lock);
3121 	dev->pdev = pdev;
3122 	dev->gadget.ops = &pch_udc_ops;
3123 
3124 	retval = init_dma_pools(dev);
3125 	if (retval)
3126 		goto finished;
3127 
3128 	dev->gadget.name = KBUILD_MODNAME;
3129 	dev->gadget.max_speed = USB_SPEED_HIGH;
3130 
3131 	/* Put the device in disconnected state till a driver is bound */
3132 	pch_udc_set_disconnect(dev);
3133 	retval = usb_add_gadget_udc(&pdev->dev, &dev->gadget);
3134 	if (retval)
3135 		goto finished;
3136 	return 0;
3137 
3138 finished:
3139 	pch_udc_remove(pdev);
3140 	return retval;
3141 }
3142 
3143 static const struct pci_device_id pch_udc_pcidev_id[] = {
3144 	{
3145 		PCI_DEVICE(PCI_VENDOR_ID_INTEL,
3146 			   PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC),
3147 		.class = PCI_CLASS_SERIAL_USB_DEVICE,
3148 		.class_mask = 0xffffffff,
3149 	},
3150 	{
3151 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EG20T_UDC),
3152 		.class = PCI_CLASS_SERIAL_USB_DEVICE,
3153 		.class_mask = 0xffffffff,
3154 	},
3155 	{
3156 		PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7213_IOH_UDC),
3157 		.class = PCI_CLASS_SERIAL_USB_DEVICE,
3158 		.class_mask = 0xffffffff,
3159 	},
3160 	{
3161 		PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7831_IOH_UDC),
3162 		.class = PCI_CLASS_SERIAL_USB_DEVICE,
3163 		.class_mask = 0xffffffff,
3164 	},
3165 	{ 0 },
3166 };
3167 
3168 MODULE_DEVICE_TABLE(pci, pch_udc_pcidev_id);
3169 
3170 static struct pci_driver pch_udc_driver = {
3171 	.name =	KBUILD_MODNAME,
3172 	.id_table =	pch_udc_pcidev_id,
3173 	.probe =	pch_udc_probe,
3174 	.remove =	pch_udc_remove,
3175 	.shutdown =	pch_udc_shutdown,
3176 	.driver = {
3177 		.pm = PCH_UDC_PM_OPS,
3178 	},
3179 };
3180 
3181 module_pci_driver(pch_udc_driver);
3182 
3183 MODULE_DESCRIPTION("Intel EG20T USB Device Controller");
3184 MODULE_AUTHOR("LAPIS Semiconductor, <tomoya-linux@dsn.lapis-semi.com>");
3185 MODULE_LICENSE("GPL");
3186