xref: /openbmc/linux/drivers/usb/gadget/udc/pch_udc.c (revision 4f6cce39)
1 /*
2  * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; version 2 of the License.
7  */
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/pci.h>
12 #include <linux/delay.h>
13 #include <linux/errno.h>
14 #include <linux/list.h>
15 #include <linux/interrupt.h>
16 #include <linux/usb/ch9.h>
17 #include <linux/usb/gadget.h>
18 #include <linux/gpio.h>
19 #include <linux/irq.h>
20 
21 /* GPIO port for VBUS detection */
22 static int vbus_gpio_port = -1;		/* GPIO port number (-1:Not used) */
23 
24 #define PCH_VBUS_PERIOD		3000	/* VBUS polling period (msec) */
25 #define PCH_VBUS_INTERVAL	10	/* VBUS polling interval (msec) */
26 
27 /* Address offset of Registers */
28 #define UDC_EP_REG_SHIFT	0x20	/* Offset to next EP */
29 
30 #define UDC_EPCTL_ADDR		0x00	/* Endpoint control */
31 #define UDC_EPSTS_ADDR		0x04	/* Endpoint status */
32 #define UDC_BUFIN_FRAMENUM_ADDR	0x08	/* buffer size in / frame number out */
33 #define UDC_BUFOUT_MAXPKT_ADDR	0x0C	/* buffer size out / maxpkt in */
34 #define UDC_SUBPTR_ADDR		0x10	/* setup buffer pointer */
35 #define UDC_DESPTR_ADDR		0x14	/* Data descriptor pointer */
36 #define UDC_CONFIRM_ADDR	0x18	/* Write/Read confirmation */
37 
38 #define UDC_DEVCFG_ADDR		0x400	/* Device configuration */
39 #define UDC_DEVCTL_ADDR		0x404	/* Device control */
40 #define UDC_DEVSTS_ADDR		0x408	/* Device status */
41 #define UDC_DEVIRQSTS_ADDR	0x40C	/* Device irq status */
42 #define UDC_DEVIRQMSK_ADDR	0x410	/* Device irq mask */
43 #define UDC_EPIRQSTS_ADDR	0x414	/* Endpoint irq status */
44 #define UDC_EPIRQMSK_ADDR	0x418	/* Endpoint irq mask */
45 #define UDC_DEVLPM_ADDR		0x41C	/* LPM control / status */
46 #define UDC_CSR_BUSY_ADDR	0x4f0	/* UDC_CSR_BUSY Status register */
47 #define UDC_SRST_ADDR		0x4fc	/* SOFT RESET register */
48 #define UDC_CSR_ADDR		0x500	/* USB_DEVICE endpoint register */
49 
50 /* Endpoint control register */
51 /* Bit position */
52 #define UDC_EPCTL_MRXFLUSH		(1 << 12)
53 #define UDC_EPCTL_RRDY			(1 << 9)
54 #define UDC_EPCTL_CNAK			(1 << 8)
55 #define UDC_EPCTL_SNAK			(1 << 7)
56 #define UDC_EPCTL_NAK			(1 << 6)
57 #define UDC_EPCTL_P			(1 << 3)
58 #define UDC_EPCTL_F			(1 << 1)
59 #define UDC_EPCTL_S			(1 << 0)
60 #define UDC_EPCTL_ET_SHIFT		4
61 /* Mask pattern */
62 #define UDC_EPCTL_ET_MASK		0x00000030
63 /* Value for ET field */
64 #define UDC_EPCTL_ET_CONTROL		0
65 #define UDC_EPCTL_ET_ISO		1
66 #define UDC_EPCTL_ET_BULK		2
67 #define UDC_EPCTL_ET_INTERRUPT		3
68 
69 /* Endpoint status register */
70 /* Bit position */
71 #define UDC_EPSTS_XFERDONE		(1 << 27)
72 #define UDC_EPSTS_RSS			(1 << 26)
73 #define UDC_EPSTS_RCS			(1 << 25)
74 #define UDC_EPSTS_TXEMPTY		(1 << 24)
75 #define UDC_EPSTS_TDC			(1 << 10)
76 #define UDC_EPSTS_HE			(1 << 9)
77 #define UDC_EPSTS_MRXFIFO_EMP		(1 << 8)
78 #define UDC_EPSTS_BNA			(1 << 7)
79 #define UDC_EPSTS_IN			(1 << 6)
80 #define UDC_EPSTS_OUT_SHIFT		4
81 /* Mask pattern */
82 #define UDC_EPSTS_OUT_MASK		0x00000030
83 #define UDC_EPSTS_ALL_CLR_MASK		0x1F0006F0
84 /* Value for OUT field */
85 #define UDC_EPSTS_OUT_SETUP		2
86 #define UDC_EPSTS_OUT_DATA		1
87 
88 /* Device configuration register */
89 /* Bit position */
90 #define UDC_DEVCFG_CSR_PRG		(1 << 17)
91 #define UDC_DEVCFG_SP			(1 << 3)
92 /* SPD Value */
93 #define UDC_DEVCFG_SPD_HS		0x0
94 #define UDC_DEVCFG_SPD_FS		0x1
95 #define UDC_DEVCFG_SPD_LS		0x2
96 
97 /* Device control register */
98 /* Bit position */
99 #define UDC_DEVCTL_THLEN_SHIFT		24
100 #define UDC_DEVCTL_BRLEN_SHIFT		16
101 #define UDC_DEVCTL_CSR_DONE		(1 << 13)
102 #define UDC_DEVCTL_SD			(1 << 10)
103 #define UDC_DEVCTL_MODE			(1 << 9)
104 #define UDC_DEVCTL_BREN			(1 << 8)
105 #define UDC_DEVCTL_THE			(1 << 7)
106 #define UDC_DEVCTL_DU			(1 << 4)
107 #define UDC_DEVCTL_TDE			(1 << 3)
108 #define UDC_DEVCTL_RDE			(1 << 2)
109 #define UDC_DEVCTL_RES			(1 << 0)
110 
111 /* Device status register */
112 /* Bit position */
113 #define UDC_DEVSTS_TS_SHIFT		18
114 #define UDC_DEVSTS_ENUM_SPEED_SHIFT	13
115 #define UDC_DEVSTS_ALT_SHIFT		8
116 #define UDC_DEVSTS_INTF_SHIFT		4
117 #define UDC_DEVSTS_CFG_SHIFT		0
118 /* Mask pattern */
119 #define UDC_DEVSTS_TS_MASK		0xfffc0000
120 #define UDC_DEVSTS_ENUM_SPEED_MASK	0x00006000
121 #define UDC_DEVSTS_ALT_MASK		0x00000f00
122 #define UDC_DEVSTS_INTF_MASK		0x000000f0
123 #define UDC_DEVSTS_CFG_MASK		0x0000000f
124 /* value for maximum speed for SPEED field */
125 #define UDC_DEVSTS_ENUM_SPEED_FULL	1
126 #define UDC_DEVSTS_ENUM_SPEED_HIGH	0
127 #define UDC_DEVSTS_ENUM_SPEED_LOW	2
128 #define UDC_DEVSTS_ENUM_SPEED_FULLX	3
129 
130 /* Device irq register */
131 /* Bit position */
132 #define UDC_DEVINT_RWKP			(1 << 7)
133 #define UDC_DEVINT_ENUM			(1 << 6)
134 #define UDC_DEVINT_SOF			(1 << 5)
135 #define UDC_DEVINT_US			(1 << 4)
136 #define UDC_DEVINT_UR			(1 << 3)
137 #define UDC_DEVINT_ES			(1 << 2)
138 #define UDC_DEVINT_SI			(1 << 1)
139 #define UDC_DEVINT_SC			(1 << 0)
140 /* Mask pattern */
141 #define UDC_DEVINT_MSK			0x7f
142 
143 /* Endpoint irq register */
144 /* Bit position */
145 #define UDC_EPINT_IN_SHIFT		0
146 #define UDC_EPINT_OUT_SHIFT		16
147 #define UDC_EPINT_IN_EP0		(1 << 0)
148 #define UDC_EPINT_OUT_EP0		(1 << 16)
149 /* Mask pattern */
150 #define UDC_EPINT_MSK_DISABLE_ALL	0xffffffff
151 
152 /* UDC_CSR_BUSY Status register */
153 /* Bit position */
154 #define UDC_CSR_BUSY			(1 << 0)
155 
156 /* SOFT RESET register */
157 /* Bit position */
158 #define UDC_PSRST			(1 << 1)
159 #define UDC_SRST			(1 << 0)
160 
161 /* USB_DEVICE endpoint register */
162 /* Bit position */
163 #define UDC_CSR_NE_NUM_SHIFT		0
164 #define UDC_CSR_NE_DIR_SHIFT		4
165 #define UDC_CSR_NE_TYPE_SHIFT		5
166 #define UDC_CSR_NE_CFG_SHIFT		7
167 #define UDC_CSR_NE_INTF_SHIFT		11
168 #define UDC_CSR_NE_ALT_SHIFT		15
169 #define UDC_CSR_NE_MAX_PKT_SHIFT	19
170 /* Mask pattern */
171 #define UDC_CSR_NE_NUM_MASK		0x0000000f
172 #define UDC_CSR_NE_DIR_MASK		0x00000010
173 #define UDC_CSR_NE_TYPE_MASK		0x00000060
174 #define UDC_CSR_NE_CFG_MASK		0x00000780
175 #define UDC_CSR_NE_INTF_MASK		0x00007800
176 #define UDC_CSR_NE_ALT_MASK		0x00078000
177 #define UDC_CSR_NE_MAX_PKT_MASK		0x3ff80000
178 
179 #define PCH_UDC_CSR(ep)	(UDC_CSR_ADDR + ep*4)
180 #define PCH_UDC_EPINT(in, num)\
181 		(1 << (num + (in ? UDC_EPINT_IN_SHIFT : UDC_EPINT_OUT_SHIFT)))
182 
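/*
 * Worked expansion (illustrative note, not from the original source):
 * PCH_UDC_EPINT(1, 2) is (1 << 2), the IN interrupt bit for EP2, while
 * PCH_UDC_EPINT(0, 2) is (1 << 18) because OUT endpoint bits start at
 * UDC_EPINT_OUT_SHIFT (16).  Similarly PCH_UDC_CSR(3) resolves to
 * UDC_CSR_ADDR + 0x0C, the fourth 32-bit entry of the UDC_CSR block.
 */
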
183 /* Index of endpoint */
184 #define UDC_EP0IN_IDX		0
185 #define UDC_EP0OUT_IDX		1
186 #define UDC_EPIN_IDX(ep)	(ep * 2)
187 #define UDC_EPOUT_IDX(ep)	(ep * 2 + 1)
188 #define PCH_UDC_EP0		0
189 #define PCH_UDC_EP1		1
190 #define PCH_UDC_EP2		2
191 #define PCH_UDC_EP3		3
192 
193 /* Number of endpoint */
194 #define PCH_UDC_EP_NUM		32	/* Total number of EPs (16 IN, 16 OUT) */
195 #define PCH_UDC_USED_EP_NUM	4	/* Number of EPs actually used */
196 /* Length Value */
197 #define PCH_UDC_BRLEN		0x0F	/* Burst length */
198 #define PCH_UDC_THLEN		0x1F	/* Threshold length */
199 /* Value of EP Buffer Size */
200 #define UDC_EP0IN_BUFF_SIZE	16
201 #define UDC_EPIN_BUFF_SIZE	256
202 #define UDC_EP0OUT_BUFF_SIZE	16
203 #define UDC_EPOUT_BUFF_SIZE	256
204 /* Value of EP maximum packet size */
205 #define UDC_EP0IN_MAX_PKT_SIZE	64
206 #define UDC_EP0OUT_MAX_PKT_SIZE	64
207 #define UDC_BULK_MAX_PKT_SIZE	512
208 
209 /* DMA */
210 #define DMA_DIR_RX		1	/* DMA for data receive */
211 #define DMA_DIR_TX		2	/* DMA for data transmit */
212 #define DMA_ADDR_INVALID	(~(dma_addr_t)0)
213 #define UDC_DMA_MAXPACKET	65536	/* maximum packet size for DMA */
214 
215 /**
216  * struct pch_udc_data_dma_desc - Structure to hold DMA descriptor information
217  *				  for data
218  * @status:		Status quadlet
219  * @reserved:		Reserved
220  * @dataptr:		Data buffer address
221  * @next:		Address of the next descriptor in the chain
222  */
223 struct pch_udc_data_dma_desc {
224 	u32 status;
225 	u32 reserved;
226 	u32 dataptr;
227 	u32 next;
228 };
229 
230 /**
231  * struct pch_udc_stp_dma_desc - Structure to hold DMA descriptor information
232  *				 for control data
233  * @status:	Status
234  * @reserved:	Reserved
235  * @request:	Setup packet (the received usb_ctrlrequest)
237  */
238 struct pch_udc_stp_dma_desc {
239 	u32 status;
240 	u32 reserved;
241 	struct usb_ctrlrequest request;
242 } __packed;
243 
244 /* DMA status definitions */
245 /* Buffer status */
246 #define PCH_UDC_BUFF_STS	0xC0000000
247 #define PCH_UDC_BS_HST_RDY	0x00000000
248 #define PCH_UDC_BS_DMA_BSY	0x40000000
249 #define PCH_UDC_BS_DMA_DONE	0x80000000
250 #define PCH_UDC_BS_HST_BSY	0xC0000000
251 /*  Rx/Tx Status */
252 #define PCH_UDC_RXTX_STS	0x30000000
253 #define PCH_UDC_RTS_SUCC	0x00000000
254 #define PCH_UDC_RTS_DESERR	0x10000000
255 #define PCH_UDC_RTS_BUFERR	0x30000000
256 /* Last Descriptor Indication */
257 #define PCH_UDC_DMA_LAST	0x08000000
258 /* Number of Rx/Tx Bytes Mask */
259 #define PCH_UDC_RXTX_BYTES	0x0000ffff
260 
261 /**
262  * struct pch_udc_cfg_data - Structure to hold current configuration
263  *			     and interface information
264  * @cur_cfg:	current configuration in use
265  * @cur_intf:	current interface in use
266  * @cur_alt:	current alt interface in use
267  */
268 struct pch_udc_cfg_data {
269 	u16 cur_cfg;
270 	u16 cur_intf;
271 	u16 cur_alt;
272 };
273 
274 /**
275  * struct pch_udc_ep - Structure holding a PCH USB device Endpoint information
276  * @ep:			embedded usb endpoint
277  * @td_stp_phys:	for setup request
278  * @td_data_phys:	for data request
279  * @td_stp:		for setup request
280  * @td_data:		for data request
281  * @dev:		reference to device struct
282  * @offset_addr:	offset address of ep register
284  * @queue:		queue for requests
285  * @num:		endpoint number
286  * @in:			endpoint is IN
287  * @halted:		endpoint halted?
288  * @epsts:		Endpoint status
289  */
290 struct pch_udc_ep {
291 	struct usb_ep			ep;
292 	dma_addr_t			td_stp_phys;
293 	dma_addr_t			td_data_phys;
294 	struct pch_udc_stp_dma_desc	*td_stp;
295 	struct pch_udc_data_dma_desc	*td_data;
296 	struct pch_udc_dev		*dev;
297 	unsigned long			offset_addr;
298 	struct list_head		queue;
299 	unsigned			num:5,
300 					in:1,
301 					halted:1;
302 	unsigned long			epsts;
303 };
304 
305 /**
306  * struct pch_vbus_gpio_data - Structure holding GPIO information
307  *					for detecting VBUS
308  * @port:		gpio port number
309  * @intr:		gpio interrupt number
310  * @irq_work_fall:	Work structure for the VBUS-fall handler
311  * @irq_work_rise:	Work structure for the VBUS-rise handler
312  */
313 struct pch_vbus_gpio_data {
314 	int			port;
315 	int			intr;
316 	struct work_struct	irq_work_fall;
317 	struct work_struct	irq_work_rise;
318 };
319 
320 /**
321  * struct pch_udc_dev - Structure holding complete information
322  *			of the PCH USB device
323  * @gadget:		gadget driver data
324  * @driver:		reference to gadget driver bound
325  * @pdev:		reference to the PCI device
326  * @ep:			array of endpoints
327  * @lock:		protects all state
328  * @stall:		stall requested
329  * @prot_stall:		protocol stall requested
331  * @suspended:		driver in suspended state
332  * @connected:		gadget driver associated
333  * @vbus_session:	required vbus_session state
334  * @set_cfg_not_acked:	pending acknowledgement for setup
335  * @waiting_zlp_ack:	pending acknowledgement for ZLP
336  * @data_requests:	DMA pool for data requests
337  * @stp_requests:	DMA pool for setup requests
338  * @dma_addr:		DMA address used for the receive buffer
339  * @setup_data:		Received setup data
340  * @base_addr:		for mapped device memory
341  * @cfg_data:		current cfg, intf, and alt in use
342  * @vbus_gpio:		GPIO information for detecting VBUS
343  */
344 struct pch_udc_dev {
345 	struct usb_gadget		gadget;
346 	struct usb_gadget_driver	*driver;
347 	struct pci_dev			*pdev;
348 	struct pch_udc_ep		ep[PCH_UDC_EP_NUM];
349 	spinlock_t			lock; /* protects all state */
350 	unsigned
351 			stall:1,
352 			prot_stall:1,
353 			suspended:1,
354 			connected:1,
355 			vbus_session:1,
356 			set_cfg_not_acked:1,
357 			waiting_zlp_ack:1;
358 	struct pci_pool		*data_requests;
359 	struct pci_pool		*stp_requests;
360 	dma_addr_t			dma_addr;
361 	struct usb_ctrlrequest		setup_data;
362 	void __iomem			*base_addr;
363 	struct pch_udc_cfg_data		cfg_data;
364 	struct pch_vbus_gpio_data	vbus_gpio;
365 };
366 #define to_pch_udc(g)	(container_of((g), struct pch_udc_dev, gadget))
367 
368 #define PCH_UDC_PCI_BAR_QUARK_X1000	0
369 #define PCH_UDC_PCI_BAR			1
370 
371 #define PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC	0x0939
372 #define PCI_DEVICE_ID_INTEL_EG20T_UDC		0x8808
373 
374 #define PCI_VENDOR_ID_ROHM		0x10DB
375 #define PCI_DEVICE_ID_ML7213_IOH_UDC	0x801D
376 #define PCI_DEVICE_ID_ML7831_IOH_UDC	0x8808
377 
378 static const char	ep0_string[] = "ep0in";
379 static DEFINE_SPINLOCK(udc_stall_spinlock);	/* stall spin lock */
380 static bool speed_fs;
381 module_param_named(speed_fs, speed_fs, bool, S_IRUGO);
382 MODULE_PARM_DESC(speed_fs, "true for Full speed operation");
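
/*
 * Usage note (illustrative; assumes the module is built as pch_udc.ko):
 * full-speed-only operation can be forced at load time with
 *
 *	modprobe pch_udc speed_fs=1
 */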
383 
384 /**
385  * struct pch_udc_request - Structure holding a PCH USB device request packet
386  * @req:		embedded ep request
387  * @td_data_phys:	phys. address
388  * @td_data:		first dma desc. of chain
389  * @td_data_last:	last dma desc. of chain
390  * @queue:		associated queue
391  * @dma_going:		DMA in progress for request
392  * @dma_mapped:		DMA memory mapped for request
393  * @dma_done:		DMA completed for request
394  * @chain_len:		chain length
395  * @buf:		Buffer memory for align adjustment
396  * @dma:		DMA memory for align adjustment
397  */
398 struct pch_udc_request {
399 	struct usb_request		req;
400 	dma_addr_t			td_data_phys;
401 	struct pch_udc_data_dma_desc	*td_data;
402 	struct pch_udc_data_dma_desc	*td_data_last;
403 	struct list_head		queue;
404 	unsigned			dma_going:1,
405 					dma_mapped:1,
406 					dma_done:1;
407 	unsigned			chain_len;
408 	void				*buf;
409 	dma_addr_t			dma;
410 };
411 
412 static inline u32 pch_udc_readl(struct pch_udc_dev *dev, unsigned long reg)
413 {
414 	return ioread32(dev->base_addr + reg);
415 }
416 
417 static inline void pch_udc_writel(struct pch_udc_dev *dev,
418 				    unsigned long val, unsigned long reg)
419 {
420 	iowrite32(val, dev->base_addr + reg);
421 }
422 
423 static inline void pch_udc_bit_set(struct pch_udc_dev *dev,
424 				     unsigned long reg,
425 				     unsigned long bitmask)
426 {
427 	pch_udc_writel(dev, pch_udc_readl(dev, reg) | bitmask, reg);
428 }
429 
430 static inline void pch_udc_bit_clr(struct pch_udc_dev *dev,
431 				     unsigned long reg,
432 				     unsigned long bitmask)
433 {
434 	pch_udc_writel(dev, pch_udc_readl(dev, reg) & ~(bitmask), reg);
435 }
436 
437 static inline u32 pch_udc_ep_readl(struct pch_udc_ep *ep, unsigned long reg)
438 {
439 	return ioread32(ep->dev->base_addr + ep->offset_addr + reg);
440 }
441 
442 static inline void pch_udc_ep_writel(struct pch_udc_ep *ep,
443 				    unsigned long val, unsigned long reg)
444 {
445 	iowrite32(val, ep->dev->base_addr + ep->offset_addr + reg);
446 }
447 
448 static inline void pch_udc_ep_bit_set(struct pch_udc_ep *ep,
449 				     unsigned long reg,
450 				     unsigned long bitmask)
451 {
452 	pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) | bitmask, reg);
453 }
454 
455 static inline void pch_udc_ep_bit_clr(struct pch_udc_ep *ep,
456 				     unsigned long reg,
457 				     unsigned long bitmask)
458 {
459 	pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) & ~(bitmask), reg);
460 }
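
/*
 * Usage sketch (hypothetical helper, for illustration only): the accessors
 * above fold in dev->base_addr and ep->offset_addr, so a read-modify-write
 * of any endpoint register is a single call.  For example, acknowledging
 * every pending bit in an endpoint's status register could look like this:
 */
static inline void pch_udc_example_ack_ep_status(struct pch_udc_ep *ep)
{
	/* Status bits are cleared by writing them back to the register */
	u32 epsts = pch_udc_ep_readl(ep, UDC_EPSTS_ADDR);

	pch_udc_ep_writel(ep, epsts, UDC_EPSTS_ADDR);
}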
461 
462 /**
463  * pch_udc_csr_busy() - Wait till idle.
464  * @dev:	Reference to pch_udc_dev structure
465  */
466 static void pch_udc_csr_busy(struct pch_udc_dev *dev)
467 {
468 	unsigned int count = 200;
469 
470 	/* Wait till idle */
471 	while ((pch_udc_readl(dev, UDC_CSR_BUSY_ADDR) & UDC_CSR_BUSY)
472 		&& --count)
473 		cpu_relax();
474 	if (!count)
475 		dev_err(&dev->pdev->dev, "%s: wait error\n", __func__);
476 }
477 
478 /**
479  * pch_udc_write_csr() - Write the command and status registers.
480  * @dev:	Reference to pch_udc_dev structure
481  * @val:	value to be written to CSR register
482  * @ep:	Endpoint index selecting the CSR register
483  */
484 static void pch_udc_write_csr(struct pch_udc_dev *dev, unsigned long val,
485 			       unsigned int ep)
486 {
487 	unsigned long reg = PCH_UDC_CSR(ep);
488 
489 	pch_udc_csr_busy(dev);		/* Wait till idle */
490 	pch_udc_writel(dev, val, reg);
491 	pch_udc_csr_busy(dev);		/* Wait till idle */
492 }
493 
494 /**
495  * pch_udc_read_csr() - Read the command and status registers.
496  * @dev:	Reference to pch_udc_dev structure
497  * @ep:	Endpoint index selecting the CSR register
498  *
499  * Return codes:	content of CSR register
500  */
501 static u32 pch_udc_read_csr(struct pch_udc_dev *dev, unsigned int ep)
502 {
503 	unsigned long reg = PCH_UDC_CSR(ep);
504 
505 	pch_udc_csr_busy(dev);		/* Wait till idle */
506 	pch_udc_readl(dev, reg);	/* Dummy read */
507 	pch_udc_csr_busy(dev);		/* Wait till idle */
508 	return pch_udc_readl(dev, reg);
509 }
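
/*
 * Usage sketch (hypothetical helper, for illustration only): every UDC_CSR
 * access must be bracketed by the CSR_BUSY handshake, which
 * pch_udc_read_csr()/pch_udc_write_csr() already perform, so updating one
 * field of an endpoint's CSR entry is a plain read-modify-write:
 */
static inline void pch_udc_example_update_csr_maxpkt(struct pch_udc_dev *dev,
						     unsigned int ep, u32 maxpkt)
{
	u32 val = pch_udc_read_csr(dev, ep);

	val &= ~UDC_CSR_NE_MAX_PKT_MASK;
	val |= (maxpkt << UDC_CSR_NE_MAX_PKT_SHIFT) & UDC_CSR_NE_MAX_PKT_MASK;
	pch_udc_write_csr(dev, val, ep);
}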
510 
511 /**
512  * pch_udc_rmt_wakeup() - Initiate for remote wakeup
513  * @dev:	Reference to pch_udc_dev structure
514  */
515 static inline void pch_udc_rmt_wakeup(struct pch_udc_dev *dev)
516 {
517 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
518 	mdelay(1);
519 	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
520 }
521 
522 /**
523  * pch_udc_get_frame() - Get the current frame from device status register
524  * @dev:	Reference to pch_udc_dev structure
525  * Return:	current frame number
526  */
527 static inline int pch_udc_get_frame(struct pch_udc_dev *dev)
528 {
529 	u32 frame = pch_udc_readl(dev, UDC_DEVSTS_ADDR);
530 	return (frame & UDC_DEVSTS_TS_MASK) >> UDC_DEVSTS_TS_SHIFT;
531 }
532 
533 /**
534  * pch_udc_clear_selfpowered() - Clear the self power control
535  * @dev:	Reference to pch_udc_regs structure
536  */
537 static inline void pch_udc_clear_selfpowered(struct pch_udc_dev *dev)
538 {
539 	pch_udc_bit_clr(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
540 }
541 
542 /**
543  * pch_udc_set_selfpowered() - Set the self power control
544  * @dev:	Reference to pch_udc_regs structure
545  */
546 static inline void pch_udc_set_selfpowered(struct pch_udc_dev *dev)
547 {
548 	pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
549 }
550 
551 /**
552  * pch_udc_set_disconnect() - Set the disconnect status.
553  * @dev:	Reference to pch_udc_regs structure
554  */
555 static inline void pch_udc_set_disconnect(struct pch_udc_dev *dev)
556 {
557 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
558 }
559 
560 /**
561  * pch_udc_clear_disconnect() - Clear the disconnect status.
562  * @dev:	Reference to pch_udc_regs structure
563  */
564 static void pch_udc_clear_disconnect(struct pch_udc_dev *dev)
565 {
566 	/* Clear the disconnect */
567 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
568 	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
569 	mdelay(1);
570 	/* Resume USB signalling */
571 	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
572 }
573 
574 /**
575  * pch_udc_reconnect() - This API initializes the usb device controller
576  *						and clears the disconnect status.
577  * @dev:		Reference to pch_udc_regs structure
578  */
579 static void pch_udc_init(struct pch_udc_dev *dev);
580 static void pch_udc_reconnect(struct pch_udc_dev *dev)
581 {
582 	pch_udc_init(dev);
583 
584 	/* enable device interrupts */
585 	/* pch_udc_enable_interrupts() */
586 	pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR,
587 			UDC_DEVINT_UR | UDC_DEVINT_ENUM);
588 
589 	/* Clear the disconnect */
590 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
591 	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
592 	mdelay(1);
593 	/* Resume USB signalling */
594 	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
595 }
596 
597 /**
598  * pch_udc_vbus_session() - Set or clear the disconnect status.
599  * @dev:	Reference to pch_udc_regs structure
600  * @is_active:	Parameter specifying the action
601  *		  0:   indicating VBUS power is ending
602  *		  !0:  indicating VBUS power is starting
603  */
604 static inline void pch_udc_vbus_session(struct pch_udc_dev *dev,
605 					  int is_active)
606 {
607 	if (is_active) {
608 		pch_udc_reconnect(dev);
609 		dev->vbus_session = 1;
610 	} else {
611 		if (dev->driver && dev->driver->disconnect) {
612 			spin_lock(&dev->lock);
613 			dev->driver->disconnect(&dev->gadget);
614 			spin_unlock(&dev->lock);
615 		}
616 		pch_udc_set_disconnect(dev);
617 		dev->vbus_session = 0;
618 	}
619 }
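
/*
 * Call-flow sketch (illustrative): an external VBUS monitor reports session
 * changes through the helper above, e.g.
 *
 *	pch_udc_vbus_session(dev, 1);	// VBUS rose: reconnect to the host
 *	pch_udc_vbus_session(dev, 0);	// VBUS fell: force a disconnect
 *
 * The helper takes dev->lock internally around the gadget disconnect
 * callback, so callers must not hold the lock.
 */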
620 
621 /**
622  * pch_udc_ep_set_stall() - Set the stall of endpoint
623  * @ep:		Reference to structure of type pch_udc_ep_regs
624  */
625 static void pch_udc_ep_set_stall(struct pch_udc_ep *ep)
626 {
627 	if (ep->in) {
628 		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
629 		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
630 	} else {
631 		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
632 	}
633 }
634 
635 /**
636  * pch_udc_ep_clear_stall() - Clear the stall of endpoint
637  * @ep:		Reference to structure of type pch_udc_ep_regs
638  */
639 static inline void pch_udc_ep_clear_stall(struct pch_udc_ep *ep)
640 {
641 	/* Clear the stall */
642 	pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
643 	/* Clear NAK by writing CNAK */
644 	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
645 }
646 
647 /**
648  * pch_udc_ep_set_trfr_type() - Set the transfer type of endpoint
649  * @ep:		Reference to structure of type pch_udc_ep_regs
650  * @type:	Type of endpoint
651  */
652 static inline void pch_udc_ep_set_trfr_type(struct pch_udc_ep *ep,
653 					u8 type)
654 {
655 	pch_udc_ep_writel(ep, ((type << UDC_EPCTL_ET_SHIFT) &
656 				UDC_EPCTL_ET_MASK), UDC_EPCTL_ADDR);
657 }
658 
659 /**
660  * pch_udc_ep_set_bufsz() - Set the buffer size for the endpoint
661  * @ep:		Reference to structure of type pch_udc_ep_regs
662  * @buf_size:	The buffer word size
 * @ep_in:	Non-zero to program the IN buffer size, zero for OUT
663  */
664 static void pch_udc_ep_set_bufsz(struct pch_udc_ep *ep,
665 						 u32 buf_size, u32 ep_in)
666 {
667 	u32 data;
668 	if (ep_in) {
669 		data = pch_udc_ep_readl(ep, UDC_BUFIN_FRAMENUM_ADDR);
670 		data = (data & 0xffff0000) | (buf_size & 0xffff);
671 		pch_udc_ep_writel(ep, data, UDC_BUFIN_FRAMENUM_ADDR);
672 	} else {
673 		data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
674 		data = (buf_size << 16) | (data & 0xffff);
675 		pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
676 	}
677 }
678 
679 /**
680  * pch_udc_ep_set_maxpkt() - Set the Max packet size for the endpoint
681  * @ep:		Reference to structure of type pch_udc_ep_regs
682  * @pkt_size:	The packet byte size
683  */
684 static void pch_udc_ep_set_maxpkt(struct pch_udc_ep *ep, u32 pkt_size)
685 {
686 	u32 data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
687 	data = (data & 0xffff0000) | (pkt_size & 0xffff);
688 	pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
689 }
690 
691 /**
692  * pch_udc_ep_set_subptr() - Set the Setup buffer pointer for the endpoint
693  * @ep:		Reference to structure of type pch_udc_ep_regs
694  * @addr:	DMA address of the setup buffer
695  */
696 static inline void pch_udc_ep_set_subptr(struct pch_udc_ep *ep, u32 addr)
697 {
698 	pch_udc_ep_writel(ep, addr, UDC_SUBPTR_ADDR);
699 }
700 
701 /**
702  * pch_udc_ep_set_ddptr() - Set the Data descriptor pointer for the endpoint
703  * @ep:		Reference to structure of type pch_udc_ep_regs
704  * @addr:	DMA address of the data descriptor
705  */
706 static inline void pch_udc_ep_set_ddptr(struct pch_udc_ep *ep, u32 addr)
707 {
708 	pch_udc_ep_writel(ep, addr, UDC_DESPTR_ADDR);
709 }
710 
711 /**
712  * pch_udc_ep_set_pd() - Set the poll demand bit for the endpoint
713  * @ep:		Reference to structure of type pch_udc_ep_regs
714  */
715 static inline void pch_udc_ep_set_pd(struct pch_udc_ep *ep)
716 {
717 	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_P);
718 }
719 
720 /**
721  * pch_udc_ep_set_rrdy() - Set the receive ready bit for the endpoint
722  * @ep:		Reference to structure of type pch_udc_ep_regs
723  */
724 static inline void pch_udc_ep_set_rrdy(struct pch_udc_ep *ep)
725 {
726 	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
727 }
728 
729 /**
730  * pch_udc_ep_clear_rrdy() - Clear the receive ready bit for the endpoint
731  * @ep:		Reference to structure of type pch_udc_ep_regs
732  */
733 static inline void pch_udc_ep_clear_rrdy(struct pch_udc_ep *ep)
734 {
735 	pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
736 }
737 
738 /**
739  * pch_udc_set_dma() - Set the 'TDE' or RDE bit of device control
740  *			register depending on the direction specified
741  * @dev:	Reference to structure of type pch_udc_regs
742  * @dir:	whether Tx or Rx
743  *		  DMA_DIR_RX: Receive
744  *		  DMA_DIR_TX: Transmit
745  */
746 static inline void pch_udc_set_dma(struct pch_udc_dev *dev, int dir)
747 {
748 	if (dir == DMA_DIR_RX)
749 		pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
750 	else if (dir == DMA_DIR_TX)
751 		pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
752 }
753 
754 /**
755  * pch_udc_clear_dma() - Clear the 'TDE' or RDE bit of device control
756  *				 register depending on the direction specified
757  * @dev:	Reference to structure of type pch_udc_regs
758  * @dir:	Whether Tx or Rx
759  *		  DMA_DIR_RX: Receive
760  *		  DMA_DIR_TX: Transmit
761  */
762 static inline void pch_udc_clear_dma(struct pch_udc_dev *dev, int dir)
763 {
764 	if (dir == DMA_DIR_RX)
765 		pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
766 	else if (dir == DMA_DIR_TX)
767 		pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
768 }
769 
770 /**
771  * pch_udc_set_csr_done() - Set the device control register
772  *				CSR done field (bit 13)
773  * @dev:	reference to structure of type pch_udc_regs
774  */
775 static inline void pch_udc_set_csr_done(struct pch_udc_dev *dev)
776 {
777 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_CSR_DONE);
778 }
779 
780 /**
781  * pch_udc_disable_interrupts() - Disables the specified interrupts
782  * @dev:	Reference to structure of type pch_udc_regs
783  * @mask:	Mask to disable interrupts
784  */
785 static inline void pch_udc_disable_interrupts(struct pch_udc_dev *dev,
786 					    u32 mask)
787 {
788 	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, mask);
789 }
790 
791 /**
792  * pch_udc_enable_interrupts() - Enable the specified interrupts
793  * @dev:	Reference to structure of type pch_udc_regs
794  * @mask:	Mask to enable interrupts
795  */
796 static inline void pch_udc_enable_interrupts(struct pch_udc_dev *dev,
797 					   u32 mask)
798 {
799 	pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR, mask);
800 }
801 
802 /**
803  * pch_udc_disable_ep_interrupts() - Disable endpoint interrupts
804  * @dev:	Reference to structure of type pch_udc_regs
805  * @mask:	Mask to disable interrupts
806  */
807 static inline void pch_udc_disable_ep_interrupts(struct pch_udc_dev *dev,
808 						u32 mask)
809 {
810 	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, mask);
811 }
812 
813 /**
814  * pch_udc_enable_ep_interrupts() - Enable endpoint interrupts
815  * @dev:	Reference to structure of type pch_udc_regs
816  * @mask:	Mask to enable interrupts
817  */
818 static inline void pch_udc_enable_ep_interrupts(struct pch_udc_dev *dev,
819 					      u32 mask)
820 {
821 	pch_udc_bit_clr(dev, UDC_EPIRQMSK_ADDR, mask);
822 }
823 
824 /**
825  * pch_udc_read_device_interrupts() - Read the device interrupts
826  * @dev:	Reference to structure of type pch_udc_regs
827  * Return:	The device interrupt status
828  */
829 static inline u32 pch_udc_read_device_interrupts(struct pch_udc_dev *dev)
830 {
831 	return pch_udc_readl(dev, UDC_DEVIRQSTS_ADDR);
832 }
833 
834 /**
835  * pch_udc_write_device_interrupts() - Write device interrupts
836  * @dev:	Reference to structure of type pch_udc_regs
837  * @val:	The value to be written to interrupt register
838  */
839 static inline void pch_udc_write_device_interrupts(struct pch_udc_dev *dev,
840 						     u32 val)
841 {
842 	pch_udc_writel(dev, val, UDC_DEVIRQSTS_ADDR);
843 }
844 
845 /**
846  * pch_udc_read_ep_interrupts() - Read the endpoint interrupts
847  * @dev:	Reference to structure of type pch_udc_regs
848  * Return:	The endpoint interrupt status
849  */
850 static inline u32 pch_udc_read_ep_interrupts(struct pch_udc_dev *dev)
851 {
852 	return pch_udc_readl(dev, UDC_EPIRQSTS_ADDR);
853 }
854 
855 /**
856  * pch_udc_write_ep_interrupts() - Clear endpoint interrupts
857  * @dev:	Reference to structure of type pch_udc_regs
858  * @val:	The value to be written to interrupt register
859  */
860 static inline void pch_udc_write_ep_interrupts(struct pch_udc_dev *dev,
861 					     u32 val)
862 {
863 	pch_udc_writel(dev, val, UDC_EPIRQSTS_ADDR);
864 }
865 
866 /**
867  * pch_udc_read_device_status() - Read the device status
868  * @dev:	Reference to structure of type pch_udc_regs
869  * Return:	The device status
870  */
871 static inline u32 pch_udc_read_device_status(struct pch_udc_dev *dev)
872 {
873 	return pch_udc_readl(dev, UDC_DEVSTS_ADDR);
874 }
875 
876 /**
877  * pch_udc_read_ep_control() - Read the endpoint control
878  * @ep:		Reference to structure of type pch_udc_ep_regs
879  * Return:	The endpoint control register value
880  */
881 static inline u32 pch_udc_read_ep_control(struct pch_udc_ep *ep)
882 {
883 	return pch_udc_ep_readl(ep, UDC_EPCTL_ADDR);
884 }
885 
886 /**
887  * pch_udc_clear_ep_control() - Clear the endpoint control register
888  * @ep:		Reference to structure of type pch_udc_ep_regs
890  */
891 static inline void pch_udc_clear_ep_control(struct pch_udc_ep *ep)
892 {
893 	pch_udc_ep_writel(ep, 0, UDC_EPCTL_ADDR);
894 }
895 
896 /**
897  * pch_udc_read_ep_status() - Read the endpoint status
898  * @ep:		Reference to structure of type pch_udc_ep_regs
899  * Return:	The endpoint status
900  */
901 static inline u32 pch_udc_read_ep_status(struct pch_udc_ep *ep)
902 {
903 	return pch_udc_ep_readl(ep, UDC_EPSTS_ADDR);
904 }
905 
906 /**
907  * pch_udc_clear_ep_status() - Clear the endpoint status
908  * @ep:		Reference to structure of type pch_udc_ep_regs
909  * @stat:	Endpoint status
910  */
911 static inline void pch_udc_clear_ep_status(struct pch_udc_ep *ep,
912 					 u32 stat)
913 {
914 	pch_udc_ep_writel(ep, stat, UDC_EPSTS_ADDR);
915 }
916 
917 /**
918  * pch_udc_ep_set_nak() - Set the bit 7 (SNAK field)
919  *				of the endpoint control register
920  * @ep:		Reference to structure of type pch_udc_ep_regs
921  */
922 static inline void pch_udc_ep_set_nak(struct pch_udc_ep *ep)
923 {
924 	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_SNAK);
925 }
926 
927 /**
928  * pch_udc_ep_clear_nak() - Set the bit 8 (CNAK field)
929  *				of the endpoint control register
930  * @ep:		reference to structure of type pch_udc_ep_regs
931  */
932 static void pch_udc_ep_clear_nak(struct pch_udc_ep *ep)
933 {
934 	unsigned int loopcnt = 0;
935 	struct pch_udc_dev *dev = ep->dev;
936 
937 	if (!(pch_udc_ep_readl(ep, UDC_EPCTL_ADDR) & UDC_EPCTL_NAK))
938 		return;
939 	if (!ep->in) {
940 		loopcnt = 10000;
941 		while (!(pch_udc_read_ep_status(ep) & UDC_EPSTS_MRXFIFO_EMP) &&
942 			--loopcnt)
943 			udelay(5);
944 		if (!loopcnt)
945 			dev_err(&dev->pdev->dev, "%s: RxFIFO not Empty\n",
946 				__func__);
947 	}
948 	loopcnt = 10000;
949 	while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_NAK) && --loopcnt) {
950 		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
951 		udelay(5);
952 	}
953 	if (!loopcnt)
954 		dev_err(&dev->pdev->dev, "%s: Clear NAK not set for ep%d%s\n",
955 			__func__, ep->num, (ep->in ? "in" : "out"));
956 }
957 
958 /**
959  * pch_udc_ep_fifo_flush() - Flush the endpoint fifo
960  * @ep:	reference to structure of type pch_udc_ep_regs
961  * @dir:	direction of endpoint
962  *		  0:  endpoint is OUT
963  *		  !0: endpoint is IN
964  */
965 static void pch_udc_ep_fifo_flush(struct pch_udc_ep *ep, int dir)
966 {
967 	if (dir) {	/* IN ep */
968 		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
969 		return;
970 	}
971 }
972 
973 /**
974  * pch_udc_ep_enable() - This API enables the endpoint
975  * @ep:		Reference to the endpoint structure
976  * @cfg:	Current configuration information
 * @desc:	Endpoint descriptor
977  */
978 static void pch_udc_ep_enable(struct pch_udc_ep *ep,
979 			       struct pch_udc_cfg_data *cfg,
980 			       const struct usb_endpoint_descriptor *desc)
981 {
982 	u32 val = 0;
983 	u32 buff_size = 0;
984 
985 	pch_udc_ep_set_trfr_type(ep, desc->bmAttributes);
986 	if (ep->in)
987 		buff_size = UDC_EPIN_BUFF_SIZE;
988 	else
989 		buff_size = UDC_EPOUT_BUFF_SIZE;
990 	pch_udc_ep_set_bufsz(ep, buff_size, ep->in);
991 	pch_udc_ep_set_maxpkt(ep, usb_endpoint_maxp(desc));
992 	pch_udc_ep_set_nak(ep);
993 	pch_udc_ep_fifo_flush(ep, ep->in);
994 	/* Configure the endpoint */
995 	val = ep->num << UDC_CSR_NE_NUM_SHIFT | ep->in << UDC_CSR_NE_DIR_SHIFT |
996 	      ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) <<
997 		UDC_CSR_NE_TYPE_SHIFT) |
998 	      (cfg->cur_cfg << UDC_CSR_NE_CFG_SHIFT) |
999 	      (cfg->cur_intf << UDC_CSR_NE_INTF_SHIFT) |
1000 	      (cfg->cur_alt << UDC_CSR_NE_ALT_SHIFT) |
1001 	      usb_endpoint_maxp(desc) << UDC_CSR_NE_MAX_PKT_SHIFT;
1002 
1003 	if (ep->in)
1004 		pch_udc_write_csr(ep->dev, val, UDC_EPIN_IDX(ep->num));
1005 	else
1006 		pch_udc_write_csr(ep->dev, val, UDC_EPOUT_IDX(ep->num));
1007 }
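
/*
 * Worked example (illustrative, values chosen for this sketch): for a bulk
 * IN endpoint 1 in configuration 1, interface 0, alternate 0, with a
 * 512-byte max packet, the CSR value composed above is
 *
 *	(1 << UDC_CSR_NE_NUM_SHIFT)  | (1 << UDC_CSR_NE_DIR_SHIFT) |
 *	(2 << UDC_CSR_NE_TYPE_SHIFT) | (1 << UDC_CSR_NE_CFG_SHIFT) |
 *	(512 << UDC_CSR_NE_MAX_PKT_SHIFT)  =  0x100000d1
 *
 * and it is written to CSR slot UDC_EPIN_IDX(1) == 2.
 */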
1008 
1009 /**
1010  * pch_udc_ep_disable() - This API disables the endpoint
1011  * @ep:		Reference to the endpoint structure
1012  */
1013 static void pch_udc_ep_disable(struct pch_udc_ep *ep)
1014 {
1015 	if (ep->in) {
1016 		/* flush the fifo */
1017 		pch_udc_ep_writel(ep, UDC_EPCTL_F, UDC_EPCTL_ADDR);
1018 		/* set NAK */
1019 		pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
1020 		pch_udc_ep_bit_set(ep, UDC_EPSTS_ADDR, UDC_EPSTS_IN);
1021 	} else {
1022 		/* set NAK */
1023 		pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
1024 	}
1025 	/* reset desc pointer */
1026 	pch_udc_ep_writel(ep, 0, UDC_DESPTR_ADDR);
1027 }
1028 
1029 /**
1030  * pch_udc_wait_ep_stall() - Wait until the endpoint stall bit clears.
1031  * @ep:	Reference to the endpoint structure
1032  */
1033 static void pch_udc_wait_ep_stall(struct pch_udc_ep *ep)
1034 {
1035 	unsigned int count = 10000;
1036 
1037 	/* Wait till idle */
1038 	while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_S) && --count)
1039 		udelay(5);
1040 	if (!count)
1041 		dev_err(&ep->dev->pdev->dev, "%s: wait error\n", __func__);
1042 }
1043 
1044 /**
1045  * pch_udc_init() - This API initializes usb device controller
1046  * @dev:	Reference to pch_udc_dev structure
1047  */
1048 static void pch_udc_init(struct pch_udc_dev *dev)
1049 {
1050 	if (NULL == dev) {
1051 		pr_err("%s: Invalid address\n", __func__);
1052 		return;
1053 	}
1054 	/* Soft Reset and Reset PHY */
1055 	pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
1056 	pch_udc_writel(dev, UDC_SRST | UDC_PSRST, UDC_SRST_ADDR);
1057 	mdelay(1);
1058 	pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
1059 	pch_udc_writel(dev, 0x00, UDC_SRST_ADDR);
1060 	mdelay(1);
1061 	/* mask and clear all device interrupts */
1062 	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
1063 	pch_udc_bit_set(dev, UDC_DEVIRQSTS_ADDR, UDC_DEVINT_MSK);
1064 
1065 	/* mask and clear all ep interrupts */
1066 	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1067 	pch_udc_bit_set(dev, UDC_EPIRQSTS_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1068 
1069 	/* enable dynamic CSR programming, self powered and device speed */
1070 	if (speed_fs)
1071 		pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
1072 				UDC_DEVCFG_SP | UDC_DEVCFG_SPD_FS);
1073 	else /* default: high speed */
1074 		pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
1075 				UDC_DEVCFG_SP | UDC_DEVCFG_SPD_HS);
1076 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR,
1077 			(PCH_UDC_THLEN << UDC_DEVCTL_THLEN_SHIFT) |
1078 			(PCH_UDC_BRLEN << UDC_DEVCTL_BRLEN_SHIFT) |
1079 			UDC_DEVCTL_MODE | UDC_DEVCTL_BREN |
1080 			UDC_DEVCTL_THE);
1081 }
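
/*
 * Worked expansion (illustrative): with PCH_UDC_THLEN = 0x1F and
 * PCH_UDC_BRLEN = 0x0F, the device-control value OR'ed in above is
 *
 *	(0x1F << 24) | (0x0F << 16) | UDC_DEVCTL_MODE | UDC_DEVCTL_BREN |
 *	UDC_DEVCTL_THE  =  0x1f0f0380
 */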
1082 
1083 /**
1084  * pch_udc_exit() - This API exits the usb device controller
1085  * @dev:	Reference to pch_udc_regs structure
1086  */
1087 static void pch_udc_exit(struct pch_udc_dev *dev)
1088 {
1089 	/* mask all device interrupts */
1090 	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
1091 	/* mask all ep interrupts */
1092 	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1093 	/* put device in disconnected state */
1094 	pch_udc_set_disconnect(dev);
1095 }
1096 
1097 /**
1098  * pch_udc_pcd_get_frame() - This API is invoked to get the current frame number
1099  * @gadget:	Reference to the gadget driver
1100  *
1101  * Return codes:
1102  *	0:		Success
1103  *	-EINVAL:	If the gadget passed is NULL
1104  */
1105 static int pch_udc_pcd_get_frame(struct usb_gadget *gadget)
1106 {
1107 	struct pch_udc_dev	*dev;
1108 
1109 	if (!gadget)
1110 		return -EINVAL;
1111 	dev = container_of(gadget, struct pch_udc_dev, gadget);
1112 	return pch_udc_get_frame(dev);
1113 }
1114 
1115 /**
1116  * pch_udc_pcd_wakeup() - This API is invoked to initiate a remote wakeup
1117  * @gadget:	Reference to the gadget driver
1118  *
1119  * Return codes:
1120  *	0:		Success
1121  *	-EINVAL:	If the gadget passed is NULL
1122  */
1123 static int pch_udc_pcd_wakeup(struct usb_gadget *gadget)
1124 {
1125 	struct pch_udc_dev	*dev;
1126 	unsigned long		flags;
1127 
1128 	if (!gadget)
1129 		return -EINVAL;
1130 	dev = container_of(gadget, struct pch_udc_dev, gadget);
1131 	spin_lock_irqsave(&dev->lock, flags);
1132 	pch_udc_rmt_wakeup(dev);
1133 	spin_unlock_irqrestore(&dev->lock, flags);
1134 	return 0;
1135 }
1136 
1137 /**
1138  * pch_udc_pcd_selfpowered() - This API is invoked to specify whether the device
1139  *				is self powered or not
1140  * @gadget:	Reference to the gadget driver
1141  * @value:	Specifies self powered or not
1142  *
1143  * Return codes:
1144  *	0:		Success
1145  *	-EINVAL:	If the gadget passed is NULL
1146  */
1147 static int pch_udc_pcd_selfpowered(struct usb_gadget *gadget, int value)
1148 {
1149 	struct pch_udc_dev	*dev;
1150 
1151 	if (!gadget)
1152 		return -EINVAL;
1153 	gadget->is_selfpowered = (value != 0);
1154 	dev = container_of(gadget, struct pch_udc_dev, gadget);
1155 	if (value)
1156 		pch_udc_set_selfpowered(dev);
1157 	else
1158 		pch_udc_clear_selfpowered(dev);
1159 	return 0;
1160 }
1161 
1162 /**
1163  * pch_udc_pcd_pullup() - This API is invoked to make the device
1164  *				visible/invisible to the host
1165  * @gadget:	Reference to the gadget driver
1166  * @is_on:	Specifies whether the pull up is made active or inactive
1167  *
1168  * Return codes:
1169  *	0:		Success
1170  *	-EINVAL:	If the gadget passed is NULL
1171  */
1172 static int pch_udc_pcd_pullup(struct usb_gadget *gadget, int is_on)
1173 {
1174 	struct pch_udc_dev	*dev;
1175 
1176 	if (!gadget)
1177 		return -EINVAL;
1178 	dev = container_of(gadget, struct pch_udc_dev, gadget);
1179 	if (is_on) {
1180 		pch_udc_reconnect(dev);
1181 	} else {
1182 		if (dev->driver && dev->driver->disconnect) {
1183 			spin_lock(&dev->lock);
1184 			dev->driver->disconnect(&dev->gadget);
1185 			spin_unlock(&dev->lock);
1186 		}
1187 		pch_udc_set_disconnect(dev);
1188 	}
1189 
1190 	return 0;
1191 }
1192 
1193 /**
1194  * pch_udc_pcd_vbus_session() - This API is used by a driver for an external
1195  *				transceiver (or GPIO) that
1196  *				detects a VBUS power session starting/ending
1197  * @gadget:	Reference to the gadget driver
1198  * @is_active:	specifies whether the session is starting or ending
1199  *
1200  * Return codes:
1201  *	0:		Success
1202  *	-EINVAL:	If the gadget passed is NULL
1203  */
1204 static int pch_udc_pcd_vbus_session(struct usb_gadget *gadget, int is_active)
1205 {
1206 	struct pch_udc_dev	*dev;
1207 
1208 	if (!gadget)
1209 		return -EINVAL;
1210 	dev = container_of(gadget, struct pch_udc_dev, gadget);
1211 	pch_udc_vbus_session(dev, is_active);
1212 	return 0;
1213 }
1214 
1215 /**
1216  * pch_udc_pcd_vbus_draw() - This API is used by gadget drivers during
1217  *				SET_CONFIGURATION calls to
1218  *				specify how much power the device can consume
1219  * @gadget:	Reference to the gadget driver
1220  * @mA:		specifies the current limit in 2mA unit
1221  *
1222  * Return codes:
1223  *	-EINVAL:	If the gadget passed is NULL
1224  *	-EOPNOTSUPP:
1225  */
1226 static int pch_udc_pcd_vbus_draw(struct usb_gadget *gadget, unsigned int mA)
1227 {
1228 	return -EOPNOTSUPP;
1229 }
1230 
1231 static int pch_udc_start(struct usb_gadget *g,
1232 		struct usb_gadget_driver *driver);
1233 static int pch_udc_stop(struct usb_gadget *g);
1234 
1235 static const struct usb_gadget_ops pch_udc_ops = {
1236 	.get_frame = pch_udc_pcd_get_frame,
1237 	.wakeup = pch_udc_pcd_wakeup,
1238 	.set_selfpowered = pch_udc_pcd_selfpowered,
1239 	.pullup = pch_udc_pcd_pullup,
1240 	.vbus_session = pch_udc_pcd_vbus_session,
1241 	.vbus_draw = pch_udc_pcd_vbus_draw,
1242 	.udc_start = pch_udc_start,
1243 	.udc_stop = pch_udc_stop,
1244 };
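
/*
 * Registration sketch (illustrative, not a verbatim copy of the probe code):
 * the ops table above is wired to the gadget before it is handed to the UDC
 * core, roughly:
 *
 *	dev->gadget.ops = &pch_udc_ops;
 *	dev->gadget.name = KBUILD_MODNAME;
 *	retval = usb_add_gadget_udc(&pdev->dev, &dev->gadget);
 */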
1245 
1246 /**
1247  * pch_vbus_gpio_get_value() - This API gets value of GPIO port as VBUS status.
1248  * @dev:	Reference to the driver structure
1249  *
1250  * Return value:
1251  *	1: VBUS is high
1252  *	0: VBUS is low
1253  *     -1: VBUS detection via GPIO is not enabled
1254  */
1255 static int pch_vbus_gpio_get_value(struct pch_udc_dev *dev)
1256 {
1257 	int vbus = 0;
1258 
1259 	if (dev->vbus_gpio.port)
1260 		vbus = gpio_get_value(dev->vbus_gpio.port) ? 1 : 0;
1261 	else
1262 		vbus = -1;
1263 
1264 	return vbus;
1265 }
1266 
1267 /**
1268  * pch_vbus_gpio_work_fall() - This API keeps watch on VBUS becoming Low.
1269  *                             If VBUS is Low, disconnect is processed
1270  * @irq_work:	Structure for WorkQueue
1271  *
1272  */
1273 static void pch_vbus_gpio_work_fall(struct work_struct *irq_work)
1274 {
1275 	struct pch_vbus_gpio_data *vbus_gpio = container_of(irq_work,
1276 		struct pch_vbus_gpio_data, irq_work_fall);
1277 	struct pch_udc_dev *dev =
1278 		container_of(vbus_gpio, struct pch_udc_dev, vbus_gpio);
1279 	int vbus_saved = -1;
1280 	int vbus;
1281 	int count;
1282 
1283 	if (!dev->vbus_gpio.port)
1284 		return;
1285 
1286 	for (count = 0; count < (PCH_VBUS_PERIOD / PCH_VBUS_INTERVAL);
1287 		count++) {
1288 		vbus = pch_vbus_gpio_get_value(dev);
1289 
1290 		if ((vbus_saved == vbus) && (vbus == 0)) {
1291 			dev_dbg(&dev->pdev->dev, "VBUS fell");
1292 			if (dev->driver
1293 				&& dev->driver->disconnect) {
1294 				dev->driver->disconnect(
1295 					&dev->gadget);
1296 			}
1297 			if (dev->vbus_gpio.intr)
1298 				pch_udc_init(dev);
1299 			else
1300 				pch_udc_reconnect(dev);
1301 			return;
1302 		}
1303 		vbus_saved = vbus;
1304 		mdelay(PCH_VBUS_INTERVAL);
1305 	}
1306 }
1307 
1308 /**
1309  * pch_vbus_gpio_work_rise() - This API checks whether VBUS is High.
1310  *                             If VBUS is High, connect is processed
1311  * @irq_work:	Structure for WorkQueue
1312  *
1313  */
1314 static void pch_vbus_gpio_work_rise(struct work_struct *irq_work)
1315 {
1316 	struct pch_vbus_gpio_data *vbus_gpio = container_of(irq_work,
1317 		struct pch_vbus_gpio_data, irq_work_rise);
1318 	struct pch_udc_dev *dev =
1319 		container_of(vbus_gpio, struct pch_udc_dev, vbus_gpio);
1320 	int vbus;
1321 
1322 	if (!dev->vbus_gpio.port)
1323 		return;
1324 
1325 	mdelay(PCH_VBUS_INTERVAL);
1326 	vbus = pch_vbus_gpio_get_value(dev);
1327 
1328 	if (vbus == 1) {
1329 		dev_dbg(&dev->pdev->dev, "VBUS rose");
1330 		pch_udc_reconnect(dev);
1331 		return;
1332 	}
1333 }
1334 
1335 /**
1336  * pch_vbus_gpio_irq() - IRQ handler for the GPIO interrupt signalling a VBUS change
1337  * @irq:	Interrupt request number
1338  * @data:	Reference to the device structure
1339  *
1340  * Return codes:
1341  *	IRQ_HANDLED:	The VBUS change was handled
1342  *	IRQ_NONE:	GPIO-based VBUS detection is not in use
1343  */
1344 static irqreturn_t pch_vbus_gpio_irq(int irq, void *data)
1345 {
1346 	struct pch_udc_dev *dev = (struct pch_udc_dev *)data;
1347 
1348 	if (!dev->vbus_gpio.port || !dev->vbus_gpio.intr)
1349 		return IRQ_NONE;
1350 
1351 	if (pch_vbus_gpio_get_value(dev))
1352 		schedule_work(&dev->vbus_gpio.irq_work_rise);
1353 	else
1354 		schedule_work(&dev->vbus_gpio.irq_work_fall);
1355 
1356 	return IRQ_HANDLED;
1357 }
1358 
1359 /**
1360  * pch_vbus_gpio_init() - This API initializes GPIO port detecting VBUS.
1361  * @dev:	Reference to the driver structure
1362  * @vbus_gpio_port:	GPIO port number used for VBUS detection
1363  *
1364  * Return codes:
1365  *	0: Success
1366  *	-EINVAL: GPIO port is invalid or can't be initialized.
1367  */
1368 static int pch_vbus_gpio_init(struct pch_udc_dev *dev, int vbus_gpio_port)
1369 {
1370 	int err;
1371 	int irq_num = 0;
1372 
1373 	dev->vbus_gpio.port = 0;
1374 	dev->vbus_gpio.intr = 0;
1375 
1376 	if (vbus_gpio_port <= -1)
1377 		return -EINVAL;
1378 
1379 	err = gpio_is_valid(vbus_gpio_port);
1380 	if (!err) {
1381 		pr_err("%s: gpio port %d is invalid\n",
1382 			__func__, vbus_gpio_port);
1383 		return -EINVAL;
1384 	}
1385 
1386 	err = gpio_request(vbus_gpio_port, "pch_vbus");
1387 	if (err) {
1388 		pr_err("%s: can't request gpio port %d, err: %d\n",
1389 			__func__, vbus_gpio_port, err);
1390 		return -EINVAL;
1391 	}
1392 
1393 	dev->vbus_gpio.port = vbus_gpio_port;
1394 	gpio_direction_input(vbus_gpio_port);
1395 	INIT_WORK(&dev->vbus_gpio.irq_work_fall, pch_vbus_gpio_work_fall);
1396 
1397 	irq_num = gpio_to_irq(vbus_gpio_port);
1398 	if (irq_num > 0) {
1399 		irq_set_irq_type(irq_num, IRQ_TYPE_EDGE_BOTH);
1400 		err = request_irq(irq_num, pch_vbus_gpio_irq, 0,
1401 			"vbus_detect", dev);
1402 		if (!err) {
1403 			dev->vbus_gpio.intr = irq_num;
1404 			INIT_WORK(&dev->vbus_gpio.irq_work_rise,
1405 				pch_vbus_gpio_work_rise);
1406 		} else {
1407 			pr_err("%s: can't request irq %d, err: %d\n",
1408 				__func__, irq_num, err);
1409 		}
1410 	}
1411 
1412 	return 0;
1413 }
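
/*
 * Usage sketch (illustrative): the probe path passes the module-level
 * vbus_gpio_port setting to this helper; if it fails, VBUS is simply not
 * monitored through a GPIO:
 *
 *	if (pch_vbus_gpio_init(dev, vbus_gpio_port))
 *		dev_dbg(&dev->pdev->dev, "not using GPIO VBUS detection\n");
 */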
1414 
1415 /**
1416  * pch_vbus_gpio_free() - This API frees resources of GPIO port
1417  * @dev:	Reference to the driver structure
1418  */
1419 static void pch_vbus_gpio_free(struct pch_udc_dev *dev)
1420 {
1421 	if (dev->vbus_gpio.intr)
1422 		free_irq(dev->vbus_gpio.intr, dev);
1423 
1424 	if (dev->vbus_gpio.port)
1425 		gpio_free(dev->vbus_gpio.port);
1426 }
1427 
1428 /**
1429  * complete_req() - This API is invoked from the driver when processing
1430  *			of a request is complete
1431  * @ep:		Reference to the endpoint structure
1432  * @req:	Reference to the request structure
1433  * @status:	Indicates the success/failure of completion
1434  */
1435 static void complete_req(struct pch_udc_ep *ep, struct pch_udc_request *req,
1436 								 int status)
1437 	__releases(&dev->lock)
1438 	__acquires(&dev->lock)
1439 {
1440 	struct pch_udc_dev	*dev;
1441 	unsigned halted = ep->halted;
1442 
1443 	list_del_init(&req->queue);
1444 
1445 	/* set new status if pending */
1446 	if (req->req.status == -EINPROGRESS)
1447 		req->req.status = status;
1448 	else
1449 		status = req->req.status;
1450 
1451 	dev = ep->dev;
1452 	if (req->dma_mapped) {
1453 		if (req->dma == DMA_ADDR_INVALID) {
1454 			if (ep->in)
1455 				dma_unmap_single(&dev->pdev->dev, req->req.dma,
1456 						 req->req.length,
1457 						 DMA_TO_DEVICE);
1458 			else
1459 				dma_unmap_single(&dev->pdev->dev, req->req.dma,
1460 						 req->req.length,
1461 						 DMA_FROM_DEVICE);
1462 			req->req.dma = DMA_ADDR_INVALID;
1463 		} else {
1464 			if (ep->in)
1465 				dma_unmap_single(&dev->pdev->dev, req->dma,
1466 						 req->req.length,
1467 						 DMA_TO_DEVICE);
1468 			else {
1469 				dma_unmap_single(&dev->pdev->dev, req->dma,
1470 						 req->req.length,
1471 						 DMA_FROM_DEVICE);
1472 				memcpy(req->req.buf, req->buf, req->req.length);
1473 			}
1474 			kfree(req->buf);
1475 			req->dma = DMA_ADDR_INVALID;
1476 		}
1477 		req->dma_mapped = 0;
1478 	}
1479 	ep->halted = 1;
1480 	spin_unlock(&dev->lock);
1481 	if (!ep->in)
1482 		pch_udc_ep_clear_rrdy(ep);
1483 	usb_gadget_giveback_request(&ep->ep, &req->req);
1484 	spin_lock(&dev->lock);
1485 	ep->halted = halted;
1486 }
1487 
1488 /**
1489  * empty_req_queue() - This API empties the request queue of an endpoint
1490  * @ep:		Reference to the endpoint structure
1491  */
1492 static void empty_req_queue(struct pch_udc_ep *ep)
1493 {
1494 	struct pch_udc_request	*req;
1495 
1496 	ep->halted = 1;
1497 	while (!list_empty(&ep->queue)) {
1498 		req = list_entry(ep->queue.next, struct pch_udc_request, queue);
1499 		complete_req(ep, req, -ESHUTDOWN);	/* Remove from list */
1500 	}
1501 }
1502 
1503 /**
1504  * pch_udc_free_dma_chain() - This function frees the DMA chain created
1505  *				for the request
1506  * @dev:	Reference to the driver structure
1507  * @req:	Reference to the request whose chain is to be freed
1511  */
1512 static void pch_udc_free_dma_chain(struct pch_udc_dev *dev,
1513 				   struct pch_udc_request *req)
1514 {
1515 	struct pch_udc_data_dma_desc *td = req->td_data;
1516 	unsigned i = req->chain_len;
1517 
1518 	dma_addr_t addr2;
1519 	dma_addr_t addr = (dma_addr_t)td->next;
1520 	td->next = 0x00;
1521 	for (; i > 1; --i) {
1522 		/* do not free first desc., will be done by free for request */
1523 		td = phys_to_virt(addr);
1524 		addr2 = (dma_addr_t)td->next;
1525 		pci_pool_free(dev->data_requests, td, addr);
1526 		td->next = 0x00;
1527 		addr = addr2;
1528 	}
1529 	req->chain_len = 1;
1530 }
1531 
1532 /**
1533  * pch_udc_create_dma_chain() - This function creates or reinitializes
1534  *				a DMA chain
1535  * @ep:		Reference to the endpoint structure
1536  * @req:	Reference to the request
1537  * @buf_len:	The buffer length
1538  * @gfp_flags:	Flags to be used while mapping the data buffer
1539  *
1540  * Return codes:
1541  *	0:		success,
1542  *	-ENOMEM:	pci_pool_alloc invocation fails
1543  */
1544 static int pch_udc_create_dma_chain(struct pch_udc_ep *ep,
1545 				    struct pch_udc_request *req,
1546 				    unsigned long buf_len,
1547 				    gfp_t gfp_flags)
1548 {
1549 	struct pch_udc_data_dma_desc *td = req->td_data, *last;
1550 	unsigned long bytes = req->req.length, i = 0;
1551 	dma_addr_t dma_addr;
1552 	unsigned len = 1;
1553 
1554 	if (req->chain_len > 1)
1555 		pch_udc_free_dma_chain(ep->dev, req);
1556 
1557 	if (req->dma == DMA_ADDR_INVALID)
1558 		td->dataptr = req->req.dma;
1559 	else
1560 		td->dataptr = req->dma;
1561 
1562 	td->status = PCH_UDC_BS_HST_BSY;
1563 	for (; ; bytes -= buf_len, ++len) {
1564 		td->status = PCH_UDC_BS_HST_BSY | min(buf_len, bytes);
1565 		if (bytes <= buf_len)
1566 			break;
1567 		last = td;
1568 		td = pci_pool_alloc(ep->dev->data_requests, gfp_flags,
1569 				    &dma_addr);
1570 		if (!td)
1571 			goto nomem;
1572 		i += buf_len;
1573 		td->dataptr = req->td_data->dataptr + i;
1574 		last->next = dma_addr;
1575 	}
1576 
1577 	req->td_data_last = td;
1578 	td->status |= PCH_UDC_DMA_LAST;
1579 	td->next = req->td_data_phys;
1580 	req->chain_len = len;
1581 	return 0;
1582 
1583 nomem:
1584 	if (len > 1) {
1585 		req->chain_len = len;
1586 		pch_udc_free_dma_chain(ep->dev, req);
1587 	}
1588 	req->chain_len = 1;
1589 	return -ENOMEM;
1590 }
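
/*
 * Worked example (illustrative): prepare_dma() below calls this with
 * buf_len = ep->ep.maxpacket.  For a 1200-byte request on a bulk endpoint
 * with 512-byte packets the chain becomes three descriptors carrying 512,
 * 512 and 176 bytes; only the last one gets PCH_UDC_DMA_LAST and its @next
 * field wraps back to req->td_data_phys.
 */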
1591 
1592 /**
1593  * prepare_dma() - This function creates and initializes the DMA chain
1594  *			for the request
1595  * @ep:		Reference to the endpoint structure
1596  * @req:	Reference to the request
1597  * @gfp:	Flag to be used while mapping the data buffer
1598  *
1599  * Return codes:
1600  *	0:		Success
1601  *	Non-zero:	linux error number on failure
1602  */
1603 static int prepare_dma(struct pch_udc_ep *ep, struct pch_udc_request *req,
1604 			  gfp_t gfp)
1605 {
1606 	int	retval;
1607 
1608 	/* Allocate and create a DMA chain */
1609 	retval = pch_udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
1610 	if (retval) {
1611 		pr_err("%s: could not create DMA chain:%d\n", __func__, retval);
1612 		return retval;
1613 	}
1614 	if (ep->in)
1615 		req->td_data->status = (req->td_data->status &
1616 				~PCH_UDC_BUFF_STS) | PCH_UDC_BS_HST_RDY;
1617 	return 0;
1618 }
1619 
1620 /**
1621  * process_zlp() - This function processes zero length packets
1622  *			from the gadget driver
1623  * @ep:		Reference to the endpoint structure
1624  * @req:	Reference to the request
1625  */
1626 static void process_zlp(struct pch_udc_ep *ep, struct pch_udc_request *req)
1627 {
1628 	struct pch_udc_dev	*dev = ep->dev;
1629 
1630 	/* IN zlp's are handled by hardware */
1631 	complete_req(ep, req, 0);
1632 
1633 	/* if set_config or set_intf is waiting for ack by zlp
1634 	 * then set CSR_DONE
1635 	 */
1636 	if (dev->set_cfg_not_acked) {
1637 		pch_udc_set_csr_done(dev);
1638 		dev->set_cfg_not_acked = 0;
1639 	}
1640 	/* setup command is ACK'ed now by zlp */
1641 	if (!dev->stall && dev->waiting_zlp_ack) {
1642 		pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
1643 		dev->waiting_zlp_ack = 0;
1644 	}
1645 }
1646 
1647 /**
1648  * pch_udc_start_rxrequest() - This function starts a receive request.
1649  * @ep:		Reference to the endpoint structure
1650  * @req:	Reference to the request structure
1651  */
1652 static void pch_udc_start_rxrequest(struct pch_udc_ep *ep,
1653 					 struct pch_udc_request *req)
1654 {
1655 	struct pch_udc_data_dma_desc *td_data;
1656 
1657 	pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
1658 	td_data = req->td_data;
1659 	/* Set the status bits for all descriptors */
1660 	while (1) {
1661 		td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
1662 				    PCH_UDC_BS_HST_RDY;
1663 		if ((td_data->status & PCH_UDC_DMA_LAST) ==  PCH_UDC_DMA_LAST)
1664 			break;
1665 		td_data = phys_to_virt(td_data->next);
1666 	}
1667 	/* Write the descriptor pointer */
1668 	pch_udc_ep_set_ddptr(ep, req->td_data_phys);
1669 	req->dma_going = 1;
1670 	pch_udc_enable_ep_interrupts(ep->dev, UDC_EPINT_OUT_EP0 << ep->num);
1671 	pch_udc_set_dma(ep->dev, DMA_DIR_RX);
1672 	pch_udc_ep_clear_nak(ep);
1673 	pch_udc_ep_set_rrdy(ep);
1674 }
1675 
1676 /**
1677  * pch_udc_pcd_ep_enable() - This API enables the endpoint. It is called
1678  *				from gadget driver
1679  * @usbep:	Reference to the USB endpoint structure
1680  * @desc:	Reference to the USB endpoint descriptor structure
1681  *
1682  * Return codes:
1683  *	0:		Success
1684  *	-EINVAL:
1685  *	-ESHUTDOWN:
1686  */
1687 static int pch_udc_pcd_ep_enable(struct usb_ep *usbep,
1688 				    const struct usb_endpoint_descriptor *desc)
1689 {
1690 	struct pch_udc_ep	*ep;
1691 	struct pch_udc_dev	*dev;
1692 	unsigned long		iflags;
1693 
1694 	if (!usbep || (usbep->name == ep0_string) || !desc ||
1695 	    (desc->bDescriptorType != USB_DT_ENDPOINT) || !desc->wMaxPacketSize)
1696 		return -EINVAL;
1697 
1698 	ep = container_of(usbep, struct pch_udc_ep, ep);
1699 	dev = ep->dev;
1700 	if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
1701 		return -ESHUTDOWN;
1702 	spin_lock_irqsave(&dev->lock, iflags);
1703 	ep->ep.desc = desc;
1704 	ep->halted = 0;
1705 	pch_udc_ep_enable(ep, &ep->dev->cfg_data, desc);
1706 	ep->ep.maxpacket = usb_endpoint_maxp(desc);
1707 	pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1708 	spin_unlock_irqrestore(&dev->lock, iflags);
1709 	return 0;
1710 }
1711 
1712 /**
1713  * pch_udc_pcd_ep_disable() - This API disables endpoint and is called
1714  *				from gadget driver
1715  * @usbep:	Reference to the USB endpoint structure
1716  *
1717  * Return codes:
1718  *	0:		Success
1719  *	-EINVAL:
1720  */
1721 static int pch_udc_pcd_ep_disable(struct usb_ep *usbep)
1722 {
1723 	struct pch_udc_ep	*ep;
1724 	unsigned long	iflags;
1725 
1726 	if (!usbep)
1727 		return -EINVAL;
1728 
1729 	ep = container_of(usbep, struct pch_udc_ep, ep);
1730 	if ((usbep->name == ep0_string) || !ep->ep.desc)
1731 		return -EINVAL;
1732 
1733 	spin_lock_irqsave(&ep->dev->lock, iflags);
1734 	empty_req_queue(ep);
1735 	ep->halted = 1;
1736 	pch_udc_ep_disable(ep);
1737 	pch_udc_disable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1738 	ep->ep.desc = NULL;
1739 	INIT_LIST_HEAD(&ep->queue);
1740 	spin_unlock_irqrestore(&ep->dev->lock, iflags);
1741 	return 0;
1742 }
1743 
1744 /**
1745  * pch_udc_alloc_request() - This function allocates request structure.
1746  *				It is called by gadget driver
1747  * @usbep:	Reference to the USB endpoint structure
1748  * @gfp:	Flag to be used while allocating memory
1749  *
1750  * Return codes:
1751  *	NULL:			Failure
1752  *	Allocated address:	Success
1753  */
1754 static struct usb_request *pch_udc_alloc_request(struct usb_ep *usbep,
1755 						  gfp_t gfp)
1756 {
1757 	struct pch_udc_request		*req;
1758 	struct pch_udc_ep		*ep;
1759 	struct pch_udc_data_dma_desc	*dma_desc;
1760 
1761 	if (!usbep)
1762 		return NULL;
1763 	ep = container_of(usbep, struct pch_udc_ep, ep);
1764 	req = kzalloc(sizeof *req, gfp);
1765 	if (!req)
1766 		return NULL;
1767 	req->req.dma = DMA_ADDR_INVALID;
1768 	req->dma = DMA_ADDR_INVALID;
1769 	INIT_LIST_HEAD(&req->queue);
1770 	if (!ep->dev->dma_addr)
1771 		return &req->req;
1772 	/* ep0 in requests are allocated from data pool here */
1773 	dma_desc = pci_pool_alloc(ep->dev->data_requests, gfp,
1774 				  &req->td_data_phys);
1775 	if (!dma_desc) {
1776 		kfree(req);
1777 		return NULL;
1778 	}
1779 	/* prevent from using desc. - set HOST BUSY */
1780 	dma_desc->status |= PCH_UDC_BS_HST_BSY;
1781 	dma_desc->dataptr = cpu_to_le32(DMA_ADDR_INVALID);
1782 	req->td_data = dma_desc;
1783 	req->td_data_last = dma_desc;
1784 	req->chain_len = 1;
1785 	return &req->req;
1786 }
1787 
1788 /**
1789  * pch_udc_free_request() - This function frees a request structure.
1790  *				It is called by the gadget driver
1791  * @usbep:	Reference to the USB endpoint structure
1792  * @usbreq:	Reference to the USB request
1793  */
1794 static void pch_udc_free_request(struct usb_ep *usbep,
1795 				  struct usb_request *usbreq)
1796 {
1797 	struct pch_udc_ep	*ep;
1798 	struct pch_udc_request	*req;
1799 	struct pch_udc_dev	*dev;
1800 
1801 	if (!usbep || !usbreq)
1802 		return;
1803 	ep = container_of(usbep, struct pch_udc_ep, ep);
1804 	req = container_of(usbreq, struct pch_udc_request, req);
1805 	dev = ep->dev;
1806 	if (!list_empty(&req->queue))
1807 		dev_err(&dev->pdev->dev, "%s: %s req=0x%p queue not empty\n",
1808 			__func__, usbep->name, req);
1809 	if (req->td_data != NULL) {
1810 		if (req->chain_len > 1)
1811 			pch_udc_free_dma_chain(ep->dev, req);
1812 		pci_pool_free(ep->dev->data_requests, req->td_data,
1813 			      req->td_data_phys);
1814 	}
1815 	kfree(req);
1816 }
1817 
1818 /**
1819  * pch_udc_pcd_queue() - This function queues a request packet. It is called
1820  *			by the gadget driver
1821  * @usbep:	Reference to the USB endpoint structure
1822  * @usbreq:	Reference to the USB request
1823  * @gfp:	Flag to be used while mapping the data buffer
1824  *
1825  * Return codes:
1826  *	0:			Success
1827  *	linux error number:	Failure
1828  */
1829 static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
1830 								 gfp_t gfp)
1831 {
1832 	int retval = 0;
1833 	struct pch_udc_ep	*ep;
1834 	struct pch_udc_dev	*dev;
1835 	struct pch_udc_request	*req;
1836 	unsigned long	iflags;
1837 
1838 	if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf)
1839 		return -EINVAL;
1840 	ep = container_of(usbep, struct pch_udc_ep, ep);
1841 	dev = ep->dev;
1842 	if (!ep->ep.desc && ep->num)
1843 		return -EINVAL;
1844 	req = container_of(usbreq, struct pch_udc_request, req);
1845 	if (!list_empty(&req->queue))
1846 		return -EINVAL;
1847 	if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
1848 		return -ESHUTDOWN;
1849 	spin_lock_irqsave(&dev->lock, iflags);
1850 	/* map the buffer for dma */
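	/*
	 * Buffers that are 32-bit aligned are DMA-mapped in place; otherwise a
	 * bounce buffer (req->buf) is allocated and mapped instead, and IN data
	 * is copied into it first.
	 */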
1851 	if (usbreq->length &&
1852 	    ((usbreq->dma == DMA_ADDR_INVALID) || !usbreq->dma)) {
1853 		if (!((unsigned long)(usbreq->buf) & 0x03)) {
1854 			if (ep->in)
1855 				usbreq->dma = dma_map_single(&dev->pdev->dev,
1856 							     usbreq->buf,
1857 							     usbreq->length,
1858 							     DMA_TO_DEVICE);
1859 			else
1860 				usbreq->dma = dma_map_single(&dev->pdev->dev,
1861 							     usbreq->buf,
1862 							     usbreq->length,
1863 							     DMA_FROM_DEVICE);
1864 		} else {
1865 			req->buf = kzalloc(usbreq->length, GFP_ATOMIC);
1866 			if (!req->buf) {
1867 				retval = -ENOMEM;
1868 				goto probe_end;
1869 			}
1870 			if (ep->in) {
1871 				memcpy(req->buf, usbreq->buf, usbreq->length);
1872 				req->dma = dma_map_single(&dev->pdev->dev,
1873 							  req->buf,
1874 							  usbreq->length,
1875 							  DMA_TO_DEVICE);
1876 			} else
1877 				req->dma = dma_map_single(&dev->pdev->dev,
1878 							  req->buf,
1879 							  usbreq->length,
1880 							  DMA_FROM_DEVICE);
1881 		}
1882 		req->dma_mapped = 1;
1883 	}
1884 	if (usbreq->length > 0) {
1885 		retval = prepare_dma(ep, req, GFP_ATOMIC);
1886 		if (retval)
1887 			goto probe_end;
1888 	}
1889 	usbreq->actual = 0;
1890 	usbreq->status = -EINPROGRESS;
1891 	req->dma_done = 0;
1892 	if (list_empty(&ep->queue) && !ep->halted) {
1893 		/* no pending transfer, so start this req */
1894 		if (!usbreq->length) {
1895 			process_zlp(ep, req);
1896 			retval = 0;
1897 			goto probe_end;
1898 		}
1899 		if (!ep->in) {
1900 			pch_udc_start_rxrequest(ep, req);
1901 		} else {
1902 			/*
1903 			 * For IN transfers the descriptors will be
1904 			 * programmed and the P bit will be set when
1905 			 * we get an IN token.
1906 			 */
1907 			pch_udc_wait_ep_stall(ep);
1908 			pch_udc_ep_clear_nak(ep);
1909 			pch_udc_enable_ep_interrupts(ep->dev, (1 << ep->num));
1910 		}
1911 	}
1912 	/* Now add this request to the ep's pending requests */
1913 	if (req != NULL)
1914 		list_add_tail(&req->queue, &ep->queue);
1915 
1916 probe_end:
1917 	spin_unlock_irqrestore(&dev->lock, iflags);
1918 	return retval;
1919 }
1920 
1921 /**
1922  * pch_udc_pcd_dequeue() - This function de-queues a request packet.
1923  *				It is called by the gadget driver
1924  * @usbep:	Reference to the USB endpoint structure
1925  * @usbreq:	Reference to the USB request
1926  *
1927  * Return codes:
1928  *	0:			Success
1929  *	linux error number:	Failure
1930  */
1931 static int pch_udc_pcd_dequeue(struct usb_ep *usbep,
1932 				struct usb_request *usbreq)
1933 {
1934 	struct pch_udc_ep	*ep;
1935 	struct pch_udc_request	*req;
1936 	unsigned long		flags;
1937 	int ret = -EINVAL;
1938 
1939 	ep = container_of(usbep, struct pch_udc_ep, ep);
1940 	if (!usbep || !usbreq || (!ep->ep.desc && ep->num))
1941 		return ret;
1942 	req = container_of(usbreq, struct pch_udc_request, req);
1943 	spin_lock_irqsave(&ep->dev->lock, flags);
1944 	/* make sure it's still queued on this endpoint */
1945 	list_for_each_entry(req, &ep->queue, queue) {
1946 		if (&req->req == usbreq) {
1947 			pch_udc_ep_set_nak(ep);
1948 			if (!list_empty(&req->queue))
1949 				complete_req(ep, req, -ECONNRESET);
1950 			ret = 0;
1951 			break;
1952 		}
1953 	}
1954 	spin_unlock_irqrestore(&ep->dev->lock, flags);
1955 	return ret;
1956 }
1957 
1958 /**
1959  * pch_udc_pcd_set_halt() - This function sets or clears the endpoint halt
1960  *			    feature
1961  * @usbep:	Reference to the USB endpoint structure
1962  * @halt:	Specifies whether to set or clear the feature
1963  *
1964  * Return codes:
1965  *	0:			Success
1966  *	linux error number:	Failure
1967  */
1968 static int pch_udc_pcd_set_halt(struct usb_ep *usbep, int halt)
1969 {
1970 	struct pch_udc_ep	*ep;
1971 	unsigned long iflags;
1972 	int ret;
1973 
1974 	if (!usbep)
1975 		return -EINVAL;
1976 	ep = container_of(usbep, struct pch_udc_ep, ep);
1977 	if (!ep->ep.desc && !ep->num)
1978 		return -EINVAL;
1979 	if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
1980 		return -ESHUTDOWN;
1981 	spin_lock_irqsave(&udc_stall_spinlock, iflags);
1982 	if (list_empty(&ep->queue)) {
1983 		if (halt) {
1984 			if (ep->num == PCH_UDC_EP0)
1985 				ep->dev->stall = 1;
1986 			pch_udc_ep_set_stall(ep);
1987 			pch_udc_enable_ep_interrupts(
1988 				ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1989 		} else {
1990 			pch_udc_ep_clear_stall(ep);
1991 		}
1992 		ret = 0;
1993 	} else {
1994 		ret = -EAGAIN;
1995 	}
1996 	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
1997 	return ret;
1998 }
1999 
2000 /**
2001  * pch_udc_pcd_set_wedge() - This function sets the endpoint wedge,
2002  *				a halt that the host cannot clear
2003  * @usbep:	Reference to the USB endpoint structure
2005  *
2006  * Return codes:
2007  *	0:			Success
2008  *	linux error number:	Failure
2009  */
2010 static int pch_udc_pcd_set_wedge(struct usb_ep *usbep)
2011 {
2012 	struct pch_udc_ep	*ep;
2013 	unsigned long iflags;
2014 	int ret;
2015 
2016 	if (!usbep)
2017 		return -EINVAL;
2018 	ep = container_of(usbep, struct pch_udc_ep, ep);
2019 	if (!ep->ep.desc && !ep->num)
2020 		return -EINVAL;
2021 	if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
2022 		return -ESHUTDOWN;
2023 	spin_lock_irqsave(&udc_stall_spinlock, iflags);
2024 	if (!list_empty(&ep->queue)) {
2025 		ret = -EAGAIN;
2026 	} else {
2027 		if (ep->num == PCH_UDC_EP0)
2028 			ep->dev->stall = 1;
2029 		pch_udc_ep_set_stall(ep);
2030 		pch_udc_enable_ep_interrupts(ep->dev,
2031 					     PCH_UDC_EPINT(ep->in, ep->num));
2032 		ep->dev->prot_stall = 1;
2033 		ret = 0;
2034 	}
2035 	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
2036 	return ret;
2037 }
2038 
2039 /**
2040  * pch_udc_pcd_fifo_flush() - This function flushes the FIFO of the specified endpoint
2041  * @usbep:	Reference to the USB endpoint structure
2042  */
2043 static void pch_udc_pcd_fifo_flush(struct usb_ep *usbep)
2044 {
2045 	struct pch_udc_ep  *ep;
2046 
2047 	if (!usbep)
2048 		return;
2049 
2050 	ep = container_of(usbep, struct pch_udc_ep, ep);
2051 	if (ep->ep.desc || !ep->num)
2052 		pch_udc_ep_fifo_flush(ep, ep->in);
2053 }
2054 
2055 static const struct usb_ep_ops pch_udc_ep_ops = {
2056 	.enable		= pch_udc_pcd_ep_enable,
2057 	.disable	= pch_udc_pcd_ep_disable,
2058 	.alloc_request	= pch_udc_alloc_request,
2059 	.free_request	= pch_udc_free_request,
2060 	.queue		= pch_udc_pcd_queue,
2061 	.dequeue	= pch_udc_pcd_dequeue,
2062 	.set_halt	= pch_udc_pcd_set_halt,
2063 	.set_wedge	= pch_udc_pcd_set_wedge,
2064 	.fifo_status	= NULL,
2065 	.fifo_flush	= pch_udc_pcd_fifo_flush,
2066 };
2067 
2068 /**
2069  * pch_udc_init_setup_buff() - This function initializes the SETUP buffer
2070  * @td_stp:	Reference to the SETUP buffer structure
2071  */
2072 static void pch_udc_init_setup_buff(struct pch_udc_stp_dma_desc *td_stp)
2073 {
2074 	static u32	pky_marker;
2075 
2076 	if (!td_stp)
2077 		return;
2078 	td_stp->reserved = ++pky_marker;
2079 	memset(&td_stp->request, 0xFF, sizeof td_stp->request);
2080 	td_stp->status = PCH_UDC_BS_HST_RDY;
2081 }
2082 
2083 /**
2084  * pch_udc_start_next_txrequest() - This function starts
2085  *					the next transmit (IN) request
2086  * @ep:	Reference to the endpoint structure
2087  */
2088 static void pch_udc_start_next_txrequest(struct pch_udc_ep *ep)
2089 {
2090 	struct pch_udc_request *req;
2091 	struct pch_udc_data_dma_desc *td_data;
2092 
2093 	if (pch_udc_read_ep_control(ep) & UDC_EPCTL_P)
2094 		return;
2095 
2096 	if (list_empty(&ep->queue))
2097 		return;
2098 
2099 	/* next request */
2100 	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2101 	if (req->dma_going)
2102 		return;
2103 	if (!req->td_data)
2104 		return;
2105 	pch_udc_wait_ep_stall(ep);
2106 	req->dma_going = 1;
2107 	pch_udc_ep_set_ddptr(ep, 0);
2108 	td_data = req->td_data;
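	/*
	 * Mark every descriptor in the chain host-ready, up to and including
	 * the one flagged PCH_UDC_DMA_LAST, before handing it to the engine.
	 */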
2109 	while (1) {
2110 		td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
2111 				   PCH_UDC_BS_HST_RDY;
2112 		if ((td_data->status & PCH_UDC_DMA_LAST) == PCH_UDC_DMA_LAST)
2113 			break;
2114 		td_data = phys_to_virt(td_data->next);
2115 	}
2116 	pch_udc_ep_set_ddptr(ep, req->td_data_phys);
2117 	pch_udc_set_dma(ep->dev, DMA_DIR_TX);
2118 	pch_udc_ep_set_pd(ep);
2119 	pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
2120 	pch_udc_ep_clear_nak(ep);
2121 }
2122 
2123 /**
2124  * pch_udc_complete_transfer() - This function completes a transfer
2125  * @ep:		Reference to the endpoint structure
2126  */
2127 static void pch_udc_complete_transfer(struct pch_udc_ep *ep)
2128 {
2129 	struct pch_udc_request *req;
2130 	struct pch_udc_dev *dev = ep->dev;
2131 
2132 	if (list_empty(&ep->queue))
2133 		return;
2134 	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2135 	if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
2136 	    PCH_UDC_BS_DMA_DONE)
2137 		return;
2138 	if ((req->td_data_last->status & PCH_UDC_RXTX_STS) !=
2139 	     PCH_UDC_RTS_SUCC) {
2140 		dev_err(&dev->pdev->dev, "Invalid RXTX status (0x%08x) "
2141 			"epstatus=0x%08x\n",
2142 		       (req->td_data_last->status & PCH_UDC_RXTX_STS),
2143 		       (int)(ep->epsts));
2144 		return;
2145 	}
2146 
2147 	req->req.actual = req->req.length;
2148 	req->td_data_last->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
2149 	req->td_data->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
2150 	complete_req(ep, req, 0);
2151 	req->dma_going = 0;
2152 	if (!list_empty(&ep->queue)) {
2153 		pch_udc_wait_ep_stall(ep);
2154 		pch_udc_ep_clear_nak(ep);
2155 		pch_udc_enable_ep_interrupts(ep->dev,
2156 					     PCH_UDC_EPINT(ep->in, ep->num));
2157 	} else {
2158 		pch_udc_disable_ep_interrupts(ep->dev,
2159 					      PCH_UDC_EPINT(ep->in, ep->num));
2160 	}
2161 }
2162 
2163 /**
2164  * pch_udc_complete_receiver() - This function completes a receive (OUT) request
2165  * @ep:		Reference to the endpoint structure
2166  */
2167 static void pch_udc_complete_receiver(struct pch_udc_ep *ep)
2168 {
2169 	struct pch_udc_request *req;
2170 	struct pch_udc_dev *dev = ep->dev;
2171 	unsigned int count;
2172 	struct pch_udc_data_dma_desc *td;
2173 	dma_addr_t addr;
2174 
2175 	if (list_empty(&ep->queue))
2176 		return;
2177 	/* next request */
2178 	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2179 	pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
2180 	pch_udc_ep_set_ddptr(ep, 0);
2181 	if ((req->td_data_last->status & PCH_UDC_BUFF_STS) ==
2182 	    PCH_UDC_BS_DMA_DONE)
2183 		td = req->td_data_last;
2184 	else
2185 		td = req->td_data;
2186 
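	/*
	 * Walk the descriptor chain for the descriptor the DMA engine marked
	 * both done and last; its status holds the received byte count.
	 */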
2187 	while (1) {
2188 		if ((td->status & PCH_UDC_RXTX_STS) != PCH_UDC_RTS_SUCC) {
2189 			dev_err(&dev->pdev->dev, "Invalid RXTX status=0x%08x "
2190 				"epstatus=0x%08x\n",
2191 				(req->td_data->status & PCH_UDC_RXTX_STS),
2192 				(int)(ep->epsts));
2193 			return;
2194 		}
2195 		if ((td->status & PCH_UDC_BUFF_STS) == PCH_UDC_BS_DMA_DONE)
2196 			if (td->status & PCH_UDC_DMA_LAST) {
2197 				count = td->status & PCH_UDC_RXTX_BYTES;
2198 				break;
2199 			}
2200 		if (td == req->td_data_last) {
2201 			dev_err(&dev->pdev->dev, "Not complete RX descriptor");
2202 			return;
2203 		}
2204 		addr = (dma_addr_t)td->next;
2205 		td = phys_to_virt(addr);
2206 	}
2207 	/* on 64k packets the RXBYTES field is zero */
2208 	if (!count && (req->req.length == UDC_DMA_MAXPACKET))
2209 		count = UDC_DMA_MAXPACKET;
2210 	req->td_data->status |= PCH_UDC_DMA_LAST;
2211 	td->status |= PCH_UDC_BS_HST_BSY;
2212 
2213 	req->dma_going = 0;
2214 	req->req.actual = count;
2215 	complete_req(ep, req, 0);
2216 	/* If there is a new or failed request, try that now */
2217 	if (!list_empty(&ep->queue)) {
2218 		req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2219 		pch_udc_start_rxrequest(ep, req);
2220 	}
2221 }
2222 
2223 /**
2224  * pch_udc_svc_data_in() - This function processes endpoint interrupts
2225  *				for IN endpoints
2226  * @dev:	Reference to the device structure
2227  * @ep_num:	Endpoint that generated the interrupt
2228  */
2229 static void pch_udc_svc_data_in(struct pch_udc_dev *dev, int ep_num)
2230 {
2231 	u32	epsts;
2232 	struct pch_udc_ep	*ep;
2233 
2234 	ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
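	/*
	 * ep->epsts was latched, and the hardware status cleared, by
	 * pch_udc_read_all_epstatus() in the interrupt handler.
	 */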
2235 	epsts = ep->epsts;
2236 	ep->epsts = 0;
2237 
2238 	if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA  | UDC_EPSTS_HE |
2239 		       UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
2240 		       UDC_EPSTS_RSS | UDC_EPSTS_XFERDONE)))
2241 		return;
2242 	if ((epsts & UDC_EPSTS_BNA))
2243 		return;
2244 	if (epsts & UDC_EPSTS_HE)
2245 		return;
2246 	if (epsts & UDC_EPSTS_RSS) {
2247 		pch_udc_ep_set_stall(ep);
2248 		pch_udc_enable_ep_interrupts(ep->dev,
2249 					     PCH_UDC_EPINT(ep->in, ep->num));
2250 	}
2251 	if (epsts & UDC_EPSTS_RCS) {
2252 		if (!dev->prot_stall) {
2253 			pch_udc_ep_clear_stall(ep);
2254 		} else {
2255 			pch_udc_ep_set_stall(ep);
2256 			pch_udc_enable_ep_interrupts(ep->dev,
2257 						PCH_UDC_EPINT(ep->in, ep->num));
2258 		}
2259 	}
2260 	if (epsts & UDC_EPSTS_TDC)
2261 		pch_udc_complete_transfer(ep);
2262 	/* On IN interrupt, provide data if we have any */
2263 	if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_RSS) &&
2264 	    !(epsts & UDC_EPSTS_TDC) && !(epsts & UDC_EPSTS_TXEMPTY))
2265 		pch_udc_start_next_txrequest(ep);
2266 }
2267 
2268 /**
2269  * pch_udc_svc_data_out() - Handles interrupts from OUT endpoint
2270  * @dev:	Reference to the device structure
2271  * @ep_num:	Endpoint that generated the interrupt
2272  */
2273 static void pch_udc_svc_data_out(struct pch_udc_dev *dev, int ep_num)
2274 {
2275 	u32			epsts;
2276 	struct pch_udc_ep		*ep;
2277 	struct pch_udc_request		*req = NULL;
2278 
2279 	ep = &dev->ep[UDC_EPOUT_IDX(ep_num)];
2280 	epsts = ep->epsts;
2281 	ep->epsts = 0;
2282 
2283 	if ((epsts & UDC_EPSTS_BNA) && (!list_empty(&ep->queue))) {
2284 		/* next request */
2285 		req = list_entry(ep->queue.next, struct pch_udc_request,
2286 				 queue);
2287 		if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
2288 		     PCH_UDC_BS_DMA_DONE) {
2289 			if (!req->dma_going)
2290 				pch_udc_start_rxrequest(ep, req);
2291 			return;
2292 		}
2293 	}
2294 	if (epsts & UDC_EPSTS_HE)
2295 		return;
2296 	if (epsts & UDC_EPSTS_RSS) {
2297 		pch_udc_ep_set_stall(ep);
2298 		pch_udc_enable_ep_interrupts(ep->dev,
2299 					     PCH_UDC_EPINT(ep->in, ep->num));
2300 	}
2301 	if (epsts & UDC_EPSTS_RCS) {
2302 		if (!dev->prot_stall) {
2303 			pch_udc_ep_clear_stall(ep);
2304 		} else {
2305 			pch_udc_ep_set_stall(ep);
2306 			pch_udc_enable_ep_interrupts(ep->dev,
2307 						PCH_UDC_EPINT(ep->in, ep->num));
2308 		}
2309 	}
2310 	if (((epsts & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2311 	    UDC_EPSTS_OUT_DATA) {
2312 		if (ep->dev->prot_stall == 1) {
2313 			pch_udc_ep_set_stall(ep);
2314 			pch_udc_enable_ep_interrupts(ep->dev,
2315 						PCH_UDC_EPINT(ep->in, ep->num));
2316 		} else {
2317 			pch_udc_complete_receiver(ep);
2318 		}
2319 	}
2320 	if (list_empty(&ep->queue))
2321 		pch_udc_set_dma(dev, DMA_DIR_RX);
2322 }
2323 
2324 /**
2325  * pch_udc_svc_control_in() - Handle Control IN endpoint interrupts
2326  * @dev:	Reference to the device structure
2327  */
2328 static void pch_udc_svc_control_in(struct pch_udc_dev *dev)
2329 {
2330 	u32	epsts;
2331 	struct pch_udc_ep	*ep;
2332 	struct pch_udc_ep	*ep_out;
2333 
2334 	ep = &dev->ep[UDC_EP0IN_IDX];
2335 	ep_out = &dev->ep[UDC_EP0OUT_IDX];
2336 	epsts = ep->epsts;
2337 	ep->epsts = 0;
2338 
2339 	if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA | UDC_EPSTS_HE |
2340 		       UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
2341 		       UDC_EPSTS_XFERDONE)))
2342 		return;
2343 	if ((epsts & UDC_EPSTS_BNA))
2344 		return;
2345 	if (epsts & UDC_EPSTS_HE)
2346 		return;
2347 	if ((epsts & UDC_EPSTS_TDC) && (!dev->stall)) {
2348 		pch_udc_complete_transfer(ep);
2349 		pch_udc_clear_dma(dev, DMA_DIR_RX);
2350 		ep_out->td_data->status = (ep_out->td_data->status &
2351 					~PCH_UDC_BUFF_STS) |
2352 					PCH_UDC_BS_HST_RDY;
2353 		pch_udc_ep_clear_nak(ep_out);
2354 		pch_udc_set_dma(dev, DMA_DIR_RX);
2355 		pch_udc_ep_set_rrdy(ep_out);
2356 	}
2357 	/* On IN interrupt, provide data if we have any */
2358 	if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_TDC) &&
2359 	     !(epsts & UDC_EPSTS_TXEMPTY))
2360 		pch_udc_start_next_txrequest(ep);
2361 }
2362 
2363 /**
2364  * pch_udc_svc_control_out() - Routine that handles Control
2365  *					OUT endpoint interrupts
2366  * @dev:	Reference to the device structure
2367  */
2368 static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
2369 	__releases(&dev->lock)
2370 	__acquires(&dev->lock)
2371 {
2372 	u32	stat;
2373 	int setup_supported;
2374 	struct pch_udc_ep	*ep;
2375 
2376 	ep = &dev->ep[UDC_EP0OUT_IDX];
2377 	stat = ep->epsts;
2378 	ep->epsts = 0;
2379 
2380 	/* If setup data */
2381 	if (((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2382 	    UDC_EPSTS_OUT_SETUP) {
2383 		dev->stall = 0;
2384 		dev->ep[UDC_EP0IN_IDX].halted = 0;
2385 		dev->ep[UDC_EP0OUT_IDX].halted = 0;
2386 		dev->setup_data = ep->td_stp->request;
2387 		pch_udc_init_setup_buff(ep->td_stp);
2388 		pch_udc_clear_dma(dev, DMA_DIR_RX);
2389 		pch_udc_ep_fifo_flush(&(dev->ep[UDC_EP0IN_IDX]),
2390 				      dev->ep[UDC_EP0IN_IDX].in);
2391 		if ((dev->setup_data.bRequestType & USB_DIR_IN))
2392 			dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
2393 		else /* OUT */
2394 			dev->gadget.ep0 = &ep->ep;
2395 		spin_lock(&dev->lock);
2396 		/* If Mass storage Reset */
2397 		if ((dev->setup_data.bRequestType == 0x21) &&
2398 		    (dev->setup_data.bRequest == 0xFF))
2399 			dev->prot_stall = 0;
2400 		/* call gadget with setup data received */
2401 		setup_supported = dev->driver->setup(&dev->gadget,
2402 						     &dev->setup_data);
2403 		spin_unlock(&dev->lock);
2404 
2405 		if (dev->setup_data.bRequestType & USB_DIR_IN) {
2406 			ep->td_data->status = (ep->td_data->status &
2407 						~PCH_UDC_BUFF_STS) |
2408 						PCH_UDC_BS_HST_RDY;
2409 			pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
2410 		}
2411 		/* ep0 in returns data on IN phase */
2412 		if (setup_supported >= 0 && setup_supported <
2413 					    UDC_EP0IN_MAX_PKT_SIZE) {
2414 			pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
2415 			/* Gadget would have queued a request when
2416 			 * we called the setup */
2417 			if (!(dev->setup_data.bRequestType & USB_DIR_IN)) {
2418 				pch_udc_set_dma(dev, DMA_DIR_RX);
2419 				pch_udc_ep_clear_nak(ep);
2420 			}
2421 		} else if (setup_supported < 0) {
2422 			/* if unsupported request, then stall */
2423 			pch_udc_ep_set_stall(&(dev->ep[UDC_EP0IN_IDX]));
2424 			pch_udc_enable_ep_interrupts(ep->dev,
2425 						PCH_UDC_EPINT(ep->in, ep->num));
2426 			dev->stall = 0;
2427 			pch_udc_set_dma(dev, DMA_DIR_RX);
2428 		} else {
2429 			dev->waiting_zlp_ack = 1;
2430 		}
2431 	} else if ((((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2432 		     UDC_EPSTS_OUT_DATA) && !dev->stall) {
2433 		pch_udc_clear_dma(dev, DMA_DIR_RX);
2434 		pch_udc_ep_set_ddptr(ep, 0);
2435 		if (!list_empty(&ep->queue)) {
2436 			ep->epsts = stat;
2437 			pch_udc_svc_data_out(dev, PCH_UDC_EP0);
2438 		}
2439 		pch_udc_set_dma(dev, DMA_DIR_RX);
2440 	}
2441 	pch_udc_ep_set_rrdy(ep);
2442 }
2443 
2444 
2445 /**
2446  * pch_udc_postsvc_epinters() - This function enables endpoint interrupts
2447  *				and clears NAK status
2448  * @dev:	Reference to the device structure
2449  * @ep_num:	Endpoint number
2450  */
2451 static void pch_udc_postsvc_epinters(struct pch_udc_dev *dev, int ep_num)
2452 {
2453 	struct pch_udc_ep	*ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
2454 	if (list_empty(&ep->queue))
2455 		return;
2456 	pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
2457 	pch_udc_ep_clear_nak(ep);
2458 }
2459 
2460 /**
2461  * pch_udc_read_all_epstatus() - This function reads all endpoint status
2462  * @dev:	Reference to the device structure
2463  * @ep_intr:	Status of endpoint interrupt
2464  */
2465 static void pch_udc_read_all_epstatus(struct pch_udc_dev *dev, u32 ep_intr)
2466 {
2467 	int i;
2468 	struct pch_udc_ep	*ep;
2469 
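	/* IN endpoint interrupt bits are in the low 16 bits, OUT in the upper 16 */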
2470 	for (i = 0; i < PCH_UDC_USED_EP_NUM; i++) {
2471 		/* IN */
2472 		if (ep_intr & (0x1 << i)) {
2473 			ep = &dev->ep[UDC_EPIN_IDX(i)];
2474 			ep->epsts = pch_udc_read_ep_status(ep);
2475 			pch_udc_clear_ep_status(ep, ep->epsts);
2476 		}
2477 		/* OUT */
2478 		if (ep_intr & (0x10000 << i)) {
2479 			ep = &dev->ep[UDC_EPOUT_IDX(i)];
2480 			ep->epsts = pch_udc_read_ep_status(ep);
2481 			pch_udc_clear_ep_status(ep, ep->epsts);
2482 		}
2483 	}
2484 }
2485 
2486 /**
2487  * pch_udc_activate_control_ep() - This function enables the control endpoints
2488  *					for traffic after a reset
2489  * @dev:	Reference to the device structure
2490  */
2491 static void pch_udc_activate_control_ep(struct pch_udc_dev *dev)
2492 {
2493 	struct pch_udc_ep	*ep;
2494 	u32 val;
2495 
2496 	/* Setup the IN endpoint */
2497 	ep = &dev->ep[UDC_EP0IN_IDX];
2498 	pch_udc_clear_ep_control(ep);
2499 	pch_udc_ep_fifo_flush(ep, ep->in);
2500 	pch_udc_ep_set_bufsz(ep, UDC_EP0IN_BUFF_SIZE, ep->in);
2501 	pch_udc_ep_set_maxpkt(ep, UDC_EP0IN_MAX_PKT_SIZE);
2502 	/* Initialize the IN EP Descriptor */
2503 	ep->td_data      = NULL;
2504 	ep->td_stp       = NULL;
2505 	ep->td_data_phys = 0;
2506 	ep->td_stp_phys  = 0;
2507 
2508 	/* Setup the OUT endpoint */
2509 	ep = &dev->ep[UDC_EP0OUT_IDX];
2510 	pch_udc_clear_ep_control(ep);
2511 	pch_udc_ep_fifo_flush(ep, ep->in);
2512 	pch_udc_ep_set_bufsz(ep, UDC_EP0OUT_BUFF_SIZE, ep->in);
2513 	pch_udc_ep_set_maxpkt(ep, UDC_EP0OUT_MAX_PKT_SIZE);
2514 	val = UDC_EP0OUT_MAX_PKT_SIZE << UDC_CSR_NE_MAX_PKT_SHIFT;
2515 	pch_udc_write_csr(ep->dev, val, UDC_EP0OUT_IDX);
2516 
2517 	/* Initialize the SETUP buffer */
2518 	pch_udc_init_setup_buff(ep->td_stp);
2519 	/* Write the pointer address of dma descriptor */
2520 	pch_udc_ep_set_subptr(ep, ep->td_stp_phys);
2521 	/* Write the pointer address of Setup descriptor */
2522 	pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
2523 
2524 	/* Initialize the dma descriptor */
2525 	ep->td_data->status  = PCH_UDC_DMA_LAST;
2526 	ep->td_data->dataptr = dev->dma_addr;
2527 	ep->td_data->next    = ep->td_data_phys;
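	/*
	 * next points back at the descriptor itself, forming a one-entry ring
	 * that is reused for each control OUT data stage.
	 */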
2528 
2529 	pch_udc_ep_clear_nak(ep);
2530 }
2531 
2532 
2533 /**
2534  * pch_udc_svc_ur_interrupt() - This function handles a USB reset interrupt
2535  * @dev:	Reference to driver structure
2536  */
2537 static void pch_udc_svc_ur_interrupt(struct pch_udc_dev *dev)
2538 {
2539 	struct pch_udc_ep	*ep;
2540 	int i;
2541 
2542 	pch_udc_clear_dma(dev, DMA_DIR_TX);
2543 	pch_udc_clear_dma(dev, DMA_DIR_RX);
2544 	/* Mask all endpoint interrupts */
2545 	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
2546 	/* clear all endpoint interrupts */
2547 	pch_udc_write_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
2548 
2549 	for (i = 0; i < PCH_UDC_EP_NUM; i++) {
2550 		ep = &dev->ep[i];
2551 		pch_udc_clear_ep_status(ep, UDC_EPSTS_ALL_CLR_MASK);
2552 		pch_udc_clear_ep_control(ep);
2553 		pch_udc_ep_set_ddptr(ep, 0);
2554 		pch_udc_write_csr(ep->dev, 0x00, i);
2555 	}
2556 	dev->stall = 0;
2557 	dev->prot_stall = 0;
2558 	dev->waiting_zlp_ack = 0;
2559 	dev->set_cfg_not_acked = 0;
2560 
2561 	/* disable ep to empty req queue. Skip the control EP's */
2562 	for (i = 0; i < (PCH_UDC_USED_EP_NUM*2); i++) {
2563 		ep = &dev->ep[i];
2564 		pch_udc_ep_set_nak(ep);
2565 		pch_udc_ep_fifo_flush(ep, ep->in);
2566 		/* Complete request queue */
2567 		empty_req_queue(ep);
2568 	}
2569 	if (dev->driver) {
2570 		spin_unlock(&dev->lock);
2571 		usb_gadget_udc_reset(&dev->gadget, dev->driver);
2572 		spin_lock(&dev->lock);
2573 	}
2574 }
2575 
2576 /**
2577  * pch_udc_svc_enum_interrupt() - This function handles a USB speed enumeration
2578  *				done interrupt
2579  * @dev:	Reference to driver structure
2580  */
2581 static void pch_udc_svc_enum_interrupt(struct pch_udc_dev *dev)
2582 {
2583 	u32 dev_stat, dev_speed;
2584 	u32 speed = USB_SPEED_FULL;
2585 
2586 	dev_stat = pch_udc_read_device_status(dev);
2587 	dev_speed = (dev_stat & UDC_DEVSTS_ENUM_SPEED_MASK) >>
2588 						 UDC_DEVSTS_ENUM_SPEED_SHIFT;
2589 	switch (dev_speed) {
2590 	case UDC_DEVSTS_ENUM_SPEED_HIGH:
2591 		speed = USB_SPEED_HIGH;
2592 		break;
2593 	case  UDC_DEVSTS_ENUM_SPEED_FULL:
2594 		speed = USB_SPEED_FULL;
2595 		break;
2596 	case  UDC_DEVSTS_ENUM_SPEED_LOW:
2597 		speed = USB_SPEED_LOW;
2598 		break;
2599 	default:
2600 		BUG();
2601 	}
2602 	dev->gadget.speed = speed;
2603 	pch_udc_activate_control_ep(dev);
2604 	pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 | UDC_EPINT_OUT_EP0);
2605 	pch_udc_set_dma(dev, DMA_DIR_TX);
2606 	pch_udc_set_dma(dev, DMA_DIR_RX);
2607 	pch_udc_ep_set_rrdy(&(dev->ep[UDC_EP0OUT_IDX]));
2608 
2609 	/* enable device interrupts */
2610 	pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
2611 					UDC_DEVINT_ES | UDC_DEVINT_ENUM |
2612 					UDC_DEVINT_SI | UDC_DEVINT_SC);
2613 }
2614 
2615 /**
2616  * pch_udc_svc_intf_interrupt() - This function handles a set interface
2617  *				  interrupt
2618  * @dev:	Reference to driver structure
2619  */
2620 static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev)
2621 {
2622 	u32 reg, dev_stat = 0;
2623 	int i;
2624 
2625 	dev_stat = pch_udc_read_device_status(dev);
2626 	dev->cfg_data.cur_intf = (dev_stat & UDC_DEVSTS_INTF_MASK) >>
2627 							 UDC_DEVSTS_INTF_SHIFT;
2628 	dev->cfg_data.cur_alt = (dev_stat & UDC_DEVSTS_ALT_MASK) >>
2629 							 UDC_DEVSTS_ALT_SHIFT;
2630 	dev->set_cfg_not_acked = 1;
2631 	/* Construct the usb request for gadget driver and inform it */
2632 	memset(&dev->setup_data, 0 , sizeof dev->setup_data);
2633 	dev->setup_data.bRequest = USB_REQ_SET_INTERFACE;
2634 	dev->setup_data.bRequestType = USB_RECIP_INTERFACE;
2635 	dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_alt);
2636 	dev->setup_data.wIndex = cpu_to_le16(dev->cfg_data.cur_intf);
2637 	/* program the Endpoint Cfg registers */
2638 	/* Only one endpoint cfg register */
2639 	reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
2640 	reg = (reg & ~UDC_CSR_NE_INTF_MASK) |
2641 	      (dev->cfg_data.cur_intf << UDC_CSR_NE_INTF_SHIFT);
2642 	reg = (reg & ~UDC_CSR_NE_ALT_MASK) |
2643 	      (dev->cfg_data.cur_alt << UDC_CSR_NE_ALT_SHIFT);
2644 	pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
2645 	for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
2646 		/* clear stall bits */
2647 		pch_udc_ep_clear_stall(&(dev->ep[i]));
2648 		dev->ep[i].halted = 0;
2649 	}
2650 	dev->stall = 0;
2651 	spin_unlock(&dev->lock);
2652 	dev->driver->setup(&dev->gadget, &dev->setup_data);
2653 	spin_lock(&dev->lock);
2654 }
2655 
2656 /**
2657  * pch_udc_svc_cfg_interrupt() - This function handles a set configuration
2658  *				interrupt
2659  * @dev:	Reference to driver structure
2660  */
2661 static void pch_udc_svc_cfg_interrupt(struct pch_udc_dev *dev)
2662 {
2663 	int i;
2664 	u32 reg, dev_stat = 0;
2665 
2666 	dev_stat = pch_udc_read_device_status(dev);
2667 	dev->set_cfg_not_acked = 1;
2668 	dev->cfg_data.cur_cfg = (dev_stat & UDC_DEVSTS_CFG_MASK) >>
2669 				UDC_DEVSTS_CFG_SHIFT;
2670 	/* make usb request for gadget driver */
2671 	memset(&dev->setup_data, 0 , sizeof dev->setup_data);
2672 	dev->setup_data.bRequest = USB_REQ_SET_CONFIGURATION;
2673 	dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_cfg);
2674 	/* program the NE registers */
2675 	/* Only one endpoint cfg register */
2676 	reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
2677 	reg = (reg & ~UDC_CSR_NE_CFG_MASK) |
2678 	      (dev->cfg_data.cur_cfg << UDC_CSR_NE_CFG_SHIFT);
2679 	pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
2680 	for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
2681 		/* clear stall bits */
2682 		pch_udc_ep_clear_stall(&(dev->ep[i]));
2683 		dev->ep[i].halted = 0;
2684 	}
2685 	dev->stall = 0;
2686 
2687 	/* call gadget zero with setup data received */
2688 	spin_unlock(&dev->lock);
2689 	dev->driver->setup(&dev->gadget, &dev->setup_data);
2690 	spin_lock(&dev->lock);
2691 }
2692 
2693 /**
2694  * pch_udc_dev_isr() - This function services device interrupts
2695  *			by invoking appropriate routines.
2696  * @dev:	Reference to the device structure
2697  * @dev_intr:	The Device interrupt status.
2698  */
2699 static void pch_udc_dev_isr(struct pch_udc_dev *dev, u32 dev_intr)
2700 {
2701 	int vbus;
2702 
2703 	/* USB Reset Interrupt */
2704 	if (dev_intr & UDC_DEVINT_UR) {
2705 		pch_udc_svc_ur_interrupt(dev);
2706 		dev_dbg(&dev->pdev->dev, "USB_RESET\n");
2707 	}
2708 	/* Enumeration Done Interrupt */
2709 	if (dev_intr & UDC_DEVINT_ENUM) {
2710 		pch_udc_svc_enum_interrupt(dev);
2711 		dev_dbg(&dev->pdev->dev, "USB_ENUM\n");
2712 	}
2713 	/* Set Interface Interrupt */
2714 	if (dev_intr & UDC_DEVINT_SI)
2715 		pch_udc_svc_intf_interrupt(dev);
2716 	/* Set Config Interrupt */
2717 	if (dev_intr & UDC_DEVINT_SC)
2718 		pch_udc_svc_cfg_interrupt(dev);
2719 	/* USB Suspend interrupt */
2720 	if (dev_intr & UDC_DEVINT_US) {
2721 		if (dev->driver
2722 			&& dev->driver->suspend) {
2723 			spin_unlock(&dev->lock);
2724 			dev->driver->suspend(&dev->gadget);
2725 			spin_lock(&dev->lock);
2726 		}
2727 
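		/* Check VBUS to tell a plain bus suspend from a cable disconnect */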
2728 		vbus = pch_vbus_gpio_get_value(dev);
2729 		if ((dev->vbus_session == 0)
2730 			&& (vbus != 1)) {
2731 			if (dev->driver && dev->driver->disconnect) {
2732 				spin_unlock(&dev->lock);
2733 				dev->driver->disconnect(&dev->gadget);
2734 				spin_lock(&dev->lock);
2735 			}
2736 			pch_udc_reconnect(dev);
2737 		} else if ((dev->vbus_session == 0)
2738 			&& (vbus == 1)
2739 			&& !dev->vbus_gpio.intr)
2740 			schedule_work(&dev->vbus_gpio.irq_work_fall);
2741 
2742 		dev_dbg(&dev->pdev->dev, "USB_SUSPEND\n");
2743 	}
2744 	/* SOF interrupt, if enabled */
2745 	if (dev_intr & UDC_DEVINT_SOF)
2746 		dev_dbg(&dev->pdev->dev, "SOF\n");
2747 	/* ES interrupt, IDLE > 3ms on the USB */
2748 	if (dev_intr & UDC_DEVINT_ES)
2749 		dev_dbg(&dev->pdev->dev, "ES\n");
2750 	/* RWKP interrupt */
2751 	if (dev_intr & UDC_DEVINT_RWKP)
2752 		dev_dbg(&dev->pdev->dev, "RWKP\n");
2753 }
2754 
2755 /**
2756  * pch_udc_isr() - This function handles interrupts from the PCH USB Device
2757  * @irq:	Interrupt request number
2758  * @pdev:	Reference to the device structure (struct pch_udc_dev)
2759  */
2760 static irqreturn_t pch_udc_isr(int irq, void *pdev)
2761 {
2762 	struct pch_udc_dev *dev = (struct pch_udc_dev *) pdev;
2763 	u32 dev_intr, ep_intr;
2764 	int i;
2765 
2766 	dev_intr = pch_udc_read_device_interrupts(dev);
2767 	ep_intr = pch_udc_read_ep_interrupts(dev);
2768 
2769 	/* On a hot plug the controller may hang; detect that here. */
2770 	if (dev_intr == ep_intr)
2771 		if (dev_intr == pch_udc_readl(dev, UDC_DEVCFG_ADDR)) {
2772 			dev_dbg(&dev->pdev->dev, "UDC: Hung up\n");
2773 			/* The controller is reset */
2774 			pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
2775 			return IRQ_HANDLED;
2776 		}
2777 	if (dev_intr)
2778 		/* Clear device interrupts */
2779 		pch_udc_write_device_interrupts(dev, dev_intr);
2780 	if (ep_intr)
2781 		/* Clear ep interrupts */
2782 		pch_udc_write_ep_interrupts(dev, ep_intr);
2783 	if (!dev_intr && !ep_intr)
2784 		return IRQ_NONE;
2785 	spin_lock(&dev->lock);
2786 	if (dev_intr)
2787 		pch_udc_dev_isr(dev, dev_intr);
2788 	if (ep_intr) {
2789 		pch_udc_read_all_epstatus(dev, ep_intr);
2790 		/* Process Control In interrupts, if present */
2791 		if (ep_intr & UDC_EPINT_IN_EP0) {
2792 			pch_udc_svc_control_in(dev);
2793 			pch_udc_postsvc_epinters(dev, 0);
2794 		}
2795 		/* Process Control Out interrupts, if present */
2796 		if (ep_intr & UDC_EPINT_OUT_EP0)
2797 			pch_udc_svc_control_out(dev);
2798 		/* Process data IN endpoint interrupts */
2799 		for (i = 1; i < PCH_UDC_USED_EP_NUM; i++) {
2800 			if (ep_intr & (1 <<  i)) {
2801 				pch_udc_svc_data_in(dev, i);
2802 				pch_udc_postsvc_epinters(dev, i);
2803 			}
2804 		}
2805 		/* Process data OUT endpoint interrupts */
2806 		for (i = UDC_EPINT_OUT_SHIFT + 1; i < (UDC_EPINT_OUT_SHIFT +
2807 						 PCH_UDC_USED_EP_NUM); i++)
2808 			if (ep_intr & (1 <<  i))
2809 				pch_udc_svc_data_out(dev, i -
2810 							 UDC_EPINT_OUT_SHIFT);
2811 	}
2812 	spin_unlock(&dev->lock);
2813 	return IRQ_HANDLED;
2814 }
2815 
2816 /**
2817  * pch_udc_setup_ep0() - This function enables the control endpoint for traffic
2818  * @dev:	Reference to the device structure
2819  */
2820 static void pch_udc_setup_ep0(struct pch_udc_dev *dev)
2821 {
2822 	/* enable ep0 interrupts */
2823 	pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 |
2824 						UDC_EPINT_OUT_EP0);
2825 	/* enable device interrupts */
2826 	pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
2827 				       UDC_DEVINT_ES | UDC_DEVINT_ENUM |
2828 				       UDC_DEVINT_SI | UDC_DEVINT_SC);
2829 }
2830 
2831 /**
2832  * pch_udc_pcd_reinit() - This API initializes the endpoint structures
2833  * @dev:	Reference to the driver structure
2834  */
2835 static void pch_udc_pcd_reinit(struct pch_udc_dev *dev)
2836 {
2837 	const char *const ep_string[] = {
2838 		ep0_string, "ep0out", "ep1in", "ep1out", "ep2in", "ep2out",
2839 		"ep3in", "ep3out", "ep4in", "ep4out", "ep5in", "ep5out",
2840 		"ep6in", "ep6out", "ep7in", "ep7out", "ep8in", "ep8out",
2841 		"ep9in", "ep9out", "ep10in", "ep10out", "ep11in", "ep11out",
2842 		"ep12in", "ep12out", "ep13in", "ep13out", "ep14in", "ep14out",
2843 		"ep15in", "ep15out",
2844 	};
2845 	int i;
2846 
2847 	dev->gadget.speed = USB_SPEED_UNKNOWN;
2848 	INIT_LIST_HEAD(&dev->gadget.ep_list);
2849 
2850 	/* Initialize the endpoints structures */
2851 	memset(dev->ep, 0, sizeof dev->ep);
2852 	for (i = 0; i < PCH_UDC_EP_NUM; i++) {
2853 		struct pch_udc_ep *ep = &dev->ep[i];
2854 		ep->dev = dev;
2855 		ep->halted = 1;
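		/* Endpoints alternate in the array: even indices are IN, odd are OUT */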
2856 		ep->num = i / 2;
2857 		ep->in = ~i & 1;
2858 		ep->ep.name = ep_string[i];
2859 		ep->ep.ops = &pch_udc_ep_ops;
2860 		if (ep->in) {
2861 			ep->offset_addr = ep->num * UDC_EP_REG_SHIFT;
2862 			ep->ep.caps.dir_in = true;
2863 		} else {
2864 			ep->offset_addr = (UDC_EPINT_OUT_SHIFT + ep->num) *
2865 					  UDC_EP_REG_SHIFT;
2866 			ep->ep.caps.dir_out = true;
2867 		}
2868 		if (i == UDC_EP0IN_IDX || i == UDC_EP0OUT_IDX) {
2869 			ep->ep.caps.type_control = true;
2870 		} else {
2871 			ep->ep.caps.type_iso = true;
2872 			ep->ep.caps.type_bulk = true;
2873 			ep->ep.caps.type_int = true;
2874 		}
2875 		/* need to set ep->ep.maxpacket and set Default Configuration?*/
2876 		usb_ep_set_maxpacket_limit(&ep->ep, UDC_BULK_MAX_PKT_SIZE);
2877 		list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
2878 		INIT_LIST_HEAD(&ep->queue);
2879 	}
2880 	usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IDX].ep, UDC_EP0IN_MAX_PKT_SIZE);
2881 	usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IDX].ep, UDC_EP0OUT_MAX_PKT_SIZE);
2882 
2883 	/* remove ep0 in and out from the list.  They have their own pointer */
2884 	list_del_init(&dev->ep[UDC_EP0IN_IDX].ep.ep_list);
2885 	list_del_init(&dev->ep[UDC_EP0OUT_IDX].ep.ep_list);
2886 
2887 	dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
2888 	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
2889 }
2890 
2891 /**
2892  * pch_udc_pcd_init() - This API initializes the driver structure
2893  * @dev:	Reference to the driver structure
2894  *
2895  * Return codes:
2896  *	0: Success
2897  */
2898 static int pch_udc_pcd_init(struct pch_udc_dev *dev)
2899 {
2900 	pch_udc_init(dev);
2901 	pch_udc_pcd_reinit(dev);
2902 	pch_vbus_gpio_init(dev, vbus_gpio_port);
2903 	return 0;
2904 }
2905 
2906 /**
2907  * init_dma_pools() - create dma pools during initialization
2908  * @dev:	Reference to the driver structure
2909  */
2910 static int init_dma_pools(struct pch_udc_dev *dev)
2911 {
2912 	struct pch_udc_stp_dma_desc	*td_stp;
2913 	struct pch_udc_data_dma_desc	*td_data;
2914 	void				*ep0out_buf;
2915 
2916 	/* DMA setup */
2917 	dev->data_requests = pci_pool_create("data_requests", dev->pdev,
2918 		sizeof(struct pch_udc_data_dma_desc), 0, 0);
2919 	if (!dev->data_requests) {
2920 		dev_err(&dev->pdev->dev, "%s: can't get request data pool\n",
2921 			__func__);
2922 		return -ENOMEM;
2923 	}
2924 
2925 	/* dma desc for setup data */
2926 	dev->stp_requests = pci_pool_create("setup requests", dev->pdev,
2927 		sizeof(struct pch_udc_stp_dma_desc), 0, 0);
2928 	if (!dev->stp_requests) {
2929 		dev_err(&dev->pdev->dev, "%s: can't get setup request pool\n",
2930 			__func__);
2931 		return -ENOMEM;
2932 	}
2933 	/* setup */
2934 	td_stp = pci_pool_alloc(dev->stp_requests, GFP_KERNEL,
2935 				&dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
2936 	if (!td_stp) {
2937 		dev_err(&dev->pdev->dev,
2938 			"%s: can't allocate setup dma descriptor\n", __func__);
2939 		return -ENOMEM;
2940 	}
2941 	dev->ep[UDC_EP0OUT_IDX].td_stp = td_stp;
2942 
2943 	/* data: 0 packets !? */
2944 	td_data = pci_pool_alloc(dev->data_requests, GFP_KERNEL,
2945 				&dev->ep[UDC_EP0OUT_IDX].td_data_phys);
2946 	if (!td_data) {
2947 		dev_err(&dev->pdev->dev,
2948 			"%s: can't allocate data dma descriptor\n", __func__);
2949 		return -ENOMEM;
2950 	}
2951 	dev->ep[UDC_EP0OUT_IDX].td_data = td_data;
2952 	dev->ep[UDC_EP0IN_IDX].td_stp = NULL;
2953 	dev->ep[UDC_EP0IN_IDX].td_stp_phys = 0;
2954 	dev->ep[UDC_EP0IN_IDX].td_data = NULL;
2955 	dev->ep[UDC_EP0IN_IDX].td_data_phys = 0;
2956 
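	/* Allocate and DMA-map the buffer that receives ep0 (control) OUT data */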
2957 	ep0out_buf = devm_kzalloc(&dev->pdev->dev, UDC_EP0OUT_BUFF_SIZE * 4,
2958 				  GFP_KERNEL);
2959 	if (!ep0out_buf)
2960 		return -ENOMEM;
2961 	dev->dma_addr = dma_map_single(&dev->pdev->dev, ep0out_buf,
2962 				       UDC_EP0OUT_BUFF_SIZE * 4,
2963 				       DMA_FROM_DEVICE);
2964 	return 0;
2965 }
2966 
2967 static int pch_udc_start(struct usb_gadget *g,
2968 		struct usb_gadget_driver *driver)
2969 {
2970 	struct pch_udc_dev	*dev = to_pch_udc(g);
2971 
2972 	driver->driver.bus = NULL;
2973 	dev->driver = driver;
2974 
2975 	/* get ready for ep0 traffic */
2976 	pch_udc_setup_ep0(dev);
2977 
2978 	/* clear SD */
2979 	if ((pch_vbus_gpio_get_value(dev) != 0) || !dev->vbus_gpio.intr)
2980 		pch_udc_clear_disconnect(dev);
2981 
2982 	dev->connected = 1;
2983 	return 0;
2984 }
2985 
2986 static int pch_udc_stop(struct usb_gadget *g)
2987 {
2988 	struct pch_udc_dev	*dev = to_pch_udc(g);
2989 
2990 	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
2991 
2992 	/* Assures that there are no pending requests with this driver */
2993 	dev->driver = NULL;
2994 	dev->connected = 0;
2995 
2996 	/* set SD */
2997 	pch_udc_set_disconnect(dev);
2998 
2999 	return 0;
3000 }
3001 
3002 static void pch_udc_shutdown(struct pci_dev *pdev)
3003 {
3004 	struct pch_udc_dev *dev = pci_get_drvdata(pdev);
3005 
3006 	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
3007 	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
3008 
3009 	/* disable the pullup so the host will think we're gone */
3010 	pch_udc_set_disconnect(dev);
3011 }
3012 
3013 static void pch_udc_remove(struct pci_dev *pdev)
3014 {
3015 	struct pch_udc_dev	*dev = pci_get_drvdata(pdev);
3016 
3017 	usb_del_gadget_udc(&dev->gadget);
3018 
3019 	/* gadget driver must not be registered */
3020 	if (dev->driver)
3021 		dev_err(&pdev->dev,
3022 			"%s: gadget driver still bound!!!\n", __func__);
3023 	/* dma pool cleanup */
3024 	if (dev->data_requests)
3025 		pci_pool_destroy(dev->data_requests);
3026 
3027 	if (dev->stp_requests) {
3028 		/* cleanup DMA descriptors for ep0 out */
3029 		if (dev->ep[UDC_EP0OUT_IDX].td_stp) {
3030 			pci_pool_free(dev->stp_requests,
3031 				dev->ep[UDC_EP0OUT_IDX].td_stp,
3032 				dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
3033 		}
3034 		if (dev->ep[UDC_EP0OUT_IDX].td_data) {
3035 			pci_pool_free(dev->stp_requests,
3036 				dev->ep[UDC_EP0OUT_IDX].td_data,
3037 				dev->ep[UDC_EP0OUT_IDX].td_data_phys);
3038 		}
3039 		pci_pool_destroy(dev->stp_requests);
3040 	}
3041 
3042 	if (dev->dma_addr)
3043 		dma_unmap_single(&dev->pdev->dev, dev->dma_addr,
3044 				 UDC_EP0OUT_BUFF_SIZE * 4, DMA_FROM_DEVICE);
3045 
3046 	pch_vbus_gpio_free(dev);
3047 
3048 	pch_udc_exit(dev);
3049 }
3050 
3051 #ifdef CONFIG_PM_SLEEP
3052 static int pch_udc_suspend(struct device *d)
3053 {
3054 	struct pci_dev *pdev = to_pci_dev(d);
3055 	struct pch_udc_dev *dev = pci_get_drvdata(pdev);
3056 
3057 	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
3058 	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
3059 
3060 	return 0;
3061 }
3062 
3063 static int pch_udc_resume(struct device *d)
3064 {
3065 	return 0;
3066 }
3067 
3068 static SIMPLE_DEV_PM_OPS(pch_udc_pm, pch_udc_suspend, pch_udc_resume);
3069 #define PCH_UDC_PM_OPS		(&pch_udc_pm)
3070 #else
3071 #define PCH_UDC_PM_OPS		NULL
3072 #endif /* CONFIG_PM_SLEEP */
3073 
3074 static int pch_udc_probe(struct pci_dev *pdev,
3075 			  const struct pci_device_id *id)
3076 {
3077 	int			bar;
3078 	int			retval;
3079 	struct pch_udc_dev	*dev;
3080 
3081 	/* init */
3082 	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
3083 	if (!dev)
3084 		return -ENOMEM;
3085 
3086 	/* pci setup */
3087 	retval = pcim_enable_device(pdev);
3088 	if (retval)
3089 		return retval;
3090 
3091 	pci_set_drvdata(pdev, dev);
3092 
3093 	/* Determine BAR based on PCI ID */
3094 	if (id->device == PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC)
3095 		bar = PCH_UDC_PCI_BAR_QUARK_X1000;
3096 	else
3097 		bar = PCH_UDC_PCI_BAR;
3098 
3099 	/* PCI resource allocation */
3100 	retval = pcim_iomap_regions(pdev, 1 << bar, pci_name(pdev));
3101 	if (retval)
3102 		return retval;
3103 
3104 	dev->base_addr = pcim_iomap_table(pdev)[bar];
3105 
3106 	/* initialize the hardware */
3107 	if (pch_udc_pcd_init(dev))
3108 		return -ENODEV;
3109 
3110 	pci_enable_msi(pdev);
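	/* MSI is optional; if it cannot be enabled the shared legacy IRQ is used */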
3111 
3112 	retval = devm_request_irq(&pdev->dev, pdev->irq, pch_udc_isr,
3113 				  IRQF_SHARED, KBUILD_MODNAME, dev);
3114 	if (retval) {
3115 		dev_err(&pdev->dev, "%s: request_irq(%d) fail\n", __func__,
3116 			pdev->irq);
3117 		goto finished;
3118 	}
3119 
3120 	pci_set_master(pdev);
3121 	pci_try_set_mwi(pdev);
3122 
3123 	/* device struct setup */
3124 	spin_lock_init(&dev->lock);
3125 	dev->pdev = pdev;
3126 	dev->gadget.ops = &pch_udc_ops;
3127 
3128 	retval = init_dma_pools(dev);
3129 	if (retval)
3130 		goto finished;
3131 
3132 	dev->gadget.name = KBUILD_MODNAME;
3133 	dev->gadget.max_speed = USB_SPEED_HIGH;
3134 
3135 	/* Put the device in disconnected state till a driver is bound */
3136 	pch_udc_set_disconnect(dev);
3137 	retval = usb_add_gadget_udc(&pdev->dev, &dev->gadget);
3138 	if (retval)
3139 		goto finished;
3140 	return 0;
3141 
3142 finished:
3143 	pch_udc_remove(pdev);
3144 	return retval;
3145 }
3146 
3147 static const struct pci_device_id pch_udc_pcidev_id[] = {
3148 	{
3149 		PCI_DEVICE(PCI_VENDOR_ID_INTEL,
3150 			   PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC),
3151 		.class = PCI_CLASS_SERIAL_USB_DEVICE,
3152 		.class_mask = 0xffffffff,
3153 	},
3154 	{
3155 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EG20T_UDC),
3156 		.class = PCI_CLASS_SERIAL_USB_DEVICE,
3157 		.class_mask = 0xffffffff,
3158 	},
3159 	{
3160 		PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7213_IOH_UDC),
3161 		.class = PCI_CLASS_SERIAL_USB_DEVICE,
3162 		.class_mask = 0xffffffff,
3163 	},
3164 	{
3165 		PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7831_IOH_UDC),
3166 		.class = PCI_CLASS_SERIAL_USB_DEVICE,
3167 		.class_mask = 0xffffffff,
3168 	},
3169 	{ 0 },
3170 };
3171 
3172 MODULE_DEVICE_TABLE(pci, pch_udc_pcidev_id);
3173 
3174 static struct pci_driver pch_udc_driver = {
3175 	.name =	KBUILD_MODNAME,
3176 	.id_table =	pch_udc_pcidev_id,
3177 	.probe =	pch_udc_probe,
3178 	.remove =	pch_udc_remove,
3179 	.shutdown =	pch_udc_shutdown,
3180 	.driver = {
3181 		.pm = PCH_UDC_PM_OPS,
3182 	},
3183 };
3184 
3185 module_pci_driver(pch_udc_driver);
3186 
3187 MODULE_DESCRIPTION("Intel EG20T USB Device Controller");
3188 MODULE_AUTHOR("LAPIS Semiconductor, <tomoya-linux@dsn.lapis-semi.com>");
3189 MODULE_LICENSE("GPL");
3190