xref: /openbmc/linux/drivers/usb/gadget/udc/pch_udc.c (revision 61163895)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
4  */
5 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
6 #include <linux/kernel.h>
7 #include <linux/module.h>
8 #include <linux/pci.h>
9 #include <linux/delay.h>
10 #include <linux/errno.h>
11 #include <linux/list.h>
12 #include <linux/interrupt.h>
13 #include <linux/usb/ch9.h>
14 #include <linux/usb/gadget.h>
15 #include <linux/gpio.h>
16 #include <linux/irq.h>
17 
18 /* GPIO port for VBUS detection */
19 static int vbus_gpio_port = -1;		/* GPIO port number (-1:Not used) */
20 
21 #define PCH_VBUS_PERIOD		3000	/* VBUS polling period (msec) */
22 #define PCH_VBUS_INTERVAL	10	/* VBUS polling interval (msec) */
23 
24 /* Address offset of Registers */
25 #define UDC_EP_REG_SHIFT	0x20	/* Offset to next EP */
26 
27 #define UDC_EPCTL_ADDR		0x00	/* Endpoint control */
28 #define UDC_EPSTS_ADDR		0x04	/* Endpoint status */
29 #define UDC_BUFIN_FRAMENUM_ADDR	0x08	/* buffer size in / frame number out */
30 #define UDC_BUFOUT_MAXPKT_ADDR	0x0C	/* buffer size out / maxpkt in */
31 #define UDC_SUBPTR_ADDR		0x10	/* setup buffer pointer */
32 #define UDC_DESPTR_ADDR		0x14	/* Data descriptor pointer */
33 #define UDC_CONFIRM_ADDR	0x18	/* Write/Read confirmation */
34 
35 #define UDC_DEVCFG_ADDR		0x400	/* Device configuration */
36 #define UDC_DEVCTL_ADDR		0x404	/* Device control */
37 #define UDC_DEVSTS_ADDR		0x408	/* Device status */
38 #define UDC_DEVIRQSTS_ADDR	0x40C	/* Device irq status */
39 #define UDC_DEVIRQMSK_ADDR	0x410	/* Device irq mask */
40 #define UDC_EPIRQSTS_ADDR	0x414	/* Endpoint irq status */
41 #define UDC_EPIRQMSK_ADDR	0x418	/* Endpoint irq mask */
42 #define UDC_DEVLPM_ADDR		0x41C	/* LPM control / status */
43 #define UDC_CSR_BUSY_ADDR	0x4f0	/* UDC_CSR_BUSY Status register */
44 #define UDC_SRST_ADDR		0x4fc	/* SOFT RESET register */
45 #define UDC_CSR_ADDR		0x500	/* USB_DEVICE endpoint register */
46 
47 /* Endpoint control register */
48 /* Bit position */
49 #define UDC_EPCTL_MRXFLUSH		(1 << 12)
50 #define UDC_EPCTL_RRDY			(1 << 9)
51 #define UDC_EPCTL_CNAK			(1 << 8)
52 #define UDC_EPCTL_SNAK			(1 << 7)
53 #define UDC_EPCTL_NAK			(1 << 6)
54 #define UDC_EPCTL_P			(1 << 3)
55 #define UDC_EPCTL_F			(1 << 1)
56 #define UDC_EPCTL_S			(1 << 0)
57 #define UDC_EPCTL_ET_SHIFT		4
58 /* Mask pattern */
59 #define UDC_EPCTL_ET_MASK		0x00000030
60 /* Value for ET field */
61 #define UDC_EPCTL_ET_CONTROL		0
62 #define UDC_EPCTL_ET_ISO		1
63 #define UDC_EPCTL_ET_BULK		2
64 #define UDC_EPCTL_ET_INTERRUPT		3
65 
66 /* Endpoint status register */
67 /* Bit position */
68 #define UDC_EPSTS_XFERDONE		(1 << 27)
69 #define UDC_EPSTS_RSS			(1 << 26)
70 #define UDC_EPSTS_RCS			(1 << 25)
71 #define UDC_EPSTS_TXEMPTY		(1 << 24)
72 #define UDC_EPSTS_TDC			(1 << 10)
73 #define UDC_EPSTS_HE			(1 << 9)
74 #define UDC_EPSTS_MRXFIFO_EMP		(1 << 8)
75 #define UDC_EPSTS_BNA			(1 << 7)
76 #define UDC_EPSTS_IN			(1 << 6)
77 #define UDC_EPSTS_OUT_SHIFT		4
78 /* Mask pattern */
79 #define UDC_EPSTS_OUT_MASK		0x00000030
80 #define UDC_EPSTS_ALL_CLR_MASK		0x1F0006F0
81 /* Value for OUT field */
82 #define UDC_EPSTS_OUT_SETUP		2
83 #define UDC_EPSTS_OUT_DATA		1
84 
85 /* Device configuration register */
86 /* Bit position */
87 #define UDC_DEVCFG_CSR_PRG		(1 << 17)
88 #define UDC_DEVCFG_SP			(1 << 3)
89 /* SPD Value */
90 #define UDC_DEVCFG_SPD_HS		0x0
91 #define UDC_DEVCFG_SPD_FS		0x1
92 #define UDC_DEVCFG_SPD_LS		0x2
93 
94 /* Device control register */
95 /* Bit position */
96 #define UDC_DEVCTL_THLEN_SHIFT		24
97 #define UDC_DEVCTL_BRLEN_SHIFT		16
98 #define UDC_DEVCTL_CSR_DONE		(1 << 13)
99 #define UDC_DEVCTL_SD			(1 << 10)
100 #define UDC_DEVCTL_MODE			(1 << 9)
101 #define UDC_DEVCTL_BREN			(1 << 8)
102 #define UDC_DEVCTL_THE			(1 << 7)
103 #define UDC_DEVCTL_DU			(1 << 4)
104 #define UDC_DEVCTL_TDE			(1 << 3)
105 #define UDC_DEVCTL_RDE			(1 << 2)
106 #define UDC_DEVCTL_RES			(1 << 0)
107 
108 /* Device status register */
109 /* Bit position */
110 #define UDC_DEVSTS_TS_SHIFT		18
111 #define UDC_DEVSTS_ENUM_SPEED_SHIFT	13
112 #define UDC_DEVSTS_ALT_SHIFT		8
113 #define UDC_DEVSTS_INTF_SHIFT		4
114 #define UDC_DEVSTS_CFG_SHIFT		0
115 /* Mask pattern */
116 #define UDC_DEVSTS_TS_MASK		0xfffc0000
117 #define UDC_DEVSTS_ENUM_SPEED_MASK	0x00006000
118 #define UDC_DEVSTS_ALT_MASK		0x00000f00
119 #define UDC_DEVSTS_INTF_MASK		0x000000f0
120 #define UDC_DEVSTS_CFG_MASK		0x0000000f
121 /* value for maximum speed for SPEED field */
122 #define UDC_DEVSTS_ENUM_SPEED_FULL	1
123 #define UDC_DEVSTS_ENUM_SPEED_HIGH	0
124 #define UDC_DEVSTS_ENUM_SPEED_LOW	2
125 #define UDC_DEVSTS_ENUM_SPEED_FULLX	3
126 
127 /* Device irq register */
128 /* Bit position */
129 #define UDC_DEVINT_RWKP			(1 << 7)
130 #define UDC_DEVINT_ENUM			(1 << 6)
131 #define UDC_DEVINT_SOF			(1 << 5)
132 #define UDC_DEVINT_US			(1 << 4)
133 #define UDC_DEVINT_UR			(1 << 3)
134 #define UDC_DEVINT_ES			(1 << 2)
135 #define UDC_DEVINT_SI			(1 << 1)
136 #define UDC_DEVINT_SC			(1 << 0)
137 /* Mask pattern */
138 #define UDC_DEVINT_MSK			0x7f
139 
140 /* Endpoint irq register */
141 /* Bit position */
142 #define UDC_EPINT_IN_SHIFT		0
143 #define UDC_EPINT_OUT_SHIFT		16
144 #define UDC_EPINT_IN_EP0		(1 << 0)
145 #define UDC_EPINT_OUT_EP0		(1 << 16)
146 /* Mask pattern */
147 #define UDC_EPINT_MSK_DISABLE_ALL	0xffffffff
148 
149 /* UDC_CSR_BUSY Status register */
150 /* Bit position */
151 #define UDC_CSR_BUSY			(1 << 0)
152 
153 /* SOFT RESET register */
154 /* Bit position */
155 #define UDC_PSRST			(1 << 1)
156 #define UDC_SRST			(1 << 0)
157 
158 /* USB_DEVICE endpoint register */
159 /* Bit position */
160 #define UDC_CSR_NE_NUM_SHIFT		0
161 #define UDC_CSR_NE_DIR_SHIFT		4
162 #define UDC_CSR_NE_TYPE_SHIFT		5
163 #define UDC_CSR_NE_CFG_SHIFT		7
164 #define UDC_CSR_NE_INTF_SHIFT		11
165 #define UDC_CSR_NE_ALT_SHIFT		15
166 #define UDC_CSR_NE_MAX_PKT_SHIFT	19
167 /* Mask pattern */
168 #define UDC_CSR_NE_NUM_MASK		0x0000000f
169 #define UDC_CSR_NE_DIR_MASK		0x00000010
170 #define UDC_CSR_NE_TYPE_MASK		0x00000060
171 #define UDC_CSR_NE_CFG_MASK		0x00000780
172 #define UDC_CSR_NE_INTF_MASK		0x00007800
173 #define UDC_CSR_NE_ALT_MASK		0x00078000
174 #define UDC_CSR_NE_MAX_PKT_MASK		0x3ff80000
175 
176 #define PCH_UDC_CSR(ep)	(UDC_CSR_ADDR + ep*4)
177 #define PCH_UDC_EPINT(in, num)\
178 		(1 << (num + (in ? UDC_EPINT_IN_SHIFT : UDC_EPINT_OUT_SHIFT)))
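/*
 * IN endpoints occupy bits 0-15 and OUT endpoints bits 16-31 of the endpoint
 * irq status/mask registers, e.g. PCH_UDC_EPINT(1, 0) is UDC_EPINT_IN_EP0
 * (bit 0) and PCH_UDC_EPINT(0, 1) is bit 17.
 */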
179 
180 /* Index of endpoint */
181 #define UDC_EP0IN_IDX		0
182 #define UDC_EP0OUT_IDX		1
183 #define UDC_EPIN_IDX(ep)	(ep * 2)
184 #define UDC_EPOUT_IDX(ep)	(ep * 2 + 1)
185 #define PCH_UDC_EP0		0
186 #define PCH_UDC_EP1		1
187 #define PCH_UDC_EP2		2
188 #define PCH_UDC_EP3		3
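/*
 * Physical endpoint n is represented by two entries in the driver's ep array:
 * UDC_EPIN_IDX(n) = 2n for the IN direction and UDC_EPOUT_IDX(n) = 2n + 1 for
 * the OUT direction, so ep0in is index 0 and ep0out is index 1.
 */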
189 
190 /* Number of endpoint */
191 #define PCH_UDC_EP_NUM		32	/* Total number of EPs (16 IN,16 OUT) */
192 #define PCH_UDC_USED_EP_NUM	4	/* EP number of EP's really used */
193 /* Length Value */
194 #define PCH_UDC_BRLEN		0x0F	/* Burst length */
195 #define PCH_UDC_THLEN		0x1F	/* Threshold length */
196 /* Value of EP Buffer Size */
197 #define UDC_EP0IN_BUFF_SIZE	16
198 #define UDC_EPIN_BUFF_SIZE	256
199 #define UDC_EP0OUT_BUFF_SIZE	16
200 #define UDC_EPOUT_BUFF_SIZE	256
201 /* Value of EP maximum packet size */
202 #define UDC_EP0IN_MAX_PKT_SIZE	64
203 #define UDC_EP0OUT_MAX_PKT_SIZE	64
204 #define UDC_BULK_MAX_PKT_SIZE	512
205 
206 /* DMA */
207 #define DMA_DIR_RX		1	/* DMA for data receive */
208 #define DMA_DIR_TX		2	/* DMA for data transmit */
209 #define DMA_ADDR_INVALID	(~(dma_addr_t)0)
210 #define UDC_DMA_MAXPACKET	65536	/* maximum packet size for DMA */
211 
212 /**
213  * struct pch_udc_data_dma_desc - Structure to hold DMA descriptor information
214  *				  for data
215  * @status:		Status quadlet
216  * @reserved:		Reserved
217  * @dataptr:		Data buffer pointer
218  * @next:		Next descriptor
219  */
220 struct pch_udc_data_dma_desc {
221 	u32 status;
222 	u32 reserved;
223 	u32 dataptr;
224 	u32 next;
225 };
226 
227 /**
228  * struct pch_udc_stp_dma_desc - Structure to hold DMA descriptor information
229  *				 for control data
230  * @status:	Status
231  * @reserved:	Reserved
232  * @request:	Control Request
233  */
234 struct pch_udc_stp_dma_desc {
235 	u32 status;
236 	u32 reserved;
237 	struct usb_ctrlrequest request;
238 } __attribute((packed));
239 
240 /* DMA status definitions */
241 /* Buffer status */
242 #define PCH_UDC_BUFF_STS	0xC0000000
243 #define PCH_UDC_BS_HST_RDY	0x00000000
244 #define PCH_UDC_BS_DMA_BSY	0x40000000
245 #define PCH_UDC_BS_DMA_DONE	0x80000000
246 #define PCH_UDC_BS_HST_BSY	0xC0000000
247 /*  Rx/Tx Status */
248 #define PCH_UDC_RXTX_STS	0x30000000
249 #define PCH_UDC_RTS_SUCC	0x00000000
250 #define PCH_UDC_RTS_DESERR	0x10000000
251 #define PCH_UDC_RTS_BUFERR	0x30000000
252 /* Last Descriptor Indication */
253 #define PCH_UDC_DMA_LAST	0x08000000
254 /* Number of Rx/Tx Bytes Mask */
255 #define PCH_UDC_RXTX_BYTES	0x0000ffff
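/*
 * The BUFF_STS field works as an ownership flag for each descriptor: the
 * driver sets HST_RDY to hand a descriptor to the DMA engine and HST_BSY to
 * keep the engine away from descriptors that are not yet set up; the
 * DMA_BSY/DMA_DONE values are written back by the hardware (see prepare_dma()
 * and pch_udc_start_rxrequest() below).
 */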
256 
257 /**
258  * struct pch_udc_cfg_data - Structure to hold current configuration
259  *			     and interface information
260  * @cur_cfg:	current configuration in use
261  * @cur_intf:	current interface in use
262  * @cur_alt:	current alt interface in use
263  */
264 struct pch_udc_cfg_data {
265 	u16 cur_cfg;
266 	u16 cur_intf;
267 	u16 cur_alt;
268 };
269 
270 /**
271  * struct pch_udc_ep - Structure holding a PCH USB device Endpoint information
272  * @ep:			embedded usb_ep
273  * @td_stp_phys:	for setup request
274  * @td_data_phys:	for data request
275  * @td_stp:		for setup request
276  * @td_data:		for data request
277  * @dev:		reference to device struct
278  * @offset_addr:	offset address of ep register
280  * @queue:		queue for requests
281  * @num:		endpoint number
282  * @in:			endpoint is IN
283  * @halted:		endpoint halted?
284  * @epsts:		Endpoint status
285  */
286 struct pch_udc_ep {
287 	struct usb_ep			ep;
288 	dma_addr_t			td_stp_phys;
289 	dma_addr_t			td_data_phys;
290 	struct pch_udc_stp_dma_desc	*td_stp;
291 	struct pch_udc_data_dma_desc	*td_data;
292 	struct pch_udc_dev		*dev;
293 	unsigned long			offset_addr;
294 	struct list_head		queue;
295 	unsigned			num:5,
296 					in:1,
297 					halted:1;
298 	unsigned long			epsts;
299 };
300 
301 /**
302  * struct pch_vbus_gpio_data - Structure holding GPIO information
303  *					for detecting VBUS
304  * @port:		gpio port number
305  * @intr:		gpio interrupt number
306  * @irq_work_fall:	Structure for WorkQueue
307  * @irq_work_rise:	Structure for WorkQueue
308  */
309 struct pch_vbus_gpio_data {
310 	int			port;
311 	int			intr;
312 	struct work_struct	irq_work_fall;
313 	struct work_struct	irq_work_rise;
314 };
315 
316 /**
317  * struct pch_udc_dev - Structure holding complete information
318  *			of the PCH USB device
319  * @gadget:		gadget driver data
320  * @driver:		reference to gadget driver bound
321  * @pdev:		reference to the PCI device
322  * @ep:			array of endpoints
323  * @lock:		protects all state
324  * @stall:		stall requested
325  * @prot_stall:		protocol stall requested
327  * @suspended:		driver in suspended state
328  * @connected:		gadget driver associated
329  * @vbus_session:	required vbus_session state
330  * @set_cfg_not_acked:	pending acknowledgement for setup
331  * @waiting_zlp_ack:	pending acknowledgement for ZLP
332  * @data_requests:	DMA pool for data requests
333  * @stp_requests:	DMA pool for setup requests
334  * @dma_addr:		DMA address of the receive buffer
335  * @setup_data:		Received setup data
336  * @base_addr:		for mapped device memory
337  * @cfg_data:		current cfg, intf, and alt in use
338  * @vbus_gpio:		GPIO information for detecting VBUS
339  */
340 struct pch_udc_dev {
341 	struct usb_gadget		gadget;
342 	struct usb_gadget_driver	*driver;
343 	struct pci_dev			*pdev;
344 	struct pch_udc_ep		ep[PCH_UDC_EP_NUM];
345 	spinlock_t			lock; /* protects all state */
346 	unsigned
347 			stall:1,
348 			prot_stall:1,
349 			suspended:1,
350 			connected:1,
351 			vbus_session:1,
352 			set_cfg_not_acked:1,
353 			waiting_zlp_ack:1;
354 	struct dma_pool		*data_requests;
355 	struct dma_pool		*stp_requests;
356 	dma_addr_t			dma_addr;
357 	struct usb_ctrlrequest		setup_data;
358 	void __iomem			*base_addr;
359 	struct pch_udc_cfg_data		cfg_data;
360 	struct pch_vbus_gpio_data	vbus_gpio;
361 };
362 #define to_pch_udc(g)	(container_of((g), struct pch_udc_dev, gadget))
363 
364 #define PCH_UDC_PCI_BAR_QUARK_X1000	0
365 #define PCH_UDC_PCI_BAR			1
366 
367 #define PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC	0x0939
368 #define PCI_DEVICE_ID_INTEL_EG20T_UDC		0x8808
369 
370 #define PCI_DEVICE_ID_ML7213_IOH_UDC	0x801D
371 #define PCI_DEVICE_ID_ML7831_IOH_UDC	0x8808
372 
373 static const char	ep0_string[] = "ep0in";
374 static DEFINE_SPINLOCK(udc_stall_spinlock);	/* stall spin lock */
375 static bool speed_fs;
376 module_param_named(speed_fs, speed_fs, bool, S_IRUGO);
377 MODULE_PARM_DESC(speed_fs, "true for Full speed operation");
378 
379 /**
380  * struct pch_udc_request - Structure holding a PCH USB device request packet
381  * @req:		embedded ep request
382  * @td_data_phys:	phys. address
383  * @td_data:		first dma desc. of chain
384  * @td_data_last:	last dma desc. of chain
385  * @queue:		associated queue
386  * @dma_going:		DMA in progress for request
387  * @dma_mapped:		DMA memory mapped for request
388  * @dma_done:		DMA completed for request
389  * @chain_len:		chain length
390  * @buf:		Buffer memory for align adjustment
391  * @dma:		DMA memory for align adjustment
392  */
393 struct pch_udc_request {
394 	struct usb_request		req;
395 	dma_addr_t			td_data_phys;
396 	struct pch_udc_data_dma_desc	*td_data;
397 	struct pch_udc_data_dma_desc	*td_data_last;
398 	struct list_head		queue;
399 	unsigned			dma_going:1,
400 					dma_mapped:1,
401 					dma_done:1;
402 	unsigned			chain_len;
403 	void				*buf;
404 	dma_addr_t			dma;
405 };
406 
407 static inline u32 pch_udc_readl(struct pch_udc_dev *dev, unsigned long reg)
408 {
409 	return ioread32(dev->base_addr + reg);
410 }
411 
412 static inline void pch_udc_writel(struct pch_udc_dev *dev,
413 				    unsigned long val, unsigned long reg)
414 {
415 	iowrite32(val, dev->base_addr + reg);
416 }
417 
418 static inline void pch_udc_bit_set(struct pch_udc_dev *dev,
419 				     unsigned long reg,
420 				     unsigned long bitmask)
421 {
422 	pch_udc_writel(dev, pch_udc_readl(dev, reg) | bitmask, reg);
423 }
424 
425 static inline void pch_udc_bit_clr(struct pch_udc_dev *dev,
426 				     unsigned long reg,
427 				     unsigned long bitmask)
428 {
429 	pch_udc_writel(dev, pch_udc_readl(dev, reg) & ~(bitmask), reg);
430 }
431 
432 static inline u32 pch_udc_ep_readl(struct pch_udc_ep *ep, unsigned long reg)
433 {
434 	return ioread32(ep->dev->base_addr + ep->offset_addr + reg);
435 }
436 
437 static inline void pch_udc_ep_writel(struct pch_udc_ep *ep,
438 				    unsigned long val, unsigned long reg)
439 {
440 	iowrite32(val, ep->dev->base_addr + ep->offset_addr + reg);
441 }
442 
443 static inline void pch_udc_ep_bit_set(struct pch_udc_ep *ep,
444 				     unsigned long reg,
445 				     unsigned long bitmask)
446 {
447 	pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) | bitmask, reg);
448 }
449 
450 static inline void pch_udc_ep_bit_clr(struct pch_udc_ep *ep,
451 				     unsigned long reg,
452 				     unsigned long bitmask)
453 {
454 	pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) & ~(bitmask), reg);
455 }
456 
457 /**
458  * pch_udc_csr_busy() - Wait till idle.
459  * @dev:	Reference to pch_udc_dev structure
460  */
461 static void pch_udc_csr_busy(struct pch_udc_dev *dev)
462 {
463 	unsigned int count = 200;
464 
465 	/* Wait till idle */
466 	while ((pch_udc_readl(dev, UDC_CSR_BUSY_ADDR) & UDC_CSR_BUSY)
467 		&& --count)
468 		cpu_relax();
469 	if (!count)
470 		dev_err(&dev->pdev->dev, "%s: wait error\n", __func__);
471 }
472 
473 /**
474  * pch_udc_write_csr() - Write the command and status registers.
475  * @dev:	Reference to pch_udc_dev structure
476  * @val:	value to be written to CSR register
477  * @ep:		end-point number
478  */
479 static void pch_udc_write_csr(struct pch_udc_dev *dev, unsigned long val,
480 			       unsigned int ep)
481 {
482 	unsigned long reg = PCH_UDC_CSR(ep);
483 
484 	pch_udc_csr_busy(dev);		/* Wait till idle */
485 	pch_udc_writel(dev, val, reg);
486 	pch_udc_csr_busy(dev);		/* Wait till idle */
487 }
488 
489 /**
490  * pch_udc_read_csr() - Read the command and status registers.
491  * @dev:	Reference to pch_udc_dev structure
492  * @ep:		end-point number
493  *
494  * Return codes:	content of CSR register
495  */
496 static u32 pch_udc_read_csr(struct pch_udc_dev *dev, unsigned int ep)
497 {
498 	unsigned long reg = PCH_UDC_CSR(ep);
499 
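	/*
	 * The CSR is read with a busy/idle handshake: wait until the
	 * controller is idle, issue a dummy read to start the access, wait
	 * for idle again, and then read back the value.
	 */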
500 	pch_udc_csr_busy(dev);		/* Wait till idle */
501 	pch_udc_readl(dev, reg);	/* Dummy read */
502 	pch_udc_csr_busy(dev);		/* Wait till idle */
503 	return pch_udc_readl(dev, reg);
504 }
505 
506 /**
507  * pch_udc_rmt_wakeup() - Initiate a remote wakeup
508  * @dev:	Reference to pch_udc_dev structure
509  */
510 static inline void pch_udc_rmt_wakeup(struct pch_udc_dev *dev)
511 {
512 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
513 	mdelay(1);
514 	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
515 }
516 
517 /**
518  * pch_udc_get_frame() - Get the current frame from device status register
519  * @dev:	Reference to pch_udc_dev structure
520  * Return	The current frame number
521  */
522 static inline int pch_udc_get_frame(struct pch_udc_dev *dev)
523 {
524 	u32 frame = pch_udc_readl(dev, UDC_DEVSTS_ADDR);
525 	return (frame & UDC_DEVSTS_TS_MASK) >> UDC_DEVSTS_TS_SHIFT;
526 }
527 
528 /**
529  * pch_udc_clear_selfpowered() - Clear the self power control
530  * @dev:	Reference to pch_udc_regs structure
531  */
532 static inline void pch_udc_clear_selfpowered(struct pch_udc_dev *dev)
533 {
534 	pch_udc_bit_clr(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
535 }
536 
537 /**
538  * pch_udc_set_selfpowered() - Set the self power control
539  * @dev:	Reference to pch_udc_regs structure
540  */
541 static inline void pch_udc_set_selfpowered(struct pch_udc_dev *dev)
542 {
543 	pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
544 }
545 
546 /**
547  * pch_udc_set_disconnect() - Set the disconnect status.
548  * @dev:	Reference to pch_udc_regs structure
549  */
550 static inline void pch_udc_set_disconnect(struct pch_udc_dev *dev)
551 {
552 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
553 }
554 
555 /**
556  * pch_udc_clear_disconnect() - Clear the disconnect status.
557  * @dev:	Reference to pch_udc_regs structure
558  */
559 static void pch_udc_clear_disconnect(struct pch_udc_dev *dev)
560 {
561 	/* Clear the disconnect */
562 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
563 	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
564 	mdelay(1);
565 	/* Resume USB signalling */
566 	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
567 }
568 
569 /**
570  * pch_udc_reconnect() - This API initializes the USB device controller
571  *						and clears the disconnect status.
572  * @dev:		Reference to pch_udc_regs structure
573  */
574 static void pch_udc_init(struct pch_udc_dev *dev);
575 static void pch_udc_reconnect(struct pch_udc_dev *dev)
576 {
577 	pch_udc_init(dev);
578 
579 	/* enable device interrupts */
580 	/* pch_udc_enable_interrupts() */
581 	pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR,
582 			UDC_DEVINT_UR | UDC_DEVINT_ENUM);
583 
584 	/* Clear the disconnect */
585 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
586 	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
587 	mdelay(1);
588 	/* Resume USB signalling */
589 	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
590 }
591 
592 /**
593  * pch_udc_vbus_session() - Set or clear the disconnect status.
594  * @dev:	Reference to pch_udc_regs structure
595  * @is_active:	Parameter specifying the action
596  *		  0:   indicating VBUS power is ending
597  *		  !0:  indicating VBUS power is starting
598  */
599 static inline void pch_udc_vbus_session(struct pch_udc_dev *dev,
600 					  int is_active)
601 {
602 	if (is_active) {
603 		pch_udc_reconnect(dev);
604 		dev->vbus_session = 1;
605 	} else {
606 		if (dev->driver && dev->driver->disconnect) {
607 			spin_lock(&dev->lock);
608 			dev->driver->disconnect(&dev->gadget);
609 			spin_unlock(&dev->lock);
610 		}
611 		pch_udc_set_disconnect(dev);
612 		dev->vbus_session = 0;
613 	}
614 }
615 
616 /**
617  * pch_udc_ep_set_stall() - Set the stall of endpoint
618  * @ep:		Reference to structure of type pch_udc_ep_regs
619  */
620 static void pch_udc_ep_set_stall(struct pch_udc_ep *ep)
621 {
622 	if (ep->in) {
623 		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
624 		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
625 	} else {
626 		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
627 	}
628 }
629 
630 /**
631  * pch_udc_ep_clear_stall() - Clear the stall of endpoint
632  * @ep:		Reference to structure of type pch_udc_ep_regs
633  */
634 static inline void pch_udc_ep_clear_stall(struct pch_udc_ep *ep)
635 {
636 	/* Clear the stall */
637 	pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
638 	/* Clear NAK by writing CNAK */
639 	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
640 }
641 
642 /**
643  * pch_udc_ep_set_trfr_type() - Set the transfer type of endpoint
644  * @ep:		Reference to structure of type pch_udc_ep_regs
645  * @type:	Type of endpoint
646  */
647 static inline void pch_udc_ep_set_trfr_type(struct pch_udc_ep *ep,
648 					u8 type)
649 {
650 	pch_udc_ep_writel(ep, ((type << UDC_EPCTL_ET_SHIFT) &
651 				UDC_EPCTL_ET_MASK), UDC_EPCTL_ADDR);
652 }
653 
654 /**
655  * pch_udc_ep_set_bufsz() - Set the buffer size for the endpoint
656  * @ep:		Reference to structure of type pch_udc_ep_regs
657  * @buf_size:	The buffer word size
658  * @ep_in:	EP is IN
659  */
660 static void pch_udc_ep_set_bufsz(struct pch_udc_ep *ep,
661 						 u32 buf_size, u32 ep_in)
662 {
663 	u32 data;
664 	if (ep_in) {
665 		data = pch_udc_ep_readl(ep, UDC_BUFIN_FRAMENUM_ADDR);
666 		data = (data & 0xffff0000) | (buf_size & 0xffff);
667 		pch_udc_ep_writel(ep, data, UDC_BUFIN_FRAMENUM_ADDR);
668 	} else {
669 		data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
670 		data = (buf_size << 16) | (data & 0xffff);
671 		pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
672 	}
673 }
674 
675 /**
676  * pch_udc_ep_set_maxpkt() - Set the Max packet size for the endpoint
677  * @ep:		Reference to structure of type pch_udc_ep_regs
678  * @pkt_size:	The packet byte size
679  */
680 static void pch_udc_ep_set_maxpkt(struct pch_udc_ep *ep, u32 pkt_size)
681 {
682 	u32 data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
683 	data = (data & 0xffff0000) | (pkt_size & 0xffff);
684 	pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
685 }
686 
687 /**
688  * pch_udc_ep_set_subptr() - Set the Setup buffer pointer for the endpoint
689  * @ep:		Reference to structure of type pch_udc_ep_regs
690  * @addr:	DMA address of the setup buffer
691  */
692 static inline void pch_udc_ep_set_subptr(struct pch_udc_ep *ep, u32 addr)
693 {
694 	pch_udc_ep_writel(ep, addr, UDC_SUBPTR_ADDR);
695 }
696 
697 /**
698  * pch_udc_ep_set_ddptr() - Set the Data descriptor pointer for the endpoint
699  * @ep:		Reference to structure of type pch_udc_ep_regs
700  * @addr:	DMA address of the data descriptor
701  */
702 static inline void pch_udc_ep_set_ddptr(struct pch_udc_ep *ep, u32 addr)
703 {
704 	pch_udc_ep_writel(ep, addr, UDC_DESPTR_ADDR);
705 }
706 
707 /**
708  * pch_udc_ep_set_pd() - Set the poll demand bit for the endpoint
709  * @ep:		Reference to structure of type pch_udc_ep_regs
710  */
711 static inline void pch_udc_ep_set_pd(struct pch_udc_ep *ep)
712 {
713 	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_P);
714 }
715 
716 /**
717  * pch_udc_ep_set_rrdy() - Set the receive ready bit for the endpoint
718  * @ep:		Reference to structure of type pch_udc_ep_regs
719  */
720 static inline void pch_udc_ep_set_rrdy(struct pch_udc_ep *ep)
721 {
722 	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
723 }
724 
725 /**
726  * pch_udc_ep_clear_rrdy() - Clear the receive ready bit for the endpoint
727  * @ep:		Reference to structure of type pch_udc_ep_regs
728  */
729 static inline void pch_udc_ep_clear_rrdy(struct pch_udc_ep *ep)
730 {
731 	pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
732 }
733 
734 /**
735  * pch_udc_set_dma() - Set the TDE or RDE bit of the device control
736  *			register depending on the direction specified
737  * @dev:	Reference to structure of type pch_udc_regs
738  * @dir:	whether Tx or Rx
739  *		  DMA_DIR_RX: Receive
740  *		  DMA_DIR_TX: Transmit
741  */
742 static inline void pch_udc_set_dma(struct pch_udc_dev *dev, int dir)
743 {
744 	if (dir == DMA_DIR_RX)
745 		pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
746 	else if (dir == DMA_DIR_TX)
747 		pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
748 }
749 
750 /**
751  * pch_udc_clear_dma() - Clear the TDE or RDE bit of the device control
752  *				 register depending on the direction specified
753  * @dev:	Reference to structure of type pch_udc_regs
754  * @dir:	Whether Tx or Rx
755  *		  DMA_DIR_RX: Receive
756  *		  DMA_DIR_TX: Transmit
757  */
758 static inline void pch_udc_clear_dma(struct pch_udc_dev *dev, int dir)
759 {
760 	if (dir == DMA_DIR_RX)
761 		pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
762 	else if (dir == DMA_DIR_TX)
763 		pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
764 }
765 
766 /**
767  * pch_udc_set_csr_done() - Set the device control register
768  *				CSR done field (bit 13)
769  * @dev:	reference to structure of type pch_udc_regs
770  */
771 static inline void pch_udc_set_csr_done(struct pch_udc_dev *dev)
772 {
773 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_CSR_DONE);
774 }
775 
776 /**
777  * pch_udc_disable_interrupts() - Disables the specified interrupts
778  * @dev:	Reference to structure of type pch_udc_regs
779  * @mask:	Mask to disable interrupts
780  */
781 static inline void pch_udc_disable_interrupts(struct pch_udc_dev *dev,
782 					    u32 mask)
783 {
784 	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, mask);
785 }
786 
787 /**
788  * pch_udc_enable_interrupts() - Enable the specified interrupts
789  * @dev:	Reference to structure of type pch_udc_regs
790  * @mask:	Mask to enable interrupts
791  */
792 static inline void pch_udc_enable_interrupts(struct pch_udc_dev *dev,
793 					   u32 mask)
794 {
795 	pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR, mask);
796 }
797 
798 /**
799  * pch_udc_disable_ep_interrupts() - Disable endpoint interrupts
800  * @dev:	Reference to structure of type pch_udc_regs
801  * @mask:	Mask to disable interrupts
802  */
803 static inline void pch_udc_disable_ep_interrupts(struct pch_udc_dev *dev,
804 						u32 mask)
805 {
806 	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, mask);
807 }
808 
809 /**
810  * pch_udc_enable_ep_interrupts() - Enable endpoint interrupts
811  * @dev:	Reference to structure of type pch_udc_regs
812  * @mask:	Mask to enable interrupts
813  */
814 static inline void pch_udc_enable_ep_interrupts(struct pch_udc_dev *dev,
815 					      u32 mask)
816 {
817 	pch_udc_bit_clr(dev, UDC_EPIRQMSK_ADDR, mask);
818 }
819 
820 /**
821  * pch_udc_read_device_interrupts() - Read the device interrupts
822  * @dev:	Reference to structure of type pch_udc_regs
823  * Return	The device interrupt status
824  */
825 static inline u32 pch_udc_read_device_interrupts(struct pch_udc_dev *dev)
826 {
827 	return pch_udc_readl(dev, UDC_DEVIRQSTS_ADDR);
828 }
829 
830 /**
831  * pch_udc_write_device_interrupts() - Write device interrupts
832  * @dev:	Reference to structure of type pch_udc_regs
833  * @val:	The value to be written to interrupt register
834  */
835 static inline void pch_udc_write_device_interrupts(struct pch_udc_dev *dev,
836 						     u32 val)
837 {
838 	pch_udc_writel(dev, val, UDC_DEVIRQSTS_ADDR);
839 }
840 
841 /**
842  * pch_udc_read_ep_interrupts() - Read the endpoint interrupts
843  * @dev:	Reference to structure of type pch_udc_regs
844  * Return	The endpoint interrupt status
845  */
846 static inline u32 pch_udc_read_ep_interrupts(struct pch_udc_dev *dev)
847 {
848 	return pch_udc_readl(dev, UDC_EPIRQSTS_ADDR);
849 }
850 
851 /**
852  * pch_udc_write_ep_interrupts() - Clear endpoint interrupts
853  * @dev:	Reference to structure of type pch_udc_regs
854  * @val:	The value to be written to interrupt register
855  */
856 static inline void pch_udc_write_ep_interrupts(struct pch_udc_dev *dev,
857 					     u32 val)
858 {
859 	pch_udc_writel(dev, val, UDC_EPIRQSTS_ADDR);
860 }
861 
862 /**
863  * pch_udc_read_device_status() - Read the device status
864  * @dev:	Reference to structure of type pch_udc_regs
865  * Return	The device status
866  */
867 static inline u32 pch_udc_read_device_status(struct pch_udc_dev *dev)
868 {
869 	return pch_udc_readl(dev, UDC_DEVSTS_ADDR);
870 }
871 
872 /**
873  * pch_udc_read_ep_control() - Read the endpoint control
874  * @ep:		Reference to structure of type pch_udc_ep_regs
875  * Return	The endpoint control register value
876  */
877 static inline u32 pch_udc_read_ep_control(struct pch_udc_ep *ep)
878 {
879 	return pch_udc_ep_readl(ep, UDC_EPCTL_ADDR);
880 }
881 
882 /**
883  * pch_udc_clear_ep_control() - Clear the endpoint control register
884  * @ep:		Reference to structure of type pch_udc_ep_regs
886  */
887 static inline void pch_udc_clear_ep_control(struct pch_udc_ep *ep)
888 {
889 	pch_udc_ep_writel(ep, 0, UDC_EPCTL_ADDR);
890 }
891 
892 /**
893  * pch_udc_read_ep_status() - Read the endpoint status
894  * @ep:		Reference to structure of type pch_udc_ep_regs
895  * Return	The endpoint status
896  */
897 static inline u32 pch_udc_read_ep_status(struct pch_udc_ep *ep)
898 {
899 	return pch_udc_ep_readl(ep, UDC_EPSTS_ADDR);
900 }
901 
902 /**
903  * pch_udc_clear_ep_status() - Clear the endpoint status
904  * @ep:		Reference to structure of type pch_udc_ep_regs
905  * @stat:	Endpoint status
906  */
907 static inline void pch_udc_clear_ep_status(struct pch_udc_ep *ep,
908 					 u32 stat)
909 {
910 	pch_udc_ep_writel(ep, stat, UDC_EPSTS_ADDR);
911 }
912 
913 /**
914  * pch_udc_ep_set_nak() - Set bit 7 (SNAK field)
915  *				of the endpoint control register
916  * @ep:		Reference to structure of type pch_udc_ep_regs
917  */
918 static inline void pch_udc_ep_set_nak(struct pch_udc_ep *ep)
919 {
920 	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_SNAK);
921 }
922 
923 /**
924  * pch_udc_ep_clear_nak() - Set bit 8 (CNAK field)
925  *				of the endpoint control register
926  * @ep:		reference to structure of type pch_udc_ep_regs
927  */
928 static void pch_udc_ep_clear_nak(struct pch_udc_ep *ep)
929 {
930 	unsigned int loopcnt = 0;
931 	struct pch_udc_dev *dev = ep->dev;
932 
933 	if (!(pch_udc_ep_readl(ep, UDC_EPCTL_ADDR) & UDC_EPCTL_NAK))
934 		return;
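	/*
	 * For OUT endpoints the receive FIFO must drain before NAK can be
	 * cleared, so wait for MRXFIFO_EMP and then retry the CNAK write
	 * until the NAK status bit actually clears.
	 */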
935 	if (!ep->in) {
936 		loopcnt = 10000;
937 		while (!(pch_udc_read_ep_status(ep) & UDC_EPSTS_MRXFIFO_EMP) &&
938 			--loopcnt)
939 			udelay(5);
940 		if (!loopcnt)
941 			dev_err(&dev->pdev->dev, "%s: RxFIFO not Empty\n",
942 				__func__);
943 	}
944 	loopcnt = 10000;
945 	while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_NAK) && --loopcnt) {
946 		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
947 		udelay(5);
948 	}
949 	if (!loopcnt)
950 		dev_err(&dev->pdev->dev, "%s: Clear NAK not set for ep%d%s\n",
951 			__func__, ep->num, (ep->in ? "in" : "out"));
952 }
953 
954 /**
955  * pch_udc_ep_fifo_flush() - Flush the endpoint fifo
956  * @ep:	reference to structure of type pch_udc_ep_regs
957  * @dir:	direction of endpoint
958  *		  0:  endpoint is OUT
959  *		  !0: endpoint is IN
960  */
961 static void pch_udc_ep_fifo_flush(struct pch_udc_ep *ep, int dir)
962 {
963 	if (dir) {	/* IN ep */
964 		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
965 		return;
966 	}
967 }
968 
969 /**
970  * pch_udc_ep_enable() - This API enables the endpoint
971  * @ep:		reference to structure of type pch_udc_ep_regs
972  * @cfg:	current configuration information
973  * @desc:	endpoint descriptor
974  */
975 static void pch_udc_ep_enable(struct pch_udc_ep *ep,
976 			       struct pch_udc_cfg_data *cfg,
977 			       const struct usb_endpoint_descriptor *desc)
978 {
979 	u32 val = 0;
980 	u32 buff_size = 0;
981 
982 	pch_udc_ep_set_trfr_type(ep, desc->bmAttributes);
983 	if (ep->in)
984 		buff_size = UDC_EPIN_BUFF_SIZE;
985 	else
986 		buff_size = UDC_EPOUT_BUFF_SIZE;
987 	pch_udc_ep_set_bufsz(ep, buff_size, ep->in);
988 	pch_udc_ep_set_maxpkt(ep, usb_endpoint_maxp(desc));
989 	pch_udc_ep_set_nak(ep);
990 	pch_udc_ep_fifo_flush(ep, ep->in);
991 	/* Configure the endpoint */
992 	val = ep->num << UDC_CSR_NE_NUM_SHIFT | ep->in << UDC_CSR_NE_DIR_SHIFT |
993 	      ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) <<
994 		UDC_CSR_NE_TYPE_SHIFT) |
995 	      (cfg->cur_cfg << UDC_CSR_NE_CFG_SHIFT) |
996 	      (cfg->cur_intf << UDC_CSR_NE_INTF_SHIFT) |
997 	      (cfg->cur_alt << UDC_CSR_NE_ALT_SHIFT) |
998 	      usb_endpoint_maxp(desc) << UDC_CSR_NE_MAX_PKT_SHIFT;
999 
1000 	if (ep->in)
1001 		pch_udc_write_csr(ep->dev, val, UDC_EPIN_IDX(ep->num));
1002 	else
1003 		pch_udc_write_csr(ep->dev, val, UDC_EPOUT_IDX(ep->num));
1004 }
1005 
1006 /**
1007  * pch_udc_ep_disable() - This API disables the endpoint
1008  * @ep:		reference to structure of type pch_udc_ep_regs
1009  */
1010 static void pch_udc_ep_disable(struct pch_udc_ep *ep)
1011 {
1012 	if (ep->in) {
1013 		/* flush the fifo */
1014 		pch_udc_ep_writel(ep, UDC_EPCTL_F, UDC_EPCTL_ADDR);
1015 		/* set NAK */
1016 		pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
1017 		pch_udc_ep_bit_set(ep, UDC_EPSTS_ADDR, UDC_EPSTS_IN);
1018 	} else {
1019 		/* set NAK */
1020 		pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
1021 	}
1022 	/* reset desc pointer */
1023 	pch_udc_ep_writel(ep, 0, UDC_DESPTR_ADDR);
1024 }
1025 
1026 /**
1027  * pch_udc_wait_ep_stall() - Wait until the EP stall (S) bit is cleared.
1028  * @ep:		reference to structure of type pch_udc_ep_regs
1029  */
1030 static void pch_udc_wait_ep_stall(struct pch_udc_ep *ep)
1031 {
1032 	unsigned int count = 10000;
1033 
1034 	/* Wait till idle */
1035 	while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_S) && --count)
1036 		udelay(5);
1037 	if (!count)
1038 		dev_err(&ep->dev->pdev->dev, "%s: wait error\n", __func__);
1039 }
1040 
1041 /**
1042  * pch_udc_init() - This API initializes the USB device controller
1043  * @dev:	Reference to pch_udc_regs structure
1044  */
1045 static void pch_udc_init(struct pch_udc_dev *dev)
1046 {
1047 	if (NULL == dev) {
1048 		pr_err("%s: Invalid address\n", __func__);
1049 		return;
1050 	}
1051 	/* Soft Reset and Reset PHY */
1052 	pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
1053 	pch_udc_writel(dev, UDC_SRST | UDC_PSRST, UDC_SRST_ADDR);
1054 	mdelay(1);
1055 	pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
1056 	pch_udc_writel(dev, 0x00, UDC_SRST_ADDR);
1057 	mdelay(1);
1058 	/* mask and clear all device interrupts */
1059 	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
1060 	pch_udc_bit_set(dev, UDC_DEVIRQSTS_ADDR, UDC_DEVINT_MSK);
1061 
1062 	/* mask and clear all ep interrupts */
1063 	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1064 	pch_udc_bit_set(dev, UDC_EPIRQSTS_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1065 
1066 	/* enable dynamic CSR programming, self powered and device speed */
1067 	if (speed_fs)
1068 		pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
1069 				UDC_DEVCFG_SP | UDC_DEVCFG_SPD_FS);
1070 	else /* default: high speed */
1071 		pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
1072 				UDC_DEVCFG_SP | UDC_DEVCFG_SPD_HS);
1073 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR,
1074 			(PCH_UDC_THLEN << UDC_DEVCTL_THLEN_SHIFT) |
1075 			(PCH_UDC_BRLEN << UDC_DEVCTL_BRLEN_SHIFT) |
1076 			UDC_DEVCTL_MODE | UDC_DEVCTL_BREN |
1077 			UDC_DEVCTL_THE);
1078 }
1079 
1080 /**
1081  * pch_udc_exit() - This API shuts down the USB device controller
1082  * @dev:	Reference to pch_udc_regs structure
1083  */
1084 static void pch_udc_exit(struct pch_udc_dev *dev)
1085 {
1086 	/* mask all device interrupts */
1087 	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
1088 	/* mask all ep interrupts */
1089 	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1090 	/* put device in disconnected state */
1091 	pch_udc_set_disconnect(dev);
1092 }
1093 
1094 /**
1095  * pch_udc_pcd_get_frame() - This API is invoked to get the current frame number
1096  * @gadget:	Reference to the gadget driver
1097  *
1098  * Return codes:
1099  *	>= 0:		The current frame number
1100  *	-EINVAL:	If the gadget passed is NULL
1101  */
1102 static int pch_udc_pcd_get_frame(struct usb_gadget *gadget)
1103 {
1104 	struct pch_udc_dev	*dev;
1105 
1106 	if (!gadget)
1107 		return -EINVAL;
1108 	dev = container_of(gadget, struct pch_udc_dev, gadget);
1109 	return pch_udc_get_frame(dev);
1110 }
1111 
1112 /**
1113  * pch_udc_pcd_wakeup() - This API is invoked to initiate a remote wakeup
1114  * @gadget:	Reference to the gadget driver
1115  *
1116  * Return codes:
1117  *	0:		Success
1118  *	-EINVAL:	If the gadget passed is NULL
1119  */
1120 static int pch_udc_pcd_wakeup(struct usb_gadget *gadget)
1121 {
1122 	struct pch_udc_dev	*dev;
1123 	unsigned long		flags;
1124 
1125 	if (!gadget)
1126 		return -EINVAL;
1127 	dev = container_of(gadget, struct pch_udc_dev, gadget);
1128 	spin_lock_irqsave(&dev->lock, flags);
1129 	pch_udc_rmt_wakeup(dev);
1130 	spin_unlock_irqrestore(&dev->lock, flags);
1131 	return 0;
1132 }
1133 
1134 /**
1135  * pch_udc_pcd_selfpowered() - This API is invoked to specify whether the device
1136  *				is self powered or not
1137  * @gadget:	Reference to the gadget driver
1138  * @value:	Specifies self powered or not
1139  *
1140  * Return codes:
1141  *	0:		Success
1142  *	-EINVAL:	If the gadget passed is NULL
1143  */
1144 static int pch_udc_pcd_selfpowered(struct usb_gadget *gadget, int value)
1145 {
1146 	struct pch_udc_dev	*dev;
1147 
1148 	if (!gadget)
1149 		return -EINVAL;
1150 	gadget->is_selfpowered = (value != 0);
1151 	dev = container_of(gadget, struct pch_udc_dev, gadget);
1152 	if (value)
1153 		pch_udc_set_selfpowered(dev);
1154 	else
1155 		pch_udc_clear_selfpowered(dev);
1156 	return 0;
1157 }
1158 
1159 /**
1160  * pch_udc_pcd_pullup() - This API is invoked to make the device
1161  *				visible/invisible to the host
1162  * @gadget:	Reference to the gadget driver
1163  * @is_on:	Specifies whether the pull up is made active or inactive
1164  *
1165  * Return codes:
1166  *	0:		Success
1167  *	-EINVAL:	If the gadget passed is NULL
1168  */
1169 static int pch_udc_pcd_pullup(struct usb_gadget *gadget, int is_on)
1170 {
1171 	struct pch_udc_dev	*dev;
1172 
1173 	if (!gadget)
1174 		return -EINVAL;
1175 	dev = container_of(gadget, struct pch_udc_dev, gadget);
1176 	if (is_on) {
1177 		pch_udc_reconnect(dev);
1178 	} else {
1179 		if (dev->driver && dev->driver->disconnect) {
1180 			spin_lock(&dev->lock);
1181 			dev->driver->disconnect(&dev->gadget);
1182 			spin_unlock(&dev->lock);
1183 		}
1184 		pch_udc_set_disconnect(dev);
1185 	}
1186 
1187 	return 0;
1188 }
1189 
1190 /**
1191  * pch_udc_pcd_vbus_session() - This API is used by a driver for an external
1192  *				transceiver (or GPIO) that
1193  *				detects a VBUS power session starting/ending
1194  * @gadget:	Reference to the gadget driver
1195  * @is_active:	specifies whether the session is starting or ending
1196  *
1197  * Return codes:
1198  *	0:		Success
1199  *	-EINVAL:	If the gadget passed is NULL
1200  */
1201 static int pch_udc_pcd_vbus_session(struct usb_gadget *gadget, int is_active)
1202 {
1203 	struct pch_udc_dev	*dev;
1204 
1205 	if (!gadget)
1206 		return -EINVAL;
1207 	dev = container_of(gadget, struct pch_udc_dev, gadget);
1208 	pch_udc_vbus_session(dev, is_active);
1209 	return 0;
1210 }
1211 
1212 /**
1213  * pch_udc_pcd_vbus_draw() - This API is used by gadget drivers during
1214  *				SET_CONFIGURATION calls to
1215  *				specify how much power the device can consume
1216  * @gadget:	Reference to the gadget driver
1217  * @mA:		specifies the current limit in mA
1218  *
1219  * Return codes:
1220  *	-EOPNOTSUPP:	always; this operation is not supported
1222  */
1223 static int pch_udc_pcd_vbus_draw(struct usb_gadget *gadget, unsigned int mA)
1224 {
1225 	return -EOPNOTSUPP;
1226 }
1227 
1228 static int pch_udc_start(struct usb_gadget *g,
1229 		struct usb_gadget_driver *driver);
1230 static int pch_udc_stop(struct usb_gadget *g);
1231 
1232 static const struct usb_gadget_ops pch_udc_ops = {
1233 	.get_frame = pch_udc_pcd_get_frame,
1234 	.wakeup = pch_udc_pcd_wakeup,
1235 	.set_selfpowered = pch_udc_pcd_selfpowered,
1236 	.pullup = pch_udc_pcd_pullup,
1237 	.vbus_session = pch_udc_pcd_vbus_session,
1238 	.vbus_draw = pch_udc_pcd_vbus_draw,
1239 	.udc_start = pch_udc_start,
1240 	.udc_stop = pch_udc_stop,
1241 };
1242 
1243 /**
1244  * pch_vbus_gpio_get_value() - This API gets the value of the GPIO port as the VBUS status.
1245  * @dev:	Reference to the driver structure
1246  *
1247  * Return value:
1248  *	1: VBUS is high
1249  *	0: VBUS is low
1250  *     -1: VBUS detection using GPIO is not enabled
1251  */
1252 static int pch_vbus_gpio_get_value(struct pch_udc_dev *dev)
1253 {
1254 	int vbus = 0;
1255 
1256 	if (dev->vbus_gpio.port)
1257 		vbus = gpio_get_value(dev->vbus_gpio.port) ? 1 : 0;
1258 	else
1259 		vbus = -1;
1260 
1261 	return vbus;
1262 }
1263 
1264 /**
1265  * pch_vbus_gpio_work_fall() - This API watches for VBUS going low.
1266  *                             If VBUS stays low, a disconnect is processed
1267  * @irq_work:	Structure for WorkQueue
1268  *
1269  */
1270 static void pch_vbus_gpio_work_fall(struct work_struct *irq_work)
1271 {
1272 	struct pch_vbus_gpio_data *vbus_gpio = container_of(irq_work,
1273 		struct pch_vbus_gpio_data, irq_work_fall);
1274 	struct pch_udc_dev *dev =
1275 		container_of(vbus_gpio, struct pch_udc_dev, vbus_gpio);
1276 	int vbus_saved = -1;
1277 	int vbus;
1278 	int count;
1279 
1280 	if (!dev->vbus_gpio.port)
1281 		return;
1282 
1283 	for (count = 0; count < (PCH_VBUS_PERIOD / PCH_VBUS_INTERVAL);
1284 		count++) {
1285 		vbus = pch_vbus_gpio_get_value(dev);
1286 
1287 		if ((vbus_saved == vbus) && (vbus == 0)) {
1288 			dev_dbg(&dev->pdev->dev, "VBUS fell\n");
1289 			if (dev->driver
1290 				&& dev->driver->disconnect) {
1291 				dev->driver->disconnect(
1292 					&dev->gadget);
1293 			}
1294 			if (dev->vbus_gpio.intr)
1295 				pch_udc_init(dev);
1296 			else
1297 				pch_udc_reconnect(dev);
1298 			return;
1299 		}
1300 		vbus_saved = vbus;
1301 		mdelay(PCH_VBUS_INTERVAL);
1302 	}
1303 }
1304 
1305 /**
1306  * pch_vbus_gpio_work_rise() - This API checks whether VBUS is high.
1307  *                             If VBUS is high, a connect is processed
1308  * @irq_work:	Structure for WorkQueue
1309  *
1310  */
1311 static void pch_vbus_gpio_work_rise(struct work_struct *irq_work)
1312 {
1313 	struct pch_vbus_gpio_data *vbus_gpio = container_of(irq_work,
1314 		struct pch_vbus_gpio_data, irq_work_rise);
1315 	struct pch_udc_dev *dev =
1316 		container_of(vbus_gpio, struct pch_udc_dev, vbus_gpio);
1317 	int vbus;
1318 
1319 	if (!dev->vbus_gpio.port)
1320 		return;
1321 
1322 	mdelay(PCH_VBUS_INTERVAL);
1323 	vbus = pch_vbus_gpio_get_value(dev);
1324 
1325 	if (vbus == 1) {
1326 		dev_dbg(&dev->pdev->dev, "VBUS rose\n");
1327 		pch_udc_reconnect(dev);
1328 		return;
1329 	}
1330 }
1331 
1332 /**
1333  * pch_vbus_gpio_irq() - IRQ handler for GPIO interrupt for changing VBUS
1334  * @irq:	Interrupt request number
1335  * @data:	Reference to the device structure
1336  *
1337  * Return codes:
1338  *	IRQ_HANDLED:	VBUS change was handled
1339  *	IRQ_NONE:	GPIO-based VBUS detection is not in use
1340  */
1341 static irqreturn_t pch_vbus_gpio_irq(int irq, void *data)
1342 {
1343 	struct pch_udc_dev *dev = (struct pch_udc_dev *)data;
1344 
1345 	if (!dev->vbus_gpio.port || !dev->vbus_gpio.intr)
1346 		return IRQ_NONE;
1347 
1348 	if (pch_vbus_gpio_get_value(dev))
1349 		schedule_work(&dev->vbus_gpio.irq_work_rise);
1350 	else
1351 		schedule_work(&dev->vbus_gpio.irq_work_fall);
1352 
1353 	return IRQ_HANDLED;
1354 }
1355 
1356 /**
1357  * pch_vbus_gpio_init() - This API initializes GPIO port detecting VBUS.
1358  * @dev:		Reference to the driver structure
1359  * @vbus_gpio_port:	Number of the GPIO port used to detect VBUS
1360  *
1361  * Return codes:
1362  *	0: Success
1363  *	-EINVAL: GPIO port is invalid or can't be initialized.
1364  */
1365 static int pch_vbus_gpio_init(struct pch_udc_dev *dev, int vbus_gpio_port)
1366 {
1367 	int err;
1368 	int irq_num = 0;
1369 
1370 	dev->vbus_gpio.port = 0;
1371 	dev->vbus_gpio.intr = 0;
1372 
1373 	if (vbus_gpio_port <= -1)
1374 		return -EINVAL;
1375 
1376 	err = gpio_is_valid(vbus_gpio_port);
1377 	if (!err) {
1378 		pr_err("%s: gpio port %d is invalid\n",
1379 			__func__, vbus_gpio_port);
1380 		return -EINVAL;
1381 	}
1382 
1383 	err = gpio_request(vbus_gpio_port, "pch_vbus");
1384 	if (err) {
1385 		pr_err("%s: can't request gpio port %d, err: %d\n",
1386 			__func__, vbus_gpio_port, err);
1387 		return -EINVAL;
1388 	}
1389 
1390 	dev->vbus_gpio.port = vbus_gpio_port;
1391 	gpio_direction_input(vbus_gpio_port);
1392 	INIT_WORK(&dev->vbus_gpio.irq_work_fall, pch_vbus_gpio_work_fall);
1393 
1394 	irq_num = gpio_to_irq(vbus_gpio_port);
1395 	if (irq_num > 0) {
1396 		irq_set_irq_type(irq_num, IRQ_TYPE_EDGE_BOTH);
1397 		err = request_irq(irq_num, pch_vbus_gpio_irq, 0,
1398 			"vbus_detect", dev);
1399 		if (!err) {
1400 			dev->vbus_gpio.intr = irq_num;
1401 			INIT_WORK(&dev->vbus_gpio.irq_work_rise,
1402 				pch_vbus_gpio_work_rise);
1403 		} else {
1404 			pr_err("%s: can't request irq %d, err: %d\n",
1405 				__func__, irq_num, err);
1406 		}
1407 	}
1408 
1409 	return 0;
1410 }
1411 
1412 /**
1413  * pch_vbus_gpio_free() - This API frees resources of GPIO port
1414  * @dev:	Reference to the driver structure
1415  */
1416 static void pch_vbus_gpio_free(struct pch_udc_dev *dev)
1417 {
1418 	if (dev->vbus_gpio.intr)
1419 		free_irq(dev->vbus_gpio.intr, dev);
1420 
1421 	if (dev->vbus_gpio.port)
1422 		gpio_free(dev->vbus_gpio.port);
1423 }
1424 
1425 /**
1426  * complete_req() - This API is invoked from the driver when processing
1427  *			of a request is complete
1428  * @ep:		Reference to the endpoint structure
1429  * @req:	Reference to the request structure
1430  * @status:	Indicates the success/failure of completion
1431  */
1432 static void complete_req(struct pch_udc_ep *ep, struct pch_udc_request *req,
1433 								 int status)
1434 	__releases(&dev->lock)
1435 	__acquires(&dev->lock)
1436 {
1437 	struct pch_udc_dev	*dev;
1438 	unsigned halted = ep->halted;
1439 
1440 	list_del_init(&req->queue);
1441 
1442 	/* set new status if pending */
1443 	if (req->req.status == -EINPROGRESS)
1444 		req->req.status = status;
1445 	else
1446 		status = req->req.status;
1447 
1448 	dev = ep->dev;
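	/*
	 * Undo whichever mapping pch_udc_pcd_queue() set up: req->req.dma is
	 * used when the gadget buffer was mapped directly, otherwise req->dma
	 * points at a bounce buffer whose contents must be copied back to the
	 * gadget buffer for OUT transfers before it is freed.
	 */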
1449 	if (req->dma_mapped) {
1450 		if (req->dma == DMA_ADDR_INVALID) {
1451 			if (ep->in)
1452 				dma_unmap_single(&dev->pdev->dev, req->req.dma,
1453 						 req->req.length,
1454 						 DMA_TO_DEVICE);
1455 			else
1456 				dma_unmap_single(&dev->pdev->dev, req->req.dma,
1457 						 req->req.length,
1458 						 DMA_FROM_DEVICE);
1459 			req->req.dma = DMA_ADDR_INVALID;
1460 		} else {
1461 			if (ep->in)
1462 				dma_unmap_single(&dev->pdev->dev, req->dma,
1463 						 req->req.length,
1464 						 DMA_TO_DEVICE);
1465 			else {
1466 				dma_unmap_single(&dev->pdev->dev, req->dma,
1467 						 req->req.length,
1468 						 DMA_FROM_DEVICE);
1469 				memcpy(req->req.buf, req->buf, req->req.length);
1470 			}
1471 			kfree(req->buf);
1472 			req->dma = DMA_ADDR_INVALID;
1473 		}
1474 		req->dma_mapped = 0;
1475 	}
1476 	ep->halted = 1;
1477 	spin_unlock(&dev->lock);
1478 	if (!ep->in)
1479 		pch_udc_ep_clear_rrdy(ep);
1480 	usb_gadget_giveback_request(&ep->ep, &req->req);
1481 	spin_lock(&dev->lock);
1482 	ep->halted = halted;
1483 }
1484 
1485 /**
1486  * empty_req_queue() - This API empties the request queue of an endpoint
1487  * @ep:		Reference to the endpoint structure
1488  */
1489 static void empty_req_queue(struct pch_udc_ep *ep)
1490 {
1491 	struct pch_udc_request	*req;
1492 
1493 	ep->halted = 1;
1494 	while (!list_empty(&ep->queue)) {
1495 		req = list_entry(ep->queue.next, struct pch_udc_request, queue);
1496 		complete_req(ep, req, -ESHUTDOWN);	/* Remove from list */
1497 	}
1498 }
1499 
1500 /**
1501  * pch_udc_free_dma_chain() - This function frees the DMA chain created
1502  *				for the request
1503  * @dev:	Reference to the driver structure
1504  * @req:	Reference to the request to be freed
1508  */
1509 static void pch_udc_free_dma_chain(struct pch_udc_dev *dev,
1510 				   struct pch_udc_request *req)
1511 {
1512 	struct pch_udc_data_dma_desc *td = req->td_data;
1513 	unsigned i = req->chain_len;
1514 
1515 	dma_addr_t addr2;
1516 	dma_addr_t addr = (dma_addr_t)td->next;
1517 	td->next = 0x00;
1518 	for (; i > 1; --i) {
1519 		/* do not free first desc., will be done by free for request */
1520 		td = phys_to_virt(addr);
1521 		addr2 = (dma_addr_t)td->next;
1522 		dma_pool_free(dev->data_requests, td, addr);
1523 		addr = addr2;
1524 	}
1525 	req->chain_len = 1;
1526 }
1527 
1528 /**
1529  * pch_udc_create_dma_chain() - This function creates or reinitializes
1530  *				a DMA chain
1531  * @ep:		Reference to the endpoint structure
1532  * @req:	Reference to the request
1533  * @buf_len:	The buffer length
1534  * @gfp_flags:	Flags to be used while mapping the data buffer
1535  *
1536  * Return codes:
1537  *	0:		success,
1538  *	-ENOMEM:	dma_pool_alloc invocation fails
1539  */
1540 static int pch_udc_create_dma_chain(struct pch_udc_ep *ep,
1541 				    struct pch_udc_request *req,
1542 				    unsigned long buf_len,
1543 				    gfp_t gfp_flags)
1544 {
1545 	struct pch_udc_data_dma_desc *td = req->td_data, *last;
1546 	unsigned long bytes = req->req.length, i = 0;
1547 	dma_addr_t dma_addr;
1548 	unsigned len = 1;
1549 
1550 	if (req->chain_len > 1)
1551 		pch_udc_free_dma_chain(ep->dev, req);
1552 
1553 	if (req->dma == DMA_ADDR_INVALID)
1554 		td->dataptr = req->req.dma;
1555 	else
1556 		td->dataptr = req->dma;
1557 
1558 	td->status = PCH_UDC_BS_HST_BSY;
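	/*
	 * Split the request into descriptors of at most buf_len (the
	 * endpoint's maxpacket size) bytes each; additional descriptors are
	 * allocated from the data_requests pool and linked through ->next,
	 * all initially owned by the host (HST_BSY).
	 */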
1559 	for (; ; bytes -= buf_len, ++len) {
1560 		td->status = PCH_UDC_BS_HST_BSY | min(buf_len, bytes);
1561 		if (bytes <= buf_len)
1562 			break;
1563 		last = td;
1564 		td = dma_pool_alloc(ep->dev->data_requests, gfp_flags,
1565 				    &dma_addr);
1566 		if (!td)
1567 			goto nomem;
1568 		i += buf_len;
1569 		td->dataptr = req->td_data->dataptr + i;
1570 		last->next = dma_addr;
1571 	}
1572 
1573 	req->td_data_last = td;
1574 	td->status |= PCH_UDC_DMA_LAST;
1575 	td->next = req->td_data_phys;
1576 	req->chain_len = len;
1577 	return 0;
1578 
1579 nomem:
1580 	if (len > 1) {
1581 		req->chain_len = len;
1582 		pch_udc_free_dma_chain(ep->dev, req);
1583 	}
1584 	req->chain_len = 1;
1585 	return -ENOMEM;
1586 }
1587 
1588 /**
1589  * prepare_dma() - This function creates and initializes the DMA chain
1590  *			for the request
1591  * @ep:		Reference to the endpoint structure
1592  * @req:	Reference to the request
1593  * @gfp:	Flag to be used while mapping the data buffer
1594  *
1595  * Return codes:
1596  *	0:		Success
1597  *	Nonzero:	Linux error number on failure
1598  */
1599 static int prepare_dma(struct pch_udc_ep *ep, struct pch_udc_request *req,
1600 			  gfp_t gfp)
1601 {
1602 	int	retval;
1603 
1604 	/* Allocate and create a DMA chain */
1605 	retval = pch_udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
1606 	if (retval) {
1607 		pr_err("%s: could not create DMA chain:%d\n", __func__, retval);
1608 		return retval;
1609 	}
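	/*
	 * For IN transfers hand the first descriptor to the DMA engine right
	 * away; OUT descriptors are marked HST_RDY later, when the transfer
	 * is actually started in pch_udc_start_rxrequest().
	 */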
1610 	if (ep->in)
1611 		req->td_data->status = (req->td_data->status &
1612 				~PCH_UDC_BUFF_STS) | PCH_UDC_BS_HST_RDY;
1613 	return 0;
1614 }
1615 
1616 /**
1617  * process_zlp() - This function processes zero-length packets
1618  *			from the gadget driver
1619  * @ep:		Reference to the endpoint structure
1620  * @req:	Reference to the request
1621  */
1622 static void process_zlp(struct pch_udc_ep *ep, struct pch_udc_request *req)
1623 {
1624 	struct pch_udc_dev	*dev = ep->dev;
1625 
1626 	/* IN zlp's are handled by hardware */
1627 	complete_req(ep, req, 0);
1628 
1629 	/* if set_config or set_intf is waiting for ack by zlp
1630 	 * then set CSR_DONE
1631 	 */
1632 	if (dev->set_cfg_not_acked) {
1633 		pch_udc_set_csr_done(dev);
1634 		dev->set_cfg_not_acked = 0;
1635 	}
1636 	/* setup command is ACK'ed now by zlp */
1637 	if (!dev->stall && dev->waiting_zlp_ack) {
1638 		pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
1639 		dev->waiting_zlp_ack = 0;
1640 	}
1641 }
1642 
1643 /**
1644  * pch_udc_start_rxrequest() - This function starts the receive request.
1645  * @ep:		Reference to the endpoint structure
1646  * @req:	Reference to the request structure
1647  */
1648 static void pch_udc_start_rxrequest(struct pch_udc_ep *ep,
1649 					 struct pch_udc_request *req)
1650 {
1651 	struct pch_udc_data_dma_desc *td_data;
1652 
1653 	pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
1654 	td_data = req->td_data;
1655 	/* Set the status bits for all descriptors */
1656 	while (1) {
1657 		td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
1658 				    PCH_UDC_BS_HST_RDY;
1659 		if ((td_data->status & PCH_UDC_DMA_LAST) ==  PCH_UDC_DMA_LAST)
1660 			break;
1661 		td_data = phys_to_virt(td_data->next);
1662 	}
1663 	/* Write the descriptor pointer */
1664 	pch_udc_ep_set_ddptr(ep, req->td_data_phys);
1665 	req->dma_going = 1;
1666 	pch_udc_enable_ep_interrupts(ep->dev, UDC_EPINT_OUT_EP0 << ep->num);
1667 	pch_udc_set_dma(ep->dev, DMA_DIR_RX);
1668 	pch_udc_ep_clear_nak(ep);
1669 	pch_udc_ep_set_rrdy(ep);
1670 }
1671 
1672 /**
1673  * pch_udc_pcd_ep_enable() - This API enables the endpoint. It is called
1674  *				from the gadget driver
1675  * @usbep:	Reference to the USB endpoint structure
1676  * @desc:	Reference to the USB endpoint descriptor structure
1677  *
1678  * Return codes:
1679  *	0:		Success
1680  *	-EINVAL:
1681  *	-ESHUTDOWN:
1682  */
1683 static int pch_udc_pcd_ep_enable(struct usb_ep *usbep,
1684 				    const struct usb_endpoint_descriptor *desc)
1685 {
1686 	struct pch_udc_ep	*ep;
1687 	struct pch_udc_dev	*dev;
1688 	unsigned long		iflags;
1689 
1690 	if (!usbep || (usbep->name == ep0_string) || !desc ||
1691 	    (desc->bDescriptorType != USB_DT_ENDPOINT) || !desc->wMaxPacketSize)
1692 		return -EINVAL;
1693 
1694 	ep = container_of(usbep, struct pch_udc_ep, ep);
1695 	dev = ep->dev;
1696 	if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
1697 		return -ESHUTDOWN;
1698 	spin_lock_irqsave(&dev->lock, iflags);
1699 	ep->ep.desc = desc;
1700 	ep->halted = 0;
1701 	pch_udc_ep_enable(ep, &ep->dev->cfg_data, desc);
1702 	ep->ep.maxpacket = usb_endpoint_maxp(desc);
1703 	pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1704 	spin_unlock_irqrestore(&dev->lock, iflags);
1705 	return 0;
1706 }
1707 
1708 /**
1709  * pch_udc_pcd_ep_disable() - This API disables the endpoint and is called
1710  *				from the gadget driver
1711  * @usbep:	Reference to the USB endpoint structure
1712  *
1713  * Return codes:
1714  *	0:		Success
1715  *	-EINVAL:
1716  */
1717 static int pch_udc_pcd_ep_disable(struct usb_ep *usbep)
1718 {
1719 	struct pch_udc_ep	*ep;
1720 	unsigned long	iflags;
1721 
1722 	if (!usbep)
1723 		return -EINVAL;
1724 
1725 	ep = container_of(usbep, struct pch_udc_ep, ep);
1726 	if ((usbep->name == ep0_string) || !ep->ep.desc)
1727 		return -EINVAL;
1728 
1729 	spin_lock_irqsave(&ep->dev->lock, iflags);
1730 	empty_req_queue(ep);
1731 	ep->halted = 1;
1732 	pch_udc_ep_disable(ep);
1733 	pch_udc_disable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1734 	ep->ep.desc = NULL;
1735 	INIT_LIST_HEAD(&ep->queue);
1736 	spin_unlock_irqrestore(&ep->dev->lock, iflags);
1737 	return 0;
1738 }
1739 
1740 /**
1741  * pch_udc_alloc_request() - This function allocates a request structure.
1742  *				It is called by the gadget driver
1743  * @usbep:	Reference to the USB endpoint structure
1744  * @gfp:	Flag to be used while allocating memory
1745  *
1746  * Return codes:
1747  *	NULL:			Failure
1748  *	Allocated address:	Success
1749  */
1750 static struct usb_request *pch_udc_alloc_request(struct usb_ep *usbep,
1751 						  gfp_t gfp)
1752 {
1753 	struct pch_udc_request		*req;
1754 	struct pch_udc_ep		*ep;
1755 	struct pch_udc_data_dma_desc	*dma_desc;
1756 
1757 	if (!usbep)
1758 		return NULL;
1759 	ep = container_of(usbep, struct pch_udc_ep, ep);
1760 	req = kzalloc(sizeof *req, gfp);
1761 	if (!req)
1762 		return NULL;
1763 	req->req.dma = DMA_ADDR_INVALID;
1764 	req->dma = DMA_ADDR_INVALID;
1765 	INIT_LIST_HEAD(&req->queue);
1766 	if (!ep->dev->dma_addr)
1767 		return &req->req;
1768 	/* ep0 in requests are allocated from data pool here */
1769 	dma_desc = dma_pool_alloc(ep->dev->data_requests, gfp,
1770 				  &req->td_data_phys);
1771 	if (!dma_desc) {
1772 		kfree(req);
1773 		return NULL;
1774 	}
1775 	/* prevent from using desc. - set HOST BUSY */
1776 	dma_desc->status |= PCH_UDC_BS_HST_BSY;
1777 	dma_desc->dataptr = cpu_to_le32(DMA_ADDR_INVALID);
1778 	req->td_data = dma_desc;
1779 	req->td_data_last = dma_desc;
1780 	req->chain_len = 1;
1781 	return &req->req;
1782 }
1783 
1784 /**
1785  * pch_udc_free_request() - This function frees a request structure.
1786  *				It is called by the gadget driver
1787  * @usbep:	Reference to the USB endpoint structure
1788  * @usbreq:	Reference to the USB request
1789  */
1790 static void pch_udc_free_request(struct usb_ep *usbep,
1791 				  struct usb_request *usbreq)
1792 {
1793 	struct pch_udc_ep	*ep;
1794 	struct pch_udc_request	*req;
1795 	struct pch_udc_dev	*dev;
1796 
1797 	if (!usbep || !usbreq)
1798 		return;
1799 	ep = container_of(usbep, struct pch_udc_ep, ep);
1800 	req = container_of(usbreq, struct pch_udc_request, req);
1801 	dev = ep->dev;
1802 	if (!list_empty(&req->queue))
1803 		dev_err(&dev->pdev->dev, "%s: %s req=0x%p queue not empty\n",
1804 			__func__, usbep->name, req);
1805 	if (req->td_data != NULL) {
1806 		if (req->chain_len > 1)
1807 			pch_udc_free_dma_chain(ep->dev, req);
1808 		dma_pool_free(ep->dev->data_requests, req->td_data,
1809 			      req->td_data_phys);
1810 	}
1811 	kfree(req);
1812 }
1813 
1814 /**
1815  * pch_udc_pcd_queue() - This function queues a request packet. It is called
1816  *			by the gadget driver
1817  * @usbep:	Reference to the USB endpoint structure
1818  * @usbreq:	Reference to the USB request
1819  * @gfp:	Flag to be used while mapping the data buffer
1820  *
1821  * Return codes:
1822  *	0:			Success
1823  *	linux error number:	Failure
1824  */
1825 static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
1826 								 gfp_t gfp)
1827 {
1828 	int retval = 0;
1829 	struct pch_udc_ep	*ep;
1830 	struct pch_udc_dev	*dev;
1831 	struct pch_udc_request	*req;
1832 	unsigned long	iflags;
1833 
1834 	if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf)
1835 		return -EINVAL;
1836 	ep = container_of(usbep, struct pch_udc_ep, ep);
1837 	dev = ep->dev;
1838 	if (!ep->ep.desc && ep->num)
1839 		return -EINVAL;
1840 	req = container_of(usbreq, struct pch_udc_request, req);
1841 	if (!list_empty(&req->queue))
1842 		return -EINVAL;
1843 	if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
1844 		return -ESHUTDOWN;
1845 	spin_lock_irqsave(&dev->lock, iflags);
1846 	/* map the buffer for dma */
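	/* The DMA engine needs a 32-bit aligned buffer; unaligned requests
	 * are bounced through a driver-allocated copy below.
	 */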
1847 	if (usbreq->length &&
1848 	    ((usbreq->dma == DMA_ADDR_INVALID) || !usbreq->dma)) {
1849 		if (!((unsigned long)(usbreq->buf) & 0x03)) {
1850 			if (ep->in)
1851 				usbreq->dma = dma_map_single(&dev->pdev->dev,
1852 							     usbreq->buf,
1853 							     usbreq->length,
1854 							     DMA_TO_DEVICE);
1855 			else
1856 				usbreq->dma = dma_map_single(&dev->pdev->dev,
1857 							     usbreq->buf,
1858 							     usbreq->length,
1859 							     DMA_FROM_DEVICE);
1860 		} else {
1861 			req->buf = kzalloc(usbreq->length, GFP_ATOMIC);
1862 			if (!req->buf) {
1863 				retval = -ENOMEM;
1864 				goto probe_end;
1865 			}
1866 			if (ep->in) {
1867 				memcpy(req->buf, usbreq->buf, usbreq->length);
1868 				req->dma = dma_map_single(&dev->pdev->dev,
1869 							  req->buf,
1870 							  usbreq->length,
1871 							  DMA_TO_DEVICE);
1872 			} else
1873 				req->dma = dma_map_single(&dev->pdev->dev,
1874 							  req->buf,
1875 							  usbreq->length,
1876 							  DMA_FROM_DEVICE);
1877 		}
1878 		req->dma_mapped = 1;
1879 	}
1880 	if (usbreq->length > 0) {
1881 		retval = prepare_dma(ep, req, GFP_ATOMIC);
1882 		if (retval)
1883 			goto probe_end;
1884 	}
1885 	usbreq->actual = 0;
1886 	usbreq->status = -EINPROGRESS;
1887 	req->dma_done = 0;
1888 	if (list_empty(&ep->queue) && !ep->halted) {
1889 		/* no pending transfer, so start this req */
1890 		if (!usbreq->length) {
1891 			process_zlp(ep, req);
1892 			retval = 0;
1893 			goto probe_end;
1894 		}
1895 		if (!ep->in) {
1896 			pch_udc_start_rxrequest(ep, req);
1897 		} else {
1898 			/*
1899 			 * For IN transfers the descriptors will be
1900 			 * programmed and the P bit will be set when
1901 			 * we get an IN token.
1902 			 */
1903 			pch_udc_wait_ep_stall(ep);
1904 			pch_udc_ep_clear_nak(ep);
1905 			pch_udc_enable_ep_interrupts(ep->dev, (1 << ep->num));
1906 		}
1907 	}
1908 	/* Now add this request to the ep's pending requests */
1909 	if (req != NULL)
1910 		list_add_tail(&req->queue, &ep->queue);
1911 
1912 probe_end:
1913 	spin_unlock_irqrestore(&dev->lock, iflags);
1914 	return retval;
1915 }
1916 
1917 /**
1918  * pch_udc_pcd_dequeue() - This function de-queues a request packet.
1919  *				It is called by the gadget driver
1920  * @usbep:	Reference to the USB endpoint structure
1921  * @usbreq:	Reference to the USB request
1922  *
1923  * Return codes:
1924  *	0:			Success
1925  *	linux error number:	Failure
1926  */
1927 static int pch_udc_pcd_dequeue(struct usb_ep *usbep,
1928 				struct usb_request *usbreq)
1929 {
1930 	struct pch_udc_ep	*ep;
1931 	struct pch_udc_request	*req;
1932 	unsigned long		flags;
1933 	int ret = -EINVAL;
1934 
1935 	ep = container_of(usbep, struct pch_udc_ep, ep);
1936 	if (!usbep || !usbreq || (!ep->ep.desc && ep->num))
1937 		return ret;
1938 	req = container_of(usbreq, struct pch_udc_request, req);
1939 	spin_lock_irqsave(&ep->dev->lock, flags);
1940 	/* make sure it's still queued on this endpoint */
1941 	list_for_each_entry(req, &ep->queue, queue) {
1942 		if (&req->req == usbreq) {
1943 			pch_udc_ep_set_nak(ep);
1944 			if (!list_empty(&req->queue))
1945 				complete_req(ep, req, -ECONNRESET);
1946 			ret = 0;
1947 			break;
1948 		}
1949 	}
1950 	spin_unlock_irqrestore(&ep->dev->lock, flags);
1951 	return ret;
1952 }
1953 
1954 /**
1955  * pch_udc_pcd_set_halt() - This function sets or clears the endpoint halt
1956  *			    feature
1957  * @usbep:	Reference to the USB endpoint structure
1958  * @halt:	Specifies whether to set or clear the feature
1959  *
1960  * Return codes:
1961  *	0:			Success
1962  *	linux error number:	Failure
1963  */
1964 static int pch_udc_pcd_set_halt(struct usb_ep *usbep, int halt)
1965 {
1966 	struct pch_udc_ep	*ep;
1967 	unsigned long iflags;
1968 	int ret;
1969 
1970 	if (!usbep)
1971 		return -EINVAL;
1972 	ep = container_of(usbep, struct pch_udc_ep, ep);
1973 	if (!ep->ep.desc && !ep->num)
1974 		return -EINVAL;
1975 	if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
1976 		return -ESHUTDOWN;
1977 	spin_lock_irqsave(&udc_stall_spinlock, iflags);
1978 	if (list_empty(&ep->queue)) {
1979 		if (halt) {
1980 			if (ep->num == PCH_UDC_EP0)
1981 				ep->dev->stall = 1;
1982 			pch_udc_ep_set_stall(ep);
1983 			pch_udc_enable_ep_interrupts(
1984 				ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1985 		} else {
1986 			pch_udc_ep_clear_stall(ep);
1987 		}
1988 		ret = 0;
1989 	} else {
1990 		ret = -EAGAIN;
1991 	}
1992 	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
1993 	return ret;
1994 }
1995 
1996 /**
1997  * pch_udc_pcd_set_wedge() - This function sets the endpoint wedge
1998  *				feature (a halt the host cannot clear)
1999  * @usbep:	Reference to the USB endpoint structure
2000  *
2001  * Return codes:
2002  *	0:			Success
2003  *	linux error number:	Failure
2004  */
2005 static int pch_udc_pcd_set_wedge(struct usb_ep *usbep)
2006 {
2007 	struct pch_udc_ep	*ep;
2008 	unsigned long iflags;
2009 	int ret;
2010 
2011 	if (!usbep)
2012 		return -EINVAL;
2013 	ep = container_of(usbep, struct pch_udc_ep, ep);
2014 	if (!ep->ep.desc && !ep->num)
2015 		return -EINVAL;
2016 	if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
2017 		return -ESHUTDOWN;
2018 	spin_lock_irqsave(&udc_stall_spinlock, iflags);
2019 	if (!list_empty(&ep->queue)) {
2020 		ret = -EAGAIN;
2021 	} else {
2022 		if (ep->num == PCH_UDC_EP0)
2023 			ep->dev->stall = 1;
2024 		pch_udc_ep_set_stall(ep);
2025 		pch_udc_enable_ep_interrupts(ep->dev,
2026 					     PCH_UDC_EPINT(ep->in, ep->num));
2027 		ep->dev->prot_stall = 1;
2028 		ret = 0;
2029 	}
2030 	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
2031 	return ret;
2032 }
2033 
2034 /**
2035  * pch_udc_pcd_fifo_flush() - This function flushes the FIFO of the specified endpoint
2036  * @usbep:	Reference to the USB endpoint structure
2037  */
2038 static void pch_udc_pcd_fifo_flush(struct usb_ep *usbep)
2039 {
2040 	struct pch_udc_ep  *ep;
2041 
2042 	if (!usbep)
2043 		return;
2044 
2045 	ep = container_of(usbep, struct pch_udc_ep, ep);
2046 	if (ep->ep.desc || !ep->num)
2047 		pch_udc_ep_fifo_flush(ep, ep->in);
2048 }
2049 
2050 static const struct usb_ep_ops pch_udc_ep_ops = {
2051 	.enable		= pch_udc_pcd_ep_enable,
2052 	.disable	= pch_udc_pcd_ep_disable,
2053 	.alloc_request	= pch_udc_alloc_request,
2054 	.free_request	= pch_udc_free_request,
2055 	.queue		= pch_udc_pcd_queue,
2056 	.dequeue	= pch_udc_pcd_dequeue,
2057 	.set_halt	= pch_udc_pcd_set_halt,
2058 	.set_wedge	= pch_udc_pcd_set_wedge,
2059 	.fifo_status	= NULL,
2060 	.fifo_flush	= pch_udc_pcd_fifo_flush,
2061 };
2062 
2063 /**
2064  * pch_udc_init_setup_buff() - This function initializes the SETUP buffer
2065  * @td_stp:	Reference to the SETP buffer structure
2066  */
2067 static void pch_udc_init_setup_buff(struct pch_udc_stp_dma_desc *td_stp)
2068 {
2069 	static u32	pky_marker;
2070 
2071 	if (!td_stp)
2072 		return;
2073 	td_stp->reserved = ++pky_marker;
2074 	memset(&td_stp->request, 0xFF, sizeof td_stp->request);
2075 	td_stp->status = PCH_UDC_BS_HST_RDY;
2076 }
2077 
2078 /**
2079  * pch_udc_start_next_txrequest() - This function starts
2080  *					the next transmit request
2081  * @ep:	Reference to the endpoint structure
2082  */
2083 static void pch_udc_start_next_txrequest(struct pch_udc_ep *ep)
2084 {
2085 	struct pch_udc_request *req;
2086 	struct pch_udc_data_dma_desc *td_data;
2087 
2088 	if (pch_udc_read_ep_control(ep) & UDC_EPCTL_P)
2089 		return;
2090 
2091 	if (list_empty(&ep->queue))
2092 		return;
2093 
2094 	/* next request */
2095 	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2096 	if (req->dma_going)
2097 		return;
2098 	if (!req->td_data)
2099 		return;
2100 	pch_udc_wait_ep_stall(ep);
2101 	req->dma_going = 1;
2102 	pch_udc_ep_set_ddptr(ep, 0);
2103 	td_data = req->td_data;
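	/* Mark every descriptor in the chain host-ready before handing the
	 * chain to the DMA engine.
	 */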
2104 	while (1) {
2105 		td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
2106 				   PCH_UDC_BS_HST_RDY;
2107 		if ((td_data->status & PCH_UDC_DMA_LAST) == PCH_UDC_DMA_LAST)
2108 			break;
2109 		td_data = phys_to_virt(td_data->next);
2110 	}
2111 	pch_udc_ep_set_ddptr(ep, req->td_data_phys);
2112 	pch_udc_set_dma(ep->dev, DMA_DIR_TX);
2113 	pch_udc_ep_set_pd(ep);
2114 	pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
2115 	pch_udc_ep_clear_nak(ep);
2116 }
2117 
2118 /**
2119  * pch_udc_complete_transfer() - This function completes a transfer
2120  * @ep:		Reference to the endpoint structure
2121  */
2122 static void pch_udc_complete_transfer(struct pch_udc_ep *ep)
2123 {
2124 	struct pch_udc_request *req;
2125 	struct pch_udc_dev *dev = ep->dev;
2126 
2127 	if (list_empty(&ep->queue))
2128 		return;
2129 	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2130 	if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
2131 	    PCH_UDC_BS_DMA_DONE)
2132 		return;
2133 	if ((req->td_data_last->status & PCH_UDC_RXTX_STS) !=
2134 	     PCH_UDC_RTS_SUCC) {
2135 		dev_err(&dev->pdev->dev, "Invalid RXTX status (0x%08x) "
2136 			"epstatus=0x%08x\n",
2137 		       (req->td_data_last->status & PCH_UDC_RXTX_STS),
2138 		       (int)(ep->epsts));
2139 		return;
2140 	}
2141 
2142 	req->req.actual = req->req.length;
2143 	req->td_data_last->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
2144 	req->td_data->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
2145 	complete_req(ep, req, 0);
2146 	req->dma_going = 0;
2147 	if (!list_empty(&ep->queue)) {
2148 		pch_udc_wait_ep_stall(ep);
2149 		pch_udc_ep_clear_nak(ep);
2150 		pch_udc_enable_ep_interrupts(ep->dev,
2151 					     PCH_UDC_EPINT(ep->in, ep->num));
2152 	} else {
2153 		pch_udc_disable_ep_interrupts(ep->dev,
2154 					      PCH_UDC_EPINT(ep->in, ep->num));
2155 	}
2156 }
2157 
2158 /**
2159  * pch_udc_complete_receiver() - This function completes an OUT (receive) transfer
2160  * @ep:		Reference to the endpoint structure
2161  */
2162 static void pch_udc_complete_receiver(struct pch_udc_ep *ep)
2163 {
2164 	struct pch_udc_request *req;
2165 	struct pch_udc_dev *dev = ep->dev;
2166 	unsigned int count;
2167 	struct pch_udc_data_dma_desc *td;
2168 	dma_addr_t addr;
2169 
2170 	if (list_empty(&ep->queue))
2171 		return;
2172 	/* next request */
2173 	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2174 	pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
2175 	pch_udc_ep_set_ddptr(ep, 0);
2176 	if ((req->td_data_last->status & PCH_UDC_BUFF_STS) ==
2177 	    PCH_UDC_BS_DMA_DONE)
2178 		td = req->td_data_last;
2179 	else
2180 		td = req->td_data;
2181 
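	/* Walk the descriptor chain until the last completed descriptor to
	 * find the number of bytes actually received.
	 */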
2182 	while (1) {
2183 		if ((td->status & PCH_UDC_RXTX_STS) != PCH_UDC_RTS_SUCC) {
2184 			dev_err(&dev->pdev->dev, "Invalid RXTX status=0x%08x "
2185 				"epstatus=0x%08x\n",
2186 				(req->td_data->status & PCH_UDC_RXTX_STS),
2187 				(int)(ep->epsts));
2188 			return;
2189 		}
2190 		if ((td->status & PCH_UDC_BUFF_STS) == PCH_UDC_BS_DMA_DONE)
2191 			if (td->status & PCH_UDC_DMA_LAST) {
2192 				count = td->status & PCH_UDC_RXTX_BYTES;
2193 				break;
2194 			}
2195 		if (td == req->td_data_last) {
2196 			dev_err(&dev->pdev->dev, "Not complete RX descriptor");
2197 			return;
2198 		}
2199 		addr = (dma_addr_t)td->next;
2200 		td = phys_to_virt(addr);
2201 	}
2202 	/* on 64k packets the RXBYTES field is zero */
2203 	if (!count && (req->req.length == UDC_DMA_MAXPACKET))
2204 		count = UDC_DMA_MAXPACKET;
2205 	req->td_data->status |= PCH_UDC_DMA_LAST;
2206 	td->status |= PCH_UDC_BS_HST_BSY;
2207 
2208 	req->dma_going = 0;
2209 	req->req.actual = count;
2210 	complete_req(ep, req, 0);
2211 	/* If there are further queued requests, start the next one now */
2212 	if (!list_empty(&ep->queue)) {
2213 		req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2214 		pch_udc_start_rxrequest(ep, req);
2215 	}
2216 }
2217 
2218 /**
2219  * pch_udc_svc_data_in() - This function processes endpoint interrupts
2220  *				for IN endpoints
2221  * @dev:	Reference to the device structure
2222  * @ep_num:	Endpoint that generated the interrupt
2223  */
2224 static void pch_udc_svc_data_in(struct pch_udc_dev *dev, int ep_num)
2225 {
2226 	u32	epsts;
2227 	struct pch_udc_ep	*ep;
2228 
2229 	ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
2230 	epsts = ep->epsts;
2231 	ep->epsts = 0;
2232 
2233 	if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA  | UDC_EPSTS_HE |
2234 		       UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
2235 		       UDC_EPSTS_RSS | UDC_EPSTS_XFERDONE)))
2236 		return;
2237 	if ((epsts & UDC_EPSTS_BNA))
2238 		return;
2239 	if (epsts & UDC_EPSTS_HE)
2240 		return;
2241 	if (epsts & UDC_EPSTS_RSS) {
2242 		pch_udc_ep_set_stall(ep);
2243 		pch_udc_enable_ep_interrupts(ep->dev,
2244 					     PCH_UDC_EPINT(ep->in, ep->num));
2245 	}
2246 	if (epsts & UDC_EPSTS_RCS) {
2247 		if (!dev->prot_stall) {
2248 			pch_udc_ep_clear_stall(ep);
2249 		} else {
2250 			pch_udc_ep_set_stall(ep);
2251 			pch_udc_enable_ep_interrupts(ep->dev,
2252 						PCH_UDC_EPINT(ep->in, ep->num));
2253 		}
2254 	}
2255 	if (epsts & UDC_EPSTS_TDC)
2256 		pch_udc_complete_transfer(ep);
2257 	/* On IN interrupt, provide data if we have any */
2258 	if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_RSS) &&
2259 	    !(epsts & UDC_EPSTS_TDC) && !(epsts & UDC_EPSTS_TXEMPTY))
2260 		pch_udc_start_next_txrequest(ep);
2261 }
2262 
2263 /**
2264  * pch_udc_svc_data_out() - Handles interrupts from OUT endpoint
2265  * @dev:	Reference to the device structure
2266  * @ep_num:	Endpoint that generated the interrupt
2267  */
2268 static void pch_udc_svc_data_out(struct pch_udc_dev *dev, int ep_num)
2269 {
2270 	u32			epsts;
2271 	struct pch_udc_ep		*ep;
2272 	struct pch_udc_request		*req = NULL;
2273 
2274 	ep = &dev->ep[UDC_EPOUT_IDX(ep_num)];
2275 	epsts = ep->epsts;
2276 	ep->epsts = 0;
2277 
2278 	if ((epsts & UDC_EPSTS_BNA) && (!list_empty(&ep->queue))) {
2279 		/* next request */
2280 		req = list_entry(ep->queue.next, struct pch_udc_request,
2281 				 queue);
2282 		if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
2283 		     PCH_UDC_BS_DMA_DONE) {
2284 			if (!req->dma_going)
2285 				pch_udc_start_rxrequest(ep, req);
2286 			return;
2287 		}
2288 	}
2289 	if (epsts & UDC_EPSTS_HE)
2290 		return;
2291 	if (epsts & UDC_EPSTS_RSS) {
2292 		pch_udc_ep_set_stall(ep);
2293 		pch_udc_enable_ep_interrupts(ep->dev,
2294 					     PCH_UDC_EPINT(ep->in, ep->num));
2295 	}
2296 	if (epsts & UDC_EPSTS_RCS) {
2297 		if (!dev->prot_stall) {
2298 			pch_udc_ep_clear_stall(ep);
2299 		} else {
2300 			pch_udc_ep_set_stall(ep);
2301 			pch_udc_enable_ep_interrupts(ep->dev,
2302 						PCH_UDC_EPINT(ep->in, ep->num));
2303 		}
2304 	}
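	/* OUT data received: stall if a protocol stall is pending, otherwise
	 * complete the receive transfer.
	 */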
2305 	if (((epsts & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2306 	    UDC_EPSTS_OUT_DATA) {
2307 		if (ep->dev->prot_stall == 1) {
2308 			pch_udc_ep_set_stall(ep);
2309 			pch_udc_enable_ep_interrupts(ep->dev,
2310 						PCH_UDC_EPINT(ep->in, ep->num));
2311 		} else {
2312 			pch_udc_complete_receiver(ep);
2313 		}
2314 	}
2315 	if (list_empty(&ep->queue))
2316 		pch_udc_set_dma(dev, DMA_DIR_RX);
2317 }
2318 
2319 /**
2320  * pch_udc_svc_control_in() - Handle Control IN endpoint interrupts
2321  * @dev:	Reference to the device structure
2322  */
2323 static void pch_udc_svc_control_in(struct pch_udc_dev *dev)
2324 {
2325 	u32	epsts;
2326 	struct pch_udc_ep	*ep;
2327 	struct pch_udc_ep	*ep_out;
2328 
2329 	ep = &dev->ep[UDC_EP0IN_IDX];
2330 	ep_out = &dev->ep[UDC_EP0OUT_IDX];
2331 	epsts = ep->epsts;
2332 	ep->epsts = 0;
2333 
2334 	if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA | UDC_EPSTS_HE |
2335 		       UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
2336 		       UDC_EPSTS_XFERDONE)))
2337 		return;
2338 	if ((epsts & UDC_EPSTS_BNA))
2339 		return;
2340 	if (epsts & UDC_EPSTS_HE)
2341 		return;
2342 	if ((epsts & UDC_EPSTS_TDC) && (!dev->stall)) {
2343 		pch_udc_complete_transfer(ep);
2344 		pch_udc_clear_dma(dev, DMA_DIR_RX);
2345 		ep_out->td_data->status = (ep_out->td_data->status &
2346 					~PCH_UDC_BUFF_STS) |
2347 					PCH_UDC_BS_HST_RDY;
2348 		pch_udc_ep_clear_nak(ep_out);
2349 		pch_udc_set_dma(dev, DMA_DIR_RX);
2350 		pch_udc_ep_set_rrdy(ep_out);
2351 	}
2352 	/* On IN interrupt, provide data if we have any */
2353 	if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_TDC) &&
2354 	     !(epsts & UDC_EPSTS_TXEMPTY))
2355 		pch_udc_start_next_txrequest(ep);
2356 }
2357 
2358 /**
2359  * pch_udc_svc_control_out() - Routine that handle Control
2360  *					OUT endpoint interrupts
2361  * @dev:	Reference to the device structure
2362  */
2363 static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
2364 	__releases(&dev->lock)
2365 	__acquires(&dev->lock)
2366 {
2367 	u32	stat;
2368 	int setup_supported;
2369 	struct pch_udc_ep	*ep;
2370 
2371 	ep = &dev->ep[UDC_EP0OUT_IDX];
2372 	stat = ep->epsts;
2373 	ep->epsts = 0;
2374 
2375 	/* If setup data */
2376 	if (((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2377 	    UDC_EPSTS_OUT_SETUP) {
2378 		dev->stall = 0;
2379 		dev->ep[UDC_EP0IN_IDX].halted = 0;
2380 		dev->ep[UDC_EP0OUT_IDX].halted = 0;
2381 		dev->setup_data = ep->td_stp->request;
2382 		pch_udc_init_setup_buff(ep->td_stp);
2383 		pch_udc_clear_dma(dev, DMA_DIR_RX);
2384 		pch_udc_ep_fifo_flush(&(dev->ep[UDC_EP0IN_IDX]),
2385 				      dev->ep[UDC_EP0IN_IDX].in);
2386 		if ((dev->setup_data.bRequestType & USB_DIR_IN))
2387 			dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
2388 		else /* OUT */
2389 			dev->gadget.ep0 = &ep->ep;
2390 		spin_lock(&dev->lock);
2391 		/* If Mass storage Reset */
2392 		if ((dev->setup_data.bRequestType == 0x21) &&
2393 		    (dev->setup_data.bRequest == 0xFF))
2394 			dev->prot_stall = 0;
2395 		/* call gadget with setup data received */
2396 		setup_supported = dev->driver->setup(&dev->gadget,
2397 						     &dev->setup_data);
2398 		spin_unlock(&dev->lock);
2399 
2400 		if (dev->setup_data.bRequestType & USB_DIR_IN) {
2401 			ep->td_data->status = (ep->td_data->status &
2402 						~PCH_UDC_BUFF_STS) |
2403 						PCH_UDC_BS_HST_RDY;
2404 			pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
2405 		}
2406 		/* ep0 in returns data on IN phase */
2407 		if (setup_supported >= 0 && setup_supported <
2408 					    UDC_EP0IN_MAX_PKT_SIZE) {
2409 			pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
2410 			/* Gadget would have queued a request when
2411 			 * we called the setup */
2412 			if (!(dev->setup_data.bRequestType & USB_DIR_IN)) {
2413 				pch_udc_set_dma(dev, DMA_DIR_RX);
2414 				pch_udc_ep_clear_nak(ep);
2415 			}
2416 		} else if (setup_supported < 0) {
2417 			/* if unsupported request, then stall */
2418 			pch_udc_ep_set_stall(&(dev->ep[UDC_EP0IN_IDX]));
2419 			pch_udc_enable_ep_interrupts(ep->dev,
2420 						PCH_UDC_EPINT(ep->in, ep->num));
2421 			dev->stall = 0;
2422 			pch_udc_set_dma(dev, DMA_DIR_RX);
2423 		} else {
2424 			dev->waiting_zlp_ack = 1;
2425 		}
2426 	} else if ((((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2427 		     UDC_EPSTS_OUT_DATA) && !dev->stall) {
2428 		pch_udc_clear_dma(dev, DMA_DIR_RX);
2429 		pch_udc_ep_set_ddptr(ep, 0);
2430 		if (!list_empty(&ep->queue)) {
2431 			ep->epsts = stat;
2432 			pch_udc_svc_data_out(dev, PCH_UDC_EP0);
2433 		}
2434 		pch_udc_set_dma(dev, DMA_DIR_RX);
2435 	}
2436 	pch_udc_ep_set_rrdy(ep);
2437 }
2438 
2439 
2440 /**
2441  * pch_udc_postsvc_epinters() - This function enables endpoint interrupts
2442  *				and clears NAK status
2443  * @dev:	Reference to the device structure
2444  * @ep_num:	End point number
2445  */
2446 static void pch_udc_postsvc_epinters(struct pch_udc_dev *dev, int ep_num)
2447 {
2448 	struct pch_udc_ep	*ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
2449 	if (list_empty(&ep->queue))
2450 		return;
2451 	pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
2452 	pch_udc_ep_clear_nak(ep);
2453 }
2454 
2455 /**
2456  * pch_udc_read_all_epstatus() - This function reads the status of all endpoints
2457  * @dev:	Reference to the device structure
2458  * @ep_intr:	Status of endpoint interrupt
2459  */
2460 static void pch_udc_read_all_epstatus(struct pch_udc_dev *dev, u32 ep_intr)
2461 {
2462 	int i;
2463 	struct pch_udc_ep	*ep;
2464 
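	/* The low 16 bits of ep_intr flag IN endpoints, the high 16 bits
	 * flag OUT endpoints.
	 */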
2465 	for (i = 0; i < PCH_UDC_USED_EP_NUM; i++) {
2466 		/* IN */
2467 		if (ep_intr & (0x1 << i)) {
2468 			ep = &dev->ep[UDC_EPIN_IDX(i)];
2469 			ep->epsts = pch_udc_read_ep_status(ep);
2470 			pch_udc_clear_ep_status(ep, ep->epsts);
2471 		}
2472 		/* OUT */
2473 		if (ep_intr & (0x10000 << i)) {
2474 			ep = &dev->ep[UDC_EPOUT_IDX(i)];
2475 			ep->epsts = pch_udc_read_ep_status(ep);
2476 			pch_udc_clear_ep_status(ep, ep->epsts);
2477 		}
2478 	}
2479 }
2480 
2481 /**
2482  * pch_udc_activate_control_ep() - This function enables the control endpoints
2483  *					for traffic after a reset
2484  * @dev:	Reference to the device structure
2485  */
2486 static void pch_udc_activate_control_ep(struct pch_udc_dev *dev)
2487 {
2488 	struct pch_udc_ep	*ep;
2489 	u32 val;
2490 
2491 	/* Setup the IN endpoint */
2492 	ep = &dev->ep[UDC_EP0IN_IDX];
2493 	pch_udc_clear_ep_control(ep);
2494 	pch_udc_ep_fifo_flush(ep, ep->in);
2495 	pch_udc_ep_set_bufsz(ep, UDC_EP0IN_BUFF_SIZE, ep->in);
2496 	pch_udc_ep_set_maxpkt(ep, UDC_EP0IN_MAX_PKT_SIZE);
2497 	/* Initialize the IN EP Descriptor */
2498 	ep->td_data      = NULL;
2499 	ep->td_stp       = NULL;
2500 	ep->td_data_phys = 0;
2501 	ep->td_stp_phys  = 0;
2502 
2503 	/* Setup the OUT endpoint */
2504 	ep = &dev->ep[UDC_EP0OUT_IDX];
2505 	pch_udc_clear_ep_control(ep);
2506 	pch_udc_ep_fifo_flush(ep, ep->in);
2507 	pch_udc_ep_set_bufsz(ep, UDC_EP0OUT_BUFF_SIZE, ep->in);
2508 	pch_udc_ep_set_maxpkt(ep, UDC_EP0OUT_MAX_PKT_SIZE);
2509 	val = UDC_EP0OUT_MAX_PKT_SIZE << UDC_CSR_NE_MAX_PKT_SHIFT;
2510 	pch_udc_write_csr(ep->dev, val, UDC_EP0OUT_IDX);
2511 
2512 	/* Initialize the SETUP buffer */
2513 	pch_udc_init_setup_buff(ep->td_stp);
2514 	/* Write the pointer address of dma descriptor */
2515 	pch_udc_ep_set_subptr(ep, ep->td_stp_phys);
2516 	/* Write the pointer address of Setup descriptor */
2517 	pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
2518 
2519 	/* Initialize the dma descriptor */
2520 	ep->td_data->status  = PCH_UDC_DMA_LAST;
2521 	ep->td_data->dataptr = dev->dma_addr;
2522 	ep->td_data->next    = ep->td_data_phys;
2523 
2524 	pch_udc_ep_clear_nak(ep);
2525 }
2526 
2527 
2528 /**
2529  * pch_udc_svc_ur_interrupt() - This function handles a USB reset interrupt
2530  * @dev:	Reference to driver structure
2531  */
2532 static void pch_udc_svc_ur_interrupt(struct pch_udc_dev *dev)
2533 {
2534 	struct pch_udc_ep	*ep;
2535 	int i;
2536 
2537 	pch_udc_clear_dma(dev, DMA_DIR_TX);
2538 	pch_udc_clear_dma(dev, DMA_DIR_RX);
2539 	/* Mask all endpoint interrupts */
2540 	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
2541 	/* clear all endpoint interrupts */
2542 	pch_udc_write_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
2543 
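	/* Reset every endpoint: clear its status and control registers and
	 * its descriptor pointer.
	 */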
2544 	for (i = 0; i < PCH_UDC_EP_NUM; i++) {
2545 		ep = &dev->ep[i];
2546 		pch_udc_clear_ep_status(ep, UDC_EPSTS_ALL_CLR_MASK);
2547 		pch_udc_clear_ep_control(ep);
2548 		pch_udc_ep_set_ddptr(ep, 0);
2549 		pch_udc_write_csr(ep->dev, 0x00, i);
2550 	}
2551 	dev->stall = 0;
2552 	dev->prot_stall = 0;
2553 	dev->waiting_zlp_ack = 0;
2554 	dev->set_cfg_not_acked = 0;
2555 
2556 	/* disable each endpoint and empty its request queue */
2557 	for (i = 0; i < (PCH_UDC_USED_EP_NUM*2); i++) {
2558 		ep = &dev->ep[i];
2559 		pch_udc_ep_set_nak(ep);
2560 		pch_udc_ep_fifo_flush(ep, ep->in);
2561 		/* Complete request queue */
2562 		empty_req_queue(ep);
2563 	}
2564 	if (dev->driver) {
2565 		spin_unlock(&dev->lock);
2566 		usb_gadget_udc_reset(&dev->gadget, dev->driver);
2567 		spin_lock(&dev->lock);
2568 	}
2569 }
2570 
2571 /**
2572  * pch_udc_svc_enum_interrupt() - This function handles a USB speed enumeration
2573  *				done interrupt
2574  * @dev:	Reference to driver structure
2575  */
2576 static void pch_udc_svc_enum_interrupt(struct pch_udc_dev *dev)
2577 {
2578 	u32 dev_stat, dev_speed;
2579 	u32 speed = USB_SPEED_FULL;
2580 
2581 	dev_stat = pch_udc_read_device_status(dev);
2582 	dev_speed = (dev_stat & UDC_DEVSTS_ENUM_SPEED_MASK) >>
2583 						 UDC_DEVSTS_ENUM_SPEED_SHIFT;
2584 	switch (dev_speed) {
2585 	case UDC_DEVSTS_ENUM_SPEED_HIGH:
2586 		speed = USB_SPEED_HIGH;
2587 		break;
2588 	case  UDC_DEVSTS_ENUM_SPEED_FULL:
2589 		speed = USB_SPEED_FULL;
2590 		break;
2591 	case  UDC_DEVSTS_ENUM_SPEED_LOW:
2592 		speed = USB_SPEED_LOW;
2593 		break;
2594 	default:
2595 		BUG();
2596 	}
2597 	dev->gadget.speed = speed;
2598 	pch_udc_activate_control_ep(dev);
2599 	pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 | UDC_EPINT_OUT_EP0);
2600 	pch_udc_set_dma(dev, DMA_DIR_TX);
2601 	pch_udc_set_dma(dev, DMA_DIR_RX);
2602 	pch_udc_ep_set_rrdy(&(dev->ep[UDC_EP0OUT_IDX]));
2603 
2604 	/* enable device interrupts */
2605 	pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
2606 					UDC_DEVINT_ES | UDC_DEVINT_ENUM |
2607 					UDC_DEVINT_SI | UDC_DEVINT_SC);
2608 }
2609 
2610 /**
2611  * pch_udc_svc_intf_interrupt() - This function handles a set interface
2612  *				  interrupt
2613  * @dev:	Reference to driver structure
2614  */
2615 static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev)
2616 {
2617 	u32 reg, dev_stat = 0;
2618 	int i;
2619 
2620 	dev_stat = pch_udc_read_device_status(dev);
2621 	dev->cfg_data.cur_intf = (dev_stat & UDC_DEVSTS_INTF_MASK) >>
2622 							 UDC_DEVSTS_INTF_SHIFT;
2623 	dev->cfg_data.cur_alt = (dev_stat & UDC_DEVSTS_ALT_MASK) >>
2624 							 UDC_DEVSTS_ALT_SHIFT;
2625 	dev->set_cfg_not_acked = 1;
2626 	/* Construct the usb request for gadget driver and inform it */
2627 	memset(&dev->setup_data, 0, sizeof dev->setup_data);
2628 	dev->setup_data.bRequest = USB_REQ_SET_INTERFACE;
2629 	dev->setup_data.bRequestType = USB_RECIP_INTERFACE;
2630 	dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_alt);
2631 	dev->setup_data.wIndex = cpu_to_le16(dev->cfg_data.cur_intf);
2632 	/* program the Endpoint Cfg registers */
2633 	/* Only one endpoint cfg register */
2634 	reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
2635 	reg = (reg & ~UDC_CSR_NE_INTF_MASK) |
2636 	      (dev->cfg_data.cur_intf << UDC_CSR_NE_INTF_SHIFT);
2637 	reg = (reg & ~UDC_CSR_NE_ALT_MASK) |
2638 	      (dev->cfg_data.cur_alt << UDC_CSR_NE_ALT_SHIFT);
2639 	pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
2640 	for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
2641 		/* clear stall bits */
2642 		pch_udc_ep_clear_stall(&(dev->ep[i]));
2643 		dev->ep[i].halted = 0;
2644 	}
2645 	dev->stall = 0;
2646 	spin_unlock(&dev->lock);
2647 	dev->driver->setup(&dev->gadget, &dev->setup_data);
2648 	spin_lock(&dev->lock);
2649 }
2650 
2651 /**
2652  * pch_udc_svc_cfg_interrupt() - This function handles a set configuration
2653  *				interrupt
2654  * @dev:	Reference to driver structure
2655  */
2656 static void pch_udc_svc_cfg_interrupt(struct pch_udc_dev *dev)
2657 {
2658 	int i;
2659 	u32 reg, dev_stat = 0;
2660 
2661 	dev_stat = pch_udc_read_device_status(dev);
2662 	dev->set_cfg_not_acked = 1;
2663 	dev->cfg_data.cur_cfg = (dev_stat & UDC_DEVSTS_CFG_MASK) >>
2664 				UDC_DEVSTS_CFG_SHIFT;
2665 	/* make usb request for gadget driver */
2666 	memset(&dev->setup_data, 0, sizeof dev->setup_data);
2667 	dev->setup_data.bRequest = USB_REQ_SET_CONFIGURATION;
2668 	dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_cfg);
2669 	/* program the NE registers */
2670 	/* Only one end point cfg register */
2671 	reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
2672 	reg = (reg & ~UDC_CSR_NE_CFG_MASK) |
2673 	      (dev->cfg_data.cur_cfg << UDC_CSR_NE_CFG_SHIFT);
2674 	pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
2675 	for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
2676 		/* clear stall bits */
2677 		pch_udc_ep_clear_stall(&(dev->ep[i]));
2678 		dev->ep[i].halted = 0;
2679 	}
2680 	dev->stall = 0;
2681 
2682 	/* call gadget zero with setup data received */
2683 	spin_unlock(&dev->lock);
2684 	dev->driver->setup(&dev->gadget, &dev->setup_data);
2685 	spin_lock(&dev->lock);
2686 }
2687 
2688 /**
2689  * pch_udc_dev_isr() - This function services device interrupts
2690  *			by invoking appropriate routines.
2691  * @dev:	Reference to the device structure
2692  * @dev_intr:	The Device interrupt status.
2693  */
2694 static void pch_udc_dev_isr(struct pch_udc_dev *dev, u32 dev_intr)
2695 {
2696 	int vbus;
2697 
2698 	/* USB Reset Interrupt */
2699 	if (dev_intr & UDC_DEVINT_UR) {
2700 		pch_udc_svc_ur_interrupt(dev);
2701 		dev_dbg(&dev->pdev->dev, "USB_RESET\n");
2702 	}
2703 	/* Enumeration Done Interrupt */
2704 	if (dev_intr & UDC_DEVINT_ENUM) {
2705 		pch_udc_svc_enum_interrupt(dev);
2706 		dev_dbg(&dev->pdev->dev, "USB_ENUM\n");
2707 	}
2708 	/* Set Interface Interrupt */
2709 	if (dev_intr & UDC_DEVINT_SI)
2710 		pch_udc_svc_intf_interrupt(dev);
2711 	/* Set Config Interrupt */
2712 	if (dev_intr & UDC_DEVINT_SC)
2713 		pch_udc_svc_cfg_interrupt(dev);
2714 	/* USB Suspend interrupt */
2715 	if (dev_intr & UDC_DEVINT_US) {
2716 		if (dev->driver
2717 			&& dev->driver->suspend) {
2718 			spin_unlock(&dev->lock);
2719 			dev->driver->suspend(&dev->gadget);
2720 			spin_lock(&dev->lock);
2721 		}
2722 
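		/* Check VBUS so a cable disconnect is not treated as a
		 * plain suspend.
		 */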
2723 		vbus = pch_vbus_gpio_get_value(dev);
2724 		if ((dev->vbus_session == 0)
2725 			&& (vbus != 1)) {
2726 			if (dev->driver && dev->driver->disconnect) {
2727 				spin_unlock(&dev->lock);
2728 				dev->driver->disconnect(&dev->gadget);
2729 				spin_lock(&dev->lock);
2730 			}
2731 			pch_udc_reconnect(dev);
2732 		} else if ((dev->vbus_session == 0)
2733 			&& (vbus == 1)
2734 			&& !dev->vbus_gpio.intr)
2735 			schedule_work(&dev->vbus_gpio.irq_work_fall);
2736 
2737 		dev_dbg(&dev->pdev->dev, "USB_SUSPEND\n");
2738 	}
2739 	/* Clear the SOF interrupt, if enabled */
2740 	if (dev_intr & UDC_DEVINT_SOF)
2741 		dev_dbg(&dev->pdev->dev, "SOF\n");
2742 	/* ES interrupt, IDLE > 3ms on the USB */
2743 	if (dev_intr & UDC_DEVINT_ES)
2744 		dev_dbg(&dev->pdev->dev, "ES\n");
2745 	/* RWKP interrupt */
2746 	if (dev_intr & UDC_DEVINT_RWKP)
2747 		dev_dbg(&dev->pdev->dev, "RWKP\n");
2748 }
2749 
2750 /**
2751  * pch_udc_isr() - This function handles interrupts from the PCH USB Device
2752  * @irq:	Interrupt request number
2753  * @pdev:	Reference to the device structure
2754  */
2755 static irqreturn_t pch_udc_isr(int irq, void *pdev)
2756 {
2757 	struct pch_udc_dev *dev = (struct pch_udc_dev *) pdev;
2758 	u32 dev_intr, ep_intr;
2759 	int i;
2760 
2761 	dev_intr = pch_udc_read_device_interrupts(dev);
2762 	ep_intr = pch_udc_read_ep_interrupts(dev);
2763 
2764 	/* Detect a hung controller (e.g. on hot plug): all registers read back the same value. */
2765 	if (dev_intr == ep_intr)
2766 		if (dev_intr == pch_udc_readl(dev, UDC_DEVCFG_ADDR)) {
2767 			dev_dbg(&dev->pdev->dev, "UDC: Hung up\n");
2768 			/* The controller is reset */
2769 			pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
2770 			return IRQ_HANDLED;
2771 		}
2772 	if (dev_intr)
2773 		/* Clear device interrupts */
2774 		pch_udc_write_device_interrupts(dev, dev_intr);
2775 	if (ep_intr)
2776 		/* Clear ep interrupts */
2777 		pch_udc_write_ep_interrupts(dev, ep_intr);
2778 	if (!dev_intr && !ep_intr)
2779 		return IRQ_NONE;
2780 	spin_lock(&dev->lock);
2781 	if (dev_intr)
2782 		pch_udc_dev_isr(dev, dev_intr);
2783 	if (ep_intr) {
2784 		pch_udc_read_all_epstatus(dev, ep_intr);
2785 		/* Process Control In interrupts, if present */
2786 		if (ep_intr & UDC_EPINT_IN_EP0) {
2787 			pch_udc_svc_control_in(dev);
2788 			pch_udc_postsvc_epinters(dev, 0);
2789 		}
2790 		/* Process Control Out interrupts, if present */
2791 		if (ep_intr & UDC_EPINT_OUT_EP0)
2792 			pch_udc_svc_control_out(dev);
2793 		/* Process data in end point interrupts */
2794 		for (i = 1; i < PCH_UDC_USED_EP_NUM; i++) {
2795 			if (ep_intr & (1 <<  i)) {
2796 				pch_udc_svc_data_in(dev, i);
2797 				pch_udc_postsvc_epinters(dev, i);
2798 			}
2799 		}
2800 		/* Process data out end point interrupts */
2801 		for (i = UDC_EPINT_OUT_SHIFT + 1; i < (UDC_EPINT_OUT_SHIFT +
2802 						 PCH_UDC_USED_EP_NUM); i++)
2803 			if (ep_intr & (1 <<  i))
2804 				pch_udc_svc_data_out(dev, i -
2805 							 UDC_EPINT_OUT_SHIFT);
2806 	}
2807 	spin_unlock(&dev->lock);
2808 	return IRQ_HANDLED;
2809 }
2810 
2811 /**
2812  * pch_udc_setup_ep0() - This function enables the control endpoint for traffic
2813  * @dev:	Reference to the device structure
2814  */
2815 static void pch_udc_setup_ep0(struct pch_udc_dev *dev)
2816 {
2817 	/* enable ep0 interrupts */
2818 	pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 |
2819 						UDC_EPINT_OUT_EP0);
2820 	/* enable device interrupts */
2821 	pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
2822 				       UDC_DEVINT_ES | UDC_DEVINT_ENUM |
2823 				       UDC_DEVINT_SI | UDC_DEVINT_SC);
2824 }
2825 
2826 /**
2827  * pch_udc_pcd_reinit() - This API initializes the endpoint structures
2828  * @dev:	Reference to the driver structure
2829  */
2830 static void pch_udc_pcd_reinit(struct pch_udc_dev *dev)
2831 {
2832 	const char *const ep_string[] = {
2833 		ep0_string, "ep0out", "ep1in", "ep1out", "ep2in", "ep2out",
2834 		"ep3in", "ep3out", "ep4in", "ep4out", "ep5in", "ep5out",
2835 		"ep6in", "ep6out", "ep7in", "ep7out", "ep8in", "ep8out",
2836 		"ep9in", "ep9out", "ep10in", "ep10out", "ep11in", "ep11out",
2837 		"ep12in", "ep12out", "ep13in", "ep13out", "ep14in", "ep14out",
2838 		"ep15in", "ep15out",
2839 	};
2840 	int i;
2841 
2842 	dev->gadget.speed = USB_SPEED_UNKNOWN;
2843 	INIT_LIST_HEAD(&dev->gadget.ep_list);
2844 
2845 	/* Initialize the endpoints structures */
2846 	memset(dev->ep, 0, sizeof dev->ep);
2847 	for (i = 0; i < PCH_UDC_EP_NUM; i++) {
2848 		struct pch_udc_ep *ep = &dev->ep[i];
2849 		ep->dev = dev;
2850 		ep->halted = 1;
2851 		ep->num = i / 2;
2852 		ep->in = ~i & 1;
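		/* even indices are IN endpoints, odd indices are OUT */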
2853 		ep->ep.name = ep_string[i];
2854 		ep->ep.ops = &pch_udc_ep_ops;
2855 		if (ep->in) {
2856 			ep->offset_addr = ep->num * UDC_EP_REG_SHIFT;
2857 			ep->ep.caps.dir_in = true;
2858 		} else {
2859 			ep->offset_addr = (UDC_EPINT_OUT_SHIFT + ep->num) *
2860 					  UDC_EP_REG_SHIFT;
2861 			ep->ep.caps.dir_out = true;
2862 		}
2863 		if (i == UDC_EP0IN_IDX || i == UDC_EP0OUT_IDX) {
2864 			ep->ep.caps.type_control = true;
2865 		} else {
2866 			ep->ep.caps.type_iso = true;
2867 			ep->ep.caps.type_bulk = true;
2868 			ep->ep.caps.type_int = true;
2869 		}
2870 		/* need to set ep->ep.maxpacket and set Default Configuration? */
2871 		usb_ep_set_maxpacket_limit(&ep->ep, UDC_BULK_MAX_PKT_SIZE);
2872 		list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
2873 		INIT_LIST_HEAD(&ep->queue);
2874 	}
2875 	usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IDX].ep, UDC_EP0IN_MAX_PKT_SIZE);
2876 	usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IDX].ep, UDC_EP0OUT_MAX_PKT_SIZE);
2877 
2878 	/* remove ep0 in and out from the list; they have their own pointer */
2879 	list_del_init(&dev->ep[UDC_EP0IN_IDX].ep.ep_list);
2880 	list_del_init(&dev->ep[UDC_EP0OUT_IDX].ep.ep_list);
2881 
2882 	dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
2883 	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
2884 }
2885 
2886 /**
2887  * pch_udc_pcd_init() - This API initializes the driver structure
2888  * @dev:	Reference to the driver structure
2889  *
2890  * Return codes:
2891  *	0: Success
2892  */
2893 static int pch_udc_pcd_init(struct pch_udc_dev *dev)
2894 {
2895 	pch_udc_init(dev);
2896 	pch_udc_pcd_reinit(dev);
2897 	pch_vbus_gpio_init(dev, vbus_gpio_port);
2898 	return 0;
2899 }
2900 
2901 /**
2902  * init_dma_pools() - create dma pools during initialization
2903  * @dev:	reference to struct pci_dev
2904  */
2905 static int init_dma_pools(struct pch_udc_dev *dev)
2906 {
2907 	struct pch_udc_stp_dma_desc	*td_stp;
2908 	struct pch_udc_data_dma_desc	*td_data;
2909 	void				*ep0out_buf;
2910 
2911 	/* DMA setup */
2912 	dev->data_requests = dma_pool_create("data_requests", &dev->pdev->dev,
2913 		sizeof(struct pch_udc_data_dma_desc), 0, 0);
2914 	if (!dev->data_requests) {
2915 		dev_err(&dev->pdev->dev, "%s: can't get request data pool\n",
2916 			__func__);
2917 		return -ENOMEM;
2918 	}
2919 
2920 	/* dma desc for setup data */
2921 	dev->stp_requests = dma_pool_create("setup requests", &dev->pdev->dev,
2922 		sizeof(struct pch_udc_stp_dma_desc), 0, 0);
2923 	if (!dev->stp_requests) {
2924 		dev_err(&dev->pdev->dev, "%s: can't get setup request pool\n",
2925 			__func__);
2926 		return -ENOMEM;
2927 	}
2928 	/* setup */
2929 	td_stp = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
2930 				&dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
2931 	if (!td_stp) {
2932 		dev_err(&dev->pdev->dev,
2933 			"%s: can't allocate setup dma descriptor\n", __func__);
2934 		return -ENOMEM;
2935 	}
2936 	dev->ep[UDC_EP0OUT_IDX].td_stp = td_stp;
2937 
2938 	/* data: 0 packets !? */
2939 	td_data = dma_pool_alloc(dev->data_requests, GFP_KERNEL,
2940 				&dev->ep[UDC_EP0OUT_IDX].td_data_phys);
2941 	if (!td_data) {
2942 		dev_err(&dev->pdev->dev,
2943 			"%s: can't allocate data dma descriptor\n", __func__);
2944 		return -ENOMEM;
2945 	}
2946 	dev->ep[UDC_EP0OUT_IDX].td_data = td_data;
2947 	dev->ep[UDC_EP0IN_IDX].td_stp = NULL;
2948 	dev->ep[UDC_EP0IN_IDX].td_stp_phys = 0;
2949 	dev->ep[UDC_EP0IN_IDX].td_data = NULL;
2950 	dev->ep[UDC_EP0IN_IDX].td_data_phys = 0;
2951 
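	/* Scratch buffer used as the EP0 OUT DMA target; see
	 * pch_udc_activate_control_ep().
	 */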
2952 	ep0out_buf = devm_kzalloc(&dev->pdev->dev, UDC_EP0OUT_BUFF_SIZE * 4,
2953 				  GFP_KERNEL);
2954 	if (!ep0out_buf)
2955 		return -ENOMEM;
2956 	dev->dma_addr = dma_map_single(&dev->pdev->dev, ep0out_buf,
2957 				       UDC_EP0OUT_BUFF_SIZE * 4,
2958 				       DMA_FROM_DEVICE);
2959 	return 0;
2960 }
2961 
2962 static int pch_udc_start(struct usb_gadget *g,
2963 		struct usb_gadget_driver *driver)
2964 {
2965 	struct pch_udc_dev	*dev = to_pch_udc(g);
2966 
2967 	driver->driver.bus = NULL;
2968 	dev->driver = driver;
2969 
2970 	/* get ready for ep0 traffic */
2971 	pch_udc_setup_ep0(dev);
2972 
2973 	/* clear SD */
2974 	if ((pch_vbus_gpio_get_value(dev) != 0) || !dev->vbus_gpio.intr)
2975 		pch_udc_clear_disconnect(dev);
2976 
2977 	dev->connected = 1;
2978 	return 0;
2979 }
2980 
2981 static int pch_udc_stop(struct usb_gadget *g)
2982 {
2983 	struct pch_udc_dev	*dev = to_pch_udc(g);
2984 
2985 	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
2986 
2987 	/* Assures that there are no pending requests with this driver */
2988 	dev->driver = NULL;
2989 	dev->connected = 0;
2990 
2991 	/* set SD */
2992 	pch_udc_set_disconnect(dev);
2993 
2994 	return 0;
2995 }
2996 
2997 static void pch_udc_shutdown(struct pci_dev *pdev)
2998 {
2999 	struct pch_udc_dev *dev = pci_get_drvdata(pdev);
3000 
3001 	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
3002 	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
3003 
3004 	/* disable the pullup so the host will think we're gone */
3005 	pch_udc_set_disconnect(dev);
3006 }
3007 
3008 static void pch_udc_remove(struct pci_dev *pdev)
3009 {
3010 	struct pch_udc_dev	*dev = pci_get_drvdata(pdev);
3011 
3012 	usb_del_gadget_udc(&dev->gadget);
3013 
3014 	/* gadget driver must not be registered */
3015 	if (dev->driver)
3016 		dev_err(&pdev->dev,
3017 			"%s: gadget driver still bound!!!\n", __func__);
3018 	/* dma pool cleanup */
3019 	dma_pool_destroy(dev->data_requests);
3020 
3021 	if (dev->stp_requests) {
3022 		/* cleanup DMA desc's for ep0in */
3023 		if (dev->ep[UDC_EP0OUT_IDX].td_stp) {
3024 			dma_pool_free(dev->stp_requests,
3025 				dev->ep[UDC_EP0OUT_IDX].td_stp,
3026 				dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
3027 		}
3028 		if (dev->ep[UDC_EP0OUT_IDX].td_data) {
3029 			dma_pool_free(dev->stp_requests,
3030 				dev->ep[UDC_EP0OUT_IDX].td_data,
3031 				dev->ep[UDC_EP0OUT_IDX].td_data_phys);
3032 		}
3033 		dma_pool_destroy(dev->stp_requests);
3034 	}
3035 
3036 	if (dev->dma_addr)
3037 		dma_unmap_single(&dev->pdev->dev, dev->dma_addr,
3038 				 UDC_EP0OUT_BUFF_SIZE * 4, DMA_FROM_DEVICE);
3039 
3040 	pch_vbus_gpio_free(dev);
3041 
3042 	pch_udc_exit(dev);
3043 }
3044 
3045 #ifdef CONFIG_PM_SLEEP
3046 static int pch_udc_suspend(struct device *d)
3047 {
3048 	struct pch_udc_dev *dev = dev_get_drvdata(d);
3049 
3050 	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
3051 	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
3052 
3053 	return 0;
3054 }
3055 
3056 static int pch_udc_resume(struct device *d)
3057 {
3058 	return 0;
3059 }
3060 
3061 static SIMPLE_DEV_PM_OPS(pch_udc_pm, pch_udc_suspend, pch_udc_resume);
3062 #define PCH_UDC_PM_OPS		(&pch_udc_pm)
3063 #else
3064 #define PCH_UDC_PM_OPS		NULL
3065 #endif /* CONFIG_PM_SLEEP */
3066 
3067 static int pch_udc_probe(struct pci_dev *pdev,
3068 			  const struct pci_device_id *id)
3069 {
3070 	int			bar;
3071 	int			retval;
3072 	struct pch_udc_dev	*dev;
3073 
3074 	/* init */
3075 	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
3076 	if (!dev)
3077 		return -ENOMEM;
3078 
3079 	/* pci setup */
3080 	retval = pcim_enable_device(pdev);
3081 	if (retval)
3082 		return retval;
3083 
3084 	pci_set_drvdata(pdev, dev);
3085 
3086 	/* Determine BAR based on PCI ID */
3087 	if (id->device == PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC)
3088 		bar = PCH_UDC_PCI_BAR_QUARK_X1000;
3089 	else
3090 		bar = PCH_UDC_PCI_BAR;
3091 
3092 	/* PCI resource allocation */
3093 	retval = pcim_iomap_regions(pdev, 1 << bar, pci_name(pdev));
3094 	if (retval)
3095 		return retval;
3096 
3097 	dev->base_addr = pcim_iomap_table(pdev)[bar];
3098 
3099 	/* initialize the hardware */
3100 	if (pch_udc_pcd_init(dev))
3101 		return -ENODEV;
3102 
3103 	pci_enable_msi(pdev);
3104 
3105 	retval = devm_request_irq(&pdev->dev, pdev->irq, pch_udc_isr,
3106 				  IRQF_SHARED, KBUILD_MODNAME, dev);
3107 	if (retval) {
3108 		dev_err(&pdev->dev, "%s: request_irq(%d) fail\n", __func__,
3109 			pdev->irq);
3110 		goto finished;
3111 	}
3112 
3113 	pci_set_master(pdev);
3114 	pci_try_set_mwi(pdev);
3115 
3116 	/* device struct setup */
3117 	spin_lock_init(&dev->lock);
3118 	dev->pdev = pdev;
3119 	dev->gadget.ops = &pch_udc_ops;
3120 
3121 	retval = init_dma_pools(dev);
3122 	if (retval)
3123 		goto finished;
3124 
3125 	dev->gadget.name = KBUILD_MODNAME;
3126 	dev->gadget.max_speed = USB_SPEED_HIGH;
3127 
3128 	/* Put the device in disconnected state till a driver is bound */
3129 	pch_udc_set_disconnect(dev);
3130 	retval = usb_add_gadget_udc(&pdev->dev, &dev->gadget);
3131 	if (retval)
3132 		goto finished;
3133 	return 0;
3134 
3135 finished:
3136 	pch_udc_remove(pdev);
3137 	return retval;
3138 }
3139 
3140 static const struct pci_device_id pch_udc_pcidev_id[] = {
3141 	{
3142 		PCI_DEVICE(PCI_VENDOR_ID_INTEL,
3143 			   PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC),
3144 		.class = PCI_CLASS_SERIAL_USB_DEVICE,
3145 		.class_mask = 0xffffffff,
3146 	},
3147 	{
3148 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EG20T_UDC),
3149 		.class = PCI_CLASS_SERIAL_USB_DEVICE,
3150 		.class_mask = 0xffffffff,
3151 	},
3152 	{
3153 		PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7213_IOH_UDC),
3154 		.class = PCI_CLASS_SERIAL_USB_DEVICE,
3155 		.class_mask = 0xffffffff,
3156 	},
3157 	{
3158 		PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7831_IOH_UDC),
3159 		.class = PCI_CLASS_SERIAL_USB_DEVICE,
3160 		.class_mask = 0xffffffff,
3161 	},
3162 	{ 0 },
3163 };
3164 
3165 MODULE_DEVICE_TABLE(pci, pch_udc_pcidev_id);
3166 
3167 static struct pci_driver pch_udc_driver = {
3168 	.name =	KBUILD_MODNAME,
3169 	.id_table =	pch_udc_pcidev_id,
3170 	.probe =	pch_udc_probe,
3171 	.remove =	pch_udc_remove,
3172 	.shutdown =	pch_udc_shutdown,
3173 	.driver = {
3174 		.pm = PCH_UDC_PM_OPS,
3175 	},
3176 };
3177 
3178 module_pci_driver(pch_udc_driver);
3179 
3180 MODULE_DESCRIPTION("Intel EG20T USB Device Controller");
3181 MODULE_AUTHOR("LAPIS Semiconductor, <tomoya-linux@dsn.lapis-semi.com>");
3182 MODULE_LICENSE("GPL");
3183