xref: /openbmc/linux/drivers/usb/gadget/udc/pch_udc.c (revision 55fd7e02)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
4  */
5 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
6 #include <linux/kernel.h>
7 #include <linux/module.h>
8 #include <linux/pci.h>
9 #include <linux/delay.h>
10 #include <linux/errno.h>
11 #include <linux/list.h>
12 #include <linux/interrupt.h>
13 #include <linux/usb/ch9.h>
14 #include <linux/usb/gadget.h>
15 #include <linux/gpio.h>
16 #include <linux/irq.h>
17 
18 /* GPIO port for VBUS detection */
19 static int vbus_gpio_port = -1;		/* GPIO port number (-1:Not used) */
20 
21 #define PCH_VBUS_PERIOD		3000	/* VBUS polling period (msec) */
22 #define PCH_VBUS_INTERVAL	10	/* VBUS polling interval (msec) */
23 
24 /* Address offset of Registers */
25 #define UDC_EP_REG_SHIFT	0x20	/* Offset to next EP */
26 
27 #define UDC_EPCTL_ADDR		0x00	/* Endpoint control */
28 #define UDC_EPSTS_ADDR		0x04	/* Endpoint status */
29 #define UDC_BUFIN_FRAMENUM_ADDR	0x08	/* buffer size in / frame number out */
30 #define UDC_BUFOUT_MAXPKT_ADDR	0x0C	/* buffer size out / maxpkt in */
31 #define UDC_SUBPTR_ADDR		0x10	/* setup buffer pointer */
32 #define UDC_DESPTR_ADDR		0x14	/* Data descriptor pointer */
33 #define UDC_CONFIRM_ADDR	0x18	/* Write/Read confirmation */
34 
35 #define UDC_DEVCFG_ADDR		0x400	/* Device configuration */
36 #define UDC_DEVCTL_ADDR		0x404	/* Device control */
37 #define UDC_DEVSTS_ADDR		0x408	/* Device status */
38 #define UDC_DEVIRQSTS_ADDR	0x40C	/* Device irq status */
39 #define UDC_DEVIRQMSK_ADDR	0x410	/* Device irq mask */
40 #define UDC_EPIRQSTS_ADDR	0x414	/* Endpoint irq status */
41 #define UDC_EPIRQMSK_ADDR	0x418	/* Endpoint irq mask */
42 #define UDC_DEVLPM_ADDR		0x41C	/* LPM control / status */
43 #define UDC_CSR_BUSY_ADDR	0x4f0	/* UDC_CSR_BUSY Status register */
44 #define UDC_SRST_ADDR		0x4fc	/* SOFT RESET register */
45 #define UDC_CSR_ADDR		0x500	/* USB_DEVICE endpoint register */
46 
47 /* Endpoint control register */
48 /* Bit position */
49 #define UDC_EPCTL_MRXFLUSH		(1 << 12)
50 #define UDC_EPCTL_RRDY			(1 << 9)
51 #define UDC_EPCTL_CNAK			(1 << 8)
52 #define UDC_EPCTL_SNAK			(1 << 7)
53 #define UDC_EPCTL_NAK			(1 << 6)
54 #define UDC_EPCTL_P			(1 << 3)
55 #define UDC_EPCTL_F			(1 << 1)
56 #define UDC_EPCTL_S			(1 << 0)
57 #define UDC_EPCTL_ET_SHIFT		4
58 /* Mask pattern */
59 #define UDC_EPCTL_ET_MASK		0x00000030
60 /* Value for ET field */
61 #define UDC_EPCTL_ET_CONTROL		0
62 #define UDC_EPCTL_ET_ISO		1
63 #define UDC_EPCTL_ET_BULK		2
64 #define UDC_EPCTL_ET_INTERRUPT		3
65 
66 /* Endpoint status register */
67 /* Bit position */
68 #define UDC_EPSTS_XFERDONE		(1 << 27)
69 #define UDC_EPSTS_RSS			(1 << 26)
70 #define UDC_EPSTS_RCS			(1 << 25)
71 #define UDC_EPSTS_TXEMPTY		(1 << 24)
72 #define UDC_EPSTS_TDC			(1 << 10)
73 #define UDC_EPSTS_HE			(1 << 9)
74 #define UDC_EPSTS_MRXFIFO_EMP		(1 << 8)
75 #define UDC_EPSTS_BNA			(1 << 7)
76 #define UDC_EPSTS_IN			(1 << 6)
77 #define UDC_EPSTS_OUT_SHIFT		4
78 /* Mask pattern */
79 #define UDC_EPSTS_OUT_MASK		0x00000030
80 #define UDC_EPSTS_ALL_CLR_MASK		0x1F0006F0
81 /* Value for OUT field */
82 #define UDC_EPSTS_OUT_SETUP		2
83 #define UDC_EPSTS_OUT_DATA		1
84 
85 /* Device configuration register */
86 /* Bit position */
87 #define UDC_DEVCFG_CSR_PRG		(1 << 17)
88 #define UDC_DEVCFG_SP			(1 << 3)
89 /* SPD Value */
90 #define UDC_DEVCFG_SPD_HS		0x0
91 #define UDC_DEVCFG_SPD_FS		0x1
92 #define UDC_DEVCFG_SPD_LS		0x2
93 
94 /* Device control register */
95 /* Bit position */
96 #define UDC_DEVCTL_THLEN_SHIFT		24
97 #define UDC_DEVCTL_BRLEN_SHIFT		16
98 #define UDC_DEVCTL_CSR_DONE		(1 << 13)
99 #define UDC_DEVCTL_SD			(1 << 10)
100 #define UDC_DEVCTL_MODE			(1 << 9)
101 #define UDC_DEVCTL_BREN			(1 << 8)
102 #define UDC_DEVCTL_THE			(1 << 7)
103 #define UDC_DEVCTL_DU			(1 << 4)
104 #define UDC_DEVCTL_TDE			(1 << 3)
105 #define UDC_DEVCTL_RDE			(1 << 2)
106 #define UDC_DEVCTL_RES			(1 << 0)
107 
108 /* Device status register */
109 /* Bit position */
110 #define UDC_DEVSTS_TS_SHIFT		18
111 #define UDC_DEVSTS_ENUM_SPEED_SHIFT	13
112 #define UDC_DEVSTS_ALT_SHIFT		8
113 #define UDC_DEVSTS_INTF_SHIFT		4
114 #define UDC_DEVSTS_CFG_SHIFT		0
115 /* Mask pattern */
116 #define UDC_DEVSTS_TS_MASK		0xfffc0000
117 #define UDC_DEVSTS_ENUM_SPEED_MASK	0x00006000
118 #define UDC_DEVSTS_ALT_MASK		0x00000f00
119 #define UDC_DEVSTS_INTF_MASK		0x000000f0
120 #define UDC_DEVSTS_CFG_MASK		0x0000000f
121 /* Values for the ENUM_SPEED field */
122 #define UDC_DEVSTS_ENUM_SPEED_FULL	1
123 #define UDC_DEVSTS_ENUM_SPEED_HIGH	0
124 #define UDC_DEVSTS_ENUM_SPEED_LOW	2
125 #define UDC_DEVSTS_ENUM_SPEED_FULLX	3
126 
127 /* Device irq register */
128 /* Bit position */
129 #define UDC_DEVINT_RWKP			(1 << 7)
130 #define UDC_DEVINT_ENUM			(1 << 6)
131 #define UDC_DEVINT_SOF			(1 << 5)
132 #define UDC_DEVINT_US			(1 << 4)
133 #define UDC_DEVINT_UR			(1 << 3)
134 #define UDC_DEVINT_ES			(1 << 2)
135 #define UDC_DEVINT_SI			(1 << 1)
136 #define UDC_DEVINT_SC			(1 << 0)
137 /* Mask pattern */
138 #define UDC_DEVINT_MSK			0x7f
139 
140 /* Endpoint irq register */
141 /* Bit position */
142 #define UDC_EPINT_IN_SHIFT		0
143 #define UDC_EPINT_OUT_SHIFT		16
144 #define UDC_EPINT_IN_EP0		(1 << 0)
145 #define UDC_EPINT_OUT_EP0		(1 << 16)
146 /* Mask pattern */
147 #define UDC_EPINT_MSK_DISABLE_ALL	0xffffffff
148 
149 /* UDC_CSR_BUSY Status register */
150 /* Bit position */
151 #define UDC_CSR_BUSY			(1 << 0)
152 
153 /* SOFT RESET register */
154 /* Bit position */
155 #define UDC_PSRST			(1 << 1)
156 #define UDC_SRST			(1 << 0)
157 
158 /* USB_DEVICE endpoint register */
159 /* Bit position */
160 #define UDC_CSR_NE_NUM_SHIFT		0
161 #define UDC_CSR_NE_DIR_SHIFT		4
162 #define UDC_CSR_NE_TYPE_SHIFT		5
163 #define UDC_CSR_NE_CFG_SHIFT		7
164 #define UDC_CSR_NE_INTF_SHIFT		11
165 #define UDC_CSR_NE_ALT_SHIFT		15
166 #define UDC_CSR_NE_MAX_PKT_SHIFT	19
167 /* Mask pattern */
168 #define UDC_CSR_NE_NUM_MASK		0x0000000f
169 #define UDC_CSR_NE_DIR_MASK		0x00000010
170 #define UDC_CSR_NE_TYPE_MASK		0x00000060
171 #define UDC_CSR_NE_CFG_MASK		0x00000780
172 #define UDC_CSR_NE_INTF_MASK		0x00007800
173 #define UDC_CSR_NE_ALT_MASK		0x00078000
174 #define UDC_CSR_NE_MAX_PKT_MASK		0x3ff80000
175 
176 #define PCH_UDC_CSR(ep)	(UDC_CSR_ADDR + ep*4)
177 #define PCH_UDC_EPINT(in, num)\
178 		(1 << (num + (in ? UDC_EPINT_IN_SHIFT : UDC_EPINT_OUT_SHIFT)))
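/*
 * Illustrative expansions (added note, not in the original source):
 * PCH_UDC_CSR(2) resolves to UDC_CSR_ADDR + 8 = 0x508, the NE register of
 * endpoint 2, and PCH_UDC_EPINT(1, 3) resolves to (1 << 3), the IN
 * interrupt bit of endpoint 3, while PCH_UDC_EPINT(0, 3) is (1 << 19) for
 * the OUT direction.
 */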
179 
180 /* Index of endpoint */
181 #define UDC_EP0IN_IDX		0
182 #define UDC_EP0OUT_IDX		1
183 #define UDC_EPIN_IDX(ep)	(ep * 2)
184 #define UDC_EPOUT_IDX(ep)	(ep * 2 + 1)
185 #define PCH_UDC_EP0		0
186 #define PCH_UDC_EP1		1
187 #define PCH_UDC_EP2		2
188 #define PCH_UDC_EP3		3
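/*
 * Example mapping (illustrative only): physical endpoint 1 uses index
 * UDC_EPIN_IDX(1) == 2 for its IN half and UDC_EPOUT_IDX(1) == 3 for its
 * OUT half; the same index selects the endpoint's NE slot in the CSR file
 * (see pch_udc_ep_enable() below).
 */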
189 
190 /* Number of endpoints */
191 #define PCH_UDC_EP_NUM		32	/* Total number of EPs (16 IN, 16 OUT) */
192 #define PCH_UDC_USED_EP_NUM	4	/* Number of EPs actually used */
193 /* Length Value */
194 #define PCH_UDC_BRLEN		0x0F	/* Burst length */
195 #define PCH_UDC_THLEN		0x1F	/* Threshold length */
196 /* Value of EP Buffer Size */
197 #define UDC_EP0IN_BUFF_SIZE	16
198 #define UDC_EPIN_BUFF_SIZE	256
199 #define UDC_EP0OUT_BUFF_SIZE	16
200 #define UDC_EPOUT_BUFF_SIZE	256
201 /* Value of EP maximum packet size */
202 #define UDC_EP0IN_MAX_PKT_SIZE	64
203 #define UDC_EP0OUT_MAX_PKT_SIZE	64
204 #define UDC_BULK_MAX_PKT_SIZE	512
205 
206 /* DMA */
207 #define DMA_DIR_RX		1	/* DMA for data receive */
208 #define DMA_DIR_TX		2	/* DMA for data transmit */
209 #define DMA_ADDR_INVALID	(~(dma_addr_t)0)
210 #define UDC_DMA_MAXPACKET	65536	/* maximum packet size for DMA */
211 
212 /**
213  * struct pch_udc_data_dma_desc - Structure to hold DMA descriptor information
214  *				  for data
215  * @status:		Status quadlet
216  * @reserved:		Reserved
217  * @dataptr:		DMA address of the data buffer
218  * @next:		Next descriptor
219  */
220 struct pch_udc_data_dma_desc {
221 	u32 status;
222 	u32 reserved;
223 	u32 dataptr;
224 	u32 next;
225 };
226 
227 /**
228  * struct pch_udc_stp_dma_desc - Structure to hold DMA descriptor information
229  *				 for control data
230  * @status:	Status
231  * @reserved:	Reserved
232  * @request:	Embedded setup request (the two setup data words
233  *		received from the host)
234  */
235 struct pch_udc_stp_dma_desc {
236 	u32 status;
237 	u32 reserved;
238 	struct usb_ctrlrequest request;
239 } __packed;
240 
241 /* DMA status definitions */
242 /* Buffer status */
243 #define PCH_UDC_BUFF_STS	0xC0000000
244 #define PCH_UDC_BS_HST_RDY	0x00000000
245 #define PCH_UDC_BS_DMA_BSY	0x40000000
246 #define PCH_UDC_BS_DMA_DONE	0x80000000
247 #define PCH_UDC_BS_HST_BSY	0xC0000000
248 /*  Rx/Tx Status */
249 #define PCH_UDC_RXTX_STS	0x30000000
250 #define PCH_UDC_RTS_SUCC	0x00000000
251 #define PCH_UDC_RTS_DESERR	0x10000000
252 #define PCH_UDC_RTS_BUFERR	0x30000000
253 /* Last Descriptor Indication */
254 #define PCH_UDC_DMA_LAST	0x08000000
255 /* Number of Rx/Tx Bytes Mask */
256 #define PCH_UDC_RXTX_BYTES	0x0000ffff
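/*
 * Decoding sketch (illustrative, not part of the original source): a
 * completed OUT descriptor with status 0x80000040 carries
 * PCH_UDC_BS_DMA_DONE in the buffer-status field, PCH_UDC_RTS_SUCC in the
 * Rx/Tx status field and 0x40 (64) received bytes in PCH_UDC_RXTX_BYTES.
 */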
257 
258 /**
259  * struct pch_udc_cfg_data - Structure to hold current configuration
260  *			     and interface information
261  * @cur_cfg:	current configuration in use
262  * @cur_intf:	current interface in use
263  * @cur_alt:	current alt interface in use
264  */
265 struct pch_udc_cfg_data {
266 	u16 cur_cfg;
267 	u16 cur_intf;
268 	u16 cur_alt;
269 };
270 
271 /**
272  * struct pch_udc_ep - Structure holding a PCH USB device Endpoint information
273  * @ep:			embedded usb_ep for this endpoint
274  * @td_stp_phys:	DMA address of the setup descriptor
275  * @td_data_phys:	DMA address of the data descriptor
276  * @td_stp:		setup descriptor for this endpoint
277  * @td_data:		data descriptor for this endpoint
278  * @dev:		reference to the device struct
279  * @offset_addr:	offset address of the endpoint registers within
280  *			the device register space
281  * @queue:		queue for requests
282  * @num:		endpoint number
283  * @in:			endpoint is IN
284  * @halted:		endpoint halted?
285  * @epsts:		Endpoint status
286  */
287 struct pch_udc_ep {
288 	struct usb_ep			ep;
289 	dma_addr_t			td_stp_phys;
290 	dma_addr_t			td_data_phys;
291 	struct pch_udc_stp_dma_desc	*td_stp;
292 	struct pch_udc_data_dma_desc	*td_data;
293 	struct pch_udc_dev		*dev;
294 	unsigned long			offset_addr;
295 	struct list_head		queue;
296 	unsigned			num:5,
297 					in:1,
298 					halted:1;
299 	unsigned long			epsts;
300 };
301 
302 /**
303  * struct pch_vbus_gpio_data - Structure holding GPIO information
304  *					for detecting VBUS
305  * @port:		gpio port number
306  * @intr:		gpio interrupt number
307  * @irq_work_fall:	Work item handling a falling VBUS edge
308  * @irq_work_rise:	Work item handling a rising VBUS edge
309  */
310 struct pch_vbus_gpio_data {
311 	int			port;
312 	int			intr;
313 	struct work_struct	irq_work_fall;
314 	struct work_struct	irq_work_rise;
315 };
316 
317 /**
318  * struct pch_udc_dev - Structure holding complete information
319  *			of the PCH USB device
320  * @gadget:		gadget driver data
321  * @driver:		reference to gadget driver bound
322  * @pdev:		reference to the PCI device
323  * @ep:			array of endpoints
324  * @lock:		protects all state
325  * @stall:		stall requested
326  * @prot_stall:		protocol stall requested
327  * @suspended:		driver in suspended state
328  * @connected:		gadget driver associated
329  * @vbus_session:	required vbus_session state
330  * @set_cfg_not_acked:	pending acknowledgement for a set_config or
331  *			set_interface setup request
332  * @waiting_zlp_ack:	pending acknowledgement for ZLP
333  * @data_requests:	DMA pool for data requests
334  * @stp_requests:	DMA pool for setup requests
335  * @dma_addr:		DMA address of the receive buffer
336  * @setup_data:		Received setup data
337  * @base_addr:		for mapped device memory
338  * @cfg_data:		current cfg, intf, and alt in use
339  * @vbus_gpio:		GPIO information for detecting VBUS
340  */
341 struct pch_udc_dev {
342 	struct usb_gadget		gadget;
343 	struct usb_gadget_driver	*driver;
344 	struct pci_dev			*pdev;
345 	struct pch_udc_ep		ep[PCH_UDC_EP_NUM];
346 	spinlock_t			lock; /* protects all state */
347 	unsigned
348 			stall:1,
349 			prot_stall:1,
350 			suspended:1,
351 			connected:1,
352 			vbus_session:1,
353 			set_cfg_not_acked:1,
354 			waiting_zlp_ack:1;
355 	struct dma_pool		*data_requests;
356 	struct dma_pool		*stp_requests;
357 	dma_addr_t			dma_addr;
358 	struct usb_ctrlrequest		setup_data;
359 	void __iomem			*base_addr;
360 	struct pch_udc_cfg_data		cfg_data;
361 	struct pch_vbus_gpio_data	vbus_gpio;
362 };
363 #define to_pch_udc(g)	(container_of((g), struct pch_udc_dev, gadget))
364 
365 #define PCH_UDC_PCI_BAR_QUARK_X1000	0
366 #define PCH_UDC_PCI_BAR			1
367 
368 #define PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC	0x0939
369 #define PCI_DEVICE_ID_INTEL_EG20T_UDC		0x8808
370 
371 #define PCI_DEVICE_ID_ML7213_IOH_UDC	0x801D
372 #define PCI_DEVICE_ID_ML7831_IOH_UDC	0x8808
373 
374 static const char	ep0_string[] = "ep0in";
375 static DEFINE_SPINLOCK(udc_stall_spinlock);	/* stall spin lock */
376 static bool speed_fs;
377 module_param_named(speed_fs, speed_fs, bool, S_IRUGO);
378 MODULE_PARM_DESC(speed_fs, "true for Full speed operation");
379 
380 /**
381  * struct pch_udc_request - Structure holding a PCH USB device request packet
382  * @req:		embedded ep request
383  * @td_data_phys:	phys. address
384  * @td_data:		first dma desc. of chain
385  * @td_data_last:	last dma desc. of chain
386  * @queue:		associated queue
387  * @dma_going:		DMA in progress for request
388  * @dma_mapped:		DMA memory mapped for request
389  * @dma_done:		DMA completed for request
390  * @chain_len:		chain length
391  * @buf:		Buffer memory for align adjustment
392  * @dma:		DMA memory for align adjustment
393  */
394 struct pch_udc_request {
395 	struct usb_request		req;
396 	dma_addr_t			td_data_phys;
397 	struct pch_udc_data_dma_desc	*td_data;
398 	struct pch_udc_data_dma_desc	*td_data_last;
399 	struct list_head		queue;
400 	unsigned			dma_going:1,
401 					dma_mapped:1,
402 					dma_done:1;
403 	unsigned			chain_len;
404 	void				*buf;
405 	dma_addr_t			dma;
406 };
407 
408 static inline u32 pch_udc_readl(struct pch_udc_dev *dev, unsigned long reg)
409 {
410 	return ioread32(dev->base_addr + reg);
411 }
412 
413 static inline void pch_udc_writel(struct pch_udc_dev *dev,
414 				    unsigned long val, unsigned long reg)
415 {
416 	iowrite32(val, dev->base_addr + reg);
417 }
418 
419 static inline void pch_udc_bit_set(struct pch_udc_dev *dev,
420 				     unsigned long reg,
421 				     unsigned long bitmask)
422 {
423 	pch_udc_writel(dev, pch_udc_readl(dev, reg) | bitmask, reg);
424 }
425 
426 static inline void pch_udc_bit_clr(struct pch_udc_dev *dev,
427 				     unsigned long reg,
428 				     unsigned long bitmask)
429 {
430 	pch_udc_writel(dev, pch_udc_readl(dev, reg) & ~(bitmask), reg);
431 }
432 
433 static inline u32 pch_udc_ep_readl(struct pch_udc_ep *ep, unsigned long reg)
434 {
435 	return ioread32(ep->dev->base_addr + ep->offset_addr + reg);
436 }
437 
438 static inline void pch_udc_ep_writel(struct pch_udc_ep *ep,
439 				    unsigned long val, unsigned long reg)
440 {
441 	iowrite32(val, ep->dev->base_addr + ep->offset_addr + reg);
442 }
443 
444 static inline void pch_udc_ep_bit_set(struct pch_udc_ep *ep,
445 				     unsigned long reg,
446 				     unsigned long bitmask)
447 {
448 	pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) | bitmask, reg);
449 }
450 
451 static inline void pch_udc_ep_bit_clr(struct pch_udc_ep *ep,
452 				     unsigned long reg,
453 				     unsigned long bitmask)
454 {
455 	pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) & ~(bitmask), reg);
456 }
457 
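/*
 * Usage sketch (illustrative only): these helpers are plain
 * read-modify-write accessors, e.g.
 *
 *	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
 *
 * reads the device control register, ORs in the RDE bit and writes the
 * result back; the pch_udc_ep_* variants additionally add ep->offset_addr
 * so the same register layout is addressed per endpoint.
 */
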
458 /**
459  * pch_udc_csr_busy() - Wait till idle.
460  * @dev:	Reference to pch_udc_dev structure
461  */
462 static void pch_udc_csr_busy(struct pch_udc_dev *dev)
463 {
464 	unsigned int count = 200;
465 
466 	/* Wait till idle */
467 	while ((pch_udc_readl(dev, UDC_CSR_BUSY_ADDR) & UDC_CSR_BUSY)
468 		&& --count)
469 		cpu_relax();
470 	if (!count)
471 		dev_err(&dev->pdev->dev, "%s: wait error\n", __func__);
472 }
473 
474 /**
475  * pch_udc_write_csr() - Write the command and status registers.
476  * @dev:	Reference to pch_udc_dev structure
477  * @val:	value to be written to CSR register
478  * @ep:	index of the endpoint CSR register to write
479  */
480 static void pch_udc_write_csr(struct pch_udc_dev *dev, unsigned long val,
481 			       unsigned int ep)
482 {
483 	unsigned long reg = PCH_UDC_CSR(ep);
484 
485 	pch_udc_csr_busy(dev);		/* Wait till idle */
486 	pch_udc_writel(dev, val, reg);
487 	pch_udc_csr_busy(dev);		/* Wait till idle */
488 }
489 
490 /**
491  * pch_udc_read_csr() - Read the command and status registers.
492  * @dev:	Reference to pch_udc_dev structure
493  * @ep:	index of the endpoint CSR register to read
494  *
495  * Return codes:	content of CSR register
496  */
497 static u32 pch_udc_read_csr(struct pch_udc_dev *dev, unsigned int ep)
498 {
499 	unsigned long reg = PCH_UDC_CSR(ep);
500 
501 	pch_udc_csr_busy(dev);		/* Wait till idle */
502 	pch_udc_readl(dev, reg);	/* Dummy read */
503 	pch_udc_csr_busy(dev);		/* Wait till idle */
504 	return pch_udc_readl(dev, reg);
505 }
506 
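/*
 * Access sketch (illustrative only): reading the NE register of the IN half
 * of endpoint 1 would look like
 *
 *	u32 ne = pch_udc_read_csr(dev, UDC_EPIN_IDX(1));
 *
 * the dummy read and the pch_udc_csr_busy() calls mirror the busy/idle
 * handshake also used by pch_udc_write_csr() above.
 */
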
507 /**
508  * pch_udc_rmt_wakeup() - Initiate a remote wakeup
509  * @dev:	Reference to pch_udc_dev structure
510  */
511 static inline void pch_udc_rmt_wakeup(struct pch_udc_dev *dev)
512 {
513 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
514 	mdelay(1);
515 	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
516 }
517 
518 /**
519  * pch_udc_get_frame() - Get the current frame from device status register
520  * @dev:	Reference to pch_udc_dev structure
521  * Return	the current frame number
522  */
523 static inline int pch_udc_get_frame(struct pch_udc_dev *dev)
524 {
525 	u32 frame = pch_udc_readl(dev, UDC_DEVSTS_ADDR);
526 	return (frame & UDC_DEVSTS_TS_MASK) >> UDC_DEVSTS_TS_SHIFT;
527 }
528 
529 /**
530  * pch_udc_clear_selfpowered() - Clear the self power control
531  * @dev:	Reference to pch_udc_dev structure
532  */
533 static inline void pch_udc_clear_selfpowered(struct pch_udc_dev *dev)
534 {
535 	pch_udc_bit_clr(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
536 }
537 
538 /**
539  * pch_udc_set_selfpowered() - Set the self power control
540  * @dev:	Reference to pch_udc_dev structure
541  */
542 static inline void pch_udc_set_selfpowered(struct pch_udc_dev *dev)
543 {
544 	pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
545 }
546 
547 /**
548  * pch_udc_set_disconnect() - Set the disconnect status.
549  * @dev:	Reference to pch_udc_dev structure
550  */
551 static inline void pch_udc_set_disconnect(struct pch_udc_dev *dev)
552 {
553 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
554 }
555 
556 /**
557  * pch_udc_clear_disconnect() - Clear the disconnect status.
558  * @dev:	Reference to pch_udc_dev structure
559  */
560 static void pch_udc_clear_disconnect(struct pch_udc_dev *dev)
561 {
562 	/* Clear the disconnect */
563 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
564 	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
565 	mdelay(1);
566 	/* Resume USB signalling */
567 	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
568 }
569 
570 /**
571  * pch_udc_reconnect() - This API initializes the usb device controller
572  *						and clears the disconnect status.
573  * @dev:		Reference to pch_udc_dev structure
574  */
575 static void pch_udc_init(struct pch_udc_dev *dev);
576 static void pch_udc_reconnect(struct pch_udc_dev *dev)
577 {
578 	pch_udc_init(dev);
579 
580 	/* enable device interrupts */
581 	/* pch_udc_enable_interrupts() */
582 	pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR,
583 			UDC_DEVINT_UR | UDC_DEVINT_ENUM);
584 
585 	/* Clear the disconnect */
586 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
587 	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
588 	mdelay(1);
589 	/* Resume USB signalling */
590 	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
591 }
592 
593 /**
594  * pch_udc_vbus_session() - set or clear the disconnect status.
595  * @dev:	Reference to pch_udc_dev structure
596  * @is_active:	Parameter specifying the action
597  *		  0:   indicating VBUS power is ending
598  *		  !0:  indicating VBUS power is starting
599  */
600 static inline void pch_udc_vbus_session(struct pch_udc_dev *dev,
601 					  int is_active)
602 {
603 	if (is_active) {
604 		pch_udc_reconnect(dev);
605 		dev->vbus_session = 1;
606 	} else {
607 		if (dev->driver && dev->driver->disconnect) {
608 			spin_lock(&dev->lock);
609 			dev->driver->disconnect(&dev->gadget);
610 			spin_unlock(&dev->lock);
611 		}
612 		pch_udc_set_disconnect(dev);
613 		dev->vbus_session = 0;
614 	}
615 }
616 
617 /**
618  * pch_udc_ep_set_stall() - Set the stall of endpoint
619  * @ep:		Reference to structure of type pch_udc_ep
620  */
621 static void pch_udc_ep_set_stall(struct pch_udc_ep *ep)
622 {
623 	if (ep->in) {
624 		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
625 		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
626 	} else {
627 		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
628 	}
629 }
630 
631 /**
632  * pch_udc_ep_clear_stall() - Clear the stall of endpoint
633  * @ep:		Reference to structure of type pch_udc_ep
634  */
635 static inline void pch_udc_ep_clear_stall(struct pch_udc_ep *ep)
636 {
637 	/* Clear the stall */
638 	pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
639 	/* Clear NAK by writing CNAK */
640 	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
641 }
642 
643 /**
644  * pch_udc_ep_set_trfr_type() - Set the transfer type of endpoint
645  * @ep:		Reference to structure of type pch_udc_ep
646  * @type:	Type of endpoint
647  */
648 static inline void pch_udc_ep_set_trfr_type(struct pch_udc_ep *ep,
649 					u8 type)
650 {
651 	pch_udc_ep_writel(ep, ((type << UDC_EPCTL_ET_SHIFT) &
652 				UDC_EPCTL_ET_MASK), UDC_EPCTL_ADDR);
653 }
654 
655 /**
656  * pch_udc_ep_set_bufsz() - Set the buffer size for the endpoint
657  * @ep:		Reference to structure of type pch_udc_ep
658  * @buf_size:	The buffer word size (@ep_in selects the IN or OUT register)
659  */
660 static void pch_udc_ep_set_bufsz(struct pch_udc_ep *ep,
661 						 u32 buf_size, u32 ep_in)
662 {
663 	u32 data;
664 	if (ep_in) {
665 		data = pch_udc_ep_readl(ep, UDC_BUFIN_FRAMENUM_ADDR);
666 		data = (data & 0xffff0000) | (buf_size & 0xffff);
667 		pch_udc_ep_writel(ep, data, UDC_BUFIN_FRAMENUM_ADDR);
668 	} else {
669 		data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
670 		data = (buf_size << 16) | (data & 0xffff);
671 		pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
672 	}
673 }
674 
675 /**
676  * pch_udc_ep_set_maxpkt() - Set the Max packet size for the endpoint
677  * @ep:		Reference to structure of type pch_udc_ep
678  * @pkt_size:	The packet byte size
679  */
680 static void pch_udc_ep_set_maxpkt(struct pch_udc_ep *ep, u32 pkt_size)
681 {
682 	u32 data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
683 	data = (data & 0xffff0000) | (pkt_size & 0xffff);
684 	pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
685 }
686 
687 /**
688  * pch_udc_ep_set_subptr() - Set the Setup buffer pointer for the endpoint
689  * @ep:		Reference to structure of type pch_udc_ep
690  * @addr:	DMA address of the setup buffer
691  */
692 static inline void pch_udc_ep_set_subptr(struct pch_udc_ep *ep, u32 addr)
693 {
694 	pch_udc_ep_writel(ep, addr, UDC_SUBPTR_ADDR);
695 }
696 
697 /**
698  * pch_udc_ep_set_ddptr() - Set the Data descriptor pointer for the endpoint
699  * @ep:		Reference to structure of type pch_udc_ep
700  * @addr:	DMA address of the data descriptor
701  */
702 static inline void pch_udc_ep_set_ddptr(struct pch_udc_ep *ep, u32 addr)
703 {
704 	pch_udc_ep_writel(ep, addr, UDC_DESPTR_ADDR);
705 }
706 
707 /**
708  * pch_udc_ep_set_pd() - Set the poll demand bit for the endpoint
709  * @ep:		Reference to structure of type pch_udc_ep
710  */
711 static inline void pch_udc_ep_set_pd(struct pch_udc_ep *ep)
712 {
713 	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_P);
714 }
715 
716 /**
717  * pch_udc_ep_set_rrdy() - Set the receive ready bit for the endpoint
718  * @ep:		Reference to structure of type pch_udc_ep
719  */
720 static inline void pch_udc_ep_set_rrdy(struct pch_udc_ep *ep)
721 {
722 	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
723 }
724 
725 /**
726  * pch_udc_ep_clear_rrdy() - Clear the receive ready bit for the endpoint
727  * @ep:		Reference to structure of type pch_udc_ep
728  */
729 static inline void pch_udc_ep_clear_rrdy(struct pch_udc_ep *ep)
730 {
731 	pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
732 }
733 
734 /**
735  * pch_udc_set_dma() - Set the TDE or RDE bit of the device control
736  *			register depending on the direction specified
737  * @dev:	Reference to structure of type pch_udc_dev
738  * @dir:	whether Tx or Rx
739  *		  DMA_DIR_RX: Receive
740  *		  DMA_DIR_TX: Transmit
741  */
742 static inline void pch_udc_set_dma(struct pch_udc_dev *dev, int dir)
743 {
744 	if (dir == DMA_DIR_RX)
745 		pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
746 	else if (dir == DMA_DIR_TX)
747 		pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
748 }
749 
750 /**
751  * pch_udc_clear_dma() - Clear the TDE or RDE bit of the device control
752  *				 register depending on the direction specified
753  * @dev:	Reference to structure of type pch_udc_dev
754  * @dir:	Whether Tx or Rx
755  *		  DMA_DIR_RX: Receive
756  *		  DMA_DIR_TX: Transmit
757  */
758 static inline void pch_udc_clear_dma(struct pch_udc_dev *dev, int dir)
759 {
760 	if (dir == DMA_DIR_RX)
761 		pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
762 	else if (dir == DMA_DIR_TX)
763 		pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
764 }
765 
766 /**
767  * pch_udc_set_csr_done() - Set the device control register
768  *				CSR done field (bit 13)
769  * @dev:	reference to structure of type pch_udc_dev
770  */
771 static inline void pch_udc_set_csr_done(struct pch_udc_dev *dev)
772 {
773 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_CSR_DONE);
774 }
775 
776 /**
777  * pch_udc_disable_interrupts() - Disables the specified interrupts
778  * @dev:	Reference to structure of type pch_udc_dev
779  * @mask:	Mask to disable interrupts
780  */
781 static inline void pch_udc_disable_interrupts(struct pch_udc_dev *dev,
782 					    u32 mask)
783 {
784 	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, mask);
785 }
786 
787 /**
788  * pch_udc_enable_interrupts() - Enable the specified interrupts
789  * @dev:	Reference to structure of type pch_udc_dev
790  * @mask:	Mask to enable interrupts
791  */
792 static inline void pch_udc_enable_interrupts(struct pch_udc_dev *dev,
793 					   u32 mask)
794 {
795 	pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR, mask);
796 }
797 
798 /**
799  * pch_udc_disable_ep_interrupts() - Disable endpoint interrupts
800  * @dev:	Reference to structure of type pch_udc_dev
801  * @mask:	Mask to disable interrupts
802  */
803 static inline void pch_udc_disable_ep_interrupts(struct pch_udc_dev *dev,
804 						u32 mask)
805 {
806 	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, mask);
807 }
808 
809 /**
810  * pch_udc_enable_ep_interrupts() - Enable endpoint interrupts
811  * @dev:	Reference to structure of type pch_udc_dev
812  * @mask:	Mask to enable interrupts
813  */
814 static inline void pch_udc_enable_ep_interrupts(struct pch_udc_dev *dev,
815 					      u32 mask)
816 {
817 	pch_udc_bit_clr(dev, UDC_EPIRQMSK_ADDR, mask);
818 }
819 
820 /**
821  * pch_udc_read_device_interrupts() - Read the device interrupts
822  * @dev:	Reference to structure of type pch_udc_dev
823  * Return	The device interrupts
824  */
825 static inline u32 pch_udc_read_device_interrupts(struct pch_udc_dev *dev)
826 {
827 	return pch_udc_readl(dev, UDC_DEVIRQSTS_ADDR);
828 }
829 
830 /**
831  * pch_udc_write_device_interrupts() - Write device interrupts
832  * @dev:	Reference to structure of type pch_udc_dev
833  * @val:	The value to be written to interrupt register
834  */
835 static inline void pch_udc_write_device_interrupts(struct pch_udc_dev *dev,
836 						     u32 val)
837 {
838 	pch_udc_writel(dev, val, UDC_DEVIRQSTS_ADDR);
839 }
840 
841 /**
842  * pch_udc_read_ep_interrupts() - Read the endpoint interrupts
843  * @dev:	Reference to structure of type pch_udc_dev
844  * Return	The endpoint interrupts
845  */
846 static inline u32 pch_udc_read_ep_interrupts(struct pch_udc_dev *dev)
847 {
848 	return pch_udc_readl(dev, UDC_EPIRQSTS_ADDR);
849 }
850 
851 /**
852  * pch_udc_write_ep_interrupts() - Clear endpoint interrupts
853  * @dev:	Reference to structure of type pch_udc_dev
854  * @val:	The value to be written to interrupt register
855  */
856 static inline void pch_udc_write_ep_interrupts(struct pch_udc_dev *dev,
857 					     u32 val)
858 {
859 	pch_udc_writel(dev, val, UDC_EPIRQSTS_ADDR);
860 }
861 
862 /**
863  * pch_udc_read_device_status() - Read the device status
864  * @dev:	Reference to structure of type pch_udc_dev
865  * Return	The device status
866  */
867 static inline u32 pch_udc_read_device_status(struct pch_udc_dev *dev)
868 {
869 	return pch_udc_readl(dev, UDC_DEVSTS_ADDR);
870 }
871 
872 /**
873  * pch_udc_read_ep_control() - Read the endpoint control
874  * @ep:		Reference to structure of type pch_udc_ep
875  * Return	The endpoint control register value
876  */
877 static inline u32 pch_udc_read_ep_control(struct pch_udc_ep *ep)
878 {
879 	return pch_udc_ep_readl(ep, UDC_EPCTL_ADDR);
880 }
881 
882 /**
883  * pch_udc_clear_ep_control() - Clear the endpoint control register
884  * @ep:		Reference to structure of type pch_udc_ep
885  * Return	none
886  */
887 static inline void pch_udc_clear_ep_control(struct pch_udc_ep *ep)
888 {
889 	return pch_udc_ep_writel(ep, 0, UDC_EPCTL_ADDR);
890 }
891 
892 /**
893  * pch_udc_read_ep_status() - Read the endpoint status
894  * @ep:		Reference to structure of type pch_udc_ep
895  * Return	The endpoint status
896  */
897 static inline u32 pch_udc_read_ep_status(struct pch_udc_ep *ep)
898 {
899 	return pch_udc_ep_readl(ep, UDC_EPSTS_ADDR);
900 }
901 
902 /**
903  * pch_udc_clear_ep_status() - Clear the endpoint status
904  * @ep:		Reference to structure of type pch_udc_ep
905  * @stat:	Endpoint status
906  */
907 static inline void pch_udc_clear_ep_status(struct pch_udc_ep *ep,
908 					 u32 stat)
909 {
910 	return pch_udc_ep_writel(ep, stat, UDC_EPSTS_ADDR);
911 }
912 
913 /**
914  * pch_udc_ep_set_nak() - Set the bit 7 (SNAK field)
915  *				of the endpoint control register
916  * @ep:		Reference to structure of type pch_udc_ep
917  */
918 static inline void pch_udc_ep_set_nak(struct pch_udc_ep *ep)
919 {
920 	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_SNAK);
921 }
922 
923 /**
924  * pch_udc_ep_clear_nak() - Set the bit 8 (CNAK field)
925  *				of the endpoint control register
926  * @ep:		reference to structure of type pch_udc_ep
927  */
928 static void pch_udc_ep_clear_nak(struct pch_udc_ep *ep)
929 {
930 	unsigned int loopcnt = 0;
931 	struct pch_udc_dev *dev = ep->dev;
932 
933 	if (!(pch_udc_ep_readl(ep, UDC_EPCTL_ADDR) & UDC_EPCTL_NAK))
934 		return;
935 	if (!ep->in) {
936 		loopcnt = 10000;
937 		while (!(pch_udc_read_ep_status(ep) & UDC_EPSTS_MRXFIFO_EMP) &&
938 			--loopcnt)
939 			udelay(5);
940 		if (!loopcnt)
941 			dev_err(&dev->pdev->dev, "%s: RxFIFO not Empty\n",
942 				__func__);
943 	}
944 	loopcnt = 10000;
945 	while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_NAK) && --loopcnt) {
946 		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
947 		udelay(5);
948 	}
949 	if (!loopcnt)
950 		dev_err(&dev->pdev->dev, "%s: Clear NAK not set for ep%d%s\n",
951 			__func__, ep->num, (ep->in ? "in" : "out"));
952 }
953 
954 /**
955  * pch_udc_ep_fifo_flush() - Flush the endpoint fifo
956  * @ep:	reference to structure of type pch_udc_ep
957  * @dir:	direction of endpoint
958  *		  0:  endpoint is OUT
959  *		  !0: endpoint is IN
960  */
961 static void pch_udc_ep_fifo_flush(struct pch_udc_ep *ep, int dir)
962 {
963 	if (dir) {	/* IN ep */
964 		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
965 		return;
966 	}
967 }
968 
969 /**
970  * pch_udc_ep_enable() - This API enables the endpoint
971  * @ep:	Reference to structure of type pch_udc_ep; @cfg: current config data
972  * @desc:	endpoint descriptor
973  */
974 static void pch_udc_ep_enable(struct pch_udc_ep *ep,
975 			       struct pch_udc_cfg_data *cfg,
976 			       const struct usb_endpoint_descriptor *desc)
977 {
978 	u32 val = 0;
979 	u32 buff_size = 0;
980 
981 	pch_udc_ep_set_trfr_type(ep, desc->bmAttributes);
982 	if (ep->in)
983 		buff_size = UDC_EPIN_BUFF_SIZE;
984 	else
985 		buff_size = UDC_EPOUT_BUFF_SIZE;
986 	pch_udc_ep_set_bufsz(ep, buff_size, ep->in);
987 	pch_udc_ep_set_maxpkt(ep, usb_endpoint_maxp(desc));
988 	pch_udc_ep_set_nak(ep);
989 	pch_udc_ep_fifo_flush(ep, ep->in);
990 	/* Configure the endpoint */
991 	val = ep->num << UDC_CSR_NE_NUM_SHIFT | ep->in << UDC_CSR_NE_DIR_SHIFT |
992 	      ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) <<
993 		UDC_CSR_NE_TYPE_SHIFT) |
994 	      (cfg->cur_cfg << UDC_CSR_NE_CFG_SHIFT) |
995 	      (cfg->cur_intf << UDC_CSR_NE_INTF_SHIFT) |
996 	      (cfg->cur_alt << UDC_CSR_NE_ALT_SHIFT) |
997 	      usb_endpoint_maxp(desc) << UDC_CSR_NE_MAX_PKT_SHIFT;
998 
999 	if (ep->in)
1000 		pch_udc_write_csr(ep->dev, val, UDC_EPIN_IDX(ep->num));
1001 	else
1002 		pch_udc_write_csr(ep->dev, val, UDC_EPOUT_IDX(ep->num));
1003 }
1004 
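/*
 * Worked example (illustrative, not from the original source): for a bulk
 * IN endpoint 1 in configuration 1, interface 0, altsetting 0 with a
 * 512-byte max packet, the NE value written above would be
 *
 *	1 | (1 << 4) | (2 << 5) | (1 << 7) | (512 << 19)
 *
 * i.e. endpoint number, direction, transfer type, configuration and max
 * packet size packed into the fields defined by the UDC_CSR_NE_* shifts.
 */
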
1005 /**
1006  * pch_udc_ep_disable() - This API disables the endpoint
1007  * @ep:	Reference to structure of type pch_udc_ep
1008  */
1009 static void pch_udc_ep_disable(struct pch_udc_ep *ep)
1010 {
1011 	if (ep->in) {
1012 		/* flush the fifo */
1013 		pch_udc_ep_writel(ep, UDC_EPCTL_F, UDC_EPCTL_ADDR);
1014 		/* set NAK */
1015 		pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
1016 		pch_udc_ep_bit_set(ep, UDC_EPSTS_ADDR, UDC_EPSTS_IN);
1017 	} else {
1018 		/* set NAK */
1019 		pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
1020 	}
1021 	/* reset desc pointer */
1022 	pch_udc_ep_writel(ep, 0, UDC_DESPTR_ADDR);
1023 }
1024 
1025 /**
1026  * pch_udc_wait_ep_stall() - Wait until the endpoint stall bit clears.
1027  * @ep:	Reference to structure of type pch_udc_ep
1028  */
1029 static void pch_udc_wait_ep_stall(struct pch_udc_ep *ep)
1030 {
1031 	unsigned int count = 10000;
1032 
1033 	/* Wait till idle */
1034 	while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_S) && --count)
1035 		udelay(5);
1036 	if (!count)
1037 		dev_err(&ep->dev->pdev->dev, "%s: wait error\n", __func__);
1038 }
1039 
1040 /**
1041  * pch_udc_init() - This API initializes usb device controller
1042  * @dev:	Reference to pch_udc_dev structure
1043  */
1044 static void pch_udc_init(struct pch_udc_dev *dev)
1045 {
1046 	if (NULL == dev) {
1047 		pr_err("%s: Invalid address\n", __func__);
1048 		return;
1049 	}
1050 	/* Soft Reset and Reset PHY */
1051 	pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
1052 	pch_udc_writel(dev, UDC_SRST | UDC_PSRST, UDC_SRST_ADDR);
1053 	mdelay(1);
1054 	pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
1055 	pch_udc_writel(dev, 0x00, UDC_SRST_ADDR);
1056 	mdelay(1);
1057 	/* mask and clear all device interrupts */
1058 	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
1059 	pch_udc_bit_set(dev, UDC_DEVIRQSTS_ADDR, UDC_DEVINT_MSK);
1060 
1061 	/* mask and clear all ep interrupts */
1062 	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1063 	pch_udc_bit_set(dev, UDC_EPIRQSTS_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1064 
1065 	/* enable dynamic CSR programming, self powered and device speed */
1066 	if (speed_fs)
1067 		pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
1068 				UDC_DEVCFG_SP | UDC_DEVCFG_SPD_FS);
1069 	else /* default high speed */
1070 		pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
1071 				UDC_DEVCFG_SP | UDC_DEVCFG_SPD_HS);
1072 	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR,
1073 			(PCH_UDC_THLEN << UDC_DEVCTL_THLEN_SHIFT) |
1074 			(PCH_UDC_BRLEN << UDC_DEVCTL_BRLEN_SHIFT) |
1075 			UDC_DEVCTL_MODE | UDC_DEVCTL_BREN |
1076 			UDC_DEVCTL_THE);
1077 }
1078 
1079 /**
1080  * pch_udc_exit() - This API shuts down the usb device controller
1081  * @dev:	Reference to pch_udc_dev structure
1082  */
1083 static void pch_udc_exit(struct pch_udc_dev *dev)
1084 {
1085 	/* mask all device interrupts */
1086 	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
1087 	/* mask all ep interrupts */
1088 	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1089 	/* put device in disconnected state */
1090 	pch_udc_set_disconnect(dev);
1091 }
1092 
1093 /**
1094  * pch_udc_pcd_get_frame() - This API is invoked to get the current frame number
1095  * @gadget:	Reference to the gadget driver
1096  *
1097  * Return codes:
1098  *	>= 0:		The current frame number
1099  *	-EINVAL:	If the gadget passed is NULL
1100  */
1101 static int pch_udc_pcd_get_frame(struct usb_gadget *gadget)
1102 {
1103 	struct pch_udc_dev	*dev;
1104 
1105 	if (!gadget)
1106 		return -EINVAL;
1107 	dev = container_of(gadget, struct pch_udc_dev, gadget);
1108 	return pch_udc_get_frame(dev);
1109 }
1110 
1111 /**
1112  * pch_udc_pcd_wakeup() - This API is invoked to initiate a remote wakeup
1113  * @gadget:	Reference to the gadget driver
1114  *
1115  * Return codes:
1116  *	0:		Success
1117  *	-EINVAL:	If the gadget passed is NULL
1118  */
1119 static int pch_udc_pcd_wakeup(struct usb_gadget *gadget)
1120 {
1121 	struct pch_udc_dev	*dev;
1122 	unsigned long		flags;
1123 
1124 	if (!gadget)
1125 		return -EINVAL;
1126 	dev = container_of(gadget, struct pch_udc_dev, gadget);
1127 	spin_lock_irqsave(&dev->lock, flags);
1128 	pch_udc_rmt_wakeup(dev);
1129 	spin_unlock_irqrestore(&dev->lock, flags);
1130 	return 0;
1131 }
1132 
1133 /**
1134  * pch_udc_pcd_selfpowered() - This API is invoked to specify whether the device
1135  *				is self powered or not
1136  * @gadget:	Reference to the gadget driver
1137  * @value:	Specifies self powered or not
1138  *
1139  * Return codes:
1140  *	0:		Success
1141  *	-EINVAL:	If the gadget passed is NULL
1142  */
1143 static int pch_udc_pcd_selfpowered(struct usb_gadget *gadget, int value)
1144 {
1145 	struct pch_udc_dev	*dev;
1146 
1147 	if (!gadget)
1148 		return -EINVAL;
1149 	gadget->is_selfpowered = (value != 0);
1150 	dev = container_of(gadget, struct pch_udc_dev, gadget);
1151 	if (value)
1152 		pch_udc_set_selfpowered(dev);
1153 	else
1154 		pch_udc_clear_selfpowered(dev);
1155 	return 0;
1156 }
1157 
1158 /**
1159  * pch_udc_pcd_pullup() - This API is invoked to make the device
1160  *				visible/invisible to the host
1161  * @gadget:	Reference to the gadget driver
1162  * @is_on:	Specifies whether the pull up is made active or inactive
1163  *
1164  * Return codes:
1165  *	0:		Success
1166  *	-EINVAL:	If the gadget passed is NULL
1167  */
1168 static int pch_udc_pcd_pullup(struct usb_gadget *gadget, int is_on)
1169 {
1170 	struct pch_udc_dev	*dev;
1171 
1172 	if (!gadget)
1173 		return -EINVAL;
1174 	dev = container_of(gadget, struct pch_udc_dev, gadget);
1175 	if (is_on) {
1176 		pch_udc_reconnect(dev);
1177 	} else {
1178 		if (dev->driver && dev->driver->disconnect) {
1179 			spin_lock(&dev->lock);
1180 			dev->driver->disconnect(&dev->gadget);
1181 			spin_unlock(&dev->lock);
1182 		}
1183 		pch_udc_set_disconnect(dev);
1184 	}
1185 
1186 	return 0;
1187 }
1188 
1189 /**
1190  * pch_udc_pcd_vbus_session() - This API is used by a driver for an external
1191  *				transceiver (or GPIO) that
1192  *				detects a VBUS power session starting/ending
1193  * @gadget:	Reference to the gadget driver
1194  * @is_active:	specifies whether the session is starting or ending
1195  *
1196  * Return codes:
1197  *	0:		Success
1198  *	-EINVAL:	If the gadget passed is NULL
1199  */
1200 static int pch_udc_pcd_vbus_session(struct usb_gadget *gadget, int is_active)
1201 {
1202 	struct pch_udc_dev	*dev;
1203 
1204 	if (!gadget)
1205 		return -EINVAL;
1206 	dev = container_of(gadget, struct pch_udc_dev, gadget);
1207 	pch_udc_vbus_session(dev, is_active);
1208 	return 0;
1209 }
1210 
1211 /**
1212  * pch_udc_pcd_vbus_draw() - This API is used by gadget drivers during
1213  *				SET_CONFIGURATION calls to
1214  *				specify how much power the device can consume
1215  * @gadget:	Reference to the gadget driver
1216  * @mA:		specifies the current limit in 2mA unit
1217  *
1218  * Return codes:
1219  *	-EINVAL:	If the gadget passed is NULL
1220  *	-EOPNOTSUPP:
1221  */
1222 static int pch_udc_pcd_vbus_draw(struct usb_gadget *gadget, unsigned int mA)
1223 {
1224 	return -EOPNOTSUPP;
1225 }
1226 
1227 static int pch_udc_start(struct usb_gadget *g,
1228 		struct usb_gadget_driver *driver);
1229 static int pch_udc_stop(struct usb_gadget *g);
1230 
1231 static const struct usb_gadget_ops pch_udc_ops = {
1232 	.get_frame = pch_udc_pcd_get_frame,
1233 	.wakeup = pch_udc_pcd_wakeup,
1234 	.set_selfpowered = pch_udc_pcd_selfpowered,
1235 	.pullup = pch_udc_pcd_pullup,
1236 	.vbus_session = pch_udc_pcd_vbus_session,
1237 	.vbus_draw = pch_udc_pcd_vbus_draw,
1238 	.udc_start = pch_udc_start,
1239 	.udc_stop = pch_udc_stop,
1240 };
1241 
1242 /**
1243  * pch_vbus_gpio_get_value() - This API gets value of GPIO port as VBUS status.
1244  * @dev:	Reference to the driver structure
1245  *
1246  * Return value:
1247  *	1: VBUS is high
1248  *	0: VBUS is low
1249  *     -1: VBUS detection using GPIO is not enabled
1250  */
1251 static int pch_vbus_gpio_get_value(struct pch_udc_dev *dev)
1252 {
1253 	int vbus = 0;
1254 
1255 	if (dev->vbus_gpio.port)
1256 		vbus = gpio_get_value(dev->vbus_gpio.port) ? 1 : 0;
1257 	else
1258 		vbus = -1;
1259 
1260 	return vbus;
1261 }
1262 
1263 /**
1264  * pch_vbus_gpio_work_fall() - This API keeps watch on VBUS becoming Low.
1265  *                             If VBUS is Low, disconnect is processed
1266  * @irq_work:	Structure for WorkQueue
1267  *
1268  */
1269 static void pch_vbus_gpio_work_fall(struct work_struct *irq_work)
1270 {
1271 	struct pch_vbus_gpio_data *vbus_gpio = container_of(irq_work,
1272 		struct pch_vbus_gpio_data, irq_work_fall);
1273 	struct pch_udc_dev *dev =
1274 		container_of(vbus_gpio, struct pch_udc_dev, vbus_gpio);
1275 	int vbus_saved = -1;
1276 	int vbus;
1277 	int count;
1278 
1279 	if (!dev->vbus_gpio.port)
1280 		return;
1281 
1282 	for (count = 0; count < (PCH_VBUS_PERIOD / PCH_VBUS_INTERVAL);
1283 		count++) {
1284 		vbus = pch_vbus_gpio_get_value(dev);
1285 
1286 		if ((vbus_saved == vbus) && (vbus == 0)) {
1287 			dev_dbg(&dev->pdev->dev, "VBUS fell");
1288 			if (dev->driver
1289 				&& dev->driver->disconnect) {
1290 				dev->driver->disconnect(
1291 					&dev->gadget);
1292 			}
1293 			if (dev->vbus_gpio.intr)
1294 				pch_udc_init(dev);
1295 			else
1296 				pch_udc_reconnect(dev);
1297 			return;
1298 		}
1299 		vbus_saved = vbus;
1300 		mdelay(PCH_VBUS_INTERVAL);
1301 	}
1302 }
1303 
1304 /**
1305  * pch_vbus_gpio_work_rise() - This API checks whether VBUS is High.
1306  *                             If VBUS is High, connect is processed
1307  * @irq_work:	Structure for WorkQueue
1308  *
1309  */
1310 static void pch_vbus_gpio_work_rise(struct work_struct *irq_work)
1311 {
1312 	struct pch_vbus_gpio_data *vbus_gpio = container_of(irq_work,
1313 		struct pch_vbus_gpio_data, irq_work_rise);
1314 	struct pch_udc_dev *dev =
1315 		container_of(vbus_gpio, struct pch_udc_dev, vbus_gpio);
1316 	int vbus;
1317 
1318 	if (!dev->vbus_gpio.port)
1319 		return;
1320 
1321 	mdelay(PCH_VBUS_INTERVAL);
1322 	vbus = pch_vbus_gpio_get_value(dev);
1323 
1324 	if (vbus == 1) {
1325 		dev_dbg(&dev->pdev->dev, "VBUS rose");
1326 		pch_udc_reconnect(dev);
1327 		return;
1328 	}
1329 }
1330 
1331 /**
1332  * pch_vbus_gpio_irq() - IRQ handler for GPIO interrupt for changing VBUS
1333  * @irq:	Interrupt request number
1334  * @dev:	Reference to the device structure
1335  *
1336  * Return codes:
1337  *	IRQ_HANDLED: The VBUS change was scheduled for processing
1338  *	IRQ_NONE: GPIO-based VBUS detection is not in use
1339  */
1340 static irqreturn_t pch_vbus_gpio_irq(int irq, void *data)
1341 {
1342 	struct pch_udc_dev *dev = (struct pch_udc_dev *)data;
1343 
1344 	if (!dev->vbus_gpio.port || !dev->vbus_gpio.intr)
1345 		return IRQ_NONE;
1346 
1347 	if (pch_vbus_gpio_get_value(dev))
1348 		schedule_work(&dev->vbus_gpio.irq_work_rise);
1349 	else
1350 		schedule_work(&dev->vbus_gpio.irq_work_fall);
1351 
1352 	return IRQ_HANDLED;
1353 }
1354 
1355 /**
1356  * pch_vbus_gpio_init() - This API initializes GPIO port detecting VBUS.
1357  * @dev:	Reference to the driver structure
1358  * @vbus_gpio_port:	Number of the GPIO port used to detect VBUS
1359  *
1360  * Return codes:
1361  *	0: Success
1362  *	-EINVAL: GPIO port is invalid or can't be initialized.
1363  */
1364 static int pch_vbus_gpio_init(struct pch_udc_dev *dev, int vbus_gpio_port)
1365 {
1366 	int err;
1367 	int irq_num = 0;
1368 
1369 	dev->vbus_gpio.port = 0;
1370 	dev->vbus_gpio.intr = 0;
1371 
1372 	if (vbus_gpio_port <= -1)
1373 		return -EINVAL;
1374 
1375 	err = gpio_is_valid(vbus_gpio_port);
1376 	if (!err) {
1377 		pr_err("%s: gpio port %d is invalid\n",
1378 			__func__, vbus_gpio_port);
1379 		return -EINVAL;
1380 	}
1381 
1382 	err = gpio_request(vbus_gpio_port, "pch_vbus");
1383 	if (err) {
1384 		pr_err("%s: can't request gpio port %d, err: %d\n",
1385 			__func__, vbus_gpio_port, err);
1386 		return -EINVAL;
1387 	}
1388 
1389 	dev->vbus_gpio.port = vbus_gpio_port;
1390 	gpio_direction_input(vbus_gpio_port);
1391 	INIT_WORK(&dev->vbus_gpio.irq_work_fall, pch_vbus_gpio_work_fall);
1392 
1393 	irq_num = gpio_to_irq(vbus_gpio_port);
1394 	if (irq_num > 0) {
1395 		irq_set_irq_type(irq_num, IRQ_TYPE_EDGE_BOTH);
1396 		err = request_irq(irq_num, pch_vbus_gpio_irq, 0,
1397 			"vbus_detect", dev);
1398 		if (!err) {
1399 			dev->vbus_gpio.intr = irq_num;
1400 			INIT_WORK(&dev->vbus_gpio.irq_work_rise,
1401 				pch_vbus_gpio_work_rise);
1402 		} else {
1403 			pr_err("%s: can't request irq %d, err: %d\n",
1404 				__func__, irq_num, err);
1405 		}
1406 	}
1407 
1408 	return 0;
1409 }
1410 
1411 /**
1412  * pch_vbus_gpio_free() - This API frees resources of GPIO port
1413  * @dev:	Reference to the driver structure
1414  */
1415 static void pch_vbus_gpio_free(struct pch_udc_dev *dev)
1416 {
1417 	if (dev->vbus_gpio.intr)
1418 		free_irq(dev->vbus_gpio.intr, dev);
1419 
1420 	if (dev->vbus_gpio.port)
1421 		gpio_free(dev->vbus_gpio.port);
1422 }
1423 
1424 /**
1425  * complete_req() - This API is invoked from the driver when processing
1426  *			of a request is complete
1427  * @ep:		Reference to the endpoint structure
1428  * @req:	Reference to the request structure
1429  * @status:	Indicates the success/failure of completion
1430  */
1431 static void complete_req(struct pch_udc_ep *ep, struct pch_udc_request *req,
1432 								 int status)
1433 	__releases(&dev->lock)
1434 	__acquires(&dev->lock)
1435 {
1436 	struct pch_udc_dev	*dev;
1437 	unsigned halted = ep->halted;
1438 
1439 	list_del_init(&req->queue);
1440 
1441 	/* set new status if pending */
1442 	if (req->req.status == -EINPROGRESS)
1443 		req->req.status = status;
1444 	else
1445 		status = req->req.status;
1446 
1447 	dev = ep->dev;
1448 	if (req->dma_mapped) {
1449 		if (req->dma == DMA_ADDR_INVALID) {
1450 			if (ep->in)
1451 				dma_unmap_single(&dev->pdev->dev, req->req.dma,
1452 						 req->req.length,
1453 						 DMA_TO_DEVICE);
1454 			else
1455 				dma_unmap_single(&dev->pdev->dev, req->req.dma,
1456 						 req->req.length,
1457 						 DMA_FROM_DEVICE);
1458 			req->req.dma = DMA_ADDR_INVALID;
1459 		} else {
1460 			if (ep->in)
1461 				dma_unmap_single(&dev->pdev->dev, req->dma,
1462 						 req->req.length,
1463 						 DMA_TO_DEVICE);
1464 			else {
1465 				dma_unmap_single(&dev->pdev->dev, req->dma,
1466 						 req->req.length,
1467 						 DMA_FROM_DEVICE);
1468 				memcpy(req->req.buf, req->buf, req->req.length);
1469 			}
1470 			kfree(req->buf);
1471 			req->dma = DMA_ADDR_INVALID;
1472 		}
1473 		req->dma_mapped = 0;
1474 	}
1475 	ep->halted = 1;
1476 	spin_unlock(&dev->lock);
1477 	if (!ep->in)
1478 		pch_udc_ep_clear_rrdy(ep);
1479 	usb_gadget_giveback_request(&ep->ep, &req->req);
1480 	spin_lock(&dev->lock);
1481 	ep->halted = halted;
1482 }
1483 
1484 /**
1485  * empty_req_queue() - This API empties the request queue of an endpoint
1486  * @ep:		Reference to the endpoint structure
1487  */
1488 static void empty_req_queue(struct pch_udc_ep *ep)
1489 {
1490 	struct pch_udc_request	*req;
1491 
1492 	ep->halted = 1;
1493 	while (!list_empty(&ep->queue)) {
1494 		req = list_entry(ep->queue.next, struct pch_udc_request, queue);
1495 		complete_req(ep, req, -ESHUTDOWN);	/* Remove from list */
1496 	}
1497 }
1498 
1499 /**
1500  * pch_udc_free_dma_chain() - This function frees the DMA chain created
1501  *				for the request
1502  * @dev:	Reference to the driver structure
1503  * @req:	Reference to the request whose DMA chain is to be freed
1504  *
1505  * The first descriptor is not freed here; it is released when the
1506  * request itself is freed.
1507  */
1508 static void pch_udc_free_dma_chain(struct pch_udc_dev *dev,
1509 				   struct pch_udc_request *req)
1510 {
1511 	struct pch_udc_data_dma_desc *td = req->td_data;
1512 	unsigned i = req->chain_len;
1513 
1514 	dma_addr_t addr2;
1515 	dma_addr_t addr = (dma_addr_t)td->next;
1516 	td->next = 0x00;
1517 	for (; i > 1; --i) {
1518 		/* do not free first desc., will be done by free for request */
1519 		td = phys_to_virt(addr);
1520 		addr2 = (dma_addr_t)td->next;
1521 		dma_pool_free(dev->data_requests, td, addr);
1522 		addr = addr2;
1523 	}
1524 	req->chain_len = 1;
1525 }
1526 
1527 /**
1528  * pch_udc_create_dma_chain() - This function creates or reinitializes
1529  *				a DMA chain
1530  * @ep:		Reference to the endpoint structure
1531  * @req:	Reference to the request
1532  * @buf_len:	The buffer length
1533  * @gfp_flags:	Flags to be used while mapping the data buffer
1534  *
1535  * Return codes:
1536  *	0:		success,
1537  *	-ENOMEM:	dma_pool_alloc invocation fails
1538  */
1539 static int pch_udc_create_dma_chain(struct pch_udc_ep *ep,
1540 				    struct pch_udc_request *req,
1541 				    unsigned long buf_len,
1542 				    gfp_t gfp_flags)
1543 {
1544 	struct pch_udc_data_dma_desc *td = req->td_data, *last;
1545 	unsigned long bytes = req->req.length, i = 0;
1546 	dma_addr_t dma_addr;
1547 	unsigned len = 1;
1548 
1549 	if (req->chain_len > 1)
1550 		pch_udc_free_dma_chain(ep->dev, req);
1551 
1552 	if (req->dma == DMA_ADDR_INVALID)
1553 		td->dataptr = req->req.dma;
1554 	else
1555 		td->dataptr = req->dma;
1556 
1557 	td->status = PCH_UDC_BS_HST_BSY;
1558 	for (; ; bytes -= buf_len, ++len) {
1559 		td->status = PCH_UDC_BS_HST_BSY | min(buf_len, bytes);
1560 		if (bytes <= buf_len)
1561 			break;
1562 		last = td;
1563 		td = dma_pool_alloc(ep->dev->data_requests, gfp_flags,
1564 				    &dma_addr);
1565 		if (!td)
1566 			goto nomem;
1567 		i += buf_len;
1568 		td->dataptr = req->td_data->dataptr + i;
1569 		last->next = dma_addr;
1570 	}
1571 
1572 	req->td_data_last = td;
1573 	td->status |= PCH_UDC_DMA_LAST;
1574 	td->next = req->td_data_phys;
1575 	req->chain_len = len;
1576 	return 0;
1577 
1578 nomem:
1579 	if (len > 1) {
1580 		req->chain_len = len;
1581 		pch_udc_free_dma_chain(ep->dev, req);
1582 	}
1583 	req->chain_len = 1;
1584 	return -ENOMEM;
1585 }
1586 
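/*
 * Layout sketch (illustrative only): for a 1000-byte request on an endpoint
 * with a 512-byte max packet (prepare_dma() passes ep->ep.maxpacket as
 * buf_len), the chain built above holds two descriptors: the first covers
 * bytes 0-511, the second carries the remaining 488 bytes and the
 * PCH_UDC_DMA_LAST flag, and its next pointer wraps back to
 * req->td_data_phys.
 */
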
1587 /**
1588  * prepare_dma() - This function creates and initializes the DMA chain
1589  *			for the request
1590  * @ep:		Reference to the endpoint structure
1591  * @req:	Reference to the request
1592  * @gfp:	Flag to be used while mapping the data buffer
1593  *
1594  * Return codes:
1595  *	0:		Success
1596  *	Nonzero:	linux error number on failure
1597  */
1598 static int prepare_dma(struct pch_udc_ep *ep, struct pch_udc_request *req,
1599 			  gfp_t gfp)
1600 {
1601 	int	retval;
1602 
1603 	/* Allocate and create a DMA chain */
1604 	retval = pch_udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
1605 	if (retval) {
1606 		pr_err("%s: could not create DMA chain:%d\n", __func__, retval);
1607 		return retval;
1608 	}
1609 	if (ep->in)
1610 		req->td_data->status = (req->td_data->status &
1611 				~PCH_UDC_BUFF_STS) | PCH_UDC_BS_HST_RDY;
1612 	return 0;
1613 }
1614 
1615 /**
1616  * process_zlp() - This function processes zero length packets
1617  *			from the gadget driver
1618  * @ep:		Reference to the endpoint structure
1619  * @req:	Reference to the request
1620  */
1621 static void process_zlp(struct pch_udc_ep *ep, struct pch_udc_request *req)
1622 {
1623 	struct pch_udc_dev	*dev = ep->dev;
1624 
1625 	/* IN zlp's are handled by hardware */
1626 	complete_req(ep, req, 0);
1627 
1628 	/* if set_config or set_intf is waiting for ack by zlp
1629 	 * then set CSR_DONE
1630 	 */
1631 	if (dev->set_cfg_not_acked) {
1632 		pch_udc_set_csr_done(dev);
1633 		dev->set_cfg_not_acked = 0;
1634 	}
1635 	/* setup command is ACK'ed now by zlp */
1636 	if (!dev->stall && dev->waiting_zlp_ack) {
1637 		pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
1638 		dev->waiting_zlp_ack = 0;
1639 	}
1640 }
1641 
1642 /**
1643  * pch_udc_start_rxrequest() - This function starts a receive request.
1644  * @ep:		Reference to the endpoint structure
1645  * @req:	Reference to the request structure
1646  */
1647 static void pch_udc_start_rxrequest(struct pch_udc_ep *ep,
1648 					 struct pch_udc_request *req)
1649 {
1650 	struct pch_udc_data_dma_desc *td_data;
1651 
1652 	pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
1653 	td_data = req->td_data;
1654 	/* Set the status bits for all descriptors */
1655 	while (1) {
1656 		td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
1657 				    PCH_UDC_BS_HST_RDY;
1658 		if ((td_data->status & PCH_UDC_DMA_LAST) ==  PCH_UDC_DMA_LAST)
1659 			break;
1660 		td_data = phys_to_virt(td_data->next);
1661 	}
1662 	/* Write the descriptor pointer */
1663 	pch_udc_ep_set_ddptr(ep, req->td_data_phys);
1664 	req->dma_going = 1;
1665 	pch_udc_enable_ep_interrupts(ep->dev, UDC_EPINT_OUT_EP0 << ep->num);
1666 	pch_udc_set_dma(ep->dev, DMA_DIR_RX);
1667 	pch_udc_ep_clear_nak(ep);
1668 	pch_udc_ep_set_rrdy(ep);
1669 }
1670 
1671 /**
1672  * pch_udc_pcd_ep_enable() - This API enables the endpoint. It is called
1673  *				from gadget driver
1674  * @usbep:	Reference to the USB endpoint structure
1675  * @desc:	Reference to the USB endpoint descriptor structure
1676  *
1677  * Return codes:
1678  *	0:		Success
1679  *	-EINVAL:
1680  *	-ESHUTDOWN:
1681  */
1682 static int pch_udc_pcd_ep_enable(struct usb_ep *usbep,
1683 				    const struct usb_endpoint_descriptor *desc)
1684 {
1685 	struct pch_udc_ep	*ep;
1686 	struct pch_udc_dev	*dev;
1687 	unsigned long		iflags;
1688 
1689 	if (!usbep || (usbep->name == ep0_string) || !desc ||
1690 	    (desc->bDescriptorType != USB_DT_ENDPOINT) || !desc->wMaxPacketSize)
1691 		return -EINVAL;
1692 
1693 	ep = container_of(usbep, struct pch_udc_ep, ep);
1694 	dev = ep->dev;
1695 	if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
1696 		return -ESHUTDOWN;
1697 	spin_lock_irqsave(&dev->lock, iflags);
1698 	ep->ep.desc = desc;
1699 	ep->halted = 0;
1700 	pch_udc_ep_enable(ep, &ep->dev->cfg_data, desc);
1701 	ep->ep.maxpacket = usb_endpoint_maxp(desc);
1702 	pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1703 	spin_unlock_irqrestore(&dev->lock, iflags);
1704 	return 0;
1705 }
1706 
1707 /**
1708  * pch_udc_pcd_ep_disable() - This API disables endpoint and is called
1709  *				from gadget driver
1710  * @usbep:	Reference to the USB endpoint structure
1711  *
1712  * Return codes:
1713  *	0:		Success
1714  *	-EINVAL:
1715  */
1716 static int pch_udc_pcd_ep_disable(struct usb_ep *usbep)
1717 {
1718 	struct pch_udc_ep	*ep;
1719 	unsigned long	iflags;
1720 
1721 	if (!usbep)
1722 		return -EINVAL;
1723 
1724 	ep = container_of(usbep, struct pch_udc_ep, ep);
1725 	if ((usbep->name == ep0_string) || !ep->ep.desc)
1726 		return -EINVAL;
1727 
1728 	spin_lock_irqsave(&ep->dev->lock, iflags);
1729 	empty_req_queue(ep);
1730 	ep->halted = 1;
1731 	pch_udc_ep_disable(ep);
1732 	pch_udc_disable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1733 	ep->ep.desc = NULL;
1734 	INIT_LIST_HEAD(&ep->queue);
1735 	spin_unlock_irqrestore(&ep->dev->lock, iflags);
1736 	return 0;
1737 }
1738 
1739 /**
1740  * pch_udc_alloc_request() - This function allocates request structure.
1741  *				It is called by gadget driver
1742  * @usbep:	Reference to the USB endpoint structure
1743  * @gfp:	Flag to be used while allocating memory
1744  *
1745  * Return codes:
1746  *	NULL:			Failure
1747  *	Allocated address:	Success
1748  */
1749 static struct usb_request *pch_udc_alloc_request(struct usb_ep *usbep,
1750 						  gfp_t gfp)
1751 {
1752 	struct pch_udc_request		*req;
1753 	struct pch_udc_ep		*ep;
1754 	struct pch_udc_data_dma_desc	*dma_desc;
1755 
1756 	if (!usbep)
1757 		return NULL;
1758 	ep = container_of(usbep, struct pch_udc_ep, ep);
1759 	req = kzalloc(sizeof *req, gfp);
1760 	if (!req)
1761 		return NULL;
1762 	req->req.dma = DMA_ADDR_INVALID;
1763 	req->dma = DMA_ADDR_INVALID;
1764 	INIT_LIST_HEAD(&req->queue);
1765 	if (!ep->dev->dma_addr)
1766 		return &req->req;
1767 	/* allocate the first data descriptor for this request from the data pool */
1768 	dma_desc = dma_pool_alloc(ep->dev->data_requests, gfp,
1769 				  &req->td_data_phys);
1770 	if (!dma_desc) {
1771 		kfree(req);
1772 		return NULL;
1773 	}
1774 	/* mark the descriptor HOST_BUSY so the controller does not use it yet */
1775 	dma_desc->status |= PCH_UDC_BS_HST_BSY;
1776 	dma_desc->dataptr = cpu_to_le32(DMA_ADDR_INVALID);
1777 	req->td_data = dma_desc;
1778 	req->td_data_last = dma_desc;
1779 	req->chain_len = 1;
1780 	return &req->req;
1781 }
1782 
1783 /**
1784  * pch_udc_free_request() - This function frees request structure.
1785  *				It is called by gadget driver
1786  * @usbep:	Reference to the USB endpoint structure
1787  * @usbreq:	Reference to the USB request
1788  */
1789 static void pch_udc_free_request(struct usb_ep *usbep,
1790 				  struct usb_request *usbreq)
1791 {
1792 	struct pch_udc_ep	*ep;
1793 	struct pch_udc_request	*req;
1794 	struct pch_udc_dev	*dev;
1795 
1796 	if (!usbep || !usbreq)
1797 		return;
1798 	ep = container_of(usbep, struct pch_udc_ep, ep);
1799 	req = container_of(usbreq, struct pch_udc_request, req);
1800 	dev = ep->dev;
1801 	if (!list_empty(&req->queue))
1802 		dev_err(&dev->pdev->dev, "%s: %s req=0x%p queue not empty\n",
1803 			__func__, usbep->name, req);
1804 	if (req->td_data != NULL) {
1805 		if (req->chain_len > 1)
1806 			pch_udc_free_dma_chain(ep->dev, req);
1807 		dma_pool_free(ep->dev->data_requests, req->td_data,
1808 			      req->td_data_phys);
1809 	}
1810 	kfree(req);
1811 }
1812 
1813 /**
1814  * pch_udc_pcd_queue() - This function queues a request packet. It is called
1815  *			by gadget driver
1816  * @usbep:	Reference to the USB endpoint structure
1817  * @usbreq:	Reference to the USB request
1818  * @gfp:	Flag to be used while mapping the data buffer
1819  *
1820  * Return codes:
1821  *	0:			Success
1822  *	linux error number:	Failure
1823  */
1824 static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
1825 								 gfp_t gfp)
1826 {
1827 	int retval = 0;
1828 	struct pch_udc_ep	*ep;
1829 	struct pch_udc_dev	*dev;
1830 	struct pch_udc_request	*req;
1831 	unsigned long	iflags;
1832 
1833 	if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf)
1834 		return -EINVAL;
1835 	ep = container_of(usbep, struct pch_udc_ep, ep);
1836 	dev = ep->dev;
1837 	if (!ep->ep.desc && ep->num)
1838 		return -EINVAL;
1839 	req = container_of(usbreq, struct pch_udc_request, req);
1840 	if (!list_empty(&req->queue))
1841 		return -EINVAL;
1842 	if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
1843 		return -ESHUTDOWN;
1844 	spin_lock_irqsave(&dev->lock, iflags);
1845 	/* map the buffer for dma */
1846 	if (usbreq->length &&
1847 	    ((usbreq->dma == DMA_ADDR_INVALID) || !usbreq->dma)) {
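		/*
		 * The DMA engine appears to require 4-byte-aligned buffers:
		 * an aligned buffer is mapped directly, an unaligned one is
		 * bounced through a driver-allocated copy in req->buf.
		 */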
1848 		if (!((unsigned long)(usbreq->buf) & 0x03)) {
1849 			if (ep->in)
1850 				usbreq->dma = dma_map_single(&dev->pdev->dev,
1851 							     usbreq->buf,
1852 							     usbreq->length,
1853 							     DMA_TO_DEVICE);
1854 			else
1855 				usbreq->dma = dma_map_single(&dev->pdev->dev,
1856 							     usbreq->buf,
1857 							     usbreq->length,
1858 							     DMA_FROM_DEVICE);
1859 		} else {
1860 			req->buf = kzalloc(usbreq->length, GFP_ATOMIC);
1861 			if (!req->buf) {
1862 				retval = -ENOMEM;
1863 				goto probe_end;
1864 			}
1865 			if (ep->in) {
1866 				memcpy(req->buf, usbreq->buf, usbreq->length);
1867 				req->dma = dma_map_single(&dev->pdev->dev,
1868 							  req->buf,
1869 							  usbreq->length,
1870 							  DMA_TO_DEVICE);
1871 			} else
1872 				req->dma = dma_map_single(&dev->pdev->dev,
1873 							  req->buf,
1874 							  usbreq->length,
1875 							  DMA_FROM_DEVICE);
1876 		}
1877 		req->dma_mapped = 1;
1878 	}
1879 	if (usbreq->length > 0) {
1880 		retval = prepare_dma(ep, req, GFP_ATOMIC);
1881 		if (retval)
1882 			goto probe_end;
1883 	}
1884 	usbreq->actual = 0;
1885 	usbreq->status = -EINPROGRESS;
1886 	req->dma_done = 0;
1887 	if (list_empty(&ep->queue) && !ep->halted) {
1888 		/* no pending transfer, so start this req */
1889 		if (!usbreq->length) {
1890 			process_zlp(ep, req);
1891 			retval = 0;
1892 			goto probe_end;
1893 		}
1894 		if (!ep->in) {
1895 			pch_udc_start_rxrequest(ep, req);
1896 		} else {
1897 			/*
1898 			 * For IN transfers the descriptors will be programmed
1899 			 * and the P bit will be set when we receive
1900 			 * an IN token.
1901 			 */
1902 			pch_udc_wait_ep_stall(ep);
1903 			pch_udc_ep_clear_nak(ep);
1904 			pch_udc_enable_ep_interrupts(ep->dev, (1 << ep->num));
1905 		}
1906 	}
1907 	/* Now add this request to the ep's pending requests */
1908 	if (req != NULL)
1909 		list_add_tail(&req->queue, &ep->queue);
1910 
1911 probe_end:
1912 	spin_unlock_irqrestore(&dev->lock, iflags);
1913 	return retval;
1914 }
1915 
1916 /**
1917  * pch_udc_pcd_dequeue() - This function de-queues a request packet.
1918  *				It is called by gadget driver
1919  * @usbep:	Reference to the USB endpoint structure
1920  * @usbreq:	Reference to the USB request
1921  *
1922  * Return codes:
1923  *	0:			Success
1924  *	linux error number:	Failure
1925  */
1926 static int pch_udc_pcd_dequeue(struct usb_ep *usbep,
1927 				struct usb_request *usbreq)
1928 {
1929 	struct pch_udc_ep	*ep;
1930 	struct pch_udc_request	*req;
1931 	unsigned long		flags;
1932 	int ret = -EINVAL;
1933 
1934 	ep = container_of(usbep, struct pch_udc_ep, ep);
1935 	if (!usbep || !usbreq || (!ep->ep.desc && ep->num))
1936 		return ret;
1937 	req = container_of(usbreq, struct pch_udc_request, req);
1938 	spin_lock_irqsave(&ep->dev->lock, flags);
1939 	/* make sure it's still queued on this endpoint */
1940 	list_for_each_entry(req, &ep->queue, queue) {
1941 		if (&req->req == usbreq) {
1942 			pch_udc_ep_set_nak(ep);
1943 			if (!list_empty(&req->queue))
1944 				complete_req(ep, req, -ECONNRESET);
1945 			ret = 0;
1946 			break;
1947 		}
1948 	}
1949 	spin_unlock_irqrestore(&ep->dev->lock, flags);
1950 	return ret;
1951 }
1952 
1953 /**
1954  * pch_udc_pcd_set_halt() - This function Sets or clear the endpoint halt
1955  *			    feature
1956  * @usbep:	Reference to the USB endpoint structure
1957  * @halt:	Specifies whether to set or clear the feature
1958  *
1959  * Return codes:
1960  *	0:			Success
1961  *	linux error number:	Failure
1962  */
1963 static int pch_udc_pcd_set_halt(struct usb_ep *usbep, int halt)
1964 {
1965 	struct pch_udc_ep	*ep;
1966 	unsigned long iflags;
1967 	int ret;
1968 
1969 	if (!usbep)
1970 		return -EINVAL;
1971 	ep = container_of(usbep, struct pch_udc_ep, ep);
1972 	if (!ep->ep.desc && !ep->num)
1973 		return -EINVAL;
1974 	if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
1975 		return -ESHUTDOWN;
1976 	spin_lock_irqsave(&udc_stall_spinlock, iflags);
1977 	if (list_empty(&ep->queue)) {
1978 		if (halt) {
1979 			if (ep->num == PCH_UDC_EP0)
1980 				ep->dev->stall = 1;
1981 			pch_udc_ep_set_stall(ep);
1982 			pch_udc_enable_ep_interrupts(
1983 				ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1984 		} else {
1985 			pch_udc_ep_clear_stall(ep);
1986 		}
1987 		ret = 0;
1988 	} else {
1989 		ret = -EAGAIN;
1990 	}
1991 	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
1992 	return ret;
1993 }
1994 
1995 /**
1996  * pch_udc_pcd_set_wedge() - This function sets the endpoint halt feature
1997  *				and wedges the endpoint so that only the
1998  *				gadget driver can clear the halt
1999  * @usbep:	Reference to the USB endpoint structure
2000  *
2001  * Return codes:
2002  *	0:			Success
2003  *	linux error number:	Failure
2004  */
2005 static int pch_udc_pcd_set_wedge(struct usb_ep *usbep)
2006 {
2007 	struct pch_udc_ep	*ep;
2008 	unsigned long iflags;
2009 	int ret;
2010 
2011 	if (!usbep)
2012 		return -EINVAL;
2013 	ep = container_of(usbep, struct pch_udc_ep, ep);
2014 	if (!ep->ep.desc && !ep->num)
2015 		return -EINVAL;
2016 	if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
2017 		return -ESHUTDOWN;
2018 	spin_lock_irqsave(&udc_stall_spinlock, iflags);
2019 	if (!list_empty(&ep->queue)) {
2020 		ret = -EAGAIN;
2021 	} else {
2022 		if (ep->num == PCH_UDC_EP0)
2023 			ep->dev->stall = 1;
2024 		pch_udc_ep_set_stall(ep);
2025 		pch_udc_enable_ep_interrupts(ep->dev,
2026 					     PCH_UDC_EPINT(ep->in, ep->num));
2027 		ep->dev->prot_stall = 1;
2028 		ret = 0;
2029 	}
2030 	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
2031 	return ret;
2032 }
2033 
2034 /**
2035  * pch_udc_pcd_fifo_flush() - This function flushes the FIFO of the specified endpoint
2036  * @usbep:	Reference to the USB endpoint structure
2037  */
2038 static void pch_udc_pcd_fifo_flush(struct usb_ep *usbep)
2039 {
2040 	struct pch_udc_ep  *ep;
2041 
2042 	if (!usbep)
2043 		return;
2044 
2045 	ep = container_of(usbep, struct pch_udc_ep, ep);
2046 	if (ep->ep.desc || !ep->num)
2047 		pch_udc_ep_fifo_flush(ep, ep->in);
2048 }
2049 
2050 static const struct usb_ep_ops pch_udc_ep_ops = {
2051 	.enable		= pch_udc_pcd_ep_enable,
2052 	.disable	= pch_udc_pcd_ep_disable,
2053 	.alloc_request	= pch_udc_alloc_request,
2054 	.free_request	= pch_udc_free_request,
2055 	.queue		= pch_udc_pcd_queue,
2056 	.dequeue	= pch_udc_pcd_dequeue,
2057 	.set_halt	= pch_udc_pcd_set_halt,
2058 	.set_wedge	= pch_udc_pcd_set_wedge,
2059 	.fifo_status	= NULL,
2060 	.fifo_flush	= pch_udc_pcd_fifo_flush,
2061 };
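
/*
 * These ops are not called directly; a function (gadget) driver reaches them
 * through the generic usb_ep_*() wrappers.  A minimal sketch of the typical
 * call sequence (the buffer, length and completion-handler names are
 * illustrative, not part of this driver):
 *
 *	usb_ep_enable(ep);                          calls pch_udc_pcd_ep_enable()
 *	req = usb_ep_alloc_request(ep, GFP_KERNEL);  calls pch_udc_alloc_request()
 *	req->buf = buf;
 *	req->length = len;
 *	req->complete = my_complete;                 invoked via complete_req()
 *	usb_ep_queue(ep, req, GFP_ATOMIC);           calls pch_udc_pcd_queue()
 */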
2062 
2063 /**
2064  * pch_udc_init_setup_buff() - This function initializes the SETUP buffer
2065  * @td_stp:	Reference to the SETUP buffer structure
2066  */
2067 static void pch_udc_init_setup_buff(struct pch_udc_stp_dma_desc *td_stp)
2068 {
2069 	static u32	pky_marker;
2070 
2071 	if (!td_stp)
2072 		return;
2073 	td_stp->reserved = ++pky_marker;
2074 	memset(&td_stp->request, 0xFF, sizeof td_stp->request);
2075 	td_stp->status = PCH_UDC_BS_HST_RDY;
2076 }
2077 
2078 /**
2079  * pch_udc_start_next_txrequest() - This function starts
2080  *					the next transmit request
2081  * @ep:	Reference to the endpoint structure
2082  */
2083 static void pch_udc_start_next_txrequest(struct pch_udc_ep *ep)
2084 {
2085 	struct pch_udc_request *req;
2086 	struct pch_udc_data_dma_desc *td_data;
2087 
2088 	if (pch_udc_read_ep_control(ep) & UDC_EPCTL_P)
2089 		return;
2090 
2091 	if (list_empty(&ep->queue))
2092 		return;
2093 
2094 	/* next request */
2095 	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2096 	if (req->dma_going)
2097 		return;
2098 	if (!req->td_data)
2099 		return;
2100 	pch_udc_wait_ep_stall(ep);
2101 	req->dma_going = 1;
2102 	pch_udc_ep_set_ddptr(ep, 0);
2103 	td_data = req->td_data;
2104 	while (1) {
2105 		td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
2106 				   PCH_UDC_BS_HST_RDY;
2107 		if ((td_data->status & PCH_UDC_DMA_LAST) == PCH_UDC_DMA_LAST)
2108 			break;
2109 		td_data = phys_to_virt(td_data->next);
2110 	}
2111 	pch_udc_ep_set_ddptr(ep, req->td_data_phys);
2112 	pch_udc_set_dma(ep->dev, DMA_DIR_TX);
2113 	pch_udc_ep_set_pd(ep);
2114 	pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
2115 	pch_udc_ep_clear_nak(ep);
2116 }
2117 
2118 /**
2119  * pch_udc_complete_transfer() - This function completes a transfer
2120  * @ep:		Reference to the endpoint structure
2121  */
2122 static void pch_udc_complete_transfer(struct pch_udc_ep *ep)
2123 {
2124 	struct pch_udc_request *req;
2125 	struct pch_udc_dev *dev = ep->dev;
2126 
2127 	if (list_empty(&ep->queue))
2128 		return;
2129 	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2130 	if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
2131 	    PCH_UDC_BS_DMA_DONE)
2132 		return;
2133 	if ((req->td_data_last->status & PCH_UDC_RXTX_STS) !=
2134 	     PCH_UDC_RTS_SUCC) {
2135 		dev_err(&dev->pdev->dev,
2136 			"Invalid RXTX status (0x%08x) epstatus=0x%08x\n",
2137 			(req->td_data_last->status & PCH_UDC_RXTX_STS),
2138 			(int)(ep->epsts));
2139 		return;
2140 	}
2141 
2142 	req->req.actual = req->req.length;
2143 	req->td_data_last->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
2144 	req->td_data->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
2145 	complete_req(ep, req, 0);
2146 	req->dma_going = 0;
2147 	if (!list_empty(&ep->queue)) {
2148 		pch_udc_wait_ep_stall(ep);
2149 		pch_udc_ep_clear_nak(ep);
2150 		pch_udc_enable_ep_interrupts(ep->dev,
2151 					     PCH_UDC_EPINT(ep->in, ep->num));
2152 	} else {
2153 		pch_udc_disable_ep_interrupts(ep->dev,
2154 					      PCH_UDC_EPINT(ep->in, ep->num));
2155 	}
2156 }
2157 
2158 /**
2159  * pch_udc_complete_receiver() - This function completes an OUT (receive) transfer
2160  * @ep:		Reference to the endpoint structure
2161  */
2162 static void pch_udc_complete_receiver(struct pch_udc_ep *ep)
2163 {
2164 	struct pch_udc_request *req;
2165 	struct pch_udc_dev *dev = ep->dev;
2166 	unsigned int count;
2167 	struct pch_udc_data_dma_desc *td;
2168 	dma_addr_t addr;
2169 
2170 	if (list_empty(&ep->queue))
2171 		return;
2172 	/* next request */
2173 	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2174 	pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
2175 	pch_udc_ep_set_ddptr(ep, 0);
2176 	if ((req->td_data_last->status & PCH_UDC_BUFF_STS) ==
2177 	    PCH_UDC_BS_DMA_DONE)
2178 		td = req->td_data_last;
2179 	else
2180 		td = req->td_data;
2181 
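	/*
	 * Walk the descriptor chain until a completed (DMA_DONE) descriptor
	 * carrying the DMA_LAST flag is found; its RXTX_BYTES field holds the
	 * number of bytes received for this request.
	 */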
2182 	while (1) {
2183 		if ((td->status & PCH_UDC_RXTX_STS) != PCH_UDC_RTS_SUCC) {
2184 			dev_err(&dev->pdev->dev,
2185 				"Invalid RXTX status=0x%08x epstatus=0x%08x\n",
2186 				(td->status & PCH_UDC_RXTX_STS),
2187 				(int)(ep->epsts));
2188 			return;
2189 		}
2190 		if ((td->status & PCH_UDC_BUFF_STS) == PCH_UDC_BS_DMA_DONE)
2191 			if (td->status & PCH_UDC_DMA_LAST) {
2192 				count = td->status & PCH_UDC_RXTX_BYTES;
2193 				break;
2194 			}
2195 		if (td == req->td_data_last) {
2196 			dev_err(&dev->pdev->dev, "Incomplete RX descriptor\n");
2197 			return;
2198 		}
2199 		addr = (dma_addr_t)td->next;
2200 		td = phys_to_virt(addr);
2201 	}
2202 	/* on 64k packets the RXBYTES field is zero */
2203 	if (!count && (req->req.length == UDC_DMA_MAXPACKET))
2204 		count = UDC_DMA_MAXPACKET;
2205 	req->td_data->status |= PCH_UDC_DMA_LAST;
2206 	td->status |= PCH_UDC_BS_HST_BSY;
2207 
2208 	req->dma_going = 0;
2209 	req->req.actual = count;
2210 	complete_req(ep, req, 0);
2211 	/* If there is a new or failed request, start it now */
2212 	if (!list_empty(&ep->queue)) {
2213 		req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2214 		pch_udc_start_rxrequest(ep, req);
2215 	}
2216 }
2217 
2218 /**
2219  * pch_udc_svc_data_in() - This function processes endpoint interrupts
2220  *				for IN endpoints
2221  * @dev:	Reference to the device structure
2222  * @ep_num:	Endpoint that generated the interrupt
2223  */
2224 static void pch_udc_svc_data_in(struct pch_udc_dev *dev, int ep_num)
2225 {
2226 	u32	epsts;
2227 	struct pch_udc_ep	*ep;
2228 
2229 	ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
2230 	epsts = ep->epsts;
2231 	ep->epsts = 0;
2232 
2233 	if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA  | UDC_EPSTS_HE |
2234 		       UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
2235 		       UDC_EPSTS_RSS | UDC_EPSTS_XFERDONE)))
2236 		return;
2237 	if ((epsts & UDC_EPSTS_BNA))
2238 		return;
2239 	if (epsts & UDC_EPSTS_HE)
2240 		return;
2241 	if (epsts & UDC_EPSTS_RSS) {
2242 		pch_udc_ep_set_stall(ep);
2243 		pch_udc_enable_ep_interrupts(ep->dev,
2244 					     PCH_UDC_EPINT(ep->in, ep->num));
2245 	}
2246 	if (epsts & UDC_EPSTS_RCS) {
2247 		if (!dev->prot_stall) {
2248 			pch_udc_ep_clear_stall(ep);
2249 		} else {
2250 			pch_udc_ep_set_stall(ep);
2251 			pch_udc_enable_ep_interrupts(ep->dev,
2252 						PCH_UDC_EPINT(ep->in, ep->num));
2253 		}
2254 	}
2255 	if (epsts & UDC_EPSTS_TDC)
2256 		pch_udc_complete_transfer(ep);
2257 	/* On IN interrupt, provide data if we have any */
2258 	if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_RSS) &&
2259 	    !(epsts & UDC_EPSTS_TDC) && !(epsts & UDC_EPSTS_TXEMPTY))
2260 		pch_udc_start_next_txrequest(ep);
2261 }
2262 
2263 /**
2264  * pch_udc_svc_data_out() - Handles interrupts from OUT endpoint
2265  * @dev:	Reference to the device structure
2266  * @ep_num:	Endpoint that generated the interrupt
2267  */
2268 static void pch_udc_svc_data_out(struct pch_udc_dev *dev, int ep_num)
2269 {
2270 	u32			epsts;
2271 	struct pch_udc_ep		*ep;
2272 	struct pch_udc_request		*req = NULL;
2273 
2274 	ep = &dev->ep[UDC_EPOUT_IDX(ep_num)];
2275 	epsts = ep->epsts;
2276 	ep->epsts = 0;
2277 
2278 	if ((epsts & UDC_EPSTS_BNA) && (!list_empty(&ep->queue))) {
2279 		/* next request */
2280 		req = list_entry(ep->queue.next, struct pch_udc_request,
2281 				 queue);
2282 		if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
2283 		     PCH_UDC_BS_DMA_DONE) {
2284 			if (!req->dma_going)
2285 				pch_udc_start_rxrequest(ep, req);
2286 			return;
2287 		}
2288 	}
2289 	if (epsts & UDC_EPSTS_HE)
2290 		return;
2291 	if (epsts & UDC_EPSTS_RSS) {
2292 		pch_udc_ep_set_stall(ep);
2293 		pch_udc_enable_ep_interrupts(ep->dev,
2294 					     PCH_UDC_EPINT(ep->in, ep->num));
2295 	}
2296 	if (epsts & UDC_EPSTS_RCS) {
2297 		if (!dev->prot_stall) {
2298 			pch_udc_ep_clear_stall(ep);
2299 		} else {
2300 			pch_udc_ep_set_stall(ep);
2301 			pch_udc_enable_ep_interrupts(ep->dev,
2302 						PCH_UDC_EPINT(ep->in, ep->num));
2303 		}
2304 	}
2305 	if (((epsts & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2306 	    UDC_EPSTS_OUT_DATA) {
2307 		if (ep->dev->prot_stall == 1) {
2308 			pch_udc_ep_set_stall(ep);
2309 			pch_udc_enable_ep_interrupts(ep->dev,
2310 						PCH_UDC_EPINT(ep->in, ep->num));
2311 		} else {
2312 			pch_udc_complete_receiver(ep);
2313 		}
2314 	}
2315 	if (list_empty(&ep->queue))
2316 		pch_udc_set_dma(dev, DMA_DIR_RX);
2317 }
2318 
2319 /**
2320  * pch_udc_svc_control_in() - Handle Control IN endpoint interrupts
2321  * @dev:	Reference to the device structure
2322  */
2323 static void pch_udc_svc_control_in(struct pch_udc_dev *dev)
2324 {
2325 	u32	epsts;
2326 	struct pch_udc_ep	*ep;
2327 	struct pch_udc_ep	*ep_out;
2328 
2329 	ep = &dev->ep[UDC_EP0IN_IDX];
2330 	ep_out = &dev->ep[UDC_EP0OUT_IDX];
2331 	epsts = ep->epsts;
2332 	ep->epsts = 0;
2333 
2334 	if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA | UDC_EPSTS_HE |
2335 		       UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
2336 		       UDC_EPSTS_XFERDONE)))
2337 		return;
2338 	if ((epsts & UDC_EPSTS_BNA))
2339 		return;
2340 	if (epsts & UDC_EPSTS_HE)
2341 		return;
2342 	if ((epsts & UDC_EPSTS_TDC) && (!dev->stall)) {
2343 		pch_udc_complete_transfer(ep);
2344 		pch_udc_clear_dma(dev, DMA_DIR_RX);
2345 		ep_out->td_data->status = (ep_out->td_data->status &
2346 					~PCH_UDC_BUFF_STS) |
2347 					PCH_UDC_BS_HST_RDY;
2348 		pch_udc_ep_clear_nak(ep_out);
2349 		pch_udc_set_dma(dev, DMA_DIR_RX);
2350 		pch_udc_ep_set_rrdy(ep_out);
2351 	}
2352 	/* On IN interrupt, provide data if we have any */
2353 	if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_TDC) &&
2354 	     !(epsts & UDC_EPSTS_TXEMPTY))
2355 		pch_udc_start_next_txrequest(ep);
2356 }
2357 
2358 /**
2359  * pch_udc_svc_control_out() - Routine that handles Control
2360  *					OUT endpoint interrupts
2361  * @dev:	Reference to the device structure
2362  */
2363 static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
2364 	__releases(&dev->lock)
2365 	__acquires(&dev->lock)
2366 {
2367 	u32	stat;
2368 	int setup_supported;
2369 	struct pch_udc_ep	*ep;
2370 
2371 	ep = &dev->ep[UDC_EP0OUT_IDX];
2372 	stat = ep->epsts;
2373 	ep->epsts = 0;
2374 
2375 	/* If setup data */
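	/*
	 * SETUP packets land in the dedicated setup buffer (td_stp) rather
	 * than in a data descriptor: copy out the 8-byte request, re-arm the
	 * buffer, and let the gadget driver's setup() callback decide whether
	 * the request is supported.
	 */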
2376 	if (((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2377 	    UDC_EPSTS_OUT_SETUP) {
2378 		dev->stall = 0;
2379 		dev->ep[UDC_EP0IN_IDX].halted = 0;
2380 		dev->ep[UDC_EP0OUT_IDX].halted = 0;
2381 		dev->setup_data = ep->td_stp->request;
2382 		pch_udc_init_setup_buff(ep->td_stp);
2383 		pch_udc_clear_dma(dev, DMA_DIR_RX);
2384 		pch_udc_ep_fifo_flush(&(dev->ep[UDC_EP0IN_IDX]),
2385 				      dev->ep[UDC_EP0IN_IDX].in);
2386 		if ((dev->setup_data.bRequestType & USB_DIR_IN))
2387 			dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
2388 		else /* OUT */
2389 			dev->gadget.ep0 = &ep->ep;
2390 		spin_lock(&dev->lock);
2391 		/* Bulk-Only Mass Storage Reset: clear the protocol stall */
2392 		if ((dev->setup_data.bRequestType == 0x21) &&
2393 		    (dev->setup_data.bRequest == 0xFF))
2394 			dev->prot_stall = 0;
2395 		/* call gadget with setup data received */
2396 		setup_supported = dev->driver->setup(&dev->gadget,
2397 						     &dev->setup_data);
2398 		spin_unlock(&dev->lock);
2399 
2400 		if (dev->setup_data.bRequestType & USB_DIR_IN) {
2401 			ep->td_data->status = (ep->td_data->status &
2402 						~PCH_UDC_BUFF_STS) |
2403 						PCH_UDC_BS_HST_RDY;
2404 			pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
2405 		}
2406 		/* ep0 in returns data on IN phase */
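		/*
		 * setup() return values: a small non-negative value means the
		 * request was handled, a negative value means it is
		 * unsupported and ep0 is stalled, and a large positive value
		 * (USB_GADGET_DELAYED_STATUS, for instance) leaves the driver
		 * waiting for a zero-length-packet acknowledgement
		 * (waiting_zlp_ack).
		 */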
2407 		if (setup_supported >= 0 && setup_supported <
2408 					    UDC_EP0IN_MAX_PKT_SIZE) {
2409 			pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
2410 			/* Gadget would have queued a request when
2411 			 * we called the setup */
2412 			if (!(dev->setup_data.bRequestType & USB_DIR_IN)) {
2413 				pch_udc_set_dma(dev, DMA_DIR_RX);
2414 				pch_udc_ep_clear_nak(ep);
2415 			}
2416 		} else if (setup_supported < 0) {
2417 			/* if unsupported request, then stall */
2418 			pch_udc_ep_set_stall(&(dev->ep[UDC_EP0IN_IDX]));
2419 			pch_udc_enable_ep_interrupts(ep->dev,
2420 						PCH_UDC_EPINT(ep->in, ep->num));
2421 			dev->stall = 0;
2422 			pch_udc_set_dma(dev, DMA_DIR_RX);
2423 		} else {
2424 			dev->waiting_zlp_ack = 1;
2425 		}
2426 	} else if ((((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2427 		     UDC_EPSTS_OUT_DATA) && !dev->stall) {
2428 		pch_udc_clear_dma(dev, DMA_DIR_RX);
2429 		pch_udc_ep_set_ddptr(ep, 0);
2430 		if (!list_empty(&ep->queue)) {
2431 			ep->epsts = stat;
2432 			pch_udc_svc_data_out(dev, PCH_UDC_EP0);
2433 		}
2434 		pch_udc_set_dma(dev, DMA_DIR_RX);
2435 	}
2436 	pch_udc_ep_set_rrdy(ep);
2437 }
2438 
2439 
2440 /**
2441  * pch_udc_postsvc_epinters() - This function enables endpoint interrupts
2442  *				and clears NAK status
2443  * @dev:	Reference to the device structure
2444  * @ep_num:	End point number
2445  */
2446 static void pch_udc_postsvc_epinters(struct pch_udc_dev *dev, int ep_num)
2447 {
2448 	struct pch_udc_ep	*ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
2449 	if (list_empty(&ep->queue))
2450 		return;
2451 	pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
2452 	pch_udc_ep_clear_nak(ep);
2453 }
2454 
2455 /**
2456  * pch_udc_read_all_epstatus() - This function reads the status of all endpoints
2457  * @dev:	Reference to the device structure
2458  * @ep_intr:	Status of endpoint interrupt
2459  */
2460 static void pch_udc_read_all_epstatus(struct pch_udc_dev *dev, u32 ep_intr)
2461 {
2462 	int i;
2463 	struct pch_udc_ep	*ep;
2464 
2465 	for (i = 0; i < PCH_UDC_USED_EP_NUM; i++) {
2466 		/* IN endpoint interrupts (low 16 bits) */
2467 		if (ep_intr & (0x1 << i)) {
2468 			ep = &dev->ep[UDC_EPIN_IDX(i)];
2469 			ep->epsts = pch_udc_read_ep_status(ep);
2470 			pch_udc_clear_ep_status(ep, ep->epsts);
2471 		}
2472 		/* OUT endpoint interrupts (upper 16 bits) */
2473 		if (ep_intr & (0x10000 << i)) {
2474 			ep = &dev->ep[UDC_EPOUT_IDX(i)];
2475 			ep->epsts = pch_udc_read_ep_status(ep);
2476 			pch_udc_clear_ep_status(ep, ep->epsts);
2477 		}
2478 	}
2479 }
2480 
2481 /**
2482  * pch_udc_activate_control_ep() - This function enables the control endpoints
2483  *					for traffic after a reset
2484  * @dev:	Reference to the device structure
2485  */
2486 static void pch_udc_activate_control_ep(struct pch_udc_dev *dev)
2487 {
2488 	struct pch_udc_ep	*ep;
2489 	u32 val;
2490 
2491 	/* Setup the IN endpoint */
2492 	ep = &dev->ep[UDC_EP0IN_IDX];
2493 	pch_udc_clear_ep_control(ep);
2494 	pch_udc_ep_fifo_flush(ep, ep->in);
2495 	pch_udc_ep_set_bufsz(ep, UDC_EP0IN_BUFF_SIZE, ep->in);
2496 	pch_udc_ep_set_maxpkt(ep, UDC_EP0IN_MAX_PKT_SIZE);
2497 	/* Initialize the IN EP Descriptor */
2498 	ep->td_data      = NULL;
2499 	ep->td_stp       = NULL;
2500 	ep->td_data_phys = 0;
2501 	ep->td_stp_phys  = 0;
2502 
2503 	/* Setup the OUT endpoint */
2504 	ep = &dev->ep[UDC_EP0OUT_IDX];
2505 	pch_udc_clear_ep_control(ep);
2506 	pch_udc_ep_fifo_flush(ep, ep->in);
2507 	pch_udc_ep_set_bufsz(ep, UDC_EP0OUT_BUFF_SIZE, ep->in);
2508 	pch_udc_ep_set_maxpkt(ep, UDC_EP0OUT_MAX_PKT_SIZE);
2509 	val = UDC_EP0OUT_MAX_PKT_SIZE << UDC_CSR_NE_MAX_PKT_SHIFT;
2510 	pch_udc_write_csr(ep->dev, val, UDC_EP0OUT_IDX);
2511 
2512 	/* Initialize the SETUP buffer */
2513 	pch_udc_init_setup_buff(ep->td_stp);
2514 	/* Write the pointer address of dma descriptor */
2515 	pch_udc_ep_set_subptr(ep, ep->td_stp_phys);
2516 	/* Write the pointer address of Setup descriptor */
2517 	pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
2518 
2519 	/* Initialize the dma descriptor */
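	/*
	 * The single ep0 OUT data descriptor points back at itself
	 * (next == its own physical address), forming a one-entry ring that
	 * is simply re-armed after each control transfer.
	 */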
2520 	ep->td_data->status  = PCH_UDC_DMA_LAST;
2521 	ep->td_data->dataptr = dev->dma_addr;
2522 	ep->td_data->next    = ep->td_data_phys;
2523 
2524 	pch_udc_ep_clear_nak(ep);
2525 }
2526 
2527 
2528 /**
2529  * pch_udc_svc_ur_interrupt() - This function handles a USB reset interrupt
2530  * @dev:	Reference to driver structure
2531  */
2532 static void pch_udc_svc_ur_interrupt(struct pch_udc_dev *dev)
2533 {
2534 	struct pch_udc_ep	*ep;
2535 	int i;
2536 
2537 	pch_udc_clear_dma(dev, DMA_DIR_TX);
2538 	pch_udc_clear_dma(dev, DMA_DIR_RX);
2539 	/* Mask all endpoint interrupts */
2540 	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
2541 	/* clear all endpoint interrupts */
2542 	pch_udc_write_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
2543 
2544 	for (i = 0; i < PCH_UDC_EP_NUM; i++) {
2545 		ep = &dev->ep[i];
2546 		pch_udc_clear_ep_status(ep, UDC_EPSTS_ALL_CLR_MASK);
2547 		pch_udc_clear_ep_control(ep);
2548 		pch_udc_ep_set_ddptr(ep, 0);
2549 		pch_udc_write_csr(ep->dev, 0x00, i);
2550 	}
2551 	dev->stall = 0;
2552 	dev->prot_stall = 0;
2553 	dev->waiting_zlp_ack = 0;
2554 	dev->set_cfg_not_acked = 0;
2555 
2556 	/* NAK and flush each used endpoint and complete its queued requests */
2557 	for (i = 0; i < (PCH_UDC_USED_EP_NUM*2); i++) {
2558 		ep = &dev->ep[i];
2559 		pch_udc_ep_set_nak(ep);
2560 		pch_udc_ep_fifo_flush(ep, ep->in);
2561 		/* Complete request queue */
2562 		empty_req_queue(ep);
2563 	}
2564 	if (dev->driver) {
2565 		spin_unlock(&dev->lock);
2566 		usb_gadget_udc_reset(&dev->gadget, dev->driver);
2567 		spin_lock(&dev->lock);
2568 	}
2569 }
2570 
2571 /**
2572  * pch_udc_svc_enum_interrupt() - This function handles a USB speed enumeration
2573  *				done interrupt
2574  * @dev:	Reference to driver structure
2575  */
2576 static void pch_udc_svc_enum_interrupt(struct pch_udc_dev *dev)
2577 {
2578 	u32 dev_stat, dev_speed;
2579 	u32 speed = USB_SPEED_FULL;
2580 
2581 	dev_stat = pch_udc_read_device_status(dev);
2582 	dev_speed = (dev_stat & UDC_DEVSTS_ENUM_SPEED_MASK) >>
2583 						 UDC_DEVSTS_ENUM_SPEED_SHIFT;
2584 	switch (dev_speed) {
2585 	case UDC_DEVSTS_ENUM_SPEED_HIGH:
2586 		speed = USB_SPEED_HIGH;
2587 		break;
2588 	case  UDC_DEVSTS_ENUM_SPEED_FULL:
2589 		speed = USB_SPEED_FULL;
2590 		break;
2591 	case  UDC_DEVSTS_ENUM_SPEED_LOW:
2592 		speed = USB_SPEED_LOW;
2593 		break;
2594 	default:
2595 		BUG();
2596 	}
2597 	dev->gadget.speed = speed;
2598 	pch_udc_activate_control_ep(dev);
2599 	pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 | UDC_EPINT_OUT_EP0);
2600 	pch_udc_set_dma(dev, DMA_DIR_TX);
2601 	pch_udc_set_dma(dev, DMA_DIR_RX);
2602 	pch_udc_ep_set_rrdy(&(dev->ep[UDC_EP0OUT_IDX]));
2603 
2604 	/* enable device interrupts */
2605 	pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
2606 					UDC_DEVINT_ES | UDC_DEVINT_ENUM |
2607 					UDC_DEVINT_SI | UDC_DEVINT_SC);
2608 }
2609 
2610 /**
2611  * pch_udc_svc_intf_interrupt() - This function handles a set interface
2612  *				  interrupt
2613  * @dev:	Reference to driver structure
2614  */
2615 static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev)
2616 {
2617 	u32 reg, dev_stat = 0;
2618 	int i;
2619 
2620 	dev_stat = pch_udc_read_device_status(dev);
2621 	dev->cfg_data.cur_intf = (dev_stat & UDC_DEVSTS_INTF_MASK) >>
2622 							 UDC_DEVSTS_INTF_SHIFT;
2623 	dev->cfg_data.cur_alt = (dev_stat & UDC_DEVSTS_ALT_MASK) >>
2624 							 UDC_DEVSTS_ALT_SHIFT;
2625 	dev->set_cfg_not_acked = 1;
2626 	/* Construct the usb request for gadget driver and inform it */
2627 	memset(&dev->setup_data, 0 , sizeof dev->setup_data);
2628 	dev->setup_data.bRequest = USB_REQ_SET_INTERFACE;
2629 	dev->setup_data.bRequestType = USB_RECIP_INTERFACE;
2630 	dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_alt);
2631 	dev->setup_data.wIndex = cpu_to_le16(dev->cfg_data.cur_intf);
2632 	/* program the Endpoint Cfg registers */
2633 	/* Only one end point cfg register */
2634 	reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
2635 	reg = (reg & ~UDC_CSR_NE_INTF_MASK) |
2636 	      (dev->cfg_data.cur_intf << UDC_CSR_NE_INTF_SHIFT);
2637 	reg = (reg & ~UDC_CSR_NE_ALT_MASK) |
2638 	      (dev->cfg_data.cur_alt << UDC_CSR_NE_ALT_SHIFT);
2639 	pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
2640 	for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
2641 		/* clear stall bits */
2642 		pch_udc_ep_clear_stall(&(dev->ep[i]));
2643 		dev->ep[i].halted = 0;
2644 	}
2645 	dev->stall = 0;
2646 	spin_unlock(&dev->lock);
2647 	dev->driver->setup(&dev->gadget, &dev->setup_data);
2648 	spin_lock(&dev->lock);
2649 }
2650 
2651 /**
2652  * pch_udc_svc_cfg_interrupt() - This function handles a set configuration
2653  *				interrupt
2654  * @dev:	Reference to driver structure
2655  */
2656 static void pch_udc_svc_cfg_interrupt(struct pch_udc_dev *dev)
2657 {
2658 	int i;
2659 	u32 reg, dev_stat = 0;
2660 
2661 	dev_stat = pch_udc_read_device_status(dev);
2662 	dev->set_cfg_not_acked = 1;
2663 	dev->cfg_data.cur_cfg = (dev_stat & UDC_DEVSTS_CFG_MASK) >>
2664 				UDC_DEVSTS_CFG_SHIFT;
2665 	/* make usb request for gadget driver */
2666 	memset(&dev->setup_data, 0 , sizeof dev->setup_data);
2667 	dev->setup_data.bRequest = USB_REQ_SET_CONFIGURATION;
2668 	dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_cfg);
2669 	/* program the NE registers */
2670 	/* Only one end point cfg register */
2671 	reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
2672 	reg = (reg & ~UDC_CSR_NE_CFG_MASK) |
2673 	      (dev->cfg_data.cur_cfg << UDC_CSR_NE_CFG_SHIFT);
2674 	pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
2675 	for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
2676 		/* clear stall bits */
2677 		pch_udc_ep_clear_stall(&(dev->ep[i]));
2678 		dev->ep[i].halted = 0;
2679 	}
2680 	dev->stall = 0;
2681 
2682 	/* call the gadget driver with the setup data received */
2683 	spin_unlock(&dev->lock);
2684 	dev->driver->setup(&dev->gadget, &dev->setup_data);
2685 	spin_lock(&dev->lock);
2686 }
2687 
2688 /**
2689  * pch_udc_dev_isr() - This function services device interrupts
2690  *			by invoking appropriate routines.
2691  * @dev:	Reference to the device structure
2692  * @dev_intr:	The Device interrupt status.
2693  */
2694 static void pch_udc_dev_isr(struct pch_udc_dev *dev, u32 dev_intr)
2695 {
2696 	int vbus;
2697 
2698 	/* USB Reset Interrupt */
2699 	if (dev_intr & UDC_DEVINT_UR) {
2700 		pch_udc_svc_ur_interrupt(dev);
2701 		dev_dbg(&dev->pdev->dev, "USB_RESET\n");
2702 	}
2703 	/* Enumeration Done Interrupt */
2704 	if (dev_intr & UDC_DEVINT_ENUM) {
2705 		pch_udc_svc_enum_interrupt(dev);
2706 		dev_dbg(&dev->pdev->dev, "USB_ENUM\n");
2707 	}
2708 	/* Set Interface Interrupt */
2709 	if (dev_intr & UDC_DEVINT_SI)
2710 		pch_udc_svc_intf_interrupt(dev);
2711 	/* Set Config Interrupt */
2712 	if (dev_intr & UDC_DEVINT_SC)
2713 		pch_udc_svc_cfg_interrupt(dev);
2714 	/* USB Suspend interrupt */
2715 	if (dev_intr & UDC_DEVINT_US) {
2716 		if (dev->driver
2717 			&& dev->driver->suspend) {
2718 			spin_unlock(&dev->lock);
2719 			dev->driver->suspend(&dev->gadget);
2720 			spin_lock(&dev->lock);
2721 		}
2722 
2723 		vbus = pch_vbus_gpio_get_value(dev);
2724 		if ((dev->vbus_session == 0)
2725 			&& (vbus != 1)) {
2726 			if (dev->driver && dev->driver->disconnect) {
2727 				spin_unlock(&dev->lock);
2728 				dev->driver->disconnect(&dev->gadget);
2729 				spin_lock(&dev->lock);
2730 			}
2731 			pch_udc_reconnect(dev);
2732 		} else if ((dev->vbus_session == 0)
2733 			&& (vbus == 1)
2734 			&& !dev->vbus_gpio.intr)
2735 			schedule_work(&dev->vbus_gpio.irq_work_fall);
2736 
2737 		dev_dbg(&dev->pdev->dev, "USB_SUSPEND\n");
2738 	}
2739 	/* Clear the SOF interrupt, if enabled */
2740 	if (dev_intr & UDC_DEVINT_SOF)
2741 		dev_dbg(&dev->pdev->dev, "SOF\n");
2742 	/* ES interrupt, IDLE > 3ms on the USB */
2743 	if (dev_intr & UDC_DEVINT_ES)
2744 		dev_dbg(&dev->pdev->dev, "ES\n");
2745 	/* RWKP interrupt */
2746 	if (dev_intr & UDC_DEVINT_RWKP)
2747 		dev_dbg(&dev->pdev->dev, "RWKP\n");
2748 }
2749 
2750 /**
2751  * pch_udc_isr() - This function handles interrupts from the PCH USB Device
2752  * @irq:	Interrupt request number
2753  * @pdev:	Reference to the device structure (passed as a void pointer)
2754  */
2755 static irqreturn_t pch_udc_isr(int irq, void *pdev)
2756 {
2757 	struct pch_udc_dev *dev = (struct pch_udc_dev *) pdev;
2758 	u32 dev_intr, ep_intr;
2759 	int i;
2760 
2761 	dev_intr = pch_udc_read_device_interrupts(dev);
2762 	ep_intr = pch_udc_read_ep_interrupts(dev);
2763 
2764 	/* On hot plug, detect whether the controller is hung up. */
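	/*
	 * If the device interrupt, endpoint interrupt and DEVCFG registers
	 * all read back the same value, the reads are presumed bogus and the
	 * controller is soft-reset below.
	 */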
2765 	if (dev_intr == ep_intr)
2766 		if (dev_intr == pch_udc_readl(dev, UDC_DEVCFG_ADDR)) {
2767 			dev_dbg(&dev->pdev->dev, "UDC: Hung up\n");
2768 			/* The controller is reset */
2769 			pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
2770 			return IRQ_HANDLED;
2771 		}
2772 	if (dev_intr)
2773 		/* Clear device interrupts */
2774 		pch_udc_write_device_interrupts(dev, dev_intr);
2775 	if (ep_intr)
2776 		/* Clear ep interrupts */
2777 		pch_udc_write_ep_interrupts(dev, ep_intr);
2778 	if (!dev_intr && !ep_intr)
2779 		return IRQ_NONE;
2780 	spin_lock(&dev->lock);
2781 	if (dev_intr)
2782 		pch_udc_dev_isr(dev, dev_intr);
2783 	if (ep_intr) {
2784 		pch_udc_read_all_epstatus(dev, ep_intr);
2785 		/* Process Control In interrupts, if present */
2786 		if (ep_intr & UDC_EPINT_IN_EP0) {
2787 			pch_udc_svc_control_in(dev);
2788 			pch_udc_postsvc_epinters(dev, 0);
2789 		}
2790 		/* Process Control Out interrupts, if present */
2791 		if (ep_intr & UDC_EPINT_OUT_EP0)
2792 			pch_udc_svc_control_out(dev);
2793 		/* Process data in end point interrupts */
2794 		for (i = 1; i < PCH_UDC_USED_EP_NUM; i++) {
2795 			if (ep_intr & (1 <<  i)) {
2796 				pch_udc_svc_data_in(dev, i);
2797 				pch_udc_postsvc_epinters(dev, i);
2798 			}
2799 		}
2800 		/* Process data out end point interrupts */
2801 		for (i = UDC_EPINT_OUT_SHIFT + 1; i < (UDC_EPINT_OUT_SHIFT +
2802 						 PCH_UDC_USED_EP_NUM); i++)
2803 			if (ep_intr & (1 <<  i))
2804 				pch_udc_svc_data_out(dev, i -
2805 							 UDC_EPINT_OUT_SHIFT);
2806 	}
2807 	spin_unlock(&dev->lock);
2808 	return IRQ_HANDLED;
2809 }
2810 
2811 /**
2812  * pch_udc_setup_ep0() - This function enables control endpoint for traffic
2813  * @dev:	Reference to the device structure
2814  */
2815 static void pch_udc_setup_ep0(struct pch_udc_dev *dev)
2816 {
2817 	/* enable ep0 interrupts */
2818 	pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 |
2819 						UDC_EPINT_OUT_EP0);
2820 	/* enable device interrupts */
2821 	pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
2822 				       UDC_DEVINT_ES | UDC_DEVINT_ENUM |
2823 				       UDC_DEVINT_SI | UDC_DEVINT_SC);
2824 }
2825 
2826 /**
2827  * pch_udc_pcd_reinit() - This API initializes the endpoint structures
2828  * @dev:	Reference to the driver structure
2829  */
2830 static void pch_udc_pcd_reinit(struct pch_udc_dev *dev)
2831 {
2832 	const char *const ep_string[] = {
2833 		ep0_string, "ep0out", "ep1in", "ep1out", "ep2in", "ep2out",
2834 		"ep3in", "ep3out", "ep4in", "ep4out", "ep5in", "ep5out",
2835 		"ep6in", "ep6out", "ep7in", "ep7out", "ep8in", "ep8out",
2836 		"ep9in", "ep9out", "ep10in", "ep10out", "ep11in", "ep11out",
2837 		"ep12in", "ep12out", "ep13in", "ep13out", "ep14in", "ep14out",
2838 		"ep15in", "ep15out",
2839 	};
2840 	int i;
2841 
2842 	dev->gadget.speed = USB_SPEED_UNKNOWN;
2843 	INIT_LIST_HEAD(&dev->gadget.ep_list);
2844 
2845 	/* Initialize the endpoints structures */
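	/*
	 * dev->ep[] is laid out as {ep0in, ep0out, ep1in, ep1out, ...}: even
	 * indices are IN endpoints, odd indices are OUT endpoints, which is
	 * what the num/in/offset_addr calculations below rely on.
	 */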
2846 	memset(dev->ep, 0, sizeof dev->ep);
2847 	for (i = 0; i < PCH_UDC_EP_NUM; i++) {
2848 		struct pch_udc_ep *ep = &dev->ep[i];
2849 		ep->dev = dev;
2850 		ep->halted = 1;
2851 		ep->num = i / 2;
2852 		ep->in = ~i & 1;
2853 		ep->ep.name = ep_string[i];
2854 		ep->ep.ops = &pch_udc_ep_ops;
2855 		if (ep->in) {
2856 			ep->offset_addr = ep->num * UDC_EP_REG_SHIFT;
2857 			ep->ep.caps.dir_in = true;
2858 		} else {
2859 			ep->offset_addr = (UDC_EPINT_OUT_SHIFT + ep->num) *
2860 					  UDC_EP_REG_SHIFT;
2861 			ep->ep.caps.dir_out = true;
2862 		}
2863 		if (i == UDC_EP0IN_IDX || i == UDC_EP0OUT_IDX) {
2864 			ep->ep.caps.type_control = true;
2865 		} else {
2866 			ep->ep.caps.type_iso = true;
2867 			ep->ep.caps.type_bulk = true;
2868 			ep->ep.caps.type_int = true;
2869 		}
2870 		/* need to set ep->ep.maxpacket and set Default Configuration?*/
2871 		usb_ep_set_maxpacket_limit(&ep->ep, UDC_BULK_MAX_PKT_SIZE);
2872 		list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
2873 		INIT_LIST_HEAD(&ep->queue);
2874 	}
2875 	usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IDX].ep, UDC_EP0IN_MAX_PKT_SIZE);
2876 	usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IDX].ep, UDC_EP0OUT_MAX_PKT_SIZE);
2877 
2878 	/* remove ep0 in and out from the list; they have their own pointers */
2879 	list_del_init(&dev->ep[UDC_EP0IN_IDX].ep.ep_list);
2880 	list_del_init(&dev->ep[UDC_EP0OUT_IDX].ep.ep_list);
2881 
2882 	dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
2883 	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
2884 }
2885 
2886 /**
2887  * pch_udc_pcd_init() - This API initializes the driver structure
2888  * @dev:	Reference to the driver structure
2889  *
2890  * Return codes:
2891  *	0: Success
2892  */
2893 static int pch_udc_pcd_init(struct pch_udc_dev *dev)
2894 {
2895 	pch_udc_init(dev);
2896 	pch_udc_pcd_reinit(dev);
2897 	pch_vbus_gpio_init(dev, vbus_gpio_port);
2898 	return 0;
2899 }
2900 
2901 /**
2902  * init_dma_pools() - create dma pools during initialization
2903  * @dev:	Reference to the driver structure
2904  */
2905 static int init_dma_pools(struct pch_udc_dev *dev)
2906 {
2907 	struct pch_udc_stp_dma_desc	*td_stp;
2908 	struct pch_udc_data_dma_desc	*td_data;
2909 	void				*ep0out_buf;
2910 
2911 	/* DMA setup */
2912 	dev->data_requests = dma_pool_create("data_requests", &dev->pdev->dev,
2913 		sizeof(struct pch_udc_data_dma_desc), 0, 0);
2914 	if (!dev->data_requests) {
2915 		dev_err(&dev->pdev->dev, "%s: can't get request data pool\n",
2916 			__func__);
2917 		return -ENOMEM;
2918 	}
2919 
2920 	/* dma desc for setup data */
2921 	dev->stp_requests = dma_pool_create("setup requests", &dev->pdev->dev,
2922 		sizeof(struct pch_udc_stp_dma_desc), 0, 0);
2923 	if (!dev->stp_requests) {
2924 		dev_err(&dev->pdev->dev, "%s: can't get setup request pool\n",
2925 			__func__);
2926 		return -ENOMEM;
2927 	}
2928 	/* allocate the SETUP descriptor for ep0 OUT */
2929 	td_stp = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
2930 				&dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
2931 	if (!td_stp) {
2932 		dev_err(&dev->pdev->dev,
2933 			"%s: can't allocate setup dma descriptor\n", __func__);
2934 		return -ENOMEM;
2935 	}
2936 	dev->ep[UDC_EP0OUT_IDX].td_stp = td_stp;
2937 
2938 	/* allocate the data descriptor for ep0 OUT */
2939 	td_data = dma_pool_alloc(dev->data_requests, GFP_KERNEL,
2940 				&dev->ep[UDC_EP0OUT_IDX].td_data_phys);
2941 	if (!td_data) {
2942 		dev_err(&dev->pdev->dev,
2943 			"%s: can't allocate data dma descriptor\n", __func__);
2944 		return -ENOMEM;
2945 	}
2946 	dev->ep[UDC_EP0OUT_IDX].td_data = td_data;
2947 	dev->ep[UDC_EP0IN_IDX].td_stp = NULL;
2948 	dev->ep[UDC_EP0IN_IDX].td_stp_phys = 0;
2949 	dev->ep[UDC_EP0IN_IDX].td_data = NULL;
2950 	dev->ep[UDC_EP0IN_IDX].td_data_phys = 0;
2951 
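	/*
	 * Scratch buffer for ep0 OUT data; its DMA address (dev->dma_addr) is
	 * what pch_udc_activate_control_ep() plugs into the ep0 OUT data
	 * descriptor.
	 */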
2952 	ep0out_buf = devm_kzalloc(&dev->pdev->dev, UDC_EP0OUT_BUFF_SIZE * 4,
2953 				  GFP_KERNEL);
2954 	if (!ep0out_buf)
2955 		return -ENOMEM;
2956 	dev->dma_addr = dma_map_single(&dev->pdev->dev, ep0out_buf,
2957 				       UDC_EP0OUT_BUFF_SIZE * 4,
2958 				       DMA_FROM_DEVICE);
2959 	return 0;
2960 }
2961 
2962 static int pch_udc_start(struct usb_gadget *g,
2963 		struct usb_gadget_driver *driver)
2964 {
2965 	struct pch_udc_dev	*dev = to_pch_udc(g);
2966 
2967 	driver->driver.bus = NULL;
2968 	dev->driver = driver;
2969 
2970 	/* get ready for ep0 traffic */
2971 	pch_udc_setup_ep0(dev);
2972 
2973 	/* clear SD */
2974 	if ((pch_vbus_gpio_get_value(dev) != 0) || !dev->vbus_gpio.intr)
2975 		pch_udc_clear_disconnect(dev);
2976 
2977 	dev->connected = 1;
2978 	return 0;
2979 }
2980 
2981 static int pch_udc_stop(struct usb_gadget *g)
2982 {
2983 	struct pch_udc_dev	*dev = to_pch_udc(g);
2984 
2985 	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
2986 
2987 	/* Ensures that there are no pending requests with this driver */
2988 	dev->driver = NULL;
2989 	dev->connected = 0;
2990 
2991 	/* set SD */
2992 	pch_udc_set_disconnect(dev);
2993 
2994 	return 0;
2995 }
2996 
2997 static void pch_udc_shutdown(struct pci_dev *pdev)
2998 {
2999 	struct pch_udc_dev *dev = pci_get_drvdata(pdev);
3000 
3001 	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
3002 	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
3003 
3004 	/* disable the pullup so the host will think we're gone */
3005 	pch_udc_set_disconnect(dev);
3006 }
3007 
3008 static void pch_udc_remove(struct pci_dev *pdev)
3009 {
3010 	struct pch_udc_dev	*dev = pci_get_drvdata(pdev);
3011 
3012 	usb_del_gadget_udc(&dev->gadget);
3013 
3014 	/* the gadget driver must already have been unbound */
3015 	if (dev->driver)
3016 		dev_err(&pdev->dev,
3017 			"%s: gadget driver still bound!!!\n", __func__);
3018 	/* dma pool cleanup */
3019 	dma_pool_destroy(dev->data_requests);
3020 
3021 	if (dev->stp_requests) {
3022 		/* clean up the DMA descriptors for ep0 OUT */
3023 		if (dev->ep[UDC_EP0OUT_IDX].td_stp) {
3024 			dma_pool_free(dev->stp_requests,
3025 				dev->ep[UDC_EP0OUT_IDX].td_stp,
3026 				dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
3027 		}
3028 		if (dev->ep[UDC_EP0OUT_IDX].td_data) {
3029 			dma_pool_free(dev->stp_requests,
3030 				dev->ep[UDC_EP0OUT_IDX].td_data,
3031 				dev->ep[UDC_EP0OUT_IDX].td_data_phys);
3032 		}
3033 		dma_pool_destroy(dev->stp_requests);
3034 	}
3035 
3036 	if (dev->dma_addr)
3037 		dma_unmap_single(&dev->pdev->dev, dev->dma_addr,
3038 				 UDC_EP0OUT_BUFF_SIZE * 4, DMA_FROM_DEVICE);
3039 
3040 	pch_vbus_gpio_free(dev);
3041 
3042 	pch_udc_exit(dev);
3043 }
3044 
3045 #ifdef CONFIG_PM_SLEEP
3046 static int pch_udc_suspend(struct device *d)
3047 {
3048 	struct pch_udc_dev *dev = dev_get_drvdata(d);
3049 
3050 	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
3051 	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
3052 
3053 	return 0;
3054 }
3055 
3056 static int pch_udc_resume(struct device *d)
3057 {
3058 	return 0;
3059 }
3060 
3061 static SIMPLE_DEV_PM_OPS(pch_udc_pm, pch_udc_suspend, pch_udc_resume);
3062 #define PCH_UDC_PM_OPS		(&pch_udc_pm)
3063 #else
3064 #define PCH_UDC_PM_OPS		NULL
3065 #endif /* CONFIG_PM_SLEEP */
3066 
3067 static int pch_udc_probe(struct pci_dev *pdev,
3068 			  const struct pci_device_id *id)
3069 {
3070 	int			bar;
3071 	int			retval;
3072 	struct pch_udc_dev	*dev;
3073 
3074 	/* init */
3075 	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
3076 	if (!dev)
3077 		return -ENOMEM;
3078 
3079 	/* pci setup */
3080 	retval = pcim_enable_device(pdev);
3081 	if (retval)
3082 		return retval;
3083 
3084 	pci_set_drvdata(pdev, dev);
3085 
3086 	/* Determine BAR based on PCI ID */
3087 	if (id->device == PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC)
3088 		bar = PCH_UDC_PCI_BAR_QUARK_X1000;
3089 	else
3090 		bar = PCH_UDC_PCI_BAR;
3091 
3092 	/* PCI resource allocation */
3093 	retval = pcim_iomap_regions(pdev, 1 << bar, pci_name(pdev));
3094 	if (retval)
3095 		return retval;
3096 
3097 	dev->base_addr = pcim_iomap_table(pdev)[bar];
3098 
3099 	/* initialize the hardware */
3100 	if (pch_udc_pcd_init(dev))
3101 		return -ENODEV;
3102 
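	/* MSI is best effort; on failure pdev->irq remains the legacy INTx */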
3103 	pci_enable_msi(pdev);
3104 
3105 	retval = devm_request_irq(&pdev->dev, pdev->irq, pch_udc_isr,
3106 				  IRQF_SHARED, KBUILD_MODNAME, dev);
3107 	if (retval) {
3108 		dev_err(&pdev->dev, "%s: request_irq(%d) fail\n", __func__,
3109 			pdev->irq);
3110 		goto finished;
3111 	}
3112 
3113 	pci_set_master(pdev);
3114 	pci_try_set_mwi(pdev);
3115 
3116 	/* device struct setup */
3117 	spin_lock_init(&dev->lock);
3118 	dev->pdev = pdev;
3119 	dev->gadget.ops = &pch_udc_ops;
3120 
3121 	retval = init_dma_pools(dev);
3122 	if (retval)
3123 		goto finished;
3124 
3125 	dev->gadget.name = KBUILD_MODNAME;
3126 	dev->gadget.max_speed = USB_SPEED_HIGH;
3127 
3128 	/* Put the device in disconnected state till a driver is bound */
3129 	pch_udc_set_disconnect(dev);
3130 	retval = usb_add_gadget_udc(&pdev->dev, &dev->gadget);
3131 	if (retval)
3132 		goto finished;
3133 	return 0;
3134 
3135 finished:
3136 	pch_udc_remove(pdev);
3137 	return retval;
3138 }
3139 
3140 static const struct pci_device_id pch_udc_pcidev_id[] = {
3141 	{
3142 		PCI_DEVICE(PCI_VENDOR_ID_INTEL,
3143 			   PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC),
3144 		.class = PCI_CLASS_SERIAL_USB_DEVICE,
3145 		.class_mask = 0xffffffff,
3146 	},
3147 	{
3148 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EG20T_UDC),
3149 		.class = PCI_CLASS_SERIAL_USB_DEVICE,
3150 		.class_mask = 0xffffffff,
3151 	},
3152 	{
3153 		PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7213_IOH_UDC),
3154 		.class = PCI_CLASS_SERIAL_USB_DEVICE,
3155 		.class_mask = 0xffffffff,
3156 	},
3157 	{
3158 		PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7831_IOH_UDC),
3159 		.class = PCI_CLASS_SERIAL_USB_DEVICE,
3160 		.class_mask = 0xffffffff,
3161 	},
3162 	{ 0 },
3163 };
3164 
3165 MODULE_DEVICE_TABLE(pci, pch_udc_pcidev_id);
3166 
3167 static struct pci_driver pch_udc_driver = {
3168 	.name =	KBUILD_MODNAME,
3169 	.id_table =	pch_udc_pcidev_id,
3170 	.probe =	pch_udc_probe,
3171 	.remove =	pch_udc_remove,
3172 	.shutdown =	pch_udc_shutdown,
3173 	.driver = {
3174 		.pm = PCH_UDC_PM_OPS,
3175 	},
3176 };
3177 
3178 module_pci_driver(pch_udc_driver);
3179 
3180 MODULE_DESCRIPTION("Intel EG20T USB Device Controller");
3181 MODULE_AUTHOR("LAPIS Semiconductor, <tomoya-linux@dsn.lapis-semi.com>");
3182 MODULE_LICENSE("GPL");
3183