xref: /openbmc/linux/drivers/usb/gadget/udc/net2272.c (revision feac8c8b)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Driver for PLX NET2272 USB device controller
4  *
5  * Copyright (C) 2005-2006 PLX Technology, Inc.
6  * Copyright (C) 2006-2011 Analog Devices, Inc.
7  */
8 
9 #include <linux/delay.h>
10 #include <linux/device.h>
11 #include <linux/errno.h>
12 #include <linux/gpio.h>
13 #include <linux/init.h>
14 #include <linux/interrupt.h>
15 #include <linux/io.h>
16 #include <linux/ioport.h>
17 #include <linux/kernel.h>
18 #include <linux/list.h>
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/pci.h>
22 #include <linux/platform_device.h>
23 #include <linux/prefetch.h>
24 #include <linux/sched.h>
25 #include <linux/slab.h>
26 #include <linux/timer.h>
27 #include <linux/usb.h>
28 #include <linux/usb/ch9.h>
29 #include <linux/usb/gadget.h>
30 
31 #include <asm/byteorder.h>
32 #include <asm/unaligned.h>
33 
34 #include "net2272.h"
35 
36 #define DRIVER_DESC "PLX NET2272 USB Peripheral Controller"
37 
38 static const char driver_name[] = "net2272";
39 static const char driver_vers[] = "2006 October 17/mainline";
40 static const char driver_desc[] = DRIVER_DESC;
41 
42 static const char ep0name[] = "ep0";
43 static const char * const ep_name[] = {
44 	ep0name,
45 	"ep-a", "ep-b", "ep-c",
46 };
47 
48 #ifdef CONFIG_USB_NET2272_DMA
49 /*
50  * use_dma: the NET2272 can use an external DMA controller.
51  * Note that since there is no generic DMA api, some functions,
52  * notably request_dma, start_dma, and cancel_dma will need to be
53  * modified for your platform's particular dma controller.
54  *
55  * If use_dma is disabled, pio will be used instead.
56  */
57 static bool use_dma = 0;
58 module_param(use_dma, bool, 0644);
59 
60 /*
61  * dma_ep: selects the endpoint for use with dma (1=ep-a, 2=ep-b)
62  * The NET2272 can only use dma for a single endpoint at a time.
63  * At some point this could be modified to allow either endpoint
64  * to take control of dma as it becomes available.
65  *
66  * Note that DMA should not be used on OUT endpoints unless it can
67  * be guaranteed that no short packets will arrive on an IN endpoint
68  * while the DMA operation is pending.  Otherwise the OUT DMA will
69  * terminate prematurely (See NET2272 Errata 630-0213-0101)
70  */
71 static ushort dma_ep = 1;
72 module_param(dma_ep, ushort, 0644);
73 
74 /*
75  * dma_mode: net2272 dma mode setting (see LOCCTL1 definition):
76  *	mode 0 == Slow DREQ mode
77  *	mode 1 == Fast DREQ mode
78  *	mode 2 == Burst mode
79  */
80 static ushort dma_mode = 2;
81 module_param(dma_mode, ushort, 0644);
82 #else
83 #define use_dma 0
84 #define dma_ep 1
85 #define dma_mode 2
86 #endif
87 
88 /*
89  * fifo_mode: net2272 buffer configuration:
90  *      mode 0 == ep-{a,b,c} 512db each
91  *      mode 1 == ep-a 1k, ep-{b,c} 512db
92  *      mode 2 == ep-a 1k, ep-b 1k, ep-c 512db
93  *      mode 3 == ep-a 1k, ep-b disabled, ep-c 512db
94  */
95 static ushort fifo_mode = 0;
96 module_param(fifo_mode, ushort, 0644);
97 
98 /*
99  * enable_suspend: When enabled, the driver will respond to
100  * USB suspend requests by powering down the NET2272.  Otherwise,
101  * USB suspend requests will be ignored.  This is acceptable for
102  * self-powered devices.  For bus-powered devices set this to 1.
103  */
104 static ushort enable_suspend = 0;
105 module_param(enable_suspend, ushort, 0644);
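
/*
 * Illustrative usage of the module parameters documented above (not part of
 * the original file): fifo_mode and enable_suspend always exist, while
 * use_dma/dma_ep/dma_mode are only present when the driver is built with
 * CONFIG_USB_NET2272_DMA, e.g.
 *
 *	modprobe net2272 fifo_mode=2 enable_suspend=1 use_dma=1 dma_ep=1 dma_mode=2
 *
 * Because each parameter is registered with mode 0644, they can also be
 * inspected and changed at runtime under /sys/module/net2272/parameters/.
 */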
106 
107 static void assert_out_naking(struct net2272_ep *ep, const char *where)
108 {
109 	u8 tmp;
110 
111 #ifndef DEBUG
112 	return;
113 #endif
114 
115 	tmp = net2272_ep_read(ep, EP_STAT0);
116 	if ((tmp & (1 << NAK_OUT_PACKETS)) == 0) {
117 		dev_dbg(ep->dev->dev, "%s %s %02x !NAK\n",
118 			ep->ep.name, where, tmp);
119 		net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
120 	}
121 }
122 #define ASSERT_OUT_NAKING(ep) assert_out_naking(ep, __func__)
123 
124 static void stop_out_naking(struct net2272_ep *ep)
125 {
126 	u8 tmp = net2272_ep_read(ep, EP_STAT0);
127 
128 	if ((tmp & (1 << NAK_OUT_PACKETS)) != 0)
129 		net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
130 }
131 
132 #define PIPEDIR(bAddress) (usb_pipein(bAddress) ? "in" : "out")
133 
134 static char *type_string(u8 bmAttributes)
135 {
136 	switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
137 	case USB_ENDPOINT_XFER_BULK: return "bulk";
138 	case USB_ENDPOINT_XFER_ISOC: return "iso";
139 	case USB_ENDPOINT_XFER_INT:  return "intr";
140 	default:                     return "control";
141 	}
142 }
143 
144 static char *buf_state_string(unsigned state)
145 {
146 	switch (state) {
147 	case BUFF_FREE:  return "free";
148 	case BUFF_VALID: return "valid";
149 	case BUFF_LCL:   return "local";
150 	case BUFF_USB:   return "usb";
151 	default:         return "unknown";
152 	}
153 }
154 
155 static char *dma_mode_string(void)
156 {
157 	if (!use_dma)
158 		return "PIO";
159 	switch (dma_mode) {
160 	case 0:  return "SLOW DREQ";
161 	case 1:  return "FAST DREQ";
162 	case 2:  return "BURST";
163 	default: return "invalid";
164 	}
165 }
166 
167 static void net2272_dequeue_all(struct net2272_ep *);
168 static int net2272_kick_dma(struct net2272_ep *, struct net2272_request *);
169 static int net2272_fifo_status(struct usb_ep *);
170 
171 static const struct usb_ep_ops net2272_ep_ops;
172 
173 /*---------------------------------------------------------------------------*/
174 
175 static int
176 net2272_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
177 {
178 	struct net2272 *dev;
179 	struct net2272_ep *ep;
180 	u32 max;
181 	u8 tmp;
182 	unsigned long flags;
183 
184 	ep = container_of(_ep, struct net2272_ep, ep);
185 	if (!_ep || !desc || ep->desc || _ep->name == ep0name
186 			|| desc->bDescriptorType != USB_DT_ENDPOINT)
187 		return -EINVAL;
188 	dev = ep->dev;
189 	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
190 		return -ESHUTDOWN;
191 
192 	max = usb_endpoint_maxp(desc);
193 
194 	spin_lock_irqsave(&dev->lock, flags);
195 	_ep->maxpacket = max;
196 	ep->desc = desc;
197 
198 	/* net2272_ep_reset() has already been called */
199 	ep->stopped = 0;
200 	ep->wedged = 0;
201 
202 	/* set speed-dependent max packet */
203 	net2272_ep_write(ep, EP_MAXPKT0, max & 0xff);
204 	net2272_ep_write(ep, EP_MAXPKT1, (max & 0xff00) >> 8);
205 
206 	/* set type, direction, address; reset fifo counters */
207 	net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
208 	tmp = usb_endpoint_type(desc);
209 	if (usb_endpoint_xfer_bulk(desc)) {
210 		/* catch some particularly blatant driver bugs */
211 		if ((dev->gadget.speed == USB_SPEED_HIGH && max != 512) ||
212 		    (dev->gadget.speed == USB_SPEED_FULL && max > 64)) {
213 			spin_unlock_irqrestore(&dev->lock, flags);
214 			return -ERANGE;
215 		}
216 	}
217 	ep->is_iso = usb_endpoint_xfer_isoc(desc) ? 1 : 0;
218 	tmp <<= ENDPOINT_TYPE;
219 	tmp |= ((desc->bEndpointAddress & 0x0f) << ENDPOINT_NUMBER);
220 	tmp |= usb_endpoint_dir_in(desc) << ENDPOINT_DIRECTION;
221 	tmp |= (1 << ENDPOINT_ENABLE);
222 
223 	/* for OUT transfers, block the rx fifo until a read is posted */
224 	ep->is_in = usb_endpoint_dir_in(desc);
225 	if (!ep->is_in)
226 		net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
227 
228 	net2272_ep_write(ep, EP_CFG, tmp);
229 
230 	/* enable irqs */
231 	tmp = (1 << ep->num) | net2272_read(dev, IRQENB0);
232 	net2272_write(dev, IRQENB0, tmp);
233 
234 	tmp = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
235 		| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
236 		| net2272_ep_read(ep, EP_IRQENB);
237 	net2272_ep_write(ep, EP_IRQENB, tmp);
238 
239 	tmp = desc->bEndpointAddress;
240 	dev_dbg(dev->dev, "enabled %s (ep%d%s-%s) max %04x cfg %02x\n",
241 		_ep->name, tmp & 0x0f, PIPEDIR(tmp),
242 		type_string(desc->bmAttributes), max,
243 		net2272_ep_read(ep, EP_CFG));
244 
245 	spin_unlock_irqrestore(&dev->lock, flags);
246 	return 0;
247 }
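
/*
 * For orientation (gadget-driver side, not part of this controller driver):
 * a function driver reaches net2272_enable() indirectly through the ep_ops
 * table; config_ep_by_speed() selects the FS or HS descriptor first, so the
 * speed-dependent maxpacket checks above are what reject a mismatched bulk
 * descriptor.  A minimal sketch:
 *
 *	if (config_ep_by_speed(gadget, f, ep))
 *		goto fail;
 *	if (usb_ep_enable(ep))
 *		goto fail;
 */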
248 
249 static void net2272_ep_reset(struct net2272_ep *ep)
250 {
251 	u8 tmp;
252 
253 	ep->desc = NULL;
254 	INIT_LIST_HEAD(&ep->queue);
255 
256 	usb_ep_set_maxpacket_limit(&ep->ep, ~0);
257 	ep->ep.ops = &net2272_ep_ops;
258 
259 	/* disable irqs, endpoint */
260 	net2272_ep_write(ep, EP_IRQENB, 0);
261 
262 	/* init to our chosen defaults, notably so that we NAK OUT
263 	 * packets until the driver queues a read.
264 	 */
265 	tmp = (1 << NAK_OUT_PACKETS_MODE) | (1 << ALT_NAK_OUT_PACKETS);
266 	net2272_ep_write(ep, EP_RSPSET, tmp);
267 
268 	tmp = (1 << INTERRUPT_MODE) | (1 << HIDE_STATUS_PHASE);
269 	if (ep->num != 0)
270 		tmp |= (1 << ENDPOINT_TOGGLE) | (1 << ENDPOINT_HALT);
271 
272 	net2272_ep_write(ep, EP_RSPCLR, tmp);
273 
274 	/* scrub most status bits, and flush any fifo state */
275 	net2272_ep_write(ep, EP_STAT0,
276 			  (1 << DATA_IN_TOKEN_INTERRUPT)
277 			| (1 << DATA_OUT_TOKEN_INTERRUPT)
278 			| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
279 			| (1 << DATA_PACKET_RECEIVED_INTERRUPT)
280 			| (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));
281 
282 	net2272_ep_write(ep, EP_STAT1,
283 			    (1 << TIMEOUT)
284 			  | (1 << USB_OUT_ACK_SENT)
285 			  | (1 << USB_OUT_NAK_SENT)
286 			  | (1 << USB_IN_ACK_RCVD)
287 			  | (1 << USB_IN_NAK_SENT)
288 			  | (1 << USB_STALL_SENT)
289 			  | (1 << LOCAL_OUT_ZLP)
290 			  | (1 << BUFFER_FLUSH));
291 
292 	/* fifo size is handled separately */
293 }
294 
295 static int net2272_disable(struct usb_ep *_ep)
296 {
297 	struct net2272_ep *ep;
298 	unsigned long flags;
299 
300 	ep = container_of(_ep, struct net2272_ep, ep);
301 	if (!_ep || !ep->desc || _ep->name == ep0name)
302 		return -EINVAL;
303 
304 	spin_lock_irqsave(&ep->dev->lock, flags);
305 	net2272_dequeue_all(ep);
306 	net2272_ep_reset(ep);
307 
308 	dev_vdbg(ep->dev->dev, "disabled %s\n", _ep->name);
309 
310 	spin_unlock_irqrestore(&ep->dev->lock, flags);
311 	return 0;
312 }
313 
314 /*---------------------------------------------------------------------------*/
315 
316 static struct usb_request *
317 net2272_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
318 {
319 	struct net2272_request *req;
320 
321 	if (!_ep)
322 		return NULL;
323 
324 	req = kzalloc(sizeof(*req), gfp_flags);
325 	if (!req)
326 		return NULL;
327 
328 	INIT_LIST_HEAD(&req->queue);
329 
330 	return &req->req;
331 }
332 
333 static void
334 net2272_free_request(struct usb_ep *_ep, struct usb_request *_req)
335 {
336 	struct net2272_request *req;
337 
338 	if (!_ep || !_req)
339 		return;
340 
341 	req = container_of(_req, struct net2272_request, req);
342 	WARN_ON(!list_empty(&req->queue));
343 	kfree(req);
344 }
345 
346 static void
347 net2272_done(struct net2272_ep *ep, struct net2272_request *req, int status)
348 {
349 	struct net2272 *dev;
350 	unsigned stopped = ep->stopped;
351 
352 	if (ep->num == 0) {
353 		if (ep->dev->protocol_stall) {
354 			ep->stopped = 1;
355 			set_halt(ep);
356 		}
357 		allow_status(ep);
358 	}
359 
360 	list_del_init(&req->queue);
361 
362 	if (req->req.status == -EINPROGRESS)
363 		req->req.status = status;
364 	else
365 		status = req->req.status;
366 
367 	dev = ep->dev;
368 	if (use_dma && ep->dma)
369 		usb_gadget_unmap_request(&dev->gadget, &req->req,
370 				ep->is_in);
371 
372 	if (status && status != -ESHUTDOWN)
373 		dev_vdbg(dev->dev, "complete %s req %p stat %d len %u/%u buf %p\n",
374 			ep->ep.name, &req->req, status,
375 			req->req.actual, req->req.length, req->req.buf);
376 
377 	/* don't modify queue heads during completion callback */
378 	ep->stopped = 1;
379 	spin_unlock(&dev->lock);
380 	usb_gadget_giveback_request(&ep->ep, &req->req);
381 	spin_lock(&dev->lock);
382 	ep->stopped = stopped;
383 }
384 
385 static int
386 net2272_write_packet(struct net2272_ep *ep, u8 *buf,
387 	struct net2272_request *req, unsigned max)
388 {
389 	u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
390 	u16 *bufp;
391 	unsigned length, count;
392 	u8 tmp;
393 
394 	length = min(req->req.length - req->req.actual, max);
395 	req->req.actual += length;
396 
397 	dev_vdbg(ep->dev->dev, "write packet %s req %p max %u len %u avail %u\n",
398 		ep->ep.name, req, max, length,
399 		(net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));
400 
401 	count = length;
402 	bufp = (u16 *)buf;
403 
404 	while (likely(count >= 2)) {
405 		/* no byte-swap required; chip endian set during init */
406 		writew(*bufp++, ep_data);
407 		count -= 2;
408 	}
409 	buf = (u8 *)bufp;
410 
411 	/* write final byte by placing the NET2272 into 8-bit mode */
412 	if (unlikely(count)) {
413 		tmp = net2272_read(ep->dev, LOCCTL);
414 		net2272_write(ep->dev, LOCCTL, tmp & ~(1 << DATA_WIDTH));
415 		writeb(*buf, ep_data);
416 		net2272_write(ep->dev, LOCCTL, tmp);
417 	}
418 	return length;
419 }
420 
421 /* returns: 0: still running, 1: completed, negative: errno */
422 static int
423 net2272_write_fifo(struct net2272_ep *ep, struct net2272_request *req)
424 {
425 	u8 *buf;
426 	unsigned count, max;
427 	int status;
428 
429 	dev_vdbg(ep->dev->dev, "write_fifo %s actual %d len %d\n",
430 		ep->ep.name, req->req.actual, req->req.length);
431 
432 	/*
433 	 * Keep loading the endpoint until the final packet is loaded,
434 	 * or the endpoint buffer is full.
435 	 */
436  top:
437 	/*
438 	 * Clear interrupt status
439 	 *  - Packet Transmitted interrupt will become set again when the
440 	 *    host successfully takes another packet
441 	 */
442 	net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
443 	while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_FULL))) {
444 		buf = req->req.buf + req->req.actual;
445 		prefetch(buf);
446 
447 		/* force pagesel */
448 		net2272_ep_read(ep, EP_STAT0);
449 
450 		max = (net2272_ep_read(ep, EP_AVAIL1) << 8) |
451 			(net2272_ep_read(ep, EP_AVAIL0));
452 
453 		if (max < ep->ep.maxpacket)
454 			max = (net2272_ep_read(ep, EP_AVAIL1) << 8)
455 				| (net2272_ep_read(ep, EP_AVAIL0));
456 
457 		count = net2272_write_packet(ep, buf, req, max);
458 		/* see if we are done */
459 		if (req->req.length == req->req.actual) {
460 			/* validate short or zlp packet */
461 			if (count < ep->ep.maxpacket)
462 				set_fifo_bytecount(ep, 0);
463 			net2272_done(ep, req, 0);
464 
465 			if (!list_empty(&ep->queue)) {
466 				req = list_entry(ep->queue.next,
467 						struct net2272_request,
468 						queue);
469 				status = net2272_kick_dma(ep, req);
470 
471 				if (status < 0)
472 					if ((net2272_ep_read(ep, EP_STAT0)
473 							& (1 << BUFFER_EMPTY)))
474 						goto top;
475 			}
476 			return 1;
477 		}
478 		net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
479 	}
480 	return 0;
481 }
482 
483 static void
484 net2272_out_flush(struct net2272_ep *ep)
485 {
486 	ASSERT_OUT_NAKING(ep);
487 
488 	net2272_ep_write(ep, EP_STAT0, (1 << DATA_OUT_TOKEN_INTERRUPT)
489 			| (1 << DATA_PACKET_RECEIVED_INTERRUPT));
490 	net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
491 }
492 
493 static int
494 net2272_read_packet(struct net2272_ep *ep, u8 *buf,
495 	struct net2272_request *req, unsigned avail)
496 {
497 	u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
498 	unsigned is_short;
499 	u16 *bufp;
500 
501 	req->req.actual += avail;
502 
503 	dev_vdbg(ep->dev->dev, "read packet %s req %p len %u avail %u\n",
504 		ep->ep.name, req, avail,
505 		(net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));
506 
507 	is_short = (avail < ep->ep.maxpacket);
508 
509 	if (unlikely(avail == 0)) {
510 		/* remove any zlp from the buffer */
511 		(void)readw(ep_data);
512 		return is_short;
513 	}
514 
515 	/* Ensure we get the final byte */
516 	if (unlikely(avail % 2))
517 		avail++;
518 	bufp = (u16 *)buf;
519 
520 	do {
521 		*bufp++ = readw(ep_data);
522 		avail -= 2;
523 	} while (avail);
524 
525 	/*
526 	 * To avoid a false endpoint-available race condition, EP_STAT0 must be
527 	 * read twice in the case of a short transfer
528 	 */
529 	if (net2272_ep_read(ep, EP_STAT0) & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT))
530 		net2272_ep_read(ep, EP_STAT0);
531 
532 	return is_short;
533 }
534 
535 static int
536 net2272_read_fifo(struct net2272_ep *ep, struct net2272_request *req)
537 {
538 	u8 *buf;
539 	unsigned is_short;
540 	int count;
541 	int tmp;
542 	int cleanup = 0;
543 	int status = -1;
544 
545 	dev_vdbg(ep->dev->dev, "read_fifo %s actual %d len %d\n",
546 		ep->ep.name, req->req.actual, req->req.length);
547 
548  top:
549 	do {
550 		buf = req->req.buf + req->req.actual;
551 		prefetchw(buf);
552 
553 		count = (net2272_ep_read(ep, EP_AVAIL1) << 8)
554 			| net2272_ep_read(ep, EP_AVAIL0);
555 
556 		net2272_ep_write(ep, EP_STAT0,
557 			(1 << SHORT_PACKET_TRANSFERRED_INTERRUPT) |
558 			(1 << DATA_PACKET_RECEIVED_INTERRUPT));
559 
560 		tmp = req->req.length - req->req.actual;
561 
562 		if (count > tmp) {
563 			if ((tmp % ep->ep.maxpacket) != 0) {
564 				dev_err(ep->dev->dev,
565 					"%s out fifo %d bytes, expected %d\n",
566 					ep->ep.name, count, tmp);
567 				cleanup = 1;
568 			}
569 			count = (tmp > 0) ? tmp : 0;
570 		}
571 
572 		is_short = net2272_read_packet(ep, buf, req, count);
573 
574 		/* completion */
575 		if (unlikely(cleanup || is_short ||
576 				((req->req.actual == req->req.length)
577 				 && !req->req.zero))) {
578 
579 			if (cleanup) {
580 				net2272_out_flush(ep);
581 				net2272_done(ep, req, -EOVERFLOW);
582 			} else
583 				net2272_done(ep, req, 0);
584 
585 			/* re-initialize endpoint transfer registers
586 			 * otherwise they may result in erroneous pre-validation
587 			 * for subsequent control reads
588 			 */
589 			if (unlikely(ep->num == 0)) {
590 				net2272_ep_write(ep, EP_TRANSFER2, 0);
591 				net2272_ep_write(ep, EP_TRANSFER1, 0);
592 				net2272_ep_write(ep, EP_TRANSFER0, 0);
593 			}
594 
595 			if (!list_empty(&ep->queue)) {
596 				req = list_entry(ep->queue.next,
597 					struct net2272_request, queue);
598 				status = net2272_kick_dma(ep, req);
599 				if ((status < 0) &&
600 				    !(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)))
601 					goto top;
602 			}
603 			return 1;
604 		}
605 	} while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)));
606 
607 	return 0;
608 }
609 
610 static void
611 net2272_pio_advance(struct net2272_ep *ep)
612 {
613 	struct net2272_request *req;
614 
615 	if (unlikely(list_empty(&ep->queue)))
616 		return;
617 
618 	req = list_entry(ep->queue.next, struct net2272_request, queue);
619 	(ep->is_in ? net2272_write_fifo : net2272_read_fifo)(ep, req);
620 }
621 
622 /* returns 0 on success, else negative errno */
623 static int
624 net2272_request_dma(struct net2272 *dev, unsigned ep, u32 buf,
625 	unsigned len, unsigned dir)
626 {
627 	dev_vdbg(dev->dev, "request_dma ep %d buf %08x len %d dir %d\n",
628 		ep, buf, len, dir);
629 
630 	/* The NET2272 only supports a single dma channel */
631 	if (dev->dma_busy)
632 		return -EBUSY;
633 	/*
634 	 * EP_TRANSFER (used to determine the number of bytes received
635 	 * in an OUT transfer) is 24 bits wide; don't ask for more than that.
636 	 */
637 	if ((dir == 1) && (len > 0x1000000))
638 		return -EINVAL;
639 
640 	dev->dma_busy = 1;
641 
642 	/* initialize platform's dma */
643 #ifdef CONFIG_USB_PCI
644 	/* NET2272 addr, buffer addr, length, etc. */
645 	switch (dev->dev_id) {
646 	case PCI_DEVICE_ID_RDK1:
647 		/* Setup PLX 9054 DMA mode */
648 		writel((1 << LOCAL_BUS_WIDTH) |
649 			(1 << TA_READY_INPUT_ENABLE) |
650 			(0 << LOCAL_BURST_ENABLE) |
651 			(1 << DONE_INTERRUPT_ENABLE) |
652 			(1 << LOCAL_ADDRESSING_MODE) |
653 			(1 << DEMAND_MODE) |
654 			(1 << DMA_EOT_ENABLE) |
655 			(1 << FAST_SLOW_TERMINATE_MODE_SELECT) |
656 			(1 << DMA_CHANNEL_INTERRUPT_SELECT),
657 			dev->rdk1.plx9054_base_addr + DMAMODE0);
658 
659 		writel(0x100000, dev->rdk1.plx9054_base_addr + DMALADR0);
660 		writel(buf, dev->rdk1.plx9054_base_addr + DMAPADR0);
661 		writel(len, dev->rdk1.plx9054_base_addr + DMASIZ0);
662 		writel((dir << DIRECTION_OF_TRANSFER) |
663 			(1 << INTERRUPT_AFTER_TERMINAL_COUNT),
664 			dev->rdk1.plx9054_base_addr + DMADPR0);
665 		writel((1 << LOCAL_DMA_CHANNEL_0_INTERRUPT_ENABLE) |
666 			readl(dev->rdk1.plx9054_base_addr + INTCSR),
667 			dev->rdk1.plx9054_base_addr + INTCSR);
668 
669 		break;
670 	}
671 #endif
672 
673 	net2272_write(dev, DMAREQ,
674 		(0 << DMA_BUFFER_VALID) |
675 		(1 << DMA_REQUEST_ENABLE) |
676 		(1 << DMA_CONTROL_DACK) |
677 		(dev->dma_eot_polarity << EOT_POLARITY) |
678 		(dev->dma_dack_polarity << DACK_POLARITY) |
679 		(dev->dma_dreq_polarity << DREQ_POLARITY) |
680 		((ep >> 1) << DMA_ENDPOINT_SELECT));
681 
682 	(void) net2272_read(dev, SCRATCH);
683 
684 	return 0;
685 }
686 
687 static void
688 net2272_start_dma(struct net2272 *dev)
689 {
690 	/* start platform's dma controller */
691 #ifdef CONFIG_USB_PCI
692 	switch (dev->dev_id) {
693 	case PCI_DEVICE_ID_RDK1:
694 		writeb((1 << CHANNEL_ENABLE) | (1 << CHANNEL_START),
695 			dev->rdk1.plx9054_base_addr + DMACSR0);
696 		break;
697 	}
698 #endif
699 }
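
/*
 * Porting sketch (illustrative only, not compiled): as the use_dma comment
 * near the top of the file notes, request_dma/start_dma/cancel_dma must be
 * adapted to the platform's DMA controller.  A board driving the external
 * DMA engine through the dmaengine framework would, roughly:
 *
 *	desc = dmaengine_prep_slave_single(chan, buf, len, dir, flags);
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *
 * where "chan" is a dma_chan obtained at probe time; these names describe a
 * hypothetical platform, not anything provided by this driver.
 */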
700 
701 /* returns 0 on success, else negative errno */
702 static int
703 net2272_kick_dma(struct net2272_ep *ep, struct net2272_request *req)
704 {
705 	unsigned size;
706 	u8 tmp;
707 
708 	if (!use_dma || (ep->num < 1) || (ep->num > 2) || !ep->dma)
709 		return -EINVAL;
710 
711 	/* don't use dma for odd-length transfers
712 	 * otherwise, we'd need to deal with the last byte with pio
713 	 */
714 	if (req->req.length & 1)
715 		return -EINVAL;
716 
717 	dev_vdbg(ep->dev->dev, "kick_dma %s req %p dma %08llx\n",
718 		ep->ep.name, req, (unsigned long long) req->req.dma);
719 
720 	net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
721 
722 	/* The NET2272 can only use DMA on one endpoint at a time */
723 	if (ep->dev->dma_busy)
724 		return -EBUSY;
725 
726 	/* Make sure we only DMA an even number of bytes (we'll use
727 	 * pio to complete the transfer)
728 	 */
729 	size = req->req.length;
730 	size &= ~1;
731 
732 	/* device-to-host transfer */
733 	if (ep->is_in) {
734 		/* initialize platform's dma controller */
735 		if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 0))
736 			/* unable to obtain DMA channel; return error and use pio mode */
737 			return -EBUSY;
738 		req->req.actual += size;
739 
740 	/* host-to-device transfer */
741 	} else {
742 		tmp = net2272_ep_read(ep, EP_STAT0);
743 
744 		/* initialize platform's dma controller */
745 		if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 1))
746 			/* unable to obtain DMA channel; return error and use pio mode */
747 			return -EBUSY;
748 
749 		if (!(tmp & (1 << BUFFER_EMPTY)))
750 			ep->not_empty = 1;
751 		else
752 			ep->not_empty = 0;
753 
754 
755 		/* allow the endpoint's buffer to fill */
756 		net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
757 
758 		/* this transfer completed and data's already in the fifo;
759 		 * return error so pio gets used.
760 		 */
761 		if (tmp & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
762 
763 			/* deassert dreq */
764 			net2272_write(ep->dev, DMAREQ,
765 				(0 << DMA_BUFFER_VALID) |
766 				(0 << DMA_REQUEST_ENABLE) |
767 				(1 << DMA_CONTROL_DACK) |
768 				(ep->dev->dma_eot_polarity << EOT_POLARITY) |
769 				(ep->dev->dma_dack_polarity << DACK_POLARITY) |
770 				(ep->dev->dma_dreq_polarity << DREQ_POLARITY) |
771 				((ep->num >> 1) << DMA_ENDPOINT_SELECT));
772 
773 			return -EBUSY;
774 		}
775 	}
776 
777 	/* Don't use per-packet interrupts: use dma interrupts only */
778 	net2272_ep_write(ep, EP_IRQENB, 0);
779 
780 	net2272_start_dma(ep->dev);
781 
782 	return 0;
783 }
784 
785 static void net2272_cancel_dma(struct net2272 *dev)
786 {
787 #ifdef CONFIG_USB_PCI
788 	switch (dev->dev_id) {
789 	case PCI_DEVICE_ID_RDK1:
790 		writeb(0, dev->rdk1.plx9054_base_addr + DMACSR0);
791 		writeb(1 << CHANNEL_ABORT, dev->rdk1.plx9054_base_addr + DMACSR0);
792 		while (!(readb(dev->rdk1.plx9054_base_addr + DMACSR0) &
793 		         (1 << CHANNEL_DONE)))
794 			continue;	/* wait for dma to stabilize */
795 
796 		/* dma abort generates an interrupt */
797 		writeb(1 << CHANNEL_CLEAR_INTERRUPT,
798 			dev->rdk1.plx9054_base_addr + DMACSR0);
799 		break;
800 	}
801 #endif
802 
803 	dev->dma_busy = 0;
804 }
805 
806 /*---------------------------------------------------------------------------*/
807 
808 static int
809 net2272_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
810 {
811 	struct net2272_request *req;
812 	struct net2272_ep *ep;
813 	struct net2272 *dev;
814 	unsigned long flags;
815 	int status = -1;
816 	u8 s;
817 
818 	req = container_of(_req, struct net2272_request, req);
819 	if (!_req || !_req->complete || !_req->buf
820 			|| !list_empty(&req->queue))
821 		return -EINVAL;
822 	ep = container_of(_ep, struct net2272_ep, ep);
823 	if (!_ep || (!ep->desc && ep->num != 0))
824 		return -EINVAL;
825 	dev = ep->dev;
826 	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
827 		return -ESHUTDOWN;
828 
829 	/* set up dma mapping in case the caller didn't */
830 	if (use_dma && ep->dma) {
831 		status = usb_gadget_map_request(&dev->gadget, _req,
832 				ep->is_in);
833 		if (status)
834 			return status;
835 	}
836 
837 	dev_vdbg(dev->dev, "%s queue req %p, len %d buf %p dma %08llx %s\n",
838 		_ep->name, _req, _req->length, _req->buf,
839 		(unsigned long long) _req->dma, _req->zero ? "zero" : "!zero");
840 
841 	spin_lock_irqsave(&dev->lock, flags);
842 
843 	_req->status = -EINPROGRESS;
844 	_req->actual = 0;
845 
846 	/* kickstart this i/o queue? */
847 	if (list_empty(&ep->queue) && !ep->stopped) {
848 		/* maybe there's no control data, just status ack */
849 		if (ep->num == 0 && _req->length == 0) {
850 			net2272_done(ep, req, 0);
851 			dev_vdbg(dev->dev, "%s status ack\n", ep->ep.name);
852 			goto done;
853 		}
854 
855 		/* Return zlp, don't let it block subsequent packets */
856 		s = net2272_ep_read(ep, EP_STAT0);
857 		if (s & (1 << BUFFER_EMPTY)) {
858 			/* Buffer is empty; check for a blocking zlp and handle it */
859 			if ((s & (1 << NAK_OUT_PACKETS)) &&
860 			    net2272_ep_read(ep, EP_STAT1) & (1 << LOCAL_OUT_ZLP)) {
861 				dev_dbg(dev->dev, "WARNING: returning ZLP short packet termination!\n");
862 				/*
863 				 * Request is going to terminate with a short packet ...
864 				 * hope the client is ready for it!
865 				 */
866 				status = net2272_read_fifo(ep, req);
867 				/* clear short packet naking */
868 				net2272_ep_write(ep, EP_STAT0, (1 << NAK_OUT_PACKETS));
869 				goto done;
870 			}
871 		}
872 
873 		/* try dma first */
874 		status = net2272_kick_dma(ep, req);
875 
876 		if (status < 0) {
877 			/* dma failed (most likely in use by another endpoint)
878 			 * fallback to pio
879 			 */
880 			status = 0;
881 
882 			if (ep->is_in)
883 				status = net2272_write_fifo(ep, req);
884 			else {
885 				s = net2272_ep_read(ep, EP_STAT0);
886 				if ((s & (1 << BUFFER_EMPTY)) == 0)
887 					status = net2272_read_fifo(ep, req);
888 			}
889 
890 			if (unlikely(status != 0)) {
891 				if (status > 0)
892 					status = 0;
893 				req = NULL;
894 			}
895 		}
896 	}
897 	if (likely(req))
898 		list_add_tail(&req->queue, &ep->queue);
899 
900 	if (likely(!list_empty(&ep->queue)))
901 		net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
902  done:
903 	spin_unlock_irqrestore(&dev->lock, flags);
904 
905 	return 0;
906 }
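
/*
 * Typical caller-side sequence (gadget driver, shown for orientation only;
 * my_complete is a hypothetical completion callback):
 *
 *	req = usb_ep_alloc_request(ep, GFP_ATOMIC);
 *	req->buf = buf;
 *	req->length = len;
 *	req->complete = my_complete;
 *	status = usb_ep_queue(ep, req, GFP_ATOMIC);
 *
 * usb_ep_queue() dispatches to net2272_queue() above via net2272_ep_ops.
 */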
907 
908 /* dequeue ALL requests */
909 static void
910 net2272_dequeue_all(struct net2272_ep *ep)
911 {
912 	struct net2272_request *req;
913 
914 	/* called with spinlock held */
915 	ep->stopped = 1;
916 
917 	while (!list_empty(&ep->queue)) {
918 		req = list_entry(ep->queue.next,
919 				struct net2272_request,
920 				queue);
921 		net2272_done(ep, req, -ESHUTDOWN);
922 	}
923 }
924 
925 /* dequeue JUST ONE request */
926 static int
927 net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req)
928 {
929 	struct net2272_ep *ep;
930 	struct net2272_request *req;
931 	unsigned long flags;
932 	int stopped;
933 
934 	ep = container_of(_ep, struct net2272_ep, ep);
935 	if (!_ep || (!ep->desc && ep->num != 0) || !_req)
936 		return -EINVAL;
937 
938 	spin_lock_irqsave(&ep->dev->lock, flags);
939 	stopped = ep->stopped;
940 	ep->stopped = 1;
941 
942 	/* make sure it's still queued on this endpoint */
943 	list_for_each_entry(req, &ep->queue, queue) {
944 		if (&req->req == _req)
945 			break;
946 	}
947 	if (&req->req != _req) {
948 		spin_unlock_irqrestore(&ep->dev->lock, flags);
949 		return -EINVAL;
950 	}
951 
952 	/* queue head may be partially complete */
953 	if (ep->queue.next == &req->queue) {
954 		dev_dbg(ep->dev->dev, "unlink (%s) pio\n", _ep->name);
955 		net2272_done(ep, req, -ECONNRESET);
956 	}
957 	req = NULL;
958 	ep->stopped = stopped;
959 
960 	spin_unlock_irqrestore(&ep->dev->lock, flags);
961 	return 0;
962 }
963 
964 /*---------------------------------------------------------------------------*/
965 
966 static int
967 net2272_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
968 {
969 	struct net2272_ep *ep;
970 	unsigned long flags;
971 	int ret = 0;
972 
973 	ep = container_of(_ep, struct net2272_ep, ep);
974 	if (!_ep || (!ep->desc && ep->num != 0))
975 		return -EINVAL;
976 	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
977 		return -ESHUTDOWN;
978 	if (ep->desc /* not ep0 */ && usb_endpoint_xfer_isoc(ep->desc))
979 		return -EINVAL;
980 
981 	spin_lock_irqsave(&ep->dev->lock, flags);
982 	if (!list_empty(&ep->queue))
983 		ret = -EAGAIN;
984 	else if (ep->is_in && value && net2272_fifo_status(_ep) != 0)
985 		ret = -EAGAIN;
986 	else {
987 		dev_vdbg(ep->dev->dev, "%s %s %s\n", _ep->name,
988 			value ? "set" : "clear",
989 			wedged ? "wedge" : "halt");
990 		/* set/clear */
991 		if (value) {
992 			if (ep->num == 0)
993 				ep->dev->protocol_stall = 1;
994 			else
995 				set_halt(ep);
996 			if (wedged)
997 				ep->wedged = 1;
998 		} else {
999 			clear_halt(ep);
1000 			ep->wedged = 0;
1001 		}
1002 	}
1003 	spin_unlock_irqrestore(&ep->dev->lock, flags);
1004 
1005 	return ret;
1006 }
1007 
1008 static int
1009 net2272_set_halt(struct usb_ep *_ep, int value)
1010 {
1011 	return net2272_set_halt_and_wedge(_ep, value, 0);
1012 }
1013 
1014 static int
1015 net2272_set_wedge(struct usb_ep *_ep)
1016 {
1017 	if (!_ep || _ep->name == ep0name)
1018 		return -EINVAL;
1019 	return net2272_set_halt_and_wedge(_ep, 1, 1);
1020 }
1021 
1022 static int
1023 net2272_fifo_status(struct usb_ep *_ep)
1024 {
1025 	struct net2272_ep *ep;
1026 	u16 avail;
1027 
1028 	ep = container_of(_ep, struct net2272_ep, ep);
1029 	if (!_ep || (!ep->desc && ep->num != 0))
1030 		return -ENODEV;
1031 	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1032 		return -ESHUTDOWN;
1033 
1034 	avail = net2272_ep_read(ep, EP_AVAIL1) << 8;
1035 	avail |= net2272_ep_read(ep, EP_AVAIL0);
1036 	if (avail > ep->fifo_size)
1037 		return -EOVERFLOW;
1038 	if (ep->is_in)
1039 		avail = ep->fifo_size - avail;
1040 	return avail;
1041 }
1042 
1043 static void
1044 net2272_fifo_flush(struct usb_ep *_ep)
1045 {
1046 	struct net2272_ep *ep;
1047 
1048 	ep = container_of(_ep, struct net2272_ep, ep);
1049 	if (!_ep || (!ep->desc && ep->num != 0))
1050 		return;
1051 	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1052 		return;
1053 
1054 	net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
1055 }
1056 
1057 static const struct usb_ep_ops net2272_ep_ops = {
1058 	.enable        = net2272_enable,
1059 	.disable       = net2272_disable,
1060 
1061 	.alloc_request = net2272_alloc_request,
1062 	.free_request  = net2272_free_request,
1063 
1064 	.queue         = net2272_queue,
1065 	.dequeue       = net2272_dequeue,
1066 
1067 	.set_halt      = net2272_set_halt,
1068 	.set_wedge     = net2272_set_wedge,
1069 	.fifo_status   = net2272_fifo_status,
1070 	.fifo_flush    = net2272_fifo_flush,
1071 };
1072 
1073 /*---------------------------------------------------------------------------*/
1074 
1075 static int
1076 net2272_get_frame(struct usb_gadget *_gadget)
1077 {
1078 	struct net2272 *dev;
1079 	unsigned long flags;
1080 	u16 ret;
1081 
1082 	if (!_gadget)
1083 		return -ENODEV;
1084 	dev = container_of(_gadget, struct net2272, gadget);
1085 	spin_lock_irqsave(&dev->lock, flags);
1086 
1087 	ret = net2272_read(dev, FRAME1) << 8;
1088 	ret |= net2272_read(dev, FRAME0);
1089 
1090 	spin_unlock_irqrestore(&dev->lock, flags);
1091 	return ret;
1092 }
1093 
1094 static int
1095 net2272_wakeup(struct usb_gadget *_gadget)
1096 {
1097 	struct net2272 *dev;
1098 	u8 tmp;
1099 	unsigned long flags;
1100 
1101 	if (!_gadget)
1102 		return 0;
1103 	dev = container_of(_gadget, struct net2272, gadget);
1104 
1105 	spin_lock_irqsave(&dev->lock, flags);
1106 	tmp = net2272_read(dev, USBCTL0);
1107 	if (tmp & (1 << IO_WAKEUP_ENABLE))
1108 		net2272_write(dev, USBCTL1, (1 << GENERATE_RESUME));
1109 
1110 	spin_unlock_irqrestore(&dev->lock, flags);
1111 
1112 	return 0;
1113 }
1114 
1115 static int
1116 net2272_set_selfpowered(struct usb_gadget *_gadget, int value)
1117 {
1118 	if (!_gadget)
1119 		return -ENODEV;
1120 
1121 	_gadget->is_selfpowered = (value != 0);
1122 
1123 	return 0;
1124 }
1125 
1126 static int
1127 net2272_pullup(struct usb_gadget *_gadget, int is_on)
1128 {
1129 	struct net2272 *dev;
1130 	u8 tmp;
1131 	unsigned long flags;
1132 
1133 	if (!_gadget)
1134 		return -ENODEV;
1135 	dev = container_of(_gadget, struct net2272, gadget);
1136 
1137 	spin_lock_irqsave(&dev->lock, flags);
1138 	tmp = net2272_read(dev, USBCTL0);
1139 	dev->softconnect = (is_on != 0);
1140 	if (is_on)
1141 		tmp |= (1 << USB_DETECT_ENABLE);
1142 	else
1143 		tmp &= ~(1 << USB_DETECT_ENABLE);
1144 	net2272_write(dev, USBCTL0, tmp);
1145 	spin_unlock_irqrestore(&dev->lock, flags);
1146 
1147 	return 0;
1148 }
1149 
1150 static int net2272_start(struct usb_gadget *_gadget,
1151 		struct usb_gadget_driver *driver);
1152 static int net2272_stop(struct usb_gadget *_gadget);
1153 
1154 static const struct usb_gadget_ops net2272_ops = {
1155 	.get_frame	= net2272_get_frame,
1156 	.wakeup		= net2272_wakeup,
1157 	.set_selfpowered = net2272_set_selfpowered,
1158 	.pullup		= net2272_pullup,
1159 	.udc_start	= net2272_start,
1160 	.udc_stop	= net2272_stop,
1161 };
1162 
1163 /*---------------------------------------------------------------------------*/
1164 
1165 static ssize_t
1166 registers_show(struct device *_dev, struct device_attribute *attr, char *buf)
1167 {
1168 	struct net2272 *dev;
1169 	char *next;
1170 	unsigned size, t;
1171 	unsigned long flags;
1172 	u8 t1, t2;
1173 	int i;
1174 	const char *s;
1175 
1176 	dev = dev_get_drvdata(_dev);
1177 	next = buf;
1178 	size = PAGE_SIZE;
1179 	spin_lock_irqsave(&dev->lock, flags);
1180 
1181 	if (dev->driver)
1182 		s = dev->driver->driver.name;
1183 	else
1184 		s = "(none)";
1185 
1186 	/* Main Control Registers */
1187 	t = scnprintf(next, size, "%s version %s,"
1188 		"chiprev %02x, locctl %02x\n"
1189 		"irqenb0 %02x irqenb1 %02x "
1190 		"irqstat0 %02x irqstat1 %02x\n",
1191 		driver_name, driver_vers, dev->chiprev,
1192 		net2272_read(dev, LOCCTL),
1193 		net2272_read(dev, IRQENB0),
1194 		net2272_read(dev, IRQENB1),
1195 		net2272_read(dev, IRQSTAT0),
1196 		net2272_read(dev, IRQSTAT1));
1197 	size -= t;
1198 	next += t;
1199 
1200 	/* DMA */
1201 	t1 = net2272_read(dev, DMAREQ);
1202 	t = scnprintf(next, size, "\ndmareq %02x: %s %s%s%s%s\n",
1203 		t1, ep_name[(t1 & 0x01) + 1],
1204 		t1 & (1 << DMA_CONTROL_DACK) ? "dack " : "",
1205 		t1 & (1 << DMA_REQUEST_ENABLE) ? "reqenb " : "",
1206 		t1 & (1 << DMA_REQUEST) ? "req " : "",
1207 		t1 & (1 << DMA_BUFFER_VALID) ? "valid " : "");
1208 	size -= t;
1209 	next += t;
1210 
1211 	/* USB Control Registers */
1212 	t1 = net2272_read(dev, USBCTL1);
1213 	if (t1 & (1 << VBUS_PIN)) {
1214 		if (t1 & (1 << USB_HIGH_SPEED))
1215 			s = "high speed";
1216 		else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1217 			s = "powered";
1218 		else
1219 			s = "full speed";
1220 	} else
1221 		s = "not attached";
1222 	t = scnprintf(next, size,
1223 		"usbctl0 %02x usbctl1 %02x addr 0x%02x (%s)\n",
1224 		net2272_read(dev, USBCTL0), t1,
1225 		net2272_read(dev, OURADDR), s);
1226 	size -= t;
1227 	next += t;
1228 
1229 	/* Endpoint Registers */
1230 	for (i = 0; i < 4; ++i) {
1231 		struct net2272_ep *ep;
1232 
1233 		ep = &dev->ep[i];
1234 		if (i && !ep->desc)
1235 			continue;
1236 
1237 		t1 = net2272_ep_read(ep, EP_CFG);
1238 		t2 = net2272_ep_read(ep, EP_RSPSET);
1239 		t = scnprintf(next, size,
1240 			"\n%s\tcfg %02x rsp (%02x) %s%s%s%s%s%s%s%s"
1241 			"irqenb %02x\n",
1242 			ep->ep.name, t1, t2,
1243 			(t2 & (1 << ALT_NAK_OUT_PACKETS)) ? "NAK " : "",
1244 			(t2 & (1 << HIDE_STATUS_PHASE)) ? "hide " : "",
1245 			(t2 & (1 << AUTOVALIDATE)) ? "auto " : "",
1246 			(t2 & (1 << INTERRUPT_MODE)) ? "interrupt " : "",
1247 			(t2 & (1 << CONTROL_STATUS_PHASE_HANDSHAKE)) ? "status " : "",
1248 			(t2 & (1 << NAK_OUT_PACKETS_MODE)) ? "NAKmode " : "",
1249 			(t2 & (1 << ENDPOINT_TOGGLE)) ? "DATA1 " : "DATA0 ",
1250 			(t2 & (1 << ENDPOINT_HALT)) ? "HALT " : "",
1251 			net2272_ep_read(ep, EP_IRQENB));
1252 		size -= t;
1253 		next += t;
1254 
1255 		t = scnprintf(next, size,
1256 			"\tstat0 %02x stat1 %02x avail %04x "
1257 			"(ep%d%s-%s)%s\n",
1258 			net2272_ep_read(ep, EP_STAT0),
1259 			net2272_ep_read(ep, EP_STAT1),
1260 			(net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0),
1261 			t1 & 0x0f,
1262 			ep->is_in ? "in" : "out",
1263 			type_string(t1 >> 5),
1264 			ep->stopped ? "*" : "");
1265 		size -= t;
1266 		next += t;
1267 
1268 		t = scnprintf(next, size,
1269 			"\tep_transfer %06x\n",
1270 			((net2272_ep_read(ep, EP_TRANSFER2) & 0xff) << 16) |
1271 			((net2272_ep_read(ep, EP_TRANSFER1) & 0xff) << 8) |
1272 			((net2272_ep_read(ep, EP_TRANSFER0) & 0xff)));
1273 		size -= t;
1274 		next += t;
1275 
1276 		t1 = net2272_ep_read(ep, EP_BUFF_STATES) & 0x03;
1277 		t2 = (net2272_ep_read(ep, EP_BUFF_STATES) >> 2) & 0x03;
1278 		t = scnprintf(next, size,
1279 			"\tbuf-a %s buf-b %s\n",
1280 			buf_state_string(t1),
1281 			buf_state_string(t2));
1282 		size -= t;
1283 		next += t;
1284 	}
1285 
1286 	spin_unlock_irqrestore(&dev->lock, flags);
1287 
1288 	return PAGE_SIZE - size;
1289 }
1290 static DEVICE_ATTR_RO(registers);
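
/*
 * Assuming probe code (outside this section) registers the attribute on the
 * underlying platform/PCI device, the dump above can be read from userspace
 * with something like (path is a placeholder, it depends on the bus/board):
 *
 *	cat /sys/devices/<path-to-net2272-device>/registers
 */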
1291 
1292 /*---------------------------------------------------------------------------*/
1293 
1294 static void
1295 net2272_set_fifo_mode(struct net2272 *dev, int mode)
1296 {
1297 	u8 tmp;
1298 
1299 	tmp = net2272_read(dev, LOCCTL) & 0x3f;
1300 	tmp |= (mode << 6);
1301 	net2272_write(dev, LOCCTL, tmp);
1302 
1303 	INIT_LIST_HEAD(&dev->gadget.ep_list);
1304 
1305 	/* always ep-a, ep-c ... maybe not ep-b */
1306 	list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list);
1307 
1308 	switch (mode) {
1309 	case 0:
1310 		list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1311 		dev->ep[1].fifo_size = dev->ep[2].fifo_size = 512;
1312 		break;
1313 	case 1:
1314 		list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1315 		dev->ep[1].fifo_size = 1024;
1316 		dev->ep[2].fifo_size = 512;
1317 		break;
1318 	case 2:
1319 		list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1320 		dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024;
1321 		break;
1322 	case 3:
1323 		dev->ep[1].fifo_size = 1024;
1324 		break;
1325 	}
1326 
1327 	/* ep-c always has two 512-byte buffers */
1328 	list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
1329 	dev->ep[3].fifo_size = 512;
1330 }
1331 
1332 /*---------------------------------------------------------------------------*/
1333 
1334 static void
1335 net2272_usb_reset(struct net2272 *dev)
1336 {
1337 	dev->gadget.speed = USB_SPEED_UNKNOWN;
1338 
1339 	net2272_cancel_dma(dev);
1340 
1341 	net2272_write(dev, IRQENB0, 0);
1342 	net2272_write(dev, IRQENB1, 0);
1343 
1344 	/* clear irq state */
1345 	net2272_write(dev, IRQSTAT0, 0xff);
1346 	net2272_write(dev, IRQSTAT1, ~(1 << SUSPEND_REQUEST_INTERRUPT));
1347 
1348 	net2272_write(dev, DMAREQ,
1349 		(0 << DMA_BUFFER_VALID) |
1350 		(0 << DMA_REQUEST_ENABLE) |
1351 		(1 << DMA_CONTROL_DACK) |
1352 		(dev->dma_eot_polarity << EOT_POLARITY) |
1353 		(dev->dma_dack_polarity << DACK_POLARITY) |
1354 		(dev->dma_dreq_polarity << DREQ_POLARITY) |
1355 		((dma_ep >> 1) << DMA_ENDPOINT_SELECT));
1356 
1357 	net2272_cancel_dma(dev);
1358 	net2272_set_fifo_mode(dev, (fifo_mode <= 3) ? fifo_mode : 0);
1359 
1360 	/* Set the NET2272 ep fifo data width to 16-bit mode.  Higher level gadget
1361 	 * drivers are expected to convert data to little endian; enable byte swap
1362 	 * for your local bus/cpu if needed by setting BYTE_SWAP in LOCCTL here.
1363 	 */
1364 	net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) | (1 << DATA_WIDTH));
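	/*
	 * Hypothetical variant for a byte-swapping local bus, as the comment
	 * above suggests (sketch only, not enabled here):
	 *
	 *	net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) |
	 *		(1 << DATA_WIDTH) | (1 << BYTE_SWAP));
	 */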
1365 	net2272_write(dev, LOCCTL1, (dma_mode << DMA_MODE));
1366 }
1367 
1368 static void
1369 net2272_usb_reinit(struct net2272 *dev)
1370 {
1371 	int i;
1372 
1373 	/* basic endpoint init */
1374 	for (i = 0; i < 4; ++i) {
1375 		struct net2272_ep *ep = &dev->ep[i];
1376 
1377 		ep->ep.name = ep_name[i];
1378 		ep->dev = dev;
1379 		ep->num = i;
1380 		ep->not_empty = 0;
1381 
1382 		if (use_dma && ep->num == dma_ep)
1383 			ep->dma = 1;
1384 
1385 		if (i > 0 && i <= 3)
1386 			ep->fifo_size = 512;
1387 		else
1388 			ep->fifo_size = 64;
1389 		net2272_ep_reset(ep);
1390 
1391 		if (i == 0) {
1392 			ep->ep.caps.type_control = true;
1393 		} else {
1394 			ep->ep.caps.type_iso = true;
1395 			ep->ep.caps.type_bulk = true;
1396 			ep->ep.caps.type_int = true;
1397 		}
1398 
1399 		ep->ep.caps.dir_in = true;
1400 		ep->ep.caps.dir_out = true;
1401 	}
1402 	usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 64);
1403 
1404 	dev->gadget.ep0 = &dev->ep[0].ep;
1405 	dev->ep[0].stopped = 0;
1406 	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
1407 }
1408 
1409 static void
1410 net2272_ep0_start(struct net2272 *dev)
1411 {
1412 	struct net2272_ep *ep0 = &dev->ep[0];
1413 
1414 	net2272_ep_write(ep0, EP_RSPSET,
1415 		(1 << NAK_OUT_PACKETS_MODE) |
1416 		(1 << ALT_NAK_OUT_PACKETS));
1417 	net2272_ep_write(ep0, EP_RSPCLR,
1418 		(1 << HIDE_STATUS_PHASE) |
1419 		(1 << CONTROL_STATUS_PHASE_HANDSHAKE));
1420 	net2272_write(dev, USBCTL0,
1421 		(dev->softconnect << USB_DETECT_ENABLE) |
1422 		(1 << USB_ROOT_PORT_WAKEUP_ENABLE) |
1423 		(1 << IO_WAKEUP_ENABLE));
1424 	net2272_write(dev, IRQENB0,
1425 		(1 << SETUP_PACKET_INTERRUPT_ENABLE) |
1426 		(1 << ENDPOINT_0_INTERRUPT_ENABLE) |
1427 		(1 << DMA_DONE_INTERRUPT_ENABLE));
1428 	net2272_write(dev, IRQENB1,
1429 		(1 << VBUS_INTERRUPT_ENABLE) |
1430 		(1 << ROOT_PORT_RESET_INTERRUPT_ENABLE) |
1431 		(1 << SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE));
1432 }
1433 
1434 /* when a driver is successfully registered, it will receive
1435  * control requests including set_configuration(), which enables
1436  * non-control requests.  then usb traffic follows until a
1437  * disconnect is reported.  then a host may connect again, or
1438  * the driver might get unbound.
1439  */
1440 static int net2272_start(struct usb_gadget *_gadget,
1441 		struct usb_gadget_driver *driver)
1442 {
1443 	struct net2272 *dev;
1444 	unsigned i;
1445 
1446 	if (!driver || !driver->setup ||
1447 	    driver->max_speed != USB_SPEED_HIGH)
1448 		return -EINVAL;
1449 
1450 	dev = container_of(_gadget, struct net2272, gadget);
1451 
1452 	for (i = 0; i < 4; ++i)
1453 		dev->ep[i].irqs = 0;
1454 	/* hook up the driver ... */
1455 	dev->softconnect = 1;
1456 	driver->driver.bus = NULL;
1457 	dev->driver = driver;
1458 
1459 	/* ... then enable host detection and ep0; and we're ready
1460 	 * for set_configuration as well as eventual disconnect.
1461 	 */
1462 	net2272_ep0_start(dev);
1463 
1464 	return 0;
1465 }
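
/*
 * Minimal sketch of the peer of this handshake: a gadget driver that binds
 * to this UDC.  The example_* names are assumptions for illustration; the
 * usb_gadget_driver fields and usb_gadget_register_driver() are standard
 * gadget API.  Note that net2272_start() above requires
 * max_speed == USB_SPEED_HIGH.
 *
 *	static struct usb_gadget_driver example_driver = {
 *		.function	= "example",
 *		.max_speed	= USB_SPEED_HIGH,
 *		.bind		= example_bind,
 *		.unbind		= example_unbind,
 *		.setup		= example_setup,
 *		.disconnect	= example_disconnect,
 *		.driver		= { .name = "example_gadget" },
 *	};
 *
 *	usb_gadget_register_driver(&example_driver);
 */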
1466 
1467 static void
1468 stop_activity(struct net2272 *dev, struct usb_gadget_driver *driver)
1469 {
1470 	int i;
1471 
1472 	/* don't disconnect if it's not connected */
1473 	if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1474 		driver = NULL;
1475 
1476 	/* stop hardware; prevent new request submissions;
1477 	 * and kill any outstanding requests.
1478 	 */
1479 	net2272_usb_reset(dev);
1480 	for (i = 0; i < 4; ++i)
1481 		net2272_dequeue_all(&dev->ep[i]);
1482 
1483 	/* report disconnect; the driver is already quiesced */
1484 	if (driver) {
1485 		spin_unlock(&dev->lock);
1486 		driver->disconnect(&dev->gadget);
1487 		spin_lock(&dev->lock);
1488 	}
1489 
1490 	net2272_usb_reinit(dev);
1491 }
1492 
1493 static int net2272_stop(struct usb_gadget *_gadget)
1494 {
1495 	struct net2272 *dev;
1496 	unsigned long flags;
1497 
1498 	dev = container_of(_gadget, struct net2272, gadget);
1499 
1500 	spin_lock_irqsave(&dev->lock, flags);
1501 	stop_activity(dev, NULL);
1502 	spin_unlock_irqrestore(&dev->lock, flags);
1503 
1504 	dev->driver = NULL;
1505 
1506 	return 0;
1507 }
1508 
1509 /*---------------------------------------------------------------------------*/
1510 /* handle ep-a/ep-b dma completions */
1511 static void
1512 net2272_handle_dma(struct net2272_ep *ep)
1513 {
1514 	struct net2272_request *req;
1515 	unsigned len;
1516 	int status;
1517 
1518 	if (!list_empty(&ep->queue))
1519 		req = list_entry(ep->queue.next,
1520 				struct net2272_request, queue);
1521 	else
1522 		req = NULL;
1523 
1524 	dev_vdbg(ep->dev->dev, "handle_dma %s req %p\n", ep->ep.name, req);
1525 
1526 	/* Ensure DREQ is de-asserted */
1527 	net2272_write(ep->dev, DMAREQ,
1528 		(0 << DMA_BUFFER_VALID)
1529 	      | (0 << DMA_REQUEST_ENABLE)
1530 	      | (1 << DMA_CONTROL_DACK)
1531 	      | (ep->dev->dma_eot_polarity << EOT_POLARITY)
1532 	      | (ep->dev->dma_dack_polarity << DACK_POLARITY)
1533 	      | (ep->dev->dma_dreq_polarity << DREQ_POLARITY)
1534 	      | (ep->dma << DMA_ENDPOINT_SELECT));
1535 
1536 	ep->dev->dma_busy = 0;
1537 
1538 	net2272_ep_write(ep, EP_IRQENB,
1539 		  (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
1540 		| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
1541 		| net2272_ep_read(ep, EP_IRQENB));
1542 
1543 	/* device-to-host transfer completed */
1544 	if (ep->is_in) {
1545 		/* validate a short packet or zlp if necessary */
1546 		if ((req->req.length % ep->ep.maxpacket != 0) ||
1547 				req->req.zero)
1548 			set_fifo_bytecount(ep, 0);
1549 
1550 		net2272_done(ep, req, 0);
1551 		if (!list_empty(&ep->queue)) {
1552 			req = list_entry(ep->queue.next,
1553 					struct net2272_request, queue);
1554 			status = net2272_kick_dma(ep, req);
1555 			if (status < 0)
1556 				net2272_pio_advance(ep);
1557 		}
1558 
1559 	/* host-to-device transfer completed */
1560 	} else {
1561 		/* terminated with a short packet? */
1562 		if (net2272_read(ep->dev, IRQSTAT0) &
1563 				(1 << DMA_DONE_INTERRUPT)) {
1564 			/* abort system dma */
1565 			net2272_cancel_dma(ep->dev);
1566 		}
1567 
1568 		/* EP_TRANSFER will contain the number of bytes
1569 		 * actually received.
1570 		 * NOTE: There is no overflow detection on EP_TRANSFER:
1571 		 * We can't deal with transfers larger than 2^24 bytes!
1572 		 */
1573 		len = (net2272_ep_read(ep, EP_TRANSFER2) << 16)
1574 			| (net2272_ep_read(ep, EP_TRANSFER1) << 8)
1575 			| (net2272_ep_read(ep, EP_TRANSFER0));
1576 
1577 		if (ep->not_empty)
1578 			len += 4;
1579 
1580 		req->req.actual += len;
1581 
1582 		/* get any remaining data */
1583 		net2272_pio_advance(ep);
1584 	}
1585 }
1586 
1587 /*---------------------------------------------------------------------------*/
1588 
1589 static void
1590 net2272_handle_ep(struct net2272_ep *ep)
1591 {
1592 	struct net2272_request *req;
1593 	u8 stat0, stat1;
1594 
1595 	if (!list_empty(&ep->queue))
1596 		req = list_entry(ep->queue.next,
1597 			struct net2272_request, queue);
1598 	else
1599 		req = NULL;
1600 
1601 	/* ack all, and handle what we care about */
1602 	stat0 = net2272_ep_read(ep, EP_STAT0);
1603 	stat1 = net2272_ep_read(ep, EP_STAT1);
1604 	ep->irqs++;
1605 
1606 	dev_vdbg(ep->dev->dev, "%s ack ep_stat0 %02x, ep_stat1 %02x, req %p\n",
1607 		ep->ep.name, stat0, stat1, req ? &req->req : NULL);
1608 
1609 	net2272_ep_write(ep, EP_STAT0, stat0 &
1610 		~((1 << NAK_OUT_PACKETS)
1611 		| (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)));
1612 	net2272_ep_write(ep, EP_STAT1, stat1);
1613 
1614 	/* data packet(s) received (in the fifo, OUT)
1615 	 * direction must be validated, otherwise control read status phase
1616 	 * could be interpreted as a valid packet
1617 	 */
1618 	if (!ep->is_in && (stat0 & (1 << DATA_PACKET_RECEIVED_INTERRUPT)))
1619 		net2272_pio_advance(ep);
1620 	/* data packet(s) transmitted (IN) */
1621 	else if (stat0 & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT))
1622 		net2272_pio_advance(ep);
1623 }
1624 
1625 static struct net2272_ep *
1626 net2272_get_ep_by_addr(struct net2272 *dev, u16 wIndex)
1627 {
1628 	struct net2272_ep *ep;
1629 
1630 	if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
1631 		return &dev->ep[0];
1632 
1633 	list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
1634 		u8 bEndpointAddress;
1635 
1636 		if (!ep->desc)
1637 			continue;
1638 		bEndpointAddress = ep->desc->bEndpointAddress;
1639 		if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
1640 			continue;
1641 		if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f))
1642 			return ep;
1643 	}
1644 	return NULL;
1645 }
1646 
1647 /*
1648  * USB Test Packet:
1649  * JKJKJKJK * 9
1650  * JJKKJJKK * 8
1651  * JJJJKKKK * 8
1652  * JJJJJJJKKKKKKK * 8
1653  * JJJJJJJK * 8
1654  * {JKKKKKKK * 10}, JK
1655  */
1656 static const u8 net2272_test_packet[] = {
1657 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1658 	0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
1659 	0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE,
1660 	0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
1661 	0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD,
1662 	0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFD, 0x7E
1663 };
1664 
1665 static void
1666 net2272_set_test_mode(struct net2272 *dev, int mode)
1667 {
1668 	int i;
1669 
1670 	/* Disable all net2272 interrupts:
1671 	 * Nothing but a power cycle should stop the test.
1672 	 */
1673 	net2272_write(dev, IRQENB0, 0x00);
1674 	net2272_write(dev, IRQENB1, 0x00);
1675 
1676 	/* Force transceiver to high-speed */
1677 	net2272_write(dev, XCVRDIAG, 1 << FORCE_HIGH_SPEED);
1678 
1679 	net2272_write(dev, PAGESEL, 0);
1680 	net2272_write(dev, EP_STAT0, 1 << DATA_PACKET_TRANSMITTED_INTERRUPT);
1681 	net2272_write(dev, EP_RSPCLR,
1682 			  (1 << CONTROL_STATUS_PHASE_HANDSHAKE)
1683 			| (1 << HIDE_STATUS_PHASE));
1684 	net2272_write(dev, EP_CFG, 1 << ENDPOINT_DIRECTION);
1685 	net2272_write(dev, EP_STAT1, 1 << BUFFER_FLUSH);
1686 
1687 	/* wait for status phase to complete */
1688 	while (!(net2272_read(dev, EP_STAT0) &
1689 				(1 << DATA_PACKET_TRANSMITTED_INTERRUPT)))
1690 		;
1691 
1692 	/* Enable test mode */
1693 	net2272_write(dev, USBTEST, mode);
1694 
1695 	/* load test packet */
1696 	if (mode == TEST_PACKET) {
1697 		/* switch to 8 bit mode */
1698 		net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) &
1699 				~(1 << DATA_WIDTH));
1700 
1701 		for (i = 0; i < sizeof(net2272_test_packet); ++i)
1702 			net2272_write(dev, EP_DATA, net2272_test_packet[i]);
1703 
1704 		/* Validate test packet */
1705 		net2272_write(dev, EP_TRANSFER0, 0);
1706 	}
1707 }
1708 
1709 static void
1710 net2272_handle_stat0_irqs(struct net2272 *dev, u8 stat)
1711 {
1712 	struct net2272_ep *ep;
1713 	u8 num, scratch;
1714 
1715 	/* starting a control request? */
1716 	if (unlikely(stat & (1 << SETUP_PACKET_INTERRUPT))) {
1717 		union {
1718 			u8 raw[8];
1719 			struct usb_ctrlrequest	r;
1720 		} u;
1721 		int tmp = 0;
1722 		struct net2272_request *req;
1723 
1724 		if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
1725 			if (net2272_read(dev, USBCTL1) & (1 << USB_HIGH_SPEED))
1726 				dev->gadget.speed = USB_SPEED_HIGH;
1727 			else
1728 				dev->gadget.speed = USB_SPEED_FULL;
1729 			dev_dbg(dev->dev, "%s\n",
1730 				usb_speed_string(dev->gadget.speed));
1731 		}
1732 
1733 		ep = &dev->ep[0];
1734 		ep->irqs++;
1735 
1736 		/* make sure any leftover interrupt state is cleared */
1737 		stat &= ~(1 << ENDPOINT_0_INTERRUPT);
1738 		while (!list_empty(&ep->queue)) {
1739 			req = list_entry(ep->queue.next,
1740 				struct net2272_request, queue);
1741 			net2272_done(ep, req,
1742 				(req->req.actual == req->req.length) ? 0 : -EPROTO);
1743 		}
1744 		ep->stopped = 0;
1745 		dev->protocol_stall = 0;
1746 		net2272_ep_write(ep, EP_STAT0,
1747 			    (1 << DATA_IN_TOKEN_INTERRUPT)
1748 			  | (1 << DATA_OUT_TOKEN_INTERRUPT)
1749 			  | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
1750 			  | (1 << DATA_PACKET_RECEIVED_INTERRUPT)
1751 			  | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));
1752 		net2272_ep_write(ep, EP_STAT1,
1753 			    (1 << TIMEOUT)
1754 			  | (1 << USB_OUT_ACK_SENT)
1755 			  | (1 << USB_OUT_NAK_SENT)
1756 			  | (1 << USB_IN_ACK_RCVD)
1757 			  | (1 << USB_IN_NAK_SENT)
1758 			  | (1 << USB_STALL_SENT)
1759 			  | (1 << LOCAL_OUT_ZLP));
1760 
1761 		/*
1762 		 * Ensure Control Read pre-validation setting is beyond maximum size
1763 		 *  - Control Writes can leave non-zero values in EP_TRANSFER. If
1764 		 *    an EP0 transfer following the Control Write is a Control Read,
1765 		 *    the NET2272 sees the non-zero EP_TRANSFER as an unexpected
1766 		 *    pre-validation count.
1767 		 *  - Setting EP_TRANSFER beyond the maximum EP0 transfer size ensures
1768 		 *    the pre-validation count cannot cause an unexpected validation
1769 		 */
1770 		net2272_write(dev, PAGESEL, 0);
1771 		net2272_write(dev, EP_TRANSFER2, 0xff);
1772 		net2272_write(dev, EP_TRANSFER1, 0xff);
1773 		net2272_write(dev, EP_TRANSFER0, 0xff);
1774 
1775 		u.raw[0] = net2272_read(dev, SETUP0);
1776 		u.raw[1] = net2272_read(dev, SETUP1);
1777 		u.raw[2] = net2272_read(dev, SETUP2);
1778 		u.raw[3] = net2272_read(dev, SETUP3);
1779 		u.raw[4] = net2272_read(dev, SETUP4);
1780 		u.raw[5] = net2272_read(dev, SETUP5);
1781 		u.raw[6] = net2272_read(dev, SETUP6);
1782 		u.raw[7] = net2272_read(dev, SETUP7);
1783 		/*
1784 		 * If you have a big endian cpu make sure le16_to_cpus
1785 		 * performs the proper byte swapping here...
1786 		 */
1787 		le16_to_cpus(&u.r.wValue);
1788 		le16_to_cpus(&u.r.wIndex);
1789 		le16_to_cpus(&u.r.wLength);
1790 
1791 		/* ack the irq */
1792 		net2272_write(dev, IRQSTAT0, 1 << SETUP_PACKET_INTERRUPT);
1793 		stat ^= (1 << SETUP_PACKET_INTERRUPT);
1794 
1795 		/* watch control traffic at the token level, and force
1796 		 * synchronization before letting the status phase happen.
1797 		 */
1798 		ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
1799 		if (ep->is_in) {
1800 			scratch = (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
1801 				| (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
1802 				| (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
1803 			stop_out_naking(ep);
1804 		} else
1805 			scratch = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
1806 				| (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
1807 				| (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
1808 		net2272_ep_write(ep, EP_IRQENB, scratch);
1809 
1810 		if ((u.r.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD)
1811 			goto delegate;
1812 		switch (u.r.bRequest) {
1813 		case USB_REQ_GET_STATUS: {
1814 			struct net2272_ep *e;
1815 			u16 status = 0;
1816 
1817 			switch (u.r.bRequestType & USB_RECIP_MASK) {
1818 			case USB_RECIP_ENDPOINT:
1819 				e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1820 				if (!e || u.r.wLength > 2)
1821 					goto do_stall;
1822 				if (net2272_ep_read(e, EP_RSPSET) & (1 << ENDPOINT_HALT))
1823 					status = cpu_to_le16(1);
1824 				else
1825 					status = cpu_to_le16(0);
1826 
1827 				/* don't bother with a request object! */
1828 				net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1829 				writew(status, net2272_reg_addr(dev, EP_DATA));
1830 				set_fifo_bytecount(&dev->ep[0], 0);
1831 				allow_status(ep);
1832 				dev_vdbg(dev->dev, "%s stat %02x\n",
1833 					ep->ep.name, status);
1834 				goto next_endpoints;
1835 			case USB_RECIP_DEVICE:
1836 				if (u.r.wLength > 2)
1837 					goto do_stall;
1838 				if (dev->gadget.is_selfpowered)
1839 					status = (1 << USB_DEVICE_SELF_POWERED);
1840 
1841 				/* don't bother with a request object! */
1842 				net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1843 				writew(status, net2272_reg_addr(dev, EP_DATA));
1844 				set_fifo_bytecount(&dev->ep[0], 0);
1845 				allow_status(ep);
1846 				dev_vdbg(dev->dev, "device stat %02x\n", status);
1847 				goto next_endpoints;
1848 			case USB_RECIP_INTERFACE:
1849 				if (u.r.wLength > 2)
1850 					goto do_stall;
1851 
1852 				/* don't bother with a request object! */
1853 				net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1854 				writew(status, net2272_reg_addr(dev, EP_DATA));
1855 				set_fifo_bytecount(&dev->ep[0], 0);
1856 				allow_status(ep);
1857 				dev_vdbg(dev->dev, "interface status %02x\n", status);
1858 				goto next_endpoints;
1859 			}
1860 
1861 			break;
1862 		}
1863 		case USB_REQ_CLEAR_FEATURE: {
1864 			struct net2272_ep *e;
1865 
1866 			if (u.r.bRequestType != USB_RECIP_ENDPOINT)
1867 				goto delegate;
1868 			if (u.r.wValue != USB_ENDPOINT_HALT ||
1869 			    u.r.wLength != 0)
1870 				goto do_stall;
1871 			e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1872 			if (!e)
1873 				goto do_stall;
1874 			if (e->wedged) {
1875 				dev_vdbg(dev->dev, "%s wedged, halt not cleared\n",
1876 					ep->ep.name);
1877 			} else {
1878 				dev_vdbg(dev->dev, "%s clear halt\n", ep->ep.name);
1879 				clear_halt(e);
1880 			}
1881 			allow_status(ep);
1882 			goto next_endpoints;
1883 		}
1884 		case USB_REQ_SET_FEATURE: {
1885 			struct net2272_ep *e;
1886 
1887 			if (u.r.bRequestType == USB_RECIP_DEVICE) {
1888 				if (u.r.wIndex != NORMAL_OPERATION)
1889 					net2272_set_test_mode(dev, (u.r.wIndex >> 8));
1890 				allow_status(ep);
1891 				dev_vdbg(dev->dev, "test mode: %d\n", u.r.wIndex);
1892 				goto next_endpoints;
1893 			} else if (u.r.bRequestType != USB_RECIP_ENDPOINT)
1894 				goto delegate;
1895 			if (u.r.wValue != USB_ENDPOINT_HALT ||
1896 			    u.r.wLength != 0)
1897 				goto do_stall;
1898 			e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1899 			if (!e)
1900 				goto do_stall;
1901 			set_halt(e);
1902 			allow_status(ep);
1903 			dev_vdbg(dev->dev, "%s set halt\n", ep->ep.name);
1904 			goto next_endpoints;
1905 		}
1906 		case USB_REQ_SET_ADDRESS: {
1907 			net2272_write(dev, OURADDR, u.r.wValue & 0xff);
1908 			allow_status(ep);
1909 			break;
1910 		}
1911 		default:
1912  delegate:
1913 			dev_vdbg(dev->dev, "setup %02x.%02x v%04x i%04x "
1914 				"ep_cfg %08x\n",
1915 				u.r.bRequestType, u.r.bRequest,
1916 				u.r.wValue, u.r.wIndex,
1917 				net2272_ep_read(ep, EP_CFG));
1918 			spin_unlock(&dev->lock);
1919 			tmp = dev->driver->setup(&dev->gadget, &u.r);
1920 			spin_lock(&dev->lock);
1921 		}
1922 
1923 		/* stall ep0 on error */
1924 		if (tmp < 0) {
1925  do_stall:
1926 			dev_vdbg(dev->dev, "req %02x.%02x protocol STALL; stat %d\n",
1927 				u.r.bRequestType, u.r.bRequest, tmp);
1928 			dev->protocol_stall = 1;
1929 		}
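		/*
		 * Flow note: standard requests the switch above cannot handle
		 * locally (and all class/vendor requests) are passed to the
		 * gadget driver's setup() callback via the 'delegate' label;
		 * malformed requests and setup() failures both end up at
		 * 'do_stall', which arms a protocol STALL on ep0.
		 */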
1930 	/* endpoint dma irq? */
1931 	} else if (stat & (1 << DMA_DONE_INTERRUPT)) {
1932 		net2272_cancel_dma(dev);
1933 		net2272_write(dev, IRQSTAT0, 1 << DMA_DONE_INTERRUPT);
1934 		stat &= ~(1 << DMA_DONE_INTERRUPT);
1935 		num = (net2272_read(dev, DMAREQ) & (1 << DMA_ENDPOINT_SELECT))
1936 			? 2 : 1;
1937 
1938 		ep = &dev->ep[num];
1939 		net2272_handle_dma(ep);
1940 	}
1941 
1942  next_endpoints:
1943 	/* endpoint data irq? */
1944 	scratch = stat & 0x0f;
1945 	stat &= ~0x0f;
1946 	for (num = 0; scratch; num++) {
1947 		u8 t;
1948 
1949 		/* does this endpoint's FIFO and queue need tending? */
1950 		t = 1 << num;
1951 		if ((scratch & t) == 0)
1952 			continue;
1953 		scratch ^= t;
1954 
1955 		ep = &dev->ep[num];
1956 		net2272_handle_ep(ep);
1957 	}
1958 
1959 	/* some interrupts we can just ignore */
1960 	stat &= ~(1 << SOF_INTERRUPT);
1961 
1962 	if (stat)
1963 		dev_dbg(dev->dev, "unhandled irqstat0 %02x\n", stat);
1964 }
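
/*
 * Illustrative sketch (not part of the driver, never compiled): the
 * "next_endpoints" loop above walks the low four bits of IRQSTAT0 and
 * hands each pending endpoint to net2272_handle_ep().  The same idiom in
 * isolation, with "pending" standing in for "stat & 0x0f":
 */
#if 0
static void example_dispatch_endpoints(struct net2272 *dev, u8 pending)
{
	unsigned int num;

	for (num = 0; pending; num++) {
		u8 bit = 1 << num;

		if (!(pending & bit))
			continue;
		pending &= ~bit;		/* this endpoint is handled */
		net2272_handle_ep(&dev->ep[num]);
	}
}
#endif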
1965 
1966 static void
1967 net2272_handle_stat1_irqs(struct net2272 *dev, u8 stat)
1968 {
1969 	u8 tmp, mask;
1970 
1971 	/* after disconnect there's nothing else to do! */
1972 	tmp = (1 << VBUS_INTERRUPT) | (1 << ROOT_PORT_RESET_INTERRUPT);
1973 	mask = (1 << USB_HIGH_SPEED) | (1 << USB_FULL_SPEED);
1974 
1975 	if (stat & tmp) {
1976 		bool	reset = false;
1977 		bool	disconnect = false;
1978 
1979 		/*
1980 		 * Ignore disconnects and resets if the speed hasn't been set.
1981 		 * VBUS can bounce and there's always an initial reset.
1982 		 */
1983 		net2272_write(dev, IRQSTAT1, tmp);
1984 		if (dev->gadget.speed != USB_SPEED_UNKNOWN) {
1985 			if ((stat & (1 << VBUS_INTERRUPT)) &&
1986 					(net2272_read(dev, USBCTL1) &
1987 						(1 << VBUS_PIN)) == 0) {
1988 				disconnect = true;
1989 				dev_dbg(dev->dev, "disconnect %s\n",
1990 					dev->driver->driver.name);
1991 			} else if ((stat & (1 << ROOT_PORT_RESET_INTERRUPT)) &&
1992 					(net2272_read(dev, USBCTL1) & mask)
1993 						== 0) {
1994 				reset = true;
1995 				dev_dbg(dev->dev, "reset %s\n",
1996 					dev->driver->driver.name);
1997 			}
1998 
1999 			if (disconnect || reset) {
2000 				stop_activity(dev, dev->driver);
2001 				net2272_ep0_start(dev);
2002 				spin_unlock(&dev->lock);
2003 				if (reset)
2004 					usb_gadget_udc_reset
2005 						(&dev->gadget, dev->driver);
2006 				else
2007 					(dev->driver->disconnect)
2008 						(&dev->gadget);
2009 				spin_lock(&dev->lock);
2010 				return;
2011 			}
2012 		}
2013 		stat &= ~tmp;
2014 
2015 		if (!stat)
2016 			return;
2017 	}
2018 
2019 	tmp = (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT);
2020 	if (stat & tmp) {
2021 		net2272_write(dev, IRQSTAT1, tmp);
2022 		if (stat & (1 << SUSPEND_REQUEST_INTERRUPT)) {
2023 			if (dev->driver->suspend)
2024 				dev->driver->suspend(&dev->gadget);
2025 			if (!enable_suspend) {
2026 				stat &= ~(1 << SUSPEND_REQUEST_INTERRUPT);
2027 				dev_dbg(dev->dev, "Suspend disabled, ignoring\n");
2028 			}
2029 		} else {
2030 			if (dev->driver->resume)
2031 				dev->driver->resume(&dev->gadget);
2032 		}
2033 		stat &= ~tmp;
2034 	}
2035 
2036 	/* clear any other status/irqs */
2037 	if (stat)
2038 		net2272_write(dev, IRQSTAT1, stat);
2039 
2040 	/* some status we can just ignore */
2041 	stat &= ~((1 << CONTROL_STATUS_INTERRUPT)
2042 			| (1 << SUSPEND_REQUEST_INTERRUPT)
2043 			| (1 << RESUME_INTERRUPT));
2044 	if (stat)
2045 		dev_dbg(dev->dev, "unhandled irqstat1 %02x\n", stat);
2048 }
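
/*
 * Illustrative sketch (not compiled): both stat handlers above acknowledge
 * a latched interrupt by writing its bit back to the IRQSTAT register and
 * then dropping it from the local "stat" copy, so whatever is still set at
 * the end is genuinely unhandled.  Minimal form of that pattern:
 */
#if 0
static u8 example_ack_irqstat1(struct net2272 *dev, u8 stat, u8 bit)
{
	if (stat & bit) {
		net2272_write(dev, IRQSTAT1, bit);	/* write back to ack */
		stat &= ~bit;
	}
	return stat;
}
#endif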
2049 
2050 static irqreturn_t net2272_irq(int irq, void *_dev)
2051 {
2052 	struct net2272 *dev = _dev;
2053 #if defined(PLX_PCI_RDK) || defined(PLX_PCI_RDK2)
2054 	u32 intcsr;
2055 #endif
2056 #if defined(PLX_PCI_RDK)
2057 	u8 dmareq;
2058 #endif
2059 	spin_lock(&dev->lock);
2060 #if defined(PLX_PCI_RDK)
2061 	intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
2062 
2063 	if ((intcsr & LOCAL_INTERRUPT_TEST) == LOCAL_INTERRUPT_TEST) {
2064 		writel(intcsr & ~(1 << PCI_INTERRUPT_ENABLE),
2065 				dev->rdk1.plx9054_base_addr + INTCSR);
2066 		net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
2067 		net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
2068 		intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
2069 		writel(intcsr | (1 << PCI_INTERRUPT_ENABLE),
2070 			dev->rdk1.plx9054_base_addr + INTCSR);
2071 	}
2072 	if ((intcsr & DMA_CHANNEL_0_TEST) == DMA_CHANNEL_0_TEST) {
2073 		writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
2074 				dev->rdk1.plx9054_base_addr + DMACSR0);
2075 
2076 		dmareq = net2272_read(dev, DMAREQ);
2077 		if (dmareq & 0x01)
2078 			net2272_handle_dma(&dev->ep[2]);
2079 		else
2080 			net2272_handle_dma(&dev->ep[1]);
2081 	}
2082 #endif
2083 #if defined(PLX_PCI_RDK2)
2084 	/* see if this PCI interrupt is ours by checking the FPGA irqstat */
2085 	intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT);
2086 	if (!(intcsr & (1 << NET2272_PCI_IRQ))) {
2087 		spin_unlock(&dev->lock);
2088 		return IRQ_NONE;
2089 	}
2090 	/* check dma interrupts */
2091 #endif
2092 	/* Platform/device interrupt handler */
2093 #if !defined(PLX_PCI_RDK)
2094 	net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
2095 	net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
2096 #endif
2097 	spin_unlock(&dev->lock);
2098 
2099 	return IRQ_HANDLED;
2100 }
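
/*
 * Usage note: this handler is registered with request_irq() in
 * net2272_probe_fin() below.  It takes dev->lock around the status walk;
 * the stat0/stat1 helpers drop the lock temporarily around gadget driver
 * callbacks (setup, disconnect, reset).  Only the RDK2 build can return
 * IRQ_NONE, when its FPGA irqstat says the interrupt is not ours.
 */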
2101 
2102 static int net2272_present(struct net2272 *dev)
2103 {
2104 	/*
2105 	 * Quick test to see if CPU can communicate properly with the NET2272.
2106 	 * Verifies connection using writes and reads to write/read and
2107 	 * read-only registers.
2108 	 *
2109 	 * This routine is strongly recommended, especially during early bring-up
2110 	 * of new hardware; however, for designs that do not run a power-on
2111 	 * self-test (POST) it may be discarded (or perhaps minimized).
2112 	 */
2113 	unsigned int ii;
2114 	u8 val, refval;
2115 
2116 	/* Verify the read/write SCRATCH register can be written and read back */
2117 	refval = net2272_read(dev, SCRATCH);
2118 	for (ii = 0; ii < 0x100; ii += 7) {
2119 		net2272_write(dev, SCRATCH, ii);
2120 		val = net2272_read(dev, SCRATCH);
2121 		if (val != ii) {
2122 			dev_dbg(dev->dev,
2123 				"%s: write/read SCRATCH register test failed: "
2124 				"wrote:0x%2.2x, read:0x%2.2x\n",
2125 				__func__, ii, val);
2126 			return -EINVAL;
2127 		}
2128 	}
2129 	/* To be nice, we write the original SCRATCH value back: */
2130 	net2272_write(dev, SCRATCH, refval);
2131 
2132 	/* Verify NET2272 CHIPREV register is read-only: */
2133 	refval = net2272_read(dev, CHIPREV_2272);
2134 	for (ii = 0; ii < 0x100; ii += 7) {
2135 		net2272_write(dev, CHIPREV_2272, ii);
2136 		val = net2272_read(dev, CHIPREV_2272);
2137 		if (val != refval) {
2138 			dev_dbg(dev->dev,
2139 				"%s: write/read CHIPREV register test failed: "
2140 				"wrote 0x%2.2x, read:0x%2.2x expected:0x%2.2x\n",
2141 				__func__, ii, val, refval);
2142 			return -EINVAL;
2143 		}
2144 	}
2145 
2146 	/*
2147 	 * Verify NET2272's "NET2270 legacy revision" register
2148 	 *  - NET2272 has two revision registers. The NET2270 legacy revision
2149 	 *    register should read the same value, regardless of the NET2272
2150 	 *    silicon revision.  The legacy register is intended for NET2270
2151 	 *    firmware that is run on the NET2272.
2152 	 */
2153 	val = net2272_read(dev, CHIPREV_LEGACY);
2154 	if (val != NET2270_LEGACY_REV) {
2155 		/*
2156 		 * Unexpected legacy revision value
2157 		 * - Perhaps the chip is a NET2270?
2158 		 */
2159 		dev_dbg(dev->dev,
2160 			"%s: WARNING: UNEXPECTED NET2272 LEGACY REGISTER VALUE:\n"
2161 			" - CHIPREV_LEGACY: expected 0x%2.2x, got:0x%2.2x. (Not NET2272?)\n",
2162 			__func__, NET2270_LEGACY_REV, val);
2163 		return -EINVAL;
2164 	}
2165 
2166 	/*
2167 	 * Verify NET2272 silicon revision
2168 	 *  - This revision register is appropriate for the silicon version
2169 	 *    of the NET2272
2170 	 */
2171 	val = net2272_read(dev, CHIPREV_2272);
2172 	switch (val) {
2173 	case CHIPREV_NET2272_R1:
2174 		/*
2175 		 * NET2272 Rev 1 has DMA related errata:
2176 		 *  - Newer silicon (Rev 1A or better) required
2177 		 */
2178 		dev_dbg(dev->dev,
2179 			"%s: Rev 1 detected: newer silicon recommended for DMA support\n",
2180 			__func__);
2181 		break;
2182 	case CHIPREV_NET2272_R1A:
2183 		break;
2184 	default:
2185 		/* NET2272 silicon version *may* not work with this firmware */
2186 		dev_dbg(dev->dev,
2187 			"%s: unexpected silicon revision register value: "
2188 			" CHIPREV_2272: 0x%2.2x\n",
2189 			__func__, val);
2190 		/*
2191 		 * Return Success, even though the chip rev is not an expected value
2192 		 *  - Older, pre-built firmware can attempt to operate on newer silicon
2193 		 *  - Often, new silicon is perfectly compatible
2194 		 */
2195 	}
2196 
2197 	/* Success: NET2272 checks out OK */
2198 	return 0;
2199 }
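
/*
 * Usage note: net2272_probe_fin() below runs this check before the chip
 * is reset or the IRQ is requested; a non-zero return aborts the probe
 * with -ENODEV.
 */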
2200 
2201 static void
2202 net2272_gadget_release(struct device *_dev)
2203 {
2204 	struct net2272 *dev = dev_get_drvdata(_dev);
2205 	kfree(dev);
2206 }
2207 
2208 /*---------------------------------------------------------------------------*/
2209 
2210 static void
2211 net2272_remove(struct net2272 *dev)
2212 {
2213 	usb_del_gadget_udc(&dev->gadget);
2214 	free_irq(dev->irq, dev);
2215 	iounmap(dev->base_addr);
2216 	device_remove_file(dev->dev, &dev_attr_registers);
2217 
2218 	dev_info(dev->dev, "unbind\n");
2219 }
2220 
2221 static struct net2272 *net2272_probe_init(struct device *dev, unsigned int irq)
2222 {
2223 	struct net2272 *ret;
2224 
2225 	if (!irq) {
2226 		dev_dbg(dev, "No IRQ!\n");
2227 		return ERR_PTR(-ENODEV);
2228 	}
2229 
2230 	/* alloc, and start init */
2231 	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
2232 	if (!ret)
2233 		return ERR_PTR(-ENOMEM);
2234 
2235 	spin_lock_init(&ret->lock);
2236 	ret->irq = irq;
2237 	ret->dev = dev;
2238 	ret->gadget.ops = &net2272_ops;
2239 	ret->gadget.max_speed = USB_SPEED_HIGH;
2240 
2241 	/* the "gadget" abstracts/virtualizes the controller */
2242 	ret->gadget.name = driver_name;
2243 
2244 	return ret;
2245 }
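
/*
 * Illustrative probe flow (not compiled, names are examples): both bus
 * glues below follow the same shape - net2272_probe_init() allocates and
 * seeds the per-device state, the glue maps its register window into
 * dev->base_addr, then net2272_probe_fin() verifies the chip, requests
 * the IRQ and registers the UDC.
 */
#if 0
static int example_bus_probe(struct device *parent, unsigned int irq,
	void __iomem *regs)
{
	struct net2272 *dev;
	int ret;

	dev = net2272_probe_init(parent, irq);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	/* a real glue maps its register window here */
	dev->base_addr = regs;

	ret = net2272_probe_fin(dev, 0);	/* pass real irqflags here */
	if (ret)
		kfree(dev);
	return ret;
}
#endif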
2246 
2247 static int
2248 net2272_probe_fin(struct net2272 *dev, unsigned int irqflags)
2249 {
2250 	int ret;
2251 
2252 	/* See if there... */
2253 	/* See if the chip is actually there... */
2254 		dev_warn(dev->dev, "2272 not found!\n");
2255 		ret = -ENODEV;
2256 		goto err;
2257 	}
2258 
2259 	net2272_usb_reset(dev);
2260 	net2272_usb_reinit(dev);
2261 
2262 	ret = request_irq(dev->irq, net2272_irq, irqflags, driver_name, dev);
2263 	if (ret) {
2264 		dev_err(dev->dev, "request interrupt %i failed\n", dev->irq);
2265 		goto err;
2266 	}
2267 
2268 	dev->chiprev = net2272_read(dev, CHIPREV_2272);
2269 
2270 	/* done */
2271 	dev_info(dev->dev, "%s\n", driver_desc);
2272 	dev_info(dev->dev, "irq %i, mem %p, chip rev %04x, dma %s\n",
2273 		dev->irq, dev->base_addr, dev->chiprev,
2274 		dma_mode_string());
2275 	dev_info(dev->dev, "version: %s\n", driver_vers);
2276 
2277 	ret = device_create_file(dev->dev, &dev_attr_registers);
2278 	if (ret)
2279 		goto err_irq;
2280 
2281 	ret = usb_add_gadget_udc_release(dev->dev, &dev->gadget,
2282 			net2272_gadget_release);
2283 	if (ret)
2284 		goto err_add_udc;
2285 
2286 	return 0;
2287 
2288 err_add_udc:
2289 	device_remove_file(dev->dev, &dev_attr_registers);
2290  err_irq:
2291 	free_irq(dev->irq, dev);
2292  err:
2293 	return ret;
2294 }
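
/*
 * Error unwind note: the labels above release what net2272_probe_fin()
 * itself acquired, in reverse order (sysfs attribute, then the IRQ); the
 * struct net2272 allocated by net2272_probe_init() remains owned by the
 * caller, which frees it on failure.
 */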
2295 
2296 #ifdef CONFIG_USB_PCI
2297 
2298 /*
2299  * wrap this driver around the specified device, but
2300  * don't respond over USB until a gadget driver binds to us
2301  */
2302 
2303 static int
2304 net2272_rdk1_probe(struct pci_dev *pdev, struct net2272 *dev)
2305 {
2306 	unsigned long resource, len, tmp;
2307 	void __iomem *mem_mapped_addr[4];
2308 	int ret, i;
2309 
2310 	/*
2311 	 * BAR 0 holds PLX 9054 config registers
2312 	 * BAR 1 is i/o memory; unused here
2313 	 * BAR 2 holds EPLD config registers
2314 	 * BAR 3 holds NET2272 registers
2315 	 */
2316 
2317 	/* Find and map all address spaces */
2318 	for (i = 0; i < 4; ++i) {
2319 		if (i == 1)
2320 			continue;	/* BAR1 unused */
2321 
2322 		resource = pci_resource_start(pdev, i);
2323 		len = pci_resource_len(pdev, i);
2324 
2325 		if (!request_mem_region(resource, len, driver_name)) {
2326 			dev_dbg(dev->dev, "controller already in use\n");
2327 			ret = -EBUSY;
2328 			goto err;
2329 		}
2330 
2331 		mem_mapped_addr[i] = ioremap_nocache(resource, len);
2332 		if (mem_mapped_addr[i] == NULL) {
2333 			release_mem_region(resource, len);
2334 			dev_dbg(dev->dev, "can't map memory\n");
2335 			ret = -EFAULT;
2336 			goto err;
2337 		}
2338 	}
2339 
2340 	dev->rdk1.plx9054_base_addr = mem_mapped_addr[0];
2341 	dev->rdk1.epld_base_addr = mem_mapped_addr[2];
2342 	dev->base_addr = mem_mapped_addr[3];
2343 
2344 	/* Set PLX 9054 bus width (16 bits) */
2345 	tmp = readl(dev->rdk1.plx9054_base_addr + LBRD1);
2346 	writel((tmp & ~(3 << MEMORY_SPACE_LOCAL_BUS_WIDTH)) | W16_BIT,
2347 			dev->rdk1.plx9054_base_addr + LBRD1);
2348 
2349 	/* Enable PLX 9054 Interrupts */
2350 	writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) |
2351 			(1 << PCI_INTERRUPT_ENABLE) |
2352 			(1 << LOCAL_INTERRUPT_INPUT_ENABLE),
2353 			dev->rdk1.plx9054_base_addr + INTCSR);
2354 
2355 	writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
2356 			dev->rdk1.plx9054_base_addr + DMACSR0);
2357 
2358 	/* reset */
2359 	writeb((1 << EPLD_DMA_ENABLE) |
2360 		(1 << DMA_CTL_DACK) |
2361 		(1 << DMA_TIMEOUT_ENABLE) |
2362 		(1 << USER) |
2363 		(0 << MPX_MODE) |
2364 		(1 << BUSWIDTH) |
2365 		(1 << NET2272_RESET),
2366 		dev->base_addr + EPLD_IO_CONTROL_REGISTER);
2367 
2368 	mb();
2369 	writeb(readb(dev->base_addr + EPLD_IO_CONTROL_REGISTER) &
2370 		~(1 << NET2272_RESET),
2371 		dev->base_addr + EPLD_IO_CONTROL_REGISTER);
2372 	udelay(200);
2373 
2374 	return 0;
2375 
2376  err:
2377 	while (--i >= 0) {
2378 		iounmap(mem_mapped_addr[i]);
2379 		release_mem_region(pci_resource_start(pdev, i),
2380 			pci_resource_len(pdev, i));
2381 	}
2382 
2383 	return ret;
2384 }
2385 
2386 static int
2387 net2272_rdk2_probe(struct pci_dev *pdev, struct net2272 *dev)
2388 {
2389 	unsigned long resource, len;
2390 	void __iomem *mem_mapped_addr[2];
2391 	int ret, i;
2392 
2393 	/*
2394 	 * BAR 0 holds FPGA config registers
2395 	 * BAR 1 holds NET2272 registers
2396 	 */
2397 
2398 	/* Find and map all address spaces, bar2-3 unused in rdk 2 */
2399 	for (i = 0; i < 2; ++i) {
2400 		resource = pci_resource_start(pdev, i);
2401 		len = pci_resource_len(pdev, i);
2402 
2403 		if (!request_mem_region(resource, len, driver_name)) {
2404 			dev_dbg(dev->dev, "controller already in use\n");
2405 			ret = -EBUSY;
2406 			goto err;
2407 		}
2408 
2409 		mem_mapped_addr[i] = ioremap_nocache(resource, len);
2410 		if (mem_mapped_addr[i] == NULL) {
2411 			release_mem_region(resource, len);
2412 			dev_dbg(dev->dev, "can't map memory\n");
2413 			ret = -EFAULT;
2414 			goto err;
2415 		}
2416 	}
2417 
2418 	dev->rdk2.fpga_base_addr = mem_mapped_addr[0];
2419 	dev->base_addr = mem_mapped_addr[1];
2420 
2421 	mb();
2422 	/* Set 2272 bus width (16 bits) and reset */
2423 	writel((1 << CHIP_RESET), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
2424 	udelay(200);
2425 	writel((1 << BUS_WIDTH), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
2426 	/* Print fpga version number */
2427 	dev_info(dev->dev, "RDK2 FPGA version %08x\n",
2428 		readl(dev->rdk2.fpga_base_addr + RDK2_FPGAREV));
2429 	/* Enable FPGA Interrupts */
2430 	writel((1 << NET2272_PCI_IRQ), dev->rdk2.fpga_base_addr + RDK2_IRQENB);
2431 
2432 	return 0;
2433 
2434  err:
2435 	while (--i >= 0) {
2436 		iounmap(mem_mapped_addr[i]);
2437 		release_mem_region(pci_resource_start(pdev, i),
2438 			pci_resource_len(pdev, i));
2439 	}
2440 
2441 	return ret;
2442 }
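
/*
 * Illustrative sketch (not compiled): both RDK probes above map each BAR
 * with the same request_mem_region()/ioremap_nocache() pair and unwind
 * every BAR mapped so far on failure.  The per-BAR step in isolation:
 */
#if 0
static void __iomem *example_map_bar(struct pci_dev *pdev, int bar)
{
	unsigned long start = pci_resource_start(pdev, bar);
	unsigned long len = pci_resource_len(pdev, bar);
	void __iomem *p;

	if (!request_mem_region(start, len, driver_name))
		return NULL;			/* range already claimed */
	p = ioremap_nocache(start, len);
	if (!p)
		release_mem_region(start, len);
	return p;
}
#endif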
2443 
2444 static int
2445 net2272_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2446 {
2447 	struct net2272 *dev;
2448 	int ret;
2449 
2450 	dev = net2272_probe_init(&pdev->dev, pdev->irq);
2451 	if (IS_ERR(dev))
2452 		return PTR_ERR(dev);
2453 	dev->dev_id = pdev->device;
2454 
2455 	if (pci_enable_device(pdev) < 0) {
2456 		ret = -ENODEV;
2457 		goto err_free;
2458 	}
2459 
2460 	pci_set_master(pdev);
2461 
2462 	switch (pdev->device) {
2463 	case PCI_DEVICE_ID_RDK1: ret = net2272_rdk1_probe(pdev, dev); break;
2464 	case PCI_DEVICE_ID_RDK2: ret = net2272_rdk2_probe(pdev, dev); break;
2465 	default: BUG();
2466 	}
2467 	if (ret)
2468 		goto err_pci;
2469 
2470 	ret = net2272_probe_fin(dev, 0);
2471 	if (ret)
2472 		goto err_pci;
2473 
2474 	pci_set_drvdata(pdev, dev);
2475 
2476 	return 0;
2477 
2478  err_pci:
2479 	pci_disable_device(pdev);
2480  err_free:
2481 	kfree(dev);
2482 
2483 	return ret;
2484 }
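
/*
 * Note: the BUG() in the switch above is unreachable in practice - the
 * pci_ids table below only matches PCI_DEVICE_ID_RDK1 and
 * PCI_DEVICE_ID_RDK2, so probe is never called for any other device.
 */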
2485 
2486 static void
2487 net2272_rdk1_remove(struct pci_dev *pdev, struct net2272 *dev)
2488 {
2489 	int i;
2490 
2491 	/* disable PLX 9054 interrupts */
2492 	writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
2493 		~(1 << PCI_INTERRUPT_ENABLE),
2494 		dev->rdk1.plx9054_base_addr + INTCSR);
2495 
2496 	/* clean up resources allocated during probe() */
2497 	iounmap(dev->rdk1.plx9054_base_addr);
2498 	iounmap(dev->rdk1.epld_base_addr);
2499 
2500 	for (i = 0; i < 4; ++i) {
2501 		if (i == 1)
2502 			continue;	/* BAR1 unused */
2503 		release_mem_region(pci_resource_start(pdev, i),
2504 			pci_resource_len(pdev, i));
2505 	}
2506 }
2507 
2508 static void
2509 net2272_rdk2_remove(struct pci_dev *pdev, struct net2272 *dev)
2510 {
2511 	int i;
2512 
2513 	/*
2514 	 * The FPGA interrupt enable (RDK2_IRQENB, set in net2272_rdk2_probe())
2515 	 * is not cleared here; only the memory mappings are torn down.
2516 	 */
2518 
2519 	/* clean up resources allocated during probe() */
2520 	iounmap(dev->rdk2.fpga_base_addr);
2521 
2522 	for (i = 0; i < 2; ++i)
2523 		release_mem_region(pci_resource_start(pdev, i),
2524 			pci_resource_len(pdev, i));
2525 }
2526 
2527 static void
2528 net2272_pci_remove(struct pci_dev *pdev)
2529 {
2530 	struct net2272 *dev = pci_get_drvdata(pdev);
2531 
2532 	net2272_remove(dev);
2533 
2534 	switch (pdev->device) {
2535 	case PCI_DEVICE_ID_RDK1: net2272_rdk1_remove(pdev, dev); break;
2536 	case PCI_DEVICE_ID_RDK2: net2272_rdk2_remove(pdev, dev); break;
2537 	default: BUG();
2538 	}
2539 
2540 	pci_disable_device(pdev);
2541 
2542 	kfree(dev);
2543 }
2544 
2545 /* Table of matching PCI IDs */
2546 static struct pci_device_id pci_ids[] = {
2547 	{	/* RDK 1 card */
2548 		.class       = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
2549 		.class_mask  = 0,
2550 		.vendor      = PCI_VENDOR_ID_PLX,
2551 		.device      = PCI_DEVICE_ID_RDK1,
2552 		.subvendor   = PCI_ANY_ID,
2553 		.subdevice   = PCI_ANY_ID,
2554 	},
2555 	{	/* RDK 2 card */
2556 		.class       = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
2557 		.class_mask  = 0,
2558 		.vendor      = PCI_VENDOR_ID_PLX,
2559 		.device      = PCI_DEVICE_ID_RDK2,
2560 		.subvendor   = PCI_ANY_ID,
2561 		.subdevice   = PCI_ANY_ID,
2562 	},
2563 	{ }
2564 };
2565 MODULE_DEVICE_TABLE(pci, pci_ids);
2566 
2567 static struct pci_driver net2272_pci_driver = {
2568 	.name     = driver_name,
2569 	.id_table = pci_ids,
2570 
2571 	.probe    = net2272_pci_probe,
2572 	.remove   = net2272_pci_remove,
2573 };
2574 
2575 static int net2272_pci_register(void)
2576 {
2577 	return pci_register_driver(&net2272_pci_driver);
2578 }
2579 
2580 static void net2272_pci_unregister(void)
2581 {
2582 	pci_unregister_driver(&net2272_pci_driver);
2583 }
2584 
2585 #else
2586 static inline int net2272_pci_register(void) { return 0; }
2587 static inline void net2272_pci_unregister(void) { }
2588 #endif
2589 
2590 /*---------------------------------------------------------------------------*/
2591 
2592 static int
2593 net2272_plat_probe(struct platform_device *pdev)
2594 {
2595 	struct net2272 *dev;
2596 	int ret;
2597 	unsigned int irqflags;
2598 	resource_size_t base, len;
2599 	struct resource *iomem, *iomem_bus, *irq_res;
2600 
2601 	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2602 	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2603 	iomem_bus = platform_get_resource(pdev, IORESOURCE_BUS, 0);
2604 	if (!irq_res || !iomem) {
2605 		dev_err(&pdev->dev, "must provide irq/base addr\n");
2606 		return -EINVAL;
2607 	}
2608 
2609 	dev = net2272_probe_init(&pdev->dev, irq_res->start);
2610 	if (IS_ERR(dev))
2611 		return PTR_ERR(dev);
2612 
2613 	irqflags = 0;
2614 	if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
2615 		irqflags |= IRQF_TRIGGER_RISING;
2616 	if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
2617 		irqflags |= IRQF_TRIGGER_FALLING;
2618 	if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
2619 		irqflags |= IRQF_TRIGGER_HIGH;
2620 	if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
2621 		irqflags |= IRQF_TRIGGER_LOW;
2622 
2623 	base = iomem->start;
2624 	len = resource_size(iomem);
2625 	if (iomem_bus)
2626 		dev->base_shift = iomem_bus->start;
2627 
2628 	if (!request_mem_region(base, len, driver_name)) {
2629 		dev_dbg(dev->dev, "can't request memory region\n");
2630 		ret = -EBUSY;
2631 		goto err;
2632 	}
2633 	dev->base_addr = ioremap_nocache(base, len);
2634 	if (!dev->base_addr) {
2635 		dev_dbg(dev->dev, "can't map memory\n");
2636 		ret = -EFAULT;
2637 		goto err_req;
2638 	}
2639 
2640 	ret = net2272_probe_fin(dev, irqflags);
2641 	if (ret)
2642 		goto err_io;
2643 
2644 	platform_set_drvdata(pdev, dev);
2645 	dev_info(&pdev->dev, "running in 16-bit, %sbyte swap local bus mode\n",
2646 		(net2272_read(dev, LOCCTL) & (1 << BYTE_SWAP)) ? "" : "no ");
2647 
2648 	return 0;
2649 
2650  err_io:
2651 	iounmap(dev->base_addr);
2652  err_req:
2653 	release_mem_region(base, len);
2654  err:
	kfree(dev);
2655 	return ret;
2656 }
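
/*
 * Illustrative board-file sketch (not compiled; the base address and IRQ
 * number are made-up examples): a platform device for this driver needs a
 * MEM resource for the register window and an IRQ resource whose flags
 * select the trigger type; an optional IORESOURCE_BUS start is taken as
 * the address shift (dev->base_shift) in the probe above.
 */
#if 0
static struct resource example_net2272_resources[] = {
	[0] = {
		.start	= 0x20300000,		/* example register base */
		.end	= 0x20300000 + 0xff,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= 4,			/* example interrupt line */
		.end	= 4,
		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
	},
};

static struct platform_device example_net2272_device = {
	.name		= "net2272",
	.id		= -1,
	.resource	= example_net2272_resources,
	.num_resources	= ARRAY_SIZE(example_net2272_resources),
};
#endif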
2657 
2658 static int
2659 net2272_plat_remove(struct platform_device *pdev)
2660 {
2661 	struct net2272 *dev = platform_get_drvdata(pdev);
2662 
2663 	net2272_remove(dev);
2664 
2665 	release_mem_region(pdev->resource[0].start,
2666 		resource_size(&pdev->resource[0]));
2667 
2668 	kfree(dev);
2669 
2670 	return 0;
2671 }
2672 
2673 static struct platform_driver net2272_plat_driver = {
2674 	.probe   = net2272_plat_probe,
2675 	.remove  = net2272_plat_remove,
2676 	.driver  = {
2677 		.name  = driver_name,
2678 	},
2679 	/* FIXME .suspend, .resume */
2680 };
2681 MODULE_ALIAS("platform:net2272");
2682 
2683 static int __init net2272_init(void)
2684 {
2685 	int ret;
2686 
2687 	ret = net2272_pci_register();
2688 	if (ret)
2689 		return ret;
2690 	ret = platform_driver_register(&net2272_plat_driver);
2691 	if (ret)
2692 		goto err_pci;
2693 	return ret;
2694 
2695 err_pci:
2696 	net2272_pci_unregister();
2697 	return ret;
2698 }
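
/*
 * Note: module init registers the PCI glue first and the platform glue
 * second; if platform registration fails, the PCI driver registered above
 * is unregistered again before the error is returned.
 */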
2699 module_init(net2272_init);
2700 
2701 static void __exit net2272_cleanup(void)
2702 {
2703 	net2272_pci_unregister();
2704 	platform_driver_unregister(&net2272_plat_driver);
2705 }
2706 module_exit(net2272_cleanup);
2707 
2708 MODULE_DESCRIPTION(DRIVER_DESC);
2709 MODULE_AUTHOR("PLX Technology, Inc.");
2710 MODULE_LICENSE("GPL");
2711