xref: /openbmc/linux/drivers/usb/host/xen-hcd.c (revision ce656528)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * xen-hcd.c
4  *
5  * Xen USB Virtual Host Controller driver
6  *
7  * Copyright (C) 2009, FUJITSU LABORATORIES LTD.
8  * Author: Noboru Iwamatsu <n_iwamatsu@jp.fujitsu.com>
9  */
10 
11 #include <linux/module.h>
12 #include <linux/usb.h>
13 #include <linux/list.h>
14 #include <linux/usb/hcd.h>
15 #include <linux/io.h>
16 
17 #include <xen/xen.h>
18 #include <xen/xenbus.h>
19 #include <xen/grant_table.h>
20 #include <xen/events.h>
21 #include <xen/page.h>
22 
23 #include <xen/interface/io/usbif.h>
24 
25 /* Private per-URB data */
/* Private per-URB data, stored in urb->hcpriv while the URB is owned here */
struct urb_priv {
	struct list_head list;	/* link on one of the four xenhcd_info queues */
	struct urb *urb;	/* the URB this entry tracks */
	int req_id;		/* RING_REQUEST id for submitting */
	int unlink_req_id;	/* RING_REQUEST id for unlinking */
	int status;
	bool unlinked;		/* dequeued marker */
};
34 
35 /* virtual roothub port status */
/* virtual roothub port status */
struct rhport_status {
	__u32 status;		/* wPortStatus in low 16 bits, change bits << 16 */
	bool resuming;		/* in resuming */
	bool c_connection;	/* connection changed */
	unsigned long timeout;	/* jiffies deadline for reset/resume signaling */
};
42 
43 /* status of attached device */
/* status of attached device */
struct vdevice_status {
	int devnum;			/* USB device address */
	enum usb_device_state status;	/* e.g. ATTACHED/POWERED/DEFAULT */
	enum usb_device_speed speed;	/* speed reported by the backend */
};
49 
50 /* RING request shadow */
/* RING request shadow: local copy of each in-flight ring slot */
struct usb_shadow {
	struct xenusb_urb_request req;	/* copy of the request on the ring */
	struct urb *urb;		/* URB the request belongs to */
	bool in_flight;			/* set until the response is consumed */
};
56 
/* Per-device frontend state for one virtual host controller */
struct xenhcd_info {
	/* Virtual Host Controller has 4 urb queues */
	struct list_head pending_submit_list;	/* waiting for ring space */
	struct list_head pending_unlink_list;	/* unlink reqs waiting for ring */
	struct list_head in_progress_list;	/* submitted to the backend */
	struct list_head giveback_waiting_list;	/* to give back on next scan */

	spinlock_t lock;	/* protects queues, rings, shadow, port state */

	/* timer that kick pending and giveback waiting urbs */
	struct timer_list watchdog;
	unsigned long actions;	/* bitmask of enum xenhcd_timer_action */

	/* virtual root hub */
	int rh_numports;
	struct rhport_status ports[XENUSB_MAX_PORTNR];
	struct vdevice_status devices[XENUSB_MAX_PORTNR];

	/* Xen related staff */
	struct xenbus_device *xbdev;
	int urb_ring_ref;	/* grant ref of the URB shared ring page */
	int conn_ring_ref;	/* grant ref of the connection shared ring page */
	struct xenusb_urb_front_ring urb_ring;
	struct xenusb_conn_front_ring conn_ring;

	unsigned int evtchn;
	unsigned int irq;
	struct usb_shadow shadow[XENUSB_URB_RING_SIZE];
	unsigned int shadow_free;	/* head of the shadow free-list */

	bool error;	/* latched on protocol error; stops further processing */
};
89 
/* grant reference value meaning "no grant allocated" */
#define GRANT_INVALID_REF 0

/* watchdog delays: ~5 ms retry when the ring is full, next jiffy for a scan */
#define XENHCD_RING_JIFFIES (HZ/200)
#define XENHCD_SCAN_JIFFIES 1
94 
/* bit numbers in xenhcd_info.actions selecting what the watchdog should do */
enum xenhcd_timer_action {
	TIMER_RING_WATCHDOG,	/* retry because the URB ring was full */
	TIMER_SCAN_PENDING_URBS,	/* rescan pending/giveback queues */
};
99 
/* slab cache for struct urb_priv allocations */
static struct kmem_cache *xenhcd_urbp_cachep;
101 
102 static inline struct xenhcd_info *xenhcd_hcd_to_info(struct usb_hcd *hcd)
103 {
104 	return (struct xenhcd_info *)hcd->hcd_priv;
105 }
106 
/*
 * Inverse of xenhcd_hcd_to_info(): info lives in the hcd_priv tail of
 * struct usb_hcd, so recover the enclosing hcd with container_of. The
 * void * cast sidesteps hcd_priv's declared element type.
 */
static inline struct usb_hcd *xenhcd_info_to_hcd(struct xenhcd_info *info)
{
	return container_of((void *)info, struct usb_hcd, hcd_priv);
}
111 
/*
 * Latch a fatal protocol error. Once info->error is set, the interrupt
 * handler and ring processing stop trusting the backend.
 */
static void xenhcd_set_error(struct xenhcd_info *info, const char *msg)
{
	info->error = true;

	pr_alert("xen-hcd: protocol error: %s!\n", msg);
}
118 
/* Mark a scheduled watchdog action as handled. */
static inline void xenhcd_timer_action_done(struct xenhcd_info *info,
					    enum xenhcd_timer_action action)
{
	clear_bit(action, &info->actions);
}
124 
125 static void xenhcd_timer_action(struct xenhcd_info *info,
126 				enum xenhcd_timer_action action)
127 {
128 	if (timer_pending(&info->watchdog) &&
129 	    test_bit(TIMER_SCAN_PENDING_URBS, &info->actions))
130 		return;
131 
132 	if (!test_and_set_bit(action, &info->actions)) {
133 		unsigned long t;
134 
135 		switch (action) {
136 		case TIMER_RING_WATCHDOG:
137 			t = XENHCD_RING_JIFFIES;
138 			break;
139 		default:
140 			t = XENHCD_SCAN_JIFFIES;
141 			break;
142 		}
143 		mod_timer(&info->watchdog, t + jiffies);
144 	}
145 }
146 
147 /*
148  * set virtual port connection status
149  */
/*
 * set virtual port connection status
 *
 * Derive the roothub port status bits from the attached device's speed.
 * Only powered ports are updated; any accepted change raises
 * C_CONNECTION (change bits live in the upper 16 bits of status).
 */
static void xenhcd_set_connect_state(struct xenhcd_info *info, int portnum)
{
	int port;

	port = portnum - 1;
	if (info->ports[port].status & USB_PORT_STAT_POWER) {
		switch (info->devices[port].speed) {
		case XENUSB_SPEED_NONE:
			/* disconnect: clear all connection-related bits */
			info->ports[port].status &=
				~(USB_PORT_STAT_CONNECTION |
				  USB_PORT_STAT_ENABLE |
				  USB_PORT_STAT_LOW_SPEED |
				  USB_PORT_STAT_HIGH_SPEED |
				  USB_PORT_STAT_SUSPEND);
			break;
		case XENUSB_SPEED_LOW:
			info->ports[port].status |= USB_PORT_STAT_CONNECTION;
			info->ports[port].status |= USB_PORT_STAT_LOW_SPEED;
			break;
		case XENUSB_SPEED_FULL:
			/* full speed has no dedicated status bit */
			info->ports[port].status |= USB_PORT_STAT_CONNECTION;
			break;
		case XENUSB_SPEED_HIGH:
			info->ports[port].status |= USB_PORT_STAT_CONNECTION;
			info->ports[port].status |= USB_PORT_STAT_HIGH_SPEED;
			break;
		default: /* error */
			return;
		}
		info->ports[port].status |= (USB_PORT_STAT_C_CONNECTION << 16);
	}
}
182 
183 /*
184  * set virtual device connection status
185  */
/*
 * set virtual device connection status
 *
 * Handle a (dis)connect event from the backend for @portnum at @speed.
 * Returns 0 on success, -EINVAL for a bad port number or speed value.
 * Only acts when the speed actually changed.
 */
static int xenhcd_rhport_connect(struct xenhcd_info *info, __u8 portnum,
				 __u8 speed)
{
	int port;

	if (portnum < 1 || portnum > info->rh_numports)
		return -EINVAL; /* invalid port number */

	port = portnum - 1;
	if (info->devices[port].speed != speed) {
		switch (speed) {
		case XENUSB_SPEED_NONE: /* disconnect */
			info->devices[port].status = USB_STATE_NOTATTACHED;
			break;
		case XENUSB_SPEED_LOW:
		case XENUSB_SPEED_FULL:
		case XENUSB_SPEED_HIGH:
			info->devices[port].status = USB_STATE_ATTACHED;
			break;
		default: /* error */
			return -EINVAL;
		}
		info->devices[port].speed = speed;
		info->ports[port].c_connection = true;

		/* reflect the new device state into the port status bits */
		xenhcd_set_connect_state(info, portnum);
	}

	return 0;
}
216 
217 /*
218  * SetPortFeature(PORT_SUSPENDED)
219  */
220 static void xenhcd_rhport_suspend(struct xenhcd_info *info, int portnum)
221 {
222 	int port;
223 
224 	port = portnum - 1;
225 	info->ports[port].status |= USB_PORT_STAT_SUSPEND;
226 	info->devices[port].status = USB_STATE_SUSPENDED;
227 }
228 
229 /*
230  * ClearPortFeature(PORT_SUSPENDED)
231  */
232 static void xenhcd_rhport_resume(struct xenhcd_info *info, int portnum)
233 {
234 	int port;
235 
236 	port = portnum - 1;
237 	if (info->ports[port].status & USB_PORT_STAT_SUSPEND) {
238 		info->ports[port].resuming = true;
239 		info->ports[port].timeout = jiffies + msecs_to_jiffies(20);
240 	}
241 }
242 
243 /*
244  * SetPortFeature(PORT_POWER)
245  */
/*
 * SetPortFeature(PORT_POWER)
 *
 * Power up a port that was off; if a device is attached, also replay any
 * recorded connection change so the hub driver sees it.
 */
static void xenhcd_rhport_power_on(struct xenhcd_info *info, int portnum)
{
	int port;

	port = portnum - 1;
	if ((info->ports[port].status & USB_PORT_STAT_POWER) == 0) {
		info->ports[port].status |= USB_PORT_STAT_POWER;
		if (info->devices[port].status != USB_STATE_NOTATTACHED)
			info->devices[port].status = USB_STATE_POWERED;
		if (info->ports[port].c_connection)
			xenhcd_set_connect_state(info, portnum);
	}
}
259 
260 /*
261  * ClearPortFeature(PORT_POWER)
262  * SetConfiguration(non-zero)
263  * Power_Source_Off
264  * Over-current
265  */
/*
 * ClearPortFeature(PORT_POWER)
 * SetConfiguration(non-zero)
 * Power_Source_Off
 * Over-current
 *
 * Clears all port status bits; an attached device falls back to the
 * merely-attached state.
 */
static void xenhcd_rhport_power_off(struct xenhcd_info *info, int portnum)
{
	int port;

	port = portnum - 1;
	if (info->ports[port].status & USB_PORT_STAT_POWER) {
		info->ports[port].status = 0;
		if (info->devices[port].status != USB_STATE_NOTATTACHED)
			info->devices[port].status = USB_STATE_ATTACHED;
	}
}
277 
278 /*
279  * ClearPortFeature(PORT_ENABLE)
280  */
281 static void xenhcd_rhport_disable(struct xenhcd_info *info, int portnum)
282 {
283 	int port;
284 
285 	port = portnum - 1;
286 	info->ports[port].status &= ~USB_PORT_STAT_ENABLE;
287 	info->ports[port].status &= ~USB_PORT_STAT_SUSPEND;
288 	info->ports[port].resuming = false;
289 	if (info->devices[port].status != USB_STATE_NOTATTACHED)
290 		info->devices[port].status = USB_STATE_POWERED;
291 }
292 
293 /*
294  * SetPortFeature(PORT_RESET)
295  */
/*
 * SetPortFeature(PORT_RESET)
 *
 * Begin reset signaling on the port; completion (and re-enable) is
 * detected in GetPortStatus after the 10 ms deadline.
 */
static void xenhcd_rhport_reset(struct xenhcd_info *info, int portnum)
{
	int port;

	port = portnum - 1;
	info->ports[port].status &= ~(USB_PORT_STAT_ENABLE |
				      USB_PORT_STAT_LOW_SPEED |
				      USB_PORT_STAT_HIGH_SPEED);
	info->ports[port].status |= USB_PORT_STAT_RESET;

	if (info->devices[port].status != USB_STATE_NOTATTACHED)
		info->devices[port].status = USB_STATE_ATTACHED;

	/* 10msec reset signaling */
	info->ports[port].timeout = jiffies + msecs_to_jiffies(10);
}
312 
313 #ifdef CONFIG_PM
/*
 * hc_driver->bus_suspend: suspend every root-hub port, then quiesce the
 * watchdog. Fails with -ESHUTDOWN if the hardware is no longer accessible.
 */
static int xenhcd_bus_suspend(struct usb_hcd *hcd)
{
	struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
	int ret = 0;
	int i, ports;

	ports = info->rh_numports;

	spin_lock_irq(&info->lock);
	if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
		ret = -ESHUTDOWN;
	} else {
		/* suspend any active ports*/
		for (i = 1; i <= ports; i++)
			xenhcd_rhport_suspend(info, i);
	}
	spin_unlock_irq(&info->lock);

	/* must be outside the lock: the timer callback takes info->lock */
	del_timer_sync(&info->watchdog);

	return ret;
}
336 
/*
 * hc_driver->bus_resume: kick resume signaling on all ports. Fails with
 * -ESHUTDOWN if the hardware is no longer accessible.
 */
static int xenhcd_bus_resume(struct usb_hcd *hcd)
{
	struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
	int ret = 0;
	int i, ports;

	ports = info->rh_numports;

	spin_lock_irq(&info->lock);
	if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
		ret = -ESHUTDOWN;
	} else {
		/* resume any suspended ports*/
		for (i = 1; i <= ports; i++)
			xenhcd_rhport_resume(info, i);
	}
	spin_unlock_irq(&info->lock);

	return ret;
}
357 #endif
358 
/*
 * Fill in the hub descriptor for the virtual root hub: all ports are
 * treated as non-removable, with per-port over-current reporting and no
 * power switching.
 */
static void xenhcd_hub_descriptor(struct xenhcd_info *info,
				  struct usb_hub_descriptor *desc)
{
	__u16 temp;
	int ports = info->rh_numports;

	desc->bDescriptorType = 0x29;	/* USB 2.0 hub descriptor type */
	desc->bPwrOn2PwrGood = 10; /* EHCI says 20ms max */
	desc->bHubContrCurrent = 0;
	desc->bNbrPorts = ports;

	/* size of DeviceRemovable and PortPwrCtrlMask fields */
	temp = 1 + (ports / 8);
	desc->bDescLength = 7 + 2 * temp;

	/* bitmaps for DeviceRemovable and PortPwrCtrlMask */
	memset(&desc->u.hs.DeviceRemovable[0], 0, temp);
	memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);

	/* per-port over current reporting and no power switching */
	temp = 0x000a;
	desc->wHubCharacteristics = cpu_to_le16(temp);
}
382 
/* port status change mask for hub_status_data: any of the wPortChange bits */
#define PORT_C_MASK	((USB_PORT_STAT_C_CONNECTION |		\
			  USB_PORT_STAT_C_ENABLE |		\
			  USB_PORT_STAT_C_SUSPEND |		\
			  USB_PORT_STAT_C_OVERCURRENT |		\
			  USB_PORT_STAT_C_RESET) << 16)
389 
390 /*
391  * See USB 2.0 Spec, 11.12.4 Hub and Port Status Change Bitmap.
392  * If port status changed, writes the bitmap to buf and return
393  * that length(number of bytes).
394  * If Nothing changed, return 0.
395  */
/*
 * See USB 2.0 Spec, 11.12.4 Hub and Port Status Change Bitmap.
 * If port status changed, writes the bitmap to buf and return
 * that length(number of bytes).
 * If Nothing changed, return 0.
 */
static int xenhcd_hub_status_data(struct usb_hcd *hcd, char *buf)
{
	struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
	int ports;
	int i;
	unsigned long flags;
	int ret;
	int changed = 0;

	/* initialize the status to no-changes */
	ports = info->rh_numports;
	ret = 1 + (ports / 8);	/* bitmap length: bit 0 is the hub itself */
	memset(buf, 0, ret);

	spin_lock_irqsave(&info->lock, flags);

	for (i = 0; i < ports; i++) {
		/* check status for each port; bitmap bit is port number i+1 */
		if (info->ports[i].status & PORT_C_MASK) {
			buf[(i + 1) / 8] |= 1 << (i + 1) % 8;
			changed = 1;
		}
	}

	/* wake the root hub so it processes the pending change */
	if ((hcd->state == HC_STATE_SUSPENDED) && (changed == 1))
		usb_hcd_resume_root_hub(hcd);

	spin_unlock_irqrestore(&info->lock, flags);

	return changed ? ret : 0;
}
427 
/*
 * hc_driver->hub_control: handle hub class requests for the virtual root
 * hub. Reset and resume completion are emulated here by checking the
 * per-port timeout on GetPortStatus. Returns 0 or -EPIPE (request error,
 * which the hub driver maps to a stalled control transfer).
 */
static int xenhcd_hub_control(struct usb_hcd *hcd, __u16 typeReq, __u16 wValue,
			      __u16 wIndex, char *buf, __u16 wLength)
{
	struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
	int ports = info->rh_numports;
	unsigned long flags;
	int ret = 0;
	int i;
	int changed = 0;

	spin_lock_irqsave(&info->lock, flags);
	switch (typeReq) {
	case ClearHubFeature:
		/* ignore this request */
		break;
	case ClearPortFeature:
		if (!wIndex || wIndex > ports)
			goto error;

		switch (wValue) {
		case USB_PORT_FEAT_SUSPEND:
			xenhcd_rhport_resume(info, wIndex);
			break;
		case USB_PORT_FEAT_POWER:
			xenhcd_rhport_power_off(info, wIndex);
			break;
		case USB_PORT_FEAT_ENABLE:
			xenhcd_rhport_disable(info, wIndex);
			break;
		case USB_PORT_FEAT_C_CONNECTION:
			info->ports[wIndex - 1].c_connection = false;
			fallthrough;
		default:
			/* feature values equal the status bit positions */
			info->ports[wIndex - 1].status &= ~(1 << wValue);
			break;
		}
		break;
	case GetHubDescriptor:
		xenhcd_hub_descriptor(info, (struct usb_hub_descriptor *)buf);
		break;
	case GetHubStatus:
		/* always local power supply good and no over-current exists. */
		*(__le32 *)buf = cpu_to_le32(0);
		break;
	case GetPortStatus:
		if (!wIndex || wIndex > ports)
			goto error;

		wIndex--;	/* from here on wIndex is a 0-based index */

		/* resume completion */
		if (info->ports[wIndex].resuming &&
		    time_after_eq(jiffies, info->ports[wIndex].timeout)) {
			info->ports[wIndex].status |=
				USB_PORT_STAT_C_SUSPEND << 16;
			info->ports[wIndex].status &= ~USB_PORT_STAT_SUSPEND;
		}

		/* reset completion */
		if ((info->ports[wIndex].status & USB_PORT_STAT_RESET) != 0 &&
		    time_after_eq(jiffies, info->ports[wIndex].timeout)) {
			info->ports[wIndex].status |=
				USB_PORT_STAT_C_RESET << 16;
			info->ports[wIndex].status &= ~USB_PORT_STAT_RESET;

			/* an attached device becomes enabled and addressable */
			if (info->devices[wIndex].status !=
			    USB_STATE_NOTATTACHED) {
				info->ports[wIndex].status |=
					USB_PORT_STAT_ENABLE;
				info->devices[wIndex].status =
					USB_STATE_DEFAULT;
			}

			/* restore the speed bits cleared when reset began */
			switch (info->devices[wIndex].speed) {
			case XENUSB_SPEED_LOW:
				info->ports[wIndex].status |=
					USB_PORT_STAT_LOW_SPEED;
				break;
			case XENUSB_SPEED_HIGH:
				info->ports[wIndex].status |=
					USB_PORT_STAT_HIGH_SPEED;
				break;
			default:
				break;
			}
		}

		*(__le32 *)buf = cpu_to_le32(info->ports[wIndex].status);
		break;
	case SetPortFeature:
		if (!wIndex || wIndex > ports)
			goto error;

		switch (wValue) {
		case USB_PORT_FEAT_POWER:
			xenhcd_rhport_power_on(info, wIndex);
			break;
		case USB_PORT_FEAT_RESET:
			xenhcd_rhport_reset(info, wIndex);
			break;
		case USB_PORT_FEAT_SUSPEND:
			xenhcd_rhport_suspend(info, wIndex);
			break;
		default:
			/* other features are plain status bits, powered only */
			if (info->ports[wIndex-1].status & USB_PORT_STAT_POWER)
				info->ports[wIndex-1].status |= (1 << wValue);
		}
		break;

	case SetHubFeature:
		/* not supported */
	default:
error:
		ret = -EPIPE;
	}
	spin_unlock_irqrestore(&info->lock, flags);

	/* check status for each port */
	for (i = 0; i < ports; i++) {
		if (info->ports[i].status & PORT_C_MASK)
			changed = 1;
	}
	/* this request may have produced new change bits; tell the hub */
	if (changed)
		usb_hcd_poll_rh_status(hcd);

	return ret;
}
555 
556 static void xenhcd_free_urb_priv(struct urb_priv *urbp)
557 {
558 	urbp->urb->hcpriv = NULL;
559 	kmem_cache_free(xenhcd_urbp_cachep, urbp);
560 }
561 
/*
 * Pop a free shadow-ring slot id. The free list is threaded through the
 * req.id field of unused shadow entries; caller must hold info->lock.
 */
static inline unsigned int xenhcd_get_id_from_freelist(struct xenhcd_info *info)
{
	unsigned int free;

	free = info->shadow_free;
	info->shadow_free = info->shadow[free].req.id;
	info->shadow[free].req.id = 0x0fff; /* debug */
	return free;
}
571 
/*
 * Return shadow slot @id to the free list (see xenhcd_get_id_from_freelist).
 * Caller must hold info->lock.
 */
static inline void xenhcd_add_id_to_freelist(struct xenhcd_info *info,
					     unsigned int id)
{
	info->shadow[id].req.id	= info->shadow_free;
	info->shadow[id].urb = NULL;
	info->shadow_free = id;
}
579 
/* Number of pages spanned by the byte range [addr, addr + length). */
static inline int xenhcd_count_pages(void *addr, int length)
{
	unsigned long start = (unsigned long)addr;
	unsigned long end = start + length;

	return PFN_UP(end) - PFN_DOWN(start);
}
586 
/*
 * Grant the backend access to the buffer at @addr/@length, one grant per
 * page, filling @seg with {gref, offset, length} triples. @gref_head must
 * hold at least @nr_pages pre-allocated references. @flags is 0 for
 * writable grants or GTF_readonly for read-only ones.
 */
static void xenhcd_gnttab_map(struct xenhcd_info *info, void *addr, int length,
			      grant_ref_t *gref_head,
			      struct xenusb_request_segment *seg,
			      int nr_pages, int flags)
{
	grant_ref_t ref;
	unsigned int offset;
	unsigned int len = length;
	unsigned int bytes;
	int i;

	for (i = 0; i < nr_pages; i++) {
		offset = offset_in_page(addr);

		/* chunk = rest of this page, capped at remaining length */
		bytes = PAGE_SIZE - offset;
		if (bytes > len)
			bytes = len;

		ref = gnttab_claim_grant_reference(gref_head);
		gnttab_grant_foreign_access_ref(ref, info->xbdev->otherend_id,
						virt_to_gfn(addr), flags);
		seg[i].gref = ref;
		seg[i].offset = (__u16)offset;
		seg[i].length = (__u16)bytes;

		addr += bytes;
		len -= bytes;
	}
}
616 
617 static __u32 xenhcd_pipe_urb_to_xenusb(__u32 urb_pipe, __u8 port)
618 {
619 	static __u32 pipe;
620 
621 	pipe = usb_pipedevice(urb_pipe) << XENUSB_PIPE_DEV_SHIFT;
622 	pipe |= usb_pipeendpoint(urb_pipe) << XENUSB_PIPE_EP_SHIFT;
623 	if (usb_pipein(urb_pipe))
624 		pipe |= XENUSB_PIPE_DIR;
625 	switch (usb_pipetype(urb_pipe)) {
626 	case PIPE_ISOCHRONOUS:
627 		pipe |= XENUSB_PIPE_TYPE_ISOC << XENUSB_PIPE_TYPE_SHIFT;
628 		break;
629 	case PIPE_INTERRUPT:
630 		pipe |= XENUSB_PIPE_TYPE_INT << XENUSB_PIPE_TYPE_SHIFT;
631 		break;
632 	case PIPE_CONTROL:
633 		pipe |= XENUSB_PIPE_TYPE_CTRL << XENUSB_PIPE_TYPE_SHIFT;
634 		break;
635 	case PIPE_BULK:
636 		pipe |= XENUSB_PIPE_TYPE_BULK << XENUSB_PIPE_TYPE_SHIFT;
637 		break;
638 	}
639 	pipe = xenusb_setportnum_pipe(pipe, port);
640 
641 	return pipe;
642 }
643 
/*
 * Build the ring request for @urb: grant the transfer buffer (and, for
 * isochronous URBs, the frame descriptor array) to the backend and fill
 * in the pipe/flags/length fields. Returns 0, -E2BIG if the buffer needs
 * more segments than a request can carry, or -ENOMEM if grant references
 * could not be allocated.
 */
static int xenhcd_map_urb_for_request(struct xenhcd_info *info, struct urb *urb,
				      struct xenusb_urb_request *req)
{
	grant_ref_t gref_head;
	int nr_buff_pages = 0;
	int nr_isodesc_pages = 0;
	int nr_grants = 0;

	if (urb->transfer_buffer_length) {
		nr_buff_pages = xenhcd_count_pages(urb->transfer_buffer,
						urb->transfer_buffer_length);

		/* iso URBs also ship their frame descriptors to the backend */
		if (usb_pipeisoc(urb->pipe))
			nr_isodesc_pages = xenhcd_count_pages(
				&urb->iso_frame_desc[0],
				sizeof(struct usb_iso_packet_descriptor) *
				urb->number_of_packets);

		nr_grants = nr_buff_pages + nr_isodesc_pages;
		if (nr_grants > XENUSB_MAX_SEGMENTS_PER_REQUEST) {
			pr_err("xenhcd: error: %d grants\n", nr_grants);
			return -E2BIG;
		}

		if (gnttab_alloc_grant_references(nr_grants, &gref_head)) {
			pr_err("xenhcd: gnttab_alloc_grant_references() error\n");
			return -ENOMEM;
		}

		/* OUT buffers are granted read-only to the backend */
		xenhcd_gnttab_map(info, urb->transfer_buffer,
				  urb->transfer_buffer_length, &gref_head,
				  &req->seg[0], nr_buff_pages,
				  usb_pipein(urb->pipe) ? 0 : GTF_readonly);
	}

	req->pipe = xenhcd_pipe_urb_to_xenusb(urb->pipe, urb->dev->portnum);
	req->transfer_flags = 0;
	if (urb->transfer_flags & URB_SHORT_NOT_OK)
		req->transfer_flags |= XENUSB_SHORT_NOT_OK;
	req->buffer_length = urb->transfer_buffer_length;
	req->nr_buffer_segs = nr_buff_pages;

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_ISOCHRONOUS:
		req->u.isoc.interval = urb->interval;
		req->u.isoc.start_frame = urb->start_frame;
		req->u.isoc.number_of_packets = urb->number_of_packets;
		req->u.isoc.nr_frame_desc_segs = nr_isodesc_pages;

		/* frame descriptors follow the data segments in req->seg[] */
		xenhcd_gnttab_map(info, &urb->iso_frame_desc[0],
				  sizeof(struct usb_iso_packet_descriptor) *
				  urb->number_of_packets,
				  &gref_head, &req->seg[nr_buff_pages],
				  nr_isodesc_pages, 0);
		break;
	case PIPE_INTERRUPT:
		req->u.intr.interval = urb->interval;
		break;
	case PIPE_CONTROL:
		if (urb->setup_packet)
			memcpy(req->u.ctrl, urb->setup_packet, 8);
		break;
	case PIPE_BULK:
		break;
	default:
		break;
	}

	/* release the claim pool; the individual refs stay granted */
	if (nr_grants)
		gnttab_free_grant_references(gref_head);

	return 0;
}
717 
/*
 * Revoke all grants of shadow slot @id after the backend responded.
 * Latches a protocol error if the id was not in flight or the backend is
 * still holding a grant.
 */
static void xenhcd_gnttab_done(struct xenhcd_info *info, unsigned int id)
{
	struct usb_shadow *shadow = info->shadow + id;
	int nr_segs = 0;
	int i;

	if (!shadow->in_flight) {
		xenhcd_set_error(info, "Illegal request id");
		return;
	}
	shadow->in_flight = false;

	nr_segs = shadow->req.nr_buffer_segs;

	/* iso requests carry extra segments for the frame descriptors */
	if (xenusb_pipeisoc(shadow->req.pipe))
		nr_segs += shadow->req.u.isoc.nr_frame_desc_segs;

	for (i = 0; i < nr_segs; i++) {
		if (!gnttab_try_end_foreign_access(shadow->req.seg[i].gref))
			xenhcd_set_error(info, "backend didn't release grant");
	}

	shadow->req.nr_buffer_segs = 0;
	shadow->req.u.isoc.nr_frame_desc_segs = 0;
}
743 
744 static int xenhcd_translate_status(int status)
745 {
746 	switch (status) {
747 	case XENUSB_STATUS_OK:
748 		return 0;
749 	case XENUSB_STATUS_NODEV:
750 		return -ENODEV;
751 	case XENUSB_STATUS_INVAL:
752 		return -EINVAL;
753 	case XENUSB_STATUS_STALL:
754 		return -EPIPE;
755 	case XENUSB_STATUS_IOERROR:
756 		return -EPROTO;
757 	case XENUSB_STATUS_BABBLE:
758 		return -EOVERFLOW;
759 	default:
760 		return -ESHUTDOWN;
761 	}
762 }
763 
/*
 * Complete @urb back to the USB core with @status (a XENUSB_STATUS_*
 * code, translated to errno unless the URB already has a final status).
 * Called with info->lock held; the lock is dropped around
 * usb_hcd_giveback_urb() because the completion callback may resubmit.
 */
static void xenhcd_giveback_urb(struct xenhcd_info *info, struct urb *urb,
				int status)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	int priv_status = urbp->status;

	/* detach and free the priv data before releasing the lock */
	list_del_init(&urbp->list);
	xenhcd_free_urb_priv(urbp);

	if (urb->status == -EINPROGRESS)
		urb->status = xenhcd_translate_status(status);

	spin_unlock(&info->lock);
	/* a recorded unlink status (<= 0) takes precedence */
	usb_hcd_giveback_urb(xenhcd_info_to_hcd(info), urb,
			     priv_status <= 0 ? priv_status : urb->status);
	spin_lock(&info->lock);
}
781 
/*
 * Put one request on the URB ring for @urbp: either a submit request
 * (granting the buffers) or, if the URB was dequeued, an unlink request
 * referring to the original submit id. Caller must hold info->lock and
 * have checked that the ring is not full. Returns 0 or the mapping error.
 */
static int xenhcd_do_request(struct xenhcd_info *info, struct urb_priv *urbp)
{
	struct xenusb_urb_request *req;
	struct urb *urb = urbp->urb;
	unsigned int id;
	int notify;
	int ret;

	id = xenhcd_get_id_from_freelist(info);
	req = &info->shadow[id].req;
	req->id = id;

	if (unlikely(urbp->unlinked)) {
		/* unlink request: point at the id of the original submit */
		req->u.unlink.unlink_id = urbp->req_id;
		req->pipe = xenusb_setunlink_pipe(xenhcd_pipe_urb_to_xenusb(
						 urb->pipe, urb->dev->portnum));
		urbp->unlink_req_id = id;
	} else {
		ret = xenhcd_map_urb_for_request(info, urb, req);
		if (ret) {
			xenhcd_add_id_to_freelist(info, id);
			return ret;
		}
		urbp->req_id = id;
	}

	/* copy the shadow request into the actual ring slot */
	req = RING_GET_REQUEST(&info->urb_ring, info->urb_ring.req_prod_pvt);
	*req = info->shadow[id].req;

	info->urb_ring.req_prod_pvt++;
	info->shadow[id].urb = urb;
	info->shadow[id].in_flight = true;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->urb_ring, notify);
	if (notify)
		notify_remote_via_irq(info->irq);

	return 0;
}
821 
/*
 * Drain the pending-submit queue onto the ring. Stops early (re-arming
 * the watchdog) when the ring fills up; URBs that fail to map are given
 * back with -ESHUTDOWN. Caller must hold info->lock.
 */
static void xenhcd_kick_pending_urbs(struct xenhcd_info *info)
{
	struct urb_priv *urbp;

	while (!list_empty(&info->pending_submit_list)) {
		if (RING_FULL(&info->urb_ring)) {
			xenhcd_timer_action(info, TIMER_RING_WATCHDOG);
			return;
		}

		urbp = list_entry(info->pending_submit_list.next,
				  struct urb_priv, list);
		if (!xenhcd_do_request(info, urbp))
			list_move_tail(&urbp->list, &info->in_progress_list);
		else
			xenhcd_giveback_urb(info, urbp->urb, -ESHUTDOWN);
	}
	/* queue fully drained: the scan action is done */
	xenhcd_timer_action_done(info, TIMER_SCAN_PENDING_URBS);
}
841 
/*
 * caller must lock info->lock
 *
 * Tear-down path: give back every in-progress and pending URB with
 * -ESHUTDOWN (or its already-set dequeue status), revoking grants for
 * requests the backend will never answer. Aborts if grant revocation
 * reports a protocol error.
 */
static void xenhcd_cancel_all_enqueued_urbs(struct xenhcd_info *info)
{
	struct urb_priv *urbp, *tmp;
	int req_id;

	list_for_each_entry_safe(urbp, tmp, &info->in_progress_list, list) {
		req_id = urbp->req_id;
		if (!urbp->unlinked) {
			xenhcd_gnttab_done(info, req_id);
			if (info->error)
				return;
			if (urbp->urb->status == -EINPROGRESS)
				/* not dequeued */
				xenhcd_giveback_urb(info, urbp->urb,
						    -ESHUTDOWN);
			else	/* dequeued */
				xenhcd_giveback_urb(info, urbp->urb,
						    urbp->urb->status);
		}
		info->shadow[req_id].urb = NULL;
	}

	list_for_each_entry_safe(urbp, tmp, &info->pending_submit_list, list)
		xenhcd_giveback_urb(info, urbp->urb, -ESHUTDOWN);
}
870 
871 /*
872  * caller must lock info->lock
873  */
874 static void xenhcd_giveback_unlinked_urbs(struct xenhcd_info *info)
875 {
876 	struct urb_priv *urbp, *tmp;
877 
878 	list_for_each_entry_safe(urbp, tmp, &info->giveback_waiting_list, list)
879 		xenhcd_giveback_urb(info, urbp->urb, urbp->urb->status);
880 }
881 
/*
 * Submit @urbp to the backend, or queue it on the pending list when the
 * ring is full or earlier submissions are still queued (preserving
 * submission order). Caller must hold info->lock. Returns 0 on success
 * or queueing, otherwise the xenhcd_do_request() error.
 */
static int xenhcd_submit_urb(struct xenhcd_info *info, struct urb_priv *urbp)
{
	int ret;

	if (RING_FULL(&info->urb_ring)) {
		list_add_tail(&urbp->list, &info->pending_submit_list);
		xenhcd_timer_action(info, TIMER_RING_WATCHDOG);
		return 0;
	}

	/* keep FIFO order: don't overtake already-pending submissions */
	if (!list_empty(&info->pending_submit_list)) {
		list_add_tail(&urbp->list, &info->pending_submit_list);
		xenhcd_timer_action(info, TIMER_SCAN_PENDING_URBS);
		return 0;
	}

	ret = xenhcd_do_request(info, urbp);
	if (ret == 0)
		list_add_tail(&urbp->list, &info->in_progress_list);

	return ret;
}
904 
/*
 * Dequeue @urbp: if it was never submitted to the backend, just park it
 * for giveback; otherwise send (or queue) an unlink request. Caller must
 * hold info->lock. Returns 0, -EBUSY if already unlinked, or the
 * xenhcd_do_request() error.
 */
static int xenhcd_unlink_urb(struct xenhcd_info *info, struct urb_priv *urbp)
{
	int ret;

	/* already unlinked? */
	if (urbp->unlinked)
		return -EBUSY;

	urbp->unlinked = true;

	/* the urb is still in pending_submit queue */
	if (urbp->req_id == ~0) {
		list_move_tail(&urbp->list, &info->giveback_waiting_list);
		xenhcd_timer_action(info, TIMER_SCAN_PENDING_URBS);
		return 0;
	}

	/* send unlink request to backend */
	if (RING_FULL(&info->urb_ring)) {
		list_move_tail(&urbp->list, &info->pending_unlink_list);
		xenhcd_timer_action(info, TIMER_RING_WATCHDOG);
		return 0;
	}

	/* keep FIFO order among queued unlink requests */
	if (!list_empty(&info->pending_unlink_list)) {
		list_move_tail(&urbp->list, &info->pending_unlink_list);
		xenhcd_timer_action(info, TIMER_SCAN_PENDING_URBS);
		return 0;
	}

	ret = xenhcd_do_request(info, urbp);
	if (ret == 0)
		list_move_tail(&urbp->list, &info->in_progress_list);

	return ret;
}
941 
/*
 * Copy a backend response into @urb (clamping actual_length to the
 * buffer size) and give the URB back. Caller must hold info->lock.
 */
static void xenhcd_res_to_urb(struct xenhcd_info *info,
			      struct xenusb_urb_response *res, struct urb *urb)
{
	if (unlikely(!urb))
		return;

	/* never trust the backend's length beyond what we granted */
	if (res->actual_length > urb->transfer_buffer_length)
		urb->actual_length = urb->transfer_buffer_length;
	else if (res->actual_length < 0)
		urb->actual_length = 0;
	else
		urb->actual_length = res->actual_length;
	urb->error_count = res->error_count;
	urb->start_frame = res->start_frame;
	xenhcd_giveback_urb(info, urb, res->status);
}
958 
/*
 * Consume all pending responses on the URB ring: end grants, hand the
 * URBs back and recycle the shadow ids. Clears *eoiflag when any work
 * was done. Returns nonzero if more responses arrived while processing
 * (so the irq handler should loop); 0 otherwise or on protocol error.
 */
static int xenhcd_urb_request_done(struct xenhcd_info *info,
				   unsigned int *eoiflag)
{
	struct xenusb_urb_response res;
	RING_IDX i, rp;
	__u16 id;
	int more_to_do = 0;
	unsigned long flags;

	spin_lock_irqsave(&info->lock, flags);

	rp = info->urb_ring.sring->rsp_prod;
	if (RING_RESPONSE_PROD_OVERFLOW(&info->urb_ring, rp)) {
		xenhcd_set_error(info, "Illegal index on urb-ring");
		goto err;
	}
	rmb(); /* ensure we see queued responses up to "rp" */

	for (i = info->urb_ring.rsp_cons; i != rp; i++) {
		RING_COPY_RESPONSE(&info->urb_ring, i, &res);
		id = res.id;
		/* validate the backend-supplied id before indexing shadow[] */
		if (id >= XENUSB_URB_RING_SIZE) {
			xenhcd_set_error(info, "Illegal data on urb-ring");
			goto err;
		}

		/* submit responses carry data; unlink responses do not */
		if (likely(xenusb_pipesubmit(info->shadow[id].req.pipe))) {
			xenhcd_gnttab_done(info, id);
			if (info->error)
				goto err;
			xenhcd_res_to_urb(info, &res, info->shadow[id].urb);
		}

		xenhcd_add_id_to_freelist(info, id);

		*eoiflag = 0;
	}
	info->urb_ring.rsp_cons = i;

	if (i != info->urb_ring.req_prod_pvt)
		RING_FINAL_CHECK_FOR_RESPONSES(&info->urb_ring, more_to_do);
	else
		info->urb_ring.sring->rsp_event = i + 1;

	spin_unlock_irqrestore(&info->lock, flags);

	return more_to_do;

 err:
	spin_unlock_irqrestore(&info->lock, flags);
	return 0;
}
1011 
/*
 * Consume connection-change responses on the conn ring: update port
 * state via xenhcd_rhport_connect() and immediately re-queue each slot
 * as a new request so the backend always has buffers for future events.
 * Polls the root hub if any port changed. Clears *eoiflag when work was
 * done; returns nonzero if more responses arrived meanwhile.
 */
static int xenhcd_conn_notify(struct xenhcd_info *info, unsigned int *eoiflag)
{
	struct xenusb_conn_response res;
	struct xenusb_conn_request *req;
	RING_IDX rc, rp;
	__u16 id;
	__u8 portnum, speed;
	int more_to_do = 0;
	int notify;
	int port_changed = 0;
	unsigned long flags;

	spin_lock_irqsave(&info->lock, flags);

	rc = info->conn_ring.rsp_cons;
	rp = info->conn_ring.sring->rsp_prod;
	if (RING_RESPONSE_PROD_OVERFLOW(&info->conn_ring, rp)) {
		xenhcd_set_error(info, "Illegal index on conn-ring");
		spin_unlock_irqrestore(&info->lock, flags);
		return 0;
	}
	rmb(); /* ensure we see queued responses up to "rp" */

	while (rc != rp) {
		RING_COPY_RESPONSE(&info->conn_ring, rc, &res);
		id = res.id;
		portnum = res.portnum;
		speed = res.speed;
		info->conn_ring.rsp_cons = ++rc;

		/* invalid port/speed from the backend is a protocol error */
		if (xenhcd_rhport_connect(info, portnum, speed)) {
			xenhcd_set_error(info, "Illegal data on conn-ring");
			spin_unlock_irqrestore(&info->lock, flags);
			return 0;
		}

		if (info->ports[portnum - 1].c_connection)
			port_changed = 1;

		barrier();

		/* recycle the slot: hand the id back as a fresh request */
		req = RING_GET_REQUEST(&info->conn_ring,
				       info->conn_ring.req_prod_pvt);
		req->id = id;
		info->conn_ring.req_prod_pvt++;

		*eoiflag = 0;
	}

	if (rc != info->conn_ring.req_prod_pvt)
		RING_FINAL_CHECK_FOR_RESPONSES(&info->conn_ring, more_to_do);
	else
		info->conn_ring.sring->rsp_event = rc + 1;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->conn_ring, notify);
	if (notify)
		notify_remote_via_irq(info->irq);

	spin_unlock_irqrestore(&info->lock, flags);

	/* poll outside the lock: the hub callbacks re-take info->lock */
	if (port_changed)
		usb_hcd_poll_rh_status(xenhcd_info_to_hcd(info));

	return more_to_do;
}
1077 
/*
 * Threaded interrupt handler: process both rings until neither reports
 * further responses, then signal EOI (spurious unless either ring did
 * real work). Does nothing once a protocol error has been latched.
 */
static irqreturn_t xenhcd_int(int irq, void *dev_id)
{
	struct xenhcd_info *info = (struct xenhcd_info *)dev_id;
	unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;

	if (unlikely(info->error)) {
		xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
		return IRQ_HANDLED;
	}

	/* "|" not "||": always poll both rings each pass */
	while (xenhcd_urb_request_done(info, &eoiflag) |
	       xenhcd_conn_notify(info, &eoiflag))
		/* Yield point for this unbounded loop. */
		cond_resched();

	xen_irq_lateeoi(irq, eoiflag);
	return IRQ_HANDLED;
}
1096 
/*
 * Release everything xenhcd_setup_rings() created: the irq binding and
 * both shared ring pages (ending their grants). Safe to call on a
 * partially initialized info.
 */
static void xenhcd_destroy_rings(struct xenhcd_info *info)
{
	if (info->irq)
		unbind_from_irqhandler(info->irq, info);
	info->irq = 0;

	/* gnttab_end_foreign_access also frees the ring page */
	if (info->urb_ring_ref != GRANT_INVALID_REF) {
		gnttab_end_foreign_access(info->urb_ring_ref,
					  (unsigned long)info->urb_ring.sring);
		info->urb_ring_ref = GRANT_INVALID_REF;
	}
	info->urb_ring.sring = NULL;

	if (info->conn_ring_ref != GRANT_INVALID_REF) {
		gnttab_end_foreign_access(info->conn_ring_ref,
					  (unsigned long)info->conn_ring.sring);
		info->conn_ring_ref = GRANT_INVALID_REF;
	}
	info->conn_ring.sring = NULL;
}
1117 
/*
 * Allocate and grant the URB and connection shared rings, allocate an
 * event channel and bind the (lateeoi) threaded interrupt handler.
 * On any failure everything already set up is torn down via
 * xenhcd_destroy_rings(). Returns 0 or a negative errno.
 */
static int xenhcd_setup_rings(struct xenbus_device *dev,
			      struct xenhcd_info *info)
{
	struct xenusb_urb_sring *urb_sring;
	struct xenusb_conn_sring *conn_sring;
	grant_ref_t gref;
	int err;

	info->urb_ring_ref = GRANT_INVALID_REF;
	info->conn_ring_ref = GRANT_INVALID_REF;

	urb_sring = (struct xenusb_urb_sring *)get_zeroed_page(
							GFP_NOIO | __GFP_HIGH);
	if (!urb_sring) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating urb ring");
		return -ENOMEM;
	}
	SHARED_RING_INIT(urb_sring);
	FRONT_RING_INIT(&info->urb_ring, urb_sring, PAGE_SIZE);

	err = xenbus_grant_ring(dev, urb_sring, 1, &gref);
	if (err < 0) {
		/* not granted yet: free directly, destroy_rings skips it */
		free_page((unsigned long)urb_sring);
		info->urb_ring.sring = NULL;
		goto fail;
	}
	info->urb_ring_ref = gref;

	conn_sring = (struct xenusb_conn_sring *)get_zeroed_page(
							GFP_NOIO | __GFP_HIGH);
	if (!conn_sring) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating conn ring");
		err = -ENOMEM;
		goto fail;
	}
	SHARED_RING_INIT(conn_sring);
	FRONT_RING_INIT(&info->conn_ring, conn_sring, PAGE_SIZE);

	err = xenbus_grant_ring(dev, conn_sring, 1, &gref);
	if (err < 0) {
		free_page((unsigned long)conn_sring);
		info->conn_ring.sring = NULL;
		goto fail;
	}
	info->conn_ring_ref = gref;

	err = xenbus_alloc_evtchn(dev, &info->evtchn);
	if (err) {
		xenbus_dev_fatal(dev, err, "xenbus_alloc_evtchn");
		goto fail;
	}

	/* lateeoi binding: irq EOI is signalled explicitly in xenhcd_int */
	err = bind_evtchn_to_irq_lateeoi(info->evtchn);
	if (err <= 0) {
		xenbus_dev_fatal(dev, err, "bind_evtchn_to_irq_lateeoi");
		goto fail;
	}

	info->irq = err;

	err = request_threaded_irq(info->irq, NULL, xenhcd_int,
				   IRQF_ONESHOT, "xenhcd", info);
	if (err) {
		xenbus_dev_fatal(dev, err, "request_threaded_irq");
		goto free_irq;
	}

	return 0;

free_irq:
	unbind_from_irqhandler(info->irq, info);
fail:
	xenhcd_destroy_rings(info);
	return err;
}
1193 
1194 static int xenhcd_talk_to_backend(struct xenbus_device *dev,
1195 				  struct xenhcd_info *info)
1196 {
1197 	const char *message;
1198 	struct xenbus_transaction xbt;
1199 	int err;
1200 
1201 	err = xenhcd_setup_rings(dev, info);
1202 	if (err)
1203 		return err;
1204 
1205 again:
1206 	err = xenbus_transaction_start(&xbt);
1207 	if (err) {
1208 		xenbus_dev_fatal(dev, err, "starting transaction");
1209 		goto destroy_ring;
1210 	}
1211 
1212 	err = xenbus_printf(xbt, dev->nodename, "urb-ring-ref", "%u",
1213 			    info->urb_ring_ref);
1214 	if (err) {
1215 		message = "writing urb-ring-ref";
1216 		goto abort_transaction;
1217 	}
1218 
1219 	err = xenbus_printf(xbt, dev->nodename, "conn-ring-ref", "%u",
1220 			    info->conn_ring_ref);
1221 	if (err) {
1222 		message = "writing conn-ring-ref";
1223 		goto abort_transaction;
1224 	}
1225 
1226 	err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
1227 			    info->evtchn);
1228 	if (err) {
1229 		message = "writing event-channel";
1230 		goto abort_transaction;
1231 	}
1232 
1233 	err = xenbus_transaction_end(xbt, 0);
1234 	if (err) {
1235 		if (err == -EAGAIN)
1236 			goto again;
1237 		xenbus_dev_fatal(dev, err, "completing transaction");
1238 		goto destroy_ring;
1239 	}
1240 
1241 	return 0;
1242 
1243 abort_transaction:
1244 	xenbus_transaction_end(xbt, 1);
1245 	xenbus_dev_fatal(dev, err, "%s", message);
1246 
1247 destroy_ring:
1248 	xenhcd_destroy_rings(info);
1249 
1250 	return err;
1251 }
1252 
1253 static int xenhcd_connect(struct xenbus_device *dev)
1254 {
1255 	struct xenhcd_info *info = dev_get_drvdata(&dev->dev);
1256 	struct xenusb_conn_request *req;
1257 	int idx, err;
1258 	int notify;
1259 	char name[TASK_COMM_LEN];
1260 	struct usb_hcd *hcd;
1261 
1262 	hcd = xenhcd_info_to_hcd(info);
1263 	snprintf(name, TASK_COMM_LEN, "xenhcd.%d", hcd->self.busnum);
1264 
1265 	err = xenhcd_talk_to_backend(dev, info);
1266 	if (err)
1267 		return err;
1268 
1269 	/* prepare ring for hotplug notification */
1270 	for (idx = 0; idx < XENUSB_CONN_RING_SIZE; idx++) {
1271 		req = RING_GET_REQUEST(&info->conn_ring, idx);
1272 		req->id = idx;
1273 	}
1274 	info->conn_ring.req_prod_pvt = idx;
1275 
1276 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->conn_ring, notify);
1277 	if (notify)
1278 		notify_remote_via_irq(info->irq);
1279 
1280 	return 0;
1281 }
1282 
/*
 * Tear down on backend disconnect: remove the host controller from the
 * USB core first (this retires the root hub and its devices), then
 * complete the xenbus close handshake.
 */
static void xenhcd_disconnect(struct xenbus_device *dev)
{
	struct xenhcd_info *info = dev_get_drvdata(&dev->dev);
	struct usb_hcd *hcd = xenhcd_info_to_hcd(info);

	usb_remove_hcd(hcd);
	xenbus_frontend_closed(dev);
}
1291 
/*
 * Watchdog timer callback: retries deferred work — giving back unlinked
 * urbs and kicking pending urbs — but only while the HC is still marked
 * running.  All queue manipulation happens under info->lock.
 */
static void xenhcd_watchdog(struct timer_list *timer)
{
	struct xenhcd_info *info = from_timer(info, timer, watchdog);
	unsigned long flags;

	spin_lock_irqsave(&info->lock, flags);
	if (likely(HC_IS_RUNNING(xenhcd_info_to_hcd(info)->state))) {
		/* clear the pending-timer flag before doing the work */
		xenhcd_timer_action_done(info, TIMER_RING_WATCHDOG);
		xenhcd_giveback_unlinked_urbs(info);
		xenhcd_kick_pending_urbs(info);
	}
	spin_unlock_irqrestore(&info->lock, flags);
}
1305 
1306 /*
1307  * one-time HC init
1308  */
1309 static int xenhcd_setup(struct usb_hcd *hcd)
1310 {
1311 	struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
1312 
1313 	spin_lock_init(&info->lock);
1314 	INIT_LIST_HEAD(&info->pending_submit_list);
1315 	INIT_LIST_HEAD(&info->pending_unlink_list);
1316 	INIT_LIST_HEAD(&info->in_progress_list);
1317 	INIT_LIST_HEAD(&info->giveback_waiting_list);
1318 	timer_setup(&info->watchdog, xenhcd_watchdog, 0);
1319 
1320 	hcd->has_tt = (hcd->driver->flags & HCD_MASK) != HCD_USB11;
1321 
1322 	return 0;
1323 }
1324 
1325 /*
1326  * start HC running
1327  */
static int xenhcd_run(struct usb_hcd *hcd)
{
	/* Root-hub status changes are pushed, not polled periodically. */
	hcd->uses_new_polling = 1;
	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	hcd->state = HC_STATE_RUNNING;
	return 0;
}
1335 
1336 /*
1337  * stop running HC
1338  */
static void xenhcd_stop(struct usb_hcd *hcd)
{
	struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);

	/* stop the watchdog first and wait out any running callback */
	del_timer_sync(&info->watchdog);
	spin_lock_irq(&info->lock);
	/* cancel all urbs */
	hcd->state = HC_STATE_HALT;
	xenhcd_cancel_all_enqueued_urbs(info);
	xenhcd_giveback_unlinked_urbs(info);
	spin_unlock_irq(&info->lock);
}
1351 
1352 /*
1353  * called as .urb_enqueue()
1354  * non-error returns are promise to giveback the urb later
1355  */
static int xenhcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
			      gfp_t mem_flags)
{
	struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
	struct urb_priv *urbp;
	unsigned long flags;
	int ret;

	/* refuse new work once the ring is in an error state */
	if (unlikely(info->error))
		return -ESHUTDOWN;

	urbp = kmem_cache_zalloc(xenhcd_urbp_cachep, mem_flags);
	if (!urbp)
		return -ENOMEM;

	spin_lock_irqsave(&info->lock, flags);

	urbp->urb = urb;
	urb->hcpriv = urbp;
	urbp->req_id = ~0;		/* no ring request id assigned yet */
	urbp->unlink_req_id = ~0;	/* no unlink request id either */
	INIT_LIST_HEAD(&urbp->list);
	urbp->status = 1;		/* non-final marker, not a real status */
	/*
	 * NOTE(review): this clears the core's urb->unlinked, not the
	 * driver's urbp->unlinked (the latter is already zero thanks to
	 * kmem_cache_zalloc()).  Looks intentional but confirm which field
	 * was meant here.
	 */
	urb->unlinked = false;

	ret = xenhcd_submit_urb(info, urbp);

	/* on failure we never took ownership, so release urbp right away */
	if (ret)
		xenhcd_free_urb_priv(urbp);

	spin_unlock_irqrestore(&info->lock, flags);

	return ret;
}
1390 
1391 /*
1392  * called as .urb_dequeue()
1393  */
1394 static int xenhcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1395 {
1396 	struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
1397 	struct urb_priv *urbp;
1398 	unsigned long flags;
1399 	int ret = 0;
1400 
1401 	spin_lock_irqsave(&info->lock, flags);
1402 
1403 	urbp = urb->hcpriv;
1404 	if (urbp) {
1405 		urbp->status = status;
1406 		ret = xenhcd_unlink_urb(info, urbp);
1407 	}
1408 
1409 	spin_unlock_irqrestore(&info->lock, flags);
1410 
1411 	return ret;
1412 }
1413 
1414 /*
1415  * called from usb_get_current_frame_number(),
1416  * but, almost all drivers not use such function.
1417  */
static int xenhcd_get_frame(struct usb_hcd *hcd)
{
	/*
	 * No frame counter is exposed by the virtual HC; always report 0.
	 * It means error, but probably no problem :-)
	 */
	return 0;
}
1423 
/* hc_driver ops table for the USB 2.0 flavor of the virtual controller */
static struct hc_driver xenhcd_usb20_hc_driver = {
	.description = "xen-hcd",
	.product_desc = "Xen USB2.0 Virtual Host Controller",
	.hcd_priv_size = sizeof(struct xenhcd_info),
	.flags = HCD_USB2,

	/* basic HC lifecycle operations */
	.reset = xenhcd_setup,
	.start = xenhcd_run,
	.stop = xenhcd_stop,

	/* managing urb I/O */
	.urb_enqueue = xenhcd_urb_enqueue,
	.urb_dequeue = xenhcd_urb_dequeue,
	.get_frame_number = xenhcd_get_frame,

	/* root hub operations */
	.hub_status_data = xenhcd_hub_status_data,
	.hub_control = xenhcd_hub_control,
#ifdef CONFIG_PM
	.bus_suspend = xenhcd_bus_suspend,
	.bus_resume = xenhcd_bus_resume,
#endif
};
1448 
/*
 * hc_driver ops table for the USB 1.1 flavor — identical callbacks to
 * the 2.0 table, differing only in product_desc and the HCD_USB11 flag.
 */
static struct hc_driver xenhcd_usb11_hc_driver = {
	.description = "xen-hcd",
	.product_desc = "Xen USB1.1 Virtual Host Controller",
	.hcd_priv_size = sizeof(struct xenhcd_info),
	.flags = HCD_USB11,

	/* basic HC lifecycle operations */
	.reset = xenhcd_setup,
	.start = xenhcd_run,
	.stop = xenhcd_stop,

	/* managing urb I/O */
	.urb_enqueue = xenhcd_urb_enqueue,
	.urb_dequeue = xenhcd_urb_dequeue,
	.get_frame_number = xenhcd_get_frame,

	/* root hub operations */
	.hub_status_data = xenhcd_hub_status_data,
	.hub_control = xenhcd_hub_control,
#ifdef CONFIG_PM
	.bus_suspend = xenhcd_bus_suspend,
	.bus_resume = xenhcd_bus_resume,
#endif
};
1473 
1474 static struct usb_hcd *xenhcd_create_hcd(struct xenbus_device *dev)
1475 {
1476 	int i;
1477 	int err = 0;
1478 	int num_ports;
1479 	int usb_ver;
1480 	struct usb_hcd *hcd = NULL;
1481 	struct xenhcd_info *info;
1482 
1483 	err = xenbus_scanf(XBT_NIL, dev->otherend, "num-ports", "%d",
1484 			   &num_ports);
1485 	if (err != 1) {
1486 		xenbus_dev_fatal(dev, err, "reading num-ports");
1487 		return ERR_PTR(-EINVAL);
1488 	}
1489 	if (num_ports < 1 || num_ports > XENUSB_MAX_PORTNR) {
1490 		xenbus_dev_fatal(dev, err, "invalid num-ports");
1491 		return ERR_PTR(-EINVAL);
1492 	}
1493 
1494 	err = xenbus_scanf(XBT_NIL, dev->otherend, "usb-ver", "%d", &usb_ver);
1495 	if (err != 1) {
1496 		xenbus_dev_fatal(dev, err, "reading usb-ver");
1497 		return ERR_PTR(-EINVAL);
1498 	}
1499 	switch (usb_ver) {
1500 	case XENUSB_VER_USB11:
1501 		hcd = usb_create_hcd(&xenhcd_usb11_hc_driver, &dev->dev,
1502 				     dev_name(&dev->dev));
1503 		break;
1504 	case XENUSB_VER_USB20:
1505 		hcd = usb_create_hcd(&xenhcd_usb20_hc_driver, &dev->dev,
1506 				     dev_name(&dev->dev));
1507 		break;
1508 	default:
1509 		xenbus_dev_fatal(dev, err, "invalid usb-ver");
1510 		return ERR_PTR(-EINVAL);
1511 	}
1512 	if (!hcd) {
1513 		xenbus_dev_fatal(dev, err,
1514 				 "fail to allocate USB host controller");
1515 		return ERR_PTR(-ENOMEM);
1516 	}
1517 
1518 	info = xenhcd_hcd_to_info(hcd);
1519 	info->xbdev = dev;
1520 	info->rh_numports = num_ports;
1521 
1522 	for (i = 0; i < XENUSB_URB_RING_SIZE; i++) {
1523 		info->shadow[i].req.id = i + 1;
1524 		info->shadow[i].urb = NULL;
1525 		info->shadow[i].in_flight = false;
1526 	}
1527 	info->shadow[XENUSB_URB_RING_SIZE - 1].req.id = 0x0fff;
1528 
1529 	return hcd;
1530 }
1531 
/*
 * React to xenbus state changes of the backend: connect exactly once
 * while we are still Initialising, disconnect on Closing/Closed.
 */
static void xenhcd_backend_changed(struct xenbus_device *dev,
				   enum xenbus_state backend_state)
{
	switch (backend_state) {
	case XenbusStateInitialising:
	case XenbusStateReconfiguring:
	case XenbusStateReconfigured:
	case XenbusStateUnknown:
		break;

	case XenbusStateInitWait:
	case XenbusStateInitialised:
	case XenbusStateConnected:
		/* only connect from our own Initialising state */
		if (dev->state != XenbusStateInitialising)
			break;
		if (!xenhcd_connect(dev))
			xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateClosed:
		if (dev->state == XenbusStateClosed)
			break;
		fallthrough;	/* Missed the backend's Closing state. */
	case XenbusStateClosing:
		xenhcd_disconnect(dev);
		break;

	default:
		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
				 backend_state);
		break;
	}
}
1565 
/*
 * xenbus .remove callback: release ring/grant/irq resources and drop
 * the HCD reference taken by usb_create_hcd() at probe time.
 */
static int xenhcd_remove(struct xenbus_device *dev)
{
	struct xenhcd_info *info = dev_get_drvdata(&dev->dev);
	struct usb_hcd *hcd = xenhcd_info_to_hcd(info);

	xenhcd_destroy_rings(info);
	usb_put_hcd(hcd);

	return 0;
}
1576 
1577 static int xenhcd_probe(struct xenbus_device *dev,
1578 			const struct xenbus_device_id *id)
1579 {
1580 	int err;
1581 	struct usb_hcd *hcd;
1582 	struct xenhcd_info *info;
1583 
1584 	if (usb_disabled())
1585 		return -ENODEV;
1586 
1587 	hcd = xenhcd_create_hcd(dev);
1588 	if (IS_ERR(hcd)) {
1589 		err = PTR_ERR(hcd);
1590 		xenbus_dev_fatal(dev, err,
1591 				 "fail to create usb host controller");
1592 		return err;
1593 	}
1594 
1595 	info = xenhcd_hcd_to_info(hcd);
1596 	dev_set_drvdata(&dev->dev, info);
1597 
1598 	err = usb_add_hcd(hcd, 0, 0);
1599 	if (err) {
1600 		xenbus_dev_fatal(dev, err, "fail to add USB host controller");
1601 		usb_put_hcd(hcd);
1602 		dev_set_drvdata(&dev->dev, NULL);
1603 	}
1604 
1605 	return err;
1606 }
1607 
/* xenbus device types this frontend binds to ("" terminates the list) */
static const struct xenbus_device_id xenhcd_ids[] = {
	{ "vusb" },
	{ "" },
};
1612 
/* xenbus frontend driver glue */
static struct xenbus_driver xenhcd_driver = {
	.ids			= xenhcd_ids,
	.probe			= xenhcd_probe,
	.otherend_changed	= xenhcd_backend_changed,
	.remove			= xenhcd_remove,
};
1619 
1620 static int __init xenhcd_init(void)
1621 {
1622 	if (!xen_domain())
1623 		return -ENODEV;
1624 
1625 	xenhcd_urbp_cachep = kmem_cache_create("xenhcd_urb_priv",
1626 					sizeof(struct urb_priv), 0, 0, NULL);
1627 	if (!xenhcd_urbp_cachep) {
1628 		pr_err("xenhcd failed to create kmem cache\n");
1629 		return -ENOMEM;
1630 	}
1631 
1632 	return xenbus_register_frontend(&xenhcd_driver);
1633 }
1634 module_init(xenhcd_init);
1635 
1636 static void __exit xenhcd_exit(void)
1637 {
1638 	kmem_cache_destroy(xenhcd_urbp_cachep);
1639 	xenbus_unregister_driver(&xenhcd_driver);
1640 }
1641 module_exit(xenhcd_exit);
1642 
1643 MODULE_ALIAS("xen:vusb");
1644 MODULE_AUTHOR("Juergen Gross <jgross@suse.com>");
1645 MODULE_DESCRIPTION("Xen USB Virtual Host Controller driver (xen-hcd)");
1646 MODULE_LICENSE("Dual BSD/GPL");
1647