xref: /openbmc/linux/drivers/usb/host/xen-hcd.c (revision 297ce026)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * xen-hcd.c
4  *
5  * Xen USB Virtual Host Controller driver
6  *
7  * Copyright (C) 2009, FUJITSU LABORATORIES LTD.
8  * Author: Noboru Iwamatsu <n_iwamatsu@jp.fujitsu.com>
9  */
10 
11 #include <linux/module.h>
12 #include <linux/usb.h>
13 #include <linux/list.h>
14 #include <linux/usb/hcd.h>
15 #include <linux/io.h>
16 
17 #include <xen/xen.h>
18 #include <xen/xenbus.h>
19 #include <xen/grant_table.h>
20 #include <xen/events.h>
21 #include <xen/page.h>
22 
23 #include <xen/interface/io/usbif.h>
24 
25 /* Private per-URB data */
26 struct urb_priv {
27 	struct list_head list;
28 	struct urb *urb;
29 	int req_id;		/* RING_REQUEST id for submitting */
30 	int unlink_req_id;	/* RING_REQUEST id for unlinking */
31 	int status;
32 	bool unlinked;		/* dequeued marker */
33 };
34 
35 /* virtual roothub port status */
36 struct rhport_status {
37 	__u32 status;
38 	bool resuming;		/* in resuming */
39 	bool c_connection;	/* connection changed */
40 	unsigned long timeout;
41 };
42 
43 /* status of attached device */
44 struct vdevice_status {
45 	int devnum;
46 	enum usb_device_state status;
47 	enum usb_device_speed speed;
48 };
49 
50 /* RING request shadow */
51 struct usb_shadow {
52 	struct xenusb_urb_request req;
53 	struct urb *urb;
54 	bool in_flight;
55 };
56 
57 struct xenhcd_info {
58 	/* Virtual Host Controller has 4 urb queues */
59 	struct list_head pending_submit_list;
60 	struct list_head pending_unlink_list;
61 	struct list_head in_progress_list;
62 	struct list_head giveback_waiting_list;
63 
64 	spinlock_t lock;
65 
66 	/* timer that kicks pending and giveback-waiting urbs */
67 	struct timer_list watchdog;
68 	unsigned long actions;
69 
70 	/* virtual root hub */
71 	int rh_numports;
72 	struct rhport_status ports[XENUSB_MAX_PORTNR];
73 	struct vdevice_status devices[XENUSB_MAX_PORTNR];
74 
75 	/* Xen related stuff */
76 	struct xenbus_device *xbdev;
77 	int urb_ring_ref;
78 	int conn_ring_ref;
79 	struct xenusb_urb_front_ring urb_ring;
80 	struct xenusb_conn_front_ring conn_ring;
81 
82 	unsigned int evtchn;
83 	unsigned int irq;
84 	struct usb_shadow shadow[XENUSB_URB_RING_SIZE];
85 	unsigned int shadow_free;
86 
87 	bool error;
88 };
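
/*
 * URB lifecycle across the four queues above (as implemented below):
 *  - pending_submit_list:   enqueued URBs that could not be put on the urb
 *                           ring yet (ring full or earlier URBs still queued)
 *  - in_progress_list:      URBs whose request is on the ring, waiting for a
 *                           response from the backend
 *  - pending_unlink_list:   dequeued URBs whose unlink request could not be
 *                           sent to the backend yet
 *  - giveback_waiting_list: dequeued URBs that were never sent to the backend
 *                           and are given back from the watchdog timer
 */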
89 
90 #define XENHCD_RING_JIFFIES (HZ/200)
91 #define XENHCD_SCAN_JIFFIES 1
92 
93 enum xenhcd_timer_action {
94 	TIMER_RING_WATCHDOG,
95 	TIMER_SCAN_PENDING_URBS,
96 };
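
/*
 * TIMER_RING_WATCHDOG:     re-check a full urb ring after XENHCD_RING_JIFFIES
 * TIMER_SCAN_PENDING_URBS: scan the pending/giveback lists on the next jiffy
 */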
97 
98 static struct kmem_cache *xenhcd_urbp_cachep;
99 
100 static inline struct xenhcd_info *xenhcd_hcd_to_info(struct usb_hcd *hcd)
101 {
102 	return (struct xenhcd_info *)hcd->hcd_priv;
103 }
104 
105 static inline struct usb_hcd *xenhcd_info_to_hcd(struct xenhcd_info *info)
106 {
107 	return container_of((void *)info, struct usb_hcd, hcd_priv);
108 }
109 
110 static void xenhcd_set_error(struct xenhcd_info *info, const char *msg)
111 {
112 	info->error = true;
113 
114 	pr_alert("xen-hcd: protocol error: %s!\n", msg);
115 }
116 
117 static inline void xenhcd_timer_action_done(struct xenhcd_info *info,
118 					    enum xenhcd_timer_action action)
119 {
120 	clear_bit(action, &info->actions);
121 }
122 
123 static void xenhcd_timer_action(struct xenhcd_info *info,
124 				enum xenhcd_timer_action action)
125 {
126 	if (timer_pending(&info->watchdog) &&
127 	    test_bit(TIMER_SCAN_PENDING_URBS, &info->actions))
128 		return;
129 
130 	if (!test_and_set_bit(action, &info->actions)) {
131 		unsigned long t;
132 
133 		switch (action) {
134 		case TIMER_RING_WATCHDOG:
135 			t = XENHCD_RING_JIFFIES;
136 			break;
137 		default:
138 			t = XENHCD_SCAN_JIFFIES;
139 			break;
140 		}
141 		mod_timer(&info->watchdog, t + jiffies);
142 	}
143 }
144 
145 /*
146  * set virtual port connection status
147  */
148 static void xenhcd_set_connect_state(struct xenhcd_info *info, int portnum)
149 {
150 	int port;
151 
152 	port = portnum - 1;
153 	if (info->ports[port].status & USB_PORT_STAT_POWER) {
154 		switch (info->devices[port].speed) {
155 		case XENUSB_SPEED_NONE:
156 			info->ports[port].status &=
157 				~(USB_PORT_STAT_CONNECTION |
158 				  USB_PORT_STAT_ENABLE |
159 				  USB_PORT_STAT_LOW_SPEED |
160 				  USB_PORT_STAT_HIGH_SPEED |
161 				  USB_PORT_STAT_SUSPEND);
162 			break;
163 		case XENUSB_SPEED_LOW:
164 			info->ports[port].status |= USB_PORT_STAT_CONNECTION;
165 			info->ports[port].status |= USB_PORT_STAT_LOW_SPEED;
166 			break;
167 		case XENUSB_SPEED_FULL:
168 			info->ports[port].status |= USB_PORT_STAT_CONNECTION;
169 			break;
170 		case XENUSB_SPEED_HIGH:
171 			info->ports[port].status |= USB_PORT_STAT_CONNECTION;
172 			info->ports[port].status |= USB_PORT_STAT_HIGH_SPEED;
173 			break;
174 		default: /* error */
175 			return;
176 		}
177 		info->ports[port].status |= (USB_PORT_STAT_C_CONNECTION << 16);
178 	}
179 }
180 
181 /*
182  * set virtual device connection status
183  */
184 static int xenhcd_rhport_connect(struct xenhcd_info *info, __u8 portnum,
185 				 __u8 speed)
186 {
187 	int port;
188 
189 	if (portnum < 1 || portnum > info->rh_numports)
190 		return -EINVAL; /* invalid port number */
191 
192 	port = portnum - 1;
193 	if (info->devices[port].speed != speed) {
194 		switch (speed) {
195 		case XENUSB_SPEED_NONE: /* disconnect */
196 			info->devices[port].status = USB_STATE_NOTATTACHED;
197 			break;
198 		case XENUSB_SPEED_LOW:
199 		case XENUSB_SPEED_FULL:
200 		case XENUSB_SPEED_HIGH:
201 			info->devices[port].status = USB_STATE_ATTACHED;
202 			break;
203 		default: /* error */
204 			return -EINVAL;
205 		}
206 		info->devices[port].speed = speed;
207 		info->ports[port].c_connection = true;
208 
209 		xenhcd_set_connect_state(info, portnum);
210 	}
211 
212 	return 0;
213 }
214 
215 /*
216  * SetPortFeature(PORT_SUSPENDED)
217  */
218 static void xenhcd_rhport_suspend(struct xenhcd_info *info, int portnum)
219 {
220 	int port;
221 
222 	port = portnum - 1;
223 	info->ports[port].status |= USB_PORT_STAT_SUSPEND;
224 	info->devices[port].status = USB_STATE_SUSPENDED;
225 }
226 
227 /*
228  * ClearPortFeature(PORT_SUSPENDED)
229  */
230 static void xenhcd_rhport_resume(struct xenhcd_info *info, int portnum)
231 {
232 	int port;
233 
234 	port = portnum - 1;
235 	if (info->ports[port].status & USB_PORT_STAT_SUSPEND) {
236 		info->ports[port].resuming = true;
237 		info->ports[port].timeout = jiffies + msecs_to_jiffies(20);
238 	}
239 }
240 
241 /*
242  * SetPortFeature(PORT_POWER)
243  */
244 static void xenhcd_rhport_power_on(struct xenhcd_info *info, int portnum)
245 {
246 	int port;
247 
248 	port = portnum - 1;
249 	if ((info->ports[port].status & USB_PORT_STAT_POWER) == 0) {
250 		info->ports[port].status |= USB_PORT_STAT_POWER;
251 		if (info->devices[port].status != USB_STATE_NOTATTACHED)
252 			info->devices[port].status = USB_STATE_POWERED;
253 		if (info->ports[port].c_connection)
254 			xenhcd_set_connect_state(info, portnum);
255 	}
256 }
257 
258 /*
259  * ClearPortFeature(PORT_POWER)
260  * SetConfiguration(non-zero)
261  * Power_Source_Off
262  * Over-current
263  */
264 static void xenhcd_rhport_power_off(struct xenhcd_info *info, int portnum)
265 {
266 	int port;
267 
268 	port = portnum - 1;
269 	if (info->ports[port].status & USB_PORT_STAT_POWER) {
270 		info->ports[port].status = 0;
271 		if (info->devices[port].status != USB_STATE_NOTATTACHED)
272 			info->devices[port].status = USB_STATE_ATTACHED;
273 	}
274 }
275 
276 /*
277  * ClearPortFeature(PORT_ENABLE)
278  */
279 static void xenhcd_rhport_disable(struct xenhcd_info *info, int portnum)
280 {
281 	int port;
282 
283 	port = portnum - 1;
284 	info->ports[port].status &= ~USB_PORT_STAT_ENABLE;
285 	info->ports[port].status &= ~USB_PORT_STAT_SUSPEND;
286 	info->ports[port].resuming = false;
287 	if (info->devices[port].status != USB_STATE_NOTATTACHED)
288 		info->devices[port].status = USB_STATE_POWERED;
289 }
290 
291 /*
292  * SetPortFeature(PORT_RESET)
293  */
294 static void xenhcd_rhport_reset(struct xenhcd_info *info, int portnum)
295 {
296 	int port;
297 
298 	port = portnum - 1;
299 	info->ports[port].status &= ~(USB_PORT_STAT_ENABLE |
300 				      USB_PORT_STAT_LOW_SPEED |
301 				      USB_PORT_STAT_HIGH_SPEED);
302 	info->ports[port].status |= USB_PORT_STAT_RESET;
303 
304 	if (info->devices[port].status != USB_STATE_NOTATTACHED)
305 		info->devices[port].status = USB_STATE_ATTACHED;
306 
307 	/* 10msec reset signaling */
308 	info->ports[port].timeout = jiffies + msecs_to_jiffies(10);
309 }
310 
311 #ifdef CONFIG_PM
312 static int xenhcd_bus_suspend(struct usb_hcd *hcd)
313 {
314 	struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
315 	int ret = 0;
316 	int i, ports;
317 
318 	ports = info->rh_numports;
319 
320 	spin_lock_irq(&info->lock);
321 	if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
322 		ret = -ESHUTDOWN;
323 	} else {
324 		/* suspend any active ports */
325 		for (i = 1; i <= ports; i++)
326 			xenhcd_rhport_suspend(info, i);
327 	}
328 	spin_unlock_irq(&info->lock);
329 
330 	del_timer_sync(&info->watchdog);
331 
332 	return ret;
333 }
334 
335 static int xenhcd_bus_resume(struct usb_hcd *hcd)
336 {
337 	struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
338 	int ret = 0;
339 	int i, ports;
340 
341 	ports = info->rh_numports;
342 
343 	spin_lock_irq(&info->lock);
344 	if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
345 		ret = -ESHUTDOWN;
346 	} else {
347 		/* resume any suspended ports */
348 		for (i = 1; i <= ports; i++)
349 			xenhcd_rhport_resume(info, i);
350 	}
351 	spin_unlock_irq(&info->lock);
352 
353 	return ret;
354 }
355 #endif
356 
357 static void xenhcd_hub_descriptor(struct xenhcd_info *info,
358 				  struct usb_hub_descriptor *desc)
359 {
360 	__u16 temp;
361 	int ports = info->rh_numports;
362 
363 	desc->bDescriptorType = 0x29;
364 	desc->bPwrOn2PwrGood = 10; /* EHCI says 20ms max */
365 	desc->bHubContrCurrent = 0;
366 	desc->bNbrPorts = ports;
367 
368 	/* size of DeviceRemovable and PortPwrCtrlMask fields */
369 	temp = 1 + (ports / 8);
370 	desc->bDescLength = 7 + 2 * temp;
371 
372 	/* bitmaps for DeviceRemovable and PortPwrCtrlMask */
373 	memset(&desc->u.hs.DeviceRemovable[0], 0, temp);
374 	memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);
375 
376 	/* per-port over current reporting and no power switching */
377 	temp = 0x000a;
378 	desc->wHubCharacteristics = cpu_to_le16(temp);
379 }
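
/*
 * Example sizing for the descriptor built above: with 8 ports,
 * temp = 1 + 8/8 = 2 bytes per bitmap (bit 0 is reserved, bits 1..8 are the
 * ports), so bDescLength = 7 + 2 * 2 = 11 bytes.  DeviceRemovable is all
 * zeroes (every device removable) and PortPwrCtrlMask is all ones, as
 * required for compatibility with USB 1.0 hubs.
 */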
380 
381 /* port status change mask for hub_status_data */
382 #define PORT_C_MASK	((USB_PORT_STAT_C_CONNECTION |		\
383 			  USB_PORT_STAT_C_ENABLE |		\
384 			  USB_PORT_STAT_C_SUSPEND |		\
385 			  USB_PORT_STAT_C_OVERCURRENT |		\
386 			  USB_PORT_STAT_C_RESET) << 16)
387 
388 /*
389  * See USB 2.0 Spec, 11.12.4 Hub and Port Status Change Bitmap.
390  * If any port status changed, write the change bitmap to buf and return
391  * its length in bytes.
392  * If nothing changed, return 0.
393  */
394 static int xenhcd_hub_status_data(struct usb_hcd *hcd, char *buf)
395 {
396 	struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
397 	int ports;
398 	int i;
399 	unsigned long flags;
400 	int ret;
401 	int changed = 0;
402 
403 	/* initialize the status bitmap to no changes */
404 	ports = info->rh_numports;
405 	ret = 1 + (ports / 8);
406 	memset(buf, 0, ret);
407 
408 	spin_lock_irqsave(&info->lock, flags);
409 
410 	for (i = 0; i < ports; i++) {
411 		/* check status for each port */
412 		if (info->ports[i].status & PORT_C_MASK) {
413 			buf[(i + 1) / 8] |= 1 << (i + 1) % 8;
414 			changed = 1;
415 		}
416 	}
417 
418 	if ((hcd->state == HC_STATE_SUSPENDED) && (changed == 1))
419 		usb_hcd_resume_root_hub(hcd);
420 
421 	spin_unlock_irqrestore(&info->lock, flags);
422 
423 	return changed ? ret : 0;
424 }
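
/*
 * Example: a change on (1-based) port 3 sets bit 3 of buf[0]; bit 0, which
 * would report a change on the hub itself, is never set here.
 */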
425 
426 static int xenhcd_hub_control(struct usb_hcd *hcd, __u16 typeReq, __u16 wValue,
427 			      __u16 wIndex, char *buf, __u16 wLength)
428 {
429 	struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
430 	int ports = info->rh_numports;
431 	unsigned long flags;
432 	int ret = 0;
433 	int i;
434 	int changed = 0;
435 
436 	spin_lock_irqsave(&info->lock, flags);
437 	switch (typeReq) {
438 	case ClearHubFeature:
439 		/* ignore this request */
440 		break;
441 	case ClearPortFeature:
442 		if (!wIndex || wIndex > ports)
443 			goto error;
444 
445 		switch (wValue) {
446 		case USB_PORT_FEAT_SUSPEND:
447 			xenhcd_rhport_resume(info, wIndex);
448 			break;
449 		case USB_PORT_FEAT_POWER:
450 			xenhcd_rhport_power_off(info, wIndex);
451 			break;
452 		case USB_PORT_FEAT_ENABLE:
453 			xenhcd_rhport_disable(info, wIndex);
454 			break;
455 		case USB_PORT_FEAT_C_CONNECTION:
456 			info->ports[wIndex - 1].c_connection = false;
457 			fallthrough;
458 		default:
459 			info->ports[wIndex - 1].status &= ~(1 << wValue);
460 			break;
461 		}
462 		break;
463 	case GetHubDescriptor:
464 		xenhcd_hub_descriptor(info, (struct usb_hub_descriptor *)buf);
465 		break;
466 	case GetHubStatus:
467 		/* local power supply is always good and no over-current condition exists */
468 		*(__le32 *)buf = cpu_to_le32(0);
469 		break;
470 	case GetPortStatus:
471 		if (!wIndex || wIndex > ports)
472 			goto error;
473 
474 		wIndex--;
475 
476 		/* resume completion */
477 		if (info->ports[wIndex].resuming &&
478 		    time_after_eq(jiffies, info->ports[wIndex].timeout)) {
479 			info->ports[wIndex].status |=
480 				USB_PORT_STAT_C_SUSPEND << 16;
481 			info->ports[wIndex].status &= ~USB_PORT_STAT_SUSPEND;
482 		}
483 
484 		/* reset completion */
485 		if ((info->ports[wIndex].status & USB_PORT_STAT_RESET) != 0 &&
486 		    time_after_eq(jiffies, info->ports[wIndex].timeout)) {
487 			info->ports[wIndex].status |=
488 				USB_PORT_STAT_C_RESET << 16;
489 			info->ports[wIndex].status &= ~USB_PORT_STAT_RESET;
490 
491 			if (info->devices[wIndex].status !=
492 			    USB_STATE_NOTATTACHED) {
493 				info->ports[wIndex].status |=
494 					USB_PORT_STAT_ENABLE;
495 				info->devices[wIndex].status =
496 					USB_STATE_DEFAULT;
497 			}
498 
499 			switch (info->devices[wIndex].speed) {
500 			case XENUSB_SPEED_LOW:
501 				info->ports[wIndex].status |=
502 					USB_PORT_STAT_LOW_SPEED;
503 				break;
504 			case XENUSB_SPEED_HIGH:
505 				info->ports[wIndex].status |=
506 					USB_PORT_STAT_HIGH_SPEED;
507 				break;
508 			default:
509 				break;
510 			}
511 		}
512 
513 		*(__le32 *)buf = cpu_to_le32(info->ports[wIndex].status);
514 		break;
515 	case SetPortFeature:
516 		if (!wIndex || wIndex > ports)
517 			goto error;
518 
519 		switch (wValue) {
520 		case USB_PORT_FEAT_POWER:
521 			xenhcd_rhport_power_on(info, wIndex);
522 			break;
523 		case USB_PORT_FEAT_RESET:
524 			xenhcd_rhport_reset(info, wIndex);
525 			break;
526 		case USB_PORT_FEAT_SUSPEND:
527 			xenhcd_rhport_suspend(info, wIndex);
528 			break;
529 		default:
530 			if (info->ports[wIndex-1].status & USB_PORT_STAT_POWER)
531 				info->ports[wIndex-1].status |= (1 << wValue);
532 		}
533 		break;
534 
535 	case SetHubFeature:
536 		/* not supported */
537 	default:
538 error:
539 		ret = -EPIPE;
540 	}
541 	spin_unlock_irqrestore(&info->lock, flags);
542 
543 	/* check status for each port */
544 	for (i = 0; i < ports; i++) {
545 		if (info->ports[i].status & PORT_C_MASK)
546 			changed = 1;
547 	}
548 	if (changed)
549 		usb_hcd_poll_rh_status(hcd);
550 
551 	return ret;
552 }
553 
554 static void xenhcd_free_urb_priv(struct urb_priv *urbp)
555 {
556 	urbp->urb->hcpriv = NULL;
557 	kmem_cache_free(xenhcd_urbp_cachep, urbp);
558 }
559 
560 static inline unsigned int xenhcd_get_id_from_freelist(struct xenhcd_info *info)
561 {
562 	unsigned int free;
563 
564 	free = info->shadow_free;
565 	info->shadow_free = info->shadow[free].req.id;
566 	info->shadow[free].req.id = 0x0fff; /* debug */
567 	return free;
568 }
569 
570 static inline void xenhcd_add_id_to_freelist(struct xenhcd_info *info,
571 					     unsigned int id)
572 {
573 	info->shadow[id].req.id	= info->shadow_free;
574 	info->shadow[id].urb = NULL;
575 	info->shadow_free = id;
576 }
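
/*
 * The shadow free list is a singly linked list threaded through the req.id
 * fields: shadow_free indexes the first unused shadow entry, and each unused
 * entry's req.id indexes the next one (the last entry initially holds the
 * 0x0fff debug marker, see xenhcd_create_hcd()).
 * xenhcd_get_id_from_freelist() pops the head and
 * xenhcd_add_id_to_freelist() pushes an entry back onto the front.
 */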
577 
578 static inline int xenhcd_count_pages(void *addr, int length)
579 {
580 	unsigned long vaddr = (unsigned long)addr;
581 
582 	return PFN_UP(vaddr + length) - PFN_DOWN(vaddr);
583 }
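
/*
 * Example: a 0x2000 byte buffer starting at page offset 0x100 touches three
 * 4 KiB pages (PFN_UP(0x2100) - PFN_DOWN(0x100) = 3), so three grant
 * references are needed for it.
 */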
584 
585 static void xenhcd_gnttab_map(struct xenhcd_info *info, void *addr, int length,
586 			      grant_ref_t *gref_head,
587 			      struct xenusb_request_segment *seg,
588 			      int nr_pages, int flags)
589 {
590 	grant_ref_t ref;
591 	unsigned int offset;
592 	unsigned int len = length;
593 	unsigned int bytes;
594 	int i;
595 
596 	for (i = 0; i < nr_pages; i++) {
597 		offset = offset_in_page(addr);
598 
599 		bytes = PAGE_SIZE - offset;
600 		if (bytes > len)
601 			bytes = len;
602 
603 		ref = gnttab_claim_grant_reference(gref_head);
604 		gnttab_grant_foreign_access_ref(ref, info->xbdev->otherend_id,
605 						virt_to_gfn(addr), flags);
606 		seg[i].gref = ref;
607 		seg[i].offset = (__u16)offset;
608 		seg[i].length = (__u16)bytes;
609 
610 		addr += bytes;
611 		len -= bytes;
612 	}
613 }
614 
615 static __u32 xenhcd_pipe_urb_to_xenusb(__u32 urb_pipe, __u8 port)
616 {
617 	__u32 pipe;
618 
619 	pipe = usb_pipedevice(urb_pipe) << XENUSB_PIPE_DEV_SHIFT;
620 	pipe |= usb_pipeendpoint(urb_pipe) << XENUSB_PIPE_EP_SHIFT;
621 	if (usb_pipein(urb_pipe))
622 		pipe |= XENUSB_PIPE_DIR;
623 	switch (usb_pipetype(urb_pipe)) {
624 	case PIPE_ISOCHRONOUS:
625 		pipe |= XENUSB_PIPE_TYPE_ISOC << XENUSB_PIPE_TYPE_SHIFT;
626 		break;
627 	case PIPE_INTERRUPT:
628 		pipe |= XENUSB_PIPE_TYPE_INT << XENUSB_PIPE_TYPE_SHIFT;
629 		break;
630 	case PIPE_CONTROL:
631 		pipe |= XENUSB_PIPE_TYPE_CTRL << XENUSB_PIPE_TYPE_SHIFT;
632 		break;
633 	case PIPE_BULK:
634 		pipe |= XENUSB_PIPE_TYPE_BULK << XENUSB_PIPE_TYPE_SHIFT;
635 		break;
636 	}
637 	pipe = xenusb_setportnum_pipe(pipe, port);
638 
639 	return pipe;
640 }
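
/*
 * The xenusb pipe value carries the same device address, endpoint, direction
 * and transfer type as the Linux pipe, plus the root hub port number so the
 * backend can associate the transfer with the device attached to that port.
 */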
641 
642 static int xenhcd_map_urb_for_request(struct xenhcd_info *info, struct urb *urb,
643 				      struct xenusb_urb_request *req)
644 {
645 	grant_ref_t gref_head;
646 	int nr_buff_pages = 0;
647 	int nr_isodesc_pages = 0;
648 	int nr_grants = 0;
649 
650 	if (urb->transfer_buffer_length) {
651 		nr_buff_pages = xenhcd_count_pages(urb->transfer_buffer,
652 						urb->transfer_buffer_length);
653 
654 		if (usb_pipeisoc(urb->pipe))
655 			nr_isodesc_pages = xenhcd_count_pages(
656 				&urb->iso_frame_desc[0],
657 				sizeof(struct usb_iso_packet_descriptor) *
658 				urb->number_of_packets);
659 
660 		nr_grants = nr_buff_pages + nr_isodesc_pages;
661 		if (nr_grants > XENUSB_MAX_SEGMENTS_PER_REQUEST) {
662 			pr_err("xenhcd: error: %d grants\n", nr_grants);
663 			return -E2BIG;
664 		}
665 
666 		if (gnttab_alloc_grant_references(nr_grants, &gref_head)) {
667 			pr_err("xenhcd: gnttab_alloc_grant_references() error\n");
668 			return -ENOMEM;
669 		}
670 
671 		xenhcd_gnttab_map(info, urb->transfer_buffer,
672 				  urb->transfer_buffer_length, &gref_head,
673 				  &req->seg[0], nr_buff_pages,
674 				  usb_pipein(urb->pipe) ? 0 : GTF_readonly);
675 	}
676 
677 	req->pipe = xenhcd_pipe_urb_to_xenusb(urb->pipe, urb->dev->portnum);
678 	req->transfer_flags = 0;
679 	if (urb->transfer_flags & URB_SHORT_NOT_OK)
680 		req->transfer_flags |= XENUSB_SHORT_NOT_OK;
681 	req->buffer_length = urb->transfer_buffer_length;
682 	req->nr_buffer_segs = nr_buff_pages;
683 
684 	switch (usb_pipetype(urb->pipe)) {
685 	case PIPE_ISOCHRONOUS:
686 		req->u.isoc.interval = urb->interval;
687 		req->u.isoc.start_frame = urb->start_frame;
688 		req->u.isoc.number_of_packets = urb->number_of_packets;
689 		req->u.isoc.nr_frame_desc_segs = nr_isodesc_pages;
690 
691 		xenhcd_gnttab_map(info, &urb->iso_frame_desc[0],
692 				  sizeof(struct usb_iso_packet_descriptor) *
693 				  urb->number_of_packets,
694 				  &gref_head, &req->seg[nr_buff_pages],
695 				  nr_isodesc_pages, 0);
696 		break;
697 	case PIPE_INTERRUPT:
698 		req->u.intr.interval = urb->interval;
699 		break;
700 	case PIPE_CONTROL:
701 		if (urb->setup_packet)
702 			memcpy(req->u.ctrl, urb->setup_packet, 8);
703 		break;
704 	case PIPE_BULK:
705 		break;
706 	default:
707 		break;
708 	}
709 
710 	if (nr_grants)
711 		gnttab_free_grant_references(gref_head);
712 
713 	return 0;
714 }
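
/*
 * Note on the grant flags used above: for OUT transfers the backend only
 * reads the buffer, so the pages are granted read-only (GTF_readonly); for
 * IN transfers the backend has to write the received data, so they are
 * granted writable.  Isochronous URBs additionally grant the pages holding
 * the usb_iso_packet_descriptor array.
 */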
715 
716 static void xenhcd_gnttab_done(struct xenhcd_info *info, unsigned int id)
717 {
718 	struct usb_shadow *shadow = info->shadow + id;
719 	int nr_segs = 0;
720 	int i;
721 
722 	if (!shadow->in_flight) {
723 		xenhcd_set_error(info, "Illegal request id");
724 		return;
725 	}
726 	shadow->in_flight = false;
727 
728 	nr_segs = shadow->req.nr_buffer_segs;
729 
730 	if (xenusb_pipeisoc(shadow->req.pipe))
731 		nr_segs += shadow->req.u.isoc.nr_frame_desc_segs;
732 
733 	for (i = 0; i < nr_segs; i++) {
734 		if (!gnttab_try_end_foreign_access(shadow->req.seg[i].gref))
735 			xenhcd_set_error(info, "backend didn't release grant");
736 	}
737 
738 	shadow->req.nr_buffer_segs = 0;
739 	shadow->req.u.isoc.nr_frame_desc_segs = 0;
740 }
741 
742 static int xenhcd_translate_status(int status)
743 {
744 	switch (status) {
745 	case XENUSB_STATUS_OK:
746 		return 0;
747 	case XENUSB_STATUS_NODEV:
748 		return -ENODEV;
749 	case XENUSB_STATUS_INVAL:
750 		return -EINVAL;
751 	case XENUSB_STATUS_STALL:
752 		return -EPIPE;
753 	case XENUSB_STATUS_IOERROR:
754 		return -EPROTO;
755 	case XENUSB_STATUS_BABBLE:
756 		return -EOVERFLOW;
757 	default:
758 		return -ESHUTDOWN;
759 	}
760 }
761 
762 static void xenhcd_giveback_urb(struct xenhcd_info *info, struct urb *urb,
763 				int status)
764 {
765 	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
766 	int priv_status = urbp->status;
767 
768 	list_del_init(&urbp->list);
769 	xenhcd_free_urb_priv(urbp);
770 
771 	if (urb->status == -EINPROGRESS)
772 		urb->status = xenhcd_translate_status(status);
773 
774 	spin_unlock(&info->lock);
775 	usb_hcd_giveback_urb(xenhcd_info_to_hcd(info), urb,
776 			     priv_status <= 0 ? priv_status : urb->status);
777 	spin_lock(&info->lock);
778 }
779 
780 static int xenhcd_do_request(struct xenhcd_info *info, struct urb_priv *urbp)
781 {
782 	struct xenusb_urb_request *req;
783 	struct urb *urb = urbp->urb;
784 	unsigned int id;
785 	int notify;
786 	int ret;
787 
788 	id = xenhcd_get_id_from_freelist(info);
789 	req = &info->shadow[id].req;
790 	req->id = id;
791 
792 	if (unlikely(urbp->unlinked)) {
793 		req->u.unlink.unlink_id = urbp->req_id;
794 		req->pipe = xenusb_setunlink_pipe(xenhcd_pipe_urb_to_xenusb(
795 						 urb->pipe, urb->dev->portnum));
796 		urbp->unlink_req_id = id;
797 	} else {
798 		ret = xenhcd_map_urb_for_request(info, urb, req);
799 		if (ret) {
800 			xenhcd_add_id_to_freelist(info, id);
801 			return ret;
802 		}
803 		urbp->req_id = id;
804 	}
805 
806 	req = RING_GET_REQUEST(&info->urb_ring, info->urb_ring.req_prod_pvt);
807 	*req = info->shadow[id].req;
808 
809 	info->urb_ring.req_prod_pvt++;
810 	info->shadow[id].urb = urb;
811 	info->shadow[id].in_flight = true;
812 
813 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->urb_ring, notify);
814 	if (notify)
815 		notify_remote_via_irq(info->irq);
816 
817 	return 0;
818 }
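
/*
 * xenhcd_do_request() covers both request types that reach the ring: a
 * normal submission (the request is built from the URB and its buffer pages
 * are granted) and an unlink, which only carries the req_id of the request
 * to cancel.  In both cases the request is mirrored in the shadow array,
 * which is what the response path uses to release grants and find the URB
 * again.
 */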
819 
820 static void xenhcd_kick_pending_urbs(struct xenhcd_info *info)
821 {
822 	struct urb_priv *urbp;
823 
824 	while (!list_empty(&info->pending_submit_list)) {
825 		if (RING_FULL(&info->urb_ring)) {
826 			xenhcd_timer_action(info, TIMER_RING_WATCHDOG);
827 			return;
828 		}
829 
830 		urbp = list_entry(info->pending_submit_list.next,
831 				  struct urb_priv, list);
832 		if (!xenhcd_do_request(info, urbp))
833 			list_move_tail(&urbp->list, &info->in_progress_list);
834 		else
835 			xenhcd_giveback_urb(info, urbp->urb, -ESHUTDOWN);
836 	}
837 	xenhcd_timer_action_done(info, TIMER_SCAN_PENDING_URBS);
838 }
839 
840 /*
841  * caller must hold info->lock
842  */
843 static void xenhcd_cancel_all_enqueued_urbs(struct xenhcd_info *info)
844 {
845 	struct urb_priv *urbp, *tmp;
846 	int req_id;
847 
848 	list_for_each_entry_safe(urbp, tmp, &info->in_progress_list, list) {
849 		req_id = urbp->req_id;
850 		if (!urbp->unlinked) {
851 			xenhcd_gnttab_done(info, req_id);
852 			if (info->error)
853 				return;
854 			if (urbp->urb->status == -EINPROGRESS)
855 				/* not dequeued */
856 				xenhcd_giveback_urb(info, urbp->urb,
857 						    -ESHUTDOWN);
858 			else	/* dequeued */
859 				xenhcd_giveback_urb(info, urbp->urb,
860 						    urbp->urb->status);
861 		}
862 		info->shadow[req_id].urb = NULL;
863 	}
864 
865 	list_for_each_entry_safe(urbp, tmp, &info->pending_submit_list, list)
866 		xenhcd_giveback_urb(info, urbp->urb, -ESHUTDOWN);
867 }
868 
869 /*
870  * caller must hold info->lock
871  */
872 static void xenhcd_giveback_unlinked_urbs(struct xenhcd_info *info)
873 {
874 	struct urb_priv *urbp, *tmp;
875 
876 	list_for_each_entry_safe(urbp, tmp, &info->giveback_waiting_list, list)
877 		xenhcd_giveback_urb(info, urbp->urb, urbp->urb->status);
878 }
879 
880 static int xenhcd_submit_urb(struct xenhcd_info *info, struct urb_priv *urbp)
881 {
882 	int ret;
883 
884 	if (RING_FULL(&info->urb_ring)) {
885 		list_add_tail(&urbp->list, &info->pending_submit_list);
886 		xenhcd_timer_action(info, TIMER_RING_WATCHDOG);
887 		return 0;
888 	}
889 
890 	if (!list_empty(&info->pending_submit_list)) {
891 		list_add_tail(&urbp->list, &info->pending_submit_list);
892 		xenhcd_timer_action(info, TIMER_SCAN_PENDING_URBS);
893 		return 0;
894 	}
895 
896 	ret = xenhcd_do_request(info, urbp);
897 	if (ret == 0)
898 		list_add_tail(&urbp->list, &info->in_progress_list);
899 
900 	return ret;
901 }
902 
903 static int xenhcd_unlink_urb(struct xenhcd_info *info, struct urb_priv *urbp)
904 {
905 	int ret;
906 
907 	/* already unlinked? */
908 	if (urbp->unlinked)
909 		return -EBUSY;
910 
911 	urbp->unlinked = true;
912 
913 	/* the urb is still in pending_submit queue */
914 	if (urbp->req_id == ~0) {
915 		list_move_tail(&urbp->list, &info->giveback_waiting_list);
916 		xenhcd_timer_action(info, TIMER_SCAN_PENDING_URBS);
917 		return 0;
918 	}
919 
920 	/* send unlink request to backend */
921 	if (RING_FULL(&info->urb_ring)) {
922 		list_move_tail(&urbp->list, &info->pending_unlink_list);
923 		xenhcd_timer_action(info, TIMER_RING_WATCHDOG);
924 		return 0;
925 	}
926 
927 	if (!list_empty(&info->pending_unlink_list)) {
928 		list_move_tail(&urbp->list, &info->pending_unlink_list);
929 		xenhcd_timer_action(info, TIMER_SCAN_PENDING_URBS);
930 		return 0;
931 	}
932 
933 	ret = xenhcd_do_request(info, urbp);
934 	if (ret == 0)
935 		list_move_tail(&urbp->list, &info->in_progress_list);
936 
937 	return ret;
938 }
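
/*
 * Dequeue handling: if the URB never made it to the backend (req_id is still
 * ~0) it is simply moved to the giveback list and completed from the
 * watchdog timer.  Otherwise an unlink request is queued (or parked on
 * pending_unlink_list while the ring is full) and the URB is given back when
 * the response to its original request arrives.
 */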
939 
940 static void xenhcd_res_to_urb(struct xenhcd_info *info,
941 			      struct xenusb_urb_response *res, struct urb *urb)
942 {
943 	if (unlikely(!urb))
944 		return;
945 
946 	if (res->actual_length > urb->transfer_buffer_length)
947 		urb->actual_length = urb->transfer_buffer_length;
948 	else if (res->actual_length < 0)
949 		urb->actual_length = 0;
950 	else
951 		urb->actual_length = res->actual_length;
952 	urb->error_count = res->error_count;
953 	urb->start_frame = res->start_frame;
954 	xenhcd_giveback_urb(info, urb, res->status);
955 }
956 
957 static int xenhcd_urb_request_done(struct xenhcd_info *info,
958 				   unsigned int *eoiflag)
959 {
960 	struct xenusb_urb_response res;
961 	RING_IDX i, rp;
962 	__u16 id;
963 	int more_to_do = 0;
964 	unsigned long flags;
965 
966 	spin_lock_irqsave(&info->lock, flags);
967 
968 	rp = info->urb_ring.sring->rsp_prod;
969 	if (RING_RESPONSE_PROD_OVERFLOW(&info->urb_ring, rp)) {
970 		xenhcd_set_error(info, "Illegal index on urb-ring");
971 		goto err;
972 	}
973 	rmb(); /* ensure we see queued responses up to "rp" */
974 
975 	for (i = info->urb_ring.rsp_cons; i != rp; i++) {
976 		RING_COPY_RESPONSE(&info->urb_ring, i, &res);
977 		id = res.id;
978 		if (id >= XENUSB_URB_RING_SIZE) {
979 			xenhcd_set_error(info, "Illegal data on urb-ring");
980 			goto err;
981 		}
982 
983 		if (likely(xenusb_pipesubmit(info->shadow[id].req.pipe))) {
984 			xenhcd_gnttab_done(info, id);
985 			if (info->error)
986 				goto err;
987 			xenhcd_res_to_urb(info, &res, info->shadow[id].urb);
988 		}
989 
990 		xenhcd_add_id_to_freelist(info, id);
991 
992 		*eoiflag = 0;
993 	}
994 	info->urb_ring.rsp_cons = i;
995 
996 	if (i != info->urb_ring.req_prod_pvt)
997 		RING_FINAL_CHECK_FOR_RESPONSES(&info->urb_ring, more_to_do);
998 	else
999 		info->urb_ring.sring->rsp_event = i + 1;
1000 
1001 	spin_unlock_irqrestore(&info->lock, flags);
1002 
1003 	return more_to_do;
1004 
1005  err:
1006 	spin_unlock_irqrestore(&info->lock, flags);
1007 	return 0;
1008 }
1009 
1010 static int xenhcd_conn_notify(struct xenhcd_info *info, unsigned int *eoiflag)
1011 {
1012 	struct xenusb_conn_response res;
1013 	struct xenusb_conn_request *req;
1014 	RING_IDX rc, rp;
1015 	__u16 id;
1016 	__u8 portnum, speed;
1017 	int more_to_do = 0;
1018 	int notify;
1019 	int port_changed = 0;
1020 	unsigned long flags;
1021 
1022 	spin_lock_irqsave(&info->lock, flags);
1023 
1024 	rc = info->conn_ring.rsp_cons;
1025 	rp = info->conn_ring.sring->rsp_prod;
1026 	if (RING_RESPONSE_PROD_OVERFLOW(&info->conn_ring, rp)) {
1027 		xenhcd_set_error(info, "Illegal index on conn-ring");
1028 		spin_unlock_irqrestore(&info->lock, flags);
1029 		return 0;
1030 	}
1031 	rmb(); /* ensure we see queued responses up to "rp" */
1032 
1033 	while (rc != rp) {
1034 		RING_COPY_RESPONSE(&info->conn_ring, rc, &res);
1035 		id = res.id;
1036 		portnum = res.portnum;
1037 		speed = res.speed;
1038 		info->conn_ring.rsp_cons = ++rc;
1039 
1040 		if (xenhcd_rhport_connect(info, portnum, speed)) {
1041 			xenhcd_set_error(info, "Illegal data on conn-ring");
1042 			spin_unlock_irqrestore(&info->lock, flags);
1043 			return 0;
1044 		}
1045 
1046 		if (info->ports[portnum - 1].c_connection)
1047 			port_changed = 1;
1048 
1049 		barrier();
1050 
1051 		req = RING_GET_REQUEST(&info->conn_ring,
1052 				       info->conn_ring.req_prod_pvt);
1053 		req->id = id;
1054 		info->conn_ring.req_prod_pvt++;
1055 
1056 		*eoiflag = 0;
1057 	}
1058 
1059 	if (rc != info->conn_ring.req_prod_pvt)
1060 		RING_FINAL_CHECK_FOR_RESPONSES(&info->conn_ring, more_to_do);
1061 	else
1062 		info->conn_ring.sring->rsp_event = rc + 1;
1063 
1064 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->conn_ring, notify);
1065 	if (notify)
1066 		notify_remote_via_irq(info->irq);
1067 
1068 	spin_unlock_irqrestore(&info->lock, flags);
1069 
1070 	if (port_changed)
1071 		usb_hcd_poll_rh_status(xenhcd_info_to_hcd(info));
1072 
1073 	return more_to_do;
1074 }
1075 
1076 static irqreturn_t xenhcd_int(int irq, void *dev_id)
1077 {
1078 	struct xenhcd_info *info = (struct xenhcd_info *)dev_id;
1079 	unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
1080 
1081 	if (unlikely(info->error)) {
1082 		xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
1083 		return IRQ_HANDLED;
1084 	}
1085 
1086 	while (xenhcd_urb_request_done(info, &eoiflag) |
1087 	       xenhcd_conn_notify(info, &eoiflag))
1088 		/* Yield point for this unbounded loop. */
1089 		cond_resched();
1090 
1091 	xen_irq_lateeoi(irq, eoiflag);
1092 	return IRQ_HANDLED;
1093 }
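
/*
 * The interrupt is bound with bind_evtchn_to_irq_lateeoi(), so the EOI for
 * the event channel is only signalled here, after both rings have been
 * drained.  Each consumed response clears eoiflag, leaving
 * XEN_EOI_FLAG_SPURIOUS only when the event carried no work at all.
 */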
1094 
1095 static void xenhcd_destroy_rings(struct xenhcd_info *info)
1096 {
1097 	if (info->irq)
1098 		unbind_from_irqhandler(info->irq, info);
1099 	info->irq = 0;
1100 
1101 	if (info->urb_ring_ref != INVALID_GRANT_REF) {
1102 		gnttab_end_foreign_access(info->urb_ring_ref,
1103 					  (unsigned long)info->urb_ring.sring);
1104 		info->urb_ring_ref = INVALID_GRANT_REF;
1105 	}
1106 	info->urb_ring.sring = NULL;
1107 
1108 	if (info->conn_ring_ref != INVALID_GRANT_REF) {
1109 		gnttab_end_foreign_access(info->conn_ring_ref,
1110 					  (unsigned long)info->conn_ring.sring);
1111 		info->conn_ring_ref = INVALID_GRANT_REF;
1112 	}
1113 	info->conn_ring.sring = NULL;
1114 }
1115 
1116 static int xenhcd_setup_rings(struct xenbus_device *dev,
1117 			      struct xenhcd_info *info)
1118 {
1119 	struct xenusb_urb_sring *urb_sring;
1120 	struct xenusb_conn_sring *conn_sring;
1121 	grant_ref_t gref;
1122 	int err;
1123 
1124 	info->urb_ring_ref = INVALID_GRANT_REF;
1125 	info->conn_ring_ref = INVALID_GRANT_REF;
1126 
1127 	urb_sring = (struct xenusb_urb_sring *)get_zeroed_page(
1128 							GFP_NOIO | __GFP_HIGH);
1129 	if (!urb_sring) {
1130 		xenbus_dev_fatal(dev, -ENOMEM, "allocating urb ring");
1131 		return -ENOMEM;
1132 	}
1133 	SHARED_RING_INIT(urb_sring);
1134 	FRONT_RING_INIT(&info->urb_ring, urb_sring, PAGE_SIZE);
1135 
1136 	err = xenbus_grant_ring(dev, urb_sring, 1, &gref);
1137 	if (err < 0) {
1138 		free_page((unsigned long)urb_sring);
1139 		info->urb_ring.sring = NULL;
1140 		goto fail;
1141 	}
1142 	info->urb_ring_ref = gref;
1143 
1144 	conn_sring = (struct xenusb_conn_sring *)get_zeroed_page(
1145 							GFP_NOIO | __GFP_HIGH);
1146 	if (!conn_sring) {
1147 		xenbus_dev_fatal(dev, -ENOMEM, "allocating conn ring");
1148 		err = -ENOMEM;
1149 		goto fail;
1150 	}
1151 	SHARED_RING_INIT(conn_sring);
1152 	FRONT_RING_INIT(&info->conn_ring, conn_sring, PAGE_SIZE);
1153 
1154 	err = xenbus_grant_ring(dev, conn_sring, 1, &gref);
1155 	if (err < 0) {
1156 		free_page((unsigned long)conn_sring);
1157 		info->conn_ring.sring = NULL;
1158 		goto fail;
1159 	}
1160 	info->conn_ring_ref = gref;
1161 
1162 	err = xenbus_alloc_evtchn(dev, &info->evtchn);
1163 	if (err) {
1164 		xenbus_dev_fatal(dev, err, "xenbus_alloc_evtchn");
1165 		goto fail;
1166 	}
1167 
1168 	err = bind_evtchn_to_irq_lateeoi(info->evtchn);
1169 	if (err <= 0) {
1170 		xenbus_dev_fatal(dev, err, "bind_evtchn_to_irq_lateeoi");
1171 		goto fail;
1172 	}
1173 
1174 	info->irq = err;
1175 
1176 	err = request_threaded_irq(info->irq, NULL, xenhcd_int,
1177 				   IRQF_ONESHOT, "xenhcd", info);
1178 	if (err) {
1179 		xenbus_dev_fatal(dev, err, "request_threaded_irq");
1180 		goto free_irq;
1181 	}
1182 
1183 	return 0;
1184 
1185 free_irq:
1186 	unbind_from_irqhandler(info->irq, info);
1187 fail:
1188 	xenhcd_destroy_rings(info);
1189 	return err;
1190 }
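
/*
 * Ring setup, in short: one shared page per ring (urb ring and conn ring) is
 * allocated and granted to the backend, with the grant references and event
 * channel written to Xenstore by xenhcd_talk_to_backend(); a single event
 * channel, bound as a lateeoi threaded interrupt, serves both rings.
 */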
1191 
1192 static int xenhcd_talk_to_backend(struct xenbus_device *dev,
1193 				  struct xenhcd_info *info)
1194 {
1195 	const char *message;
1196 	struct xenbus_transaction xbt;
1197 	int err;
1198 
1199 	err = xenhcd_setup_rings(dev, info);
1200 	if (err)
1201 		return err;
1202 
1203 again:
1204 	err = xenbus_transaction_start(&xbt);
1205 	if (err) {
1206 		xenbus_dev_fatal(dev, err, "starting transaction");
1207 		goto destroy_ring;
1208 	}
1209 
1210 	err = xenbus_printf(xbt, dev->nodename, "urb-ring-ref", "%u",
1211 			    info->urb_ring_ref);
1212 	if (err) {
1213 		message = "writing urb-ring-ref";
1214 		goto abort_transaction;
1215 	}
1216 
1217 	err = xenbus_printf(xbt, dev->nodename, "conn-ring-ref", "%u",
1218 			    info->conn_ring_ref);
1219 	if (err) {
1220 		message = "writing conn-ring-ref";
1221 		goto abort_transaction;
1222 	}
1223 
1224 	err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
1225 			    info->evtchn);
1226 	if (err) {
1227 		message = "writing event-channel";
1228 		goto abort_transaction;
1229 	}
1230 
1231 	err = xenbus_transaction_end(xbt, 0);
1232 	if (err) {
1233 		if (err == -EAGAIN)
1234 			goto again;
1235 		xenbus_dev_fatal(dev, err, "completing transaction");
1236 		goto destroy_ring;
1237 	}
1238 
1239 	return 0;
1240 
1241 abort_transaction:
1242 	xenbus_transaction_end(xbt, 1);
1243 	xenbus_dev_fatal(dev, err, "%s", message);
1244 
1245 destroy_ring:
1246 	xenhcd_destroy_rings(info);
1247 
1248 	return err;
1249 }
1250 
1251 static int xenhcd_connect(struct xenbus_device *dev)
1252 {
1253 	struct xenhcd_info *info = dev_get_drvdata(&dev->dev);
1254 	struct xenusb_conn_request *req;
1255 	int idx, err;
1256 	int notify;
1257 	char name[TASK_COMM_LEN];
1258 	struct usb_hcd *hcd;
1259 
1260 	hcd = xenhcd_info_to_hcd(info);
1261 	snprintf(name, TASK_COMM_LEN, "xenhcd.%d", hcd->self.busnum);
1262 
1263 	err = xenhcd_talk_to_backend(dev, info);
1264 	if (err)
1265 		return err;
1266 
1267 	/* prepare ring for hotplug notification */
1268 	for (idx = 0; idx < XENUSB_CONN_RING_SIZE; idx++) {
1269 		req = RING_GET_REQUEST(&info->conn_ring, idx);
1270 		req->id = idx;
1271 	}
1272 	info->conn_ring.req_prod_pvt = idx;
1273 
1274 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->conn_ring, notify);
1275 	if (notify)
1276 		notify_remote_via_irq(info->irq);
1277 
1278 	return 0;
1279 }
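
/*
 * The conn ring is pre-filled with one request per slot, so the backend
 * always has a free request to answer with a hotplug notification;
 * xenhcd_conn_notify() re-queues each id after consuming the response.
 */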
1280 
1281 static void xenhcd_disconnect(struct xenbus_device *dev)
1282 {
1283 	struct xenhcd_info *info = dev_get_drvdata(&dev->dev);
1284 	struct usb_hcd *hcd = xenhcd_info_to_hcd(info);
1285 
1286 	usb_remove_hcd(hcd);
1287 	xenbus_frontend_closed(dev);
1288 }
1289 
1290 static void xenhcd_watchdog(struct timer_list *timer)
1291 {
1292 	struct xenhcd_info *info = from_timer(info, timer, watchdog);
1293 	unsigned long flags;
1294 
1295 	spin_lock_irqsave(&info->lock, flags);
1296 	if (likely(HC_IS_RUNNING(xenhcd_info_to_hcd(info)->state))) {
1297 		xenhcd_timer_action_done(info, TIMER_RING_WATCHDOG);
1298 		xenhcd_giveback_unlinked_urbs(info);
1299 		xenhcd_kick_pending_urbs(info);
1300 	}
1301 	spin_unlock_irqrestore(&info->lock, flags);
1302 }
1303 
1304 /*
1305  * one-time HC init
1306  */
1307 static int xenhcd_setup(struct usb_hcd *hcd)
1308 {
1309 	struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
1310 
1311 	spin_lock_init(&info->lock);
1312 	INIT_LIST_HEAD(&info->pending_submit_list);
1313 	INIT_LIST_HEAD(&info->pending_unlink_list);
1314 	INIT_LIST_HEAD(&info->in_progress_list);
1315 	INIT_LIST_HEAD(&info->giveback_waiting_list);
1316 	timer_setup(&info->watchdog, xenhcd_watchdog, 0);
1317 
1318 	hcd->has_tt = (hcd->driver->flags & HCD_MASK) != HCD_USB11;
1319 
1320 	return 0;
1321 }
1322 
1323 /*
1324  * start HC running
1325  */
1326 static int xenhcd_run(struct usb_hcd *hcd)
1327 {
1328 	hcd->uses_new_polling = 1;
1329 	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
1330 	hcd->state = HC_STATE_RUNNING;
1331 	return 0;
1332 }
1333 
1334 /*
1335  * stop running HC
1336  */
1337 static void xenhcd_stop(struct usb_hcd *hcd)
1338 {
1339 	struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
1340 
1341 	del_timer_sync(&info->watchdog);
1342 	spin_lock_irq(&info->lock);
1343 	/* cancel all urbs */
1344 	hcd->state = HC_STATE_HALT;
1345 	xenhcd_cancel_all_enqueued_urbs(info);
1346 	xenhcd_giveback_unlinked_urbs(info);
1347 	spin_unlock_irq(&info->lock);
1348 }
1349 
1350 /*
1351  * called as .urb_enqueue()
1352  * a non-error return is a promise to give the urb back later
1353  */
1354 static int xenhcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
1355 			      gfp_t mem_flags)
1356 {
1357 	struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
1358 	struct urb_priv *urbp;
1359 	unsigned long flags;
1360 	int ret;
1361 
1362 	if (unlikely(info->error))
1363 		return -ESHUTDOWN;
1364 
1365 	urbp = kmem_cache_zalloc(xenhcd_urbp_cachep, mem_flags);
1366 	if (!urbp)
1367 		return -ENOMEM;
1368 
1369 	spin_lock_irqsave(&info->lock, flags);
1370 
1371 	urbp->urb = urb;
1372 	urb->hcpriv = urbp;
1373 	urbp->req_id = ~0;
1374 	urbp->unlink_req_id = ~0;
1375 	INIT_LIST_HEAD(&urbp->list);
1376 	urbp->status = 1;
1377 	urb->unlinked = false;
1378 
1379 	ret = xenhcd_submit_urb(info, urbp);
1380 
1381 	if (ret)
1382 		xenhcd_free_urb_priv(urbp);
1383 
1384 	spin_unlock_irqrestore(&info->lock, flags);
1385 
1386 	return ret;
1387 }
1388 
1389 /*
1390  * called as .urb_dequeue()
1391  */
1392 static int xenhcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1393 {
1394 	struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
1395 	struct urb_priv *urbp;
1396 	unsigned long flags;
1397 	int ret = 0;
1398 
1399 	spin_lock_irqsave(&info->lock, flags);
1400 
1401 	urbp = urb->hcpriv;
1402 	if (urbp) {
1403 		urbp->status = status;
1404 		ret = xenhcd_unlink_urb(info, urbp);
1405 	}
1406 
1407 	spin_unlock_irqrestore(&info->lock, flags);
1408 
1409 	return ret;
1410 }
1411 
1412 /*
1413  * called from usb_get_current_frame_number(),
1414  * but almost no drivers actually use this function.
1415  */
1416 static int xenhcd_get_frame(struct usb_hcd *hcd)
1417 {
1418 	/* returning 0 signals an error, but in practice that causes no problem :-) */
1419 	return 0;
1420 }
1421 
1422 static struct hc_driver xenhcd_usb20_hc_driver = {
1423 	.description = "xen-hcd",
1424 	.product_desc = "Xen USB2.0 Virtual Host Controller",
1425 	.hcd_priv_size = sizeof(struct xenhcd_info),
1426 	.flags = HCD_USB2,
1427 
1428 	/* basic HC lifecycle operations */
1429 	.reset = xenhcd_setup,
1430 	.start = xenhcd_run,
1431 	.stop = xenhcd_stop,
1432 
1433 	/* managing urb I/O */
1434 	.urb_enqueue = xenhcd_urb_enqueue,
1435 	.urb_dequeue = xenhcd_urb_dequeue,
1436 	.get_frame_number = xenhcd_get_frame,
1437 
1438 	/* root hub operations */
1439 	.hub_status_data = xenhcd_hub_status_data,
1440 	.hub_control = xenhcd_hub_control,
1441 #ifdef CONFIG_PM
1442 	.bus_suspend = xenhcd_bus_suspend,
1443 	.bus_resume = xenhcd_bus_resume,
1444 #endif
1445 };
1446 
1447 static struct hc_driver xenhcd_usb11_hc_driver = {
1448 	.description = "xen-hcd",
1449 	.product_desc = "Xen USB1.1 Virtual Host Controller",
1450 	.hcd_priv_size = sizeof(struct xenhcd_info),
1451 	.flags = HCD_USB11,
1452 
1453 	/* basic HC lifecycle operations */
1454 	.reset = xenhcd_setup,
1455 	.start = xenhcd_run,
1456 	.stop = xenhcd_stop,
1457 
1458 	/* managing urb I/O */
1459 	.urb_enqueue = xenhcd_urb_enqueue,
1460 	.urb_dequeue = xenhcd_urb_dequeue,
1461 	.get_frame_number = xenhcd_get_frame,
1462 
1463 	/* root hub operations */
1464 	.hub_status_data = xenhcd_hub_status_data,
1465 	.hub_control = xenhcd_hub_control,
1466 #ifdef CONFIG_PM
1467 	.bus_suspend = xenhcd_bus_suspend,
1468 	.bus_resume = xenhcd_bus_resume,
1469 #endif
1470 };
1471 
1472 static struct usb_hcd *xenhcd_create_hcd(struct xenbus_device *dev)
1473 {
1474 	int i;
1475 	int err = 0;
1476 	int num_ports;
1477 	int usb_ver;
1478 	struct usb_hcd *hcd = NULL;
1479 	struct xenhcd_info *info;
1480 
1481 	err = xenbus_scanf(XBT_NIL, dev->otherend, "num-ports", "%d",
1482 			   &num_ports);
1483 	if (err != 1) {
1484 		xenbus_dev_fatal(dev, err, "reading num-ports");
1485 		return ERR_PTR(-EINVAL);
1486 	}
1487 	if (num_ports < 1 || num_ports > XENUSB_MAX_PORTNR) {
1488 		xenbus_dev_fatal(dev, err, "invalid num-ports");
1489 		return ERR_PTR(-EINVAL);
1490 	}
1491 
1492 	err = xenbus_scanf(XBT_NIL, dev->otherend, "usb-ver", "%d", &usb_ver);
1493 	if (err != 1) {
1494 		xenbus_dev_fatal(dev, err, "reading usb-ver");
1495 		return ERR_PTR(-EINVAL);
1496 	}
1497 	switch (usb_ver) {
1498 	case XENUSB_VER_USB11:
1499 		hcd = usb_create_hcd(&xenhcd_usb11_hc_driver, &dev->dev,
1500 				     dev_name(&dev->dev));
1501 		break;
1502 	case XENUSB_VER_USB20:
1503 		hcd = usb_create_hcd(&xenhcd_usb20_hc_driver, &dev->dev,
1504 				     dev_name(&dev->dev));
1505 		break;
1506 	default:
1507 		xenbus_dev_fatal(dev, err, "invalid usb-ver");
1508 		return ERR_PTR(-EINVAL);
1509 	}
1510 	if (!hcd) {
1511 		xenbus_dev_fatal(dev, err,
1512 				 "fail to allocate USB host controller");
1513 		return ERR_PTR(-ENOMEM);
1514 	}
1515 
1516 	info = xenhcd_hcd_to_info(hcd);
1517 	info->xbdev = dev;
1518 	info->rh_numports = num_ports;
1519 
1520 	for (i = 0; i < XENUSB_URB_RING_SIZE; i++) {
1521 		info->shadow[i].req.id = i + 1;
1522 		info->shadow[i].urb = NULL;
1523 		info->shadow[i].in_flight = false;
1524 	}
1525 	info->shadow[XENUSB_URB_RING_SIZE - 1].req.id = 0x0fff;
1526 
1527 	return hcd;
1528 }
1529 
1530 static void xenhcd_backend_changed(struct xenbus_device *dev,
1531 				   enum xenbus_state backend_state)
1532 {
1533 	switch (backend_state) {
1534 	case XenbusStateInitialising:
1535 	case XenbusStateReconfiguring:
1536 	case XenbusStateReconfigured:
1537 	case XenbusStateUnknown:
1538 		break;
1539 
1540 	case XenbusStateInitWait:
1541 	case XenbusStateInitialised:
1542 	case XenbusStateConnected:
1543 		if (dev->state != XenbusStateInitialising)
1544 			break;
1545 		if (!xenhcd_connect(dev))
1546 			xenbus_switch_state(dev, XenbusStateConnected);
1547 		break;
1548 
1549 	case XenbusStateClosed:
1550 		if (dev->state == XenbusStateClosed)
1551 			break;
1552 		fallthrough;	/* Missed the backend's Closing state. */
1553 	case XenbusStateClosing:
1554 		xenhcd_disconnect(dev);
1555 		break;
1556 
1557 	default:
1558 		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
1559 				 backend_state);
1560 		break;
1561 	}
1562 }
1563 
1564 static int xenhcd_remove(struct xenbus_device *dev)
1565 {
1566 	struct xenhcd_info *info = dev_get_drvdata(&dev->dev);
1567 	struct usb_hcd *hcd = xenhcd_info_to_hcd(info);
1568 
1569 	xenhcd_destroy_rings(info);
1570 	usb_put_hcd(hcd);
1571 
1572 	return 0;
1573 }
1574 
1575 static int xenhcd_probe(struct xenbus_device *dev,
1576 			const struct xenbus_device_id *id)
1577 {
1578 	int err;
1579 	struct usb_hcd *hcd;
1580 	struct xenhcd_info *info;
1581 
1582 	if (usb_disabled())
1583 		return -ENODEV;
1584 
1585 	hcd = xenhcd_create_hcd(dev);
1586 	if (IS_ERR(hcd)) {
1587 		err = PTR_ERR(hcd);
1588 		xenbus_dev_fatal(dev, err,
1589 				 "fail to create usb host controller");
1590 		return err;
1591 	}
1592 
1593 	info = xenhcd_hcd_to_info(hcd);
1594 	dev_set_drvdata(&dev->dev, info);
1595 
1596 	err = usb_add_hcd(hcd, 0, 0);
1597 	if (err) {
1598 		xenbus_dev_fatal(dev, err, "fail to add USB host controller");
1599 		usb_put_hcd(hcd);
1600 		dev_set_drvdata(&dev->dev, NULL);
1601 	}
1602 
1603 	return err;
1604 }
1605 
1606 static const struct xenbus_device_id xenhcd_ids[] = {
1607 	{ "vusb" },
1608 	{ "" },
1609 };
1610 
1611 static struct xenbus_driver xenhcd_driver = {
1612 	.ids			= xenhcd_ids,
1613 	.probe			= xenhcd_probe,
1614 	.otherend_changed	= xenhcd_backend_changed,
1615 	.remove			= xenhcd_remove,
1616 };
1617 
1618 static int __init xenhcd_init(void)
1619 {
1620 	if (!xen_domain())
1621 		return -ENODEV;
1622 
1623 	xenhcd_urbp_cachep = kmem_cache_create("xenhcd_urb_priv",
1624 					sizeof(struct urb_priv), 0, 0, NULL);
1625 	if (!xenhcd_urbp_cachep) {
1626 		pr_err("xenhcd failed to create kmem cache\n");
1627 		return -ENOMEM;
1628 	}
1629 
1630 	return xenbus_register_frontend(&xenhcd_driver);
1631 }
1632 module_init(xenhcd_init);
1633 
1634 static void __exit xenhcd_exit(void)
1635 {
1636 	kmem_cache_destroy(xenhcd_urbp_cachep);
1637 	xenbus_unregister_driver(&xenhcd_driver);
1638 }
1639 module_exit(xenhcd_exit);
1640 
1641 MODULE_ALIAS("xen:vusb");
1642 MODULE_AUTHOR("Juergen Gross <jgross@suse.com>");
1643 MODULE_DESCRIPTION("Xen USB Virtual Host Controller driver (xen-hcd)");
1644 MODULE_LICENSE("Dual BSD/GPL");
1645