// SPDX-License-Identifier: GPL-2.0+
/*
 * aspeed-vhub -- Driver for Aspeed SoC "vHub" USB gadget
 *
 * ep0.c - Endpoint 0 handling
 *
 * Copyright 2017 IBM Corporation
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/prefetch.h>
#include <linux/clk.h>
#include <linux/usb/gadget.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/dma-mapping.h>

#include "vhub.h"

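/*
 * ast_vhub_reply() - Send the data phase of an EP0 IN reply.
 *
 * Called with the vhub lock held while processing a SETUP packet. The
 * data (at most one EP0 max packet) is queued on the internal EP0
 * request; the lock is briefly dropped around the queue call, so this
 * must be the last action of the SETUP handler. Returns std_req_data
 * on success or std_req_stall on error.
 */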
int ast_vhub_reply(struct ast_vhub_ep *ep, char *ptr, int len)
{
	struct usb_request *req = &ep->ep0.req.req;
	int rc;

	if (WARN_ON(ep->d_idx != 0))
		return std_req_stall;
	if (WARN_ON(!ep->ep0.dir_in))
		return std_req_stall;
	if (WARN_ON(len > AST_VHUB_EP0_MAX_PACKET))
		return std_req_stall;
	if (WARN_ON(req->status == -EINPROGRESS))
		return std_req_stall;

	req->buf = ptr;
	req->length = len;
	req->complete = NULL;
	req->zero = true;

	/*
	 * Call internal queue directly after dropping the lock. This is
	 * safe to do as the reply is always the last thing done when
	 * processing a SETUP packet, usually as a tail call.
	 */
	spin_unlock(&ep->vhub->lock);
	if (ep->ep.ops->queue(&ep->ep, req, GFP_ATOMIC))
		rc = std_req_stall;
	else
		rc = std_req_data;
	spin_lock(&ep->vhub->lock);
	return rc;
}

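/*
 * __ast_vhub_simple_reply() - Build a short EP0 reply from byte arguments.
 *
 * The variadic byte values are written straight into the endpoint's
 * DMA buffer, then ast_vhub_reply() is called with a NULL pointer to
 * signal that the data is already in place.
 */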
int __ast_vhub_simple_reply(struct ast_vhub_ep *ep, int len, ...)
{
	u8 *buffer = ep->buf;
	unsigned int i;
	va_list args;

	va_start(args, len);

	/* Copy data directly into EP buffer */
	for (i = 0; i < len; i++)
		buffer[i] = va_arg(args, int);
	va_end(args);

	/* req->buf NULL means data is already there */
	return ast_vhub_reply(ep, NULL, len);
}

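/*
 * ast_vhub_ep0_handle_setup() - Decode and dispatch a SETUP packet.
 *
 * Reads the SETUP packet latched by the hardware, resets the EP0
 * software state machine, then routes the request: standard and class
 * requests aimed at the vHub itself are handled internally, standard
 * device requests go to ast_vhub_std_dev_request(), and anything left
 * over is forwarded to the gadget driver's setup() callback.
 */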
void ast_vhub_ep0_handle_setup(struct ast_vhub_ep *ep)
{
	struct usb_ctrlrequest crq;
	enum std_req_rc std_req_rc;
	int rc = -ENODEV;

	if (WARN_ON(ep->d_idx != 0))
		return;

	/*
	 * Grab the setup packet from the chip and byteswap
	 * interesting fields
	 */
	memcpy_fromio(&crq, ep->ep0.setup, sizeof(crq));

	EPDBG(ep, "SETUP packet %02x/%02x/%04x/%04x/%04x [%s] st=%d\n",
	      crq.bRequestType, crq.bRequest,
	      le16_to_cpu(crq.wValue),
	      le16_to_cpu(crq.wIndex),
	      le16_to_cpu(crq.wLength),
	      (crq.bRequestType & USB_DIR_IN) ? "in" : "out",
	      ep->ep0.state);

	/*
	 * Check our state, cancel pending requests if needed
	 *
	 * Note: Under some circumstances, we can get a new setup
	 * packet while waiting for the stall ack, just accept it.
	 *
	 * In any case, a SETUP packet in wrong state should have
	 * reset the HW state machine, so let's just log, nuke
	 * requests, move on.
	 */
	if (ep->ep0.state != ep0_state_token &&
	    ep->ep0.state != ep0_state_stall) {
		EPDBG(ep, "wrong state\n");
		ast_vhub_nuke(ep, -EIO);
	}

	/* Calculate next state for EP0 */
	ep->ep0.state = ep0_state_data;
	ep->ep0.dir_in = !!(crq.bRequestType & USB_DIR_IN);

	/* If this is the vHub, we handle requests differently */
	std_req_rc = std_req_driver;
	if (ep->dev == NULL) {
		if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
			std_req_rc = ast_vhub_std_hub_request(ep, &crq);
		else if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS)
			std_req_rc = ast_vhub_class_hub_request(ep, &crq);
		else
			std_req_rc = std_req_stall;
	} else if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		std_req_rc = ast_vhub_std_dev_request(ep, &crq);
	}

	/* Act upon result */
	switch (std_req_rc) {
	case std_req_complete:
		goto complete;
	case std_req_stall:
		goto stall;
	case std_req_driver:
		break;
	case std_req_data:
		return;
	}

	/* Pass request up to the gadget driver */
	if (WARN_ON(!ep->dev))
		goto stall;
	if (ep->dev->driver) {
		EPDBG(ep, "forwarding to gadget...\n");
		spin_unlock(&ep->vhub->lock);
		rc = ep->dev->driver->setup(&ep->dev->gadget, &crq);
		spin_lock(&ep->vhub->lock);
		EPDBG(ep, "driver returned %d\n", rc);
	} else {
		EPDBG(ep, "no gadget for request !\n");
	}
	if (rc >= 0)
		return;

 stall:
	EPDBG(ep, "stalling\n");
	writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
	ep->ep0.state = ep0_state_stall;
	ep->ep0.dir_in = false;
	return;

 complete:
	EPVDBG(ep, "sending [in] status with no data\n");
	writel(VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
	ep->ep0.state = ep0_state_status;
	ep->ep0.dir_in = false;
}

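/*
 * ast_vhub_ep0_do_send() - Push the next IN chunk of the data phase.
 *
 * Copies up to one max-packet worth of request data into the EP0
 * buffer and arms the transmitter. Once the final packet has been
 * acked (last_desc is set), the request is completed and the RX
 * buffer is armed for the host's zero-length OUT status.
 */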
static void ast_vhub_ep0_do_send(struct ast_vhub_ep *ep,
				 struct ast_vhub_req *req)
{
	unsigned int chunk;
	u32 reg;

	/*
	 * If this is a 0-length request, it's the gadget trying to
	 * send a status on our behalf. We take it from here.
	 */
	if (req->req.length == 0)
		req->last_desc = 1;

	/* Are we done ? Complete request, otherwise wait for next interrupt */
	if (req->last_desc >= 0) {
		EPVDBG(ep, "complete send %d/%d\n",
		       req->req.actual, req->req.length);
		ep->ep0.state = ep0_state_status;
		writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat);
		ast_vhub_done(ep, req, 0);
		return;
	}

	/*
	 * Next chunk cropped to max packet size. Also check if this
	 * is the last packet
	 */
	chunk = req->req.length - req->req.actual;
	if (chunk > ep->ep.maxpacket)
		chunk = ep->ep.maxpacket;
	else if ((chunk < ep->ep.maxpacket) || !req->req.zero)
		req->last_desc = 1;

	EPVDBG(ep, "send chunk=%d last=%d, req->act=%d mp=%d\n",
	       chunk, req->last_desc, req->req.actual, ep->ep.maxpacket);

	/*
	 * Copy data if any (internal requests already have data
	 * in the EP buffer)
	 */
	if (chunk && req->req.buf)
		memcpy(ep->buf, req->req.buf + req->req.actual, chunk);

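	/*
	 * Make sure the chunk written above is visible to the vHub DMA
	 * engine before the send is triggered below; vhub_dma_workaround()
	 * in vhub.h documents the hardware issue this addresses.
	 */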
	vhub_dma_workaround(ep->buf);

	/* Remember chunk size and trigger send */
	reg = VHUB_EP0_SET_TX_LEN(chunk);
	writel(reg, ep->ep0.ctlstat);
	writel(reg | VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
	req->req.actual += chunk;
}

static void ast_vhub_ep0_rx_prime(struct ast_vhub_ep *ep)
{
	EPVDBG(ep, "rx prime\n");

	/* Prime endpoint for receiving data */
	writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat);
}

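/*
 * ast_vhub_ep0_do_receive() - Consume one OUT chunk of the data phase.
 *
 * Copies the received bytes from the EP0 buffer into the request,
 * clamping overflows and flagging them with -EOVERFLOW. A short packet,
 * or reaching the expected length, ends the data phase: the TX buffer
 * is then armed for the zero-length IN status. Otherwise the receiver
 * is re-primed for the next packet.
 */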
static void ast_vhub_ep0_do_receive(struct ast_vhub_ep *ep,
				    struct ast_vhub_req *req,
				    unsigned int len)
{
	unsigned int remain;
	int rc = 0;

	/* We are receiving... grab request */
	remain = req->req.length - req->req.actual;

	EPVDBG(ep, "receive got=%d remain=%d\n", len, remain);

	/* Are we getting more than asked ? */
	if (len > remain) {
		EPDBG(ep, "receiving too much (ovf: %d) !\n",
		      len - remain);
		len = remain;
		rc = -EOVERFLOW;
	}

	/* The hardware can return a wrong data length, use the expected one */
	if (len < ep->ep.maxpacket && len != remain) {
		EPDBG(ep, "using expected data len instead\n");
		len = remain;
	}

	if (len && req->req.buf)
		memcpy(req->req.buf + req->req.actual, ep->buf, len);
	req->req.actual += len;

	/* Done ? */
	if (len < ep->ep.maxpacket || len == remain) {
		ep->ep0.state = ep0_state_status;
		writel(VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
		ast_vhub_done(ep, req, rc);
	} else {
		ast_vhub_ep0_rx_prime(ep);
	}
}

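/*
 * ast_vhub_ep0_handle_ack() - EP0 ack interrupt handler.
 *
 * Called on TX or RX ack interrupts; @in_ack tells which one fired.
 * The action depends on the current EP0 state: in the data phase the
 * transfer is moved along, in the status phase the direction is
 * verified, and anything unexpected stalls the endpoint and nukes
 * stale requests.
 */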
void ast_vhub_ep0_handle_ack(struct ast_vhub_ep *ep, bool in_ack)
{
	struct ast_vhub_req *req;
	struct ast_vhub *vhub = ep->vhub;
	struct device *dev = &vhub->pdev->dev;
	bool stall = false;
	u32 stat;

	/* Read EP0 status */
	stat = readl(ep->ep0.ctlstat);

	/* Grab current request if any */
	req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);

	EPVDBG(ep, "ACK status=%08x,state=%d is_in=%d in_ack=%d req=%p\n",
	       stat, ep->ep0.state, ep->ep0.dir_in, in_ack, req);

	switch (ep->ep0.state) {
	case ep0_state_token:
		/* There should be no request queued in that state... */
		if (req) {
			dev_warn(dev, "request present while in TOKEN state\n");
			ast_vhub_nuke(ep, -EINVAL);
		}
		dev_warn(dev, "ack while in TOKEN state\n");
		stall = true;
		break;
	case ep0_state_data:
		/* Check the state bits corresponding to our direction */
		if ((ep->ep0.dir_in && (stat & VHUB_EP0_TX_BUFF_RDY)) ||
		    (!ep->ep0.dir_in && (stat & VHUB_EP0_RX_BUFF_RDY)) ||
		    (ep->ep0.dir_in != in_ack)) {
			/* In that case, ignore interrupt */
			dev_warn(dev, "irq state mismatch\n");
			break;
		}
		/*
		 * We are in data phase and there's no request, something is
		 * wrong, stall
		 */
		if (!req) {
			dev_warn(dev, "data phase, no request\n");
			stall = true;
			break;
		}

		/* We have a request, handle data transfers */
		if (ep->ep0.dir_in)
			ast_vhub_ep0_do_send(ep, req);
		else
			ast_vhub_ep0_do_receive(ep, req, VHUB_EP0_RX_LEN(stat));
		return;
	case ep0_state_status:
		/* Nuke stale requests */
		if (req) {
			dev_warn(dev, "request present while in STATUS state\n");
			ast_vhub_nuke(ep, -EINVAL);
		}

		/*
		 * If the status phase completes with the wrong ack, stall
		 * the endpoint just in case, to abort whatever the host
		 * was doing.
		 */
		if (ep->ep0.dir_in == in_ack) {
			dev_warn(dev, "status direction mismatch\n");
			stall = true;
		}
		break;
	case ep0_state_stall:
		/*
		 * There shouldn't be any request left, but nuke just in case
		 * otherwise the stale request will block subsequent ones
		 */
		ast_vhub_nuke(ep, -EIO);
		break;
	}

	/* Reset to token state or stall */
	if (stall) {
		writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
		ep->ep0.state = ep0_state_stall;
	} else {
		ep->ep0.state = ep0_state_token;
	}
}

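/*
 * ast_vhub_ep0_queue() - usb_ep_ops queue() callback for endpoint 0.
 *
 * EP0 carries a single request at a time, and only while a SETUP is
 * being processed (not in TOKEN or STALL state). IN requests start
 * transmitting immediately, zero-length OUT requests are treated as
 * a status handshake, and other OUT requests prime the receiver.
 */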
static int ast_vhub_ep0_queue(struct usb_ep *u_ep, struct usb_request *u_req,
			      gfp_t gfp_flags)
{
	struct ast_vhub_req *req = to_ast_req(u_req);
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub *vhub = ep->vhub;
	struct device *dev = &vhub->pdev->dev;
	unsigned long flags;

	/* Paranoid checks */
	if (!u_req || (!u_req->complete && !req->internal)) {
		dev_warn(dev, "Bogus EP0 request ! u_req=%p\n", u_req);
		if (u_req) {
			dev_warn(dev, "complete=%p internal=%d\n",
				 u_req->complete, req->internal);
		}
		return -EINVAL;
	}

	/* Not endpoint 0 ? */
	if (WARN_ON(ep->d_idx != 0))
		return -EINVAL;

	/* Disabled device */
	if (ep->dev && !ep->dev->enabled)
		return -ESHUTDOWN;

	/* Data, no buffer and not internal ? */
	if (u_req->length && !u_req->buf && !req->internal) {
		dev_warn(dev, "Request with no buffer !\n");
		return -EINVAL;
	}

	EPVDBG(ep, "enqueue req @%p\n", req);
	EPVDBG(ep, "  l=%d zero=%d noshort=%d is_in=%d\n",
	       u_req->length, u_req->zero,
	       u_req->short_not_ok, ep->ep0.dir_in);

	/* Initialize request progress fields */
	u_req->status = -EINPROGRESS;
	u_req->actual = 0;
	req->last_desc = -1;
	req->active = false;

	spin_lock_irqsave(&vhub->lock, flags);

	/* EP0 can only support a single request at a time */
	if (!list_empty(&ep->queue) ||
	    ep->ep0.state == ep0_state_token ||
	    ep->ep0.state == ep0_state_stall) {
		dev_warn(dev, "EP0: Request in wrong state\n");
		EPVDBG(ep, "EP0: list_empty=%d state=%d\n",
		       list_empty(&ep->queue), ep->ep0.state);
		spin_unlock_irqrestore(&vhub->lock, flags);
		return -EBUSY;
	}

	/* Add request to list and kick processing if empty */
	list_add_tail(&req->queue, &ep->queue);

	if (ep->ep0.dir_in) {
		/* IN request, send data */
		ast_vhub_ep0_do_send(ep, req);
	} else if (u_req->length == 0) {
		/* 0-len request, send completion as rx */
		EPVDBG(ep, "0-length rx completion\n");
		ep->ep0.state = ep0_state_status;
		writel(VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
		ast_vhub_done(ep, req, 0);
	} else {
		/* OUT request, start receiver */
		ast_vhub_ep0_rx_prime(ep);
	}

	spin_unlock_irqrestore(&vhub->lock, flags);

	return 0;
}

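/*
 * ast_vhub_ep0_dequeue() - usb_ep_ops dequeue() callback for endpoint 0.
 *
 * Completes the request with -ECONNRESET if it is the one currently
 * queued, then stalls the endpoint so the hardware state machine is
 * brought back to a known state.
 */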
static int ast_vhub_ep0_dequeue(struct usb_ep *u_ep, struct usb_request *u_req)
{
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub *vhub = ep->vhub;
	struct ast_vhub_req *req;
	unsigned long flags;
	int rc = -EINVAL;

	spin_lock_irqsave(&vhub->lock, flags);

	/* Only one request can be in the queue */
	req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);

	/* Is it ours ? */
	if (req && u_req == &req->req) {
		EPVDBG(ep, "dequeue req @%p\n", req);

		/*
		 * We don't have to deal with "active" as all
		 * DMAs go to the EP buffers, not the request.
		 */
		ast_vhub_done(ep, req, -ECONNRESET);

		/* We do stall the EP to clean things up in HW */
		writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
		ep->ep0.state = ep0_state_status;
		ep->ep0.dir_in = false;
		rc = 0;
	}
	spin_unlock_irqrestore(&vhub->lock, flags);
	return rc;
}

static const struct usb_ep_ops ast_vhub_ep0_ops = {
	.queue		= ast_vhub_ep0_queue,
	.dequeue	= ast_vhub_ep0_dequeue,
	.alloc_request	= ast_vhub_alloc_request,
	.free_request	= ast_vhub_free_request,
};

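/*
 * ast_vhub_reset_ep0() - Abort any outstanding EP0 request with -EIO
 * and return the software state machine to the TOKEN state, ready for
 * the next SETUP packet.
 */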
void ast_vhub_reset_ep0(struct ast_vhub_dev *dev)
{
	struct ast_vhub_ep *ep = &dev->ep0;

	ast_vhub_nuke(ep, -EIO);
	ep->ep0.state = ep0_state_token;
}

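/*
 * ast_vhub_init_ep0() - One-time setup of an EP0 structure.
 *
 * @dev is NULL for the vHub's own endpoint 0. Each EP0 gets its own
 * slice of the shared register space and of the pre-allocated DMA
 * buffer area: slot 0 belongs to the vHub itself, slots 1..N to the
 * downstream devices.
 */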
void ast_vhub_init_ep0(struct ast_vhub *vhub, struct ast_vhub_ep *ep,
		       struct ast_vhub_dev *dev)
{
	memset(ep, 0, sizeof(*ep));

	INIT_LIST_HEAD(&ep->ep.ep_list);
	INIT_LIST_HEAD(&ep->queue);
	ep->ep.ops = &ast_vhub_ep0_ops;
	ep->ep.name = "ep0";
	ep->ep.caps.type_control = true;
	usb_ep_set_maxpacket_limit(&ep->ep, AST_VHUB_EP0_MAX_PACKET);
	ep->d_idx = 0;
	ep->dev = dev;
	ep->vhub = vhub;
	ep->ep0.state = ep0_state_token;
	INIT_LIST_HEAD(&ep->ep0.req.queue);
	ep->ep0.req.internal = true;

	/* Small difference between vHub and devices */
	if (dev) {
		ep->ep0.ctlstat = dev->regs + AST_VHUB_DEV_EP0_CTRL;
		ep->ep0.setup = vhub->regs +
			AST_VHUB_SETUP0 + 8 * (dev->index + 1);
		ep->buf = vhub->ep0_bufs +
			AST_VHUB_EP0_MAX_PACKET * (dev->index + 1);
		ep->buf_dma = vhub->ep0_bufs_dma +
			AST_VHUB_EP0_MAX_PACKET * (dev->index + 1);
	} else {
		ep->ep0.ctlstat = vhub->regs + AST_VHUB_EP0_CTRL;
		ep->ep0.setup = vhub->regs + AST_VHUB_SETUP0;
		ep->buf = vhub->ep0_bufs;
		ep->buf_dma = vhub->ep0_bufs_dma;
	}
}