// SPDX-License-Identifier: GPL-2.0+
/*
 * aspeed-vhub -- Driver for Aspeed SoC "vHub" USB gadget
 *
 * epn.c - Generic endpoints management
 *
 * Copyright 2017 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/prefetch.h>
#include <linux/clk.h>
#include <linux/usb/gadget.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/regmap.h>
#include <linux/dma-mapping.h>

#include "vhub.h"

#define EXTRA_CHECKS

#ifdef EXTRA_CHECKS
#define CHECK(ep, expr, fmt...)	do {			\
		if (!(expr))				\
			EPDBG(ep, "CHECK:" fmt);	\
	} while (0)
#else
#define CHECK(ep, expr, fmt...)	do { } while (0)
#endif

static void ast_vhub_epn_kick(struct ast_vhub_ep *ep, struct ast_vhub_req *req)
{
	unsigned int act = req->req.actual;
	unsigned int len = req->req.length;
	unsigned int chunk;

	/* There should be no DMA ongoing */
	WARN_ON(req->active);

	/* Calculate next chunk size */
	chunk = len - act;
	if (chunk > ep->ep.maxpacket)
		chunk = ep->ep.maxpacket;
	else if ((chunk < ep->ep.maxpacket) || !req->req.zero)
		req->last_desc = 1;

	EPVDBG(ep, "kick req %p act=%d/%d chunk=%d last=%d\n",
	       req, act, len, chunk, req->last_desc);

	/* If DMA is unavailable, use the staging EP buffer */
	if (!req->req.dma) {

		/* For IN transfers, copy the data over first */
		if (ep->epn.is_in) {
			memcpy(ep->buf, req->req.buf + act, chunk);
			vhub_dma_workaround(ep->buf);
		}
		writel(ep->buf_dma, ep->epn.regs + AST_VHUB_EP_DESC_BASE);
	} else {
		if (ep->epn.is_in)
			vhub_dma_workaround(req->req.buf);
		writel(req->req.dma + act, ep->epn.regs + AST_VHUB_EP_DESC_BASE);
	}

	/* Start DMA */
	req->active = true;
	writel(VHUB_EP_DMA_SET_TX_SIZE(chunk),
	       ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
	writel(VHUB_EP_DMA_SET_TX_SIZE(chunk) | VHUB_EP_DMA_SINGLE_KICK,
	       ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
}
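/*
 * Completion handling for single-stage (non-descriptor) mode: each ACK
 * interrupt signals at most one finished chunk of up to maxpacket bytes,
 * so a request is advanced one chunk at a time until a short packet or
 * the end of the buffer marks it as the last one.
 */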
static void ast_vhub_epn_handle_ack(struct ast_vhub_ep *ep)
{
	struct ast_vhub_req *req;
	unsigned int len;
	u32 stat;

	/* Read EP status */
	stat = readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS);

	/* Grab current request if any */
	req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);

	EPVDBG(ep, "ACK status=%08x is_in=%d, req=%p (active=%d)\n",
	       stat, ep->epn.is_in, req, req ? req->active : 0);

	/* In the absence of a request, bail out: it must have been dequeued */
	if (!req)
		return;

	/*
	 * If the request isn't active, the active request was probably
	 * dequeued, so move on to processing the queue
	 */
	if (!req->active)
		goto next_chunk;

	/* Check if HW has moved on */
	if (VHUB_EP_DMA_RPTR(stat) != 0) {
		EPDBG(ep, "DMA read pointer not 0 !\n");
		return;
	}

	/* No current DMA ongoing */
	req->active = false;

	/* Grab length out of HW */
	len = VHUB_EP_DMA_TX_SIZE(stat);

	/* If not using DMA, copy data out if needed */
	if (!req->req.dma && !ep->epn.is_in && len)
		memcpy(req->req.buf + req->req.actual, ep->buf, len);

	/* Adjust size */
	req->req.actual += len;

	/* Check for short packet */
	if (len < ep->ep.maxpacket)
		req->last_desc = 1;

	/* That's it ? Complete the request and pick a new one */
	if (req->last_desc >= 0) {
		ast_vhub_done(ep, req, 0);
		req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req,
					       queue);

		/*
		 * Due to lock dropping inside "done" the next request could
		 * already be active, so check for that and bail if needed.
		 */
		if (!req || req->active)
			return;
	}

 next_chunk:
	ast_vhub_epn_kick(ep, req);
}
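/*
 * Worked example of the ring arithmetic below (assuming
 * AST_VHUB_DESCS_COUNT is 256, a power of two as the masking requires):
 * with d_next = 5 and d_last = 3, we get (3 + 256 - 5 - 1) & 255 = 253
 * free slots. One slot is always sacrificed so that d_next == d_last
 * can unambiguously mean "empty".
 */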
static inline unsigned int ast_vhub_count_free_descs(struct ast_vhub_ep *ep)
{
	/*
	 * d_next == d_last means descriptor list empty to HW,
	 * thus we can only have AST_VHUB_DESCS_COUNT-1 descriptors
	 * in the list
	 */
	return (ep->epn.d_last + AST_VHUB_DESCS_COUNT - ep->epn.d_next - 1) &
		(AST_VHUB_DESCS_COUNT - 1);
}

static void ast_vhub_epn_kick_desc(struct ast_vhub_ep *ep,
				   struct ast_vhub_req *req)
{
	struct ast_vhub_desc *desc = NULL;
	unsigned int act = req->act_count;
	unsigned int len = req->req.length;
	unsigned int chunk;

	/* Mark request active if not already */
	req->active = true;

	/* If the request was already completely written, do nothing */
	if (req->last_desc >= 0)
		return;

	EPVDBG(ep, "kick act=%d/%d chunk_max=%d free_descs=%d\n",
	       act, len, ep->epn.chunk_max, ast_vhub_count_free_descs(ep));

	/* While we can create descriptors */
	while (ast_vhub_count_free_descs(ep) && req->last_desc < 0) {
		unsigned int d_num;

		/* Grab next free descriptor */
		d_num = ep->epn.d_next;
		desc = &ep->epn.descs[d_num];
		ep->epn.d_next = (d_num + 1) & (AST_VHUB_DESCS_COUNT - 1);

		/* Calculate next chunk size */
		chunk = len - act;
		if (chunk <= ep->epn.chunk_max) {
			/*
			 * Is this the last packet ? Because of having up to 8
			 * packets in a descriptor we can't just compare "chunk"
			 * with ep.maxpacket. We have to see if it's a multiple
			 * of it to know if we have to send a zero packet.
			 * Sadly that involves a modulo which is a bit expensive
			 * but probably still better than not doing it.
			 */
			if (!chunk || !req->req.zero ||
			    (chunk % ep->ep.maxpacket) != 0)
				req->last_desc = d_num;
		} else {
			chunk = ep->epn.chunk_max;
		}

		EPVDBG(ep, " chunk: act=%d/%d chunk=%d last=%d desc=%d free=%d\n",
		       act, len, chunk, req->last_desc, d_num,
		       ast_vhub_count_free_descs(ep));

		/* Populate descriptor */
		desc->w0 = cpu_to_le32(req->req.dma + act);

		/* Interrupt if end of request or no more descriptors */

		/*
		 * TODO: Be smarter about it, if we don't have enough
		 * descriptors request an interrupt before queue empty
		 * or so in order to be able to populate more before
		 * the HW runs out. This isn't a problem at the moment
		 * as we use 256 descriptors and only put at most one
		 * request in the ring.
		 */
		desc->w1 = cpu_to_le32(VHUB_DSC1_IN_SET_LEN(chunk));
		if (req->last_desc >= 0 || !ast_vhub_count_free_descs(ep))
			desc->w1 |= cpu_to_le32(VHUB_DSC1_IN_INTERRUPT);

		/* Account packet */
		req->act_count = act = act + chunk;
	}

	if (likely(desc))
		vhub_dma_workaround(desc);

	/* Tell HW about new descriptors */
	writel(VHUB_EP_DMA_SET_CPU_WPTR(ep->epn.d_next),
	       ep->epn.regs + AST_VHUB_EP_DESC_STATUS);

	EPVDBG(ep, "HW kicked, d_next=%d dstat=%08x\n",
	       ep->epn.d_next, readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS));
}
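/*
 * Completion handling for descriptor mode: walk the ring from the last
 * descriptor we processed up to the current HW read pointer, crediting
 * each completed chunk's length to the request at the head of the queue.
 */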
static void ast_vhub_epn_handle_ack_desc(struct ast_vhub_ep *ep)
{
	struct ast_vhub_req *req;
	unsigned int len, d_last;
	u32 stat, stat1;

	/* Read EP status, workaround HW race */
	do {
		stat = readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
		stat1 = readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
	} while (stat != stat1);

	/* Extract RPTR */
	d_last = VHUB_EP_DMA_RPTR(stat);

	/* Grab current request if any */
	req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);

	EPVDBG(ep, "ACK status=%08x is_in=%d ep->d_last=%d..%d\n",
	       stat, ep->epn.is_in, ep->epn.d_last, d_last);

	/* Check all completed descriptors */
	while (ep->epn.d_last != d_last) {
		struct ast_vhub_desc *desc;
		unsigned int d_num;
		bool is_last_desc;

		/* Grab next completed descriptor */
		d_num = ep->epn.d_last;
		desc = &ep->epn.descs[d_num];
		ep->epn.d_last = (d_num + 1) & (AST_VHUB_DESCS_COUNT - 1);

		/* Grab len out of descriptor */
		len = VHUB_DSC1_IN_LEN(le32_to_cpu(desc->w1));

		EPVDBG(ep, " desc %d len=%d req=%p (act=%d)\n",
		       d_num, len, req, req ? req->active : 0);

		/* If no active request pending, move on */
		if (!req || !req->active)
			continue;

		/* Adjust size */
		req->req.actual += len;

		/* Is that the last chunk ? */
		is_last_desc = req->last_desc == d_num;
		CHECK(ep, is_last_desc == (len < ep->ep.maxpacket ||
					   (req->req.actual >= req->req.length &&
					    !req->req.zero)),
		      "Last packet discrepancy: last_desc=%d len=%d r.act=%d "
		      "r.len=%d r.zero=%d mp=%d\n",
		      is_last_desc, len, req->req.actual, req->req.length,
		      req->req.zero, ep->ep.maxpacket);

		if (is_last_desc) {
			/*
			 * Because we can only have one request at a time
			 * in our descriptor list in this implementation,
			 * d_last and ep->d_last should now be equal
			 */
			CHECK(ep, d_last == ep->epn.d_last,
			      "DMA read ptr mismatch %d vs %d\n",
			      d_last, ep->epn.d_last);

			/* Note: done will drop and re-acquire the lock */
			ast_vhub_done(ep, req, 0);
			req = list_first_entry_or_null(&ep->queue,
						       struct ast_vhub_req,
						       queue);
			break;
		}
	}

	/* More work ? */
	if (req)
		ast_vhub_epn_kick_desc(ep, req);
}

void ast_vhub_epn_ack_irq(struct ast_vhub_ep *ep)
{
	if (ep->epn.desc_mode)
		ast_vhub_epn_handle_ack_desc(ep);
	else
		ast_vhub_epn_handle_ack(ep);
}
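/*
 * Example of the DMA mapping decision made in ast_vhub_epn_queue()
 * below (a sketch with assumed numbers): a 100-byte OUT request on a
 * 64-byte bulk endpoint in single-stage mode is not a multiple of
 * maxpacket, so it is not DMA-mapped and is bounced through ep->buf
 * instead. An IN request from the same buffer only needs the 8-byte
 * alignment, and descriptor mode requests are always mapped.
 */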
static int ast_vhub_epn_queue(struct usb_ep *u_ep, struct usb_request *u_req,
			      gfp_t gfp_flags)
{
	struct ast_vhub_req *req = to_ast_req(u_req);
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub *vhub = ep->vhub;
	unsigned long flags;
	bool empty;
	int rc;

	/* Paranoid checks */
	if (!u_req || !u_req->complete || !u_req->buf) {
		dev_warn(&vhub->pdev->dev, "Bogus EPn request ! u_req=%p\n", u_req);
		if (u_req) {
			dev_warn(&vhub->pdev->dev, "complete=%p internal=%d\n",
				 u_req->complete, req->internal);
		}
		return -EINVAL;
	}

	/* Endpoint enabled ? */
	if (!ep->epn.enabled || !u_ep->desc || !ep->dev || !ep->d_idx ||
	    !ep->dev->enabled) {
		EPDBG(ep, "Enqueuing request on wrong or disabled EP\n");
		return -ESHUTDOWN;
	}

	/* Map request for DMA if possible. For now, the rule for DMA is
	 * that:
	 *
	 *  * For single stage mode (no descriptors):
	 *
	 *   - The buffer is aligned to an 8-byte boundary (HW requirement)
	 *   - For an OUT endpoint, the request size is a multiple of the EP
	 *     packet size (otherwise the controller will DMA past the end
	 *     of the buffer if the host sends a packet that is too long)
	 *
	 *  * For descriptor mode (tx only for now), always.
	 *
	 * We could relax the latter by making the decision to use the bounce
	 * buffer based on the size of a given *segment* of the request rather
	 * than the whole request.
	 */
	if (ep->epn.desc_mode ||
	    ((((unsigned long)u_req->buf & 7) == 0) &&
	     (ep->epn.is_in || !(u_req->length & (u_ep->maxpacket - 1))))) {
		rc = usb_gadget_map_request_by_dev(&vhub->pdev->dev, u_req,
						   ep->epn.is_in);
		if (rc) {
			dev_warn(&vhub->pdev->dev,
				 "Request mapping failure %d\n", rc);
			return rc;
		}
	} else
		u_req->dma = 0;

	EPVDBG(ep, "enqueue req @%p\n", req);
	EPVDBG(ep, " l=%d dma=0x%x zero=%d noshort=%d noirq=%d is_in=%d\n",
	       u_req->length, (u32)u_req->dma, u_req->zero,
	       u_req->short_not_ok, u_req->no_interrupt,
	       ep->epn.is_in);

	/* Initialize request progress fields */
	u_req->status = -EINPROGRESS;
	u_req->actual = 0;
	req->act_count = 0;
	req->active = false;
	req->last_desc = -1;
	spin_lock_irqsave(&vhub->lock, flags);
	empty = list_empty(&ep->queue);

	/* Add request to list and kick processing if empty */
	list_add_tail(&req->queue, &ep->queue);
	if (empty) {
		if (ep->epn.desc_mode)
			ast_vhub_epn_kick_desc(ep, req);
		else
			ast_vhub_epn_kick(ep, req);
	}
	spin_unlock_irqrestore(&vhub->lock, flags);

	return 0;
}
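/*
 * Quiesce the endpoint's DMA engine, polling for up to ~1ms for its
 * state machine to go idle. When restart_ep is set, the engine is
 * re-armed afterwards (with the ring pointers collapsed in descriptor
 * mode), which is what dequeue of an active request relies on.
 */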
static void ast_vhub_stop_active_req(struct ast_vhub_ep *ep,
				     bool restart_ep)
{
	u32 state, reg, loops;

	/* Stop DMA activity */
	if (ep->epn.desc_mode)
		writel(VHUB_EP_DMA_CTRL_RESET, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
	else
		writel(0, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);

	/* Wait for it to complete */
	for (loops = 0; loops < 1000; loops++) {
		state = readl(ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
		state = VHUB_EP_DMA_PROC_STATUS(state);
		if (state == EP_DMA_PROC_RX_IDLE ||
		    state == EP_DMA_PROC_TX_IDLE)
			break;
		udelay(1);
	}
	if (loops >= 1000)
		dev_warn(&ep->vhub->pdev->dev, "Timeout waiting for DMA\n");

	/* If we don't have to restart the endpoint, that's it */
	if (!restart_ep)
		return;

	/* Restart the endpoint */
	if (ep->epn.desc_mode) {
		/*
		 * Take out descriptors by resetting the DMA read
		 * pointer to be equal to the CPU write pointer.
		 *
		 * Note: If we ever support creating descriptors for
		 * requests that aren't the head of the queue, we
		 * may have to do something more complex here,
		 * especially if the request being taken out is
		 * not at the current head of the descriptor list.
		 */
		reg = VHUB_EP_DMA_SET_RPTR(ep->epn.d_next) |
			VHUB_EP_DMA_SET_CPU_WPTR(ep->epn.d_next);
		writel(reg, ep->epn.regs + AST_VHUB_EP_DESC_STATUS);

		/* Then turn it back on */
		writel(ep->epn.dma_conf,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
	} else {
		/* Single mode: just turn it back on */
		writel(ep->epn.dma_conf,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
	}
}

static int ast_vhub_epn_dequeue(struct usb_ep *u_ep, struct usb_request *u_req)
{
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub *vhub = ep->vhub;
	struct ast_vhub_req *req;
	unsigned long flags;
	int rc = -EINVAL;

	spin_lock_irqsave(&vhub->lock, flags);

	/* Make sure it's actually queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == u_req)
			break;
	}

	if (&req->req == u_req) {
		EPVDBG(ep, "dequeue req @%p active=%d\n",
		       req, req->active);
		if (req->active)
			ast_vhub_stop_active_req(ep, true);
		ast_vhub_done(ep, req, -ECONNRESET);
		rc = 0;
	}

	spin_unlock_irqrestore(&vhub->lock, flags);
	return rc;
}
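/*
 * Propagate the stall/wedge state to the hardware. Note that clearing
 * a stall also resets the endpoint's data toggle, as the USB spec
 * requires after a CLEAR_FEATURE(ENDPOINT_HALT).
 */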
void ast_vhub_update_epn_stall(struct ast_vhub_ep *ep)
{
	u32 reg;

	if (WARN_ON(ep->d_idx == 0))
		return;
	reg = readl(ep->epn.regs + AST_VHUB_EP_CONFIG);
	if (ep->epn.stalled || ep->epn.wedged)
		reg |= VHUB_EP_CFG_STALL_CTRL;
	else
		reg &= ~VHUB_EP_CFG_STALL_CTRL;
	writel(reg, ep->epn.regs + AST_VHUB_EP_CONFIG);

	if (!ep->epn.stalled && !ep->epn.wedged)
		writel(VHUB_EP_TOGGLE_SET_EPNUM(ep->epn.g_idx),
		       ep->vhub->regs + AST_VHUB_EP_TOGGLE);
}

static int ast_vhub_set_halt_and_wedge(struct usb_ep *u_ep, bool halt,
				       bool wedge)
{
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub *vhub = ep->vhub;
	unsigned long flags;

	/* Check arguments before touching the endpoint */
	if (!u_ep || !u_ep->desc)
		return -EINVAL;

	EPDBG(ep, "Set halt (%d) & wedge (%d)\n", halt, wedge);

	if (ep->d_idx == 0)
		return 0;
	if (ep->epn.is_iso)
		return -EOPNOTSUPP;

	spin_lock_irqsave(&vhub->lock, flags);

	/* Fail with still-busy IN endpoints */
	if (halt && ep->epn.is_in && !list_empty(&ep->queue)) {
		spin_unlock_irqrestore(&vhub->lock, flags);
		return -EAGAIN;
	}
	ep->epn.stalled = halt;
	ep->epn.wedged = wedge;
	ast_vhub_update_epn_stall(ep);

	spin_unlock_irqrestore(&vhub->lock, flags);

	return 0;
}

static int ast_vhub_epn_set_halt(struct usb_ep *u_ep, int value)
{
	return ast_vhub_set_halt_and_wedge(u_ep, value != 0, false);
}

static int ast_vhub_epn_set_wedge(struct usb_ep *u_ep)
{
	return ast_vhub_set_halt_and_wedge(u_ep, true, true);
}

static int ast_vhub_epn_disable(struct usb_ep *u_ep)
{
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub *vhub = ep->vhub;
	unsigned long flags;
	u32 imask, ep_ier;

	EPDBG(ep, "Disabling !\n");

	spin_lock_irqsave(&vhub->lock, flags);

	ep->epn.enabled = false;

	/* Stop active DMA if any */
	ast_vhub_stop_active_req(ep, false);

	/* Disable endpoint */
	writel(0, ep->epn.regs + AST_VHUB_EP_CONFIG);

	/* Disable ACK interrupt */
	imask = VHUB_EP_IRQ(ep->epn.g_idx);
	ep_ier = readl(vhub->regs + AST_VHUB_EP_ACK_IER);
	ep_ier &= ~imask;
	writel(ep_ier, vhub->regs + AST_VHUB_EP_ACK_IER);
	writel(imask, vhub->regs + AST_VHUB_EP_ACK_ISR);

	/* Nuke all pending requests */
	ast_vhub_nuke(ep, -ESHUTDOWN);

	/* No more USB endpoint descriptor associated with this EP */
	ep->ep.desc = NULL;

	spin_unlock_irqrestore(&vhub->lock, flags);

	return 0;
}

static int ast_vhub_epn_enable(struct usb_ep *u_ep,
			       const struct usb_endpoint_descriptor *desc)
{
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub_dev *dev;
	struct ast_vhub *vhub;
	u16 maxpacket, type;
	unsigned long flags;
	u32 ep_conf, ep_ier, imask;

	/* Check arguments */
	if (!u_ep || !desc)
		return -EINVAL;

	maxpacket = usb_endpoint_maxp(desc);
	if (!ep->d_idx || !ep->dev ||
	    desc->bDescriptorType != USB_DT_ENDPOINT ||
	    maxpacket == 0 || maxpacket > ep->ep.maxpacket) {
		EPDBG(ep, "Invalid EP enable,d_idx=%d,dev=%p,type=%d,mp=%d/%d\n",
		      ep->d_idx, ep->dev, desc->bDescriptorType,
		      maxpacket, ep->ep.maxpacket);
		return -EINVAL;
	}
	if (ep->d_idx != usb_endpoint_num(desc)) {
		EPDBG(ep, "EP number mismatch !\n");
		return -EINVAL;
	}

	if (ep->epn.enabled) {
		EPDBG(ep, "Already enabled\n");
		return -EBUSY;
	}
	dev = ep->dev;
	vhub = ep->vhub;

	/* Check device state */
	if (!dev->driver) {
		EPDBG(ep, "Bogus device state: driver=%p speed=%d\n",
		      dev->driver, dev->gadget.speed);
		return -ESHUTDOWN;
	}

	/* Grab some info from the descriptor */
	ep->epn.is_in = usb_endpoint_dir_in(desc);
	ep->ep.maxpacket = maxpacket;
	type = usb_endpoint_type(desc);
	ep->epn.d_next = ep->epn.d_last = 0;
	ep->epn.is_iso = false;
	ep->epn.stalled = false;
	ep->epn.wedged = false;

	EPDBG(ep, "Enabling [%s] %s num %d maxpacket=%d\n",
	      ep->epn.is_in ? "in" : "out", usb_ep_type_string(type),
	      usb_endpoint_num(desc), maxpacket);

	/* Can we use DMA descriptor mode ? */
	ep->epn.desc_mode = ep->epn.descs && ep->epn.is_in;
	if (ep->epn.desc_mode)
		memset(ep->epn.descs, 0, 8 * AST_VHUB_DESCS_COUNT);

	/*
	 * The hardware's "large send" function can transmit up to 8
	 * packets from one descriptor, with a limit of 4095 bytes.
	 */
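	/*
	 * Worked example (assumed maxpacket values): for 512-byte bulk
	 * packets, 512 << 3 = 4096 exceeds the 4095-byte limit, so one
	 * maxpacket is shaved off and chunk_max ends up at 3584 bytes
	 * (7 packets per descriptor). For 64-byte packets, 64 << 3 = 512
	 * fits and all 8 packets are used.
	 */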
	ep->epn.chunk_max = ep->ep.maxpacket;
	if (ep->epn.is_in) {
		ep->epn.chunk_max <<= 3;
		while (ep->epn.chunk_max > 4095)
			ep->epn.chunk_max -= ep->ep.maxpacket;
	}

	switch (type) {
	case USB_ENDPOINT_XFER_CONTROL:
		EPDBG(ep, "Only one control endpoint\n");
		return -EINVAL;
	case USB_ENDPOINT_XFER_INT:
		ep_conf = VHUB_EP_CFG_SET_TYPE(EP_TYPE_INT);
		break;
	case USB_ENDPOINT_XFER_BULK:
		ep_conf = VHUB_EP_CFG_SET_TYPE(EP_TYPE_BULK);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		ep_conf = VHUB_EP_CFG_SET_TYPE(EP_TYPE_ISO);
		ep->epn.is_iso = true;
		break;
	default:
		return -EINVAL;
	}

	/* Encode the rest of the EP config register */
	if (maxpacket < 1024)
		ep_conf |= VHUB_EP_CFG_SET_MAX_PKT(maxpacket);
	if (!ep->epn.is_in)
		ep_conf |= VHUB_EP_CFG_DIR_OUT;
	ep_conf |= VHUB_EP_CFG_SET_EP_NUM(usb_endpoint_num(desc));
	ep_conf |= VHUB_EP_CFG_ENABLE;
	ep_conf |= VHUB_EP_CFG_SET_DEV(dev->index + 1);
	EPVDBG(ep, "config=%08x\n", ep_conf);

	spin_lock_irqsave(&vhub->lock, flags);

	/* Disable HW and reset DMA */
	writel(0, ep->epn.regs + AST_VHUB_EP_CONFIG);
	writel(VHUB_EP_DMA_CTRL_RESET,
	       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);

	/* Configure and enable */
	writel(ep_conf, ep->epn.regs + AST_VHUB_EP_CONFIG);

	if (ep->epn.desc_mode) {
		/* Clear DMA status, including the DMA read ptr */
		writel(0, ep->epn.regs + AST_VHUB_EP_DESC_STATUS);

		/* Set descriptor base */
		writel(ep->epn.descs_dma,
		       ep->epn.regs + AST_VHUB_EP_DESC_BASE);

		/* Set base DMA config value */
		ep->epn.dma_conf = VHUB_EP_DMA_DESC_MODE;
		if (ep->epn.is_in)
			ep->epn.dma_conf |= VHUB_EP_DMA_IN_LONG_MODE;

		/* First reset and disable all operations */
		writel(ep->epn.dma_conf | VHUB_EP_DMA_CTRL_RESET,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);

		/* Enable descriptor mode */
		writel(ep->epn.dma_conf,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
	} else {
		/* Set base DMA config value */
		ep->epn.dma_conf = VHUB_EP_DMA_SINGLE_STAGE;

		/* Reset and switch to single stage mode */
		writel(ep->epn.dma_conf | VHUB_EP_DMA_CTRL_RESET,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
		writel(ep->epn.dma_conf,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
		writel(0, ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
	}

	/* Cleanup data toggle just in case */
	writel(VHUB_EP_TOGGLE_SET_EPNUM(ep->epn.g_idx),
	       vhub->regs + AST_VHUB_EP_TOGGLE);

	/* Cleanup and enable ACK interrupt */
	imask = VHUB_EP_IRQ(ep->epn.g_idx);
	writel(imask, vhub->regs + AST_VHUB_EP_ACK_ISR);
	ep_ier = readl(vhub->regs + AST_VHUB_EP_ACK_IER);
	ep_ier |= imask;
	writel(ep_ier, vhub->regs + AST_VHUB_EP_ACK_IER);

	/* Woot, we are online ! */
	ep->epn.enabled = true;

	spin_unlock_irqrestore(&vhub->lock, flags);

	return 0;
}
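/*
 * Note on buffer ownership: ep->buf is a single coherent allocation
 * covering both the bounce buffer and the descriptor ring (see
 * ast_vhub_alloc_epn() below), so one dma_free_coherent() of the
 * combined size releases everything.
 */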
static void ast_vhub_epn_dispose(struct usb_ep *u_ep)
{
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);

	if (WARN_ON(!ep->dev || !ep->d_idx))
		return;

	EPDBG(ep, "Releasing endpoint\n");

	/* Take it out of the EP list */
	list_del_init(&ep->ep.ep_list);

	/* Mark the address free in the device */
	ep->dev->epns[ep->d_idx - 1] = NULL;

	/* Free name & DMA buffers */
	kfree(ep->ep.name);
	ep->ep.name = NULL;
	dma_free_coherent(&ep->vhub->pdev->dev,
			  AST_VHUB_EPn_MAX_PACKET +
			  8 * AST_VHUB_DESCS_COUNT,
			  ep->buf, ep->buf_dma);
	ep->buf = NULL;
	ep->epn.descs = NULL;

	/* Mark free */
	ep->dev = NULL;
}

static const struct usb_ep_ops ast_vhub_epn_ops = {
	.enable		= ast_vhub_epn_enable,
	.disable	= ast_vhub_epn_disable,
	.dispose	= ast_vhub_epn_dispose,
	.queue		= ast_vhub_epn_queue,
	.dequeue	= ast_vhub_epn_dequeue,
	.set_halt	= ast_vhub_epn_set_halt,
	.set_wedge	= ast_vhub_epn_set_wedge,
	.alloc_request	= ast_vhub_alloc_request,
	.free_request	= ast_vhub_free_request,
};

struct ast_vhub_ep *ast_vhub_alloc_epn(struct ast_vhub_dev *d, u8 addr)
{
	struct ast_vhub *vhub = d->vhub;
	struct ast_vhub_ep *ep;
	unsigned long flags;
	int i;

	/* Find a free one (no device) */
	spin_lock_irqsave(&vhub->lock, flags);
	for (i = 0; i < vhub->max_epns; i++)
		if (vhub->epns[i].dev == NULL)
			break;
	if (i >= vhub->max_epns) {
		spin_unlock_irqrestore(&vhub->lock, flags);
		return NULL;
	}

	/* Set it up */
	ep = &vhub->epns[i];
	ep->dev = d;
	spin_unlock_irqrestore(&vhub->lock, flags);

	DDBG(d, "Allocating gen EP %d for addr %d\n", i, addr);
	INIT_LIST_HEAD(&ep->queue);
	ep->d_idx = addr;
	ep->vhub = vhub;
	ep->ep.ops = &ast_vhub_epn_ops;
	ep->ep.name = kasprintf(GFP_KERNEL, "ep%d", addr);
	if (!ep->ep.name) {
		/* Release the claimed slot on failure */
		ep->dev = NULL;
		return NULL;
	}
	d->epns[addr - 1] = ep;
	ep->epn.g_idx = i;
	ep->epn.regs = vhub->regs + 0x200 + (i * 0x10);

	ep->buf = dma_alloc_coherent(&vhub->pdev->dev,
				     AST_VHUB_EPn_MAX_PACKET +
				     8 * AST_VHUB_DESCS_COUNT,
				     &ep->buf_dma, GFP_KERNEL);
	if (!ep->buf) {
		kfree(ep->ep.name);
		ep->ep.name = NULL;
		/* Release the claimed slot and address on failure */
		d->epns[addr - 1] = NULL;
		ep->dev = NULL;
		return NULL;
	}
	ep->epn.descs = ep->buf + AST_VHUB_EPn_MAX_PACKET;
	ep->epn.descs_dma = ep->buf_dma + AST_VHUB_EPn_MAX_PACKET;

	usb_ep_set_maxpacket_limit(&ep->ep, AST_VHUB_EPn_MAX_PACKET);
	list_add_tail(&ep->ep.ep_list, &d->gadget.ep_list);
	ep->ep.caps.type_iso = true;
	ep->ep.caps.type_bulk = true;
	ep->ep.caps.type_int = true;
	ep->ep.caps.dir_in = true;
	ep->ep.caps.dir_out = true;

	return ep;
}