// SPDX-License-Identifier: GPL-2.0+
/*
 * aspeed-vhub -- Driver for Aspeed SoC "vHub" USB gadget
 *
 * epn.c - Generic endpoints management
 *
 * Copyright 2017 IBM Corporation
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/prefetch.h>
#include <linux/clk.h>
#include <linux/usb/gadget.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/regmap.h>
#include <linux/dma-mapping.h>

#include "vhub.h"

#define EXTRA_CHECKS

#ifdef EXTRA_CHECKS
#define CHECK(ep, expr, fmt...)					\
	do {							\
		if (!(expr)) EPDBG(ep, "CHECK:" fmt);		\
	} while (0)
#else
#define CHECK(ep, expr, fmt...)	do { } while (0)
#endif

static void ast_vhub_epn_kick(struct ast_vhub_ep *ep, struct ast_vhub_req *req)
{
	unsigned int act = req->req.actual;
	unsigned int len = req->req.length;
	unsigned int chunk;

	/* There should be no DMA ongoing */
	WARN_ON(req->active);

	/* Calculate next chunk size */
	chunk = len - act;
	if (chunk > ep->ep.maxpacket)
		chunk = ep->ep.maxpacket;
	else if ((chunk < ep->ep.maxpacket) || !req->req.zero)
		req->last_desc = 1;

	EPVDBG(ep, "kick req %p act=%d/%d chunk=%d last=%d\n",
	       req, act, len, chunk, req->last_desc);

	/* If DMA is unavailable, use the staging EP buffer */
	if (!req->req.dma) {

		/* For IN transfers, copy the data over first */
		if (ep->epn.is_in) {
			memcpy(ep->buf, req->req.buf + act, chunk);
			vhub_dma_workaround(ep->buf);
		}
		writel(ep->buf_dma, ep->epn.regs + AST_VHUB_EP_DESC_BASE);
	} else {
		if (ep->epn.is_in)
			vhub_dma_workaround(req->req.buf);
		writel(req->req.dma + act, ep->epn.regs + AST_VHUB_EP_DESC_BASE);
	}

	/* Start DMA */
	req->active = true;
	writel(VHUB_EP_DMA_SET_TX_SIZE(chunk),
	       ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
	writel(VHUB_EP_DMA_SET_TX_SIZE(chunk) | VHUB_EP_DMA_SINGLE_KICK,
	       ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
}

static void ast_vhub_epn_handle_ack(struct ast_vhub_ep *ep)
{
	struct ast_vhub_req *req;
	unsigned int len;
	int status = 0;
	u32 stat;

	/* Read EP status */
	stat = readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS);

	/* Grab current request if any */
	req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);

	EPVDBG(ep, "ACK status=%08x is_in=%d, req=%p (active=%d)\n",
	       stat, ep->epn.is_in, req,
	       req ? req->active : 0);

	/* If there's no request, bail out: it must have been dequeued */
	if (!req)
		return;

	/*
	 * Request not active, move on to processing the queue, the
	 * active request was probably dequeued
	 */
	if (!req->active)
		goto next_chunk;

	/* Check if HW has moved on */
	if (VHUB_EP_DMA_RPTR(stat) != 0) {
		EPDBG(ep, "DMA read pointer not 0 !\n");
		return;
	}

	/* No current DMA ongoing */
	req->active = false;

	/* Grab length out of HW */
	len = VHUB_EP_DMA_TX_SIZE(stat);

	/* If not using DMA, copy the data out if needed */
	if (!req->req.dma && !ep->epn.is_in && len) {
		if (req->req.actual + len > req->req.length) {
			req->last_desc = 1;
			status = -EOVERFLOW;
			goto done;
		} else {
			memcpy(req->req.buf + req->req.actual, ep->buf, len);
		}
	}
	/* Adjust size */
	req->req.actual += len;

	/* Check for short packet */
	if (len < ep->ep.maxpacket)
		req->last_desc = 1;

 done:
	/* That's it ? complete the request and pick a new one */
	if (req->last_desc >= 0) {
		ast_vhub_done(ep, req, status);
		req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req,
					       queue);

		/*
		 * Due to lock dropping inside "done" the next request could
		 * already be active, so check for that and bail if needed.
		 */
		if (!req || req->active)
			return;
	}

 next_chunk:
	ast_vhub_epn_kick(ep, req);
}

static inline unsigned int ast_vhub_count_free_descs(struct ast_vhub_ep *ep)
{
	/*
	 * d_next == d_last means the descriptor list is empty to the HW,
	 * thus we can only have AST_VHUB_DESCS_COUNT-1 descriptors
	 * in the list
	 */
	return (ep->epn.d_last + AST_VHUB_DESCS_COUNT - ep->epn.d_next - 1) &
		(AST_VHUB_DESCS_COUNT - 1);
}

static void ast_vhub_epn_kick_desc(struct ast_vhub_ep *ep,
				   struct ast_vhub_req *req)
{
	struct ast_vhub_desc *desc = NULL;
	unsigned int act = req->act_count;
	unsigned int len = req->req.length;
	unsigned int chunk;

	/* Mark request active if not already */
	req->active = true;

	/* If the request was already completely written, do nothing */
	if (req->last_desc >= 0)
		return;

	EPVDBG(ep, "kick act=%d/%d chunk_max=%d free_descs=%d\n",
	       act, len, ep->epn.chunk_max, ast_vhub_count_free_descs(ep));

	/* While we can create descriptors */
	while (ast_vhub_count_free_descs(ep) && req->last_desc < 0) {
		unsigned int d_num;

		/* Grab next free descriptor */
		d_num = ep->epn.d_next;
		desc = &ep->epn.descs[d_num];
		ep->epn.d_next = (d_num + 1) & (AST_VHUB_DESCS_COUNT - 1);

		/* Calculate next chunk size */
		chunk = len - act;
		if (chunk <= ep->epn.chunk_max) {
			/*
			 * Is this the last packet ? Because of having up to 8
			 * packets in a descriptor we can't just compare "chunk"
			 * with ep.maxpacket. We have to see if it's a multiple
			 * of it to know if we have to send a zero packet.
			 * Sadly that involves a modulo which is a bit expensive
			 * but probably still better than not doing it.
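			 *
			 * Illustrative example (not from the original
			 * comment): with maxpacket=512 and a 1024-byte
			 * request that has req.zero set, the first
			 * descriptor covers all 1024 bytes; since 1024
			 * is a multiple of 512, last_desc stays unset
			 * and the next loop iteration sees chunk == 0,
			 * emitting a zero-length descriptor that becomes
			 * the terminating ZLP.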
			 */
			if (!chunk || !req->req.zero ||
			    (chunk % ep->ep.maxpacket) != 0)
				req->last_desc = d_num;
		} else {
			chunk = ep->epn.chunk_max;
		}

		EPVDBG(ep, " chunk: act=%d/%d chunk=%d last=%d desc=%d free=%d\n",
		       act, len, chunk, req->last_desc, d_num,
		       ast_vhub_count_free_descs(ep));

		/* Populate descriptor */
		desc->w0 = cpu_to_le32(req->req.dma + act);

		/* Interrupt if end of request or no more descriptors */

		/*
		 * TODO: Be smarter about it, if we don't have enough
		 * descriptors request an interrupt before queue empty
		 * or so in order to be able to populate more before
		 * the HW runs out. This isn't a problem at the moment
		 * as we use 256 descriptors and only put at most one
		 * request in the ring.
		 */
		desc->w1 = cpu_to_le32(VHUB_DSC1_IN_SET_LEN(chunk));
		if (req->last_desc >= 0 || !ast_vhub_count_free_descs(ep))
			desc->w1 |= cpu_to_le32(VHUB_DSC1_IN_INTERRUPT);

		/* Account packet */
		req->act_count = act = act + chunk;
	}

	if (likely(desc))
		vhub_dma_workaround(desc);

	/* Tell HW about new descriptors */
	writel(VHUB_EP_DMA_SET_CPU_WPTR(ep->epn.d_next),
	       ep->epn.regs + AST_VHUB_EP_DESC_STATUS);

	EPVDBG(ep, "HW kicked, d_next=%d dstat=%08x\n",
	       ep->epn.d_next, readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS));
}

static void ast_vhub_epn_handle_ack_desc(struct ast_vhub_ep *ep)
{
	struct ast_vhub_req *req;
	unsigned int len, d_last;
	u32 stat, stat1;

	/* Read EP status, workaround HW race */
	do {
		stat = readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
		stat1 = readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
	} while (stat != stat1);

	/* Extract RPTR */
	d_last = VHUB_EP_DMA_RPTR(stat);

	/* Grab current request if any */
	req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);

	EPVDBG(ep, "ACK status=%08x is_in=%d ep->d_last=%d..%d\n",
	       stat, ep->epn.is_in, ep->epn.d_last, d_last);

	/* Check all completed descriptors */
	while (ep->epn.d_last != d_last) {
		struct ast_vhub_desc *desc;
		unsigned int d_num;
		bool is_last_desc;

		/* Grab next completed descriptor */
		d_num = ep->epn.d_last;
		desc = &ep->epn.descs[d_num];
		ep->epn.d_last = (d_num + 1) & (AST_VHUB_DESCS_COUNT - 1);

		/* Grab len out of descriptor */
		len = VHUB_DSC1_IN_LEN(le32_to_cpu(desc->w1));

		EPVDBG(ep, " desc %d len=%d req=%p (act=%d)\n",
		       d_num, len, req, req ? req->active : 0);

		/* If no active request pending, move on */
		if (!req || !req->active)
			continue;

		/* Adjust size */
		req->req.actual += len;

		/* Is that the last chunk ? */
		is_last_desc = req->last_desc == d_num;
		CHECK(ep, is_last_desc == (len < ep->ep.maxpacket ||
					   (req->req.actual >= req->req.length &&
					    !req->req.zero)),
		      "Last packet discrepancy: last_desc=%d len=%d r.act=%d "
		      "r.len=%d r.zero=%d mp=%d\n",
		      is_last_desc, len, req->req.actual, req->req.length,
		      req->req.zero, ep->ep.maxpacket);

		if (is_last_desc) {
			/*
			 * Because we can only have one request at a time
			 * in our descriptor list in this implementation,
			 * d_last and ep->d_last should now be equal
			 */
			CHECK(ep, d_last == ep->epn.d_last,
			      "DMA read ptr mismatch %d vs %d\n",
			      d_last, ep->epn.d_last);

			/* Note: done will drop and re-acquire the lock */
			ast_vhub_done(ep, req, 0);
			req = list_first_entry_or_null(&ep->queue,
						       struct ast_vhub_req,
						       queue);
			break;
		}
	}

	/* More work ? */
	if (req)
		ast_vhub_epn_kick_desc(ep, req);
}

void ast_vhub_epn_ack_irq(struct ast_vhub_ep *ep)
{
	if (ep->epn.desc_mode)
		ast_vhub_epn_handle_ack_desc(ep);
	else
		ast_vhub_epn_handle_ack(ep);
}

static int ast_vhub_epn_queue(struct usb_ep *u_ep, struct usb_request *u_req,
			      gfp_t gfp_flags)
{
	struct ast_vhub_req *req = to_ast_req(u_req);
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub *vhub = ep->vhub;
	unsigned long flags;
	bool empty;
	int rc;

	/* Paranoid checks */
	if (!u_req || !u_req->complete || !u_req->buf) {
		dev_warn(&vhub->pdev->dev, "Bogus EPn request ! u_req=%p\n", u_req);
		if (u_req) {
			dev_warn(&vhub->pdev->dev, "complete=%p internal=%d\n",
				 u_req->complete, req->internal);
		}
		return -EINVAL;
	}

	/* Endpoint enabled ? */
	if (!ep->epn.enabled || !u_ep->desc || !ep->dev || !ep->d_idx ||
	    !ep->dev->enabled) {
		EPDBG(ep, "Enqueuing request on wrong or disabled EP\n");
		return -ESHUTDOWN;
	}

	/* Map the request for DMA if possible. For now, the rule for DMA
	 * is that:
	 *
	 * * For single stage mode (no descriptors):
	 *
	 *   - The buffer is aligned to an 8-byte boundary (HW requirement)
	 *   - For an OUT endpoint, the request size is a multiple of the EP
	 *     packet size (otherwise the controller will DMA past the end
	 *     of the buffer if the host sends a packet that is too long)
	 *
	 * * For descriptor mode (tx only for now), always.
	 *
	 * We could relax the latter by making the decision to use the bounce
	 * buffer based on the size of a given *segment* of the request rather
	 * than the whole request.
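	 *
	 * Illustrative example: a 100-byte OUT request on a 64-byte
	 * bulk endpoint fails the multiple-of-maxpacket test, so it
	 * takes the bounce-buffer path below (u_req->dma left at 0)
	 * and the data is copied out of ep->buf in
	 * ast_vhub_epn_handle_ack() instead.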
	 */
	if (ep->epn.desc_mode ||
	    ((((unsigned long)u_req->buf & 7) == 0) &&
	     (ep->epn.is_in || !(u_req->length & (u_ep->maxpacket - 1))))) {
		rc = usb_gadget_map_request_by_dev(&vhub->pdev->dev, u_req,
						   ep->epn.is_in);
		if (rc) {
			dev_warn(&vhub->pdev->dev,
				 "Request mapping failure %d\n", rc);
			return rc;
		}
	} else
		u_req->dma = 0;

	EPVDBG(ep, "enqueue req @%p\n", req);
	EPVDBG(ep, " l=%d dma=0x%x zero=%d noshort=%d noirq=%d is_in=%d\n",
	       u_req->length, (u32)u_req->dma, u_req->zero,
	       u_req->short_not_ok, u_req->no_interrupt,
	       ep->epn.is_in);

	/* Initialize request progress fields */
	u_req->status = -EINPROGRESS;
	u_req->actual = 0;
	req->act_count = 0;
	req->active = false;
	req->last_desc = -1;
	spin_lock_irqsave(&vhub->lock, flags);
	empty = list_empty(&ep->queue);

	/* Add request to list and kick processing if empty */
	list_add_tail(&req->queue, &ep->queue);
	if (empty) {
		if (ep->epn.desc_mode)
			ast_vhub_epn_kick_desc(ep, req);
		else
			ast_vhub_epn_kick(ep, req);
	}
	spin_unlock_irqrestore(&vhub->lock, flags);

	return 0;
}

static void ast_vhub_stop_active_req(struct ast_vhub_ep *ep,
				     bool restart_ep)
{
	u32 state, reg, loops;

	/* Stop DMA activity */
	if (ep->epn.desc_mode)
		writel(VHUB_EP_DMA_CTRL_RESET, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
	else
		writel(0, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);

	/* Wait for it to complete */
	for (loops = 0; loops < 1000; loops++) {
		state = readl(ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
		state = VHUB_EP_DMA_PROC_STATUS(state);
		if (state == EP_DMA_PROC_RX_IDLE ||
		    state == EP_DMA_PROC_TX_IDLE)
			break;
		udelay(1);
	}
	if (loops >= 1000)
		dev_warn(&ep->vhub->pdev->dev, "Timeout waiting for DMA\n");

	/* If we don't have to restart the endpoint, that's it */
	if (!restart_ep)
		return;

	/* Restart the endpoint */
	if (ep->epn.desc_mode) {
		/*
		 * Take out the descriptors by resetting the DMA read
		 * pointer to be equal to the CPU write pointer.
		 *
		 * Note: If we ever support creating descriptors for
		 * requests that aren't the head of the queue, we
		 * may have to do something more complex here,
		 * especially if the request being taken out is
		 * not the current head descriptor.
		 */
		reg = VHUB_EP_DMA_SET_RPTR(ep->epn.d_next) |
		      VHUB_EP_DMA_SET_CPU_WPTR(ep->epn.d_next);
		writel(reg, ep->epn.regs + AST_VHUB_EP_DESC_STATUS);

		/* Then turn it back on */
		writel(ep->epn.dma_conf,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
	} else {
		/* Single mode: just turn it back on */
		writel(ep->epn.dma_conf,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
	}
}

static int ast_vhub_epn_dequeue(struct usb_ep *u_ep, struct usb_request *u_req)
{
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub *vhub = ep->vhub;
	struct ast_vhub_req *req = NULL, *iter;
	unsigned long flags;
	int rc = -EINVAL;

	spin_lock_irqsave(&vhub->lock, flags);

	/* Make sure it's actually queued on this endpoint */
	list_for_each_entry(iter, &ep->queue, queue) {
		if (&iter->req != u_req)
			continue;
		req = iter;
		break;
	}

	if (req) {
		EPVDBG(ep, "dequeue req @%p active=%d\n",
		       req, req->active);
		if (req->active)
			ast_vhub_stop_active_req(ep, true);
		ast_vhub_done(ep, req, -ECONNRESET);
		rc = 0;
	}

	spin_unlock_irqrestore(&vhub->lock, flags);
	return rc;
}

void ast_vhub_update_epn_stall(struct ast_vhub_ep *ep)
{
	u32 reg;

	if (WARN_ON(ep->d_idx == 0))
		return;
	reg = readl(ep->epn.regs + AST_VHUB_EP_CONFIG);
	if (ep->epn.stalled || ep->epn.wedged)
		reg |= VHUB_EP_CFG_STALL_CTRL;
	else
		reg &= ~VHUB_EP_CFG_STALL_CTRL;
	writel(reg, ep->epn.regs + AST_VHUB_EP_CONFIG);

	if (!ep->epn.stalled && !ep->epn.wedged)
		writel(VHUB_EP_TOGGLE_SET_EPNUM(ep->epn.g_idx),
		       ep->vhub->regs + AST_VHUB_EP_TOGGLE);
}

static int ast_vhub_set_halt_and_wedge(struct usb_ep *u_ep, bool halt,
				       bool wedge)
{
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub *vhub;
	unsigned long flags;

	/* Check u_ep before dereferencing anything hanging off it */
	if (!u_ep || !u_ep->desc)
		return -EINVAL;
	vhub = ep->vhub;

	EPDBG(ep, "Set halt (%d) & wedge (%d)\n", halt, wedge);

	if (ep->d_idx == 0)
		return 0;
	if (ep->epn.is_iso)
		return -EOPNOTSUPP;

	spin_lock_irqsave(&vhub->lock, flags);

	/* Fail with still-busy IN endpoints */
	if (halt && ep->epn.is_in && !list_empty(&ep->queue)) {
		spin_unlock_irqrestore(&vhub->lock, flags);
		return -EAGAIN;
	}
	ep->epn.stalled = halt;
	ep->epn.wedged = wedge;
	ast_vhub_update_epn_stall(ep);

	spin_unlock_irqrestore(&vhub->lock, flags);

	return 0;
}

static int ast_vhub_epn_set_halt(struct usb_ep *u_ep, int value)
{
	return ast_vhub_set_halt_and_wedge(u_ep, value != 0, false);
}

static int ast_vhub_epn_set_wedge(struct usb_ep *u_ep)
{
	return ast_vhub_set_halt_and_wedge(u_ep, true, true);
}

static int ast_vhub_epn_disable(struct usb_ep *u_ep)
{
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub *vhub = ep->vhub;
	unsigned long flags;
	u32 imask, ep_ier;

	EPDBG(ep, "Disabling !\n");

	spin_lock_irqsave(&vhub->lock, flags);

	ep->epn.enabled = false;

	/* Stop active DMA if any */
	ast_vhub_stop_active_req(ep, false);

	/* Disable endpoint */
	writel(0, ep->epn.regs + AST_VHUB_EP_CONFIG);

	/* Disable ACK interrupt */
	imask = VHUB_EP_IRQ(ep->epn.g_idx);
	ep_ier = readl(vhub->regs + AST_VHUB_EP_ACK_IER);
	ep_ier &= ~imask;
	writel(ep_ier, vhub->regs + AST_VHUB_EP_ACK_IER);
	writel(imask, vhub->regs + AST_VHUB_EP_ACK_ISR);
	/* Nuke all pending requests */
	ast_vhub_nuke(ep, -ESHUTDOWN);

	/* No more descriptor associated with request */
	ep->ep.desc = NULL;

	spin_unlock_irqrestore(&vhub->lock, flags);

	return 0;
}

static int ast_vhub_epn_enable(struct usb_ep *u_ep,
			       const struct usb_endpoint_descriptor *desc)
{
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub_dev *dev;
	struct ast_vhub *vhub;
	u16 maxpacket, type;
	unsigned long flags;
	u32 ep_conf, ep_ier, imask;

	/* Check arguments */
	if (!u_ep || !desc)
		return -EINVAL;

	maxpacket = usb_endpoint_maxp(desc);
	if (!ep->d_idx || !ep->dev ||
	    desc->bDescriptorType != USB_DT_ENDPOINT ||
	    maxpacket == 0 || maxpacket > ep->ep.maxpacket) {
		EPDBG(ep, "Invalid EP enable,d_idx=%d,dev=%p,type=%d,mp=%d/%d\n",
		      ep->d_idx, ep->dev, desc->bDescriptorType,
		      maxpacket, ep->ep.maxpacket);
		return -EINVAL;
	}
	if (ep->d_idx != usb_endpoint_num(desc)) {
		EPDBG(ep, "EP number mismatch !\n");
		return -EINVAL;
	}

	if (ep->epn.enabled) {
		EPDBG(ep, "Already enabled\n");
		return -EBUSY;
	}
	dev = ep->dev;
	vhub = ep->vhub;

	/* Check device state */
	if (!dev->driver) {
		EPDBG(ep, "Bogus device state: driver=%p speed=%d\n",
		      dev->driver, dev->gadget.speed);
		return -ESHUTDOWN;
	}

	/* Grab some info from the descriptor */
	ep->epn.is_in = usb_endpoint_dir_in(desc);
	ep->ep.maxpacket = maxpacket;
	type = usb_endpoint_type(desc);
	ep->epn.d_next = ep->epn.d_last = 0;
	ep->epn.is_iso = false;
	ep->epn.stalled = false;
	ep->epn.wedged = false;

	EPDBG(ep, "Enabling [%s] %s num %d maxpacket=%d\n",
	      ep->epn.is_in ? "in" : "out", usb_ep_type_string(type),
	      usb_endpoint_num(desc), maxpacket);

	/* Can we use DMA descriptor mode ? */
	ep->epn.desc_mode = ep->epn.descs && ep->epn.is_in;
	if (ep->epn.desc_mode)
		memset(ep->epn.descs, 0, 8 * AST_VHUB_DESCS_COUNT);

	/*
	 * Large send function can send up to 8 packets from
	 * one descriptor with a limit of 4095 bytes.
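	 *
	 * Worked example: maxpacket=512 gives 512 << 3 = 4096 below,
	 * which exceeds the 4095-byte limit, so one maxpacket is
	 * subtracted and chunk_max settles at 3584 (7 full packets
	 * per descriptor).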
	 */
	ep->epn.chunk_max = ep->ep.maxpacket;
	if (ep->epn.is_in) {
		ep->epn.chunk_max <<= 3;
		while (ep->epn.chunk_max > 4095)
			ep->epn.chunk_max -= ep->ep.maxpacket;
	}

	switch (type) {
	case USB_ENDPOINT_XFER_CONTROL:
		EPDBG(ep, "Only one control endpoint\n");
		return -EINVAL;
	case USB_ENDPOINT_XFER_INT:
		ep_conf = VHUB_EP_CFG_SET_TYPE(EP_TYPE_INT);
		break;
	case USB_ENDPOINT_XFER_BULK:
		ep_conf = VHUB_EP_CFG_SET_TYPE(EP_TYPE_BULK);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		ep_conf = VHUB_EP_CFG_SET_TYPE(EP_TYPE_ISO);
		ep->epn.is_iso = true;
		break;
	default:
		return -EINVAL;
	}

	/* Encode the rest of the EP config register */
	if (maxpacket < 1024)
		ep_conf |= VHUB_EP_CFG_SET_MAX_PKT(maxpacket);
	if (!ep->epn.is_in)
		ep_conf |= VHUB_EP_CFG_DIR_OUT;
	ep_conf |= VHUB_EP_CFG_SET_EP_NUM(usb_endpoint_num(desc));
	ep_conf |= VHUB_EP_CFG_ENABLE;
	ep_conf |= VHUB_EP_CFG_SET_DEV(dev->index + 1);
	EPVDBG(ep, "config=%08x\n", ep_conf);

	spin_lock_irqsave(&vhub->lock, flags);

	/* Disable HW and reset DMA */
	writel(0, ep->epn.regs + AST_VHUB_EP_CONFIG);
	writel(VHUB_EP_DMA_CTRL_RESET,
	       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);

	/* Configure and enable */
	writel(ep_conf, ep->epn.regs + AST_VHUB_EP_CONFIG);

	if (ep->epn.desc_mode) {
		/* Clear DMA status, including the DMA read ptr */
		writel(0, ep->epn.regs + AST_VHUB_EP_DESC_STATUS);

		/* Set descriptor base */
		writel(ep->epn.descs_dma,
		       ep->epn.regs + AST_VHUB_EP_DESC_BASE);

		/* Set base DMA config value */
		ep->epn.dma_conf = VHUB_EP_DMA_DESC_MODE;
		if (ep->epn.is_in)
			ep->epn.dma_conf |= VHUB_EP_DMA_IN_LONG_MODE;

		/* First reset and disable all operations */
		writel(ep->epn.dma_conf | VHUB_EP_DMA_CTRL_RESET,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);

		/* Enable descriptor mode */
		writel(ep->epn.dma_conf,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
	} else {
		/* Set base DMA config value */
		ep->epn.dma_conf = VHUB_EP_DMA_SINGLE_STAGE;

		/* Reset and switch to single stage mode */
		writel(ep->epn.dma_conf | VHUB_EP_DMA_CTRL_RESET,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
		writel(ep->epn.dma_conf,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
		writel(0, ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
	}

	/* Cleanup data toggle just in case */
	writel(VHUB_EP_TOGGLE_SET_EPNUM(ep->epn.g_idx),
	       vhub->regs + AST_VHUB_EP_TOGGLE);

	/* Cleanup and enable ACK interrupt */
	imask = VHUB_EP_IRQ(ep->epn.g_idx);
	writel(imask, vhub->regs + AST_VHUB_EP_ACK_ISR);
	ep_ier = readl(vhub->regs + AST_VHUB_EP_ACK_IER);
	ep_ier |= imask;
	writel(ep_ier, vhub->regs + AST_VHUB_EP_ACK_IER);

	/* Woot, we are online ! */
	ep->epn.enabled = true;

	spin_unlock_irqrestore(&vhub->lock, flags);

	return 0;
}

static void ast_vhub_epn_dispose(struct usb_ep *u_ep)
{
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);

	if (WARN_ON(!ep->dev || !ep->d_idx))
		return;

	EPDBG(ep, "Releasing endpoint\n");

	/* Take it out of the EP list */
	list_del_init(&ep->ep.ep_list);

	/* Mark the address free in the device */
	ep->dev->epns[ep->d_idx - 1] = NULL;

	/* Free name & DMA buffers */
	kfree(ep->ep.name);
	ep->ep.name = NULL;
	dma_free_coherent(&ep->vhub->pdev->dev,
			  AST_VHUB_EPn_MAX_PACKET +
			  8 * AST_VHUB_DESCS_COUNT,
			  ep->buf, ep->buf_dma);
	ep->buf = NULL;
	ep->epn.descs = NULL;

	/* Mark free */
	ep->dev = NULL;
}

static const struct usb_ep_ops ast_vhub_epn_ops = {
	.enable		= ast_vhub_epn_enable,
	.disable	= ast_vhub_epn_disable,
	.dispose	= ast_vhub_epn_dispose,
	.queue		= ast_vhub_epn_queue,
	.dequeue	= ast_vhub_epn_dequeue,
	.set_halt	= ast_vhub_epn_set_halt,
	.set_wedge	= ast_vhub_epn_set_wedge,
	.alloc_request	= ast_vhub_alloc_request,
	.free_request	= ast_vhub_free_request,
};

struct ast_vhub_ep *ast_vhub_alloc_epn(struct ast_vhub_dev *d, u8 addr)
{
	struct ast_vhub *vhub = d->vhub;
	struct ast_vhub_ep *ep;
	unsigned long flags;
	int i;

	/* Find a free one (no device) */
	spin_lock_irqsave(&vhub->lock, flags);
	for (i = 0; i < vhub->max_epns; i++)
		if (vhub->epns[i].dev == NULL)
			break;
	if (i >= vhub->max_epns) {
		spin_unlock_irqrestore(&vhub->lock, flags);
		return NULL;
	}

	/* Set it up */
	ep = &vhub->epns[i];
	ep->dev = d;
	spin_unlock_irqrestore(&vhub->lock, flags);

	DDBG(d, "Allocating gen EP %d for addr %d\n", i, addr);
	INIT_LIST_HEAD(&ep->queue);
	ep->d_idx = addr;
	ep->vhub = vhub;
	ep->ep.ops = &ast_vhub_epn_ops;
	ep->ep.name = kasprintf(GFP_KERNEL, "ep%d", addr);
	if (!ep->ep.name)
		return NULL;
	d->epns[addr - 1] = ep;
	ep->epn.g_idx = i;
	ep->epn.regs = vhub->regs + 0x200 + (i * 0x10);

	ep->buf = dma_alloc_coherent(&vhub->pdev->dev,
				     AST_VHUB_EPn_MAX_PACKET +
				     8 * AST_VHUB_DESCS_COUNT,
				     &ep->buf_dma, GFP_KERNEL);
	if (!ep->buf) {
		kfree(ep->ep.name);
		ep->ep.name = NULL;
		return NULL;
	}
	ep->epn.descs = ep->buf + AST_VHUB_EPn_MAX_PACKET;
	ep->epn.descs_dma = ep->buf_dma + AST_VHUB_EPn_MAX_PACKET;

	usb_ep_set_maxpacket_limit(&ep->ep, AST_VHUB_EPn_MAX_PACKET);
	list_add_tail(&ep->ep.ep_list, &d->gadget.ep_list);
	ep->ep.caps.type_iso = true;
	ep->ep.caps.type_bulk = true;
	ep->ep.caps.type_int = true;
	ep->ep.caps.dir_in = true;
	ep->ep.caps.dir_out = true;

	return ep;
}