1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Cadence CDNSP DRD Driver. 4 * 5 * Copyright (C) 2020 Cadence. 6 * 7 * Author: Pawel Laszczak <pawell@cadence.com> 8 * 9 */ 10 11 #include <linux/moduleparam.h> 12 #include <linux/dma-mapping.h> 13 #include <linux/module.h> 14 #include <linux/iopoll.h> 15 #include <linux/delay.h> 16 #include <linux/log2.h> 17 #include <linux/slab.h> 18 #include <linux/pci.h> 19 #include <linux/irq.h> 20 #include <linux/dmi.h> 21 22 #include "core.h" 23 #include "gadget-export.h" 24 #include "drd.h" 25 #include "cdnsp-gadget.h" 26 #include "cdnsp-trace.h" 27 28 unsigned int cdnsp_port_speed(unsigned int port_status) 29 { 30 /*Detect gadget speed based on PORTSC register*/ 31 if (DEV_SUPERSPEEDPLUS(port_status)) 32 return USB_SPEED_SUPER_PLUS; 33 else if (DEV_SUPERSPEED(port_status)) 34 return USB_SPEED_SUPER; 35 else if (DEV_HIGHSPEED(port_status)) 36 return USB_SPEED_HIGH; 37 else if (DEV_FULLSPEED(port_status)) 38 return USB_SPEED_FULL; 39 40 /* If device is detached then speed will be USB_SPEED_UNKNOWN.*/ 41 return USB_SPEED_UNKNOWN; 42 } 43 44 /* 45 * Given a port state, this function returns a value that would result in the 46 * port being in the same state, if the value was written to the port status 47 * control register. 48 * Save Read Only (RO) bits and save read/write bits where 49 * writing a 0 clears the bit and writing a 1 sets the bit (RWS). 50 * For all other types (RW1S, RW1CS, RW, and RZ), writing a '0' has no effect. 51 */ 52 u32 cdnsp_port_state_to_neutral(u32 state) 53 { 54 /* Save read-only status and port state. */ 55 return (state & CDNSP_PORT_RO) | (state & CDNSP_PORT_RWS); 56 } 57 58 /** 59 * Find the offset of the extended capabilities with capability ID id. 60 * @base: PCI MMIO registers base address. 61 * @start: Address at which to start looking, (0 or HCC_PARAMS to start at 62 * beginning of list) 63 * @id: Extended capability ID to search for. 
64 * 65 * Returns the offset of the next matching extended capability structure. 66 * Some capabilities can occur several times, 67 * e.g., the EXT_CAPS_PROTOCOL, and this provides a way to find them all. 68 */ 69 int cdnsp_find_next_ext_cap(void __iomem *base, u32 start, int id) 70 { 71 u32 offset = start; 72 u32 next; 73 u32 val; 74 75 if (!start || start == HCC_PARAMS_OFFSET) { 76 val = readl(base + HCC_PARAMS_OFFSET); 77 if (val == ~0) 78 return 0; 79 80 offset = HCC_EXT_CAPS(val) << 2; 81 if (!offset) 82 return 0; 83 }; 84 85 do { 86 val = readl(base + offset); 87 if (val == ~0) 88 return 0; 89 90 if (EXT_CAPS_ID(val) == id && offset != start) 91 return offset; 92 93 next = EXT_CAPS_NEXT(val); 94 offset += next << 2; 95 } while (next); 96 97 return 0; 98 } 99 100 void cdnsp_set_link_state(struct cdnsp_device *pdev, 101 __le32 __iomem *port_regs, 102 u32 link_state) 103 { 104 int port_num = 0xFF; 105 u32 temp; 106 107 temp = readl(port_regs); 108 temp = cdnsp_port_state_to_neutral(temp); 109 temp |= PORT_WKCONN_E | PORT_WKDISC_E; 110 writel(temp, port_regs); 111 112 temp &= ~PORT_PLS_MASK; 113 temp |= PORT_LINK_STROBE | link_state; 114 115 if (pdev->active_port) 116 port_num = pdev->active_port->port_num; 117 118 trace_cdnsp_handle_port_status(port_num, readl(port_regs)); 119 writel(temp, port_regs); 120 trace_cdnsp_link_state_changed(port_num, readl(port_regs)); 121 } 122 123 static void cdnsp_disable_port(struct cdnsp_device *pdev, 124 __le32 __iomem *port_regs) 125 { 126 u32 temp = cdnsp_port_state_to_neutral(readl(port_regs)); 127 128 writel(temp | PORT_PED, port_regs); 129 } 130 131 static void cdnsp_clear_port_change_bit(struct cdnsp_device *pdev, 132 __le32 __iomem *port_regs) 133 { 134 u32 portsc = readl(port_regs); 135 136 writel(cdnsp_port_state_to_neutral(portsc) | 137 (portsc & PORT_CHANGE_BITS), port_regs); 138 } 139 140 static void cdnsp_set_chicken_bits_2(struct cdnsp_device *pdev, u32 bit) 141 { 142 __le32 __iomem *reg; 143 void __iomem *base; 
144 u32 offset = 0; 145 146 base = &pdev->cap_regs->hc_capbase; 147 offset = cdnsp_find_next_ext_cap(base, offset, D_XEC_PRE_REGS_CAP); 148 reg = base + offset + REG_CHICKEN_BITS_2_OFFSET; 149 150 bit = readl(reg) | bit; 151 writel(bit, reg); 152 } 153 154 static void cdnsp_clear_chicken_bits_2(struct cdnsp_device *pdev, u32 bit) 155 { 156 __le32 __iomem *reg; 157 void __iomem *base; 158 u32 offset = 0; 159 160 base = &pdev->cap_regs->hc_capbase; 161 offset = cdnsp_find_next_ext_cap(base, offset, D_XEC_PRE_REGS_CAP); 162 reg = base + offset + REG_CHICKEN_BITS_2_OFFSET; 163 164 bit = readl(reg) & ~bit; 165 writel(bit, reg); 166 } 167 168 /* 169 * Disable interrupts and begin the controller halting process. 170 */ 171 static void cdnsp_quiesce(struct cdnsp_device *pdev) 172 { 173 u32 halted; 174 u32 mask; 175 u32 cmd; 176 177 mask = ~(u32)(CDNSP_IRQS); 178 179 halted = readl(&pdev->op_regs->status) & STS_HALT; 180 if (!halted) 181 mask &= ~(CMD_R_S | CMD_DEVEN); 182 183 cmd = readl(&pdev->op_regs->command); 184 cmd &= mask; 185 writel(cmd, &pdev->op_regs->command); 186 } 187 188 /* 189 * Force controller into halt state. 190 * 191 * Disable any IRQs and clear the run/stop bit. 192 * Controller will complete any current and actively pipelined transactions, and 193 * should halt within 16 ms of the run/stop bit being cleared. 194 * Read controller Halted bit in the status register to see when the 195 * controller is finished. 196 */ 197 int cdnsp_halt(struct cdnsp_device *pdev) 198 { 199 int ret; 200 u32 val; 201 202 cdnsp_quiesce(pdev); 203 204 ret = readl_poll_timeout_atomic(&pdev->op_regs->status, val, 205 val & STS_HALT, 1, 206 CDNSP_MAX_HALT_USEC); 207 if (ret) { 208 dev_err(pdev->dev, "ERROR: Device halt failed\n"); 209 return ret; 210 } 211 212 pdev->cdnsp_state |= CDNSP_STATE_HALTED; 213 214 return 0; 215 } 216 217 /* 218 * device controller died, register read returns 0xffffffff, or command never 219 * ends. 
220 */ 221 void cdnsp_died(struct cdnsp_device *pdev) 222 { 223 dev_err(pdev->dev, "ERROR: CDNSP controller not responding\n"); 224 pdev->cdnsp_state |= CDNSP_STATE_DYING; 225 cdnsp_halt(pdev); 226 } 227 228 /* 229 * Set the run bit and wait for the device to be running. 230 */ 231 static int cdnsp_start(struct cdnsp_device *pdev) 232 { 233 u32 temp; 234 int ret; 235 236 temp = readl(&pdev->op_regs->command); 237 temp |= (CMD_R_S | CMD_DEVEN); 238 writel(temp, &pdev->op_regs->command); 239 240 pdev->cdnsp_state = 0; 241 242 /* 243 * Wait for the STS_HALT Status bit to be 0 to indicate the device is 244 * running. 245 */ 246 ret = readl_poll_timeout_atomic(&pdev->op_regs->status, temp, 247 !(temp & STS_HALT), 1, 248 CDNSP_MAX_HALT_USEC); 249 if (ret) { 250 pdev->cdnsp_state = CDNSP_STATE_DYING; 251 dev_err(pdev->dev, "ERROR: Controller run failed\n"); 252 } 253 254 return ret; 255 } 256 257 /* 258 * Reset a halted controller. 259 * 260 * This resets pipelines, timers, counters, state machines, etc. 261 * Transactions will be terminated immediately, and operational registers 262 * will be set to their defaults. 
263 */ 264 int cdnsp_reset(struct cdnsp_device *pdev) 265 { 266 u32 command; 267 u32 temp; 268 int ret; 269 270 temp = readl(&pdev->op_regs->status); 271 272 if (temp == ~(u32)0) { 273 dev_err(pdev->dev, "Device not accessible, reset failed.\n"); 274 return -ENODEV; 275 } 276 277 if ((temp & STS_HALT) == 0) { 278 dev_err(pdev->dev, "Controller not halted, aborting reset.\n"); 279 return -EINVAL; 280 } 281 282 command = readl(&pdev->op_regs->command); 283 command |= CMD_RESET; 284 writel(command, &pdev->op_regs->command); 285 286 ret = readl_poll_timeout_atomic(&pdev->op_regs->command, temp, 287 !(temp & CMD_RESET), 1, 288 10 * 1000); 289 if (ret) { 290 dev_err(pdev->dev, "ERROR: Controller reset failed\n"); 291 return ret; 292 } 293 294 /* 295 * CDNSP cannot write any doorbells or operational registers other 296 * than status until the "Controller Not Ready" flag is cleared. 297 */ 298 ret = readl_poll_timeout_atomic(&pdev->op_regs->status, temp, 299 !(temp & STS_CNR), 1, 300 10 * 1000); 301 302 if (ret) { 303 dev_err(pdev->dev, "ERROR: Controller not ready to work\n"); 304 return ret; 305 } 306 307 dev_dbg(pdev->dev, "Controller ready to work"); 308 309 return ret; 310 } 311 312 /* 313 * cdnsp_get_endpoint_index - Find the index for an endpoint given its 314 * descriptor.Use the return value to right shift 1 for the bitmask. 315 * 316 * Index = (epnum * 2) + direction - 1, 317 * where direction = 0 for OUT, 1 for IN. 318 * For control endpoints, the IN index is used (OUT index is unused), so 319 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2) 320 */ 321 static unsigned int 322 cdnsp_get_endpoint_index(const struct usb_endpoint_descriptor *desc) 323 { 324 unsigned int index = (unsigned int)usb_endpoint_num(desc); 325 326 if (usb_endpoint_xfer_control(desc)) 327 return index * 2; 328 329 return (index * 2) + (usb_endpoint_dir_in(desc) ? 1 : 0) - 1; 330 } 331 332 /* 333 * Find the flag for this endpoint (for use in the control context). 
Use the 334 * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is 335 * bit 1, etc. 336 */ 337 static unsigned int 338 cdnsp_get_endpoint_flag(const struct usb_endpoint_descriptor *desc) 339 { 340 return 1 << (cdnsp_get_endpoint_index(desc) + 1); 341 } 342 343 int cdnsp_ep_enqueue(struct cdnsp_ep *pep, struct cdnsp_request *preq) 344 { 345 struct cdnsp_device *pdev = pep->pdev; 346 struct usb_request *request; 347 int ret; 348 349 if (preq->epnum == 0 && !list_empty(&pep->pending_list)) { 350 trace_cdnsp_request_enqueue_busy(preq); 351 return -EBUSY; 352 } 353 354 request = &preq->request; 355 request->actual = 0; 356 request->status = -EINPROGRESS; 357 preq->direction = pep->direction; 358 preq->epnum = pep->number; 359 preq->td.drbl = 0; 360 361 ret = usb_gadget_map_request_by_dev(pdev->dev, request, pep->direction); 362 if (ret) { 363 trace_cdnsp_request_enqueue_error(preq); 364 return ret; 365 } 366 367 list_add_tail(&preq->list, &pep->pending_list); 368 369 trace_cdnsp_request_enqueue(preq); 370 371 switch (usb_endpoint_type(pep->endpoint.desc)) { 372 case USB_ENDPOINT_XFER_CONTROL: 373 ret = cdnsp_queue_ctrl_tx(pdev, preq); 374 break; 375 case USB_ENDPOINT_XFER_BULK: 376 case USB_ENDPOINT_XFER_INT: 377 ret = cdnsp_queue_bulk_tx(pdev, preq); 378 break; 379 case USB_ENDPOINT_XFER_ISOC: 380 ret = cdnsp_queue_isoc_tx_prepare(pdev, preq); 381 } 382 383 if (ret) 384 goto unmap; 385 386 return 0; 387 388 unmap: 389 usb_gadget_unmap_request_by_dev(pdev->dev, &preq->request, 390 pep->direction); 391 list_del(&preq->list); 392 trace_cdnsp_request_enqueue_error(preq); 393 394 return ret; 395 } 396 397 /* 398 * Remove the request's TD from the endpoint ring. This may cause the 399 * controller to stop USB transfers, potentially stopping in the middle of a 400 * TRB buffer. The controller should pick up where it left off in the TD, 401 * unless a Set Transfer Ring Dequeue Pointer is issued. 
402 * 403 * The TRBs that make up the buffers for the canceled request will be "removed" 404 * from the ring. Since the ring is a contiguous structure, they can't be 405 * physically removed. Instead, there are two options: 406 * 407 * 1) If the controller is in the middle of processing the request to be 408 * canceled, we simply move the ring's dequeue pointer past those TRBs 409 * using the Set Transfer Ring Dequeue Pointer command. This will be 410 * the common case, when drivers timeout on the last submitted request 411 * and attempt to cancel. 412 * 413 * 2) If the controller is in the middle of a different TD, we turn the TRBs 414 * into a series of 1-TRB transfer no-op TDs. No-ops shouldn't be chained. 415 * The controller will need to invalidate the any TRBs it has cached after 416 * the stop endpoint command. 417 * 418 * 3) The TD may have completed by the time the Stop Endpoint Command 419 * completes, so software needs to handle that case too. 420 * 421 */ 422 int cdnsp_ep_dequeue(struct cdnsp_ep *pep, struct cdnsp_request *preq) 423 { 424 struct cdnsp_device *pdev = pep->pdev; 425 int ret; 426 427 trace_cdnsp_request_dequeue(preq); 428 429 if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_RUNNING) { 430 ret = cdnsp_cmd_stop_ep(pdev, pep); 431 if (ret) 432 return ret; 433 } 434 435 return cdnsp_remove_request(pdev, preq, pep); 436 } 437 438 static void cdnsp_zero_in_ctx(struct cdnsp_device *pdev) 439 { 440 struct cdnsp_input_control_ctx *ctrl_ctx; 441 struct cdnsp_slot_ctx *slot_ctx; 442 struct cdnsp_ep_ctx *ep_ctx; 443 int i; 444 445 ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx); 446 447 /* 448 * When a device's add flag and drop flag are zero, any subsequent 449 * configure endpoint command will leave that endpoint's state 450 * untouched. Make sure we don't leave any old state in the input 451 * endpoint contexts. 
452 */ 453 ctrl_ctx->drop_flags = 0; 454 ctrl_ctx->add_flags = 0; 455 slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx); 456 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); 457 458 /* Endpoint 0 is always valid */ 459 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1)); 460 for (i = 1; i < CDNSP_ENDPOINTS_NUM; ++i) { 461 ep_ctx = cdnsp_get_ep_ctx(&pdev->in_ctx, i); 462 ep_ctx->ep_info = 0; 463 ep_ctx->ep_info2 = 0; 464 ep_ctx->deq = 0; 465 ep_ctx->tx_info = 0; 466 } 467 } 468 469 /* Issue a configure endpoint command and wait for it to finish. */ 470 static int cdnsp_configure_endpoint(struct cdnsp_device *pdev) 471 { 472 int ret; 473 474 cdnsp_queue_configure_endpoint(pdev, pdev->cmd.in_ctx->dma); 475 cdnsp_ring_cmd_db(pdev); 476 ret = cdnsp_wait_for_cmd_compl(pdev); 477 if (ret) { 478 dev_err(pdev->dev, 479 "ERR: unexpected command completion code 0x%x.\n", ret); 480 return -EINVAL; 481 } 482 483 return ret; 484 } 485 486 static void cdnsp_invalidate_ep_events(struct cdnsp_device *pdev, 487 struct cdnsp_ep *pep) 488 { 489 struct cdnsp_segment *segment; 490 union cdnsp_trb *event; 491 u32 cycle_state; 492 u32 data; 493 494 event = pdev->event_ring->dequeue; 495 segment = pdev->event_ring->deq_seg; 496 cycle_state = pdev->event_ring->cycle_state; 497 498 while (1) { 499 data = le32_to_cpu(event->trans_event.flags); 500 501 /* Check the owner of the TRB. 
*/ 502 if ((data & TRB_CYCLE) != cycle_state) 503 break; 504 505 if (TRB_FIELD_TO_TYPE(data) == TRB_TRANSFER && 506 TRB_TO_EP_ID(data) == (pep->idx + 1)) { 507 data |= TRB_EVENT_INVALIDATE; 508 event->trans_event.flags = cpu_to_le32(data); 509 } 510 511 if (cdnsp_last_trb_on_seg(segment, event)) { 512 cycle_state ^= 1; 513 segment = pdev->event_ring->deq_seg->next; 514 event = segment->trbs; 515 } else { 516 event++; 517 } 518 } 519 } 520 521 int cdnsp_wait_for_cmd_compl(struct cdnsp_device *pdev) 522 { 523 struct cdnsp_segment *event_deq_seg; 524 union cdnsp_trb *cmd_trb; 525 dma_addr_t cmd_deq_dma; 526 union cdnsp_trb *event; 527 u32 cycle_state; 528 int ret, val; 529 u64 cmd_dma; 530 u32 flags; 531 532 cmd_trb = pdev->cmd.command_trb; 533 pdev->cmd.status = 0; 534 535 trace_cdnsp_cmd_wait_for_compl(pdev->cmd_ring, &cmd_trb->generic); 536 537 ret = readl_poll_timeout_atomic(&pdev->op_regs->cmd_ring, val, 538 !CMD_RING_BUSY(val), 1, 539 CDNSP_CMD_TIMEOUT); 540 if (ret) { 541 dev_err(pdev->dev, "ERR: Timeout while waiting for command\n"); 542 trace_cdnsp_cmd_timeout(pdev->cmd_ring, &cmd_trb->generic); 543 pdev->cdnsp_state = CDNSP_STATE_DYING; 544 return -ETIMEDOUT; 545 } 546 547 event = pdev->event_ring->dequeue; 548 event_deq_seg = pdev->event_ring->deq_seg; 549 cycle_state = pdev->event_ring->cycle_state; 550 551 cmd_deq_dma = cdnsp_trb_virt_to_dma(pdev->cmd_ring->deq_seg, cmd_trb); 552 if (!cmd_deq_dma) 553 return -EINVAL; 554 555 while (1) { 556 flags = le32_to_cpu(event->event_cmd.flags); 557 558 /* Check the owner of the TRB. */ 559 if ((flags & TRB_CYCLE) != cycle_state) 560 return -EINVAL; 561 562 cmd_dma = le64_to_cpu(event->event_cmd.cmd_trb); 563 564 /* 565 * Check whether the completion event is for last queued 566 * command. 
567 */ 568 if (TRB_FIELD_TO_TYPE(flags) != TRB_COMPLETION || 569 cmd_dma != (u64)cmd_deq_dma) { 570 if (!cdnsp_last_trb_on_seg(event_deq_seg, event)) { 571 event++; 572 continue; 573 } 574 575 if (cdnsp_last_trb_on_ring(pdev->event_ring, 576 event_deq_seg, event)) 577 cycle_state ^= 1; 578 579 event_deq_seg = event_deq_seg->next; 580 event = event_deq_seg->trbs; 581 continue; 582 } 583 584 trace_cdnsp_handle_command(pdev->cmd_ring, &cmd_trb->generic); 585 586 pdev->cmd.status = GET_COMP_CODE(le32_to_cpu(event->event_cmd.status)); 587 if (pdev->cmd.status == COMP_SUCCESS) 588 return 0; 589 590 return -pdev->cmd.status; 591 } 592 } 593 594 int cdnsp_halt_endpoint(struct cdnsp_device *pdev, 595 struct cdnsp_ep *pep, 596 int value) 597 { 598 int ret; 599 600 trace_cdnsp_ep_halt(value ? "Set" : "Clear"); 601 602 if (value) { 603 ret = cdnsp_cmd_stop_ep(pdev, pep); 604 if (ret) 605 return ret; 606 607 if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_STOPPED) { 608 cdnsp_queue_halt_endpoint(pdev, pep->idx); 609 cdnsp_ring_cmd_db(pdev); 610 ret = cdnsp_wait_for_cmd_compl(pdev); 611 } 612 613 pep->ep_state |= EP_HALTED; 614 } else { 615 /* 616 * In device mode driver can call reset endpoint command 617 * from any endpoint state. 
618 */ 619 cdnsp_queue_reset_ep(pdev, pep->idx); 620 cdnsp_ring_cmd_db(pdev); 621 ret = cdnsp_wait_for_cmd_compl(pdev); 622 trace_cdnsp_handle_cmd_reset_ep(pep->out_ctx); 623 624 if (ret) 625 return ret; 626 627 pep->ep_state &= ~EP_HALTED; 628 629 if (pep->idx != 0 && !(pep->ep_state & EP_WEDGE)) 630 cdnsp_ring_doorbell_for_active_rings(pdev, pep); 631 632 pep->ep_state &= ~EP_WEDGE; 633 } 634 635 return 0; 636 } 637 638 static int cdnsp_update_eps_configuration(struct cdnsp_device *pdev, 639 struct cdnsp_ep *pep) 640 { 641 struct cdnsp_input_control_ctx *ctrl_ctx; 642 struct cdnsp_slot_ctx *slot_ctx; 643 int ret = 0; 644 u32 ep_sts; 645 int i; 646 647 ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx); 648 649 /* Don't issue the command if there's no endpoints to update. */ 650 if (ctrl_ctx->add_flags == 0 && ctrl_ctx->drop_flags == 0) 651 return 0; 652 653 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); 654 ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG); 655 ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG)); 656 657 /* Fix up Context Entries field. Minimum value is EP0 == BIT(1). 
*/ 658 slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx); 659 for (i = CDNSP_ENDPOINTS_NUM; i >= 1; i--) { 660 __le32 le32 = cpu_to_le32(BIT(i)); 661 662 if ((pdev->eps[i - 1].ring && !(ctrl_ctx->drop_flags & le32)) || 663 (ctrl_ctx->add_flags & le32) || i == 1) { 664 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); 665 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i)); 666 break; 667 } 668 } 669 670 ep_sts = GET_EP_CTX_STATE(pep->out_ctx); 671 672 if ((ctrl_ctx->add_flags != cpu_to_le32(SLOT_FLAG) && 673 ep_sts == EP_STATE_DISABLED) || 674 (ep_sts != EP_STATE_DISABLED && ctrl_ctx->drop_flags)) 675 ret = cdnsp_configure_endpoint(pdev); 676 677 trace_cdnsp_configure_endpoint(cdnsp_get_slot_ctx(&pdev->out_ctx)); 678 trace_cdnsp_handle_cmd_config_ep(pep->out_ctx); 679 680 cdnsp_zero_in_ctx(pdev); 681 682 return ret; 683 } 684 685 /* 686 * This submits a Reset Device Command, which will set the device state to 0, 687 * set the device address to 0, and disable all the endpoints except the default 688 * control endpoint. The USB core should come back and call 689 * cdnsp_setup_device(), and then re-set up the configuration. 690 */ 691 int cdnsp_reset_device(struct cdnsp_device *pdev) 692 { 693 struct cdnsp_slot_ctx *slot_ctx; 694 int slot_state; 695 int ret, i; 696 697 slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx); 698 slot_ctx->dev_info = 0; 699 pdev->device_address = 0; 700 701 /* If device is not setup, there is no point in resetting it. */ 702 slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx); 703 slot_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)); 704 trace_cdnsp_reset_device(slot_ctx); 705 706 if (slot_state <= SLOT_STATE_DEFAULT && 707 pdev->eps[0].ep_state & EP_HALTED) { 708 cdnsp_halt_endpoint(pdev, &pdev->eps[0], 0); 709 } 710 711 /* 712 * During Reset Device command controller shall transition the 713 * endpoint ep0 to the Running State. 
714 */ 715 pdev->eps[0].ep_state &= ~(EP_STOPPED | EP_HALTED); 716 pdev->eps[0].ep_state |= EP_ENABLED; 717 718 if (slot_state <= SLOT_STATE_DEFAULT) 719 return 0; 720 721 cdnsp_queue_reset_device(pdev); 722 cdnsp_ring_cmd_db(pdev); 723 ret = cdnsp_wait_for_cmd_compl(pdev); 724 725 /* 726 * After Reset Device command all not default endpoints 727 * are in Disabled state. 728 */ 729 for (i = 1; i < CDNSP_ENDPOINTS_NUM; ++i) 730 pdev->eps[i].ep_state |= EP_STOPPED | EP_UNCONFIGURED; 731 732 trace_cdnsp_handle_cmd_reset_dev(slot_ctx); 733 734 if (ret) 735 dev_err(pdev->dev, "Reset device failed with error code %d", 736 ret); 737 738 return ret; 739 } 740 741 /* 742 * Sets the MaxPStreams field and the Linear Stream Array field. 743 * Sets the dequeue pointer to the stream context array. 744 */ 745 static void cdnsp_setup_streams_ep_input_ctx(struct cdnsp_device *pdev, 746 struct cdnsp_ep_ctx *ep_ctx, 747 struct cdnsp_stream_info *stream_info) 748 { 749 u32 max_primary_streams; 750 751 /* MaxPStreams is the number of stream context array entries, not the 752 * number we're actually using. Must be in 2^(MaxPstreams + 1) format. 753 * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 2, fls(0x100) = 3, etc. 754 */ 755 max_primary_streams = fls(stream_info->num_stream_ctxs) - 2; 756 ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK); 757 ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams) 758 | EP_HAS_LSA); 759 ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma); 760 } 761 762 /* 763 * The drivers use this function to prepare a bulk endpoints to use streams. 764 * 765 * Don't allow the call to succeed if endpoint only supports one stream 766 * (which means it doesn't support streams at all). 
767 */ 768 int cdnsp_alloc_streams(struct cdnsp_device *pdev, struct cdnsp_ep *pep) 769 { 770 unsigned int num_streams = usb_ss_max_streams(pep->endpoint.comp_desc); 771 unsigned int num_stream_ctxs; 772 int ret; 773 774 if (num_streams == 0) 775 return 0; 776 777 if (num_streams > STREAM_NUM_STREAMS) 778 return -EINVAL; 779 780 /* 781 * Add two to the number of streams requested to account for 782 * stream 0 that is reserved for controller usage and one additional 783 * for TASK SET FULL response. 784 */ 785 num_streams += 2; 786 787 /* The stream context array size must be a power of two */ 788 num_stream_ctxs = roundup_pow_of_two(num_streams); 789 790 trace_cdnsp_stream_number(pep, num_stream_ctxs, num_streams); 791 792 ret = cdnsp_alloc_stream_info(pdev, pep, num_stream_ctxs, num_streams); 793 if (ret) 794 return ret; 795 796 cdnsp_setup_streams_ep_input_ctx(pdev, pep->in_ctx, &pep->stream_info); 797 798 pep->ep_state |= EP_HAS_STREAMS; 799 pep->stream_info.td_count = 0; 800 pep->stream_info.first_prime_det = 0; 801 802 /* Subtract 1 for stream 0, which drivers can't use. 
*/ 803 return num_streams - 1; 804 } 805 806 int cdnsp_disable_slot(struct cdnsp_device *pdev) 807 { 808 int ret; 809 810 cdnsp_queue_slot_control(pdev, TRB_DISABLE_SLOT); 811 cdnsp_ring_cmd_db(pdev); 812 ret = cdnsp_wait_for_cmd_compl(pdev); 813 814 pdev->slot_id = 0; 815 pdev->active_port = NULL; 816 817 trace_cdnsp_handle_cmd_disable_slot(cdnsp_get_slot_ctx(&pdev->out_ctx)); 818 819 memset(pdev->in_ctx.bytes, 0, CDNSP_CTX_SIZE); 820 memset(pdev->out_ctx.bytes, 0, CDNSP_CTX_SIZE); 821 822 return ret; 823 } 824 825 int cdnsp_enable_slot(struct cdnsp_device *pdev) 826 { 827 struct cdnsp_slot_ctx *slot_ctx; 828 int slot_state; 829 int ret; 830 831 /* If device is not setup, there is no point in resetting it */ 832 slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx); 833 slot_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)); 834 835 if (slot_state != SLOT_STATE_DISABLED) 836 return 0; 837 838 cdnsp_queue_slot_control(pdev, TRB_ENABLE_SLOT); 839 cdnsp_ring_cmd_db(pdev); 840 ret = cdnsp_wait_for_cmd_compl(pdev); 841 if (ret) 842 goto show_trace; 843 844 pdev->slot_id = 1; 845 846 show_trace: 847 trace_cdnsp_handle_cmd_enable_slot(cdnsp_get_slot_ctx(&pdev->out_ctx)); 848 849 return ret; 850 } 851 852 /* 853 * Issue an Address Device command with BSR=0 if setup is SETUP_CONTEXT_ONLY 854 * or with BSR = 1 if set_address is SETUP_CONTEXT_ADDRESS. 
855 */ 856 int cdnsp_setup_device(struct cdnsp_device *pdev, enum cdnsp_setup_dev setup) 857 { 858 struct cdnsp_input_control_ctx *ctrl_ctx; 859 struct cdnsp_slot_ctx *slot_ctx; 860 int dev_state = 0; 861 int ret; 862 863 if (!pdev->slot_id) { 864 trace_cdnsp_slot_id("incorrect"); 865 return -EINVAL; 866 } 867 868 if (!pdev->active_port->port_num) 869 return -EINVAL; 870 871 slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx); 872 dev_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)); 873 874 if (setup == SETUP_CONTEXT_ONLY && dev_state == SLOT_STATE_DEFAULT) { 875 trace_cdnsp_slot_already_in_default(slot_ctx); 876 return 0; 877 } 878 879 slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx); 880 ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx); 881 882 if (!slot_ctx->dev_info || dev_state == SLOT_STATE_DEFAULT) { 883 ret = cdnsp_setup_addressable_priv_dev(pdev); 884 if (ret) 885 return ret; 886 } 887 888 cdnsp_copy_ep0_dequeue_into_input_ctx(pdev); 889 890 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG); 891 ctrl_ctx->drop_flags = 0; 892 893 trace_cdnsp_setup_device_slot(slot_ctx); 894 895 cdnsp_queue_address_device(pdev, pdev->in_ctx.dma, setup); 896 cdnsp_ring_cmd_db(pdev); 897 ret = cdnsp_wait_for_cmd_compl(pdev); 898 899 trace_cdnsp_handle_cmd_addr_dev(cdnsp_get_slot_ctx(&pdev->out_ctx)); 900 901 /* Zero the input context control for later use. 
*/ 902 ctrl_ctx->add_flags = 0; 903 ctrl_ctx->drop_flags = 0; 904 905 return ret; 906 } 907 908 void cdnsp_set_usb2_hardware_lpm(struct cdnsp_device *pdev, 909 struct usb_request *req, 910 int enable) 911 { 912 if (pdev->active_port != &pdev->usb2_port || !pdev->gadget.lpm_capable) 913 return; 914 915 trace_cdnsp_lpm(enable); 916 917 if (enable) 918 writel(PORT_BESL(CDNSP_DEFAULT_BESL) | PORT_L1S_NYET | PORT_HLE, 919 &pdev->active_port->regs->portpmsc); 920 else 921 writel(PORT_L1S_NYET, &pdev->active_port->regs->portpmsc); 922 } 923 924 static int cdnsp_get_frame(struct cdnsp_device *pdev) 925 { 926 return readl(&pdev->run_regs->microframe_index) >> 3; 927 } 928 929 static int cdnsp_gadget_ep_enable(struct usb_ep *ep, 930 const struct usb_endpoint_descriptor *desc) 931 { 932 struct cdnsp_input_control_ctx *ctrl_ctx; 933 struct cdnsp_device *pdev; 934 struct cdnsp_ep *pep; 935 unsigned long flags; 936 u32 added_ctxs; 937 int ret; 938 939 if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT || 940 !desc->wMaxPacketSize) 941 return -EINVAL; 942 943 pep = to_cdnsp_ep(ep); 944 pdev = pep->pdev; 945 pep->ep_state &= ~EP_UNCONFIGURED; 946 947 if (dev_WARN_ONCE(pdev->dev, pep->ep_state & EP_ENABLED, 948 "%s is already enabled\n", pep->name)) 949 return 0; 950 951 spin_lock_irqsave(&pdev->lock, flags); 952 953 added_ctxs = cdnsp_get_endpoint_flag(desc); 954 if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) { 955 dev_err(pdev->dev, "ERROR: Bad endpoint number\n"); 956 ret = -EINVAL; 957 goto unlock; 958 } 959 960 pep->interval = desc->bInterval ? 
BIT(desc->bInterval - 1) : 0; 961 962 if (pdev->gadget.speed == USB_SPEED_FULL) { 963 if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT) 964 pep->interval = desc->bInterval << 3; 965 if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_ISOC) 966 pep->interval = BIT(desc->bInterval - 1) << 3; 967 } 968 969 if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_ISOC) { 970 if (pep->interval > BIT(12)) { 971 dev_err(pdev->dev, "bInterval %d not supported\n", 972 desc->bInterval); 973 ret = -EINVAL; 974 goto unlock; 975 } 976 cdnsp_set_chicken_bits_2(pdev, CHICKEN_XDMA_2_TP_CACHE_DIS); 977 } 978 979 ret = cdnsp_endpoint_init(pdev, pep, GFP_ATOMIC); 980 if (ret) 981 goto unlock; 982 983 ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx); 984 ctrl_ctx->add_flags = cpu_to_le32(added_ctxs); 985 ctrl_ctx->drop_flags = 0; 986 987 ret = cdnsp_update_eps_configuration(pdev, pep); 988 if (ret) { 989 cdnsp_free_endpoint_rings(pdev, pep); 990 goto unlock; 991 } 992 993 pep->ep_state |= EP_ENABLED; 994 pep->ep_state &= ~EP_STOPPED; 995 996 unlock: 997 trace_cdnsp_ep_enable_end(pep, 0); 998 spin_unlock_irqrestore(&pdev->lock, flags); 999 1000 return ret; 1001 } 1002 1003 static int cdnsp_gadget_ep_disable(struct usb_ep *ep) 1004 { 1005 struct cdnsp_input_control_ctx *ctrl_ctx; 1006 struct cdnsp_request *preq; 1007 struct cdnsp_device *pdev; 1008 struct cdnsp_ep *pep; 1009 unsigned long flags; 1010 u32 drop_flag; 1011 int ret = 0; 1012 1013 if (!ep) 1014 return -EINVAL; 1015 1016 pep = to_cdnsp_ep(ep); 1017 pdev = pep->pdev; 1018 1019 spin_lock_irqsave(&pdev->lock, flags); 1020 1021 if (!(pep->ep_state & EP_ENABLED)) { 1022 dev_err(pdev->dev, "%s is already disabled\n", pep->name); 1023 ret = -EINVAL; 1024 goto finish; 1025 } 1026 1027 pep->ep_state |= EP_DIS_IN_RROGRESS; 1028 1029 /* Endpoint was unconfigured by Reset Device command. 
 */
	if (!(pep->ep_state & EP_UNCONFIGURED)) {
		cdnsp_cmd_stop_ep(pdev, pep);
		cdnsp_cmd_flush_ep(pdev, pep);
	}

	/* Remove all queued USB requests. */
	while (!list_empty(&pep->pending_list)) {
		preq = next_request(&pep->pending_list);
		cdnsp_ep_dequeue(pep, preq);
	}

	cdnsp_invalidate_ep_events(pdev, pep);

	/* NOTE(review): EP_DIS_IN_RROGRESS is a (misspelled) flag declared in
	 * a project header; renaming it must be done there, not here.
	 */
	pep->ep_state &= ~EP_DIS_IN_RROGRESS;

	/* Drop this endpoint from the input context before reconfiguring. */
	drop_flag = cdnsp_get_endpoint_flag(pep->endpoint.desc);
	ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);
	ctrl_ctx->drop_flags = cpu_to_le32(drop_flag);
	ctrl_ctx->add_flags = 0;

	cdnsp_endpoint_zero(pdev, pep);

	if (!(pep->ep_state & EP_UNCONFIGURED))
		ret = cdnsp_update_eps_configuration(pdev, pep);

	cdnsp_free_endpoint_rings(pdev, pep);

	pep->ep_state &= ~(EP_ENABLED | EP_UNCONFIGURED);
	pep->ep_state |= EP_STOPPED;

finish:
	trace_cdnsp_ep_disable_end(pep, 0);
	spin_unlock_irqrestore(&pdev->lock, flags);

	return ret;
}

/*
 * Allocate a driver-private request (struct cdnsp_request) for endpoint @ep
 * and hand back the embedded struct usb_request. Returns NULL on OOM.
 */
static struct usb_request *cdnsp_gadget_ep_alloc_request(struct usb_ep *ep,
							 gfp_t gfp_flags)
{
	struct cdnsp_ep *pep = to_cdnsp_ep(ep);
	struct cdnsp_request *preq;

	preq = kzalloc(sizeof(*preq), gfp_flags);
	if (!preq)
		return NULL;

	preq->epnum = pep->number;
	preq->pep = pep;

	trace_cdnsp_alloc_request(preq);

	return &preq->request;
}

/* Free a request obtained from cdnsp_gadget_ep_alloc_request(). */
static void cdnsp_gadget_ep_free_request(struct usb_ep *ep,
					 struct usb_request *request)
{
	struct cdnsp_request *preq = to_cdnsp_request(request);

	trace_cdnsp_free_request(preq);
	kfree(preq);
}

/*
 * Queue @request on endpoint @ep.
 *
 * Returns -EINVAL if @request or @ep is NULL or the endpoint is disabled,
 * otherwise the result of cdnsp_ep_enqueue() (called under pdev->lock).
 */
static int cdnsp_gadget_ep_queue(struct usb_ep *ep,
				 struct usb_request *request,
				 gfp_t gfp_flags)
{
	struct cdnsp_request *preq;
	struct cdnsp_device *pdev;
	struct cdnsp_ep *pep;
	unsigned long flags;
	int ret;

	if (!request || !ep)
		return -EINVAL;

	pep = to_cdnsp_ep(ep);
	pdev = pep->pdev;

	if (!(pep->ep_state & EP_ENABLED)) {
		dev_err(pdev->dev, "%s: can't queue to disabled endpoint\n",
			pep->name);
		return -EINVAL;
	}

	preq = to_cdnsp_request(request);
	spin_lock_irqsave(&pdev->lock, flags);
	ret = cdnsp_ep_enqueue(pep, preq);
	spin_unlock_irqrestore(&pdev->lock, flags);

	return ret;
}

/*
 * Dequeue @request from endpoint @ep.
 *
 * Returns -ESHUTDOWN when the endpoint has no descriptor (disabled),
 * 0 when the endpoint is not enabled (its requests were already dequeued
 * during disable), otherwise the result of cdnsp_ep_dequeue().
 */
static int cdnsp_gadget_ep_dequeue(struct usb_ep *ep,
				   struct usb_request *request)
{
	struct cdnsp_ep *pep = to_cdnsp_ep(ep);
	struct cdnsp_device *pdev = pep->pdev;
	unsigned long flags;
	int ret;

	if (!pep->endpoint.desc) {
		dev_err(pdev->dev,
			"%s: can't dequeue to disabled endpoint\n",
			pep->name);
		return -ESHUTDOWN;
	}

	/* Requests have been dequeued during disabling endpoint. */
	if (!(pep->ep_state & EP_ENABLED))
		return 0;

	spin_lock_irqsave(&pdev->lock, flags);
	ret = cdnsp_ep_dequeue(pep, to_cdnsp_request(request));
	spin_unlock_irqrestore(&pdev->lock, flags);

	return ret;
}

/*
 * Set (@value != 0) or clear (@value == 0) the halt condition on @ep.
 *
 * Halting is refused with -EAGAIN while a transfer is still pending on the
 * endpoint; the caller is expected to retry once the transfer completes.
 */
static int cdnsp_gadget_ep_set_halt(struct usb_ep *ep, int value)
{
	struct cdnsp_ep *pep = to_cdnsp_ep(ep);
	struct cdnsp_device *pdev = pep->pdev;
	struct cdnsp_request *preq;
	unsigned long flags = 0;
	int ret;

	spin_lock_irqsave(&pdev->lock, flags);

	preq = next_request(&pep->pending_list);
	if (value) {
		if (preq) {
			trace_cdnsp_ep_busy_try_halt_again(pep, 0);
			ret = -EAGAIN;
			goto done;
		}
	}

	ret = cdnsp_halt_endpoint(pdev, pep, value);

done:
	spin_unlock_irqrestore(&pdev->lock, flags);
	return ret;
}

/*
 * Wedge @ep: halt it and mark it EP_WEDGE so the halt cannot be cleared
 * by a ClearFeature(HALT) request from the host.
 */
static int cdnsp_gadget_ep_set_wedge(struct usb_ep *ep)
{
	struct cdnsp_ep *pep = to_cdnsp_ep(ep);
	struct cdnsp_device *pdev = pep->pdev;
	unsigned long flags = 0;
	int ret;

	spin_lock_irqsave(&pdev->lock, flags);
	pep->ep_state |= EP_WEDGE;
	ret = cdnsp_halt_endpoint(pdev, pep, 1);
	spin_unlock_irqrestore(&pdev->lock, flags);

	return ret;
}

/*
 * NOTE(review): ep0 ops are currently identical to the generic endpoint ops;
 * they are kept as a separate table so ep0 behavior can diverge later.
 */
static const struct usb_ep_ops cdnsp_gadget_ep0_ops = {
	.enable		= cdnsp_gadget_ep_enable,
	.disable	= cdnsp_gadget_ep_disable,
	.alloc_request	= cdnsp_gadget_ep_alloc_request,
	.free_request	= cdnsp_gadget_ep_free_request,
	.queue		= cdnsp_gadget_ep_queue,
	.dequeue	= cdnsp_gadget_ep_dequeue,
	.set_halt	= cdnsp_gadget_ep_set_halt,
	.set_wedge	= cdnsp_gadget_ep_set_wedge,
};

static const struct usb_ep_ops cdnsp_gadget_ep_ops = {
	.enable		= cdnsp_gadget_ep_enable,
	.disable	= cdnsp_gadget_ep_disable,
	.alloc_request	= cdnsp_gadget_ep_alloc_request,
	.free_request	= cdnsp_gadget_ep_free_request,
	.queue		= cdnsp_gadget_ep_queue,
	.dequeue	= cdnsp_gadget_ep_dequeue,
	.set_halt	= cdnsp_gadget_ep_set_halt,
	.set_wedge	= cdnsp_gadget_ep_set_wedge,
};

/*
 * Complete @preq on @pep with @status: unlink it, unmap its DMA buffer and
 * give it back to the gadget driver.
 *
 * pdev->lock must be held on entry; it is dropped around the gadget driver's
 * completion callback (which may re-queue requests) and re-acquired after.
 * The driver-internal ep0 request (pdev->ep0_preq) is never handed to the
 * gadget driver.
 */
void cdnsp_gadget_giveback(struct cdnsp_ep *pep,
			   struct cdnsp_request *preq,
			   int status)
{
	struct cdnsp_device *pdev = pep->pdev;

	list_del(&preq->list);

	if (preq->request.status == -EINPROGRESS)
		preq->request.status = status;

	usb_gadget_unmap_request_by_dev(pdev->dev, &preq->request,
					preq->direction);

	trace_cdnsp_request_giveback(preq);

	if (preq != &pdev->ep0_preq) {
		spin_unlock(&pdev->lock);
		usb_gadget_giveback_request(&pep->endpoint, &preq->request);
		spin_lock(&pdev->lock);
	}
}

/*
 * Endpoint 0 descriptor; wMaxPacketSize is filled in at run time based on
 * the negotiated connection speed (512 for SS/SSP, 64 for HS/FS).
 */
static struct usb_endpoint_descriptor cdnsp_gadget_ep0_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bmAttributes =		USB_ENDPOINT_XFER_CONTROL,
};

/*
 * Bring the controller up and make it visible on the bus at the requested
 * maximum @speed: program interrupt moderation, select SS vs SSP support,
 * put the ports into RxDetect, start the controller and enable interrupts.
 *
 * Returns 0 on success or -ENODEV when the controller fails to start.
 */
static int cdnsp_run(struct cdnsp_device *pdev,
		     enum usb_device_speed speed)
{
	u32 fs_speed = 0;
	u64 temp_64;
	u32 temp;
	int ret;

	temp_64 = cdnsp_read_64(&pdev->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;

	/*
	 * Program the interrupt moderation interval.
	 * NOTE(review): the "/ 250" presumably converts nanoseconds into the
	 * controller's 250ns IMOD units (as on xHCI) — confirm against the
	 * controller spec.
	 */
	temp = readl(&pdev->ir_set->irq_control);
	temp &= ~IMOD_INTERVAL_MASK;
	temp |= ((IMOD_DEFAULT_INTERVAL / 250) & IMOD_INTERVAL_MASK);
	writel(temp, &pdev->ir_set->irq_control);

	temp = readl(&pdev->port3x_regs->mode_addr);

	switch (speed) {
	case USB_SPEED_SUPER_PLUS:
		temp |= CFG_3XPORT_SSP_SUPPORT;
		break;
	case USB_SPEED_SUPER:
		temp &= ~CFG_3XPORT_SSP_SUPPORT;
		break;
	case USB_SPEED_HIGH:
		break;
	case USB_SPEED_FULL:
		/* Force the USB2 port to full speed. */
		fs_speed = PORT_REG6_FORCE_FS;
		break;
	default:
		dev_err(pdev->dev, "invalid maximum_speed parameter %d\n",
			speed);
		fallthrough;
	case USB_SPEED_UNKNOWN:
		/* Default to superspeed. */
		speed = USB_SPEED_SUPER;
		break;
	}

	if (speed >= USB_SPEED_SUPER) {
		writel(temp, &pdev->port3x_regs->mode_addr);
		cdnsp_set_link_state(pdev, &pdev->usb3_port.regs->portsc,
				     XDEV_RXDETECT);
	} else {
		/* Speeds below SS: keep the USB3 port disabled. */
		cdnsp_disable_port(pdev, &pdev->usb3_port.regs->portsc);
	}

	cdnsp_set_link_state(pdev, &pdev->usb2_port.regs->portsc,
			     XDEV_RXDETECT);

	cdnsp_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);

	writel(PORT_REG6_L1_L0_HW_EN | fs_speed, &pdev->port20_regs->port_reg6);

	ret = cdnsp_start(pdev);
	if (ret) {
		ret = -ENODEV;
		goto err;
	}

	/* Enable controller interrupts ... */
	temp = readl(&pdev->op_regs->command);
	temp |= (CMD_INTE);
	writel(temp, &pdev->op_regs->command);

	/* ... and the interrupter. */
	temp = readl(&pdev->ir_set->irq_pending);
	writel(IMAN_IE_SET(temp), &pdev->ir_set->irq_pending);

	trace_cdnsp_init("Controller ready to work");
	return 0;
err:
	cdnsp_halt(pdev);
	return ret;
}

/*
 * usb_gadget_ops.udc_start callback: bind @driver and start the controller,
 * limiting the speed to min(driver->max_speed, gadget->max_speed).
 */
static int cdnsp_gadget_udc_start(struct usb_gadget *g,
				  struct usb_gadget_driver *driver)
{
	enum usb_device_speed max_speed = driver->max_speed;
	struct cdnsp_device *pdev = gadget_to_cdnsp(g);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pdev->lock, flags);
	pdev->gadget_driver = driver;

	/* limit speed if necessary */
	max_speed = min(driver->max_speed, g->max_speed);
	ret = cdnsp_run(pdev, max_speed);

	spin_unlock_irqrestore(&pdev->lock, flags);

	return ret;
}

/*
 * Update Event Ring Dequeue Pointer:
 * - When all events have finished
 * - To avoid "Event Ring Full Error" condition
 */
void cdnsp_update_erst_dequeue(struct cdnsp_device *pdev,
			       union cdnsp_trb *event_ring_deq,
			       u8 clear_ehb)
{
	u64 temp_64;
	dma_addr_t deq;

	temp_64 = cdnsp_read_64(&pdev->ir_set->erst_dequeue);

	/* If necessary, update the HW's version of the event ring deq ptr. */
	if (event_ring_deq != pdev->event_ring->dequeue) {
		deq = cdnsp_trb_virt_to_dma(pdev->event_ring->deq_seg,
					    pdev->event_ring->dequeue);
		/* Keep the low flag bits, replace the pointer bits. */
		temp_64 &= ERST_PTR_MASK;
		temp_64 |= ((u64)deq & (u64)~ERST_PTR_MASK);
	}

	/* Clear the event handler busy flag (RW1C). */
	if (clear_ehb)
		temp_64 |= ERST_EHB;
	else
		temp_64 &= ~ERST_EHB;

	cdnsp_write_64(temp_64, &pdev->ir_set->erst_dequeue);
}

/*
 * Reset the command ring to its initial state: zero all TRBs in every
 * segment (the last TRB of each segment — presumably the link TRB — is
 * preserved) and re-program the Command Ring Control register.
 */
static void cdnsp_clear_cmd_ring(struct cdnsp_device *pdev)
{
	struct cdnsp_segment *seg;
	u64 val_64;
	int i;

	cdnsp_initialize_ring_info(pdev->cmd_ring);

	seg = pdev->cmd_ring->first_seg;
	for (i = 0; i < pdev->cmd_ring->num_segs; i++) {
		memset(seg->trbs, 0,
		       sizeof(union cdnsp_trb) * (TRBS_PER_SEGMENT - 1));
		seg = seg->next;
	}

	/* Set the address in the Command Ring Control register. */
	val_64 = cdnsp_read_64(&pdev->op_regs->cmd_ring);
	val_64 = (val_64 & (u64)CMD_RING_RSVD_BITS) |
		 (pdev->cmd_ring->first_seg->dma & (u64)~CMD_RING_RSVD_BITS) |
		 pdev->cmd_ring->cycle_state;
	cdnsp_write_64(val_64, &pdev->op_regs->cmd_ring);
}

/*
 * Walk the event ring, consuming (discarding) every event the controller
 * owns, then write back the final dequeue pointer with EHB cleared.
 */
static void cdnsp_consume_all_events(struct cdnsp_device *pdev)
{
	struct cdnsp_segment *event_deq_seg;
	union cdnsp_trb *event_ring_deq;
	union cdnsp_trb *event;
	u32 cycle_bit;

	event_ring_deq = pdev->event_ring->dequeue;
	event_deq_seg = pdev->event_ring->deq_seg;
	event = pdev->event_ring->dequeue;

	/* Update ring dequeue pointer. */
	while (1) {
		cycle_bit = (le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE);

		/* Does the controller or driver own the TRB? */
		if (cycle_bit != pdev->event_ring->cycle_state)
			break;

		cdnsp_inc_deq(pdev, pdev->event_ring);

		if (!cdnsp_last_trb_on_seg(event_deq_seg, event)) {
			event++;
			continue;
		}

		/* Wrapping past the last segment toggles the cycle state. */
		if (cdnsp_last_trb_on_ring(pdev->event_ring, event_deq_seg,
					   event))
			cycle_bit ^= 1;

		event_deq_seg = event_deq_seg->next;
		event = event_deq_seg->trbs;
	}

	cdnsp_update_erst_dequeue(pdev, event_ring_deq, 1);
}

/*
 * Quiesce the controller: flush ep0, drop the internal ep0 request, disable
 * both ports and the slot, halt the controller, mask and acknowledge
 * interrupts, drain the event ring and reset the command ring.
 *
 * Called with pdev->lock held.
 */
static void cdnsp_stop(struct cdnsp_device *pdev)
{
	u32 temp;

	cdnsp_cmd_flush_ep(pdev, &pdev->eps[0]);

	/* Remove internally queued request for ep0. */
	if (!list_empty(&pdev->eps[0].pending_list)) {
		struct cdnsp_request *req;

		req = next_request(&pdev->eps[0].pending_list);
		if (req == &pdev->ep0_preq)
			cdnsp_ep_dequeue(&pdev->eps[0], req);
	}

	cdnsp_disable_port(pdev, &pdev->usb2_port.regs->portsc);
	cdnsp_disable_port(pdev, &pdev->usb3_port.regs->portsc);
	cdnsp_disable_slot(pdev);
	cdnsp_halt(pdev);

	/*
	 * Acknowledge any pending event interrupt and mask the interrupter.
	 * NOTE(review): ~0x1fff presumably preserves the RW bits above the
	 * RW1C status bits — confirm against the register layout.
	 */
	temp = readl(&pdev->op_regs->status);
	writel((temp & ~0x1fff) | STS_EINT, &pdev->op_regs->status);
	temp = readl(&pdev->ir_set->irq_pending);
	writel(IMAN_IE_CLEAR(temp), &pdev->ir_set->irq_pending);

	cdnsp_clear_port_change_bit(pdev, &pdev->usb2_port.regs->portsc);
	cdnsp_clear_port_change_bit(pdev, &pdev->usb3_port.regs->portsc);

	/* Clear interrupt line */
	temp = readl(&pdev->ir_set->irq_pending);
	temp |= IMAN_IP;
	writel(temp, &pdev->ir_set->irq_pending);

	cdnsp_consume_all_events(pdev);
	cdnsp_clear_cmd_ring(pdev);

	trace_cdnsp_exit("Controller stopped.");
}

/*
 * Stop controller.
 * This function is called by the gadget core when the driver is removed.
 * Disable slot, disable IRQs, and quiesce the controller.
 */
static int cdnsp_gadget_udc_stop(struct usb_gadget *g)
{
	struct cdnsp_device *pdev = gadget_to_cdnsp(g);
	unsigned long flags;

	spin_lock_irqsave(&pdev->lock, flags);
	cdnsp_stop(pdev);
	pdev->gadget_driver = NULL;
	spin_unlock_irqrestore(&pdev->lock, flags);

	return 0;
}

/* usb_gadget_ops.get_frame: return the current (micro)frame number. */
static int cdnsp_gadget_get_frame(struct usb_gadget *g)
{
	struct cdnsp_device *pdev = gadget_to_cdnsp(g);

	return cdnsp_get_frame(pdev);
}

/*
 * Initiate remote wakeup on the active port: drive the link back to U0 and
 * mark the wakeup as pending. Bails out when the host has not enabled the
 * remote wakeup feature (RWE for USB2 in U2, may_wakeup for U3).
 *
 * Called with pdev->lock held.
 */
static void __cdnsp_gadget_wakeup(struct cdnsp_device *pdev)
{
	struct cdnsp_port_regs __iomem *port_regs;
	u32 portpm, portsc;

	port_regs = pdev->active_port->regs;
	portsc = readl(&port_regs->portsc) & PORT_PLS_MASK;

	/* Remote wakeup feature is not enabled by host. */
	if (pdev->gadget.speed < USB_SPEED_SUPER && portsc == XDEV_U2) {
		portpm = readl(&port_regs->portpmsc);

		if (!(portpm & PORT_RWE))
			return;
	}

	if (portsc == XDEV_U3 && !pdev->may_wakeup)
		return;

	cdnsp_set_link_state(pdev, &port_regs->portsc, XDEV_U0);

	pdev->cdnsp_state |= CDNSP_WAKEUP_PENDING;
}

/* usb_gadget_ops.wakeup: locked wrapper around __cdnsp_gadget_wakeup(). */
static int cdnsp_gadget_wakeup(struct usb_gadget *g)
{
	struct cdnsp_device *pdev = gadget_to_cdnsp(g);
	unsigned long flags;

	spin_lock_irqsave(&pdev->lock, flags);
	__cdnsp_gadget_wakeup(pdev);
	spin_unlock_irqrestore(&pdev->lock, flags);

	return 0;
}

/* usb_gadget_ops.set_selfpowered: record the self-powered state. */
static int cdnsp_gadget_set_selfpowered(struct usb_gadget *g,
					int is_selfpowered)
{
	struct cdnsp_device *pdev = gadget_to_cdnsp(g);
	unsigned long flags;

	spin_lock_irqsave(&pdev->lock, flags);
	g->is_selfpowered = !!is_selfpowered;
	spin_unlock_irqrestore(&pdev->lock, flags);

	return 0;
}

/*
 * usb_gadget_ops.pullup: connect/disconnect from the bus by asserting or
 * de-asserting VBUS sensing; the device is reset before disconnecting.
 */
static int cdnsp_gadget_pullup(struct usb_gadget *gadget, int is_on)
{
	struct cdnsp_device *pdev = gadget_to_cdnsp(gadget);
	struct cdns *cdns = dev_get_drvdata(pdev->dev);

	trace_cdnsp_pullup(is_on);

	if (!is_on) {
		cdnsp_reset_device(pdev);
		cdns_clear_vbus(cdns);
	} else {
		cdns_set_vbus(cdns);
	}
	return 0;
}

static const struct usb_gadget_ops cdnsp_gadget_ops = {
	.get_frame		= cdnsp_gadget_get_frame,
	.wakeup			= cdnsp_gadget_wakeup,
	.set_selfpowered	= cdnsp_gadget_set_selfpowered,
	.pullup			= cdnsp_gadget_pullup,
	.udc_start		= cdnsp_gadget_udc_start,
	.udc_stop		= cdnsp_gadget_udc_stop,
};

/*
 * Read the on-chip buffering capability (XBUF extended capability) for
 * endpoint @pep and fill in pep->buffering / pep->buffering_period.
 * OUT endpoints use the RX tag-mask registers; IN endpoints index into the
 * per-endpoint TX tag-mask registers.
 */
static void cdnsp_get_ep_buffering(struct cdnsp_device *pdev,
				   struct cdnsp_ep *pep)
{
	void __iomem *reg = &pdev->cap_regs->hc_capbase;
	int endpoints;

	reg += cdnsp_find_next_ext_cap(reg, 0, XBUF_CAP_ID);

	if (!pep->direction) {
		pep->buffering = readl(reg + XBUF_RX_TAG_MASK_0_OFFSET);
		pep->buffering_period = readl(reg + XBUF_RX_TAG_MASK_1_OFFSET);
		pep->buffering = (pep->buffering + 1) / 2;
		pep->buffering_period = (pep->buffering_period + 1) / 2;
		return;
	}

	endpoints = HCS_ENDPOINTS(pdev->hcs_params1) / 2;

	/* Set to XBUF_TX_TAG_MASK_0 register. */
	reg += XBUF_TX_CMD_OFFSET + (endpoints * 2 + 2) * sizeof(u32);
	/* Set reg to XBUF_TX_TAG_MASK_N related with this endpoint. */
	reg += pep->number * sizeof(u32) * 2;

	pep->buffering = (readl(reg) + 1) / 2;
	pep->buffering_period = pep->buffering;
}

/*
 * Populate pdev->eps[] and the gadget's ep_list from the controller's
 * capabilities. Endpoint 0 is bidirectional and uses the dedicated ep0 ops;
 * all others advertise iso/bulk/int support in one direction.
 *
 * Returns 0 on success or -EINVAL when the controller's stream support is
 * below the driver's minimum.
 */
static int cdnsp_gadget_init_endpoints(struct cdnsp_device *pdev)
{
	int max_streams = HCC_MAX_PSA(pdev->hcc_params);
	struct cdnsp_ep *pep;
	int i;

	INIT_LIST_HEAD(&pdev->gadget.ep_list);

	if (max_streams < STREAM_LOG_STREAMS) {
		dev_err(pdev->dev, "Stream size %d not supported\n",
			max_streams);
		return -EINVAL;
	}

	max_streams = STREAM_LOG_STREAMS;

	for (i = 0; i < CDNSP_ENDPOINTS_NUM; i++) {
		bool direction = !(i & 1); /* Start from OUT endpoint. */
		u8 epnum = ((i + 1) >> 1);

		if (!CDNSP_IF_EP_EXIST(pdev, epnum, direction))
			continue;

		pep = &pdev->eps[i];
		pep->pdev = pdev;
		pep->number = epnum;
		pep->direction = direction; /* 0 for OUT, 1 for IN. */

		/*
		 * Ep0 is bidirectional, so ep0in and ep0out are represented by
		 * pdev->eps[0]
		 */
		if (epnum == 0) {
			snprintf(pep->name, sizeof(pep->name), "ep%d%s",
				 epnum, "BiDir");

			pep->idx = 0;
			usb_ep_set_maxpacket_limit(&pep->endpoint, 512);
			pep->endpoint.maxburst = 1;
			pep->endpoint.ops = &cdnsp_gadget_ep0_ops;
			pep->endpoint.desc = &cdnsp_gadget_ep0_desc;
			pep->endpoint.comp_desc = NULL;
			pep->endpoint.caps.type_control = true;
			pep->endpoint.caps.dir_in = true;
			pep->endpoint.caps.dir_out = true;

			pdev->ep0_preq.epnum = pep->number;
			pdev->ep0_preq.pep = pep;
			pdev->gadget.ep0 = &pep->endpoint;
		} else {
			snprintf(pep->name, sizeof(pep->name), "ep%d%s",
				 epnum, (pep->direction) ? "in" : "out");

			pep->idx = (epnum * 2 + (direction ? 1 : 0)) - 1;
			usb_ep_set_maxpacket_limit(&pep->endpoint, 1024);

			pep->endpoint.max_streams = max_streams;
			pep->endpoint.ops = &cdnsp_gadget_ep_ops;
			list_add_tail(&pep->endpoint.ep_list,
				      &pdev->gadget.ep_list);

			pep->endpoint.caps.type_iso = true;
			pep->endpoint.caps.type_bulk = true;
			pep->endpoint.caps.type_int = true;

			pep->endpoint.caps.dir_in = direction;
			pep->endpoint.caps.dir_out = !direction;
		}

		pep->endpoint.name = pep->name;
		pep->in_ctx = cdnsp_get_ep_ctx(&pdev->in_ctx, pep->idx);
		pep->out_ctx = cdnsp_get_ep_ctx(&pdev->out_ctx, pep->idx);
		cdnsp_get_ep_buffering(pdev, pep);

		dev_dbg(pdev->dev, "Init %s, MPS: %04x SupType: "
			"CTRL: %s, INT: %s, BULK: %s, ISOC %s, "
			"SupDir IN: %s, OUT: %s\n",
			pep->name, 1024,
			(pep->endpoint.caps.type_control) ? "yes" : "no",
			(pep->endpoint.caps.type_int) ? "yes" : "no",
			(pep->endpoint.caps.type_bulk) ? "yes" : "no",
			(pep->endpoint.caps.type_iso) ? "yes" : "no",
			(pep->endpoint.caps.dir_in) ? "yes" : "no",
			(pep->endpoint.caps.dir_out) ? "yes" : "no");

		INIT_LIST_HEAD(&pep->pending_list);
	}

	return 0;
}

/*
 * Undo cdnsp_gadget_init_endpoints(): unlink every non-ep0 endpoint that
 * was initialized (out_ctx set) from the gadget's ep_list.
 */
static void cdnsp_gadget_free_endpoints(struct cdnsp_device *pdev)
{
	struct cdnsp_ep *pep;
	int i;

	for (i = 0; i < CDNSP_ENDPOINTS_NUM; i++) {
		pep = &pdev->eps[i];
		if (pep->number != 0 && pep->out_ctx)
			list_del(&pep->endpoint.ep_list);
	}
}

/*
 * Notify the gadget driver of a disconnect and reset the gadget state.
 * pdev->lock must be held; it is dropped around the driver callback.
 */
void cdnsp_disconnect_gadget(struct cdnsp_device *pdev)
{
	pdev->cdnsp_state |= CDNSP_STATE_DISCONNECT_PENDING;

	if (pdev->gadget_driver && pdev->gadget_driver->disconnect) {
		spin_unlock(&pdev->lock);
		pdev->gadget_driver->disconnect(&pdev->gadget);
		spin_lock(&pdev->lock);
	}

	pdev->gadget.speed = USB_SPEED_UNKNOWN;
	usb_gadget_set_state(&pdev->gadget, USB_STATE_NOTATTACHED);

	pdev->cdnsp_state &= ~CDNSP_STATE_DISCONNECT_PENDING;
}

/*
 * Forward a bus suspend to the gadget driver.
 * pdev->lock must be held; it is dropped around the driver callback.
 */
void cdnsp_suspend_gadget(struct cdnsp_device *pdev)
{
	if (pdev->gadget_driver && pdev->gadget_driver->suspend) {
		spin_unlock(&pdev->lock);
		pdev->gadget_driver->suspend(&pdev->gadget);
		spin_lock(&pdev->lock);
	}
}

/*
 * Forward a bus resume to the gadget driver.
 * pdev->lock must be held; it is dropped around the driver callback.
 */
void cdnsp_resume_gadget(struct cdnsp_device *pdev)
{
	if (pdev->gadget_driver && pdev->gadget_driver->resume) {
		spin_unlock(&pdev->lock);
		pdev->gadget_driver->resume(&pdev->gadget);
		spin_lock(&pdev->lock);
	}
}

/*
 * Handle a USB bus reset: reset the device, latch the new connection speed
 * from PORTSC, notify the UDC core, and size ep0's max packet accordingly
 * (512 for SS/SSP, 64 for HS/FS; low speed is not supported).
 *
 * Called with pdev->lock held; it is dropped around usb_gadget_udc_reset().
 */
void cdnsp_irq_reset(struct cdnsp_device *pdev)
{
	struct cdnsp_port_regs __iomem *port_regs;

	cdnsp_reset_device(pdev);

	port_regs = pdev->active_port->regs;
	pdev->gadget.speed = cdnsp_port_speed(readl(port_regs));

	spin_unlock(&pdev->lock);
	usb_gadget_udc_reset(&pdev->gadget, pdev->gadget_driver);
	spin_lock(&pdev->lock);

	switch (pdev->gadget.speed) {
	case USB_SPEED_SUPER_PLUS:
	case USB_SPEED_SUPER:
		cdnsp_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
		pdev->gadget.ep0->maxpacket = 512;
		break;
	case USB_SPEED_HIGH:
	case USB_SPEED_FULL:
		cdnsp_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		pdev->gadget.ep0->maxpacket = 64;
		break;
	default:
		/* Low speed is not supported. */
		dev_err(pdev->dev, "Unknown device speed\n");
		break;
	}

	cdnsp_clear_chicken_bits_2(pdev, CHICKEN_XDMA_2_TP_CACHE_DIS);
	cdnsp_setup_device(pdev, SETUP_CONTEXT_ONLY);
	usb_gadget_set_state(&pdev->gadget, USB_STATE_DEFAULT);
}

/*
 * Locate the RTL revision extended capability, cache its address in
 * pdev->rev_cap and log the controller revision/capability registers.
 */
static void cdnsp_get_rev_cap(struct cdnsp_device *pdev)
{
	void __iomem *reg = &pdev->cap_regs->hc_capbase;

	reg += cdnsp_find_next_ext_cap(reg, 0, RTL_REV_CAP);
	pdev->rev_cap = reg;

	dev_info(pdev->dev, "Rev: %08x/%08x, eps: %08x, buff: %08x/%08x\n",
		 readl(&pdev->rev_cap->ctrl_revision),
		 readl(&pdev->rev_cap->rtl_revision),
		 readl(&pdev->rev_cap->ep_supported),
		 readl(&pdev->rev_cap->rx_buff_size),
		 readl(&pdev->rev_cap->tx_buff_size));
}

/*
 * Generic controller setup: map the register regions, cache the read-only
 * capability registers, halt and reset the controller, configure the DMA
 * mask (64-bit when supported, else 32-bit), initialize driver memory and
 * apply the U1 clock-gating workaround.
 *
 * Returns 0 on success or a negative errno.
 */
static int cdnsp_gen_setup(struct cdnsp_device *pdev)
{
	int ret;
	u32 reg;

	pdev->cap_regs = pdev->regs;
	pdev->op_regs = pdev->regs +
		HC_LENGTH(readl(&pdev->cap_regs->hc_capbase));
	pdev->run_regs = pdev->regs +
		(readl(&pdev->cap_regs->run_regs_off) & RTSOFF_MASK);

	/* Cache read-only capability registers */
	pdev->hcs_params1 = readl(&pdev->cap_regs->hcs_params1);
	pdev->hcc_params = readl(&pdev->cap_regs->hc_capbase);
	pdev->hci_version = HC_VERSION(pdev->hcc_params);
	pdev->hcc_params = readl(&pdev->cap_regs->hcc_params);

	cdnsp_get_rev_cap(pdev);

	/* Make sure the Device Controller is halted. */
	ret = cdnsp_halt(pdev);
	if (ret)
		return ret;

	/* Reset the internal controller memory state and registers. */
	ret = cdnsp_reset(pdev);
	if (ret)
		return ret;

	/*
	 * Set dma_mask and coherent_dma_mask to 64-bits,
	 * if controller supports 64-bit addressing.
	 */
	if (HCC_64BIT_ADDR(pdev->hcc_params) &&
	    !dma_set_mask(pdev->dev, DMA_BIT_MASK(64))) {
		dev_dbg(pdev->dev, "Enabling 64-bit DMA addresses.\n");
		dma_set_coherent_mask(pdev->dev, DMA_BIT_MASK(64));
	} else {
		/*
		 * This is to avoid error in cases where a 32-bit USB
		 * controller is used on a 64-bit capable system.
		 */
		ret = dma_set_mask(pdev->dev, DMA_BIT_MASK(32));
		if (ret)
			return ret;

		dev_dbg(pdev->dev, "Enabling 32-bit DMA addresses.\n");
		dma_set_coherent_mask(pdev->dev, DMA_BIT_MASK(32));
	}

	spin_lock_init(&pdev->lock);

	ret = cdnsp_mem_init(pdev);
	if (ret)
		return ret;

	/*
	 * Software workaround for U1: after transition to U1 the controller
	 * starts gating the clock, which in some cases can leave the
	 * controller stuck. Disable U1 PIPE clock gating to avoid it.
	 */
	reg = readl(&pdev->port3x_regs->mode_2);
	reg &= ~CFG_3XPORT_U1_PIPE_CLK_GATE_EN;
	writel(reg, &pdev->port3x_regs->mode_2);

	return 0;
}

/*
 * Role-driver start callback: allocate and initialize the cdnsp device,
 * validate the requested maximum speed, register the UDC and request the
 * (threaded) interrupt handler. On failure, unwinds via the goto chain.
 *
 * Returns 0 on success or a negative errno.
 */
static int __cdnsp_gadget_init(struct cdns *cdns)
{
	struct cdnsp_device *pdev;
	u32 max_speed;
	int ret = -ENOMEM;

	cdns_drd_gadget_on(cdns);

	pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
	if (!pdev)
		return -ENOMEM;

	pm_runtime_get_sync(cdns->dev);

	cdns->gadget_dev = pdev;
	pdev->dev = cdns->dev;
	pdev->regs = cdns->dev_regs;
	max_speed = usb_get_maximum_speed(cdns->dev);

	switch (max_speed) {
	case USB_SPEED_FULL:
	case USB_SPEED_HIGH:
	case USB_SPEED_SUPER:
	case USB_SPEED_SUPER_PLUS:
		break;
	default:
		dev_err(cdns->dev, "invalid speed parameter %d\n", max_speed);
		fallthrough;
	case USB_SPEED_UNKNOWN:
		/* Default to SSP */
		max_speed = USB_SPEED_SUPER_PLUS;
		break;
	}

	pdev->gadget.ops = &cdnsp_gadget_ops;
	pdev->gadget.name = "cdnsp-gadget";
	pdev->gadget.speed = USB_SPEED_UNKNOWN;
	pdev->gadget.sg_supported = 1;
	pdev->gadget.max_speed = USB_SPEED_SUPER_PLUS;
	pdev->gadget.lpm_capable = 1;

	pdev->setup_buf = kzalloc(CDNSP_EP0_SETUP_SIZE, GFP_KERNEL);
	if (!pdev->setup_buf)
		goto free_pdev;

	/*
	 * The controller supports unaligned buffers, but aligned buffers
	 * should improve performance.
	 */
	pdev->gadget.quirk_ep_out_aligned_size = true;

	ret = cdnsp_gen_setup(pdev);
	if (ret) {
		dev_err(pdev->dev, "Generic initialization failed %d\n", ret);
		goto free_setup;
	}

	ret = cdnsp_gadget_init_endpoints(pdev);
	if (ret) {
		dev_err(pdev->dev, "failed to initialize endpoints\n");
		goto halt_pdev;
	}

	ret = usb_add_gadget_udc(pdev->dev, &pdev->gadget);
	if (ret) {
		dev_err(pdev->dev, "failed to register udc\n");
		goto free_endpoints;
	}

	ret = devm_request_threaded_irq(pdev->dev, cdns->dev_irq,
					cdnsp_irq_handler,
					cdnsp_thread_irq_handler, IRQF_SHARED,
					dev_name(pdev->dev), pdev);
	if (ret)
		goto del_gadget;

	return 0;

del_gadget:
	usb_del_gadget_udc(&pdev->gadget);
free_endpoints:
	cdnsp_gadget_free_endpoints(pdev);
halt_pdev:
	cdnsp_halt(pdev);
	cdnsp_reset(pdev);
	cdnsp_mem_cleanup(pdev);
free_setup:
	kfree(pdev->setup_buf);
free_pdev:
	kfree(pdev);

	return ret;
}

/*
 * Role-driver stop callback: release the IRQ, drop the runtime-PM
 * reference, unregister the UDC and free all driver resources.
 */
static void cdnsp_gadget_exit(struct cdns *cdns)
{
	struct cdnsp_device *pdev = cdns->gadget_dev;

	devm_free_irq(pdev->dev, cdns->dev_irq, pdev);
	pm_runtime_mark_last_busy(cdns->dev);
	pm_runtime_put_autosuspend(cdns->dev);
	usb_del_gadget_udc(&pdev->gadget);
	cdnsp_gadget_free_endpoints(pdev);
	cdnsp_mem_cleanup(pdev);
	kfree(pdev);
	cdns->gadget_dev = NULL;
	cdns_drd_gadget_off(cdns);
}

/*
 * Role-driver suspend callback: disconnect the gadget and quiesce the
 * controller unless the link is already in U3 (suspended).
 */
static int cdnsp_gadget_suspend(struct cdns *cdns, bool do_wakeup)
{
	struct cdnsp_device *pdev = cdns->gadget_dev;
	unsigned long flags;

	if (pdev->link_state == XDEV_U3)
		return 0;

	spin_lock_irqsave(&pdev->lock, flags);
	cdnsp_disconnect_gadget(pdev);
	cdnsp_stop(pdev);
	spin_unlock_irqrestore(&pdev->lock, flags);

	return 0;
}

/*
 * Role-driver resume callback: restart the controller (speed-limited to the
 * bound gadget driver's maximum) and, if the link was in U3, initiate a
 * remote wakeup. No-op when no gadget driver is bound.
 */
static int cdnsp_gadget_resume(struct cdns *cdns, bool hibernated)
{
	struct cdnsp_device *pdev = cdns->gadget_dev;
	enum usb_device_speed max_speed;
	unsigned long flags;
	int ret;

	if (!pdev->gadget_driver)
		return 0;

	spin_lock_irqsave(&pdev->lock, flags);
	max_speed = pdev->gadget_driver->max_speed;

	/* Limit speed if necessary. */
	max_speed = min(max_speed, pdev->gadget.max_speed);

	ret = cdnsp_run(pdev, max_speed);

	if (pdev->link_state == XDEV_U3)
		__cdnsp_gadget_wakeup(pdev);

	spin_unlock_irqrestore(&pdev->lock, flags);

	return ret;
}

/**
 * cdnsp_gadget_init - initialize device structure
 * @cdns: cdnsp instance
 *
 * This function initializes the gadget: it allocates the device-role driver
 * descriptor, wires up the start/stop/suspend/resume callbacks and installs
 * it as the USB_ROLE_DEVICE role of @cdns.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */
int cdnsp_gadget_init(struct cdns *cdns)
{
	struct cdns_role_driver *rdrv;

	rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL);
	if (!rdrv)
		return -ENOMEM;

	rdrv->start	= __cdnsp_gadget_init;
	rdrv->stop	= cdnsp_gadget_exit;
	rdrv->suspend	= cdnsp_gadget_suspend;
	rdrv->resume	= cdnsp_gadget_resume;
	rdrv->state	= CDNS_ROLE_STATE_INACTIVE;
	rdrv->name	= "gadget";
	cdns->roles[USB_ROLE_DEVICE] = rdrv;

	return 0;
}