1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Cadence CDNSP DRD Driver. 4 * 5 * Copyright (C) 2020 Cadence. 6 * 7 * Author: Pawel Laszczak <pawell@cadence.com> 8 * 9 */ 10 11 #include <linux/moduleparam.h> 12 #include <linux/dma-mapping.h> 13 #include <linux/module.h> 14 #include <linux/iopoll.h> 15 #include <linux/delay.h> 16 #include <linux/log2.h> 17 #include <linux/slab.h> 18 #include <linux/pci.h> 19 #include <linux/irq.h> 20 #include <linux/dmi.h> 21 22 #include "core.h" 23 #include "gadget-export.h" 24 #include "drd.h" 25 #include "cdnsp-gadget.h" 26 #include "cdnsp-trace.h" 27 28 unsigned int cdnsp_port_speed(unsigned int port_status) 29 { 30 /*Detect gadget speed based on PORTSC register*/ 31 if (DEV_SUPERSPEEDPLUS(port_status)) 32 return USB_SPEED_SUPER_PLUS; 33 else if (DEV_SUPERSPEED(port_status)) 34 return USB_SPEED_SUPER; 35 else if (DEV_HIGHSPEED(port_status)) 36 return USB_SPEED_HIGH; 37 else if (DEV_FULLSPEED(port_status)) 38 return USB_SPEED_FULL; 39 40 /* If device is detached then speed will be USB_SPEED_UNKNOWN.*/ 41 return USB_SPEED_UNKNOWN; 42 } 43 44 /* 45 * Given a port state, this function returns a value that would result in the 46 * port being in the same state, if the value was written to the port status 47 * control register. 48 * Save Read Only (RO) bits and save read/write bits where 49 * writing a 0 clears the bit and writing a 1 sets the bit (RWS). 50 * For all other types (RW1S, RW1CS, RW, and RZ), writing a '0' has no effect. 51 */ 52 u32 cdnsp_port_state_to_neutral(u32 state) 53 { 54 /* Save read-only status and port state. */ 55 return (state & CDNSP_PORT_RO) | (state & CDNSP_PORT_RWS); 56 } 57 58 /** 59 * Find the offset of the extended capabilities with capability ID id. 60 * @base: PCI MMIO registers base address. 61 * @start: Address at which to start looking, (0 or HCC_PARAMS to start at 62 * beginning of list) 63 * @id: Extended capability ID to search for. 
64 * 65 * Returns the offset of the next matching extended capability structure. 66 * Some capabilities can occur several times, 67 * e.g., the EXT_CAPS_PROTOCOL, and this provides a way to find them all. 68 */ 69 int cdnsp_find_next_ext_cap(void __iomem *base, u32 start, int id) 70 { 71 u32 offset = start; 72 u32 next; 73 u32 val; 74 75 if (!start || start == HCC_PARAMS_OFFSET) { 76 val = readl(base + HCC_PARAMS_OFFSET); 77 if (val == ~0) 78 return 0; 79 80 offset = HCC_EXT_CAPS(val) << 2; 81 if (!offset) 82 return 0; 83 }; 84 85 do { 86 val = readl(base + offset); 87 if (val == ~0) 88 return 0; 89 90 if (EXT_CAPS_ID(val) == id && offset != start) 91 return offset; 92 93 next = EXT_CAPS_NEXT(val); 94 offset += next << 2; 95 } while (next); 96 97 return 0; 98 } 99 100 void cdnsp_set_link_state(struct cdnsp_device *pdev, 101 __le32 __iomem *port_regs, 102 u32 link_state) 103 { 104 int port_num = 0xFF; 105 u32 temp; 106 107 temp = readl(port_regs); 108 temp = cdnsp_port_state_to_neutral(temp); 109 temp |= PORT_WKCONN_E | PORT_WKDISC_E; 110 writel(temp, port_regs); 111 112 temp &= ~PORT_PLS_MASK; 113 temp |= PORT_LINK_STROBE | link_state; 114 115 if (pdev->active_port) 116 port_num = pdev->active_port->port_num; 117 118 trace_cdnsp_handle_port_status(port_num, readl(port_regs)); 119 writel(temp, port_regs); 120 trace_cdnsp_link_state_changed(port_num, readl(port_regs)); 121 } 122 123 static void cdnsp_disable_port(struct cdnsp_device *pdev, 124 __le32 __iomem *port_regs) 125 { 126 u32 temp = cdnsp_port_state_to_neutral(readl(port_regs)); 127 128 writel(temp | PORT_PED, port_regs); 129 } 130 131 static void cdnsp_clear_port_change_bit(struct cdnsp_device *pdev, 132 __le32 __iomem *port_regs) 133 { 134 u32 portsc = readl(port_regs); 135 136 writel(cdnsp_port_state_to_neutral(portsc) | 137 (portsc & PORT_CHANGE_BITS), port_regs); 138 } 139 140 static void cdnsp_set_chicken_bits_2(struct cdnsp_device *pdev, u32 bit) 141 { 142 __le32 __iomem *reg; 143 void __iomem *base; 
144 u32 offset = 0; 145 146 base = &pdev->cap_regs->hc_capbase; 147 offset = cdnsp_find_next_ext_cap(base, offset, D_XEC_PRE_REGS_CAP); 148 reg = base + offset + REG_CHICKEN_BITS_2_OFFSET; 149 150 bit = readl(reg) | bit; 151 writel(bit, reg); 152 } 153 154 static void cdnsp_clear_chicken_bits_2(struct cdnsp_device *pdev, u32 bit) 155 { 156 __le32 __iomem *reg; 157 void __iomem *base; 158 u32 offset = 0; 159 160 base = &pdev->cap_regs->hc_capbase; 161 offset = cdnsp_find_next_ext_cap(base, offset, D_XEC_PRE_REGS_CAP); 162 reg = base + offset + REG_CHICKEN_BITS_2_OFFSET; 163 164 bit = readl(reg) & ~bit; 165 writel(bit, reg); 166 } 167 168 /* 169 * Disable interrupts and begin the controller halting process. 170 */ 171 static void cdnsp_quiesce(struct cdnsp_device *pdev) 172 { 173 u32 halted; 174 u32 mask; 175 u32 cmd; 176 177 mask = ~(u32)(CDNSP_IRQS); 178 179 halted = readl(&pdev->op_regs->status) & STS_HALT; 180 if (!halted) 181 mask &= ~(CMD_R_S | CMD_DEVEN); 182 183 cmd = readl(&pdev->op_regs->command); 184 cmd &= mask; 185 writel(cmd, &pdev->op_regs->command); 186 } 187 188 /* 189 * Force controller into halt state. 190 * 191 * Disable any IRQs and clear the run/stop bit. 192 * Controller will complete any current and actively pipelined transactions, and 193 * should halt within 16 ms of the run/stop bit being cleared. 194 * Read controller Halted bit in the status register to see when the 195 * controller is finished. 196 */ 197 int cdnsp_halt(struct cdnsp_device *pdev) 198 { 199 int ret; 200 u32 val; 201 202 cdnsp_quiesce(pdev); 203 204 ret = readl_poll_timeout_atomic(&pdev->op_regs->status, val, 205 val & STS_HALT, 1, 206 CDNSP_MAX_HALT_USEC); 207 if (ret) { 208 dev_err(pdev->dev, "ERROR: Device halt failed\n"); 209 return ret; 210 } 211 212 pdev->cdnsp_state |= CDNSP_STATE_HALTED; 213 214 return 0; 215 } 216 217 /* 218 * device controller died, register read returns 0xffffffff, or command never 219 * ends. 
 */
void cdnsp_died(struct cdnsp_device *pdev)
{
	/* Mark the device dying and force a halt; callers treat it as fatal. */
	dev_err(pdev->dev, "ERROR: CDNSP controller not responding\n");
	pdev->cdnsp_state |= CDNSP_STATE_DYING;
	cdnsp_halt(pdev);
}

/*
 * Set the run bit and wait for the device to be running.
 */
static int cdnsp_start(struct cdnsp_device *pdev)
{
	u32 temp;
	int ret;

	/* Turn on run/stop and device-enable in the command register. */
	temp = readl(&pdev->op_regs->command);
	temp |= (CMD_R_S | CMD_DEVEN);
	writel(temp, &pdev->op_regs->command);

	/* Starting fresh: clear all state flags (HALTED, DYING, ...). */
	pdev->cdnsp_state = 0;

	/*
	 * Wait for the STS_HALT Status bit to be 0 to indicate the device is
	 * running.
	 */
	ret = readl_poll_timeout_atomic(&pdev->op_regs->status, temp,
					!(temp & STS_HALT), 1,
					CDNSP_MAX_HALT_USEC);
	if (ret) {
		pdev->cdnsp_state = CDNSP_STATE_DYING;
		dev_err(pdev->dev, "ERROR: Controller run failed\n");
	}

	return ret;
}

/*
 * Reset a halted controller.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int cdnsp_reset(struct cdnsp_device *pdev)
{
	u32 command;
	u32 temp;
	int ret;

	temp = readl(&pdev->op_regs->status);

	/* Reads of all ones mean the controller is gone from the bus. */
	if (temp == ~(u32)0) {
		dev_err(pdev->dev, "Device not accessible, reset failed.\n");
		return -ENODEV;
	}

	/* Reset is only legal while the controller is halted. */
	if ((temp & STS_HALT) == 0) {
		dev_err(pdev->dev, "Controller not halted, aborting reset.\n");
		return -EINVAL;
	}

	command = readl(&pdev->op_regs->command);
	command |= CMD_RESET;
	writel(command, &pdev->op_regs->command);

	/* Hardware clears CMD_RESET when the reset sequence completes. */
	ret = readl_poll_timeout_atomic(&pdev->op_regs->command, temp,
					!(temp & CMD_RESET), 1,
					10 * 1000);
	if (ret) {
		dev_err(pdev->dev, "ERROR: Controller reset failed\n");
		return ret;
	}

	/*
	 * CDNSP cannot write any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */
	ret = readl_poll_timeout_atomic(&pdev->op_regs->status, temp,
					!(temp & STS_CNR), 1,
					10 * 1000);

	if (ret) {
		dev_err(pdev->dev, "ERROR: Controller not ready to work\n");
		return ret;
	}

	dev_dbg(pdev->dev, "Controller ready to work");

	return ret;
}

/*
 * cdnsp_get_endpoint_index - Find the index for an endpoint given its
 * descriptor. Use the return value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
static unsigned int
cdnsp_get_endpoint_index(const struct usb_endpoint_descriptor *desc)
{
	unsigned int index = (unsigned int)usb_endpoint_num(desc);

	if (usb_endpoint_xfer_control(desc))
		return index * 2;

	return (index * 2) + (usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
}

/*
 * Find the flag for this endpoint (for use in the control context).
Use the
 * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
static unsigned int
cdnsp_get_endpoint_flag(const struct usb_endpoint_descriptor *desc)
{
	return 1 << (cdnsp_get_endpoint_index(desc) + 1);
}

/*
 * Queue @preq on @pep: map the request buffer for DMA, add it to the
 * endpoint's pending list and queue the matching TRBs per transfer type.
 * Returns 0 on success or a negative error code; on failure the request is
 * unmapped and removed from the pending list again.
 */
int cdnsp_ep_enqueue(struct cdnsp_ep *pep, struct cdnsp_request *preq)
{
	struct cdnsp_device *pdev = pep->pdev;
	struct usb_request *request;
	int ret;

	/* Endpoint 0 handles only a single request at a time. */
	if (preq->epnum == 0 && !list_empty(&pep->pending_list)) {
		trace_cdnsp_request_enqueue_busy(preq);
		return -EBUSY;
	}

	request = &preq->request;
	request->actual = 0;
	request->status = -EINPROGRESS;
	preq->direction = pep->direction;
	preq->epnum = pep->number;
	preq->td.drbl = 0;

	/* Map the buffer for DMA before any TRBs reference it. */
	ret = usb_gadget_map_request_by_dev(pdev->dev, request, pep->direction);
	if (ret) {
		trace_cdnsp_request_enqueue_error(preq);
		return ret;
	}

	list_add_tail(&preq->list, &pep->pending_list);

	trace_cdnsp_request_enqueue(preq);

	switch (usb_endpoint_type(pep->endpoint.desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		ret = cdnsp_queue_ctrl_tx(pdev, preq);
		break;
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		ret = cdnsp_queue_bulk_tx(pdev, preq);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		ret = cdnsp_queue_isoc_tx_prepare(pdev, preq);
	}

	if (ret)
		goto unmap;

	return 0;

unmap:
	/* Roll back the mapping and list insertion done above. */
	usb_gadget_unmap_request_by_dev(pdev->dev, &preq->request,
					pep->direction);
	list_del(&preq->list);
	trace_cdnsp_request_enqueue_error(preq);

	return ret;
}

/*
 * Remove the request's TD from the endpoint ring. This may cause the
 * controller to stop USB transfers, potentially stopping in the middle of a
 * TRB buffer. The controller should pick up where it left off in the TD,
 * unless a Set Transfer Ring Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled request will be "removed"
 * from the ring. Since the ring is a contiguous structure, they can't be
 * physically removed. Instead, there are two options:
 *
 *  1) If the controller is in the middle of processing the request to be
 *     canceled, we simply move the ring's dequeue pointer past those TRBs
 *     using the Set Transfer Ring Dequeue Pointer command. This will be
 *     the common case, when drivers timeout on the last submitted request
 *     and attempt to cancel.
 *
 *  2) If the controller is in the middle of a different TD, we turn the TRBs
 *     into a series of 1-TRB transfer no-op TDs. No-ops shouldn't be chained.
 *     The controller will need to invalidate the any TRBs it has cached after
 *     the stop endpoint command.
 *
 *  3) The TD may have completed by the time the Stop Endpoint Command
 *     completes, so software needs to handle that case too.
 *
 */
int cdnsp_ep_dequeue(struct cdnsp_ep *pep, struct cdnsp_request *preq)
{
	struct cdnsp_device *pdev = pep->pdev;
	int ret;

	trace_cdnsp_request_dequeue(preq);

	/* Stop the endpoint first if the controller is still running it. */
	if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_RUNNING) {
		ret = cdnsp_cmd_stop_ep(pdev, pep);
		if (ret)
			return ret;
	}

	return cdnsp_remove_request(pdev, preq, pep);
}

/*
 * Clear the input context so that a later configure-endpoint command does
 * not pick up stale add/drop flags or endpoint state. EP0 stays valid.
 */
static void cdnsp_zero_in_ctx(struct cdnsp_device *pdev)
{
	struct cdnsp_input_control_ctx *ctrl_ctx;
	struct cdnsp_slot_ctx *slot_ctx;
	struct cdnsp_ep_ctx *ep_ctx;
	int i;

	ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);

	/*
	 * When a device's add flag and drop flag are zero, any subsequent
	 * configure endpoint command will leave that endpoint's state
	 * untouched. Make sure we don't leave any old state in the input
	 * endpoint contexts.
	 */
	ctrl_ctx->drop_flags = 0;
	ctrl_ctx->add_flags = 0;
	slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
	slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);

	/* Endpoint 0 is always valid */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
	for (i = 1; i < CDNSP_ENDPOINTS_NUM; ++i) {
		ep_ctx = cdnsp_get_ep_ctx(&pdev->in_ctx, i);
		ep_ctx->ep_info = 0;
		ep_ctx->ep_info2 = 0;
		ep_ctx->deq = 0;
		ep_ctx->tx_info = 0;
	}
}

/* Issue a configure endpoint command and wait for it to finish. */
static int cdnsp_configure_endpoint(struct cdnsp_device *pdev)
{
	int ret;

	cdnsp_queue_configure_endpoint(pdev, pdev->cmd.in_ctx->dma);
	cdnsp_ring_cmd_db(pdev);
	ret = cdnsp_wait_for_cmd_compl(pdev);
	if (ret) {
		dev_err(pdev->dev,
			"ERR: unexpected command completion code 0x%x.\n", ret);
		return -EINVAL;
	}

	return ret;
}

/*
 * Walk the not-yet-consumed part of the event ring and mark every transfer
 * event that belongs to @pep with TRB_EVENT_INVALIDATE, so the event handler
 * can skip events for an endpoint that is being torn down.
 */
static void cdnsp_invalidate_ep_events(struct cdnsp_device *pdev,
				       struct cdnsp_ep *pep)
{
	struct cdnsp_segment *segment;
	union cdnsp_trb *event;
	u32 cycle_state;
	u32 data;

	event = pdev->event_ring->dequeue;
	segment = pdev->event_ring->deq_seg;
	cycle_state = pdev->event_ring->cycle_state;

	while (1) {
		data = le32_to_cpu(event->trans_event.flags);

		/* Check the owner of the TRB. */
		if ((data & TRB_CYCLE) != cycle_state)
			break;

		/* EP IDs in events are 1-based relative to pep->idx. */
		if (TRB_FIELD_TO_TYPE(data) == TRB_TRANSFER &&
		    TRB_TO_EP_ID(data) == (pep->idx + 1)) {
			data |= TRB_EVENT_INVALIDATE;
			event->trans_event.flags = cpu_to_le32(data);
		}

		if (cdnsp_last_trb_on_seg(segment, event)) {
			cycle_state ^= 1;
			/*
			 * NOTE(review): this always advances from
			 * deq_seg->next rather than segment->next — verify
			 * this is intended for event rings with more than
			 * two segments.
			 */
			segment = pdev->event_ring->deq_seg->next;
			event = segment->trbs;
		} else {
			event++;
		}
	}
}

/*
 * Wait for the command ring to go idle, then scan the event ring for the
 * completion event matching the last queued command TRB.
 * Returns 0 on COMP_SUCCESS, the negated completion code on failure,
 * -ETIMEDOUT if the command ring stays busy, or -EINVAL if no matching,
 * owned completion event is found.
 */
int cdnsp_wait_for_cmd_compl(struct cdnsp_device *pdev)
{
	struct cdnsp_segment *event_deq_seg;
	union cdnsp_trb *cmd_trb;
	dma_addr_t cmd_deq_dma;
	union cdnsp_trb *event;
	u32 cycle_state;
	int ret, val;
	u64 cmd_dma;
	u32 flags;

	cmd_trb = pdev->cmd.command_trb;
	pdev->cmd.status = 0;

	trace_cdnsp_cmd_wait_for_compl(pdev->cmd_ring, &cmd_trb->generic);

	/* The controller clears CMD_RING_BUSY when the command finishes. */
	ret = readl_poll_timeout_atomic(&pdev->op_regs->cmd_ring, val,
					!CMD_RING_BUSY(val), 1,
					CDNSP_CMD_TIMEOUT);
	if (ret) {
		dev_err(pdev->dev, "ERR: Timeout while waiting for command\n");
		trace_cdnsp_cmd_timeout(pdev->cmd_ring, &cmd_trb->generic);
		pdev->cdnsp_state = CDNSP_STATE_DYING;
		return -ETIMEDOUT;
	}

	event = pdev->event_ring->dequeue;
	event_deq_seg = pdev->event_ring->deq_seg;
	cycle_state = pdev->event_ring->cycle_state;

	cmd_deq_dma = cdnsp_trb_virt_to_dma(pdev->cmd_ring->deq_seg, cmd_trb);
	if (!cmd_deq_dma)
		return -EINVAL;

	while (1) {
		flags = le32_to_cpu(event->event_cmd.flags);

		/* Check the owner of the TRB. */
		if ((flags & TRB_CYCLE) != cycle_state)
			return -EINVAL;

		cmd_dma = le64_to_cpu(event->event_cmd.cmd_trb);

		/*
		 * Check whether the completion event is for last queued
		 * command.
		 */
		if (TRB_FIELD_TO_TYPE(flags) != TRB_COMPLETION ||
		    cmd_dma != (u64)cmd_deq_dma) {
			if (!cdnsp_last_trb_on_seg(event_deq_seg, event)) {
				event++;
				continue;
			}

			/* Flip the cycle when wrapping past the ring end. */
			if (cdnsp_last_trb_on_ring(pdev->event_ring,
						   event_deq_seg, event))
				cycle_state ^= 1;

			event_deq_seg = event_deq_seg->next;
			event = event_deq_seg->trbs;
			continue;
		}

		trace_cdnsp_handle_command(pdev->cmd_ring, &cmd_trb->generic);

		pdev->cmd.status = GET_COMP_CODE(le32_to_cpu(event->event_cmd.status));
		if (pdev->cmd.status == COMP_SUCCESS)
			return 0;

		return -pdev->cmd.status;
	}
}

/*
 * Set (value != 0) or clear (value == 0) the halt condition on @pep.
 * Setting stops the endpoint and, if it reached the Stopped state, issues a
 * Halt Endpoint command. Clearing issues a Reset Endpoint command and rings
 * the doorbell again unless the endpoint is wedged.
 */
int cdnsp_halt_endpoint(struct cdnsp_device *pdev,
			struct cdnsp_ep *pep,
			int value)
{
	int ret;

	trace_cdnsp_ep_halt(value ? "Set" : "Clear");

	if (value) {
		ret = cdnsp_cmd_stop_ep(pdev, pep);
		if (ret)
			return ret;

		if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_STOPPED) {
			cdnsp_queue_halt_endpoint(pdev, pep->idx);
			cdnsp_ring_cmd_db(pdev);
			ret = cdnsp_wait_for_cmd_compl(pdev);
		}

		pep->ep_state |= EP_HALTED;
	} else {
		/*
		 * In device mode driver can call reset endpoint command
		 * from any endpoint state.
		 */
		cdnsp_queue_reset_ep(pdev, pep->idx);
		cdnsp_ring_cmd_db(pdev);
		ret = cdnsp_wait_for_cmd_compl(pdev);
		trace_cdnsp_handle_cmd_reset_ep(pep->out_ctx);

		if (ret)
			return ret;

		pep->ep_state &= ~EP_HALTED;

		if (pep->idx != 0 && !(pep->ep_state & EP_WEDGE))
			cdnsp_ring_doorbell_for_active_rings(pdev, pep);

		pep->ep_state &= ~EP_WEDGE;
	}

	return 0;
}

/*
 * Push the pending add/drop flags in the input context to the controller
 * via a Configure Endpoint command, then zero the input context again.
 */
static int cdnsp_update_eps_configuration(struct cdnsp_device *pdev,
					  struct cdnsp_ep *pep)
{
	struct cdnsp_input_control_ctx *ctrl_ctx;
	struct cdnsp_slot_ctx *slot_ctx;
	int ret = 0;
	u32 ep_sts;
	int i;

	ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);

	/* Don't issue the command if there's no endpoints to update. */
	if (ctrl_ctx->add_flags == 0 && ctrl_ctx->drop_flags == 0)
		return 0;

	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
	ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));

	/* Fix up Context Entries field. Minimum value is EP0 == BIT(1). */
	slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
	/* Walk down from the highest endpoint to find the last valid one. */
	for (i = CDNSP_ENDPOINTS_NUM; i >= 1; i--) {
		__le32 le32 = cpu_to_le32(BIT(i));

		if ((pdev->eps[i - 1].ring && !(ctrl_ctx->drop_flags & le32)) ||
		    (ctrl_ctx->add_flags & le32) || i == 1) {
			slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
			slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i));
			break;
		}
	}

	ep_sts = GET_EP_CTX_STATE(pep->out_ctx);

	/* Only issue the command when it would actually change something. */
	if ((ctrl_ctx->add_flags != cpu_to_le32(SLOT_FLAG) &&
	     ep_sts == EP_STATE_DISABLED) ||
	    (ep_sts != EP_STATE_DISABLED && ctrl_ctx->drop_flags))
		ret = cdnsp_configure_endpoint(pdev);

	trace_cdnsp_configure_endpoint(cdnsp_get_slot_ctx(&pdev->out_ctx));
	trace_cdnsp_handle_cmd_config_ep(pep->out_ctx);

	cdnsp_zero_in_ctx(pdev);

	return ret;
}

/*
 * This submits a Reset Device Command, which will set the device state to 0,
 * set the device address to 0, and disable all the endpoints except the default
 * control endpoint. The USB core should come back and call
 * cdnsp_setup_device(), and then re-set up the configuration.
 */
int cdnsp_reset_device(struct cdnsp_device *pdev)
{
	struct cdnsp_slot_ctx *slot_ctx;
	int slot_state;
	int ret, i;

	slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
	slot_ctx->dev_info = 0;
	pdev->device_address = 0;

	/* If device is not setup, there is no point in resetting it. */
	slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx);
	slot_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state));
	trace_cdnsp_reset_device(slot_ctx);

	/* Clear a stale EP0 halt before (possibly) skipping the reset. */
	if (slot_state <= SLOT_STATE_DEFAULT &&
	    pdev->eps[0].ep_state & EP_HALTED) {
		cdnsp_halt_endpoint(pdev, &pdev->eps[0], 0);
	}

	/*
	 * During Reset Device command controller shall transition the
	 * endpoint ep0 to the Running State.
	 */
	pdev->eps[0].ep_state &= ~(EP_STOPPED | EP_HALTED);
	pdev->eps[0].ep_state |= EP_ENABLED;

	if (slot_state <= SLOT_STATE_DEFAULT)
		return 0;

	cdnsp_queue_reset_device(pdev);
	cdnsp_ring_cmd_db(pdev);
	ret = cdnsp_wait_for_cmd_compl(pdev);

	/*
	 * After Reset Device command all not default endpoints
	 * are in Disabled state.
	 */
	for (i = 1; i < CDNSP_ENDPOINTS_NUM; ++i)
		pdev->eps[i].ep_state |= EP_STOPPED;

	trace_cdnsp_handle_cmd_reset_dev(slot_ctx);

	if (ret)
		dev_err(pdev->dev, "Reset device failed with error code %d",
			ret);

	return ret;
}

/*
 * Sets the MaxPStreams field and the Linear Stream Array field.
 * Sets the dequeue pointer to the stream context array.
 */
static void cdnsp_setup_streams_ep_input_ctx(struct cdnsp_device *pdev,
					     struct cdnsp_ep_ctx *ep_ctx,
					     struct cdnsp_stream_info *stream_info)
{
	u32 max_primary_streams;

	/* MaxPStreams is the number of stream context array entries, not the
	 * number we're actually using. Must be in 2^(MaxPstreams + 1) format.
	 * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 2, fls(0x100) = 3, etc.
	 */
	max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
	ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
	ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
				       | EP_HAS_LSA);
	ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma);
}

/*
 * The drivers use this function to prepare a bulk endpoints to use streams.
 *
 * Don't allow the call to succeed if endpoint only supports one stream
 * (which means it doesn't support streams at all).
 */
int cdnsp_alloc_streams(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
{
	unsigned int num_streams = usb_ss_max_streams(pep->endpoint.comp_desc);
	unsigned int num_stream_ctxs;
	int ret;

	/* Endpoint does not use streams at all. */
	if (num_streams == 0)
		return 0;

	if (num_streams > STREAM_NUM_STREAMS)
		return -EINVAL;

	/*
	 * Add two to the number of streams requested to account for
	 * stream 0 that is reserved for controller usage and one additional
	 * for TASK SET FULL response.
	 */
	num_streams += 2;

	/* The stream context array size must be a power of two */
	num_stream_ctxs = roundup_pow_of_two(num_streams);

	trace_cdnsp_stream_number(pep, num_stream_ctxs, num_streams);

	ret = cdnsp_alloc_stream_info(pdev, pep, num_stream_ctxs, num_streams);
	if (ret)
		return ret;

	cdnsp_setup_streams_ep_input_ctx(pdev, pep->in_ctx, &pep->stream_info);

	pep->ep_state |= EP_HAS_STREAMS;
	pep->stream_info.td_count = 0;
	pep->stream_info.first_prime_det = 0;

	/* Subtract 1 for stream 0, which drivers can't use. */
	return num_streams - 1;
}

/*
 * Issue a Disable Slot command and clear the cached slot/port state and
 * both device contexts. Returns the command completion status.
 */
int cdnsp_disable_slot(struct cdnsp_device *pdev)
{
	int ret;

	cdnsp_queue_slot_control(pdev, TRB_DISABLE_SLOT);
	cdnsp_ring_cmd_db(pdev);
	ret = cdnsp_wait_for_cmd_compl(pdev);

	pdev->slot_id = 0;
	pdev->active_port = NULL;

	trace_cdnsp_handle_cmd_disable_slot(cdnsp_get_slot_ctx(&pdev->out_ctx));

	memset(pdev->in_ctx.bytes, 0, CDNSP_CTX_SIZE);
	memset(pdev->out_ctx.bytes, 0, CDNSP_CTX_SIZE);

	return ret;
}

/*
 * Issue an Enable Slot command if the slot is currently disabled.
 * On success the driver always uses slot ID 1.
 */
int cdnsp_enable_slot(struct cdnsp_device *pdev)
{
	struct cdnsp_slot_ctx *slot_ctx;
	int slot_state;
	int ret;

	/* If device is not setup, there is no point in resetting it */
	slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx);
	slot_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state));

	if (slot_state != SLOT_STATE_DISABLED)
		return 0;

	cdnsp_queue_slot_control(pdev, TRB_ENABLE_SLOT);
	cdnsp_ring_cmd_db(pdev);
	ret = cdnsp_wait_for_cmd_compl(pdev);
	if (ret)
		goto show_trace;

	pdev->slot_id = 1;

show_trace:
	trace_cdnsp_handle_cmd_enable_slot(cdnsp_get_slot_ctx(&pdev->out_ctx));

	return ret;
}

/*
 * Issue an Address Device command with BSR=0 if setup is SETUP_CONTEXT_ONLY
 * or with BSR = 1 if set_address is SETUP_CONTEXT_ADDRESS.
855 */ 856 int cdnsp_setup_device(struct cdnsp_device *pdev, enum cdnsp_setup_dev setup) 857 { 858 struct cdnsp_input_control_ctx *ctrl_ctx; 859 struct cdnsp_slot_ctx *slot_ctx; 860 int dev_state = 0; 861 int ret; 862 863 if (!pdev->slot_id) { 864 trace_cdnsp_slot_id("incorrect"); 865 return -EINVAL; 866 } 867 868 if (!pdev->active_port->port_num) 869 return -EINVAL; 870 871 slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx); 872 dev_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)); 873 874 if (setup == SETUP_CONTEXT_ONLY && dev_state == SLOT_STATE_DEFAULT) { 875 trace_cdnsp_slot_already_in_default(slot_ctx); 876 return 0; 877 } 878 879 slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx); 880 ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx); 881 882 if (!slot_ctx->dev_info || dev_state == SLOT_STATE_DEFAULT) { 883 ret = cdnsp_setup_addressable_priv_dev(pdev); 884 if (ret) 885 return ret; 886 } 887 888 cdnsp_copy_ep0_dequeue_into_input_ctx(pdev); 889 890 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG); 891 ctrl_ctx->drop_flags = 0; 892 893 trace_cdnsp_setup_device_slot(slot_ctx); 894 895 cdnsp_queue_address_device(pdev, pdev->in_ctx.dma, setup); 896 cdnsp_ring_cmd_db(pdev); 897 ret = cdnsp_wait_for_cmd_compl(pdev); 898 899 trace_cdnsp_handle_cmd_addr_dev(cdnsp_get_slot_ctx(&pdev->out_ctx)); 900 901 /* Zero the input context control for later use. 
*/ 902 ctrl_ctx->add_flags = 0; 903 ctrl_ctx->drop_flags = 0; 904 905 return ret; 906 } 907 908 void cdnsp_set_usb2_hardware_lpm(struct cdnsp_device *pdev, 909 struct usb_request *req, 910 int enable) 911 { 912 if (pdev->active_port != &pdev->usb2_port || !pdev->gadget.lpm_capable) 913 return; 914 915 trace_cdnsp_lpm(enable); 916 917 if (enable) 918 writel(PORT_BESL(CDNSP_DEFAULT_BESL) | PORT_L1S_NYET | PORT_HLE, 919 &pdev->active_port->regs->portpmsc); 920 else 921 writel(PORT_L1S_NYET, &pdev->active_port->regs->portpmsc); 922 } 923 924 static int cdnsp_get_frame(struct cdnsp_device *pdev) 925 { 926 return readl(&pdev->run_regs->microframe_index) >> 3; 927 } 928 929 static int cdnsp_gadget_ep_enable(struct usb_ep *ep, 930 const struct usb_endpoint_descriptor *desc) 931 { 932 struct cdnsp_input_control_ctx *ctrl_ctx; 933 struct cdnsp_device *pdev; 934 struct cdnsp_ep *pep; 935 unsigned long flags; 936 u32 added_ctxs; 937 int ret; 938 939 if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT || 940 !desc->wMaxPacketSize) 941 return -EINVAL; 942 943 pep = to_cdnsp_ep(ep); 944 pdev = pep->pdev; 945 946 if (dev_WARN_ONCE(pdev->dev, pep->ep_state & EP_ENABLED, 947 "%s is already enabled\n", pep->name)) 948 return 0; 949 950 spin_lock_irqsave(&pdev->lock, flags); 951 952 added_ctxs = cdnsp_get_endpoint_flag(desc); 953 if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) { 954 dev_err(pdev->dev, "ERROR: Bad endpoint number\n"); 955 ret = -EINVAL; 956 goto unlock; 957 } 958 959 pep->interval = desc->bInterval ? 
BIT(desc->bInterval - 1) : 0; 960 961 if (pdev->gadget.speed == USB_SPEED_FULL) { 962 if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT) 963 pep->interval = desc->bInterval << 3; 964 if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_ISOC) 965 pep->interval = BIT(desc->bInterval - 1) << 3; 966 } 967 968 if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_ISOC) { 969 if (pep->interval > BIT(12)) { 970 dev_err(pdev->dev, "bInterval %d not supported\n", 971 desc->bInterval); 972 ret = -EINVAL; 973 goto unlock; 974 } 975 cdnsp_set_chicken_bits_2(pdev, CHICKEN_XDMA_2_TP_CACHE_DIS); 976 } 977 978 ret = cdnsp_endpoint_init(pdev, pep, GFP_ATOMIC); 979 if (ret) 980 goto unlock; 981 982 ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx); 983 ctrl_ctx->add_flags = cpu_to_le32(added_ctxs); 984 ctrl_ctx->drop_flags = 0; 985 986 ret = cdnsp_update_eps_configuration(pdev, pep); 987 if (ret) { 988 cdnsp_free_endpoint_rings(pdev, pep); 989 goto unlock; 990 } 991 992 pep->ep_state |= EP_ENABLED; 993 pep->ep_state &= ~EP_STOPPED; 994 995 unlock: 996 trace_cdnsp_ep_enable_end(pep, 0); 997 spin_unlock_irqrestore(&pdev->lock, flags); 998 999 return ret; 1000 } 1001 1002 static int cdnsp_gadget_ep_disable(struct usb_ep *ep) 1003 { 1004 struct cdnsp_input_control_ctx *ctrl_ctx; 1005 struct cdnsp_request *preq; 1006 struct cdnsp_device *pdev; 1007 struct cdnsp_ep *pep; 1008 unsigned long flags; 1009 u32 drop_flag; 1010 int ret = 0; 1011 1012 if (!ep) 1013 return -EINVAL; 1014 1015 pep = to_cdnsp_ep(ep); 1016 pdev = pep->pdev; 1017 1018 spin_lock_irqsave(&pdev->lock, flags); 1019 1020 if (!(pep->ep_state & EP_ENABLED)) { 1021 dev_err(pdev->dev, "%s is already disabled\n", pep->name); 1022 ret = -EINVAL; 1023 goto finish; 1024 } 1025 1026 cdnsp_cmd_stop_ep(pdev, pep); 1027 pep->ep_state |= EP_DIS_IN_RROGRESS; 1028 cdnsp_cmd_flush_ep(pdev, pep); 1029 1030 /* Remove all queued USB requests. 
*/ 1031 while (!list_empty(&pep->pending_list)) { 1032 preq = next_request(&pep->pending_list); 1033 cdnsp_ep_dequeue(pep, preq); 1034 } 1035 1036 cdnsp_invalidate_ep_events(pdev, pep); 1037 1038 pep->ep_state &= ~EP_DIS_IN_RROGRESS; 1039 drop_flag = cdnsp_get_endpoint_flag(pep->endpoint.desc); 1040 ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx); 1041 ctrl_ctx->drop_flags = cpu_to_le32(drop_flag); 1042 ctrl_ctx->add_flags = 0; 1043 1044 cdnsp_endpoint_zero(pdev, pep); 1045 1046 ret = cdnsp_update_eps_configuration(pdev, pep); 1047 cdnsp_free_endpoint_rings(pdev, pep); 1048 1049 pep->ep_state &= ~EP_ENABLED; 1050 pep->ep_state |= EP_STOPPED; 1051 1052 finish: 1053 trace_cdnsp_ep_disable_end(pep, 0); 1054 spin_unlock_irqrestore(&pdev->lock, flags); 1055 1056 return ret; 1057 } 1058 1059 static struct usb_request *cdnsp_gadget_ep_alloc_request(struct usb_ep *ep, 1060 gfp_t gfp_flags) 1061 { 1062 struct cdnsp_ep *pep = to_cdnsp_ep(ep); 1063 struct cdnsp_request *preq; 1064 1065 preq = kzalloc(sizeof(*preq), gfp_flags); 1066 if (!preq) 1067 return NULL; 1068 1069 preq->epnum = pep->number; 1070 preq->pep = pep; 1071 1072 trace_cdnsp_alloc_request(preq); 1073 1074 return &preq->request; 1075 } 1076 1077 static void cdnsp_gadget_ep_free_request(struct usb_ep *ep, 1078 struct usb_request *request) 1079 { 1080 struct cdnsp_request *preq = to_cdnsp_request(request); 1081 1082 trace_cdnsp_free_request(preq); 1083 kfree(preq); 1084 } 1085 1086 static int cdnsp_gadget_ep_queue(struct usb_ep *ep, 1087 struct usb_request *request, 1088 gfp_t gfp_flags) 1089 { 1090 struct cdnsp_request *preq; 1091 struct cdnsp_device *pdev; 1092 struct cdnsp_ep *pep; 1093 unsigned long flags; 1094 int ret; 1095 1096 if (!request || !ep) 1097 return -EINVAL; 1098 1099 pep = to_cdnsp_ep(ep); 1100 pdev = pep->pdev; 1101 1102 if (!(pep->ep_state & EP_ENABLED)) { 1103 dev_err(pdev->dev, "%s: can't queue to disabled endpoint\n", 1104 pep->name); 1105 return -EINVAL; 1106 } 1107 1108 preq = 
to_cdnsp_request(request); 1109 spin_lock_irqsave(&pdev->lock, flags); 1110 ret = cdnsp_ep_enqueue(pep, preq); 1111 spin_unlock_irqrestore(&pdev->lock, flags); 1112 1113 return ret; 1114 } 1115 1116 static int cdnsp_gadget_ep_dequeue(struct usb_ep *ep, 1117 struct usb_request *request) 1118 { 1119 struct cdnsp_ep *pep = to_cdnsp_ep(ep); 1120 struct cdnsp_device *pdev = pep->pdev; 1121 unsigned long flags; 1122 int ret; 1123 1124 if (!pep->endpoint.desc) { 1125 dev_err(pdev->dev, 1126 "%s: can't dequeue to disabled endpoint\n", 1127 pep->name); 1128 return -ESHUTDOWN; 1129 } 1130 1131 /* Requests has been dequeued during disabling endpoint. */ 1132 if (!(pep->ep_state & EP_ENABLED)) 1133 return 0; 1134 1135 spin_lock_irqsave(&pdev->lock, flags); 1136 ret = cdnsp_ep_dequeue(pep, to_cdnsp_request(request)); 1137 spin_unlock_irqrestore(&pdev->lock, flags); 1138 1139 return ret; 1140 } 1141 1142 static int cdnsp_gadget_ep_set_halt(struct usb_ep *ep, int value) 1143 { 1144 struct cdnsp_ep *pep = to_cdnsp_ep(ep); 1145 struct cdnsp_device *pdev = pep->pdev; 1146 struct cdnsp_request *preq; 1147 unsigned long flags = 0; 1148 int ret; 1149 1150 spin_lock_irqsave(&pdev->lock, flags); 1151 1152 preq = next_request(&pep->pending_list); 1153 if (value) { 1154 if (preq) { 1155 trace_cdnsp_ep_busy_try_halt_again(pep, 0); 1156 ret = -EAGAIN; 1157 goto done; 1158 } 1159 } 1160 1161 ret = cdnsp_halt_endpoint(pdev, pep, value); 1162 1163 done: 1164 spin_unlock_irqrestore(&pdev->lock, flags); 1165 return ret; 1166 } 1167 1168 static int cdnsp_gadget_ep_set_wedge(struct usb_ep *ep) 1169 { 1170 struct cdnsp_ep *pep = to_cdnsp_ep(ep); 1171 struct cdnsp_device *pdev = pep->pdev; 1172 unsigned long flags = 0; 1173 int ret; 1174 1175 spin_lock_irqsave(&pdev->lock, flags); 1176 pep->ep_state |= EP_WEDGE; 1177 ret = cdnsp_halt_endpoint(pdev, pep, 1); 1178 spin_unlock_irqrestore(&pdev->lock, flags); 1179 1180 return ret; 1181 } 1182 1183 static const struct usb_ep_ops cdnsp_gadget_ep0_ops = { 
	.enable		= cdnsp_gadget_ep_enable,
	.disable	= cdnsp_gadget_ep_disable,
	.alloc_request	= cdnsp_gadget_ep_alloc_request,
	.free_request	= cdnsp_gadget_ep_free_request,
	.queue		= cdnsp_gadget_ep_queue,
	.dequeue	= cdnsp_gadget_ep_dequeue,
	.set_halt	= cdnsp_gadget_ep_set_halt,
	.set_wedge	= cdnsp_gadget_ep_set_wedge,
};

/* Endpoint operations for all non-zero endpoints. */
static const struct usb_ep_ops cdnsp_gadget_ep_ops = {
	.enable		= cdnsp_gadget_ep_enable,
	.disable	= cdnsp_gadget_ep_disable,
	.alloc_request	= cdnsp_gadget_ep_alloc_request,
	.free_request	= cdnsp_gadget_ep_free_request,
	.queue		= cdnsp_gadget_ep_queue,
	.dequeue	= cdnsp_gadget_ep_dequeue,
	.set_halt	= cdnsp_gadget_ep_set_halt,
	.set_wedge	= cdnsp_gadget_ep_set_wedge,
};

/*
 * Complete @preq with @status and hand it back to the gadget driver.
 * Must be called with pdev->lock held; the lock is dropped around the
 * gadget completion callback for every request except the internal
 * ep0 request (pdev->ep0_preq), which is never given back.
 */
void cdnsp_gadget_giveback(struct cdnsp_ep *pep,
			   struct cdnsp_request *preq,
			   int status)
{
	struct cdnsp_device *pdev = pep->pdev;

	list_del(&preq->list);

	/* Only overwrite the status if the request is still in flight. */
	if (preq->request.status == -EINPROGRESS)
		preq->request.status = status;

	usb_gadget_unmap_request_by_dev(pdev->dev, &preq->request,
					preq->direction);

	trace_cdnsp_request_giveback(preq);

	if (preq != &pdev->ep0_preq) {
		spin_unlock(&pdev->lock);
		usb_gadget_giveback_request(&pep->endpoint, &preq->request);
		spin_lock(&pdev->lock);
	}
}

/* ep0 descriptor; wMaxPacketSize is filled in by cdnsp_run()/cdnsp_irq_reset(). */
static struct usb_endpoint_descriptor cdnsp_gadget_ep0_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bmAttributes =		USB_ENDPOINT_XFER_CONTROL,
};

/*
 * Bring the controller into the running state for the requested maximum
 * @speed: program the interrupt moderation interval, configure the USB3
 * and USB2 ports, start the controller and enable its interrupts.
 * Returns 0 on success or -ENODEV if the controller fails to start.
 */
static int cdnsp_run(struct cdnsp_device *pdev,
		     enum usb_device_speed speed)
{
	u32 fs_speed = 0;
	u64 temp_64;
	u32 temp;
	int ret;

	/*
	 * NOTE(review): temp_64 is read and masked but never written back
	 * or otherwise used below — looks like dead code; confirm against
	 * controller documentation before removing.
	 */
	temp_64 = cdnsp_read_64(&pdev->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	/* Program the default interrupt moderation interval (250ns units). */
	temp = readl(&pdev->ir_set->irq_control);
	temp &= ~IMOD_INTERVAL_MASK;
	temp |= ((IMOD_DEFAULT_INTERVAL / 250) & IMOD_INTERVAL_MASK);
	writel(temp, &pdev->ir_set->irq_control);

	temp = readl(&pdev->port3x_regs->mode_addr);

	switch (speed) {
	case USB_SPEED_SUPER_PLUS:
		temp |= CFG_3XPORT_SSP_SUPPORT;
		break;
	case USB_SPEED_SUPER:
		temp &= ~CFG_3XPORT_SSP_SUPPORT;
		break;
	case USB_SPEED_HIGH:
		break;
	case USB_SPEED_FULL:
		fs_speed = PORT_REG6_FORCE_FS;
		break;
	default:
		dev_err(pdev->dev, "invalid maximum_speed parameter %d\n",
			speed);
		fallthrough;
	case USB_SPEED_UNKNOWN:
		/* Default to superspeed. */
		speed = USB_SPEED_SUPER;
		break;
	}

	if (speed >= USB_SPEED_SUPER) {
		writel(temp, &pdev->port3x_regs->mode_addr);
		cdnsp_set_link_state(pdev, &pdev->usb3_port.regs->portsc,
				     XDEV_RXDETECT);
	} else {
		/* USB3 port is unused at HS/FS; keep it disabled. */
		cdnsp_disable_port(pdev, &pdev->usb3_port.regs->portsc);
	}

	cdnsp_set_link_state(pdev, &pdev->usb2_port.regs->portsc,
			     XDEV_RXDETECT);

	cdnsp_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);

	writel(PORT_REG6_L1_L0_HW_EN | fs_speed, &pdev->port20_regs->port_reg6);

	ret = cdnsp_start(pdev);
	if (ret) {
		ret = -ENODEV;
		goto err;
	}

	/* Enable controller interrupts: global (CMD_INTE) ... */
	temp = readl(&pdev->op_regs->command);
	temp |= (CMD_INTE);
	writel(temp, &pdev->op_regs->command);

	/* ... and the interrupter's IE bit. */
	temp = readl(&pdev->ir_set->irq_pending);
	writel(IMAN_IE_SET(temp), &pdev->ir_set->irq_pending);

	trace_cdnsp_init("Controller ready to work");
	return 0;
err:
	cdnsp_halt(pdev);
	return ret;
}

/*
 * usb_gadget_ops.udc_start callback: bind the gadget driver and start
 * the controller at the lesser of the driver's and controller's
 * maximum speeds.
 */
static int cdnsp_gadget_udc_start(struct usb_gadget *g,
				  struct usb_gadget_driver *driver)
{
	enum usb_device_speed max_speed = driver->max_speed;
	struct cdnsp_device *pdev = gadget_to_cdnsp(g);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pdev->lock, flags);
	pdev->gadget_driver = driver;

	/* limit speed if necessary */
	max_speed =
		min(driver->max_speed, g->max_speed);
	ret = cdnsp_run(pdev, max_speed);

	spin_unlock_irqrestore(&pdev->lock, flags);

	return ret;
}

/*
 * Update Event Ring Dequeue Pointer:
 * - When all events have finished
 * - To avoid "Event Ring Full Error" condition
 */
void cdnsp_update_erst_dequeue(struct cdnsp_device *pdev,
			       union cdnsp_trb *event_ring_deq,
			       u8 clear_ehb)
{
	u64 temp_64;
	dma_addr_t deq;

	temp_64 = cdnsp_read_64(&pdev->ir_set->erst_dequeue);

	/* If necessary, update the HW's version of the event ring deq ptr. */
	if (event_ring_deq != pdev->event_ring->dequeue) {
		deq = cdnsp_trb_virt_to_dma(pdev->event_ring->deq_seg,
					    pdev->event_ring->dequeue);
		temp_64 &= ERST_PTR_MASK;
		temp_64 |= ((u64)deq & (u64)~ERST_PTR_MASK);
	}

	/* Clear the event handler busy flag (RW1C). */
	if (clear_ehb)
		temp_64 |= ERST_EHB;
	else
		temp_64 &= ~ERST_EHB;

	cdnsp_write_64(temp_64, &pdev->ir_set->erst_dequeue);
}

/*
 * Reinitialize the command ring: wipe all TRBs (the link TRB in the last
 * slot of each segment is preserved), reset the software ring state and
 * reprogram the Command Ring Control register with the first segment's
 * DMA address and cycle state.
 */
static void cdnsp_clear_cmd_ring(struct cdnsp_device *pdev)
{
	struct cdnsp_segment *seg;
	u64 val_64;
	int i;

	cdnsp_initialize_ring_info(pdev->cmd_ring);

	seg = pdev->cmd_ring->first_seg;
	for (i = 0; i < pdev->cmd_ring->num_segs; i++) {
		memset(seg->trbs, 0,
		       sizeof(union cdnsp_trb) * (TRBS_PER_SEGMENT - 1));
		seg = seg->next;
	}

	/* Set the address in the Command Ring Control register. */
	val_64 = cdnsp_read_64(&pdev->op_regs->cmd_ring);
	val_64 = (val_64 & (u64)CMD_RING_RSVD_BITS) |
		 (pdev->cmd_ring->first_seg->dma & (u64)~CMD_RING_RSVD_BITS) |
		 pdev->cmd_ring->cycle_state;
	cdnsp_write_64(val_64, &pdev->op_regs->cmd_ring);
}

/*
 * Walk the event ring past every event currently owned by the driver
 * (cycle bit matches) without handling them, then advance the hardware
 * ERST dequeue pointer and clear the event handler busy flag.
 */
static void cdnsp_consume_all_events(struct cdnsp_device *pdev)
{
	struct cdnsp_segment *event_deq_seg;
	union cdnsp_trb *event_ring_deq;
	union cdnsp_trb *event;
	u32 cycle_bit;

	event_ring_deq = pdev->event_ring->dequeue;
	event_deq_seg = pdev->event_ring->deq_seg;
	event = pdev->event_ring->dequeue;

	/* Update ring dequeue pointer. */
	while (1) {
		cycle_bit = (le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE);

		/* Does the controller or driver own the TRB? */
		if (cycle_bit != pdev->event_ring->cycle_state)
			break;

		cdnsp_inc_deq(pdev, pdev->event_ring);

		if (!cdnsp_last_trb_on_seg(event_deq_seg, event)) {
			event++;
			continue;
		}

		/* Cycle state toggles each time the ring wraps. */
		if (cdnsp_last_trb_on_ring(pdev->event_ring, event_deq_seg,
					   event))
			cycle_bit ^= 1;

		event_deq_seg = event_deq_seg->next;
		event = event_deq_seg->trbs;
	}

	cdnsp_update_erst_dequeue(pdev, event_ring_deq, 1);
}

/*
 * Quiesce the controller: flush/dequeue ep0, disable both ports and the
 * device slot, halt the controller, mask and acknowledge its interrupts,
 * drain the event ring and reset the command ring.
 * Called with pdev->lock held (see cdnsp_gadget_udc_stop()/suspend()).
 */
static void cdnsp_stop(struct cdnsp_device *pdev)
{
	u32 temp;

	cdnsp_cmd_flush_ep(pdev, &pdev->eps[0]);

	/* Remove internally queued request for ep0. */
	if (!list_empty(&pdev->eps[0].pending_list)) {
		struct cdnsp_request *req;

		req = next_request(&pdev->eps[0].pending_list);
		if (req == &pdev->ep0_preq)
			cdnsp_ep_dequeue(&pdev->eps[0], req);
	}

	cdnsp_disable_port(pdev, &pdev->usb2_port.regs->portsc);
	cdnsp_disable_port(pdev, &pdev->usb3_port.regs->portsc);
	cdnsp_disable_slot(pdev);
	cdnsp_halt(pdev);

	/* Acknowledge any pending event interrupt and disable IE. */
	temp = readl(&pdev->op_regs->status);
	writel((temp & ~0x1fff) | STS_EINT, &pdev->op_regs->status);
	temp = readl(&pdev->ir_set->irq_pending);
	writel(IMAN_IE_CLEAR(temp), &pdev->ir_set->irq_pending);

	cdnsp_clear_port_change_bit(pdev, &pdev->usb2_port.regs->portsc);
	cdnsp_clear_port_change_bit(pdev, &pdev->usb3_port.regs->portsc);

	/* Clear interrupt line */
	temp = readl(&pdev->ir_set->irq_pending);
	temp |= IMAN_IP;
	writel(temp, &pdev->ir_set->irq_pending);

	cdnsp_consume_all_events(pdev);
	cdnsp_clear_cmd_ring(pdev);

	trace_cdnsp_exit("Controller stopped.");
}

/*
 * Stop controller.
 * This function is called by the gadget core when the driver is removed.
 * Disable slot, disable IRQs, and quiesce the controller.
1462 */ 1463 static int cdnsp_gadget_udc_stop(struct usb_gadget *g) 1464 { 1465 struct cdnsp_device *pdev = gadget_to_cdnsp(g); 1466 unsigned long flags; 1467 1468 spin_lock_irqsave(&pdev->lock, flags); 1469 cdnsp_stop(pdev); 1470 pdev->gadget_driver = NULL; 1471 spin_unlock_irqrestore(&pdev->lock, flags); 1472 1473 return 0; 1474 } 1475 1476 static int cdnsp_gadget_get_frame(struct usb_gadget *g) 1477 { 1478 struct cdnsp_device *pdev = gadget_to_cdnsp(g); 1479 1480 return cdnsp_get_frame(pdev); 1481 } 1482 1483 static void __cdnsp_gadget_wakeup(struct cdnsp_device *pdev) 1484 { 1485 struct cdnsp_port_regs __iomem *port_regs; 1486 u32 portpm, portsc; 1487 1488 port_regs = pdev->active_port->regs; 1489 portsc = readl(&port_regs->portsc) & PORT_PLS_MASK; 1490 1491 /* Remote wakeup feature is not enabled by host. */ 1492 if (pdev->gadget.speed < USB_SPEED_SUPER && portsc == XDEV_U2) { 1493 portpm = readl(&port_regs->portpmsc); 1494 1495 if (!(portpm & PORT_RWE)) 1496 return; 1497 } 1498 1499 if (portsc == XDEV_U3 && !pdev->may_wakeup) 1500 return; 1501 1502 cdnsp_set_link_state(pdev, &port_regs->portsc, XDEV_U0); 1503 1504 pdev->cdnsp_state |= CDNSP_WAKEUP_PENDING; 1505 } 1506 1507 static int cdnsp_gadget_wakeup(struct usb_gadget *g) 1508 { 1509 struct cdnsp_device *pdev = gadget_to_cdnsp(g); 1510 unsigned long flags; 1511 1512 spin_lock_irqsave(&pdev->lock, flags); 1513 __cdnsp_gadget_wakeup(pdev); 1514 spin_unlock_irqrestore(&pdev->lock, flags); 1515 1516 return 0; 1517 } 1518 1519 static int cdnsp_gadget_set_selfpowered(struct usb_gadget *g, 1520 int is_selfpowered) 1521 { 1522 struct cdnsp_device *pdev = gadget_to_cdnsp(g); 1523 unsigned long flags; 1524 1525 spin_lock_irqsave(&pdev->lock, flags); 1526 g->is_selfpowered = !!is_selfpowered; 1527 spin_unlock_irqrestore(&pdev->lock, flags); 1528 1529 return 0; 1530 } 1531 1532 static int cdnsp_gadget_pullup(struct usb_gadget *gadget, int is_on) 1533 { 1534 struct cdnsp_device *pdev = gadget_to_cdnsp(gadget); 1535 
struct cdns *cdns = dev_get_drvdata(pdev->dev); 1536 1537 trace_cdnsp_pullup(is_on); 1538 1539 if (!is_on) { 1540 cdnsp_reset_device(pdev); 1541 cdns_clear_vbus(cdns); 1542 } else { 1543 cdns_set_vbus(cdns); 1544 } 1545 return 0; 1546 } 1547 1548 static const struct usb_gadget_ops cdnsp_gadget_ops = { 1549 .get_frame = cdnsp_gadget_get_frame, 1550 .wakeup = cdnsp_gadget_wakeup, 1551 .set_selfpowered = cdnsp_gadget_set_selfpowered, 1552 .pullup = cdnsp_gadget_pullup, 1553 .udc_start = cdnsp_gadget_udc_start, 1554 .udc_stop = cdnsp_gadget_udc_stop, 1555 }; 1556 1557 static void cdnsp_get_ep_buffering(struct cdnsp_device *pdev, 1558 struct cdnsp_ep *pep) 1559 { 1560 void __iomem *reg = &pdev->cap_regs->hc_capbase; 1561 int endpoints; 1562 1563 reg += cdnsp_find_next_ext_cap(reg, 0, XBUF_CAP_ID); 1564 1565 if (!pep->direction) { 1566 pep->buffering = readl(reg + XBUF_RX_TAG_MASK_0_OFFSET); 1567 pep->buffering_period = readl(reg + XBUF_RX_TAG_MASK_1_OFFSET); 1568 pep->buffering = (pep->buffering + 1) / 2; 1569 pep->buffering_period = (pep->buffering_period + 1) / 2; 1570 return; 1571 } 1572 1573 endpoints = HCS_ENDPOINTS(pdev->hcs_params1) / 2; 1574 1575 /* Set to XBUF_TX_TAG_MASK_0 register. */ 1576 reg += XBUF_TX_CMD_OFFSET + (endpoints * 2 + 2) * sizeof(u32); 1577 /* Set reg to XBUF_TX_TAG_MASK_N related with this endpoint. 
 */
	reg += pep->number * sizeof(u32) * 2;

	pep->buffering = (readl(reg) + 1) / 2;
	pep->buffering_period = pep->buffering;
}

/*
 * Populate pdev->eps[] and the gadget's ep_list from the endpoints the
 * controller reports as implemented. Index 0 is the bidirectional ep0;
 * the remaining slots alternate OUT/IN per endpoint number.
 * Returns -EINVAL if the controller's stream support is insufficient.
 */
static int cdnsp_gadget_init_endpoints(struct cdnsp_device *pdev)
{
	int max_streams = HCC_MAX_PSA(pdev->hcc_params);
	struct cdnsp_ep *pep;
	int i;

	INIT_LIST_HEAD(&pdev->gadget.ep_list);

	if (max_streams < STREAM_LOG_STREAMS) {
		dev_err(pdev->dev, "Stream size %d not supported\n",
			max_streams);
		return -EINVAL;
	}

	max_streams = STREAM_LOG_STREAMS;

	for (i = 0; i < CDNSP_ENDPOINTS_NUM; i++) {
		bool direction = !(i & 1); /* Start from OUT endpoint. */
		u8 epnum = ((i + 1) >> 1);

		if (!CDNSP_IF_EP_EXIST(pdev, epnum, direction))
			continue;

		pep = &pdev->eps[i];
		pep->pdev = pdev;
		pep->number = epnum;
		pep->direction = direction; /* 0 for OUT, 1 for IN. */

		/*
		 * Ep0 is bidirectional, so ep0in and ep0out are represented by
		 * pdev->eps[0]
		 */
		if (epnum == 0) {
			snprintf(pep->name, sizeof(pep->name), "ep%d%s",
				 epnum, "BiDir");

			pep->idx = 0;
			usb_ep_set_maxpacket_limit(&pep->endpoint, 512);
			pep->endpoint.maxburst = 1;
			pep->endpoint.ops = &cdnsp_gadget_ep0_ops;
			pep->endpoint.desc = &cdnsp_gadget_ep0_desc;
			pep->endpoint.comp_desc = NULL;
			pep->endpoint.caps.type_control = true;
			pep->endpoint.caps.dir_in = true;
			pep->endpoint.caps.dir_out = true;

			pdev->ep0_preq.epnum = pep->number;
			pdev->ep0_preq.pep = pep;
			pdev->gadget.ep0 = &pep->endpoint;
		} else {
			snprintf(pep->name, sizeof(pep->name), "ep%d%s",
				 epnum, (pep->direction) ? "in" : "out");

			/* Context index: OUT = 2*num-1, IN = 2*num. */
			pep->idx = (epnum * 2 + (direction ? 1 : 0)) - 1;
			usb_ep_set_maxpacket_limit(&pep->endpoint, 1024);

			pep->endpoint.max_streams = max_streams;
			pep->endpoint.ops = &cdnsp_gadget_ep_ops;
			list_add_tail(&pep->endpoint.ep_list,
				      &pdev->gadget.ep_list);

			pep->endpoint.caps.type_iso = true;
			pep->endpoint.caps.type_bulk = true;
			pep->endpoint.caps.type_int = true;

			pep->endpoint.caps.dir_in = direction;
			pep->endpoint.caps.dir_out = !direction;
		}

		pep->endpoint.name = pep->name;
		pep->in_ctx = cdnsp_get_ep_ctx(&pdev->in_ctx, pep->idx);
		pep->out_ctx = cdnsp_get_ep_ctx(&pdev->out_ctx, pep->idx);
		cdnsp_get_ep_buffering(pdev, pep);

		dev_dbg(pdev->dev, "Init %s, MPS: %04x SupType: "
			"CTRL: %s, INT: %s, BULK: %s, ISOC %s, "
			"SupDir IN: %s, OUT: %s\n",
			pep->name, 1024,
			(pep->endpoint.caps.type_control) ? "yes" : "no",
			(pep->endpoint.caps.type_int) ? "yes" : "no",
			(pep->endpoint.caps.type_bulk) ? "yes" : "no",
			(pep->endpoint.caps.type_iso) ? "yes" : "no",
			(pep->endpoint.caps.dir_in) ? "yes" : "no",
			(pep->endpoint.caps.dir_out) ? "yes" : "no");

		INIT_LIST_HEAD(&pep->pending_list);
	}

	return 0;
}

/* Unlink every non-ep0 endpoint that was added to the gadget's ep_list. */
static void cdnsp_gadget_free_endpoints(struct cdnsp_device *pdev)
{
	struct cdnsp_ep *pep;
	int i;

	for (i = 0; i < CDNSP_ENDPOINTS_NUM; i++) {
		pep = &pdev->eps[i];
		/* out_ctx is only set for endpoints that were initialized. */
		if (pep->number != 0 && pep->out_ctx)
			list_del(&pep->endpoint.ep_list);
	}
}

/*
 * Notify the gadget driver of a disconnect. Called with pdev->lock held;
 * the lock is dropped around the driver callback and the pending flag
 * marks the window for other paths to observe.
 */
void cdnsp_disconnect_gadget(struct cdnsp_device *pdev)
{
	pdev->cdnsp_state |= CDNSP_STATE_DISCONNECT_PENDING;

	if (pdev->gadget_driver && pdev->gadget_driver->disconnect) {
		spin_unlock(&pdev->lock);
		pdev->gadget_driver->disconnect(&pdev->gadget);
		spin_lock(&pdev->lock);
	}

	pdev->gadget.speed = USB_SPEED_UNKNOWN;
	usb_gadget_set_state(&pdev->gadget, USB_STATE_NOTATTACHED);

	pdev->cdnsp_state &= ~CDNSP_STATE_DISCONNECT_PENDING;
}

/* Forward a bus suspend to the gadget driver (lock dropped around callback). */
void cdnsp_suspend_gadget(struct cdnsp_device *pdev)
{
	if (pdev->gadget_driver && pdev->gadget_driver->suspend) {
		spin_unlock(&pdev->lock);
		pdev->gadget_driver->suspend(&pdev->gadget);
		spin_lock(&pdev->lock);
	}
}

/* Forward a bus resume to the gadget driver (lock dropped around callback). */
void cdnsp_resume_gadget(struct cdnsp_device *pdev)
{
	if (pdev->gadget_driver && pdev->gadget_driver->resume) {
		spin_unlock(&pdev->lock);
		pdev->gadget_driver->resume(&pdev->gadget);
		spin_lock(&pdev->lock);
	}
}

/*
 * Handle a USB bus reset: reset the controller state, detect the new
 * connection speed from PORTSC and size ep0's max packet accordingly.
 * Called with pdev->lock held (dropped around the udc_reset callback).
 */
void cdnsp_irq_reset(struct cdnsp_device *pdev)
{
	struct cdnsp_port_regs __iomem *port_regs;

	cdnsp_reset_device(pdev);

	port_regs = pdev->active_port->regs;
	pdev->gadget.speed = cdnsp_port_speed(readl(port_regs));

	spin_unlock(&pdev->lock);
	usb_gadget_udc_reset(&pdev->gadget, pdev->gadget_driver);
	spin_lock(&pdev->lock);

	switch (pdev->gadget.speed) {
	case USB_SPEED_SUPER_PLUS:
	case USB_SPEED_SUPER:
		cdnsp_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
		pdev->gadget.ep0->maxpacket =
			512;
		break;
	case USB_SPEED_HIGH:
	case USB_SPEED_FULL:
		cdnsp_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		pdev->gadget.ep0->maxpacket = 64;
		break;
	default:
		/* Low speed is not supported. */
		dev_err(pdev->dev, "Unknown device speed\n");
		break;
	}

	cdnsp_clear_chicken_bits_2(pdev, CHICKEN_XDMA_2_TP_CACHE_DIS);
	cdnsp_setup_device(pdev, SETUP_CONTEXT_ONLY);
	usb_gadget_set_state(&pdev->gadget, USB_STATE_DEFAULT);
}

/*
 * Locate the RTL revision extended capability, cache its address in
 * pdev->rev_cap and log the controller's revision/capability registers.
 */
static void cdnsp_get_rev_cap(struct cdnsp_device *pdev)
{
	void __iomem *reg = &pdev->cap_regs->hc_capbase;

	reg += cdnsp_find_next_ext_cap(reg, 0, RTL_REV_CAP);
	pdev->rev_cap = reg;

	dev_info(pdev->dev, "Rev: %08x/%08x, eps: %08x, buff: %08x/%08x\n",
		 readl(&pdev->rev_cap->ctrl_revision),
		 readl(&pdev->rev_cap->rtl_revision),
		 readl(&pdev->rev_cap->ep_supported),
		 readl(&pdev->rev_cap->rx_buff_size),
		 readl(&pdev->rev_cap->tx_buff_size));
}

/*
 * Generic controller bring-up: map the register sub-regions, cache the
 * read-only capability registers, halt and reset the controller, choose
 * the DMA mask, initialize driver memory and apply the U1 clock-gating
 * workaround. Returns 0 on success or a negative errno.
 */
static int cdnsp_gen_setup(struct cdnsp_device *pdev)
{
	int ret;
	u32 reg;

	pdev->cap_regs = pdev->regs;
	pdev->op_regs = pdev->regs +
		HC_LENGTH(readl(&pdev->cap_regs->hc_capbase));
	pdev->run_regs = pdev->regs +
		(readl(&pdev->cap_regs->run_regs_off) & RTSOFF_MASK);

	/* Cache read-only capability registers */
	pdev->hcs_params1 = readl(&pdev->cap_regs->hcs_params1);
	pdev->hcc_params = readl(&pdev->cap_regs->hc_capbase);
	pdev->hci_version = HC_VERSION(pdev->hcc_params);
	pdev->hcc_params = readl(&pdev->cap_regs->hcc_params);

	cdnsp_get_rev_cap(pdev);

	/* Make sure the Device Controller is halted. */
	ret = cdnsp_halt(pdev);
	if (ret)
		return ret;

	/* Reset the internal controller memory state and registers. */
	ret = cdnsp_reset(pdev);
	if (ret)
		return ret;

	/*
	 * Set dma_mask and coherent_dma_mask to 64-bits,
	 * if controller supports 64-bit addressing.
	 */
	if (HCC_64BIT_ADDR(pdev->hcc_params) &&
	    !dma_set_mask(pdev->dev, DMA_BIT_MASK(64))) {
		dev_dbg(pdev->dev, "Enabling 64-bit DMA addresses.\n");
		dma_set_coherent_mask(pdev->dev, DMA_BIT_MASK(64));
	} else {
		/*
		 * This is to avoid error in cases where a 32-bit USB
		 * controller is used on a 64-bit capable system.
		 */
		ret = dma_set_mask(pdev->dev, DMA_BIT_MASK(32));
		if (ret)
			return ret;

		dev_dbg(pdev->dev, "Enabling 32-bit DMA addresses.\n");
		dma_set_coherent_mask(pdev->dev, DMA_BIT_MASK(32));
	}

	spin_lock_init(&pdev->lock);

	ret = cdnsp_mem_init(pdev);
	if (ret)
		return ret;

	/*
	 * Software workaround for U1: after transition
	 * to U1 the controller starts gating clock, and in some cases,
	 * it causes that controller stack.
	 */
	reg = readl(&pdev->port3x_regs->mode_2);
	reg &= ~CFG_3XPORT_U1_PIPE_CLK_GATE_EN;
	writel(reg, &pdev->port3x_regs->mode_2);

	return 0;
}

/*
 * cdns role-driver "start" callback: allocate and initialize the
 * cdnsp_device, validate the configured maximum speed, set up the
 * controller, register the UDC and request the device interrupt.
 */
static int __cdnsp_gadget_init(struct cdns *cdns)
{
	struct cdnsp_device *pdev;
	u32 max_speed;
	int ret = -ENOMEM;

	cdns_drd_gadget_on(cdns);

	pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
	if (!pdev)
		return -ENOMEM;

	pm_runtime_get_sync(cdns->dev);

	cdns->gadget_dev = pdev;
	pdev->dev = cdns->dev;
	pdev->regs = cdns->dev_regs;
	max_speed = usb_get_maximum_speed(cdns->dev);

	switch (max_speed) {
	case USB_SPEED_FULL:
	case USB_SPEED_HIGH:
	case USB_SPEED_SUPER:
	case USB_SPEED_SUPER_PLUS:
		break;
	default:
		dev_err(cdns->dev, "invalid speed parameter %d\n", max_speed);
		fallthrough;
	case USB_SPEED_UNKNOWN:
		/* Default to SSP */
		max_speed = USB_SPEED_SUPER_PLUS;
		break;
	}

	pdev->gadget.ops = &cdnsp_gadget_ops;
	pdev->gadget.name = "cdnsp-gadget";
	pdev->gadget.speed = USB_SPEED_UNKNOWN;
	pdev->gadget.sg_supported = 1;
	pdev->gadget.max_speed = USB_SPEED_SUPER_PLUS;
	pdev->gadget.lpm_capable = 1;

	pdev->setup_buf = kzalloc(CDNSP_EP0_SETUP_SIZE, GFP_KERNEL);
	if (!pdev->setup_buf)
		goto free_pdev;

	/*
	 * Controller supports not aligned buffer but it should improve
	 * performance.
1887 */ 1888 pdev->gadget.quirk_ep_out_aligned_size = true; 1889 1890 ret = cdnsp_gen_setup(pdev); 1891 if (ret) { 1892 dev_err(pdev->dev, "Generic initialization failed %d\n", ret); 1893 goto free_setup; 1894 } 1895 1896 ret = cdnsp_gadget_init_endpoints(pdev); 1897 if (ret) { 1898 dev_err(pdev->dev, "failed to initialize endpoints\n"); 1899 goto halt_pdev; 1900 } 1901 1902 ret = usb_add_gadget_udc(pdev->dev, &pdev->gadget); 1903 if (ret) { 1904 dev_err(pdev->dev, "failed to register udc\n"); 1905 goto free_endpoints; 1906 } 1907 1908 ret = devm_request_threaded_irq(pdev->dev, cdns->dev_irq, 1909 cdnsp_irq_handler, 1910 cdnsp_thread_irq_handler, IRQF_SHARED, 1911 dev_name(pdev->dev), pdev); 1912 if (ret) 1913 goto del_gadget; 1914 1915 return 0; 1916 1917 del_gadget: 1918 usb_del_gadget_udc(&pdev->gadget); 1919 free_endpoints: 1920 cdnsp_gadget_free_endpoints(pdev); 1921 halt_pdev: 1922 cdnsp_halt(pdev); 1923 cdnsp_reset(pdev); 1924 cdnsp_mem_cleanup(pdev); 1925 free_setup: 1926 kfree(pdev->setup_buf); 1927 free_pdev: 1928 kfree(pdev); 1929 1930 return ret; 1931 } 1932 1933 static void cdnsp_gadget_exit(struct cdns *cdns) 1934 { 1935 struct cdnsp_device *pdev = cdns->gadget_dev; 1936 1937 devm_free_irq(pdev->dev, cdns->dev_irq, pdev); 1938 pm_runtime_mark_last_busy(cdns->dev); 1939 pm_runtime_put_autosuspend(cdns->dev); 1940 usb_del_gadget_udc(&pdev->gadget); 1941 cdnsp_gadget_free_endpoints(pdev); 1942 cdnsp_mem_cleanup(pdev); 1943 kfree(pdev); 1944 cdns->gadget_dev = NULL; 1945 cdns_drd_gadget_off(cdns); 1946 } 1947 1948 static int cdnsp_gadget_suspend(struct cdns *cdns, bool do_wakeup) 1949 { 1950 struct cdnsp_device *pdev = cdns->gadget_dev; 1951 unsigned long flags; 1952 1953 if (pdev->link_state == XDEV_U3) 1954 return 0; 1955 1956 spin_lock_irqsave(&pdev->lock, flags); 1957 cdnsp_disconnect_gadget(pdev); 1958 cdnsp_stop(pdev); 1959 spin_unlock_irqrestore(&pdev->lock, flags); 1960 1961 return 0; 1962 } 1963 1964 static int cdnsp_gadget_resume(struct cdns 
*cdns, bool hibernated) 1965 { 1966 struct cdnsp_device *pdev = cdns->gadget_dev; 1967 enum usb_device_speed max_speed; 1968 unsigned long flags; 1969 int ret; 1970 1971 if (!pdev->gadget_driver) 1972 return 0; 1973 1974 spin_lock_irqsave(&pdev->lock, flags); 1975 max_speed = pdev->gadget_driver->max_speed; 1976 1977 /* Limit speed if necessary. */ 1978 max_speed = min(max_speed, pdev->gadget.max_speed); 1979 1980 ret = cdnsp_run(pdev, max_speed); 1981 1982 if (pdev->link_state == XDEV_U3) 1983 __cdnsp_gadget_wakeup(pdev); 1984 1985 spin_unlock_irqrestore(&pdev->lock, flags); 1986 1987 return ret; 1988 } 1989 1990 /** 1991 * cdnsp_gadget_init - initialize device structure 1992 * @cdns: cdnsp instance 1993 * 1994 * This function initializes the gadget. 1995 */ 1996 int cdnsp_gadget_init(struct cdns *cdns) 1997 { 1998 struct cdns_role_driver *rdrv; 1999 2000 rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL); 2001 if (!rdrv) 2002 return -ENOMEM; 2003 2004 rdrv->start = __cdnsp_gadget_init; 2005 rdrv->stop = cdnsp_gadget_exit; 2006 rdrv->suspend = cdnsp_gadget_suspend; 2007 rdrv->resume = cdnsp_gadget_resume; 2008 rdrv->state = CDNS_ROLE_STATE_INACTIVE; 2009 rdrv->name = "gadget"; 2010 cdns->roles[USB_ROLE_DEVICE] = rdrv; 2011 2012 return 0; 2013 } 2014