// SPDX-License-Identifier: GPL-2.0
/*
 * Cadence CDNSP DRD Driver.
 *
 * Copyright (C) 2020 Cadence.
 *
 * Author: Pawel Laszczak <pawell@cadence.com>
 *
 */

#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/iopoll.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/dmi.h>

#include "core.h"
#include "gadget-export.h"
#include "drd.h"
#include "cdnsp-gadget.h"
#include "cdnsp-trace.h"

unsigned int cdnsp_port_speed(unsigned int port_status)
{
	/* Detect gadget speed based on PORTSC register. */
	if (DEV_SUPERSPEEDPLUS(port_status))
		return USB_SPEED_SUPER_PLUS;
	else if (DEV_SUPERSPEED(port_status))
		return USB_SPEED_SUPER;
	else if (DEV_HIGHSPEED(port_status))
		return USB_SPEED_HIGH;
	else if (DEV_FULLSPEED(port_status))
		return USB_SPEED_FULL;

	/* If device is detached then speed will be USB_SPEED_UNKNOWN. */
	return USB_SPEED_UNKNOWN;
}

/*
 * Given a port state, this function returns a value that would result in the
 * port being in the same state, if the value was written to the port status
 * control register.
 * Save Read Only (RO) bits and save read/write bits where
 * writing a 0 clears the bit and writing a 1 sets the bit (RWS).
 * For all other types (RW1S, RW1CS, RW, and RZ), writing a '0' has no effect.
 */
u32 cdnsp_port_state_to_neutral(u32 state)
{
	/* Save read-only status and port state. */
	return (state & CDNSP_PORT_RO) | (state & CDNSP_PORT_RWS);
}

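/*
 * Illustrative sketch (not part of the driver): the read-modify-write
 * pattern used throughout this file for PORTSC. Passing the value through
 * cdnsp_port_state_to_neutral() first ensures that RW1C change bits which
 * happen to read back as '1' are not cleared by the subsequent write.
 */
static __maybe_unused void cdnsp_portsc_set_bits_sketch(__le32 __iomem *port_regs,
							u32 set_bits)
{
	u32 temp = readl(port_regs);

	/* Keep RO/RWS bits, neutralize everything else. */
	temp = cdnsp_port_state_to_neutral(temp);
	writel(temp | set_bits, port_regs);
}
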
/**
 * cdnsp_find_next_ext_cap - Find the offset of the extended capabilities
 *                           with capability ID @id.
 * @base: PCI MMIO registers base address.
 * @start: Address at which to start looking (0 or HCC_PARAMS to start at
 *         the beginning of the list).
 * @id: Extended capability ID to search for.
 *
 * Returns the offset of the next matching extended capability structure.
 * Some capabilities can occur several times,
 * e.g., the EXT_CAPS_PROTOCOL, and this provides a way to find them all.
 */
int cdnsp_find_next_ext_cap(void __iomem *base, u32 start, int id)
{
	u32 offset = start;
	u32 next;
	u32 val;

	if (!start || start == HCC_PARAMS_OFFSET) {
		val = readl(base + HCC_PARAMS_OFFSET);
		if (val == ~0)
			return 0;

		offset = HCC_EXT_CAPS(val) << 2;
		if (!offset)
			return 0;
	}

	do {
		val = readl(base + offset);
		if (val == ~0)
			return 0;

		if (EXT_CAPS_ID(val) == id && offset != start)
			return offset;

		next = EXT_CAPS_NEXT(val);
		offset += next << 2;
	} while (next);

	return 0;
}

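/*
 * Illustrative sketch (not part of the driver): because some capability IDs
 * can occur several times, cdnsp_find_next_ext_cap() can be called in a
 * loop, feeding each returned offset back in as @start, to visit every
 * instance. EXT_CAPS_PROTOCOL is used here only as an example ID.
 */
static __maybe_unused void cdnsp_walk_protocol_caps_sketch(void __iomem *base)
{
	int offset = 0;

	while ((offset = cdnsp_find_next_ext_cap(base, offset,
						 EXT_CAPS_PROTOCOL))) {
		/* Each iteration visits one EXT_CAPS_PROTOCOL instance. */
	}
}
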
void cdnsp_set_link_state(struct cdnsp_device *pdev,
			  __le32 __iomem *port_regs,
			  u32 link_state)
{
	int port_num = 0xFF;
	u32 temp;

	temp = readl(port_regs);
	temp = cdnsp_port_state_to_neutral(temp);
	temp |= PORT_WKCONN_E | PORT_WKDISC_E;
	writel(temp, port_regs);

	temp &= ~PORT_PLS_MASK;
	temp |= PORT_LINK_STROBE | link_state;

	if (pdev->active_port)
		port_num = pdev->active_port->port_num;

	trace_cdnsp_handle_port_status(port_num, readl(port_regs));
	writel(temp, port_regs);
	trace_cdnsp_link_state_changed(port_num, readl(port_regs));
}

static void cdnsp_disable_port(struct cdnsp_device *pdev,
			       __le32 __iomem *port_regs)
{
	u32 temp = cdnsp_port_state_to_neutral(readl(port_regs));

	writel(temp | PORT_PED, port_regs);
}

static void cdnsp_clear_port_change_bit(struct cdnsp_device *pdev,
					__le32 __iomem *port_regs)
{
	u32 portsc = readl(port_regs);

	writel(cdnsp_port_state_to_neutral(portsc) |
	       (portsc & PORT_CHANGE_BITS), port_regs);
}

static void cdnsp_set_chicken_bits_2(struct cdnsp_device *pdev, u32 bit)
{
	__le32 __iomem *reg;
	void __iomem *base;
	u32 offset = 0;

	base = &pdev->cap_regs->hc_capbase;
	offset = cdnsp_find_next_ext_cap(base, offset, D_XEC_PRE_REGS_CAP);
	reg = base + offset + REG_CHICKEN_BITS_2_OFFSET;

	bit = readl(reg) | bit;
	writel(bit, reg);
}

static void cdnsp_clear_chicken_bits_2(struct cdnsp_device *pdev, u32 bit)
{
	__le32 __iomem *reg;
	void __iomem *base;
	u32 offset = 0;

	base = &pdev->cap_regs->hc_capbase;
	offset = cdnsp_find_next_ext_cap(base, offset, D_XEC_PRE_REGS_CAP);
	reg = base + offset + REG_CHICKEN_BITS_2_OFFSET;

	bit = readl(reg) & ~bit;
	writel(bit, reg);
}

/*
 * Disable interrupts and begin the controller halting process.
 */
static void cdnsp_quiesce(struct cdnsp_device *pdev)
{
	u32 halted;
	u32 mask;
	u32 cmd;

	mask = ~(u32)(CDNSP_IRQS);

	halted = readl(&pdev->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~(CMD_R_S | CMD_DEVEN);

	cmd = readl(&pdev->op_regs->command);
	cmd &= mask;
	writel(cmd, &pdev->op_regs->command);
}

/*
 * Force the controller into a halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * The controller will complete any current and actively pipelined
 * transactions, and should halt within 16 ms of the run/stop bit being
 * cleared.
 * Read the controller Halted bit in the status register to see when the
 * controller is finished.
 */
int cdnsp_halt(struct cdnsp_device *pdev)
{
	int ret;
	u32 val;

	cdnsp_quiesce(pdev);

	ret = readl_poll_timeout_atomic(&pdev->op_regs->status, val,
					val & STS_HALT, 1,
					CDNSP_MAX_HALT_USEC);
	if (ret) {
		dev_err(pdev->dev, "ERROR: Device halt failed\n");
		return ret;
	}

	pdev->cdnsp_state |= CDNSP_STATE_HALTED;

	return 0;
}

/*
 * The device controller died: a register read returns 0xffffffff, or a
 * command never completes.
 */
void cdnsp_died(struct cdnsp_device *pdev)
{
	dev_err(pdev->dev, "ERROR: CDNSP controller not responding\n");
	pdev->cdnsp_state |= CDNSP_STATE_DYING;
	cdnsp_halt(pdev);
}

/*
 * Set the run bit and wait for the device to be running.
 */
static int cdnsp_start(struct cdnsp_device *pdev)
{
	u32 temp;
	int ret;

	temp = readl(&pdev->op_regs->command);
	temp |= (CMD_R_S | CMD_DEVEN);
	writel(temp, &pdev->op_regs->command);

	pdev->cdnsp_state = 0;

	/*
	 * Wait for the STS_HALT Status bit to be 0 to indicate the device is
	 * running.
	 */
	ret = readl_poll_timeout_atomic(&pdev->op_regs->status, temp,
					!(temp & STS_HALT), 1,
					CDNSP_MAX_HALT_USEC);
	if (ret) {
		pdev->cdnsp_state = CDNSP_STATE_DYING;
		dev_err(pdev->dev, "ERROR: Controller run failed\n");
	}

	return ret;
}

/*
 * Reset a halted controller.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int cdnsp_reset(struct cdnsp_device *pdev)
{
	u32 command;
	u32 temp;
	int ret;

	temp = readl(&pdev->op_regs->status);

	if (temp == ~(u32)0) {
		dev_err(pdev->dev, "Device not accessible, reset failed.\n");
		return -ENODEV;
	}

	if ((temp & STS_HALT) == 0) {
		dev_err(pdev->dev, "Controller not halted, aborting reset.\n");
		return -EINVAL;
	}

	command = readl(&pdev->op_regs->command);
	command |= CMD_RESET;
	writel(command, &pdev->op_regs->command);

	ret = readl_poll_timeout_atomic(&pdev->op_regs->command, temp,
					!(temp & CMD_RESET), 1,
					10 * 1000);
	if (ret) {
		dev_err(pdev->dev, "ERROR: Controller reset failed\n");
		return ret;
	}

	/*
	 * CDNSP cannot write to any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */
	ret = readl_poll_timeout_atomic(&pdev->op_regs->status, temp,
					!(temp & STS_CNR), 1,
					10 * 1000);
	if (ret) {
		dev_err(pdev->dev, "ERROR: Controller not ready to work\n");
		return ret;
	}

	dev_dbg(pdev->dev, "Controller ready to work");

	return ret;
}

/*
 * cdnsp_get_endpoint_index - Find the index for an endpoint given its
 * descriptor. Use the return value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
static unsigned int
cdnsp_get_endpoint_index(const struct usb_endpoint_descriptor *desc)
{
	unsigned int index = (unsigned int)usb_endpoint_num(desc);

	if (usb_endpoint_xfer_control(desc))
		return index * 2;

	return (index * 2) + (usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
}

/*
 * Find the flag for this endpoint (for use in the control context). Use the
 * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
static unsigned int
cdnsp_get_endpoint_flag(const struct usb_endpoint_descriptor *desc)
{
	return 1 << (cdnsp_get_endpoint_index(desc) + 1);
}

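/*
 * Worked example (illustrative): for ep1out, index = (1 * 2) + 0 - 1 = 1 and
 * flag = BIT(2); for ep1in, index = (1 * 2) + 1 - 1 = 2 and flag = BIT(3).
 * Ep0 (control) gets index 0 and flag BIT(1), while BIT(0) addresses the
 * slot context.
 */
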
int cdnsp_ep_enqueue(struct cdnsp_ep *pep, struct cdnsp_request *preq)
{
	struct cdnsp_device *pdev = pep->pdev;
	struct usb_request *request;
	int ret;

	if (preq->epnum == 0 && !list_empty(&pep->pending_list)) {
		trace_cdnsp_request_enqueue_busy(preq);
		return -EBUSY;
	}

	request = &preq->request;
	request->actual = 0;
	request->status = -EINPROGRESS;
	preq->direction = pep->direction;
	preq->epnum = pep->number;
	preq->td.drbl = 0;

	ret = usb_gadget_map_request_by_dev(pdev->dev, request, pep->direction);
	if (ret) {
		trace_cdnsp_request_enqueue_error(preq);
		return ret;
	}

	list_add_tail(&preq->list, &pep->pending_list);

	trace_cdnsp_request_enqueue(preq);

	switch (usb_endpoint_type(pep->endpoint.desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		ret = cdnsp_queue_ctrl_tx(pdev, preq);
		break;
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		ret = cdnsp_queue_bulk_tx(pdev, preq);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		ret = cdnsp_queue_isoc_tx_prepare(pdev, preq);
		break;
	}

	if (ret)
		goto unmap;

	return 0;

unmap:
	usb_gadget_unmap_request_by_dev(pdev->dev, &preq->request,
					pep->direction);
	list_del(&preq->list);
	trace_cdnsp_request_enqueue_error(preq);

	return ret;
}

/*
 * Remove the request's TD from the endpoint ring. This may cause the
 * controller to stop USB transfers, potentially stopping in the middle of a
 * TRB buffer. The controller should pick up where it left off in the TD,
 * unless a Set Transfer Ring Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled request will be "removed"
 * from the ring. Since the ring is a contiguous structure, they can't be
 * physically removed. Instead, there are a few cases to handle:
 *
 * 1) If the controller is in the middle of processing the request to be
 *    canceled, we simply move the ring's dequeue pointer past those TRBs
 *    using the Set Transfer Ring Dequeue Pointer command. This will be
 *    the common case, when drivers time out on the last submitted request
 *    and attempt to cancel.
 *
 * 2) If the controller is in the middle of a different TD, we turn the TRBs
 *    into a series of 1-TRB transfer no-op TDs. No-ops shouldn't be chained.
 *    The controller will need to invalidate any TRBs it has cached after
 *    the stop endpoint command.
 *
 * 3) The TD may have completed by the time the Stop Endpoint Command
 *    completes, so software needs to handle that case too.
 */
int cdnsp_ep_dequeue(struct cdnsp_ep *pep, struct cdnsp_request *preq)
{
	struct cdnsp_device *pdev = pep->pdev;
	int ret;

	trace_cdnsp_request_dequeue(preq);

	if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_RUNNING) {
		ret = cdnsp_cmd_stop_ep(pdev, pep);
		if (ret)
			return ret;
	}

	return cdnsp_remove_request(pdev, preq, pep);
}

static void cdnsp_zero_in_ctx(struct cdnsp_device *pdev)
{
	struct cdnsp_input_control_ctx *ctrl_ctx;
	struct cdnsp_slot_ctx *slot_ctx;
	struct cdnsp_ep_ctx *ep_ctx;
	int i;

	ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);

	/*
	 * When a device's add flag and drop flag are zero, any subsequent
	 * configure endpoint command will leave that endpoint's state
	 * untouched. Make sure we don't leave any old state in the input
	 * endpoint contexts.
	 */
	ctrl_ctx->drop_flags = 0;
	ctrl_ctx->add_flags = 0;
	slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
	slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);

	/* Endpoint 0 is always valid. */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
	for (i = 1; i < CDNSP_ENDPOINTS_NUM; ++i) {
		ep_ctx = cdnsp_get_ep_ctx(&pdev->in_ctx, i);
		ep_ctx->ep_info = 0;
		ep_ctx->ep_info2 = 0;
		ep_ctx->deq = 0;
		ep_ctx->tx_info = 0;
	}
}

/* Issue a configure endpoint command and wait for it to finish. */
static int cdnsp_configure_endpoint(struct cdnsp_device *pdev)
{
	int ret;

	cdnsp_queue_configure_endpoint(pdev, pdev->cmd.in_ctx->dma);
	cdnsp_ring_cmd_db(pdev);
	ret = cdnsp_wait_for_cmd_compl(pdev);
	if (ret) {
		dev_err(pdev->dev,
			"ERR: unexpected command completion code 0x%x.\n", ret);
		return -EINVAL;
	}

	return ret;
}

static void cdnsp_invalidate_ep_events(struct cdnsp_device *pdev,
				       struct cdnsp_ep *pep)
{
	struct cdnsp_segment *segment;
	union cdnsp_trb *event;
	u32 cycle_state;
	u32 data;

	event = pdev->event_ring->dequeue;
	segment = pdev->event_ring->deq_seg;
	cycle_state = pdev->event_ring->cycle_state;

	while (1) {
		data = le32_to_cpu(event->trans_event.flags);

		/* Check the owner of the TRB. */
		if ((data & TRB_CYCLE) != cycle_state)
			break;

		if (TRB_FIELD_TO_TYPE(data) == TRB_TRANSFER &&
		    TRB_TO_EP_ID(data) == (pep->idx + 1)) {
			data |= TRB_EVENT_INVALIDATE;
			event->trans_event.flags = cpu_to_le32(data);
		}

		if (cdnsp_last_trb_on_seg(segment, event)) {
			cycle_state ^= 1;
			segment = pdev->event_ring->deq_seg->next;
			event = segment->trbs;
		} else {
			event++;
		}
	}
}

int cdnsp_wait_for_cmd_compl(struct cdnsp_device *pdev)
{
	struct cdnsp_segment *event_deq_seg;
	union cdnsp_trb *cmd_trb;
	dma_addr_t cmd_deq_dma;
	union cdnsp_trb *event;
	u32 cycle_state;
	int ret, val;
	u64 cmd_dma;
	u32 flags;

	cmd_trb = pdev->cmd.command_trb;
	pdev->cmd.status = 0;

	trace_cdnsp_cmd_wait_for_compl(pdev->cmd_ring, &cmd_trb->generic);

	ret = readl_poll_timeout_atomic(&pdev->op_regs->cmd_ring, val,
					!CMD_RING_BUSY(val), 1,
					CDNSP_CMD_TIMEOUT);
	if (ret) {
		dev_err(pdev->dev, "ERR: Timeout while waiting for command\n");
		trace_cdnsp_cmd_timeout(pdev->cmd_ring, &cmd_trb->generic);
		pdev->cdnsp_state = CDNSP_STATE_DYING;
		return -ETIMEDOUT;
	}

	event = pdev->event_ring->dequeue;
	event_deq_seg = pdev->event_ring->deq_seg;
	cycle_state = pdev->event_ring->cycle_state;

	cmd_deq_dma = cdnsp_trb_virt_to_dma(pdev->cmd_ring->deq_seg, cmd_trb);
	if (!cmd_deq_dma)
		return -EINVAL;

	while (1) {
		flags = le32_to_cpu(event->event_cmd.flags);

		/* Check the owner of the TRB. */
		if ((flags & TRB_CYCLE) != cycle_state)
			return -EINVAL;

		cmd_dma = le64_to_cpu(event->event_cmd.cmd_trb);

		/*
		 * Check whether the completion event is for the last queued
		 * command.
		 */
		if (TRB_FIELD_TO_TYPE(flags) != TRB_COMPLETION ||
		    cmd_dma != (u64)cmd_deq_dma) {
			if (!cdnsp_last_trb_on_seg(event_deq_seg, event)) {
				event++;
				continue;
			}

			if (cdnsp_last_trb_on_ring(pdev->event_ring,
						   event_deq_seg, event))
				cycle_state ^= 1;

			event_deq_seg = event_deq_seg->next;
			event = event_deq_seg->trbs;
			continue;
		}

		trace_cdnsp_handle_command(pdev->cmd_ring, &cmd_trb->generic);

		pdev->cmd.status = GET_COMP_CODE(le32_to_cpu(event->event_cmd.status));
		if (pdev->cmd.status == COMP_SUCCESS)
			return 0;

		return -pdev->cmd.status;
	}
}

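/*
 * Illustrative sketch (not part of the driver): ownership of an event ring
 * TRB is decided by comparing its cycle bit with the ring's current cycle
 * state, exactly as the loops above do. A mismatch means the controller
 * has not written this TRB yet.
 */
static __maybe_unused bool cdnsp_event_trb_ready_sketch(union cdnsp_trb *event,
							u32 cycle_state)
{
	u32 flags = le32_to_cpu(event->event_cmd.flags);

	/* A matching cycle bit means the event is valid to consume. */
	return (flags & TRB_CYCLE) == cycle_state;
}
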
int cdnsp_halt_endpoint(struct cdnsp_device *pdev,
			struct cdnsp_ep *pep,
			int value)
{
	int ret;

	trace_cdnsp_ep_halt(value ? "Set" : "Clear");

	if (value) {
		ret = cdnsp_cmd_stop_ep(pdev, pep);
		if (ret)
			return ret;

		if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_STOPPED) {
			cdnsp_queue_halt_endpoint(pdev, pep->idx);
			cdnsp_ring_cmd_db(pdev);
			ret = cdnsp_wait_for_cmd_compl(pdev);
		}

		pep->ep_state |= EP_HALTED;
	} else {
		/*
		 * In device mode, the driver can issue the Reset Endpoint
		 * command from any endpoint state.
		 */
		cdnsp_queue_reset_ep(pdev, pep->idx);
		cdnsp_ring_cmd_db(pdev);
		ret = cdnsp_wait_for_cmd_compl(pdev);
		trace_cdnsp_handle_cmd_reset_ep(pep->out_ctx);

		if (ret)
			return ret;

		pep->ep_state &= ~EP_HALTED;

		if (pep->idx != 0 && !(pep->ep_state & EP_WEDGE))
			cdnsp_ring_doorbell_for_active_rings(pdev, pep);

		pep->ep_state &= ~EP_WEDGE;
	}

	return 0;
}

static int cdnsp_update_eps_configuration(struct cdnsp_device *pdev,
					  struct cdnsp_ep *pep)
{
	struct cdnsp_input_control_ctx *ctrl_ctx;
	struct cdnsp_slot_ctx *slot_ctx;
	int ret = 0;
	u32 ep_sts;
	int i;

	ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);

	/* Don't issue the command if there are no endpoints to update. */
	if (ctrl_ctx->add_flags == 0 && ctrl_ctx->drop_flags == 0)
		return 0;

	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
	ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));

	/* Fix up Context Entries field. Minimum value is EP0 == BIT(1). */
	slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
	for (i = CDNSP_ENDPOINTS_NUM; i >= 1; i--) {
		__le32 le32 = cpu_to_le32(BIT(i));

		if ((pdev->eps[i - 1].ring && !(ctrl_ctx->drop_flags & le32)) ||
		    (ctrl_ctx->add_flags & le32) || i == 1) {
			slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
			slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i));
			break;
		}
	}

	ep_sts = GET_EP_CTX_STATE(pep->out_ctx);

	if ((ctrl_ctx->add_flags != cpu_to_le32(SLOT_FLAG) &&
	     ep_sts == EP_STATE_DISABLED) ||
	    (ep_sts != EP_STATE_DISABLED && ctrl_ctx->drop_flags))
		ret = cdnsp_configure_endpoint(pdev);

	trace_cdnsp_configure_endpoint(cdnsp_get_slot_ctx(&pdev->out_ctx));
	trace_cdnsp_handle_cmd_config_ep(pep->out_ctx);

	cdnsp_zero_in_ctx(pdev);

	return ret;
}

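/*
 * Worked example (illustrative): with ep1out (index 1, flag BIT(2)) and
 * ep1in (index 2, flag BIT(3)) configured and no higher endpoint in use,
 * the loop above stops at i = 3 and sets Context Entries to LAST_CTX(3).
 */
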
/*
 * This submits a Reset Device Command, which will set the device state to 0,
 * set the device address to 0, and disable all the endpoints except the
 * default control endpoint. The USB core should come back and call
 * cdnsp_setup_device(), and then re-set up the configuration.
 */
int cdnsp_reset_device(struct cdnsp_device *pdev)
{
	struct cdnsp_slot_ctx *slot_ctx;
	int slot_state;
	int ret, i;

	slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
	slot_ctx->dev_info = 0;
	pdev->device_address = 0;

	/* If the device is not set up, there is no point in resetting it. */
	slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx);
	slot_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state));
	trace_cdnsp_reset_device(slot_ctx);

	if (slot_state <= SLOT_STATE_DEFAULT &&
	    pdev->eps[0].ep_state & EP_HALTED) {
		cdnsp_halt_endpoint(pdev, &pdev->eps[0], 0);
	}

	/*
	 * During the Reset Device command, the controller shall transition
	 * endpoint ep0 to the Running state.
	 */
	pdev->eps[0].ep_state &= ~(EP_STOPPED | EP_HALTED);
	pdev->eps[0].ep_state |= EP_ENABLED;

	if (slot_state <= SLOT_STATE_DEFAULT)
		return 0;

	cdnsp_queue_reset_device(pdev);
	cdnsp_ring_cmd_db(pdev);
	ret = cdnsp_wait_for_cmd_compl(pdev);

	/*
	 * After the Reset Device command, all endpoints except the default
	 * control endpoint are in the Disabled state.
	 */
	for (i = 1; i < CDNSP_ENDPOINTS_NUM; ++i)
		pdev->eps[i].ep_state |= EP_STOPPED;

	trace_cdnsp_handle_cmd_reset_dev(slot_ctx);

	if (ret)
		dev_err(pdev->dev, "Reset device failed with error code %d",
			ret);

	return ret;
}

/*
 * Sets the MaxPStreams field and the Linear Stream Array field.
 * Sets the dequeue pointer to the stream context array.
 */
static void cdnsp_setup_streams_ep_input_ctx(struct cdnsp_device *pdev,
					     struct cdnsp_ep_ctx *ep_ctx,
					     struct cdnsp_stream_info *stream_info)
{
	u32 max_primary_streams;

	/*
	 * MaxPStreams is the number of stream context array entries, not the
	 * number we're actually using. Must be in 2^(MaxPstreams + 1) format.
	 * fls(0) = 0, fls(1) = 1, fls(2) = 2, fls(4) = 3, etc.
	 */
	max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
	ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
	ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
				       | EP_HAS_LSA);
	ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma);
}

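/*
 * Worked example (illustrative): a request for 13 streams becomes
 * 13 + 2 = 15 after reserving stream 0 and the TASK SET FULL stream, as
 * done in cdnsp_alloc_streams() below; then
 * num_stream_ctxs = roundup_pow_of_two(15) = 16, and
 * MaxPStreams = fls(16) - 2 = 3, i.e. 2^(3 + 1) = 16 array entries.
 */
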
/*
 * Drivers use this function to prepare a bulk endpoint to use streams.
 *
 * Don't allow the call to succeed if the endpoint only supports one stream
 * (which means it doesn't support streams at all).
 */
int cdnsp_alloc_streams(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
{
	unsigned int num_streams = usb_ss_max_streams(pep->endpoint.comp_desc);
	unsigned int num_stream_ctxs;
	int ret;

	if (num_streams == 0)
		return 0;

	if (num_streams > STREAM_NUM_STREAMS)
		return -EINVAL;

	/*
	 * Add two to the number of streams requested to account for
	 * stream 0 that is reserved for controller usage and one additional
	 * for the TASK SET FULL response.
	 */
	num_streams += 2;

	/* The stream context array size must be a power of two. */
	num_stream_ctxs = roundup_pow_of_two(num_streams);

	trace_cdnsp_stream_number(pep, num_stream_ctxs, num_streams);

	ret = cdnsp_alloc_stream_info(pdev, pep, num_stream_ctxs, num_streams);
	if (ret)
		return ret;

	cdnsp_setup_streams_ep_input_ctx(pdev, pep->in_ctx, &pep->stream_info);

	pep->ep_state |= EP_HAS_STREAMS;
	pep->stream_info.td_count = 0;
	pep->stream_info.first_prime_det = 0;

	/* Subtract 1 for stream 0, which drivers can't use. */
	return num_streams - 1;
}

int cdnsp_disable_slot(struct cdnsp_device *pdev)
{
	int ret;

	cdnsp_queue_slot_control(pdev, TRB_DISABLE_SLOT);
	cdnsp_ring_cmd_db(pdev);
	ret = cdnsp_wait_for_cmd_compl(pdev);

	pdev->slot_id = 0;
	pdev->active_port = NULL;

	trace_cdnsp_handle_cmd_disable_slot(cdnsp_get_slot_ctx(&pdev->out_ctx));

	memset(pdev->in_ctx.bytes, 0, CDNSP_CTX_SIZE);
	memset(pdev->out_ctx.bytes, 0, CDNSP_CTX_SIZE);

	return ret;
}

int cdnsp_enable_slot(struct cdnsp_device *pdev)
{
	struct cdnsp_slot_ctx *slot_ctx;
	int slot_state;
	int ret;

	/* If the slot is already enabled, there is nothing to do. */
	slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx);
	slot_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state));

	if (slot_state != SLOT_STATE_DISABLED)
		return 0;

	cdnsp_queue_slot_control(pdev, TRB_ENABLE_SLOT);
	cdnsp_ring_cmd_db(pdev);
	ret = cdnsp_wait_for_cmd_compl(pdev);
	if (ret)
		goto show_trace;

	pdev->slot_id = 1;

show_trace:
	trace_cdnsp_handle_cmd_enable_slot(cdnsp_get_slot_ctx(&pdev->out_ctx));

	return ret;
}

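/*
 * Illustrative sketch (not part of the driver): the typical order of slot
 * commands during enumeration, assuming the cdnsp_setup_device() prototype
 * from cdnsp-gadget.h and error handling by the caller.
 */
static __maybe_unused int cdnsp_address_device_sketch(struct cdnsp_device *pdev)
{
	int ret;

	ret = cdnsp_enable_slot(pdev);
	if (ret)
		return ret;

	/* Move the slot to the Default state without setting an address. */
	ret = cdnsp_setup_device(pdev, SETUP_CONTEXT_ONLY);
	if (ret)
		return ret;

	/* Issue Address Device with BSR=0 to assign the USB address. */
	return cdnsp_setup_device(pdev, SETUP_CONTEXT_ADDRESS);
}
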
/*
 * Issue an Address Device command with BSR=1 if setup is SETUP_CONTEXT_ONLY
 * or with BSR=0 if setup is SETUP_CONTEXT_ADDRESS.
 */
int cdnsp_setup_device(struct cdnsp_device *pdev, enum cdnsp_setup_dev setup)
{
	struct cdnsp_input_control_ctx *ctrl_ctx;
	struct cdnsp_slot_ctx *slot_ctx;
	int dev_state = 0;
	int ret;

	if (!pdev->slot_id) {
		trace_cdnsp_slot_id("incorrect");
		return -EINVAL;
	}

	if (!pdev->active_port->port_num)
		return -EINVAL;

	slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx);
	dev_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state));

	if (setup == SETUP_CONTEXT_ONLY && dev_state == SLOT_STATE_DEFAULT) {
		trace_cdnsp_slot_already_in_default(slot_ctx);
		return 0;
	}

	slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
	ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);

	if (!slot_ctx->dev_info || dev_state == SLOT_STATE_DEFAULT) {
		ret = cdnsp_setup_addressable_priv_dev(pdev);
		if (ret)
			return ret;
	}

	cdnsp_copy_ep0_dequeue_into_input_ctx(pdev);

	ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
	ctrl_ctx->drop_flags = 0;

	trace_cdnsp_setup_device_slot(slot_ctx);

	cdnsp_queue_address_device(pdev, pdev->in_ctx.dma, setup);
	cdnsp_ring_cmd_db(pdev);
	ret = cdnsp_wait_for_cmd_compl(pdev);

	trace_cdnsp_handle_cmd_addr_dev(cdnsp_get_slot_ctx(&pdev->out_ctx));

	/* Zero the input context control for later use. */
	ctrl_ctx->add_flags = 0;
	ctrl_ctx->drop_flags = 0;

	return ret;
}

void cdnsp_set_usb2_hardware_lpm(struct cdnsp_device *pdev,
				 struct usb_request *req,
				 int enable)
{
	if (pdev->active_port != &pdev->usb2_port || !pdev->gadget.lpm_capable)
		return;

	trace_cdnsp_lpm(enable);

	if (enable)
		writel(PORT_BESL(CDNSP_DEFAULT_BESL) | PORT_L1S_NYET | PORT_HLE,
		       &pdev->active_port->regs->portpmsc);
	else
		writel(PORT_L1S_NYET, &pdev->active_port->regs->portpmsc);
}

static int cdnsp_get_frame(struct cdnsp_device *pdev)
{
	return readl(&pdev->run_regs->microframe_index) >> 3;
}

static int cdnsp_gadget_ep_enable(struct usb_ep *ep,
				  const struct usb_endpoint_descriptor *desc)
{
	struct cdnsp_input_control_ctx *ctrl_ctx;
	struct cdnsp_device *pdev;
	struct cdnsp_ep *pep;
	unsigned long flags;
	u32 added_ctxs;
	int ret;

	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT ||
	    !desc->wMaxPacketSize)
		return -EINVAL;

	pep = to_cdnsp_ep(ep);
	pdev = pep->pdev;

	if (dev_WARN_ONCE(pdev->dev, pep->ep_state & EP_ENABLED,
			  "%s is already enabled\n", pep->name))
		return 0;

	spin_lock_irqsave(&pdev->lock, flags);

	added_ctxs = cdnsp_get_endpoint_flag(desc);
	if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
		dev_err(pdev->dev, "ERROR: Bad endpoint number\n");
		ret = -EINVAL;
		goto unlock;
	}

	pep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0;

	if (pdev->gadget.speed == USB_SPEED_FULL) {
		if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT)
			pep->interval = desc->bInterval << 3;
		if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_ISOC)
			pep->interval = BIT(desc->bInterval - 1) << 3;
	}

	if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_ISOC) {
		if (pep->interval > BIT(12)) {
			dev_err(pdev->dev, "bInterval %d not supported\n",
				desc->bInterval);
			ret = -EINVAL;
			goto unlock;
		}
		cdnsp_set_chicken_bits_2(pdev, CHICKEN_XDMA_2_TP_CACHE_DIS);
	}

	ret = cdnsp_endpoint_init(pdev, pep, GFP_ATOMIC);
	if (ret)
		goto unlock;

	ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);
	ctrl_ctx->add_flags = cpu_to_le32(added_ctxs);
	ctrl_ctx->drop_flags = 0;

	ret = cdnsp_update_eps_configuration(pdev, pep);
	if (ret) {
		cdnsp_free_endpoint_rings(pdev, pep);
		goto unlock;
	}

	pep->ep_state |= EP_ENABLED;
	pep->ep_state &= ~EP_STOPPED;

unlock:
	trace_cdnsp_ep_enable_end(pep, 0);
	spin_unlock_irqrestore(&pdev->lock, flags);

	return ret;
}

static int cdnsp_gadget_ep_disable(struct usb_ep *ep)
{
	struct cdnsp_input_control_ctx *ctrl_ctx;
	struct cdnsp_request *preq;
	struct cdnsp_device *pdev;
	struct cdnsp_ep *pep;
	unsigned long flags;
	u32 drop_flag;
	int ret = 0;

	if (!ep)
		return -EINVAL;

	pep = to_cdnsp_ep(ep);
	pdev = pep->pdev;

	spin_lock_irqsave(&pdev->lock, flags);

	if (!(pep->ep_state & EP_ENABLED)) {
		dev_err(pdev->dev, "%s is already disabled\n", pep->name);
		ret = -EINVAL;
		goto finish;
	}

	cdnsp_cmd_stop_ep(pdev, pep);
	pep->ep_state |= EP_DIS_IN_RROGRESS;
	cdnsp_cmd_flush_ep(pdev, pep);

	/* Remove all queued USB requests. */
	while (!list_empty(&pep->pending_list)) {
		preq = next_request(&pep->pending_list);
		cdnsp_ep_dequeue(pep, preq);
	}

	cdnsp_invalidate_ep_events(pdev, pep);

	pep->ep_state &= ~EP_DIS_IN_RROGRESS;
	drop_flag = cdnsp_get_endpoint_flag(pep->endpoint.desc);
	ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);
	ctrl_ctx->drop_flags = cpu_to_le32(drop_flag);
	ctrl_ctx->add_flags = 0;

	cdnsp_endpoint_zero(pdev, pep);

	ret = cdnsp_update_eps_configuration(pdev, pep);
	cdnsp_free_endpoint_rings(pdev, pep);

	pep->ep_state &= ~EP_ENABLED;
	pep->ep_state |= EP_STOPPED;

finish:
	trace_cdnsp_ep_disable_end(pep, 0);
	spin_unlock_irqrestore(&pdev->lock, flags);

	return ret;
}

static struct usb_request *cdnsp_gadget_ep_alloc_request(struct usb_ep *ep,
							 gfp_t gfp_flags)
{
	struct cdnsp_ep *pep = to_cdnsp_ep(ep);
	struct cdnsp_request *preq;

	preq = kzalloc(sizeof(*preq), gfp_flags);
	if (!preq)
		return NULL;

	preq->epnum = pep->number;
	preq->pep = pep;

	trace_cdnsp_alloc_request(preq);

	return &preq->request;
}

static void cdnsp_gadget_ep_free_request(struct usb_ep *ep,
					 struct usb_request *request)
{
	struct cdnsp_request *preq = to_cdnsp_request(request);

	trace_cdnsp_free_request(preq);
	kfree(preq);
}

static int cdnsp_gadget_ep_queue(struct usb_ep *ep,
				 struct usb_request *request,
				 gfp_t gfp_flags)
{
	struct cdnsp_request *preq;
	struct cdnsp_device *pdev;
	struct cdnsp_ep *pep;
	unsigned long flags;
	int ret;

	if (!request || !ep)
		return -EINVAL;

	pep = to_cdnsp_ep(ep);
	pdev = pep->pdev;

	if (!(pep->ep_state & EP_ENABLED)) {
		dev_err(pdev->dev, "%s: can't queue to disabled endpoint\n",
			pep->name);
		return -EINVAL;
	}

	preq = to_cdnsp_request(request);
	spin_lock_irqsave(&pdev->lock, flags);
	ret = cdnsp_ep_enqueue(pep, preq);
	spin_unlock_irqrestore(&pdev->lock, flags);

	return ret;
}

static int cdnsp_gadget_ep_dequeue(struct usb_ep *ep,
				   struct usb_request *request)
{
	struct cdnsp_ep *pep = to_cdnsp_ep(ep);
	struct cdnsp_device *pdev = pep->pdev;
	unsigned long flags;
	int ret;

	if (!pep->endpoint.desc) {
		dev_err(pdev->dev,
			"%s: can't dequeue from a disabled endpoint\n",
			pep->name);
		return -ESHUTDOWN;
	}

	spin_lock_irqsave(&pdev->lock, flags);
	ret = cdnsp_ep_dequeue(pep, to_cdnsp_request(request));
	spin_unlock_irqrestore(&pdev->lock, flags);

	return ret;
}

static int cdnsp_gadget_ep_set_halt(struct usb_ep *ep, int value)
{
	struct cdnsp_ep *pep = to_cdnsp_ep(ep);
	struct cdnsp_device *pdev = pep->pdev;
	struct cdnsp_request *preq;
	unsigned long flags = 0;
	int ret;

	spin_lock_irqsave(&pdev->lock, flags);

	preq = next_request(&pep->pending_list);
	if (value) {
		if (preq) {
			trace_cdnsp_ep_busy_try_halt_again(pep, 0);
			ret = -EAGAIN;
			goto done;
		}
	}

	ret = cdnsp_halt_endpoint(pdev, pep, value);

done:
	spin_unlock_irqrestore(&pdev->lock, flags);
	return ret;
}

static int cdnsp_gadget_ep_set_wedge(struct usb_ep *ep)
{
	struct cdnsp_ep *pep = to_cdnsp_ep(ep);
	struct cdnsp_device *pdev = pep->pdev;
	unsigned long flags = 0;
	int ret;

	spin_lock_irqsave(&pdev->lock, flags);
	pep->ep_state |= EP_WEDGE;
	ret = cdnsp_halt_endpoint(pdev, pep, 1);
	spin_unlock_irqrestore(&pdev->lock, flags);

	return ret;
}

static const struct usb_ep_ops cdnsp_gadget_ep0_ops = {
	.enable = cdnsp_gadget_ep_enable,
	.disable = cdnsp_gadget_ep_disable,
	.alloc_request = cdnsp_gadget_ep_alloc_request,
	.free_request = cdnsp_gadget_ep_free_request,
	.queue = cdnsp_gadget_ep_queue,
	.dequeue = cdnsp_gadget_ep_dequeue,
	.set_halt = cdnsp_gadget_ep_set_halt,
	.set_wedge = cdnsp_gadget_ep_set_wedge,
};

static const struct usb_ep_ops cdnsp_gadget_ep_ops = {
	.enable = cdnsp_gadget_ep_enable,
	.disable = cdnsp_gadget_ep_disable,
	.alloc_request = cdnsp_gadget_ep_alloc_request,
	.free_request = cdnsp_gadget_ep_free_request,
	.queue = cdnsp_gadget_ep_queue,
	.dequeue = cdnsp_gadget_ep_dequeue,
	.set_halt = cdnsp_gadget_ep_set_halt,
	.set_wedge = cdnsp_gadget_ep_set_wedge,
};

void cdnsp_gadget_giveback(struct cdnsp_ep *pep,
			   struct cdnsp_request *preq,
			   int status)
{
	struct cdnsp_device *pdev = pep->pdev;

	list_del(&preq->list);

	if (preq->request.status == -EINPROGRESS)
		preq->request.status = status;

	usb_gadget_unmap_request_by_dev(pdev->dev, &preq->request,
					preq->direction);

	trace_cdnsp_request_giveback(preq);

	if (preq != &pdev->ep0_preq) {
		spin_unlock(&pdev->lock);
		usb_gadget_giveback_request(&pep->endpoint, &preq->request);
		spin_lock(&pdev->lock);
	}
}

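/*
 * Illustrative note (not part of the driver): the completion callback is
 * invoked with pdev->lock dropped, as in cdnsp_gadget_giveback() above,
 * because the gadget driver's ->complete() handler may immediately requeue
 * the request and re-enter this driver.
 */
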
static struct usb_endpoint_descriptor cdnsp_gadget_ep0_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bmAttributes = USB_ENDPOINT_XFER_CONTROL,
};

static int cdnsp_run(struct cdnsp_device *pdev,
		     enum usb_device_speed speed)
{
	u32 fs_speed = 0;
	u64 temp_64;
	u32 temp;
	int ret;

	temp_64 = cdnsp_read_64(&pdev->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	temp = readl(&pdev->ir_set->irq_control);
	temp &= ~IMOD_INTERVAL_MASK;
	temp |= ((IMOD_DEFAULT_INTERVAL / 250) & IMOD_INTERVAL_MASK);
	writel(temp, &pdev->ir_set->irq_control);

	temp = readl(&pdev->port3x_regs->mode_addr);

	switch (speed) {
	case USB_SPEED_SUPER_PLUS:
		temp |= CFG_3XPORT_SSP_SUPPORT;
		break;
	case USB_SPEED_SUPER:
		temp &= ~CFG_3XPORT_SSP_SUPPORT;
		break;
	case USB_SPEED_HIGH:
		break;
	case USB_SPEED_FULL:
		fs_speed = PORT_REG6_FORCE_FS;
		break;
	default:
		dev_err(pdev->dev, "invalid maximum_speed parameter %d\n",
			speed);
		fallthrough;
	case USB_SPEED_UNKNOWN:
		/* Default to SuperSpeed. */
		speed = USB_SPEED_SUPER;
		break;
	}

	if (speed >= USB_SPEED_SUPER) {
		writel(temp, &pdev->port3x_regs->mode_addr);
		cdnsp_set_link_state(pdev, &pdev->usb3_port.regs->portsc,
				     XDEV_RXDETECT);
	} else {
		cdnsp_disable_port(pdev, &pdev->usb3_port.regs->portsc);
	}

	cdnsp_set_link_state(pdev, &pdev->usb2_port.regs->portsc,
			     XDEV_RXDETECT);

	cdnsp_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);

	writel(PORT_REG6_L1_L0_HW_EN | fs_speed, &pdev->port20_regs->port_reg6);

	ret = cdnsp_start(pdev);
	if (ret) {
		ret = -ENODEV;
		goto err;
	}

	temp = readl(&pdev->op_regs->command);
	temp |= (CMD_INTE);
	writel(temp, &pdev->op_regs->command);

	temp = readl(&pdev->ir_set->irq_pending);
	writel(IMAN_IE_SET(temp), &pdev->ir_set->irq_pending);

	trace_cdnsp_init("Controller ready to work");
	return 0;
err:
	cdnsp_halt(pdev);
	return ret;
}

static int cdnsp_gadget_udc_start(struct usb_gadget *g,
				  struct usb_gadget_driver *driver)
{
	enum usb_device_speed max_speed = driver->max_speed;
	struct cdnsp_device *pdev = gadget_to_cdnsp(g);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pdev->lock, flags);
	pdev->gadget_driver = driver;

	/* Limit speed if necessary. */
	max_speed = min(driver->max_speed, g->max_speed);
	ret = cdnsp_run(pdev, max_speed);

	spin_unlock_irqrestore(&pdev->lock, flags);

	return ret;
}

/*
 * Update the Event Ring Dequeue Pointer:
 * - When all events have finished.
 * - To avoid the "Event Ring Full Error" condition.
 */
void cdnsp_update_erst_dequeue(struct cdnsp_device *pdev,
			       union cdnsp_trb *event_ring_deq,
			       u8 clear_ehb)
{
	u64 temp_64;
	dma_addr_t deq;

	temp_64 = cdnsp_read_64(&pdev->ir_set->erst_dequeue);

	/* If necessary, update the HW's version of the event ring deq ptr. */
	if (event_ring_deq != pdev->event_ring->dequeue) {
		deq = cdnsp_trb_virt_to_dma(pdev->event_ring->deq_seg,
					    pdev->event_ring->dequeue);
		temp_64 &= ERST_PTR_MASK;
		temp_64 |= ((u64)deq & (u64)~ERST_PTR_MASK);
	}

	/* Clear the event handler busy flag (RW1C). */
	if (clear_ehb)
		temp_64 |= ERST_EHB;
	else
		temp_64 &= ~ERST_EHB;

	cdnsp_write_64(temp_64, &pdev->ir_set->erst_dequeue);
}

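/*
 * Illustrative note (not part of the driver): ERST_EHB is an RW1C bit, so
 * "clearing" the Event Handler Busy flag in cdnsp_update_erst_dequeue()
 * means writing it back as '1' together with the new dequeue pointer.
 * For example, after draining the ring an event handler would call:
 *
 *	cdnsp_update_erst_dequeue(pdev, event_ring_deq, 1);
 */
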
static void cdnsp_clear_cmd_ring(struct cdnsp_device *pdev)
{
	struct cdnsp_segment *seg;
	u64 val_64;
	int i;

	cdnsp_initialize_ring_info(pdev->cmd_ring);

	seg = pdev->cmd_ring->first_seg;
	for (i = 0; i < pdev->cmd_ring->num_segs; i++) {
		memset(seg->trbs, 0,
		       sizeof(union cdnsp_trb) * (TRBS_PER_SEGMENT - 1));
		seg = seg->next;
	}

	/* Set the address in the Command Ring Control register. */
	val_64 = cdnsp_read_64(&pdev->op_regs->cmd_ring);
	val_64 = (val_64 & (u64)CMD_RING_RSVD_BITS) |
		 (pdev->cmd_ring->first_seg->dma & (u64)~CMD_RING_RSVD_BITS) |
		 pdev->cmd_ring->cycle_state;
	cdnsp_write_64(val_64, &pdev->op_regs->cmd_ring);
}

static void cdnsp_consume_all_events(struct cdnsp_device *pdev)
{
	struct cdnsp_segment *event_deq_seg;
	union cdnsp_trb *event_ring_deq;
	union cdnsp_trb *event;
	u32 cycle_bit;

	event_ring_deq = pdev->event_ring->dequeue;
	event_deq_seg = pdev->event_ring->deq_seg;
	event = pdev->event_ring->dequeue;

	/* Update ring dequeue pointer. */
	while (1) {
		cycle_bit = (le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE);

		/* Does the controller or the driver own the TRB? */
		if (cycle_bit != pdev->event_ring->cycle_state)
			break;

		cdnsp_inc_deq(pdev, pdev->event_ring);

		if (!cdnsp_last_trb_on_seg(event_deq_seg, event)) {
			event++;
			continue;
		}

		if (cdnsp_last_trb_on_ring(pdev->event_ring, event_deq_seg,
					   event))
			cycle_bit ^= 1;

		event_deq_seg = event_deq_seg->next;
		event = event_deq_seg->trbs;
	}

	cdnsp_update_erst_dequeue(pdev, event_ring_deq, 1);
}

static void cdnsp_stop(struct cdnsp_device *pdev)
{
	u32 temp;

	cdnsp_cmd_flush_ep(pdev, &pdev->eps[0]);

	/* Remove the internally queued request for ep0. */
	if (!list_empty(&pdev->eps[0].pending_list)) {
		struct cdnsp_request *req;

		req = next_request(&pdev->eps[0].pending_list);
		if (req == &pdev->ep0_preq)
			cdnsp_ep_dequeue(&pdev->eps[0], req);
	}

	cdnsp_disable_port(pdev, &pdev->usb2_port.regs->portsc);
	cdnsp_disable_port(pdev, &pdev->usb3_port.regs->portsc);
	cdnsp_disable_slot(pdev);
	cdnsp_halt(pdev);

	temp = readl(&pdev->op_regs->status);
	writel((temp & ~0x1fff) | STS_EINT, &pdev->op_regs->status);
	temp = readl(&pdev->ir_set->irq_pending);
	writel(IMAN_IE_CLEAR(temp), &pdev->ir_set->irq_pending);

	cdnsp_clear_port_change_bit(pdev, &pdev->usb2_port.regs->portsc);
	cdnsp_clear_port_change_bit(pdev, &pdev->usb3_port.regs->portsc);

	/* Clear the interrupt line. */
	temp = readl(&pdev->ir_set->irq_pending);
	temp |= IMAN_IP;
	writel(temp, &pdev->ir_set->irq_pending);

	cdnsp_consume_all_events(pdev);
	cdnsp_clear_cmd_ring(pdev);

	trace_cdnsp_exit("Controller stopped.");
}

/*
 * Stop the controller.
 * This function is called by the gadget core when the driver is removed.
 * Disable slot, disable IRQs, and quiesce the controller.
 */
static int cdnsp_gadget_udc_stop(struct usb_gadget *g)
{
	struct cdnsp_device *pdev = gadget_to_cdnsp(g);
	unsigned long flags;

	spin_lock_irqsave(&pdev->lock, flags);
	cdnsp_stop(pdev);
	pdev->gadget_driver = NULL;
	spin_unlock_irqrestore(&pdev->lock, flags);

	return 0;
}

static int cdnsp_gadget_get_frame(struct usb_gadget *g)
{
	struct cdnsp_device *pdev = gadget_to_cdnsp(g);

	return cdnsp_get_frame(pdev);
}

static void __cdnsp_gadget_wakeup(struct cdnsp_device *pdev)
{
	struct cdnsp_port_regs __iomem *port_regs;
	u32 portpm, portsc;

	port_regs = pdev->active_port->regs;
	portsc = readl(&port_regs->portsc) & PORT_PLS_MASK;

	/* Remote wakeup feature is not enabled by the host. */
	if (pdev->gadget.speed < USB_SPEED_SUPER && portsc == XDEV_U2) {
		portpm = readl(&port_regs->portpmsc);

		if (!(portpm & PORT_RWE))
			return;
	}

	if (portsc == XDEV_U3 && !pdev->may_wakeup)
		return;

	cdnsp_set_link_state(pdev, &port_regs->portsc, XDEV_U0);

	pdev->cdnsp_state |= CDNSP_WAKEUP_PENDING;
}

static int cdnsp_gadget_wakeup(struct usb_gadget *g)
{
	struct cdnsp_device *pdev = gadget_to_cdnsp(g);
	unsigned long flags;

	spin_lock_irqsave(&pdev->lock, flags);
	__cdnsp_gadget_wakeup(pdev);
	spin_unlock_irqrestore(&pdev->lock, flags);

	return 0;
}

static int cdnsp_gadget_set_selfpowered(struct usb_gadget *g,
					int is_selfpowered)
{
	struct cdnsp_device *pdev = gadget_to_cdnsp(g);
	unsigned long flags;

	spin_lock_irqsave(&pdev->lock, flags);
	g->is_selfpowered = !!is_selfpowered;
	spin_unlock_irqrestore(&pdev->lock, flags);

	return 0;
}

static int cdnsp_gadget_pullup(struct usb_gadget *gadget, int is_on)
{
	struct cdnsp_device *pdev = gadget_to_cdnsp(gadget);
	struct cdns *cdns = dev_get_drvdata(pdev->dev);

	trace_cdnsp_pullup(is_on);

	if (!is_on) {
		cdnsp_reset_device(pdev);
		cdns_clear_vbus(cdns);
	} else {
		cdns_set_vbus(cdns);
	}

	return 0;
}

static const struct usb_gadget_ops cdnsp_gadget_ops = {
	.get_frame = cdnsp_gadget_get_frame,
	.wakeup = cdnsp_gadget_wakeup,
	.set_selfpowered = cdnsp_gadget_set_selfpowered,
	.pullup = cdnsp_gadget_pullup,
	.udc_start = cdnsp_gadget_udc_start,
	.udc_stop = cdnsp_gadget_udc_stop,
};

static void cdnsp_get_ep_buffering(struct cdnsp_device *pdev,
				   struct cdnsp_ep *pep)
{
	void __iomem *reg = &pdev->cap_regs->hc_capbase;
	int endpoints;

	reg += cdnsp_find_next_ext_cap(reg, 0, XBUF_CAP_ID);

	if (!pep->direction) {
		pep->buffering = readl(reg + XBUF_RX_TAG_MASK_0_OFFSET);
		pep->buffering_period = readl(reg + XBUF_RX_TAG_MASK_1_OFFSET);
		pep->buffering = (pep->buffering + 1) / 2;
		pep->buffering_period = (pep->buffering_period + 1) / 2;
		return;
	}

	endpoints = HCS_ENDPOINTS(pdev->hcs_params1) / 2;

	/* Advance reg to the XBUF_TX_TAG_MASK_0 register. */
	reg += XBUF_TX_CMD_OFFSET + (endpoints * 2 + 2) * sizeof(u32);
	/* Advance reg to the XBUF_TX_TAG_MASK_N pair for this endpoint. */
	reg += pep->number * sizeof(u32) * 2;

	pep->buffering = (readl(reg) + 1) / 2;
	pep->buffering_period = pep->buffering;
}

static int cdnsp_gadget_init_endpoints(struct cdnsp_device *pdev)
{
	int max_streams = HCC_MAX_PSA(pdev->hcc_params);
	struct cdnsp_ep *pep;
	int i;

	INIT_LIST_HEAD(&pdev->gadget.ep_list);

	if (max_streams < STREAM_LOG_STREAMS) {
		dev_err(pdev->dev, "Stream size %d not supported\n",
			max_streams);
		return -EINVAL;
	}

	max_streams = STREAM_LOG_STREAMS;

	for (i = 0; i < CDNSP_ENDPOINTS_NUM; i++) {
		bool direction = !(i & 1); /* Start from OUT endpoint. */
		u8 epnum = ((i + 1) >> 1);

		if (!CDNSP_IF_EP_EXIST(pdev, epnum, direction))
			continue;

		pep = &pdev->eps[i];
		pep->pdev = pdev;
		pep->number = epnum;
		pep->direction = direction; /* 0 for OUT, 1 for IN. */

		/*
		 * Ep0 is bidirectional, so ep0in and ep0out are represented by
		 * pdev->eps[0].
		 */
		if (epnum == 0) {
			snprintf(pep->name, sizeof(pep->name), "ep%d%s",
				 epnum, "BiDir");

			pep->idx = 0;
			usb_ep_set_maxpacket_limit(&pep->endpoint, 512);
			pep->endpoint.maxburst = 1;
			pep->endpoint.ops = &cdnsp_gadget_ep0_ops;
			pep->endpoint.desc = &cdnsp_gadget_ep0_desc;
			pep->endpoint.comp_desc = NULL;
			pep->endpoint.caps.type_control = true;
			pep->endpoint.caps.dir_in = true;
			pep->endpoint.caps.dir_out = true;

			pdev->ep0_preq.epnum = pep->number;
			pdev->ep0_preq.pep = pep;
			pdev->gadget.ep0 = &pep->endpoint;
		} else {
			snprintf(pep->name, sizeof(pep->name), "ep%d%s",
				 epnum, (pep->direction) ? "in" : "out");

			pep->idx = (epnum * 2 + (direction ? 1 : 0)) - 1;
			usb_ep_set_maxpacket_limit(&pep->endpoint, 1024);

			pep->endpoint.max_streams = max_streams;
			pep->endpoint.ops = &cdnsp_gadget_ep_ops;
			list_add_tail(&pep->endpoint.ep_list,
				      &pdev->gadget.ep_list);

			pep->endpoint.caps.type_iso = true;
			pep->endpoint.caps.type_bulk = true;
			pep->endpoint.caps.type_int = true;

			pep->endpoint.caps.dir_in = direction;
			pep->endpoint.caps.dir_out = !direction;
		}

		pep->endpoint.name = pep->name;
		pep->in_ctx = cdnsp_get_ep_ctx(&pdev->in_ctx, pep->idx);
		pep->out_ctx = cdnsp_get_ep_ctx(&pdev->out_ctx, pep->idx);
		cdnsp_get_ep_buffering(pdev, pep);

		dev_dbg(pdev->dev, "Init %s, MPS: %04x SupType: "
			"CTRL: %s, INT: %s, BULK: %s, ISOC %s, "
			"SupDir IN: %s, OUT: %s\n",
			pep->name, 1024,
			(pep->endpoint.caps.type_control) ? "yes" : "no",
			(pep->endpoint.caps.type_int) ? "yes" : "no",
			(pep->endpoint.caps.type_bulk) ? "yes" : "no",
			(pep->endpoint.caps.type_iso) ? "yes" : "no",
			(pep->endpoint.caps.dir_in) ? "yes" : "no",
			(pep->endpoint.caps.dir_out) ? "yes" : "no");

		INIT_LIST_HEAD(&pep->pending_list);
	}

	return 0;
}

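/*
 * Worked example (illustrative): in the loop above, i = 0 maps to ep0
 * (BiDir), i = 1 to ep1out (epnum = 1, direction = 0, idx = 1) and i = 2
 * to ep1in (epnum = 1, direction = 1, idx = 2), matching the index math in
 * cdnsp_get_endpoint_index().
 */
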
"yes" : "no"); 1664 1665 INIT_LIST_HEAD(&pep->pending_list); 1666 } 1667 1668 return 0; 1669 } 1670 1671 static void cdnsp_gadget_free_endpoints(struct cdnsp_device *pdev) 1672 { 1673 struct cdnsp_ep *pep; 1674 int i; 1675 1676 for (i = 0; i < CDNSP_ENDPOINTS_NUM; i++) { 1677 pep = &pdev->eps[i]; 1678 if (pep->number != 0 && pep->out_ctx) 1679 list_del(&pep->endpoint.ep_list); 1680 } 1681 } 1682 1683 void cdnsp_disconnect_gadget(struct cdnsp_device *pdev) 1684 { 1685 pdev->cdnsp_state |= CDNSP_STATE_DISCONNECT_PENDING; 1686 1687 if (pdev->gadget_driver && pdev->gadget_driver->disconnect) { 1688 spin_unlock(&pdev->lock); 1689 pdev->gadget_driver->disconnect(&pdev->gadget); 1690 spin_lock(&pdev->lock); 1691 } 1692 1693 pdev->gadget.speed = USB_SPEED_UNKNOWN; 1694 usb_gadget_set_state(&pdev->gadget, USB_STATE_NOTATTACHED); 1695 1696 pdev->cdnsp_state &= ~CDNSP_STATE_DISCONNECT_PENDING; 1697 } 1698 1699 void cdnsp_suspend_gadget(struct cdnsp_device *pdev) 1700 { 1701 if (pdev->gadget_driver && pdev->gadget_driver->suspend) { 1702 spin_unlock(&pdev->lock); 1703 pdev->gadget_driver->suspend(&pdev->gadget); 1704 spin_lock(&pdev->lock); 1705 } 1706 } 1707 1708 void cdnsp_resume_gadget(struct cdnsp_device *pdev) 1709 { 1710 if (pdev->gadget_driver && pdev->gadget_driver->resume) { 1711 spin_unlock(&pdev->lock); 1712 pdev->gadget_driver->resume(&pdev->gadget); 1713 spin_lock(&pdev->lock); 1714 } 1715 } 1716 1717 void cdnsp_irq_reset(struct cdnsp_device *pdev) 1718 { 1719 struct cdnsp_port_regs __iomem *port_regs; 1720 1721 cdnsp_reset_device(pdev); 1722 1723 port_regs = pdev->active_port->regs; 1724 pdev->gadget.speed = cdnsp_port_speed(readl(port_regs)); 1725 1726 spin_unlock(&pdev->lock); 1727 usb_gadget_udc_reset(&pdev->gadget, pdev->gadget_driver); 1728 spin_lock(&pdev->lock); 1729 1730 switch (pdev->gadget.speed) { 1731 case USB_SPEED_SUPER_PLUS: 1732 case USB_SPEED_SUPER: 1733 cdnsp_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 1734 pdev->gadget.ep0->maxpacket = 512; 1735 break; 1736 case USB_SPEED_HIGH: 1737 case USB_SPEED_FULL: 1738 cdnsp_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64); 1739 pdev->gadget.ep0->maxpacket = 64; 1740 break; 1741 default: 1742 /* Low speed is not supported. 
		dev_err(pdev->dev, "Unknown device speed\n");
		break;
	}

	cdnsp_clear_chicken_bits_2(pdev, CHICKEN_XDMA_2_TP_CACHE_DIS);
	cdnsp_setup_device(pdev, SETUP_CONTEXT_ONLY);
	usb_gadget_set_state(&pdev->gadget, USB_STATE_DEFAULT);
}

static void cdnsp_get_rev_cap(struct cdnsp_device *pdev)
{
	void __iomem *reg = &pdev->cap_regs->hc_capbase;

	reg += cdnsp_find_next_ext_cap(reg, 0, RTL_REV_CAP);
	pdev->rev_cap = reg;

	dev_info(pdev->dev, "Rev: %08x/%08x, eps: %08x, buff: %08x/%08x\n",
		 readl(&pdev->rev_cap->ctrl_revision),
		 readl(&pdev->rev_cap->rtl_revision),
		 readl(&pdev->rev_cap->ep_supported),
		 readl(&pdev->rev_cap->rx_buff_size),
		 readl(&pdev->rev_cap->tx_buff_size));
}

static int cdnsp_gen_setup(struct cdnsp_device *pdev)
{
	int ret;
	u32 reg;

	pdev->cap_regs = pdev->regs;
	pdev->op_regs = pdev->regs +
		HC_LENGTH(readl(&pdev->cap_regs->hc_capbase));
	pdev->run_regs = pdev->regs +
		(readl(&pdev->cap_regs->run_regs_off) & RTSOFF_MASK);

	/* Cache read-only capability registers. */
	pdev->hcs_params1 = readl(&pdev->cap_regs->hcs_params1);
	pdev->hcc_params = readl(&pdev->cap_regs->hc_capbase);
	pdev->hci_version = HC_VERSION(pdev->hcc_params);
	pdev->hcc_params = readl(&pdev->cap_regs->hcc_params);

	cdnsp_get_rev_cap(pdev);

	/* Make sure the Device Controller is halted. */
	ret = cdnsp_halt(pdev);
	if (ret)
		return ret;

	/* Reset the internal controller memory state and registers. */
	ret = cdnsp_reset(pdev);
	if (ret)
		return ret;

	/*
	 * Set dma_mask and coherent_dma_mask to 64-bits,
	 * if the controller supports 64-bit addressing.
	 */
	if (HCC_64BIT_ADDR(pdev->hcc_params) &&
	    !dma_set_mask(pdev->dev, DMA_BIT_MASK(64))) {
		dev_dbg(pdev->dev, "Enabling 64-bit DMA addresses.\n");
		dma_set_coherent_mask(pdev->dev, DMA_BIT_MASK(64));
	} else {
		/*
		 * This is to avoid errors in cases where a 32-bit USB
		 * controller is used on a 64-bit capable system.
		 */
		ret = dma_set_mask(pdev->dev, DMA_BIT_MASK(32));
		if (ret)
			return ret;

		dev_dbg(pdev->dev, "Enabling 32-bit DMA addresses.\n");
		dma_set_coherent_mask(pdev->dev, DMA_BIT_MASK(32));
	}

	spin_lock_init(&pdev->lock);

	ret = cdnsp_mem_init(pdev);
	if (ret)
		return ret;

	/*
	 * Software workaround for U1: after the transition to U1 the
	 * controller starts gating the clock, which in some cases causes
	 * the controller to stall.
	 */
	reg = readl(&pdev->port3x_regs->mode_2);
	reg &= ~CFG_3XPORT_U1_PIPE_CLK_GATE_EN;
	writel(reg, &pdev->port3x_regs->mode_2);

	return 0;
}

static int __cdnsp_gadget_init(struct cdns *cdns)
{
	struct cdnsp_device *pdev;
	enum usb_device_speed max_speed;
	int ret = -ENOMEM;

	cdns_drd_gadget_on(cdns);

	pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
	if (!pdev)
		return -ENOMEM;

	pm_runtime_get_sync(cdns->dev);

	cdns->gadget_dev = pdev;
	pdev->dev = cdns->dev;
	pdev->regs = cdns->dev_regs;
	max_speed = usb_get_maximum_speed(cdns->dev);

	switch (max_speed) {
	case USB_SPEED_FULL:
	case USB_SPEED_HIGH:
	case USB_SPEED_SUPER:
	case USB_SPEED_SUPER_PLUS:
		break;
	default:
		dev_err(cdns->dev, "invalid speed parameter %d\n", max_speed);
		fallthrough;
	case USB_SPEED_UNKNOWN:
		/* Default to SSP. */
		max_speed = USB_SPEED_SUPER_PLUS;
		break;
	}

	pdev->gadget.ops = &cdnsp_gadget_ops;
	pdev->gadget.name = "cdnsp-gadget";
	pdev->gadget.speed = USB_SPEED_UNKNOWN;
	pdev->gadget.sg_supported = 1;
	pdev->gadget.max_speed = USB_SPEED_SUPER_PLUS;
	pdev->gadget.lpm_capable = 1;

	pdev->setup_buf = kzalloc(CDNSP_EP0_SETUP_SIZE, GFP_KERNEL);
	if (!pdev->setup_buf)
		goto free_pdev;

	/*
	 * The controller supports unaligned buffers, but aligned buffers
	 * should improve performance.
	 */
	pdev->gadget.quirk_ep_out_aligned_size = true;

	ret = cdnsp_gen_setup(pdev);
	if (ret) {
		dev_err(pdev->dev, "Generic initialization failed %d\n", ret);
		goto free_setup;
	}

	ret = cdnsp_gadget_init_endpoints(pdev);
	if (ret) {
		dev_err(pdev->dev, "failed to initialize endpoints\n");
		goto halt_pdev;
	}

	ret = usb_add_gadget_udc(pdev->dev, &pdev->gadget);
	if (ret) {
		dev_err(pdev->dev, "failed to register udc\n");
		goto free_endpoints;
	}

	ret = devm_request_threaded_irq(pdev->dev, cdns->dev_irq,
					cdnsp_irq_handler,
					cdnsp_thread_irq_handler, IRQF_SHARED,
					dev_name(pdev->dev), pdev);
	if (ret)
		goto del_gadget;

	return 0;

del_gadget:
	usb_del_gadget_udc(&pdev->gadget);
free_endpoints:
	cdnsp_gadget_free_endpoints(pdev);
halt_pdev:
	cdnsp_halt(pdev);
	cdnsp_reset(pdev);
	cdnsp_mem_cleanup(pdev);
free_setup:
	kfree(pdev->setup_buf);
free_pdev:
	kfree(pdev);

	return ret;
}

static void cdnsp_gadget_exit(struct cdns *cdns)
{
	struct cdnsp_device *pdev = cdns->gadget_dev;

	devm_free_irq(pdev->dev, cdns->dev_irq, pdev);
	pm_runtime_mark_last_busy(cdns->dev);
	pm_runtime_put_autosuspend(cdns->dev);
	usb_del_gadget_udc(&pdev->gadget);
	cdnsp_gadget_free_endpoints(pdev);
	cdnsp_mem_cleanup(pdev);
	kfree(pdev);
	cdns->gadget_dev = NULL;
	cdns_drd_gadget_off(cdns);
}

static int cdnsp_gadget_suspend(struct cdns *cdns, bool do_wakeup)
{
	struct cdnsp_device *pdev = cdns->gadget_dev;
	unsigned long flags;

	if (pdev->link_state == XDEV_U3)
		return 0;

	spin_lock_irqsave(&pdev->lock, flags);
	cdnsp_disconnect_gadget(pdev);
	cdnsp_stop(pdev);
	spin_unlock_irqrestore(&pdev->lock, flags);

	return 0;
}

static int cdnsp_gadget_resume(struct cdns *cdns, bool hibernated)
{
	struct cdnsp_device *pdev = cdns->gadget_dev;
	enum usb_device_speed max_speed;
	unsigned long flags;
	int ret;

	if (!pdev->gadget_driver)
		return 0;

	spin_lock_irqsave(&pdev->lock, flags);
	max_speed = pdev->gadget_driver->max_speed;

	/* Limit speed if necessary. */
	max_speed = min(max_speed, pdev->gadget.max_speed);

	ret = cdnsp_run(pdev, max_speed);

	if (pdev->link_state == XDEV_U3)
		__cdnsp_gadget_wakeup(pdev);

	spin_unlock_irqrestore(&pdev->lock, flags);

	return ret;
}

/**
 * cdnsp_gadget_init - initialize device structure
 * @cdns: cdnsp instance
 *
 * This function initializes the gadget.
 */
int cdnsp_gadget_init(struct cdns *cdns)
{
	struct cdns_role_driver *rdrv;

	rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL);
	if (!rdrv)
		return -ENOMEM;

	rdrv->start = __cdnsp_gadget_init;
	rdrv->stop = cdnsp_gadget_exit;
	rdrv->suspend = cdnsp_gadget_suspend;
	rdrv->resume = cdnsp_gadget_resume;
	rdrv->state = CDNS_ROLE_STATE_INACTIVE;
	rdrv->name = "gadget";
	cdns->roles[USB_ROLE_DEVICE] = rdrv;

	return 0;
}