// SPDX-License-Identifier: GPL-2.0
/*
 * Cadence USBSS DRD Driver - gadget side.
 *
 * Copyright (C) 2018-2019 Cadence Design Systems.
 * Copyright (C) 2017-2018 NXP
 *
 * Authors: Pawel Jez <pjez@cadence.com>,
 *          Pawel Laszczak <pawell@cadence.com>
 *          Peter Chen <peter.chen@nxp.com>
 */

/*
 * Workaround 1:
 * In some situations the controller may use a stale data address from a
 * TRB, with the following sequence:
 * 1. Controller reads a TRB that includes the data address.
 * 2. Software updates the TRB's data address and Cycle bit.
 * 3. Controller re-reads the TRB, but only consumes the Cycle bit.
 * 4. DMA runs with the stale data address.
 *
 * To fix this problem, the driver marks the first TRB in a TD as invalid.
 * After preparing all TRBs, the driver checks the DMA position: if DMA
 * points at the just-added first TRB and the doorbell is set, the driver
 * must defer making this TRB valid. The TRB is then made valid while
 * adding the next TRB, but only if DMA is stopped, or at a TRBERR
 * interrupt.
 *
 * The issue is fixed in the DEV_VER_V3 version of the controller.
 *
 * Workaround 2:
 * For OUT endpoints the controller uses a single on-chip buffer shared by
 * all incoming packets, including ep0out. It is a FIFO buffer, so packets
 * must be handled by DMA in the correct order. If the first packet in the
 * buffer is not handled, the following packets directed to other endpoints
 * and functions are blocked.
 * Additionally, packets directed to one endpoint can block the entire
 * on-chip buffer. In that case transfers to other endpoints are also
 * blocked.
 *
 * To resolve this issue, after raising the descriptor missing interrupt,
 * the driver prepares an internal usb_request object and uses it to arm
 * the DMA transfer.
 *
 * The problematic situation was observed when an endpoint had been enabled
 * but no usb_request was queued. The driver tries to detect such endpoints
 * and uses this workaround only for them.
 *
 * The driver uses a limited number of buffers. This number can be set with
 * the CDNS3_WA2_NUM_BUFFERS macro.
 *
 * Such a blocking situation was observed with the ACM gadget. The host
 * sent an OUT data packet, but the ACM function was not prepared for it,
 * so the buffer placed in on-chip memory blocked transfers to other
 * endpoints.
 *
 * The issue is fixed in the DEV_VER_V2 version of the controller.
 */
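/*
 * Illustrative sketch of the Workaround 1 guard described above (not a
 * definitive trace, just the flow implemented by cdns3_wa1_update_guard()
 * and cdns3_wa1_restore_cycle_bit() below):
 *
 *	prepare the TD with the first TRB's Cycle bit intentionally inverted;
 *	if (doorbell is set)
 *		defer flipping the Cycle bit (wa1_set = 1, remember the TRB);
 *	else
 *		flip the Cycle bit now, handing the TD to the controller;
 *	later, once DMA has moved past the guarded TRB or on a TRBERR
 *	interrupt, restore the correct Cycle bit;
 */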
#include <linux/dma-mapping.h>
#include <linux/usb/gadget.h>
#include <linux/module.h>
#include <linux/dmapool.h>
#include <linux/iopoll.h>

#include "core.h"
#include "gadget-export.h"
#include "cdns3-gadget.h"
#include "cdns3-trace.h"
#include "drd.h"

static int __cdns3_gadget_ep_queue(struct usb_ep *ep,
				   struct usb_request *request,
				   gfp_t gfp_flags);

static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
				 struct usb_request *request);

static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep,
					struct usb_request *request);

/**
 * cdns3_clear_register_bit - clear bit in given register.
 * @ptr: address of device controller register to be read and changed
 * @mask: bits requested to clear
 */
static void cdns3_clear_register_bit(void __iomem *ptr, u32 mask)
{
	mask = readl(ptr) & ~mask;
	writel(mask, ptr);
}

/**
 * cdns3_set_register_bit - set bit in given register.
 * @ptr: address of device controller register to be read and changed
 * @mask: bits requested to set
 */
void cdns3_set_register_bit(void __iomem *ptr, u32 mask)
{
	mask = readl(ptr) | mask;
	writel(mask, ptr);
}

/**
 * cdns3_ep_addr_to_index - converts an endpoint address to the index of
 * the endpoint object in the cdns3_device.eps[] container
 * @ep_addr: endpoint address for which the endpoint object is required
 *
 */
u8 cdns3_ep_addr_to_index(u8 ep_addr)
{
	return (((ep_addr & 0x7F)) + ((ep_addr & USB_DIR_IN) ? 16 : 0));
}
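/*
 * Worked example for cdns3_ep_addr_to_index(): OUT endpoints map to
 * indexes 0..15 and IN endpoints to 16..31, so ep0out (0x00) -> 0,
 * ep1out (0x01) -> 1, ep0in (0x80) -> 16 and ep1in (0x81) -> 17.
 */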
static int cdns3_get_dma_pos(struct cdns3_device *priv_dev,
			     struct cdns3_endpoint *priv_ep)
{
	int dma_index;

	dma_index = readl(&priv_dev->regs->ep_traddr) - priv_ep->trb_pool_dma;

	return dma_index / TRB_SIZE;
}

/**
 * cdns3_next_request - returns next request from list
 * @list: list containing requests
 *
 * Returns request or NULL if no requests in list
 */
struct usb_request *cdns3_next_request(struct list_head *list)
{
	return list_first_entry_or_null(list, struct usb_request, list);
}

/**
 * cdns3_next_align_buf - returns next buffer from list
 * @list: list containing buffers
 *
 * Returns buffer or NULL if no buffers in list
 */
static struct cdns3_aligned_buf *cdns3_next_align_buf(struct list_head *list)
{
	return list_first_entry_or_null(list, struct cdns3_aligned_buf, list);
}

/**
 * cdns3_next_priv_request - returns next request from list
 * @list: list containing requests
 *
 * Returns request or NULL if no requests in list
 */
static struct cdns3_request *cdns3_next_priv_request(struct list_head *list)
{
	return list_first_entry_or_null(list, struct cdns3_request, list);
}

/**
 * cdns3_select_ep - selects endpoint
 * @priv_dev: extended gadget object
 * @ep: endpoint address
 */
void cdns3_select_ep(struct cdns3_device *priv_dev, u32 ep)
{
	if (priv_dev->selected_ep == ep)
		return;

	priv_dev->selected_ep = ep;
	writel(ep, &priv_dev->regs->ep_sel);
}

/**
 * cdns3_get_tdl - gets current tdl for selected endpoint.
 * @priv_dev: extended gadget object
 *
 * Before calling this function the appropriate endpoint must be selected
 * with the cdns3_select_ep() function.
 */
static int cdns3_get_tdl(struct cdns3_device *priv_dev)
{
	if (priv_dev->dev_ver < DEV_VER_V3)
		return EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd));
	else
		return readl(&priv_dev->regs->ep_tdl);
}

dma_addr_t cdns3_trb_virt_to_dma(struct cdns3_endpoint *priv_ep,
				 struct cdns3_trb *trb)
{
	u32 offset = (char *)trb - (char *)priv_ep->trb_pool;

	return priv_ep->trb_pool_dma + offset;
}

static void cdns3_free_trb_pool(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;

	if (priv_ep->trb_pool) {
		dma_pool_free(priv_dev->eps_dma_pool,
			      priv_ep->trb_pool, priv_ep->trb_pool_dma);
		priv_ep->trb_pool = NULL;
	}
}

/**
 * cdns3_allocate_trb_pool - allocates the TRB pool for a selected endpoint
 * @priv_ep: endpoint object
 *
 * Function will return 0 on success or -ENOMEM on allocation error
 */
int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	int ring_size = TRB_RING_SIZE;
	int num_trbs = ring_size / TRB_SIZE;
	struct cdns3_trb *link_trb;

	if (priv_ep->trb_pool && priv_ep->alloc_ring_size < ring_size)
		cdns3_free_trb_pool(priv_ep);

	if (!priv_ep->trb_pool) {
		priv_ep->trb_pool = dma_pool_alloc(priv_dev->eps_dma_pool,
						   GFP_DMA32 | GFP_ATOMIC,
						   &priv_ep->trb_pool_dma);

		if (!priv_ep->trb_pool)
			return -ENOMEM;

		priv_ep->alloc_ring_size = ring_size;
	}

	memset(priv_ep->trb_pool, 0, ring_size);

	priv_ep->num_trbs = num_trbs;

	if (!priv_ep->num)
		return 0;

	/* Initialize the last TRB as Link TRB */
	link_trb = (priv_ep->trb_pool + (priv_ep->num_trbs - 1));

	if (priv_ep->use_streams) {
		/*
		 * For stream capable endpoints the driver uses a single,
		 * correct TRB. The last TRB has a zeroed cycle bit.
		 */
		link_trb->control = 0;
	} else {
		link_trb->buffer = cpu_to_le32(TRB_BUFFER(priv_ep->trb_pool_dma));
		link_trb->control = cpu_to_le32(TRB_CYCLE | TRB_TYPE(TRB_LINK) | TRB_TOGGLE);
	}
	return 0;
}

/**
 * cdns3_ep_stall_flush - Stalls and flushes selected endpoint
 * @priv_ep: endpoint object
 *
 * Endpoint must be selected before calling this function
 */
static void cdns3_ep_stall_flush(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	int val;

	trace_cdns3_halt(priv_ep, 1, 1);

	writel(EP_CMD_DFLUSH | EP_CMD_ERDY | EP_CMD_SSTALL,
	       &priv_dev->regs->ep_cmd);

	/* wait for DFLUSH cleared */
	readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
				  !(val & EP_CMD_DFLUSH), 1, 1000);
	priv_ep->flags |= EP_STALLED;
	priv_ep->flags &= ~EP_STALL_PENDING;
}

/**
 * cdns3_hw_reset_eps_config - reset endpoints configuration kept by controller.
 * @priv_dev: extended gadget object
 */
void cdns3_hw_reset_eps_config(struct cdns3_device *priv_dev)
{
	int i;

	writel(USB_CONF_CFGRST, &priv_dev->regs->usb_conf);

	cdns3_allow_enable_l1(priv_dev, 0);
	priv_dev->hw_configured_flag = 0;
	priv_dev->onchip_used_size = 0;
	priv_dev->out_mem_is_allocated = 0;
	priv_dev->wait_for_setup = 0;
	priv_dev->using_streams = 0;

	for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++)
		if (priv_dev->eps[i])
			priv_dev->eps[i]->flags &= ~EP_CONFIGURED;
}
/**
 * cdns3_ep_inc_trb - increment a trb index.
 * @index: Pointer to the TRB index to increment.
 * @cs: Cycle state
 * @trb_in_seg: number of TRBs in segment
 *
 * The index should never point to the link TRB. After incrementing, if it
 * would point to the link TRB, wrap around to the beginning and toggle the
 * cycle state bit. The link TRB is always the last TRB entry.
 */
static void cdns3_ep_inc_trb(int *index, u8 *cs, int trb_in_seg)
{
	(*index)++;
	if (*index == (trb_in_seg - 1)) {
		*index = 0;
		*cs ^= 1;
	}
}

/**
 * cdns3_ep_inc_enq - increment endpoint's enqueue pointer
 * @priv_ep: The endpoint whose enqueue pointer we're incrementing
 */
static void cdns3_ep_inc_enq(struct cdns3_endpoint *priv_ep)
{
	priv_ep->free_trbs--;
	cdns3_ep_inc_trb(&priv_ep->enqueue, &priv_ep->pcs, priv_ep->num_trbs);
}

/**
 * cdns3_ep_inc_deq - increment endpoint's dequeue pointer
 * @priv_ep: The endpoint whose dequeue pointer we're incrementing
 */
static void cdns3_ep_inc_deq(struct cdns3_endpoint *priv_ep)
{
	priv_ep->free_trbs++;
	cdns3_ep_inc_trb(&priv_ep->dequeue, &priv_ep->ccs, priv_ep->num_trbs);
}
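/*
 * Worked example, assuming a ring where priv_ep->num_trbs == 40 and the
 * link TRB sits at index 39: cdns3_ep_inc_trb() turns index 37 into 38,
 * while incrementing index 38 wraps straight to 0 and toggles the cycle
 * state, so the link TRB is never returned as an enqueue/dequeue position.
 */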
static void cdns3_move_deq_to_next_trb(struct cdns3_request *priv_req)
{
	struct cdns3_endpoint *priv_ep = priv_req->priv_ep;
	int current_trb = priv_req->start_trb;

	while (current_trb != priv_req->end_trb) {
		cdns3_ep_inc_deq(priv_ep);
		current_trb = priv_ep->dequeue;
	}

	cdns3_ep_inc_deq(priv_ep);
}

/**
 * cdns3_allow_enable_l1 - enable/disable permission to transition to L1.
 * @priv_dev: Extended gadget object
 * @enable: Enable/disable permission to transition to L1.
 *
 * If the USB_CONF_L1EN bit is set and the device receives an Extended
 * Token packet, the controller answers with an ACK handshake.
 * If the USB_CONF_L1DS bit is set and the device receives an Extended
 * Token packet, the controller answers with a NYET handshake.
 */
void cdns3_allow_enable_l1(struct cdns3_device *priv_dev, int enable)
{
	if (enable)
		writel(USB_CONF_L1EN, &priv_dev->regs->usb_conf);
	else
		writel(USB_CONF_L1DS, &priv_dev->regs->usb_conf);
}

enum usb_device_speed cdns3_get_speed(struct cdns3_device *priv_dev)
{
	u32 reg;

	reg = readl(&priv_dev->regs->usb_sts);

	if (DEV_SUPERSPEED(reg))
		return USB_SPEED_SUPER;
	else if (DEV_HIGHSPEED(reg))
		return USB_SPEED_HIGH;
	else if (DEV_FULLSPEED(reg))
		return USB_SPEED_FULL;
	else if (DEV_LOWSPEED(reg))
		return USB_SPEED_LOW;
	return USB_SPEED_UNKNOWN;
}

/**
 * cdns3_start_all_request - add all not-started requests to the ring
 * @priv_dev: Extended gadget object
 * @priv_ep: The endpoint for which requests will be started.
 *
 * Returns -ENOBUFS if the transfer ring does not have enough free TRBs to
 * start all requests.
 */
static int cdns3_start_all_request(struct cdns3_device *priv_dev,
				   struct cdns3_endpoint *priv_ep)
{
	struct usb_request *request;
	int ret = 0;
	u8 pending_empty = list_empty(&priv_ep->pending_req_list);

	/*
	 * If the last pending transfer is INTERNAL, or streams are enabled
	 * for this endpoint, do NOT start a new transfer while the last one
	 * is still pending.
	 */
	if (!pending_empty) {
		struct cdns3_request *priv_req;

		request = cdns3_next_request(&priv_ep->pending_req_list);
		priv_req = to_cdns3_request(request);
		if ((priv_req->flags & REQUEST_INTERNAL) ||
		    (priv_ep->flags & EP_TDLCHK_EN) ||
		    priv_ep->use_streams) {
			dev_dbg(priv_dev->dev, "Blocking external request\n");
			return ret;
		}
	}

	while (!list_empty(&priv_ep->deferred_req_list)) {
		request = cdns3_next_request(&priv_ep->deferred_req_list);

		if (!priv_ep->use_streams) {
			ret = cdns3_ep_run_transfer(priv_ep, request);
		} else {
			priv_ep->stream_sg_idx = 0;
			ret = cdns3_ep_run_stream_transfer(priv_ep, request);
		}
		if (ret)
			return ret;

		list_del(&request->list);
		list_add_tail(&request->list,
			      &priv_ep->pending_req_list);
		if (request->stream_id != 0 || (priv_ep->flags & EP_TDLCHK_EN))
			break;
	}

	priv_ep->flags &= ~EP_RING_FULL;
	return ret;
}

/*
 * WA2: Set this flag for all non-ISOC OUT endpoints. If the flag is set,
 * the driver tries to detect whether the endpoint needs an additional
 * internal buffer to unblock the on-chip FIFO. The flag is cleared if DMA
 * is armed before the first DESCMISS interrupt.
 */
#define cdns3_wa2_enable_detection(priv_dev, priv_ep, reg) do { \
	if (!priv_ep->dir && priv_ep->type != USB_ENDPOINT_XFER_ISOC) { \
		priv_ep->flags |= EP_QUIRK_EXTRA_BUF_DET; \
		(reg) |= EP_STS_EN_DESCMISEN; \
	} } while (0)

static void __cdns3_descmiss_copy_data(struct usb_request *request,
				       struct usb_request *descmiss_req)
{
	int length = request->actual + descmiss_req->actual;
	struct scatterlist *s = request->sg;

	if (!s) {
		if (length <= request->length) {
			memcpy(&((u8 *)request->buf)[request->actual],
			       descmiss_req->buf,
			       descmiss_req->actual);
			request->actual = length;
		} else {
			/* This should never occur */
			request->status = -ENOMEM;
		}
	} else {
		if (length <= sg_dma_len(s)) {
			void *p = phys_to_virt(sg_dma_address(s));

			memcpy(&((u8 *)p)[request->actual],
			       descmiss_req->buf,
			       descmiss_req->actual);
			request->actual = length;
		} else {
			request->status = -ENOMEM;
		}
	}
}
/**
 * cdns3_wa2_descmiss_copy_data - copy data from the internal requests to
 * the request queued by the class driver.
 * @priv_ep: extended endpoint object
 * @request: request object
 */
static void cdns3_wa2_descmiss_copy_data(struct cdns3_endpoint *priv_ep,
					 struct usb_request *request)
{
	struct usb_request *descmiss_req;
	struct cdns3_request *descmiss_priv_req;

	while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
		int chunk_end;

		descmiss_priv_req =
			cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
		descmiss_req = &descmiss_priv_req->request;

		/* driver can't touch pending request */
		if (descmiss_priv_req->flags & REQUEST_PENDING)
			break;

		chunk_end = descmiss_priv_req->flags & REQUEST_INTERNAL_CH;
		request->status = descmiss_req->status;
		__cdns3_descmiss_copy_data(request, descmiss_req);
		list_del_init(&descmiss_priv_req->list);
		kfree(descmiss_req->buf);
		cdns3_gadget_ep_free_request(&priv_ep->endpoint, descmiss_req);
		--priv_ep->wa2_counter;

		if (!chunk_end)
			break;
	}
}

static struct usb_request *cdns3_wa2_gadget_giveback(struct cdns3_device *priv_dev,
						     struct cdns3_endpoint *priv_ep,
						     struct cdns3_request *priv_req)
{
	if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN &&
	    priv_req->flags & REQUEST_INTERNAL) {
		struct usb_request *req;

		req = cdns3_next_request(&priv_ep->deferred_req_list);

		priv_ep->descmis_req = NULL;

		if (!req)
			return NULL;

		/* unmap the gadget request before copying data */
		usb_gadget_unmap_request_by_dev(priv_dev->sysdev, req,
						priv_ep->dir);

		cdns3_wa2_descmiss_copy_data(priv_ep, req);
		if (!(priv_ep->flags & EP_QUIRK_END_TRANSFER) &&
		    req->length != req->actual) {
			/*
			 * Wait for the next part of the transfer;
			 * re-map the gadget request buffer.
			 */
			usb_gadget_map_request_by_dev(priv_dev->sysdev, req,
						      usb_endpoint_dir_in(priv_ep->endpoint.desc));
			return NULL;
		}

		if (req->status == -EINPROGRESS)
			req->status = 0;

		list_del_init(&req->list);
		cdns3_start_all_request(priv_dev, priv_ep);
		return req;
	}

	return &priv_req->request;
}
static int cdns3_wa2_gadget_ep_queue(struct cdns3_device *priv_dev,
				     struct cdns3_endpoint *priv_ep,
				     struct cdns3_request *priv_req)
{
	int deferred = 0;

	/*
	 * If a transfer was queued before DESCMISS appeared, the driver can
	 * disable handling of the DESCMISS interrupt. It assumes it can drop
	 * the special treatment for this endpoint.
	 */
	if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET) {
		u32 reg;

		cdns3_select_ep(priv_dev, priv_ep->num | priv_ep->dir);
		priv_ep->flags &= ~EP_QUIRK_EXTRA_BUF_DET;
		reg = readl(&priv_dev->regs->ep_sts_en);
		reg &= ~EP_STS_EN_DESCMISEN;
		trace_cdns3_wa2(priv_ep, "workaround disabled\n");
		writel(reg, &priv_dev->regs->ep_sts_en);
	}

	if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN) {
		u8 pending_empty = list_empty(&priv_ep->pending_req_list);
		u8 descmiss_empty = list_empty(&priv_ep->wa2_descmiss_req_list);

		/*
		 * A DESCMISS transfer has finished, so data will be copied
		 * directly from the internally allocated usb_request
		 * objects.
		 */
		if (pending_empty && !descmiss_empty &&
		    !(priv_req->flags & REQUEST_INTERNAL)) {
			cdns3_wa2_descmiss_copy_data(priv_ep,
						     &priv_req->request);

			trace_cdns3_wa2(priv_ep, "get internal stored data");

			list_add_tail(&priv_req->request.list,
				      &priv_ep->pending_req_list);
			cdns3_gadget_giveback(priv_ep, priv_req,
					      priv_req->request.status);

			/*
			 * The driver intentionally returns a positive value
			 * here, to inform the caller that the transfer has
			 * already been finished.
			 */
			return EINPROGRESS;
		}

		/*
		 * The driver waits for the DESCMISS transfer to complete
		 * before starting a new, non-DESCMISS transfer.
		 */
		if (!pending_empty && !descmiss_empty) {
			trace_cdns3_wa2(priv_ep, "wait for pending transfer\n");
			deferred = 1;
		}

		if (priv_req->flags & REQUEST_INTERNAL)
			list_add_tail(&priv_req->list,
				      &priv_ep->wa2_descmiss_req_list);
	}

	return deferred;
}

static void cdns3_wa2_remove_old_request(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_request *priv_req;

	while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
		u8 chain;

		priv_req = cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
		chain = !!(priv_req->flags & REQUEST_INTERNAL_CH);

		trace_cdns3_wa2(priv_ep, "removing oldest request");

		kfree(priv_req->request.buf);
		cdns3_gadget_ep_free_request(&priv_ep->endpoint,
					     &priv_req->request);
		list_del_init(&priv_req->list);
		--priv_ep->wa2_counter;

		if (!chain)
			break;
	}
}
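/*
 * Example of the internal request chaining used above: if three DESCMISS
 * buffers arrive before the class driver queues anything, the list holds
 * req1(INTERNAL|CH) -> req2(INTERNAL|CH) -> req3(INTERNAL). Both the copy
 * loop and the removal loop walk the chain, starting from the oldest
 * entry, until they pass the first request without REQUEST_INTERNAL_CH.
 */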
/**
 * cdns3_wa2_descmissing_packet - handles the descriptor missing event.
 * @priv_ep: extended endpoint object
 *
 * This function is used only for WA2. For more information see the
 * Workaround 2 description.
 */
static void cdns3_wa2_descmissing_packet(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_request *priv_req;
	struct usb_request *request;
	u8 pending_empty = list_empty(&priv_ep->pending_req_list);

	/* check for pending transfer */
	if (!pending_empty) {
		trace_cdns3_wa2(priv_ep, "Ignoring Descriptor missing IRQ\n");
		return;
	}

	if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET) {
		priv_ep->flags &= ~EP_QUIRK_EXTRA_BUF_DET;
		priv_ep->flags |= EP_QUIRK_EXTRA_BUF_EN;
	}

	trace_cdns3_wa2(priv_ep, "Descriptor Missing detected\n");

	if (priv_ep->wa2_counter >= CDNS3_WA2_NUM_BUFFERS) {
		trace_cdns3_wa2(priv_ep, "WA2 overflow\n");
		cdns3_wa2_remove_old_request(priv_ep);
	}

	request = cdns3_gadget_ep_alloc_request(&priv_ep->endpoint,
						GFP_ATOMIC);
	if (!request)
		goto err;

	priv_req = to_cdns3_request(request);
	priv_req->flags |= REQUEST_INTERNAL;

	/*
	 * If this field is still assigned, it indicates that the transfer
	 * related to this request has not been finished yet. In that case
	 * the driver simply allocates the next request and sets the
	 * REQUEST_INTERNAL_CH flag on the previous one, to indicate that the
	 * current request is a part of the previous one.
	 */
	if (priv_ep->descmis_req)
		priv_ep->descmis_req->flags |= REQUEST_INTERNAL_CH;

	priv_req->request.buf = kzalloc(CDNS3_DESCMIS_BUF_SIZE,
					GFP_ATOMIC);
	priv_ep->wa2_counter++;

	if (!priv_req->request.buf) {
		cdns3_gadget_ep_free_request(&priv_ep->endpoint, request);
		goto err;
	}

	priv_req->request.length = CDNS3_DESCMIS_BUF_SIZE;
	priv_ep->descmis_req = priv_req;

	__cdns3_gadget_ep_queue(&priv_ep->endpoint,
				&priv_ep->descmis_req->request,
				GFP_ATOMIC);

	return;

err:
	dev_err(priv_ep->cdns3_dev->dev,
		"Failed: insufficient memory for DESCMISS\n");
}

static void cdns3_wa2_reset_tdl(struct cdns3_device *priv_dev)
{
	u16 tdl = EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd));

	if (tdl) {
		u16 reset_val = EP_CMD_TDL_MAX + 1 - tdl;

		writel(EP_CMD_TDL_SET(reset_val) | EP_CMD_STDL,
		       &priv_dev->regs->ep_cmd);
	}
}
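/*
 * Worked example for cdns3_wa2_reset_tdl(), assuming EP_CMD_TDL_MAX is
 * 0x7f: a residual tdl of 3 makes the driver write 0x7f + 1 - 3 = 0x7d,
 * which advances the hardware TDL counter so that it wraps back to zero.
 */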
static void cdns3_wa2_check_outq_status(struct cdns3_device *priv_dev)
{
	u32 ep_sts_reg;

	/* select EP0-out */
	cdns3_select_ep(priv_dev, 0);

	ep_sts_reg = readl(&priv_dev->regs->ep_sts);

	if (EP_STS_OUTQ_VAL(ep_sts_reg)) {
		u32 outq_ep_num = EP_STS_OUTQ_NO(ep_sts_reg);
		struct cdns3_endpoint *outq_ep = priv_dev->eps[outq_ep_num];

		if ((outq_ep->flags & EP_ENABLED) && !(outq_ep->use_streams) &&
		    outq_ep->type != USB_ENDPOINT_XFER_ISOC && outq_ep_num) {
			u8 pending_empty = list_empty(&outq_ep->pending_req_list);

			if (!(outq_ep->flags & EP_QUIRK_EXTRA_BUF_DET) &&
			    !(outq_ep->flags & EP_QUIRK_EXTRA_BUF_EN) &&
			    pending_empty) {
				u32 ep_sts_en_reg;
				u32 ep_cmd_reg;

				cdns3_select_ep(priv_dev, outq_ep->num |
						outq_ep->dir);
				ep_sts_en_reg = readl(&priv_dev->regs->ep_sts_en);
				ep_cmd_reg = readl(&priv_dev->regs->ep_cmd);

				outq_ep->flags |= EP_TDLCHK_EN;
				cdns3_set_register_bit(&priv_dev->regs->ep_cfg,
						       EP_CFG_TDL_CHK);

				cdns3_wa2_enable_detection(priv_dev, outq_ep,
							   ep_sts_en_reg);
				writel(ep_sts_en_reg,
				       &priv_dev->regs->ep_sts_en);
				/* reset tdl value to zero */
				cdns3_wa2_reset_tdl(priv_dev);
				/*
				 * Memory barrier - Reset tdl before ringing
				 * the doorbell.
				 */
				wmb();
				if (EP_CMD_DRDY & ep_cmd_reg) {
					trace_cdns3_wa2(outq_ep, "Enabling WA2 skipping doorbell\n");

				} else {
					trace_cdns3_wa2(outq_ep, "Enabling WA2 ringing doorbell\n");
					/*
					 * ring the doorbell to generate a
					 * DESCMIS irq
					 */
					writel(EP_CMD_DRDY,
					       &priv_dev->regs->ep_cmd);
				}
			}
		}
	}
}

/**
 * cdns3_gadget_giveback - call struct usb_request's ->complete callback
 * @priv_ep: The endpoint to which the request belongs
 * @priv_req: The request we're giving back
 * @status: completion code for the request
 *
 * Must be called with controller's lock held and interrupts disabled. This
 * function will unmap the request and call its ->complete() callback to
 * notify upper layers that it has completed.
 */
void cdns3_gadget_giveback(struct cdns3_endpoint *priv_ep,
			   struct cdns3_request *priv_req,
			   int status)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct usb_request *request = &priv_req->request;

	list_del_init(&request->list);

	if (request->status == -EINPROGRESS)
		request->status = status;

	usb_gadget_unmap_request_by_dev(priv_dev->sysdev, request,
					priv_ep->dir);

	if ((priv_req->flags & REQUEST_UNALIGNED) &&
	    priv_ep->dir == USB_DIR_OUT && !request->status) {
		/* Make DMA buffer CPU accessible */
		dma_sync_single_for_cpu(priv_dev->sysdev,
					priv_req->aligned_buf->dma,
					priv_req->aligned_buf->size,
					priv_req->aligned_buf->dir);
		memcpy(request->buf, priv_req->aligned_buf->buf,
		       request->length);
	}

	priv_req->flags &= ~(REQUEST_PENDING | REQUEST_UNALIGNED);
	/* All TRBs have finished, clear the counter */
	priv_req->finished_trb = 0;
	trace_cdns3_gadget_giveback(priv_req);

	if (priv_dev->dev_ver < DEV_VER_V2) {
		request = cdns3_wa2_gadget_giveback(priv_dev, priv_ep,
						    priv_req);
		if (!request)
			return;
	}

	if (request->complete) {
		spin_unlock(&priv_dev->lock);
		usb_gadget_giveback_request(&priv_ep->endpoint,
					    request);
		spin_lock(&priv_dev->lock);
	}

	if (request->buf == priv_dev->zlp_buf)
		cdns3_gadget_ep_free_request(&priv_ep->endpoint, request);
}

static void cdns3_wa1_restore_cycle_bit(struct cdns3_endpoint *priv_ep)
{
	/* Workaround for stale data address in TRB */
	if (priv_ep->wa1_set) {
		trace_cdns3_wa1(priv_ep, "restore cycle bit");

		priv_ep->wa1_set = 0;
		priv_ep->wa1_trb_index = 0xFFFF;
		if (priv_ep->wa1_cycle_bit) {
			priv_ep->wa1_trb->control =
				priv_ep->wa1_trb->control | cpu_to_le32(0x1);
		} else {
			priv_ep->wa1_trb->control =
				priv_ep->wa1_trb->control & cpu_to_le32(~0x1);
		}
	}
}

static void cdns3_free_aligned_request_buf(struct work_struct *work)
{
	struct cdns3_device *priv_dev = container_of(work, struct cdns3_device,
						     aligned_buf_wq);
	struct cdns3_aligned_buf *buf, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&priv_dev->lock, flags);

	list_for_each_entry_safe(buf, tmp, &priv_dev->aligned_buf_list, list) {
		if (!buf->in_use) {
			list_del(&buf->list);

			/*
			 * Re-enable interrupts to free DMA capable memory.
			 * The driver can't free this memory with interrupts
			 * disabled.
			 */
			spin_unlock_irqrestore(&priv_dev->lock, flags);
			dma_free_noncoherent(priv_dev->sysdev, buf->size,
					     buf->buf, buf->dma, buf->dir);
			kfree(buf);
			spin_lock_irqsave(&priv_dev->lock, flags);
		}
	}

	spin_unlock_irqrestore(&priv_dev->lock, flags);
}
static int cdns3_prepare_aligned_request_buf(struct cdns3_request *priv_req)
{
	struct cdns3_endpoint *priv_ep = priv_req->priv_ep;
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct cdns3_aligned_buf *buf;

	/* check if buffer is aligned to 8. */
	if (!((uintptr_t)priv_req->request.buf & 0x7))
		return 0;

	buf = priv_req->aligned_buf;

	if (!buf || priv_req->request.length > buf->size) {
		buf = kzalloc(sizeof(*buf), GFP_ATOMIC);
		if (!buf)
			return -ENOMEM;

		buf->size = priv_req->request.length;
		buf->dir = usb_endpoint_dir_in(priv_ep->endpoint.desc) ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE;

		buf->buf = dma_alloc_noncoherent(priv_dev->sysdev,
						 buf->size,
						 &buf->dma,
						 buf->dir,
						 GFP_ATOMIC);
		if (!buf->buf) {
			kfree(buf);
			return -ENOMEM;
		}

		if (priv_req->aligned_buf) {
			trace_cdns3_free_aligned_request(priv_req);
			priv_req->aligned_buf->in_use = 0;
			queue_work(system_freezable_wq,
				   &priv_dev->aligned_buf_wq);
		}

		buf->in_use = 1;
		priv_req->aligned_buf = buf;

		list_add_tail(&buf->list,
			      &priv_dev->aligned_buf_list);
	}

	if (priv_ep->dir == USB_DIR_IN) {
		/* Make DMA buffer CPU accessible */
		dma_sync_single_for_cpu(priv_dev->sysdev,
					buf->dma, buf->size, buf->dir);
		memcpy(buf->buf, priv_req->request.buf,
		       priv_req->request.length);
	}

	/* Transfer DMA buffer ownership back to device */
	dma_sync_single_for_device(priv_dev->sysdev,
				   buf->dma, buf->size, buf->dir);

	priv_req->flags |= REQUEST_UNALIGNED;
	trace_cdns3_prepare_aligned_request(priv_req);

	return 0;
}
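/*
 * Example of the bounce-buffer path above: a request buffer at an address
 * ending in 0x...4 fails the "& 0x7" alignment check, so the data is
 * staged through the DMA-safe aligned buffer; a buffer at an address
 * ending in 0x...0 or 0x...8 is 8-byte aligned and is used directly.
 */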
static int cdns3_wa1_update_guard(struct cdns3_endpoint *priv_ep,
				  struct cdns3_trb *trb)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;

	if (!priv_ep->wa1_set) {
		u32 doorbell;

		doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);

		if (doorbell) {
			priv_ep->wa1_cycle_bit = priv_ep->pcs ? TRB_CYCLE : 0;
			priv_ep->wa1_set = 1;
			priv_ep->wa1_trb = trb;
			priv_ep->wa1_trb_index = priv_ep->enqueue;
			trace_cdns3_wa1(priv_ep, "set guard");
			return 0;
		}
	}
	return 1;
}

static void cdns3_wa1_tray_restore_cycle_bit(struct cdns3_device *priv_dev,
					     struct cdns3_endpoint *priv_ep)
{
	int dma_index;
	u32 doorbell;

	doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);
	dma_index = cdns3_get_dma_pos(priv_dev, priv_ep);

	if (!doorbell || dma_index != priv_ep->wa1_trb_index)
		cdns3_wa1_restore_cycle_bit(priv_ep);
}

static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep,
					struct usb_request *request)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct cdns3_request *priv_req;
	struct cdns3_trb *trb;
	dma_addr_t trb_dma;
	int address;
	u32 control;
	u32 length;
	u32 tdl;
	unsigned int sg_idx = priv_ep->stream_sg_idx;

	priv_req = to_cdns3_request(request);
	address = priv_ep->endpoint.desc->bEndpointAddress;

	priv_ep->flags |= EP_PENDING_REQUEST;

	/* an 8-byte aligned buffer must be used */
	if (priv_req->flags & REQUEST_UNALIGNED)
		trb_dma = priv_req->aligned_buf->dma;
	else
		trb_dma = request->dma;

	/* For stream capable endpoints the driver uses only a single TD. */
	trb = priv_ep->trb_pool + priv_ep->enqueue;
	priv_req->start_trb = priv_ep->enqueue;
	priv_req->end_trb = priv_req->start_trb;
	priv_req->trb = trb;

	cdns3_select_ep(priv_ep->cdns3_dev, address);

	control = TRB_TYPE(TRB_NORMAL) | TRB_CYCLE |
		  TRB_STREAM_ID(priv_req->request.stream_id) | TRB_ISP;

	if (!request->num_sgs) {
		trb->buffer = cpu_to_le32(TRB_BUFFER(trb_dma));
		length = request->length;
	} else {
		trb->buffer = cpu_to_le32(TRB_BUFFER(request->sg[sg_idx].dma_address));
		length = request->sg[sg_idx].length;
	}

	tdl = DIV_ROUND_UP(length, priv_ep->endpoint.maxpacket);

	trb->length = cpu_to_le32(TRB_BURST_LEN(16) | TRB_LEN(length));

	/*
	 * For the DEV_VER_V2 controller version we have enabled
	 * USB_CONF2_EN_TDL_TRB in the DMULT configuration.
	 * This enables TDL calculation based on the TRB, hence setting TDL
	 * in the TRB.
	 */
	if (priv_dev->dev_ver >= DEV_VER_V2) {
		if (priv_dev->gadget.speed == USB_SPEED_SUPER)
			trb->length |= cpu_to_le32(TRB_TDL_SS_SIZE(tdl));
	}
	priv_req->flags |= REQUEST_PENDING;

	trb->control = cpu_to_le32(control);

	trace_cdns3_prepare_trb(priv_ep, priv_req->trb);

	/*
	 * Memory barrier - Cycle Bit must be set before trb->length and
	 * trb->buffer fields.
	 */
	wmb();

	/* always first element */
	writel(EP_TRADDR_TRADDR(priv_ep->trb_pool_dma),
	       &priv_dev->regs->ep_traddr);

	if (!(priv_ep->flags & EP_STALLED)) {
		trace_cdns3_ring(priv_ep);
		/* clear TRBERR and EP_STS_DESCMIS before setting DRDY */
		writel(EP_STS_TRBERR | EP_STS_DESCMIS, &priv_dev->regs->ep_sts);

		priv_ep->prime_flag = false;

		/*
		 * For controller version DEV_VER_V2 the tdl calculation
		 * is based on the TRB.
		 */

		if (priv_dev->dev_ver < DEV_VER_V2)
			writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL,
			       &priv_dev->regs->ep_cmd);
		else if (priv_dev->dev_ver > DEV_VER_V2)
			writel(tdl, &priv_dev->regs->ep_tdl);

		priv_ep->last_stream_id = priv_req->request.stream_id;
		writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
		writel(EP_CMD_ERDY_SID(priv_req->request.stream_id) |
		       EP_CMD_ERDY, &priv_dev->regs->ep_cmd);

		trace_cdns3_doorbell_epx(priv_ep->name,
					 readl(&priv_dev->regs->ep_traddr));
	}

	/* WORKAROUND for transition to L0 */
	__cdns3_gadget_wakeup(priv_dev);

	return 0;
}
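/*
 * Worked TDL example for the stream transfer above: a 3000-byte request on
 * a SuperSpeed bulk endpoint with a 1024-byte max packet size gives
 * tdl = DIV_ROUND_UP(3000, 1024) = 3.
 */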
/**
 * cdns3_ep_run_transfer - start transfer on non-default endpoint hardware
 * @priv_ep: endpoint object
 * @request: request object
 *
 * Returns zero on success or negative value on failure
 */
static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
				 struct usb_request *request)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct cdns3_request *priv_req;
	struct cdns3_trb *trb;
	struct cdns3_trb *link_trb = NULL;
	dma_addr_t trb_dma;
	u32 toggle_pcs = 1;
	int sg_iter = 0;
	int num_trb;
	int address;
	u32 control;
	int pcs;
	u16 total_tdl = 0;
	struct scatterlist *s = NULL;
	bool sg_supported = !!(request->num_mapped_sgs);

	if (priv_ep->type == USB_ENDPOINT_XFER_ISOC)
		num_trb = priv_ep->interval;
	else
		num_trb = sg_supported ? request->num_mapped_sgs : 1;

	if (num_trb > priv_ep->free_trbs) {
		priv_ep->flags |= EP_RING_FULL;
		return -ENOBUFS;
	}

	priv_req = to_cdns3_request(request);
	address = priv_ep->endpoint.desc->bEndpointAddress;

	priv_ep->flags |= EP_PENDING_REQUEST;

	/* an 8-byte aligned buffer must be used */
	if (priv_req->flags & REQUEST_UNALIGNED)
		trb_dma = priv_req->aligned_buf->dma;
	else
		trb_dma = request->dma;

	trb = priv_ep->trb_pool + priv_ep->enqueue;
	priv_req->start_trb = priv_ep->enqueue;
	priv_req->trb = trb;

	cdns3_select_ep(priv_ep->cdns3_dev, address);

	/* prepare ring */
	if ((priv_ep->enqueue + num_trb) >= (priv_ep->num_trbs - 1)) {
		int doorbell, dma_index;
		u32 ch_bit = 0;

		doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);
		dma_index = cdns3_get_dma_pos(priv_dev, priv_ep);

		/*
		 * The driver can't update the LINK TRB while it is being
		 * processed.
		 */
		if (doorbell && dma_index == priv_ep->num_trbs - 1) {
			priv_ep->flags |= EP_DEFERRED_DRDY;
			return -ENOBUFS;
		}

		/* update the Cycle bit in the Link TRB before starting DMA */
		link_trb = priv_ep->trb_pool + (priv_ep->num_trbs - 1);
		/*
		 * For a TR of size 2, enabling TRB_CHAIN for epXin causes
		 * DMA to get stuck at the LINK TRB.
		 * On the other hand, removing TRB_CHAIN for longer TRs on
		 * epXout causes DMA to get stuck after handling the LINK TRB.
		 * To eliminate this strange behavior the driver sets the
		 * TRB_CHAIN bit only for TR size > 2.
		 */
		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC ||
		    TRBS_PER_SEGMENT > 2)
			ch_bit = TRB_CHAIN;

		link_trb->control = cpu_to_le32(((priv_ep->pcs) ? TRB_CYCLE : 0) |
				    TRB_TYPE(TRB_LINK) | TRB_TOGGLE | ch_bit);
	}

	if (priv_dev->dev_ver <= DEV_VER_V2)
		toggle_pcs = cdns3_wa1_update_guard(priv_ep, trb);

	if (sg_supported)
		s = request->sg;

	/* set an incorrect Cycle bit for the first TRB */
	control = priv_ep->pcs ? 0 : TRB_CYCLE;
	trb->length = 0;
	if (priv_dev->dev_ver >= DEV_VER_V2) {
		u16 td_size;

		td_size = DIV_ROUND_UP(request->length,
				       priv_ep->endpoint.maxpacket);
		if (priv_dev->gadget.speed == USB_SPEED_SUPER)
			trb->length = cpu_to_le32(TRB_TDL_SS_SIZE(td_size));
		else
			control |= TRB_TDL_HS_SIZE(td_size);
	}

	do {
		u32 length;

		/* fill TRB */
		control |= TRB_TYPE(TRB_NORMAL);
		if (sg_supported) {
			trb->buffer = cpu_to_le32(TRB_BUFFER(sg_dma_address(s)));
			length = sg_dma_len(s);
		} else {
			trb->buffer = cpu_to_le32(TRB_BUFFER(trb_dma));
			length = request->length;
		}

		if (priv_ep->flags & EP_TDLCHK_EN)
			total_tdl += DIV_ROUND_UP(length,
						  priv_ep->endpoint.maxpacket);

		trb->length |= cpu_to_le32(TRB_BURST_LEN(priv_ep->trb_burst_size) |
					   TRB_LEN(length));
		pcs = priv_ep->pcs ? TRB_CYCLE : 0;

		/*
		 * The first TRB must be prepared last, to avoid starting to
		 * process the transfer too early.
		 */
		if (sg_iter != 0)
			control |= pcs;

		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir) {
			control |= TRB_IOC | TRB_ISP;
		} else {
			/* for last element in TD or in SG list */
			if (sg_iter == (num_trb - 1) && sg_iter != 0)
				control |= pcs | TRB_IOC | TRB_ISP;
		}

		if (sg_iter)
			trb->control = cpu_to_le32(control);
		else
			priv_req->trb->control = cpu_to_le32(control);

		if (sg_supported) {
			trb->control |= cpu_to_le32(TRB_ISP);
			/* Don't set chain bit for last TRB */
			if (sg_iter < num_trb - 1)
				trb->control |= cpu_to_le32(TRB_CHAIN);

			s = sg_next(s);
		}

		control = 0;
		++sg_iter;
		priv_req->end_trb = priv_ep->enqueue;
		cdns3_ep_inc_enq(priv_ep);
		trb = priv_ep->trb_pool + priv_ep->enqueue;
		trb->length = 0;
	} while (sg_iter < num_trb);

	trb = priv_req->trb;

	priv_req->flags |= REQUEST_PENDING;
	priv_req->num_of_trb = num_trb;

	if (sg_iter == 1)
		trb->control |= cpu_to_le32(TRB_IOC | TRB_ISP);

	if (priv_dev->dev_ver < DEV_VER_V2 &&
	    (priv_ep->flags & EP_TDLCHK_EN)) {
		u16 tdl = total_tdl;
		u16 old_tdl = EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd));

		if (tdl > EP_CMD_TDL_MAX) {
			tdl = EP_CMD_TDL_MAX;
			priv_ep->pending_tdl = total_tdl - EP_CMD_TDL_MAX;
		}

		if (old_tdl < tdl) {
			tdl -= old_tdl;
			writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL,
			       &priv_dev->regs->ep_cmd);
		}
	}

	/*
	 * Memory barrier - the cycle bit must be set before the other
	 * fields in the TRB.
	 */
	wmb();

	/* give the TD to the consumer */
	if (toggle_pcs)
		trb->control = trb->control ^ cpu_to_le32(1);

	if (priv_dev->dev_ver <= DEV_VER_V2)
		cdns3_wa1_tray_restore_cycle_bit(priv_dev, priv_ep);

	if (num_trb > 1) {
		int i = 0;

		while (i < num_trb) {
			trace_cdns3_prepare_trb(priv_ep, trb + i);
			if (trb + i == link_trb) {
				trb = priv_ep->trb_pool;
				num_trb = num_trb - i;
				i = 0;
			} else {
				i++;
			}
		}
	} else {
		trace_cdns3_prepare_trb(priv_ep, priv_req->trb);
	}

	/*
	 * Memory barrier - Cycle Bit must be set before trb->length and
	 * trb->buffer fields.
	 */
	wmb();

	/*
	 * For DMULT mode we can set the transfer ring address only once,
	 * after enabling the endpoint.
	 */
	if (priv_ep->flags & EP_UPDATE_EP_TRBADDR) {
		/*
		 * Until SW is ready to handle the OUT transfer, the ISO OUT
		 * endpoint should be disabled (EP_CFG.ENABLE = 0).
		 * EP_CFG_ENABLE must be set before updating ep_traddr.
		 */
1336 */ 1337 if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir && 1338 !(priv_ep->flags & EP_QUIRK_ISO_OUT_EN)) { 1339 priv_ep->flags |= EP_QUIRK_ISO_OUT_EN; 1340 cdns3_set_register_bit(&priv_dev->regs->ep_cfg, 1341 EP_CFG_ENABLE); 1342 } 1343 1344 writel(EP_TRADDR_TRADDR(priv_ep->trb_pool_dma + 1345 priv_req->start_trb * TRB_SIZE), 1346 &priv_dev->regs->ep_traddr); 1347 1348 priv_ep->flags &= ~EP_UPDATE_EP_TRBADDR; 1349 } 1350 1351 if (!priv_ep->wa1_set && !(priv_ep->flags & EP_STALLED)) { 1352 trace_cdns3_ring(priv_ep); 1353 /*clearing TRBERR and EP_STS_DESCMIS before seting DRDY*/ 1354 writel(EP_STS_TRBERR | EP_STS_DESCMIS, &priv_dev->regs->ep_sts); 1355 writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd); 1356 trace_cdns3_doorbell_epx(priv_ep->name, 1357 readl(&priv_dev->regs->ep_traddr)); 1358 } 1359 1360 /* WORKAROUND for transition to L0 */ 1361 __cdns3_gadget_wakeup(priv_dev); 1362 1363 return 0; 1364 } 1365 1366 void cdns3_set_hw_configuration(struct cdns3_device *priv_dev) 1367 { 1368 struct cdns3_endpoint *priv_ep; 1369 struct usb_ep *ep; 1370 1371 if (priv_dev->hw_configured_flag) 1372 return; 1373 1374 writel(USB_CONF_CFGSET, &priv_dev->regs->usb_conf); 1375 1376 cdns3_set_register_bit(&priv_dev->regs->usb_conf, 1377 USB_CONF_U1EN | USB_CONF_U2EN); 1378 1379 priv_dev->hw_configured_flag = 1; 1380 1381 list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) { 1382 if (ep->enabled) { 1383 priv_ep = ep_to_cdns3_ep(ep); 1384 cdns3_start_all_request(priv_dev, priv_ep); 1385 } 1386 } 1387 1388 cdns3_allow_enable_l1(priv_dev, 1); 1389 } 1390 1391 /** 1392 * cdns3_trb_handled - check whether trb has been handled by DMA 1393 * 1394 * @priv_ep: extended endpoint object. 1395 * @priv_req: request object for checking 1396 * 1397 * Endpoint must be selected before invoking this function. 1398 * 1399 * Returns false if request has not been handled by DMA, else returns true. 1400 * 1401 * SR - start ring 1402 * ER - end ring 1403 * DQ = priv_ep->dequeue - dequeue position 1404 * EQ = priv_ep->enqueue - enqueue position 1405 * ST = priv_req->start_trb - index of first TRB in transfer ring 1406 * ET = priv_req->end_trb - index of last TRB in transfer ring 1407 * CI = current_index - index of processed TRB by DMA. 1408 * 1409 * As first step, we check if the TRB between the ST and ET. 1410 * Then, we check if cycle bit for index priv_ep->dequeue 1411 * is correct. 1412 * 1413 * some rules: 1414 * 1. priv_ep->dequeue never equals to current_index. 1415 * 2 priv_ep->enqueue never exceed priv_ep->dequeue 1416 * 3. exception: priv_ep->enqueue == priv_ep->dequeue 1417 * and priv_ep->free_trbs is zero. 1418 * This case indicate that TR is full. 1419 * 1420 * At below two cases, the request have been handled. 1421 * Case 1 - priv_ep->dequeue < current_index 1422 * SR ... EQ ... DQ ... CI ... ER 1423 * SR ... DQ ... CI ... EQ ... ER 1424 * 1425 * Case 2 - priv_ep->dequeue > current_index 1426 * This situation takes place when CI go through the LINK TRB at the end of 1427 * transfer ring. 1428 * SR ... CI ... EQ ... DQ ... 
static bool cdns3_trb_handled(struct cdns3_endpoint *priv_ep,
			      struct cdns3_request *priv_req)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct cdns3_trb *trb;
	int current_index = 0;
	int handled = 0;
	int doorbell;

	current_index = cdns3_get_dma_pos(priv_dev, priv_ep);
	doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);

	/* current trb doesn't belong to this request */
	if (priv_req->start_trb < priv_req->end_trb) {
		if (priv_ep->dequeue > priv_req->end_trb)
			goto finish;

		if (priv_ep->dequeue < priv_req->start_trb)
			goto finish;
	}

	if ((priv_req->start_trb > priv_req->end_trb) &&
	    (priv_ep->dequeue > priv_req->end_trb) &&
	    (priv_ep->dequeue < priv_req->start_trb))
		goto finish;

	if ((priv_req->start_trb == priv_req->end_trb) &&
	    (priv_ep->dequeue != priv_req->end_trb))
		goto finish;

	trb = &priv_ep->trb_pool[priv_ep->dequeue];

	if ((le32_to_cpu(trb->control) & TRB_CYCLE) != priv_ep->ccs)
		goto finish;

	if (doorbell == 1 && current_index == priv_ep->dequeue)
		goto finish;

	/* The corner case for TRBS_PER_SEGMENT equal to 2. */
	if (TRBS_PER_SEGMENT == 2 && priv_ep->type != USB_ENDPOINT_XFER_ISOC) {
		handled = 1;
		goto finish;
	}

	if (priv_ep->enqueue == priv_ep->dequeue &&
	    priv_ep->free_trbs == 0) {
		handled = 1;
	} else if (priv_ep->dequeue < current_index) {
		if ((current_index == (priv_ep->num_trbs - 1)) &&
		    !priv_ep->dequeue)
			goto finish;

		handled = 1;
	} else if (priv_ep->dequeue > current_index) {
		handled = 1;
	}

finish:
	trace_cdns3_request_handled(priv_req, current_index, handled);

	return handled;
}
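/*
 * Worked example of Case 1 above, assuming the cycle bit at the dequeue
 * position matches priv_ep->ccs: with ST = 2, ET = 6, DQ = 4 and CI = 5
 * while the doorbell is set, DQ lies inside the request, DQ != CI and
 * DQ < CI, so the TRB is reported as handled.
 */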
1517 */ 1518 cdns3_select_ep(priv_dev, priv_ep->endpoint.address); 1519 1520 while (cdns3_trb_handled(priv_ep, priv_req)) { 1521 priv_req->finished_trb++; 1522 if (priv_req->finished_trb >= priv_req->num_of_trb) 1523 request_handled = true; 1524 1525 trb = priv_ep->trb_pool + priv_ep->dequeue; 1526 trace_cdns3_complete_trb(priv_ep, trb); 1527 1528 if (!transfer_end) 1529 request->actual += 1530 TRB_LEN(le32_to_cpu(trb->length)); 1531 1532 if (priv_req->num_of_trb > 1 && 1533 le32_to_cpu(trb->control) & TRB_SMM) 1534 transfer_end = true; 1535 1536 cdns3_ep_inc_deq(priv_ep); 1537 } 1538 1539 if (request_handled) { 1540 cdns3_gadget_giveback(priv_ep, priv_req, 0); 1541 request_handled = false; 1542 transfer_end = false; 1543 } else { 1544 goto prepare_next_td; 1545 } 1546 1547 if (priv_ep->type != USB_ENDPOINT_XFER_ISOC && 1548 TRBS_PER_SEGMENT == 2) 1549 break; 1550 } else { 1551 /* Re-select endpoint. It could be changed by other CPU 1552 * during handling usb_gadget_giveback_request. 1553 */ 1554 cdns3_select_ep(priv_dev, priv_ep->endpoint.address); 1555 1556 trb = priv_ep->trb_pool; 1557 trace_cdns3_complete_trb(priv_ep, trb); 1558 1559 if (trb != priv_req->trb) 1560 dev_warn(priv_dev->dev, 1561 "request_trb=0x%p, queue_trb=0x%p\n", 1562 priv_req->trb, trb); 1563 1564 request->actual += TRB_LEN(le32_to_cpu(trb->length)); 1565 1566 if (!request->num_sgs || 1567 (request->num_sgs == (priv_ep->stream_sg_idx + 1))) { 1568 priv_ep->stream_sg_idx = 0; 1569 cdns3_gadget_giveback(priv_ep, priv_req, 0); 1570 } else { 1571 priv_ep->stream_sg_idx++; 1572 cdns3_ep_run_stream_transfer(priv_ep, request); 1573 } 1574 break; 1575 } 1576 } 1577 priv_ep->flags &= ~EP_PENDING_REQUEST; 1578 1579 prepare_next_td: 1580 if (!(priv_ep->flags & EP_STALLED) && 1581 !(priv_ep->flags & EP_STALL_PENDING)) 1582 cdns3_start_all_request(priv_dev, priv_ep); 1583 } 1584 1585 void cdns3_rearm_transfer(struct cdns3_endpoint *priv_ep, u8 rearm) 1586 { 1587 struct cdns3_device *priv_dev = priv_ep->cdns3_dev; 1588 1589 cdns3_wa1_restore_cycle_bit(priv_ep); 1590 1591 if (rearm) { 1592 trace_cdns3_ring(priv_ep); 1593 1594 /* Cycle Bit must be updated before arming DMA. 
void cdns3_rearm_transfer(struct cdns3_endpoint *priv_ep, u8 rearm)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;

	cdns3_wa1_restore_cycle_bit(priv_ep);

	if (rearm) {
		trace_cdns3_ring(priv_ep);

		/* Cycle Bit must be updated before arming DMA. */
		wmb();
		writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);

		__cdns3_gadget_wakeup(priv_dev);

		trace_cdns3_doorbell_epx(priv_ep->name,
					 readl(&priv_dev->regs->ep_traddr));
	}
}

static void cdns3_reprogram_tdl(struct cdns3_endpoint *priv_ep)
{
	u16 tdl = priv_ep->pending_tdl;
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;

	if (tdl > EP_CMD_TDL_MAX) {
		tdl = EP_CMD_TDL_MAX;
		priv_ep->pending_tdl -= EP_CMD_TDL_MAX;
	} else {
		priv_ep->pending_tdl = 0;
	}

	writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL, &priv_dev->regs->ep_cmd);
}
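/*
 * Worked example for cdns3_reprogram_tdl(), again assuming EP_CMD_TDL_MAX
 * is 0x7f (127): with pending_tdl == 200 the first call programs 127 and
 * leaves pending_tdl == 73, which a later IOT interrupt programs in full.
 */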
1684 */ 1685 if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && 1686 !priv_ep->wa1_set) { 1687 if (!priv_ep->dir) { 1688 u32 ep_cfg = readl(&priv_dev->regs->ep_cfg); 1689 1690 ep_cfg &= ~EP_CFG_ENABLE; 1691 writel(ep_cfg, &priv_dev->regs->ep_cfg); 1692 priv_ep->flags &= ~EP_QUIRK_ISO_OUT_EN; 1693 } 1694 cdns3_transfer_completed(priv_dev, priv_ep); 1695 } else if (!(priv_ep->flags & EP_STALLED) && 1696 !(priv_ep->flags & EP_STALL_PENDING)) { 1697 if (priv_ep->flags & EP_DEFERRED_DRDY) { 1698 priv_ep->flags &= ~EP_DEFERRED_DRDY; 1699 cdns3_start_all_request(priv_dev, priv_ep); 1700 } else { 1701 cdns3_rearm_transfer(priv_ep, 1702 priv_ep->wa1_set); 1703 } 1704 } 1705 } 1706 1707 if ((ep_sts_reg & EP_STS_IOC) || (ep_sts_reg & EP_STS_ISP) || 1708 (ep_sts_reg & EP_STS_IOT)) { 1709 if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN) { 1710 if (ep_sts_reg & EP_STS_ISP) 1711 priv_ep->flags |= EP_QUIRK_END_TRANSFER; 1712 else 1713 priv_ep->flags &= ~EP_QUIRK_END_TRANSFER; 1714 } 1715 1716 if (!priv_ep->use_streams) { 1717 if ((ep_sts_reg & EP_STS_IOC) || 1718 (ep_sts_reg & EP_STS_ISP)) { 1719 cdns3_transfer_completed(priv_dev, priv_ep); 1720 } else if ((priv_ep->flags & EP_TDLCHK_EN) & 1721 priv_ep->pending_tdl) { 1722 /* handle IOT with pending tdl */ 1723 cdns3_reprogram_tdl(priv_ep); 1724 } 1725 } else if (priv_ep->dir == USB_DIR_OUT) { 1726 priv_ep->ep_sts_pending |= ep_sts_reg; 1727 } else if (ep_sts_reg & EP_STS_IOT) { 1728 cdns3_transfer_completed(priv_dev, priv_ep); 1729 } 1730 } 1731 1732 /* 1733 * MD_EXIT interrupt sets when stream capable endpoint exits 1734 * from MOVE DATA state of Bulk IN/OUT stream protocol state machine 1735 */ 1736 if (priv_ep->dir == USB_DIR_OUT && (ep_sts_reg & EP_STS_MD_EXIT) && 1737 (priv_ep->ep_sts_pending & EP_STS_IOT) && priv_ep->use_streams) { 1738 priv_ep->ep_sts_pending = 0; 1739 cdns3_transfer_completed(priv_dev, priv_ep); 1740 } 1741 1742 /* 1743 * WA2: this condition should only be meet when 1744 * priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET or 1745 * priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN. 1746 * In other cases this interrupt will be disabled. 1747 */ 1748 if (ep_sts_reg & EP_STS_DESCMIS && priv_dev->dev_ver < DEV_VER_V2 && 1749 !(priv_ep->flags & EP_STALLED)) 1750 cdns3_wa2_descmissing_packet(priv_ep); 1751 1752 return 0; 1753 } 1754 1755 static void cdns3_disconnect_gadget(struct cdns3_device *priv_dev) 1756 { 1757 if (priv_dev->gadget_driver && priv_dev->gadget_driver->disconnect) 1758 priv_dev->gadget_driver->disconnect(&priv_dev->gadget); 1759 } 1760 1761 /** 1762 * cdns3_check_usb_interrupt_proceed - Processes interrupt related to device 1763 * @priv_dev: extended gadget object 1764 * @usb_ists: bitmap representation of device's reported interrupts 1765 * (usb_ists register value) 1766 */ 1767 static void cdns3_check_usb_interrupt_proceed(struct cdns3_device *priv_dev, 1768 u32 usb_ists) 1769 __must_hold(&priv_dev->lock) 1770 { 1771 int speed = 0; 1772 1773 trace_cdns3_usb_irq(priv_dev, usb_ists); 1774 if (usb_ists & USB_ISTS_L1ENTI) { 1775 /* 1776 * WORKAROUND: CDNS3 controller has issue with hardware resuming 1777 * from L1. To fix it, if any DMA transfer is pending driver 1778 * must starts driving resume signal immediately. 
1779 */ 1780 if (readl(&priv_dev->regs->drbl)) 1781 __cdns3_gadget_wakeup(priv_dev); 1782 } 1783 1784 /* Connection detected */ 1785 if (usb_ists & (USB_ISTS_CON2I | USB_ISTS_CONI)) { 1786 speed = cdns3_get_speed(priv_dev); 1787 priv_dev->gadget.speed = speed; 1788 usb_gadget_set_state(&priv_dev->gadget, USB_STATE_POWERED); 1789 cdns3_ep0_config(priv_dev); 1790 } 1791 1792 /* Disconnection detected */ 1793 if (usb_ists & (USB_ISTS_DIS2I | USB_ISTS_DISI)) { 1794 spin_unlock(&priv_dev->lock); 1795 cdns3_disconnect_gadget(priv_dev); 1796 spin_lock(&priv_dev->lock); 1797 priv_dev->gadget.speed = USB_SPEED_UNKNOWN; 1798 usb_gadget_set_state(&priv_dev->gadget, USB_STATE_NOTATTACHED); 1799 cdns3_hw_reset_eps_config(priv_dev); 1800 } 1801 1802 if (usb_ists & (USB_ISTS_L2ENTI | USB_ISTS_U3ENTI)) { 1803 if (priv_dev->gadget_driver && 1804 priv_dev->gadget_driver->suspend) { 1805 spin_unlock(&priv_dev->lock); 1806 priv_dev->gadget_driver->suspend(&priv_dev->gadget); 1807 spin_lock(&priv_dev->lock); 1808 } 1809 } 1810 1811 if (usb_ists & (USB_ISTS_L2EXTI | USB_ISTS_U3EXTI)) { 1812 if (priv_dev->gadget_driver && 1813 priv_dev->gadget_driver->resume) { 1814 spin_unlock(&priv_dev->lock); 1815 priv_dev->gadget_driver->resume(&priv_dev->gadget); 1816 spin_lock(&priv_dev->lock); 1817 } 1818 } 1819 1820 /* reset*/ 1821 if (usb_ists & (USB_ISTS_UWRESI | USB_ISTS_UHRESI | USB_ISTS_U2RESI)) { 1822 if (priv_dev->gadget_driver) { 1823 spin_unlock(&priv_dev->lock); 1824 usb_gadget_udc_reset(&priv_dev->gadget, 1825 priv_dev->gadget_driver); 1826 spin_lock(&priv_dev->lock); 1827 1828 /*read again to check the actual speed*/ 1829 speed = cdns3_get_speed(priv_dev); 1830 priv_dev->gadget.speed = speed; 1831 cdns3_hw_reset_eps_config(priv_dev); 1832 cdns3_ep0_config(priv_dev); 1833 } 1834 } 1835 } 1836 1837 /** 1838 * cdns3_device_irq_handler- interrupt handler for device part of controller 1839 * 1840 * @irq: irq number for cdns3 core device 1841 * @data: structure of cdns3 1842 * 1843 * Returns IRQ_HANDLED or IRQ_NONE 1844 */ 1845 static irqreturn_t cdns3_device_irq_handler(int irq, void *data) 1846 { 1847 struct cdns3_device *priv_dev = data; 1848 struct cdns *cdns = dev_get_drvdata(priv_dev->dev); 1849 irqreturn_t ret = IRQ_NONE; 1850 u32 reg; 1851 1852 if (cdns->in_lpm) 1853 return ret; 1854 1855 /* check USB device interrupt */ 1856 reg = readl(&priv_dev->regs->usb_ists); 1857 if (reg) { 1858 /* After masking interrupts the new interrupts won't be 1859 * reported in usb_ists/ep_ists. In order to not lose some 1860 * of them driver disables only detected interrupts. 1861 * They will be enabled ASAP after clearing source of 1862 * interrupt. This an unusual behavior only applies to 1863 * usb_ists register. 1864 */ 1865 reg = ~reg & readl(&priv_dev->regs->usb_ien); 1866 /* mask deferred interrupt. 
		writel(reg, &priv_dev->regs->usb_ien);
		ret = IRQ_WAKE_THREAD;
	}

	/* check endpoint interrupt */
	reg = readl(&priv_dev->regs->ep_ists);
	if (reg) {
		writel(0, &priv_dev->regs->ep_ien);
		ret = IRQ_WAKE_THREAD;
	}

	return ret;
}

/**
 * cdns3_device_thread_irq_handler - interrupt handler for device part
 * of controller
 *
 * @irq: irq number for cdns3 core device
 * @data: structure of cdns3
 *
 * Returns IRQ_HANDLED or IRQ_NONE
 */
static irqreturn_t cdns3_device_thread_irq_handler(int irq, void *data)
{
	struct cdns3_device *priv_dev = data;
	irqreturn_t ret = IRQ_NONE;
	unsigned long flags;
	unsigned int bit;
	unsigned long reg;

	spin_lock_irqsave(&priv_dev->lock, flags);

	reg = readl(&priv_dev->regs->usb_ists);
	if (reg) {
		writel(reg, &priv_dev->regs->usb_ists);
		writel(USB_IEN_INIT, &priv_dev->regs->usb_ien);
		cdns3_check_usb_interrupt_proceed(priv_dev, reg);
		ret = IRQ_HANDLED;
	}

	reg = readl(&priv_dev->regs->ep_ists);

	/* handle default endpoint OUT */
	if (reg & EP_ISTS_EP_OUT0) {
		cdns3_check_ep0_interrupt_proceed(priv_dev, USB_DIR_OUT);
		ret = IRQ_HANDLED;
	}

	/* handle default endpoint IN */
	if (reg & EP_ISTS_EP_IN0) {
		cdns3_check_ep0_interrupt_proceed(priv_dev, USB_DIR_IN);
		ret = IRQ_HANDLED;
	}

	/* check for interrupts from non-default endpoints; if none, exit */
	reg &= ~(EP_ISTS_EP_OUT0 | EP_ISTS_EP_IN0);
	if (!reg)
		goto irqend;

	for_each_set_bit(bit, &reg,
			 sizeof(u32) * BITS_PER_BYTE) {
		cdns3_check_ep_interrupt_proceed(priv_dev->eps[bit]);
		ret = IRQ_HANDLED;
	}

	if (priv_dev->dev_ver < DEV_VER_V2 && priv_dev->using_streams)
		cdns3_wa2_check_outq_status(priv_dev);

irqend:
	writel(~0, &priv_dev->regs->ep_ien);
	spin_unlock_irqrestore(&priv_dev->lock, flags);

	return ret;
}

/**
 * cdns3_ep_onchip_buffer_reserve - Try to reserve onchip buf for EP
 *
 * The real reservation will occur during write to EP_CFG register,
 * this function is used to check if the 'size' reservation is allowed.
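/*
 * Note on the for_each_set_bit() walk above: it relies on ep_ists and
 * priv_dev->eps[] sharing one layout - bit/index 0..15 are OUT endpoints
 * and 16..31 are IN endpoints, the same mapping produced by
 * cdns3_ep_addr_to_index().
 */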
/**
 * cdns3_ep_onchip_buffer_reserve - Try to reserve onchip buf for EP
 * @priv_dev: extended gadget object
 * @size: the size (KB) the EP would like to allocate
 * @is_in: endpoint direction
 *
 * The real reservation will occur during write to EP_CFG register,
 * this function is used to check if the 'size' reservation is allowed.
 *
 * Return 0 if the required size can be met or negative value on failure
 */
static int cdns3_ep_onchip_buffer_reserve(struct cdns3_device *priv_dev,
					  int size, int is_in)
{
	int remained;

	/* 2KB are reserved for EP0 */
	remained = priv_dev->onchip_buffers - priv_dev->onchip_used_size - 2;

	if (is_in) {
		if (remained < size)
			return -EPERM;

		priv_dev->onchip_used_size += size;
	} else {
		int required;

		/*
		 * All OUT EPs share the same chunk of on-chip memory, so
		 * the driver checks if it has already assigned enough buffers.
		 */
		if (priv_dev->out_mem_is_allocated >= size)
			return 0;

		required = size - priv_dev->out_mem_is_allocated;

		if (required > remained)
			return -EPERM;

		priv_dev->out_mem_is_allocated += required;
		priv_dev->onchip_used_size += required;
	}

	return 0;
}

static void cdns3_configure_dmult(struct cdns3_device *priv_dev,
				  struct cdns3_endpoint *priv_ep)
{
	struct cdns3_usb_regs __iomem *regs = priv_dev->regs;

	/* For dev_ver > DEV_VER_V2 DMULT is configured per endpoint */
	if (priv_dev->dev_ver <= DEV_VER_V2)
		writel(USB_CONF_DMULT, &regs->usb_conf);

	if (priv_dev->dev_ver == DEV_VER_V2)
		writel(USB_CONF2_EN_TDL_TRB, &regs->usb_conf2);

	if (priv_dev->dev_ver >= DEV_VER_V3 && priv_ep) {
		u32 mask;

		if (priv_ep->dir)
			mask = BIT(priv_ep->num + 16);
		else
			mask = BIT(priv_ep->num);

		if (priv_ep->type != USB_ENDPOINT_XFER_ISOC) {
			cdns3_set_register_bit(&regs->tdl_from_trb, mask);
			cdns3_set_register_bit(&regs->tdl_beh, mask);
			cdns3_set_register_bit(&regs->tdl_beh2, mask);
			cdns3_set_register_bit(&regs->dma_adv_td, mask);
		}

		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir)
			cdns3_set_register_bit(&regs->tdl_from_trb, mask);

		cdns3_set_register_bit(&regs->dtrans, mask);
	}
}
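/*
 * Illustration only (assumed endpoint): the per-endpoint mask bits written
 * above follow the same layout as cdns3_ep_addr_to_index() - OUT endpoints
 * occupy bits 0..15 and IN endpoints bits 16..31. For ep3:
 *
 *	mask = BIT(3 + 16);	// bit 19 selects ep3in
 *	mask = BIT(3);		// bit 3 selects ep3out
 */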
/**
 * cdns3_ep_config - Configure hardware endpoint
 * @priv_ep: extended endpoint object
 * @enable: set EP_CFG_ENABLE bit in ep_cfg register.
 */
int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
{
	bool is_iso_ep = (priv_ep->type == USB_ENDPOINT_XFER_ISOC);
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	u32 bEndpointAddress = priv_ep->num | priv_ep->dir;
	u32 max_packet_size = 0;
	u8 maxburst = 0;
	u32 ep_cfg = 0;
	u8 buffering;
	u8 mult = 0;
	int ret;

	buffering = CDNS3_EP_BUF_SIZE - 1;

	cdns3_configure_dmult(priv_dev, priv_ep);

	switch (priv_ep->type) {
	case USB_ENDPOINT_XFER_INT:
		ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_INT);

		if ((priv_dev->dev_ver == DEV_VER_V2 && !priv_ep->dir) ||
		    priv_dev->dev_ver > DEV_VER_V2)
			ep_cfg |= EP_CFG_TDL_CHK;
		break;
	case USB_ENDPOINT_XFER_BULK:
		ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_BULK);

		if ((priv_dev->dev_ver == DEV_VER_V2 && !priv_ep->dir) ||
		    priv_dev->dev_ver > DEV_VER_V2)
			ep_cfg |= EP_CFG_TDL_CHK;
		break;
	default:
		ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_ISOC);
		mult = CDNS3_EP_ISO_HS_MULT - 1;
		buffering = mult + 1;
	}

	switch (priv_dev->gadget.speed) {
	case USB_SPEED_FULL:
		max_packet_size = is_iso_ep ? 1023 : 64;
		break;
	case USB_SPEED_HIGH:
		max_packet_size = is_iso_ep ? 1024 : 512;
		break;
	case USB_SPEED_SUPER:
		/* Limitation of this driver: assume mult = 0 at SuperSpeed. */
		mult = 0;
		max_packet_size = 1024;
		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC) {
			maxburst = CDNS3_EP_ISO_SS_BURST - 1;
			buffering = (mult + 1) *
				    (maxburst + 1);

			if (priv_ep->interval > 1)
				buffering++;
		} else {
			maxburst = CDNS3_EP_BUF_SIZE - 1;
		}
		break;
	default:
		/* all other speeds are not supported */
		return -EINVAL;
	}

	if (max_packet_size == 1024)
		priv_ep->trb_burst_size = 128;
	else if (max_packet_size >= 512)
		priv_ep->trb_burst_size = 64;
	else
		priv_ep->trb_burst_size = 16;

	/* onchip buffer is only allocated before configuration */
	if (!priv_dev->hw_configured_flag) {
		ret = cdns3_ep_onchip_buffer_reserve(priv_dev, buffering + 1,
						     !!priv_ep->dir);
		if (ret) {
			dev_err(priv_dev->dev, "onchip mem is full, ep is invalid\n");
			return ret;
		}
	}

	if (enable)
		ep_cfg |= EP_CFG_ENABLE;

	if (priv_ep->use_streams && priv_dev->gadget.speed >= USB_SPEED_SUPER) {
		if (priv_dev->dev_ver >= DEV_VER_V3) {
			u32 mask = BIT(priv_ep->num + (priv_ep->dir ? 16 : 0));

			/*
			 * Stream capable endpoints are handled by using ep_tdl
			 * register. Other endpoints use TDL from TRB feature.
			 */
			cdns3_clear_register_bit(&priv_dev->regs->tdl_from_trb,
						 mask);
		}

		/* Enable Stream Bit TDL chk and SID chk */
		ep_cfg |= EP_CFG_STREAM_EN | EP_CFG_TDL_CHK | EP_CFG_SID_CHK;
	}

	ep_cfg |= EP_CFG_MAXPKTSIZE(max_packet_size) |
		  EP_CFG_MULT(mult) |
		  EP_CFG_BUFFERING(buffering) |
		  EP_CFG_MAXBURST(maxburst);

	cdns3_select_ep(priv_dev, bEndpointAddress);
	writel(ep_cfg, &priv_dev->regs->ep_cfg);
	priv_ep->flags |= EP_CONFIGURED;

	dev_dbg(priv_dev->dev, "Configure %s: with val %08x\n",
		priv_ep->name, ep_cfg);

	return 0;
}

/* Find correct direction for HW endpoint according to description */
static int cdns3_ep_dir_is_correct(struct usb_endpoint_descriptor *desc,
				   struct cdns3_endpoint *priv_ep)
{
	return (priv_ep->endpoint.caps.dir_in && usb_endpoint_dir_in(desc)) ||
	       (priv_ep->endpoint.caps.dir_out && usb_endpoint_dir_out(desc));
}

static struct
cdns3_endpoint *cdns3_find_available_ep(struct cdns3_device *priv_dev,
					struct usb_endpoint_descriptor *desc)
{
	struct usb_ep *ep;
	struct cdns3_endpoint *priv_ep;

	list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) {
		unsigned long num;
		int ret;
		/* ep name pattern is like epXin or epXout */
		char c[2] = {ep->name[2], '\0'};

		ret = kstrtoul(c, 10, &num);
		if (ret)
			return ERR_PTR(ret);

		priv_ep = ep_to_cdns3_ep(ep);
		if (cdns3_ep_dir_is_correct(desc, priv_ep)) {
			if (!(priv_ep->flags & EP_CLAIMED)) {
				priv_ep->num = num;
				return priv_ep;
			}
		}
	}

	return ERR_PTR(-ENOENT);
}
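/*
 * Example (illustrative only): for an endpoint named "ep3in",
 * cdns3_find_available_ep() parses ep->name[2] ('3') into num = 3 and, once
 * the endpoint is claimed, stores it in priv_ep->num. Note that this
 * single-character parse assumes single-digit endpoint numbers in the
 * "epXin"/"epXout" naming pattern.
 */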
/*
 * The Cadence IP has one limitation: all endpoints must be configured
 * (Type & MaxPacketSize) before setting configuration through the hardware
 * register. This means we can't change an endpoint's configuration after
 * set_configuration.
 *
 * This function sets the EP_CLAIMED flag, which is added when the gadget
 * driver uses usb_ep_autoconfig to configure a specific endpoint.
 * When the udc driver receives a set_configuration request,
 * it goes through all claimed endpoints and configures them
 * accordingly.
 *
 * At usb_ep_ops.enable/disable, we only enable and disable the endpoint
 * through the ep_cfg register, which can be changed after set_configuration,
 * and do some software operations accordingly.
 */
static struct
usb_ep *cdns3_gadget_match_ep(struct usb_gadget *gadget,
			      struct usb_endpoint_descriptor *desc,
			      struct usb_ss_ep_comp_descriptor *comp_desc)
{
	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
	struct cdns3_endpoint *priv_ep;
	unsigned long flags;

	priv_ep = cdns3_find_available_ep(priv_dev, desc);
	if (IS_ERR(priv_ep)) {
		dev_err(priv_dev->dev, "no available ep\n");
		return NULL;
	}

	dev_dbg(priv_dev->dev, "match endpoint: %s\n", priv_ep->name);

	spin_lock_irqsave(&priv_dev->lock, flags);
	priv_ep->endpoint.desc = desc;
	priv_ep->dir = usb_endpoint_dir_in(desc) ? USB_DIR_IN : USB_DIR_OUT;
	priv_ep->type = usb_endpoint_type(desc);
	priv_ep->flags |= EP_CLAIMED;
	priv_ep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0;

	spin_unlock_irqrestore(&priv_dev->lock, flags);
	return &priv_ep->endpoint;
}

/**
 * cdns3_gadget_ep_alloc_request - Allocates request
 * @ep: endpoint object associated with request
 * @gfp_flags: gfp flags
 *
 * Returns allocated request address, NULL on allocation error
 */
struct usb_request *cdns3_gadget_ep_alloc_request(struct usb_ep *ep,
						  gfp_t gfp_flags)
{
	struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
	struct cdns3_request *priv_req;

	priv_req = kzalloc(sizeof(*priv_req), gfp_flags);
	if (!priv_req)
		return NULL;

	priv_req->priv_ep = priv_ep;

	trace_cdns3_alloc_request(priv_req);
	return &priv_req->request;
}

/**
 * cdns3_gadget_ep_free_request - Free memory occupied by request
 * @ep: endpoint object associated with request
 * @request: request to free memory
 */
void cdns3_gadget_ep_free_request(struct usb_ep *ep,
				  struct usb_request *request)
{
	struct cdns3_request *priv_req = to_cdns3_request(request);

	if (priv_req->aligned_buf)
		priv_req->aligned_buf->in_use = 0;

	trace_cdns3_free_request(priv_req);
	kfree(priv_req);
}
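/*
 * Minimal usage sketch from a function driver's point of view (illustrative
 * only; 'ep' is assumed to come from usb_ep_autoconfig()):
 *
 *	struct usb_request *req;
 *
 *	req = usb_ep_alloc_request(ep, GFP_KERNEL);	// ends up here
 *	if (!req)
 *		return -ENOMEM;
 *	...
 *	usb_ep_free_request(ep, req);			// and here
 */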
dev_err(priv_dev->dev, "usbss: missing wMaxPacketSize\n"); 2296 return -EINVAL; 2297 } 2298 2299 if (dev_WARN_ONCE(priv_dev->dev, priv_ep->flags & EP_ENABLED, 2300 "%s is already enabled\n", priv_ep->name)) 2301 return 0; 2302 2303 spin_lock_irqsave(&priv_dev->lock, flags); 2304 2305 priv_ep->endpoint.desc = desc; 2306 priv_ep->type = usb_endpoint_type(desc); 2307 priv_ep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0; 2308 2309 if (priv_ep->interval > ISO_MAX_INTERVAL && 2310 priv_ep->type == USB_ENDPOINT_XFER_ISOC) { 2311 dev_err(priv_dev->dev, "Driver is limited to %d period\n", 2312 ISO_MAX_INTERVAL); 2313 2314 ret = -EINVAL; 2315 goto exit; 2316 } 2317 2318 bEndpointAddress = priv_ep->num | priv_ep->dir; 2319 cdns3_select_ep(priv_dev, bEndpointAddress); 2320 2321 /* 2322 * For some versions of controller at some point during ISO OUT traffic 2323 * DMA reads Transfer Ring for the EP which has never got doorbell. 2324 * This issue was detected only on simulation, but to avoid this issue 2325 * driver add protection against it. To fix it driver enable ISO OUT 2326 * endpoint before setting DRBL. This special treatment of ISO OUT 2327 * endpoints are recommended by controller specification. 2328 */ 2329 if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir) 2330 enable = 0; 2331 2332 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) { 2333 /* 2334 * Enable stream support (SS mode) related interrupts 2335 * in EP_STS_EN Register 2336 */ 2337 if (priv_dev->gadget.speed >= USB_SPEED_SUPER) { 2338 reg |= EP_STS_EN_IOTEN | EP_STS_EN_PRIMEEEN | 2339 EP_STS_EN_SIDERREN | EP_STS_EN_MD_EXITEN | 2340 EP_STS_EN_STREAMREN; 2341 priv_ep->use_streams = true; 2342 ret = cdns3_ep_config(priv_ep, enable); 2343 priv_dev->using_streams |= true; 2344 } 2345 } else { 2346 ret = cdns3_ep_config(priv_ep, enable); 2347 } 2348 2349 if (ret) 2350 goto exit; 2351 2352 ret = cdns3_allocate_trb_pool(priv_ep); 2353 if (ret) 2354 goto exit; 2355 2356 bEndpointAddress = priv_ep->num | priv_ep->dir; 2357 cdns3_select_ep(priv_dev, bEndpointAddress); 2358 2359 trace_cdns3_gadget_ep_enable(priv_ep); 2360 2361 writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd); 2362 2363 ret = readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val, 2364 !(val & (EP_CMD_CSTALL | EP_CMD_EPRST)), 2365 1, 1000); 2366 2367 if (unlikely(ret)) { 2368 cdns3_free_trb_pool(priv_ep); 2369 ret = -EINVAL; 2370 goto exit; 2371 } 2372 2373 /* enable interrupt for selected endpoint */ 2374 cdns3_set_register_bit(&priv_dev->regs->ep_ien, 2375 BIT(cdns3_ep_addr_to_index(bEndpointAddress))); 2376 2377 if (priv_dev->dev_ver < DEV_VER_V2) 2378 cdns3_wa2_enable_detection(priv_dev, priv_ep, reg); 2379 2380 writel(reg, &priv_dev->regs->ep_sts_en); 2381 2382 ep->desc = desc; 2383 priv_ep->flags &= ~(EP_PENDING_REQUEST | EP_STALLED | EP_STALL_PENDING | 2384 EP_QUIRK_ISO_OUT_EN | EP_QUIRK_EXTRA_BUF_EN); 2385 priv_ep->flags |= EP_ENABLED | EP_UPDATE_EP_TRBADDR; 2386 priv_ep->wa1_set = 0; 2387 priv_ep->enqueue = 0; 2388 priv_ep->dequeue = 0; 2389 reg = readl(&priv_dev->regs->ep_sts); 2390 priv_ep->pcs = !!EP_STS_CCS(reg); 2391 priv_ep->ccs = !!EP_STS_CCS(reg); 2392 /* one TRB is reserved for link TRB used in DMULT mode*/ 2393 priv_ep->free_trbs = priv_ep->num_trbs - 1; 2394 exit: 2395 spin_unlock_irqrestore(&priv_dev->lock, flags); 2396 2397 return ret; 2398 } 2399 2400 /** 2401 * cdns3_gadget_ep_disable Disable endpoint 2402 * @ep: endpoint object 2403 * 2404 * Returns 0 on success, error code elsewhere 2405 */ 2406 static int 
/**
 * cdns3_gadget_ep_disable - Disable endpoint
 * @ep: endpoint object
 *
 * Returns 0 on success, error code elsewhere
 */
static int cdns3_gadget_ep_disable(struct usb_ep *ep)
{
	struct cdns3_endpoint *priv_ep;
	struct cdns3_request *priv_req;
	struct cdns3_device *priv_dev;
	struct usb_request *request;
	unsigned long flags;
	int ret = 0;
	u32 ep_cfg;
	int val;

	if (!ep) {
		pr_err("usbss: invalid parameters\n");
		return -EINVAL;
	}

	priv_ep = ep_to_cdns3_ep(ep);
	priv_dev = priv_ep->cdns3_dev;

	if (dev_WARN_ONCE(priv_dev->dev, !(priv_ep->flags & EP_ENABLED),
			  "%s is already disabled\n", priv_ep->name))
		return 0;

	spin_lock_irqsave(&priv_dev->lock, flags);

	trace_cdns3_gadget_ep_disable(priv_ep);

	cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress);

	ep_cfg = readl(&priv_dev->regs->ep_cfg);
	ep_cfg &= ~EP_CFG_ENABLE;
	writel(ep_cfg, &priv_dev->regs->ep_cfg);

	/*
	 * Driver needs some time before resetting the endpoint.
	 * It needs to wait for the DBUSY bit to clear or for the timeout to
	 * expire. 10us is enough time for the controller to stop the transfer.
	 */
	readl_poll_timeout_atomic(&priv_dev->regs->ep_sts, val,
				  !(val & EP_STS_DBUSY), 1, 10);
	writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd);

	ret = readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
					!(val & (EP_CMD_CSTALL | EP_CMD_EPRST)),
					1, 1000);
	if (unlikely(ret))
		dev_err(priv_dev->dev, "Timeout: %s resetting failed.\n",
			priv_ep->name);

	while (!list_empty(&priv_ep->pending_req_list)) {
		request = cdns3_next_request(&priv_ep->pending_req_list);

		cdns3_gadget_giveback(priv_ep, to_cdns3_request(request),
				      -ESHUTDOWN);
	}

	while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
		priv_req = cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);

		kfree(priv_req->request.buf);
		cdns3_gadget_ep_free_request(&priv_ep->endpoint,
					     &priv_req->request);
		list_del_init(&priv_req->list);
		--priv_ep->wa2_counter;
	}

	while (!list_empty(&priv_ep->deferred_req_list)) {
		request = cdns3_next_request(&priv_ep->deferred_req_list);

		cdns3_gadget_giveback(priv_ep, to_cdns3_request(request),
				      -ESHUTDOWN);
	}

	priv_ep->descmis_req = NULL;

	ep->desc = NULL;
	priv_ep->flags &= ~EP_ENABLED;
	priv_ep->use_streams = false;

	spin_unlock_irqrestore(&priv_dev->lock, flags);

	return ret;
}
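/*
 * A function driver sees the teardown above as a burst of ->complete()
 * callbacks with request->status == -ESHUTDOWN. A minimal sketch of a
 * completion handler that tolerates it (illustrative only):
 *
 *	static void my_complete(struct usb_ep *ep, struct usb_request *req)
 *	{
 *		if (req->status == -ESHUTDOWN || req->status == -ECONNRESET)
 *			return;	// endpoint disabled or request dequeued
 *		...
 *	}
 */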
/**
 * __cdns3_gadget_ep_queue - Transfer data on endpoint
 * @ep: endpoint object
 * @request: request object
 * @gfp_flags: gfp flags
 *
 * Returns 0 on success, error code elsewhere
 */
static int __cdns3_gadget_ep_queue(struct usb_ep *ep,
				   struct usb_request *request,
				   gfp_t gfp_flags)
{
	struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct cdns3_request *priv_req;
	int ret = 0;

	request->actual = 0;
	request->status = -EINPROGRESS;
	priv_req = to_cdns3_request(request);
	trace_cdns3_ep_queue(priv_req);

	if (priv_dev->dev_ver < DEV_VER_V2) {
		ret = cdns3_wa2_gadget_ep_queue(priv_dev, priv_ep,
						priv_req);

		if (ret == EINPROGRESS)
			return 0;
	}

	ret = cdns3_prepare_aligned_request_buf(priv_req);
	if (ret < 0)
		return ret;

	ret = usb_gadget_map_request_by_dev(priv_dev->sysdev, request,
					    usb_endpoint_dir_in(ep->desc));
	if (ret)
		return ret;

	list_add_tail(&request->list, &priv_ep->deferred_req_list);

	/*
	 * For a stream capable endpoint, start the request only if the prime
	 * IRQ flag is set.
	 * If the hardware endpoint configuration has not been set yet, just
	 * queue the request in the deferred list. The transfer will be
	 * started in cdns3_set_hw_configuration.
	 */
	if (!request->stream_id) {
		if (priv_dev->hw_configured_flag &&
		    !(priv_ep->flags & EP_STALLED) &&
		    !(priv_ep->flags & EP_STALL_PENDING))
			cdns3_start_all_request(priv_dev, priv_ep);
	} else {
		if (priv_dev->hw_configured_flag && priv_ep->prime_flag)
			cdns3_start_all_request(priv_dev, priv_ep);
	}

	return 0;
}

static int cdns3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
				 gfp_t gfp_flags)
{
	struct usb_request *zlp_request;
	struct cdns3_endpoint *priv_ep;
	struct cdns3_device *priv_dev;
	unsigned long flags;
	int ret;

	if (!request || !ep)
		return -EINVAL;

	priv_ep = ep_to_cdns3_ep(ep);
	priv_dev = priv_ep->cdns3_dev;

	spin_lock_irqsave(&priv_dev->lock, flags);

	ret = __cdns3_gadget_ep_queue(ep, request, gfp_flags);

	if (ret == 0 && request->zero && request->length &&
	    (request->length % ep->maxpacket == 0)) {
		struct cdns3_request *priv_req;

		zlp_request = cdns3_gadget_ep_alloc_request(ep, GFP_ATOMIC);
		if (!zlp_request) {
			spin_unlock_irqrestore(&priv_dev->lock, flags);
			return -ENOMEM;
		}

		zlp_request->buf = priv_dev->zlp_buf;
		zlp_request->length = 0;

		priv_req = to_cdns3_request(zlp_request);
		priv_req->flags |= REQUEST_ZLP;

		dev_dbg(priv_dev->dev, "Queuing ZLP for endpoint: %s\n",
			priv_ep->name);
		ret = __cdns3_gadget_ep_queue(ep, zlp_request, gfp_flags);
	}

	spin_unlock_irqrestore(&priv_dev->lock, flags);
	return ret;
}
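/*
 * Queueing sketch (illustrative): a function driver that wants a transfer
 * terminated by a zero-length packet sets req->zero; the wrapper above then
 * queues an internal REQUEST_ZLP request whenever the length is a multiple
 * of wMaxPacketSize:
 *
 *	req->length = 512;	// == ep->maxpacket for HS bulk
 *	req->zero = 1;
 *	ret = usb_ep_queue(ep, req, GFP_ATOMIC);
 */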
/**
 * cdns3_gadget_ep_dequeue - Remove request from transfer queue
 * @ep: endpoint object associated with request
 * @request: request object
 *
 * Returns 0 on success, error code elsewhere
 */
int cdns3_gadget_ep_dequeue(struct usb_ep *ep,
			    struct usb_request *request)
{
	struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct usb_request *req, *req_temp;
	struct cdns3_request *priv_req;
	struct cdns3_trb *link_trb;
	u8 req_on_hw_ring = 0;
	unsigned long flags;
	int ret = 0;

	if (!ep || !request || !ep->desc)
		return -EINVAL;

	spin_lock_irqsave(&priv_dev->lock, flags);

	priv_req = to_cdns3_request(request);

	trace_cdns3_ep_dequeue(priv_req);

	cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress);

	list_for_each_entry_safe(req, req_temp, &priv_ep->pending_req_list,
				 list) {
		if (request == req) {
			req_on_hw_ring = 1;
			goto found;
		}
	}

	list_for_each_entry_safe(req, req_temp, &priv_ep->deferred_req_list,
				 list) {
		if (request == req)
			goto found;
	}

	goto not_found;

found:
	link_trb = priv_req->trb;

	/* Update ring only if removed request is on pending_req_list list */
	if (req_on_hw_ring && link_trb) {
		link_trb->buffer = cpu_to_le32(TRB_BUFFER(priv_ep->trb_pool_dma +
			((priv_req->end_trb + 1) * TRB_SIZE)));
		link_trb->control = cpu_to_le32((le32_to_cpu(link_trb->control) & TRB_CYCLE) |
						TRB_TYPE(TRB_LINK) | TRB_CHAIN);

		if (priv_ep->wa1_trb == priv_req->trb)
			cdns3_wa1_restore_cycle_bit(priv_ep);
	}

	cdns3_gadget_giveback(priv_ep, priv_req, -ECONNRESET);

not_found:
	spin_unlock_irqrestore(&priv_dev->lock, flags);
	return ret;
}

/**
 * __cdns3_gadget_ep_set_halt - Sets stall on selected endpoint
 * @priv_ep: endpoint object to set stall on.
 *
 * Should be called after acquiring spin_lock and selecting ep
 */
void __cdns3_gadget_ep_set_halt(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;

	trace_cdns3_halt(priv_ep, 1, 0);

	if (!(priv_ep->flags & EP_STALLED)) {
		u32 ep_sts_reg = readl(&priv_dev->regs->ep_sts);

		if (!(ep_sts_reg & EP_STS_DBUSY))
			cdns3_ep_stall_flush(priv_ep);
		else
			priv_ep->flags |= EP_STALL_PENDING;
	}
}

/**
 * __cdns3_gadget_ep_clear_halt - Clears stall on selected endpoint
 * @priv_ep: endpoint object to clear stall on
 *
 * Should be called after acquiring spin_lock and selecting ep
 */
int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct usb_request *request;
	struct cdns3_request *priv_req;
	struct cdns3_trb *trb = NULL;
	int ret;
	int val;

	trace_cdns3_halt(priv_ep, 0, 0);

	request = cdns3_next_request(&priv_ep->pending_req_list);
	if (request) {
		priv_req = to_cdns3_request(request);
		trb = priv_req->trb;
		if (trb)
			trb->control = trb->control ^ cpu_to_le32(TRB_CYCLE);
	}

	writel(EP_CMD_CSTALL | EP_CMD_EPRST, &priv_dev->regs->ep_cmd);

	/* wait for EPRST cleared */
	ret = readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
					!(val & EP_CMD_EPRST), 1, 100);
	if (ret)
		return -EINVAL;

	priv_ep->flags &= ~(EP_STALLED | EP_STALL_PENDING);

	if (request) {
		if (trb)
			trb->control = trb->control ^ cpu_to_le32(TRB_CYCLE);

		cdns3_rearm_transfer(priv_ep, 1);
	}

	cdns3_start_all_request(priv_dev, priv_ep);
	return ret;
}

/**
 * cdns3_gadget_ep_set_halt - Sets/clears stall on selected endpoint
 * @ep: endpoint object to set/clear stall on
 * @value: 1 for set stall, 0 for clear stall
 *
 * Returns 0 on success, error code elsewhere
 */
int cdns3_gadget_ep_set_halt(struct usb_ep *ep, int value)
{
	struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	unsigned long flags;
	int ret = 0;

	if (!(priv_ep->flags & EP_ENABLED))
		return -EPERM;

	spin_lock_irqsave(&priv_dev->lock, flags);

	cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress);

	if (!value) {
		priv_ep->flags &= ~EP_WEDGE;
		ret = __cdns3_gadget_ep_clear_halt(priv_ep);
	} else {
		__cdns3_gadget_ep_set_halt(priv_ep);
	}

	spin_unlock_irqrestore(&priv_dev->lock, flags);

	return ret;
}

extern const struct usb_ep_ops cdns3_gadget_ep0_ops;

static const struct usb_ep_ops cdns3_gadget_ep_ops = {
	.enable = cdns3_gadget_ep_enable,
	.disable = cdns3_gadget_ep_disable,
	.alloc_request = cdns3_gadget_ep_alloc_request,
	.free_request = cdns3_gadget_ep_free_request,
	.queue = cdns3_gadget_ep_queue,
	.dequeue = cdns3_gadget_ep_dequeue,
	.set_halt = cdns3_gadget_ep_set_halt,
	.set_wedge = cdns3_gadget_ep_set_wedge,
};
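/*
 * The ops above are reached through the standard gadget endpoint API
 * (illustrative mapping, assuming 'ep' belongs to this controller):
 *
 *	usb_ep_set_halt(ep);	 // -> cdns3_gadget_ep_set_halt(ep, 1)
 *	usb_ep_clear_halt(ep);	 // -> cdns3_gadget_ep_set_halt(ep, 0)
 *	usb_ep_set_wedge(ep);	 // -> cdns3_gadget_ep_set_wedge(ep)
 *	usb_ep_dequeue(ep, req); // -> cdns3_gadget_ep_dequeue(ep, req)
 */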
/**
 * cdns3_gadget_get_frame - Returns number of actual ITP frame
 * @gadget: gadget object
 *
 * Returns number of actual ITP frame
 */
static int cdns3_gadget_get_frame(struct usb_gadget *gadget)
{
	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);

	return readl(&priv_dev->regs->usb_itpn);
}

int __cdns3_gadget_wakeup(struct cdns3_device *priv_dev)
{
	enum usb_device_speed speed;

	speed = cdns3_get_speed(priv_dev);

	if (speed >= USB_SPEED_SUPER)
		return 0;

	/* Start driving resume signaling to indicate remote wakeup. */
	writel(USB_CONF_LGO_L0, &priv_dev->regs->usb_conf);

	return 0;
}

static int cdns3_gadget_wakeup(struct usb_gadget *gadget)
{
	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&priv_dev->lock, flags);
	ret = __cdns3_gadget_wakeup(priv_dev);
	spin_unlock_irqrestore(&priv_dev->lock, flags);
	return ret;
}

static int cdns3_gadget_set_selfpowered(struct usb_gadget *gadget,
					int is_selfpowered)
{
	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
	unsigned long flags;

	spin_lock_irqsave(&priv_dev->lock, flags);
	priv_dev->is_selfpowered = !!is_selfpowered;
	spin_unlock_irqrestore(&priv_dev->lock, flags);
	return 0;
}

static int cdns3_gadget_pullup(struct usb_gadget *gadget, int is_on)
{
	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);

	if (is_on) {
		writel(USB_CONF_DEVEN, &priv_dev->regs->usb_conf);
	} else {
		writel(~0, &priv_dev->regs->ep_ists);
		writel(~0, &priv_dev->regs->usb_ists);
		writel(USB_CONF_DEVDS, &priv_dev->regs->usb_conf);
	}

	return 0;
}
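/*
 * Soft-connect sketch (illustrative): the UDC core invokes the ->pullup op
 * above when the data pull-up is toggled, e.g.:
 *
 *	usb_gadget_connect(gadget);	// DEVEN: attach to the bus
 *	usb_gadget_disconnect(gadget);	// DEVDS: detach, status bits cleared
 */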
static void cdns3_gadget_config(struct cdns3_device *priv_dev)
{
	struct cdns3_usb_regs __iomem *regs = priv_dev->regs;
	u32 reg;

	cdns3_ep0_config(priv_dev);

	/* enable interrupts for endpoint 0 (in and out) */
	writel(EP_IEN_EP_OUT0 | EP_IEN_EP_IN0, &regs->ep_ien);

	/*
	 * Driver needs to modify LFPS minimal U1 Exit time for DEV_VER_TI_V1
	 * revision of controller.
	 */
	if (priv_dev->dev_ver == DEV_VER_TI_V1) {
		reg = readl(&regs->dbg_link1);

		reg &= ~DBG_LINK1_LFPS_MIN_GEN_U1_EXIT_MASK;
		reg |= DBG_LINK1_LFPS_MIN_GEN_U1_EXIT(0x55) |
		       DBG_LINK1_LFPS_MIN_GEN_U1_EXIT_SET;
		writel(reg, &regs->dbg_link1);
	}

	/*
	 * By default, some platforms have protected access to memory
	 * enabled. This causes problems with caches, so the driver restores
	 * non-secure access to memory.
	 */
	reg = readl(&regs->dma_axi_ctrl);
	reg |= DMA_AXI_CTRL_MARPROT(DMA_AXI_CTRL_NON_SECURE) |
	       DMA_AXI_CTRL_MAWPROT(DMA_AXI_CTRL_NON_SECURE);
	writel(reg, &regs->dma_axi_ctrl);

	/* enable generic interrupt */
	writel(USB_IEN_INIT, &regs->usb_ien);
	writel(USB_CONF_CLK2OFFDS | USB_CONF_L1DS, &regs->usb_conf);
	/* keep Fast Access bit */
	writel(PUSB_PWR_FST_REG_ACCESS, &priv_dev->regs->usb_pwr);

	cdns3_configure_dmult(priv_dev, NULL);
}

/**
 * cdns3_gadget_udc_start - Gadget start
 * @gadget: gadget object
 * @driver: driver which operates on this gadget
 *
 * Returns 0 on success, error code elsewhere
 */
static int cdns3_gadget_udc_start(struct usb_gadget *gadget,
				  struct usb_gadget_driver *driver)
{
	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
	unsigned long flags;
	enum usb_device_speed max_speed = driver->max_speed;

	spin_lock_irqsave(&priv_dev->lock, flags);
	priv_dev->gadget_driver = driver;

	/* limit speed if necessary */
	max_speed = min(driver->max_speed, gadget->max_speed);

	switch (max_speed) {
	case USB_SPEED_FULL:
		writel(USB_CONF_SFORCE_FS, &priv_dev->regs->usb_conf);
		writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf);
		break;
	case USB_SPEED_HIGH:
		writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf);
		break;
	case USB_SPEED_SUPER:
		break;
	default:
		dev_err(priv_dev->dev,
			"invalid maximum_speed parameter %d\n",
			max_speed);
		fallthrough;
	case USB_SPEED_UNKNOWN:
		/* default to superspeed */
		max_speed = USB_SPEED_SUPER;
		break;
	}

	cdns3_gadget_config(priv_dev);
	spin_unlock_irqrestore(&priv_dev->lock, flags);
	return 0;
}

/**
 * cdns3_gadget_udc_stop - Stops gadget
 * @gadget: gadget object
 *
 * Returns 0
 */
static int cdns3_gadget_udc_stop(struct usb_gadget *gadget)
{
	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
	struct cdns3_endpoint *priv_ep;
	u32 bEndpointAddress;
	struct usb_ep *ep;
	int val;

	priv_dev->gadget_driver = NULL;

	priv_dev->onchip_used_size = 0;
	priv_dev->out_mem_is_allocated = 0;
	priv_dev->gadget.speed = USB_SPEED_UNKNOWN;

	list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) {
		priv_ep = ep_to_cdns3_ep(ep);
		bEndpointAddress = priv_ep->num | priv_ep->dir;
		cdns3_select_ep(priv_dev, bEndpointAddress);
		writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
		readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
					  !(val & EP_CMD_EPRST), 1, 100);

		priv_ep->flags &= ~EP_CLAIMED;
	}

	/* disable interrupt for device */
	writel(0, &priv_dev->regs->usb_ien);
	writel(0, &priv_dev->regs->usb_pwr);
	writel(USB_CONF_DEVDS, &priv_dev->regs->usb_conf);

	return 0;
}

static const struct usb_gadget_ops cdns3_gadget_ops = {
	.get_frame = cdns3_gadget_get_frame,
	.wakeup = cdns3_gadget_wakeup,
	.set_selfpowered = cdns3_gadget_set_selfpowered,
	.pullup = cdns3_gadget_pullup,
	.udc_start = cdns3_gadget_udc_start,
	.udc_stop = cdns3_gadget_udc_stop,
	.match_ep = cdns3_gadget_match_ep,
};
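/*
 * Speed-limit sketch (illustrative): with a gadget driver built for high
 * speed only, udc_start above disables the USB3 PHY path:
 *
 *	driver->max_speed = USB_SPEED_HIGH;
 *	// min(HIGH, gadget->max_speed) == HIGH -> USB_CONF_USB3DIS written
 */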
static void cdns3_free_all_eps(struct cdns3_device *priv_dev)
{
	int i;

	/* ep0 OUT points to ep0 IN. */
	priv_dev->eps[16] = NULL;

	for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++)
		if (priv_dev->eps[i]) {
			cdns3_free_trb_pool(priv_dev->eps[i]);
			devm_kfree(priv_dev->dev, priv_dev->eps[i]);
		}
}

/**
 * cdns3_init_eps - Initializes software endpoints of gadget
 * @priv_dev: extended gadget object
 *
 * Returns 0 on success, error code elsewhere
 */
static int cdns3_init_eps(struct cdns3_device *priv_dev)
{
	u32 ep_enabled_reg, iso_ep_reg;
	struct cdns3_endpoint *priv_ep;
	int ep_dir, ep_number;
	u32 ep_mask;
	int ret = 0;
	int i;

	/* Read it from USB_CAP3 to USB_CAP5 */
	ep_enabled_reg = readl(&priv_dev->regs->usb_cap3);
	iso_ep_reg = readl(&priv_dev->regs->usb_cap4);

	dev_dbg(priv_dev->dev, "Initializing non-zero endpoints\n");

	for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++) {
		ep_dir = i >> 4;	/* i div 16 */
		ep_number = i & 0xF;	/* i % 16 */
		ep_mask = BIT(i);

		if (!(ep_enabled_reg & ep_mask))
			continue;

		if (ep_dir && !ep_number) {
			priv_dev->eps[i] = priv_dev->eps[0];
			continue;
		}

		priv_ep = devm_kzalloc(priv_dev->dev, sizeof(*priv_ep),
				       GFP_KERNEL);
		if (!priv_ep)
			goto err;

		/* set parent of endpoint object */
		priv_ep->cdns3_dev = priv_dev;
		priv_dev->eps[i] = priv_ep;
		priv_ep->num = ep_number;
		priv_ep->dir = ep_dir ? USB_DIR_IN : USB_DIR_OUT;

		if (!ep_number) {
			ret = cdns3_init_ep0(priv_dev, priv_ep);
			if (ret) {
				dev_err(priv_dev->dev, "Failed to init ep0\n");
				goto err;
			}
		} else {
			snprintf(priv_ep->name, sizeof(priv_ep->name), "ep%d%s",
				 ep_number, ep_dir ? "in" : "out");
			priv_ep->endpoint.name = priv_ep->name;

			usb_ep_set_maxpacket_limit(&priv_ep->endpoint,
						   CDNS3_EP_MAX_PACKET_LIMIT);
			priv_ep->endpoint.max_streams = CDNS3_EP_MAX_STREAMS;
			priv_ep->endpoint.ops = &cdns3_gadget_ep_ops;
			if (ep_dir)
				priv_ep->endpoint.caps.dir_in = 1;
			else
				priv_ep->endpoint.caps.dir_out = 1;

			if (iso_ep_reg & ep_mask)
				priv_ep->endpoint.caps.type_iso = 1;

			priv_ep->endpoint.caps.type_bulk = 1;
			priv_ep->endpoint.caps.type_int = 1;

			list_add_tail(&priv_ep->endpoint.ep_list,
				      &priv_dev->gadget.ep_list);
		}

		priv_ep->flags = 0;

		dev_dbg(priv_dev->dev, "Initialized %s support: %s %s\n",
			priv_ep->name,
			priv_ep->endpoint.caps.type_bulk ? "BULK, INT" : "",
			priv_ep->endpoint.caps.type_iso ? "ISO" : "");

		INIT_LIST_HEAD(&priv_ep->pending_req_list);
		INIT_LIST_HEAD(&priv_ep->deferred_req_list);
		INIT_LIST_HEAD(&priv_ep->wa2_descmiss_req_list);
	}

	return 0;
err:
	cdns3_free_all_eps(priv_dev);
	return -ENOMEM;
}
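/*
 * Index layout of priv_dev->eps[] as used above (illustrative): OUT
 * endpoints occupy slots 0..15 and IN endpoints slots 16..31, matching
 * cdns3_ep_addr_to_index(). For example:
 *
 *	i = 3;		// ep3out: ep_dir = 0, ep_number = 3
 *	i = 16 + 3;	// ep3in:  ep_dir = 1, ep_number = 3
 */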
"ISO" : ""); 3068 3069 INIT_LIST_HEAD(&priv_ep->pending_req_list); 3070 INIT_LIST_HEAD(&priv_ep->deferred_req_list); 3071 INIT_LIST_HEAD(&priv_ep->wa2_descmiss_req_list); 3072 } 3073 3074 return 0; 3075 err: 3076 cdns3_free_all_eps(priv_dev); 3077 return -ENOMEM; 3078 } 3079 3080 static void cdns3_gadget_release(struct device *dev) 3081 { 3082 struct cdns3_device *priv_dev = container_of(dev, 3083 struct cdns3_device, gadget.dev); 3084 3085 kfree(priv_dev); 3086 } 3087 3088 static void cdns3_gadget_exit(struct cdns *cdns) 3089 { 3090 struct cdns3_device *priv_dev; 3091 3092 priv_dev = cdns->gadget_dev; 3093 3094 3095 pm_runtime_mark_last_busy(cdns->dev); 3096 pm_runtime_put_autosuspend(cdns->dev); 3097 3098 usb_del_gadget(&priv_dev->gadget); 3099 devm_free_irq(cdns->dev, cdns->dev_irq, priv_dev); 3100 3101 cdns3_free_all_eps(priv_dev); 3102 3103 while (!list_empty(&priv_dev->aligned_buf_list)) { 3104 struct cdns3_aligned_buf *buf; 3105 3106 buf = cdns3_next_align_buf(&priv_dev->aligned_buf_list); 3107 dma_free_noncoherent(priv_dev->sysdev, buf->size, 3108 buf->buf, 3109 buf->dma, 3110 buf->dir); 3111 3112 list_del(&buf->list); 3113 kfree(buf); 3114 } 3115 3116 dma_free_coherent(priv_dev->sysdev, 8, priv_dev->setup_buf, 3117 priv_dev->setup_dma); 3118 dma_pool_destroy(priv_dev->eps_dma_pool); 3119 3120 kfree(priv_dev->zlp_buf); 3121 usb_put_gadget(&priv_dev->gadget); 3122 cdns->gadget_dev = NULL; 3123 cdns_drd_gadget_off(cdns); 3124 } 3125 3126 static int cdns3_gadget_start(struct cdns *cdns) 3127 { 3128 struct cdns3_device *priv_dev; 3129 u32 max_speed; 3130 int ret; 3131 3132 priv_dev = kzalloc(sizeof(*priv_dev), GFP_KERNEL); 3133 if (!priv_dev) 3134 return -ENOMEM; 3135 3136 usb_initialize_gadget(cdns->dev, &priv_dev->gadget, 3137 cdns3_gadget_release); 3138 cdns->gadget_dev = priv_dev; 3139 priv_dev->sysdev = cdns->dev; 3140 priv_dev->dev = cdns->dev; 3141 priv_dev->regs = cdns->dev_regs; 3142 3143 device_property_read_u16(priv_dev->dev, "cdns,on-chip-buff-size", 3144 &priv_dev->onchip_buffers); 3145 3146 if (priv_dev->onchip_buffers <= 0) { 3147 u32 reg = readl(&priv_dev->regs->usb_cap2); 3148 3149 priv_dev->onchip_buffers = USB_CAP2_ACTUAL_MEM_SIZE(reg); 3150 } 3151 3152 if (!priv_dev->onchip_buffers) 3153 priv_dev->onchip_buffers = 256; 3154 3155 max_speed = usb_get_maximum_speed(cdns->dev); 3156 3157 /* Check the maximum_speed parameter */ 3158 switch (max_speed) { 3159 case USB_SPEED_FULL: 3160 case USB_SPEED_HIGH: 3161 case USB_SPEED_SUPER: 3162 break; 3163 default: 3164 dev_err(cdns->dev, "invalid maximum_speed parameter %d\n", 3165 max_speed); 3166 fallthrough; 3167 case USB_SPEED_UNKNOWN: 3168 /* default to superspeed */ 3169 max_speed = USB_SPEED_SUPER; 3170 break; 3171 } 3172 3173 /* fill gadget fields */ 3174 priv_dev->gadget.max_speed = max_speed; 3175 priv_dev->gadget.speed = USB_SPEED_UNKNOWN; 3176 priv_dev->gadget.ops = &cdns3_gadget_ops; 3177 priv_dev->gadget.name = "usb-ss-gadget"; 3178 priv_dev->gadget.quirk_avoids_skb_reserve = 1; 3179 priv_dev->gadget.irq = cdns->dev_irq; 3180 3181 spin_lock_init(&priv_dev->lock); 3182 INIT_WORK(&priv_dev->pending_status_wq, 3183 cdns3_pending_setup_status_handler); 3184 3185 INIT_WORK(&priv_dev->aligned_buf_wq, 3186 cdns3_free_aligned_request_buf); 3187 3188 /* initialize endpoint container */ 3189 INIT_LIST_HEAD(&priv_dev->gadget.ep_list); 3190 INIT_LIST_HEAD(&priv_dev->aligned_buf_list); 3191 priv_dev->eps_dma_pool = dma_pool_create("cdns3_eps_dma_pool", 3192 priv_dev->sysdev, 3193 TRB_RING_SIZE, 8, 0); 3194 if 
static int cdns3_gadget_start(struct cdns *cdns)
{
	struct cdns3_device *priv_dev;
	u32 max_speed;
	int ret;

	priv_dev = kzalloc(sizeof(*priv_dev), GFP_KERNEL);
	if (!priv_dev)
		return -ENOMEM;

	usb_initialize_gadget(cdns->dev, &priv_dev->gadget,
			      cdns3_gadget_release);
	cdns->gadget_dev = priv_dev;
	priv_dev->sysdev = cdns->dev;
	priv_dev->dev = cdns->dev;
	priv_dev->regs = cdns->dev_regs;

	device_property_read_u16(priv_dev->dev, "cdns,on-chip-buff-size",
				 &priv_dev->onchip_buffers);

	if (priv_dev->onchip_buffers <= 0) {
		u32 reg = readl(&priv_dev->regs->usb_cap2);

		priv_dev->onchip_buffers = USB_CAP2_ACTUAL_MEM_SIZE(reg);
	}

	if (!priv_dev->onchip_buffers)
		priv_dev->onchip_buffers = 256;

	max_speed = usb_get_maximum_speed(cdns->dev);

	/* Check the maximum_speed parameter */
	switch (max_speed) {
	case USB_SPEED_FULL:
	case USB_SPEED_HIGH:
	case USB_SPEED_SUPER:
		break;
	default:
		dev_err(cdns->dev, "invalid maximum_speed parameter %d\n",
			max_speed);
		fallthrough;
	case USB_SPEED_UNKNOWN:
		/* default to superspeed */
		max_speed = USB_SPEED_SUPER;
		break;
	}

	/* fill gadget fields */
	priv_dev->gadget.max_speed = max_speed;
	priv_dev->gadget.speed = USB_SPEED_UNKNOWN;
	priv_dev->gadget.ops = &cdns3_gadget_ops;
	priv_dev->gadget.name = "usb-ss-gadget";
	priv_dev->gadget.quirk_avoids_skb_reserve = 1;
	priv_dev->gadget.irq = cdns->dev_irq;

	spin_lock_init(&priv_dev->lock);
	INIT_WORK(&priv_dev->pending_status_wq,
		  cdns3_pending_setup_status_handler);

	INIT_WORK(&priv_dev->aligned_buf_wq,
		  cdns3_free_aligned_request_buf);

	/* initialize endpoint container */
	INIT_LIST_HEAD(&priv_dev->gadget.ep_list);
	INIT_LIST_HEAD(&priv_dev->aligned_buf_list);
	priv_dev->eps_dma_pool = dma_pool_create("cdns3_eps_dma_pool",
						 priv_dev->sysdev,
						 TRB_RING_SIZE, 8, 0);
	if (!priv_dev->eps_dma_pool) {
		dev_err(priv_dev->dev, "Failed to create TRB dma pool\n");
		ret = -ENOMEM;
		goto err1;
	}

	ret = cdns3_init_eps(priv_dev);
	if (ret) {
		dev_err(priv_dev->dev, "Failed to create endpoints\n");
		goto err1;
	}

	/* allocate memory for setup packet buffer */
	priv_dev->setup_buf = dma_alloc_coherent(priv_dev->sysdev, 8,
						 &priv_dev->setup_dma, GFP_DMA);
	if (!priv_dev->setup_buf) {
		ret = -ENOMEM;
		goto err2;
	}

	priv_dev->dev_ver = readl(&priv_dev->regs->usb_cap6);

	dev_dbg(priv_dev->dev, "Device Controller version: %08x\n",
		readl(&priv_dev->regs->usb_cap6));
	dev_dbg(priv_dev->dev, "USB Capabilities: %08x\n",
		readl(&priv_dev->regs->usb_cap1));
	dev_dbg(priv_dev->dev, "On-Chip memory configuration: %08x\n",
		readl(&priv_dev->regs->usb_cap2));

	priv_dev->dev_ver = GET_DEV_BASE_VERSION(priv_dev->dev_ver);
	if (priv_dev->dev_ver >= DEV_VER_V2)
		priv_dev->gadget.sg_supported = 1;

	priv_dev->zlp_buf = kzalloc(CDNS3_EP_ZLP_BUF_SIZE, GFP_KERNEL);
	if (!priv_dev->zlp_buf) {
		ret = -ENOMEM;
		goto err3;
	}

	/* add USB gadget device */
	ret = usb_add_gadget(&priv_dev->gadget);
	if (ret < 0) {
		dev_err(priv_dev->dev, "Failed to add gadget\n");
		goto err4;
	}

	return 0;
err4:
	kfree(priv_dev->zlp_buf);
err3:
	dma_free_coherent(priv_dev->sysdev, 8, priv_dev->setup_buf,
			  priv_dev->setup_dma);
err2:
	cdns3_free_all_eps(priv_dev);
err1:
	dma_pool_destroy(priv_dev->eps_dma_pool);

	usb_put_gadget(&priv_dev->gadget);
	cdns->gadget_dev = NULL;
	return ret;
}
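/*
 * Device-tree sketch (illustrative node and values): the optional
 * "cdns,on-chip-buff-size" u16 property read above overrides the on-chip
 * memory size reported in USB_CAP2, e.g.:
 *
 *	usb@f3000000 {
 *		compatible = "cdns,usb3";
 *		cdns,on-chip-buff-size = /bits/ 16 <48>;	// in KB
 *	};
 */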
static int __cdns3_gadget_init(struct cdns *cdns)
{
	int ret = 0;

	/* Ensure 32-bit DMA Mask in case we switched back from Host mode */
	ret = dma_set_mask_and_coherent(cdns->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(cdns->dev, "Failed to set dma mask: %d\n", ret);
		return ret;
	}

	cdns_drd_gadget_on(cdns);
	pm_runtime_get_sync(cdns->dev);

	ret = cdns3_gadget_start(cdns);
	if (ret)
		return ret;

	/*
	 * Because the interrupt line can be shared with other components in
	 * the driver, it can't use the IRQF_ONESHOT flag here.
	 */
	ret = devm_request_threaded_irq(cdns->dev, cdns->dev_irq,
					cdns3_device_irq_handler,
					cdns3_device_thread_irq_handler,
					IRQF_SHARED, dev_name(cdns->dev),
					cdns->gadget_dev);

	if (ret)
		goto err0;

	return 0;
err0:
	cdns3_gadget_exit(cdns);
	return ret;
}

static int cdns3_gadget_suspend(struct cdns *cdns, bool do_wakeup)
__must_hold(&cdns->lock)
{
	struct cdns3_device *priv_dev = cdns->gadget_dev;

	spin_unlock(&cdns->lock);
	cdns3_disconnect_gadget(priv_dev);
	spin_lock(&cdns->lock);

	priv_dev->gadget.speed = USB_SPEED_UNKNOWN;
	usb_gadget_set_state(&priv_dev->gadget, USB_STATE_NOTATTACHED);
	cdns3_hw_reset_eps_config(priv_dev);

	/* disable interrupt for device */
	writel(0, &priv_dev->regs->usb_ien);

	return 0;
}

static int cdns3_gadget_resume(struct cdns *cdns, bool hibernated)
{
	struct cdns3_device *priv_dev = cdns->gadget_dev;

	if (!priv_dev->gadget_driver)
		return 0;

	cdns3_gadget_config(priv_dev);
	if (hibernated)
		writel(USB_CONF_DEVEN, &priv_dev->regs->usb_conf);

	return 0;
}

/**
 * cdns3_gadget_init - initialize device structure
 *
 * @cdns: cdns instance
 *
 * This function initializes the gadget.
 */
int cdns3_gadget_init(struct cdns *cdns)
{
	struct cdns_role_driver *rdrv;

	rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL);
	if (!rdrv)
		return -ENOMEM;

	rdrv->start	= __cdns3_gadget_init;
	rdrv->stop	= cdns3_gadget_exit;
	rdrv->suspend	= cdns3_gadget_suspend;
	rdrv->resume	= cdns3_gadget_resume;
	rdrv->state	= CDNS_ROLE_STATE_INACTIVE;
	rdrv->name	= "gadget";
	cdns->roles[USB_ROLE_DEVICE] = rdrv;

	return 0;
}