// SPDX-License-Identifier: GPL-2.0
/*
 * Cadence USBSS DRD Driver - gadget side.
 *
 * Copyright (C) 2018-2019 Cadence Design Systems.
 * Copyright (C) 2017-2018 NXP
 *
 * Authors: Pawel Jez <pjez@cadence.com>,
 *          Pawel Laszczak <pawell@cadence.com>
 *          Peter Chen <peter.chen@nxp.com>
 */

/*
 * Work around 1:
 * In some situations, the controller may get a stale data address in a TRB
 * due to the following sequence:
 * 1. Controller reads a TRB that includes a data address
 * 2. Software updates TRBs, including the data address and Cycle bit
 * 3. Controller reads the TRB again, which includes the Cycle bit
 * 4. DMA runs with the stale data address
 *
 * To fix this problem, the driver needs to make the first TRB in a TD
 * invalid. After preparing all TRBs, the driver needs to check the DMA
 * position: if DMA points to the first just-added TRB and the doorbell is
 * set, then the driver must defer making this TRB valid. This TRB will be
 * made valid while adding the next TRB, but only if DMA is stopped, or at
 * the TRBERR interrupt.
 *
 * The issue has been fixed in the DEV_VER_V3 version of the controller.
 *
 * Work around 2:
 * The controller uses shared on-chip buffers for all incoming packets on
 * OUT endpoints, including ep0out. It is a FIFO buffer, so packets must be
 * handled by DMA in the correct order. If the first packet in the buffer
 * is not handled, then the following packets directed to other endpoints
 * and functions will be blocked.
 * Additionally, packets directed to one endpoint can block the entire
 * on-chip buffer. In this case transfers to other endpoints will also be
 * blocked.
 *
 * To resolve this issue, after raising the descriptor missing interrupt,
 * the driver prepares an internal usb_request object and uses it to arm a
 * DMA transfer.
 *
 * The problematic situation was observed when an endpoint had been enabled
 * but no usb_request was queued. The driver tries to detect such endpoints
 * and uses this workaround only for them.
 *
 * The driver uses a limited number of buffers. This number can be set by
 * the macro CDNS3_WA2_NUM_BUFFERS.
 *
 * Such a blocking situation was observed on the ACM gadget. For this
 * function the host sends an OUT data packet but the ACM function is not
 * prepared for it. The buffer placed in on-chip memory then blocks
 * transfers to other endpoints.
 *
 * The issue has been fixed in the DEV_VER_V2 version of the controller.
 */

#include <linux/dma-mapping.h>
#include <linux/usb/gadget.h>
#include <linux/module.h>
#include <linux/dmapool.h>
#include <linux/iopoll.h>

#include "core.h"
#include "gadget-export.h"
#include "cdns3-gadget.h"
#include "cdns3-trace.h"
#include "drd.h"

static int __cdns3_gadget_ep_queue(struct usb_ep *ep,
				   struct usb_request *request,
				   gfp_t gfp_flags);

static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
				 struct usb_request *request);

static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep,
					struct usb_request *request);

/**
 * cdns3_clear_register_bit - clear bit in given register.
 * @ptr: address of device controller register to be read and changed
 * @mask: bits requested to clear
 */
static void cdns3_clear_register_bit(void __iomem *ptr, u32 mask)
{
	mask = readl(ptr) & ~mask;
	writel(mask, ptr);
}

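/*
 * Usage sketch for the two helpers (illustrative only): both perform a
 * plain read-modify-write on a controller register, e.g.:
 *
 *	cdns3_set_register_bit(&priv_dev->regs->ep_cfg, EP_CFG_ENABLE);
 */
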
/**
 * cdns3_set_register_bit - set bit in given register.
 * @ptr: address of device controller register to be read and changed
 * @mask: bits requested to set
 */
void cdns3_set_register_bit(void __iomem *ptr, u32 mask)
{
	mask = readl(ptr) | mask;
	writel(mask, ptr);
}

/**
 * cdns3_ep_addr_to_index - converts endpoint address to
 * index of endpoint object in cdns3_device.eps[] container
 * @ep_addr: endpoint address for which endpoint object is required
 *
 */
u8 cdns3_ep_addr_to_index(u8 ep_addr)
{
	return (((ep_addr & 0x7F)) + ((ep_addr & USB_DIR_IN) ? 16 : 0));
}

static int cdns3_get_dma_pos(struct cdns3_device *priv_dev,
			     struct cdns3_endpoint *priv_ep)
{
	int dma_index;

	dma_index = readl(&priv_dev->regs->ep_traddr) - priv_ep->trb_pool_dma;

	return dma_index / TRB_SIZE;
}

/**
 * cdns3_next_request - returns next request from list
 * @list: list containing requests
 *
 * Returns request or NULL if no requests in list
 */
struct usb_request *cdns3_next_request(struct list_head *list)
{
	return list_first_entry_or_null(list, struct usb_request, list);
}

/**
 * cdns3_next_align_buf - returns next buffer from list
 * @list: list containing buffers
 *
 * Returns buffer or NULL if no buffers in list
 */
static struct cdns3_aligned_buf *cdns3_next_align_buf(struct list_head *list)
{
	return list_first_entry_or_null(list, struct cdns3_aligned_buf, list);
}

/**
 * cdns3_next_priv_request - returns next request from list
 * @list: list containing requests
 *
 * Returns request or NULL if no requests in list
 */
static struct cdns3_request *cdns3_next_priv_request(struct list_head *list)
{
	return list_first_entry_or_null(list, struct cdns3_request, list);
}

/**
 * cdns3_select_ep - selects endpoint
 * @priv_dev: extended gadget object
 * @ep: endpoint address
 */
void cdns3_select_ep(struct cdns3_device *priv_dev, u32 ep)
{
	if (priv_dev->selected_ep == ep)
		return;

	priv_dev->selected_ep = ep;
	writel(ep, &priv_dev->regs->ep_sel);
}

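/*
 * Typical per-endpoint access pattern (illustrative): registers such as
 * ep_sts, ep_cmd and ep_cfg operate on the endpoint currently selected
 * through ep_sel, so callers do e.g.:
 *
 *	cdns3_select_ep(priv_dev, priv_ep->num | priv_ep->dir);
 *	reg = readl(&priv_dev->regs->ep_sts_en);
 */
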
/**
 * cdns3_get_tdl - gets current tdl for selected endpoint.
 * @priv_dev: extended gadget object
 *
 * Before calling this function the appropriate endpoint must
 * be selected by means of the cdns3_select_ep function.
 */
static int cdns3_get_tdl(struct cdns3_device *priv_dev)
{
	if (priv_dev->dev_ver < DEV_VER_V3)
		return EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd));
	else
		return readl(&priv_dev->regs->ep_tdl);
}

dma_addr_t cdns3_trb_virt_to_dma(struct cdns3_endpoint *priv_ep,
				 struct cdns3_trb *trb)
{
	u32 offset = (char *)trb - (char *)priv_ep->trb_pool;

	return priv_ep->trb_pool_dma + offset;
}

static void cdns3_free_trb_pool(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;

	if (priv_ep->trb_pool) {
		dma_pool_free(priv_dev->eps_dma_pool,
			      priv_ep->trb_pool, priv_ep->trb_pool_dma);
		priv_ep->trb_pool = NULL;
	}
}

/**
 * cdns3_allocate_trb_pool - Allocates TRB's pool for selected endpoint
 * @priv_ep: endpoint object
 *
 * Function will return 0 on success or -ENOMEM on allocation error
 */
int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	int ring_size = TRB_RING_SIZE;
	int num_trbs = ring_size / TRB_SIZE;
	struct cdns3_trb *link_trb;

	if (priv_ep->trb_pool && priv_ep->alloc_ring_size < ring_size)
		cdns3_free_trb_pool(priv_ep);

	if (!priv_ep->trb_pool) {
		priv_ep->trb_pool = dma_pool_alloc(priv_dev->eps_dma_pool,
						   GFP_DMA32 | GFP_ATOMIC,
						   &priv_ep->trb_pool_dma);

		if (!priv_ep->trb_pool)
			return -ENOMEM;

		priv_ep->alloc_ring_size = ring_size;
	}

	memset(priv_ep->trb_pool, 0, ring_size);

	priv_ep->num_trbs = num_trbs;

	if (!priv_ep->num)
		return 0;

	/* Initialize the last TRB as Link TRB */
	link_trb = (priv_ep->trb_pool + (priv_ep->num_trbs - 1));

	if (priv_ep->use_streams) {
		/*
		 * For stream capable endpoints the driver uses a single,
		 * correct TRB. The last TRB has a zeroed cycle bit.
		 */
		link_trb->control = 0;
	} else {
		link_trb->buffer = cpu_to_le32(TRB_BUFFER(priv_ep->trb_pool_dma));
		link_trb->control = cpu_to_le32(TRB_CYCLE | TRB_TYPE(TRB_LINK) | TRB_TOGGLE);
	}
	return 0;
}

/**
 * cdns3_ep_stall_flush - Stalls and flushes selected endpoint
 * @priv_ep: endpoint object
 *
 * Endpoint must be selected before call to this function
 */
static void cdns3_ep_stall_flush(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	int val;

	trace_cdns3_halt(priv_ep, 1, 1);

	writel(EP_CMD_DFLUSH | EP_CMD_ERDY | EP_CMD_SSTALL,
	       &priv_dev->regs->ep_cmd);

	/* wait for DFLUSH cleared */
	readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
				  !(val & EP_CMD_DFLUSH), 1, 1000);
	priv_ep->flags |= EP_STALLED;
	priv_ep->flags &= ~EP_STALL_PENDING;
}

/**
 * cdns3_hw_reset_eps_config - reset endpoints configuration kept by controller.
 * @priv_dev: extended gadget object
 */
void cdns3_hw_reset_eps_config(struct cdns3_device *priv_dev)
{
	int i;

	writel(USB_CONF_CFGRST, &priv_dev->regs->usb_conf);

	cdns3_allow_enable_l1(priv_dev, 0);
	priv_dev->hw_configured_flag = 0;
	priv_dev->onchip_used_size = 0;
	priv_dev->out_mem_is_allocated = 0;
	priv_dev->wait_for_setup = 0;
	priv_dev->using_streams = 0;

	for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++)
		if (priv_dev->eps[i])
			priv_dev->eps[i]->flags &= ~EP_CONFIGURED;
}

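/*
 * Ring geometry note (illustrative): for a non-zero endpoint the pool
 * above holds num_trbs entries; slots 0 .. num_trbs - 2 carry transfer
 * TRBs and slot num_trbs - 1 holds the link TRB, which is why the
 * enqueue/dequeue helpers below wrap at num_trbs - 1.
 */
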
/**
 * cdns3_ep_inc_trb - increment a trb index.
 * @index: Pointer to the TRB index to increment.
 * @cs: Cycle state
 * @trb_in_seg: number of TRBs in segment
 *
 * The index should never point to the link TRB. After incrementing,
 * if it would point to the link TRB, wrap around to the beginning and
 * toggle the cycle state bit. The link TRB is always at the last TRB
 * entry.
 */
static void cdns3_ep_inc_trb(int *index, u8 *cs, int trb_in_seg)
{
	(*index)++;
	if (*index == (trb_in_seg - 1)) {
		*index = 0;
		*cs ^= 1;
	}
}

/**
 * cdns3_ep_inc_enq - increment endpoint's enqueue pointer
 * @priv_ep: The endpoint whose enqueue pointer we're incrementing
 */
static void cdns3_ep_inc_enq(struct cdns3_endpoint *priv_ep)
{
	priv_ep->free_trbs--;
	cdns3_ep_inc_trb(&priv_ep->enqueue, &priv_ep->pcs, priv_ep->num_trbs);
}

/**
 * cdns3_ep_inc_deq - increment endpoint's dequeue pointer
 * @priv_ep: The endpoint whose dequeue pointer we're incrementing
 */
static void cdns3_ep_inc_deq(struct cdns3_endpoint *priv_ep)
{
	priv_ep->free_trbs++;
	cdns3_ep_inc_trb(&priv_ep->dequeue, &priv_ep->ccs, priv_ep->num_trbs);
}

static void cdns3_move_deq_to_next_trb(struct cdns3_request *priv_req)
{
	struct cdns3_endpoint *priv_ep = priv_req->priv_ep;
	int current_trb = priv_req->start_trb;

	while (current_trb != priv_req->end_trb) {
		cdns3_ep_inc_deq(priv_ep);
		current_trb = priv_ep->dequeue;
	}

	cdns3_ep_inc_deq(priv_ep);
}

/**
 * cdns3_allow_enable_l1 - enable/disable permits to transition to L1.
 * @priv_dev: Extended gadget object
 * @enable: Enable/disable permit to transition to L1.
 *
 * If bit USB_CONF_L1EN is set and the device receives an Extended Token
 * packet, then the controller answers with an ACK handshake.
 * If bit USB_CONF_L1DS is set and the device receives an Extended Token
 * packet, then the controller answers with a NYET handshake.
 */
void cdns3_allow_enable_l1(struct cdns3_device *priv_dev, int enable)
{
	if (enable)
		writel(USB_CONF_L1EN, &priv_dev->regs->usb_conf);
	else
		writel(USB_CONF_L1DS, &priv_dev->regs->usb_conf);
}

enum usb_device_speed cdns3_get_speed(struct cdns3_device *priv_dev)
{
	u32 reg;

	reg = readl(&priv_dev->regs->usb_sts);

	if (DEV_SUPERSPEED(reg))
		return USB_SPEED_SUPER;
	else if (DEV_HIGHSPEED(reg))
		return USB_SPEED_HIGH;
	else if (DEV_FULLSPEED(reg))
		return USB_SPEED_FULL;
	else if (DEV_LOWSPEED(reg))
		return USB_SPEED_LOW;
	return USB_SPEED_UNKNOWN;
}

/**
 * cdns3_start_all_request - add all not-started requests to the ring
 * @priv_dev: Extended gadget object
 * @priv_ep: The endpoint for which requests will be started.
 *
 * Returns -ENOBUFS if the transfer ring does not have enough free TRBs to
 * start all requests.
 */
static int cdns3_start_all_request(struct cdns3_device *priv_dev,
				   struct cdns3_endpoint *priv_ep)
{
	struct usb_request *request;
	int ret = 0;
	u8 pending_empty = list_empty(&priv_ep->pending_req_list);

	/*
	 * If the last pending transfer is INTERNAL
	 * OR streams are enabled for this endpoint
	 * do NOT start a new transfer while the last one is still pending
	 */
	if (!pending_empty) {
		struct cdns3_request *priv_req;

		request = cdns3_next_request(&priv_ep->pending_req_list);
		priv_req = to_cdns3_request(request);
		if ((priv_req->flags & REQUEST_INTERNAL) ||
		    (priv_ep->flags & EP_TDLCHK_EN) ||
		    priv_ep->use_streams) {
			dev_dbg(priv_dev->dev, "Blocking external request\n");
			return ret;
		}
	}

	while (!list_empty(&priv_ep->deferred_req_list)) {
		request = cdns3_next_request(&priv_ep->deferred_req_list);

		if (!priv_ep->use_streams) {
			ret = cdns3_ep_run_transfer(priv_ep, request);
		} else {
			priv_ep->stream_sg_idx = 0;
			ret = cdns3_ep_run_stream_transfer(priv_ep, request);
		}
		if (ret)
			return ret;

		list_move_tail(&request->list, &priv_ep->pending_req_list);
		if (request->stream_id != 0 || (priv_ep->flags & EP_TDLCHK_EN))
			break;
	}

	priv_ep->flags &= ~EP_RING_FULL;
	return ret;
}

/*
 * WA2: Set flag for all non-ISOC OUT endpoints. If this flag is set
 * the driver tries to detect whether the endpoint needs an additional
 * internal buffer for unblocking the on-chip FIFO buffer. This flag will
 * be cleared if DMA is armed before the first DESCMISS interrupt.
 */
#define cdns3_wa2_enable_detection(priv_dev, priv_ep, reg) do { \
	if (!priv_ep->dir && priv_ep->type != USB_ENDPOINT_XFER_ISOC) { \
		priv_ep->flags |= EP_QUIRK_EXTRA_BUF_DET; \
		(reg) |= EP_STS_EN_DESCMISEN; \
	} } while (0)

static void __cdns3_descmiss_copy_data(struct usb_request *request,
				       struct usb_request *descmiss_req)
{
	int length = request->actual + descmiss_req->actual;
	struct scatterlist *s = request->sg;

	if (!s) {
		if (length <= request->length) {
			memcpy(&((u8 *)request->buf)[request->actual],
			       descmiss_req->buf,
			       descmiss_req->actual);
			request->actual = length;
		} else {
			/* It should never occur */
			request->status = -ENOMEM;
		}
	} else {
		if (length <= sg_dma_len(s)) {
			void *p = phys_to_virt(sg_dma_address(s));

			memcpy(&((u8 *)p)[request->actual],
			       descmiss_req->buf,
			       descmiss_req->actual);
			request->actual = length;
		} else {
			request->status = -ENOMEM;
		}
	}
}

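/*
 * WA2 data-flow summary: __cdns3_descmiss_copy_data() above drains a
 * single internal DESCMISS buffer into the class driver's request, while
 * cdns3_wa2_descmiss_copy_data() below walks the whole chain of internal
 * buffers until it reaches one that is still pending.
 */
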
/**
 * cdns3_wa2_descmiss_copy_data - copy data from internal requests to
 * request queued by class driver.
 * @priv_ep: extended endpoint object
 * @request: request object
 */
static void cdns3_wa2_descmiss_copy_data(struct cdns3_endpoint *priv_ep,
					 struct usb_request *request)
{
	struct usb_request *descmiss_req;
	struct cdns3_request *descmiss_priv_req;

	while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
		int chunk_end;

		descmiss_priv_req =
			cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
		descmiss_req = &descmiss_priv_req->request;

		/* driver can't touch pending request */
		if (descmiss_priv_req->flags & REQUEST_PENDING)
			break;

		chunk_end = descmiss_priv_req->flags & REQUEST_INTERNAL_CH;
		request->status = descmiss_req->status;
		__cdns3_descmiss_copy_data(request, descmiss_req);
		list_del_init(&descmiss_priv_req->list);
		kfree(descmiss_req->buf);
		cdns3_gadget_ep_free_request(&priv_ep->endpoint, descmiss_req);
		--priv_ep->wa2_counter;

		if (!chunk_end)
			break;
	}
}

static struct usb_request *cdns3_wa2_gadget_giveback(struct cdns3_device *priv_dev,
						     struct cdns3_endpoint *priv_ep,
						     struct cdns3_request *priv_req)
{
	if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN &&
	    priv_req->flags & REQUEST_INTERNAL) {
		struct usb_request *req;

		req = cdns3_next_request(&priv_ep->deferred_req_list);

		priv_ep->descmis_req = NULL;

		if (!req)
			return NULL;

		/* unmap the gadget request before copying data */
		usb_gadget_unmap_request_by_dev(priv_dev->sysdev, req,
						priv_ep->dir);

		cdns3_wa2_descmiss_copy_data(priv_ep, req);
		if (!(priv_ep->flags & EP_QUIRK_END_TRANSFER) &&
		    req->length != req->actual) {
			/* wait for next part of transfer */
			/* re-map the gadget request buffer */
			usb_gadget_map_request_by_dev(priv_dev->sysdev, req,
						      usb_endpoint_dir_in(priv_ep->endpoint.desc));
			return NULL;
		}

		if (req->status == -EINPROGRESS)
			req->status = 0;

		list_del_init(&req->list);
		cdns3_start_all_request(priv_dev, priv_ep);
		return req;
	}

	return &priv_req->request;
}

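/*
 * Return-value convention of cdns3_wa2_gadget_ep_queue() below: it
 * intentionally returns the positive value EINPROGRESS when the request
 * was completed immediately from the internal DESCMISS buffers, and 1
 * when queuing must be deferred behind a pending DESCMISS transfer.
 */
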
static int cdns3_wa2_gadget_ep_queue(struct cdns3_device *priv_dev,
				     struct cdns3_endpoint *priv_ep,
				     struct cdns3_request *priv_req)
{
	int deferred = 0;

	/*
	 * If a transfer was queued before DESCMISS appeared, then the
	 * driver can disable handling of the DESCMISS interrupt. The driver
	 * assumes that it can disable the special treatment for this
	 * endpoint.
	 */
	if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET) {
		u32 reg;

		cdns3_select_ep(priv_dev, priv_ep->num | priv_ep->dir);
		priv_ep->flags &= ~EP_QUIRK_EXTRA_BUF_DET;
		reg = readl(&priv_dev->regs->ep_sts_en);
		reg &= ~EP_STS_EN_DESCMISEN;
		trace_cdns3_wa2(priv_ep, "workaround disabled\n");
		writel(reg, &priv_dev->regs->ep_sts_en);
	}

	if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN) {
		u8 pending_empty = list_empty(&priv_ep->pending_req_list);
		u8 descmiss_empty = list_empty(&priv_ep->wa2_descmiss_req_list);

		/*
		 * The DESCMISS transfer has finished, so data will be
		 * directly copied from the internally allocated usb_request
		 * objects.
		 */
		if (pending_empty && !descmiss_empty &&
		    !(priv_req->flags & REQUEST_INTERNAL)) {
			cdns3_wa2_descmiss_copy_data(priv_ep,
						     &priv_req->request);

			trace_cdns3_wa2(priv_ep, "get internal stored data");

			list_add_tail(&priv_req->request.list,
				      &priv_ep->pending_req_list);
			cdns3_gadget_giveback(priv_ep, priv_req,
					      priv_req->request.status);

			/*
			 * The driver intentionally returns a positive value
			 * as the correct value. It informs that the transfer
			 * has been finished.
			 */
			return EINPROGRESS;
		}

		/*
		 * The driver will wait for the DESCMISS transfer to complete
		 * before starting a new, non-DESCMISS transfer.
		 */
		if (!pending_empty && !descmiss_empty) {
			trace_cdns3_wa2(priv_ep, "wait for pending transfer\n");
			deferred = 1;
		}

		if (priv_req->flags & REQUEST_INTERNAL)
			list_add_tail(&priv_req->list,
				      &priv_ep->wa2_descmiss_req_list);
	}

	return deferred;
}

static void cdns3_wa2_remove_old_request(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_request *priv_req;

	while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
		u8 chain;

		priv_req = cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
		chain = !!(priv_req->flags & REQUEST_INTERNAL_CH);

		trace_cdns3_wa2(priv_ep, "removes eldest request");

		kfree(priv_req->request.buf);
		cdns3_gadget_ep_free_request(&priv_ep->endpoint,
					     &priv_req->request);
		list_del_init(&priv_req->list);
		--priv_ep->wa2_counter;

		if (!chain)
			break;
	}
}

/**
 * cdns3_wa2_descmissing_packet - handles descriptor missing event.
 * @priv_ep: extended gadget object
 *
 * This function is used only for WA2. For more information see the
 * Work around 2 description.
 */
static void cdns3_wa2_descmissing_packet(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_request *priv_req;
	struct usb_request *request;
	u8 pending_empty = list_empty(&priv_ep->pending_req_list);

	/* check for pending transfer */
	if (!pending_empty) {
		trace_cdns3_wa2(priv_ep, "Ignoring Descriptor missing IRQ\n");
		return;
	}

	if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET) {
		priv_ep->flags &= ~EP_QUIRK_EXTRA_BUF_DET;
		priv_ep->flags |= EP_QUIRK_EXTRA_BUF_EN;
	}

	trace_cdns3_wa2(priv_ep, "Description Missing detected\n");

	if (priv_ep->wa2_counter >= CDNS3_WA2_NUM_BUFFERS) {
		trace_cdns3_wa2(priv_ep, "WA2 overflow\n");
		cdns3_wa2_remove_old_request(priv_ep);
	}

	request = cdns3_gadget_ep_alloc_request(&priv_ep->endpoint,
						GFP_ATOMIC);
	if (!request)
		goto err;

	priv_req = to_cdns3_request(request);
	priv_req->flags |= REQUEST_INTERNAL;

	/*
	 * If this field is still assigned, it indicates that the transfer
	 * related to this request has not been finished yet. In this case
	 * the driver simply allocates the next request and assigns the
	 * REQUEST_INTERNAL_CH flag to the previous one, indicating that the
	 * current request is part of the previous one.
	 */
	if (priv_ep->descmis_req)
		priv_ep->descmis_req->flags |= REQUEST_INTERNAL_CH;

	priv_req->request.buf = kzalloc(CDNS3_DESCMIS_BUF_SIZE,
					GFP_ATOMIC);
	priv_ep->wa2_counter++;

	if (!priv_req->request.buf) {
		cdns3_gadget_ep_free_request(&priv_ep->endpoint, request);
		goto err;
	}

	priv_req->request.length = CDNS3_DESCMIS_BUF_SIZE;
	priv_ep->descmis_req = priv_req;

	__cdns3_gadget_ep_queue(&priv_ep->endpoint,
				&priv_ep->descmis_req->request,
				GFP_ATOMIC);

	return;

err:
	dev_err(priv_ep->cdns3_dev->dev,
		"Failed: No sufficient memory for DESCMIS\n");
}

static void cdns3_wa2_reset_tdl(struct cdns3_device *priv_dev)
{
	u16 tdl = EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd));

	if (tdl) {
		u16 reset_val = EP_CMD_TDL_MAX + 1 - tdl;

		writel(EP_CMD_TDL_SET(reset_val) | EP_CMD_STDL,
		       &priv_dev->regs->ep_cmd);
	}
}

static void cdns3_wa2_check_outq_status(struct cdns3_device *priv_dev)
{
	u32 ep_sts_reg;

	/* select EP0-out */
	cdns3_select_ep(priv_dev, 0);

	ep_sts_reg = readl(&priv_dev->regs->ep_sts);

	if (EP_STS_OUTQ_VAL(ep_sts_reg)) {
		u32 outq_ep_num = EP_STS_OUTQ_NO(ep_sts_reg);
		struct cdns3_endpoint *outq_ep = priv_dev->eps[outq_ep_num];

		if ((outq_ep->flags & EP_ENABLED) && !(outq_ep->use_streams) &&
		    outq_ep->type != USB_ENDPOINT_XFER_ISOC && outq_ep_num) {
			u8 pending_empty = list_empty(&outq_ep->pending_req_list);

			if (!(outq_ep->flags & EP_QUIRK_EXTRA_BUF_DET) &&
			    !(outq_ep->flags & EP_QUIRK_EXTRA_BUF_EN) &&
			    pending_empty) {
				u32 ep_sts_en_reg;
				u32 ep_cmd_reg;

				cdns3_select_ep(priv_dev, outq_ep->num |
						outq_ep->dir);
				ep_sts_en_reg = readl(&priv_dev->regs->ep_sts_en);
				ep_cmd_reg = readl(&priv_dev->regs->ep_cmd);

				outq_ep->flags |= EP_TDLCHK_EN;
				cdns3_set_register_bit(&priv_dev->regs->ep_cfg,
						       EP_CFG_TDL_CHK);

				cdns3_wa2_enable_detection(priv_dev, outq_ep,
							   ep_sts_en_reg);
				writel(ep_sts_en_reg,
				       &priv_dev->regs->ep_sts_en);
				/* reset tdl value to zero */
				cdns3_wa2_reset_tdl(priv_dev);
				/*
				 * Memory barrier - Reset tdl before ringing the
				 * doorbell.
				 */
				wmb();
				if (EP_CMD_DRDY & ep_cmd_reg) {
					trace_cdns3_wa2(outq_ep, "Enabling WA2 skipping doorbell\n");

				} else {
					trace_cdns3_wa2(outq_ep, "Enabling WA2 ringing doorbell\n");
					/*
					 * ring doorbell to generate DESCMIS irq
					 */
					writel(EP_CMD_DRDY,
					       &priv_dev->regs->ep_cmd);
				}
			}
		}
	}
}

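/*
 * Note on cdns3_wa2_reset_tdl() above (an assumption about the counter
 * semantics, not stated elsewhere in this file): EP_CMD_STDL appears to
 * add the written value to the current TDL modulo EP_CMD_TDL_MAX + 1, so
 * writing EP_CMD_TDL_MAX + 1 - tdl wraps the counter back to zero.
 */
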
/**
 * cdns3_gadget_giveback - call struct usb_request's ->complete callback
 * @priv_ep: The endpoint to whom the request belongs to
 * @priv_req: The request we're giving back
 * @status: completion code for the request
 *
 * Must be called with controller's lock held and interrupts disabled. This
 * function will unmap the request and call its ->complete() callback to
 * notify upper layers that it has completed.
 */
void cdns3_gadget_giveback(struct cdns3_endpoint *priv_ep,
			   struct cdns3_request *priv_req,
			   int status)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct usb_request *request = &priv_req->request;

	list_del_init(&request->list);

	if (request->status == -EINPROGRESS)
		request->status = status;

	usb_gadget_unmap_request_by_dev(priv_dev->sysdev, request,
					priv_ep->dir);

	if ((priv_req->flags & REQUEST_UNALIGNED) &&
	    priv_ep->dir == USB_DIR_OUT && !request->status) {
		/* Make DMA buffer CPU accessible */
		dma_sync_single_for_cpu(priv_dev->sysdev,
					priv_req->aligned_buf->dma,
					priv_req->aligned_buf->size,
					priv_req->aligned_buf->dir);
		memcpy(request->buf, priv_req->aligned_buf->buf,
		       request->length);
	}

	priv_req->flags &= ~(REQUEST_PENDING | REQUEST_UNALIGNED);
	/* All TRBs have finished, clear the counter */
	priv_req->finished_trb = 0;
	trace_cdns3_gadget_giveback(priv_req);

	if (priv_dev->dev_ver < DEV_VER_V2) {
		request = cdns3_wa2_gadget_giveback(priv_dev, priv_ep,
						    priv_req);
		if (!request)
			return;
	}

	if (request->complete) {
		spin_unlock(&priv_dev->lock);
		usb_gadget_giveback_request(&priv_ep->endpoint,
					    request);
		spin_lock(&priv_dev->lock);
	}

	if (request->buf == priv_dev->zlp_buf)
		cdns3_gadget_ep_free_request(&priv_ep->endpoint, request);
}

static void cdns3_wa1_restore_cycle_bit(struct cdns3_endpoint *priv_ep)
{
	/* Work around for stale data address in TRB */
	if (priv_ep->wa1_set) {
		trace_cdns3_wa1(priv_ep, "restore cycle bit");

		priv_ep->wa1_set = 0;
		priv_ep->wa1_trb_index = 0xFFFF;
		if (priv_ep->wa1_cycle_bit) {
			priv_ep->wa1_trb->control =
				priv_ep->wa1_trb->control | cpu_to_le32(0x1);
		} else {
			priv_ep->wa1_trb->control =
				priv_ep->wa1_trb->control & cpu_to_le32(~0x1);
		}
	}
}

static void cdns3_free_aligned_request_buf(struct work_struct *work)
{
	struct cdns3_device *priv_dev = container_of(work, struct cdns3_device,
						     aligned_buf_wq);
	struct cdns3_aligned_buf *buf, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&priv_dev->lock, flags);

	list_for_each_entry_safe(buf, tmp, &priv_dev->aligned_buf_list, list) {
		if (!buf->in_use) {
			list_del(&buf->list);

			/*
			 * Re-enable interrupts to free DMA capable memory.
			 * The driver can't free this memory with disabled
			 * interrupts.
			 */
			spin_unlock_irqrestore(&priv_dev->lock, flags);
			dma_free_noncoherent(priv_dev->sysdev, buf->size,
					     buf->buf, buf->dma, buf->dir);
			kfree(buf);
			spin_lock_irqsave(&priv_dev->lock, flags);
		}
	}

	spin_unlock_irqrestore(&priv_dev->lock, flags);
}

static int cdns3_prepare_aligned_request_buf(struct cdns3_request *priv_req)
{
	struct cdns3_endpoint *priv_ep = priv_req->priv_ep;
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct cdns3_aligned_buf *buf;

	/* check if buffer is aligned to 8. */
	if (!((uintptr_t)priv_req->request.buf & 0x7))
		return 0;

	buf = priv_req->aligned_buf;

	if (!buf || priv_req->request.length > buf->size) {
		buf = kzalloc(sizeof(*buf), GFP_ATOMIC);
		if (!buf)
			return -ENOMEM;

		buf->size = priv_req->request.length;
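		/*
		 * The DMA direction follows the endpoint direction: IN
		 * endpoints stream memory -> device (DMA_TO_DEVICE), OUT
		 * endpoints stream device -> memory (DMA_FROM_DEVICE).
		 */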
		buf->dir = usb_endpoint_dir_in(priv_ep->endpoint.desc) ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE;

		buf->buf = dma_alloc_noncoherent(priv_dev->sysdev,
						 buf->size,
						 &buf->dma,
						 buf->dir,
						 GFP_ATOMIC);
		if (!buf->buf) {
			kfree(buf);
			return -ENOMEM;
		}

		if (priv_req->aligned_buf) {
			trace_cdns3_free_aligned_request(priv_req);
			priv_req->aligned_buf->in_use = 0;
			queue_work(system_freezable_wq,
				   &priv_dev->aligned_buf_wq);
		}

		buf->in_use = 1;
		priv_req->aligned_buf = buf;

		list_add_tail(&buf->list,
			      &priv_dev->aligned_buf_list);
	}

	if (priv_ep->dir == USB_DIR_IN) {
		/* Make DMA buffer CPU accessible */
		dma_sync_single_for_cpu(priv_dev->sysdev,
					buf->dma, buf->size, buf->dir);
		memcpy(buf->buf, priv_req->request.buf,
		       priv_req->request.length);
	}

	/* Transfer DMA buffer ownership back to device */
	dma_sync_single_for_device(priv_dev->sysdev,
				   buf->dma, buf->size, buf->dir);

	priv_req->flags |= REQUEST_UNALIGNED;
	trace_cdns3_prepare_aligned_request(priv_req);

	return 0;
}

static int cdns3_wa1_update_guard(struct cdns3_endpoint *priv_ep,
				  struct cdns3_trb *trb)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;

	if (!priv_ep->wa1_set) {
		u32 doorbell;

		doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);

		if (doorbell) {
			priv_ep->wa1_cycle_bit = priv_ep->pcs ? TRB_CYCLE : 0;
			priv_ep->wa1_set = 1;
			priv_ep->wa1_trb = trb;
			priv_ep->wa1_trb_index = priv_ep->enqueue;
			trace_cdns3_wa1(priv_ep, "set guard");
			return 0;
		}
	}
	return 1;
}

static void cdns3_wa1_tray_restore_cycle_bit(struct cdns3_device *priv_dev,
					     struct cdns3_endpoint *priv_ep)
{
	int dma_index;
	u32 doorbell;

	doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);
	dma_index = cdns3_get_dma_pos(priv_dev, priv_ep);

	if (!doorbell || dma_index != priv_ep->wa1_trb_index)
		cdns3_wa1_restore_cycle_bit(priv_ep);
}

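/*
 * WA1 in short: while the doorbell is already set, the first TRB of a new
 * TD is queued with an inverted cycle bit (the "guard", see
 * cdns3_wa1_update_guard() above); the guard is reverted by
 * cdns3_wa1_restore_cycle_bit() once DMA has stopped or has moved past
 * the guarded TRB.
 */
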
static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep,
					struct usb_request *request)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct cdns3_request *priv_req;
	struct cdns3_trb *trb;
	dma_addr_t trb_dma;
	int address;
	u32 control;
	u32 length;
	u32 tdl;
	unsigned int sg_idx = priv_ep->stream_sg_idx;

	priv_req = to_cdns3_request(request);
	address = priv_ep->endpoint.desc->bEndpointAddress;

	priv_ep->flags |= EP_PENDING_REQUEST;

	/* must allocate buffer aligned to 8 */
	if (priv_req->flags & REQUEST_UNALIGNED)
		trb_dma = priv_req->aligned_buf->dma;
	else
		trb_dma = request->dma;

	/* For stream capable endpoints the driver uses only a single TD. */
	trb = priv_ep->trb_pool + priv_ep->enqueue;
	priv_req->start_trb = priv_ep->enqueue;
	priv_req->end_trb = priv_req->start_trb;
	priv_req->trb = trb;

	cdns3_select_ep(priv_ep->cdns3_dev, address);

	control = TRB_TYPE(TRB_NORMAL) | TRB_CYCLE |
		  TRB_STREAM_ID(priv_req->request.stream_id) | TRB_ISP;

	if (!request->num_sgs) {
		trb->buffer = cpu_to_le32(TRB_BUFFER(trb_dma));
		length = request->length;
	} else {
		trb->buffer = cpu_to_le32(TRB_BUFFER(request->sg[sg_idx].dma_address));
		length = request->sg[sg_idx].length;
	}

	tdl = DIV_ROUND_UP(length, priv_ep->endpoint.maxpacket);

	trb->length = cpu_to_le32(TRB_BURST_LEN(16) | TRB_LEN(length));

	/*
	 * For the DEV_VER_V2 controller version we have enabled
	 * USB_CONF2_EN_TDL_TRB in the DMULT configuration.
	 * This enables TDL calculation based on the TRB, hence setting TDL
	 * in the TRB.
	 */
	if (priv_dev->dev_ver >= DEV_VER_V2) {
		if (priv_dev->gadget.speed == USB_SPEED_SUPER)
			trb->length |= cpu_to_le32(TRB_TDL_SS_SIZE(tdl));
	}
	priv_req->flags |= REQUEST_PENDING;

	trb->control = cpu_to_le32(control);

	trace_cdns3_prepare_trb(priv_ep, priv_req->trb);

	/*
	 * Memory barrier - Cycle Bit must be set before trb->length and
	 * trb->buffer fields.
	 */
	wmb();

	/* always first element */
	writel(EP_TRADDR_TRADDR(priv_ep->trb_pool_dma),
	       &priv_dev->regs->ep_traddr);

	if (!(priv_ep->flags & EP_STALLED)) {
		trace_cdns3_ring(priv_ep);
		/* clearing TRBERR and EP_STS_DESCMIS before setting DRDY */
		writel(EP_STS_TRBERR | EP_STS_DESCMIS, &priv_dev->regs->ep_sts);

		priv_ep->prime_flag = false;

		/*
		 * For controller version DEV_VER_V2 the tdl calculation
		 * is based on the TRB.
		 */

		if (priv_dev->dev_ver < DEV_VER_V2)
			writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL,
			       &priv_dev->regs->ep_cmd);
		else if (priv_dev->dev_ver > DEV_VER_V2)
			writel(tdl, &priv_dev->regs->ep_tdl);

		priv_ep->last_stream_id = priv_req->request.stream_id;
		writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
		writel(EP_CMD_ERDY_SID(priv_req->request.stream_id) |
		       EP_CMD_ERDY, &priv_dev->regs->ep_cmd);

		trace_cdns3_doorbell_epx(priv_ep->name,
					 readl(&priv_dev->regs->ep_traddr));
	}

	/* WORKAROUND for transition to L0 */
	__cdns3_gadget_wakeup(priv_dev);

	return 0;
}

static void cdns3_rearm_drdy_if_needed(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;

	if (priv_dev->dev_ver < DEV_VER_V3)
		return;

	if (readl(&priv_dev->regs->ep_sts) & EP_STS_TRBERR) {
		writel(EP_STS_TRBERR, &priv_dev->regs->ep_sts);
		writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
	}
}

/**
 * cdns3_ep_run_transfer - start transfer on non-default endpoint hardware
 * @priv_ep: endpoint object
 * @request: request object
 *
 * Returns zero on success or negative value on failure
 */
static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
				 struct usb_request *request)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct cdns3_request *priv_req;
	struct cdns3_trb *trb;
	struct cdns3_trb *link_trb = NULL;
	dma_addr_t trb_dma;
	u32 togle_pcs = 1;
	int sg_iter = 0;
	int num_trb;
	int address;
	u32 control;
	int pcs;
	u16 total_tdl = 0;
	struct scatterlist *s = NULL;
	bool sg_supported = !!(request->num_mapped_sgs);

	if (priv_ep->type == USB_ENDPOINT_XFER_ISOC)
		num_trb = priv_ep->interval;
	else
		num_trb = sg_supported ? request->num_mapped_sgs : 1;

	if (num_trb > priv_ep->free_trbs) {
		priv_ep->flags |= EP_RING_FULL;
		return -ENOBUFS;
	}

	priv_req = to_cdns3_request(request);
	address = priv_ep->endpoint.desc->bEndpointAddress;

	priv_ep->flags |= EP_PENDING_REQUEST;

	/* must allocate buffer aligned to 8 */
	if (priv_req->flags & REQUEST_UNALIGNED)
		trb_dma = priv_req->aligned_buf->dma;
	else
		trb_dma = request->dma;

	trb = priv_ep->trb_pool + priv_ep->enqueue;
	priv_req->start_trb = priv_ep->enqueue;
	priv_req->trb = trb;

	cdns3_select_ep(priv_ep->cdns3_dev, address);

	/* prepare ring */
	if ((priv_ep->enqueue + num_trb) >= (priv_ep->num_trbs - 1)) {
		int doorbell, dma_index;
		u32 ch_bit = 0;

		doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);
		dma_index = cdns3_get_dma_pos(priv_dev, priv_ep);

		/*
		 * Driver can't update the LINK TRB if it is currently being
		 * processed.
		 */
		if (doorbell && dma_index == priv_ep->num_trbs - 1) {
			priv_ep->flags |= EP_DEFERRED_DRDY;
			return -ENOBUFS;
		}

		/* updating C bit in Link TRB before starting DMA */
		link_trb = priv_ep->trb_pool + (priv_ep->num_trbs - 1);
		/*
		 * For a TR of size 2, enabling TRB_CHAIN for epXin causes
		 * DMA to get stuck at the LINK TRB.
		 * On the other hand, removing TRB_CHAIN for longer TRs for
		 * epXout causes DMA to get stuck after handling the LINK TRB.
		 * To eliminate this strange behaviour the driver sets the
		 * TRB_CHAIN bit only for TR size > 2.
		 */
		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC ||
		    TRBS_PER_SEGMENT > 2)
			ch_bit = TRB_CHAIN;

		link_trb->control = cpu_to_le32(((priv_ep->pcs) ? TRB_CYCLE : 0) |
				    TRB_TYPE(TRB_LINK) | TRB_TOGGLE | ch_bit);
	}

	if (priv_dev->dev_ver <= DEV_VER_V2)
		togle_pcs = cdns3_wa1_update_guard(priv_ep, trb);

	if (sg_supported)
		s = request->sg;

	/* set incorrect Cycle Bit for first trb */
	control = priv_ep->pcs ? 0 : TRB_CYCLE;
	trb->length = 0;
	if (priv_dev->dev_ver >= DEV_VER_V2) {
		u16 td_size;

		td_size = DIV_ROUND_UP(request->length,
				       priv_ep->endpoint.maxpacket);
		if (priv_dev->gadget.speed == USB_SPEED_SUPER)
			trb->length = cpu_to_le32(TRB_TDL_SS_SIZE(td_size));
		else
			control |= TRB_TDL_HS_SIZE(td_size);
	}

	do {
		u32 length;

		/* fill TRB */
		control |= TRB_TYPE(TRB_NORMAL);
		if (sg_supported) {
			trb->buffer = cpu_to_le32(TRB_BUFFER(sg_dma_address(s)));
			length = sg_dma_len(s);
		} else {
			trb->buffer = cpu_to_le32(TRB_BUFFER(trb_dma));
			length = request->length;
		}

		if (priv_ep->flags & EP_TDLCHK_EN)
			total_tdl += DIV_ROUND_UP(length,
						  priv_ep->endpoint.maxpacket);

		trb->length |= cpu_to_le32(TRB_BURST_LEN(priv_ep->trb_burst_size) |
					   TRB_LEN(length));
		pcs = priv_ep->pcs ? TRB_CYCLE : 0;

		/*
		 * The first TRB should be prepared as the last one, to avoid
		 * processing the transfer too early.
		 */
		if (sg_iter != 0)
			control |= pcs;

		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir) {
			control |= TRB_IOC | TRB_ISP;
		} else {
			/* for last element in TD or in SG list */
			if (sg_iter == (num_trb - 1) && sg_iter != 0)
				control |= pcs | TRB_IOC | TRB_ISP;
		}

		if (sg_iter)
			trb->control = cpu_to_le32(control);
		else
			priv_req->trb->control = cpu_to_le32(control);

		if (sg_supported) {
			trb->control |= cpu_to_le32(TRB_ISP);
			/* Don't set chain bit for last TRB */
			if (sg_iter < num_trb - 1)
				trb->control |= cpu_to_le32(TRB_CHAIN);

			s = sg_next(s);
		}

		control = 0;
		++sg_iter;
		priv_req->end_trb = priv_ep->enqueue;
		cdns3_ep_inc_enq(priv_ep);
		trb = priv_ep->trb_pool + priv_ep->enqueue;
		trb->length = 0;
	} while (sg_iter < num_trb);

	trb = priv_req->trb;

	priv_req->flags |= REQUEST_PENDING;
	priv_req->num_of_trb = num_trb;

	if (sg_iter == 1)
		trb->control |= cpu_to_le32(TRB_IOC | TRB_ISP);

	if (priv_dev->dev_ver < DEV_VER_V2 &&
	    (priv_ep->flags & EP_TDLCHK_EN)) {
		u16 tdl = total_tdl;
		u16 old_tdl = EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd));

		if (tdl > EP_CMD_TDL_MAX) {
			tdl = EP_CMD_TDL_MAX;
			priv_ep->pending_tdl = total_tdl - EP_CMD_TDL_MAX;
		}

		if (old_tdl < tdl) {
			tdl -= old_tdl;
			writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL,
			       &priv_dev->regs->ep_cmd);
		}
	}

	/*
	 * Memory barrier - cycle bit must be set before other fields in trb.
	 */
	wmb();

	/* give the TD to the consumer */
	if (togle_pcs)
		trb->control = trb->control ^ cpu_to_le32(1);

	if (priv_dev->dev_ver <= DEV_VER_V2)
		cdns3_wa1_tray_restore_cycle_bit(priv_dev, priv_ep);

	if (num_trb > 1) {
		int i = 0;

		while (i < num_trb) {
			trace_cdns3_prepare_trb(priv_ep, trb + i);
			if (trb + i == link_trb) {
				trb = priv_ep->trb_pool;
				num_trb = num_trb - i;
				i = 0;
			} else {
				i++;
			}
		}
	} else {
		trace_cdns3_prepare_trb(priv_ep, priv_req->trb);
	}

	/*
	 * Memory barrier - Cycle Bit must be set before trb->length and
	 * trb->buffer fields.
	 */
	wmb();

	/*
	 * For DMULT mode we can set the transfer ring address only once,
	 * after enabling the endpoint.
	 */
	if (priv_ep->flags & EP_UPDATE_EP_TRBADDR) {
		/*
		 * As long as SW is not ready to handle the OUT transfer,
		 * the ISO OUT endpoint should stay disabled
		 * (EP_CFG.ENABLE = 0).
		 * EP_CFG_ENABLE must be set before updating ep_traddr.
		 */
		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir &&
		    !(priv_ep->flags & EP_QUIRK_ISO_OUT_EN)) {
			priv_ep->flags |= EP_QUIRK_ISO_OUT_EN;
			cdns3_set_register_bit(&priv_dev->regs->ep_cfg,
					       EP_CFG_ENABLE);
		}

		writel(EP_TRADDR_TRADDR(priv_ep->trb_pool_dma +
					priv_req->start_trb * TRB_SIZE),
		       &priv_dev->regs->ep_traddr);

		priv_ep->flags &= ~EP_UPDATE_EP_TRBADDR;
	}

	if (!priv_ep->wa1_set && !(priv_ep->flags & EP_STALLED)) {
		trace_cdns3_ring(priv_ep);
		/* clearing TRBERR and EP_STS_DESCMIS before setting DRDY */
		writel(EP_STS_TRBERR | EP_STS_DESCMIS, &priv_dev->regs->ep_sts);
		writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
		cdns3_rearm_drdy_if_needed(priv_ep);
		trace_cdns3_doorbell_epx(priv_ep->name,
					 readl(&priv_dev->regs->ep_traddr));
	}

	/* WORKAROUND for transition to L0 */
	__cdns3_gadget_wakeup(priv_dev);

	return 0;
}

void cdns3_set_hw_configuration(struct cdns3_device *priv_dev)
{
	struct cdns3_endpoint *priv_ep;
	struct usb_ep *ep;

	if (priv_dev->hw_configured_flag)
		return;

	writel(USB_CONF_CFGSET, &priv_dev->regs->usb_conf);

	cdns3_set_register_bit(&priv_dev->regs->usb_conf,
			       USB_CONF_U1EN | USB_CONF_U2EN);

	priv_dev->hw_configured_flag = 1;

	list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) {
		if (ep->enabled) {
			priv_ep = ep_to_cdns3_ep(ep);
			cdns3_start_all_request(priv_dev, priv_ep);
		}
	}

	cdns3_allow_enable_l1(priv_dev, 1);
}

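/*
 * Worked example for cdns3_trb_handled() below (illustrative, eight-TRB
 * ring): for a TD occupying TRBs 2..4 with DQ = 3 and CI = 5, DQ lies
 * inside the TD and behind CI, which matches "Case 1", so the TRB at the
 * dequeue position is reported as handled.
 */
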
/**
 * cdns3_trb_handled - check whether trb has been handled by DMA
 *
 * @priv_ep: extended endpoint object.
 * @priv_req: request object for checking
 *
 * Endpoint must be selected before invoking this function.
 *
 * Returns false if request has not been handled by DMA, else returns true.
 *
 * SR - start ring
 * ER - end ring
 * DQ = priv_ep->dequeue - dequeue position
 * EQ = priv_ep->enqueue - enqueue position
 * ST = priv_req->start_trb - index of first TRB in transfer ring
 * ET = priv_req->end_trb - index of last TRB in transfer ring
 * CI = current_index - index of TRB processed by DMA.
 *
 * As a first step, we check if the TRB is between ST and ET.
 * Then we check if the cycle bit for index priv_ep->dequeue
 * is correct.
 *
 * some rules:
 * 1. priv_ep->dequeue never equals current_index.
 * 2. priv_ep->enqueue never exceeds priv_ep->dequeue
 * 3. exception: priv_ep->enqueue == priv_ep->dequeue
 *    and priv_ep->free_trbs is zero.
 *    This case indicates that the TR is full.
 *
 * In the two cases below the request has been handled.
 * Case 1 - priv_ep->dequeue < current_index
 *      SR ... EQ ... DQ ... CI ... ER
 *      SR ... DQ ... CI ... EQ ... ER
 *
 * Case 2 - priv_ep->dequeue > current_index
 * This situation takes place when CI goes through the LINK TRB at the end
 * of the transfer ring.
 *      SR ... CI ... EQ ... DQ ... ER
 */
static bool cdns3_trb_handled(struct cdns3_endpoint *priv_ep,
			      struct cdns3_request *priv_req)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct cdns3_trb *trb;
	int current_index = 0;
	int handled = 0;
	int doorbell;

	current_index = cdns3_get_dma_pos(priv_dev, priv_ep);
	doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);

	/* current trb doesn't belong to this request */
	if (priv_req->start_trb < priv_req->end_trb) {
		if (priv_ep->dequeue > priv_req->end_trb)
			goto finish;

		if (priv_ep->dequeue < priv_req->start_trb)
			goto finish;
	}

	if ((priv_req->start_trb > priv_req->end_trb) &&
	    (priv_ep->dequeue > priv_req->end_trb) &&
	    (priv_ep->dequeue < priv_req->start_trb))
		goto finish;

	if ((priv_req->start_trb == priv_req->end_trb) &&
	    (priv_ep->dequeue != priv_req->end_trb))
		goto finish;

	trb = &priv_ep->trb_pool[priv_ep->dequeue];

	if ((le32_to_cpu(trb->control) & TRB_CYCLE) != priv_ep->ccs)
		goto finish;

	if (doorbell == 1 && current_index == priv_ep->dequeue)
		goto finish;

	/* The corner case for TRBS_PER_SEGMENT equal to 2. */
	if (TRBS_PER_SEGMENT == 2 && priv_ep->type != USB_ENDPOINT_XFER_ISOC) {
		handled = 1;
		goto finish;
	}

	if (priv_ep->enqueue == priv_ep->dequeue &&
	    priv_ep->free_trbs == 0) {
		handled = 1;
	} else if (priv_ep->dequeue < current_index) {
		if ((current_index == (priv_ep->num_trbs - 1)) &&
		    !priv_ep->dequeue)
			goto finish;

		handled = 1;
	} else if (priv_ep->dequeue > current_index) {
		handled = 1;
	}

finish:
	trace_cdns3_request_handled(priv_req, current_index, handled);

	return handled;
}

static void cdns3_transfer_completed(struct cdns3_device *priv_dev,
				     struct cdns3_endpoint *priv_ep)
{
	struct cdns3_request *priv_req;
	struct usb_request *request;
	struct cdns3_trb *trb;
	bool request_handled = false;
	bool transfer_end = false;

	while (!list_empty(&priv_ep->pending_req_list)) {
		request = cdns3_next_request(&priv_ep->pending_req_list);
		priv_req = to_cdns3_request(request);

		trb = priv_ep->trb_pool + priv_ep->dequeue;

		/* Request was dequeued and TRB was changed to TRB_LINK. */
		if (TRB_FIELD_TO_TYPE(le32_to_cpu(trb->control)) == TRB_LINK) {
			trace_cdns3_complete_trb(priv_ep, trb);
			cdns3_move_deq_to_next_trb(priv_req);
		}

		if (!request->stream_id) {
			/*
			 * Re-select endpoint. It could be changed by another
			 * CPU during handling usb_gadget_giveback_request.
			 */
			cdns3_select_ep(priv_dev, priv_ep->endpoint.address);

			while (cdns3_trb_handled(priv_ep, priv_req)) {
				priv_req->finished_trb++;
				if (priv_req->finished_trb >= priv_req->num_of_trb)
					request_handled = true;

				trb = priv_ep->trb_pool + priv_ep->dequeue;
				trace_cdns3_complete_trb(priv_ep, trb);

				if (!transfer_end)
					request->actual +=
						TRB_LEN(le32_to_cpu(trb->length));

				if (priv_req->num_of_trb > 1 &&
				    le32_to_cpu(trb->control) & TRB_SMM)
					transfer_end = true;

				cdns3_ep_inc_deq(priv_ep);
			}

			if (request_handled) {
				cdns3_gadget_giveback(priv_ep, priv_req, 0);
				request_handled = false;
				transfer_end = false;
			} else {
				goto prepare_next_td;
			}

			if (priv_ep->type != USB_ENDPOINT_XFER_ISOC &&
			    TRBS_PER_SEGMENT == 2)
				break;
		} else {
			/*
			 * Re-select endpoint. It could be changed by another
			 * CPU during handling usb_gadget_giveback_request.
			 */
			cdns3_select_ep(priv_dev, priv_ep->endpoint.address);

			trb = priv_ep->trb_pool;
			trace_cdns3_complete_trb(priv_ep, trb);

			if (trb != priv_req->trb)
				dev_warn(priv_dev->dev,
					 "request_trb=0x%p, queue_trb=0x%p\n",
					 priv_req->trb, trb);

			request->actual += TRB_LEN(le32_to_cpu(trb->length));

			if (!request->num_sgs ||
			    (request->num_sgs == (priv_ep->stream_sg_idx + 1))) {
				priv_ep->stream_sg_idx = 0;
				cdns3_gadget_giveback(priv_ep, priv_req, 0);
			} else {
				priv_ep->stream_sg_idx++;
				cdns3_ep_run_stream_transfer(priv_ep, request);
			}
			break;
		}
	}
	priv_ep->flags &= ~EP_PENDING_REQUEST;

prepare_next_td:
	if (!(priv_ep->flags & EP_STALLED) &&
	    !(priv_ep->flags & EP_STALL_PENDING))
		cdns3_start_all_request(priv_dev, priv_ep);
}

void cdns3_rearm_transfer(struct cdns3_endpoint *priv_ep, u8 rearm)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;

	cdns3_wa1_restore_cycle_bit(priv_ep);

	if (rearm) {
		trace_cdns3_ring(priv_ep);

		/* Cycle Bit must be updated before arming DMA. */
		wmb();
		writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);

		__cdns3_gadget_wakeup(priv_dev);

		trace_cdns3_doorbell_epx(priv_ep->name,
					 readl(&priv_dev->regs->ep_traddr));
	}
}

static void cdns3_reprogram_tdl(struct cdns3_endpoint *priv_ep)
{
	u16 tdl = priv_ep->pending_tdl;
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;

	if (tdl > EP_CMD_TDL_MAX) {
		tdl = EP_CMD_TDL_MAX;
		priv_ep->pending_tdl -= EP_CMD_TDL_MAX;
	} else {
		priv_ep->pending_tdl = 0;
	}

	writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL, &priv_dev->regs->ep_cmd);
}

/**
 * cdns3_check_ep_interrupt_proceed - Processes interrupt related to endpoint
 * @priv_ep: endpoint object
 *
 * Returns 0
 */
static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	u32 ep_sts_reg;
	struct usb_request *deferred_request;
	struct usb_request *pending_request;
	u32 tdl = 0;

	cdns3_select_ep(priv_dev, priv_ep->endpoint.address);

	trace_cdns3_epx_irq(priv_dev, priv_ep);

	ep_sts_reg = readl(&priv_dev->regs->ep_sts);
	writel(ep_sts_reg, &priv_dev->regs->ep_sts);

	if ((ep_sts_reg & EP_STS_PRIME) && priv_ep->use_streams) {
		bool dbusy = !!(ep_sts_reg & EP_STS_DBUSY);

		tdl = cdns3_get_tdl(priv_dev);

		/*
		 * Continue the previous transfer:
		 * There is some racing between ERDY and PRIME. The device
		 * sends ERDY and almost at the same time the host sends
		 * PRIME. It causes the host to ignore the ERDY packet, so
		 * the driver has to send it again.
		 */
		if (tdl && (dbusy || !EP_STS_BUFFEMPTY(ep_sts_reg) ||
		    EP_STS_HOSTPP(ep_sts_reg))) {
			writel(EP_CMD_ERDY |
			       EP_CMD_ERDY_SID(priv_ep->last_stream_id),
			       &priv_dev->regs->ep_cmd);
			ep_sts_reg &= ~(EP_STS_MD_EXIT | EP_STS_IOC);
		} else {
			priv_ep->prime_flag = true;

			pending_request = cdns3_next_request(&priv_ep->pending_req_list);
			deferred_request = cdns3_next_request(&priv_ep->deferred_req_list);

			if (deferred_request && !pending_request) {
				cdns3_start_all_request(priv_dev, priv_ep);
			}
		}
	}

	if (ep_sts_reg & EP_STS_TRBERR) {
		if (priv_ep->flags & EP_STALL_PENDING &&
		    !(ep_sts_reg & EP_STS_DESCMIS &&
		    priv_dev->dev_ver < DEV_VER_V2)) {
			cdns3_ep_stall_flush(priv_ep);
		}

		/*
		 * For isochronous transfers the driver completes the request
		 * on IOC or on TRBERR. IOC appears only when the device
		 * receives an OUT data packet. If the host disables the
		 * stream or loses some packets, then the only way to finish
		 * all queued transfers is to do it on the TRBERR event.
		 */
		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC &&
		    !priv_ep->wa1_set) {
			if (!priv_ep->dir) {
				u32 ep_cfg = readl(&priv_dev->regs->ep_cfg);

				ep_cfg &= ~EP_CFG_ENABLE;
				writel(ep_cfg, &priv_dev->regs->ep_cfg);
				priv_ep->flags &= ~EP_QUIRK_ISO_OUT_EN;
			}
			cdns3_transfer_completed(priv_dev, priv_ep);
		} else if (!(priv_ep->flags & EP_STALLED) &&
			   !(priv_ep->flags & EP_STALL_PENDING)) {
			if (priv_ep->flags & EP_DEFERRED_DRDY) {
				priv_ep->flags &= ~EP_DEFERRED_DRDY;
				cdns3_start_all_request(priv_dev, priv_ep);
			} else {
				cdns3_rearm_transfer(priv_ep,
						     priv_ep->wa1_set);
			}
		}
	}

	if ((ep_sts_reg & EP_STS_IOC) || (ep_sts_reg & EP_STS_ISP) ||
	    (ep_sts_reg & EP_STS_IOT)) {
		if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN) {
			if (ep_sts_reg & EP_STS_ISP)
				priv_ep->flags |= EP_QUIRK_END_TRANSFER;
			else
				priv_ep->flags &= ~EP_QUIRK_END_TRANSFER;
		}

		if (!priv_ep->use_streams) {
			if ((ep_sts_reg & EP_STS_IOC) ||
			    (ep_sts_reg & EP_STS_ISP)) {
				cdns3_transfer_completed(priv_dev, priv_ep);
			} else if ((priv_ep->flags & EP_TDLCHK_EN) &&
				   priv_ep->pending_tdl) {
				/* handle IOT with pending tdl */
				cdns3_reprogram_tdl(priv_ep);
			}
		} else if (priv_ep->dir == USB_DIR_OUT) {
			priv_ep->ep_sts_pending |= ep_sts_reg;
		} else if (ep_sts_reg & EP_STS_IOT) {
			cdns3_transfer_completed(priv_dev, priv_ep);
		}
	}

	/*
	 * The MD_EXIT interrupt is set when a stream capable endpoint exits
	 * the MOVE DATA state of the Bulk IN/OUT stream protocol state
	 * machine.
	 */
	if (priv_ep->dir == USB_DIR_OUT && (ep_sts_reg & EP_STS_MD_EXIT) &&
	    (priv_ep->ep_sts_pending & EP_STS_IOT) && priv_ep->use_streams) {
		priv_ep->ep_sts_pending = 0;
		cdns3_transfer_completed(priv_dev, priv_ep);
	}

	/*
	 * WA2: this condition should only be met when
	 * priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET or
	 * priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN.
	 * In other cases this interrupt will be disabled.
	 */
	if (ep_sts_reg & EP_STS_DESCMIS && priv_dev->dev_ver < DEV_VER_V2 &&
	    !(priv_ep->flags & EP_STALLED))
		cdns3_wa2_descmissing_packet(priv_ep);

	return 0;
}

static void cdns3_disconnect_gadget(struct cdns3_device *priv_dev)
{
	if (priv_dev->gadget_driver && priv_dev->gadget_driver->disconnect)
		priv_dev->gadget_driver->disconnect(&priv_dev->gadget);
}

/**
 * cdns3_check_usb_interrupt_proceed - Processes interrupt related to device
 * @priv_dev: extended gadget object
 * @usb_ists: bitmap representation of device's reported interrupts
 *            (usb_ists register value)
 */
static void cdns3_check_usb_interrupt_proceed(struct cdns3_device *priv_dev,
					      u32 usb_ists)
__must_hold(&priv_dev->lock)
{
	int speed = 0;

	trace_cdns3_usb_irq(priv_dev, usb_ists);
	if (usb_ists & USB_ISTS_L1ENTI) {
		/*
		 * WORKAROUND: the CDNS3 controller has an issue with hardware
		 * resuming from L1. To fix it, if any DMA transfer is pending,
		 * the driver must start driving the resume signal immediately.
		 */
		if (readl(&priv_dev->regs->drbl))
			__cdns3_gadget_wakeup(priv_dev);
	}

	/* Connection detected */
	if (usb_ists & (USB_ISTS_CON2I | USB_ISTS_CONI)) {
		speed = cdns3_get_speed(priv_dev);
		priv_dev->gadget.speed = speed;
		usb_gadget_set_state(&priv_dev->gadget, USB_STATE_POWERED);
		cdns3_ep0_config(priv_dev);
	}

	/* Disconnection detected */
	if (usb_ists & (USB_ISTS_DIS2I | USB_ISTS_DISI)) {
		spin_unlock(&priv_dev->lock);
		cdns3_disconnect_gadget(priv_dev);
		spin_lock(&priv_dev->lock);
		priv_dev->gadget.speed = USB_SPEED_UNKNOWN;
		usb_gadget_set_state(&priv_dev->gadget, USB_STATE_NOTATTACHED);
		cdns3_hw_reset_eps_config(priv_dev);
	}

	if (usb_ists & (USB_ISTS_L2ENTI | USB_ISTS_U3ENTI)) {
		if (priv_dev->gadget_driver &&
		    priv_dev->gadget_driver->suspend) {
			spin_unlock(&priv_dev->lock);
			priv_dev->gadget_driver->suspend(&priv_dev->gadget);
			spin_lock(&priv_dev->lock);
		}
	}

	if (usb_ists & (USB_ISTS_L2EXTI | USB_ISTS_U3EXTI)) {
		if (priv_dev->gadget_driver &&
		    priv_dev->gadget_driver->resume) {
			spin_unlock(&priv_dev->lock);
			priv_dev->gadget_driver->resume(&priv_dev->gadget);
			spin_lock(&priv_dev->lock);
		}
	}

	/* reset */
	if (usb_ists & (USB_ISTS_UWRESI | USB_ISTS_UHRESI | USB_ISTS_U2RESI)) {
		if (priv_dev->gadget_driver) {
			spin_unlock(&priv_dev->lock);
			usb_gadget_udc_reset(&priv_dev->gadget,
					     priv_dev->gadget_driver);
			spin_lock(&priv_dev->lock);

			/* read again to check the actual speed */
			speed = cdns3_get_speed(priv_dev);
			priv_dev->gadget.speed = speed;
			cdns3_hw_reset_eps_config(priv_dev);
			cdns3_ep0_config(priv_dev);
		}
	}
}

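/*
 * IRQ handling is split in two below: the hard handler only masks the
 * detected interrupt sources and returns IRQ_WAKE_THREAD, while the
 * threaded handler does the real work under the controller lock and
 * re-enables the interrupts when it is done.
 */
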
1893 /**
1894  * cdns3_device_thread_irq_handler - interrupt handler for device part
1895  * of controller
1896  *
1897  * @irq: irq number for cdns3 core device
1898  * @data: structure of cdns3
1899  *
1900  * Returns IRQ_HANDLED or IRQ_NONE
1901  */
1902 static irqreturn_t cdns3_device_thread_irq_handler(int irq, void *data)
1903 {
1904 	struct cdns3_device *priv_dev = data;
1905 	irqreturn_t ret = IRQ_NONE;
1906 	unsigned long flags;
1907 	unsigned int bit;
1908 	unsigned long reg;
1909 
1910 	spin_lock_irqsave(&priv_dev->lock, flags);
1911 
1912 	reg = readl(&priv_dev->regs->usb_ists);
1913 	if (reg) {
1914 		writel(reg, &priv_dev->regs->usb_ists);
1915 		writel(USB_IEN_INIT, &priv_dev->regs->usb_ien);
1916 		cdns3_check_usb_interrupt_proceed(priv_dev, reg);
1917 		ret = IRQ_HANDLED;
1918 	}
1919 
1920 	reg = readl(&priv_dev->regs->ep_ists);
1921 
1922 	/* handle default endpoint OUT */
1923 	if (reg & EP_ISTS_EP_OUT0) {
1924 		cdns3_check_ep0_interrupt_proceed(priv_dev, USB_DIR_OUT);
1925 		ret = IRQ_HANDLED;
1926 	}
1927 
1928 	/* handle default endpoint IN */
1929 	if (reg & EP_ISTS_EP_IN0) {
1930 		cdns3_check_ep0_interrupt_proceed(priv_dev, USB_DIR_IN);
1931 		ret = IRQ_HANDLED;
1932 	}
1933 
1934 	/* check if the interrupt is from a non-default endpoint; if not, exit */
1935 	reg &= ~(EP_ISTS_EP_OUT0 | EP_ISTS_EP_IN0);
1936 	if (!reg)
1937 		goto irqend;
1938 
1939 	for_each_set_bit(bit, &reg,
1940 			 sizeof(u32) * BITS_PER_BYTE) {
1941 		cdns3_check_ep_interrupt_proceed(priv_dev->eps[bit]);
1942 		ret = IRQ_HANDLED;
1943 	}
1944 
1945 	if (priv_dev->dev_ver < DEV_VER_V2 && priv_dev->using_streams)
1946 		cdns3_wa2_check_outq_status(priv_dev);
1947 
1948 irqend:
1949 	writel(~0, &priv_dev->regs->ep_ien);
1950 	spin_unlock_irqrestore(&priv_dev->lock, flags);
1951 
1952 	return ret;
1953 }
1954 
1955 /**
1956  * cdns3_ep_onchip_buffer_reserve - Try to reserve onchip buf for EP
1957  *
1958  * The real reservation will occur during write to EP_CFG register,
1959  * this function is used to check if the 'size' reservation is allowed.
1960  *
1961  * @priv_dev: extended gadget object
1962  * @size: the size (in KB) the EP would like to allocate
1963  * @is_in: endpoint direction
1964  *
1965  * Return 0 if the required size can be met, or a negative value on failure
1966  */
1967 static int cdns3_ep_onchip_buffer_reserve(struct cdns3_device *priv_dev,
1968 					  int size, int is_in)
1969 {
1970 	int remained;
1971 
1972 	/* 2KB are reserved for EP0 */
1973 	remained = priv_dev->onchip_buffers - priv_dev->onchip_used_size - 2;
1974 
1975 	if (is_in) {
1976 		if (remained < size)
1977 			return -EPERM;
1978 
1979 		priv_dev->onchip_used_size += size;
1980 	} else {
1981 		int required;
1982 
1983 		/*
1984 		 * All OUT EPs share the same chunk of onchip memory, so
1985 		 * the driver checks if it has already assigned enough buffers.
1986 		 */
1987 		if (priv_dev->out_mem_is_allocated >= size)
1988 			return 0;
1989 
1990 		required = size - priv_dev->out_mem_is_allocated;
1991 
1992 		if (required > remained)
1993 			return -EPERM;
1994 
1995 		priv_dev->out_mem_is_allocated += required;
1996 		priv_dev->onchip_used_size += required;
1997 	}
1998 
1999 	return 0;
2000 }
2001 
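/*
 * Editor's note: a worked example (with assumed numbers) of the accounting
 * above. Suppose onchip_buffers = 16 (KB); 2 KB stay reserved for EP0, so
 * 14 KB remain at first. An IN endpoint consumes its full size, while OUT
 * endpoints share one region and only the difference to the largest
 * request so far is reserved. The values below are hypothetical.
 */
#if 0
static void example_out_reservation(struct cdns3_device *priv_dev)
{
	int size = 4;	/* a new OUT endpoint asks for 4 KB */
	int required;

	priv_dev->out_mem_is_allocated = 3;	/* 3 KB already assigned */
	required = size - priv_dev->out_mem_is_allocated;	/* 1 KB more */
	priv_dev->out_mem_is_allocated += required;	/* region is now 4 KB */
	priv_dev->onchip_used_size += required;	/* total grows by 1 KB only */
}
#endif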
2002 static void cdns3_configure_dmult(struct cdns3_device *priv_dev,
2003 				  struct cdns3_endpoint *priv_ep)
2004 {
2005 	struct cdns3_usb_regs __iomem *regs = priv_dev->regs;
2006 
2007 	/* For dev_ver > DEV_VER_V2 DMULT is configured per endpoint */
2008 	if (priv_dev->dev_ver <= DEV_VER_V2)
2009 		writel(USB_CONF_DMULT, &regs->usb_conf);
2010 
2011 	if (priv_dev->dev_ver == DEV_VER_V2)
2012 		writel(USB_CONF2_EN_TDL_TRB, &regs->usb_conf2);
2013 
2014 	if (priv_dev->dev_ver >= DEV_VER_V3 && priv_ep) {
2015 		u32 mask;
2016 
2017 		if (priv_ep->dir)
2018 			mask = BIT(priv_ep->num + 16);
2019 		else
2020 			mask = BIT(priv_ep->num);
2021 
2022 		if (priv_ep->type != USB_ENDPOINT_XFER_ISOC && !priv_ep->dir) {
2023 			cdns3_set_register_bit(&regs->tdl_from_trb, mask);
2024 			cdns3_set_register_bit(&regs->tdl_beh, mask);
2025 			cdns3_set_register_bit(&regs->tdl_beh2, mask);
2026 			cdns3_set_register_bit(&regs->dma_adv_td, mask);
2027 		}
2028 
2029 		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir)
2030 			cdns3_set_register_bit(&regs->tdl_from_trb, mask);
2031 
2032 		cdns3_set_register_bit(&regs->dtrans, mask);
2033 	}
2034 }
2035 
2036 /**
2037  * cdns3_ep_config - Configure hardware endpoint
2038  * @priv_ep: extended endpoint object
2039  * @enable: set EP_CFG_ENABLE bit in ep_cfg register.
2040  */
2041 int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
2042 {
2043 	bool is_iso_ep = (priv_ep->type == USB_ENDPOINT_XFER_ISOC);
2044 	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
2045 	u32 bEndpointAddress = priv_ep->num | priv_ep->dir;
2046 	u32 max_packet_size = 0;
2047 	u8 maxburst = 0;
2048 	u32 ep_cfg = 0;
2049 	u8 buffering;
2050 	u8 mult = 0;
2051 	int ret;
2052 
2053 	buffering = CDNS3_EP_BUF_SIZE - 1;
2054 
2055 	cdns3_configure_dmult(priv_dev, priv_ep);
2056 
2057 	switch (priv_ep->type) {
2058 	case USB_ENDPOINT_XFER_INT:
2059 		ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_INT);
2060 
2061 		if (priv_dev->dev_ver >= DEV_VER_V2 && !priv_ep->dir)
2062 			ep_cfg |= EP_CFG_TDL_CHK;
2063 		break;
2064 	case USB_ENDPOINT_XFER_BULK:
2065 		ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_BULK);
2066 
2067 		if (priv_dev->dev_ver >= DEV_VER_V2 && !priv_ep->dir)
2068 			ep_cfg |= EP_CFG_TDL_CHK;
2069 		break;
2070 	default:
2071 		ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_ISOC);
2072 		mult = CDNS3_EP_ISO_HS_MULT - 1;
2073 		buffering = mult + 1;
2074 	}
2075 
2076 	switch (priv_dev->gadget.speed) {
2077 	case USB_SPEED_FULL:
2078 		max_packet_size = is_iso_ep ? 1023 : 64;
2079 		break;
2080 	case USB_SPEED_HIGH:
2081 		max_packet_size = is_iso_ep ? 1024 : 512;
2082 		break;
2083 	case USB_SPEED_SUPER:
2084 		/* This is a limitation the driver itself assumes. */
2085 		mult = 0;
2086 		max_packet_size = 1024;
2087 		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC) {
2088 			maxburst = CDNS3_EP_ISO_SS_BURST - 1;
2089 			buffering = (mult + 1) *
2090 				    (maxburst + 1);
2091 
2092 			if (priv_ep->interval > 1)
2093 				buffering++;
2094 		} else {
2095 			maxburst = CDNS3_EP_BUF_SIZE - 1;
2096 		}
2097 		break;
2098 	default:
2099 		/* all other speeds are not supported */
2100 		return -EINVAL;
2101 	}
2102 
2103 	if (max_packet_size == 1024)
2104 		priv_ep->trb_burst_size = 128;
2105 	else if (max_packet_size >= 512)
2106 		priv_ep->trb_burst_size = 64;
2107 	else
2108 		priv_ep->trb_burst_size = 16;
2109 
2110 	/* onchip buffer is only allocated before configuration */
2111 	if (!priv_dev->hw_configured_flag) {
2112 		ret = cdns3_ep_onchip_buffer_reserve(priv_dev, buffering + 1,
2113 						     !!priv_ep->dir);
2114 		if (ret) {
2115 			dev_err(priv_dev->dev, "onchip mem is full, ep is invalid\n");
2116 			return ret;
2117 		}
2118 	}
2119 
2120 	if (enable)
2121 		ep_cfg |= EP_CFG_ENABLE;
2122 
2123 	if (priv_ep->use_streams && priv_dev->gadget.speed >= USB_SPEED_SUPER) {
2124 		if (priv_dev->dev_ver >= DEV_VER_V3) {
2125 			u32 mask = BIT(priv_ep->num + (priv_ep->dir ? 16 : 0));
2126 
2127 			/*
2128 			 * Stream capable endpoints are handled by using ep_tdl
2129 			 * register. Other endpoints use TDL from TRB feature.
2130 			 */
2131 			cdns3_clear_register_bit(&priv_dev->regs->tdl_from_trb,
2132 						 mask);
2133 		}
2134 
2135 		/* Enable Stream Bit TDL chk and SID chk */
2136 		ep_cfg |= EP_CFG_STREAM_EN | EP_CFG_TDL_CHK | EP_CFG_SID_CHK;
2137 	}
2138 
2139 	ep_cfg |= EP_CFG_MAXPKTSIZE(max_packet_size) |
2140 		  EP_CFG_MULT(mult) |
2141 		  EP_CFG_BUFFERING(buffering) |
2142 		  EP_CFG_MAXBURST(maxburst);
2143 
2144 	cdns3_select_ep(priv_dev, bEndpointAddress);
2145 	writel(ep_cfg, &priv_dev->regs->ep_cfg);
2146 	priv_ep->flags |= EP_CONFIGURED;
2147 
2148 	dev_dbg(priv_dev->dev, "Configure %s: with val %08x\n",
2149 		priv_ep->name, ep_cfg);
2150 
2151 	return 0;
2152 }
2153 
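/*
 * Editor's note: an illustrative sketch of the register value composed by
 * cdns3_ep_config() above, for an assumed SuperSpeed bulk IN endpoint:
 * mult = 0, maxburst and buffering derived from CDNS3_EP_BUF_SIZE, and
 * max_packet_size = 1024. The field macros are the ones used above; the
 * concrete endpoint is hypothetical.
 */
#if 0
static u32 example_ss_bulk_in_ep_cfg(void)
{
	return EP_CFG_EPTYPE(USB_ENDPOINT_XFER_BULK) |
	       EP_CFG_MAXPKTSIZE(1024) |
	       EP_CFG_MULT(0) |
	       EP_CFG_BUFFERING(CDNS3_EP_BUF_SIZE - 1) |
	       EP_CFG_MAXBURST(CDNS3_EP_BUF_SIZE - 1) |
	       EP_CFG_ENABLE;
}
#endif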
2154 /* Find correct direction for HW endpoint according to description */
2155 static int cdns3_ep_dir_is_correct(struct usb_endpoint_descriptor *desc,
2156 				   struct cdns3_endpoint *priv_ep)
2157 {
2158 	return (priv_ep->endpoint.caps.dir_in && usb_endpoint_dir_in(desc)) ||
2159 	       (priv_ep->endpoint.caps.dir_out && usb_endpoint_dir_out(desc));
2160 }
2161 
2162 static struct
2163 cdns3_endpoint *cdns3_find_available_ep(struct cdns3_device *priv_dev,
2164 					struct usb_endpoint_descriptor *desc)
2165 {
2166 	struct usb_ep *ep;
2167 	struct cdns3_endpoint *priv_ep;
2168 
2169 	list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) {
2170 		unsigned long num;
2171 		int ret;
2172 		/* ep name pattern is like epXin or epXout */
2173 		char c[2] = {ep->name[2], '\0'};
2174 
2175 		ret = kstrtoul(c, 10, &num);
2176 		if (ret)
2177 			return ERR_PTR(ret);
2178 
2179 		priv_ep = ep_to_cdns3_ep(ep);
2180 		if (cdns3_ep_dir_is_correct(desc, priv_ep)) {
2181 			if (!(priv_ep->flags & EP_CLAIMED)) {
2182 				priv_ep->num = num;
2183 				return priv_ep;
2184 			}
2185 		}
2186 	}
2187 
2188 	return ERR_PTR(-ENOENT);
2189 }
2190 
2191 /*
2192  * Cadence IP has one limitation: all endpoints must be configured
2193  * (Type & MaxPacketSize) before setting the configuration through the
2194  * hardware register, which means we can't change an endpoint's
2195  * configuration after set_configuration.
2196  *
2197  * This function sets the EP_CLAIMED flag, which is added when the gadget
2198  * driver uses usb_ep_autoconfig to configure a specific endpoint;
2199  * when the udc driver receives a set_configuration request,
2200  * it goes through all claimed endpoints and configures them
2201  * accordingly.
2202  *
2203  * At usb_ep_ops.enable/disable, we only enable and disable the endpoint
2204  * through the ep_cfg register, which can be changed after set_configuration,
2205  * and do some software operations accordingly.
2206  */
2207 static struct
2208 usb_ep *cdns3_gadget_match_ep(struct usb_gadget *gadget,
2209 			      struct usb_endpoint_descriptor *desc,
2210 			      struct usb_ss_ep_comp_descriptor *comp_desc)
2211 {
2212 	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
2213 	struct cdns3_endpoint *priv_ep;
2214 	unsigned long flags;
2215 
2216 	priv_ep = cdns3_find_available_ep(priv_dev, desc);
2217 	if (IS_ERR(priv_ep)) {
2218 		dev_err(priv_dev->dev, "no available ep\n");
2219 		return NULL;
2220 	}
2221 
2222 	dev_dbg(priv_dev->dev, "match endpoint: %s\n", priv_ep->name);
2223 
2224 	spin_lock_irqsave(&priv_dev->lock, flags);
2225 	priv_ep->endpoint.desc = desc;
2226 	priv_ep->dir = usb_endpoint_dir_in(desc) ? USB_DIR_IN : USB_DIR_OUT;
2227 	priv_ep->type = usb_endpoint_type(desc);
2228 	priv_ep->flags |= EP_CLAIMED;
2229 	priv_ep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0;
2230 
2231 	spin_unlock_irqrestore(&priv_dev->lock, flags);
2232 	return &priv_ep->endpoint;
2233 }
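
/*
 * Editor's note: a sketch of how the hook above is reached. During function
 * binding the function driver asks the gadget core for endpoints;
 * usb_ep_autoconfig() (standard gadget API) walks gadget->ep_list and, for
 * this UDC, ends up in cdns3_gadget_match_ep() via gadget->ops->match_ep.
 * The descriptor below is a hypothetical bulk IN endpoint.
 */
#if 0
static struct usb_endpoint_descriptor example_bulk_in_desc = {
	.bLength		= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType	= USB_DT_ENDPOINT,
	.bEndpointAddress	= USB_DIR_IN,
	.bmAttributes		= USB_ENDPOINT_XFER_BULK,
};

static struct usb_ep *example_bind(struct usb_gadget *gadget)
{
	/* in a function driver's bind(): */
	return usb_ep_autoconfig(gadget, &example_bulk_in_desc);
}
#endif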
2234 
2235 /**
2236  * cdns3_gadget_ep_alloc_request - Allocates request
2237  * @ep: endpoint object associated with request
2238  * @gfp_flags: gfp flags
2239  *
2240  * Returns allocated request address, NULL on allocation error
2241  */
2242 struct usb_request *cdns3_gadget_ep_alloc_request(struct usb_ep *ep,
2243 						  gfp_t gfp_flags)
2244 {
2245 	struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
2246 	struct cdns3_request *priv_req;
2247 
2248 	priv_req = kzalloc(sizeof(*priv_req), gfp_flags);
2249 	if (!priv_req)
2250 		return NULL;
2251 
2252 	priv_req->priv_ep = priv_ep;
2253 
2254 	trace_cdns3_alloc_request(priv_req);
2255 	return &priv_req->request;
2256 }
2257 
2258 /**
2259  * cdns3_gadget_ep_free_request - Free memory occupied by request
2260  * @ep: endpoint object associated with request
2261  * @request: request to free memory
2262  */
2263 void cdns3_gadget_ep_free_request(struct usb_ep *ep,
2264 				  struct usb_request *request)
2265 {
2266 	struct cdns3_request *priv_req = to_cdns3_request(request);
2267 
2268 	if (priv_req->aligned_buf)
2269 		priv_req->aligned_buf->in_use = 0;
2270 
2271 	trace_cdns3_free_request(priv_req);
2272 	kfree(priv_req);
2273 }
2274 
2275 /**
2276  * cdns3_gadget_ep_enable - Enable endpoint
2277  * @ep: endpoint object
2278  * @desc: endpoint descriptor
2279  *
2280  * Returns 0 on success, error code elsewhere
2281  */
2282 static int cdns3_gadget_ep_enable(struct usb_ep *ep,
2283 				  const struct usb_endpoint_descriptor *desc)
2284 {
2285 	struct cdns3_endpoint *priv_ep;
2286 	struct cdns3_device *priv_dev;
2287 	const struct usb_ss_ep_comp_descriptor *comp_desc;
2288 	u32 reg = EP_STS_EN_TRBERREN;
2289 	u32 bEndpointAddress;
2290 	unsigned long flags;
2291 	int enable = 1;
2292 	int ret = 0;
2293 	int val;
2294 
2295 	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
2296 		pr_debug("usbss: invalid parameters\n");
2297 		return -EINVAL;
2298 	}
2299 
2300 	priv_ep = ep_to_cdns3_ep(ep);
2301 	priv_dev = priv_ep->cdns3_dev;
2302 	comp_desc = priv_ep->endpoint.comp_desc;
2303 
2304 	if (!desc->wMaxPacketSize) {
2305 		dev_err(priv_dev->dev, "usbss: missing wMaxPacketSize\n");
2306 		return -EINVAL;
2307 	}
2308 
2309 	if (dev_WARN_ONCE(priv_dev->dev, priv_ep->flags & EP_ENABLED,
2310 			  "%s is already enabled\n", priv_ep->name))
2311 		return 0;
2312 
2313 	spin_lock_irqsave(&priv_dev->lock, flags);
2314 
2315 	priv_ep->endpoint.desc = desc;
2316 	priv_ep->type = usb_endpoint_type(desc);
2317 	priv_ep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0;
2318 
2319 	if (priv_ep->interval > ISO_MAX_INTERVAL &&
2320 	    priv_ep->type == USB_ENDPOINT_XFER_ISOC) {
2321 		dev_err(priv_dev->dev, "Driver is limited to a period of %d\n",
2322 			ISO_MAX_INTERVAL);
2323 
2324 		ret = -EINVAL;
2325 		goto exit;
2326 	}
2327 
2328 	bEndpointAddress = priv_ep->num | priv_ep->dir;
2329 	cdns3_select_ep(priv_dev, bEndpointAddress);
2330 
2331 	/*
2332 	 * For some versions of the controller, at some point during ISO OUT
2333 	 * traffic DMA reads the Transfer Ring of an EP that has never got a
2334 	 * doorbell. This issue was detected only in simulation, but the
2335 	 * driver adds protection against it anyway: the ISO OUT endpoint is
2336 	 * enabled only just before setting DRBL. This special treatment of
2337 	 * ISO OUT endpoints is recommended by the controller specification.
2338 	 */
2339 	if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir)
2340 		enable = 0;
2341 
2342 	if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
2343 		/*
2344 		 * Enable stream support (SS mode) related interrupts
2345 		 * in EP_STS_EN Register
2346 		 */
2347 		if (priv_dev->gadget.speed >= USB_SPEED_SUPER) {
2348 			reg |= EP_STS_EN_IOTEN | EP_STS_EN_PRIMEEEN |
2349 			       EP_STS_EN_SIDERREN | EP_STS_EN_MD_EXITEN |
2350 			       EP_STS_EN_STREAMREN;
2351 			priv_ep->use_streams = true;
2352 			ret = cdns3_ep_config(priv_ep, enable);
2353 			priv_dev->using_streams |= true;
2354 		}
2355 	} else {
2356 		ret = cdns3_ep_config(priv_ep, enable);
2357 	}
2358 
2359 	if (ret)
2360 		goto exit;
2361 
2362 	ret = cdns3_allocate_trb_pool(priv_ep);
2363 	if (ret)
2364 		goto exit;
2365 
2366 	bEndpointAddress = priv_ep->num | priv_ep->dir;
2367 	cdns3_select_ep(priv_dev, bEndpointAddress);
2368 
2369 	trace_cdns3_gadget_ep_enable(priv_ep);
2370 
2371 	writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
2372 
2373 	ret = readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
2374 					!(val & (EP_CMD_CSTALL | EP_CMD_EPRST)),
2375 					1, 1000);
2376 
2377 	if (unlikely(ret)) {
2378 		cdns3_free_trb_pool(priv_ep);
2379 		ret = -EINVAL;
2380 		goto exit;
2381 	}
2382 
2383 	/* enable interrupt for selected endpoint */
2384 	cdns3_set_register_bit(&priv_dev->regs->ep_ien,
2385 			       BIT(cdns3_ep_addr_to_index(bEndpointAddress)));
2386 
2387 	if (priv_dev->dev_ver < DEV_VER_V2)
2388 		cdns3_wa2_enable_detection(priv_dev, priv_ep, reg);
2389 
2390 	writel(reg, &priv_dev->regs->ep_sts_en);
2391 
2392 	ep->desc = desc;
2393 	priv_ep->flags &= ~(EP_PENDING_REQUEST | EP_STALLED | EP_STALL_PENDING |
2394 			    EP_QUIRK_ISO_OUT_EN | EP_QUIRK_EXTRA_BUF_EN);
2395 	priv_ep->flags |= EP_ENABLED | EP_UPDATE_EP_TRBADDR;
2396 	priv_ep->wa1_set = 0;
2397 	priv_ep->enqueue = 0;
2398 	priv_ep->dequeue = 0;
2399 	reg = readl(&priv_dev->regs->ep_sts);
2400 	priv_ep->pcs = !!EP_STS_CCS(reg);
2401 	priv_ep->ccs = !!EP_STS_CCS(reg);
2402 	/* one TRB is reserved for link TRB used in DMULT mode */
2403 	priv_ep->free_trbs = priv_ep->num_trbs - 1;
2404 exit:
2405 	spin_unlock_irqrestore(&priv_dev->lock, flags);
2406 
2407 	return ret;
2408 }
2409 
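/*
 * Editor's note: a worked example of the interval decoding used above,
 * priv_ep->interval = 2^(bInterval - 1). For an assumed high-speed ISO
 * endpoint with bInterval = 4 the period is BIT(3) = 8 (micro)frames,
 * which is the largest value accepted when ISO_MAX_INTERVAL is 8.
 * Hypothetical helper, shown only for illustration:
 */
#if 0
static u32 example_interval(u8 bInterval)
{
	return bInterval ? BIT(bInterval - 1) : 0;	/* e.g. 4 -> 8 */
}
#endif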
2410 /**
2411  * cdns3_gadget_ep_disable - Disable endpoint
2412  * @ep: endpoint object
2413  *
2414  * Returns 0 on success, error code elsewhere
2415  */
2416 static int cdns3_gadget_ep_disable(struct usb_ep *ep)
2417 {
2418 	struct cdns3_endpoint *priv_ep;
2419 	struct cdns3_request *priv_req;
2420 	struct cdns3_device *priv_dev;
2421 	struct usb_request *request;
2422 	unsigned long flags;
2423 	int ret = 0;
2424 	u32 ep_cfg;
2425 	int val;
2426 
2427 	if (!ep) {
2428 		pr_err("usbss: invalid parameters\n");
2429 		return -EINVAL;
2430 	}
2431 
2432 	priv_ep = ep_to_cdns3_ep(ep);
2433 	priv_dev = priv_ep->cdns3_dev;
2434 
2435 	if (dev_WARN_ONCE(priv_dev->dev, !(priv_ep->flags & EP_ENABLED),
2436 			  "%s is already disabled\n", priv_ep->name))
2437 		return 0;
2438 
2439 	spin_lock_irqsave(&priv_dev->lock, flags);
2440 
2441 	trace_cdns3_gadget_ep_disable(priv_ep);
2442 
2443 	cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress);
2444 
2445 	ep_cfg = readl(&priv_dev->regs->ep_cfg);
2446 	ep_cfg &= ~EP_CFG_ENABLE;
2447 	writel(ep_cfg, &priv_dev->regs->ep_cfg);
2448 
2449 	/*
2450 	 * Driver needs some time before resetting the endpoint.
2451 	 * It waits for the DBUSY bit to clear or for the timeout to expire.
2452 	 * 10us is enough time for the controller to stop the transfer.
2453 	 */
2454 	readl_poll_timeout_atomic(&priv_dev->regs->ep_sts, val,
2455 				  !(val & EP_STS_DBUSY), 1, 10);
2456 	writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
2457 
2458 	ret = readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
2459 					!(val & (EP_CMD_CSTALL | EP_CMD_EPRST)),
2460 					1, 1000);
2461 	if (unlikely(ret))
2462 		dev_err(priv_dev->dev, "Timeout: %s resetting failed.\n",
2463 			priv_ep->name);
2464 
2465 	while (!list_empty(&priv_ep->pending_req_list)) {
2466 		request = cdns3_next_request(&priv_ep->pending_req_list);
2467 
2468 		cdns3_gadget_giveback(priv_ep, to_cdns3_request(request),
2469 				      -ESHUTDOWN);
2470 	}
2471 
2472 	while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
2473 		priv_req = cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
2474 
2475 		kfree(priv_req->request.buf);
2476 		cdns3_gadget_ep_free_request(&priv_ep->endpoint,
2477 					     &priv_req->request);
2478 		list_del_init(&priv_req->list);
2479 		--priv_ep->wa2_counter;
2480 	}
2481 
2482 	while (!list_empty(&priv_ep->deferred_req_list)) {
2483 		request = cdns3_next_request(&priv_ep->deferred_req_list);
2484 
2485 		cdns3_gadget_giveback(priv_ep, to_cdns3_request(request),
2486 				      -ESHUTDOWN);
2487 	}
2488 
2489 	priv_ep->descmis_req = NULL;
2490 
2491 	ep->desc = NULL;
2492 	priv_ep->flags &= ~EP_ENABLED;
2493 	priv_ep->use_streams = false;
2494 
2495 	spin_unlock_irqrestore(&priv_dev->lock, flags);
2496 
2497 	return ret;
2498 }
2499 
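/*
 * Editor's note: an illustrative sketch of how a request reaches the queue
 * implementation below. A function driver allocates and submits requests
 * through the standard gadget API; this driver first parks them on
 * deferred_req_list and arms the hardware from cdns3_start_all_request().
 * The buffer, length and completion callback below are hypothetical.
 */
#if 0
static void example_complete(struct usb_ep *ep, struct usb_request *req)
{
	/* hypothetical completion callback */
}

static int example_submit(struct usb_ep *ep, void *buf, unsigned int len)
{
	struct usb_request *req = usb_ep_alloc_request(ep, GFP_ATOMIC);

	if (!req)
		return -ENOMEM;
	req->buf = buf;
	req->length = len;
	req->complete = example_complete;
	return usb_ep_queue(ep, req, GFP_ATOMIC); /* -> cdns3_gadget_ep_queue() */
}
#endif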
2500 /**
2501  * __cdns3_gadget_ep_queue - Transfer data on endpoint
2502  * @ep: endpoint object
2503  * @request: request object
2504  * @gfp_flags: gfp flags
2505  *
2506  * Returns 0 on success, error code elsewhere
2507  */
2508 static int __cdns3_gadget_ep_queue(struct usb_ep *ep,
2509 				   struct usb_request *request,
2510 				   gfp_t gfp_flags)
2511 {
2512 	struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
2513 	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
2514 	struct cdns3_request *priv_req;
2515 	int ret = 0;
2516 
2517 	request->actual = 0;
2518 	request->status = -EINPROGRESS;
2519 	priv_req = to_cdns3_request(request);
2520 	trace_cdns3_ep_queue(priv_req);
2521 
2522 	if (priv_dev->dev_ver < DEV_VER_V2) {
2523 		ret = cdns3_wa2_gadget_ep_queue(priv_dev, priv_ep,
2524 						priv_req);
2525 
2526 		if (ret == EINPROGRESS)	/* request was consumed by WA2 */
2527 			return 0;
2528 	}
2529 
2530 	ret = cdns3_prepare_aligned_request_buf(priv_req);
2531 	if (ret < 0)
2532 		return ret;
2533 
2534 	ret = usb_gadget_map_request_by_dev(priv_dev->sysdev, request,
2535 					    usb_endpoint_dir_in(ep->desc));
2536 	if (ret)
2537 		return ret;
2538 
2539 	list_add_tail(&request->list, &priv_ep->deferred_req_list);
2540 
2541 	/*
2542 	 * For stream capable endpoints, the request is started only when the
2543 	 * prime interrupt flag is set.
2544 	 * If the hardware endpoint configuration has not been set yet, just
2545 	 * queue the request in the deferred list. The transfer will be
2546 	 * started in cdns3_set_hw_configuration.
2547 	 */
2548 	if (!request->stream_id) {
2549 		if (priv_dev->hw_configured_flag &&
2550 		    !(priv_ep->flags & EP_STALLED) &&
2551 		    !(priv_ep->flags & EP_STALL_PENDING))
2552 			cdns3_start_all_request(priv_dev, priv_ep);
2553 	} else {
2554 		if (priv_dev->hw_configured_flag && priv_ep->prime_flag)
2555 			cdns3_start_all_request(priv_dev, priv_ep);
2556 	}
2557 
2558 	return 0;
2559 }
2560 
2561 static int cdns3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
2562 				 gfp_t gfp_flags)
2563 {
2564 	struct usb_request *zlp_request;
2565 	struct cdns3_endpoint *priv_ep;
2566 	struct cdns3_device *priv_dev;
2567 	unsigned long flags;
2568 	int ret;
2569 
2570 	if (!request || !ep)
2571 		return -EINVAL;
2572 
2573 	priv_ep = ep_to_cdns3_ep(ep);
2574 	priv_dev = priv_ep->cdns3_dev;
2575 
2576 	spin_lock_irqsave(&priv_dev->lock, flags);
2577 
2578 	ret = __cdns3_gadget_ep_queue(ep, request, gfp_flags);
2579 
2580 	if (ret == 0 && request->zero && request->length &&
2581 	    (request->length % ep->maxpacket == 0)) {
2582 		struct cdns3_request *priv_req;
2583 
2584 		zlp_request = cdns3_gadget_ep_alloc_request(ep, GFP_ATOMIC);
2585 		zlp_request->buf = priv_dev->zlp_buf;
2586 		zlp_request->length = 0;
2587 
2588 		priv_req = to_cdns3_request(zlp_request);
2589 		priv_req->flags |= REQUEST_ZLP;
2590 
2591 		dev_dbg(priv_dev->dev, "Queuing ZLP for endpoint: %s\n",
2592 			priv_ep->name);
2593 		ret = __cdns3_gadget_ep_queue(ep, zlp_request, gfp_flags);
2594 	}
2595 
2596 	spin_unlock_irqrestore(&priv_dev->lock, flags);
2597 	return ret;
2598 }
2599 
2600 /**
2601  * cdns3_gadget_ep_dequeue - Remove request from transfer queue
2602  * @ep: endpoint object associated with request
2603  * @request: request object
2604  *
2605  * Returns 0 on success, error code elsewhere
2606  */
2607 int cdns3_gadget_ep_dequeue(struct usb_ep *ep,
2608 			    struct usb_request *request)
2609 {
2610 	struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
2611 	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
2612 	struct usb_request *req, *req_temp;
2613 	struct cdns3_request *priv_req;
2614 	struct cdns3_trb *link_trb;
2615 	u8 req_on_hw_ring = 0;
2616 	unsigned long flags;
2617 	int ret = 0;
2618 
2619 	if (!ep || !request || !ep->desc)
2620 		return -EINVAL;
2621 
2622 	spin_lock_irqsave(&priv_dev->lock, flags);
2623 
2624 	priv_req = to_cdns3_request(request);
2625 
2626 	trace_cdns3_ep_dequeue(priv_req);
2627 
2628 	cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress);
2629 
2630 	list_for_each_entry_safe(req, req_temp, &priv_ep->pending_req_list,
2631 				 list) {
2632 		if (request == req) {
2633 			req_on_hw_ring = 1;
2634 			goto found;
2635 		}
2636 	}
2637 
2638 	list_for_each_entry_safe(req, req_temp, &priv_ep->deferred_req_list,
2639 				 list) {
2640 		if (request == req)
2641 			goto found;
2642 	}
2643 
2644 	goto not_found;
2645 
2646 found:
2647 	link_trb = priv_req->trb;
2648 
2649 	/* Update ring only if removed request is on pending_req_list list */
2650 	if (req_on_hw_ring && link_trb) {
2651 		link_trb->buffer = cpu_to_le32(TRB_BUFFER(priv_ep->trb_pool_dma +
2652 			((priv_req->end_trb + 1) * TRB_SIZE)));
2653 		link_trb->control = cpu_to_le32((le32_to_cpu(link_trb->control) & TRB_CYCLE) |
2654 			TRB_TYPE(TRB_LINK) | TRB_CHAIN);
2655 
2656 		if (priv_ep->wa1_trb == priv_req->trb)
2657
cdns3_wa1_restore_cycle_bit(priv_ep); 2658 } 2659 2660 cdns3_gadget_giveback(priv_ep, priv_req, -ECONNRESET); 2661 2662 not_found: 2663 spin_unlock_irqrestore(&priv_dev->lock, flags); 2664 return ret; 2665 } 2666 2667 /** 2668 * __cdns3_gadget_ep_set_halt - Sets stall on selected endpoint 2669 * Should be called after acquiring spin_lock and selecting ep 2670 * @priv_ep: endpoint object to set stall on. 2671 */ 2672 void __cdns3_gadget_ep_set_halt(struct cdns3_endpoint *priv_ep) 2673 { 2674 struct cdns3_device *priv_dev = priv_ep->cdns3_dev; 2675 2676 trace_cdns3_halt(priv_ep, 1, 0); 2677 2678 if (!(priv_ep->flags & EP_STALLED)) { 2679 u32 ep_sts_reg = readl(&priv_dev->regs->ep_sts); 2680 2681 if (!(ep_sts_reg & EP_STS_DBUSY)) 2682 cdns3_ep_stall_flush(priv_ep); 2683 else 2684 priv_ep->flags |= EP_STALL_PENDING; 2685 } 2686 } 2687 2688 /** 2689 * __cdns3_gadget_ep_clear_halt - Clears stall on selected endpoint 2690 * Should be called after acquiring spin_lock and selecting ep 2691 * @priv_ep: endpoint object to clear stall on 2692 */ 2693 int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep) 2694 { 2695 struct cdns3_device *priv_dev = priv_ep->cdns3_dev; 2696 struct usb_request *request; 2697 struct cdns3_request *priv_req; 2698 struct cdns3_trb *trb = NULL; 2699 int ret; 2700 int val; 2701 2702 trace_cdns3_halt(priv_ep, 0, 0); 2703 2704 request = cdns3_next_request(&priv_ep->pending_req_list); 2705 if (request) { 2706 priv_req = to_cdns3_request(request); 2707 trb = priv_req->trb; 2708 if (trb) 2709 trb->control = trb->control ^ cpu_to_le32(TRB_CYCLE); 2710 } 2711 2712 writel(EP_CMD_CSTALL | EP_CMD_EPRST, &priv_dev->regs->ep_cmd); 2713 2714 /* wait for EPRST cleared */ 2715 ret = readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val, 2716 !(val & EP_CMD_EPRST), 1, 100); 2717 if (ret) 2718 return -EINVAL; 2719 2720 priv_ep->flags &= ~(EP_STALLED | EP_STALL_PENDING); 2721 2722 if (request) { 2723 if (trb) 2724 trb->control = trb->control ^ cpu_to_le32(TRB_CYCLE); 2725 2726 cdns3_rearm_transfer(priv_ep, 1); 2727 } 2728 2729 cdns3_start_all_request(priv_dev, priv_ep); 2730 return ret; 2731 } 2732 2733 /** 2734 * cdns3_gadget_ep_set_halt - Sets/clears stall on selected endpoint 2735 * @ep: endpoint object to set/clear stall on 2736 * @value: 1 for set stall, 0 for clear stall 2737 * 2738 * Returns 0 on success, error code elsewhere 2739 */ 2740 int cdns3_gadget_ep_set_halt(struct usb_ep *ep, int value) 2741 { 2742 struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep); 2743 struct cdns3_device *priv_dev = priv_ep->cdns3_dev; 2744 unsigned long flags; 2745 int ret = 0; 2746 2747 if (!(priv_ep->flags & EP_ENABLED)) 2748 return -EPERM; 2749 2750 spin_lock_irqsave(&priv_dev->lock, flags); 2751 2752 cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress); 2753 2754 if (!value) { 2755 priv_ep->flags &= ~EP_WEDGE; 2756 ret = __cdns3_gadget_ep_clear_halt(priv_ep); 2757 } else { 2758 __cdns3_gadget_ep_set_halt(priv_ep); 2759 } 2760 2761 spin_unlock_irqrestore(&priv_dev->lock, flags); 2762 2763 return ret; 2764 } 2765 2766 extern const struct usb_ep_ops cdns3_gadget_ep0_ops; 2767 2768 static const struct usb_ep_ops cdns3_gadget_ep_ops = { 2769 .enable = cdns3_gadget_ep_enable, 2770 .disable = cdns3_gadget_ep_disable, 2771 .alloc_request = cdns3_gadget_ep_alloc_request, 2772 .free_request = cdns3_gadget_ep_free_request, 2773 .queue = cdns3_gadget_ep_queue, 2774 .dequeue = cdns3_gadget_ep_dequeue, 2775 .set_halt = cdns3_gadget_ep_set_halt, 2776 .set_wedge = cdns3_gadget_ep_set_wedge, 2777 }; 2778 
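/*
 * Editor's note: the operations above are never called directly; the gadget
 * core dispatches to them through the standard usb_ep API. An illustrative,
 * hypothetical sketch of that dispatch:
 */
#if 0
static void example_ep_ops_dispatch(struct usb_ep *ep)
{
	usb_ep_set_halt(ep);	/* -> cdns3_gadget_ep_set_halt(ep, 1) */
	usb_ep_clear_halt(ep);	/* -> cdns3_gadget_ep_set_halt(ep, 0) */
	usb_ep_disable(ep);	/* -> cdns3_gadget_ep_disable(ep) */
}
#endif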
2779 /**
2780  * cdns3_gadget_get_frame - Returns the current ITP frame number
2781  * @gadget: gadget object
2782  *
2783  * Returns the current ITP frame number
2784  */
2785 static int cdns3_gadget_get_frame(struct usb_gadget *gadget)
2786 {
2787 	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
2788 
2789 	return readl(&priv_dev->regs->usb_itpn);
2790 }
2791 
2792 int __cdns3_gadget_wakeup(struct cdns3_device *priv_dev)
2793 {
2794 	enum usb_device_speed speed;
2795 
2796 	speed = cdns3_get_speed(priv_dev);
2797 
2798 	if (speed >= USB_SPEED_SUPER)
2799 		return 0;
2800 
2801 	/* Start driving resume signaling to indicate remote wakeup. */
2802 	writel(USB_CONF_LGO_L0, &priv_dev->regs->usb_conf);
2803 
2804 	return 0;
2805 }
2806 
2807 static int cdns3_gadget_wakeup(struct usb_gadget *gadget)
2808 {
2809 	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
2810 	unsigned long flags;
2811 	int ret = 0;
2812 
2813 	spin_lock_irqsave(&priv_dev->lock, flags);
2814 	ret = __cdns3_gadget_wakeup(priv_dev);
2815 	spin_unlock_irqrestore(&priv_dev->lock, flags);
2816 	return ret;
2817 }
2818 
2819 static int cdns3_gadget_set_selfpowered(struct usb_gadget *gadget,
2820 					int is_selfpowered)
2821 {
2822 	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
2823 	unsigned long flags;
2824 
2825 	spin_lock_irqsave(&priv_dev->lock, flags);
2826 	priv_dev->is_selfpowered = !!is_selfpowered;
2827 	spin_unlock_irqrestore(&priv_dev->lock, flags);
2828 	return 0;
2829 }
2830 
2831 static int cdns3_gadget_pullup(struct usb_gadget *gadget, int is_on)
2832 {
2833 	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
2834 
2835 	if (is_on) {
2836 		writel(USB_CONF_DEVEN, &priv_dev->regs->usb_conf);
2837 	} else {
2838 		writel(~0, &priv_dev->regs->ep_ists);
2839 		writel(~0, &priv_dev->regs->usb_ists);
2840 		writel(USB_CONF_DEVDS, &priv_dev->regs->usb_conf);
2841 	}
2842 
2843 	return 0;
2844 }
2845 
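/*
 * Editor's note: a sketch of how the gadget ops above are driven by the UDC
 * core. Wakeup and pullup are reached through the standard wrappers; the
 * calls below are illustrative, not taken from this driver.
 */
#if 0
static void example_gadget_ops_dispatch(struct cdns3_device *priv_dev)
{
	usb_gadget_wakeup(&priv_dev->gadget);     /* -> cdns3_gadget_wakeup() */
	usb_gadget_connect(&priv_dev->gadget);    /* -> cdns3_gadget_pullup(g, 1) */
	usb_gadget_disconnect(&priv_dev->gadget); /* -> cdns3_gadget_pullup(g, 0) */
}
#endif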
2846 static void cdns3_gadget_config(struct cdns3_device *priv_dev)
2847 {
2848 	struct cdns3_usb_regs __iomem *regs = priv_dev->regs;
2849 	u32 reg;
2850 
2851 	cdns3_ep0_config(priv_dev);
2852 
2853 	/* enable interrupts for endpoint 0 (in and out) */
2854 	writel(EP_IEN_EP_OUT0 | EP_IEN_EP_IN0, &regs->ep_ien);
2855 
2856 	/*
2857 	 * Driver needs to modify LFPS minimal U1 Exit time for DEV_VER_TI_V1
2858 	 * revision of controller.
2859 	 */
2860 	if (priv_dev->dev_ver == DEV_VER_TI_V1) {
2861 		reg = readl(&regs->dbg_link1);
2862 
2863 		reg &= ~DBG_LINK1_LFPS_MIN_GEN_U1_EXIT_MASK;
2864 		reg |= DBG_LINK1_LFPS_MIN_GEN_U1_EXIT(0x55) |
2865 		       DBG_LINK1_LFPS_MIN_GEN_U1_EXIT_SET;
2866 		writel(reg, &regs->dbg_link1);
2867 	}
2868 
2869 	/*
2870 	 * By default some platforms have protected access to memory
2871 	 * configured. This causes problems with the cache, so the driver
2872 	 * restores non-secure access to memory.
2873 	 */
2874 	reg = readl(&regs->dma_axi_ctrl);
2875 	reg |= DMA_AXI_CTRL_MARPROT(DMA_AXI_CTRL_NON_SECURE) |
2876 	       DMA_AXI_CTRL_MAWPROT(DMA_AXI_CTRL_NON_SECURE);
2877 	writel(reg, &regs->dma_axi_ctrl);
2878 
2879 	/* enable generic interrupt */
2880 	writel(USB_IEN_INIT, &regs->usb_ien);
2881 	writel(USB_CONF_CLK2OFFDS | USB_CONF_L1DS, &regs->usb_conf);
2882 	/* keep Fast Access bit */
2883 	writel(PUSB_PWR_FST_REG_ACCESS, &priv_dev->regs->usb_pwr);
2884 
2885 	cdns3_configure_dmult(priv_dev, NULL);
2886 }
2887 
2888 /**
2889  * cdns3_gadget_udc_start - Gadget start
2890  * @gadget: gadget object
2891  * @driver: driver which operates on this gadget
2892  *
2893  * Returns 0 on success, error code elsewhere
2894  */
2895 static int cdns3_gadget_udc_start(struct usb_gadget *gadget,
2896 				  struct usb_gadget_driver *driver)
2897 {
2898 	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
2899 	unsigned long flags;
2900 	enum usb_device_speed max_speed = driver->max_speed;
2901 
2902 	spin_lock_irqsave(&priv_dev->lock, flags);
2903 	priv_dev->gadget_driver = driver;
2904 
2905 	/* limit speed if necessary */
2906 	max_speed = min(driver->max_speed, gadget->max_speed);
2907 
2908 	switch (max_speed) {
2909 	case USB_SPEED_FULL:
2910 		writel(USB_CONF_SFORCE_FS, &priv_dev->regs->usb_conf);
2911 		writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf);
2912 		break;
2913 	case USB_SPEED_HIGH:
2914 		writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf);
2915 		break;
2916 	case USB_SPEED_SUPER:
2917 		break;
2918 	default:
2919 		dev_err(priv_dev->dev,
2920 			"invalid maximum_speed parameter %d\n",
2921 			max_speed);
2922 		fallthrough;
2923 	case USB_SPEED_UNKNOWN:
2924 		/* default to superspeed */
2925 		max_speed = USB_SPEED_SUPER;
2926 		break;
2927 	}
2928 
2929 	cdns3_gadget_config(priv_dev);
2930 	spin_unlock_irqrestore(&priv_dev->lock, flags);
2931 	return 0;
2932 }
2933 
2934 /**
2935  * cdns3_gadget_udc_stop - Stops gadget
2936  * @gadget: gadget object
2937  *
2938  * Returns 0
2939  */
2940 static int cdns3_gadget_udc_stop(struct usb_gadget *gadget)
2941 {
2942 	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
2943 	struct cdns3_endpoint *priv_ep;
2944 	u32 bEndpointAddress;
2945 	struct usb_ep *ep;
2946 	int val;
2947 
2948 	priv_dev->gadget_driver = NULL;
2949 
2950 	priv_dev->onchip_used_size = 0;
2951 	priv_dev->out_mem_is_allocated = 0;
2952 	priv_dev->gadget.speed = USB_SPEED_UNKNOWN;
2953 
2954 	list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) {
2955 		priv_ep = ep_to_cdns3_ep(ep);
2956 		bEndpointAddress = priv_ep->num | priv_ep->dir;
2957 		cdns3_select_ep(priv_dev, bEndpointAddress);
2958 		writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
2959 		readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
2960 					  !(val & EP_CMD_EPRST), 1, 100);
2961 
2962 		priv_ep->flags &= ~EP_CLAIMED;
2963 	}
2964 
2965 	/* disable interrupt for device */
2966 	writel(0, &priv_dev->regs->usb_ien);
2967 	writel(0, &priv_dev->regs->usb_pwr);
2968 	writel(USB_CONF_DEVDS, &priv_dev->regs->usb_conf);
2969 
2970 	return 0;
2971 }
2972 
2973 static const struct usb_gadget_ops cdns3_gadget_ops = {
2974 	.get_frame = cdns3_gadget_get_frame,
2975 	.wakeup = cdns3_gadget_wakeup,
2976 	.set_selfpowered = cdns3_gadget_set_selfpowered,
2977 	.pullup = cdns3_gadget_pullup,
2978 	.udc_start = cdns3_gadget_udc_start,
2979 	.udc_stop = cdns3_gadget_udc_stop,
2980 	.match_ep = cdns3_gadget_match_ep,
2981 };
2982 
2983 static void cdns3_free_all_eps(struct cdns3_device *priv_dev)
2984 {
2985 	int i;
2986 
2987 	/* ep0 OUT points to ep0 IN.
*/ 2988 priv_dev->eps[16] = NULL; 2989 2990 for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++) 2991 if (priv_dev->eps[i]) { 2992 cdns3_free_trb_pool(priv_dev->eps[i]); 2993 devm_kfree(priv_dev->dev, priv_dev->eps[i]); 2994 } 2995 } 2996 2997 /** 2998 * cdns3_init_eps - Initializes software endpoints of gadget 2999 * @priv_dev: extended gadget object 3000 * 3001 * Returns 0 on success, error code elsewhere 3002 */ 3003 static int cdns3_init_eps(struct cdns3_device *priv_dev) 3004 { 3005 u32 ep_enabled_reg, iso_ep_reg; 3006 struct cdns3_endpoint *priv_ep; 3007 int ep_dir, ep_number; 3008 u32 ep_mask; 3009 int ret = 0; 3010 int i; 3011 3012 /* Read it from USB_CAP3 to USB_CAP5 */ 3013 ep_enabled_reg = readl(&priv_dev->regs->usb_cap3); 3014 iso_ep_reg = readl(&priv_dev->regs->usb_cap4); 3015 3016 dev_dbg(priv_dev->dev, "Initializing non-zero endpoints\n"); 3017 3018 for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++) { 3019 ep_dir = i >> 4; /* i div 16 */ 3020 ep_number = i & 0xF; /* i % 16 */ 3021 ep_mask = BIT(i); 3022 3023 if (!(ep_enabled_reg & ep_mask)) 3024 continue; 3025 3026 if (ep_dir && !ep_number) { 3027 priv_dev->eps[i] = priv_dev->eps[0]; 3028 continue; 3029 } 3030 3031 priv_ep = devm_kzalloc(priv_dev->dev, sizeof(*priv_ep), 3032 GFP_KERNEL); 3033 if (!priv_ep) 3034 goto err; 3035 3036 /* set parent of endpoint object */ 3037 priv_ep->cdns3_dev = priv_dev; 3038 priv_dev->eps[i] = priv_ep; 3039 priv_ep->num = ep_number; 3040 priv_ep->dir = ep_dir ? USB_DIR_IN : USB_DIR_OUT; 3041 3042 if (!ep_number) { 3043 ret = cdns3_init_ep0(priv_dev, priv_ep); 3044 if (ret) { 3045 dev_err(priv_dev->dev, "Failed to init ep0\n"); 3046 goto err; 3047 } 3048 } else { 3049 snprintf(priv_ep->name, sizeof(priv_ep->name), "ep%d%s", 3050 ep_number, !!ep_dir ? "in" : "out"); 3051 priv_ep->endpoint.name = priv_ep->name; 3052 3053 usb_ep_set_maxpacket_limit(&priv_ep->endpoint, 3054 CDNS3_EP_MAX_PACKET_LIMIT); 3055 priv_ep->endpoint.max_streams = CDNS3_EP_MAX_STREAMS; 3056 priv_ep->endpoint.ops = &cdns3_gadget_ep_ops; 3057 if (ep_dir) 3058 priv_ep->endpoint.caps.dir_in = 1; 3059 else 3060 priv_ep->endpoint.caps.dir_out = 1; 3061 3062 if (iso_ep_reg & ep_mask) 3063 priv_ep->endpoint.caps.type_iso = 1; 3064 3065 priv_ep->endpoint.caps.type_bulk = 1; 3066 priv_ep->endpoint.caps.type_int = 1; 3067 3068 list_add_tail(&priv_ep->endpoint.ep_list, 3069 &priv_dev->gadget.ep_list); 3070 } 3071 3072 priv_ep->flags = 0; 3073 3074 dev_dbg(priv_dev->dev, "Initialized %s support: %s %s\n", 3075 priv_ep->name, 3076 priv_ep->endpoint.caps.type_bulk ? "BULK, INT" : "", 3077 priv_ep->endpoint.caps.type_iso ? 
"ISO" : ""); 3078 3079 INIT_LIST_HEAD(&priv_ep->pending_req_list); 3080 INIT_LIST_HEAD(&priv_ep->deferred_req_list); 3081 INIT_LIST_HEAD(&priv_ep->wa2_descmiss_req_list); 3082 } 3083 3084 return 0; 3085 err: 3086 cdns3_free_all_eps(priv_dev); 3087 return -ENOMEM; 3088 } 3089 3090 static void cdns3_gadget_release(struct device *dev) 3091 { 3092 struct cdns3_device *priv_dev = container_of(dev, 3093 struct cdns3_device, gadget.dev); 3094 3095 kfree(priv_dev); 3096 } 3097 3098 static void cdns3_gadget_exit(struct cdns *cdns) 3099 { 3100 struct cdns3_device *priv_dev; 3101 3102 priv_dev = cdns->gadget_dev; 3103 3104 3105 pm_runtime_mark_last_busy(cdns->dev); 3106 pm_runtime_put_autosuspend(cdns->dev); 3107 3108 usb_del_gadget(&priv_dev->gadget); 3109 devm_free_irq(cdns->dev, cdns->dev_irq, priv_dev); 3110 3111 cdns3_free_all_eps(priv_dev); 3112 3113 while (!list_empty(&priv_dev->aligned_buf_list)) { 3114 struct cdns3_aligned_buf *buf; 3115 3116 buf = cdns3_next_align_buf(&priv_dev->aligned_buf_list); 3117 dma_free_noncoherent(priv_dev->sysdev, buf->size, 3118 buf->buf, 3119 buf->dma, 3120 buf->dir); 3121 3122 list_del(&buf->list); 3123 kfree(buf); 3124 } 3125 3126 dma_free_coherent(priv_dev->sysdev, 8, priv_dev->setup_buf, 3127 priv_dev->setup_dma); 3128 dma_pool_destroy(priv_dev->eps_dma_pool); 3129 3130 kfree(priv_dev->zlp_buf); 3131 usb_put_gadget(&priv_dev->gadget); 3132 cdns->gadget_dev = NULL; 3133 cdns_drd_gadget_off(cdns); 3134 } 3135 3136 static int cdns3_gadget_start(struct cdns *cdns) 3137 { 3138 struct cdns3_device *priv_dev; 3139 u32 max_speed; 3140 int ret; 3141 3142 priv_dev = kzalloc(sizeof(*priv_dev), GFP_KERNEL); 3143 if (!priv_dev) 3144 return -ENOMEM; 3145 3146 usb_initialize_gadget(cdns->dev, &priv_dev->gadget, 3147 cdns3_gadget_release); 3148 cdns->gadget_dev = priv_dev; 3149 priv_dev->sysdev = cdns->dev; 3150 priv_dev->dev = cdns->dev; 3151 priv_dev->regs = cdns->dev_regs; 3152 3153 device_property_read_u16(priv_dev->dev, "cdns,on-chip-buff-size", 3154 &priv_dev->onchip_buffers); 3155 3156 if (priv_dev->onchip_buffers <= 0) { 3157 u32 reg = readl(&priv_dev->regs->usb_cap2); 3158 3159 priv_dev->onchip_buffers = USB_CAP2_ACTUAL_MEM_SIZE(reg); 3160 } 3161 3162 if (!priv_dev->onchip_buffers) 3163 priv_dev->onchip_buffers = 256; 3164 3165 max_speed = usb_get_maximum_speed(cdns->dev); 3166 3167 /* Check the maximum_speed parameter */ 3168 switch (max_speed) { 3169 case USB_SPEED_FULL: 3170 case USB_SPEED_HIGH: 3171 case USB_SPEED_SUPER: 3172 break; 3173 default: 3174 dev_err(cdns->dev, "invalid maximum_speed parameter %d\n", 3175 max_speed); 3176 fallthrough; 3177 case USB_SPEED_UNKNOWN: 3178 /* default to superspeed */ 3179 max_speed = USB_SPEED_SUPER; 3180 break; 3181 } 3182 3183 /* fill gadget fields */ 3184 priv_dev->gadget.max_speed = max_speed; 3185 priv_dev->gadget.speed = USB_SPEED_UNKNOWN; 3186 priv_dev->gadget.ops = &cdns3_gadget_ops; 3187 priv_dev->gadget.name = "usb-ss-gadget"; 3188 priv_dev->gadget.quirk_avoids_skb_reserve = 1; 3189 priv_dev->gadget.irq = cdns->dev_irq; 3190 3191 spin_lock_init(&priv_dev->lock); 3192 INIT_WORK(&priv_dev->pending_status_wq, 3193 cdns3_pending_setup_status_handler); 3194 3195 INIT_WORK(&priv_dev->aligned_buf_wq, 3196 cdns3_free_aligned_request_buf); 3197 3198 /* initialize endpoint container */ 3199 INIT_LIST_HEAD(&priv_dev->gadget.ep_list); 3200 INIT_LIST_HEAD(&priv_dev->aligned_buf_list); 3201 priv_dev->eps_dma_pool = dma_pool_create("cdns3_eps_dma_pool", 3202 priv_dev->sysdev, 3203 TRB_RING_SIZE, 8, 0); 3204 if 
(!priv_dev->eps_dma_pool) {
3205 		dev_err(priv_dev->dev, "Failed to create TRB dma pool\n");
3206 		ret = -ENOMEM;
3207 		goto err1;
3208 	}
3209 
3210 	ret = cdns3_init_eps(priv_dev);
3211 	if (ret) {
3212 		dev_err(priv_dev->dev, "Failed to create endpoints\n");
3213 		goto err1;
3214 	}
3215 
3216 	/* allocate memory for setup packet buffer */
3217 	priv_dev->setup_buf = dma_alloc_coherent(priv_dev->sysdev, 8,
3218 						 &priv_dev->setup_dma, GFP_DMA);
3219 	if (!priv_dev->setup_buf) {
3220 		ret = -ENOMEM;
3221 		goto err2;
3222 	}
3223 
3224 	priv_dev->dev_ver = readl(&priv_dev->regs->usb_cap6);
3225 
3226 	dev_dbg(priv_dev->dev, "Device Controller version: %08x\n",
3227 		readl(&priv_dev->regs->usb_cap6));
3228 	dev_dbg(priv_dev->dev, "USB Capabilities: %08x\n",
3229 		readl(&priv_dev->regs->usb_cap1));
3230 	dev_dbg(priv_dev->dev, "On-Chip memory configuration: %08x\n",
3231 		readl(&priv_dev->regs->usb_cap2));
3232 
3233 	priv_dev->dev_ver = GET_DEV_BASE_VERSION(priv_dev->dev_ver);
3234 	if (priv_dev->dev_ver >= DEV_VER_V2)
3235 		priv_dev->gadget.sg_supported = 1;
3236 
3237 	priv_dev->zlp_buf = kzalloc(CDNS3_EP_ZLP_BUF_SIZE, GFP_KERNEL);
3238 	if (!priv_dev->zlp_buf) {
3239 		ret = -ENOMEM;
3240 		goto err3;
3241 	}
3242 
3243 	/* add USB gadget device */
3244 	ret = usb_add_gadget(&priv_dev->gadget);
3245 	if (ret < 0) {
3246 		dev_err(priv_dev->dev, "Failed to add gadget\n");
3247 		goto err4;
3248 	}
3249 
3250 	return 0;
3251 err4:
3252 	kfree(priv_dev->zlp_buf);
3253 err3:
3254 	dma_free_coherent(priv_dev->sysdev, 8, priv_dev->setup_buf,
3255 			  priv_dev->setup_dma);
3256 err2:
3257 	cdns3_free_all_eps(priv_dev);
3258 err1:
3259 	dma_pool_destroy(priv_dev->eps_dma_pool);
3260 
3261 	usb_put_gadget(&priv_dev->gadget);
3262 	cdns->gadget_dev = NULL;
3263 	return ret;
3264 }
3265 
3266 static int __cdns3_gadget_init(struct cdns *cdns)
3267 {
3268 	int ret = 0;
3269 
3270 	/* Ensure 32-bit DMA Mask in case we switched back from Host mode */
3271 	ret = dma_set_mask_and_coherent(cdns->dev, DMA_BIT_MASK(32));
3272 	if (ret) {
3273 		dev_err(cdns->dev, "Failed to set dma mask: %d\n", ret);
3274 		return ret;
3275 	}
3276 
3277 	cdns_drd_gadget_on(cdns);
3278 	pm_runtime_get_sync(cdns->dev);
3279 
3280 	ret = cdns3_gadget_start(cdns);
3281 	if (ret) {
3282 		pm_runtime_put_sync(cdns->dev);
3283 		return ret;
3284 	}
3285 
3286 	/*
3287 	 * Because the interrupt line can be shared with other components in
3288 	 * the driver, it can't use the IRQF_ONESHOT flag here.
3289 */ 3290 ret = devm_request_threaded_irq(cdns->dev, cdns->dev_irq, 3291 cdns3_device_irq_handler, 3292 cdns3_device_thread_irq_handler, 3293 IRQF_SHARED, dev_name(cdns->dev), 3294 cdns->gadget_dev); 3295 3296 if (ret) 3297 goto err0; 3298 3299 return 0; 3300 err0: 3301 cdns3_gadget_exit(cdns); 3302 return ret; 3303 } 3304 3305 static int cdns3_gadget_suspend(struct cdns *cdns, bool do_wakeup) 3306 __must_hold(&cdns->lock) 3307 { 3308 struct cdns3_device *priv_dev = cdns->gadget_dev; 3309 3310 spin_unlock(&cdns->lock); 3311 cdns3_disconnect_gadget(priv_dev); 3312 spin_lock(&cdns->lock); 3313 3314 priv_dev->gadget.speed = USB_SPEED_UNKNOWN; 3315 usb_gadget_set_state(&priv_dev->gadget, USB_STATE_NOTATTACHED); 3316 cdns3_hw_reset_eps_config(priv_dev); 3317 3318 /* disable interrupt for device */ 3319 writel(0, &priv_dev->regs->usb_ien); 3320 3321 return 0; 3322 } 3323 3324 static int cdns3_gadget_resume(struct cdns *cdns, bool hibernated) 3325 { 3326 struct cdns3_device *priv_dev = cdns->gadget_dev; 3327 3328 if (!priv_dev->gadget_driver) 3329 return 0; 3330 3331 cdns3_gadget_config(priv_dev); 3332 if (hibernated) 3333 writel(USB_CONF_DEVEN, &priv_dev->regs->usb_conf); 3334 3335 return 0; 3336 } 3337 3338 /** 3339 * cdns3_gadget_init - initialize device structure 3340 * 3341 * @cdns: cdns instance 3342 * 3343 * This function initializes the gadget. 3344 */ 3345 int cdns3_gadget_init(struct cdns *cdns) 3346 { 3347 struct cdns_role_driver *rdrv; 3348 3349 rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL); 3350 if (!rdrv) 3351 return -ENOMEM; 3352 3353 rdrv->start = __cdns3_gadget_init; 3354 rdrv->stop = cdns3_gadget_exit; 3355 rdrv->suspend = cdns3_gadget_suspend; 3356 rdrv->resume = cdns3_gadget_resume; 3357 rdrv->state = CDNS_ROLE_STATE_INACTIVE; 3358 rdrv->name = "gadget"; 3359 cdns->roles[USB_ROLE_DEVICE] = rdrv; 3360 3361 return 0; 3362 } 3363
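/*
 * Editor's note: a simplified, hypothetical sketch of how this file is
 * wired into the core. The cdns core calls cdns3_gadget_init() once at
 * probe time; role switching then invokes the callbacks registered above
 * (the direct ->start/->stop calls below stand in for the core's role
 * switching machinery).
 */
#if 0
static int example_role_flow(struct cdns *cdns)
{
	int ret = cdns3_gadget_init(cdns);	/* registers the role driver */

	if (ret)
		return ret;
	ret = cdns->roles[USB_ROLE_DEVICE]->start(cdns); /* __cdns3_gadget_init() */
	cdns->roles[USB_ROLE_DEVICE]->stop(cdns);	 /* cdns3_gadget_exit() */
	return ret;
}
#endif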