// SPDX-License-Identifier: GPL-2.0
/*
 * Cadence USBSS DRD Driver - gadget side.
 *
 * Copyright (C) 2018-2019 Cadence Design Systems.
 * Copyright (C) 2017-2018 NXP
 *
 * Authors: Pawel Jez <pjez@cadence.com>,
 *          Pawel Laszczak <pawell@cadence.com>
 *          Peter Chen <peter.chen@nxp.com>
 */

/*
 * Work around 1:
 * In some situations the controller may get a stale data address in a TRB,
 * in the following sequence:
 * 1. Controller reads a TRB that includes the data address
 * 2. Software updates TRBs, including the data address and the Cycle bit
 * 3. Controller reads the TRB that includes the Cycle bit
 * 4. DMA runs with the stale data address
 *
 * To fix this problem, the driver needs to make the first TRB in a TD
 * invalid. After preparing all TRBs the driver needs to check the DMA
 * position; if DMA points to the first just-added TRB and the doorbell is 1,
 * then the driver must defer making this TRB valid. This TRB will be made
 * valid while adding the next TRB, and only if DMA is stopped or at a
 * TRBERR interrupt.
 *
 * The issue has been fixed in the DEV_VER_V3 version of the controller.
 *
 * Work around 2:
 * For OUT endpoints the controller has shared on-chip buffers for all
 * incoming packets, including ep0out. It's a FIFO buffer, so packets must be
 * handled by DMA in the correct order. If the first packet in the buffer
 * is not handled, then the following packets directed to other endpoints
 * and functions will be blocked.
 * Additionally, packets directed to one endpoint can block the entire
 * on-chip buffer. In this case transfers to other endpoints will also be
 * blocked.
 *
 * To resolve this issue, after raising the descriptor missing interrupt the
 * driver prepares an internal usb_request object and uses it to arm a DMA
 * transfer.
 *
 * The problematic situation was observed when an endpoint had been enabled
 * but no usb_request was queued. The driver tries to detect such endpoints
 * and uses this workaround only for them.
 *
 * The driver uses a limited number of buffers. This number can be set by the
 * macro CDNS3_WA2_NUM_BUFFERS.
 *
 * Such a blocking situation was observed on the ACM gadget. For this
 * function the host sends an OUT data packet but the ACM function is not
 * prepared for it. This causes the buffer held in on-chip memory to block
 * transfers to other endpoints.
 *
 * The issue has been fixed in the DEV_VER_V2 version of the controller.
 *
 */

#include <linux/dma-mapping.h>
#include <linux/usb/gadget.h>
#include <linux/module.h>
#include <linux/iopoll.h>

#include "core.h"
#include "gadget-export.h"
#include "cdns3-gadget.h"
#include "cdns3-trace.h"
#include "drd.h"

static int __cdns3_gadget_ep_queue(struct usb_ep *ep,
				   struct usb_request *request,
				   gfp_t gfp_flags);

static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
				 struct usb_request *request);

static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep,
					struct usb_request *request);

/**
 * cdns3_clear_register_bit - clear bit in given register.
 * @ptr: address of device controller register to be read and changed
 * @mask: bits requested to clear
 */
static void cdns3_clear_register_bit(void __iomem *ptr, u32 mask)
{
	mask = readl(ptr) & ~mask;
	writel(mask, ptr);
}
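/*
 * Usage sketch (illustrative only, not part of the driver): the two helpers
 * around this comment implement a read-modify-write on a controller
 * register, e.g.:
 *
 *	cdns3_set_register_bit(&priv_dev->regs->ep_cfg, EP_CFG_ENABLE);
 *
 * They are not atomic with respect to concurrent register writers; callers
 * are expected to serialize access (e.g. under priv_dev->lock).
 */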
/**
 * cdns3_set_register_bit - set bit in given register.
 * @ptr: address of device controller register to be read and changed
 * @mask: bits requested to set
 */
void cdns3_set_register_bit(void __iomem *ptr, u32 mask)
{
	mask = readl(ptr) | mask;
	writel(mask, ptr);
}

/**
 * cdns3_ep_addr_to_index - converts endpoint address to
 * index of endpoint object in the cdns3_device.eps[] container
 * @ep_addr: endpoint address for which endpoint object is required
 *
 */
u8 cdns3_ep_addr_to_index(u8 ep_addr)
{
	return (((ep_addr & 0x7F)) + ((ep_addr & USB_DIR_IN) ? 16 : 0));
}

static int cdns3_get_dma_pos(struct cdns3_device *priv_dev,
			     struct cdns3_endpoint *priv_ep)
{
	int dma_index;

	dma_index = readl(&priv_dev->regs->ep_traddr) - priv_ep->trb_pool_dma;

	return dma_index / TRB_SIZE;
}

/**
 * cdns3_next_request - returns next request from list
 * @list: list containing requests
 *
 * Returns request or NULL if no requests in list
 */
struct usb_request *cdns3_next_request(struct list_head *list)
{
	return list_first_entry_or_null(list, struct usb_request, list);
}

/**
 * cdns3_next_align_buf - returns next buffer from list
 * @list: list containing buffers
 *
 * Returns buffer or NULL if no buffers in list
 */
static struct cdns3_aligned_buf *cdns3_next_align_buf(struct list_head *list)
{
	return list_first_entry_or_null(list, struct cdns3_aligned_buf, list);
}

/**
 * cdns3_next_priv_request - returns next request from list
 * @list: list containing requests
 *
 * Returns request or NULL if no requests in list
 */
static struct cdns3_request *cdns3_next_priv_request(struct list_head *list)
{
	return list_first_entry_or_null(list, struct cdns3_request, list);
}

/**
 * cdns3_select_ep - selects endpoint
 * @priv_dev: extended gadget object
 * @ep: endpoint address
 */
void cdns3_select_ep(struct cdns3_device *priv_dev, u32 ep)
{
	if (priv_dev->selected_ep == ep)
		return;

	priv_dev->selected_ep = ep;
	writel(ep, &priv_dev->regs->ep_sel);
}
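/*
 * Many per-endpoint registers (ep_cfg, ep_cmd, ep_sts, ep_tdl, ...) are
 * banked behind ep_sel, so the access pattern throughout this file is
 * select-then-touch, e.g. (illustrative only):
 *
 *	cdns3_select_ep(priv_dev, priv_ep->num | priv_ep->dir);
 *	tdl = cdns3_get_tdl(priv_dev);
 */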
/**
 * cdns3_get_tdl - gets current tdl for selected endpoint.
 * @priv_dev: extended gadget object
 *
 * Before calling this function the appropriate endpoint must
 * be selected by means of the cdns3_select_ep function.
 */
static int cdns3_get_tdl(struct cdns3_device *priv_dev)
{
	if (priv_dev->dev_ver < DEV_VER_V3)
		return EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd));
	else
		return readl(&priv_dev->regs->ep_tdl);
}

dma_addr_t cdns3_trb_virt_to_dma(struct cdns3_endpoint *priv_ep,
				 struct cdns3_trb *trb)
{
	u32 offset = (char *)trb - (char *)priv_ep->trb_pool;

	return priv_ep->trb_pool_dma + offset;
}

static int cdns3_ring_size(struct cdns3_endpoint *priv_ep)
{
	switch (priv_ep->type) {
	case USB_ENDPOINT_XFER_ISOC:
		return TRB_ISO_RING_SIZE;
	case USB_ENDPOINT_XFER_CONTROL:
		return TRB_CTRL_RING_SIZE;
	default:
		if (priv_ep->use_streams)
			return TRB_STREAM_RING_SIZE;
		else
			return TRB_RING_SIZE;
	}
}

static void cdns3_free_trb_pool(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;

	if (priv_ep->trb_pool) {
		dma_free_coherent(priv_dev->sysdev,
				  cdns3_ring_size(priv_ep),
				  priv_ep->trb_pool, priv_ep->trb_pool_dma);
		priv_ep->trb_pool = NULL;
	}
}

/**
 * cdns3_allocate_trb_pool - Allocates TRB's pool for selected endpoint
 * @priv_ep: endpoint object
 *
 * Function will return 0 on success or -ENOMEM on allocation error
 */
int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	int ring_size = cdns3_ring_size(priv_ep);
	int num_trbs = ring_size / TRB_SIZE;
	struct cdns3_trb *link_trb;

	if (priv_ep->trb_pool && priv_ep->alloc_ring_size < ring_size)
		cdns3_free_trb_pool(priv_ep);

	if (!priv_ep->trb_pool) {
		priv_ep->trb_pool = dma_alloc_coherent(priv_dev->sysdev,
						       ring_size,
						       &priv_ep->trb_pool_dma,
						       GFP_DMA32 | GFP_ATOMIC);
		if (!priv_ep->trb_pool)
			return -ENOMEM;

		priv_ep->alloc_ring_size = ring_size;
	}

	memset(priv_ep->trb_pool, 0, ring_size);

	priv_ep->num_trbs = num_trbs;

	if (!priv_ep->num)
		return 0;

	/* Initialize the last TRB as Link TRB */
	link_trb = (priv_ep->trb_pool + (priv_ep->num_trbs - 1));

	if (priv_ep->use_streams) {
		/*
		 * For stream capable endpoints the driver uses a single
		 * correct TRB. The last TRB has a zeroed cycle bit.
		 */
		link_trb->control = 0;
	} else {
		link_trb->buffer = cpu_to_le32(TRB_BUFFER(priv_ep->trb_pool_dma));
		link_trb->control = cpu_to_le32(TRB_CYCLE | TRB_TYPE(TRB_LINK) | TRB_TOGGLE);
	}
	return 0;
}

/**
 * cdns3_ep_stall_flush - Stalls and flushes selected endpoint
 * @priv_ep: endpoint object
 *
 * Endpoint must be selected before call to this function
 */
static void cdns3_ep_stall_flush(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	int val;

	trace_cdns3_halt(priv_ep, 1, 1);

	writel(EP_CMD_DFLUSH | EP_CMD_ERDY | EP_CMD_SSTALL,
	       &priv_dev->regs->ep_cmd);

	/* wait for DFLUSH cleared */
	readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
				  !(val & EP_CMD_DFLUSH), 1, 1000);
	priv_ep->flags |= EP_STALLED;
	priv_ep->flags &= ~EP_STALL_PENDING;
}
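/*
 * Ring geometry sketch (illustrative): with ring_size / TRB_SIZE == N,
 * slots 0 .. N-2 carry transfer TRBs and slot N-1 holds the Link TRB
 * initialized above, which points DMA back to trb_pool_dma and toggles the
 * producer cycle state (TRB_TOGGLE) on every wrap.
 */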
/**
 * cdns3_hw_reset_eps_config - reset endpoints configuration kept by controller.
 * @priv_dev: extended gadget object
 */
void cdns3_hw_reset_eps_config(struct cdns3_device *priv_dev)
{
	int i;

	writel(USB_CONF_CFGRST, &priv_dev->regs->usb_conf);

	cdns3_allow_enable_l1(priv_dev, 0);
	priv_dev->hw_configured_flag = 0;
	priv_dev->onchip_used_size = 0;
	priv_dev->out_mem_is_allocated = 0;
	priv_dev->wait_for_setup = 0;
	priv_dev->using_streams = 0;

	for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++)
		if (priv_dev->eps[i])
			priv_dev->eps[i]->flags &= ~EP_CONFIGURED;
}

/**
 * cdns3_ep_inc_trb - increment a trb index.
 * @index: Pointer to the TRB index to increment.
 * @cs: Cycle state
 * @trb_in_seg: number of TRBs in segment
 *
 * The index should never point to the link TRB. After incrementing,
 * if it would point to the link TRB, wrap around to the beginning and
 * toggle the cycle state bit. The link TRB is always the last TRB entry.
 */
static void cdns3_ep_inc_trb(int *index, u8 *cs, int trb_in_seg)
{
	(*index)++;
	if (*index == (trb_in_seg - 1)) {
		*index = 0;
		*cs ^= 1;
	}
}

/**
 * cdns3_ep_inc_enq - increment endpoint's enqueue pointer
 * @priv_ep: The endpoint whose enqueue pointer we're incrementing
 */
static void cdns3_ep_inc_enq(struct cdns3_endpoint *priv_ep)
{
	priv_ep->free_trbs--;
	cdns3_ep_inc_trb(&priv_ep->enqueue, &priv_ep->pcs, priv_ep->num_trbs);
}

/**
 * cdns3_ep_inc_deq - increment endpoint's dequeue pointer
 * @priv_ep: The endpoint whose dequeue pointer we're incrementing
 */
static void cdns3_ep_inc_deq(struct cdns3_endpoint *priv_ep)
{
	priv_ep->free_trbs++;
	cdns3_ep_inc_trb(&priv_ep->dequeue, &priv_ep->ccs, priv_ep->num_trbs);
}

static void cdns3_move_deq_to_next_trb(struct cdns3_request *priv_req)
{
	struct cdns3_endpoint *priv_ep = priv_req->priv_ep;
	int current_trb = priv_req->start_trb;

	while (current_trb != priv_req->end_trb) {
		cdns3_ep_inc_deq(priv_ep);
		current_trb = priv_ep->dequeue;
	}

	cdns3_ep_inc_deq(priv_ep);
}

/**
 * cdns3_allow_enable_l1 - enable/disable permits to transition to L1.
 * @priv_dev: Extended gadget object
 * @enable: Enable/disable permit to transition to L1.
 *
 * If the USB_CONF_L1EN bit is set and the device receives an Extended Token
 * packet, then the controller answers with an ACK handshake.
 * If the USB_CONF_L1DS bit is set and the device receives an Extended Token
 * packet, then the controller answers with a NYET handshake.
 */
void cdns3_allow_enable_l1(struct cdns3_device *priv_dev, int enable)
{
	if (enable)
		writel(USB_CONF_L1EN, &priv_dev->regs->usb_conf);
	else
		writel(USB_CONF_L1DS, &priv_dev->regs->usb_conf);
}

enum usb_device_speed cdns3_get_speed(struct cdns3_device *priv_dev)
{
	u32 reg;

	reg = readl(&priv_dev->regs->usb_sts);

	if (DEV_SUPERSPEED(reg))
		return USB_SPEED_SUPER;
	else if (DEV_HIGHSPEED(reg))
		return USB_SPEED_HIGH;
	else if (DEV_FULLSPEED(reg))
		return USB_SPEED_FULL;
	else if (DEV_LOWSPEED(reg))
		return USB_SPEED_LOW;
	return USB_SPEED_UNKNOWN;
}
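/*
 * Worked example for cdns3_ep_inc_trb() above (illustrative): with
 * trb_in_seg == 8, the index advances 0, 1, ..., 6 and then wraps straight
 * back to 0 while toggling the cycle state, because slot 7 is occupied by
 * the Link TRB and must never be enqueued or dequeued directly.
 */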
/**
 * cdns3_start_all_request - add to the ring all requests not started
 * @priv_dev: Extended gadget object
 * @priv_ep: The endpoint for whom requests will be started.
 *
 * Returns -ENOBUFS if the transfer ring does not have enough free TRBs to
 * start all requests.
 */
static int cdns3_start_all_request(struct cdns3_device *priv_dev,
				   struct cdns3_endpoint *priv_ep)
{
	struct usb_request *request;
	int ret = 0;
	u8 pending_empty = list_empty(&priv_ep->pending_req_list);

	/*
	 * If the last pending transfer is INTERNAL
	 * OR streams are enabled for this endpoint,
	 * do NOT start a new transfer while the last one is still pending.
	 */
	if (!pending_empty) {
		struct cdns3_request *priv_req;

		request = cdns3_next_request(&priv_ep->pending_req_list);
		priv_req = to_cdns3_request(request);
		if ((priv_req->flags & REQUEST_INTERNAL) ||
		    (priv_ep->flags & EP_TDLCHK_EN) ||
		    priv_ep->use_streams) {
			dev_dbg(priv_dev->dev, "Blocking external request\n");
			return ret;
		}
	}

	while (!list_empty(&priv_ep->deferred_req_list)) {
		request = cdns3_next_request(&priv_ep->deferred_req_list);

		if (!priv_ep->use_streams) {
			ret = cdns3_ep_run_transfer(priv_ep, request);
		} else {
			priv_ep->stream_sg_idx = 0;
			ret = cdns3_ep_run_stream_transfer(priv_ep, request);
		}
		if (ret)
			return ret;

		list_del(&request->list);
		list_add_tail(&request->list,
			      &priv_ep->pending_req_list);
		if (request->stream_id != 0 || (priv_ep->flags & EP_TDLCHK_EN))
			break;
	}

	priv_ep->flags &= ~EP_RING_FULL;
	return ret;
}

/*
 * WA2: Set flag for all non-ISOC OUT endpoints. If this flag is set, the
 * driver tries to detect whether the endpoint needs an additional internal
 * buffer for unblocking the on-chip FIFO buffer. This flag will be cleared
 * if DMA is armed before the first DESCMISS interrupt.
 */
#define cdns3_wa2_enable_detection(priv_dev, priv_ep, reg) do { \
	if (!priv_ep->dir && priv_ep->type != USB_ENDPOINT_XFER_ISOC) { \
		priv_ep->flags |= EP_QUIRK_EXTRA_BUF_DET; \
		(reg) |= EP_STS_EN_DESCMISEN; \
	} } while (0)

static void __cdns3_descmiss_copy_data(struct usb_request *request,
				       struct usb_request *descmiss_req)
{
	int length = request->actual + descmiss_req->actual;
	struct scatterlist *s = request->sg;

	if (!s) {
		if (length <= request->length) {
			memcpy(&((u8 *)request->buf)[request->actual],
			       descmiss_req->buf,
			       descmiss_req->actual);
			request->actual = length;
		} else {
			/* It should never occur */
			request->status = -ENOMEM;
		}
	} else {
		if (length <= sg_dma_len(s)) {
			void *p = phys_to_virt(sg_dma_address(s));

			memcpy(&((u8 *)p)[request->actual],
			       descmiss_req->buf,
			       descmiss_req->actual);
			request->actual = length;
		} else {
			request->status = -ENOMEM;
		}
	}
}
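/*
 * Worked example (illustrative, sizes hypothetical): if two internal
 * DESCMISS buffers of 512 and 300 bytes were filled before the class driver
 * queued a 1024-byte request, the copy above appends them back-to-back:
 * the first memcpy lands at offset 0 and sets request->actual to 512, the
 * second lands at offset 512 and leaves request->actual at 812.
 */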
/**
 * cdns3_wa2_descmiss_copy_data - copy data from internal requests to
 * request queued by class driver.
 * @priv_ep: extended endpoint object
 * @request: request object
 */
static void cdns3_wa2_descmiss_copy_data(struct cdns3_endpoint *priv_ep,
					 struct usb_request *request)
{
	struct usb_request *descmiss_req;
	struct cdns3_request *descmiss_priv_req;

	while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
		int chunk_end;

		descmiss_priv_req =
			cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
		descmiss_req = &descmiss_priv_req->request;

		/* driver can't touch pending request */
		if (descmiss_priv_req->flags & REQUEST_PENDING)
			break;

		chunk_end = descmiss_priv_req->flags & REQUEST_INTERNAL_CH;
		request->status = descmiss_req->status;
		__cdns3_descmiss_copy_data(request, descmiss_req);
		list_del_init(&descmiss_priv_req->list);
		kfree(descmiss_req->buf);
		cdns3_gadget_ep_free_request(&priv_ep->endpoint, descmiss_req);
		--priv_ep->wa2_counter;

		if (!chunk_end)
			break;
	}
}

static struct usb_request *cdns3_wa2_gadget_giveback(struct cdns3_device *priv_dev,
						     struct cdns3_endpoint *priv_ep,
						     struct cdns3_request *priv_req)
{
	if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN &&
	    priv_req->flags & REQUEST_INTERNAL) {
		struct usb_request *req;

		req = cdns3_next_request(&priv_ep->deferred_req_list);

		priv_ep->descmis_req = NULL;

		if (!req)
			return NULL;

		/* unmap the gadget request before copying data */
		usb_gadget_unmap_request_by_dev(priv_dev->sysdev, req,
						priv_ep->dir);

		cdns3_wa2_descmiss_copy_data(priv_ep, req);
		if (!(priv_ep->flags & EP_QUIRK_END_TRANSFER) &&
		    req->length != req->actual) {
			/* wait for next part of transfer */
			/* re-map the gadget request buffer */
			usb_gadget_map_request_by_dev(priv_dev->sysdev, req,
						      usb_endpoint_dir_in(priv_ep->endpoint.desc));
			return NULL;
		}

		if (req->status == -EINPROGRESS)
			req->status = 0;

		list_del_init(&req->list);
		cdns3_start_all_request(priv_dev, priv_ep);
		return req;
	}

	return &priv_req->request;
}
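/*
 * Note on the helper above: a NULL return tells cdns3_gadget_giveback()
 * that the class driver's request is not ready to complete yet (more
 * DESCMISS chunks are expected), while a non-NULL return hands back the
 * request that should actually be given to the gadget driver.
 */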
static int cdns3_wa2_gadget_ep_queue(struct cdns3_device *priv_dev,
				     struct cdns3_endpoint *priv_ep,
				     struct cdns3_request *priv_req)
{
	int deferred = 0;

	/*
	 * If a transfer was queued before DESCMISS appeared, then we
	 * can disable handling of the DESCMISS interrupt. The driver assumes
	 * that it can disable special treatment for this endpoint.
	 */
	if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET) {
		u32 reg;

		cdns3_select_ep(priv_dev, priv_ep->num | priv_ep->dir);
		priv_ep->flags &= ~EP_QUIRK_EXTRA_BUF_DET;
		reg = readl(&priv_dev->regs->ep_sts_en);
		reg &= ~EP_STS_EN_DESCMISEN;
		trace_cdns3_wa2(priv_ep, "workaround disabled\n");
		writel(reg, &priv_dev->regs->ep_sts_en);
	}

	if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN) {
		u8 pending_empty = list_empty(&priv_ep->pending_req_list);
		u8 descmiss_empty = list_empty(&priv_ep->wa2_descmiss_req_list);

		/*
		 * The DESCMISS transfer has finished, so data will be
		 * directly copied from the internally allocated usb_request
		 * objects.
		 */
		if (pending_empty && !descmiss_empty &&
		    !(priv_req->flags & REQUEST_INTERNAL)) {
			cdns3_wa2_descmiss_copy_data(priv_ep,
						     &priv_req->request);

			trace_cdns3_wa2(priv_ep, "get internal stored data");

			list_add_tail(&priv_req->request.list,
				      &priv_ep->pending_req_list);
			cdns3_gadget_giveback(priv_ep, priv_req,
					      priv_req->request.status);

			/*
			 * The driver intentionally returns a positive value
			 * here; it informs the caller that the transfer has
			 * already finished.
			 */
			return EINPROGRESS;
		}

		/*
		 * The driver will wait for the DESCMISS transfer to complete
		 * before starting a new, non-DESCMISS transfer.
		 */
		if (!pending_empty && !descmiss_empty) {
			trace_cdns3_wa2(priv_ep, "wait for pending transfer\n");
			deferred = 1;
		}

		if (priv_req->flags & REQUEST_INTERNAL)
			list_add_tail(&priv_req->list,
				      &priv_ep->wa2_descmiss_req_list);
	}

	return deferred;
}

static void cdns3_wa2_remove_old_request(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_request *priv_req;

	while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
		u8 chain;

		priv_req = cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
		chain = !!(priv_req->flags & REQUEST_INTERNAL_CH);

		trace_cdns3_wa2(priv_ep, "removes eldest request");

		kfree(priv_req->request.buf);
		cdns3_gadget_ep_free_request(&priv_ep->endpoint,
					     &priv_req->request);
		list_del_init(&priv_req->list);
		--priv_ep->wa2_counter;

		if (!chain)
			break;
	}
}

/**
 * cdns3_wa2_descmissing_packet - handles descriptor missing event.
 * @priv_ep: extended endpoint object
 *
 * This function is used only for WA2. For more information see the
 * Work around 2 description.
 */
static void cdns3_wa2_descmissing_packet(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_request *priv_req;
	struct usb_request *request;
	u8 pending_empty = list_empty(&priv_ep->pending_req_list);

	/* check for pending transfer */
	if (!pending_empty) {
		trace_cdns3_wa2(priv_ep, "Ignoring Descriptor missing IRQ\n");
		return;
	}

	if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET) {
		priv_ep->flags &= ~EP_QUIRK_EXTRA_BUF_DET;
		priv_ep->flags |= EP_QUIRK_EXTRA_BUF_EN;
	}

	trace_cdns3_wa2(priv_ep, "Description Missing detected\n");

	if (priv_ep->wa2_counter >= CDNS3_WA2_NUM_BUFFERS) {
		trace_cdns3_wa2(priv_ep, "WA2 overflow\n");
		cdns3_wa2_remove_old_request(priv_ep);
	}

	request = cdns3_gadget_ep_alloc_request(&priv_ep->endpoint,
						GFP_ATOMIC);
	if (!request)
		goto err;

	priv_req = to_cdns3_request(request);
	priv_req->flags |= REQUEST_INTERNAL;
	/*
	 * If this field is still assigned, it indicates that the transfer
	 * related to this request has not finished yet. In this case the
	 * driver simply allocates the next request and sets the
	 * REQUEST_INTERNAL_CH flag on the previous one, to indicate that the
	 * current request is part of the previous one.
	 */
	if (priv_ep->descmis_req)
		priv_ep->descmis_req->flags |= REQUEST_INTERNAL_CH;

	priv_req->request.buf = kzalloc(CDNS3_DESCMIS_BUF_SIZE,
					GFP_ATOMIC);
	priv_ep->wa2_counter++;

	if (!priv_req->request.buf) {
		cdns3_gadget_ep_free_request(&priv_ep->endpoint, request);
		goto err;
	}

	priv_req->request.length = CDNS3_DESCMIS_BUF_SIZE;
	priv_ep->descmis_req = priv_req;

	__cdns3_gadget_ep_queue(&priv_ep->endpoint,
				&priv_ep->descmis_req->request,
				GFP_ATOMIC);

	return;

err:
	dev_err(priv_ep->cdns3_dev->dev,
		"Failed: No sufficient memory for DESCMIS\n");
}

static void cdns3_wa2_reset_tdl(struct cdns3_device *priv_dev)
{
	u16 tdl = EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd));

	if (tdl) {
		u16 reset_val = EP_CMD_TDL_MAX + 1 - tdl;

		writel(EP_CMD_TDL_SET(reset_val) | EP_CMD_STDL,
		       &priv_dev->regs->ep_cmd);
	}
}

static void cdns3_wa2_check_outq_status(struct cdns3_device *priv_dev)
{
	u32 ep_sts_reg;

	/* select EP0-out */
	cdns3_select_ep(priv_dev, 0);

	ep_sts_reg = readl(&priv_dev->regs->ep_sts);

	if (EP_STS_OUTQ_VAL(ep_sts_reg)) {
		u32 outq_ep_num = EP_STS_OUTQ_NO(ep_sts_reg);
		struct cdns3_endpoint *outq_ep = priv_dev->eps[outq_ep_num];

		if ((outq_ep->flags & EP_ENABLED) && !(outq_ep->use_streams) &&
		    outq_ep->type != USB_ENDPOINT_XFER_ISOC && outq_ep_num) {
			u8 pending_empty = list_empty(&outq_ep->pending_req_list);

			if ((outq_ep->flags & EP_QUIRK_EXTRA_BUF_DET) ||
			    (outq_ep->flags & EP_QUIRK_EXTRA_BUF_EN) ||
			    !pending_empty) {
			} else {
				u32 ep_sts_en_reg;
				u32 ep_cmd_reg;

				cdns3_select_ep(priv_dev, outq_ep->num |
						outq_ep->dir);
				ep_sts_en_reg = readl(&priv_dev->regs->ep_sts_en);
				ep_cmd_reg = readl(&priv_dev->regs->ep_cmd);

				outq_ep->flags |= EP_TDLCHK_EN;
				cdns3_set_register_bit(&priv_dev->regs->ep_cfg,
						       EP_CFG_TDL_CHK);

				cdns3_wa2_enable_detection(priv_dev, outq_ep,
							   ep_sts_en_reg);
				writel(ep_sts_en_reg,
				       &priv_dev->regs->ep_sts_en);
				/* reset tdl value to zero */
				cdns3_wa2_reset_tdl(priv_dev);
				/*
				 * Memory barrier - Reset tdl before ringing the
				 * doorbell.
				 */
				wmb();
				if (EP_CMD_DRDY & ep_cmd_reg) {
					trace_cdns3_wa2(outq_ep, "Enabling WA2 skipping doorbell\n");

				} else {
					trace_cdns3_wa2(outq_ep, "Enabling WA2 ringing doorbell\n");
					/*
					 * ring doorbell to generate DESCMIS irq
					 */
					writel(EP_CMD_DRDY,
					       &priv_dev->regs->ep_cmd);
				}
			}
		}
	}
}
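/*
 * Locking sketch for the completion path below (illustrative): the caller
 * holds priv_dev->lock with interrupts disabled; the lock is dropped only
 * around the class driver's ->complete() callback, which may queue new
 * requests and therefore must not run under the controller lock:
 *
 *	spin_unlock(&priv_dev->lock);
 *	usb_gadget_giveback_request(&priv_ep->endpoint, request);
 *	spin_lock(&priv_dev->lock);
 */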
/**
 * cdns3_gadget_giveback - call struct usb_request's ->complete callback
 * @priv_ep: The endpoint to which the request belongs
 * @priv_req: The request we're giving back
 * @status: completion code for the request
 *
 * Must be called with controller's lock held and interrupts disabled. This
 * function will unmap @req and call its ->complete() callback to notify upper
 * layers that it has completed.
 */
void cdns3_gadget_giveback(struct cdns3_endpoint *priv_ep,
			   struct cdns3_request *priv_req,
			   int status)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct usb_request *request = &priv_req->request;

	list_del_init(&request->list);

	if (request->status == -EINPROGRESS)
		request->status = status;

	usb_gadget_unmap_request_by_dev(priv_dev->sysdev, request,
					priv_ep->dir);

	if ((priv_req->flags & REQUEST_UNALIGNED) &&
	    priv_ep->dir == USB_DIR_OUT && !request->status)
		memcpy(request->buf, priv_req->aligned_buf->buf,
		       request->length);

	priv_req->flags &= ~(REQUEST_PENDING | REQUEST_UNALIGNED);
	/* All TRBs have finished, clear the counter */
	priv_req->finished_trb = 0;
	trace_cdns3_gadget_giveback(priv_req);

	if (priv_dev->dev_ver < DEV_VER_V2) {
		request = cdns3_wa2_gadget_giveback(priv_dev, priv_ep,
						    priv_req);
		if (!request)
			return;
	}

	if (request->complete) {
		spin_unlock(&priv_dev->lock);
		usb_gadget_giveback_request(&priv_ep->endpoint,
					    request);
		spin_lock(&priv_dev->lock);
	}

	if (request->buf == priv_dev->zlp_buf)
		cdns3_gadget_ep_free_request(&priv_ep->endpoint, request);
}

static void cdns3_wa1_restore_cycle_bit(struct cdns3_endpoint *priv_ep)
{
	/* Work around for stale data address in TRB */
	if (priv_ep->wa1_set) {
		trace_cdns3_wa1(priv_ep, "restore cycle bit");

		priv_ep->wa1_set = 0;
		priv_ep->wa1_trb_index = 0xFFFF;
		if (priv_ep->wa1_cycle_bit) {
			priv_ep->wa1_trb->control =
				priv_ep->wa1_trb->control | cpu_to_le32(0x1);
		} else {
			priv_ep->wa1_trb->control =
				priv_ep->wa1_trb->control & cpu_to_le32(~0x1);
		}
	}
}

static void cdns3_free_aligned_request_buf(struct work_struct *work)
{
	struct cdns3_device *priv_dev = container_of(work, struct cdns3_device,
						     aligned_buf_wq);
	struct cdns3_aligned_buf *buf, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&priv_dev->lock, flags);

	list_for_each_entry_safe(buf, tmp, &priv_dev->aligned_buf_list, list) {
		if (!buf->in_use) {
			list_del(&buf->list);

			/*
			 * Re-enable interrupts to free DMA capable memory.
			 * Driver can't free this memory with disabled
			 * interrupts.
			 */
			spin_unlock_irqrestore(&priv_dev->lock, flags);
			dma_free_coherent(priv_dev->sysdev, buf->size,
					  buf->buf, buf->dma);
			kfree(buf);
			spin_lock_irqsave(&priv_dev->lock, flags);
		}
	}

	spin_unlock_irqrestore(&priv_dev->lock, flags);
}

static int cdns3_prepare_aligned_request_buf(struct cdns3_request *priv_req)
{
	struct cdns3_endpoint *priv_ep = priv_req->priv_ep;
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct cdns3_aligned_buf *buf;
	/* check if the buffer is aligned to 8 */
	if (!((uintptr_t)priv_req->request.buf & 0x7))
		return 0;

	buf = priv_req->aligned_buf;

	if (!buf || priv_req->request.length > buf->size) {
		buf = kzalloc(sizeof(*buf), GFP_ATOMIC);
		if (!buf)
			return -ENOMEM;

		buf->size = priv_req->request.length;

		buf->buf = dma_alloc_coherent(priv_dev->sysdev,
					      buf->size,
					      &buf->dma,
					      GFP_ATOMIC);
		if (!buf->buf) {
			kfree(buf);
			return -ENOMEM;
		}

		if (priv_req->aligned_buf) {
			trace_cdns3_free_aligned_request(priv_req);
			priv_req->aligned_buf->in_use = 0;
			queue_work(system_freezable_wq,
				   &priv_dev->aligned_buf_wq);
		}

		buf->in_use = 1;
		priv_req->aligned_buf = buf;

		list_add_tail(&buf->list,
			      &priv_dev->aligned_buf_list);
	}

	if (priv_ep->dir == USB_DIR_IN) {
		memcpy(buf->buf, priv_req->request.buf,
		       priv_req->request.length);
	}

	priv_req->flags |= REQUEST_UNALIGNED;
	trace_cdns3_prepare_aligned_request(priv_req);

	return 0;
}

static int cdns3_wa1_update_guard(struct cdns3_endpoint *priv_ep,
				  struct cdns3_trb *trb)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;

	if (!priv_ep->wa1_set) {
		u32 doorbell;

		doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);

		if (doorbell) {
			priv_ep->wa1_cycle_bit = priv_ep->pcs ? TRB_CYCLE : 0;
			priv_ep->wa1_set = 1;
			priv_ep->wa1_trb = trb;
			priv_ep->wa1_trb_index = priv_ep->enqueue;
			trace_cdns3_wa1(priv_ep, "set guard");
			return 0;
		}
	}
	return 1;
}

static void cdns3_wa1_tray_restore_cycle_bit(struct cdns3_device *priv_dev,
					     struct cdns3_endpoint *priv_ep)
{
	int dma_index;
	u32 doorbell;

	doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);
	dma_index = cdns3_get_dma_pos(priv_dev, priv_ep);

	if (!doorbell || dma_index != priv_ep->wa1_trb_index)
		cdns3_wa1_restore_cycle_bit(priv_ep);
}

static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep,
					struct usb_request *request)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct cdns3_request *priv_req;
	struct cdns3_trb *trb;
	dma_addr_t trb_dma;
	int address;
	u32 control;
	u32 length;
	u32 tdl;
	unsigned int sg_idx = priv_ep->stream_sg_idx;

	priv_req = to_cdns3_request(request);
	address = priv_ep->endpoint.desc->bEndpointAddress;

	priv_ep->flags |= EP_PENDING_REQUEST;

	/* must allocate buffer aligned to 8 */
	if (priv_req->flags & REQUEST_UNALIGNED)
		trb_dma = priv_req->aligned_buf->dma;
	else
		trb_dma = request->dma;
	/* For stream capable endpoints the driver uses only a single TD. */
	trb = priv_ep->trb_pool + priv_ep->enqueue;
	priv_req->start_trb = priv_ep->enqueue;
	priv_req->end_trb = priv_req->start_trb;
	priv_req->trb = trb;

	cdns3_select_ep(priv_ep->cdns3_dev, address);

	control = TRB_TYPE(TRB_NORMAL) | TRB_CYCLE |
		  TRB_STREAM_ID(priv_req->request.stream_id) | TRB_ISP;

	if (!request->num_sgs) {
		trb->buffer = cpu_to_le32(TRB_BUFFER(trb_dma));
		length = request->length;
	} else {
		trb->buffer = cpu_to_le32(TRB_BUFFER(request->sg[sg_idx].dma_address));
		length = request->sg[sg_idx].length;
	}

	tdl = DIV_ROUND_UP(length, priv_ep->endpoint.maxpacket);

	trb->length = cpu_to_le32(TRB_BURST_LEN(16) | TRB_LEN(length));

	/*
	 * For the DEV_VER_V2 controller version we have enabled
	 * USB_CONF2_EN_TDL_TRB in the DMULT configuration.
	 * This enables TDL calculation based on TRB, hence setting TDL in TRB.
	 */
	if (priv_dev->dev_ver >= DEV_VER_V2) {
		if (priv_dev->gadget.speed == USB_SPEED_SUPER)
			trb->length |= cpu_to_le32(TRB_TDL_SS_SIZE(tdl));
	}
	priv_req->flags |= REQUEST_PENDING;

	trb->control = cpu_to_le32(control);

	trace_cdns3_prepare_trb(priv_ep, priv_req->trb);

	/*
	 * Memory barrier - Cycle Bit must be set before trb->length and
	 * trb->buffer fields.
	 */
	wmb();

	/* always first element */
	writel(EP_TRADDR_TRADDR(priv_ep->trb_pool_dma),
	       &priv_dev->regs->ep_traddr);

	if (!(priv_ep->flags & EP_STALLED)) {
		trace_cdns3_ring(priv_ep);
		/* clearing TRBERR and EP_STS_DESCMIS before setting DRDY */
		writel(EP_STS_TRBERR | EP_STS_DESCMIS, &priv_dev->regs->ep_sts);

		priv_ep->prime_flag = false;

		/*
		 * For controller version DEV_VER_V2 the tdl calculation
		 * is based on TRB.
		 */

		if (priv_dev->dev_ver < DEV_VER_V2)
			writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL,
			       &priv_dev->regs->ep_cmd);
		else if (priv_dev->dev_ver > DEV_VER_V2)
			writel(tdl, &priv_dev->regs->ep_tdl);

		priv_ep->last_stream_id = priv_req->request.stream_id;
		writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
		writel(EP_CMD_ERDY_SID(priv_req->request.stream_id) |
		       EP_CMD_ERDY, &priv_dev->regs->ep_cmd);

		trace_cdns3_doorbell_epx(priv_ep->name,
					 readl(&priv_dev->regs->ep_traddr));
	}

	/* WORKAROUND for transition to L0 */
	__cdns3_gadget_wakeup(priv_dev);

	return 0;
}

/**
 * cdns3_ep_run_transfer - start transfer on no-default endpoint hardware
 * @priv_ep: endpoint object
 * @request: request object
 *
 * Returns zero on success or negative value on failure
 */
static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
				 struct usb_request *request)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct cdns3_request *priv_req;
	struct cdns3_trb *trb;
	struct cdns3_trb *link_trb = NULL;
	dma_addr_t trb_dma;
	u32 togle_pcs = 1;
	int sg_iter = 0;
	int num_trb;
	int address;
	u32 control;
	int pcs;
	u16 total_tdl = 0;
	struct scatterlist *s = NULL;
	bool sg_supported = !!(request->num_mapped_sgs);
	if (priv_ep->type == USB_ENDPOINT_XFER_ISOC)
		num_trb = priv_ep->interval;
	else
		num_trb = sg_supported ? request->num_mapped_sgs : 1;

	if (num_trb > priv_ep->free_trbs) {
		priv_ep->flags |= EP_RING_FULL;
		return -ENOBUFS;
	}

	priv_req = to_cdns3_request(request);
	address = priv_ep->endpoint.desc->bEndpointAddress;

	priv_ep->flags |= EP_PENDING_REQUEST;

	/* must allocate buffer aligned to 8 */
	if (priv_req->flags & REQUEST_UNALIGNED)
		trb_dma = priv_req->aligned_buf->dma;
	else
		trb_dma = request->dma;

	trb = priv_ep->trb_pool + priv_ep->enqueue;
	priv_req->start_trb = priv_ep->enqueue;
	priv_req->trb = trb;

	cdns3_select_ep(priv_ep->cdns3_dev, address);

	/* prepare ring */
	if ((priv_ep->enqueue + num_trb) >= (priv_ep->num_trbs - 1)) {
		int doorbell, dma_index;
		u32 ch_bit = 0;

		doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);
		dma_index = cdns3_get_dma_pos(priv_dev, priv_ep);

		/* The driver can't update the LINK TRB while it is being processed. */
		if (doorbell && dma_index == priv_ep->num_trbs - 1) {
			priv_ep->flags |= EP_DEFERRED_DRDY;
			return -ENOBUFS;
		}

		/* updating the Cycle bit in the Link TRB before starting DMA */
		link_trb = priv_ep->trb_pool + (priv_ep->num_trbs - 1);
		/*
		 * For TRs of size 2, enabling TRB_CHAIN for epXin causes
		 * DMA to get stuck at the LINK TRB.
		 * On the other hand, removing TRB_CHAIN for longer TRs for
		 * epXout causes DMA to get stuck after handling the LINK TRB.
		 * To eliminate this strange behaviour the driver sets the
		 * TRB_CHAIN bit only for TR size > 2.
		 */
		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC ||
		    TRBS_PER_SEGMENT > 2)
			ch_bit = TRB_CHAIN;

		link_trb->control = cpu_to_le32(((priv_ep->pcs) ? TRB_CYCLE : 0) |
				    TRB_TYPE(TRB_LINK) | TRB_TOGGLE | ch_bit);
	}

	if (priv_dev->dev_ver <= DEV_VER_V2)
		togle_pcs = cdns3_wa1_update_guard(priv_ep, trb);

	if (sg_supported)
		s = request->sg;

	/* set incorrect Cycle Bit for first trb */
	control = priv_ep->pcs ? 0 : TRB_CYCLE;
	trb->length = 0;
	if (priv_dev->dev_ver >= DEV_VER_V2) {
		u16 td_size;

		td_size = DIV_ROUND_UP(request->length,
				       priv_ep->endpoint.maxpacket);
		if (priv_dev->gadget.speed == USB_SPEED_SUPER)
			trb->length = cpu_to_le32(TRB_TDL_SS_SIZE(td_size));
		else
			control |= TRB_TDL_HS_SIZE(td_size);
	}

	do {
		u32 length;

		/* fill TRB */
		control |= TRB_TYPE(TRB_NORMAL);
		if (sg_supported) {
			trb->buffer = cpu_to_le32(TRB_BUFFER(sg_dma_address(s)));
			length = sg_dma_len(s);
		} else {
			trb->buffer = cpu_to_le32(TRB_BUFFER(trb_dma));
			length = request->length;
		}

		if (priv_ep->flags & EP_TDLCHK_EN)
			total_tdl += DIV_ROUND_UP(length,
						  priv_ep->endpoint.maxpacket);

		trb->length |= cpu_to_le32(TRB_BURST_LEN(priv_ep->trb_burst_size) |
					   TRB_LEN(length));
		pcs = priv_ep->pcs ? TRB_CYCLE : 0;

		/*
		 * The first trb should be prepared as the last one, to avoid
		 * processing the transfer too early.
		 */
		if (sg_iter != 0)
			control |= pcs;

		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir) {
			control |= TRB_IOC | TRB_ISP;
		} else {
			/* for last element in TD or in SG list */
			if (sg_iter == (num_trb - 1) && sg_iter != 0)
				control |= pcs | TRB_IOC | TRB_ISP;
		}

		if (sg_iter)
			trb->control = cpu_to_le32(control);
		else
			priv_req->trb->control = cpu_to_le32(control);

		if (sg_supported) {
			trb->control |= cpu_to_le32(TRB_ISP);
			/* Don't set chain bit for last TRB */
			if (sg_iter < num_trb - 1)
				trb->control |= cpu_to_le32(TRB_CHAIN);

			s = sg_next(s);
		}

		control = 0;
		++sg_iter;
		priv_req->end_trb = priv_ep->enqueue;
		cdns3_ep_inc_enq(priv_ep);
		trb = priv_ep->trb_pool + priv_ep->enqueue;
		trb->length = 0;
	} while (sg_iter < num_trb);

	trb = priv_req->trb;

	priv_req->flags |= REQUEST_PENDING;
	priv_req->num_of_trb = num_trb;

	if (sg_iter == 1)
		trb->control |= cpu_to_le32(TRB_IOC | TRB_ISP);

	if (priv_dev->dev_ver < DEV_VER_V2 &&
	    (priv_ep->flags & EP_TDLCHK_EN)) {
		u16 tdl = total_tdl;
		u16 old_tdl = EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd));

		if (tdl > EP_CMD_TDL_MAX) {
			tdl = EP_CMD_TDL_MAX;
			priv_ep->pending_tdl = total_tdl - EP_CMD_TDL_MAX;
		}

		if (old_tdl < tdl) {
			tdl -= old_tdl;
			writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL,
			       &priv_dev->regs->ep_cmd);
		}
	}

	/*
	 * Memory barrier - the cycle bit must be set before the other fields
	 * in the trb.
	 */
	wmb();

	/* give the TD to the consumer */
	if (togle_pcs)
		trb->control = trb->control ^ cpu_to_le32(1);

	if (priv_dev->dev_ver <= DEV_VER_V2)
		cdns3_wa1_tray_restore_cycle_bit(priv_dev, priv_ep);

	if (num_trb > 1) {
		int i = 0;

		while (i < num_trb) {
			trace_cdns3_prepare_trb(priv_ep, trb + i);
			if (trb + i == link_trb) {
				trb = priv_ep->trb_pool;
				num_trb = num_trb - i;
				i = 0;
			} else {
				i++;
			}
		}
	} else {
		trace_cdns3_prepare_trb(priv_ep, priv_req->trb);
	}

	/*
	 * Memory barrier - Cycle Bit must be set before trb->length and
	 * trb->buffer fields.
	 */
	wmb();

	/*
	 * For DMULT mode we can set the address of the transfer ring only
	 * once after enabling the endpoint.
	 */
	if (priv_ep->flags & EP_UPDATE_EP_TRBADDR) {
		/*
		 * As long as SW is not ready to handle the OUT transfer,
		 * the ISO OUT endpoint should be disabled (EP_CFG.ENABLE = 0).
		 * EP_CFG_ENABLE must be set before updating ep_traddr.
		 */
		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir &&
		    !(priv_ep->flags & EP_QUIRK_ISO_OUT_EN)) {
			priv_ep->flags |= EP_QUIRK_ISO_OUT_EN;
			cdns3_set_register_bit(&priv_dev->regs->ep_cfg,
					       EP_CFG_ENABLE);
		}

		writel(EP_TRADDR_TRADDR(priv_ep->trb_pool_dma +
					priv_req->start_trb * TRB_SIZE),
					&priv_dev->regs->ep_traddr);

		priv_ep->flags &= ~EP_UPDATE_EP_TRBADDR;
	}

	if (!priv_ep->wa1_set && !(priv_ep->flags & EP_STALLED)) {
		trace_cdns3_ring(priv_ep);
		/* clearing TRBERR and EP_STS_DESCMIS before setting DRDY */
		writel(EP_STS_TRBERR | EP_STS_DESCMIS, &priv_dev->regs->ep_sts);
		writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
		trace_cdns3_doorbell_epx(priv_ep->name,
					 readl(&priv_dev->regs->ep_traddr));
	}

	/* WORKAROUND for transition to L0 */
	__cdns3_gadget_wakeup(priv_dev);

	return 0;
}

void cdns3_set_hw_configuration(struct cdns3_device *priv_dev)
{
	struct cdns3_endpoint *priv_ep;
	struct usb_ep *ep;

	if (priv_dev->hw_configured_flag)
		return;

	writel(USB_CONF_CFGSET, &priv_dev->regs->usb_conf);

	cdns3_set_register_bit(&priv_dev->regs->usb_conf,
			       USB_CONF_U1EN | USB_CONF_U2EN);

	priv_dev->hw_configured_flag = 1;

	list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) {
		if (ep->enabled) {
			priv_ep = ep_to_cdns3_ep(ep);
			cdns3_start_all_request(priv_dev, priv_ep);
		}
	}

	cdns3_allow_enable_l1(priv_dev, 1);
}
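/*
 * Worked example for the ring-position check below (illustrative): assume
 * an 8-slot ring with TRBS_PER_SEGMENT > 2, where ST = 2, ET = 5, DQ = 3
 * and DMA has advanced to CI = 5 with the doorbell still set. DQ lies
 * inside [ST, ET], the cycle bit at DQ matches priv_ep->ccs, and DQ < CI,
 * so the TRB at the dequeue position is reported as handled.
 */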
/**
 * cdns3_trb_handled - check whether trb has been handled by DMA
 *
 * @priv_ep: extended endpoint object.
 * @priv_req: request object for checking
 *
 * Endpoint must be selected before invoking this function.
 *
 * Returns false if request has not been handled by DMA, else returns true.
 *
 * SR - start ring
 * ER - end ring
 * DQ = priv_ep->dequeue - dequeue position
 * EQ = priv_ep->enqueue - enqueue position
 * ST = priv_req->start_trb - index of first TRB in transfer ring
 * ET = priv_req->end_trb - index of last TRB in transfer ring
 * CI = current_index - index of the TRB being processed by DMA.
 *
 * As a first step, we check whether priv_ep->dequeue lies between ST and
 * ET. Then, we check whether the cycle bit at index priv_ep->dequeue
 * is correct.
 *
 * some rules:
 * 1. priv_ep->dequeue never equals current_index.
 * 2. priv_ep->enqueue never exceeds priv_ep->dequeue
 * 3. exception: priv_ep->enqueue == priv_ep->dequeue
 *    and priv_ep->free_trbs is zero.
 *    This case indicates that the TR is full.
 *
 * In the two cases below, the request has been handled.
 * Case 1 - priv_ep->dequeue < current_index
 *      SR ... EQ ... DQ ... CI ... ER
 *      SR ... DQ ... CI ... EQ ... ER
 *
 * Case 2 - priv_ep->dequeue > current_index
 * This situation takes place when CI goes through the LINK TRB at the end
 * of the transfer ring.
 *      SR ... CI ... EQ ... DQ ... ER
 */
static bool cdns3_trb_handled(struct cdns3_endpoint *priv_ep,
			      struct cdns3_request *priv_req)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct cdns3_trb *trb;
	int current_index = 0;
	int handled = 0;
	int doorbell;

	current_index = cdns3_get_dma_pos(priv_dev, priv_ep);
	doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);

	/* current trb doesn't belong to this request */
	if (priv_req->start_trb < priv_req->end_trb) {
		if (priv_ep->dequeue > priv_req->end_trb)
			goto finish;

		if (priv_ep->dequeue < priv_req->start_trb)
			goto finish;
	}

	if ((priv_req->start_trb > priv_req->end_trb) &&
	    (priv_ep->dequeue > priv_req->end_trb) &&
	    (priv_ep->dequeue < priv_req->start_trb))
		goto finish;

	if ((priv_req->start_trb == priv_req->end_trb) &&
	    (priv_ep->dequeue != priv_req->end_trb))
		goto finish;

	trb = &priv_ep->trb_pool[priv_ep->dequeue];

	if ((le32_to_cpu(trb->control) & TRB_CYCLE) != priv_ep->ccs)
		goto finish;

	if (doorbell == 1 && current_index == priv_ep->dequeue)
		goto finish;

	/* The corner case for TRBS_PER_SEGMENT equal 2. */
	if (TRBS_PER_SEGMENT == 2 && priv_ep->type != USB_ENDPOINT_XFER_ISOC) {
		handled = 1;
		goto finish;
	}

	if (priv_ep->enqueue == priv_ep->dequeue &&
	    priv_ep->free_trbs == 0) {
		handled = 1;
	} else if (priv_ep->dequeue < current_index) {
		if ((current_index == (priv_ep->num_trbs - 1)) &&
		    !priv_ep->dequeue)
			goto finish;

		handled = 1;
	} else if (priv_ep->dequeue > current_index) {
		handled = 1;
	}

finish:
	trace_cdns3_request_handled(priv_req, current_index, handled);

	return handled;
}

static void cdns3_transfer_completed(struct cdns3_device *priv_dev,
				     struct cdns3_endpoint *priv_ep)
{
	struct cdns3_request *priv_req;
	struct usb_request *request;
	struct cdns3_trb *trb;
	bool request_handled = false;
	bool transfer_end = false;

	while (!list_empty(&priv_ep->pending_req_list)) {
		request = cdns3_next_request(&priv_ep->pending_req_list);
		priv_req = to_cdns3_request(request);

		trb = priv_ep->trb_pool + priv_ep->dequeue;

		/* The request was dequeued and the TRB was changed to TRB_LINK. */
		if (TRB_FIELD_TO_TYPE(le32_to_cpu(trb->control)) == TRB_LINK) {
			trace_cdns3_complete_trb(priv_ep, trb);
			cdns3_move_deq_to_next_trb(priv_req);
		}

		if (!request->stream_id) {
			/*
			 * Re-select endpoint. It could be changed by other CPU
			 * during handling usb_gadget_giveback_request.
			 */
			cdns3_select_ep(priv_dev, priv_ep->endpoint.address);

			while (cdns3_trb_handled(priv_ep, priv_req)) {
				priv_req->finished_trb++;
				if (priv_req->finished_trb >= priv_req->num_of_trb)
					request_handled = true;

				trb = priv_ep->trb_pool + priv_ep->dequeue;
				trace_cdns3_complete_trb(priv_ep, trb);

				if (!transfer_end)
					request->actual +=
						TRB_LEN(le32_to_cpu(trb->length));

				if (priv_req->num_of_trb > 1 &&
				    le32_to_cpu(trb->control) & TRB_SMM)
					transfer_end = true;

				cdns3_ep_inc_deq(priv_ep);
			}

			if (request_handled) {
				cdns3_gadget_giveback(priv_ep, priv_req, 0);
				request_handled = false;
				transfer_end = false;
			} else {
				goto prepare_next_td;
			}

			if (priv_ep->type != USB_ENDPOINT_XFER_ISOC &&
			    TRBS_PER_SEGMENT == 2)
				break;
		} else {
			/*
			 * Re-select endpoint. It could be changed by other CPU
			 * during handling usb_gadget_giveback_request.
			 */
			cdns3_select_ep(priv_dev, priv_ep->endpoint.address);

			trb = priv_ep->trb_pool;
			trace_cdns3_complete_trb(priv_ep, trb);

			if (trb != priv_req->trb)
				dev_warn(priv_dev->dev,
					 "request_trb=0x%p, queue_trb=0x%p\n",
					 priv_req->trb, trb);

			request->actual += TRB_LEN(le32_to_cpu(trb->length));

			if (!request->num_sgs ||
			    (request->num_sgs == (priv_ep->stream_sg_idx + 1))) {
				priv_ep->stream_sg_idx = 0;
				cdns3_gadget_giveback(priv_ep, priv_req, 0);
			} else {
				priv_ep->stream_sg_idx++;
				cdns3_ep_run_stream_transfer(priv_ep, request);
			}
			break;
		}
	}
	priv_ep->flags &= ~EP_PENDING_REQUEST;

prepare_next_td:
	if (!(priv_ep->flags & EP_STALLED) &&
	    !(priv_ep->flags & EP_STALL_PENDING))
		cdns3_start_all_request(priv_dev, priv_ep);
}

void cdns3_rearm_transfer(struct cdns3_endpoint *priv_ep, u8 rearm)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;

	cdns3_wa1_restore_cycle_bit(priv_ep);

	if (rearm) {
		trace_cdns3_ring(priv_ep);
		/* Cycle Bit must be updated before arming DMA. */
		wmb();
		writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);

		__cdns3_gadget_wakeup(priv_dev);

		trace_cdns3_doorbell_epx(priv_ep->name,
					 readl(&priv_dev->regs->ep_traddr));
	}
}

static void cdns3_reprogram_tdl(struct cdns3_endpoint *priv_ep)
{
	u16 tdl = priv_ep->pending_tdl;
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;

	if (tdl > EP_CMD_TDL_MAX) {
		tdl = EP_CMD_TDL_MAX;
		priv_ep->pending_tdl -= EP_CMD_TDL_MAX;
	} else {
		priv_ep->pending_tdl = 0;
	}

	writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL, &priv_dev->regs->ep_cmd);
}

/**
 * cdns3_check_ep_interrupt_proceed - Processes interrupt related to endpoint
 * @priv_ep: endpoint object
 *
 * Returns 0
 */
static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	u32 ep_sts_reg;
	struct usb_request *deferred_request;
	struct usb_request *pending_request;
	u32 tdl = 0;

	cdns3_select_ep(priv_dev, priv_ep->endpoint.address);

	trace_cdns3_epx_irq(priv_dev, priv_ep);

	ep_sts_reg = readl(&priv_dev->regs->ep_sts);
	writel(ep_sts_reg, &priv_dev->regs->ep_sts);

	if ((ep_sts_reg & EP_STS_PRIME) && priv_ep->use_streams) {
		bool dbusy = !!(ep_sts_reg & EP_STS_DBUSY);

		tdl = cdns3_get_tdl(priv_dev);

		/*
		 * Continue the previous transfer:
		 * There is some racing between ERDY and PRIME. The device
		 * sends ERDY and almost at the same time the host sends
		 * PRIME. This causes the host to ignore the ERDY packet, so
		 * the driver has to send it again.
		 */
		if (tdl && (dbusy || !EP_STS_BUFFEMPTY(ep_sts_reg) ||
		    EP_STS_HOSTPP(ep_sts_reg))) {
			writel(EP_CMD_ERDY |
			       EP_CMD_ERDY_SID(priv_ep->last_stream_id),
			       &priv_dev->regs->ep_cmd);
			ep_sts_reg &= ~(EP_STS_MD_EXIT | EP_STS_IOC);
		} else {
			priv_ep->prime_flag = true;

			pending_request = cdns3_next_request(&priv_ep->pending_req_list);
			deferred_request = cdns3_next_request(&priv_ep->deferred_req_list);

			if (deferred_request && !pending_request) {
				cdns3_start_all_request(priv_dev, priv_ep);
			}
		}
	}

	if (ep_sts_reg & EP_STS_TRBERR) {
		if (priv_ep->flags & EP_STALL_PENDING &&
		    !(ep_sts_reg & EP_STS_DESCMIS &&
		    priv_dev->dev_ver < DEV_VER_V2)) {
			cdns3_ep_stall_flush(priv_ep);
		}
		/*
		 * For isochronous transfers the driver completes the request
		 * on IOC or on TRBERR. IOC appears only when the device
		 * receives an OUT data packet. If the host disables the
		 * stream or loses some packets, then the only way to finish
		 * all queued transfers is to do it on the TRBERR event.
		 */
		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC &&
		    !priv_ep->wa1_set) {
			if (!priv_ep->dir) {
				u32 ep_cfg = readl(&priv_dev->regs->ep_cfg);

				ep_cfg &= ~EP_CFG_ENABLE;
				writel(ep_cfg, &priv_dev->regs->ep_cfg);
				priv_ep->flags &= ~EP_QUIRK_ISO_OUT_EN;
			}
			cdns3_transfer_completed(priv_dev, priv_ep);
		} else if (!(priv_ep->flags & EP_STALLED) &&
			   !(priv_ep->flags & EP_STALL_PENDING)) {
			if (priv_ep->flags & EP_DEFERRED_DRDY) {
				priv_ep->flags &= ~EP_DEFERRED_DRDY;
				cdns3_start_all_request(priv_dev, priv_ep);
			} else {
				cdns3_rearm_transfer(priv_ep,
						     priv_ep->wa1_set);
			}
		}
	}

	if ((ep_sts_reg & EP_STS_IOC) || (ep_sts_reg & EP_STS_ISP) ||
	    (ep_sts_reg & EP_STS_IOT)) {
		if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN) {
			if (ep_sts_reg & EP_STS_ISP)
				priv_ep->flags |= EP_QUIRK_END_TRANSFER;
			else
				priv_ep->flags &= ~EP_QUIRK_END_TRANSFER;
		}

		if (!priv_ep->use_streams) {
			if ((ep_sts_reg & EP_STS_IOC) ||
			    (ep_sts_reg & EP_STS_ISP)) {
				cdns3_transfer_completed(priv_dev, priv_ep);
			} else if ((priv_ep->flags & EP_TDLCHK_EN) &&
				   priv_ep->pending_tdl) {
				/* handle IOT with pending tdl */
				cdns3_reprogram_tdl(priv_ep);
			}
		} else if (priv_ep->dir == USB_DIR_OUT) {
			priv_ep->ep_sts_pending |= ep_sts_reg;
		} else if (ep_sts_reg & EP_STS_IOT) {
			cdns3_transfer_completed(priv_dev, priv_ep);
		}
	}

	/*
	 * The MD_EXIT interrupt is set when a stream capable endpoint exits
	 * the MOVE DATA state of the Bulk IN/OUT stream protocol state
	 * machine.
	 */
	if (priv_ep->dir == USB_DIR_OUT && (ep_sts_reg & EP_STS_MD_EXIT) &&
	    (priv_ep->ep_sts_pending & EP_STS_IOT) && priv_ep->use_streams) {
		priv_ep->ep_sts_pending = 0;
		cdns3_transfer_completed(priv_dev, priv_ep);
	}

	/*
	 * WA2: this condition should only be met when
	 * priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET or
	 * priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN is set.
	 * In other cases this interrupt will be disabled.
	 */
	if (ep_sts_reg & EP_STS_DESCMIS && priv_dev->dev_ver < DEV_VER_V2 &&
	    !(priv_ep->flags & EP_STALLED))
		cdns3_wa2_descmissing_packet(priv_ep);

	return 0;
}

static void cdns3_disconnect_gadget(struct cdns3_device *priv_dev)
{
	if (priv_dev->gadget_driver && priv_dev->gadget_driver->disconnect)
		priv_dev->gadget_driver->disconnect(&priv_dev->gadget);
}

/**
 * cdns3_check_usb_interrupt_proceed - Processes interrupt related to device
 * @priv_dev: extended gadget object
 * @usb_ists: bitmap representation of device's reported interrupts
 *            (usb_ists register value)
 */
static void cdns3_check_usb_interrupt_proceed(struct cdns3_device *priv_dev,
					      u32 usb_ists)
__must_hold(&priv_dev->lock)
{
	int speed = 0;

	trace_cdns3_usb_irq(priv_dev, usb_ists);
	if (usb_ists & USB_ISTS_L1ENTI) {
		/*
		 * WORKAROUND: the CDNS3 controller has an issue with hardware
		 * resuming from L1. To fix it, if any DMA transfer is pending,
		 * the driver must start driving the resume signal immediately.
		 */
		if (readl(&priv_dev->regs->drbl))
			__cdns3_gadget_wakeup(priv_dev);
	}

	/* Connection detected */
	if (usb_ists & (USB_ISTS_CON2I | USB_ISTS_CONI)) {
		speed = cdns3_get_speed(priv_dev);
		priv_dev->gadget.speed = speed;
		usb_gadget_set_state(&priv_dev->gadget, USB_STATE_POWERED);
		cdns3_ep0_config(priv_dev);
	}

	/* Disconnection detected */
	if (usb_ists & (USB_ISTS_DIS2I | USB_ISTS_DISI)) {
		spin_unlock(&priv_dev->lock);
		cdns3_disconnect_gadget(priv_dev);
		spin_lock(&priv_dev->lock);
		priv_dev->gadget.speed = USB_SPEED_UNKNOWN;
		usb_gadget_set_state(&priv_dev->gadget, USB_STATE_NOTATTACHED);
		cdns3_hw_reset_eps_config(priv_dev);
	}

	if (usb_ists & (USB_ISTS_L2ENTI | USB_ISTS_U3ENTI)) {
		if (priv_dev->gadget_driver &&
		    priv_dev->gadget_driver->suspend) {
			spin_unlock(&priv_dev->lock);
			priv_dev->gadget_driver->suspend(&priv_dev->gadget);
			spin_lock(&priv_dev->lock);
		}
	}

	if (usb_ists & (USB_ISTS_L2EXTI | USB_ISTS_U3EXTI)) {
		if (priv_dev->gadget_driver &&
		    priv_dev->gadget_driver->resume) {
			spin_unlock(&priv_dev->lock);
			priv_dev->gadget_driver->resume(&priv_dev->gadget);
			spin_lock(&priv_dev->lock);
		}
	}

	/* reset */
	if (usb_ists & (USB_ISTS_UWRESI | USB_ISTS_UHRESI | USB_ISTS_U2RESI)) {
		if (priv_dev->gadget_driver) {
			spin_unlock(&priv_dev->lock);
			usb_gadget_udc_reset(&priv_dev->gadget,
					     priv_dev->gadget_driver);
			spin_lock(&priv_dev->lock);

			/* read again to check the actual speed */
			speed = cdns3_get_speed(priv_dev);
			priv_dev->gadget.speed = speed;
			cdns3_hw_reset_eps_config(priv_dev);
			cdns3_ep0_config(priv_dev);
		}
	}
}

/**
 * cdns3_device_irq_handler - interrupt handler for device part of controller
 *
 * @irq: irq number for cdns3 core device
 * @data: structure of cdns3
 *
 * Returns IRQ_HANDLED or IRQ_NONE
 */
static irqreturn_t cdns3_device_irq_handler(int irq, void *data)
{
	struct cdns3_device *priv_dev = data;
	struct cdns *cdns = dev_get_drvdata(priv_dev->dev);
	irqreturn_t ret = IRQ_NONE;
	u32 reg;

	if (cdns->in_lpm)
		return ret;

	/* check USB device interrupt */
	reg = readl(&priv_dev->regs->usb_ists);
	if (reg) {
		/*
		 * After masking interrupts, new interrupts won't be reported
		 * in usb_ists/ep_ists. In order not to lose some of them, the
		 * driver disables only the detected interrupts. They will be
		 * re-enabled ASAP after clearing the interrupt source. This
		 * unusual behavior applies only to the usb_ists register.
		 */
		reg = ~reg & readl(&priv_dev->regs->usb_ien);
		/* mask deferred interrupts */
		writel(reg, &priv_dev->regs->usb_ien);
		ret = IRQ_WAKE_THREAD;
	}

	/* check endpoint interrupt */
	reg = readl(&priv_dev->regs->ep_ists);
	if (reg) {
		writel(0, &priv_dev->regs->ep_ien);
		ret = IRQ_WAKE_THREAD;
	}

	return ret;
}

/**
 * cdns3_device_thread_irq_handler - interrupt handler for device part
 * of controller
 *
 * @irq: irq number for cdns3 core device
 * @data: structure of cdns3
 *
 * Returns IRQ_HANDLED or IRQ_NONE
 */
static irqreturn_t cdns3_device_thread_irq_handler(int irq, void *data)
{
	struct cdns3_device *priv_dev = data;
	irqreturn_t ret = IRQ_NONE;
	unsigned long flags;
	unsigned int bit;
	unsigned long reg;

	spin_lock_irqsave(&priv_dev->lock, flags);

	reg = readl(&priv_dev->regs->usb_ists);
	if (reg) {
		writel(reg, &priv_dev->regs->usb_ists);
		writel(USB_IEN_INIT, &priv_dev->regs->usb_ien);
		cdns3_check_usb_interrupt_proceed(priv_dev, reg);
		ret = IRQ_HANDLED;
	}

	reg = readl(&priv_dev->regs->ep_ists);

	/* handle default endpoint OUT */
	if (reg & EP_ISTS_EP_OUT0) {
		cdns3_check_ep0_interrupt_proceed(priv_dev, USB_DIR_OUT);
		ret = IRQ_HANDLED;
	}

	/* handle default endpoint IN */
	if (reg & EP_ISTS_EP_IN0) {
		cdns3_check_ep0_interrupt_proceed(priv_dev, USB_DIR_IN);
		ret = IRQ_HANDLED;
	}

	/* check if interrupt from non default endpoint, if no exit */
	reg &= ~(EP_ISTS_EP_OUT0 | EP_ISTS_EP_IN0);
	if (!reg)
		goto irqend;

	for_each_set_bit(bit, &reg,
			 sizeof(u32) * BITS_PER_BYTE) {
		cdns3_check_ep_interrupt_proceed(priv_dev->eps[bit]);
		ret = IRQ_HANDLED;
	}

	if (priv_dev->dev_ver < DEV_VER_V2 && priv_dev->using_streams)
		cdns3_wa2_check_outq_status(priv_dev);

irqend:
	writel(~0, &priv_dev->regs->ep_ien);
	spin_unlock_irqrestore(&priv_dev->lock, flags);

	return ret;
}
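/*
 * Worked example for the on-chip buffer accounting below (illustrative,
 * sizes hypothetical): with onchip_buffers == 16 (KB), 2 KB reserved for
 * EP0 and 4 KB already used, "remained" is 10. An IN endpoint asking for
 * 4 KB succeeds and bumps onchip_used_size to 8; OUT endpoints instead
 * share one region, so only the shortfall beyond out_mem_is_allocated is
 * charged.
 */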

/**
 * cdns3_ep_onchip_buffer_reserve - try to reserve onchip buf for EP
 * @priv_dev: extended gadget object
 * @size: the size (KB) the EP would like to allocate
 * @is_in: endpoint direction
 *
 * The real reservation will occur during write to EP_CFG register,
 * this function is used to check if the 'size' reservation is allowed.
 *
 * Return 0 if the required size can be met or negative value on failure
 */
static int cdns3_ep_onchip_buffer_reserve(struct cdns3_device *priv_dev,
					  int size, int is_in)
{
	int remained;

	/* 2KB are reserved for EP0 */
	remained = priv_dev->onchip_buffers - priv_dev->onchip_used_size - 2;

	if (is_in) {
		if (remained < size)
			return -EPERM;

		priv_dev->onchip_used_size += size;
	} else {
		int required;

		/*
		 * All OUT EPs share the same chunk of onchip memory, so
		 * the driver checks if it has already assigned enough buffers.
		 */
		if (priv_dev->out_mem_is_allocated >= size)
			return 0;

		required = size - priv_dev->out_mem_is_allocated;

		if (required > remained)
			return -EPERM;

		priv_dev->out_mem_is_allocated += required;
		priv_dev->onchip_used_size += required;
	}

	return 0;
}
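
/*
 * A worked example of the accounting above, with illustrative numbers:
 * onchip_buffers = 16 (KB) leaves remained = 14 after the 2 KB EP0
 * reservation. An IN endpoint asking for 4 KB consumes it exclusively
 * (onchip_used_size becomes 4). OUT endpoints share one region: the first
 * OUT endpoint asking for 4 KB grows the region to 4 KB
 * (out_mem_is_allocated = 4), a second one asking for 3 KB fits in the
 * existing region and reserves nothing extra, and one asking for 6 KB
 * only adds the missing 2 KB.
 */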

static void cdns3_configure_dmult(struct cdns3_device *priv_dev,
				  struct cdns3_endpoint *priv_ep)
{
	struct cdns3_usb_regs __iomem *regs = priv_dev->regs;

	/* For dev_ver > DEV_VER_V2 DMULT is configured per endpoint */
	if (priv_dev->dev_ver <= DEV_VER_V2)
		writel(USB_CONF_DMULT, &regs->usb_conf);

	if (priv_dev->dev_ver == DEV_VER_V2)
		writel(USB_CONF2_EN_TDL_TRB, &regs->usb_conf2);

	if (priv_dev->dev_ver >= DEV_VER_V3 && priv_ep) {
		u32 mask;

		if (priv_ep->dir)
			mask = BIT(priv_ep->num + 16);
		else
			mask = BIT(priv_ep->num);

		if (priv_ep->type != USB_ENDPOINT_XFER_ISOC) {
			cdns3_set_register_bit(&regs->tdl_from_trb, mask);
			cdns3_set_register_bit(&regs->tdl_beh, mask);
			cdns3_set_register_bit(&regs->tdl_beh2, mask);
			cdns3_set_register_bit(&regs->dma_adv_td, mask);
		}

		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir)
			cdns3_set_register_bit(&regs->tdl_from_trb, mask);

		cdns3_set_register_bit(&regs->dtrans, mask);
	}
}

/**
 * cdns3_ep_config - configure hardware endpoint
 * @priv_ep: extended endpoint object
 * @enable: set EP_CFG_ENABLE bit in ep_cfg register.
 */
int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
{
	bool is_iso_ep = (priv_ep->type == USB_ENDPOINT_XFER_ISOC);
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	u32 bEndpointAddress = priv_ep->num | priv_ep->dir;
	u32 max_packet_size = 0;
	u8 maxburst = 0;
	u32 ep_cfg = 0;
	u8 buffering;
	u8 mult = 0;
	int ret;

	buffering = CDNS3_EP_BUF_SIZE - 1;

	cdns3_configure_dmult(priv_dev, priv_ep);

	switch (priv_ep->type) {
	case USB_ENDPOINT_XFER_INT:
		ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_INT);

		if ((priv_dev->dev_ver == DEV_VER_V2 && !priv_ep->dir) ||
		    priv_dev->dev_ver > DEV_VER_V2)
			ep_cfg |= EP_CFG_TDL_CHK;
		break;
	case USB_ENDPOINT_XFER_BULK:
		ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_BULK);

		if ((priv_dev->dev_ver == DEV_VER_V2 && !priv_ep->dir) ||
		    priv_dev->dev_ver > DEV_VER_V2)
			ep_cfg |= EP_CFG_TDL_CHK;
		break;
	default:
		ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_ISOC);
		mult = CDNS3_EP_ISO_HS_MULT - 1;
		buffering = mult + 1;
	}

	switch (priv_dev->gadget.speed) {
	case USB_SPEED_FULL:
		max_packet_size = is_iso_ep ? 1023 : 64;
		break;
	case USB_SPEED_HIGH:
		max_packet_size = is_iso_ep ? 1024 : 512;
		break;
	case USB_SPEED_SUPER:
		/* It's a limitation that the driver assumes. */
		mult = 0;
		max_packet_size = 1024;
		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC) {
			maxburst = CDNS3_EP_ISO_SS_BURST - 1;
			buffering = (mult + 1) *
				    (maxburst + 1);

			if (priv_ep->interval > 1)
				buffering++;
		} else {
			maxburst = CDNS3_EP_BUF_SIZE - 1;
		}
		break;
	default:
		/* all other speeds are not supported */
		return -EINVAL;
	}

	if (max_packet_size == 1024)
		priv_ep->trb_burst_size = 128;
	else if (max_packet_size >= 512)
		priv_ep->trb_burst_size = 64;
	else
		priv_ep->trb_burst_size = 16;

	/* onchip buffer is only allocated before configuration */
	if (!priv_dev->hw_configured_flag) {
		ret = cdns3_ep_onchip_buffer_reserve(priv_dev, buffering + 1,
						     !!priv_ep->dir);
		if (ret) {
			dev_err(priv_dev->dev, "onchip mem is full, ep is invalid\n");
			return ret;
		}
	}

	if (enable)
		ep_cfg |= EP_CFG_ENABLE;

	if (priv_ep->use_streams && priv_dev->gadget.speed >= USB_SPEED_SUPER) {
		if (priv_dev->dev_ver >= DEV_VER_V3) {
			u32 mask = BIT(priv_ep->num + (priv_ep->dir ? 16 : 0));

			/*
			 * Stream capable endpoints are handled by using ep_tdl
			 * register. Other endpoints use TDL from TRB feature.
			 */
			cdns3_clear_register_bit(&priv_dev->regs->tdl_from_trb,
						 mask);
		}

		/* Enable Stream Bit, TDL chk and SID chk */
		ep_cfg |= EP_CFG_STREAM_EN | EP_CFG_TDL_CHK | EP_CFG_SID_CHK;
	}

	ep_cfg |= EP_CFG_MAXPKTSIZE(max_packet_size) |
		  EP_CFG_MULT(mult) |
		  EP_CFG_BUFFERING(buffering) |
		  EP_CFG_MAXBURST(maxburst);

	cdns3_select_ep(priv_dev, bEndpointAddress);
	writel(ep_cfg, &priv_dev->regs->ep_cfg);
	priv_ep->flags |= EP_CONFIGURED;

	dev_dbg(priv_dev->dev, "Configure %s: with val %08x\n",
		priv_ep->name, ep_cfg);

	return 0;
}
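
/*
 * For instance, a high-speed bulk endpoint ends up with max_packet_size =
 * 512, mult = 0, maxburst = 0, buffering = CDNS3_EP_BUF_SIZE - 1 and
 * trb_burst_size = 64, so the register value is
 * EP_CFG_EPTYPE(USB_ENDPOINT_XFER_BULK) | EP_CFG_MAXPKTSIZE(512) |
 * EP_CFG_MULT(0) | EP_CFG_BUFFERING(buffering) | EP_CFG_MAXBURST(0),
 * plus EP_CFG_TDL_CHK on DEV_VER_V2 OUT endpoints or any endpoint on
 * newer controllers, and EP_CFG_ENABLE when requested.
 */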

/* Find correct direction for HW endpoint according to description */
static int cdns3_ep_dir_is_correct(struct usb_endpoint_descriptor *desc,
				   struct cdns3_endpoint *priv_ep)
{
	return (priv_ep->endpoint.caps.dir_in && usb_endpoint_dir_in(desc)) ||
	       (priv_ep->endpoint.caps.dir_out && usb_endpoint_dir_out(desc));
}

static struct
cdns3_endpoint *cdns3_find_available_ep(struct cdns3_device *priv_dev,
					struct usb_endpoint_descriptor *desc)
{
	struct usb_ep *ep;
	struct cdns3_endpoint *priv_ep;

	list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) {
		unsigned long num;
		int ret;
		/* ep name pattern is like epXin or epXout; only the first
		 * digit of X is parsed here
		 */
		char c[2] = {ep->name[2], '\0'};

		ret = kstrtoul(c, 10, &num);
		if (ret)
			return ERR_PTR(ret);

		priv_ep = ep_to_cdns3_ep(ep);
		if (cdns3_ep_dir_is_correct(desc, priv_ep)) {
			if (!(priv_ep->flags & EP_CLAIMED)) {
				priv_ep->num = num;
				return priv_ep;
			}
		}
	}

	return ERR_PTR(-ENOENT);
}

/*
 * Cadence IP has one limitation: all endpoints must be configured
 * (Type & MaxPacketSize) before setting the configuration through the
 * hardware register, which means we can't change an endpoint's
 * configuration after set_configuration.
 *
 * This function sets the EP_CLAIMED flag when the gadget driver uses
 * usb_ep_autoconfig to configure a specific endpoint;
 * when the udc driver receives a set_configuration request,
 * it goes through all claimed endpoints and configures them
 * accordingly.
 *
 * At usb_ep_ops.enable/disable, we only enable and disable the endpoint
 * through the ep_cfg register, which can be changed after
 * set_configuration, and do some software operations accordingly.
 */
static struct
usb_ep *cdns3_gadget_match_ep(struct usb_gadget *gadget,
			      struct usb_endpoint_descriptor *desc,
			      struct usb_ss_ep_comp_descriptor *comp_desc)
{
	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
	struct cdns3_endpoint *priv_ep;
	unsigned long flags;

	priv_ep = cdns3_find_available_ep(priv_dev, desc);
	if (IS_ERR(priv_ep)) {
		dev_err(priv_dev->dev, "no available ep\n");
		return NULL;
	}

	dev_dbg(priv_dev->dev, "match endpoint: %s\n", priv_ep->name);

	spin_lock_irqsave(&priv_dev->lock, flags);
	priv_ep->endpoint.desc = desc;
	priv_ep->dir = usb_endpoint_dir_in(desc) ? USB_DIR_IN : USB_DIR_OUT;
	priv_ep->type = usb_endpoint_type(desc);
	priv_ep->flags |= EP_CLAIMED;
	priv_ep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0;

	spin_unlock_irqrestore(&priv_dev->lock, flags);
	return &priv_ep->endpoint;
}
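
/*
 * For reference, the claim/configure flow seen from a hypothetical
 * function driver is the standard gadget one:
 *
 *	ep = usb_ep_autoconfig(gadget, &bulk_in_desc); // cdns3_gadget_match_ep()
 *	...
 *	usb_ep_enable(ep);                  // cdns3_gadget_ep_enable()
 *	usb_ep_queue(ep, req, GFP_ATOMIC);  // cdns3_gadget_ep_queue()
 *	usb_ep_disable(ep);                 // cdns3_gadget_ep_disable()
 */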

/**
 * cdns3_gadget_ep_alloc_request - allocates request
 * @ep: endpoint object associated with request
 * @gfp_flags: gfp flags
 *
 * Returns allocated request address, NULL on allocation error
 */
struct usb_request *cdns3_gadget_ep_alloc_request(struct usb_ep *ep,
						  gfp_t gfp_flags)
{
	struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
	struct cdns3_request *priv_req;

	priv_req = kzalloc(sizeof(*priv_req), gfp_flags);
	if (!priv_req)
		return NULL;

	priv_req->priv_ep = priv_ep;

	trace_cdns3_alloc_request(priv_req);
	return &priv_req->request;
}

/**
 * cdns3_gadget_ep_free_request - frees memory occupied by request
 * @ep: endpoint object associated with request
 * @request: request to free memory
 */
void cdns3_gadget_ep_free_request(struct usb_ep *ep,
				  struct usb_request *request)
{
	struct cdns3_request *priv_req = to_cdns3_request(request);

	if (priv_req->aligned_buf)
		priv_req->aligned_buf->in_use = 0;

	trace_cdns3_free_request(priv_req);
	kfree(priv_req);
}

/**
 * cdns3_gadget_ep_enable - enables endpoint
 * @ep: endpoint object
 * @desc: endpoint descriptor
 *
 * Returns 0 on success, error code otherwise
 */
static int cdns3_gadget_ep_enable(struct usb_ep *ep,
				  const struct usb_endpoint_descriptor *desc)
{
	struct cdns3_endpoint *priv_ep;
	struct cdns3_device *priv_dev;
	const struct usb_ss_ep_comp_descriptor *comp_desc;
	u32 reg = EP_STS_EN_TRBERREN;
	u32 bEndpointAddress;
	unsigned long flags;
	int enable = 1;
	int ret = 0;
	int val;

	/* validate parameters before dereferencing the endpoint object */
	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
		pr_debug("usbss: invalid parameters\n");
		return -EINVAL;
	}

	priv_ep = ep_to_cdns3_ep(ep);
	priv_dev = priv_ep->cdns3_dev;
	comp_desc = priv_ep->endpoint.comp_desc;

	if (!desc->wMaxPacketSize) {
		dev_err(priv_dev->dev, "usbss: missing wMaxPacketSize\n");
		return -EINVAL;
	}

	if (dev_WARN_ONCE(priv_dev->dev, priv_ep->flags & EP_ENABLED,
			  "%s is already enabled\n", priv_ep->name))
		return 0;

	spin_lock_irqsave(&priv_dev->lock, flags);

	priv_ep->endpoint.desc = desc;
	priv_ep->type = usb_endpoint_type(desc);
	priv_ep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0;

	if (priv_ep->interval > ISO_MAX_INTERVAL &&
	    priv_ep->type == USB_ENDPOINT_XFER_ISOC) {
		dev_err(priv_dev->dev, "Driver is limited to %d periods\n",
			ISO_MAX_INTERVAL);

		ret = -EINVAL;
		goto exit;
	}

	bEndpointAddress = priv_ep->num | priv_ep->dir;
	cdns3_select_ep(priv_dev, bEndpointAddress);

	/*
	 * For some versions of the controller, at some point during ISO OUT
	 * traffic the DMA reads the transfer ring of an EP which has never
	 * got a doorbell. The issue was detected only in simulation, but the
	 * driver protects against it anyway: the ISO OUT endpoint is enabled
	 * only just before setting DRBL. This special treatment of ISO OUT
	 * endpoints is recommended by the controller specification.
	 */
	if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir)
		enable = 0;

	if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
		/*
		 * Enable stream support (SS mode) related interrupts
		 * in EP_STS_EN Register
		 */
		if (priv_dev->gadget.speed >= USB_SPEED_SUPER) {
			reg |= EP_STS_EN_IOTEN | EP_STS_EN_PRIMEEEN |
			       EP_STS_EN_SIDERREN | EP_STS_EN_MD_EXITEN |
			       EP_STS_EN_STREAMREN;
			priv_ep->use_streams = true;
			ret = cdns3_ep_config(priv_ep, enable);
			priv_dev->using_streams |= true;
		}
	} else {
		ret = cdns3_ep_config(priv_ep, enable);
	}

	if (ret)
		goto exit;

	ret = cdns3_allocate_trb_pool(priv_ep);
	if (ret)
		goto exit;

	bEndpointAddress = priv_ep->num | priv_ep->dir;
	cdns3_select_ep(priv_dev, bEndpointAddress);

	trace_cdns3_gadget_ep_enable(priv_ep);

	writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd);

	ret = readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
					!(val & (EP_CMD_CSTALL | EP_CMD_EPRST)),
					1, 1000);

	if (unlikely(ret)) {
		cdns3_free_trb_pool(priv_ep);
		ret = -EINVAL;
		goto exit;
	}

	/* enable interrupt for selected endpoint */
	cdns3_set_register_bit(&priv_dev->regs->ep_ien,
			       BIT(cdns3_ep_addr_to_index(bEndpointAddress)));

	if (priv_dev->dev_ver < DEV_VER_V2)
		cdns3_wa2_enable_detection(priv_dev, priv_ep, reg);

	writel(reg, &priv_dev->regs->ep_sts_en);

	ep->desc = desc;
	priv_ep->flags &= ~(EP_PENDING_REQUEST | EP_STALLED | EP_STALL_PENDING |
			    EP_QUIRK_ISO_OUT_EN | EP_QUIRK_EXTRA_BUF_EN);
	priv_ep->flags |= EP_ENABLED | EP_UPDATE_EP_TRBADDR;
	priv_ep->wa1_set = 0;
	priv_ep->enqueue = 0;
	priv_ep->dequeue = 0;
	reg = readl(&priv_dev->regs->ep_sts);
	priv_ep->pcs = !!EP_STS_CCS(reg);
	priv_ep->ccs = !!EP_STS_CCS(reg);
	/* one TRB is reserved for the link TRB used in DMULT mode */
	priv_ep->free_trbs = priv_ep->num_trbs - 1;
exit:
	spin_unlock_irqrestore(&priv_dev->lock, flags);

	return ret;
}

/**
 * cdns3_gadget_ep_disable - disables endpoint
 * @ep: endpoint object
 *
 * Returns 0 on success, error code otherwise
 */
static int cdns3_gadget_ep_disable(struct usb_ep *ep)
{
	struct cdns3_endpoint *priv_ep;
	struct cdns3_request *priv_req;
	struct cdns3_device *priv_dev;
	struct usb_request *request;
	unsigned long flags;
	int ret = 0;
	u32 ep_cfg;
	int val;

	if (!ep) {
		pr_err("usbss: invalid parameters\n");
		return -EINVAL;
	}

	priv_ep = ep_to_cdns3_ep(ep);
	priv_dev = priv_ep->cdns3_dev;

	if (dev_WARN_ONCE(priv_dev->dev, !(priv_ep->flags & EP_ENABLED),
			  "%s is already disabled\n", priv_ep->name))
		return 0;

	spin_lock_irqsave(&priv_dev->lock, flags);

	trace_cdns3_gadget_ep_disable(priv_ep);

	cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress);

	ep_cfg = readl(&priv_dev->regs->ep_cfg);
	ep_cfg &= ~EP_CFG_ENABLE;
	writel(ep_cfg, &priv_dev->regs->ep_cfg);

	/*
	 * Driver needs some time before resetting the endpoint.
	 * It needs to wait for the DBUSY bit to clear or for the timeout to
	 * expire. 10us is enough time for the controller to stop the
	 * transfer.
	 */
	readl_poll_timeout_atomic(&priv_dev->regs->ep_sts, val,
				  !(val & EP_STS_DBUSY), 1, 10);
	writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd);

	ret = readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
					!(val & (EP_CMD_CSTALL | EP_CMD_EPRST)),
					1, 1000);
	if (unlikely(ret))
		dev_err(priv_dev->dev, "Timeout: %s resetting failed.\n",
			priv_ep->name);

	while (!list_empty(&priv_ep->pending_req_list)) {
		request = cdns3_next_request(&priv_ep->pending_req_list);

		cdns3_gadget_giveback(priv_ep, to_cdns3_request(request),
				      -ESHUTDOWN);
	}

	while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
		priv_req = cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);

		kfree(priv_req->request.buf);
		cdns3_gadget_ep_free_request(&priv_ep->endpoint,
					     &priv_req->request);
		list_del_init(&priv_req->list);
		--priv_ep->wa2_counter;
	}

	while (!list_empty(&priv_ep->deferred_req_list)) {
		request = cdns3_next_request(&priv_ep->deferred_req_list);

		cdns3_gadget_giveback(priv_ep, to_cdns3_request(request),
				      -ESHUTDOWN);
	}

	priv_ep->descmis_req = NULL;

	ep->desc = NULL;
	priv_ep->flags &= ~EP_ENABLED;
	priv_ep->use_streams = false;

	spin_unlock_irqrestore(&priv_dev->lock, flags);

	return ret;
}

/**
 * cdns3_gadget_ep_queue - transfers data on endpoint
 * @ep: endpoint object
 * @request: request object
 * @gfp_flags: gfp flags
 *
 * Returns 0 on success, error code otherwise
 */
static int __cdns3_gadget_ep_queue(struct usb_ep *ep,
				   struct usb_request *request,
				   gfp_t gfp_flags)
{
	struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct cdns3_request *priv_req;
	int ret = 0;

	request->actual = 0;
	request->status = -EINPROGRESS;
	priv_req = to_cdns3_request(request);
	trace_cdns3_ep_queue(priv_req);

	if (priv_dev->dev_ver < DEV_VER_V2) {
		ret = cdns3_wa2_gadget_ep_queue(priv_dev, priv_ep,
						priv_req);

		/* a positive EINPROGRESS means the workaround consumed
		 * the request
		 */
		if (ret == EINPROGRESS)
			return 0;
	}

	ret = cdns3_prepare_aligned_request_buf(priv_req);
	if (ret < 0)
		return ret;

	ret = usb_gadget_map_request_by_dev(priv_dev->sysdev, request,
					    usb_endpoint_dir_in(ep->desc));
	if (ret)
		return ret;

	list_add_tail(&request->list, &priv_ep->deferred_req_list);

	/*
	 * If the hardware endpoint configuration has not been set yet, just
	 * leave the request on the deferred list; the transfer will be
	 * started in cdns3_set_hw_configuration. For a stream capable
	 * endpoint, the request is additionally started only once the prime
	 * irq flag is set.
	 */
	if (!request->stream_id) {
		if (priv_dev->hw_configured_flag &&
		    !(priv_ep->flags & EP_STALLED) &&
		    !(priv_ep->flags & EP_STALL_PENDING))
			cdns3_start_all_request(priv_dev, priv_ep);
	} else {
		if (priv_dev->hw_configured_flag && priv_ep->prime_flag)
			cdns3_start_all_request(priv_dev, priv_ep);
	}

	return 0;
}

static int cdns3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
				 gfp_t gfp_flags)
{
	struct usb_request *zlp_request;
	struct cdns3_endpoint *priv_ep;
	struct cdns3_device *priv_dev;
	unsigned long flags;
	int ret;

	if (!request || !ep)
		return -EINVAL;

	priv_ep = ep_to_cdns3_ep(ep);
	priv_dev = priv_ep->cdns3_dev;

	spin_lock_irqsave(&priv_dev->lock, flags);

	ret = __cdns3_gadget_ep_queue(ep, request, gfp_flags);

	if (ret == 0 && request->zero && request->length &&
	    (request->length % ep->maxpacket == 0)) {
		struct cdns3_request *priv_req;

		zlp_request = cdns3_gadget_ep_alloc_request(ep, GFP_ATOMIC);
		if (!zlp_request) {
			spin_unlock_irqrestore(&priv_dev->lock, flags);
			return -ENOMEM;
		}

		zlp_request->buf = priv_dev->zlp_buf;
		zlp_request->length = 0;

		priv_req = to_cdns3_request(zlp_request);
		priv_req->flags |= REQUEST_ZLP;

		dev_dbg(priv_dev->dev, "Queuing ZLP for endpoint: %s\n",
			priv_ep->name);
		ret = __cdns3_gadget_ep_queue(ep, zlp_request, gfp_flags);
	}

	spin_unlock_irqrestore(&priv_dev->lock, flags);
	return ret;
}
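
/*
 * A hypothetical caller opts into the automatic ZLP handled above via the
 * standard request flag:
 *
 *	req->length = n * ep->maxpacket;   // multiple of wMaxPacketSize
 *	req->zero = 1;                     // terminate with a ZLP
 *	usb_ep_queue(ep, req, GFP_ATOMIC); // queues req + internal ZLP request
 *
 * The internal zero-length request is backed by priv_dev->zlp_buf and
 * marked with REQUEST_ZLP.
 */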

/**
 * cdns3_gadget_ep_dequeue - removes request from transfer queue
 * @ep: endpoint object associated with request
 * @request: request object
 *
 * Returns 0 on success, error code otherwise
 */
int cdns3_gadget_ep_dequeue(struct usb_ep *ep,
			    struct usb_request *request)
{
	struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct usb_request *req, *req_temp;
	struct cdns3_request *priv_req;
	struct cdns3_trb *link_trb;
	u8 req_on_hw_ring = 0;
	unsigned long flags;
	int ret = 0;

	if (!ep || !request || !ep->desc)
		return -EINVAL;

	spin_lock_irqsave(&priv_dev->lock, flags);

	priv_req = to_cdns3_request(request);

	trace_cdns3_ep_dequeue(priv_req);

	cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress);

	list_for_each_entry_safe(req, req_temp, &priv_ep->pending_req_list,
				 list) {
		if (request == req) {
			req_on_hw_ring = 1;
			goto found;
		}
	}

	list_for_each_entry_safe(req, req_temp, &priv_ep->deferred_req_list,
				 list) {
		if (request == req)
			goto found;
	}

	goto not_found;

found:
	link_trb = priv_req->trb;

	/* Update ring only if removed request is on pending_req_list list */
	if (req_on_hw_ring && link_trb) {
		link_trb->buffer = cpu_to_le32(TRB_BUFFER(priv_ep->trb_pool_dma +
			((priv_req->end_trb + 1) * TRB_SIZE)));
		link_trb->control = cpu_to_le32((le32_to_cpu(link_trb->control) & TRB_CYCLE) |
			TRB_TYPE(TRB_LINK) | TRB_CHAIN);

		if (priv_ep->wa1_trb == priv_req->trb)
			cdns3_wa1_restore_cycle_bit(priv_ep);
	}

	cdns3_gadget_giveback(priv_ep, priv_req, -ECONNRESET);

not_found:
	spin_unlock_irqrestore(&priv_dev->lock, flags);
	return ret;
}

/**
 * __cdns3_gadget_ep_set_halt - sets stall on selected endpoint
 * Should be called after acquiring spin_lock and selecting ep
 * @priv_ep: endpoint object to set stall on.
 */
void __cdns3_gadget_ep_set_halt(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;

	trace_cdns3_halt(priv_ep, 1, 0);

	if (!(priv_ep->flags & EP_STALLED)) {
		u32 ep_sts_reg = readl(&priv_dev->regs->ep_sts);

		if (!(ep_sts_reg & EP_STS_DBUSY))
			cdns3_ep_stall_flush(priv_ep);
		else
			priv_ep->flags |= EP_STALL_PENDING;
	}
}

/**
 * __cdns3_gadget_ep_clear_halt - clears stall on selected endpoint
 * Should be called after acquiring spin_lock and selecting ep
 * @priv_ep: endpoint object to clear stall on
 */
int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct usb_request *request;
	struct cdns3_request *priv_req;
	struct cdns3_trb *trb = NULL;
	int ret;
	int val;

	trace_cdns3_halt(priv_ep, 0, 0);

	request = cdns3_next_request(&priv_ep->pending_req_list);
	if (request) {
		priv_req = to_cdns3_request(request);
		trb = priv_req->trb;
		if (trb)
			trb->control = trb->control ^ cpu_to_le32(TRB_CYCLE);
	}

	writel(EP_CMD_CSTALL | EP_CMD_EPRST, &priv_dev->regs->ep_cmd);

	/* wait for EPRST cleared */
	ret = readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
					!(val & EP_CMD_EPRST), 1, 100);
	if (ret)
		return -EINVAL;

	priv_ep->flags &= ~(EP_STALLED | EP_STALL_PENDING);

	if (request) {
		if (trb)
			trb->control = trb->control ^ cpu_to_le32(TRB_CYCLE);

		cdns3_rearm_transfer(priv_ep, 1);
	}

	cdns3_start_all_request(priv_dev, priv_ep);
	return ret;
}

/**
 * cdns3_gadget_ep_set_halt - sets/clears stall on selected endpoint
 * @ep: endpoint object to set/clear stall on
 * @value: 1 for set stall, 0 for clear stall
 *
 * Returns 0 on success, error code otherwise
 */
int cdns3_gadget_ep_set_halt(struct usb_ep *ep, int value)
{
	struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	unsigned long flags;
	int ret = 0;

	if (!(priv_ep->flags & EP_ENABLED))
		return -EPERM;

	spin_lock_irqsave(&priv_dev->lock, flags);

	cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress);

	if (!value) {
		priv_ep->flags &= ~EP_WEDGE;
		ret = __cdns3_gadget_ep_clear_halt(priv_ep);
	} else {
		__cdns3_gadget_ep_set_halt(priv_ep);
	}

	spin_unlock_irqrestore(&priv_dev->lock, flags);

	return ret;
}
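
/*
 * Both halt hooks are reached through the standard gadget helpers; a
 * hypothetical function driver would use:
 *
 *	usb_ep_set_halt(ep);   // cdns3_gadget_ep_set_halt(ep, 1)
 *	usb_ep_clear_halt(ep); // cdns3_gadget_ep_set_halt(ep, 0)
 *
 * If DMA is busy when a stall is requested, the stall is recorded as
 * EP_STALL_PENDING and applied later.
 */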

extern const struct usb_ep_ops cdns3_gadget_ep0_ops;

static const struct usb_ep_ops cdns3_gadget_ep_ops = {
	.enable = cdns3_gadget_ep_enable,
	.disable = cdns3_gadget_ep_disable,
	.alloc_request = cdns3_gadget_ep_alloc_request,
	.free_request = cdns3_gadget_ep_free_request,
	.queue = cdns3_gadget_ep_queue,
	.dequeue = cdns3_gadget_ep_dequeue,
	.set_halt = cdns3_gadget_ep_set_halt,
	.set_wedge = cdns3_gadget_ep_set_wedge,
};

/**
 * cdns3_gadget_get_frame - returns the current ITP frame number
 * @gadget: gadget object
 *
 * Returns the current ITP frame number
 */
static int cdns3_gadget_get_frame(struct usb_gadget *gadget)
{
	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);

	return readl(&priv_dev->regs->usb_itpn);
}

int __cdns3_gadget_wakeup(struct cdns3_device *priv_dev)
{
	enum usb_device_speed speed;

	speed = cdns3_get_speed(priv_dev);

	if (speed >= USB_SPEED_SUPER)
		return 0;

	/* Start driving resume signaling to indicate remote wakeup. */
	writel(USB_CONF_LGO_L0, &priv_dev->regs->usb_conf);

	return 0;
}

static int cdns3_gadget_wakeup(struct usb_gadget *gadget)
{
	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&priv_dev->lock, flags);
	ret = __cdns3_gadget_wakeup(priv_dev);
	spin_unlock_irqrestore(&priv_dev->lock, flags);
	return ret;
}

static int cdns3_gadget_set_selfpowered(struct usb_gadget *gadget,
					int is_selfpowered)
{
	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
	unsigned long flags;

	spin_lock_irqsave(&priv_dev->lock, flags);
	priv_dev->is_selfpowered = !!is_selfpowered;
	spin_unlock_irqrestore(&priv_dev->lock, flags);
	return 0;
}

static int cdns3_gadget_pullup(struct usb_gadget *gadget, int is_on)
{
	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);

	if (is_on) {
		writel(USB_CONF_DEVEN, &priv_dev->regs->usb_conf);
	} else {
		writel(~0, &priv_dev->regs->ep_ists);
		writel(~0, &priv_dev->regs->usb_ists);
		writel(USB_CONF_DEVDS, &priv_dev->regs->usb_conf);
	}

	return 0;
}

static void cdns3_gadget_config(struct cdns3_device *priv_dev)
{
	struct cdns3_usb_regs __iomem *regs = priv_dev->regs;
	u32 reg;

	cdns3_ep0_config(priv_dev);

	/* enable interrupts for endpoint 0 (in and out) */
	writel(EP_IEN_EP_OUT0 | EP_IEN_EP_IN0, &regs->ep_ien);

	/*
	 * Driver needs to modify LFPS minimal U1 Exit time for DEV_VER_TI_V1
	 * revision of controller.
	 */
	if (priv_dev->dev_ver == DEV_VER_TI_V1) {
		reg = readl(&regs->dbg_link1);

		reg &= ~DBG_LINK1_LFPS_MIN_GEN_U1_EXIT_MASK;
		reg |= DBG_LINK1_LFPS_MIN_GEN_U1_EXIT(0x55) |
		       DBG_LINK1_LFPS_MIN_GEN_U1_EXIT_SET;
		writel(reg, &regs->dbg_link1);
	}

	/*
	 * By default some platforms have protected access to memory
	 * configured. This causes problems with the cache, so the driver
	 * restores non-secure access to memory.
	 */
	reg = readl(&regs->dma_axi_ctrl);
	reg |= DMA_AXI_CTRL_MARPROT(DMA_AXI_CTRL_NON_SECURE) |
	       DMA_AXI_CTRL_MAWPROT(DMA_AXI_CTRL_NON_SECURE);
	writel(reg, &regs->dma_axi_ctrl);

	/* enable generic interrupts */
	writel(USB_IEN_INIT, &regs->usb_ien);
	writel(USB_CONF_CLK2OFFDS | USB_CONF_L1DS, &regs->usb_conf);
	/* keep Fast Access bit */
	writel(PUSB_PWR_FST_REG_ACCESS, &priv_dev->regs->usb_pwr);

	cdns3_configure_dmult(priv_dev, NULL);
}

/**
 * cdns3_gadget_udc_start - starts gadget
 * @gadget: gadget object
 * @driver: driver which operates on this gadget
 *
 * Returns 0 on success, error code otherwise
 */
static int cdns3_gadget_udc_start(struct usb_gadget *gadget,
				  struct usb_gadget_driver *driver)
{
	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
	unsigned long flags;
	enum usb_device_speed max_speed = driver->max_speed;

	spin_lock_irqsave(&priv_dev->lock, flags);
	priv_dev->gadget_driver = driver;

	/* limit speed if necessary */
	max_speed = min(driver->max_speed, gadget->max_speed);

	switch (max_speed) {
	case USB_SPEED_FULL:
		writel(USB_CONF_SFORCE_FS, &priv_dev->regs->usb_conf);
		writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf);
		break;
	case USB_SPEED_HIGH:
		writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf);
		break;
	case USB_SPEED_SUPER:
		break;
	default:
		dev_err(priv_dev->dev,
			"invalid maximum_speed parameter %d\n",
			max_speed);
		fallthrough;
	case USB_SPEED_UNKNOWN:
		/* default to superspeed */
		max_speed = USB_SPEED_SUPER;
		break;
	}

	cdns3_gadget_config(priv_dev);
	spin_unlock_irqrestore(&priv_dev->lock, flags);
	return 0;
}

/**
 * cdns3_gadget_udc_stop - stops gadget
 * @gadget: gadget object
 *
 * Returns 0
 */
static int cdns3_gadget_udc_stop(struct usb_gadget *gadget)
{
	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
	struct cdns3_endpoint *priv_ep;
	u32 bEndpointAddress;
	struct usb_ep *ep;
	int val;

	priv_dev->gadget_driver = NULL;

	priv_dev->onchip_used_size = 0;
	priv_dev->out_mem_is_allocated = 0;
	priv_dev->gadget.speed = USB_SPEED_UNKNOWN;

	list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) {
		priv_ep = ep_to_cdns3_ep(ep);
		bEndpointAddress = priv_ep->num | priv_ep->dir;
		cdns3_select_ep(priv_dev, bEndpointAddress);
		writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
		readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
					  !(val & EP_CMD_EPRST), 1, 100);

		priv_ep->flags &= ~EP_CLAIMED;
	}

	/* disable interrupt for device */
	writel(0, &priv_dev->regs->usb_ien);
	writel(0, &priv_dev->regs->usb_pwr);
	writel(USB_CONF_DEVDS, &priv_dev->regs->usb_conf);

	return 0;
}

static const struct usb_gadget_ops cdns3_gadget_ops = {
	.get_frame = cdns3_gadget_get_frame,
	.wakeup = cdns3_gadget_wakeup,
	.set_selfpowered = cdns3_gadget_set_selfpowered,
	.pullup = cdns3_gadget_pullup,
	.udc_start = cdns3_gadget_udc_start,
	.udc_stop = cdns3_gadget_udc_stop,
	.match_ep = cdns3_gadget_match_ep,
};

static void cdns3_free_all_eps(struct cdns3_device *priv_dev)
{
	int i;

	/* eps[16] (ep0 IN) only aliases eps[0]; clear it before freeing */
	priv_dev->eps[16] = NULL;

	for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++)
		if (priv_dev->eps[i]) {
			cdns3_free_trb_pool(priv_dev->eps[i]);
			devm_kfree(priv_dev->dev, priv_dev->eps[i]);
		}
}
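
/*
 * Layout of the eps[] container filled in by cdns3_init_eps() below:
 * OUT endpoints occupy indices 0..15 and IN endpoints indices 16..31,
 * matching cdns3_ep_addr_to_index(). For example, ep1out lands in eps[1]
 * and ep1in in eps[17]. eps[16] (ep0 IN) is only an alias of eps[0],
 * which is why cdns3_free_all_eps() clears it before freeing.
 */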

/**
 * cdns3_init_eps - initializes software endpoints of gadget
 * @priv_dev: extended gadget object
 *
 * Returns 0 on success, error code otherwise
 */
static int cdns3_init_eps(struct cdns3_device *priv_dev)
{
	u32 ep_enabled_reg, iso_ep_reg;
	struct cdns3_endpoint *priv_ep;
	int ep_dir, ep_number;
	u32 ep_mask;
	int ret = 0;
	int i;

	/* Read endpoint capabilities from USB_CAP3 and USB_CAP4 */
	ep_enabled_reg = readl(&priv_dev->regs->usb_cap3);
	iso_ep_reg = readl(&priv_dev->regs->usb_cap4);

	dev_dbg(priv_dev->dev, "Initializing non-zero endpoints\n");

	for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++) {
		ep_dir = i >> 4;	/* i div 16 */
		ep_number = i & 0xF;	/* i % 16 */
		ep_mask = BIT(i);

		if (!(ep_enabled_reg & ep_mask))
			continue;

		if (ep_dir && !ep_number) {
			priv_dev->eps[i] = priv_dev->eps[0];
			continue;
		}

		priv_ep = devm_kzalloc(priv_dev->dev, sizeof(*priv_ep),
				       GFP_KERNEL);
		if (!priv_ep)
			goto err;

		/* set parent of endpoint object */
		priv_ep->cdns3_dev = priv_dev;
		priv_dev->eps[i] = priv_ep;
		priv_ep->num = ep_number;
		priv_ep->dir = ep_dir ? USB_DIR_IN : USB_DIR_OUT;

		if (!ep_number) {
			ret = cdns3_init_ep0(priv_dev, priv_ep);
			if (ret) {
				dev_err(priv_dev->dev, "Failed to init ep0\n");
				goto err;
			}
		} else {
			snprintf(priv_ep->name, sizeof(priv_ep->name), "ep%d%s",
				 ep_number, ep_dir ? "in" : "out");
			priv_ep->endpoint.name = priv_ep->name;

			usb_ep_set_maxpacket_limit(&priv_ep->endpoint,
						   CDNS3_EP_MAX_PACKET_LIMIT);
			priv_ep->endpoint.max_streams = CDNS3_EP_MAX_STREAMS;
			priv_ep->endpoint.ops = &cdns3_gadget_ep_ops;
			if (ep_dir)
				priv_ep->endpoint.caps.dir_in = 1;
			else
				priv_ep->endpoint.caps.dir_out = 1;

			if (iso_ep_reg & ep_mask)
				priv_ep->endpoint.caps.type_iso = 1;

			priv_ep->endpoint.caps.type_bulk = 1;
			priv_ep->endpoint.caps.type_int = 1;

			list_add_tail(&priv_ep->endpoint.ep_list,
				      &priv_dev->gadget.ep_list);
		}

		priv_ep->flags = 0;

		dev_dbg(priv_dev->dev, "Initialized %s support: %s %s\n",
			priv_ep->name,
			priv_ep->endpoint.caps.type_bulk ? "BULK, INT" : "",
			priv_ep->endpoint.caps.type_iso ? "ISO" : "");

		INIT_LIST_HEAD(&priv_ep->pending_req_list);
		INIT_LIST_HEAD(&priv_ep->deferred_req_list);
		INIT_LIST_HEAD(&priv_ep->wa2_descmiss_req_list);
	}

	return 0;
err:
	cdns3_free_all_eps(priv_dev);
	return -ENOMEM;
}
"ISO" : ""); 3067 3068 INIT_LIST_HEAD(&priv_ep->pending_req_list); 3069 INIT_LIST_HEAD(&priv_ep->deferred_req_list); 3070 INIT_LIST_HEAD(&priv_ep->wa2_descmiss_req_list); 3071 } 3072 3073 return 0; 3074 err: 3075 cdns3_free_all_eps(priv_dev); 3076 return -ENOMEM; 3077 } 3078 3079 static void cdns3_gadget_release(struct device *dev) 3080 { 3081 struct cdns3_device *priv_dev = container_of(dev, 3082 struct cdns3_device, gadget.dev); 3083 3084 kfree(priv_dev); 3085 } 3086 3087 static void cdns3_gadget_exit(struct cdns *cdns) 3088 { 3089 struct cdns3_device *priv_dev; 3090 3091 priv_dev = cdns->gadget_dev; 3092 3093 3094 pm_runtime_mark_last_busy(cdns->dev); 3095 pm_runtime_put_autosuspend(cdns->dev); 3096 3097 usb_del_gadget(&priv_dev->gadget); 3098 devm_free_irq(cdns->dev, cdns->dev_irq, priv_dev); 3099 3100 cdns3_free_all_eps(priv_dev); 3101 3102 while (!list_empty(&priv_dev->aligned_buf_list)) { 3103 struct cdns3_aligned_buf *buf; 3104 3105 buf = cdns3_next_align_buf(&priv_dev->aligned_buf_list); 3106 dma_free_coherent(priv_dev->sysdev, buf->size, 3107 buf->buf, 3108 buf->dma); 3109 3110 list_del(&buf->list); 3111 kfree(buf); 3112 } 3113 3114 dma_free_coherent(priv_dev->sysdev, 8, priv_dev->setup_buf, 3115 priv_dev->setup_dma); 3116 3117 kfree(priv_dev->zlp_buf); 3118 usb_put_gadget(&priv_dev->gadget); 3119 cdns->gadget_dev = NULL; 3120 cdns_drd_gadget_off(cdns); 3121 } 3122 3123 static int cdns3_gadget_start(struct cdns *cdns) 3124 { 3125 struct cdns3_device *priv_dev; 3126 u32 max_speed; 3127 int ret; 3128 3129 priv_dev = kzalloc(sizeof(*priv_dev), GFP_KERNEL); 3130 if (!priv_dev) 3131 return -ENOMEM; 3132 3133 usb_initialize_gadget(cdns->dev, &priv_dev->gadget, 3134 cdns3_gadget_release); 3135 cdns->gadget_dev = priv_dev; 3136 priv_dev->sysdev = cdns->dev; 3137 priv_dev->dev = cdns->dev; 3138 priv_dev->regs = cdns->dev_regs; 3139 3140 device_property_read_u16(priv_dev->dev, "cdns,on-chip-buff-size", 3141 &priv_dev->onchip_buffers); 3142 3143 if (priv_dev->onchip_buffers <= 0) { 3144 u32 reg = readl(&priv_dev->regs->usb_cap2); 3145 3146 priv_dev->onchip_buffers = USB_CAP2_ACTUAL_MEM_SIZE(reg); 3147 } 3148 3149 if (!priv_dev->onchip_buffers) 3150 priv_dev->onchip_buffers = 256; 3151 3152 max_speed = usb_get_maximum_speed(cdns->dev); 3153 3154 /* Check the maximum_speed parameter */ 3155 switch (max_speed) { 3156 case USB_SPEED_FULL: 3157 case USB_SPEED_HIGH: 3158 case USB_SPEED_SUPER: 3159 break; 3160 default: 3161 dev_err(cdns->dev, "invalid maximum_speed parameter %d\n", 3162 max_speed); 3163 fallthrough; 3164 case USB_SPEED_UNKNOWN: 3165 /* default to superspeed */ 3166 max_speed = USB_SPEED_SUPER; 3167 break; 3168 } 3169 3170 /* fill gadget fields */ 3171 priv_dev->gadget.max_speed = max_speed; 3172 priv_dev->gadget.speed = USB_SPEED_UNKNOWN; 3173 priv_dev->gadget.ops = &cdns3_gadget_ops; 3174 priv_dev->gadget.name = "usb-ss-gadget"; 3175 priv_dev->gadget.quirk_avoids_skb_reserve = 1; 3176 priv_dev->gadget.irq = cdns->dev_irq; 3177 3178 spin_lock_init(&priv_dev->lock); 3179 INIT_WORK(&priv_dev->pending_status_wq, 3180 cdns3_pending_setup_status_handler); 3181 3182 INIT_WORK(&priv_dev->aligned_buf_wq, 3183 cdns3_free_aligned_request_buf); 3184 3185 /* initialize endpoint container */ 3186 INIT_LIST_HEAD(&priv_dev->gadget.ep_list); 3187 INIT_LIST_HEAD(&priv_dev->aligned_buf_list); 3188 3189 ret = cdns3_init_eps(priv_dev); 3190 if (ret) { 3191 dev_err(priv_dev->dev, "Failed to create endpoints\n"); 3192 goto err1; 3193 } 3194 3195 /* allocate memory for setup packet buffer */ 3196 

	max_speed = usb_get_maximum_speed(cdns->dev);

	/* Check the maximum_speed parameter */
	switch (max_speed) {
	case USB_SPEED_FULL:
	case USB_SPEED_HIGH:
	case USB_SPEED_SUPER:
		break;
	default:
		dev_err(cdns->dev, "invalid maximum_speed parameter %d\n",
			max_speed);
		fallthrough;
	case USB_SPEED_UNKNOWN:
		/* default to superspeed */
		max_speed = USB_SPEED_SUPER;
		break;
	}

	/* fill gadget fields */
	priv_dev->gadget.max_speed = max_speed;
	priv_dev->gadget.speed = USB_SPEED_UNKNOWN;
	priv_dev->gadget.ops = &cdns3_gadget_ops;
	priv_dev->gadget.name = "usb-ss-gadget";
	priv_dev->gadget.quirk_avoids_skb_reserve = 1;
	priv_dev->gadget.irq = cdns->dev_irq;

	spin_lock_init(&priv_dev->lock);
	INIT_WORK(&priv_dev->pending_status_wq,
		  cdns3_pending_setup_status_handler);

	INIT_WORK(&priv_dev->aligned_buf_wq,
		  cdns3_free_aligned_request_buf);

	/* initialize endpoint container */
	INIT_LIST_HEAD(&priv_dev->gadget.ep_list);
	INIT_LIST_HEAD(&priv_dev->aligned_buf_list);

	ret = cdns3_init_eps(priv_dev);
	if (ret) {
		dev_err(priv_dev->dev, "Failed to create endpoints\n");
		goto err1;
	}

	/* allocate memory for setup packet buffer */
	priv_dev->setup_buf = dma_alloc_coherent(priv_dev->sysdev, 8,
						 &priv_dev->setup_dma, GFP_DMA);
	if (!priv_dev->setup_buf) {
		ret = -ENOMEM;
		goto err2;
	}

	priv_dev->dev_ver = readl(&priv_dev->regs->usb_cap6);

	dev_dbg(priv_dev->dev, "Device Controller version: %08x\n",
		readl(&priv_dev->regs->usb_cap6));
	dev_dbg(priv_dev->dev, "USB Capabilities: %08x\n",
		readl(&priv_dev->regs->usb_cap1));
	dev_dbg(priv_dev->dev, "On-Chip memory configuration: %08x\n",
		readl(&priv_dev->regs->usb_cap2));

	priv_dev->dev_ver = GET_DEV_BASE_VERSION(priv_dev->dev_ver);
	if (priv_dev->dev_ver >= DEV_VER_V2)
		priv_dev->gadget.sg_supported = 1;

	priv_dev->zlp_buf = kzalloc(CDNS3_EP_ZLP_BUF_SIZE, GFP_KERNEL);
	if (!priv_dev->zlp_buf) {
		ret = -ENOMEM;
		goto err3;
	}

	/* add USB gadget device */
	ret = usb_add_gadget(&priv_dev->gadget);
	if (ret < 0) {
		dev_err(priv_dev->dev, "Failed to add gadget\n");
		goto err4;
	}

	return 0;
err4:
	kfree(priv_dev->zlp_buf);
err3:
	dma_free_coherent(priv_dev->sysdev, 8, priv_dev->setup_buf,
			  priv_dev->setup_dma);
err2:
	cdns3_free_all_eps(priv_dev);
err1:
	usb_put_gadget(&priv_dev->gadget);
	cdns->gadget_dev = NULL;
	return ret;
}

static int __cdns3_gadget_init(struct cdns *cdns)
{
	int ret = 0;

	/* Ensure 32-bit DMA Mask in case we switched back from Host mode */
	ret = dma_set_mask_and_coherent(cdns->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(cdns->dev, "Failed to set dma mask: %d\n", ret);
		return ret;
	}

	cdns_drd_gadget_on(cdns);
	pm_runtime_get_sync(cdns->dev);

	ret = cdns3_gadget_start(cdns);
	if (ret)
		return ret;

	/*
	 * Because the interrupt line can be shared with other components in
	 * the driver, it can't use the IRQF_ONESHOT flag here.
	 */
	ret = devm_request_threaded_irq(cdns->dev, cdns->dev_irq,
					cdns3_device_irq_handler,
					cdns3_device_thread_irq_handler,
					IRQF_SHARED, dev_name(cdns->dev),
					cdns->gadget_dev);
	if (ret)
		goto err0;

	return 0;
err0:
	cdns3_gadget_exit(cdns);
	return ret;
}

static int cdns3_gadget_suspend(struct cdns *cdns, bool do_wakeup)
__must_hold(&cdns->lock)
{
	struct cdns3_device *priv_dev = cdns->gadget_dev;

	spin_unlock(&cdns->lock);
	cdns3_disconnect_gadget(priv_dev);
	spin_lock(&cdns->lock);

	priv_dev->gadget.speed = USB_SPEED_UNKNOWN;
	usb_gadget_set_state(&priv_dev->gadget, USB_STATE_NOTATTACHED);
	cdns3_hw_reset_eps_config(priv_dev);

	/* disable interrupt for device */
	writel(0, &priv_dev->regs->usb_ien);

	return 0;
}

static int cdns3_gadget_resume(struct cdns *cdns, bool hibernated)
{
	struct cdns3_device *priv_dev = cdns->gadget_dev;

	if (!priv_dev->gadget_driver)
		return 0;

	cdns3_gadget_config(priv_dev);

	return 0;
}

/**
 * cdns3_gadget_init - initialize device structure
 *
 * @cdns: cdns instance
 *
 * This function initializes the gadget.
 */
int cdns3_gadget_init(struct cdns *cdns)
{
	struct cdns_role_driver *rdrv;

	rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL);
	if (!rdrv)
		return -ENOMEM;

	rdrv->start = __cdns3_gadget_init;
	rdrv->stop = cdns3_gadget_exit;
	rdrv->suspend = cdns3_gadget_suspend;
	rdrv->resume = cdns3_gadget_resume;
	rdrv->state = CDNS_ROLE_STATE_INACTIVE;
	rdrv->name = "gadget";
	cdns->roles[USB_ROLE_DEVICE] = rdrv;

	return 0;
}