// SPDX-License-Identifier: GPL-2.0+
/*
 * NVIDIA Tegra XUSB device mode controller
 *
 * Copyright (c) 2013-2019, NVIDIA CORPORATION. All rights reserved.
 * Copyright (c) 2015, Google Inc.
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/phy/tegra/xusb.h>
#include <linux/pm_domain.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/role.h>
#include <linux/workqueue.h>

/* XUSB_DEV registers */
#define SPARAM 0x000
#define  SPARAM_ERSTMAX_MASK GENMASK(20, 16)
#define  SPARAM_ERSTMAX(x) (((x) << 16) & SPARAM_ERSTMAX_MASK)
#define DB 0x004
#define  DB_TARGET_MASK GENMASK(15, 8)
#define  DB_TARGET(x) (((x) << 8) & DB_TARGET_MASK)
#define  DB_STREAMID_MASK GENMASK(31, 16)
#define  DB_STREAMID(x) (((x) << 16) & DB_STREAMID_MASK)
#define ERSTSZ 0x008
#define  ERSTSZ_ERSTXSZ_SHIFT(x) ((x) * 16)
#define  ERSTSZ_ERSTXSZ_MASK GENMASK(15, 0)
#define ERSTXBALO(x) (0x010 + 8 * (x))
#define ERSTXBAHI(x) (0x014 + 8 * (x))
#define ERDPLO 0x020
#define  ERDPLO_EHB BIT(3)
#define ERDPHI 0x024
#define EREPLO 0x028
#define  EREPLO_ECS BIT(0)
#define  EREPLO_SEGI BIT(1)
#define EREPHI 0x02c
#define CTRL 0x030
#define  CTRL_RUN BIT(0)
#define  CTRL_LSE BIT(1)
#define  CTRL_IE BIT(4)
#define  CTRL_SMI_EVT BIT(5)
#define  CTRL_SMI_DSE BIT(6)
#define  CTRL_EWE BIT(7)
#define  CTRL_DEVADDR_MASK GENMASK(30, 24)
#define  CTRL_DEVADDR(x) (((x) << 24) & CTRL_DEVADDR_MASK)
#define  CTRL_ENABLE BIT(31)
#define ST 0x034
#define  ST_RC BIT(0)
#define  ST_IP BIT(4)
#define RT_IMOD 0x038
#define  RT_IMOD_IMODI_MASK GENMASK(15, 0)
#define  RT_IMOD_IMODI(x) ((x) & RT_IMOD_IMODI_MASK)
#define  RT_IMOD_IMODC_MASK GENMASK(31, 16)
#define  RT_IMOD_IMODC(x) (((x) << 16) & RT_IMOD_IMODC_MASK)
#define PORTSC 0x03c
#define  PORTSC_CCS BIT(0)
#define  PORTSC_PED BIT(1)
#define  PORTSC_PR BIT(4)
#define  PORTSC_PLS_SHIFT 5
#define  PORTSC_PLS_MASK GENMASK(8, 5)
#define  PORTSC_PLS_U0 0x0
#define  PORTSC_PLS_U2 0x2
#define  PORTSC_PLS_U3 0x3
#define  PORTSC_PLS_DISABLED 0x4
#define  PORTSC_PLS_RXDETECT 0x5
#define  PORTSC_PLS_INACTIVE 0x6
#define  PORTSC_PLS_RESUME 0xf
#define  PORTSC_PLS(x) (((x) << PORTSC_PLS_SHIFT) & PORTSC_PLS_MASK)
#define  PORTSC_PS_SHIFT 10
#define  PORTSC_PS_MASK GENMASK(13, 10)
#define  PORTSC_PS_UNDEFINED 0x0
#define  PORTSC_PS_FS 0x1
#define  PORTSC_PS_LS 0x2
#define  PORTSC_PS_HS 0x3
#define  PORTSC_PS_SS 0x4
#define  PORTSC_LWS BIT(16)
#define  PORTSC_CSC BIT(17)
#define  PORTSC_WRC BIT(19)
#define  PORTSC_PRC BIT(21)
#define  PORTSC_PLC BIT(22)
#define  PORTSC_CEC BIT(23)
#define  PORTSC_WPR BIT(30)
#define  PORTSC_CHANGE_MASK (PORTSC_CSC | PORTSC_WRC | PORTSC_PRC | \
			     PORTSC_PLC | PORTSC_CEC)
#define ECPLO 0x040
#define ECPHI 0x044
#define MFINDEX 0x048
#define  MFINDEX_FRAME_SHIFT 3
#define  MFINDEX_FRAME_MASK GENMASK(13, 3)
#define PORTPM 0x04c
#define  PORTPM_L1S_MASK GENMASK(1, 0)
#define  PORTPM_L1S_DROP 0x0
#define  PORTPM_L1S_ACCEPT 0x1
#define  PORTPM_L1S_NYET 0x2
#define  PORTPM_L1S_STALL 0x3
#define  PORTPM_L1S(x) ((x) & PORTPM_L1S_MASK)
#define  PORTPM_RWE BIT(3)
#define  PORTPM_U2TIMEOUT_MASK GENMASK(15, 8)
#define  PORTPM_U1TIMEOUT_MASK GENMASK(23, 16)
#define  PORTPM_FLA BIT(24)
#define  PORTPM_VBA BIT(25)
#define  PORTPM_WOC BIT(26)
#define  PORTPM_WOD BIT(27)
#define  PORTPM_U1E BIT(28)
#define  PORTPM_U2E BIT(29)
#define  PORTPM_FRWE BIT(30)
#define  PORTPM_PNG_CYA BIT(31)
#define EP_HALT 0x050
#define EP_PAUSE 0x054
#define EP_RELOAD 0x058
#define EP_STCHG 0x05c
#define DEVNOTIF_LO 0x064
#define  DEVNOTIF_LO_TRIG BIT(0)
#define  DEVNOTIF_LO_TYPE_MASK GENMASK(7, 4)
#define  DEVNOTIF_LO_TYPE(x) (((x) << 4) & DEVNOTIF_LO_TYPE_MASK)
#define  DEVNOTIF_LO_TYPE_FUNCTION_WAKE 0x1
#define DEVNOTIF_HI 0x068
#define PORTHALT 0x06c
#define  PORTHALT_HALT_LTSSM BIT(0)
#define  PORTHALT_HALT_REJECT BIT(1)
#define  PORTHALT_STCHG_REQ BIT(20)
#define  PORTHALT_STCHG_INTR_EN BIT(24)
#define PORT_TM 0x070
#define EP_THREAD_ACTIVE 0x074
#define EP_STOPPED 0x078
#define HSFSPI_COUNT0 0x100
#define HSFSPI_COUNT13 0x134
#define  HSFSPI_COUNT13_U2_RESUME_K_DURATION_MASK GENMASK(29, 0)
#define  HSFSPI_COUNT13_U2_RESUME_K_DURATION(x) ((x) & \
		HSFSPI_COUNT13_U2_RESUME_K_DURATION_MASK)
#define BLCG 0x840
#define SSPX_CORE_CNT0 0x610
#define  SSPX_CORE_CNT0_PING_TBURST_MASK GENMASK(7, 0)
#define  SSPX_CORE_CNT0_PING_TBURST(x) ((x) & SSPX_CORE_CNT0_PING_TBURST_MASK)
#define SSPX_CORE_CNT30 0x688
#define  SSPX_CORE_CNT30_LMPITP_TIMER_MASK GENMASK(19, 0)
#define  SSPX_CORE_CNT30_LMPITP_TIMER(x) ((x) & \
		SSPX_CORE_CNT30_LMPITP_TIMER_MASK)
#define SSPX_CORE_CNT32 0x690
#define  SSPX_CORE_CNT32_POLL_TBURST_MAX_MASK GENMASK(7, 0)
#define  SSPX_CORE_CNT32_POLL_TBURST_MAX(x) ((x) & \
		SSPX_CORE_CNT32_POLL_TBURST_MAX_MASK)
#define SSPX_CORE_PADCTL4 0x750
#define  SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3_MASK GENMASK(19, 0)
#define  SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3(x) ((x) & \
		SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3_MASK)
#define  BLCG_DFPCI BIT(0)
#define  BLCG_UFPCI BIT(1)
#define  BLCG_FE BIT(2)
#define  BLCG_COREPLL_PWRDN BIT(8)
#define  BLCG_IOPLL_0_PWRDN BIT(9)
#define  BLCG_IOPLL_1_PWRDN BIT(10)
#define  BLCG_IOPLL_2_PWRDN BIT(11)
#define  BLCG_ALL 0x1ff
#define CFG_DEV_SSPI_XFER 0x858
#define  CFG_DEV_SSPI_XFER_ACKTIMEOUT_MASK GENMASK(31, 0)
#define  CFG_DEV_SSPI_XFER_ACKTIMEOUT(x) ((x) & \
		CFG_DEV_SSPI_XFER_ACKTIMEOUT_MASK)
#define CFG_DEV_FE 0x85c
#define  CFG_DEV_FE_PORTREGSEL_MASK GENMASK(1, 0)
#define  CFG_DEV_FE_PORTREGSEL_SS_PI 1
#define  CFG_DEV_FE_PORTREGSEL_HSFS_PI 2
#define  CFG_DEV_FE_PORTREGSEL(x) ((x) & CFG_DEV_FE_PORTREGSEL_MASK)
#define  CFG_DEV_FE_INFINITE_SS_RETRY BIT(29)

/* FPCI registers */
#define XUSB_DEV_CFG_1 0x004
#define  XUSB_DEV_CFG_1_IO_SPACE_EN BIT(0)
#define  XUSB_DEV_CFG_1_MEMORY_SPACE_EN BIT(1)
#define  XUSB_DEV_CFG_1_BUS_MASTER_EN BIT(2)
#define XUSB_DEV_CFG_4 0x010
#define  XUSB_DEV_CFG_4_BASE_ADDR_MASK GENMASK(31, 15)
#define XUSB_DEV_CFG_5 0x014

/* IPFS registers */
#define XUSB_DEV_CONFIGURATION_0 0x180
#define  XUSB_DEV_CONFIGURATION_0_EN_FPCI BIT(0)
#define XUSB_DEV_INTR_MASK_0 0x188
#define  XUSB_DEV_INTR_MASK_0_IP_INT_MASK BIT(16)
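
/*
 * Example (illustrative only, not part of the original driver): the FIELD(x)
 * helper macros above already mask their argument, so a read-modify-write of
 * a multi-bit field follows this pattern:
 *
 *	val = xudc_readl(xudc, CTRL);
 *	val &= ~CTRL_DEVADDR_MASK;
 *	val |= CTRL_DEVADDR(addr);
 *	xudc_writel(xudc, val, CTRL);
 *
 * which is exactly what tegra_xudc_ep0_set_address() does further down.
 */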

struct tegra_xudc_ep_context {
	__le32 info0;
	__le32 info1;
	__le32 deq_lo;
	__le32 deq_hi;
	__le32 tx_info;
	__le32 rsvd[11];
};

#define EP_STATE_DISABLED 0
#define EP_STATE_RUNNING 1
#define EP_STATE_HALTED 2
#define EP_STATE_STOPPED 3
#define EP_STATE_ERROR 4

#define EP_TYPE_INVALID 0
#define EP_TYPE_ISOCH_OUT 1
#define EP_TYPE_BULK_OUT 2
#define EP_TYPE_INTERRUPT_OUT 3
#define EP_TYPE_CONTROL 4
#define EP_TYPE_ISOCH_IN 5
#define EP_TYPE_BULK_IN 6
#define EP_TYPE_INTERRUPT_IN 7

#define BUILD_EP_CONTEXT_RW(name, member, shift, mask)			\
static inline u32 ep_ctx_read_##name(struct tegra_xudc_ep_context *ctx)	\
{									\
	return (le32_to_cpu(ctx->member) >> (shift)) & (mask);		\
}									\
static inline void							\
ep_ctx_write_##name(struct tegra_xudc_ep_context *ctx, u32 val)		\
{									\
	u32 tmp;							\
									\
	tmp = le32_to_cpu(ctx->member) & ~((mask) << (shift));		\
	tmp |= (val & (mask)) << (shift);				\
	ctx->member = cpu_to_le32(tmp);					\
}

BUILD_EP_CONTEXT_RW(state, info0, 0, 0x7)
BUILD_EP_CONTEXT_RW(mult, info0, 8, 0x3)
BUILD_EP_CONTEXT_RW(max_pstreams, info0, 10, 0x1f)
BUILD_EP_CONTEXT_RW(lsa, info0, 15, 0x1)
BUILD_EP_CONTEXT_RW(interval, info0, 16, 0xff)
BUILD_EP_CONTEXT_RW(cerr, info1, 1, 0x3)
BUILD_EP_CONTEXT_RW(type, info1, 3, 0x7)
BUILD_EP_CONTEXT_RW(hid, info1, 7, 0x1)
BUILD_EP_CONTEXT_RW(max_burst_size, info1, 8, 0xff)
BUILD_EP_CONTEXT_RW(max_packet_size, info1, 16, 0xffff)
BUILD_EP_CONTEXT_RW(dcs, deq_lo, 0, 0x1)
BUILD_EP_CONTEXT_RW(deq_lo, deq_lo, 4, 0xfffffff)
BUILD_EP_CONTEXT_RW(deq_hi, deq_hi, 0, 0xffffffff)
BUILD_EP_CONTEXT_RW(avg_trb_len, tx_info, 0, 0xffff)
BUILD_EP_CONTEXT_RW(max_esit_payload, tx_info, 16, 0xffff)
BUILD_EP_CONTEXT_RW(edtla, rsvd[0], 0, 0xffffff)
BUILD_EP_CONTEXT_RW(seq_num, rsvd[0], 24, 0xff)
BUILD_EP_CONTEXT_RW(partial_td, rsvd[0], 25, 0x1)
BUILD_EP_CONTEXT_RW(cerrcnt, rsvd[1], 18, 0x3)
BUILD_EP_CONTEXT_RW(data_offset, rsvd[2], 0, 0x1ffff)
BUILD_EP_CONTEXT_RW(numtrbs, rsvd[2], 22, 0x1f)
BUILD_EP_CONTEXT_RW(devaddr, rsvd[6], 0, 0x7f)

static inline u64 ep_ctx_read_deq_ptr(struct tegra_xudc_ep_context *ctx)
{
	return ((u64)ep_ctx_read_deq_hi(ctx) << 32) |
		(ep_ctx_read_deq_lo(ctx) << 4);
}

static inline void
ep_ctx_write_deq_ptr(struct tegra_xudc_ep_context *ctx, u64 addr)
{
	ep_ctx_write_deq_lo(ctx, lower_32_bits(addr) >> 4);
	ep_ctx_write_deq_hi(ctx, upper_32_bits(addr));
}
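
/*
 * Illustrative expansion (shown only for reference, never compiled):
 * BUILD_EP_CONTEXT_RW(state, info0, 0, 0x7) generates this accessor pair:
 *
 *	static inline u32 ep_ctx_read_state(struct tegra_xudc_ep_context *ctx)
 *	{
 *		return (le32_to_cpu(ctx->info0) >> 0) & 0x7;
 *	}
 *
 *	static inline void
 *	ep_ctx_write_state(struct tegra_xudc_ep_context *ctx, u32 val)
 *	{
 *		u32 tmp;
 *
 *		tmp = le32_to_cpu(ctx->info0) & ~(0x7 << 0);
 *		tmp |= (val & 0x7) << 0;
 *		ctx->info0 = cpu_to_le32(tmp);
 *	}
 *
 * The context words stay little-endian in memory because the controller
 * reads them directly via DMA.
 */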

struct tegra_xudc_trb {
	__le32 data_lo;
	__le32 data_hi;
	__le32 status;
	__le32 control;
};

#define TRB_TYPE_RSVD 0
#define TRB_TYPE_NORMAL 1
#define TRB_TYPE_SETUP_STAGE 2
#define TRB_TYPE_DATA_STAGE 3
#define TRB_TYPE_STATUS_STAGE 4
#define TRB_TYPE_ISOCH 5
#define TRB_TYPE_LINK 6
#define TRB_TYPE_TRANSFER_EVENT 32
#define TRB_TYPE_PORT_STATUS_CHANGE_EVENT 34
#define TRB_TYPE_STREAM 48
#define TRB_TYPE_SETUP_PACKET_EVENT 63

#define TRB_CMPL_CODE_INVALID 0
#define TRB_CMPL_CODE_SUCCESS 1
#define TRB_CMPL_CODE_DATA_BUFFER_ERR 2
#define TRB_CMPL_CODE_BABBLE_DETECTED_ERR 3
#define TRB_CMPL_CODE_USB_TRANS_ERR 4
#define TRB_CMPL_CODE_TRB_ERR 5
#define TRB_CMPL_CODE_STALL 6
#define TRB_CMPL_CODE_INVALID_STREAM_TYPE_ERR 10
#define TRB_CMPL_CODE_SHORT_PACKET 13
#define TRB_CMPL_CODE_RING_UNDERRUN 14
#define TRB_CMPL_CODE_RING_OVERRUN 15
#define TRB_CMPL_CODE_EVENT_RING_FULL_ERR 21
#define TRB_CMPL_CODE_STOPPED 26
#define TRB_CMPL_CODE_ISOCH_BUFFER_OVERRUN 31
#define TRB_CMPL_CODE_STREAM_NUMP_ERROR 219
#define TRB_CMPL_CODE_PRIME_PIPE_RECEIVED 220
#define TRB_CMPL_CODE_HOST_REJECTED 221
#define TRB_CMPL_CODE_CTRL_DIR_ERR 222
#define TRB_CMPL_CODE_CTRL_SEQNUM_ERR 223

#define BUILD_TRB_RW(name, member, shift, mask)				\
static inline u32 trb_read_##name(struct tegra_xudc_trb *trb)		\
{									\
	return (le32_to_cpu(trb->member) >> (shift)) & (mask);		\
}									\
static inline void							\
trb_write_##name(struct tegra_xudc_trb *trb, u32 val)			\
{									\
	u32 tmp;							\
									\
	tmp = le32_to_cpu(trb->member) & ~((mask) << (shift));		\
	tmp |= (val & (mask)) << (shift);				\
	trb->member = cpu_to_le32(tmp);					\
}

BUILD_TRB_RW(data_lo, data_lo, 0, 0xffffffff)
BUILD_TRB_RW(data_hi, data_hi, 0, 0xffffffff)
BUILD_TRB_RW(seq_num, status, 0, 0xffff)
BUILD_TRB_RW(transfer_len, status, 0, 0xffffff)
BUILD_TRB_RW(td_size, status, 17, 0x1f)
BUILD_TRB_RW(cmpl_code, status, 24, 0xff)
BUILD_TRB_RW(cycle, control, 0, 0x1)
BUILD_TRB_RW(toggle_cycle, control, 1, 0x1)
BUILD_TRB_RW(isp, control, 2, 0x1)
BUILD_TRB_RW(chain, control, 4, 0x1)
BUILD_TRB_RW(ioc, control, 5, 0x1)
BUILD_TRB_RW(type, control, 10, 0x3f)
BUILD_TRB_RW(stream_id, control, 16, 0xffff)
BUILD_TRB_RW(endpoint_id, control, 16, 0x1f)
BUILD_TRB_RW(tlbpc, control, 16, 0xf)
BUILD_TRB_RW(data_stage_dir, control, 16, 0x1)
BUILD_TRB_RW(frame_id, control, 20, 0x7ff)
BUILD_TRB_RW(sia, control, 31, 0x1)

static inline u64 trb_read_data_ptr(struct tegra_xudc_trb *trb)
{
	return ((u64)trb_read_data_hi(trb) << 32) |
		trb_read_data_lo(trb);
}

static inline void trb_write_data_ptr(struct tegra_xudc_trb *trb, u64 addr)
{
	trb_write_data_lo(trb, lower_32_bits(addr));
	trb_write_data_hi(trb, upper_32_bits(addr));
}
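
/*
 * Note (summary of how the accessors are used below): each TRB carries a
 * cycle bit; the driver toggles ep->pcs whenever the enqueue pointer wraps
 * past the link TRB, and the controller only consumes TRBs whose cycle bit
 * matches its current cycle state. See tegra_xudc_queue_trbs().
 */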

struct tegra_xudc_request {
	struct usb_request usb_req;

	size_t buf_queued;
	unsigned int trbs_queued;
	unsigned int trbs_needed;
	bool need_zlp;

	struct tegra_xudc_trb *first_trb;
	struct tegra_xudc_trb *last_trb;

	struct list_head list;
};

struct tegra_xudc_ep {
	struct tegra_xudc *xudc;
	struct usb_ep usb_ep;
	unsigned int index;
	char name[8];

	struct tegra_xudc_ep_context *context;

#define XUDC_TRANSFER_RING_SIZE 64
	struct tegra_xudc_trb *transfer_ring;
	dma_addr_t transfer_ring_phys;

	unsigned int enq_ptr;
	unsigned int deq_ptr;
	bool pcs;
	bool ring_full;
	bool stream_rejected;

	struct list_head queue;
	const struct usb_endpoint_descriptor *desc;
	const struct usb_ss_ep_comp_descriptor *comp_desc;
};

struct tegra_xudc_sel_timing {
	__u8 u1sel;
	__u8 u1pel;
	__le16 u2sel;
	__le16 u2pel;
};

enum tegra_xudc_setup_state {
	WAIT_FOR_SETUP,
	DATA_STAGE_XFER,
	DATA_STAGE_RECV,
	STATUS_STAGE_XFER,
	STATUS_STAGE_RECV,
};

struct tegra_xudc_setup_packet {
	struct usb_ctrlrequest ctrl_req;
	unsigned int seq_num;
};

struct tegra_xudc_save_regs {
	u32 ctrl;
	u32 portpm;
};

struct tegra_xudc {
	struct device *dev;
	const struct tegra_xudc_soc *soc;
	struct tegra_xusb_padctl *padctl;

	spinlock_t lock;

	struct usb_gadget gadget;
	struct usb_gadget_driver *driver;

#define XUDC_NR_EVENT_RINGS 2
#define XUDC_EVENT_RING_SIZE 4096
	struct tegra_xudc_trb *event_ring[XUDC_NR_EVENT_RINGS];
	dma_addr_t event_ring_phys[XUDC_NR_EVENT_RINGS];
	unsigned int event_ring_index;
	unsigned int event_ring_deq_ptr;
	bool ccs;

#define XUDC_NR_EPS 32
	struct tegra_xudc_ep ep[XUDC_NR_EPS];
	struct tegra_xudc_ep_context *ep_context;
	dma_addr_t ep_context_phys;

	struct device *genpd_dev_device;
	struct device *genpd_dev_ss;
	struct device_link *genpd_dl_device;
	struct device_link *genpd_dl_ss;

	struct dma_pool *transfer_ring_pool;

	bool queued_setup_packet;
	struct tegra_xudc_setup_packet setup_packet;
	enum tegra_xudc_setup_state setup_state;
	u16 setup_seq_num;

	u16 dev_addr;
	u16 isoch_delay;
	struct tegra_xudc_sel_timing sel_timing;
	u8 test_mode_pattern;
	u16 status_buf;
	struct tegra_xudc_request *ep0_req;

	bool pullup;

	unsigned int nr_enabled_eps;
	unsigned int nr_isoch_eps;

	unsigned int device_state;
	unsigned int resume_state;

	int irq;

	void __iomem *base;
	resource_size_t phys_base;
	void __iomem *ipfs;
	void __iomem *fpci;

	struct regulator_bulk_data *supplies;

	struct clk_bulk_data *clks;

	enum usb_role device_mode;
	struct usb_role_switch *usb_role_sw;
	struct work_struct usb_role_sw_work;

	struct phy *usb3_phy;
	struct phy *utmi_phy;

	struct tegra_xudc_save_regs saved_regs;
	bool suspended;
	bool powergated;

	struct completion disconnect_complete;

	bool selfpowered;

#define TOGGLE_VBUS_WAIT_MS 100
	struct delayed_work plc_reset_work;
	bool wait_csc;

	struct delayed_work port_reset_war_work;
	bool wait_for_sec_prc;
};

#define XUDC_TRB_MAX_BUFFER_SIZE 65536
#define XUDC_MAX_ISOCH_EPS 4
#define XUDC_INTERRUPT_MODERATION_US 0

static struct usb_endpoint_descriptor tegra_xudc_ep0_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = 0,
	.bmAttributes = USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize = cpu_to_le16(64),
};

struct tegra_xudc_soc {
	const char * const *supply_names;
	unsigned int num_supplies;
	const char * const *clock_names;
	unsigned int num_clks;
	bool u1_enable;
	bool u2_enable;
	bool lpm_enable;
	bool invalid_seq_num;
	bool pls_quirk;
	bool port_reset_quirk;
	bool has_ipfs;
};

static inline u32 fpci_readl(struct tegra_xudc *xudc, unsigned int offset)
{
	return readl(xudc->fpci + offset);
}

static inline void fpci_writel(struct tegra_xudc *xudc, u32 val,
			       unsigned int offset)
{
	writel(val, xudc->fpci + offset);
}

static inline u32 ipfs_readl(struct tegra_xudc *xudc, unsigned int offset)
{
	return readl(xudc->ipfs + offset);
}

static inline void ipfs_writel(struct tegra_xudc *xudc, u32 val,
			       unsigned int offset)
{
	writel(val, xudc->ipfs + offset);
}

static inline u32 xudc_readl(struct tegra_xudc *xudc, unsigned int offset)
{
	return readl(xudc->base + offset);
}

static inline void xudc_writel(struct tegra_xudc *xudc, u32 val,
			       unsigned int offset)
{
	writel(val, xudc->base + offset);
}

static inline int xudc_readl_poll(struct tegra_xudc *xudc,
				  unsigned int offset, u32 mask, u32 val)
{
	u32 regval;

	return readl_poll_timeout_atomic(xudc->base + offset, regval,
					 (regval & mask) == val, 1, 100);
}
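
/*
 * xudc_readl_poll() spins in atomic context, sampling every 1 us and giving
 * up after 100 us; callers use it for the short EP_RELOAD/EP_STCHG style
 * handshakes where the controller acknowledges almost immediately.
 */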

static inline struct tegra_xudc *to_xudc(struct usb_gadget *gadget)
{
	return container_of(gadget, struct tegra_xudc, gadget);
}

static inline struct tegra_xudc_ep *to_xudc_ep(struct usb_ep *ep)
{
	return container_of(ep, struct tegra_xudc_ep, usb_ep);
}

static inline struct tegra_xudc_request *to_xudc_req(struct usb_request *req)
{
	return container_of(req, struct tegra_xudc_request, usb_req);
}

static inline void dump_trb(struct tegra_xudc *xudc, const char *type,
			    struct tegra_xudc_trb *trb)
{
	dev_dbg(xudc->dev,
		"%s: %p, lo = %#x, hi = %#x, status = %#x, control = %#x\n",
		type, trb, trb->data_lo, trb->data_hi, trb->status,
		trb->control);
}

static void tegra_xudc_device_mode_on(struct tegra_xudc *xudc)
{
	int err;

	pm_runtime_get_sync(xudc->dev);

	err = phy_power_on(xudc->utmi_phy);
	if (err < 0)
		dev_err(xudc->dev, "utmi power on failed %d\n", err);

	err = phy_power_on(xudc->usb3_phy);
	if (err < 0)
		dev_err(xudc->dev, "usb3 phy power on failed %d\n", err);

	dev_dbg(xudc->dev, "device mode on\n");

	tegra_xusb_padctl_set_vbus_override(xudc->padctl, true);

	xudc->device_mode = USB_ROLE_DEVICE;
}

static void tegra_xudc_device_mode_off(struct tegra_xudc *xudc)
{
	bool connected = false;
	u32 pls, val;
	int err;

	dev_dbg(xudc->dev, "device mode off\n");

	connected = !!(xudc_readl(xudc, PORTSC) & PORTSC_CCS);

	reinit_completion(&xudc->disconnect_complete);

	tegra_xusb_padctl_set_vbus_override(xudc->padctl, false);

	pls = (xudc_readl(xudc, PORTSC) & PORTSC_PLS_MASK) >>
		PORTSC_PLS_SHIFT;

	/* Direct link to U0 if disconnected in RESUME or U2. */
	if (xudc->soc->pls_quirk && xudc->gadget.speed == USB_SPEED_SUPER &&
	    (pls == PORTSC_PLS_RESUME || pls == PORTSC_PLS_U2)) {
		val = xudc_readl(xudc, PORTPM);
		val |= PORTPM_FRWE;
		xudc_writel(xudc, val, PORTPM);

		val = xudc_readl(xudc, PORTSC);
		val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
		val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_U0);
		xudc_writel(xudc, val, PORTSC);
	}

	xudc->device_mode = USB_ROLE_NONE;

	/* Wait for disconnect event. */
	if (connected)
		wait_for_completion(&xudc->disconnect_complete);

	/* Make sure interrupt handler has completed before powergating. */
	synchronize_irq(xudc->irq);

	err = phy_power_off(xudc->utmi_phy);
	if (err < 0)
		dev_err(xudc->dev, "utmi_phy power off failed %d\n", err);

	err = phy_power_off(xudc->usb3_phy);
	if (err < 0)
		dev_err(xudc->dev, "usb3_phy power off failed %d\n", err);

	pm_runtime_put(xudc->dev);
}

static void tegra_xudc_usb_role_sw_work(struct work_struct *work)
{
	struct tegra_xudc *xudc = container_of(work, struct tegra_xudc,
					       usb_role_sw_work);

	if (!xudc->usb_role_sw ||
	    usb_role_switch_get_role(xudc->usb_role_sw) == USB_ROLE_DEVICE)
		tegra_xudc_device_mode_on(xudc);
	else
		tegra_xudc_device_mode_off(xudc);
}

static int tegra_xudc_usb_role_sw_set(struct device *dev, enum usb_role role)
{
	struct tegra_xudc *xudc = dev_get_drvdata(dev);
	unsigned long flags;

	dev_dbg(dev, "%s role is %d\n", __func__, role);

	spin_lock_irqsave(&xudc->lock, flags);

	if (!xudc->suspended)
		schedule_work(&xudc->usb_role_sw_work);

	spin_unlock_irqrestore(&xudc->lock, flags);

	return 0;
}

static void tegra_xudc_plc_reset_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct tegra_xudc *xudc = container_of(dwork, struct tegra_xudc,
					       plc_reset_work);
	unsigned long flags;

	spin_lock_irqsave(&xudc->lock, flags);

	if (xudc->wait_csc) {
		u32 pls = (xudc_readl(xudc, PORTSC) & PORTSC_PLS_MASK) >>
			PORTSC_PLS_SHIFT;

		if (pls == PORTSC_PLS_INACTIVE) {
			dev_info(xudc->dev, "PLS = Inactive. Toggle VBUS\n");
			tegra_xusb_padctl_set_vbus_override(xudc->padctl,
							    false);
			tegra_xusb_padctl_set_vbus_override(xudc->padctl, true);
			xudc->wait_csc = false;
		}
	}

	spin_unlock_irqrestore(&xudc->lock, flags);
}

static void tegra_xudc_port_reset_war_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct tegra_xudc *xudc =
		container_of(dwork, struct tegra_xudc, port_reset_war_work);
	unsigned long flags;
	u32 pls;
	int ret;

	spin_lock_irqsave(&xudc->lock, flags);

	if ((xudc->device_mode == USB_ROLE_DEVICE) &&
	    xudc->wait_for_sec_prc) {
		pls = (xudc_readl(xudc, PORTSC) & PORTSC_PLS_MASK) >>
			PORTSC_PLS_SHIFT;
		dev_dbg(xudc->dev, "pls = %x\n", pls);

		if (pls == PORTSC_PLS_DISABLED) {
			dev_dbg(xudc->dev, "toggle vbus\n");
			/* PRC doesn't complete in 100ms, toggle the vbus */
			ret = tegra_phy_xusb_utmi_port_reset(xudc->utmi_phy);
			if (ret == 1)
				xudc->wait_for_sec_prc = 0;
		}
	}

	spin_unlock_irqrestore(&xudc->lock, flags);
}

static dma_addr_t trb_virt_to_phys(struct tegra_xudc_ep *ep,
				   struct tegra_xudc_trb *trb)
{
	unsigned int index;

	index = trb - ep->transfer_ring;

	if (WARN_ON(index >= XUDC_TRANSFER_RING_SIZE))
		return 0;

	return (ep->transfer_ring_phys + index * sizeof(*trb));
}

static struct tegra_xudc_trb *trb_phys_to_virt(struct tegra_xudc_ep *ep,
					       dma_addr_t addr)
{
	struct tegra_xudc_trb *trb;
	unsigned int index;

	index = (addr - ep->transfer_ring_phys) / sizeof(*trb);

	if (WARN_ON(index >= XUDC_TRANSFER_RING_SIZE))
		return NULL;

	trb = &ep->transfer_ring[index];

	return trb;
}
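
/*
 * Both translations above assume the transfer ring is one physically
 * contiguous allocation (it comes from xudc->transfer_ring_pool), so a
 * TRB's bus address is just the ring base plus index * sizeof(TRB).
 */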

static void ep_reload(struct tegra_xudc *xudc, unsigned int ep)
{
	xudc_writel(xudc, BIT(ep), EP_RELOAD);
	xudc_readl_poll(xudc, EP_RELOAD, BIT(ep), 0);
}

static void ep_pause(struct tegra_xudc *xudc, unsigned int ep)
{
	u32 val;

	val = xudc_readl(xudc, EP_PAUSE);
	if (val & BIT(ep))
		return;
	val |= BIT(ep);

	xudc_writel(xudc, val, EP_PAUSE);

	xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));

	xudc_writel(xudc, BIT(ep), EP_STCHG);
}

static void ep_unpause(struct tegra_xudc *xudc, unsigned int ep)
{
	u32 val;

	val = xudc_readl(xudc, EP_PAUSE);
	if (!(val & BIT(ep)))
		return;
	val &= ~BIT(ep);

	xudc_writel(xudc, val, EP_PAUSE);

	xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));

	xudc_writel(xudc, BIT(ep), EP_STCHG);
}

static void ep_unpause_all(struct tegra_xudc *xudc)
{
	u32 val;

	val = xudc_readl(xudc, EP_PAUSE);

	xudc_writel(xudc, 0, EP_PAUSE);

	xudc_readl_poll(xudc, EP_STCHG, val, val);

	xudc_writel(xudc, val, EP_STCHG);
}

static void ep_halt(struct tegra_xudc *xudc, unsigned int ep)
{
	u32 val;

	val = xudc_readl(xudc, EP_HALT);
	if (val & BIT(ep))
		return;
	val |= BIT(ep);
	xudc_writel(xudc, val, EP_HALT);

	xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));

	xudc_writel(xudc, BIT(ep), EP_STCHG);
}

static void ep_unhalt(struct tegra_xudc *xudc, unsigned int ep)
{
	u32 val;

	val = xudc_readl(xudc, EP_HALT);
	if (!(val & BIT(ep)))
		return;
	val &= ~BIT(ep);
	xudc_writel(xudc, val, EP_HALT);

	xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));

	xudc_writel(xudc, BIT(ep), EP_STCHG);
}

static void ep_unhalt_all(struct tegra_xudc *xudc)
{
	u32 val;

	val = xudc_readl(xudc, EP_HALT);
	if (!val)
		return;
	xudc_writel(xudc, 0, EP_HALT);

	xudc_readl_poll(xudc, EP_STCHG, val, val);

	xudc_writel(xudc, val, EP_STCHG);
}

static void ep_wait_for_stopped(struct tegra_xudc *xudc, unsigned int ep)
{
	xudc_readl_poll(xudc, EP_STOPPED, BIT(ep), BIT(ep));
	xudc_writel(xudc, BIT(ep), EP_STOPPED);
}

static void ep_wait_for_inactive(struct tegra_xudc *xudc, unsigned int ep)
{
	xudc_readl_poll(xudc, EP_THREAD_ACTIVE, BIT(ep), 0);
}
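
/*
 * Summary of the handshake used by the helpers above (behaviour inferred
 * from the code, not from documentation): software updates EP_PAUSE or
 * EP_HALT, polls EP_STCHG until the controller acknowledges the state
 * change, then writes the bit back to EP_STCHG to clear it.
 */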

static void tegra_xudc_req_done(struct tegra_xudc_ep *ep,
				struct tegra_xudc_request *req, int status)
{
	struct tegra_xudc *xudc = ep->xudc;

	dev_dbg(xudc->dev, "completing request %p on EP %u with status %d\n",
		req, ep->index, status);

	if (likely(req->usb_req.status == -EINPROGRESS))
		req->usb_req.status = status;

	list_del_init(&req->list);

	if (usb_endpoint_xfer_control(ep->desc)) {
		usb_gadget_unmap_request(&xudc->gadget, &req->usb_req,
					 (xudc->setup_state ==
					  DATA_STAGE_XFER));
	} else {
		usb_gadget_unmap_request(&xudc->gadget, &req->usb_req,
					 usb_endpoint_dir_in(ep->desc));
	}

	spin_unlock(&xudc->lock);
	usb_gadget_giveback_request(&ep->usb_ep, &req->usb_req);
	spin_lock(&xudc->lock);
}

static void tegra_xudc_ep_nuke(struct tegra_xudc_ep *ep, int status)
{
	struct tegra_xudc_request *req;

	while (!list_empty(&ep->queue)) {
		req = list_first_entry(&ep->queue, struct tegra_xudc_request,
				       list);
		tegra_xudc_req_done(ep, req, status);
	}
}

static unsigned int ep_available_trbs(struct tegra_xudc_ep *ep)
{
	if (ep->ring_full)
		return 0;

	if (ep->deq_ptr > ep->enq_ptr)
		return ep->deq_ptr - ep->enq_ptr - 1;

	return XUDC_TRANSFER_RING_SIZE - (ep->enq_ptr - ep->deq_ptr) - 2;
}
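
/*
 * Worked example (illustrative): with XUDC_TRANSFER_RING_SIZE = 64 and an
 * empty ring (enq_ptr == deq_ptr, !ring_full), ep_available_trbs() reports
 * 64 - 0 - 2 = 62 usable TRBs; the subtraction keeps one slot for the link
 * TRB and one to distinguish a full ring from an empty one.
 */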

static void tegra_xudc_queue_one_trb(struct tegra_xudc_ep *ep,
				     struct tegra_xudc_request *req,
				     struct tegra_xudc_trb *trb,
				     bool ioc)
{
	struct tegra_xudc *xudc = ep->xudc;
	dma_addr_t buf_addr;
	size_t len;

	len = min_t(size_t, XUDC_TRB_MAX_BUFFER_SIZE, req->usb_req.length -
		    req->buf_queued);
	if (len > 0)
		buf_addr = req->usb_req.dma + req->buf_queued;
	else
		buf_addr = 0;

	trb_write_data_ptr(trb, buf_addr);

	trb_write_transfer_len(trb, len);
	trb_write_td_size(trb, req->trbs_needed - req->trbs_queued - 1);

	if (req->trbs_queued == req->trbs_needed - 1 ||
	    (req->need_zlp && req->trbs_queued == req->trbs_needed - 2))
		trb_write_chain(trb, 0);
	else
		trb_write_chain(trb, 1);

	trb_write_ioc(trb, ioc);

	if (usb_endpoint_dir_out(ep->desc) ||
	    (usb_endpoint_xfer_control(ep->desc) &&
	     (xudc->setup_state == DATA_STAGE_RECV)))
		trb_write_isp(trb, 1);
	else
		trb_write_isp(trb, 0);

	if (usb_endpoint_xfer_control(ep->desc)) {
		if (xudc->setup_state == DATA_STAGE_XFER ||
		    xudc->setup_state == DATA_STAGE_RECV)
			trb_write_type(trb, TRB_TYPE_DATA_STAGE);
		else
			trb_write_type(trb, TRB_TYPE_STATUS_STAGE);

		if (xudc->setup_state == DATA_STAGE_XFER ||
		    xudc->setup_state == STATUS_STAGE_XFER)
			trb_write_data_stage_dir(trb, 1);
		else
			trb_write_data_stage_dir(trb, 0);
	} else if (usb_endpoint_xfer_isoc(ep->desc)) {
		trb_write_type(trb, TRB_TYPE_ISOCH);
		trb_write_sia(trb, 1);
		trb_write_frame_id(trb, 0);
		trb_write_tlbpc(trb, 0);
	} else if (usb_ss_max_streams(ep->comp_desc)) {
		trb_write_type(trb, TRB_TYPE_STREAM);
		trb_write_stream_id(trb, req->usb_req.stream_id);
	} else {
		trb_write_type(trb, TRB_TYPE_NORMAL);
		trb_write_stream_id(trb, 0);
	}

	trb_write_cycle(trb, ep->pcs);

	req->trbs_queued++;
	req->buf_queued += len;

	dump_trb(xudc, "TRANSFER", trb);
}

static unsigned int tegra_xudc_queue_trbs(struct tegra_xudc_ep *ep,
					  struct tegra_xudc_request *req)
{
	unsigned int i, count, available;
	bool wait_td = false;

	available = ep_available_trbs(ep);
	count = req->trbs_needed - req->trbs_queued;
	if (available < count) {
		count = available;
		ep->ring_full = true;
	}

	/*
	 * To generate a zero-length packet on the USB bus, SW needs to
	 * schedule a standalone zero-length TD. Per the HW's behavior, SW
	 * needs to schedule TDs in different ways for different endpoint
	 * types:
	 *
	 * For control endpoints:
	 * - Data stage TD (IOC = 1, CH = 0)
	 * - Ring doorbell and wait transfer event
	 * - Data stage TD for ZLP (IOC = 1, CH = 0)
	 * - Ring doorbell
	 *
	 * For bulk and interrupt endpoints:
	 * - Normal transfer TD (IOC = 0, CH = 0)
	 * - Normal transfer TD for ZLP (IOC = 1, CH = 0)
	 * - Ring doorbell
	 */

	if (req->need_zlp && usb_endpoint_xfer_control(ep->desc) && count > 1)
		wait_td = true;

	if (!req->first_trb)
		req->first_trb = &ep->transfer_ring[ep->enq_ptr];

	for (i = 0; i < count; i++) {
		struct tegra_xudc_trb *trb = &ep->transfer_ring[ep->enq_ptr];
		bool ioc = false;

		if ((i == count - 1) || (wait_td && i == count - 2))
			ioc = true;

		tegra_xudc_queue_one_trb(ep, req, trb, ioc);
		req->last_trb = trb;

		ep->enq_ptr++;
		if (ep->enq_ptr == XUDC_TRANSFER_RING_SIZE - 1) {
			trb = &ep->transfer_ring[ep->enq_ptr];
			trb_write_cycle(trb, ep->pcs);
			ep->pcs = !ep->pcs;
			ep->enq_ptr = 0;
		}

		if (ioc)
			break;
	}

	return count;
}

static void tegra_xudc_ep_ring_doorbell(struct tegra_xudc_ep *ep)
{
	struct tegra_xudc *xudc = ep->xudc;
	u32 val;

	if (list_empty(&ep->queue))
		return;

	val = DB_TARGET(ep->index);
	if (usb_endpoint_xfer_control(ep->desc)) {
		val |= DB_STREAMID(xudc->setup_seq_num);
	} else if (usb_ss_max_streams(ep->comp_desc) > 0) {
		struct tegra_xudc_request *req;

		/* Don't ring the doorbell if the stream has been rejected. */
		if (ep->stream_rejected)
			return;

		req = list_first_entry(&ep->queue, struct tegra_xudc_request,
				       list);
		val |= DB_STREAMID(req->usb_req.stream_id);
	}

	dev_dbg(xudc->dev, "ring doorbell: %#x\n", val);
	xudc_writel(xudc, val, DB);
}

static void tegra_xudc_ep_kick_queue(struct tegra_xudc_ep *ep)
{
	struct tegra_xudc_request *req;
	bool trbs_queued = false;

	list_for_each_entry(req, &ep->queue, list) {
		if (ep->ring_full)
			break;

		if (tegra_xudc_queue_trbs(ep, req) > 0)
			trbs_queued = true;
	}

	if (trbs_queued)
		tegra_xudc_ep_ring_doorbell(ep);
}

static int
__tegra_xudc_ep_queue(struct tegra_xudc_ep *ep, struct tegra_xudc_request *req)
{
	struct tegra_xudc *xudc = ep->xudc;
	int err;

	if (usb_endpoint_xfer_control(ep->desc) && !list_empty(&ep->queue)) {
		dev_err(xudc->dev, "control EP has pending transfers\n");
		return -EINVAL;
	}

	if (usb_endpoint_xfer_control(ep->desc)) {
		err = usb_gadget_map_request(&xudc->gadget, &req->usb_req,
					     (xudc->setup_state ==
					      DATA_STAGE_XFER));
	} else {
		err = usb_gadget_map_request(&xudc->gadget, &req->usb_req,
					     usb_endpoint_dir_in(ep->desc));
	}

	if (err < 0) {
		dev_err(xudc->dev, "failed to map request: %d\n", err);
		return err;
	}

	req->first_trb = NULL;
	req->last_trb = NULL;
	req->buf_queued = 0;
	req->trbs_queued = 0;
	req->need_zlp = false;
	req->trbs_needed = DIV_ROUND_UP(req->usb_req.length,
					XUDC_TRB_MAX_BUFFER_SIZE);
	if (req->usb_req.length == 0)
		req->trbs_needed++;

	if (!usb_endpoint_xfer_isoc(ep->desc) &&
	    req->usb_req.zero && req->usb_req.length &&
	    ((req->usb_req.length % ep->usb_ep.maxpacket) == 0)) {
		req->trbs_needed++;
		req->need_zlp = true;
	}

	req->usb_req.status = -EINPROGRESS;
	req->usb_req.actual = 0;

	list_add_tail(&req->list, &ep->queue);

	tegra_xudc_ep_kick_queue(ep);

	return 0;
}

static int
tegra_xudc_ep_queue(struct usb_ep *usb_ep, struct usb_request *usb_req,
		    gfp_t gfp)
{
	struct tegra_xudc_request *req;
	struct tegra_xudc_ep *ep;
	struct tegra_xudc *xudc;
	unsigned long flags;
	int ret;

	if (!usb_ep || !usb_req)
		return -EINVAL;

	ep = to_xudc_ep(usb_ep);
	req = to_xudc_req(usb_req);
	xudc = ep->xudc;

	spin_lock_irqsave(&xudc->lock, flags);
	if (xudc->powergated || !ep->desc) {
		ret = -ESHUTDOWN;
		goto unlock;
	}

	ret = __tegra_xudc_ep_queue(ep, req);
unlock:
	spin_unlock_irqrestore(&xudc->lock, flags);

	return ret;
}
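
/*
 * Typical caller sequence (illustrative sketch from a gadget function
 * driver's perspective; my_complete is a hypothetical callback):
 *
 *	struct usb_request *req = usb_ep_alloc_request(ep, GFP_ATOMIC);
 *
 *	req->buf = buf;
 *	req->length = len;
 *	req->complete = my_complete;
 *	usb_ep_queue(ep, req, GFP_ATOMIC);
 *
 * usb_ep_queue() reaches tegra_xudc_ep_queue() above through
 * tegra_xudc_ep_ops.
 */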

static void squeeze_transfer_ring(struct tegra_xudc_ep *ep,
				  struct tegra_xudc_request *req)
{
	struct tegra_xudc_trb *trb = req->first_trb;
	bool pcs_enq = trb_read_cycle(trb);
	bool pcs;

	/*
	 * Clear out all the TRBs that are part of or follow the cancelled
	 * request, and restore each TRB's cycle bit to its last un-enqueued
	 * state.
	 */
	while (trb != &ep->transfer_ring[ep->enq_ptr]) {
		pcs = trb_read_cycle(trb);
		memset(trb, 0, sizeof(*trb));
		trb_write_cycle(trb, !pcs);
		trb++;

		if (trb_read_type(trb) == TRB_TYPE_LINK)
			trb = ep->transfer_ring;
	}

	/* Requests will be re-queued at the start of the cancelled request. */
	ep->enq_ptr = req->first_trb - ep->transfer_ring;
	/*
	 * Retrieve the correct cycle bit state from the first TRB of
	 * the cancelled request.
	 */
	ep->pcs = pcs_enq;
	ep->ring_full = false;
	list_for_each_entry_continue(req, &ep->queue, list) {
		req->usb_req.status = -EINPROGRESS;
		req->usb_req.actual = 0;

		req->first_trb = NULL;
		req->last_trb = NULL;
		req->buf_queued = 0;
		req->trbs_queued = 0;
	}
}

/*
 * Determine if the given TRB is in the range [first TRB, last TRB] for the
 * given request.
 */
static bool trb_in_request(struct tegra_xudc_ep *ep,
			   struct tegra_xudc_request *req,
			   struct tegra_xudc_trb *trb)
{
	dev_dbg(ep->xudc->dev, "%s: request %p -> %p; trb %p\n", __func__,
		req->first_trb, req->last_trb, trb);

	if (trb >= req->first_trb && (trb <= req->last_trb ||
				      req->last_trb < req->first_trb))
		return true;

	if (trb < req->first_trb && trb <= req->last_trb &&
	    req->last_trb < req->first_trb)
		return true;

	return false;
}

/*
 * Determine if the given TRB is in the range [EP enqueue pointer, first TRB)
 * for the given endpoint and request.
 */
static bool trb_before_request(struct tegra_xudc_ep *ep,
			       struct tegra_xudc_request *req,
			       struct tegra_xudc_trb *trb)
{
	struct tegra_xudc_trb *enq_trb = &ep->transfer_ring[ep->enq_ptr];

	dev_dbg(ep->xudc->dev, "%s: request %p -> %p; enq ptr: %p; trb %p\n",
		__func__, req->first_trb, req->last_trb, enq_trb, trb);

	if (trb < req->first_trb && (enq_trb <= trb ||
				     req->first_trb < enq_trb))
		return true;

	if (trb > req->first_trb && req->first_trb < enq_trb && enq_trb <= trb)
		return true;

	return false;
}
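
/*
 * Worked example for the wrap-around cases above (illustrative): with
 * first_trb at ring index 60 and last_trb at index 2, a TRB at index 61
 * satisfies the first condition in trb_in_request() (trb >= first_trb and
 * last_trb < first_trb), while a TRB at index 1 satisfies the second.
 */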

static int
__tegra_xudc_ep_dequeue(struct tegra_xudc_ep *ep,
			struct tegra_xudc_request *req)
{
	struct tegra_xudc *xudc = ep->xudc;
	struct tegra_xudc_request *r;
	struct tegra_xudc_trb *deq_trb;
	bool busy, kick_queue = false;
	int ret = 0;

	/* Make sure the request is actually queued to this endpoint. */
	list_for_each_entry(r, &ep->queue, list) {
		if (r == req)
			break;
	}

	if (r != req)
		return -EINVAL;

	/* Request hasn't been queued in the transfer ring yet. */
	if (!req->trbs_queued) {
		tegra_xudc_req_done(ep, req, -ECONNRESET);
		return 0;
	}

	/* Halt DMA for this endpoint. */
	if (ep_ctx_read_state(ep->context) == EP_STATE_RUNNING) {
		ep_pause(xudc, ep->index);
		ep_wait_for_inactive(xudc, ep->index);
	}

	deq_trb = trb_phys_to_virt(ep, ep_ctx_read_deq_ptr(ep->context));
	/* Is the hardware processing the TRB at the dequeue pointer? */
	busy = (trb_read_cycle(deq_trb) == ep_ctx_read_dcs(ep->context));

	if (trb_in_request(ep, req, deq_trb) && busy) {
		/*
		 * Request has been partially completed or it hasn't
		 * started processing yet.
		 */
		dma_addr_t deq_ptr;

		squeeze_transfer_ring(ep, req);

		req->usb_req.actual = ep_ctx_read_edtla(ep->context);
		tegra_xudc_req_done(ep, req, -ECONNRESET);
		kick_queue = true;

		/* EDTLA is > 0: request has been partially completed */
		if (req->usb_req.actual > 0) {
			/*
			 * Abort the pending transfer and update the dequeue
			 * pointer.
			 */
			ep_ctx_write_edtla(ep->context, 0);
			ep_ctx_write_partial_td(ep->context, 0);
			ep_ctx_write_data_offset(ep->context, 0);

			deq_ptr = trb_virt_to_phys(ep,
					&ep->transfer_ring[ep->enq_ptr]);

			if (dma_mapping_error(xudc->dev, deq_ptr)) {
				ret = -EINVAL;
			} else {
				ep_ctx_write_deq_ptr(ep->context, deq_ptr);
				ep_ctx_write_dcs(ep->context, ep->pcs);
				ep_reload(xudc, ep->index);
			}
		}
	} else if (trb_before_request(ep, req, deq_trb) && busy) {
		/* Request hasn't started processing yet. */
		squeeze_transfer_ring(ep, req);

		tegra_xudc_req_done(ep, req, -ECONNRESET);
		kick_queue = true;
	} else {
		/*
		 * Request has completed, but we haven't processed the
		 * completion event yet.
		 */
		tegra_xudc_req_done(ep, req, -ECONNRESET);
		ret = -EINVAL;
	}

	/* Resume the endpoint. */
	ep_unpause(xudc, ep->index);

	if (kick_queue)
		tegra_xudc_ep_kick_queue(ep);

	return ret;
}

static int
tegra_xudc_ep_dequeue(struct usb_ep *usb_ep, struct usb_request *usb_req)
{
	struct tegra_xudc_request *req;
	struct tegra_xudc_ep *ep;
	struct tegra_xudc *xudc;
	unsigned long flags;
	int ret;

	if (!usb_ep || !usb_req)
		return -EINVAL;

	ep = to_xudc_ep(usb_ep);
	req = to_xudc_req(usb_req);
	xudc = ep->xudc;

	spin_lock_irqsave(&xudc->lock, flags);

	if (xudc->powergated || !ep->desc) {
		ret = -ESHUTDOWN;
		goto unlock;
	}

	ret = __tegra_xudc_ep_dequeue(ep, req);
unlock:
	spin_unlock_irqrestore(&xudc->lock, flags);

	return ret;
}

static int __tegra_xudc_ep_set_halt(struct tegra_xudc_ep *ep, bool halt)
{
	struct tegra_xudc *xudc = ep->xudc;

	if (!ep->desc)
		return -EINVAL;

	if (usb_endpoint_xfer_isoc(ep->desc)) {
		dev_err(xudc->dev, "can't halt isoc EP\n");
		return -ENOTSUPP;
	}

	if (!!(xudc_readl(xudc, EP_HALT) & BIT(ep->index)) == halt) {
		dev_dbg(xudc->dev, "EP %u already %s\n", ep->index,
			halt ? "halted" : "not halted");
		return 0;
	}

	if (halt) {
		ep_halt(xudc, ep->index);
	} else {
		ep_ctx_write_state(ep->context, EP_STATE_DISABLED);

		ep_reload(xudc, ep->index);

		ep_ctx_write_state(ep->context, EP_STATE_RUNNING);
		ep_ctx_write_seq_num(ep->context, 0);

		ep_reload(xudc, ep->index);
		ep_unpause(xudc, ep->index);
		ep_unhalt(xudc, ep->index);

		tegra_xudc_ep_ring_doorbell(ep);
	}

	return 0;
}

static int tegra_xudc_ep_set_halt(struct usb_ep *usb_ep, int value)
{
	struct tegra_xudc_ep *ep;
	struct tegra_xudc *xudc;
	unsigned long flags;
	int ret;

	if (!usb_ep)
		return -EINVAL;

	ep = to_xudc_ep(usb_ep);
	xudc = ep->xudc;

	spin_lock_irqsave(&xudc->lock, flags);
	if (xudc->powergated) {
		ret = -ESHUTDOWN;
		goto unlock;
	}

	if (value && usb_endpoint_dir_in(ep->desc) &&
	    !list_empty(&ep->queue)) {
		dev_err(xudc->dev, "can't halt EP with requests pending\n");
		ret = -EAGAIN;
		goto unlock;
	}

	ret = __tegra_xudc_ep_set_halt(ep, value);
unlock:
	spin_unlock_irqrestore(&xudc->lock, flags);

	return ret;
}

static void tegra_xudc_ep_context_setup(struct tegra_xudc_ep *ep)
{
	const struct usb_endpoint_descriptor *desc = ep->desc;
	const struct usb_ss_ep_comp_descriptor *comp_desc = ep->comp_desc;
	struct tegra_xudc *xudc = ep->xudc;
	u16 maxpacket, maxburst = 0, esit = 0;
	u32 val;

	maxpacket = usb_endpoint_maxp(desc) & 0x7ff;
	if (xudc->gadget.speed == USB_SPEED_SUPER) {
		if (!usb_endpoint_xfer_control(desc))
			maxburst = comp_desc->bMaxBurst;

		if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc))
			esit = le16_to_cpu(comp_desc->wBytesPerInterval);
	} else if ((xudc->gadget.speed < USB_SPEED_SUPER) &&
		   (usb_endpoint_xfer_int(desc) ||
		    usb_endpoint_xfer_isoc(desc))) {
		if (xudc->gadget.speed == USB_SPEED_HIGH) {
			maxburst = (usb_endpoint_maxp(desc) >> 11) & 0x3;
			if (maxburst == 0x3) {
				dev_warn(xudc->dev,
					 "invalid endpoint maxburst\n");
				maxburst = 0x2;
			}
		}
		esit = maxpacket * (maxburst + 1);
	}

	memset(ep->context, 0, sizeof(*ep->context));

	ep_ctx_write_state(ep->context, EP_STATE_RUNNING);
	ep_ctx_write_interval(ep->context, desc->bInterval);
	if (xudc->gadget.speed == USB_SPEED_SUPER) {
		if (usb_endpoint_xfer_isoc(desc)) {
			ep_ctx_write_mult(ep->context,
					  comp_desc->bmAttributes & 0x3);
		}

		if (usb_endpoint_xfer_bulk(desc)) {
			ep_ctx_write_max_pstreams(ep->context,
						  comp_desc->bmAttributes &
						  0x1f);
			ep_ctx_write_lsa(ep->context, 1);
		}
	}

	if (!usb_endpoint_xfer_control(desc) && usb_endpoint_dir_out(desc))
		val = usb_endpoint_type(desc);
	else
		val = usb_endpoint_type(desc) + EP_TYPE_CONTROL;

	ep_ctx_write_type(ep->context, val);
	ep_ctx_write_cerr(ep->context, 0x3);
	ep_ctx_write_max_packet_size(ep->context, maxpacket);
	ep_ctx_write_max_burst_size(ep->context, maxburst);

	ep_ctx_write_deq_ptr(ep->context, ep->transfer_ring_phys);
	ep_ctx_write_dcs(ep->context, ep->pcs);

	/* Select a reasonable average TRB length based on endpoint type. */
	switch (usb_endpoint_type(desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		val = 8;
		break;
	case USB_ENDPOINT_XFER_INT:
		val = 1024;
		break;
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_ISOC:
	default:
		val = 3072;
		break;
	}

	ep_ctx_write_avg_trb_len(ep->context, val);
	ep_ctx_write_max_esit_payload(ep->context, esit);

	ep_ctx_write_cerrcnt(ep->context, 0x3);
}
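
/*
 * Worked example (illustrative): a high-speed isochronous endpoint with
 * maxpacket = 1024 and two additional transactions per microframe
 * (maxburst = 2) gets esit = 1024 * (2 + 1) = 3072 bytes per service
 * interval.
 */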

static void setup_link_trb(struct tegra_xudc_ep *ep,
			   struct tegra_xudc_trb *trb)
{
	trb_write_data_ptr(trb, ep->transfer_ring_phys);
	trb_write_type(trb, TRB_TYPE_LINK);
	trb_write_toggle_cycle(trb, 1);
}

static int __tegra_xudc_ep_disable(struct tegra_xudc_ep *ep)
{
	struct tegra_xudc *xudc = ep->xudc;

	if (ep_ctx_read_state(ep->context) == EP_STATE_DISABLED) {
		dev_err(xudc->dev, "endpoint %u already disabled\n",
			ep->index);
		return -EINVAL;
	}

	ep_ctx_write_state(ep->context, EP_STATE_DISABLED);

	ep_reload(xudc, ep->index);

	tegra_xudc_ep_nuke(ep, -ESHUTDOWN);

	xudc->nr_enabled_eps--;
	if (usb_endpoint_xfer_isoc(ep->desc))
		xudc->nr_isoch_eps--;

	ep->desc = NULL;
	ep->comp_desc = NULL;

	memset(ep->context, 0, sizeof(*ep->context));

	ep_unpause(xudc, ep->index);
	ep_unhalt(xudc, ep->index);
	if (xudc_readl(xudc, EP_STOPPED) & BIT(ep->index))
		xudc_writel(xudc, BIT(ep->index), EP_STOPPED);

	/*
	 * If this is the last endpoint disabled in a de-configure request,
	 * switch back to address state.
	 */
	if ((xudc->device_state == USB_STATE_CONFIGURED) &&
	    (xudc->nr_enabled_eps == 1)) {
		u32 val;

		xudc->device_state = USB_STATE_ADDRESS;
		usb_gadget_set_state(&xudc->gadget, xudc->device_state);

		val = xudc_readl(xudc, CTRL);
		val &= ~CTRL_RUN;
		xudc_writel(xudc, val, CTRL);
	}

	dev_info(xudc->dev, "ep %u disabled\n", ep->index);

	return 0;
}

static int tegra_xudc_ep_disable(struct usb_ep *usb_ep)
{
	struct tegra_xudc_ep *ep;
	struct tegra_xudc *xudc;
	unsigned long flags;
	int ret;

	if (!usb_ep)
		return -EINVAL;

	ep = to_xudc_ep(usb_ep);
	xudc = ep->xudc;

	spin_lock_irqsave(&xudc->lock, flags);
	if (xudc->powergated) {
		ret = -ESHUTDOWN;
		goto unlock;
	}

	ret = __tegra_xudc_ep_disable(ep);
unlock:
	spin_unlock_irqrestore(&xudc->lock, flags);

	return ret;
}

static int __tegra_xudc_ep_enable(struct tegra_xudc_ep *ep,
				  const struct usb_endpoint_descriptor *desc)
{
	struct tegra_xudc *xudc = ep->xudc;
	unsigned int i;
	u32 val;

	if (xudc->gadget.speed == USB_SPEED_SUPER &&
	    !usb_endpoint_xfer_control(desc) && !ep->usb_ep.comp_desc)
		return -EINVAL;

	/* Disable the EP if it is not already disabled. */
	if (ep_ctx_read_state(ep->context) != EP_STATE_DISABLED)
		__tegra_xudc_ep_disable(ep);

	ep->desc = desc;
	ep->comp_desc = ep->usb_ep.comp_desc;

	if (usb_endpoint_xfer_isoc(desc)) {
		if (xudc->nr_isoch_eps > XUDC_MAX_ISOCH_EPS) {
			dev_err(xudc->dev, "too many isoch endpoints\n");
			return -EBUSY;
		}
		xudc->nr_isoch_eps++;
	}

	memset(ep->transfer_ring, 0, XUDC_TRANSFER_RING_SIZE *
	       sizeof(*ep->transfer_ring));
	setup_link_trb(ep, &ep->transfer_ring[XUDC_TRANSFER_RING_SIZE - 1]);

	ep->enq_ptr = 0;
	ep->deq_ptr = 0;
	ep->pcs = true;
	ep->ring_full = false;
	xudc->nr_enabled_eps++;

	tegra_xudc_ep_context_setup(ep);

	/*
	 * No need to reload and un-halt EP0. This will be done automatically
	 * once a valid SETUP packet is received.
	 */
	if (usb_endpoint_xfer_control(desc))
		goto out;

	/*
	 * Transition to the configured state once the first non-control
	 * endpoint is enabled.
	 */
	if (xudc->device_state == USB_STATE_ADDRESS) {
		val = xudc_readl(xudc, CTRL);
		val |= CTRL_RUN;
		xudc_writel(xudc, val, CTRL);

		xudc->device_state = USB_STATE_CONFIGURED;
		usb_gadget_set_state(&xudc->gadget, xudc->device_state);
	}

	if (usb_endpoint_xfer_isoc(desc)) {
		/*
		 * Pause all bulk endpoints when enabling an isoch endpoint
		 * to ensure the isoch endpoint is allocated enough bandwidth.
		 */
		for (i = 0; i < ARRAY_SIZE(xudc->ep); i++) {
			if (xudc->ep[i].desc &&
			    usb_endpoint_xfer_bulk(xudc->ep[i].desc))
				ep_pause(xudc, i);
		}
	}

	ep_reload(xudc, ep->index);
	ep_unpause(xudc, ep->index);
	ep_unhalt(xudc, ep->index);

	if (usb_endpoint_xfer_isoc(desc)) {
		for (i = 0; i < ARRAY_SIZE(xudc->ep); i++) {
			if (xudc->ep[i].desc &&
			    usb_endpoint_xfer_bulk(xudc->ep[i].desc))
				ep_unpause(xudc, i);
		}
	}

out:
	dev_info(xudc->dev, "EP %u (type: %s, dir: %s) enabled\n", ep->index,
		 usb_ep_type_string(usb_endpoint_type(ep->desc)),
		 usb_endpoint_dir_in(ep->desc) ? "in" : "out");

	return 0;
}
"in" : "out"); 1721 1722 return 0; 1723 } 1724 1725 static int tegra_xudc_ep_enable(struct usb_ep *usb_ep, 1726 const struct usb_endpoint_descriptor *desc) 1727 { 1728 struct tegra_xudc_ep *ep; 1729 struct tegra_xudc *xudc; 1730 unsigned long flags; 1731 int ret; 1732 1733 if (!usb_ep || !desc || (desc->bDescriptorType != USB_DT_ENDPOINT)) 1734 return -EINVAL; 1735 1736 ep = to_xudc_ep(usb_ep); 1737 xudc = ep->xudc; 1738 1739 spin_lock_irqsave(&xudc->lock, flags); 1740 if (xudc->powergated) { 1741 ret = -ESHUTDOWN; 1742 goto unlock; 1743 } 1744 1745 ret = __tegra_xudc_ep_enable(ep, desc); 1746 unlock: 1747 spin_unlock_irqrestore(&xudc->lock, flags); 1748 1749 return ret; 1750 } 1751 1752 static struct usb_request * 1753 tegra_xudc_ep_alloc_request(struct usb_ep *usb_ep, gfp_t gfp) 1754 { 1755 struct tegra_xudc_request *req; 1756 1757 req = kzalloc(sizeof(*req), gfp); 1758 if (!req) 1759 return NULL; 1760 1761 INIT_LIST_HEAD(&req->list); 1762 1763 return &req->usb_req; 1764 } 1765 1766 static void tegra_xudc_ep_free_request(struct usb_ep *usb_ep, 1767 struct usb_request *usb_req) 1768 { 1769 struct tegra_xudc_request *req = to_xudc_req(usb_req); 1770 1771 kfree(req); 1772 } 1773 1774 static struct usb_ep_ops tegra_xudc_ep_ops = { 1775 .enable = tegra_xudc_ep_enable, 1776 .disable = tegra_xudc_ep_disable, 1777 .alloc_request = tegra_xudc_ep_alloc_request, 1778 .free_request = tegra_xudc_ep_free_request, 1779 .queue = tegra_xudc_ep_queue, 1780 .dequeue = tegra_xudc_ep_dequeue, 1781 .set_halt = tegra_xudc_ep_set_halt, 1782 }; 1783 1784 static int tegra_xudc_ep0_enable(struct usb_ep *usb_ep, 1785 const struct usb_endpoint_descriptor *desc) 1786 { 1787 return -EBUSY; 1788 } 1789 1790 static int tegra_xudc_ep0_disable(struct usb_ep *usb_ep) 1791 { 1792 return -EBUSY; 1793 } 1794 1795 static struct usb_ep_ops tegra_xudc_ep0_ops = { 1796 .enable = tegra_xudc_ep0_enable, 1797 .disable = tegra_xudc_ep0_disable, 1798 .alloc_request = tegra_xudc_ep_alloc_request, 1799 .free_request = tegra_xudc_ep_free_request, 1800 .queue = tegra_xudc_ep_queue, 1801 .dequeue = tegra_xudc_ep_dequeue, 1802 .set_halt = tegra_xudc_ep_set_halt, 1803 }; 1804 1805 static int tegra_xudc_gadget_get_frame(struct usb_gadget *gadget) 1806 { 1807 struct tegra_xudc *xudc = to_xudc(gadget); 1808 unsigned long flags; 1809 int ret; 1810 1811 spin_lock_irqsave(&xudc->lock, flags); 1812 if (xudc->powergated) { 1813 ret = -ESHUTDOWN; 1814 goto unlock; 1815 } 1816 1817 ret = (xudc_readl(xudc, MFINDEX) & MFINDEX_FRAME_MASK) >> 1818 MFINDEX_FRAME_SHIFT; 1819 unlock: 1820 spin_unlock_irqrestore(&xudc->lock, flags); 1821 1822 return ret; 1823 } 1824 1825 static void tegra_xudc_resume_device_state(struct tegra_xudc *xudc) 1826 { 1827 unsigned int i; 1828 u32 val; 1829 1830 ep_unpause_all(xudc); 1831 1832 /* Direct link to U0. */ 1833 val = xudc_readl(xudc, PORTSC); 1834 if (((val & PORTSC_PLS_MASK) >> PORTSC_PLS_SHIFT) != PORTSC_PLS_U0) { 1835 val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK); 1836 val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_U0); 1837 xudc_writel(xudc, val, PORTSC); 1838 } 1839 1840 if (xudc->device_state == USB_STATE_SUSPENDED) { 1841 xudc->device_state = xudc->resume_state; 1842 usb_gadget_set_state(&xudc->gadget, xudc->device_state); 1843 xudc->resume_state = 0; 1844 } 1845 1846 /* 1847 * Doorbells may be dropped if they are sent too soon (< ~200ns) 1848 * after unpausing the endpoint. Wait for 500ns just to be safe. 

static int tegra_xudc_gadget_pullup(struct usb_gadget *gadget, int is_on)
{
	struct tegra_xudc *xudc = to_xudc(gadget);
	unsigned long flags;
	u32 val;

	pm_runtime_get_sync(xudc->dev);

	spin_lock_irqsave(&xudc->lock, flags);

	if (is_on != xudc->pullup) {
		val = xudc_readl(xudc, CTRL);
		if (is_on)
			val |= CTRL_ENABLE;
		else
			val &= ~CTRL_ENABLE;
		xudc_writel(xudc, val, CTRL);
	}

	xudc->pullup = is_on;
	dev_dbg(xudc->dev, "%s: pullup:%d", __func__, is_on);

	spin_unlock_irqrestore(&xudc->lock, flags);

	pm_runtime_put(xudc->dev);

	return 0;
}

static int tegra_xudc_gadget_start(struct usb_gadget *gadget,
				   struct usb_gadget_driver *driver)
{
	struct tegra_xudc *xudc = to_xudc(gadget);
	unsigned long flags;
	u32 val;
	int ret;

	if (!driver)
		return -EINVAL;

	pm_runtime_get_sync(xudc->dev);

	spin_lock_irqsave(&xudc->lock, flags);

	if (xudc->driver) {
		ret = -EBUSY;
		goto unlock;
	}

	xudc->setup_state = WAIT_FOR_SETUP;
	xudc->device_state = USB_STATE_DEFAULT;
	usb_gadget_set_state(&xudc->gadget, xudc->device_state);

	ret = __tegra_xudc_ep_enable(&xudc->ep[0], &tegra_xudc_ep0_desc);
	if (ret < 0)
		goto unlock;

	val = xudc_readl(xudc, CTRL);
	val |= CTRL_IE | CTRL_LSE;
	xudc_writel(xudc, val, CTRL);

	val = xudc_readl(xudc, PORTHALT);
	val |= PORTHALT_STCHG_INTR_EN;
	xudc_writel(xudc, val, PORTHALT);

	if (xudc->pullup) {
		val = xudc_readl(xudc, CTRL);
		val |= CTRL_ENABLE;
		xudc_writel(xudc, val, CTRL);
	}

	xudc->driver = driver;
unlock:
	dev_dbg(xudc->dev, "%s: ret value is %d", __func__, ret);
	spin_unlock_irqrestore(&xudc->lock, flags);

	pm_runtime_put(xudc->dev);

	return ret;
}

static int tegra_xudc_gadget_stop(struct usb_gadget *gadget)
{
	struct tegra_xudc *xudc = to_xudc(gadget);
	unsigned long flags;
	u32 val;

	pm_runtime_get_sync(xudc->dev);

	spin_lock_irqsave(&xudc->lock, flags);

	val = xudc_readl(xudc, CTRL);
	val &= ~(CTRL_IE | CTRL_ENABLE);
	xudc_writel(xudc, val, CTRL);

	__tegra_xudc_ep_disable(&xudc->ep[0]);

	xudc->driver = NULL;
	dev_dbg(xudc->dev, "Gadget stopped");

	spin_unlock_irqrestore(&xudc->lock, flags);

	pm_runtime_put(xudc->dev);

	return 0;
}

static int tegra_xudc_set_selfpowered(struct usb_gadget *gadget, int is_on)
{
	struct tegra_xudc *xudc = to_xudc(gadget);

	dev_dbg(xudc->dev, "%s: %d\n", __func__, is_on);
	xudc->selfpowered = !!is_on;

	return 0;
}

static struct usb_gadget_ops tegra_xudc_gadget_ops = {
	.get_frame = tegra_xudc_gadget_get_frame,
	.wakeup = tegra_xudc_gadget_wakeup,
	.pullup = tegra_xudc_gadget_pullup,
	.udc_start = tegra_xudc_gadget_start,
	.udc_stop = tegra_xudc_gadget_stop,
	.set_selfpowered = tegra_xudc_set_selfpowered,
};
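
/*
 * These ops back the usb_gadget that the probe path registers with the
 * gadget core (via usb_add_gadget_udc()), which is how the core invokes
 * get_frame, wakeup, pullup and friends.
 */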

static void no_op_complete(struct usb_ep *ep, struct usb_request *req)
{
}

static int
tegra_xudc_ep0_queue_status(struct tegra_xudc *xudc,
			    void (*cmpl)(struct usb_ep *, struct usb_request *))
{
	xudc->ep0_req->usb_req.buf = NULL;
	xudc->ep0_req->usb_req.dma = 0;
	xudc->ep0_req->usb_req.length = 0;
	xudc->ep0_req->usb_req.complete = cmpl;
	xudc->ep0_req->usb_req.context = xudc;

	return __tegra_xudc_ep_queue(&xudc->ep[0], xudc->ep0_req);
}

static int
tegra_xudc_ep0_queue_data(struct tegra_xudc *xudc, void *buf, size_t len,
			  void (*cmpl)(struct usb_ep *, struct usb_request *))
{
	xudc->ep0_req->usb_req.buf = buf;
	xudc->ep0_req->usb_req.length = len;
	xudc->ep0_req->usb_req.complete = cmpl;
	xudc->ep0_req->usb_req.context = xudc;

	return __tegra_xudc_ep_queue(&xudc->ep[0], xudc->ep0_req);
}

static void tegra_xudc_ep0_req_done(struct tegra_xudc *xudc)
{
	switch (xudc->setup_state) {
	case DATA_STAGE_XFER:
		xudc->setup_state = STATUS_STAGE_RECV;
		tegra_xudc_ep0_queue_status(xudc, no_op_complete);
		break;
	case DATA_STAGE_RECV:
		xudc->setup_state = STATUS_STAGE_XFER;
		tegra_xudc_ep0_queue_status(xudc, no_op_complete);
		break;
	default:
		xudc->setup_state = WAIT_FOR_SETUP;
		break;
	}
}

static int tegra_xudc_ep0_delegate_req(struct tegra_xudc *xudc,
				       struct usb_ctrlrequest *ctrl)
{
	int ret;

	spin_unlock(&xudc->lock);
	ret = xudc->driver->setup(&xudc->gadget, ctrl);
	spin_lock(&xudc->lock);

	return ret;
}
xudc_writel(xudc, val, PORTPM); 2115 break; 2116 case USB_DEVICE_U1_ENABLE: 2117 case USB_DEVICE_U2_ENABLE: 2118 if ((xudc->device_state != USB_STATE_CONFIGURED) || 2119 (xudc->gadget.speed != USB_SPEED_SUPER)) 2120 return -EINVAL; 2121 2122 val = xudc_readl(xudc, PORTPM); 2123 if ((feature == USB_DEVICE_U1_ENABLE) && 2124 xudc->soc->u1_enable) { 2125 if (set) 2126 val |= PORTPM_U1E; 2127 else 2128 val &= ~PORTPM_U1E; 2129 } 2130 2131 if ((feature == USB_DEVICE_U2_ENABLE) && 2132 xudc->soc->u2_enable) { 2133 if (set) 2134 val |= PORTPM_U2E; 2135 else 2136 val &= ~PORTPM_U2E; 2137 } 2138 2139 xudc_writel(xudc, val, PORTPM); 2140 break; 2141 case USB_DEVICE_TEST_MODE: 2142 if (xudc->gadget.speed != USB_SPEED_HIGH) 2143 return -EINVAL; 2144 2145 if (!set) 2146 return -EINVAL; 2147 2148 xudc->test_mode_pattern = index >> 8; 2149 break; 2150 default: 2151 return -EINVAL; 2152 } 2153 2154 break; 2155 case USB_RECIP_INTERFACE: 2156 if (xudc->device_state != USB_STATE_CONFIGURED) 2157 return -EINVAL; 2158 2159 switch (feature) { 2160 case USB_INTRF_FUNC_SUSPEND: 2161 if (set) { 2162 val = xudc_readl(xudc, PORTPM); 2163 2164 if (index & USB_INTRF_FUNC_SUSPEND_RW) 2165 val |= PORTPM_FRWE; 2166 else 2167 val &= ~PORTPM_FRWE; 2168 2169 xudc_writel(xudc, val, PORTPM); 2170 } 2171 2172 return tegra_xudc_ep0_delegate_req(xudc, ctrl); 2173 default: 2174 return -EINVAL; 2175 } 2176 2177 break; 2178 case USB_RECIP_ENDPOINT: 2179 ep = (index & USB_ENDPOINT_NUMBER_MASK) * 2 + 2180 ((index & USB_DIR_IN) ? 1 : 0); 2181 2182 if ((xudc->device_state == USB_STATE_DEFAULT) || 2183 ((xudc->device_state == USB_STATE_ADDRESS) && 2184 (index != 0))) 2185 return -EINVAL; 2186 2187 ret = __tegra_xudc_ep_set_halt(&xudc->ep[ep], set); 2188 if (ret < 0) 2189 return ret; 2190 break; 2191 default: 2192 return -EINVAL; 2193 } 2194 2195 return tegra_xudc_ep0_queue_status(xudc, set_feature_complete); 2196 } 2197 2198 static int tegra_xudc_ep0_get_status(struct tegra_xudc *xudc, 2199 struct usb_ctrlrequest *ctrl) 2200 { 2201 struct tegra_xudc_ep_context *ep_ctx; 2202 u32 val, ep, index = le16_to_cpu(ctrl->wIndex); 2203 u16 status = 0; 2204 2205 if (!(ctrl->bRequestType & USB_DIR_IN)) 2206 return -EINVAL; 2207 2208 if ((le16_to_cpu(ctrl->wValue) != 0) || 2209 (le16_to_cpu(ctrl->wLength) != 2)) 2210 return -EINVAL; 2211 2212 switch (ctrl->bRequestType & USB_RECIP_MASK) { 2213 case USB_RECIP_DEVICE: 2214 val = xudc_readl(xudc, PORTPM); 2215 2216 if (xudc->selfpowered) 2217 status |= BIT(USB_DEVICE_SELF_POWERED); 2218 2219 if ((xudc->gadget.speed < USB_SPEED_SUPER) && 2220 (val & PORTPM_RWE)) 2221 status |= BIT(USB_DEVICE_REMOTE_WAKEUP); 2222 2223 if (xudc->gadget.speed == USB_SPEED_SUPER) { 2224 if (val & PORTPM_U1E) 2225 status |= BIT(USB_DEV_STAT_U1_ENABLED); 2226 if (val & PORTPM_U2E) 2227 status |= BIT(USB_DEV_STAT_U2_ENABLED); 2228 } 2229 break; 2230 case USB_RECIP_INTERFACE: 2231 if (xudc->gadget.speed == USB_SPEED_SUPER) { 2232 status |= USB_INTRF_STAT_FUNC_RW_CAP; 2233 val = xudc_readl(xudc, PORTPM); 2234 if (val & PORTPM_FRWE) 2235 status |= USB_INTRF_STAT_FUNC_RW; 2236 } 2237 break; 2238 case USB_RECIP_ENDPOINT: 2239 ep = (index & USB_ENDPOINT_NUMBER_MASK) * 2 + 2240 ((index & USB_DIR_IN) ? 
1 : 0); 2241 ep_ctx = &xudc->ep_context[ep]; 2242 2243 if ((xudc->device_state != USB_STATE_CONFIGURED) && 2244 ((xudc->device_state != USB_STATE_ADDRESS) || (ep != 0))) 2245 return -EINVAL; 2246 2247 if (ep_ctx_read_state(ep_ctx) == EP_STATE_DISABLED) 2248 return -EINVAL; 2249 2250 if (xudc_readl(xudc, EP_HALT) & BIT(ep)) 2251 status |= BIT(USB_ENDPOINT_HALT); 2252 break; 2253 default: 2254 return -EINVAL; 2255 } 2256 2257 xudc->status_buf = cpu_to_le16(status); 2258 return tegra_xudc_ep0_queue_data(xudc, &xudc->status_buf, 2259 sizeof(xudc->status_buf), 2260 no_op_complete); 2261 } 2262 2263 static void set_sel_complete(struct usb_ep *ep, struct usb_request *req) 2264 { 2265 /* Nothing to do with SEL values */ 2266 } 2267 2268 static int tegra_xudc_ep0_set_sel(struct tegra_xudc *xudc, 2269 struct usb_ctrlrequest *ctrl) 2270 { 2271 if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE | 2272 USB_TYPE_STANDARD)) 2273 return -EINVAL; 2274 2275 if (xudc->device_state == USB_STATE_DEFAULT) 2276 return -EINVAL; 2277 2278 if ((le16_to_cpu(ctrl->wIndex) != 0) || 2279 (le16_to_cpu(ctrl->wValue) != 0) || 2280 (le16_to_cpu(ctrl->wLength) != 6)) 2281 return -EINVAL; 2282 2283 return tegra_xudc_ep0_queue_data(xudc, &xudc->sel_timing, 2284 sizeof(xudc->sel_timing), 2285 set_sel_complete); 2286 } 2287 2288 static void set_isoch_delay_complete(struct usb_ep *ep, struct usb_request *req) 2289 { 2290 /* Nothing to do with isoch delay */ 2291 } 2292 2293 static int tegra_xudc_ep0_set_isoch_delay(struct tegra_xudc *xudc, 2294 struct usb_ctrlrequest *ctrl) 2295 { 2296 u32 delay = le16_to_cpu(ctrl->wValue); 2297 2298 if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE | 2299 USB_TYPE_STANDARD)) 2300 return -EINVAL; 2301 2302 if ((delay > 65535) || (le16_to_cpu(ctrl->wIndex) != 0) || 2303 (le16_to_cpu(ctrl->wLength) != 0)) 2304 return -EINVAL; 2305 2306 xudc->isoch_delay = delay; 2307 2308 return tegra_xudc_ep0_queue_status(xudc, set_isoch_delay_complete); 2309 } 2310 2311 static void set_address_complete(struct usb_ep *ep, struct usb_request *req) 2312 { 2313 struct tegra_xudc *xudc = req->context; 2314 2315 if ((xudc->device_state == USB_STATE_DEFAULT) && 2316 (xudc->dev_addr != 0)) { 2317 xudc->device_state = USB_STATE_ADDRESS; 2318 usb_gadget_set_state(&xudc->gadget, xudc->device_state); 2319 } else if ((xudc->device_state == USB_STATE_ADDRESS) && 2320 (xudc->dev_addr == 0)) { 2321 xudc->device_state = USB_STATE_DEFAULT; 2322 usb_gadget_set_state(&xudc->gadget, xudc->device_state); 2323 } 2324 } 2325 2326 static int tegra_xudc_ep0_set_address(struct tegra_xudc *xudc, 2327 struct usb_ctrlrequest *ctrl) 2328 { 2329 struct tegra_xudc_ep *ep0 = &xudc->ep[0]; 2330 u32 val, addr = le16_to_cpu(ctrl->wValue); 2331 2332 if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE | 2333 USB_TYPE_STANDARD)) 2334 return -EINVAL; 2335 2336 if ((addr > 127) || (le16_to_cpu(ctrl->wIndex) != 0) || 2337 (le16_to_cpu(ctrl->wLength) != 0)) 2338 return -EINVAL; 2339 2340 if (xudc->device_state == USB_STATE_CONFIGURED) 2341 return -EINVAL; 2342 2343 dev_dbg(xudc->dev, "set address: %u\n", addr); 2344 2345 xudc->dev_addr = addr; 2346 val = xudc_readl(xudc, CTRL); 2347 val &= ~(CTRL_DEVADDR_MASK); 2348 val |= CTRL_DEVADDR(addr); 2349 xudc_writel(xudc, val, CTRL); 2350 2351 ep_ctx_write_devaddr(ep0->context, addr); 2352 2353 return tegra_xudc_ep0_queue_status(xudc, set_address_complete); 2354 } 2355 2356 static int tegra_xudc_ep0_standard_req(struct tegra_xudc *xudc, 2357 struct usb_ctrlrequest *ctrl) 2358 { 2359 int ret; 
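/* Standard requests that the controller state machine must observe are decoded here; anything else is passed to the gadget driver's ->setup() handler via the default case. */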
2360 2361 switch (ctrl->bRequest) { 2362 case USB_REQ_GET_STATUS: 2363 dev_dbg(xudc->dev, "USB_REQ_GET_STATUS\n"); 2364 ret = tegra_xudc_ep0_get_status(xudc, ctrl); 2365 break; 2366 case USB_REQ_SET_ADDRESS: 2367 dev_dbg(xudc->dev, "USB_REQ_SET_ADDRESS\n"); 2368 ret = tegra_xudc_ep0_set_address(xudc, ctrl); 2369 break; 2370 case USB_REQ_SET_SEL: 2371 dev_dbg(xudc->dev, "USB_REQ_SET_SEL\n"); 2372 ret = tegra_xudc_ep0_set_sel(xudc, ctrl); 2373 break; 2374 case USB_REQ_SET_ISOCH_DELAY: 2375 dev_dbg(xudc->dev, "USB_REQ_SET_ISOCH_DELAY\n"); 2376 ret = tegra_xudc_ep0_set_isoch_delay(xudc, ctrl); 2377 break; 2378 case USB_REQ_CLEAR_FEATURE: 2379 case USB_REQ_SET_FEATURE: 2380 dev_dbg(xudc->dev, "USB_REQ_CLEAR/SET_FEATURE\n"); 2381 ret = tegra_xudc_ep0_set_feature(xudc, ctrl); 2382 break; 2383 case USB_REQ_SET_CONFIGURATION: 2384 dev_dbg(xudc->dev, "USB_REQ_SET_CONFIGURATION\n"); 2385 /* 2386 * In theory we need to clear RUN bit before status stage of 2387 * deconfig request sent, but this seems to be causing problems. 2388 * Clear RUN once all endpoints are disabled instead. 2389 */ 2390 fallthrough; 2391 default: 2392 ret = tegra_xudc_ep0_delegate_req(xudc, ctrl); 2393 break; 2394 } 2395 2396 return ret; 2397 } 2398 2399 static void tegra_xudc_handle_ep0_setup_packet(struct tegra_xudc *xudc, 2400 struct usb_ctrlrequest *ctrl, 2401 u16 seq_num) 2402 { 2403 int ret; 2404 2405 xudc->setup_seq_num = seq_num; 2406 2407 /* Ensure EP0 is unhalted. */ 2408 ep_unhalt(xudc, 0); 2409 2410 /* 2411 * On Tegra210, setup packets with sequence numbers 0xfffe or 0xffff 2412 * are invalid. Halt EP0 until we get a valid packet. 2413 */ 2414 if (xudc->soc->invalid_seq_num && 2415 (seq_num == 0xfffe || seq_num == 0xffff)) { 2416 dev_warn(xudc->dev, "invalid sequence number detected\n"); 2417 ep_halt(xudc, 0); 2418 return; 2419 } 2420 2421 if (ctrl->wLength) 2422 xudc->setup_state = (ctrl->bRequestType & USB_DIR_IN) ? 2423 DATA_STAGE_XFER : DATA_STAGE_RECV; 2424 else 2425 xudc->setup_state = STATUS_STAGE_XFER; 2426 2427 if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) 2428 ret = tegra_xudc_ep0_standard_req(xudc, ctrl); 2429 else 2430 ret = tegra_xudc_ep0_delegate_req(xudc, ctrl); 2431 2432 if (ret < 0) { 2433 dev_warn(xudc->dev, "setup request failed: %d\n", ret); 2434 xudc->setup_state = WAIT_FOR_SETUP; 2435 ep_halt(xudc, 0); 2436 } 2437 } 2438 2439 static void tegra_xudc_handle_ep0_event(struct tegra_xudc *xudc, 2440 struct tegra_xudc_trb *event) 2441 { 2442 struct usb_ctrlrequest *ctrl = (struct usb_ctrlrequest *)event; 2443 u16 seq_num = trb_read_seq_num(event); 2444 2445 if (xudc->setup_state != WAIT_FOR_SETUP) { 2446 /* 2447 * The controller is in the process of handling another 2448 * setup request. Queue subsequent requests and handle 2449 * the last one once the controller reports a sequence 2450 * number error. 
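* Only the most recent packet is kept: a newer setup packet from the host implicitly supersedes any earlier one that has not been handled yet.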
2451 */ 2452 memcpy(&xudc->setup_packet.ctrl_req, ctrl, sizeof(*ctrl)); 2453 xudc->setup_packet.seq_num = seq_num; 2454 xudc->queued_setup_packet = true; 2455 } else { 2456 tegra_xudc_handle_ep0_setup_packet(xudc, ctrl, seq_num); 2457 } 2458 } 2459 2460 static struct tegra_xudc_request * 2461 trb_to_request(struct tegra_xudc_ep *ep, struct tegra_xudc_trb *trb) 2462 { 2463 struct tegra_xudc_request *req; 2464 2465 list_for_each_entry(req, &ep->queue, list) { 2466 if (!req->trbs_queued) 2467 break; 2468 2469 if (trb_in_request(ep, req, trb)) 2470 return req; 2471 } 2472 2473 return NULL; 2474 } 2475 2476 static void tegra_xudc_handle_transfer_completion(struct tegra_xudc *xudc, 2477 struct tegra_xudc_ep *ep, 2478 struct tegra_xudc_trb *event) 2479 { 2480 struct tegra_xudc_request *req; 2481 struct tegra_xudc_trb *trb; 2482 bool short_packet; 2483 2484 short_packet = (trb_read_cmpl_code(event) == 2485 TRB_CMPL_CODE_SHORT_PACKET); 2486 2487 trb = trb_phys_to_virt(ep, trb_read_data_ptr(event)); 2488 req = trb_to_request(ep, trb); 2489 2490 /* 2491 * TDs are complete on short packet or when the completed TRB is the 2492 * last TRB in the TD (the CHAIN bit is unset). 2493 */ 2494 if (req && (short_packet || (!trb_read_chain(trb) && 2495 (req->trbs_needed == req->trbs_queued)))) { 2496 struct tegra_xudc_trb *last = req->last_trb; 2497 unsigned int residual; 2498 2499 residual = trb_read_transfer_len(event); 2500 req->usb_req.actual = req->usb_req.length - residual; 2501 2502 dev_dbg(xudc->dev, "bytes transferred %u / %u\n", 2503 req->usb_req.actual, req->usb_req.length); 2504 2505 tegra_xudc_req_done(ep, req, 0); 2506 2507 if (ep->desc && usb_endpoint_xfer_control(ep->desc)) 2508 tegra_xudc_ep0_req_done(xudc); 2509 2510 /* 2511 * Advance the dequeue pointer past the end of the current TD 2512 * on short packet completion. 2513 */ 2514 if (short_packet) { 2515 ep->deq_ptr = (last - ep->transfer_ring) + 1; 2516 if (ep->deq_ptr == XUDC_TRANSFER_RING_SIZE - 1) 2517 ep->deq_ptr = 0; 2518 } 2519 } else if (!req) { 2520 dev_warn(xudc->dev, "transfer event on dequeued request\n"); 2521 } 2522 2523 if (ep->desc) 2524 tegra_xudc_ep_kick_queue(ep); 2525 } 2526 2527 static void tegra_xudc_handle_transfer_event(struct tegra_xudc *xudc, 2528 struct tegra_xudc_trb *event) 2529 { 2530 unsigned int ep_index = trb_read_endpoint_id(event); 2531 struct tegra_xudc_ep *ep = &xudc->ep[ep_index]; 2532 struct tegra_xudc_trb *trb; 2533 u16 comp_code; 2534 2535 if (ep_ctx_read_state(ep->context) == EP_STATE_DISABLED) { 2536 dev_warn(xudc->dev, "transfer event on disabled EP %u\n", 2537 ep_index); 2538 return; 2539 } 2540 2541 /* Update transfer ring dequeue pointer. 
*/ 2542 trb = trb_phys_to_virt(ep, trb_read_data_ptr(event)); 2543 comp_code = trb_read_cmpl_code(event); 2544 if (comp_code != TRB_CMPL_CODE_BABBLE_DETECTED_ERR) { 2545 ep->deq_ptr = (trb - ep->transfer_ring) + 1; 2546 2547 if (ep->deq_ptr == XUDC_TRANSFER_RING_SIZE - 1) 2548 ep->deq_ptr = 0; 2549 ep->ring_full = false; 2550 } 2551 2552 switch (comp_code) { 2553 case TRB_CMPL_CODE_SUCCESS: 2554 case TRB_CMPL_CODE_SHORT_PACKET: 2555 tegra_xudc_handle_transfer_completion(xudc, ep, event); 2556 break; 2557 case TRB_CMPL_CODE_HOST_REJECTED: 2558 dev_info(xudc->dev, "stream rejected on EP %u\n", ep_index); 2559 2560 ep->stream_rejected = true; 2561 break; 2562 case TRB_CMPL_CODE_PRIME_PIPE_RECEIVED: 2563 dev_info(xudc->dev, "prime pipe received on EP %u\n", ep_index); 2564 2565 if (ep->stream_rejected) { 2566 ep->stream_rejected = false; 2567 /* 2568 * An EP is stopped when a stream is rejected. Wait 2569 * for the EP to report that it is stopped and then 2570 * un-stop it. 2571 */ 2572 ep_wait_for_stopped(xudc, ep_index); 2573 } 2574 tegra_xudc_ep_ring_doorbell(ep); 2575 break; 2576 case TRB_CMPL_CODE_BABBLE_DETECTED_ERR: 2577 /* 2578 * Wait for the EP to be stopped so the controller stops 2579 * processing doorbells. 2580 */ 2581 ep_wait_for_stopped(xudc, ep_index); 2582 ep->enq_ptr = ep->deq_ptr; 2583 tegra_xudc_ep_nuke(ep, -EIO); 2584 fallthrough; 2585 case TRB_CMPL_CODE_STREAM_NUMP_ERROR: 2586 case TRB_CMPL_CODE_CTRL_DIR_ERR: 2587 case TRB_CMPL_CODE_INVALID_STREAM_TYPE_ERR: 2588 case TRB_CMPL_CODE_RING_UNDERRUN: 2589 case TRB_CMPL_CODE_RING_OVERRUN: 2590 case TRB_CMPL_CODE_ISOCH_BUFFER_OVERRUN: 2591 case TRB_CMPL_CODE_USB_TRANS_ERR: 2592 case TRB_CMPL_CODE_TRB_ERR: 2593 dev_err(xudc->dev, "completion error %#x on EP %u\n", 2594 comp_code, ep_index); 2595 2596 ep_halt(xudc, ep_index); 2597 break; 2598 case TRB_CMPL_CODE_CTRL_SEQNUM_ERR: 2599 dev_info(xudc->dev, "sequence number error\n"); 2600 2601 /* 2602 * Kill any queued control request and skip to the last 2603 * setup packet we received. 2604 */ 2605 tegra_xudc_ep_nuke(ep, -EINVAL); 2606 xudc->setup_state = WAIT_FOR_SETUP; 2607 if (!xudc->queued_setup_packet) 2608 break; 2609 2610 tegra_xudc_handle_ep0_setup_packet(xudc, 2611 &xudc->setup_packet.ctrl_req, 2612 xudc->setup_packet.seq_num); 2613 xudc->queued_setup_packet = false; 2614 break; 2615 case TRB_CMPL_CODE_STOPPED: 2616 dev_dbg(xudc->dev, "stop completion code on EP %u\n", 2617 ep_index); 2618 2619 /* Disconnected. */ 2620 tegra_xudc_ep_nuke(ep, -ECONNREFUSED); 2621 break; 2622 default: 2623 dev_dbg(xudc->dev, "completion event %#x on EP %u\n", 2624 comp_code, ep_index); 2625 break; 2626 } 2627 } 2628 2629 static void tegra_xudc_reset(struct tegra_xudc *xudc) 2630 { 2631 struct tegra_xudc_ep *ep0 = &xudc->ep[0]; 2632 dma_addr_t deq_ptr; 2633 unsigned int i; 2634 2635 xudc->setup_state = WAIT_FOR_SETUP; 2636 xudc->device_state = USB_STATE_DEFAULT; 2637 usb_gadget_set_state(&xudc->gadget, xudc->device_state); 2638 2639 ep_unpause_all(xudc); 2640 2641 for (i = 0; i < ARRAY_SIZE(xudc->ep); i++) 2642 tegra_xudc_ep_nuke(&xudc->ep[i], -ESHUTDOWN); 2643 2644 /* 2645 * Reset sequence number and dequeue pointer to flush the transfer 2646 * ring. 
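* Making the dequeue pointer equal to the enqueue pointer marks the ring as empty without reallocating it.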
2647 */ 2648 ep0->deq_ptr = ep0->enq_ptr; 2649 ep0->ring_full = false; 2650 2651 xudc->setup_seq_num = 0; 2652 xudc->queued_setup_packet = false; 2653 2654 ep_ctx_write_seq_num(ep0->context, xudc->setup_seq_num); 2655 2656 deq_ptr = trb_virt_to_phys(ep0, &ep0->transfer_ring[ep0->deq_ptr]); 2657 2658 if (!dma_mapping_error(xudc->dev, deq_ptr)) { 2659 ep_ctx_write_deq_ptr(ep0->context, deq_ptr); 2660 ep_ctx_write_dcs(ep0->context, ep0->pcs); 2661 } 2662 2663 ep_unhalt_all(xudc); 2664 ep_reload(xudc, 0); 2665 ep_unpause(xudc, 0); 2666 } 2667 2668 static void tegra_xudc_port_connect(struct tegra_xudc *xudc) 2669 { 2670 struct tegra_xudc_ep *ep0 = &xudc->ep[0]; 2671 u16 maxpacket; 2672 u32 val; 2673 2674 val = (xudc_readl(xudc, PORTSC) & PORTSC_PS_MASK) >> PORTSC_PS_SHIFT; 2675 switch (val) { 2676 case PORTSC_PS_LS: 2677 xudc->gadget.speed = USB_SPEED_LOW; 2678 break; 2679 case PORTSC_PS_FS: 2680 xudc->gadget.speed = USB_SPEED_FULL; 2681 break; 2682 case PORTSC_PS_HS: 2683 xudc->gadget.speed = USB_SPEED_HIGH; 2684 break; 2685 case PORTSC_PS_SS: 2686 xudc->gadget.speed = USB_SPEED_SUPER; 2687 break; 2688 default: 2689 xudc->gadget.speed = USB_SPEED_UNKNOWN; 2690 break; 2691 } 2692 2693 xudc->device_state = USB_STATE_DEFAULT; 2694 usb_gadget_set_state(&xudc->gadget, xudc->device_state); 2695 2696 xudc->setup_state = WAIT_FOR_SETUP; 2697 2698 if (xudc->gadget.speed == USB_SPEED_SUPER) 2699 maxpacket = 512; 2700 else 2701 maxpacket = 64; 2702 2703 ep_ctx_write_max_packet_size(ep0->context, maxpacket); 2704 tegra_xudc_ep0_desc.wMaxPacketSize = cpu_to_le16(maxpacket); 2705 usb_ep_set_maxpacket_limit(&ep0->usb_ep, maxpacket); 2706 2707 if (!xudc->soc->u1_enable) { 2708 val = xudc_readl(xudc, PORTPM); 2709 val &= ~(PORTPM_U1TIMEOUT_MASK); 2710 xudc_writel(xudc, val, PORTPM); 2711 } 2712 2713 if (!xudc->soc->u2_enable) { 2714 val = xudc_readl(xudc, PORTPM); 2715 val &= ~(PORTPM_U2TIMEOUT_MASK); 2716 xudc_writel(xudc, val, PORTPM); 2717 } 2718 2719 if (xudc->gadget.speed <= USB_SPEED_HIGH) { 2720 val = xudc_readl(xudc, PORTPM); 2721 val &= ~(PORTPM_L1S_MASK); 2722 if (xudc->soc->lpm_enable) 2723 val |= PORTPM_L1S(PORTPM_L1S_ACCEPT); 2724 else 2725 val |= PORTPM_L1S(PORTPM_L1S_NYET); 2726 xudc_writel(xudc, val, PORTPM); 2727 } 2728 2729 val = xudc_readl(xudc, ST); 2730 if (val & ST_RC) 2731 xudc_writel(xudc, ST_RC, ST); 2732 } 2733 2734 static void tegra_xudc_port_disconnect(struct tegra_xudc *xudc) 2735 { 2736 tegra_xudc_reset(xudc); 2737 2738 if (xudc->driver && xudc->driver->disconnect) { 2739 spin_unlock(&xudc->lock); 2740 xudc->driver->disconnect(&xudc->gadget); 2741 spin_lock(&xudc->lock); 2742 } 2743 2744 xudc->device_state = USB_STATE_NOTATTACHED; 2745 usb_gadget_set_state(&xudc->gadget, xudc->device_state); 2746 2747 complete(&xudc->disconnect_complete); 2748 } 2749 2750 static void tegra_xudc_port_reset(struct tegra_xudc *xudc) 2751 { 2752 tegra_xudc_reset(xudc); 2753 2754 if (xudc->driver) { 2755 spin_unlock(&xudc->lock); 2756 usb_gadget_udc_reset(&xudc->gadget, xudc->driver); 2757 spin_lock(&xudc->lock); 2758 } 2759 2760 tegra_xudc_port_connect(xudc); 2761 } 2762 2763 static void tegra_xudc_port_suspend(struct tegra_xudc *xudc) 2764 { 2765 dev_dbg(xudc->dev, "port suspend\n"); 2766 2767 xudc->resume_state = xudc->device_state; 2768 xudc->device_state = USB_STATE_SUSPENDED; 2769 usb_gadget_set_state(&xudc->gadget, xudc->device_state); 2770 2771 if (xudc->driver->suspend) { 2772 spin_unlock(&xudc->lock); 2773 xudc->driver->suspend(&xudc->gadget); 2774 spin_lock(&xudc->lock); 2775 } 2776 } 2777 
2778 static void tegra_xudc_port_resume(struct tegra_xudc *xudc) 2779 { 2780 dev_dbg(xudc->dev, "port resume\n"); 2781 2782 tegra_xudc_resume_device_state(xudc); 2783 2784 if (xudc->driver->resume) { 2785 spin_unlock(&xudc->lock); 2786 xudc->driver->resume(&xudc->gadget); 2787 spin_lock(&xudc->lock); 2788 } 2789 } 2790 2791 static inline void clear_port_change(struct tegra_xudc *xudc, u32 flag) 2792 { 2793 u32 val; 2794 2795 val = xudc_readl(xudc, PORTSC); 2796 val &= ~PORTSC_CHANGE_MASK; 2797 val |= flag; 2798 xudc_writel(xudc, val, PORTSC); 2799 } 2800 2801 static void __tegra_xudc_handle_port_status(struct tegra_xudc *xudc) 2802 { 2803 u32 portsc, porthalt; 2804 2805 porthalt = xudc_readl(xudc, PORTHALT); 2806 if ((porthalt & PORTHALT_STCHG_REQ) && 2807 (porthalt & PORTHALT_HALT_LTSSM)) { 2808 dev_dbg(xudc->dev, "STCHG_REQ, PORTHALT = %#x\n", porthalt); 2809 porthalt &= ~PORTHALT_HALT_LTSSM; 2810 xudc_writel(xudc, porthalt, PORTHALT); 2811 } 2812 2813 portsc = xudc_readl(xudc, PORTSC); 2814 if ((portsc & PORTSC_PRC) && (portsc & PORTSC_PR)) { 2815 dev_dbg(xudc->dev, "PRC, PR, PORTSC = %#x\n", portsc); 2816 clear_port_change(xudc, PORTSC_PRC | PORTSC_PED); 2817 #define TOGGLE_VBUS_WAIT_MS 100 2818 if (xudc->soc->port_reset_quirk) { 2819 schedule_delayed_work(&xudc->port_reset_war_work, 2820 msecs_to_jiffies(TOGGLE_VBUS_WAIT_MS)); 2821 xudc->wait_for_sec_prc = 1; 2822 } 2823 } 2824 2825 if ((portsc & PORTSC_PRC) && !(portsc & PORTSC_PR)) { 2826 dev_dbg(xudc->dev, "PRC, Not PR, PORTSC = %#x\n", portsc); 2827 clear_port_change(xudc, PORTSC_PRC | PORTSC_PED); 2828 tegra_xudc_port_reset(xudc); 2829 cancel_delayed_work(&xudc->port_reset_war_work); 2830 xudc->wait_for_sec_prc = 0; 2831 } 2832 2833 portsc = xudc_readl(xudc, PORTSC); 2834 if (portsc & PORTSC_WRC) { 2835 dev_dbg(xudc->dev, "WRC, PORTSC = %#x\n", portsc); 2836 clear_port_change(xudc, PORTSC_WRC | PORTSC_PED); 2837 if (!(xudc_readl(xudc, PORTSC) & PORTSC_WPR)) 2838 tegra_xudc_port_reset(xudc); 2839 } 2840 2841 portsc = xudc_readl(xudc, PORTSC); 2842 if (portsc & PORTSC_CSC) { 2843 dev_dbg(xudc->dev, "CSC, PORTSC = %#x\n", portsc); 2844 clear_port_change(xudc, PORTSC_CSC); 2845 2846 if (portsc & PORTSC_CCS) 2847 tegra_xudc_port_connect(xudc); 2848 else 2849 tegra_xudc_port_disconnect(xudc); 2850 2851 if (xudc->wait_csc) { 2852 cancel_delayed_work(&xudc->plc_reset_work); 2853 xudc->wait_csc = false; 2854 } 2855 } 2856 2857 portsc = xudc_readl(xudc, PORTSC); 2858 if (portsc & PORTSC_PLC) { 2859 u32 pls = (portsc & PORTSC_PLS_MASK) >> PORTSC_PLS_SHIFT; 2860 2861 dev_dbg(xudc->dev, "PLC, PORTSC = %#x\n", portsc); 2862 clear_port_change(xudc, PORTSC_PLC); 2863 switch (pls) { 2864 case PORTSC_PLS_U3: 2865 tegra_xudc_port_suspend(xudc); 2866 break; 2867 case PORTSC_PLS_U0: 2868 if (xudc->gadget.speed < USB_SPEED_SUPER) 2869 tegra_xudc_port_resume(xudc); 2870 break; 2871 case PORTSC_PLS_RESUME: 2872 if (xudc->gadget.speed == USB_SPEED_SUPER) 2873 tegra_xudc_port_resume(xudc); 2874 break; 2875 case PORTSC_PLS_INACTIVE: 2876 schedule_delayed_work(&xudc->plc_reset_work, 2877 msecs_to_jiffies(TOGGLE_VBUS_WAIT_MS)); 2878 xudc->wait_csc = true; 2879 break; 2880 default: 2881 break; 2882 } 2883 } 2884 2885 if (portsc & PORTSC_CEC) { 2886 dev_warn(xudc->dev, "CEC, PORTSC = %#x\n", portsc); 2887 clear_port_change(xudc, PORTSC_CEC); 2888 } 2889 2890 dev_dbg(xudc->dev, "PORTSC = %#x\n", xudc_readl(xudc, PORTSC)); 2891 } 2892 2893 static void tegra_xudc_handle_port_status(struct tegra_xudc *xudc) 2894 { 2895 while ((xudc_readl(xudc, PORTSC) & 
PORTSC_CHANGE_MASK) || 2896 (xudc_readl(xudc, PORTHALT) & PORTHALT_STCHG_REQ)) 2897 __tegra_xudc_handle_port_status(xudc); 2898 } 2899 2900 static void tegra_xudc_handle_event(struct tegra_xudc *xudc, 2901 struct tegra_xudc_trb *event) 2902 { 2903 u32 type = trb_read_type(event); 2904 2905 dump_trb(xudc, "EVENT", event); 2906 2907 switch (type) { 2908 case TRB_TYPE_PORT_STATUS_CHANGE_EVENT: 2909 tegra_xudc_handle_port_status(xudc); 2910 break; 2911 case TRB_TYPE_TRANSFER_EVENT: 2912 tegra_xudc_handle_transfer_event(xudc, event); 2913 break; 2914 case TRB_TYPE_SETUP_PACKET_EVENT: 2915 tegra_xudc_handle_ep0_event(xudc, event); 2916 break; 2917 default: 2918 dev_info(xudc->dev, "Unrecognized TRB type = %#x\n", type); 2919 break; 2920 } 2921 } 2922 2923 static void tegra_xudc_process_event_ring(struct tegra_xudc *xudc) 2924 { 2925 struct tegra_xudc_trb *event; 2926 dma_addr_t erdp; 2927 2928 while (true) { 2929 event = xudc->event_ring[xudc->event_ring_index] + 2930 xudc->event_ring_deq_ptr; 2931 2932 if (trb_read_cycle(event) != xudc->ccs) 2933 break; 2934 2935 tegra_xudc_handle_event(xudc, event); 2936 2937 xudc->event_ring_deq_ptr++; 2938 if (xudc->event_ring_deq_ptr == XUDC_EVENT_RING_SIZE) { 2939 xudc->event_ring_deq_ptr = 0; 2940 xudc->event_ring_index++; 2941 } 2942 2943 if (xudc->event_ring_index == XUDC_NR_EVENT_RINGS) { 2944 xudc->event_ring_index = 0; 2945 xudc->ccs = !xudc->ccs; 2946 } 2947 } 2948 2949 erdp = xudc->event_ring_phys[xudc->event_ring_index] + 2950 xudc->event_ring_deq_ptr * sizeof(*event); 2951 2952 xudc_writel(xudc, upper_32_bits(erdp), ERDPHI); 2953 xudc_writel(xudc, lower_32_bits(erdp) | ERDPLO_EHB, ERDPLO); 2954 } 2955 2956 static irqreturn_t tegra_xudc_irq(int irq, void *data) 2957 { 2958 struct tegra_xudc *xudc = data; 2959 unsigned long flags; 2960 u32 val; 2961 2962 val = xudc_readl(xudc, ST); 2963 if (!(val & ST_IP)) 2964 return IRQ_NONE; 2965 xudc_writel(xudc, ST_IP, ST); 2966 2967 spin_lock_irqsave(&xudc->lock, flags); 2968 tegra_xudc_process_event_ring(xudc); 2969 spin_unlock_irqrestore(&xudc->lock, flags); 2970 2971 return IRQ_HANDLED; 2972 } 2973 2974 static int tegra_xudc_alloc_ep(struct tegra_xudc *xudc, unsigned int index) 2975 { 2976 struct tegra_xudc_ep *ep = &xudc->ep[index]; 2977 2978 ep->xudc = xudc; 2979 ep->index = index; 2980 ep->context = &xudc->ep_context[index]; 2981 INIT_LIST_HEAD(&ep->queue); 2982 2983 /* 2984 * EP1 would be the input endpoint corresponding to EP0, but since 2985 * EP0 is bi-directional, EP1 is unused. 2986 */ 2987 if (index == 1) 2988 return 0; 2989 2990 ep->transfer_ring = dma_pool_alloc(xudc->transfer_ring_pool, 2991 GFP_KERNEL, 2992 &ep->transfer_ring_phys); 2993 if (!ep->transfer_ring) 2994 return -ENOMEM; 2995 2996 if (index) { 2997 snprintf(ep->name, sizeof(ep->name), "ep%u%s", index / 2, 2998 (index % 2 == 0) ? 
"out" : "in"); 2999 ep->usb_ep.name = ep->name; 3000 usb_ep_set_maxpacket_limit(&ep->usb_ep, 1024); 3001 ep->usb_ep.max_streams = 16; 3002 ep->usb_ep.ops = &tegra_xudc_ep_ops; 3003 ep->usb_ep.caps.type_bulk = true; 3004 ep->usb_ep.caps.type_int = true; 3005 if (index & 1) 3006 ep->usb_ep.caps.dir_in = true; 3007 else 3008 ep->usb_ep.caps.dir_out = true; 3009 list_add_tail(&ep->usb_ep.ep_list, &xudc->gadget.ep_list); 3010 } else { 3011 strscpy(ep->name, "ep0", 3); 3012 ep->usb_ep.name = ep->name; 3013 usb_ep_set_maxpacket_limit(&ep->usb_ep, 512); 3014 ep->usb_ep.ops = &tegra_xudc_ep0_ops; 3015 ep->usb_ep.caps.type_control = true; 3016 ep->usb_ep.caps.dir_in = true; 3017 ep->usb_ep.caps.dir_out = true; 3018 } 3019 3020 return 0; 3021 } 3022 3023 static void tegra_xudc_free_ep(struct tegra_xudc *xudc, unsigned int index) 3024 { 3025 struct tegra_xudc_ep *ep = &xudc->ep[index]; 3026 3027 /* 3028 * EP1 would be the input endpoint corresponding to EP0, but since 3029 * EP0 is bi-directional, EP1 is unused. 3030 */ 3031 if (index == 1) 3032 return; 3033 3034 dma_pool_free(xudc->transfer_ring_pool, ep->transfer_ring, 3035 ep->transfer_ring_phys); 3036 } 3037 3038 static int tegra_xudc_alloc_eps(struct tegra_xudc *xudc) 3039 { 3040 struct usb_request *req; 3041 unsigned int i; 3042 int err; 3043 3044 xudc->ep_context = 3045 dma_alloc_coherent(xudc->dev, XUDC_NR_EPS * 3046 sizeof(*xudc->ep_context), 3047 &xudc->ep_context_phys, GFP_KERNEL); 3048 if (!xudc->ep_context) 3049 return -ENOMEM; 3050 3051 xudc->transfer_ring_pool = 3052 dmam_pool_create(dev_name(xudc->dev), xudc->dev, 3053 XUDC_TRANSFER_RING_SIZE * 3054 sizeof(struct tegra_xudc_trb), 3055 sizeof(struct tegra_xudc_trb), 0); 3056 if (!xudc->transfer_ring_pool) { 3057 err = -ENOMEM; 3058 goto free_ep_context; 3059 } 3060 3061 INIT_LIST_HEAD(&xudc->gadget.ep_list); 3062 for (i = 0; i < ARRAY_SIZE(xudc->ep); i++) { 3063 err = tegra_xudc_alloc_ep(xudc, i); 3064 if (err < 0) 3065 goto free_eps; 3066 } 3067 3068 req = tegra_xudc_ep_alloc_request(&xudc->ep[0].usb_ep, GFP_KERNEL); 3069 if (!req) { 3070 err = -ENOMEM; 3071 goto free_eps; 3072 } 3073 xudc->ep0_req = to_xudc_req(req); 3074 3075 return 0; 3076 3077 free_eps: 3078 for (; i > 0; i--) 3079 tegra_xudc_free_ep(xudc, i - 1); 3080 free_ep_context: 3081 dma_free_coherent(xudc->dev, XUDC_NR_EPS * sizeof(*xudc->ep_context), 3082 xudc->ep_context, xudc->ep_context_phys); 3083 return err; 3084 } 3085 3086 static void tegra_xudc_init_eps(struct tegra_xudc *xudc) 3087 { 3088 xudc_writel(xudc, lower_32_bits(xudc->ep_context_phys), ECPLO); 3089 xudc_writel(xudc, upper_32_bits(xudc->ep_context_phys), ECPHI); 3090 } 3091 3092 static void tegra_xudc_free_eps(struct tegra_xudc *xudc) 3093 { 3094 unsigned int i; 3095 3096 tegra_xudc_ep_free_request(&xudc->ep[0].usb_ep, 3097 &xudc->ep0_req->usb_req); 3098 3099 for (i = 0; i < ARRAY_SIZE(xudc->ep); i++) 3100 tegra_xudc_free_ep(xudc, i); 3101 3102 dma_free_coherent(xudc->dev, XUDC_NR_EPS * sizeof(*xudc->ep_context), 3103 xudc->ep_context, xudc->ep_context_phys); 3104 } 3105 3106 static int tegra_xudc_alloc_event_ring(struct tegra_xudc *xudc) 3107 { 3108 unsigned int i; 3109 3110 for (i = 0; i < ARRAY_SIZE(xudc->event_ring); i++) { 3111 xudc->event_ring[i] = 3112 dma_alloc_coherent(xudc->dev, XUDC_EVENT_RING_SIZE * 3113 sizeof(*xudc->event_ring[i]), 3114 &xudc->event_ring_phys[i], 3115 GFP_KERNEL); 3116 if (!xudc->event_ring[i]) 3117 goto free_dma; 3118 } 3119 3120 return 0; 3121 3122 free_dma: 3123 for (; i > 0; i--) { 3124 dma_free_coherent(xudc->dev, 
XUDC_EVENT_RING_SIZE * 3125 sizeof(*xudc->event_ring[i - 1]), 3126 xudc->event_ring[i - 1], 3127 xudc->event_ring_phys[i - 1]); 3128 } 3129 return -ENOMEM; 3130 } 3131 3132 static void tegra_xudc_init_event_ring(struct tegra_xudc *xudc) 3133 { 3134 unsigned int i; 3135 u32 val; 3136 3137 val = xudc_readl(xudc, SPARAM); 3138 val &= ~(SPARAM_ERSTMAX_MASK); 3139 val |= SPARAM_ERSTMAX(XUDC_NR_EVENT_RINGS); 3140 xudc_writel(xudc, val, SPARAM); 3141 3142 for (i = 0; i < ARRAY_SIZE(xudc->event_ring); i++) { 3143 memset(xudc->event_ring[i], 0, XUDC_EVENT_RING_SIZE * 3144 sizeof(*xudc->event_ring[i])); 3145 3146 val = xudc_readl(xudc, ERSTSZ); 3147 val &= ~(ERSTSZ_ERSTXSZ_MASK << ERSTSZ_ERSTXSZ_SHIFT(i)); 3148 val |= XUDC_EVENT_RING_SIZE << ERSTSZ_ERSTXSZ_SHIFT(i); 3149 xudc_writel(xudc, val, ERSTSZ); 3150 3151 xudc_writel(xudc, lower_32_bits(xudc->event_ring_phys[i]), 3152 ERSTXBALO(i)); 3153 xudc_writel(xudc, upper_32_bits(xudc->event_ring_phys[i]), 3154 ERSTXBAHI(i)); 3155 } 3156 3157 val = lower_32_bits(xudc->event_ring_phys[0]); 3158 xudc_writel(xudc, val, ERDPLO); 3159 val |= EREPLO_ECS; 3160 xudc_writel(xudc, val, EREPLO); 3161 3162 val = upper_32_bits(xudc->event_ring_phys[0]); 3163 xudc_writel(xudc, val, ERDPHI); 3164 xudc_writel(xudc, val, EREPHI); 3165 3166 xudc->ccs = true; 3167 xudc->event_ring_index = 0; 3168 xudc->event_ring_deq_ptr = 0; 3169 } 3170 3171 static void tegra_xudc_free_event_ring(struct tegra_xudc *xudc) 3172 { 3173 unsigned int i; 3174 3175 for (i = 0; i < ARRAY_SIZE(xudc->event_ring); i++) { 3176 dma_free_coherent(xudc->dev, XUDC_EVENT_RING_SIZE * 3177 sizeof(*xudc->event_ring[i]), 3178 xudc->event_ring[i], 3179 xudc->event_ring_phys[i]); 3180 } 3181 } 3182 3183 static void tegra_xudc_fpci_ipfs_init(struct tegra_xudc *xudc) 3184 { 3185 u32 val; 3186 3187 if (xudc->soc->has_ipfs) { 3188 val = ipfs_readl(xudc, XUSB_DEV_CONFIGURATION_0); 3189 val |= XUSB_DEV_CONFIGURATION_0_EN_FPCI; 3190 ipfs_writel(xudc, val, XUSB_DEV_CONFIGURATION_0); 3191 usleep_range(10, 15); 3192 } 3193 3194 /* Enable bus master */ 3195 val = XUSB_DEV_CFG_1_IO_SPACE_EN | XUSB_DEV_CFG_1_MEMORY_SPACE_EN | 3196 XUSB_DEV_CFG_1_BUS_MASTER_EN; 3197 fpci_writel(xudc, val, XUSB_DEV_CFG_1); 3198 3199 /* Program BAR0 space */ 3200 val = fpci_readl(xudc, XUSB_DEV_CFG_4); 3201 val &= ~(XUSB_DEV_CFG_4_BASE_ADDR_MASK); 3202 val |= xudc->phys_base & (XUSB_DEV_CFG_4_BASE_ADDR_MASK); 3203 3204 fpci_writel(xudc, val, XUSB_DEV_CFG_4); 3205 fpci_writel(xudc, upper_32_bits(xudc->phys_base), XUSB_DEV_CFG_5); 3206 3207 usleep_range(100, 200); 3208 3209 if (xudc->soc->has_ipfs) { 3210 /* Enable interrupt assertion */ 3211 val = ipfs_readl(xudc, XUSB_DEV_INTR_MASK_0); 3212 val |= XUSB_DEV_INTR_MASK_0_IP_INT_MASK; 3213 ipfs_writel(xudc, val, XUSB_DEV_INTR_MASK_0); 3214 } 3215 } 3216 3217 static void tegra_xudc_device_params_init(struct tegra_xudc *xudc) 3218 { 3219 u32 val, imod; 3220 3221 if (xudc->soc->has_ipfs) { 3222 val = xudc_readl(xudc, BLCG); 3223 val |= BLCG_ALL; 3224 val &= ~(BLCG_DFPCI | BLCG_UFPCI | BLCG_FE | 3225 BLCG_COREPLL_PWRDN); 3226 val |= BLCG_IOPLL_0_PWRDN; 3227 val |= BLCG_IOPLL_1_PWRDN; 3228 val |= BLCG_IOPLL_2_PWRDN; 3229 3230 xudc_writel(xudc, val, BLCG); 3231 } 3232 3233 /* Set a reasonable U3 exit timer value. */ 3234 val = xudc_readl(xudc, SSPX_CORE_PADCTL4); 3235 val &= ~(SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3_MASK); 3236 val |= SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3(0x5dc0); 3237 xudc_writel(xudc, val, SSPX_CORE_PADCTL4); 3238 3239 /* Default ping LFPS tBurst is too large. 
*/ 3240 val = xudc_readl(xudc, SSPX_CORE_CNT0); 3241 val &= ~(SSPX_CORE_CNT0_PING_TBURST_MASK); 3242 val |= SSPX_CORE_CNT0_PING_TBURST(0xa); 3243 xudc_writel(xudc, val, SSPX_CORE_CNT0); 3244 3245 /* Default tPortConfiguration timeout is too small. */ 3246 val = xudc_readl(xudc, SSPX_CORE_CNT30); 3247 val &= ~(SSPX_CORE_CNT30_LMPITP_TIMER_MASK); 3248 val |= SSPX_CORE_CNT30_LMPITP_TIMER(0x978); 3249 xudc_writel(xudc, val, SSPX_CORE_CNT30); 3250 3251 if (xudc->soc->lpm_enable) { 3252 /* Set L1 resume duration to 95 us. */ 3253 val = xudc_readl(xudc, HSFSPI_COUNT13); 3254 val &= ~(HSFSPI_COUNT13_U2_RESUME_K_DURATION_MASK); 3255 val |= HSFSPI_COUNT13_U2_RESUME_K_DURATION(0x2c88); 3256 xudc_writel(xudc, val, HSFSPI_COUNT13); 3257 } 3258 3259 /* 3260 * Compliance suite appears to be violating polling LFPS tBurst max 3261 * of 1.4us. Send 1.45us instead. 3262 */ 3263 val = xudc_readl(xudc, SSPX_CORE_CNT32); 3264 val &= ~(SSPX_CORE_CNT32_POLL_TBURST_MAX_MASK); 3265 val |= SSPX_CORE_CNT32_POLL_TBURST_MAX(0xb0); 3266 xudc_writel(xudc, val, SSPX_CORE_CNT32); 3267 3268 /* Direct HS/FS port instance to RxDetect. */ 3269 val = xudc_readl(xudc, CFG_DEV_FE); 3270 val &= ~(CFG_DEV_FE_PORTREGSEL_MASK); 3271 val |= CFG_DEV_FE_PORTREGSEL(CFG_DEV_FE_PORTREGSEL_HSFS_PI); 3272 xudc_writel(xudc, val, CFG_DEV_FE); 3273 3274 val = xudc_readl(xudc, PORTSC); 3275 val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK); 3276 val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_RXDETECT); 3277 xudc_writel(xudc, val, PORTSC); 3278 3279 /* Direct SS port instance to RxDetect. */ 3280 val = xudc_readl(xudc, CFG_DEV_FE); 3281 val &= ~(CFG_DEV_FE_PORTREGSEL_MASK); 3282 val |= CFG_DEV_FE_PORTREGSEL(CFG_DEV_FE_PORTREGSEL_SS_PI); 3283 xudc_writel(xudc, val, CFG_DEV_FE); 3284 3285 val = xudc_readl(xudc, PORTSC); 3286 val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK); 3287 val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_RXDETECT); 3288 xudc_writel(xudc, val, PORTSC); 3289 3290 /* Restore port instance. */ 3291 val = xudc_readl(xudc, CFG_DEV_FE); 3292 val &= ~(CFG_DEV_FE_PORTREGSEL_MASK); 3293 xudc_writel(xudc, val, CFG_DEV_FE); 3294 3295 /* 3296 * Enable INFINITE_SS_RETRY to prevent device from entering 3297 * Disabled.Error when attached to buggy SuperSpeed hubs. 3298 */ 3299 val = xudc_readl(xudc, CFG_DEV_FE); 3300 val |= CFG_DEV_FE_INFINITE_SS_RETRY; 3301 xudc_writel(xudc, val, CFG_DEV_FE); 3302 3303 /* Set interrupt moderation. 
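The IMODI/IMODC fields appear to count in 250 ns units, hence the multiply-by-4 conversion from microseconds below.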
*/ 3304 imod = XUDC_INTERRUPT_MODERATION_US * 4; 3305 val = xudc_readl(xudc, RT_IMOD); 3306 val &= ~((RT_IMOD_IMODI_MASK) | (RT_IMOD_IMODC_MASK)); 3307 val |= (RT_IMOD_IMODI(imod) | RT_IMOD_IMODC(imod)); 3308 xudc_writel(xudc, val, RT_IMOD); 3309 3310 /* increase SSPI transaction timeout from 32us to 512us */ 3311 val = xudc_readl(xudc, CFG_DEV_SSPI_XFER); 3312 val &= ~(CFG_DEV_SSPI_XFER_ACKTIMEOUT_MASK); 3313 val |= CFG_DEV_SSPI_XFER_ACKTIMEOUT(0xf000); 3314 xudc_writel(xudc, val, CFG_DEV_SSPI_XFER); 3315 } 3316 3317 static int tegra_xudc_phy_init(struct tegra_xudc *xudc) 3318 { 3319 int err; 3320 3321 err = phy_init(xudc->utmi_phy); 3322 if (err < 0) { 3323 dev_err(xudc->dev, "utmi phy init failed: %d\n", err); 3324 return err; 3325 } 3326 3327 err = phy_init(xudc->usb3_phy); 3328 if (err < 0) { 3329 dev_err(xudc->dev, "usb3 phy init failed: %d\n", err); 3330 goto exit_utmi_phy; 3331 } 3332 3333 return 0; 3334 3335 exit_utmi_phy: 3336 phy_exit(xudc->utmi_phy); 3337 return err; 3338 } 3339 3340 static void tegra_xudc_phy_exit(struct tegra_xudc *xudc) 3341 { 3342 phy_exit(xudc->usb3_phy); 3343 phy_exit(xudc->utmi_phy); 3344 } 3345 3346 static const char * const tegra210_xudc_supply_names[] = { 3347 "hvdd-usb", 3348 "avddio-usb", 3349 }; 3350 3351 static const char * const tegra210_xudc_clock_names[] = { 3352 "dev", 3353 "ss", 3354 "ss_src", 3355 "hs_src", 3356 "fs_src", 3357 }; 3358 3359 static const char * const tegra186_xudc_clock_names[] = { 3360 "dev", 3361 "ss", 3362 "ss_src", 3363 "fs_src", 3364 }; 3365 3366 static struct tegra_xudc_soc tegra210_xudc_soc_data = { 3367 .supply_names = tegra210_xudc_supply_names, 3368 .num_supplies = ARRAY_SIZE(tegra210_xudc_supply_names), 3369 .clock_names = tegra210_xudc_clock_names, 3370 .num_clks = ARRAY_SIZE(tegra210_xudc_clock_names), 3371 .u1_enable = false, 3372 .u2_enable = true, 3373 .lpm_enable = false, 3374 .invalid_seq_num = true, 3375 .pls_quirk = true, 3376 .port_reset_quirk = true, 3377 .has_ipfs = true, 3378 }; 3379 3380 static struct tegra_xudc_soc tegra186_xudc_soc_data = { 3381 .clock_names = tegra186_xudc_clock_names, 3382 .num_clks = ARRAY_SIZE(tegra186_xudc_clock_names), 3383 .u1_enable = true, 3384 .u2_enable = true, 3385 .lpm_enable = false, 3386 .invalid_seq_num = false, 3387 .pls_quirk = false, 3388 .port_reset_quirk = false, 3389 .has_ipfs = false, 3390 }; 3391 3392 static const struct of_device_id tegra_xudc_of_match[] = { 3393 { 3394 .compatible = "nvidia,tegra210-xudc", 3395 .data = &tegra210_xudc_soc_data 3396 }, 3397 { 3398 .compatible = "nvidia,tegra186-xudc", 3399 .data = &tegra186_xudc_soc_data 3400 }, 3401 { } 3402 }; 3403 MODULE_DEVICE_TABLE(of, tegra_xudc_of_match); 3404 3405 static void tegra_xudc_powerdomain_remove(struct tegra_xudc *xudc) 3406 { 3407 if (xudc->genpd_dl_ss) 3408 device_link_del(xudc->genpd_dl_ss); 3409 if (xudc->genpd_dl_device) 3410 device_link_del(xudc->genpd_dl_device); 3411 if (xudc->genpd_dev_ss) 3412 dev_pm_domain_detach(xudc->genpd_dev_ss, true); 3413 if (xudc->genpd_dev_device) 3414 dev_pm_domain_detach(xudc->genpd_dev_device, true); 3415 } 3416 3417 static int tegra_xudc_powerdomain_init(struct tegra_xudc *xudc) 3418 { 3419 struct device *dev = xudc->dev; 3420 int err; 3421 3422 xudc->genpd_dev_device = dev_pm_domain_attach_by_name(dev, 3423 "dev"); 3424 if (IS_ERR(xudc->genpd_dev_device)) { 3425 err = PTR_ERR(xudc->genpd_dev_device); 3426 dev_err(dev, "failed to get dev pm-domain: %d\n", err); 3427 return err; 3428 } 3429 3430 xudc->genpd_dev_ss = dev_pm_domain_attach_by_name(dev, 
"ss"); 3431 if (IS_ERR(xudc->genpd_dev_ss)) { 3432 err = PTR_ERR(xudc->genpd_dev_ss); 3433 dev_err(dev, "failed to get superspeed pm-domain: %d\n", err); 3434 return err; 3435 } 3436 3437 xudc->genpd_dl_device = device_link_add(dev, xudc->genpd_dev_device, 3438 DL_FLAG_PM_RUNTIME | 3439 DL_FLAG_STATELESS); 3440 if (!xudc->genpd_dl_device) { 3441 dev_err(dev, "adding usb device device link failed!\n"); 3442 return -ENODEV; 3443 } 3444 3445 xudc->genpd_dl_ss = device_link_add(dev, xudc->genpd_dev_ss, 3446 DL_FLAG_PM_RUNTIME | 3447 DL_FLAG_STATELESS); 3448 if (!xudc->genpd_dl_ss) { 3449 dev_err(dev, "adding superspeed device link failed!\n"); 3450 return -ENODEV; 3451 } 3452 3453 return 0; 3454 } 3455 3456 static int tegra_xudc_probe(struct platform_device *pdev) 3457 { 3458 struct tegra_xudc *xudc; 3459 struct resource *res; 3460 struct usb_role_switch_desc role_sx_desc = { 0 }; 3461 unsigned int i; 3462 int err; 3463 3464 xudc = devm_kzalloc(&pdev->dev, sizeof(*xudc), GFP_ATOMIC); 3465 if (!xudc) 3466 return -ENOMEM; 3467 3468 xudc->dev = &pdev->dev; 3469 platform_set_drvdata(pdev, xudc); 3470 3471 xudc->soc = of_device_get_match_data(&pdev->dev); 3472 if (!xudc->soc) 3473 return -ENODEV; 3474 3475 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base"); 3476 xudc->base = devm_ioremap_resource(&pdev->dev, res); 3477 if (IS_ERR(xudc->base)) 3478 return PTR_ERR(xudc->base); 3479 xudc->phys_base = res->start; 3480 3481 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fpci"); 3482 xudc->fpci = devm_ioremap_resource(&pdev->dev, res); 3483 if (IS_ERR(xudc->fpci)) 3484 return PTR_ERR(xudc->fpci); 3485 3486 if (xudc->soc->has_ipfs) { 3487 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, 3488 "ipfs"); 3489 xudc->ipfs = devm_ioremap_resource(&pdev->dev, res); 3490 if (IS_ERR(xudc->ipfs)) 3491 return PTR_ERR(xudc->ipfs); 3492 } 3493 3494 xudc->irq = platform_get_irq(pdev, 0); 3495 if (xudc->irq < 0) { 3496 dev_err(xudc->dev, "failed to get IRQ: %d\n", 3497 xudc->irq); 3498 return xudc->irq; 3499 } 3500 3501 err = devm_request_irq(&pdev->dev, xudc->irq, tegra_xudc_irq, 0, 3502 dev_name(&pdev->dev), xudc); 3503 if (err < 0) { 3504 dev_err(xudc->dev, "failed to claim IRQ#%u: %d\n", xudc->irq, 3505 err); 3506 return err; 3507 } 3508 3509 xudc->clks = devm_kcalloc(&pdev->dev, xudc->soc->num_clks, 3510 sizeof(*xudc->clks), GFP_KERNEL); 3511 if (!xudc->clks) 3512 return -ENOMEM; 3513 3514 for (i = 0; i < xudc->soc->num_clks; i++) 3515 xudc->clks[i].id = xudc->soc->clock_names[i]; 3516 3517 err = devm_clk_bulk_get(&pdev->dev, xudc->soc->num_clks, 3518 xudc->clks); 3519 if (err) { 3520 dev_err(xudc->dev, "failed to request clks %d\n", err); 3521 return err; 3522 } 3523 3524 xudc->supplies = devm_kcalloc(&pdev->dev, xudc->soc->num_supplies, 3525 sizeof(*xudc->supplies), GFP_KERNEL); 3526 if (!xudc->supplies) 3527 return -ENOMEM; 3528 3529 for (i = 0; i < xudc->soc->num_supplies; i++) 3530 xudc->supplies[i].supply = xudc->soc->supply_names[i]; 3531 3532 err = devm_regulator_bulk_get(&pdev->dev, xudc->soc->num_supplies, 3533 xudc->supplies); 3534 if (err) { 3535 dev_err(xudc->dev, "failed to request regulators %d\n", err); 3536 return err; 3537 } 3538 3539 xudc->padctl = tegra_xusb_padctl_get(&pdev->dev); 3540 if (IS_ERR(xudc->padctl)) 3541 return PTR_ERR(xudc->padctl); 3542 3543 err = regulator_bulk_enable(xudc->soc->num_supplies, xudc->supplies); 3544 if (err) { 3545 dev_err(xudc->dev, "failed to enable regulators %d\n", err); 3546 goto put_padctl; 3547 } 3548 3549 xudc->usb3_phy = 
devm_phy_optional_get(&pdev->dev, "usb3"); 3550 if (IS_ERR(xudc->usb3_phy)) { 3551 err = PTR_ERR(xudc->usb3_phy); 3552 dev_err(xudc->dev, "failed to get usb3 phy: %d\n", err); 3553 goto disable_regulator; 3554 } 3555 3556 xudc->utmi_phy = devm_phy_optional_get(&pdev->dev, "usb2"); 3557 if (IS_ERR(xudc->utmi_phy)) { 3558 err = PTR_ERR(xudc->utmi_phy); 3559 dev_err(xudc->dev, "failed to get usb2 phy: %d\n", err); 3560 goto disable_regulator; 3561 } 3562 3563 err = tegra_xudc_powerdomain_init(xudc); 3564 if (err) 3565 goto put_powerdomains; 3566 3567 err = tegra_xudc_phy_init(xudc); 3568 if (err) 3569 goto put_powerdomains; 3570 3571 err = tegra_xudc_alloc_event_ring(xudc); 3572 if (err) 3573 goto disable_phy; 3574 3575 err = tegra_xudc_alloc_eps(xudc); 3576 if (err) 3577 goto free_event_ring; 3578 3579 spin_lock_init(&xudc->lock); 3580 3581 init_completion(&xudc->disconnect_complete); 3582 3583 INIT_WORK(&xudc->usb_role_sw_work, tegra_xudc_usb_role_sw_work); 3584 3585 INIT_DELAYED_WORK(&xudc->plc_reset_work, tegra_xudc_plc_reset_work); 3586 3587 INIT_DELAYED_WORK(&xudc->port_reset_war_work, 3588 tegra_xudc_port_reset_war_work); 3589 3590 if (of_property_read_bool(xudc->dev->of_node, "usb-role-switch")) { 3591 role_sx_desc.set = tegra_xudc_usb_role_sw_set; 3592 role_sx_desc.fwnode = dev_fwnode(xudc->dev); 3593 3594 xudc->usb_role_sw = usb_role_switch_register(xudc->dev, 3595 &role_sx_desc); 3596 if (IS_ERR(xudc->usb_role_sw)) { 3597 err = PTR_ERR(xudc->usb_role_sw); 3598 dev_err(xudc->dev, "Failed to register USB role SW: %d", 3599 err); 3600 goto free_eps; 3601 } 3602 } else { 3603 /* Set the mode as device mode and this keeps phy always ON */ 3604 dev_info(xudc->dev, "Set usb role to device mode always"); 3605 schedule_work(&xudc->usb_role_sw_work); 3606 } 3607 3608 pm_runtime_enable(&pdev->dev); 3609 3610 xudc->gadget.ops = &tegra_xudc_gadget_ops; 3611 xudc->gadget.ep0 = &xudc->ep[0].usb_ep; 3612 xudc->gadget.name = "tegra-xudc"; 3613 xudc->gadget.max_speed = USB_SPEED_SUPER; 3614 3615 err = usb_add_gadget_udc(&pdev->dev, &xudc->gadget); 3616 if (err) { 3617 dev_err(&pdev->dev, "failed to add USB gadget: %d\n", err); 3618 goto free_eps; 3619 } 3620 3621 return 0; 3622 3623 free_eps: 3624 tegra_xudc_free_eps(xudc); 3625 free_event_ring: 3626 tegra_xudc_free_event_ring(xudc); 3627 disable_phy: 3628 tegra_xudc_phy_exit(xudc); 3629 put_powerdomains: 3630 tegra_xudc_powerdomain_remove(xudc); 3631 disable_regulator: 3632 regulator_bulk_disable(xudc->soc->num_supplies, xudc->supplies); 3633 put_padctl: 3634 tegra_xusb_padctl_put(xudc->padctl); 3635 3636 return err; 3637 } 3638 3639 static int tegra_xudc_remove(struct platform_device *pdev) 3640 { 3641 struct tegra_xudc *xudc = platform_get_drvdata(pdev); 3642 3643 pm_runtime_get_sync(xudc->dev); 3644 3645 cancel_delayed_work(&xudc->plc_reset_work); 3646 3647 if (xudc->usb_role_sw) { 3648 usb_role_switch_unregister(xudc->usb_role_sw); 3649 cancel_work_sync(&xudc->usb_role_sw_work); 3650 } 3651 3652 usb_del_gadget_udc(&xudc->gadget); 3653 3654 tegra_xudc_free_eps(xudc); 3655 tegra_xudc_free_event_ring(xudc); 3656 3657 tegra_xudc_powerdomain_remove(xudc); 3658 3659 regulator_bulk_disable(xudc->soc->num_supplies, xudc->supplies); 3660 3661 phy_power_off(xudc->utmi_phy); 3662 phy_power_off(xudc->usb3_phy); 3663 3664 tegra_xudc_phy_exit(xudc); 3665 3666 pm_runtime_disable(xudc->dev); 3667 pm_runtime_put(xudc->dev); 3668 3669 tegra_xusb_padctl_put(xudc->padctl); 3670 3671 return 0; 3672 } 3673 3674 static int __maybe_unused tegra_xudc_powergate(struct 
tegra_xudc *xudc) 3675 { 3676 unsigned long flags; 3677 3678 dev_dbg(xudc->dev, "entering ELPG\n"); 3679 3680 spin_lock_irqsave(&xudc->lock, flags); 3681 3682 xudc->powergated = true; 3683 xudc->saved_regs.ctrl = xudc_readl(xudc, CTRL); 3684 xudc->saved_regs.portpm = xudc_readl(xudc, PORTPM); 3685 xudc_writel(xudc, 0, CTRL); 3686 3687 spin_unlock_irqrestore(&xudc->lock, flags); 3688 3689 clk_bulk_disable_unprepare(xudc->soc->num_clks, xudc->clks); 3690 3691 regulator_bulk_disable(xudc->soc->num_supplies, xudc->supplies); 3692 3693 dev_dbg(xudc->dev, "entering ELPG done\n"); 3694 return 0; 3695 } 3696 3697 static int __maybe_unused tegra_xudc_unpowergate(struct tegra_xudc *xudc) 3698 { 3699 unsigned long flags; 3700 int err; 3701 3702 dev_dbg(xudc->dev, "exiting ELPG\n"); 3703 3704 err = regulator_bulk_enable(xudc->soc->num_supplies, 3705 xudc->supplies); 3706 if (err < 0) 3707 return err; 3708 3709 err = clk_bulk_prepare_enable(xudc->soc->num_clks, xudc->clks); 3710 if (err < 0) 3711 return err; 3712 3713 tegra_xudc_fpci_ipfs_init(xudc); 3714 3715 tegra_xudc_device_params_init(xudc); 3716 3717 tegra_xudc_init_event_ring(xudc); 3718 3719 tegra_xudc_init_eps(xudc); 3720 3721 xudc_writel(xudc, xudc->saved_regs.portpm, PORTPM); 3722 xudc_writel(xudc, xudc->saved_regs.ctrl, CTRL); 3723 3724 spin_lock_irqsave(&xudc->lock, flags); 3725 xudc->powergated = false; 3726 spin_unlock_irqrestore(&xudc->lock, flags); 3727 3728 dev_dbg(xudc->dev, "exiting ELPG done\n"); 3729 return 0; 3730 } 3731 3732 static int __maybe_unused tegra_xudc_suspend(struct device *dev) 3733 { 3734 struct tegra_xudc *xudc = dev_get_drvdata(dev); 3735 unsigned long flags; 3736 3737 spin_lock_irqsave(&xudc->lock, flags); 3738 xudc->suspended = true; 3739 spin_unlock_irqrestore(&xudc->lock, flags); 3740 3741 flush_work(&xudc->usb_role_sw_work); 3742 3743 /* Forcibly disconnect before powergating. 
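Powergating while still attached would leave the host facing an unresponsive device instead of a clean detach.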
*/ 3744 tegra_xudc_device_mode_off(xudc); 3745 3746 if (!pm_runtime_status_suspended(dev)) 3747 tegra_xudc_powergate(xudc); 3748 3749 pm_runtime_disable(dev); 3750 3751 return 0; 3752 } 3753 3754 static int __maybe_unused tegra_xudc_resume(struct device *dev) 3755 { 3756 struct tegra_xudc *xudc = dev_get_drvdata(dev); 3757 unsigned long flags; 3758 int err; 3759 3760 err = tegra_xudc_unpowergate(xudc); 3761 if (err < 0) 3762 return err; 3763 3764 spin_lock_irqsave(&xudc->lock, flags); 3765 xudc->suspended = false; 3766 spin_unlock_irqrestore(&xudc->lock, flags); 3767 3768 schedule_work(&xudc->usb_role_sw_work); 3769 3770 pm_runtime_enable(dev); 3771 3772 return 0; 3773 } 3774 3775 static int __maybe_unused tegra_xudc_runtime_suspend(struct device *dev) 3776 { 3777 struct tegra_xudc *xudc = dev_get_drvdata(dev); 3778 3779 return tegra_xudc_powergate(xudc); 3780 } 3781 3782 static int __maybe_unused tegra_xudc_runtime_resume(struct device *dev) 3783 { 3784 struct tegra_xudc *xudc = dev_get_drvdata(dev); 3785 3786 return tegra_xudc_unpowergate(xudc); 3787 } 3788 3789 static const struct dev_pm_ops tegra_xudc_pm_ops = { 3790 SET_SYSTEM_SLEEP_PM_OPS(tegra_xudc_suspend, tegra_xudc_resume) 3791 SET_RUNTIME_PM_OPS(tegra_xudc_runtime_suspend, 3792 tegra_xudc_runtime_resume, NULL) 3793 }; 3794 3795 static struct platform_driver tegra_xudc_driver = { 3796 .probe = tegra_xudc_probe, 3797 .remove = tegra_xudc_remove, 3798 .driver = { 3799 .name = "tegra-xudc", 3800 .pm = &tegra_xudc_pm_ops, 3801 .of_match_table = tegra_xudc_of_match, 3802 }, 3803 }; 3804 module_platform_driver(tegra_xudc_driver); 3805 3806 MODULE_DESCRIPTION("NVIDIA Tegra XUSB Device Controller"); 3807 MODULE_AUTHOR("Andrew Bresticker <abrestic@chromium.org>"); 3808 MODULE_AUTHOR("Hui Fu <hfu@nvidia.com>"); 3809 MODULE_AUTHOR("Nagarjuna Kristam <nkristam@nvidia.com>"); 3810 MODULE_LICENSE("GPL v2"); 3811