// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include "internal.h"

static DEFINE_IDA(mhi_controller_ida);

const char * const mhi_ee_str[MHI_EE_MAX] = {
	[MHI_EE_PBL] = "PRIMARY BOOTLOADER",
	[MHI_EE_SBL] = "SECONDARY BOOTLOADER",
	[MHI_EE_AMSS] = "MISSION MODE",
	[MHI_EE_RDDM] = "RAMDUMP DOWNLOAD MODE",
	[MHI_EE_WFW] = "WLAN FIRMWARE",
	[MHI_EE_PTHRU] = "PASS THROUGH",
	[MHI_EE_EDL] = "EMERGENCY DOWNLOAD",
	[MHI_EE_FP] = "FLASH PROGRAMMER",
	[MHI_EE_DISABLE_TRANSITION] = "DISABLE",
	[MHI_EE_NOT_SUPPORTED] = "NOT SUPPORTED",
};

const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = {
	[DEV_ST_TRANSITION_PBL] = "PBL",
	[DEV_ST_TRANSITION_READY] = "READY",
	[DEV_ST_TRANSITION_SBL] = "SBL",
	[DEV_ST_TRANSITION_MISSION_MODE] = "MISSION MODE",
	[DEV_ST_TRANSITION_FP] = "FLASH PROGRAMMER",
	[DEV_ST_TRANSITION_SYS_ERR] = "SYS ERROR",
	[DEV_ST_TRANSITION_DISABLE] = "DISABLE",
};

const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX] = {
	[MHI_CH_STATE_TYPE_RESET] = "RESET",
	[MHI_CH_STATE_TYPE_STOP] = "STOP",
	[MHI_CH_STATE_TYPE_START] = "START",
};

static const char * const mhi_pm_state_str[] = {
	[MHI_PM_STATE_DISABLE] = "DISABLE",
	[MHI_PM_STATE_POR] = "POWER ON RESET",
	[MHI_PM_STATE_M0] = "M0",
	[MHI_PM_STATE_M2] = "M2",
	[MHI_PM_STATE_M3_ENTER] = "M?->M3",
	[MHI_PM_STATE_M3] = "M3",
	[MHI_PM_STATE_M3_EXIT] = "M3->M0",
	[MHI_PM_STATE_FW_DL_ERR] = "Firmware Download Error",
	[MHI_PM_STATE_SYS_ERR_DETECT] = "SYS ERROR Detect",
	[MHI_PM_STATE_SYS_ERR_PROCESS] = "SYS ERROR Process",
	[MHI_PM_STATE_SHUTDOWN_PROCESS] = "SHUTDOWN Process",
	[MHI_PM_STATE_LD_ERR_FATAL_DETECT] = "Linkdown or Error Fatal Detect",
};

const char *to_mhi_pm_state_str(u32 state)
{
	int index;

	if (state)
		index = __fls(state);

	if (!state || index >= ARRAY_SIZE(mhi_pm_state_str))
		return "Invalid State";

	return mhi_pm_state_str[index];
}

static ssize_t serial_number_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	return sysfs_emit(buf, "Serial Number: %u\n",
			mhi_cntrl->serial_number);
}
static DEVICE_ATTR_RO(serial_number);

static ssize_t oem_pk_hash_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	int i, cnt = 0;

	for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++)
		cnt += sysfs_emit_at(buf, cnt, "OEMPKHASH[%d]: 0x%x\n",
				i, mhi_cntrl->oem_pk_hash[i]);

	return cnt;
}
static DEVICE_ATTR_RO(oem_pk_hash);

static ssize_t soc_reset_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf,
			       size_t count)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	mhi_soc_reset(mhi_cntrl);
	return count;
}
static DEVICE_ATTR_WO(soc_reset);

static struct attribute *mhi_dev_attrs[] = {
	&dev_attr_serial_number.attr,
	&dev_attr_oem_pk_hash.attr,
	&dev_attr_soc_reset.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mhi_dev);
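/*
 * Alignment is achieved by over-allocation: mhi_alloc_aligned_ring() below
 * reserves len + (len - 1) bytes so that an aligned window of len bytes
 * always fits, then rounds the DMA handle up (the mask-based rounding
 * assumes a power-of-two ring length). For example, a 4 KB ring results in
 * an allocation of just under 8 KB, with iommu_base at the first 4 KB
 * boundary inside it and base pointing at the matching CPU address.
 */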
/* MHI protocol requires the transfer ring to be aligned with ring length */
static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl,
				  struct mhi_ring *ring,
				  u64 len)
{
	ring->alloc_size = len + (len - 1);
	ring->pre_aligned = dma_alloc_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
					       &ring->dma_handle, GFP_KERNEL);
	if (!ring->pre_aligned)
		return -ENOMEM;

	ring->iommu_base = (ring->dma_handle + (len - 1)) & ~(len - 1);
	ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle);

	return 0;
}

void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
{
	int i;
	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;

	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
	}

	free_irq(mhi_cntrl->irq[0], mhi_cntrl);
}

int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	unsigned long irq_flags = IRQF_SHARED | IRQF_NO_SUSPEND;
	int i, ret;

	/* if controller driver has set irq_flags, use it */
	if (mhi_cntrl->irq_flags)
		irq_flags = mhi_cntrl->irq_flags;

	/* Setup BHI_INTVEC IRQ */
	ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handler,
				   mhi_intvec_threaded_handler,
				   irq_flags,
				   "bhi", mhi_cntrl);
	if (ret)
		return ret;
	/*
	 * IRQs should be enabled during mhi_async_power_up(), so disable them explicitly here.
	 * Due to the use of IRQF_SHARED flag as default while requesting IRQs, we assume that
	 * IRQ_NOAUTOEN is not applicable.
	 */
	disable_irq(mhi_cntrl->irq[0]);

	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		if (mhi_event->irq >= mhi_cntrl->nr_irqs) {
			dev_err(dev, "irq %d not available for event ring\n",
				mhi_event->irq);
			ret = -EINVAL;
			goto error_request;
		}

		ret = request_irq(mhi_cntrl->irq[mhi_event->irq],
				  mhi_irq_handler,
				  irq_flags,
				  "mhi", mhi_event);
		if (ret) {
			dev_err(dev, "Error requesting irq:%d for ev:%d\n",
				mhi_cntrl->irq[mhi_event->irq], i);
			goto error_request;
		}

		disable_irq(mhi_cntrl->irq[mhi_event->irq]);
	}

	return 0;

error_request:
	for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
		if (mhi_event->offload_ev)
			continue;

		free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
	}
	free_irq(mhi_cntrl->irq[0], mhi_cntrl);

	return ret;
}

void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
	int i;
	struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt;
	struct mhi_cmd *mhi_cmd;
	struct mhi_event *mhi_event;
	struct mhi_ring *ring;

	mhi_cmd = mhi_cntrl->mhi_cmd;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) {
		ring = &mhi_cmd->ring;
		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
		ring->base = NULL;
		ring->iommu_base = 0;
	}

	dma_free_coherent(mhi_cntrl->cntrl_dev,
			  sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
			  mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);

	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		ring = &mhi_event->ring;
		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
		ring->base = NULL;
		ring->iommu_base = 0;
	}

	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) *
			  mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
			  mhi_ctxt->er_ctxt_addr);

	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) *
			  mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
			  mhi_ctxt->chan_ctxt_addr);

	kfree(mhi_ctxt);
	mhi_cntrl->mhi_ctxt = NULL;
}
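/*
 * The channel, event and command context arrays allocated below live in
 * coherent DMA memory shared with the device, so every field is stored in
 * little-endian byte order via cpu_to_le32()/cpu_to_le64(). The device
 * learns their bus addresses when mhi_init_mmio() programs the context
 * base address registers.
 */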
int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
	struct mhi_ctxt *mhi_ctxt;
	struct mhi_chan_ctxt *chan_ctxt;
	struct mhi_event_ctxt *er_ctxt;
	struct mhi_cmd_ctxt *cmd_ctxt;
	struct mhi_chan *mhi_chan;
	struct mhi_event *mhi_event;
	struct mhi_cmd *mhi_cmd;
	u32 tmp;
	int ret = -ENOMEM, i;

	atomic_set(&mhi_cntrl->dev_wake, 0);
	atomic_set(&mhi_cntrl->pending_pkts, 0);

	mhi_ctxt = kzalloc(sizeof(*mhi_ctxt), GFP_KERNEL);
	if (!mhi_ctxt)
		return -ENOMEM;

	/* Setup channel ctxt */
	mhi_ctxt->chan_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
						 sizeof(*mhi_ctxt->chan_ctxt) *
						 mhi_cntrl->max_chan,
						 &mhi_ctxt->chan_ctxt_addr,
						 GFP_KERNEL);
	if (!mhi_ctxt->chan_ctxt)
		goto error_alloc_chan_ctxt;

	mhi_chan = mhi_cntrl->mhi_chan;
	chan_ctxt = mhi_ctxt->chan_ctxt;
	for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) {
		/* Skip if it is an offload channel */
		if (mhi_chan->offload_ch)
			continue;

		tmp = le32_to_cpu(chan_ctxt->chcfg);
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED);
		tmp &= ~CHAN_CTX_BRSTMODE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_BRSTMODE_MASK, mhi_chan->db_cfg.brstmode);
		tmp &= ~CHAN_CTX_POLLCFG_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_POLLCFG_MASK, mhi_chan->db_cfg.pollcfg);
		chan_ctxt->chcfg = cpu_to_le32(tmp);

		chan_ctxt->chtype = cpu_to_le32(mhi_chan->type);
		chan_ctxt->erindex = cpu_to_le32(mhi_chan->er_index);

		mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
		mhi_chan->tre_ring.db_addr = (void __iomem *)&chan_ctxt->wp;
	}

	/* Setup event context */
	mhi_ctxt->er_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
					       sizeof(*mhi_ctxt->er_ctxt) *
					       mhi_cntrl->total_ev_rings,
					       &mhi_ctxt->er_ctxt_addr,
					       GFP_KERNEL);
	if (!mhi_ctxt->er_ctxt)
		goto error_alloc_er_ctxt;

	er_ctxt = mhi_ctxt->er_ctxt;
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
		     mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip if it is an offload event */
		if (mhi_event->offload_ev)
			continue;

		tmp = le32_to_cpu(er_ctxt->intmod);
		tmp &= ~EV_CTX_INTMODC_MASK;
		tmp &= ~EV_CTX_INTMODT_MASK;
		tmp |= FIELD_PREP(EV_CTX_INTMODT_MASK, mhi_event->intmod);
		er_ctxt->intmod = cpu_to_le32(tmp);

		er_ctxt->ertype = cpu_to_le32(MHI_ER_TYPE_VALID);
		er_ctxt->msivec = cpu_to_le32(mhi_event->irq);
		mhi_event->db_cfg.db_mode = true;

		ring->el_size = sizeof(struct mhi_ring_element);
		ring->len = ring->el_size * ring->elements;
		ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
		if (ret)
			goto error_alloc_er;

		/*
		 * If the read pointer equals the write pointer, then the
		 * ring is empty
		 */
		ring->rp = ring->wp = ring->base;
		er_ctxt->rbase = cpu_to_le64(ring->iommu_base);
		er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase;
		er_ctxt->rlen = cpu_to_le64(ring->len);
		ring->ctxt_wp = &er_ctxt->wp;
	}

	/* Setup cmd context */
	ret = -ENOMEM;
	mhi_ctxt->cmd_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
						sizeof(*mhi_ctxt->cmd_ctxt) *
						NR_OF_CMD_RINGS,
						&mhi_ctxt->cmd_ctxt_addr,
						GFP_KERNEL);
	if (!mhi_ctxt->cmd_ctxt)
		goto error_alloc_er;

	mhi_cmd = mhi_cntrl->mhi_cmd;
	cmd_ctxt = mhi_ctxt->cmd_ctxt;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		ring->el_size = sizeof(struct mhi_ring_element);
		ring->elements = CMD_EL_PER_RING;
		ring->len = ring->el_size * ring->elements;
		ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
		if (ret)
			goto error_alloc_cmd;

		ring->rp = ring->wp = ring->base;
		cmd_ctxt->rbase = cpu_to_le64(ring->iommu_base);
		cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase;
		cmd_ctxt->rlen = cpu_to_le64(ring->len);
		ring->ctxt_wp = &cmd_ctxt->wp;
	}

	mhi_cntrl->mhi_ctxt = mhi_ctxt;

	return 0;

error_alloc_cmd:
	for (--i, --mhi_cmd; i >= 0; i--, mhi_cmd--) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
	}
	dma_free_coherent(mhi_cntrl->cntrl_dev,
			  sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
			  mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);
	i = mhi_cntrl->total_ev_rings;
	mhi_event = mhi_cntrl->mhi_event + i;

error_alloc_er:
	for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
		struct mhi_ring *ring = &mhi_event->ring;

		if (mhi_event->offload_ev)
			continue;

		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
	}
	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) *
			  mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
			  mhi_ctxt->er_ctxt_addr);

error_alloc_er_ctxt:
	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) *
			  mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
			  mhi_ctxt->chan_ctxt_addr);

error_alloc_chan_ctxt:
	kfree(mhi_ctxt);

	return ret;
}
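/*
 * mhi_init_mmio() reads the doorbell offsets from the device and then
 * advertises the context tables set up in mhi_init_dev_ctxt() by writing
 * their bus addresses into the CCABAP/ECABAP/CRCBAP register pairs, along
 * with the addressable control and data ranges (MHICTRLBASE/LIMIT and
 * MHIDATABASE/LIMIT).
 */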
int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
{
	u32 val;
	int i, ret;
	struct mhi_chan *mhi_chan;
	struct mhi_event *mhi_event;
	void __iomem *base = mhi_cntrl->regs;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct {
		u32 offset;
		u32 val;
	} reg_info[] = {
		{
			CCABAP_HIGHER,
			upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
		},
		{
			CCABAP_LOWER,
			lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
		},
		{
			ECABAP_HIGHER,
			upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
		},
		{
			ECABAP_LOWER,
			lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
		},
		{
			CRCBAP_HIGHER,
			upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
		},
		{
			CRCBAP_LOWER,
			lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
		},
		{
			MHICTRLBASE_HIGHER,
			upper_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHICTRLBASE_LOWER,
			lower_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHIDATABASE_HIGHER,
			upper_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHIDATABASE_LOWER,
			lower_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHICTRLLIMIT_HIGHER,
			upper_32_bits(mhi_cntrl->iova_stop),
		},
		{
			MHICTRLLIMIT_LOWER,
			lower_32_bits(mhi_cntrl->iova_stop),
		},
		{
			MHIDATALIMIT_HIGHER,
			upper_32_bits(mhi_cntrl->iova_stop),
		},
		{
			MHIDATALIMIT_LOWER,
			lower_32_bits(mhi_cntrl->iova_stop),
		},
		{0, 0}
	};

	dev_dbg(dev, "Initializing MHI registers\n");

	/* Read channel db offset */
	ret = mhi_read_reg(mhi_cntrl, base, CHDBOFF, &val);
	if (ret) {
		dev_err(dev, "Unable to read CHDBOFF register\n");
		return -EIO;
	}

	/* Setup wake db */
	mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB);
	mhi_cntrl->wake_set = false;

	/* Setup channel db address for each channel in tre_ring */
	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++)
		mhi_chan->tre_ring.db_addr = base + val;

	/* Read event ring db offset */
	ret = mhi_read_reg(mhi_cntrl, base, ERDBOFF, &val);
	if (ret) {
		dev_err(dev, "Unable to read ERDBOFF register\n");
		return -EIO;
	}

	/* Setup event db address for each ev_ring */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		mhi_event->ring.db_addr = base + val;
	}

	/* Setup DB register for primary CMD rings */
	mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER;

	/* Write to MMIO registers */
	for (i = 0; reg_info[i].offset; i++)
		mhi_write_reg(mhi_cntrl, base, reg_info[i].offset,
			      reg_info[i].val);

	ret = mhi_write_reg_field(mhi_cntrl, base, MHICFG, MHICFG_NER_MASK,
				  mhi_cntrl->total_ev_rings);
	if (ret) {
		dev_err(dev, "Unable to write MHICFG register\n");
		return ret;
	}

	ret = mhi_write_reg_field(mhi_cntrl, base, MHICFG, MHICFG_NHWER_MASK,
				  mhi_cntrl->hw_ev_rings);
	if (ret) {
		dev_err(dev, "Unable to write MHICFG register\n");
		return ret;
	}

	return 0;
}
void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
			  struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring;
	struct mhi_ring *tre_ring;
	struct mhi_chan_ctxt *chan_ctxt;
	u32 tmp;

	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;
	chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];

	if (!chan_ctxt->rbase) /* Already uninitialized */
		return;

	dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size,
			  tre_ring->pre_aligned, tre_ring->dma_handle);
	vfree(buf_ring->base);

	buf_ring->base = tre_ring->base = NULL;
	tre_ring->ctxt_wp = NULL;
	chan_ctxt->rbase = 0;
	chan_ctxt->rlen = 0;
	chan_ctxt->rp = 0;
	chan_ctxt->wp = 0;

	tmp = le32_to_cpu(chan_ctxt->chcfg);
	tmp &= ~CHAN_CTX_CHSTATE_MASK;
	tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED);
	chan_ctxt->chcfg = cpu_to_le32(tmp);

	/* Update to all cores */
	smp_wmb();
}

int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
		       struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring;
	struct mhi_ring *tre_ring;
	struct mhi_chan_ctxt *chan_ctxt;
	u32 tmp;
	int ret;

	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;
	tre_ring->el_size = sizeof(struct mhi_ring_element);
	tre_ring->len = tre_ring->el_size * tre_ring->elements;
	chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
	ret = mhi_alloc_aligned_ring(mhi_cntrl, tre_ring, tre_ring->len);
	if (ret)
		return -ENOMEM;

	buf_ring->el_size = sizeof(struct mhi_buf_info);
	buf_ring->len = buf_ring->el_size * buf_ring->elements;
	buf_ring->base = vzalloc(buf_ring->len);

	if (!buf_ring->base) {
		dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size,
				  tre_ring->pre_aligned, tre_ring->dma_handle);
		return -ENOMEM;
	}

	tmp = le32_to_cpu(chan_ctxt->chcfg);
	tmp &= ~CHAN_CTX_CHSTATE_MASK;
	tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_ENABLED);
	chan_ctxt->chcfg = cpu_to_le32(tmp);

	chan_ctxt->rbase = cpu_to_le64(tre_ring->iommu_base);
	chan_ctxt->rp = chan_ctxt->wp = chan_ctxt->rbase;
	chan_ctxt->rlen = cpu_to_le64(tre_ring->len);
	tre_ring->ctxt_wp = &chan_ctxt->wp;

	tre_ring->rp = tre_ring->wp = tre_ring->base;
	buf_ring->rp = buf_ring->wp = buf_ring->base;
	mhi_chan->db_cfg.db_mode = 1;

	/* Update to all cores */
	smp_wmb();

	return 0;
}
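/*
 * parse_ev_cfg() and parse_ch_cfg() below translate the static
 * mhi_controller_config provided by the controller driver into the
 * internal mhi_event and mhi_chan arrays, validating the event ring and
 * channel numbers and selecting the doorbell and event processing
 * callbacks along the way.
 */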
static int parse_ev_cfg(struct mhi_controller *mhi_cntrl,
			const struct mhi_controller_config *config)
{
	struct mhi_event *mhi_event;
	const struct mhi_event_config *event_cfg;
	struct device *dev = mhi_cntrl->cntrl_dev;
	int i, num;

	num = config->num_events;
	mhi_cntrl->total_ev_rings = num;
	mhi_cntrl->mhi_event = kcalloc(num, sizeof(*mhi_cntrl->mhi_event),
				       GFP_KERNEL);
	if (!mhi_cntrl->mhi_event)
		return -ENOMEM;

	/* Populate event ring */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < num; i++) {
		event_cfg = &config->event_cfg[i];

		mhi_event->er_index = i;
		mhi_event->ring.elements = event_cfg->num_elements;
		mhi_event->intmod = event_cfg->irq_moderation_ms;
		mhi_event->irq = event_cfg->irq;

		if (event_cfg->channel != U32_MAX) {
			/* This event ring has a dedicated channel */
			mhi_event->chan = event_cfg->channel;
			if (mhi_event->chan >= mhi_cntrl->max_chan) {
				dev_err(dev,
					"Event Ring channel not available\n");
				goto error_ev_cfg;
			}

			mhi_event->mhi_chan =
				&mhi_cntrl->mhi_chan[mhi_event->chan];
		}

		/* Priority is fixed to 1 for now */
		mhi_event->priority = 1;

		mhi_event->db_cfg.brstmode = event_cfg->mode;
		if (MHI_INVALID_BRSTMODE(mhi_event->db_cfg.brstmode))
			goto error_ev_cfg;

		if (mhi_event->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
			mhi_event->db_cfg.process_db = mhi_db_brstmode;
		else
			mhi_event->db_cfg.process_db = mhi_db_brstmode_disable;

		mhi_event->data_type = event_cfg->data_type;

		switch (mhi_event->data_type) {
		case MHI_ER_DATA:
			mhi_event->process_event = mhi_process_data_event_ring;
			break;
		case MHI_ER_CTRL:
			mhi_event->process_event = mhi_process_ctrl_ev_ring;
			break;
		default:
			dev_err(dev, "Event Ring type not supported\n");
			goto error_ev_cfg;
		}

		mhi_event->hw_ring = event_cfg->hardware_event;
		if (mhi_event->hw_ring)
			mhi_cntrl->hw_ev_rings++;
		else
			mhi_cntrl->sw_ev_rings++;

		mhi_event->cl_manage = event_cfg->client_managed;
		mhi_event->offload_ev = event_cfg->offload_channel;
		mhi_event++;
	}

	return 0;

error_ev_cfg:

	kfree(mhi_cntrl->mhi_event);
	return -EINVAL;
}

static int parse_ch_cfg(struct mhi_controller *mhi_cntrl,
			const struct mhi_controller_config *config)
{
	const struct mhi_channel_config *ch_cfg;
	struct device *dev = mhi_cntrl->cntrl_dev;
	int i;
	u32 chan;

	mhi_cntrl->max_chan = config->max_channels;

	/*
	 * The allocation of MHI channels can exceed 32KB in some scenarios,
	 * so vzalloc is used here to avoid possible memory allocation
	 * failures
	 */
	mhi_cntrl->mhi_chan = vzalloc(mhi_cntrl->max_chan *
				      sizeof(*mhi_cntrl->mhi_chan));
	if (!mhi_cntrl->mhi_chan)
		return -ENOMEM;

	INIT_LIST_HEAD(&mhi_cntrl->lpm_chans);

	/* Populate channel configurations */
	for (i = 0; i < config->num_channels; i++) {
		struct mhi_chan *mhi_chan;

		ch_cfg = &config->ch_cfg[i];

		chan = ch_cfg->num;
		if (chan >= mhi_cntrl->max_chan) {
			dev_err(dev, "Channel %d not available\n", chan);
			goto error_chan_cfg;
		}

		mhi_chan = &mhi_cntrl->mhi_chan[chan];
		mhi_chan->name = ch_cfg->name;
		mhi_chan->chan = chan;

		mhi_chan->tre_ring.elements = ch_cfg->num_elements;
		if (!mhi_chan->tre_ring.elements)
			goto error_chan_cfg;

		/*
		 * For some channels, the local ring length should be bigger
		 * than the transfer ring length due to internal logical
		 * channels in the device, so that the host can queue more
		 * buffers than the transfer ring length allows. For example,
		 * RSC channels should have a larger local channel length than
		 * the transfer ring length.
		 */
		mhi_chan->buf_ring.elements = ch_cfg->local_elements;
		if (!mhi_chan->buf_ring.elements)
			mhi_chan->buf_ring.elements = mhi_chan->tre_ring.elements;
		mhi_chan->er_index = ch_cfg->event_ring;
		mhi_chan->dir = ch_cfg->dir;

		/*
		 * For most channels, the channel type (chtype) is identical
		 * to the channel direction. So if it is not defined, assign
		 * the channel direction to chtype.
		 */
		mhi_chan->type = ch_cfg->type;
		if (!mhi_chan->type)
			mhi_chan->type = (enum mhi_ch_type)mhi_chan->dir;

		mhi_chan->ee_mask = ch_cfg->ee_mask;
		mhi_chan->db_cfg.pollcfg = ch_cfg->pollcfg;
		mhi_chan->lpm_notify = ch_cfg->lpm_notify;
		mhi_chan->offload_ch = ch_cfg->offload_channel;
		mhi_chan->db_cfg.reset_req = ch_cfg->doorbell_mode_switch;
		mhi_chan->pre_alloc = ch_cfg->auto_queue;
		mhi_chan->wake_capable = ch_cfg->wake_capable;

		/*
		 * If the MHI host allocates buffers, then the channel
		 * direction should be DMA_FROM_DEVICE
		 */
		if (mhi_chan->pre_alloc && mhi_chan->dir != DMA_FROM_DEVICE) {
			dev_err(dev, "Invalid channel configuration\n");
			goto error_chan_cfg;
		}

		/*
		 * Bi-directional and directionless channels must be
		 * offload channels
		 */
		if ((mhi_chan->dir == DMA_BIDIRECTIONAL ||
		     mhi_chan->dir == DMA_NONE) && !mhi_chan->offload_ch) {
			dev_err(dev, "Invalid channel configuration\n");
			goto error_chan_cfg;
		}

		if (!mhi_chan->offload_ch) {
			mhi_chan->db_cfg.brstmode = ch_cfg->doorbell;
			if (MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode)) {
				dev_err(dev, "Invalid Door bell mode\n");
				goto error_chan_cfg;
			}
		}

		if (mhi_chan->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
			mhi_chan->db_cfg.process_db = mhi_db_brstmode;
		else
			mhi_chan->db_cfg.process_db = mhi_db_brstmode_disable;

		mhi_chan->configured = true;

		if (mhi_chan->lpm_notify)
			list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans);
	}

	return 0;

error_chan_cfg:
	vfree(mhi_cntrl->mhi_chan);

	return -EINVAL;
}

static int parse_config(struct mhi_controller *mhi_cntrl,
			const struct mhi_controller_config *config)
{
	int ret;

	/* Parse MHI channel configuration */
	ret = parse_ch_cfg(mhi_cntrl, config);
	if (ret)
		return ret;

	/* Parse MHI event configuration */
	ret = parse_ev_cfg(mhi_cntrl, config);
	if (ret)
		goto error_ev_cfg;

	mhi_cntrl->timeout_ms = config->timeout_ms;
	if (!mhi_cntrl->timeout_ms)
		mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS;

	mhi_cntrl->bounce_buf = config->use_bounce_buf;
	mhi_cntrl->buffer_len = config->buf_len;
	if (!mhi_cntrl->buffer_len)
		mhi_cntrl->buffer_len = MHI_MAX_MTU;

	/* By default, host is allowed to ring DB in both M0 and M2 states */
	mhi_cntrl->db_access = MHI_PM_M0 | MHI_PM_M2;
	if (config->m2_no_db)
		mhi_cntrl->db_access &= ~MHI_PM_M2;

	return 0;

error_ev_cfg:
	vfree(mhi_cntrl->mhi_chan);

	return ret;
}
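/*
 * Rough usage sketch for controller drivers (illustrative only; names such
 * as pdev, bar and my_config below are placeholders): allocate a controller,
 * fill in the mandatory fields checked at the top of
 * mhi_register_controller(), then register it:
 *
 *	mhi_cntrl = mhi_alloc_controller();
 *	mhi_cntrl->cntrl_dev = &pdev->dev;
 *	mhi_cntrl->regs = bar;
 *	mhi_cntrl->reg_len = bar_len;
 *	mhi_cntrl->irq = irq_table;
 *	mhi_cntrl->nr_irqs = nr_irqs;
 *	mhi_cntrl->runtime_get = ...;
 *	mhi_cntrl->runtime_put = ...;
 *	mhi_cntrl->status_cb = ...;
 *	mhi_cntrl->read_reg = ...;
 *	mhi_cntrl->write_reg = ...;
 *	ret = mhi_register_controller(mhi_cntrl, &my_config);
 */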
int mhi_register_controller(struct mhi_controller *mhi_cntrl,
			    const struct mhi_controller_config *config)
{
	struct mhi_event *mhi_event;
	struct mhi_chan *mhi_chan;
	struct mhi_cmd *mhi_cmd;
	struct mhi_device *mhi_dev;
	u32 soc_info;
	int ret, i;

	if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->regs ||
	    !mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
	    !mhi_cntrl->status_cb || !mhi_cntrl->read_reg ||
	    !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs ||
	    !mhi_cntrl->irq || !mhi_cntrl->reg_len)
		return -EINVAL;

	ret = parse_config(mhi_cntrl, config);
	if (ret)
		return -EINVAL;

	mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS,
				     sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
	if (!mhi_cntrl->mhi_cmd) {
		ret = -ENOMEM;
		goto err_free_event;
	}

	INIT_LIST_HEAD(&mhi_cntrl->transition_list);
	mutex_init(&mhi_cntrl->pm_mutex);
	rwlock_init(&mhi_cntrl->pm_lock);
	spin_lock_init(&mhi_cntrl->transition_lock);
	spin_lock_init(&mhi_cntrl->wlock);
	INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
	init_waitqueue_head(&mhi_cntrl->state_event);

	mhi_cntrl->hiprio_wq = alloc_ordered_workqueue("mhi_hiprio_wq", WQ_HIGHPRI);
	if (!mhi_cntrl->hiprio_wq) {
		dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate workqueue\n");
		ret = -ENOMEM;
		goto err_free_cmd;
	}

	mhi_cmd = mhi_cntrl->mhi_cmd;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++)
		spin_lock_init(&mhi_cmd->lock);

	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		/* Skip for offload events */
		if (mhi_event->offload_ev)
			continue;

		mhi_event->mhi_cntrl = mhi_cntrl;
		spin_lock_init(&mhi_event->lock);
		if (mhi_event->data_type == MHI_ER_CTRL)
			tasklet_init(&mhi_event->task, mhi_ctrl_ev_task,
				     (ulong)mhi_event);
		else
			tasklet_init(&mhi_event->task, mhi_ev_task,
				     (ulong)mhi_event);
	}

	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		mutex_init(&mhi_chan->mutex);
		init_completion(&mhi_chan->completion);
		rwlock_init(&mhi_chan->lock);

		/* used in setting bei field of TRE */
		mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
		mhi_chan->intmod = mhi_event->intmod;
	}

	if (mhi_cntrl->bounce_buf) {
		mhi_cntrl->map_single = mhi_map_single_use_bb;
		mhi_cntrl->unmap_single = mhi_unmap_single_use_bb;
	} else {
		mhi_cntrl->map_single = mhi_map_single_no_bb;
		mhi_cntrl->unmap_single = mhi_unmap_single_no_bb;
	}

	/* Read the MHI device info */
	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs,
			   SOC_HW_VERSION_OFFS, &soc_info);
	if (ret)
		goto err_destroy_wq;

	mhi_cntrl->family_number = FIELD_GET(SOC_HW_VERSION_FAM_NUM_BMSK, soc_info);
	mhi_cntrl->device_number = FIELD_GET(SOC_HW_VERSION_DEV_NUM_BMSK, soc_info);
	mhi_cntrl->major_version = FIELD_GET(SOC_HW_VERSION_MAJOR_VER_BMSK, soc_info);
	mhi_cntrl->minor_version = FIELD_GET(SOC_HW_VERSION_MINOR_VER_BMSK, soc_info);

	mhi_cntrl->index = ida_alloc(&mhi_controller_ida, GFP_KERNEL);
	if (mhi_cntrl->index < 0) {
		ret = mhi_cntrl->index;
		goto err_destroy_wq;
	}

	ret = mhi_init_irq_setup(mhi_cntrl);
	if (ret)
		goto err_ida_free;

	/* Register controller with MHI bus */
	mhi_dev = mhi_alloc_device(mhi_cntrl);
	if (IS_ERR(mhi_dev)) {
		dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate MHI device\n");
		ret = PTR_ERR(mhi_dev);
		goto error_setup_irq;
	}

	mhi_dev->dev_type = MHI_DEVICE_CONTROLLER;
	mhi_dev->mhi_cntrl = mhi_cntrl;
	dev_set_name(&mhi_dev->dev, "mhi%d", mhi_cntrl->index);
	mhi_dev->name = dev_name(&mhi_dev->dev);

	/* Init wakeup source */
	device_init_wakeup(&mhi_dev->dev, true);

	ret = device_add(&mhi_dev->dev);
	if (ret)
		goto err_release_dev;

	mhi_cntrl->mhi_dev = mhi_dev;

	mhi_create_debugfs(mhi_cntrl);

	return 0;

err_release_dev:
	put_device(&mhi_dev->dev);
error_setup_irq:
	mhi_deinit_free_irq(mhi_cntrl);
err_ida_free:
	ida_free(&mhi_controller_ida, mhi_cntrl->index);
err_destroy_wq:
	destroy_workqueue(mhi_cntrl->hiprio_wq);
err_free_cmd:
	kfree(mhi_cntrl->mhi_cmd);
err_free_event:
	kfree(mhi_cntrl->mhi_event);
	vfree(mhi_cntrl->mhi_chan);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_register_controller);

void mhi_unregister_controller(struct mhi_controller *mhi_cntrl)
{
	struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
	struct mhi_chan *mhi_chan = mhi_cntrl->mhi_chan;
	unsigned int i;

	mhi_deinit_free_irq(mhi_cntrl);
	mhi_destroy_debugfs(mhi_cntrl);

	destroy_workqueue(mhi_cntrl->hiprio_wq);
	kfree(mhi_cntrl->mhi_cmd);
	kfree(mhi_cntrl->mhi_event);

	/* Drop the references to MHI devices created for channels */
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		if (!mhi_chan->mhi_dev)
			continue;

		put_device(&mhi_chan->mhi_dev->dev);
	}
	vfree(mhi_cntrl->mhi_chan);

	device_del(&mhi_dev->dev);
	put_device(&mhi_dev->dev);

	ida_free(&mhi_controller_ida, mhi_cntrl->index);
}
EXPORT_SYMBOL_GPL(mhi_unregister_controller);

struct mhi_controller *mhi_alloc_controller(void)
{
	struct mhi_controller *mhi_cntrl;

	mhi_cntrl = kzalloc(sizeof(*mhi_cntrl), GFP_KERNEL);

	return mhi_cntrl;
}
EXPORT_SYMBOL_GPL(mhi_alloc_controller);

void mhi_free_controller(struct mhi_controller *mhi_cntrl)
{
	kfree(mhi_cntrl);
}
EXPORT_SYMBOL_GPL(mhi_free_controller);

int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 bhi_off, bhie_off;
	int ret;

	mutex_lock(&mhi_cntrl->pm_mutex);

	ret = mhi_init_dev_ctxt(mhi_cntrl);
	if (ret)
		goto error_dev_ctxt;

	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &bhi_off);
	if (ret) {
		dev_err(dev, "Error getting BHI offset\n");
		goto error_reg_offset;
	}

	if (bhi_off >= mhi_cntrl->reg_len) {
		dev_err(dev, "BHI offset: 0x%x is out of range: 0x%zx\n",
			bhi_off, mhi_cntrl->reg_len);
		ret = -EINVAL;
		goto error_reg_offset;
	}
	mhi_cntrl->bhi = mhi_cntrl->regs + bhi_off;

	if (mhi_cntrl->fbc_download || mhi_cntrl->rddm_size) {
		ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF,
				   &bhie_off);
		if (ret) {
			dev_err(dev, "Error getting BHIE offset\n");
			goto error_reg_offset;
		}

		if (bhie_off >= mhi_cntrl->reg_len) {
			dev_err(dev,
				"BHIe offset: 0x%x is out of range: 0x%zx\n",
				bhie_off, mhi_cntrl->reg_len);
			ret = -EINVAL;
			goto error_reg_offset;
		}
		mhi_cntrl->bhie = mhi_cntrl->regs + bhie_off;
	}

	if (mhi_cntrl->rddm_size) {
		/*
		 * This controller supports RDDM, so we need to manually clear
		 * BHIE RX registers since POR values are undefined.
		 */
		memset_io(mhi_cntrl->bhie + BHIE_RXVECADDR_LOW_OFFS,
			  0, BHIE_RXVECSTATUS_OFFS - BHIE_RXVECADDR_LOW_OFFS +
			  4);
		/*
		 * Allocate RDDM table for debugging purpose if specified
		 */
		mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image,
				     mhi_cntrl->rddm_size);
		if (mhi_cntrl->rddm_image) {
			ret = mhi_rddm_prepare(mhi_cntrl,
					       mhi_cntrl->rddm_image);
			if (ret) {
				mhi_free_bhie_table(mhi_cntrl,
						    mhi_cntrl->rddm_image);
				goto error_reg_offset;
			}
		}
	}

	mutex_unlock(&mhi_cntrl->pm_mutex);

	return 0;

error_reg_offset:
	mhi_deinit_dev_ctxt(mhi_cntrl);

error_dev_ctxt:
	mutex_unlock(&mhi_cntrl->pm_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_prepare_for_power_up);

void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl)
{
	if (mhi_cntrl->fbc_image) {
		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
		mhi_cntrl->fbc_image = NULL;
	}

	if (mhi_cntrl->rddm_image) {
		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
		mhi_cntrl->rddm_image = NULL;
	}

	mhi_cntrl->bhi = NULL;
	mhi_cntrl->bhie = NULL;

	mhi_deinit_dev_ctxt(mhi_cntrl);
}
EXPORT_SYMBOL_GPL(mhi_unprepare_after_power_down);

static void mhi_release_device(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);

	/*
	 * We need to set the mhi_chan->mhi_dev to NULL here since the MHI
	 * devices for the channels will only get created if the mhi_dev
	 * associated with it is NULL. This scenario will happen during the
	 * controller suspend and resume.
	 */
	if (mhi_dev->ul_chan)
		mhi_dev->ul_chan->mhi_dev = NULL;

	if (mhi_dev->dl_chan)
		mhi_dev->dl_chan->mhi_dev = NULL;

	kfree(mhi_dev);
}

struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl)
{
	struct mhi_device *mhi_dev;
	struct device *dev;

	mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL);
	if (!mhi_dev)
		return ERR_PTR(-ENOMEM);

	dev = &mhi_dev->dev;
	device_initialize(dev);
	dev->bus = &mhi_bus_type;
	dev->release = mhi_release_device;

	if (mhi_cntrl->mhi_dev) {
		/* for MHI client devices, parent is the MHI controller device */
		dev->parent = &mhi_cntrl->mhi_dev->dev;
	} else {
		/* for MHI controller device, parent is the bus device (e.g. pci device) */
		dev->parent = mhi_cntrl->cntrl_dev;
	}

	mhi_dev->mhi_cntrl = mhi_cntrl;
	mhi_dev->dev_wake = 0;

	return mhi_dev;
}
static int mhi_driver_probe(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct device_driver *drv = dev->driver;
	struct mhi_driver *mhi_drv = to_mhi_driver(drv);
	struct mhi_event *mhi_event;
	struct mhi_chan *ul_chan = mhi_dev->ul_chan;
	struct mhi_chan *dl_chan = mhi_dev->dl_chan;
	int ret;

	/* Bring device out of LPM */
	ret = mhi_device_get_sync(mhi_dev);
	if (ret)
		return ret;

	ret = -EINVAL;

	if (ul_chan) {
		/*
		 * If the channel supports LPM notifications then status_cb
		 * should be provided
		 */
		if (ul_chan->lpm_notify && !mhi_drv->status_cb)
			goto exit_probe;

		/* For non-offload channels, xfer_cb should be provided */
		if (!ul_chan->offload_ch && !mhi_drv->ul_xfer_cb)
			goto exit_probe;

		ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
	}

	ret = -EINVAL;
	if (dl_chan) {
		/*
		 * If the channel supports LPM notifications then status_cb
		 * should be provided
		 */
		if (dl_chan->lpm_notify && !mhi_drv->status_cb)
			goto exit_probe;

		/* For non-offload channels, xfer_cb should be provided */
		if (!dl_chan->offload_ch && !mhi_drv->dl_xfer_cb)
			goto exit_probe;

		mhi_event = &mhi_cntrl->mhi_event[dl_chan->er_index];

		/*
		 * If the channel event ring is managed by the client, then
		 * status_cb must be provided so that the framework can
		 * notify pending data
		 */
		if (mhi_event->cl_manage && !mhi_drv->status_cb)
			goto exit_probe;

		dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;
	}

	/* Call the user provided probe function */
	ret = mhi_drv->probe(mhi_dev, mhi_dev->id);
	if (ret)
		goto exit_probe;

	mhi_device_put(mhi_dev);

	return ret;

exit_probe:
	mhi_unprepare_from_transfer(mhi_dev);

	mhi_device_put(mhi_dev);

	return ret;
}
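/*
 * Driver removal resets the channels in two passes: first wake up any
 * waiters and reset the hardware rings while the client driver is still
 * bound, then, after the client's remove() callback has run, free the ring
 * memory for channels that were enabled and drop any wake votes the device
 * still holds.
 */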
static int mhi_driver_remove(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_driver *mhi_drv = to_mhi_driver(dev->driver);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan;
	enum mhi_ch_state ch_state[] = {
		MHI_CH_STATE_DISABLED,
		MHI_CH_STATE_DISABLED
	};
	int dir;

	/* Skip if it is a controller device */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	/* Reset both channels */
	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

		if (!mhi_chan)
			continue;

		/* Wake all threads waiting for completion */
		write_lock_irq(&mhi_chan->lock);
		mhi_chan->ccs = MHI_EV_CC_INVALID;
		complete_all(&mhi_chan->completion);
		write_unlock_irq(&mhi_chan->lock);

		/* Save the current state and mark the channel as suspended */
		mutex_lock(&mhi_chan->mutex);
		write_lock_irq(&mhi_chan->lock);
		ch_state[dir] = mhi_chan->ch_state;
		mhi_chan->ch_state = MHI_CH_STATE_SUSPENDED;
		write_unlock_irq(&mhi_chan->lock);

		/* Reset the non-offload channel */
		if (!mhi_chan->offload_ch)
			mhi_reset_chan(mhi_cntrl, mhi_chan);

		mutex_unlock(&mhi_chan->mutex);
	}

	mhi_drv->remove(mhi_dev);

	/* De-init channel if it was enabled */
	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

		if (!mhi_chan)
			continue;

		mutex_lock(&mhi_chan->mutex);

		if ((ch_state[dir] == MHI_CH_STATE_ENABLED ||
		     ch_state[dir] == MHI_CH_STATE_STOP) &&
		    !mhi_chan->offload_ch)
			mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);

		mhi_chan->ch_state = MHI_CH_STATE_DISABLED;

		mutex_unlock(&mhi_chan->mutex);
	}

	while (mhi_dev->dev_wake)
		mhi_device_put(mhi_dev);

	return 0;
}

int __mhi_driver_register(struct mhi_driver *mhi_drv, struct module *owner)
{
	struct device_driver *driver = &mhi_drv->driver;

	if (!mhi_drv->probe || !mhi_drv->remove)
		return -EINVAL;

	driver->bus = &mhi_bus_type;
	driver->owner = owner;
	driver->probe = mhi_driver_probe;
	driver->remove = mhi_driver_remove;

	return driver_register(driver);
}
EXPORT_SYMBOL_GPL(__mhi_driver_register);

void mhi_driver_unregister(struct mhi_driver *mhi_drv)
{
	driver_unregister(&mhi_drv->driver);
}
EXPORT_SYMBOL_GPL(mhi_driver_unregister);

static int mhi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);

	return add_uevent_var(env, "MODALIAS=" MHI_DEVICE_MODALIAS_FMT,
			      mhi_dev->name);
}

static int mhi_match(struct device *dev, struct device_driver *drv)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_driver *mhi_drv = to_mhi_driver(drv);
	const struct mhi_device_id *id;

	/*
	 * If the device is a controller type then there is no client driver
	 * associated with it
	 */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	for (id = mhi_drv->id_table; id->chan[0]; id++)
		if (!strcmp(mhi_dev->name, id->chan)) {
			mhi_dev->id = id;
			return 1;
		}

	return 0;
}

struct bus_type mhi_bus_type = {
	.name = "mhi",
	.dev_name = "mhi",
	.match = mhi_match,
	.uevent = mhi_uevent,
	.dev_groups = mhi_dev_groups,
};

static int __init mhi_init(void)
{
	mhi_debugfs_init();
	return bus_register(&mhi_bus_type);
}

static void __exit mhi_exit(void)
{
	mhi_debugfs_exit();
	bus_unregister(&mhi_bus_type);
}

postcore_initcall(mhi_init);
module_exit(mhi_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MHI Host Interface");