// SPDX-License-Identifier: GPL-2.0
/*
 * MHI Endpoint bus stack
 *
 * Copyright (C) 2022 Linaro Ltd.
 * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
 */

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-direction.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mhi_ep.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include "internal.h"

#define M0_WAIT_DELAY_MS	100
#define M0_WAIT_COUNT		100

static DEFINE_IDA(mhi_ep_cntrl_ida);

static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id);
static int mhi_ep_destroy_device(struct device *dev, void *data);

static int mhi_ep_send_event(struct mhi_ep_cntrl *mhi_cntrl, u32 ring_idx,
			     struct mhi_ring_element *el, bool bei)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	union mhi_ep_ring_ctx *ctx;
	struct mhi_ep_ring *ring;
	int ret;

	mutex_lock(&mhi_cntrl->event_lock);
	ring = &mhi_cntrl->mhi_event[ring_idx].ring;
	ctx = (union mhi_ep_ring_ctx *)&mhi_cntrl->ev_ctx_cache[ring_idx];
	if (!ring->started) {
		ret = mhi_ep_ring_start(mhi_cntrl, ring, ctx);
		if (ret) {
			dev_err(dev, "Error starting event ring (%u)\n", ring_idx);
			goto err_unlock;
		}
	}

	/* Add element to the event ring */
	ret = mhi_ep_ring_add_element(ring, el);
	if (ret) {
		dev_err(dev, "Error adding element to event ring (%u)\n", ring_idx);
		goto err_unlock;
	}

	mutex_unlock(&mhi_cntrl->event_lock);

	/*
	 * Raise IRQ to host only if the BEI flag is not set in TRE. Host might
	 * set this flag for interrupt moderation as per MHI protocol.
	 */
	if (!bei)
		mhi_cntrl->raise_irq(mhi_cntrl, ring->irq_vector);

	return 0;

err_unlock:
	mutex_unlock(&mhi_cntrl->event_lock);

	return ret;
}

static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
					struct mhi_ring_element *tre, u32 len, enum mhi_ev_ccs code)
{
	struct mhi_ring_element *event;
	int ret;

	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL);
	if (!event)
		return -ENOMEM;

	event->ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(*tre));
	event->dword[0] = MHI_TRE_EV_DWORD0(code, len);
	event->dword[1] = MHI_TRE_EV_DWORD1(ring->ch_id, MHI_PKT_TYPE_TX_EVENT);

	ret = mhi_ep_send_event(mhi_cntrl, ring->er_index, event, MHI_TRE_DATA_GET_BEI(tre));
	kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);

	return ret;
}

int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state)
{
	struct mhi_ring_element *event;
	int ret;

	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL);
	if (!event)
		return -ENOMEM;

	event->dword[0] = MHI_SC_EV_DWORD0(state);
	event->dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_STATE_CHANGE_EVENT);

	ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0);
	kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);

	return ret;
}

int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env)
{
	struct mhi_ring_element *event;
	int ret;

	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL);
	if (!event)
		return -ENOMEM;

	event->dword[0] = MHI_EE_EV_DWORD0(exec_env);
	event->dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_EE_EVENT);

	ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0);
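	/*
	 * The element has already been copied into the host's event ring by
	 * mhi_ep_send_event() (or the send failed), so the local copy can be
	 * freed in either case.
	 */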
	kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);

	return ret;
}

static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ev_ccs code)
{
	struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring;
	struct mhi_ring_element *event;
	int ret;

	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL);
	if (!event)
		return -ENOMEM;

	event->ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(struct mhi_ring_element));
	event->dword[0] = MHI_CC_EV_DWORD0(code);
	event->dword[1] = MHI_CC_EV_DWORD1(MHI_PKT_TYPE_CMD_COMPLETION_EVENT);

	ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0);
	kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);

	return ret;
}

static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
{
	struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_result result = {};
	struct mhi_ep_chan *mhi_chan;
	struct mhi_ep_ring *ch_ring;
	u32 tmp, ch_id;
	int ret;

	ch_id = MHI_TRE_GET_CMD_CHID(el);

	/* Check if the channel is supported by the controller */
	if ((ch_id >= mhi_cntrl->max_chan) || !mhi_cntrl->mhi_chan[ch_id].name) {
		dev_dbg(dev, "Channel (%u) not supported!\n", ch_id);
		return -ENODEV;
	}

	mhi_chan = &mhi_cntrl->mhi_chan[ch_id];
	ch_ring = &mhi_cntrl->mhi_chan[ch_id].ring;

	switch (MHI_TRE_GET_CMD_TYPE(el)) {
	case MHI_PKT_TYPE_START_CHAN_CMD:
		dev_dbg(dev, "Received START command for channel (%u)\n", ch_id);

		mutex_lock(&mhi_chan->lock);
		/* Initialize and configure the corresponding channel ring */
		if (!ch_ring->started) {
			ret = mhi_ep_ring_start(mhi_cntrl, ch_ring,
				(union mhi_ep_ring_ctx *)&mhi_cntrl->ch_ctx_cache[ch_id]);
			if (ret) {
				dev_err(dev, "Failed to start ring for channel (%u)\n", ch_id);
				ret = mhi_ep_send_cmd_comp_event(mhi_cntrl,
								 MHI_EV_CC_UNDEFINED_ERR);
				if (ret)
					dev_err(dev, "Error sending completion event: %d\n", ret);

				goto err_unlock;
			}

			mhi_chan->rd_offset = ch_ring->rd_offset;
		}

		/* Set channel state to RUNNING */
		mhi_chan->state = MHI_CH_STATE_RUNNING;
		tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING);
		mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);

		ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
		if (ret) {
			dev_err(dev, "Error sending command completion event (%u)\n",
				MHI_EV_CC_SUCCESS);
			goto err_unlock;
		}

		mutex_unlock(&mhi_chan->lock);

		/*
		 * Create MHI device only during UL channel start. Since the MHI
		 * channels operate in a pair, we'll associate both UL and DL
		 * channels with the same device.
		 *
		 * We also need to check for mhi_dev != NULL because the host
		 * will issue the START_CHAN command during resume and we don't
		 * destroy the device during suspend.
		 */
		if (!(ch_id % 2) && !mhi_chan->mhi_dev) {
			ret = mhi_ep_create_device(mhi_cntrl, ch_id);
			if (ret) {
				dev_err(dev, "Error creating device for channel (%u)\n", ch_id);
				mhi_ep_handle_syserr(mhi_cntrl);
				return ret;
			}
		}

		/* Finally, enable DB for the channel */
		mhi_ep_mmio_enable_chdb(mhi_cntrl, ch_id);

		break;
	case MHI_PKT_TYPE_STOP_CHAN_CMD:
		dev_dbg(dev, "Received STOP command for channel (%u)\n", ch_id);
		if (!ch_ring->started) {
			dev_err(dev, "Channel (%u) not opened\n", ch_id);
			return -ENODEV;
		}

		mutex_lock(&mhi_chan->lock);
		/* Disable DB for the channel */
		mhi_ep_mmio_disable_chdb(mhi_cntrl, ch_id);

		/* Send channel disconnect status to client drivers */
		if (mhi_chan->xfer_cb) {
			result.transaction_status = -ENOTCONN;
			result.bytes_xferd = 0;
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
		}

		/* Set channel state to STOP */
		mhi_chan->state = MHI_CH_STATE_STOP;
		tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_STOP);
		mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);

		ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
		if (ret) {
			dev_err(dev, "Error sending command completion event (%u)\n",
				MHI_EV_CC_SUCCESS);
			goto err_unlock;
		}

		mutex_unlock(&mhi_chan->lock);
		break;
	case MHI_PKT_TYPE_RESET_CHAN_CMD:
		dev_dbg(dev, "Received RESET command for channel (%u)\n", ch_id);
		if (!ch_ring->started) {
			dev_err(dev, "Channel (%u) not opened\n", ch_id);
			return -ENODEV;
		}

		mutex_lock(&mhi_chan->lock);
		/* Stop and reset the transfer ring */
		mhi_ep_ring_reset(mhi_cntrl, ch_ring);

		/* Send channel disconnect status to client driver */
		if (mhi_chan->xfer_cb) {
			result.transaction_status = -ENOTCONN;
			result.bytes_xferd = 0;
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
		}

		/* Set channel state to DISABLED */
		mhi_chan->state = MHI_CH_STATE_DISABLED;
		tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED);
		mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);

		ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
		if (ret) {
			dev_err(dev, "Error sending command completion event (%u)\n",
				MHI_EV_CC_SUCCESS);
			goto err_unlock;
		}

		mutex_unlock(&mhi_chan->lock);
		break;
	default:
		dev_err(dev, "Invalid command received: %lu for channel (%u)\n",
			MHI_TRE_GET_CMD_TYPE(el), ch_id);
		return -EINVAL;
	}

	return 0;

err_unlock:
	mutex_unlock(&mhi_chan->lock);

	return ret;
}

bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir)
{
	struct mhi_ep_chan *mhi_chan = (dir == DMA_FROM_DEVICE) ?
					mhi_dev->dl_chan : mhi_dev->ul_chan;
	struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;

	return !!(mhi_chan->rd_offset == ring->wr_offset);
}
EXPORT_SYMBOL_GPL(mhi_ep_queue_is_empty);

static void mhi_ep_read_completion(struct mhi_ep_buf_info *buf_info)
{
	struct mhi_ep_device *mhi_dev = buf_info->mhi_dev;
	struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_ep_chan *mhi_chan = mhi_dev->ul_chan;
	struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
	struct mhi_ring_element *el = &ring->ring_cache[ring->rd_offset];
	struct mhi_result result = {};
	int ret;

	if (mhi_chan->xfer_cb) {
		result.buf_addr = buf_info->cb_buf;
		result.dir = mhi_chan->dir;
		result.bytes_xferd = buf_info->size;

		mhi_chan->xfer_cb(mhi_dev, &result);
	}

	/*
	 * The host will split the data packet into multiple TREs if it can't fit
	 * the packet in a single TRE. In that case, CHAIN flag will be set by the
	 * host for all TREs except the last one.
	 */
	if (buf_info->code != MHI_EV_CC_OVERFLOW) {
		if (MHI_TRE_DATA_GET_CHAIN(el)) {
			/*
			 * IEOB (Interrupt on End of Block) flag will be set by the host if
			 * it expects the completion event for all TREs of a TD.
			 */
			if (MHI_TRE_DATA_GET_IEOB(el)) {
				ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
								   MHI_TRE_DATA_GET_LEN(el),
								   MHI_EV_CC_EOB);
				if (ret < 0) {
					dev_err(&mhi_chan->mhi_dev->dev,
						"Error sending transfer compl. event\n");
					goto err_free_tre_buf;
				}
			}
		} else {
			/*
			 * IEOT (Interrupt on End of Transfer) flag will be set by the host
			 * for the last TRE of the TD and expects the completion event for
			 * the same.
			 */
			if (MHI_TRE_DATA_GET_IEOT(el)) {
				ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
								   MHI_TRE_DATA_GET_LEN(el),
								   MHI_EV_CC_EOT);
				if (ret < 0) {
					dev_err(&mhi_chan->mhi_dev->dev,
						"Error sending transfer compl. event\n");
					goto err_free_tre_buf;
				}
			}
		}
	}

	mhi_ep_ring_inc_index(ring);

err_free_tre_buf:
	kmem_cache_free(mhi_cntrl->tre_buf_cache, buf_info->cb_buf);
}

static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
			       struct mhi_ep_ring *ring)
{
	struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	size_t tr_len, read_offset, write_offset;
	struct mhi_ep_buf_info buf_info = {};
	u32 len = MHI_EP_DEFAULT_MTU;
	struct mhi_ring_element *el;
	bool tr_done = false;
	void *buf_addr;
	u32 buf_left;
	int ret;

	buf_left = len;

	do {
		/* Don't process the transfer ring if the channel is not in RUNNING state */
		if (mhi_chan->state != MHI_CH_STATE_RUNNING) {
			dev_err(dev, "Channel not available\n");
			return -ENODEV;
		}

		el = &ring->ring_cache[mhi_chan->rd_offset];

		/* Check if there is data pending to be read from previous read operation */
		if (mhi_chan->tre_bytes_left) {
			dev_dbg(dev, "TRE bytes remaining: %u\n", mhi_chan->tre_bytes_left);
			tr_len = min(buf_left, mhi_chan->tre_bytes_left);
		} else {
			mhi_chan->tre_loc = MHI_TRE_DATA_GET_PTR(el);
			mhi_chan->tre_size = MHI_TRE_DATA_GET_LEN(el);
			mhi_chan->tre_bytes_left = mhi_chan->tre_size;

			tr_len = min(buf_left, mhi_chan->tre_size);
		}

		read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left;
		write_offset = len - buf_left;

		buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL);
		if (!buf_addr)
			return -ENOMEM;

		buf_info.host_addr = mhi_chan->tre_loc + read_offset;
		buf_info.dev_addr = buf_addr + write_offset;
		buf_info.size = tr_len;
		buf_info.cb = mhi_ep_read_completion;
		buf_info.cb_buf = buf_addr;
		buf_info.mhi_dev = mhi_chan->mhi_dev;

		if (mhi_chan->tre_bytes_left - tr_len)
			buf_info.code = MHI_EV_CC_OVERFLOW;

		dev_dbg(dev, "Reading %zd bytes from channel (%u)\n", tr_len, ring->ch_id);
		ret = mhi_cntrl->read_async(mhi_cntrl, &buf_info);
		if (ret < 0) {
			dev_err(&mhi_chan->mhi_dev->dev, "Error reading from channel\n");
			goto err_free_buf_addr;
		}

		buf_left -= tr_len;
		mhi_chan->tre_bytes_left -= tr_len;

		if (!mhi_chan->tre_bytes_left) {
			if (MHI_TRE_DATA_GET_IEOT(el))
				tr_done = true;

			mhi_chan->rd_offset = (mhi_chan->rd_offset + 1) % ring->ring_size;
		}
	} while (buf_left && !tr_done);

	return 0;

err_free_buf_addr:
	kmem_cache_free(mhi_cntrl->tre_buf_cache, buf_addr);

	return ret;
}

static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring)
{
	struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
	struct mhi_result result = {};
	struct mhi_ep_chan *mhi_chan;
	int ret;

	mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];

	/*
	 * Bail out if the transfer callback is not registered for the channel.
	 * This is most likely due to the client driver not being loaded at this point.
	 */
	if (!mhi_chan->xfer_cb) {
		dev_err(&mhi_chan->mhi_dev->dev, "Client driver not available\n");
		return -ENODEV;
	}

	if (ring->ch_id % 2) {
		/* DL channel */
		result.dir = mhi_chan->dir;
		mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
	} else {
		/* UL channel */
		do {
			ret = mhi_ep_read_channel(mhi_cntrl, ring);
			if (ret < 0) {
				dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n");
				return ret;
			}

			/* Read until the ring becomes empty */
		} while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE));
	}

	return 0;
}

static void mhi_ep_skb_completion(struct mhi_ep_buf_info *buf_info)
{
	struct mhi_ep_device *mhi_dev = buf_info->mhi_dev;
	struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan;
	struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
	struct mhi_ring_element *el = &ring->ring_cache[ring->rd_offset];
	struct device *dev = &mhi_dev->dev;
	struct mhi_result result = {};
	int ret;

	if (mhi_chan->xfer_cb) {
		result.buf_addr = buf_info->cb_buf;
		result.dir = mhi_chan->dir;
		result.bytes_xferd = buf_info->size;

		mhi_chan->xfer_cb(mhi_dev, &result);
	}

	ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, buf_info->size,
					   buf_info->code);
	if (ret) {
		dev_err(dev, "Error sending transfer completion event\n");
		return;
	}

	mhi_ep_ring_inc_index(ring);
}

/* TODO: Handle partially formed TDs */
int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
{
	struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan;
	struct device *dev = &mhi_chan->mhi_dev->dev;
	struct mhi_ep_buf_info buf_info = {};
	struct mhi_ring_element *el;
	u32 buf_left, read_offset;
	struct mhi_ep_ring *ring;
	size_t tr_len;
	u32 tre_len;
	int ret;

	buf_left = skb->len;
	ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;

	mutex_lock(&mhi_chan->lock);

	do {
		/* Don't process the transfer ring if the channel is not in RUNNING state */
		if (mhi_chan->state != MHI_CH_STATE_RUNNING) {
			dev_err(dev, "Channel not available\n");
			ret = -ENODEV;
			goto err_exit;
		}

		if (mhi_ep_queue_is_empty(mhi_dev, DMA_FROM_DEVICE)) {
			dev_err(dev, "TRE not available!\n");
			ret = -ENOSPC;
			goto err_exit;
		}

		el = &ring->ring_cache[mhi_chan->rd_offset];
		tre_len = MHI_TRE_DATA_GET_LEN(el);

		tr_len = min(buf_left, tre_len);
		read_offset = skb->len - buf_left;

		buf_info.dev_addr = skb->data + read_offset;
		buf_info.host_addr = MHI_TRE_DATA_GET_PTR(el);
		buf_info.size = tr_len;
		buf_info.cb = mhi_ep_skb_completion;
		buf_info.cb_buf = skb;
		buf_info.mhi_dev = mhi_dev;

		/*
		 * For all TREs queued by the host for DL channel, only the EOT flag will be set.
		 * If the packet doesn't fit into a single TRE, send the OVERFLOW event to
		 * the host so that the host can adjust the packet boundary to next TREs. Else send
		 * the EOT event to the host indicating the packet boundary.
		 */
		if (buf_left - tr_len)
			buf_info.code = MHI_EV_CC_OVERFLOW;
		else
			buf_info.code = MHI_EV_CC_EOT;

		dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id);
		ret = mhi_cntrl->write_async(mhi_cntrl, &buf_info);
		if (ret < 0) {
			dev_err(dev, "Error writing to the channel\n");
			goto err_exit;
		}

		buf_left -= tr_len;

		/*
		 * Update the read offset cached in mhi_chan. Actual read offset
		 * will be updated by the completion handler.
		 */
		mhi_chan->rd_offset = (mhi_chan->rd_offset + 1) % ring->ring_size;
	} while (buf_left);

	mutex_unlock(&mhi_chan->lock);

	return 0;

err_exit:
	mutex_unlock(&mhi_chan->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_ep_queue_skb);

static int mhi_ep_cache_host_cfg(struct mhi_ep_cntrl *mhi_cntrl)
{
	size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret;

	/* Update the number of event rings (NER) programmed by the host */
	mhi_ep_mmio_update_ner(mhi_cntrl);

	dev_dbg(dev, "Number of Event rings: %u, HW Event rings: %u\n",
		mhi_cntrl->event_rings, mhi_cntrl->hw_event_rings);

	ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan;
	ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings;
	cmd_ctx_host_size = sizeof(struct mhi_cmd_ctxt) * NR_OF_CMD_RINGS;

	/* Get the channel context base pointer from host */
	mhi_ep_mmio_get_chc_base(mhi_cntrl);

	/* Allocate and map memory for caching host channel context */
	ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa,
				   &mhi_cntrl->ch_ctx_cache_phys,
				   (void __iomem **) &mhi_cntrl->ch_ctx_cache,
				   ch_ctx_host_size);
	if (ret) {
		dev_err(dev, "Failed to allocate and map ch_ctx_cache\n");
		return ret;
	}

	/* Get the event context base pointer from host */
	mhi_ep_mmio_get_erc_base(mhi_cntrl);

	/* Allocate and map memory for caching host event context */
	ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa,
				   &mhi_cntrl->ev_ctx_cache_phys,
				   (void __iomem **) &mhi_cntrl->ev_ctx_cache,
				   ev_ctx_host_size);
	if (ret) {
		dev_err(dev, "Failed to allocate and map ev_ctx_cache\n");
		goto err_ch_ctx;
	}

	/* Get the command context base pointer from host */
	mhi_ep_mmio_get_crc_base(mhi_cntrl);

	/* Allocate and map memory for caching host command context */
	ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa,
				   &mhi_cntrl->cmd_ctx_cache_phys,
				   (void __iomem **) &mhi_cntrl->cmd_ctx_cache,
				   cmd_ctx_host_size);
	if (ret) {
		dev_err(dev, "Failed to allocate and map cmd_ctx_cache\n");
		goto err_ev_ctx;
	}

	/* Initialize command ring */
	ret = mhi_ep_ring_start(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring,
				(union mhi_ep_ring_ctx *)mhi_cntrl->cmd_ctx_cache);
	if (ret) {
		dev_err(dev, "Failed to start the command ring\n");
		goto err_cmd_ctx;
	}

	return ret;

err_cmd_ctx:
	mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys,
			      (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size);

err_ev_ctx:
	mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys,
			      (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size);

err_ch_ctx:
	mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa,
			      mhi_cntrl->ch_ctx_cache_phys,
			      (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size);

	return ret;
}

static void mhi_ep_free_host_cfg(struct mhi_ep_cntrl *mhi_cntrl)
{
	size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size;

	ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan;
	ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings;
	cmd_ctx_host_size = sizeof(struct mhi_cmd_ctxt) * NR_OF_CMD_RINGS;

	mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys,
			      (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size);

	mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys,
			      (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size);

	mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys,
			      (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size);
}

static void mhi_ep_enable_int(struct mhi_ep_cntrl *mhi_cntrl)
{
	/*
	 * Doorbell interrupts are enabled when the corresponding channel gets started.
	 * Enabling all interrupts here triggers spurious irqs as some of the interrupts
	 * associated with hw channels always get triggered.
	 */
	mhi_ep_mmio_enable_ctrl_interrupt(mhi_cntrl);
	mhi_ep_mmio_enable_cmdb_interrupt(mhi_cntrl);
}

static int mhi_ep_enable(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_state state;
	bool mhi_reset;
	u32 count = 0;
	int ret;

	/* Wait for Host to set the M0 state */
	do {
		msleep(M0_WAIT_DELAY_MS);
		mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset);
		if (mhi_reset) {
			/* Clear the MHI reset if host is in reset state */
			mhi_ep_mmio_clear_reset(mhi_cntrl);
			dev_info(dev, "Detected Host reset while waiting for M0\n");
		}
		count++;
	} while (state != MHI_STATE_M0 && count < M0_WAIT_COUNT);

	if (state != MHI_STATE_M0) {
		dev_err(dev, "Host failed to enter M0\n");
		return -ETIMEDOUT;
	}

	ret = mhi_ep_cache_host_cfg(mhi_cntrl);
	if (ret) {
		dev_err(dev, "Failed to cache host config\n");
		return ret;
	}

	mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);

	/* Enable all interrupts now */
	mhi_ep_enable_int(mhi_cntrl);

	return 0;
}

static void mhi_ep_cmd_ring_worker(struct work_struct *work)
{
	struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, cmd_ring_work);
	struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_ring_element *el;
	int ret;

	/* Update the write offset for the ring */
	ret = mhi_ep_update_wr_offset(ring);
	if (ret) {
		dev_err(dev, "Error updating write offset for ring\n");
		return;
	}

	/* Sanity check to make sure there are elements in the ring */
	if (ring->rd_offset == ring->wr_offset)
		return;

	/*
	 * Process command ring elements till the write offset. In case of an
	 * error, just try to process the next element.
	 */
	while (ring->rd_offset != ring->wr_offset) {
		el = &ring->ring_cache[ring->rd_offset];

		ret = mhi_ep_process_cmd_ring(ring, el);
		if (ret && ret != -ENODEV)
			dev_err(dev, "Error processing cmd ring element: %zu\n", ring->rd_offset);

		mhi_ep_ring_inc_index(ring);
	}
}

static void mhi_ep_ch_ring_worker(struct work_struct *work)
{
	struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, ch_ring_work);
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_ep_ring_item *itr, *tmp;
	struct mhi_ep_ring *ring;
	struct mhi_ep_chan *chan;
	unsigned long flags;
	LIST_HEAD(head);
	int ret;

	spin_lock_irqsave(&mhi_cntrl->list_lock, flags);
	list_splice_tail_init(&mhi_cntrl->ch_db_list, &head);
	spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags);

	/* Process each queued channel ring. In case of an error, just process the next element. */
	list_for_each_entry_safe(itr, tmp, &head, node) {
		list_del(&itr->node);
		ring = itr->ring;

		chan = &mhi_cntrl->mhi_chan[ring->ch_id];
		mutex_lock(&chan->lock);

		/*
		 * The ring could've stopped while we waited to grab the chan->lock,
		 * so do a sanity check before going further.
		 */
		if (!ring->started) {
			mutex_unlock(&chan->lock);
			kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
			continue;
		}

		/* Update the write offset for the ring */
		ret = mhi_ep_update_wr_offset(ring);
		if (ret) {
			dev_err(dev, "Error updating write offset for ring\n");
			mutex_unlock(&chan->lock);
			kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
			continue;
		}

		/* Sanity check to make sure there are elements in the ring */
		if (chan->rd_offset == ring->wr_offset) {
			mutex_unlock(&chan->lock);
			kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
			continue;
		}

		dev_dbg(dev, "Processing the ring for channel (%u)\n", ring->ch_id);
		ret = mhi_ep_process_ch_ring(ring);
		if (ret) {
			dev_err(dev, "Error processing ring for channel (%u): %d\n",
				ring->ch_id, ret);
			mutex_unlock(&chan->lock);
			kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
			continue;
		}

		mutex_unlock(&chan->lock);
		kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
	}
}

static void mhi_ep_state_worker(struct work_struct *work)
{
	struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, state_work);
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_ep_state_transition *itr, *tmp;
	unsigned long flags;
	LIST_HEAD(head);
	int ret;

	spin_lock_irqsave(&mhi_cntrl->list_lock, flags);
	list_splice_tail_init(&mhi_cntrl->st_transition_list, &head);
	spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags);

	list_for_each_entry_safe(itr, tmp, &head, node) {
		list_del(&itr->node);
		dev_dbg(dev, "Handling MHI state transition to %s\n",
			mhi_state_str(itr->state));

		switch (itr->state) {
		case MHI_STATE_M0:
			ret = mhi_ep_set_m0_state(mhi_cntrl);
			if (ret)
				dev_err(dev, "Failed to transition to M0 state\n");
			break;
		case MHI_STATE_M3:
			ret = mhi_ep_set_m3_state(mhi_cntrl);
			if (ret)
				dev_err(dev, "Failed to transition to M3 state\n");
			break;
		default:
			dev_err(dev, "Invalid MHI state transition: %d\n", itr->state);
			break;
		}
		kfree(itr);
	}
}

static void mhi_ep_queue_channel_db(struct mhi_ep_cntrl *mhi_cntrl, unsigned long ch_int,
				    u32 ch_idx)
{
	struct mhi_ep_ring_item *item;
	struct mhi_ep_ring *ring;
	bool work = !!ch_int;
	LIST_HEAD(head);
	u32 i;

	/* First add the ring items to a local list */
	for_each_set_bit(i, &ch_int, 32) {
		/* Channel index varies for each register: 0, 32, 64, 96 */
		u32 ch_id = ch_idx + i;

		ring = &mhi_cntrl->mhi_chan[ch_id].ring;
		item = kmem_cache_zalloc(mhi_cntrl->ring_item_cache, GFP_ATOMIC);
		if (!item)
			return;

		item->ring = ring;
		list_add_tail(&item->node, &head);
	}

	/* Now, splice the local list into ch_db_list and queue the work item */
	if (work) {
		spin_lock(&mhi_cntrl->list_lock);
		list_splice_tail_init(&head, &mhi_cntrl->ch_db_list);
		spin_unlock(&mhi_cntrl->list_lock);

		queue_work(mhi_cntrl->wq, &mhi_cntrl->ch_ring_work);
	}
}

/*
 * Channel interrupt statuses are contained in four 32-bit registers. To check
 * all interrupts, we need to loop through each register and then check for
 * the bits set.
 */
static void mhi_ep_check_channel_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
{
	u32 ch_int, ch_idx, i;

	/* Bail out if there is no channel doorbell interrupt */
	if (!mhi_ep_mmio_read_chdb_status_interrupts(mhi_cntrl))
		return;

	for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) {
		ch_idx = i * MHI_MASK_CH_LEN;

		/* Only process channel interrupt if the mask is enabled */
		ch_int = mhi_cntrl->chdb[i].status & mhi_cntrl->chdb[i].mask;
		if (ch_int) {
			mhi_ep_queue_channel_db(mhi_cntrl, ch_int, ch_idx);
			mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_CLEAR_n(i),
					  mhi_cntrl->chdb[i].status);
		}
	}
}

static void mhi_ep_process_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl,
					  enum mhi_state state)
{
	struct mhi_ep_state_transition *item;

	item = kzalloc(sizeof(*item), GFP_ATOMIC);
	if (!item)
		return;

	item->state = state;
	spin_lock(&mhi_cntrl->list_lock);
	list_add_tail(&item->node, &mhi_cntrl->st_transition_list);
	spin_unlock(&mhi_cntrl->list_lock);

	queue_work(mhi_cntrl->wq, &mhi_cntrl->state_work);
}

/*
 * Interrupt handler that services interrupts raised by the host writing to
 * MHICTRL and Command ring doorbell (CRDB) registers for state change and
 * channel interrupts.
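 *
 * The handler runs in hard IRQ context, so the actual ring and state
 * processing is deferred to the controller's workqueue.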
 */
static irqreturn_t mhi_ep_irq(int irq, void *data)
{
	struct mhi_ep_cntrl *mhi_cntrl = data;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_state state;
	u32 int_value;
	bool mhi_reset;

	/* Acknowledge the ctrl interrupt */
	int_value = mhi_ep_mmio_read(mhi_cntrl, MHI_CTRL_INT_STATUS);
	mhi_ep_mmio_write(mhi_cntrl, MHI_CTRL_INT_CLEAR, int_value);

	/* Check for ctrl interrupt */
	if (FIELD_GET(MHI_CTRL_INT_STATUS_MSK, int_value)) {
		dev_dbg(dev, "Processing ctrl interrupt\n");
		mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset);
		if (mhi_reset) {
			dev_info(dev, "Host triggered MHI reset!\n");
			disable_irq_nosync(mhi_cntrl->irq);
			schedule_work(&mhi_cntrl->reset_work);
			return IRQ_HANDLED;
		}

		mhi_ep_process_ctrl_interrupt(mhi_cntrl, state);
	}

	/* Check for command doorbell interrupt */
	if (FIELD_GET(MHI_CTRL_INT_STATUS_CRDB_MSK, int_value)) {
		dev_dbg(dev, "Processing command doorbell interrupt\n");
		queue_work(mhi_cntrl->wq, &mhi_cntrl->cmd_ring_work);
	}

	/* Check for channel interrupts */
	mhi_ep_check_channel_interrupt(mhi_cntrl);

	return IRQ_HANDLED;
}

static void mhi_ep_abort_transfer(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct mhi_ep_ring *ch_ring, *ev_ring;
	struct mhi_result result = {};
	struct mhi_ep_chan *mhi_chan;
	int i;

	/* Stop all the channels */
	for (i = 0; i < mhi_cntrl->max_chan; i++) {
		mhi_chan = &mhi_cntrl->mhi_chan[i];
		if (!mhi_chan->ring.started)
			continue;

		mutex_lock(&mhi_chan->lock);
		/* Send channel disconnect status to client drivers */
		if (mhi_chan->xfer_cb) {
			result.transaction_status = -ENOTCONN;
			result.bytes_xferd = 0;
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
		}

		mhi_chan->state = MHI_CH_STATE_DISABLED;
		mutex_unlock(&mhi_chan->lock);
	}

	flush_workqueue(mhi_cntrl->wq);

	/* Destroy devices associated with all channels */
	device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_ep_destroy_device);

	/* Stop and reset the transfer rings */
	for (i = 0; i < mhi_cntrl->max_chan; i++) {
		mhi_chan = &mhi_cntrl->mhi_chan[i];
		if (!mhi_chan->ring.started)
			continue;

		ch_ring = &mhi_cntrl->mhi_chan[i].ring;
		mutex_lock(&mhi_chan->lock);
		mhi_ep_ring_reset(mhi_cntrl, ch_ring);
		mutex_unlock(&mhi_chan->lock);
	}

	/* Stop and reset the event rings */
	for (i = 0; i < mhi_cntrl->event_rings; i++) {
		ev_ring = &mhi_cntrl->mhi_event[i].ring;
		if (!ev_ring->started)
			continue;

		mutex_lock(&mhi_cntrl->event_lock);
		mhi_ep_ring_reset(mhi_cntrl, ev_ring);
		mutex_unlock(&mhi_cntrl->event_lock);
	}

	/* Stop and reset the command ring */
	mhi_ep_ring_reset(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring);

	mhi_ep_free_host_cfg(mhi_cntrl);
	mhi_ep_mmio_mask_interrupts(mhi_cntrl);

	mhi_cntrl->enabled = false;
}

static void mhi_ep_reset_worker(struct work_struct *work)
{
	struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, reset_work);
	enum mhi_state cur_state;

	mhi_ep_power_down(mhi_cntrl);

	mutex_lock(&mhi_cntrl->state_lock);

	/* Reset MMIO to signal host that the MHI_RESET is completed in endpoint */
	mhi_ep_mmio_reset(mhi_cntrl);
	cur_state = mhi_cntrl->mhi_state;

	/*
	 * Only proceed further if the reset is due to SYS_ERR. The host will
	 * also issue reset during shutdown and we don't need to do re-init in
	 * that case.
	 */
	if (cur_state == MHI_STATE_SYS_ERR)
		mhi_ep_power_up(mhi_cntrl);

	mutex_unlock(&mhi_cntrl->state_lock);
}

/*
 * We don't need to do anything special other than setting the MHI SYS_ERR
 * state. The host will reset all contexts and issue MHI RESET so that we
 * can also recover from the error state.
 */
void mhi_ep_handle_syserr(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret;

	ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
	if (ret)
		return;

	/* Signal host that the device went to SYS_ERR state */
	ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_SYS_ERR);
	if (ret)
		dev_err(dev, "Failed sending SYS_ERR state change event: %d\n", ret);
}

int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret, i;

	/*
	 * Mask all interrupts until the state machine is ready. Interrupts will
	 * be enabled later with mhi_ep_enable().
	 */
	mhi_ep_mmio_mask_interrupts(mhi_cntrl);
	mhi_ep_mmio_init(mhi_cntrl);

	mhi_cntrl->mhi_event = kzalloc(mhi_cntrl->event_rings * (sizeof(*mhi_cntrl->mhi_event)),
				       GFP_KERNEL);
	if (!mhi_cntrl->mhi_event)
		return -ENOMEM;

	/* Initialize command, channel and event rings */
	mhi_ep_ring_init(&mhi_cntrl->mhi_cmd->ring, RING_TYPE_CMD, 0);
	for (i = 0; i < mhi_cntrl->max_chan; i++)
		mhi_ep_ring_init(&mhi_cntrl->mhi_chan[i].ring, RING_TYPE_CH, i);
	for (i = 0; i < mhi_cntrl->event_rings; i++)
		mhi_ep_ring_init(&mhi_cntrl->mhi_event[i].ring, RING_TYPE_ER, i);

	mhi_cntrl->mhi_state = MHI_STATE_RESET;

	/* Set AMSS EE before signaling ready state */
	mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);

	/* All set, notify the host that we are ready */
	ret = mhi_ep_set_ready_state(mhi_cntrl);
	if (ret)
		goto err_free_event;

	dev_dbg(dev, "READY state notification sent to the host\n");

	ret = mhi_ep_enable(mhi_cntrl);
	if (ret) {
		dev_err(dev, "Failed to enable MHI endpoint\n");
		goto err_free_event;
	}

	enable_irq(mhi_cntrl->irq);
	mhi_cntrl->enabled = true;

	return 0;

err_free_event:
	kfree(mhi_cntrl->mhi_event);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_ep_power_up);

void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl)
{
	if (mhi_cntrl->enabled) {
		mhi_ep_abort_transfer(mhi_cntrl);
		kfree(mhi_cntrl->mhi_event);
		disable_irq(mhi_cntrl->irq);
	}
}
EXPORT_SYMBOL_GPL(mhi_ep_power_down);

void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct mhi_ep_chan *mhi_chan;
	u32 tmp;
	int i;

	for (i = 0; i < mhi_cntrl->max_chan; i++) {
		mhi_chan = &mhi_cntrl->mhi_chan[i];

		if (!mhi_chan->mhi_dev)
			continue;

		mutex_lock(&mhi_chan->lock);
		/* Skip if the channel is not currently running */
		tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg);
		if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_RUNNING) {
			mutex_unlock(&mhi_chan->lock);
			continue;
		}

		dev_dbg(&mhi_chan->mhi_dev->dev, "Suspending channel\n");
		/* Set channel state to SUSPENDED */
		mhi_chan->state = MHI_CH_STATE_SUSPENDED;
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_SUSPENDED);
		mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp);
		mutex_unlock(&mhi_chan->lock);
	}
}

void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct mhi_ep_chan *mhi_chan;
	u32 tmp;
	int i;

	for (i = 0; i < mhi_cntrl->max_chan; i++) {
		mhi_chan = &mhi_cntrl->mhi_chan[i];

		if (!mhi_chan->mhi_dev)
			continue;

		mutex_lock(&mhi_chan->lock);
		/* Skip if the channel is not currently suspended */
		tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg);
		if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_SUSPENDED) {
			mutex_unlock(&mhi_chan->lock);
			continue;
		}

		dev_dbg(&mhi_chan->mhi_dev->dev, "Resuming channel\n");
		/* Set channel state to RUNNING */
		mhi_chan->state = MHI_CH_STATE_RUNNING;
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING);
		mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp);
		mutex_unlock(&mhi_chan->lock);
	}
}

static void mhi_ep_release_device(struct device *dev)
{
	struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);

	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		mhi_dev->mhi_cntrl->mhi_dev = NULL;

	/*
	 * We need to set the mhi_chan->mhi_dev to NULL here since the MHI
	 * devices for the channels will only get created in mhi_ep_create_device()
	 * if the mhi_dev associated with it is NULL.
	 */
	if (mhi_dev->ul_chan)
		mhi_dev->ul_chan->mhi_dev = NULL;

	if (mhi_dev->dl_chan)
		mhi_dev->dl_chan->mhi_dev = NULL;

	kfree(mhi_dev);
}

static struct mhi_ep_device *mhi_ep_alloc_device(struct mhi_ep_cntrl *mhi_cntrl,
						 enum mhi_device_type dev_type)
{
	struct mhi_ep_device *mhi_dev;
	struct device *dev;

	mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL);
	if (!mhi_dev)
		return ERR_PTR(-ENOMEM);

	dev = &mhi_dev->dev;
	device_initialize(dev);
	dev->bus = &mhi_ep_bus_type;
	dev->release = mhi_ep_release_device;

	/* Controller device is always allocated first */
	if (dev_type == MHI_DEVICE_CONTROLLER)
		/* for MHI controller device, parent is the bus device (e.g. PCI EPF) */
		dev->parent = mhi_cntrl->cntrl_dev;
	else
		/* for MHI client devices, parent is the MHI controller device */
		dev->parent = &mhi_cntrl->mhi_dev->dev;

	mhi_dev->mhi_cntrl = mhi_cntrl;
	mhi_dev->dev_type = dev_type;

	return mhi_dev;
}

/*
 * MHI channels are always defined in pairs with UL as the even numbered
 * channel and DL as the odd numbered one. This function gets the UL channel
 * (primary) as the ch_id and always looks at the next entry in the channel
 * list for the corresponding DL channel (secondary).
 */
static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id)
{
	struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ch_id];
	struct device *dev = mhi_cntrl->cntrl_dev;
	struct mhi_ep_device *mhi_dev;
	int ret;

	/* Check if the channel name is the same for both UL and DL */
	if (strcmp(mhi_chan->name, mhi_chan[1].name)) {
		dev_err(dev, "UL and DL channel names are not same: (%s) != (%s)\n",
			mhi_chan->name, mhi_chan[1].name);
		return -EINVAL;
	}

	mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_XFER);
	if (IS_ERR(mhi_dev))
		return PTR_ERR(mhi_dev);

	/* Configure primary channel */
	mhi_dev->ul_chan = mhi_chan;
	get_device(&mhi_dev->dev);
	mhi_chan->mhi_dev = mhi_dev;

	/* Configure secondary channel as well */
	mhi_chan++;
	mhi_dev->dl_chan = mhi_chan;
	get_device(&mhi_dev->dev);
	mhi_chan->mhi_dev = mhi_dev;

	/* Channel name is same for both UL and DL */
	mhi_dev->name = mhi_chan->name;
	ret = dev_set_name(&mhi_dev->dev, "%s_%s",
			   dev_name(&mhi_cntrl->mhi_dev->dev),
			   mhi_dev->name);
	if (ret) {
		put_device(&mhi_dev->dev);
		return ret;
	}

	ret = device_add(&mhi_dev->dev);
	if (ret)
		put_device(&mhi_dev->dev);

	return ret;
}

static int mhi_ep_destroy_device(struct device *dev, void *data)
{
	struct mhi_ep_device *mhi_dev;
	struct mhi_ep_cntrl *mhi_cntrl;
	struct mhi_ep_chan *ul_chan, *dl_chan;

	if (dev->bus != &mhi_ep_bus_type)
		return 0;

	mhi_dev = to_mhi_ep_device(dev);
	mhi_cntrl = mhi_dev->mhi_cntrl;

	/* Only destroy devices created for channels */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	ul_chan = mhi_dev->ul_chan;
	dl_chan = mhi_dev->dl_chan;

	if (ul_chan)
		put_device(&ul_chan->mhi_dev->dev);

	if (dl_chan)
		put_device(&dl_chan->mhi_dev->dev);

	dev_dbg(&mhi_cntrl->mhi_dev->dev, "Destroying device for chan:%s\n",
		mhi_dev->name);

	/* Notify the client and remove the device from MHI bus */
	device_del(dev);
	put_device(dev);

	return 0;
}

static int mhi_ep_chan_init(struct mhi_ep_cntrl *mhi_cntrl,
			    const struct mhi_ep_cntrl_config *config)
{
	const struct mhi_ep_channel_config *ch_cfg;
	struct device *dev = mhi_cntrl->cntrl_dev;
	u32 chan, i;
	int ret = -EINVAL;

	mhi_cntrl->max_chan = config->max_channels;

	/*
	 * Allocate max_channels supported by the MHI endpoint and populate
	 * only the defined channels
	 */
	mhi_cntrl->mhi_chan = kcalloc(mhi_cntrl->max_chan, sizeof(*mhi_cntrl->mhi_chan),
				      GFP_KERNEL);
	if (!mhi_cntrl->mhi_chan)
		return -ENOMEM;

	for (i = 0; i < config->num_channels; i++) {
		struct mhi_ep_chan *mhi_chan;

		ch_cfg = &config->ch_cfg[i];

		chan = ch_cfg->num;
		if (chan >= mhi_cntrl->max_chan) {
			dev_err(dev, "Channel (%u) exceeds maximum available channels (%u)\n",
				chan, mhi_cntrl->max_chan);
			goto error_chan_cfg;
		}

		/* Bi-directional and directionless channels are not supported */
		if (ch_cfg->dir == DMA_BIDIRECTIONAL || ch_cfg->dir == DMA_NONE) {
			dev_err(dev, "Invalid direction (%u) for channel (%u)\n",
				ch_cfg->dir, chan);
			goto error_chan_cfg;
		}

		mhi_chan = &mhi_cntrl->mhi_chan[chan];
		mhi_chan->name = ch_cfg->name;
		mhi_chan->chan = chan;
		mhi_chan->dir = ch_cfg->dir;
		mutex_init(&mhi_chan->lock);
	}

	return 0;

error_chan_cfg:
	kfree(mhi_cntrl->mhi_chan);

	return ret;
}

/*
 * Allocate channel and command rings here. Event rings will be allocated
 * in mhi_ep_power_up() as the config comes from the host.
 */
int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
			       const struct mhi_ep_cntrl_config *config)
{
	struct mhi_ep_device *mhi_dev;
	int ret;

	if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->mmio || !mhi_cntrl->irq)
		return -EINVAL;

	ret = mhi_ep_chan_init(mhi_cntrl, config);
	if (ret)
		return ret;

	mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS, sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
	if (!mhi_cntrl->mhi_cmd) {
		ret = -ENOMEM;
		goto err_free_ch;
	}

	mhi_cntrl->ev_ring_el_cache = kmem_cache_create("mhi_ep_event_ring_el",
							sizeof(struct mhi_ring_element), 0,
							0, NULL);
	if (!mhi_cntrl->ev_ring_el_cache) {
		ret = -ENOMEM;
		goto err_free_cmd;
	}

	mhi_cntrl->tre_buf_cache = kmem_cache_create("mhi_ep_tre_buf", MHI_EP_DEFAULT_MTU, 0,
						     0, NULL);
	if (!mhi_cntrl->tre_buf_cache) {
		ret = -ENOMEM;
		goto err_destroy_ev_ring_el_cache;
	}

	mhi_cntrl->ring_item_cache = kmem_cache_create("mhi_ep_ring_item",
						       sizeof(struct mhi_ep_ring_item), 0,
						       0, NULL);
	if (!mhi_cntrl->ring_item_cache) {
		ret = -ENOMEM;
		goto err_destroy_tre_buf_cache;
	}

	INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker);
	INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker);
	INIT_WORK(&mhi_cntrl->cmd_ring_work, mhi_ep_cmd_ring_worker);
	INIT_WORK(&mhi_cntrl->ch_ring_work, mhi_ep_ch_ring_worker);

	mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", 0, 0);
	if (!mhi_cntrl->wq) {
		ret = -ENOMEM;
		goto err_destroy_ring_item_cache;
	}

	INIT_LIST_HEAD(&mhi_cntrl->st_transition_list);
	INIT_LIST_HEAD(&mhi_cntrl->ch_db_list);
	spin_lock_init(&mhi_cntrl->list_lock);
	mutex_init(&mhi_cntrl->state_lock);
	mutex_init(&mhi_cntrl->event_lock);

	/* Set MHI version and AMSS EE before enumeration */
	mhi_ep_mmio_write(mhi_cntrl, EP_MHIVER, config->mhi_version);
	mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);

	/* Set controller index */
	ret = ida_alloc(&mhi_ep_cntrl_ida, GFP_KERNEL);
	if (ret < 0)
		goto err_destroy_wq;

	mhi_cntrl->index = ret;

	irq_set_status_flags(mhi_cntrl->irq, IRQ_NOAUTOEN);
	ret = request_irq(mhi_cntrl->irq, mhi_ep_irq, IRQF_TRIGGER_HIGH,
			  "doorbell_irq", mhi_cntrl);
	if (ret) {
		dev_err(mhi_cntrl->cntrl_dev, "Failed to request Doorbell IRQ\n");
		goto err_ida_free;
	}

	/* Allocate the controller device */
	mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_CONTROLLER);
	if (IS_ERR(mhi_dev)) {
		dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate controller device\n");
		ret = PTR_ERR(mhi_dev);
		goto err_free_irq;
	}

	ret = dev_set_name(&mhi_dev->dev, "mhi_ep%u", mhi_cntrl->index);
	if (ret)
		goto err_put_dev;

	mhi_dev->name = dev_name(&mhi_dev->dev);
	mhi_cntrl->mhi_dev = mhi_dev;

	ret = device_add(&mhi_dev->dev);
	if (ret)
		goto err_put_dev;

	dev_dbg(&mhi_dev->dev, "MHI EP Controller registered\n");

	return 0;

err_put_dev:
	put_device(&mhi_dev->dev);
err_free_irq:
	free_irq(mhi_cntrl->irq, mhi_cntrl);
err_ida_free:
	ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index);
err_destroy_wq:
	destroy_workqueue(mhi_cntrl->wq);
err_destroy_ring_item_cache:
	kmem_cache_destroy(mhi_cntrl->ring_item_cache);
err_destroy_tre_buf_cache:
	kmem_cache_destroy(mhi_cntrl->tre_buf_cache);
err_destroy_ev_ring_el_cache:
	kmem_cache_destroy(mhi_cntrl->ev_ring_el_cache);
err_free_cmd:
	kfree(mhi_cntrl->mhi_cmd);
err_free_ch:
	kfree(mhi_cntrl->mhi_chan);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_ep_register_controller);

/*
 * It is expected that the controller drivers will power down the MHI EP stack
 * using "mhi_ep_power_down()" before calling this function to unregister themselves.
 */
void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct mhi_ep_device *mhi_dev = mhi_cntrl->mhi_dev;

	destroy_workqueue(mhi_cntrl->wq);

	free_irq(mhi_cntrl->irq, mhi_cntrl);

	kmem_cache_destroy(mhi_cntrl->tre_buf_cache);
	kmem_cache_destroy(mhi_cntrl->ev_ring_el_cache);
	kmem_cache_destroy(mhi_cntrl->ring_item_cache);
	kfree(mhi_cntrl->mhi_cmd);
	kfree(mhi_cntrl->mhi_chan);

	device_del(&mhi_dev->dev);
	put_device(&mhi_dev->dev);

	ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index);
}
EXPORT_SYMBOL_GPL(mhi_ep_unregister_controller);

static int mhi_ep_driver_probe(struct device *dev)
{
	struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
	struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver);
	struct mhi_ep_chan *ul_chan = mhi_dev->ul_chan;
	struct mhi_ep_chan *dl_chan = mhi_dev->dl_chan;

	ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
	dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;

	return mhi_drv->probe(mhi_dev, mhi_dev->id);
}

static int mhi_ep_driver_remove(struct device *dev)
{
	struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
	struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver);
	struct mhi_result result = {};
	struct mhi_ep_chan *mhi_chan;
	int dir;

	/* Skip if it is a controller device */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	/* Disconnect the channels associated with the driver */
	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

		if (!mhi_chan)
			continue;

		mutex_lock(&mhi_chan->lock);
		/* Send channel disconnect status to the client driver */
		if (mhi_chan->xfer_cb) {
			result.transaction_status = -ENOTCONN;
			result.bytes_xferd = 0;
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
		}

		mhi_chan->state = MHI_CH_STATE_DISABLED;
		mhi_chan->xfer_cb = NULL;
		mutex_unlock(&mhi_chan->lock);
	}

	/* Remove the client driver now */
	mhi_drv->remove(mhi_dev);

	return 0;
}

int __mhi_ep_driver_register(struct mhi_ep_driver *mhi_drv, struct module *owner)
{
	struct device_driver *driver = &mhi_drv->driver;

	if (!mhi_drv->probe || !mhi_drv->remove)
		return -EINVAL;

	/* Client drivers should have callbacks defined for both channels */
	if (!mhi_drv->ul_xfer_cb || !mhi_drv->dl_xfer_cb)
		return -EINVAL;

	driver->bus = &mhi_ep_bus_type;
	driver->owner = owner;
	driver->probe = mhi_ep_driver_probe;
	driver->remove = mhi_ep_driver_remove;

	return driver_register(driver);
}
EXPORT_SYMBOL_GPL(__mhi_ep_driver_register);

void mhi_ep_driver_unregister(struct mhi_ep_driver *mhi_drv)
{
	driver_unregister(&mhi_drv->driver);
}
EXPORT_SYMBOL_GPL(mhi_ep_driver_unregister);

static int mhi_ep_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);

	return add_uevent_var(env, "MODALIAS=" MHI_EP_DEVICE_MODALIAS_FMT,
			      mhi_dev->name);
}

static int mhi_ep_match(struct device *dev, struct device_driver *drv)
{
	struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
	struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(drv);
	const struct mhi_device_id *id;

	/*
	 * If the device is a controller type then there is no client driver
	 * associated with it
	 */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	for (id = mhi_drv->id_table; id->chan[0]; id++)
		if (!strcmp(mhi_dev->name, id->chan)) {
			mhi_dev->id = id;
			return 1;
		}

	return 0;
};

struct bus_type mhi_ep_bus_type = {
	.name = "mhi_ep",
	.dev_name = "mhi_ep",
	.match = mhi_ep_match,
	.uevent = mhi_ep_uevent,
};

static int __init mhi_ep_init(void)
{
	return bus_register(&mhi_ep_bus_type);
}

static void __exit mhi_ep_exit(void)
{
	bus_unregister(&mhi_ep_bus_type);
}

postcore_initcall(mhi_ep_init);
module_exit(mhi_ep_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MHI Bus Endpoint stack");
MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
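
/*
 * Usage sketch (illustrative only, not part of the stack itself): a minimal
 * client driver binding to a channel pair exposed by this bus could look
 * roughly like the snippet below. The channel name "SAMPLE_CHANNEL" and all
 * "sample_*" identifiers are hypothetical.
 *
 *	static void sample_ul_xfer_cb(struct mhi_ep_device *mhi_dev,
 *				      struct mhi_result *result)
 *	{
 *		// Consume the UL (host to device) data in result->buf_addr
 *	}
 *
 *	static void sample_dl_xfer_cb(struct mhi_ep_device *mhi_dev,
 *				      struct mhi_result *result)
 *	{
 *		// DL (device to host) transfer completion
 *	}
 *
 *	static int sample_probe(struct mhi_ep_device *mhi_dev,
 *				const struct mhi_device_id *id)
 *	{
 *		return 0;
 *	}
 *
 *	static void sample_remove(struct mhi_ep_device *mhi_dev)
 *	{
 *	}
 *
 *	static const struct mhi_device_id sample_id_table[] = {
 *		{ .chan = "SAMPLE_CHANNEL" },
 *		{},
 *	};
 *
 *	static struct mhi_ep_driver sample_driver = {
 *		.id_table = sample_id_table,
 *		.probe = sample_probe,
 *		.remove = sample_remove,
 *		.ul_xfer_cb = sample_ul_xfer_cb,
 *		.dl_xfer_cb = sample_dl_xfer_cb,
 *		.driver = {
 *			.name = "sample_mhi_ep_client",
 *		},
 *	};
 *	module_mhi_ep_driver(sample_driver);
 */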