// SPDX-License-Identifier: GPL-2.0
/*
 * MHI Endpoint bus stack
 *
 * Copyright (C) 2022 Linaro Ltd.
 * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
 */

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-direction.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mhi_ep.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include "internal.h"

#define M0_WAIT_DELAY_MS	100
#define M0_WAIT_COUNT		100

static DEFINE_IDA(mhi_ep_cntrl_ida);

static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id);
static int mhi_ep_destroy_device(struct device *dev, void *data);

static int mhi_ep_send_event(struct mhi_ep_cntrl *mhi_cntrl, u32 ring_idx,
			     struct mhi_ring_element *el, bool bei)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	union mhi_ep_ring_ctx *ctx;
	struct mhi_ep_ring *ring;
	int ret;

	mutex_lock(&mhi_cntrl->event_lock);
	ring = &mhi_cntrl->mhi_event[ring_idx].ring;
	ctx = (union mhi_ep_ring_ctx *)&mhi_cntrl->ev_ctx_cache[ring_idx];
	if (!ring->started) {
		ret = mhi_ep_ring_start(mhi_cntrl, ring, ctx);
		if (ret) {
			dev_err(dev, "Error starting event ring (%u)\n", ring_idx);
			goto err_unlock;
		}
	}

	/* Add element to the event ring */
	ret = mhi_ep_ring_add_element(ring, el);
	if (ret) {
		dev_err(dev, "Error adding element to event ring (%u)\n", ring_idx);
		goto err_unlock;
	}

	mutex_unlock(&mhi_cntrl->event_lock);

	/*
	 * Raise IRQ to host only if the BEI flag is not set in TRE. Host might
	 * set this flag for interrupt moderation as per MHI protocol.
	 */
	if (!bei)
		mhi_cntrl->raise_irq(mhi_cntrl, ring->irq_vector);

	return 0;

err_unlock:
	mutex_unlock(&mhi_cntrl->event_lock);

	return ret;
}

static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
					struct mhi_ring_element *tre, u32 len, enum mhi_ev_ccs code)
{
	struct mhi_ring_element *event;
	int ret;

	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
	if (!event)
		return -ENOMEM;

	event->ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(*tre));
	event->dword[0] = MHI_TRE_EV_DWORD0(code, len);
	event->dword[1] = MHI_TRE_EV_DWORD1(ring->ch_id, MHI_PKT_TYPE_TX_EVENT);

	ret = mhi_ep_send_event(mhi_cntrl, ring->er_index, event, MHI_TRE_DATA_GET_BEI(tre));
	kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);

	return ret;
}

int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state)
{
	struct mhi_ring_element *event;
	int ret;

	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
	if (!event)
		return -ENOMEM;

	event->dword[0] = MHI_SC_EV_DWORD0(state);
	event->dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_STATE_CHANGE_EVENT);

	ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0);
	kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);

	return ret;
}

int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env)
{
	struct mhi_ring_element *event;
	int ret;

	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
	if (!event)
		return -ENOMEM;

	event->dword[0] = MHI_EE_EV_DWORD0(exec_env);
	event->dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_EE_EVENT);

	ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0);
	kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);

	return ret;
}

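/* Send a command completion event to the host for the command currently being processed */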
static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ev_ccs code)
{
	struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring;
	struct mhi_ring_element *event;
	int ret;

	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
	if (!event)
		return -ENOMEM;

	event->ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(struct mhi_ring_element));
	event->dword[0] = MHI_CC_EV_DWORD0(code);
	event->dword[1] = MHI_CC_EV_DWORD1(MHI_PKT_TYPE_CMD_COMPLETION_EVENT);

	ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0);
	kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);

	return ret;
}

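/*
 * Process a single command ring element issued by the host. START, STOP and
 * RESET channel commands are handled here and a command completion event is
 * sent back to the host for each of them.
 */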
static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
{
	struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_result result = {};
	struct mhi_ep_chan *mhi_chan;
	struct mhi_ep_ring *ch_ring;
	u32 tmp, ch_id;
	int ret;

	ch_id = MHI_TRE_GET_CMD_CHID(el);

	/* Check if the channel is supported by the controller */
	if ((ch_id >= mhi_cntrl->max_chan) || !mhi_cntrl->mhi_chan[ch_id].name) {
		dev_dbg(dev, "Channel (%u) not supported!\n", ch_id);
		return -ENODEV;
	}

	mhi_chan = &mhi_cntrl->mhi_chan[ch_id];
	ch_ring = &mhi_cntrl->mhi_chan[ch_id].ring;

	switch (MHI_TRE_GET_CMD_TYPE(el)) {
	case MHI_PKT_TYPE_START_CHAN_CMD:
		dev_dbg(dev, "Received START command for channel (%u)\n", ch_id);

		mutex_lock(&mhi_chan->lock);
		/* Initialize and configure the corresponding channel ring */
		if (!ch_ring->started) {
			ret = mhi_ep_ring_start(mhi_cntrl, ch_ring,
				(union mhi_ep_ring_ctx *)&mhi_cntrl->ch_ctx_cache[ch_id]);
			if (ret) {
				dev_err(dev, "Failed to start ring for channel (%u)\n", ch_id);
				ret = mhi_ep_send_cmd_comp_event(mhi_cntrl,
								 MHI_EV_CC_UNDEFINED_ERR);
				if (ret)
					dev_err(dev, "Error sending completion event: %d\n", ret);

				goto err_unlock;
			}
		}

		/* Set channel state to RUNNING */
		mhi_chan->state = MHI_CH_STATE_RUNNING;
		tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING);
		mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);

		ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
		if (ret) {
			dev_err(dev, "Error sending command completion event (%u)\n",
				MHI_EV_CC_SUCCESS);
			goto err_unlock;
		}

		mutex_unlock(&mhi_chan->lock);

		/*
		 * Create MHI device only during UL channel start. Since the MHI
		 * channels operate in a pair, we'll associate both UL and DL
		 * channels with the same device.
		 *
		 * We also need to check for mhi_dev != NULL because the host
		 * will issue the START_CHAN command during resume and we don't
		 * destroy the device during suspend.
		 */
		if (!(ch_id % 2) && !mhi_chan->mhi_dev) {
			ret = mhi_ep_create_device(mhi_cntrl, ch_id);
			if (ret) {
				dev_err(dev, "Error creating device for channel (%u)\n", ch_id);
				mhi_ep_handle_syserr(mhi_cntrl);
				return ret;
			}
		}

		/* Finally, enable DB for the channel */
		mhi_ep_mmio_enable_chdb(mhi_cntrl, ch_id);

		break;
	case MHI_PKT_TYPE_STOP_CHAN_CMD:
		dev_dbg(dev, "Received STOP command for channel (%u)\n", ch_id);
		if (!ch_ring->started) {
			dev_err(dev, "Channel (%u) not opened\n", ch_id);
			return -ENODEV;
		}

		mutex_lock(&mhi_chan->lock);
		/* Disable DB for the channel */
		mhi_ep_mmio_disable_chdb(mhi_cntrl, ch_id);

		/* Send channel disconnect status to client drivers */
		if (mhi_chan->xfer_cb) {
			result.transaction_status = -ENOTCONN;
			result.bytes_xferd = 0;
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
		}

		/* Set channel state to STOP */
		mhi_chan->state = MHI_CH_STATE_STOP;
		tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_STOP);
		mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);

		ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
		if (ret) {
			dev_err(dev, "Error sending command completion event (%u)\n",
				MHI_EV_CC_SUCCESS);
			goto err_unlock;
		}

		mutex_unlock(&mhi_chan->lock);
		break;
	case MHI_PKT_TYPE_RESET_CHAN_CMD:
		dev_dbg(dev, "Received RESET command for channel (%u)\n", ch_id);
		if (!ch_ring->started) {
			dev_err(dev, "Channel (%u) not opened\n", ch_id);
			return -ENODEV;
		}

		mutex_lock(&mhi_chan->lock);
		/* Stop and reset the transfer ring */
		mhi_ep_ring_reset(mhi_cntrl, ch_ring);

		/* Send channel disconnect status to client driver */
		if (mhi_chan->xfer_cb) {
			result.transaction_status = -ENOTCONN;
			result.bytes_xferd = 0;
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
		}

		/* Set channel state to DISABLED */
		mhi_chan->state = MHI_CH_STATE_DISABLED;
		tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED);
		mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);

		ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
		if (ret) {
			dev_err(dev, "Error sending command completion event (%u)\n",
				MHI_EV_CC_SUCCESS);
			goto err_unlock;
		}

		mutex_unlock(&mhi_chan->lock);
		break;
	default:
		dev_err(dev, "Invalid command received: %lu for channel (%u)\n",
			MHI_TRE_GET_CMD_TYPE(el), ch_id);
		return -EINVAL;
	}

	return 0;

err_unlock:
	mutex_unlock(&mhi_chan->lock);

	return ret;
}

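/*
 * Check whether the transfer ring of the channel selected by @dir is empty,
 * i.e. the local read offset has caught up with the cached write offset.
 */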
bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir)
{
	struct mhi_ep_chan *mhi_chan = (dir == DMA_FROM_DEVICE) ? mhi_dev->dl_chan :
								  mhi_dev->ul_chan;
	struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;

	return !!(ring->rd_offset == ring->wr_offset);
}
EXPORT_SYMBOL_GPL(mhi_ep_queue_is_empty);

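/*
 * Read the data described by the TREs at the current read offset from host
 * memory into result->buf_addr, sending EOB/EOT completion events to the host
 * as each TRE is consumed.
 */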
static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
			       struct mhi_ep_ring *ring,
			       struct mhi_result *result,
			       u32 len)
{
	struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	size_t tr_len, read_offset, write_offset;
	struct mhi_ring_element *el;
	bool tr_done = false;
	void *write_addr;
	u64 read_addr;
	u32 buf_left;
	int ret;

	buf_left = len;

	do {
		/* Don't process the transfer ring if the channel is not in RUNNING state */
		if (mhi_chan->state != MHI_CH_STATE_RUNNING) {
			dev_err(dev, "Channel not available\n");
			return -ENODEV;
		}

		el = &ring->ring_cache[ring->rd_offset];

		/* Check if there is data pending to be read from previous read operation */
		if (mhi_chan->tre_bytes_left) {
			dev_dbg(dev, "TRE bytes remaining: %u\n", mhi_chan->tre_bytes_left);
			tr_len = min(buf_left, mhi_chan->tre_bytes_left);
		} else {
			mhi_chan->tre_loc = MHI_TRE_DATA_GET_PTR(el);
			mhi_chan->tre_size = MHI_TRE_DATA_GET_LEN(el);
			mhi_chan->tre_bytes_left = mhi_chan->tre_size;

			tr_len = min(buf_left, mhi_chan->tre_size);
		}

		read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left;
		write_offset = len - buf_left;
		read_addr = mhi_chan->tre_loc + read_offset;
		write_addr = result->buf_addr + write_offset;

		dev_dbg(dev, "Reading %zd bytes from channel (%u)\n", tr_len, ring->ch_id);
		ret = mhi_cntrl->read_from_host(mhi_cntrl, read_addr, write_addr, tr_len);
		if (ret < 0) {
			dev_err(&mhi_chan->mhi_dev->dev, "Error reading from channel\n");
			return ret;
		}

		buf_left -= tr_len;
		mhi_chan->tre_bytes_left -= tr_len;

		/*
		 * Once the TRE (Transfer Ring Element) of a TD (Transfer Descriptor) has been
		 * read completely:
		 *
		 * 1. Send completion event to the host based on the flags set in TRE.
		 * 2. Increment the local read offset of the transfer ring.
		 */
		if (!mhi_chan->tre_bytes_left) {
			/*
			 * The host will split the data packet into multiple TREs if it can't fit
			 * the packet in a single TRE. In that case, CHAIN flag will be set by the
			 * host for all TREs except the last one.
			 */
			if (MHI_TRE_DATA_GET_CHAIN(el)) {
				/*
				 * IEOB (Interrupt on End of Block) flag will be set by the host if
				 * it expects the completion event for all TREs of a TD.
				 */
				if (MHI_TRE_DATA_GET_IEOB(el)) {
					ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
								MHI_TRE_DATA_GET_LEN(el),
								MHI_EV_CC_EOB);
					if (ret < 0) {
						dev_err(&mhi_chan->mhi_dev->dev,
							"Error sending transfer compl. event\n");
						return ret;
					}
				}
			} else {
				/*
				 * IEOT (Interrupt on End of Transfer) flag will be set by the host
				 * for the last TRE of the TD and expects the completion event for
				 * the same.
				 */
				if (MHI_TRE_DATA_GET_IEOT(el)) {
					ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
								MHI_TRE_DATA_GET_LEN(el),
								MHI_EV_CC_EOT);
					if (ret < 0) {
						dev_err(&mhi_chan->mhi_dev->dev,
							"Error sending transfer compl. event\n");
						return ret;
					}
				}

				tr_done = true;
			}

			mhi_ep_ring_inc_index(ring);
		}

		result->bytes_xferd += tr_len;
	} while (buf_left && !tr_done);

	return 0;
}

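/*
 * Process a channel ring once its doorbell has been rung. For DL channels the
 * client driver is simply notified through its transfer callback; for UL
 * channels the pending TREs are read from the host and passed to the client
 * driver until the ring becomes empty.
 */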
static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
{
	struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
	struct mhi_result result = {};
	u32 len = MHI_EP_DEFAULT_MTU;
	struct mhi_ep_chan *mhi_chan;
	int ret;

	mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];

	/*
	 * Bail out if transfer callback is not registered for the channel.
	 * This is most likely due to the client driver not loaded at this point.
	 */
	if (!mhi_chan->xfer_cb) {
		dev_err(&mhi_chan->mhi_dev->dev, "Client driver not available\n");
		return -ENODEV;
	}

	if (ring->ch_id % 2) {
		/* DL channel */
		result.dir = mhi_chan->dir;
		mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
	} else {
		/* UL channel */
		result.buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL | GFP_DMA);
		if (!result.buf_addr)
			return -ENOMEM;

		do {
			ret = mhi_ep_read_channel(mhi_cntrl, ring, &result, len);
			if (ret < 0) {
				dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n");
				kmem_cache_free(mhi_cntrl->tre_buf_cache, result.buf_addr);
				return ret;
			}

			result.dir = mhi_chan->dir;
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
			result.bytes_xferd = 0;
			memset(result.buf_addr, 0, len);

			/* Read until the ring becomes empty */
		} while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE));

		kmem_cache_free(mhi_cntrl->tre_buf_cache, result.buf_addr);
	}

	return 0;
}

/* TODO: Handle partially formed TDs */
int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
{
	struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan;
	struct device *dev = &mhi_chan->mhi_dev->dev;
	struct mhi_ring_element *el;
	u32 buf_left, read_offset;
	struct mhi_ep_ring *ring;
	enum mhi_ev_ccs code;
	void *read_addr;
	u64 write_addr;
	size_t tr_len;
	u32 tre_len;
	int ret;

	buf_left = skb->len;
	ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;

	mutex_lock(&mhi_chan->lock);

	do {
		/* Don't process the transfer ring if the channel is not in RUNNING state */
		if (mhi_chan->state != MHI_CH_STATE_RUNNING) {
			dev_err(dev, "Channel not available\n");
			ret = -ENODEV;
			goto err_exit;
		}

		if (mhi_ep_queue_is_empty(mhi_dev, DMA_FROM_DEVICE)) {
			dev_err(dev, "TRE not available!\n");
			ret = -ENOSPC;
			goto err_exit;
		}

		el = &ring->ring_cache[ring->rd_offset];
		tre_len = MHI_TRE_DATA_GET_LEN(el);

		tr_len = min(buf_left, tre_len);
		read_offset = skb->len - buf_left;
		read_addr = skb->data + read_offset;
		write_addr = MHI_TRE_DATA_GET_PTR(el);

		dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id);
		ret = mhi_cntrl->write_to_host(mhi_cntrl, read_addr, write_addr, tr_len);
		if (ret < 0) {
			dev_err(dev, "Error writing to the channel\n");
			goto err_exit;
		}

		buf_left -= tr_len;
		/*
		 * For all TREs queued by the host for DL channel, only the EOT flag will be set.
		 * If the packet doesn't fit into a single TRE, send the OVERFLOW event to
		 * the host so that the host can adjust the packet boundary to next TREs. Else send
		 * the EOT event to the host indicating the packet boundary.
		 */
		if (buf_left)
			code = MHI_EV_CC_OVERFLOW;
		else
			code = MHI_EV_CC_EOT;

		ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, tr_len, code);
		if (ret) {
			dev_err(dev, "Error sending transfer completion event\n");
			goto err_exit;
		}

		mhi_ep_ring_inc_index(ring);
	} while (buf_left);

	mutex_unlock(&mhi_chan->lock);

	return 0;

err_exit:
	mutex_unlock(&mhi_chan->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_ep_queue_skb);

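/*
 * Cache the channel, event and command context base pointers programmed by the
 * host, allocate and map local memory for the cached contexts, and start the
 * command ring.
 */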
static int mhi_ep_cache_host_cfg(struct mhi_ep_cntrl *mhi_cntrl)
{
	size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret;

	/* Update the number of event rings (NER) programmed by the host */
	mhi_ep_mmio_update_ner(mhi_cntrl);

	dev_dbg(dev, "Number of Event rings: %u, HW Event rings: %u\n",
		mhi_cntrl->event_rings, mhi_cntrl->hw_event_rings);

	ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan;
	ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings;
	cmd_ctx_host_size = sizeof(struct mhi_cmd_ctxt) * NR_OF_CMD_RINGS;

	/* Get the channel context base pointer from host */
	mhi_ep_mmio_get_chc_base(mhi_cntrl);

	/* Allocate and map memory for caching host channel context */
	ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa,
				   &mhi_cntrl->ch_ctx_cache_phys,
				   (void __iomem **) &mhi_cntrl->ch_ctx_cache,
				   ch_ctx_host_size);
	if (ret) {
		dev_err(dev, "Failed to allocate and map ch_ctx_cache\n");
		return ret;
	}

	/* Get the event context base pointer from host */
	mhi_ep_mmio_get_erc_base(mhi_cntrl);

	/* Allocate and map memory for caching host event context */
	ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa,
				   &mhi_cntrl->ev_ctx_cache_phys,
				   (void __iomem **) &mhi_cntrl->ev_ctx_cache,
				   ev_ctx_host_size);
	if (ret) {
		dev_err(dev, "Failed to allocate and map ev_ctx_cache\n");
		goto err_ch_ctx;
	}

	/* Get the command context base pointer from host */
	mhi_ep_mmio_get_crc_base(mhi_cntrl);

	/* Allocate and map memory for caching host command context */
	ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa,
				   &mhi_cntrl->cmd_ctx_cache_phys,
				   (void __iomem **) &mhi_cntrl->cmd_ctx_cache,
				   cmd_ctx_host_size);
	if (ret) {
		dev_err(dev, "Failed to allocate and map cmd_ctx_cache\n");
		goto err_ev_ctx;
	}

	/* Initialize command ring */
	ret = mhi_ep_ring_start(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring,
				(union mhi_ep_ring_ctx *)mhi_cntrl->cmd_ctx_cache);
	if (ret) {
		dev_err(dev, "Failed to start the command ring\n");
		goto err_cmd_ctx;
	}

	return ret;

err_cmd_ctx:
	mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys,
			      (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size);

err_ev_ctx:
	mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys,
			      (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size);

err_ch_ctx:
	mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys,
			      (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size);

	return ret;
}

static void mhi_ep_free_host_cfg(struct mhi_ep_cntrl *mhi_cntrl)
{
	size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size;

	ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan;
	ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings;
	cmd_ctx_host_size = sizeof(struct mhi_cmd_ctxt) * NR_OF_CMD_RINGS;

	mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys,
			      (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size);

	mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys,
			      (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size);

	mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys,
			      (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size);
}

static void mhi_ep_enable_int(struct mhi_ep_cntrl *mhi_cntrl)
{
	/*
	 * Doorbell interrupts are enabled when the corresponding channel gets started.
	 * Enabling all interrupts here triggers spurious irqs as some of the interrupts
	 * associated with hw channels always get triggered.
	 */
	mhi_ep_mmio_enable_ctrl_interrupt(mhi_cntrl);
	mhi_ep_mmio_enable_cmdb_interrupt(mhi_cntrl);
}

static int mhi_ep_enable(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_state state;
	bool mhi_reset;
	u32 count = 0;
	int ret;

	/* Wait for Host to set the M0 state */
	do {
		msleep(M0_WAIT_DELAY_MS);
		mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset);
		if (mhi_reset) {
			/* Clear the MHI reset if host is in reset state */
			mhi_ep_mmio_clear_reset(mhi_cntrl);
			dev_info(dev, "Detected Host reset while waiting for M0\n");
		}
		count++;
	} while (state != MHI_STATE_M0 && count < M0_WAIT_COUNT);

	if (state != MHI_STATE_M0) {
		dev_err(dev, "Host failed to enter M0\n");
		return -ETIMEDOUT;
	}

	ret = mhi_ep_cache_host_cfg(mhi_cntrl);
	if (ret) {
		dev_err(dev, "Failed to cache host config\n");
		return ret;
	}

	mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);

	/* Enable all interrupts now */
	mhi_ep_enable_int(mhi_cntrl);

	return 0;
}

static void mhi_ep_cmd_ring_worker(struct work_struct *work)
{
	struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, cmd_ring_work);
	struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_ring_element *el;
	int ret;

	/* Update the write offset for the ring */
	ret = mhi_ep_update_wr_offset(ring);
	if (ret) {
		dev_err(dev, "Error updating write offset for ring\n");
		return;
	}

	/* Sanity check to make sure there are elements in the ring */
	if (ring->rd_offset == ring->wr_offset)
		return;

	/*
	 * Process command ring elements up to the write offset. In case of an
	 * error, just try to process the next element.
	 */
	while (ring->rd_offset != ring->wr_offset) {
		el = &ring->ring_cache[ring->rd_offset];

		ret = mhi_ep_process_cmd_ring(ring, el);
		if (ret && ret != -ENODEV)
			dev_err(dev, "Error processing cmd ring element: %zu\n", ring->rd_offset);

		mhi_ep_ring_inc_index(ring);
	}
}

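/*
 * Worker that processes the channel rings queued by the channel doorbell
 * interrupt handler. Each queued ring is processed under its channel lock and
 * the corresponding ring item is freed back to the slab cache.
 */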
static void mhi_ep_ch_ring_worker(struct work_struct *work)
{
	struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, ch_ring_work);
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_ep_ring_item *itr, *tmp;
	struct mhi_ring_element *el;
	struct mhi_ep_ring *ring;
	struct mhi_ep_chan *chan;
	unsigned long flags;
	LIST_HEAD(head);
	int ret;

	spin_lock_irqsave(&mhi_cntrl->list_lock, flags);
	list_splice_tail_init(&mhi_cntrl->ch_db_list, &head);
	spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags);

	/* Process each queued channel ring. In case of an error, just process next element. */
	list_for_each_entry_safe(itr, tmp, &head, node) {
		list_del(&itr->node);
		ring = itr->ring;

		chan = &mhi_cntrl->mhi_chan[ring->ch_id];
		mutex_lock(&chan->lock);

		/*
		 * The ring could've stopped while we waited to grab the (chan->lock), so do
		 * a sanity check before going further.
		 */
		if (!ring->started) {
			mutex_unlock(&chan->lock);
			kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
			continue;
		}

		/* Update the write offset for the ring */
		ret = mhi_ep_update_wr_offset(ring);
		if (ret) {
			dev_err(dev, "Error updating write offset for ring\n");
			mutex_unlock(&chan->lock);
			kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
			continue;
		}

		/* Sanity check to make sure there are elements in the ring */
		if (ring->rd_offset == ring->wr_offset) {
			mutex_unlock(&chan->lock);
			kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
			continue;
		}

		el = &ring->ring_cache[ring->rd_offset];

		dev_dbg(dev, "Processing the ring for channel (%u)\n", ring->ch_id);
		ret = mhi_ep_process_ch_ring(ring, el);
		if (ret) {
			dev_err(dev, "Error processing ring for channel (%u): %d\n",
				ring->ch_id, ret);
			mutex_unlock(&chan->lock);
			kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
			continue;
		}

		mutex_unlock(&chan->lock);
		kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
	}
}

static void mhi_ep_state_worker(struct work_struct *work)
{
	struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, state_work);
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_ep_state_transition *itr, *tmp;
	unsigned long flags;
	LIST_HEAD(head);
	int ret;

	spin_lock_irqsave(&mhi_cntrl->list_lock, flags);
	list_splice_tail_init(&mhi_cntrl->st_transition_list, &head);
	spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags);

	list_for_each_entry_safe(itr, tmp, &head, node) {
		list_del(&itr->node);
		dev_dbg(dev, "Handling MHI state transition to %s\n",
			mhi_state_str(itr->state));

		switch (itr->state) {
		case MHI_STATE_M0:
			ret = mhi_ep_set_m0_state(mhi_cntrl);
			if (ret)
				dev_err(dev, "Failed to transition to M0 state\n");
			break;
		case MHI_STATE_M3:
			ret = mhi_ep_set_m3_state(mhi_cntrl);
			if (ret)
				dev_err(dev, "Failed to transition to M3 state\n");
			break;
		default:
			dev_err(dev, "Invalid MHI state transition: %d\n", itr->state);
			break;
		}
		kfree(itr);
	}
}

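/*
 * Queue ring items for all channels whose doorbell bits are set in the given
 * CHDB status register and kick the channel ring worker.
 */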
static void mhi_ep_queue_channel_db(struct mhi_ep_cntrl *mhi_cntrl, unsigned long ch_int,
				    u32 ch_idx)
{
	struct mhi_ep_ring_item *item;
	struct mhi_ep_ring *ring;
	bool work = !!ch_int;
	LIST_HEAD(head);
	u32 i;

	/* First add the ring items to a local list */
	for_each_set_bit(i, &ch_int, 32) {
		/* Channel index varies for each register: 0, 32, 64, 96 */
		u32 ch_id = ch_idx + i;

		ring = &mhi_cntrl->mhi_chan[ch_id].ring;
		item = kmem_cache_zalloc(mhi_cntrl->ring_item_cache, GFP_ATOMIC);
		if (!item)
			return;

		item->ring = ring;
		list_add_tail(&item->node, &head);
	}

	/* Now, splice the local list into ch_db_list and queue the work item */
	if (work) {
		spin_lock(&mhi_cntrl->list_lock);
		list_splice_tail_init(&head, &mhi_cntrl->ch_db_list);
		spin_unlock(&mhi_cntrl->list_lock);

		queue_work(mhi_cntrl->wq, &mhi_cntrl->ch_ring_work);
	}
}

/*
 * Channel interrupt statuses are contained in four registers, each 32 bits wide.
 * To check all interrupts, we need to loop through each register and then check
 * for bits that are set.
 */
static void mhi_ep_check_channel_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
{
	u32 ch_int, ch_idx, i;

	/* Bail out if there is no channel doorbell interrupt */
	if (!mhi_ep_mmio_read_chdb_status_interrupts(mhi_cntrl))
		return;

	for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) {
		ch_idx = i * MHI_MASK_CH_LEN;

		/* Only process channel interrupt if the mask is enabled */
		ch_int = mhi_cntrl->chdb[i].status & mhi_cntrl->chdb[i].mask;
		if (ch_int) {
			mhi_ep_queue_channel_db(mhi_cntrl, ch_int, ch_idx);
			mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_CLEAR_n(i),
					  mhi_cntrl->chdb[i].status);
		}
	}
}

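/* Queue a state transition work item for the MHI state reported by the host */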
static void mhi_ep_process_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl,
					  enum mhi_state state)
{
	struct mhi_ep_state_transition *item;

	item = kzalloc(sizeof(*item), GFP_ATOMIC);
	if (!item)
		return;

	item->state = state;
	spin_lock(&mhi_cntrl->list_lock);
	list_add_tail(&item->node, &mhi_cntrl->st_transition_list);
	spin_unlock(&mhi_cntrl->list_lock);

	queue_work(mhi_cntrl->wq, &mhi_cntrl->state_work);
}

/*
 * Interrupt handler that services interrupts raised by the host writing to
 * MHICTRL and Command ring doorbell (CRDB) registers for state change and
 * channel interrupts.
 */
static irqreturn_t mhi_ep_irq(int irq, void *data)
{
	struct mhi_ep_cntrl *mhi_cntrl = data;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_state state;
	u32 int_value;
	bool mhi_reset;

	/* Acknowledge the ctrl interrupt */
	int_value = mhi_ep_mmio_read(mhi_cntrl, MHI_CTRL_INT_STATUS);
	mhi_ep_mmio_write(mhi_cntrl, MHI_CTRL_INT_CLEAR, int_value);

	/* Check for ctrl interrupt */
	if (FIELD_GET(MHI_CTRL_INT_STATUS_MSK, int_value)) {
		dev_dbg(dev, "Processing ctrl interrupt\n");
		mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset);
		if (mhi_reset) {
			dev_info(dev, "Host triggered MHI reset!\n");
			disable_irq_nosync(mhi_cntrl->irq);
			schedule_work(&mhi_cntrl->reset_work);
			return IRQ_HANDLED;
		}

		mhi_ep_process_ctrl_interrupt(mhi_cntrl, state);
	}

	/* Check for command doorbell interrupt */
	if (FIELD_GET(MHI_CTRL_INT_STATUS_CRDB_MSK, int_value)) {
		dev_dbg(dev, "Processing command doorbell interrupt\n");
		queue_work(mhi_cntrl->wq, &mhi_cntrl->cmd_ring_work);
	}

	/* Check for channel interrupts */
	mhi_ep_check_channel_interrupt(mhi_cntrl);

	return IRQ_HANDLED;
}

static void mhi_ep_abort_transfer(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct mhi_ep_ring *ch_ring, *ev_ring;
	struct mhi_result result = {};
	struct mhi_ep_chan *mhi_chan;
	int i;

	/* Stop all the channels */
	for (i = 0; i < mhi_cntrl->max_chan; i++) {
		mhi_chan = &mhi_cntrl->mhi_chan[i];
		if (!mhi_chan->ring.started)
			continue;

		mutex_lock(&mhi_chan->lock);
		/* Send channel disconnect status to client drivers */
		if (mhi_chan->xfer_cb) {
			result.transaction_status = -ENOTCONN;
			result.bytes_xferd = 0;
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
		}

		mhi_chan->state = MHI_CH_STATE_DISABLED;
		mutex_unlock(&mhi_chan->lock);
	}

	flush_workqueue(mhi_cntrl->wq);

	/* Destroy devices associated with all channels */
	device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_ep_destroy_device);

	/* Stop and reset the transfer rings */
	for (i = 0; i < mhi_cntrl->max_chan; i++) {
		mhi_chan = &mhi_cntrl->mhi_chan[i];
		if (!mhi_chan->ring.started)
			continue;

		ch_ring = &mhi_cntrl->mhi_chan[i].ring;
		mutex_lock(&mhi_chan->lock);
		mhi_ep_ring_reset(mhi_cntrl, ch_ring);
		mutex_unlock(&mhi_chan->lock);
	}

	/* Stop and reset the event rings */
	for (i = 0; i < mhi_cntrl->event_rings; i++) {
		ev_ring = &mhi_cntrl->mhi_event[i].ring;
		if (!ev_ring->started)
			continue;

		mutex_lock(&mhi_cntrl->event_lock);
		mhi_ep_ring_reset(mhi_cntrl, ev_ring);
		mutex_unlock(&mhi_cntrl->event_lock);
	}

	/* Stop and reset the command ring */
	mhi_ep_ring_reset(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring);

	mhi_ep_free_host_cfg(mhi_cntrl);
	mhi_ep_mmio_mask_interrupts(mhi_cntrl);

	mhi_cntrl->enabled = false;
}

static void mhi_ep_reset_worker(struct work_struct *work)
{
	struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, reset_work);
	enum mhi_state cur_state;

	mhi_ep_power_down(mhi_cntrl);

	mutex_lock(&mhi_cntrl->state_lock);

	/* Reset MMIO to signal host that the MHI_RESET is completed in endpoint */
	mhi_ep_mmio_reset(mhi_cntrl);
	cur_state = mhi_cntrl->mhi_state;

	/*
	 * Only proceed further if the reset is due to SYS_ERR. The host also
	 * issues a reset during shutdown, and we don't need to re-init in
	 * that case.
	 */
	if (cur_state == MHI_STATE_SYS_ERR)
		mhi_ep_power_up(mhi_cntrl);

	mutex_unlock(&mhi_cntrl->state_lock);
}

/*
 * We don't need to do anything special other than setting the MHI SYS_ERR
 * state. The host will reset all contexts and issue MHI RESET so that we
 * could also recover from error state.
 */
void mhi_ep_handle_syserr(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret;

	ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
	if (ret)
		return;

	/* Signal host that the device went to SYS_ERR state */
	ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_SYS_ERR);
	if (ret)
		dev_err(dev, "Failed sending SYS_ERR state change event: %d\n", ret);
}

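/*
 * Power up the MHI endpoint stack: initialize MMIO and the rings, signal READY
 * to the host and wait for it to move the state machine to M0 before enabling
 * interrupts.
 */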
int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret, i;

	/*
	 * Mask all interrupts until the state machine is ready. Interrupts will
	 * be enabled later with mhi_ep_enable().
	 */
	mhi_ep_mmio_mask_interrupts(mhi_cntrl);
	mhi_ep_mmio_init(mhi_cntrl);

	mhi_cntrl->mhi_event = kzalloc(mhi_cntrl->event_rings * (sizeof(*mhi_cntrl->mhi_event)),
				       GFP_KERNEL);
	if (!mhi_cntrl->mhi_event)
		return -ENOMEM;

	/* Initialize command, channel and event rings */
	mhi_ep_ring_init(&mhi_cntrl->mhi_cmd->ring, RING_TYPE_CMD, 0);
	for (i = 0; i < mhi_cntrl->max_chan; i++)
		mhi_ep_ring_init(&mhi_cntrl->mhi_chan[i].ring, RING_TYPE_CH, i);
	for (i = 0; i < mhi_cntrl->event_rings; i++)
		mhi_ep_ring_init(&mhi_cntrl->mhi_event[i].ring, RING_TYPE_ER, i);

	mhi_cntrl->mhi_state = MHI_STATE_RESET;

	/* Set AMSS EE before signaling ready state */
	mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);

	/* All set, notify the host that we are ready */
	ret = mhi_ep_set_ready_state(mhi_cntrl);
	if (ret)
		goto err_free_event;

	dev_dbg(dev, "READY state notification sent to the host\n");

	ret = mhi_ep_enable(mhi_cntrl);
	if (ret) {
		dev_err(dev, "Failed to enable MHI endpoint\n");
		goto err_free_event;
	}

	enable_irq(mhi_cntrl->irq);
	mhi_cntrl->enabled = true;

	return 0;

err_free_event:
	kfree(mhi_cntrl->mhi_event);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_ep_power_up);

void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl)
{
	if (mhi_cntrl->enabled) {
		mhi_ep_abort_transfer(mhi_cntrl);
		kfree(mhi_cntrl->mhi_event);
		disable_irq(mhi_cntrl->irq);
	}
}
EXPORT_SYMBOL_GPL(mhi_ep_power_down);

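/*
 * Move all channels that are currently RUNNING to the SUSPENDED state, both
 * locally and in the cached channel context.
 */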
void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct mhi_ep_chan *mhi_chan;
	u32 tmp;
	int i;

	for (i = 0; i < mhi_cntrl->max_chan; i++) {
		mhi_chan = &mhi_cntrl->mhi_chan[i];

		if (!mhi_chan->mhi_dev)
			continue;

		mutex_lock(&mhi_chan->lock);
		/* Skip if the channel is not currently running */
		tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg);
		if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_RUNNING) {
			mutex_unlock(&mhi_chan->lock);
			continue;
		}

		dev_dbg(&mhi_chan->mhi_dev->dev, "Suspending channel\n");
		/* Set channel state to SUSPENDED */
		mhi_chan->state = MHI_CH_STATE_SUSPENDED;
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_SUSPENDED);
		mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp);
		mutex_unlock(&mhi_chan->lock);
	}
}

void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct mhi_ep_chan *mhi_chan;
	u32 tmp;
	int i;

	for (i = 0; i < mhi_cntrl->max_chan; i++) {
		mhi_chan = &mhi_cntrl->mhi_chan[i];

		if (!mhi_chan->mhi_dev)
			continue;

		mutex_lock(&mhi_chan->lock);
		/* Skip if the channel is not currently suspended */
		tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg);
		if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_SUSPENDED) {
			mutex_unlock(&mhi_chan->lock);
			continue;
		}

		dev_dbg(&mhi_chan->mhi_dev->dev, "Resuming channel\n");
		/* Set channel state to RUNNING */
		mhi_chan->state = MHI_CH_STATE_RUNNING;
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING);
		mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp);
		mutex_unlock(&mhi_chan->lock);
	}
}

static void mhi_ep_release_device(struct device *dev)
{
	struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);

	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		mhi_dev->mhi_cntrl->mhi_dev = NULL;

	/*
	 * We need to set the mhi_chan->mhi_dev to NULL here since the MHI
	 * devices for the channels will only get created in mhi_ep_create_device()
	 * if the mhi_dev associated with it is NULL.
	 */
	if (mhi_dev->ul_chan)
		mhi_dev->ul_chan->mhi_dev = NULL;

	if (mhi_dev->dl_chan)
		mhi_dev->dl_chan->mhi_dev = NULL;

	kfree(mhi_dev);
}

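/*
 * Allocate and initialize a device on the MHI endpoint bus, either for the
 * controller itself or for a channel pair.
 */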
static struct mhi_ep_device *mhi_ep_alloc_device(struct mhi_ep_cntrl *mhi_cntrl,
						 enum mhi_device_type dev_type)
{
	struct mhi_ep_device *mhi_dev;
	struct device *dev;

	mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL);
	if (!mhi_dev)
		return ERR_PTR(-ENOMEM);

	dev = &mhi_dev->dev;
	device_initialize(dev);
	dev->bus = &mhi_ep_bus_type;
	dev->release = mhi_ep_release_device;

	/* Controller device is always allocated first */
	if (dev_type == MHI_DEVICE_CONTROLLER)
		/* for MHI controller device, parent is the bus device (e.g. PCI EPF) */
		dev->parent = mhi_cntrl->cntrl_dev;
	else
		/* for MHI client devices, parent is the MHI controller device */
		dev->parent = &mhi_cntrl->mhi_dev->dev;

	mhi_dev->mhi_cntrl = mhi_cntrl;
	mhi_dev->dev_type = dev_type;

	return mhi_dev;
}

/*
 * MHI channels are always defined in pairs with UL as the even numbered
 * channel and DL as the odd numbered one. This function takes the UL channel
 * (primary) as ch_id and looks at the next entry in the channel list for
 * the corresponding DL channel (secondary).
 */
static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id)
{
	struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ch_id];
	struct device *dev = mhi_cntrl->cntrl_dev;
	struct mhi_ep_device *mhi_dev;
	int ret;

	/* Check if the channel name is same for both UL and DL */
	if (strcmp(mhi_chan->name, mhi_chan[1].name)) {
		dev_err(dev, "UL and DL channel names are not same: (%s) != (%s)\n",
			mhi_chan->name, mhi_chan[1].name);
		return -EINVAL;
	}

	mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_XFER);
	if (IS_ERR(mhi_dev))
		return PTR_ERR(mhi_dev);

	/* Configure primary channel */
	mhi_dev->ul_chan = mhi_chan;
	get_device(&mhi_dev->dev);
	mhi_chan->mhi_dev = mhi_dev;

	/* Configure secondary channel as well */
	mhi_chan++;
	mhi_dev->dl_chan = mhi_chan;
	get_device(&mhi_dev->dev);
	mhi_chan->mhi_dev = mhi_dev;

	/* Channel name is same for both UL and DL */
	mhi_dev->name = mhi_chan->name;
	ret = dev_set_name(&mhi_dev->dev, "%s_%s",
			   dev_name(&mhi_cntrl->mhi_dev->dev),
			   mhi_dev->name);
	if (ret) {
		put_device(&mhi_dev->dev);
		return ret;
	}

	ret = device_add(&mhi_dev->dev);
	if (ret)
		put_device(&mhi_dev->dev);

	return ret;
}

static int mhi_ep_destroy_device(struct device *dev, void *data)
{
	struct mhi_ep_device *mhi_dev;
	struct mhi_ep_cntrl *mhi_cntrl;
	struct mhi_ep_chan *ul_chan, *dl_chan;

	if (dev->bus != &mhi_ep_bus_type)
		return 0;

	mhi_dev = to_mhi_ep_device(dev);
	mhi_cntrl = mhi_dev->mhi_cntrl;

	/* Only destroy devices created for channels */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	ul_chan = mhi_dev->ul_chan;
	dl_chan = mhi_dev->dl_chan;

	if (ul_chan)
		put_device(&ul_chan->mhi_dev->dev);

	if (dl_chan)
		put_device(&dl_chan->mhi_dev->dev);

	dev_dbg(&mhi_cntrl->mhi_dev->dev, "Destroying device for chan:%s\n",
		mhi_dev->name);

	/* Notify the client and remove the device from MHI bus */
	device_del(dev);
	put_device(dev);

	return 0;
}

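/*
 * Parse the channel configuration supplied by the controller driver and
 * populate the channel array.
 */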
static int mhi_ep_chan_init(struct mhi_ep_cntrl *mhi_cntrl,
			    const struct mhi_ep_cntrl_config *config)
{
	const struct mhi_ep_channel_config *ch_cfg;
	struct device *dev = mhi_cntrl->cntrl_dev;
	u32 chan, i;
	int ret = -EINVAL;

	mhi_cntrl->max_chan = config->max_channels;

	/*
	 * Allocate max_channels supported by the MHI endpoint and populate
	 * only the defined channels
	 */
	mhi_cntrl->mhi_chan = kcalloc(mhi_cntrl->max_chan, sizeof(*mhi_cntrl->mhi_chan),
				      GFP_KERNEL);
	if (!mhi_cntrl->mhi_chan)
		return -ENOMEM;

	for (i = 0; i < config->num_channels; i++) {
		struct mhi_ep_chan *mhi_chan;

		ch_cfg = &config->ch_cfg[i];

		chan = ch_cfg->num;
		if (chan >= mhi_cntrl->max_chan) {
			dev_err(dev, "Channel (%u) exceeds maximum available channels (%u)\n",
				chan, mhi_cntrl->max_chan);
			goto error_chan_cfg;
		}

		/* Bi-directional and direction less channels are not supported */
		if (ch_cfg->dir == DMA_BIDIRECTIONAL || ch_cfg->dir == DMA_NONE) {
			dev_err(dev, "Invalid direction (%u) for channel (%u)\n",
				ch_cfg->dir, chan);
			goto error_chan_cfg;
		}

		mhi_chan = &mhi_cntrl->mhi_chan[chan];
		mhi_chan->name = ch_cfg->name;
		mhi_chan->chan = chan;
		mhi_chan->dir = ch_cfg->dir;
		mutex_init(&mhi_chan->lock);
	}

	return 0;

error_chan_cfg:
	kfree(mhi_cntrl->mhi_chan);

	return ret;
}

/*
 * Allocate channel and command rings here. Event rings will be allocated
 * in mhi_ep_power_up() as the config comes from the host.
 */
int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
			       const struct mhi_ep_cntrl_config *config)
{
	struct mhi_ep_device *mhi_dev;
	int ret;

	if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->mmio || !mhi_cntrl->irq)
		return -EINVAL;

	ret = mhi_ep_chan_init(mhi_cntrl, config);
	if (ret)
		return ret;

	mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS, sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
	if (!mhi_cntrl->mhi_cmd) {
		ret = -ENOMEM;
		goto err_free_ch;
	}

	mhi_cntrl->ev_ring_el_cache = kmem_cache_create("mhi_ep_event_ring_el",
							sizeof(struct mhi_ring_element), 0,
							SLAB_CACHE_DMA, NULL);
	if (!mhi_cntrl->ev_ring_el_cache) {
		ret = -ENOMEM;
		goto err_free_cmd;
	}

	mhi_cntrl->tre_buf_cache = kmem_cache_create("mhi_ep_tre_buf", MHI_EP_DEFAULT_MTU, 0,
						     SLAB_CACHE_DMA, NULL);
	if (!mhi_cntrl->tre_buf_cache) {
		ret = -ENOMEM;
		goto err_destroy_ev_ring_el_cache;
	}

	mhi_cntrl->ring_item_cache = kmem_cache_create("mhi_ep_ring_item",
						       sizeof(struct mhi_ep_ring_item), 0,
						       0, NULL);
	if (!mhi_cntrl->ring_item_cache) {
		ret = -ENOMEM;
		goto err_destroy_tre_buf_cache;
	}

	INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker);
	INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker);
	INIT_WORK(&mhi_cntrl->cmd_ring_work, mhi_ep_cmd_ring_worker);
	INIT_WORK(&mhi_cntrl->ch_ring_work, mhi_ep_ch_ring_worker);

	mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", 0, 0);
	if (!mhi_cntrl->wq) {
		ret = -ENOMEM;
		goto err_destroy_ring_item_cache;
	}

	INIT_LIST_HEAD(&mhi_cntrl->st_transition_list);
	INIT_LIST_HEAD(&mhi_cntrl->ch_db_list);
	spin_lock_init(&mhi_cntrl->list_lock);
	mutex_init(&mhi_cntrl->state_lock);
	mutex_init(&mhi_cntrl->event_lock);

	/* Set MHI version and AMSS EE before enumeration */
	mhi_ep_mmio_write(mhi_cntrl, EP_MHIVER, config->mhi_version);
	mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);

	/* Set controller index */
	ret = ida_alloc(&mhi_ep_cntrl_ida, GFP_KERNEL);
	if (ret < 0)
		goto err_destroy_wq;

	mhi_cntrl->index = ret;

	irq_set_status_flags(mhi_cntrl->irq, IRQ_NOAUTOEN);
	ret = request_irq(mhi_cntrl->irq, mhi_ep_irq, IRQF_TRIGGER_HIGH,
			  "doorbell_irq", mhi_cntrl);
	if (ret) {
		dev_err(mhi_cntrl->cntrl_dev, "Failed to request Doorbell IRQ\n");
		goto err_ida_free;
	}

	/* Allocate the controller device */
	mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_CONTROLLER);
	if (IS_ERR(mhi_dev)) {
		dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate controller device\n");
		ret = PTR_ERR(mhi_dev);
		goto err_free_irq;
	}

	ret = dev_set_name(&mhi_dev->dev, "mhi_ep%u", mhi_cntrl->index);
	if (ret)
		goto err_put_dev;

	mhi_dev->name = dev_name(&mhi_dev->dev);
	mhi_cntrl->mhi_dev = mhi_dev;

	ret = device_add(&mhi_dev->dev);
	if (ret)
		goto err_put_dev;

	dev_dbg(&mhi_dev->dev, "MHI EP Controller registered\n");

	return 0;

err_put_dev:
	put_device(&mhi_dev->dev);
err_free_irq:
	free_irq(mhi_cntrl->irq, mhi_cntrl);
err_ida_free:
	ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index);
err_destroy_wq:
	destroy_workqueue(mhi_cntrl->wq);
err_destroy_ring_item_cache:
	kmem_cache_destroy(mhi_cntrl->ring_item_cache);
err_destroy_tre_buf_cache:
	kmem_cache_destroy(mhi_cntrl->tre_buf_cache);
err_destroy_ev_ring_el_cache:
	kmem_cache_destroy(mhi_cntrl->ev_ring_el_cache);
err_free_cmd:
	kfree(mhi_cntrl->mhi_cmd);
err_free_ch:
	kfree(mhi_cntrl->mhi_chan);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_ep_register_controller);

/*
 * It is expected that the controller drivers will power down the MHI EP stack
 * using "mhi_ep_power_down()" before calling this function to unregister themselves.
 */
void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct mhi_ep_device *mhi_dev = mhi_cntrl->mhi_dev;

	destroy_workqueue(mhi_cntrl->wq);

	free_irq(mhi_cntrl->irq, mhi_cntrl);

	kmem_cache_destroy(mhi_cntrl->tre_buf_cache);
	kmem_cache_destroy(mhi_cntrl->ev_ring_el_cache);
	kmem_cache_destroy(mhi_cntrl->ring_item_cache);
	kfree(mhi_cntrl->mhi_cmd);
	kfree(mhi_cntrl->mhi_chan);

	device_del(&mhi_dev->dev);
	put_device(&mhi_dev->dev);

	ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index);
}
EXPORT_SYMBOL_GPL(mhi_ep_unregister_controller);

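/* Bus probe: wire up the client driver's UL/DL transfer callbacks before calling its probe() */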
static int mhi_ep_driver_probe(struct device *dev)
{
	struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
	struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver);
	struct mhi_ep_chan *ul_chan = mhi_dev->ul_chan;
	struct mhi_ep_chan *dl_chan = mhi_dev->dl_chan;

	ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
	dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;

	return mhi_drv->probe(mhi_dev, mhi_dev->id);
}

static int mhi_ep_driver_remove(struct device *dev)
{
	struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
	struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver);
	struct mhi_result result = {};
	struct mhi_ep_chan *mhi_chan;
	int dir;

	/* Skip if it is a controller device */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	/* Disconnect the channels associated with the driver */
	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

		if (!mhi_chan)
			continue;

		mutex_lock(&mhi_chan->lock);
		/* Send channel disconnect status to the client driver */
		if (mhi_chan->xfer_cb) {
			result.transaction_status = -ENOTCONN;
			result.bytes_xferd = 0;
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
		}

		mhi_chan->state = MHI_CH_STATE_DISABLED;
		mhi_chan->xfer_cb = NULL;
		mutex_unlock(&mhi_chan->lock);
	}

	/* Remove the client driver now */
	mhi_drv->remove(mhi_dev);

	return 0;
}

int __mhi_ep_driver_register(struct mhi_ep_driver *mhi_drv, struct module *owner)
{
	struct device_driver *driver = &mhi_drv->driver;

	if (!mhi_drv->probe || !mhi_drv->remove)
		return -EINVAL;

	/* Client drivers should have callbacks defined for both channels */
	if (!mhi_drv->ul_xfer_cb || !mhi_drv->dl_xfer_cb)
		return -EINVAL;

	driver->bus = &mhi_ep_bus_type;
	driver->owner = owner;
	driver->probe = mhi_ep_driver_probe;
	driver->remove = mhi_ep_driver_remove;

	return driver_register(driver);
}
EXPORT_SYMBOL_GPL(__mhi_ep_driver_register);

void mhi_ep_driver_unregister(struct mhi_ep_driver *mhi_drv)
{
	driver_unregister(&mhi_drv->driver);
}
EXPORT_SYMBOL_GPL(mhi_ep_driver_unregister);

static int mhi_ep_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);

	return add_uevent_var(env, "MODALIAS=" MHI_EP_DEVICE_MODALIAS_FMT,
			      mhi_dev->name);
}

static int mhi_ep_match(struct device *dev, struct device_driver *drv)
{
	struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
	struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(drv);
	const struct mhi_device_id *id;

	/*
	 * If the device is a controller type then there is no client driver
	 * associated with it
	 */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	for (id = mhi_drv->id_table; id->chan[0]; id++)
		if (!strcmp(mhi_dev->name, id->chan)) {
			mhi_dev->id = id;
			return 1;
		}

	return 0;
}

struct bus_type mhi_ep_bus_type = {
	.name = "mhi_ep",
	.dev_name = "mhi_ep",
	.match = mhi_ep_match,
	.uevent = mhi_ep_uevent,
};

static int __init mhi_ep_init(void)
{
	return bus_register(&mhi_ep_bus_type);
}

static void __exit mhi_ep_exit(void)
{
	bus_unregister(&mhi_ep_bus_type);
}

postcore_initcall(mhi_ep_init);
module_exit(mhi_ep_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MHI Bus Endpoint stack");
MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");