// SPDX-License-Identifier: GPL-2.0
/*
 * MHI Endpoint bus stack
 *
 * Copyright (C) 2022 Linaro Ltd.
 * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
 */

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-direction.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mhi_ep.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include "internal.h"

#define M0_WAIT_DELAY_MS	100
#define M0_WAIT_COUNT		100

static DEFINE_IDA(mhi_ep_cntrl_ida);

static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id);
static int mhi_ep_destroy_device(struct device *dev, void *data);

static int mhi_ep_send_event(struct mhi_ep_cntrl *mhi_cntrl, u32 ring_idx,
			     struct mhi_ring_element *el, bool bei)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	union mhi_ep_ring_ctx *ctx;
	struct mhi_ep_ring *ring;
	int ret;

	mutex_lock(&mhi_cntrl->event_lock);
	ring = &mhi_cntrl->mhi_event[ring_idx].ring;
	ctx = (union mhi_ep_ring_ctx *)&mhi_cntrl->ev_ctx_cache[ring_idx];
	if (!ring->started) {
		ret = mhi_ep_ring_start(mhi_cntrl, ring, ctx);
		if (ret) {
			dev_err(dev, "Error starting event ring (%u)\n", ring_idx);
			goto err_unlock;
		}
	}

	/* Add element to the event ring */
	ret = mhi_ep_ring_add_element(ring, el);
	if (ret) {
		dev_err(dev, "Error adding element to event ring (%u)\n", ring_idx);
		goto err_unlock;
	}

	mutex_unlock(&mhi_cntrl->event_lock);

	/*
	 * Raise IRQ to host only if the BEI flag is not set in TRE. Host might
	 * set this flag for interrupt moderation as per MHI protocol.
	 */
	if (!bei)
		mhi_cntrl->raise_irq(mhi_cntrl, ring->irq_vector);

	return 0;

err_unlock:
	mutex_unlock(&mhi_cntrl->event_lock);

	return ret;
}

static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
					struct mhi_ring_element *tre, u32 len, enum mhi_ev_ccs code)
{
	struct mhi_ring_element event = {};

	event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(*tre));
	event.dword[0] = MHI_TRE_EV_DWORD0(code, len);
	event.dword[1] = MHI_TRE_EV_DWORD1(ring->ch_id, MHI_PKT_TYPE_TX_EVENT);

	return mhi_ep_send_event(mhi_cntrl, ring->er_index, &event, MHI_TRE_DATA_GET_BEI(tre));
}

int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state)
{
	struct mhi_ring_element event = {};

	event.dword[0] = MHI_SC_EV_DWORD0(state);
	event.dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_STATE_CHANGE_EVENT);

	return mhi_ep_send_event(mhi_cntrl, 0, &event, 0);
}

int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env)
{
	struct mhi_ring_element event = {};

	event.dword[0] = MHI_EE_EV_DWORD0(exec_env);
	event.dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_EE_EVENT);

	return mhi_ep_send_event(mhi_cntrl, 0, &event, 0);
}

static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ev_ccs code)
{
	struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring;
	struct mhi_ring_element event = {};

	event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(struct mhi_ring_element));
	event.dword[0] = MHI_CC_EV_DWORD0(code);
	event.dword[1] = MHI_CC_EV_DWORD1(MHI_PKT_TYPE_CMD_COMPLETION_EVENT);

	return mhi_ep_send_event(mhi_cntrl, 0, &event, 0);
}

static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
{
	struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_result result = {};
	struct mhi_ep_chan *mhi_chan;
	struct mhi_ep_ring *ch_ring;
	u32 tmp, ch_id;
	int ret;

	ch_id = MHI_TRE_GET_CMD_CHID(el);

	/* Check if the channel is supported by the controller */
	if ((ch_id >= mhi_cntrl->max_chan) || !mhi_cntrl->mhi_chan[ch_id].name) {
		dev_err(dev, "Channel (%u) not supported!\n", ch_id);
		return -ENODEV;
	}

	mhi_chan = &mhi_cntrl->mhi_chan[ch_id];
	ch_ring = &mhi_cntrl->mhi_chan[ch_id].ring;

	switch (MHI_TRE_GET_CMD_TYPE(el)) {
	case MHI_PKT_TYPE_START_CHAN_CMD:
		dev_dbg(dev, "Received START command for channel (%u)\n", ch_id);

		mutex_lock(&mhi_chan->lock);
		/* Initialize and configure the corresponding channel ring */
		if (!ch_ring->started) {
			ret = mhi_ep_ring_start(mhi_cntrl, ch_ring,
				(union mhi_ep_ring_ctx *)&mhi_cntrl->ch_ctx_cache[ch_id]);
			if (ret) {
				dev_err(dev, "Failed to start ring for channel (%u)\n", ch_id);
				ret = mhi_ep_send_cmd_comp_event(mhi_cntrl,
								 MHI_EV_CC_UNDEFINED_ERR);
				if (ret)
					dev_err(dev, "Error sending completion event: %d\n", ret);

				goto err_unlock;
			}
		}

		/* Set channel state to RUNNING */
		mhi_chan->state = MHI_CH_STATE_RUNNING;
		tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING);
		mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);

		ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
		if (ret) {
			dev_err(dev, "Error sending command completion event (%u)\n",
				MHI_EV_CC_SUCCESS);
			goto err_unlock;
		}

		mutex_unlock(&mhi_chan->lock);

		/*
		 * Create MHI device only during UL channel start. Since the MHI
		 * channels operate in a pair, we'll associate both UL and DL
		 * channels to the same device.
		 *
		 * We also need to check for mhi_dev != NULL because the host
		 * will issue START_CHAN command during resume and we don't
		 * destroy the device during suspend.
		 */
		if (!(ch_id % 2) && !mhi_chan->mhi_dev) {
			ret = mhi_ep_create_device(mhi_cntrl, ch_id);
			if (ret) {
				dev_err(dev, "Error creating device for channel (%u)\n", ch_id);
				mhi_ep_handle_syserr(mhi_cntrl);
				return ret;
			}
		}

		/* Finally, enable DB for the channel */
		mhi_ep_mmio_enable_chdb(mhi_cntrl, ch_id);

		break;
	case MHI_PKT_TYPE_STOP_CHAN_CMD:
		dev_dbg(dev, "Received STOP command for channel (%u)\n", ch_id);
		if (!ch_ring->started) {
			dev_err(dev, "Channel (%u) not opened\n", ch_id);
			return -ENODEV;
		}

		mutex_lock(&mhi_chan->lock);
		/* Disable DB for the channel */
		mhi_ep_mmio_disable_chdb(mhi_cntrl, ch_id);

		/* Send channel disconnect status to client drivers */
		if (mhi_chan->xfer_cb) {
			result.transaction_status = -ENOTCONN;
			result.bytes_xferd = 0;
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
		}

		/* Set channel state to STOP */
		mhi_chan->state = MHI_CH_STATE_STOP;
		tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_STOP);
		mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);

		ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
		if (ret) {
			dev_err(dev, "Error sending command completion event (%u)\n",
				MHI_EV_CC_SUCCESS);
			goto err_unlock;
		}

		mutex_unlock(&mhi_chan->lock);
		break;
	case MHI_PKT_TYPE_RESET_CHAN_CMD:
		dev_dbg(dev, "Received RESET command for channel (%u)\n", ch_id);
		if (!ch_ring->started) {
			dev_err(dev, "Channel (%u) not opened\n", ch_id);
			return -ENODEV;
		}

		mutex_lock(&mhi_chan->lock);
		/* Stop and reset the transfer ring */
		mhi_ep_ring_reset(mhi_cntrl, ch_ring);

		/* Send channel disconnect status to client driver */
		if (mhi_chan->xfer_cb) {
			result.transaction_status = -ENOTCONN;
			result.bytes_xferd = 0;
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
		}

		/* Set channel state to DISABLED */
		mhi_chan->state = MHI_CH_STATE_DISABLED;
		tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED);
		mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);

		ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
		if (ret) {
			dev_err(dev, "Error sending command completion event (%u)\n",
				MHI_EV_CC_SUCCESS);
			goto err_unlock;
		}

		mutex_unlock(&mhi_chan->lock);
		break;
	default:
		dev_err(dev, "Invalid command received: %lu for channel (%u)\n",
			MHI_TRE_GET_CMD_TYPE(el), ch_id);
		return -EINVAL;
	}

	return 0;

err_unlock:
	mutex_unlock(&mhi_chan->lock);

	return ret;
}

bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir)
{
	struct mhi_ep_chan *mhi_chan = (dir == DMA_FROM_DEVICE) ? mhi_dev->dl_chan :
								  mhi_dev->ul_chan;
	struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;

	return !!(ring->rd_offset == ring->wr_offset);
}
EXPORT_SYMBOL_GPL(mhi_ep_queue_is_empty);

static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
			       struct mhi_ep_ring *ring,
			       struct mhi_result *result,
			       u32 len)
{
	struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	size_t tr_len, read_offset, write_offset;
	struct mhi_ring_element *el;
	bool tr_done = false;
	void *write_addr;
	u64 read_addr;
	u32 buf_left;
	int ret;

	buf_left = len;

	do {
		/* Don't process the transfer ring if the channel is not in RUNNING state */
		if (mhi_chan->state != MHI_CH_STATE_RUNNING) {
			dev_err(dev, "Channel not available\n");
			return -ENODEV;
		}

		el = &ring->ring_cache[ring->rd_offset];

		/* Check if there is data pending to be read from previous read operation */
		if (mhi_chan->tre_bytes_left) {
			dev_dbg(dev, "TRE bytes remaining: %u\n", mhi_chan->tre_bytes_left);
			tr_len = min(buf_left, mhi_chan->tre_bytes_left);
		} else {
			mhi_chan->tre_loc = MHI_TRE_DATA_GET_PTR(el);
			mhi_chan->tre_size = MHI_TRE_DATA_GET_LEN(el);
			mhi_chan->tre_bytes_left = mhi_chan->tre_size;

			tr_len = min(buf_left, mhi_chan->tre_size);
		}

		read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left;
		write_offset = len - buf_left;
		read_addr = mhi_chan->tre_loc + read_offset;
		write_addr = result->buf_addr + write_offset;

		dev_dbg(dev, "Reading %zd bytes from channel (%u)\n", tr_len, ring->ch_id);
		ret = mhi_cntrl->read_from_host(mhi_cntrl, read_addr, write_addr, tr_len);
		if (ret < 0) {
			dev_err(&mhi_chan->mhi_dev->dev, "Error reading from channel\n");
			return ret;
		}

		buf_left -= tr_len;
		mhi_chan->tre_bytes_left -= tr_len;

		/*
		 * Once the TRE (Transfer Ring Element) of a TD (Transfer Descriptor) has been
		 * read completely:
		 *
		 * 1. Send completion event to the host based on the flags set in TRE.
		 * 2. Increment the local read offset of the transfer ring.
		 */
		if (!mhi_chan->tre_bytes_left) {
			/*
			 * The host will split the data packet into multiple TREs if it can't fit
			 * the packet in a single TRE. In that case, CHAIN flag will be set by the
			 * host for all TREs except the last one.
			 */
			if (MHI_TRE_DATA_GET_CHAIN(el)) {
				/*
				 * IEOB (Interrupt on End of Block) flag will be set by the host if
				 * it expects the completion event for all TREs of a TD.
				 */
				if (MHI_TRE_DATA_GET_IEOB(el)) {
					ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
								MHI_TRE_DATA_GET_LEN(el),
								MHI_EV_CC_EOB);
					if (ret < 0) {
						dev_err(&mhi_chan->mhi_dev->dev,
							"Error sending transfer compl. event\n");
						return ret;
					}
				}
			} else {
				/*
				 * IEOT (Interrupt on End of Transfer) flag will be set by the host
				 * for the last TRE of the TD and expects the completion event for
				 * the same.
				 */
				if (MHI_TRE_DATA_GET_IEOT(el)) {
					ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
								MHI_TRE_DATA_GET_LEN(el),
								MHI_EV_CC_EOT);
					if (ret < 0) {
						dev_err(&mhi_chan->mhi_dev->dev,
							"Error sending transfer compl. event\n");
						return ret;
					}
				}

				tr_done = true;
			}

			mhi_ep_ring_inc_index(ring);
		}

		result->bytes_xferd += tr_len;
	} while (buf_left && !tr_done);

	return 0;
}

static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
{
	struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
	struct mhi_result result = {};
	u32 len = MHI_EP_DEFAULT_MTU;
	struct mhi_ep_chan *mhi_chan;
	int ret;

	mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];

	/*
	 * Bail out if transfer callback is not registered for the channel.
	 * This is most likely due to the client driver not being loaded at this point.
	 */
	if (!mhi_chan->xfer_cb) {
		dev_err(&mhi_chan->mhi_dev->dev, "Client driver not available\n");
		return -ENODEV;
	}

	if (ring->ch_id % 2) {
		/* DL channel */
		result.dir = mhi_chan->dir;
		mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
	} else {
		/* UL channel */
		result.buf_addr = kzalloc(len, GFP_KERNEL);
		if (!result.buf_addr)
			return -ENOMEM;

		do {
			ret = mhi_ep_read_channel(mhi_cntrl, ring, &result, len);
			if (ret < 0) {
				dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n");
				kfree(result.buf_addr);
				return ret;
			}

			result.dir = mhi_chan->dir;
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
			result.bytes_xferd = 0;
			memset(result.buf_addr, 0, len);

			/* Read until the ring becomes empty */
		} while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE));

		kfree(result.buf_addr);
	}

	return 0;
}

/* TODO: Handle partially formed TDs */
int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
{
	struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan;
	struct device *dev = &mhi_chan->mhi_dev->dev;
	struct mhi_ring_element *el;
	u32 buf_left, read_offset;
	struct mhi_ep_ring *ring;
	enum mhi_ev_ccs code;
	void *read_addr;
	u64 write_addr;
	size_t tr_len;
	u32 tre_len;
	int ret;

	buf_left = skb->len;
	ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;

	mutex_lock(&mhi_chan->lock);

	do {
		/* Don't process the transfer ring if the channel is not in RUNNING state */
		if (mhi_chan->state != MHI_CH_STATE_RUNNING) {
			dev_err(dev, "Channel not available\n");
			ret = -ENODEV;
			goto err_exit;
		}

		if (mhi_ep_queue_is_empty(mhi_dev, DMA_FROM_DEVICE)) {
			dev_err(dev, "TRE not available!\n");
			ret = -ENOSPC;
			goto err_exit;
		}

		el = &ring->ring_cache[ring->rd_offset];
		tre_len = MHI_TRE_DATA_GET_LEN(el);

		tr_len = min(buf_left, tre_len);
		read_offset = skb->len - buf_left;
		read_addr = skb->data + read_offset;
		write_addr = MHI_TRE_DATA_GET_PTR(el);

		dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id);
		ret = mhi_cntrl->write_to_host(mhi_cntrl, read_addr, write_addr, tr_len);
		if (ret < 0) {
			dev_err(dev, "Error writing to the channel\n");
			goto err_exit;
		}

		buf_left -= tr_len;
		/*
		 * For all TREs queued by the host for DL channel, only the EOT flag will be set.
		 * If the packet doesn't fit into a single TRE, send the OVERFLOW event to
		 * the host so that the host can adjust the packet boundary to next TREs. Else send
		 * the EOT event to the host indicating the packet boundary.
		 */
		if (buf_left)
			code = MHI_EV_CC_OVERFLOW;
		else
			code = MHI_EV_CC_EOT;

		ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, tr_len, code);
		if (ret) {
			dev_err(dev, "Error sending transfer completion event\n");
			goto err_exit;
		}

		mhi_ep_ring_inc_index(ring);
	} while (buf_left);

	mutex_unlock(&mhi_chan->lock);

	return 0;

err_exit:
	mutex_unlock(&mhi_chan->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_ep_queue_skb);

static int mhi_ep_cache_host_cfg(struct mhi_ep_cntrl *mhi_cntrl)
{
	size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret;

	/* Update the number of event rings (NER) programmed by the host */
	mhi_ep_mmio_update_ner(mhi_cntrl);

	dev_dbg(dev, "Number of Event rings: %u, HW Event rings: %u\n",
		mhi_cntrl->event_rings, mhi_cntrl->hw_event_rings);

	ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan;
	ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings;
	cmd_ctx_host_size = sizeof(struct mhi_cmd_ctxt) * NR_OF_CMD_RINGS;

	/* Get the channel context base pointer from host */
	mhi_ep_mmio_get_chc_base(mhi_cntrl);

	/* Allocate and map memory for caching host channel context */
	ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa,
				   &mhi_cntrl->ch_ctx_cache_phys,
				   (void __iomem **) &mhi_cntrl->ch_ctx_cache,
				   ch_ctx_host_size);
	if (ret) {
		dev_err(dev, "Failed to allocate and map ch_ctx_cache\n");
		return ret;
	}

	/* Get the event context base pointer from host */
	mhi_ep_mmio_get_erc_base(mhi_cntrl);

	/* Allocate and map memory for caching host event context */
	ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa,
				   &mhi_cntrl->ev_ctx_cache_phys,
				   (void __iomem **) &mhi_cntrl->ev_ctx_cache,
				   ev_ctx_host_size);
	if (ret) {
		dev_err(dev, "Failed to allocate and map ev_ctx_cache\n");
		goto err_ch_ctx;
	}

	/* Get the command context base pointer from host */
	mhi_ep_mmio_get_crc_base(mhi_cntrl);

	/* Allocate and map memory for caching host command context */
	ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa,
				   &mhi_cntrl->cmd_ctx_cache_phys,
				   (void __iomem **) &mhi_cntrl->cmd_ctx_cache,
				   cmd_ctx_host_size);
	if (ret) {
		dev_err(dev, "Failed to allocate and map cmd_ctx_cache\n");
		goto err_ev_ctx;
	}

	/* Initialize command ring */
	ret = mhi_ep_ring_start(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring,
				(union mhi_ep_ring_ctx *)mhi_cntrl->cmd_ctx_cache);
	if (ret) {
		dev_err(dev, "Failed to start the command ring\n");
		goto err_cmd_ctx;
	}

	return ret;

err_cmd_ctx:
	mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys,
			      (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size);

err_ev_ctx:
	mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys,
			      (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size);

err_ch_ctx:
	mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys,
			      (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size);

	return ret;
}

static void mhi_ep_free_host_cfg(struct mhi_ep_cntrl *mhi_cntrl)
{
	size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size;

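	/* These sizes must mirror the ones computed in mhi_ep_cache_host_cfg() */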
	ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan;
	ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings;
	cmd_ctx_host_size = sizeof(struct mhi_cmd_ctxt) * NR_OF_CMD_RINGS;

	mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys,
			      (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size);

	mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys,
			      (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size);

	mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys,
			      (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size);
}

static void mhi_ep_enable_int(struct mhi_ep_cntrl *mhi_cntrl)
{
	/*
	 * Doorbell interrupts are enabled when the corresponding channel gets started.
	 * Enabling all interrupts here triggers spurious irqs as some of the interrupts
	 * associated with hw channels always get triggered.
	 */
	mhi_ep_mmio_enable_ctrl_interrupt(mhi_cntrl);
	mhi_ep_mmio_enable_cmdb_interrupt(mhi_cntrl);
}

static int mhi_ep_enable(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_state state;
	bool mhi_reset;
	u32 count = 0;
	int ret;

	/* Wait for Host to set the M0 state */
	do {
		msleep(M0_WAIT_DELAY_MS);
		mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset);
		if (mhi_reset) {
			/* Clear the MHI reset if host is in reset state */
			mhi_ep_mmio_clear_reset(mhi_cntrl);
			dev_info(dev, "Detected Host reset while waiting for M0\n");
		}
		count++;
	} while (state != MHI_STATE_M0 && count < M0_WAIT_COUNT);

	if (state != MHI_STATE_M0) {
		dev_err(dev, "Host failed to enter M0\n");
		return -ETIMEDOUT;
	}

	ret = mhi_ep_cache_host_cfg(mhi_cntrl);
	if (ret) {
		dev_err(dev, "Failed to cache host config\n");
		return ret;
	}

	mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);

	/* Enable all interrupts now */
	mhi_ep_enable_int(mhi_cntrl);

	return 0;
}

static void mhi_ep_cmd_ring_worker(struct work_struct *work)
{
	struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, cmd_ring_work);
	struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_ring_element *el;
	int ret;

	/* Update the write offset for the ring */
	ret = mhi_ep_update_wr_offset(ring);
	if (ret) {
		dev_err(dev, "Error updating write offset for ring\n");
		return;
	}

	/* Sanity check to make sure there are elements in the ring */
	if (ring->rd_offset == ring->wr_offset)
		return;

	/*
	 * Process command ring element till write offset. In case of an error, just try to
	 * process next element.
	 */
	while (ring->rd_offset != ring->wr_offset) {
		el = &ring->ring_cache[ring->rd_offset];

		ret = mhi_ep_process_cmd_ring(ring, el);
		if (ret)
			dev_err(dev, "Error processing cmd ring element: %zu\n", ring->rd_offset);

		mhi_ep_ring_inc_index(ring);
	}
}

static void mhi_ep_ch_ring_worker(struct work_struct *work)
{
	struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, ch_ring_work);
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_ep_ring_item *itr, *tmp;
	struct mhi_ring_element *el;
	struct mhi_ep_ring *ring;
	struct mhi_ep_chan *chan;
	unsigned long flags;
	LIST_HEAD(head);
	int ret;

	spin_lock_irqsave(&mhi_cntrl->list_lock, flags);
	list_splice_tail_init(&mhi_cntrl->ch_db_list, &head);
	spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags);

	/* Process each queued channel ring. In case of an error, just process next element. */
	list_for_each_entry_safe(itr, tmp, &head, node) {
		list_del(&itr->node);
		ring = itr->ring;

		/* Update the write offset for the ring */
		ret = mhi_ep_update_wr_offset(ring);
		if (ret) {
			dev_err(dev, "Error updating write offset for ring\n");
			kfree(itr);
			continue;
		}

		/* Sanity check to make sure there are elements in the ring */
		if (ring->rd_offset == ring->wr_offset) {
			kfree(itr);
			continue;
		}

		el = &ring->ring_cache[ring->rd_offset];
		chan = &mhi_cntrl->mhi_chan[ring->ch_id];

		mutex_lock(&chan->lock);
		dev_dbg(dev, "Processing the ring for channel (%u)\n", ring->ch_id);
		ret = mhi_ep_process_ch_ring(ring, el);
		if (ret) {
			dev_err(dev, "Error processing ring for channel (%u): %d\n",
				ring->ch_id, ret);
			mutex_unlock(&chan->lock);
			kfree(itr);
			continue;
		}

		mutex_unlock(&chan->lock);
		kfree(itr);
	}
}

static void mhi_ep_state_worker(struct work_struct *work)
{
	struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, state_work);
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_ep_state_transition *itr, *tmp;
	unsigned long flags;
	LIST_HEAD(head);
	int ret;

	spin_lock_irqsave(&mhi_cntrl->list_lock, flags);
	list_splice_tail_init(&mhi_cntrl->st_transition_list, &head);
	spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags);

	list_for_each_entry_safe(itr, tmp, &head, node) {
		list_del(&itr->node);
		dev_dbg(dev, "Handling MHI state transition to %s\n",
			mhi_state_str(itr->state));

		switch (itr->state) {
		case MHI_STATE_M0:
			ret = mhi_ep_set_m0_state(mhi_cntrl);
			if (ret)
				dev_err(dev, "Failed to transition to M0 state\n");
			break;
		case MHI_STATE_M3:
			ret = mhi_ep_set_m3_state(mhi_cntrl);
			if (ret)
				dev_err(dev, "Failed to transition to M3 state\n");
			break;
		default:
			dev_err(dev, "Invalid MHI state transition: %d\n", itr->state);
			break;
		}
		kfree(itr);
	}
}

static void mhi_ep_queue_channel_db(struct mhi_ep_cntrl *mhi_cntrl, unsigned long ch_int,
				    u32 ch_idx)
{
	struct mhi_ep_ring_item *item;
	struct mhi_ep_ring *ring;
	bool work = !!ch_int;
	LIST_HEAD(head);
	u32 i;

	/* First add the ring items to a local list */
	for_each_set_bit(i, &ch_int, 32) {
		/* Channel index varies for each register: 0, 32, 64, 96 */
		u32 ch_id = ch_idx + i;

		ring = &mhi_cntrl->mhi_chan[ch_id].ring;
		item = kzalloc(sizeof(*item), GFP_ATOMIC);
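		/* Bail out if a ring item can't be allocated */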
		if (!item)
			return;

		item->ring = ring;
		list_add_tail(&item->node, &head);
	}

	/* Now, splice the local list into ch_db_list and queue the work item */
	if (work) {
		spin_lock(&mhi_cntrl->list_lock);
		list_splice_tail_init(&head, &mhi_cntrl->ch_db_list);
		spin_unlock(&mhi_cntrl->list_lock);

		queue_work(mhi_cntrl->wq, &mhi_cntrl->ch_ring_work);
	}
}

/*
 * Channel interrupt statuses are contained in four 32-bit registers. To check
 * all interrupts, we need to loop through each register and then check for
 * bits set.
 */
static void mhi_ep_check_channel_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
{
	u32 ch_int, ch_idx, i;

	/* Bail out if there is no channel doorbell interrupt */
	if (!mhi_ep_mmio_read_chdb_status_interrupts(mhi_cntrl))
		return;

	for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) {
		ch_idx = i * MHI_MASK_CH_LEN;

		/* Only process channel interrupt if the mask is enabled */
		ch_int = mhi_cntrl->chdb[i].status & mhi_cntrl->chdb[i].mask;
		if (ch_int) {
			mhi_ep_queue_channel_db(mhi_cntrl, ch_int, ch_idx);
			mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_CLEAR_n(i),
					  mhi_cntrl->chdb[i].status);
		}
	}
}

static void mhi_ep_process_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl,
					  enum mhi_state state)
{
	struct mhi_ep_state_transition *item;

	item = kzalloc(sizeof(*item), GFP_ATOMIC);
	if (!item)
		return;

	item->state = state;
	spin_lock(&mhi_cntrl->list_lock);
	list_add_tail(&item->node, &mhi_cntrl->st_transition_list);
	spin_unlock(&mhi_cntrl->list_lock);

	queue_work(mhi_cntrl->wq, &mhi_cntrl->state_work);
}

/*
 * Interrupt handler that services interrupts raised by the host writing to
 * MHICTRL and Command ring doorbell (CRDB) registers for state change and
 * channel interrupts.
 */
static irqreturn_t mhi_ep_irq(int irq, void *data)
{
	struct mhi_ep_cntrl *mhi_cntrl = data;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_state state;
	u32 int_value;
	bool mhi_reset;

	/* Acknowledge the ctrl interrupt */
	int_value = mhi_ep_mmio_read(mhi_cntrl, MHI_CTRL_INT_STATUS);
	mhi_ep_mmio_write(mhi_cntrl, MHI_CTRL_INT_CLEAR, int_value);

	/* Check for ctrl interrupt */
	if (FIELD_GET(MHI_CTRL_INT_STATUS_MSK, int_value)) {
		dev_dbg(dev, "Processing ctrl interrupt\n");
		mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset);
		if (mhi_reset) {
			dev_info(dev, "Host triggered MHI reset!\n");
			disable_irq_nosync(mhi_cntrl->irq);
			schedule_work(&mhi_cntrl->reset_work);
			return IRQ_HANDLED;
		}

		mhi_ep_process_ctrl_interrupt(mhi_cntrl, state);
	}

	/* Check for command doorbell interrupt */
	if (FIELD_GET(MHI_CTRL_INT_STATUS_CRDB_MSK, int_value)) {
		dev_dbg(dev, "Processing command doorbell interrupt\n");
		queue_work(mhi_cntrl->wq, &mhi_cntrl->cmd_ring_work);
	}

	/* Check for channel interrupts */
	mhi_ep_check_channel_interrupt(mhi_cntrl);

	return IRQ_HANDLED;
}

static void mhi_ep_abort_transfer(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct mhi_ep_ring *ch_ring, *ev_ring;
	struct mhi_result result = {};
	struct mhi_ep_chan *mhi_chan;
	int i;

	/* Stop all the channels */
	for (i = 0; i < mhi_cntrl->max_chan; i++) {
		mhi_chan = &mhi_cntrl->mhi_chan[i];
		if (!mhi_chan->ring.started)
			continue;

		mutex_lock(&mhi_chan->lock);
		/* Send channel disconnect status to client drivers */
		if (mhi_chan->xfer_cb) {
			result.transaction_status = -ENOTCONN;
			result.bytes_xferd = 0;
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
		}

		mhi_chan->state = MHI_CH_STATE_DISABLED;
		mutex_unlock(&mhi_chan->lock);
	}

	flush_workqueue(mhi_cntrl->wq);

	/* Destroy devices associated with all channels */
	device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_ep_destroy_device);

	/* Stop and reset the transfer rings */
	for (i = 0; i < mhi_cntrl->max_chan; i++) {
		mhi_chan = &mhi_cntrl->mhi_chan[i];
		if (!mhi_chan->ring.started)
			continue;

		ch_ring = &mhi_cntrl->mhi_chan[i].ring;
		mutex_lock(&mhi_chan->lock);
		mhi_ep_ring_reset(mhi_cntrl, ch_ring);
		mutex_unlock(&mhi_chan->lock);
	}

	/* Stop and reset the event rings */
	for (i = 0; i < mhi_cntrl->event_rings; i++) {
		ev_ring = &mhi_cntrl->mhi_event[i].ring;
		if (!ev_ring->started)
			continue;

		mutex_lock(&mhi_cntrl->event_lock);
		mhi_ep_ring_reset(mhi_cntrl, ev_ring);
		mutex_unlock(&mhi_cntrl->event_lock);
	}

	/* Stop and reset the command ring */
	mhi_ep_ring_reset(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring);

	mhi_ep_free_host_cfg(mhi_cntrl);
	mhi_ep_mmio_mask_interrupts(mhi_cntrl);

	mhi_cntrl->enabled = false;
}

static void mhi_ep_reset_worker(struct work_struct *work)
{
	struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, reset_work);
	enum mhi_state cur_state;

	mhi_ep_power_down(mhi_cntrl);

	spin_lock_bh(&mhi_cntrl->state_lock);
	/* Reset MMIO to signal host that the MHI_RESET is completed in endpoint */
	mhi_ep_mmio_reset(mhi_cntrl);
	cur_state = mhi_cntrl->mhi_state;
	spin_unlock_bh(&mhi_cntrl->state_lock);

	/*
	 * Only proceed further if the reset is due to SYS_ERR. The host will
	 * issue reset during shutdown also and we don't need to do re-init in
	 * that case.
	 */
	if (cur_state == MHI_STATE_SYS_ERR)
		mhi_ep_power_up(mhi_cntrl);
}

/*
 * We don't need to do anything special other than setting the MHI SYS_ERR
 * state. The host will reset all contexts and issue MHI RESET so that we
 * could also recover from error state.
 */
void mhi_ep_handle_syserr(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret;

	ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
	if (ret)
		return;

	/* Signal host that the device went to SYS_ERR state */
	ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_SYS_ERR);
	if (ret)
		dev_err(dev, "Failed sending SYS_ERR state change event: %d\n", ret);
}

int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret, i;

	/*
	 * Mask all interrupts until the state machine is ready. Interrupts will
	 * be enabled later with mhi_ep_enable().
	 */
	mhi_ep_mmio_mask_interrupts(mhi_cntrl);
	mhi_ep_mmio_init(mhi_cntrl);

	mhi_cntrl->mhi_event = kzalloc(mhi_cntrl->event_rings * (sizeof(*mhi_cntrl->mhi_event)),
				       GFP_KERNEL);
	if (!mhi_cntrl->mhi_event)
		return -ENOMEM;

	/* Initialize command, channel and event rings */
	mhi_ep_ring_init(&mhi_cntrl->mhi_cmd->ring, RING_TYPE_CMD, 0);
	for (i = 0; i < mhi_cntrl->max_chan; i++)
		mhi_ep_ring_init(&mhi_cntrl->mhi_chan[i].ring, RING_TYPE_CH, i);
	for (i = 0; i < mhi_cntrl->event_rings; i++)
		mhi_ep_ring_init(&mhi_cntrl->mhi_event[i].ring, RING_TYPE_ER, i);

	mhi_cntrl->mhi_state = MHI_STATE_RESET;

	/* Set AMSS EE before signaling ready state */
	mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);

	/* All set, notify the host that we are ready */
	ret = mhi_ep_set_ready_state(mhi_cntrl);
	if (ret)
		goto err_free_event;

	dev_dbg(dev, "READY state notification sent to the host\n");

	ret = mhi_ep_enable(mhi_cntrl);
	if (ret) {
		dev_err(dev, "Failed to enable MHI endpoint\n");
		goto err_free_event;
	}

	enable_irq(mhi_cntrl->irq);
	mhi_cntrl->enabled = true;

	return 0;

err_free_event:
	kfree(mhi_cntrl->mhi_event);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_ep_power_up);

void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl)
{
	if (mhi_cntrl->enabled) {
		mhi_ep_abort_transfer(mhi_cntrl);
		kfree(mhi_cntrl->mhi_event);
		disable_irq(mhi_cntrl->irq);
	}
}
EXPORT_SYMBOL_GPL(mhi_ep_power_down);

void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct mhi_ep_chan *mhi_chan;
	u32 tmp;
	int i;

	for (i = 0; i < mhi_cntrl->max_chan; i++) {
		mhi_chan = &mhi_cntrl->mhi_chan[i];

		if (!mhi_chan->mhi_dev)
			continue;

		mutex_lock(&mhi_chan->lock);
		/* Skip if the channel is not currently running */
		tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg);
		if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_RUNNING) {
			mutex_unlock(&mhi_chan->lock);
			continue;
		}

		dev_dbg(&mhi_chan->mhi_dev->dev, "Suspending channel\n");
		/* Set channel state to SUSPENDED */
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_SUSPENDED);
		mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp);
		mutex_unlock(&mhi_chan->lock);
	}
}

void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct mhi_ep_chan *mhi_chan;
	u32 tmp;
	int i;

	for (i = 0; i < mhi_cntrl->max_chan; i++) {
		mhi_chan = &mhi_cntrl->mhi_chan[i];

		if (!mhi_chan->mhi_dev)
			continue;

		mutex_lock(&mhi_chan->lock);
		/* Skip if the channel is not currently suspended */
		tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg);
		if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_SUSPENDED) {
			mutex_unlock(&mhi_chan->lock);
			continue;
		}

		dev_dbg(&mhi_chan->mhi_dev->dev, "Resuming channel\n");
		/* Set channel state to RUNNING */
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING);
		mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp);
		mutex_unlock(&mhi_chan->lock);
	}
}

static void mhi_ep_release_device(struct device *dev)
{
	struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);

	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		mhi_dev->mhi_cntrl->mhi_dev = NULL;

	/*
	 * We need to set the mhi_chan->mhi_dev to NULL here since the MHI
	 * devices for the channels will only get created in mhi_ep_create_device()
	 * if the mhi_dev associated with it is NULL.
	 */
	if (mhi_dev->ul_chan)
		mhi_dev->ul_chan->mhi_dev = NULL;

	if (mhi_dev->dl_chan)
		mhi_dev->dl_chan->mhi_dev = NULL;

	kfree(mhi_dev);
}

static struct mhi_ep_device *mhi_ep_alloc_device(struct mhi_ep_cntrl *mhi_cntrl,
						 enum mhi_device_type dev_type)
{
	struct mhi_ep_device *mhi_dev;
	struct device *dev;

	mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL);
	if (!mhi_dev)
		return ERR_PTR(-ENOMEM);

	dev = &mhi_dev->dev;
	device_initialize(dev);
	dev->bus = &mhi_ep_bus_type;
	dev->release = mhi_ep_release_device;

	/* Controller device is always allocated first */
	if (dev_type == MHI_DEVICE_CONTROLLER)
		/* for MHI controller device, parent is the bus device (e.g. PCI EPF) */
		dev->parent = mhi_cntrl->cntrl_dev;
	else
		/* for MHI client devices, parent is the MHI controller device */
		dev->parent = &mhi_cntrl->mhi_dev->dev;

	mhi_dev->mhi_cntrl = mhi_cntrl;
	mhi_dev->dev_type = dev_type;

	return mhi_dev;
}

/*
 * MHI channels are always defined in pairs with UL as the even numbered
 * channel and DL as the odd numbered one. This function takes the UL channel
 * (primary) as ch_id and looks at the next entry in the channel list for
 * the corresponding DL channel (secondary).
 */
static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id)
{
	struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ch_id];
	struct device *dev = mhi_cntrl->cntrl_dev;
	struct mhi_ep_device *mhi_dev;
	int ret;

	/* Check if the channel name is the same for both UL and DL */
	if (strcmp(mhi_chan->name, mhi_chan[1].name)) {
		dev_err(dev, "UL and DL channel names are not same: (%s) != (%s)\n",
			mhi_chan->name, mhi_chan[1].name);
		return -EINVAL;
	}

	mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_XFER);
	if (IS_ERR(mhi_dev))
		return PTR_ERR(mhi_dev);

	/* Configure primary channel */
	mhi_dev->ul_chan = mhi_chan;
	get_device(&mhi_dev->dev);
	mhi_chan->mhi_dev = mhi_dev;

	/* Configure secondary channel as well */
	mhi_chan++;
	mhi_dev->dl_chan = mhi_chan;
	get_device(&mhi_dev->dev);
	mhi_chan->mhi_dev = mhi_dev;

	/* Channel name is same for both UL and DL */
	mhi_dev->name = mhi_chan->name;
	ret = dev_set_name(&mhi_dev->dev, "%s_%s",
			   dev_name(&mhi_cntrl->mhi_dev->dev),
			   mhi_dev->name);
	if (ret) {
		put_device(&mhi_dev->dev);
		return ret;
	}

	ret = device_add(&mhi_dev->dev);
	if (ret)
		put_device(&mhi_dev->dev);

	return ret;
}

static int mhi_ep_destroy_device(struct device *dev, void *data)
{
	struct mhi_ep_device *mhi_dev;
	struct mhi_ep_cntrl *mhi_cntrl;
	struct mhi_ep_chan *ul_chan, *dl_chan;

	if (dev->bus != &mhi_ep_bus_type)
		return 0;

	mhi_dev = to_mhi_ep_device(dev);
	mhi_cntrl = mhi_dev->mhi_cntrl;

	/* Only destroy devices created for channels */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	ul_chan = mhi_dev->ul_chan;
	dl_chan = mhi_dev->dl_chan;

	if (ul_chan)
		put_device(&ul_chan->mhi_dev->dev);

	if (dl_chan)
		put_device(&dl_chan->mhi_dev->dev);

	dev_dbg(&mhi_cntrl->mhi_dev->dev, "Destroying device for chan:%s\n",
		mhi_dev->name);

	/* Notify the client and remove the device from MHI bus */
	device_del(dev);
	put_device(dev);

	return 0;
}

static int mhi_ep_chan_init(struct mhi_ep_cntrl *mhi_cntrl,
			    const struct mhi_ep_cntrl_config *config)
{
	const struct mhi_ep_channel_config *ch_cfg;
	struct device *dev = mhi_cntrl->cntrl_dev;
	u32 chan, i;
	int ret = -EINVAL;

	mhi_cntrl->max_chan = config->max_channels;

	/*
	 * Allocate max_channels supported by the MHI endpoint and populate
	 * only the defined channels
	 */
	mhi_cntrl->mhi_chan = kcalloc(mhi_cntrl->max_chan, sizeof(*mhi_cntrl->mhi_chan),
				      GFP_KERNEL);
	if (!mhi_cntrl->mhi_chan)
		return -ENOMEM;

	for (i = 0; i < config->num_channels; i++) {
		struct mhi_ep_chan *mhi_chan;

		ch_cfg = &config->ch_cfg[i];

		chan = ch_cfg->num;
		if (chan >= mhi_cntrl->max_chan) {
			dev_err(dev, "Channel (%u) exceeds maximum available channels (%u)\n",
				chan, mhi_cntrl->max_chan);
			goto error_chan_cfg;
		}

		/* Bi-directional and directionless channels are not supported */
		if (ch_cfg->dir == DMA_BIDIRECTIONAL || ch_cfg->dir == DMA_NONE) {
			dev_err(dev, "Invalid direction (%u) for channel (%u)\n",
				ch_cfg->dir, chan);
			goto error_chan_cfg;
		}

		mhi_chan = &mhi_cntrl->mhi_chan[chan];
		mhi_chan->name = ch_cfg->name;
		mhi_chan->chan = chan;
		mhi_chan->dir = ch_cfg->dir;
		mutex_init(&mhi_chan->lock);
	}

	return 0;

error_chan_cfg:
	kfree(mhi_cntrl->mhi_chan);

	return ret;
}

/*
 * Allocate channel and command rings here. Event rings will be allocated
 * in mhi_ep_power_up() as the config comes from the host.
 */
int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
			       const struct mhi_ep_cntrl_config *config)
{
	struct mhi_ep_device *mhi_dev;
	int ret;

	if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->mmio || !mhi_cntrl->irq)
		return -EINVAL;

	ret = mhi_ep_chan_init(mhi_cntrl, config);
	if (ret)
		return ret;

	mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS, sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
	if (!mhi_cntrl->mhi_cmd) {
		ret = -ENOMEM;
		goto err_free_ch;
	}

	INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker);
	INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker);
	INIT_WORK(&mhi_cntrl->cmd_ring_work, mhi_ep_cmd_ring_worker);
	INIT_WORK(&mhi_cntrl->ch_ring_work, mhi_ep_ch_ring_worker);

	mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", 0, 0);
	if (!mhi_cntrl->wq) {
		ret = -ENOMEM;
		goto err_free_cmd;
	}

	INIT_LIST_HEAD(&mhi_cntrl->st_transition_list);
	INIT_LIST_HEAD(&mhi_cntrl->ch_db_list);
	spin_lock_init(&mhi_cntrl->state_lock);
	spin_lock_init(&mhi_cntrl->list_lock);
	mutex_init(&mhi_cntrl->event_lock);

	/* Set MHI version and AMSS EE before enumeration */
	mhi_ep_mmio_write(mhi_cntrl, EP_MHIVER, config->mhi_version);
	mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);

	/* Set controller index */
	ret = ida_alloc(&mhi_ep_cntrl_ida, GFP_KERNEL);
	if (ret < 0)
		goto err_destroy_wq;

	mhi_cntrl->index = ret;

	irq_set_status_flags(mhi_cntrl->irq, IRQ_NOAUTOEN);
	ret = request_irq(mhi_cntrl->irq, mhi_ep_irq, IRQF_TRIGGER_HIGH,
			  "doorbell_irq", mhi_cntrl);
	if (ret) {
		dev_err(mhi_cntrl->cntrl_dev, "Failed to request Doorbell IRQ\n");
		goto err_ida_free;
	}

	/* Allocate the controller device */
	mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_CONTROLLER);
	if (IS_ERR(mhi_dev)) {
		dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate controller device\n");
		ret = PTR_ERR(mhi_dev);
		goto err_free_irq;
	}

	ret = dev_set_name(&mhi_dev->dev, "mhi_ep%u", mhi_cntrl->index);
	if (ret)
		goto err_put_dev;

	mhi_dev->name = dev_name(&mhi_dev->dev);
	mhi_cntrl->mhi_dev = mhi_dev;

	ret = device_add(&mhi_dev->dev);
	if (ret)
		goto err_put_dev;

	dev_dbg(&mhi_dev->dev, "MHI EP Controller registered\n");

	return 0;

err_put_dev:
	put_device(&mhi_dev->dev);
err_free_irq:
	free_irq(mhi_cntrl->irq, mhi_cntrl);
err_ida_free:
	ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index);
err_destroy_wq:
	destroy_workqueue(mhi_cntrl->wq);
err_free_cmd:
	kfree(mhi_cntrl->mhi_cmd);
err_free_ch:
	kfree(mhi_cntrl->mhi_chan);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_ep_register_controller);

/*
 * It is expected that the controller drivers will power down the MHI EP stack
 * using "mhi_ep_power_down()" before calling this function to unregister themselves.
 */
void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct mhi_ep_device *mhi_dev = mhi_cntrl->mhi_dev;

	destroy_workqueue(mhi_cntrl->wq);

	free_irq(mhi_cntrl->irq, mhi_cntrl);

	kfree(mhi_cntrl->mhi_cmd);
	kfree(mhi_cntrl->mhi_chan);

	device_del(&mhi_dev->dev);
	put_device(&mhi_dev->dev);

	ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index);
}
EXPORT_SYMBOL_GPL(mhi_ep_unregister_controller);

static int mhi_ep_driver_probe(struct device *dev)
{
	struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
	struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver);
	struct mhi_ep_chan *ul_chan = mhi_dev->ul_chan;
	struct mhi_ep_chan *dl_chan = mhi_dev->dl_chan;

	ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
	dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;

	return mhi_drv->probe(mhi_dev, mhi_dev->id);
}

static int mhi_ep_driver_remove(struct device *dev)
{
	struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
	struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver);
	struct mhi_result result = {};
	struct mhi_ep_chan *mhi_chan;
	int dir;

	/* Skip if it is a controller device */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	/* Disconnect the channels associated with the driver */
	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

		if (!mhi_chan)
			continue;

		mutex_lock(&mhi_chan->lock);
		/* Send channel disconnect status to the client driver */
		if (mhi_chan->xfer_cb) {
			result.transaction_status = -ENOTCONN;
			result.bytes_xferd = 0;
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
		}

		mhi_chan->state = MHI_CH_STATE_DISABLED;
		mhi_chan->xfer_cb = NULL;
		mutex_unlock(&mhi_chan->lock);
	}

	/* Remove the client driver now */
	mhi_drv->remove(mhi_dev);

	return 0;
}

int __mhi_ep_driver_register(struct mhi_ep_driver *mhi_drv, struct module *owner)
{
	struct device_driver *driver = &mhi_drv->driver;

	if (!mhi_drv->probe || !mhi_drv->remove)
		return -EINVAL;

	/* Client drivers should have callbacks defined for both channels */
	if (!mhi_drv->ul_xfer_cb || !mhi_drv->dl_xfer_cb)
		return -EINVAL;

	driver->bus = &mhi_ep_bus_type;
	driver->owner = owner;
	driver->probe = mhi_ep_driver_probe;
	driver->remove = mhi_ep_driver_remove;

	return driver_register(driver);
}
EXPORT_SYMBOL_GPL(__mhi_ep_driver_register);

void mhi_ep_driver_unregister(struct mhi_ep_driver *mhi_drv)
{
	driver_unregister(&mhi_drv->driver);
}
EXPORT_SYMBOL_GPL(mhi_ep_driver_unregister);

static int mhi_ep_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);

	return add_uevent_var(env, "MODALIAS=" MHI_EP_DEVICE_MODALIAS_FMT,
			      mhi_dev->name);
}

static int mhi_ep_match(struct device *dev, struct device_driver *drv)
{
	struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
	struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(drv);
	const struct mhi_device_id *id;

	/*
	 * If the device is a controller type then there is no client driver
	 * associated with it
	 */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

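	/* Match the channel name against the entries in the driver's ID table */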
	for (id = mhi_drv->id_table; id->chan[0]; id++)
		if (!strcmp(mhi_dev->name, id->chan)) {
			mhi_dev->id = id;
			return 1;
		}

	return 0;
}

struct bus_type mhi_ep_bus_type = {
	.name = "mhi_ep",
	.dev_name = "mhi_ep",
	.match = mhi_ep_match,
	.uevent = mhi_ep_uevent,
};

static int __init mhi_ep_init(void)
{
	return bus_register(&mhi_ep_bus_type);
}

static void __exit mhi_ep_exit(void)
{
	bus_unregister(&mhi_ep_bus_type);
}

postcore_initcall(mhi_ep_init);
module_exit(mhi_ep_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MHI Bus Endpoint stack");
MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");