// SPDX-License-Identifier: GPL-2.0
/*
 * MHI Endpoint bus stack
 *
 * Copyright (C) 2022 Linaro Ltd.
 * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
 */

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-direction.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mhi_ep.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include "internal.h"

#define M0_WAIT_DELAY_MS	100
#define M0_WAIT_COUNT		100

static DEFINE_IDA(mhi_ep_cntrl_ida);

static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id);
static int mhi_ep_destroy_device(struct device *dev, void *data);

static int mhi_ep_send_event(struct mhi_ep_cntrl *mhi_cntrl, u32 ring_idx,
			     struct mhi_ring_element *el, bool bei)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	union mhi_ep_ring_ctx *ctx;
	struct mhi_ep_ring *ring;
	int ret;

	mutex_lock(&mhi_cntrl->event_lock);
	ring = &mhi_cntrl->mhi_event[ring_idx].ring;
	ctx = (union mhi_ep_ring_ctx *)&mhi_cntrl->ev_ctx_cache[ring_idx];
	if (!ring->started) {
		ret = mhi_ep_ring_start(mhi_cntrl, ring, ctx);
		if (ret) {
			dev_err(dev, "Error starting event ring (%u)\n", ring_idx);
			goto err_unlock;
		}
	}

	/* Add element to the event ring */
	ret = mhi_ep_ring_add_element(ring, el);
	if (ret) {
		dev_err(dev, "Error adding element to event ring (%u)\n", ring_idx);
		goto err_unlock;
	}

	mutex_unlock(&mhi_cntrl->event_lock);

	/*
	 * Raise IRQ to host only if the BEI flag is not set in TRE. Host might
	 * set this flag for interrupt moderation as per MHI protocol.
	 */
	if (!bei)
		mhi_cntrl->raise_irq(mhi_cntrl, ring->irq_vector);

	return 0;

err_unlock:
	mutex_unlock(&mhi_cntrl->event_lock);

	return ret;
}

static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
					struct mhi_ring_element *tre, u32 len, enum mhi_ev_ccs code)
{
	struct mhi_ring_element *event;
	int ret;

	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
	if (!event)
		return -ENOMEM;

	event->ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(*tre));
	event->dword[0] = MHI_TRE_EV_DWORD0(code, len);
	event->dword[1] = MHI_TRE_EV_DWORD1(ring->ch_id, MHI_PKT_TYPE_TX_EVENT);

	ret = mhi_ep_send_event(mhi_cntrl, ring->er_index, event, MHI_TRE_DATA_GET_BEI(tre));
	kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);

	return ret;
}

int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state)
{
	struct mhi_ring_element *event;
	int ret;

	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
	if (!event)
		return -ENOMEM;

	event->dword[0] = MHI_SC_EV_DWORD0(state);
	event->dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_STATE_CHANGE_EVENT);

	ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0);
	kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);

	return ret;
}

int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env)
{
	struct mhi_ring_element *event;
	int ret;

	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
	if (!event)
		return -ENOMEM;

	event->dword[0] = MHI_EE_EV_DWORD0(exec_env);
	event->dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_EE_EVENT);

	ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0);
	kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);

	return ret;
}

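/*
 * Send a command completion event to the host on the primary event ring (0),
 * pointing at the command ring element currently being processed.
 */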
static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ev_ccs code)
{
	struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring;
	struct mhi_ring_element *event;
	int ret;

	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
	if (!event)
		return -ENOMEM;

	event->ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(struct mhi_ring_element));
	event->dword[0] = MHI_CC_EV_DWORD0(code);
	event->dword[1] = MHI_CC_EV_DWORD1(MHI_PKT_TYPE_CMD_COMPLETION_EVENT);

	ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0);
	kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);

	return ret;
}

static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
{
	struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_result result = {};
	struct mhi_ep_chan *mhi_chan;
	struct mhi_ep_ring *ch_ring;
	u32 tmp, ch_id;
	int ret;

	ch_id = MHI_TRE_GET_CMD_CHID(el);

	/* Check if the channel is supported by the controller */
	if ((ch_id >= mhi_cntrl->max_chan) || !mhi_cntrl->mhi_chan[ch_id].name) {
		dev_dbg(dev, "Channel (%u) not supported!\n", ch_id);
		return -ENODEV;
	}

	mhi_chan = &mhi_cntrl->mhi_chan[ch_id];
	ch_ring = &mhi_cntrl->mhi_chan[ch_id].ring;

	switch (MHI_TRE_GET_CMD_TYPE(el)) {
	case MHI_PKT_TYPE_START_CHAN_CMD:
		dev_dbg(dev, "Received START command for channel (%u)\n", ch_id);

		mutex_lock(&mhi_chan->lock);
		/* Initialize and configure the corresponding channel ring */
		if (!ch_ring->started) {
			ret = mhi_ep_ring_start(mhi_cntrl, ch_ring,
				(union mhi_ep_ring_ctx *)&mhi_cntrl->ch_ctx_cache[ch_id]);
			if (ret) {
				dev_err(dev, "Failed to start ring for channel (%u)\n", ch_id);
				ret = mhi_ep_send_cmd_comp_event(mhi_cntrl,
								 MHI_EV_CC_UNDEFINED_ERR);
				if (ret)
					dev_err(dev, "Error sending completion event: %d\n", ret);

				goto err_unlock;
			}

			mhi_chan->rd_offset = ch_ring->rd_offset;
		}

		/* Set channel state to RUNNING */
		mhi_chan->state = MHI_CH_STATE_RUNNING;
		tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING);
		mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);

		ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
		if (ret) {
			dev_err(dev, "Error sending command completion event (%u)\n",
				MHI_EV_CC_SUCCESS);
			goto err_unlock;
		}

		mutex_unlock(&mhi_chan->lock);

		/*
		 * Create MHI device only during UL channel start. Since the MHI
		 * channels operate in a pair, we'll associate both UL and DL
		 * channels with the same device.
		 *
		 * We also need to check for mhi_dev != NULL because the host
		 * will issue the START_CHAN command during resume and we don't
		 * destroy the device during suspend.
		 */
		if (!(ch_id % 2) && !mhi_chan->mhi_dev) {
			ret = mhi_ep_create_device(mhi_cntrl, ch_id);
			if (ret) {
				dev_err(dev, "Error creating device for channel (%u)\n", ch_id);
				mhi_ep_handle_syserr(mhi_cntrl);
				return ret;
			}
		}

		/* Finally, enable DB for the channel */
		mhi_ep_mmio_enable_chdb(mhi_cntrl, ch_id);

		break;
	case MHI_PKT_TYPE_STOP_CHAN_CMD:
		dev_dbg(dev, "Received STOP command for channel (%u)\n", ch_id);
		if (!ch_ring->started) {
			dev_err(dev, "Channel (%u) not opened\n", ch_id);
			return -ENODEV;
		}

		mutex_lock(&mhi_chan->lock);
		/* Disable DB for the channel */
		mhi_ep_mmio_disable_chdb(mhi_cntrl, ch_id);

		/* Send channel disconnect status to client drivers */
		if (mhi_chan->xfer_cb) {
			result.transaction_status = -ENOTCONN;
			result.bytes_xferd = 0;
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
		}

		/* Set channel state to STOP */
		mhi_chan->state = MHI_CH_STATE_STOP;
		tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_STOP);
		mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);

		ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
		if (ret) {
			dev_err(dev, "Error sending command completion event (%u)\n",
				MHI_EV_CC_SUCCESS);
			goto err_unlock;
		}

		mutex_unlock(&mhi_chan->lock);
		break;
	case MHI_PKT_TYPE_RESET_CHAN_CMD:
		dev_dbg(dev, "Received RESET command for channel (%u)\n", ch_id);
		if (!ch_ring->started) {
			dev_err(dev, "Channel (%u) not opened\n", ch_id);
			return -ENODEV;
		}

		mutex_lock(&mhi_chan->lock);
		/* Stop and reset the transfer ring */
		mhi_ep_ring_reset(mhi_cntrl, ch_ring);

		/* Send channel disconnect status to client driver */
		if (mhi_chan->xfer_cb) {
			result.transaction_status = -ENOTCONN;
			result.bytes_xferd = 0;
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
		}

		/* Set channel state to DISABLED */
		mhi_chan->state = MHI_CH_STATE_DISABLED;
		tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED);
		mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);

		ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
		if (ret) {
			dev_err(dev, "Error sending command completion event (%u)\n",
				MHI_EV_CC_SUCCESS);
			goto err_unlock;
		}

		mutex_unlock(&mhi_chan->lock);
		break;
	default:
		dev_err(dev, "Invalid command received: %lu for channel (%u)\n",
			MHI_TRE_GET_CMD_TYPE(el), ch_id);
		return -EINVAL;
	}

	return 0;

err_unlock:
	mutex_unlock(&mhi_chan->lock);

	return ret;
}

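/**
 * mhi_ep_queue_is_empty - Check if the transfer ring for a channel is empty
 * @mhi_dev: Device associated with the channel
 * @dir: DMA direction of the channel (DMA_TO_DEVICE for UL, DMA_FROM_DEVICE for DL)
 *
 * Return: true if there are no pending TREs in the transfer ring (i.e. the
 * cached read offset has caught up with the host write offset), false otherwise.
 */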
bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir)
{
	struct mhi_ep_chan *mhi_chan = (dir == DMA_FROM_DEVICE) ? mhi_dev->dl_chan :
								  mhi_dev->ul_chan;
	struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;

	return !!(mhi_chan->rd_offset == ring->wr_offset);
}
EXPORT_SYMBOL_GPL(mhi_ep_queue_is_empty);

static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
			       struct mhi_ep_ring *ring,
			       struct mhi_result *result,
			       u32 len)
{
	struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	size_t tr_len, read_offset, write_offset;
	struct mhi_ep_buf_info buf_info = {};
	struct mhi_ring_element *el;
	bool tr_done = false;
	u32 buf_left;
	int ret;

	buf_left = len;

	do {
		/* Don't process the transfer ring if the channel is not in RUNNING state */
		if (mhi_chan->state != MHI_CH_STATE_RUNNING) {
			dev_err(dev, "Channel not available\n");
			return -ENODEV;
		}

		el = &ring->ring_cache[mhi_chan->rd_offset];

		/* Check if there is data pending to be read from previous read operation */
		if (mhi_chan->tre_bytes_left) {
			dev_dbg(dev, "TRE bytes remaining: %u\n", mhi_chan->tre_bytes_left);
			tr_len = min(buf_left, mhi_chan->tre_bytes_left);
		} else {
			mhi_chan->tre_loc = MHI_TRE_DATA_GET_PTR(el);
			mhi_chan->tre_size = MHI_TRE_DATA_GET_LEN(el);
			mhi_chan->tre_bytes_left = mhi_chan->tre_size;

			tr_len = min(buf_left, mhi_chan->tre_size);
		}

		read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left;
		write_offset = len - buf_left;

		buf_info.host_addr = mhi_chan->tre_loc + read_offset;
		buf_info.dev_addr = result->buf_addr + write_offset;
		buf_info.size = tr_len;

		dev_dbg(dev, "Reading %zd bytes from channel (%u)\n", tr_len, ring->ch_id);
		ret = mhi_cntrl->read_sync(mhi_cntrl, &buf_info);
		if (ret < 0) {
			dev_err(&mhi_chan->mhi_dev->dev, "Error reading from channel\n");
			return ret;
		}

		buf_left -= tr_len;
		mhi_chan->tre_bytes_left -= tr_len;

		/*
		 * Once the TRE (Transfer Ring Element) of a TD (Transfer Descriptor) has been
		 * read completely:
		 *
		 * 1. Send completion event to the host based on the flags set in TRE.
		 * 2. Increment the local read offset of the transfer ring.
		 */
		if (!mhi_chan->tre_bytes_left) {
			/*
			 * The host will split the data packet into multiple TREs if it can't fit
			 * the packet in a single TRE. In that case, CHAIN flag will be set by the
			 * host for all TREs except the last one.
			 */
			if (MHI_TRE_DATA_GET_CHAIN(el)) {
				/*
				 * IEOB (Interrupt on End of Block) flag will be set by the host if
				 * it expects the completion event for all TREs of a TD.
				 */
				if (MHI_TRE_DATA_GET_IEOB(el)) {
					ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
								MHI_TRE_DATA_GET_LEN(el),
								MHI_EV_CC_EOB);
					if (ret < 0) {
						dev_err(&mhi_chan->mhi_dev->dev,
							"Error sending transfer compl. event\n");
						return ret;
					}
				}
			} else {
				/*
				 * IEOT (Interrupt on End of Transfer) flag will be set by the host
				 * for the last TRE of the TD and expects the completion event for
				 * the same.
				 */
				if (MHI_TRE_DATA_GET_IEOT(el)) {
					ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
								MHI_TRE_DATA_GET_LEN(el),
								MHI_EV_CC_EOT);
					if (ret < 0) {
						dev_err(&mhi_chan->mhi_dev->dev,
							"Error sending transfer compl. event\n");
						return ret;
					}
				}

				tr_done = true;
			}

			mhi_chan->rd_offset = (mhi_chan->rd_offset + 1) % ring->ring_size;
			mhi_ep_ring_inc_index(ring);
		}

		result->bytes_xferd += tr_len;
	} while (buf_left && !tr_done);

	return 0;
}

static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
{
	struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
	struct mhi_result result = {};
	u32 len = MHI_EP_DEFAULT_MTU;
	struct mhi_ep_chan *mhi_chan;
	int ret;

	mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];

	/*
	 * Bail out if the transfer callback is not registered for the channel.
	 * This is most likely because the client driver is not loaded at this point.
	 */
	if (!mhi_chan->xfer_cb) {
		dev_err(&mhi_chan->mhi_dev->dev, "Client driver not available\n");
		return -ENODEV;
	}

	if (ring->ch_id % 2) {
		/* DL channel */
		result.dir = mhi_chan->dir;
		mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
	} else {
		/* UL channel */
		result.buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL | GFP_DMA);
		if (!result.buf_addr)
			return -ENOMEM;

		do {
			ret = mhi_ep_read_channel(mhi_cntrl, ring, &result, len);
			if (ret < 0) {
				dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n");
				kmem_cache_free(mhi_cntrl->tre_buf_cache, result.buf_addr);
				return ret;
			}

			result.dir = mhi_chan->dir;
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
			result.bytes_xferd = 0;
			memset(result.buf_addr, 0, len);

			/* Read until the ring becomes empty */
		} while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE));

		kmem_cache_free(mhi_cntrl->tre_buf_cache, result.buf_addr);
	}

	return 0;
}

static void mhi_ep_skb_completion(struct mhi_ep_buf_info *buf_info)
{
	struct mhi_ep_device *mhi_dev = buf_info->mhi_dev;
	struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan;
	struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
	struct mhi_ring_element *el = &ring->ring_cache[ring->rd_offset];
	struct device *dev = &mhi_dev->dev;
	struct mhi_result result = {};
	int ret;

	if (mhi_chan->xfer_cb) {
		result.buf_addr = buf_info->cb_buf;
		result.dir = mhi_chan->dir;
		result.bytes_xferd = buf_info->size;

		mhi_chan->xfer_cb(mhi_dev, &result);
	}

	ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, buf_info->size,
					   buf_info->code);
	if (ret) {
		dev_err(dev, "Error sending transfer completion event\n");
		return;
	}

	mhi_ep_ring_inc_index(ring);
}

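/**
 * mhi_ep_queue_skb - Queue an SKB for transmission to the host over a DL channel
 * @mhi_dev: Device associated with the DL channel
 * @skb: SKB to be queued
 *
 * The SKB is split across as many TREs as the host has queued; all chunks but
 * the last one complete with an OVERFLOW code so that the host can stitch the
 * packet back together. A typical client (an MHI EP network driver, for
 * instance) would call this from its TX path and release the SKB from the DL
 * xfer_cb once the completion is delivered.
 *
 * Return: 0 on success, a negative error code otherwise.
 */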
/* TODO: Handle partially formed TDs */
int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
{
	struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan;
	struct device *dev = &mhi_chan->mhi_dev->dev;
	struct mhi_ep_buf_info buf_info = {};
	struct mhi_ring_element *el;
	u32 buf_left, read_offset;
	struct mhi_ep_ring *ring;
	size_t tr_len;
	u32 tre_len;
	int ret;

	buf_left = skb->len;
	ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;

	mutex_lock(&mhi_chan->lock);

	do {
		/* Don't process the transfer ring if the channel is not in RUNNING state */
		if (mhi_chan->state != MHI_CH_STATE_RUNNING) {
			dev_err(dev, "Channel not available\n");
			ret = -ENODEV;
			goto err_exit;
		}

		if (mhi_ep_queue_is_empty(mhi_dev, DMA_FROM_DEVICE)) {
			dev_err(dev, "TRE not available!\n");
			ret = -ENOSPC;
			goto err_exit;
		}

		el = &ring->ring_cache[mhi_chan->rd_offset];
		tre_len = MHI_TRE_DATA_GET_LEN(el);

		tr_len = min(buf_left, tre_len);
		read_offset = skb->len - buf_left;

		buf_info.dev_addr = skb->data + read_offset;
		buf_info.host_addr = MHI_TRE_DATA_GET_PTR(el);
		buf_info.size = tr_len;
		buf_info.cb = mhi_ep_skb_completion;
		buf_info.cb_buf = skb;
		buf_info.mhi_dev = mhi_dev;

		/*
		 * For all TREs queued by the host for the DL channel, only the EOT flag will be
		 * set. If the packet doesn't fit into a single TRE, send the OVERFLOW event to
		 * the host so that it can adjust the packet boundary to the next TREs. Else send
		 * the EOT event to the host indicating the packet boundary.
		 */
		if (buf_left - tr_len)
			buf_info.code = MHI_EV_CC_OVERFLOW;
		else
			buf_info.code = MHI_EV_CC_EOT;

		dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id);
		ret = mhi_cntrl->write_async(mhi_cntrl, &buf_info);
		if (ret < 0) {
			dev_err(dev, "Error writing to the channel\n");
			goto err_exit;
		}

		buf_left -= tr_len;

		/*
		 * Update the read offset cached in mhi_chan. The actual read offset
		 * will be updated by the completion handler.
		 */
		mhi_chan->rd_offset = (mhi_chan->rd_offset + 1) % ring->ring_size;
	} while (buf_left);

	mutex_unlock(&mhi_chan->lock);

	return 0;

err_exit:
	mutex_unlock(&mhi_chan->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_ep_queue_skb);

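/*
 * Cache the channel, event and command context written by the host and start
 * the command ring. Called once the host has moved the MHI state machine to M0.
 */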
static int mhi_ep_cache_host_cfg(struct mhi_ep_cntrl *mhi_cntrl)
{
	size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret;

	/* Update the number of event rings (NER) programmed by the host */
	mhi_ep_mmio_update_ner(mhi_cntrl);

	dev_dbg(dev, "Number of Event rings: %u, HW Event rings: %u\n",
		mhi_cntrl->event_rings, mhi_cntrl->hw_event_rings);

	ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan;
	ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings;
	cmd_ctx_host_size = sizeof(struct mhi_cmd_ctxt) * NR_OF_CMD_RINGS;

	/* Get the channel context base pointer from host */
	mhi_ep_mmio_get_chc_base(mhi_cntrl);

	/* Allocate and map memory for caching host channel context */
	ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa,
				   &mhi_cntrl->ch_ctx_cache_phys,
				   (void __iomem **) &mhi_cntrl->ch_ctx_cache,
				   ch_ctx_host_size);
	if (ret) {
		dev_err(dev, "Failed to allocate and map ch_ctx_cache\n");
		return ret;
	}

	/* Get the event context base pointer from host */
	mhi_ep_mmio_get_erc_base(mhi_cntrl);

	/* Allocate and map memory for caching host event context */
	ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa,
				   &mhi_cntrl->ev_ctx_cache_phys,
				   (void __iomem **) &mhi_cntrl->ev_ctx_cache,
				   ev_ctx_host_size);
	if (ret) {
		dev_err(dev, "Failed to allocate and map ev_ctx_cache\n");
		goto err_ch_ctx;
	}

	/* Get the command context base pointer from host */
	mhi_ep_mmio_get_crc_base(mhi_cntrl);

	/* Allocate and map memory for caching host command context */
	ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa,
				   &mhi_cntrl->cmd_ctx_cache_phys,
				   (void __iomem **) &mhi_cntrl->cmd_ctx_cache,
				   cmd_ctx_host_size);
	if (ret) {
		dev_err(dev, "Failed to allocate and map cmd_ctx_cache\n");
		goto err_ev_ctx;
	}

	/* Initialize command ring */
	ret = mhi_ep_ring_start(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring,
				(union mhi_ep_ring_ctx *)mhi_cntrl->cmd_ctx_cache);
	if (ret) {
		dev_err(dev, "Failed to start the command ring\n");
		goto err_cmd_ctx;
	}

	return ret;

err_cmd_ctx:
	mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys,
			      (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size);

err_ev_ctx:
	mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys,
			      (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size);

err_ch_ctx:
	mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys,
			      (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size);

	return ret;
}

static void mhi_ep_free_host_cfg(struct mhi_ep_cntrl *mhi_cntrl)
{
	size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size;

	ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan;
	ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings;
	cmd_ctx_host_size = sizeof(struct mhi_cmd_ctxt) * NR_OF_CMD_RINGS;

	mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys,
			      (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size);

	mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys,
			      (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size);

	mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys,
			      (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size);
}

static void mhi_ep_enable_int(struct mhi_ep_cntrl *mhi_cntrl)
{
	/*
	 * Doorbell interrupts are enabled when the corresponding channel gets started.
	 * Enabling all interrupts here triggers spurious irqs as some of the interrupts
	 * associated with hw channels always get triggered.
	 */
	mhi_ep_mmio_enable_ctrl_interrupt(mhi_cntrl);
	mhi_ep_mmio_enable_cmdb_interrupt(mhi_cntrl);
}

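/*
 * Wait for the host to move the MHI state machine to M0, then cache the host
 * context, advertise the AMSS execution environment and enable the control
 * and command doorbell interrupts.
 */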
static int mhi_ep_enable(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_state state;
	bool mhi_reset;
	u32 count = 0;
	int ret;

	/* Wait for Host to set the M0 state */
	do {
		msleep(M0_WAIT_DELAY_MS);
		mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset);
		if (mhi_reset) {
			/* Clear the MHI reset if host is in reset state */
			mhi_ep_mmio_clear_reset(mhi_cntrl);
			dev_info(dev, "Detected Host reset while waiting for M0\n");
		}
		count++;
	} while (state != MHI_STATE_M0 && count < M0_WAIT_COUNT);

	if (state != MHI_STATE_M0) {
		dev_err(dev, "Host failed to enter M0\n");
		return -ETIMEDOUT;
	}

	ret = mhi_ep_cache_host_cfg(mhi_cntrl);
	if (ret) {
		dev_err(dev, "Failed to cache host config\n");
		return ret;
	}

	mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);

	/* Enable all interrupts now */
	mhi_ep_enable_int(mhi_cntrl);

	return 0;
}

static void mhi_ep_cmd_ring_worker(struct work_struct *work)
{
	struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, cmd_ring_work);
	struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_ring_element *el;
	int ret;

	/* Update the write offset for the ring */
	ret = mhi_ep_update_wr_offset(ring);
	if (ret) {
		dev_err(dev, "Error updating write offset for ring\n");
		return;
	}

	/* Sanity check to make sure there are elements in the ring */
	if (ring->rd_offset == ring->wr_offset)
		return;

	/*
	 * Process command ring elements up to the write offset. In case of an
	 * error, just try to process the next element.
	 */
	while (ring->rd_offset != ring->wr_offset) {
		el = &ring->ring_cache[ring->rd_offset];

		ret = mhi_ep_process_cmd_ring(ring, el);
		if (ret && ret != -ENODEV)
			dev_err(dev, "Error processing cmd ring element: %zu\n", ring->rd_offset);

		mhi_ep_ring_inc_index(ring);
	}
}

static void mhi_ep_ch_ring_worker(struct work_struct *work)
{
	struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, ch_ring_work);
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_ep_ring_item *itr, *tmp;
	struct mhi_ring_element *el;
	struct mhi_ep_ring *ring;
	struct mhi_ep_chan *chan;
	unsigned long flags;
	LIST_HEAD(head);
	int ret;

	spin_lock_irqsave(&mhi_cntrl->list_lock, flags);
	list_splice_tail_init(&mhi_cntrl->ch_db_list, &head);
	spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags);

	/* Process each queued channel ring. In case of an error, just process next element. */
	list_for_each_entry_safe(itr, tmp, &head, node) {
		list_del(&itr->node);
		ring = itr->ring;

		chan = &mhi_cntrl->mhi_chan[ring->ch_id];
		mutex_lock(&chan->lock);

		/*
		 * The ring could've stopped while we waited to grab the (chan->lock), so do
		 * a sanity check before going further.
		 */
		if (!ring->started) {
			mutex_unlock(&chan->lock);
			kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
			continue;
		}

		/* Update the write offset for the ring */
		ret = mhi_ep_update_wr_offset(ring);
		if (ret) {
			dev_err(dev, "Error updating write offset for ring\n");
			mutex_unlock(&chan->lock);
			kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
			continue;
		}

		/* Sanity check to make sure there are elements in the ring */
		if (chan->rd_offset == ring->wr_offset) {
			mutex_unlock(&chan->lock);
			kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
			continue;
		}

		el = &ring->ring_cache[ring->rd_offset];

		dev_dbg(dev, "Processing the ring for channel (%u)\n", ring->ch_id);
		ret = mhi_ep_process_ch_ring(ring, el);
		if (ret) {
			dev_err(dev, "Error processing ring for channel (%u): %d\n",
				ring->ch_id, ret);
			mutex_unlock(&chan->lock);
			kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
			continue;
		}

		mutex_unlock(&chan->lock);
		kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
	}
}

static void mhi_ep_state_worker(struct work_struct *work)
{
	struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, state_work);
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_ep_state_transition *itr, *tmp;
	unsigned long flags;
	LIST_HEAD(head);
	int ret;

	spin_lock_irqsave(&mhi_cntrl->list_lock, flags);
	list_splice_tail_init(&mhi_cntrl->st_transition_list, &head);
	spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags);

	list_for_each_entry_safe(itr, tmp, &head, node) {
		list_del(&itr->node);
		dev_dbg(dev, "Handling MHI state transition to %s\n",
			mhi_state_str(itr->state));

		switch (itr->state) {
		case MHI_STATE_M0:
			ret = mhi_ep_set_m0_state(mhi_cntrl);
			if (ret)
				dev_err(dev, "Failed to transition to M0 state\n");
			break;
		case MHI_STATE_M3:
			ret = mhi_ep_set_m3_state(mhi_cntrl);
			if (ret)
				dev_err(dev, "Failed to transition to M3 state\n");
			break;
		default:
			dev_err(dev, "Invalid MHI state transition: %d\n", itr->state);
			break;
		}
		kfree(itr);
	}
}

static void mhi_ep_queue_channel_db(struct mhi_ep_cntrl *mhi_cntrl, unsigned long ch_int,
				    u32 ch_idx)
{
	struct mhi_ep_ring_item *item;
	struct mhi_ep_ring *ring;
	bool work = !!ch_int;
	LIST_HEAD(head);
	u32 i;

	/* First add the ring items to a local list */
	for_each_set_bit(i, &ch_int, 32) {
		/* Channel index varies for each register: 0, 32, 64, 96 */
		u32 ch_id = ch_idx + i;

		ring = &mhi_cntrl->mhi_chan[ch_id].ring;
		item = kmem_cache_zalloc(mhi_cntrl->ring_item_cache, GFP_ATOMIC);
		if (!item)
			return;

		item->ring = ring;
		list_add_tail(&item->node, &head);
	}

	/* Now, splice the local list into ch_db_list and queue the work item */
	if (work) {
		spin_lock(&mhi_cntrl->list_lock);
		list_splice_tail_init(&head, &mhi_cntrl->ch_db_list);
		spin_unlock(&mhi_cntrl->list_lock);

		queue_work(mhi_cntrl->wq, &mhi_cntrl->ch_ring_work);
	}
}

/*
 * Channel interrupt statuses are contained in four registers, each 32 bits wide.
 * To check all interrupts, we need to loop through each register and then check
 * for the bits that are set.
 */
static void mhi_ep_check_channel_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
{
	u32 ch_int, ch_idx, i;

	/* Bail out if there is no channel doorbell interrupt */
	if (!mhi_ep_mmio_read_chdb_status_interrupts(mhi_cntrl))
		return;

	for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) {
		ch_idx = i * MHI_MASK_CH_LEN;

		/* Only process channel interrupt if the mask is enabled */
		ch_int = mhi_cntrl->chdb[i].status & mhi_cntrl->chdb[i].mask;
		if (ch_int) {
			mhi_ep_queue_channel_db(mhi_cntrl, ch_int, ch_idx);
			mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_CLEAR_n(i),
					  mhi_cntrl->chdb[i].status);
		}
	}
}

static void mhi_ep_process_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl,
					  enum mhi_state state)
{
	struct mhi_ep_state_transition *item;

	item = kzalloc(sizeof(*item), GFP_ATOMIC);
	if (!item)
		return;

	item->state = state;
	spin_lock(&mhi_cntrl->list_lock);
	list_add_tail(&item->node, &mhi_cntrl->st_transition_list);
	spin_unlock(&mhi_cntrl->list_lock);

	queue_work(mhi_cntrl->wq, &mhi_cntrl->state_work);
}

/*
 * Interrupt handler that services interrupts raised by the host writing to
 * MHICTRL and Command ring doorbell (CRDB) registers for state change and
 * channel interrupts.
 */
static irqreturn_t mhi_ep_irq(int irq, void *data)
{
	struct mhi_ep_cntrl *mhi_cntrl = data;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_state state;
	u32 int_value;
	bool mhi_reset;

	/* Acknowledge the ctrl interrupt */
	int_value = mhi_ep_mmio_read(mhi_cntrl, MHI_CTRL_INT_STATUS);
	mhi_ep_mmio_write(mhi_cntrl, MHI_CTRL_INT_CLEAR, int_value);

	/* Check for ctrl interrupt */
	if (FIELD_GET(MHI_CTRL_INT_STATUS_MSK, int_value)) {
		dev_dbg(dev, "Processing ctrl interrupt\n");
		mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset);
		if (mhi_reset) {
			dev_info(dev, "Host triggered MHI reset!\n");
			disable_irq_nosync(mhi_cntrl->irq);
			schedule_work(&mhi_cntrl->reset_work);
			return IRQ_HANDLED;
		}

		mhi_ep_process_ctrl_interrupt(mhi_cntrl, state);
	}

	/* Check for command doorbell interrupt */
	if (FIELD_GET(MHI_CTRL_INT_STATUS_CRDB_MSK, int_value)) {
		dev_dbg(dev, "Processing command doorbell interrupt\n");
		queue_work(mhi_cntrl->wq, &mhi_cntrl->cmd_ring_work);
	}

	/* Check for channel interrupts */
	mhi_ep_check_channel_interrupt(mhi_cntrl);

	return IRQ_HANDLED;
}

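/*
 * Tear down the MHI stack on the endpoint: notify client drivers of the
 * disconnect, destroy the channel devices and reset all channel, event and
 * command rings before releasing the cached host context.
 */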
static void mhi_ep_abort_transfer(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct mhi_ep_ring *ch_ring, *ev_ring;
	struct mhi_result result = {};
	struct mhi_ep_chan *mhi_chan;
	int i;

	/* Stop all the channels */
	for (i = 0; i < mhi_cntrl->max_chan; i++) {
		mhi_chan = &mhi_cntrl->mhi_chan[i];
		if (!mhi_chan->ring.started)
			continue;

		mutex_lock(&mhi_chan->lock);
		/* Send channel disconnect status to client drivers */
		if (mhi_chan->xfer_cb) {
			result.transaction_status = -ENOTCONN;
			result.bytes_xferd = 0;
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
		}

		mhi_chan->state = MHI_CH_STATE_DISABLED;
		mutex_unlock(&mhi_chan->lock);
	}

	flush_workqueue(mhi_cntrl->wq);

	/* Destroy devices associated with all channels */
	device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_ep_destroy_device);

	/* Stop and reset the transfer rings */
	for (i = 0; i < mhi_cntrl->max_chan; i++) {
		mhi_chan = &mhi_cntrl->mhi_chan[i];
		if (!mhi_chan->ring.started)
			continue;

		ch_ring = &mhi_cntrl->mhi_chan[i].ring;
		mutex_lock(&mhi_chan->lock);
		mhi_ep_ring_reset(mhi_cntrl, ch_ring);
		mutex_unlock(&mhi_chan->lock);
	}

	/* Stop and reset the event rings */
	for (i = 0; i < mhi_cntrl->event_rings; i++) {
		ev_ring = &mhi_cntrl->mhi_event[i].ring;
		if (!ev_ring->started)
			continue;

		mutex_lock(&mhi_cntrl->event_lock);
		mhi_ep_ring_reset(mhi_cntrl, ev_ring);
		mutex_unlock(&mhi_cntrl->event_lock);
	}

	/* Stop and reset the command ring */
	mhi_ep_ring_reset(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring);

	mhi_ep_free_host_cfg(mhi_cntrl);
	mhi_ep_mmio_mask_interrupts(mhi_cntrl);

	mhi_cntrl->enabled = false;
}

static void mhi_ep_reset_worker(struct work_struct *work)
{
	struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, reset_work);
	enum mhi_state cur_state;

	mhi_ep_power_down(mhi_cntrl);

	mutex_lock(&mhi_cntrl->state_lock);

	/* Reset MMIO to signal host that the MHI_RESET is completed in endpoint */
	mhi_ep_mmio_reset(mhi_cntrl);
	cur_state = mhi_cntrl->mhi_state;

	/*
	 * Only proceed further if the reset is due to SYS_ERR. The host will
	 * issue reset during shutdown also and we don't need to do re-init in
	 * that case.
	 */
	if (cur_state == MHI_STATE_SYS_ERR)
		mhi_ep_power_up(mhi_cntrl);

	mutex_unlock(&mhi_cntrl->state_lock);
}

/*
 * We don't need to do anything special other than setting the MHI SYS_ERR
 * state. The host will reset all contexts and issue MHI RESET so that we
 * could also recover from error state.
 */
void mhi_ep_handle_syserr(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret;

	ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
	if (ret)
		return;

	/* Signal host that the device went to SYS_ERR state */
	ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_SYS_ERR);
	if (ret)
		dev_err(dev, "Failed sending SYS_ERR state change event: %d\n", ret);
}

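/**
 * mhi_ep_power_up - Power up the MHI endpoint stack
 * @mhi_cntrl: MHI Endpoint controller
 *
 * Initializes the MMIO registers and rings, signals the READY state to the
 * host and waits for it to move the state machine to M0 before enabling the
 * doorbell IRQ.
 *
 * Return: 0 on success, a negative error code otherwise.
 */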
int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret, i;

	/*
	 * Mask all interrupts until the state machine is ready. Interrupts will
	 * be enabled later with mhi_ep_enable().
	 */
	mhi_ep_mmio_mask_interrupts(mhi_cntrl);
	mhi_ep_mmio_init(mhi_cntrl);

	mhi_cntrl->mhi_event = kzalloc(mhi_cntrl->event_rings * (sizeof(*mhi_cntrl->mhi_event)),
				       GFP_KERNEL);
	if (!mhi_cntrl->mhi_event)
		return -ENOMEM;

	/* Initialize command, channel and event rings */
	mhi_ep_ring_init(&mhi_cntrl->mhi_cmd->ring, RING_TYPE_CMD, 0);
	for (i = 0; i < mhi_cntrl->max_chan; i++)
		mhi_ep_ring_init(&mhi_cntrl->mhi_chan[i].ring, RING_TYPE_CH, i);
	for (i = 0; i < mhi_cntrl->event_rings; i++)
		mhi_ep_ring_init(&mhi_cntrl->mhi_event[i].ring, RING_TYPE_ER, i);

	mhi_cntrl->mhi_state = MHI_STATE_RESET;

	/* Set AMSS EE before signaling ready state */
	mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);

	/* All set, notify the host that we are ready */
	ret = mhi_ep_set_ready_state(mhi_cntrl);
	if (ret)
		goto err_free_event;

	dev_dbg(dev, "READY state notification sent to the host\n");

	ret = mhi_ep_enable(mhi_cntrl);
	if (ret) {
		dev_err(dev, "Failed to enable MHI endpoint\n");
		goto err_free_event;
	}

	enable_irq(mhi_cntrl->irq);
	mhi_cntrl->enabled = true;

	return 0;

err_free_event:
	kfree(mhi_cntrl->mhi_event);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_ep_power_up);

void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl)
{
	if (mhi_cntrl->enabled) {
		mhi_ep_abort_transfer(mhi_cntrl);
		kfree(mhi_cntrl->mhi_event);
		disable_irq(mhi_cntrl->irq);
	}
}
EXPORT_SYMBOL_GPL(mhi_ep_power_down);

void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct mhi_ep_chan *mhi_chan;
	u32 tmp;
	int i;

	for (i = 0; i < mhi_cntrl->max_chan; i++) {
		mhi_chan = &mhi_cntrl->mhi_chan[i];

		if (!mhi_chan->mhi_dev)
			continue;

		mutex_lock(&mhi_chan->lock);
		/* Skip if the channel is not currently running */
		tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg);
		if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_RUNNING) {
			mutex_unlock(&mhi_chan->lock);
			continue;
		}

		dev_dbg(&mhi_chan->mhi_dev->dev, "Suspending channel\n");
		/* Set channel state to SUSPENDED */
		mhi_chan->state = MHI_CH_STATE_SUSPENDED;
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_SUSPENDED);
		mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp);
		mutex_unlock(&mhi_chan->lock);
	}
}

void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct mhi_ep_chan *mhi_chan;
	u32 tmp;
	int i;

	for (i = 0; i < mhi_cntrl->max_chan; i++) {
		mhi_chan = &mhi_cntrl->mhi_chan[i];

		if (!mhi_chan->mhi_dev)
			continue;

		mutex_lock(&mhi_chan->lock);
		/* Skip if the channel is not currently suspended */
		tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg);
		if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_SUSPENDED) {
			mutex_unlock(&mhi_chan->lock);
			continue;
		}

		dev_dbg(&mhi_chan->mhi_dev->dev, "Resuming channel\n");
		/* Set channel state to RUNNING */
		mhi_chan->state = MHI_CH_STATE_RUNNING;
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING);
		mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp);
		mutex_unlock(&mhi_chan->lock);
	}
}

static void mhi_ep_release_device(struct device *dev)
{
	struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);

	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		mhi_dev->mhi_cntrl->mhi_dev = NULL;

	/*
	 * We need to set the mhi_chan->mhi_dev to NULL here since the MHI
	 * devices for the channels will only get created in mhi_ep_create_device()
	 * if the mhi_dev associated with it is NULL.
	 */
	if (mhi_dev->ul_chan)
		mhi_dev->ul_chan->mhi_dev = NULL;

	if (mhi_dev->dl_chan)
		mhi_dev->dl_chan->mhi_dev = NULL;

	kfree(mhi_dev);
}

static struct mhi_ep_device *mhi_ep_alloc_device(struct mhi_ep_cntrl *mhi_cntrl,
						 enum mhi_device_type dev_type)
{
	struct mhi_ep_device *mhi_dev;
	struct device *dev;

	mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL);
	if (!mhi_dev)
		return ERR_PTR(-ENOMEM);

	dev = &mhi_dev->dev;
	device_initialize(dev);
	dev->bus = &mhi_ep_bus_type;
	dev->release = mhi_ep_release_device;

	/* Controller device is always allocated first */
	if (dev_type == MHI_DEVICE_CONTROLLER)
		/* for MHI controller device, parent is the bus device (e.g. PCI EPF) */
		dev->parent = mhi_cntrl->cntrl_dev;
	else
		/* for MHI client devices, parent is the MHI controller device */
		dev->parent = &mhi_cntrl->mhi_dev->dev;

	mhi_dev->mhi_cntrl = mhi_cntrl;
	mhi_dev->dev_type = dev_type;

	return mhi_dev;
}

/*
 * MHI channels are always defined in pairs with UL as the even numbered
 * channel and DL as the odd numbered one. This function takes the UL channel
 * (primary) as ch_id and looks at the next entry in the channel list for the
 * corresponding DL channel (secondary).
 */
static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id)
{
	struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ch_id];
	struct device *dev = mhi_cntrl->cntrl_dev;
	struct mhi_ep_device *mhi_dev;
	int ret;

	/* Check if the channel name is the same for both UL and DL */
	if (strcmp(mhi_chan->name, mhi_chan[1].name)) {
		dev_err(dev, "UL and DL channel names are not the same: (%s) != (%s)\n",
			mhi_chan->name, mhi_chan[1].name);
		return -EINVAL;
	}

	mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_XFER);
	if (IS_ERR(mhi_dev))
		return PTR_ERR(mhi_dev);

	/* Configure primary channel */
	mhi_dev->ul_chan = mhi_chan;
	get_device(&mhi_dev->dev);
	mhi_chan->mhi_dev = mhi_dev;

	/* Configure secondary channel as well */
	mhi_chan++;
	mhi_dev->dl_chan = mhi_chan;
	get_device(&mhi_dev->dev);
	mhi_chan->mhi_dev = mhi_dev;

	/* Channel name is same for both UL and DL */
	mhi_dev->name = mhi_chan->name;
	ret = dev_set_name(&mhi_dev->dev, "%s_%s",
			   dev_name(&mhi_cntrl->mhi_dev->dev),
			   mhi_dev->name);
	if (ret) {
		put_device(&mhi_dev->dev);
		return ret;
	}

	ret = device_add(&mhi_dev->dev);
	if (ret)
		put_device(&mhi_dev->dev);

	return ret;
}

static int mhi_ep_destroy_device(struct device *dev, void *data)
{
	struct mhi_ep_device *mhi_dev;
	struct mhi_ep_cntrl *mhi_cntrl;
	struct mhi_ep_chan *ul_chan, *dl_chan;

	if (dev->bus != &mhi_ep_bus_type)
		return 0;

	mhi_dev = to_mhi_ep_device(dev);
	mhi_cntrl = mhi_dev->mhi_cntrl;

	/* Only destroy devices created for channels */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	ul_chan = mhi_dev->ul_chan;
	dl_chan = mhi_dev->dl_chan;

	if (ul_chan)
		put_device(&ul_chan->mhi_dev->dev);

	if (dl_chan)
		put_device(&dl_chan->mhi_dev->dev);

	dev_dbg(&mhi_cntrl->mhi_dev->dev, "Destroying device for chan:%s\n",
		mhi_dev->name);

	/* Notify the client and remove the device from MHI bus */
	device_del(dev);
	put_device(dev);

	return 0;
}

static int mhi_ep_chan_init(struct mhi_ep_cntrl *mhi_cntrl,
			    const struct mhi_ep_cntrl_config *config)
{
	const struct mhi_ep_channel_config *ch_cfg;
	struct device *dev = mhi_cntrl->cntrl_dev;
	u32 chan, i;
	int ret = -EINVAL;

	mhi_cntrl->max_chan = config->max_channels;

	/*
	 * Allocate max_channels supported by the MHI endpoint and populate
	 * only the defined channels
	 */
	mhi_cntrl->mhi_chan = kcalloc(mhi_cntrl->max_chan, sizeof(*mhi_cntrl->mhi_chan),
				      GFP_KERNEL);
	if (!mhi_cntrl->mhi_chan)
		return -ENOMEM;

	for (i = 0; i < config->num_channels; i++) {
		struct mhi_ep_chan *mhi_chan;

		ch_cfg = &config->ch_cfg[i];

		chan = ch_cfg->num;
		if (chan >= mhi_cntrl->max_chan) {
			dev_err(dev, "Channel (%u) exceeds maximum available channels (%u)\n",
				chan, mhi_cntrl->max_chan);
			goto error_chan_cfg;
		}

		/* Bi-directional and direction-less channels are not supported */
		if (ch_cfg->dir == DMA_BIDIRECTIONAL || ch_cfg->dir == DMA_NONE) {
			dev_err(dev, "Invalid direction (%u) for channel (%u)\n",
				ch_cfg->dir, chan);
			goto error_chan_cfg;
		}

		mhi_chan = &mhi_cntrl->mhi_chan[chan];
		mhi_chan->name = ch_cfg->name;
		mhi_chan->chan = chan;
		mhi_chan->dir = ch_cfg->dir;
		mutex_init(&mhi_chan->lock);
	}

	return 0;

error_chan_cfg:
	kfree(mhi_cntrl->mhi_chan);

	return ret;
}

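/*
 * For illustration only: a controller driver (such as the PCI EPF glue) might
 * describe its channels roughly as below before calling
 * mhi_ep_register_controller(). The channel numbers, names and MHI version are
 * hypothetical; only the fields consumed by mhi_ep_chan_init() and
 * mhi_ep_register_controller() are shown.
 *
 *	static const struct mhi_ep_channel_config mhi_ep_channels[] = {
 *		{ .num = 0, .name = "LOOPBACK", .dir = DMA_TO_DEVICE },
 *		{ .num = 1, .name = "LOOPBACK", .dir = DMA_FROM_DEVICE },
 *	};
 *
 *	static const struct mhi_ep_cntrl_config mhi_ep_config = {
 *		.mhi_version = 0x01000000,
 *		.max_channels = 128,
 *		.num_channels = ARRAY_SIZE(mhi_ep_channels),
 *		.ch_cfg = mhi_ep_channels,
 *	};
 */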
/*
 * Allocate channel and command rings here. Event rings will be allocated
 * in mhi_ep_power_up() as the config comes from the host.
 */
int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
			       const struct mhi_ep_cntrl_config *config)
{
	struct mhi_ep_device *mhi_dev;
	int ret;

	if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->mmio || !mhi_cntrl->irq)
		return -EINVAL;

	ret = mhi_ep_chan_init(mhi_cntrl, config);
	if (ret)
		return ret;

	mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS, sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
	if (!mhi_cntrl->mhi_cmd) {
		ret = -ENOMEM;
		goto err_free_ch;
	}

	mhi_cntrl->ev_ring_el_cache = kmem_cache_create("mhi_ep_event_ring_el",
							sizeof(struct mhi_ring_element), 0,
							SLAB_CACHE_DMA, NULL);
	if (!mhi_cntrl->ev_ring_el_cache) {
		ret = -ENOMEM;
		goto err_free_cmd;
	}

	mhi_cntrl->tre_buf_cache = kmem_cache_create("mhi_ep_tre_buf", MHI_EP_DEFAULT_MTU, 0,
						     SLAB_CACHE_DMA, NULL);
	if (!mhi_cntrl->tre_buf_cache) {
		ret = -ENOMEM;
		goto err_destroy_ev_ring_el_cache;
	}

	mhi_cntrl->ring_item_cache = kmem_cache_create("mhi_ep_ring_item",
						       sizeof(struct mhi_ep_ring_item), 0,
						       0, NULL);
	if (!mhi_cntrl->ring_item_cache) {
		ret = -ENOMEM;
		goto err_destroy_tre_buf_cache;
	}

	INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker);
	INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker);
	INIT_WORK(&mhi_cntrl->cmd_ring_work, mhi_ep_cmd_ring_worker);
	INIT_WORK(&mhi_cntrl->ch_ring_work, mhi_ep_ch_ring_worker);

	mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", 0, 0);
	if (!mhi_cntrl->wq) {
		ret = -ENOMEM;
		goto err_destroy_ring_item_cache;
	}

	INIT_LIST_HEAD(&mhi_cntrl->st_transition_list);
	INIT_LIST_HEAD(&mhi_cntrl->ch_db_list);
	spin_lock_init(&mhi_cntrl->list_lock);
	mutex_init(&mhi_cntrl->state_lock);
	mutex_init(&mhi_cntrl->event_lock);

	/* Set MHI version and AMSS EE before enumeration */
	mhi_ep_mmio_write(mhi_cntrl, EP_MHIVER, config->mhi_version);
	mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);

	/* Set controller index */
	ret = ida_alloc(&mhi_ep_cntrl_ida, GFP_KERNEL);
	if (ret < 0)
		goto err_destroy_wq;

	mhi_cntrl->index = ret;

	irq_set_status_flags(mhi_cntrl->irq, IRQ_NOAUTOEN);
	ret = request_irq(mhi_cntrl->irq, mhi_ep_irq, IRQF_TRIGGER_HIGH,
			  "doorbell_irq", mhi_cntrl);
	if (ret) {
		dev_err(mhi_cntrl->cntrl_dev, "Failed to request Doorbell IRQ\n");
		goto err_ida_free;
	}

	/* Allocate the controller device */
	mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_CONTROLLER);
	if (IS_ERR(mhi_dev)) {
		dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate controller device\n");
		ret = PTR_ERR(mhi_dev);
		goto err_free_irq;
	}

	ret = dev_set_name(&mhi_dev->dev, "mhi_ep%u", mhi_cntrl->index);
	if (ret)
		goto err_put_dev;

	mhi_dev->name = dev_name(&mhi_dev->dev);
	mhi_cntrl->mhi_dev = mhi_dev;

	ret = device_add(&mhi_dev->dev);
	if (ret)
		goto err_put_dev;

	dev_dbg(&mhi_dev->dev, "MHI EP Controller registered\n");

	return 0;

err_put_dev:
	put_device(&mhi_dev->dev);
err_free_irq:
	free_irq(mhi_cntrl->irq, mhi_cntrl);
err_ida_free:
	ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index);
err_destroy_wq:
	destroy_workqueue(mhi_cntrl->wq);
err_destroy_ring_item_cache:
	kmem_cache_destroy(mhi_cntrl->ring_item_cache);
err_destroy_tre_buf_cache:
	kmem_cache_destroy(mhi_cntrl->tre_buf_cache);
err_destroy_ev_ring_el_cache:
	kmem_cache_destroy(mhi_cntrl->ev_ring_el_cache);
err_free_cmd:
	kfree(mhi_cntrl->mhi_cmd);
err_free_ch:
	kfree(mhi_cntrl->mhi_chan);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_ep_register_controller);

/*
 * It is expected that the controller drivers will power down the MHI EP stack
 * using "mhi_ep_power_down()" before calling this function to unregister themselves.
 */
void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct mhi_ep_device *mhi_dev = mhi_cntrl->mhi_dev;

	destroy_workqueue(mhi_cntrl->wq);

	free_irq(mhi_cntrl->irq, mhi_cntrl);

	kmem_cache_destroy(mhi_cntrl->tre_buf_cache);
	kmem_cache_destroy(mhi_cntrl->ev_ring_el_cache);
	kmem_cache_destroy(mhi_cntrl->ring_item_cache);
	kfree(mhi_cntrl->mhi_cmd);
	kfree(mhi_cntrl->mhi_chan);

	device_del(&mhi_dev->dev);
	put_device(&mhi_dev->dev);

	ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index);
}
EXPORT_SYMBOL_GPL(mhi_ep_unregister_controller);

static int mhi_ep_driver_probe(struct device *dev)
{
	struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
	struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver);
	struct mhi_ep_chan *ul_chan = mhi_dev->ul_chan;
	struct mhi_ep_chan *dl_chan = mhi_dev->dl_chan;

	ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
	dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;

	return mhi_drv->probe(mhi_dev, mhi_dev->id);
}

static int mhi_ep_driver_remove(struct device *dev)
{
	struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
	struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver);
	struct mhi_result result = {};
	struct mhi_ep_chan *mhi_chan;
	int dir;

	/* Skip if it is a controller device */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	/* Disconnect the channels associated with the driver */
	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

		if (!mhi_chan)
			continue;

		mutex_lock(&mhi_chan->lock);
		/* Send channel disconnect status to the client driver */
		if (mhi_chan->xfer_cb) {
			result.transaction_status = -ENOTCONN;
			result.bytes_xferd = 0;
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
		}

		mhi_chan->state = MHI_CH_STATE_DISABLED;
		mhi_chan->xfer_cb = NULL;
		mutex_unlock(&mhi_chan->lock);
	}

	/* Remove the client driver now */
	mhi_drv->remove(mhi_dev);

	return 0;
}

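/*
 * Sketch of how a client driver would hook into this bus (the "foo" names and
 * the channel below are hypothetical, shown only to illustrate the registration
 * flow through __mhi_ep_driver_register()):
 *
 *	static const struct mhi_device_id foo_ep_id_table[] = {
 *		{ .chan = "LOOPBACK" },
 *		{},
 *	};
 *
 *	static struct mhi_ep_driver foo_ep_driver = {
 *		.id_table = foo_ep_id_table,
 *		.probe = foo_ep_probe,
 *		.remove = foo_ep_remove,
 *		.ul_xfer_cb = foo_ep_ul_callback,
 *		.dl_xfer_cb = foo_ep_dl_callback,
 *		.driver = {
 *			.name = "foo_ep_driver",
 *		},
 *	};
 *
 * The client then registers the driver from its module init path, typically
 * through a wrapper that passes THIS_MODULE as the owner.
 */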
int __mhi_ep_driver_register(struct mhi_ep_driver *mhi_drv, struct module *owner)
{
	struct device_driver *driver = &mhi_drv->driver;

	if (!mhi_drv->probe || !mhi_drv->remove)
		return -EINVAL;

	/* Client drivers should have callbacks defined for both channels */
	if (!mhi_drv->ul_xfer_cb || !mhi_drv->dl_xfer_cb)
		return -EINVAL;

	driver->bus = &mhi_ep_bus_type;
	driver->owner = owner;
	driver->probe = mhi_ep_driver_probe;
	driver->remove = mhi_ep_driver_remove;

	return driver_register(driver);
}
EXPORT_SYMBOL_GPL(__mhi_ep_driver_register);

void mhi_ep_driver_unregister(struct mhi_ep_driver *mhi_drv)
{
	driver_unregister(&mhi_drv->driver);
}
EXPORT_SYMBOL_GPL(mhi_ep_driver_unregister);

static int mhi_ep_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);

	return add_uevent_var(env, "MODALIAS=" MHI_EP_DEVICE_MODALIAS_FMT,
			      mhi_dev->name);
}

static int mhi_ep_match(struct device *dev, struct device_driver *drv)
{
	struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
	struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(drv);
	const struct mhi_device_id *id;

	/*
	 * If the device is a controller type then there is no client driver
	 * associated with it
	 */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	for (id = mhi_drv->id_table; id->chan[0]; id++)
		if (!strcmp(mhi_dev->name, id->chan)) {
			mhi_dev->id = id;
			return 1;
		}

	return 0;
}

struct bus_type mhi_ep_bus_type = {
	.name = "mhi_ep",
	.dev_name = "mhi_ep",
	.match = mhi_ep_match,
	.uevent = mhi_ep_uevent,
};

static int __init mhi_ep_init(void)
{
	return bus_register(&mhi_ep_bus_type);
}

static void __exit mhi_ep_exit(void)
{
	bus_unregister(&mhi_ep_bus_type);
}

postcore_initcall(mhi_ep_init);
module_exit(mhi_ep_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MHI Bus Endpoint stack");
MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");