// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include "internal.h"

/* Setup RDDM vector table for RDDM transfer and program RXVEC */
void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
		      struct image_info *img_info)
{
	struct mhi_buf *mhi_buf = img_info->mhi_buf;
	struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;
	void __iomem *base = mhi_cntrl->bhie;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 sequence_id;
	unsigned int i;

	for (i = 0; i < img_info->entries - 1; i++, mhi_buf++, bhi_vec++) {
		bhi_vec->dma_addr = mhi_buf->dma_addr;
		bhi_vec->size = mhi_buf->len;
	}

	dev_dbg(dev, "BHIe programming for RDDM\n");

	mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS,
		      upper_32_bits(mhi_buf->dma_addr));

	mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS,
		      lower_32_bits(mhi_buf->dma_addr));

	mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
	sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_RXVECSTATUS_SEQNUM_BMSK);

	mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS,
			    BHIE_RXVECDB_SEQNUM_BMSK, sequence_id);

	dev_dbg(dev, "Address: %p and len: 0x%zx sequence: %u\n",
		&mhi_buf->dma_addr, mhi_buf->len, sequence_id);
}

/* Collect RDDM buffer during kernel panic */
static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
{
	int ret;
	u32 rx_status;
	enum mhi_ee_type ee;
	const u32 delayus = 2000;
	u32 retry = (mhi_cntrl->timeout_ms * 1000) / delayus;
	const u32 rddm_timeout_us = 200000;
	int rddm_retry = rddm_timeout_us / delayus;
	void __iomem *base = mhi_cntrl->bhie;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	dev_dbg(dev, "Entered with pm_state:%s dev_state:%s ee:%s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		mhi_state_str(mhi_cntrl->dev_state),
		TO_MHI_EXEC_STR(mhi_cntrl->ee));

	/*
	 * This should only execute during a kernel panic; we expect all other
	 * cores to shut down while we're collecting the RDDM buffer. After
	 * returning from this function, we expect the device to reset.
	 *
	 * Normally, we read/write pm_state only after grabbing the pm_lock.
	 * Since we're in a panic, we skip the lock here, so there is also no
	 * guarantee that this state change will take effect.
	 */
	mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
	/* update should take effect immediately */
	smp_wmb();

	/*
	 * Make sure the device is not already in RDDM. If the device asserts
	 * and a kernel panic follows, the device will already be in RDDM.
	 * Do not trigger SYS ERR again; proceed with waiting for
	 * image download completion.
	 */
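	/* MHI_EE_MAX here means the EXECENV register read itself failed */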
	ee = mhi_get_exec_env(mhi_cntrl);
	if (ee == MHI_EE_MAX)
		goto error_exit_rddm;

	if (ee != MHI_EE_RDDM) {
		dev_dbg(dev, "Trigger device into RDDM mode using SYS ERR\n");
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);

		dev_dbg(dev, "Waiting for device to enter RDDM\n");
		while (rddm_retry--) {
			ee = mhi_get_exec_env(mhi_cntrl);
			if (ee == MHI_EE_RDDM)
				break;

			udelay(delayus);
		}

		if (rddm_retry <= 0) {
			/* Hardware reset so force device to enter RDDM */
			dev_dbg(dev,
				"Did not enter RDDM, do a host req reset\n");
			mhi_write_reg(mhi_cntrl, mhi_cntrl->regs,
				      MHI_SOC_RESET_REQ_OFFSET,
				      MHI_SOC_RESET_REQ);
			udelay(delayus);
		}

		ee = mhi_get_exec_env(mhi_cntrl);
	}

	dev_dbg(dev,
		"Waiting for RDDM image download via BHIe, current EE:%s\n",
		TO_MHI_EXEC_STR(ee));

	while (retry--) {
		ret = mhi_read_reg_field(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS,
					 BHIE_RXVECSTATUS_STATUS_BMSK,
					 &rx_status);
		if (ret)
			return -EIO;

		if (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL)
			return 0;

		udelay(delayus);
	}

	ee = mhi_get_exec_env(mhi_cntrl);
	ret = mhi_read_reg(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, &rx_status);

	dev_err(dev, "RXVEC_STATUS: 0x%x\n", rx_status);

error_exit_rddm:
	dev_err(dev, "RDDM transfer failed. Current EE: %s\n",
		TO_MHI_EXEC_STR(ee));

	return -EIO;
}

/* Download RDDM image from device */
int mhi_download_rddm_image(struct mhi_controller *mhi_cntrl, bool in_panic)
{
	void __iomem *base = mhi_cntrl->bhie;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 rx_status;

	if (in_panic)
		return __mhi_download_rddm_in_panic(mhi_cntrl);

	dev_dbg(dev, "Waiting for RDDM image download via BHIe\n");

	/* Wait for the image download to complete */
	wait_event_timeout(mhi_cntrl->state_event,
			   mhi_read_reg_field(mhi_cntrl, base,
					      BHIE_RXVECSTATUS_OFFS,
					      BHIE_RXVECSTATUS_STATUS_BMSK,
					      &rx_status) || rx_status,
			   msecs_to_jiffies(mhi_cntrl->timeout_ms));

	return (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO;
}
EXPORT_SYMBOL_GPL(mhi_download_rddm_image);

static int mhi_fw_load_bhie(struct mhi_controller *mhi_cntrl,
			    const struct mhi_buf *mhi_buf)
{
	void __iomem *base = mhi_cntrl->bhie;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
	u32 tx_status, sequence_id;
	int ret;

	read_lock_bh(pm_lock);
	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
		read_unlock_bh(pm_lock);
		return -EIO;
	}

	sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_TXVECSTATUS_SEQNUM_BMSK);
	dev_dbg(dev, "Starting image download via BHIe. Sequence ID: %u\n",
		sequence_id);
	mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_HIGH_OFFS,
		      upper_32_bits(mhi_buf->dma_addr));

	mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_LOW_OFFS,
		      lower_32_bits(mhi_buf->dma_addr));

	mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len);

	mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS,
			    BHIE_TXVECDB_SEQNUM_BMSK, sequence_id);
	read_unlock_bh(pm_lock);

	/* Wait for the image download to complete */
	ret = wait_event_timeout(mhi_cntrl->state_event,
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
				 mhi_read_reg_field(mhi_cntrl, base,
						    BHIE_TXVECSTATUS_OFFS,
						    BHIE_TXVECSTATUS_STATUS_BMSK,
						    &tx_status) || tx_status,
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
	    tx_status != BHIE_TXVECSTATUS_STATUS_XFER_COMPL)
		return -EIO;

	return (!ret) ? -ETIMEDOUT : 0;
}

static int mhi_fw_load_bhi(struct mhi_controller *mhi_cntrl,
			   dma_addr_t dma_addr,
			   size_t size)
{
	u32 tx_status, val, session_id;
	int i, ret;
	void __iomem *base = mhi_cntrl->bhi;
	rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct {
		char *name;
		u32 offset;
	} error_reg[] = {
		{ "ERROR_CODE", BHI_ERRCODE },
		{ "ERROR_DBG1", BHI_ERRDBG1 },
		{ "ERROR_DBG2", BHI_ERRDBG2 },
		{ "ERROR_DBG3", BHI_ERRDBG3 },
		{ NULL },
	};

	read_lock_bh(pm_lock);
	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
		read_unlock_bh(pm_lock);
		goto invalid_pm_state;
	}

	session_id = MHI_RANDOM_U32_NONZERO(BHI_TXDB_SEQNUM_BMSK);
	dev_dbg(dev, "Starting image download via BHI. Session ID: %u\n",
		session_id);
	mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0);
	mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH,
		      upper_32_bits(dma_addr));
	mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW,
		      lower_32_bits(dma_addr));
	mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, size);
	mhi_write_reg(mhi_cntrl, base, BHI_IMGTXDB, session_id);
	read_unlock_bh(pm_lock);

	/* Wait for the image download to complete */
	ret = wait_event_timeout(mhi_cntrl->state_event,
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
				 mhi_read_reg_field(mhi_cntrl, base, BHI_STATUS,
						    BHI_STATUS_MASK, &tx_status) ||
				 tx_status,
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
		goto invalid_pm_state;

	if (tx_status == BHI_STATUS_ERROR) {
		dev_err(dev, "Image transfer failed\n");
		read_lock_bh(pm_lock);
		if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
			for (i = 0; error_reg[i].name; i++) {
				ret = mhi_read_reg(mhi_cntrl, base,
						   error_reg[i].offset, &val);
				if (ret)
					break;
				dev_err(dev, "Reg: %s value: 0x%x\n",
					error_reg[i].name, val);
			}
		}
		read_unlock_bh(pm_lock);
		goto invalid_pm_state;
	}

	return (!ret) ? -ETIMEDOUT : 0;

invalid_pm_state:

	return -EIO;
}

void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
			 struct image_info *image_info)
{
	int i;
	struct mhi_buf *mhi_buf = image_info->mhi_buf;

	for (i = 0; i < image_info->entries; i++, mhi_buf++)
		dma_free_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len,
				  mhi_buf->buf, mhi_buf->dma_addr);

	kfree(image_info->mhi_buf);
	kfree(image_info);
}

int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
			 struct image_info **image_info,
			 size_t alloc_size)
{
	size_t seg_size = mhi_cntrl->seg_len;
	int segments = DIV_ROUND_UP(alloc_size, seg_size) + 1;
	int i;
	struct image_info *img_info;
	struct mhi_buf *mhi_buf;

	img_info = kzalloc(sizeof(*img_info), GFP_KERNEL);
	if (!img_info)
		return -ENOMEM;

	/* Allocate memory for entries */
	img_info->mhi_buf = kcalloc(segments, sizeof(*img_info->mhi_buf),
				    GFP_KERNEL);
	if (!img_info->mhi_buf)
		goto error_alloc_mhi_buf;

	/* Allocate and populate vector table */
	mhi_buf = img_info->mhi_buf;
	for (i = 0; i < segments; i++, mhi_buf++) {
		size_t vec_size = seg_size;

		/* Vector table is the last entry */
		if (i == segments - 1)
			vec_size = sizeof(struct bhi_vec_entry) * i;

		mhi_buf->len = vec_size;
		mhi_buf->buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
						  vec_size, &mhi_buf->dma_addr,
						  GFP_KERNEL);
		if (!mhi_buf->buf)
			goto error_alloc_segment;
	}

	img_info->bhi_vec = img_info->mhi_buf[segments - 1].buf;
	img_info->entries = segments;
	*image_info = img_info;

	return 0;

error_alloc_segment:
	for (--i, --mhi_buf; i >= 0; i--, mhi_buf--)
		dma_free_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len,
				  mhi_buf->buf, mhi_buf->dma_addr);

error_alloc_mhi_buf:
	kfree(img_info);

	return -ENOMEM;
}

static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
			      const struct firmware *firmware,
			      struct image_info *img_info)
{
	size_t remainder = firmware->size;
	size_t to_cpy;
	const u8 *buf = firmware->data;
	struct mhi_buf *mhi_buf = img_info->mhi_buf;
	struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;

	while (remainder) {
		to_cpy = min(remainder, mhi_buf->len);
		memcpy(mhi_buf->buf, buf, to_cpy);
		bhi_vec->dma_addr = mhi_buf->dma_addr;
		bhi_vec->size = to_cpy;

		buf += to_cpy;
		remainder -= to_cpy;
		bhi_vec++;
		mhi_buf++;
	}
}

void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
{
	const struct firmware *firmware = NULL;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	const char *fw_name;
	void *buf;
	dma_addr_t dma_addr;
	size_t size;
	int i, ret;

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev, "Device MHI is not in valid state\n");
		return;
	}

	/* save hardware info from BHI */
	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_SERIALNU,
			   &mhi_cntrl->serial_number);
	if (ret)
		dev_err(dev, "Could not capture serial number via BHI\n");

	for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++) {
		ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_OEMPKHASH(i),
				   &mhi_cntrl->oem_pk_hash[i]);
		if (ret) {
			dev_err(dev, "Could not capture OEM PK HASH via BHI\n");
			break;
		}
	}

	/* wait for ready on pass through or any other execution environment */
	if (!MHI_FW_LOAD_CAPABLE(mhi_cntrl->ee))
		goto fw_load_ready_state;
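
	/* In EDL, download the EDL image; otherwise download the regular firmware image */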
	fw_name = (mhi_cntrl->ee == MHI_EE_EDL) ?
		mhi_cntrl->edl_image : mhi_cntrl->fw_image;

	if (!fw_name || (mhi_cntrl->fbc_download && (!mhi_cntrl->sbl_size ||
						     !mhi_cntrl->seg_len))) {
		dev_err(dev,
			"No firmware image defined or !sbl_size || !seg_len\n");
		goto error_fw_load;
	}

	ret = request_firmware(&firmware, fw_name, dev);
	if (ret) {
		dev_err(dev, "Error loading firmware: %d\n", ret);
		goto error_fw_load;
	}

	size = (mhi_cntrl->fbc_download) ? mhi_cntrl->sbl_size : firmware->size;

	/* SBL size provided is maximum size, not necessarily the image size */
	if (size > firmware->size)
		size = firmware->size;

	buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, size, &dma_addr,
				 GFP_KERNEL);
	if (!buf) {
		release_firmware(firmware);
		goto error_fw_load;
	}

	/* Download image using BHI */
	memcpy(buf, firmware->data, size);
	ret = mhi_fw_load_bhi(mhi_cntrl, dma_addr, size);
	dma_free_coherent(mhi_cntrl->cntrl_dev, size, buf, dma_addr);

	/* Error or in EDL mode, we're done */
	if (ret) {
		dev_err(dev, "MHI did not load image over BHI, ret: %d\n", ret);
		release_firmware(firmware);
		goto error_fw_load;
	}

	/* Wait for ready since EDL image was loaded */
	if (fw_name == mhi_cntrl->edl_image) {
		release_firmware(firmware);
		goto fw_load_ready_state;
	}

	write_lock_irq(&mhi_cntrl->pm_lock);
	mhi_cntrl->dev_state = MHI_STATE_RESET;
	write_unlock_irq(&mhi_cntrl->pm_lock);

	/*
	 * If we're doing fbc, populate the vector tables while
	 * the device is transitioning into MHI READY state
	 */
	if (mhi_cntrl->fbc_download) {
		ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image,
					   firmware->size);
		if (ret) {
			release_firmware(firmware);
			goto error_fw_load;
		}

		/* Load the firmware into BHIE vec table */
		mhi_firmware_copy(mhi_cntrl, firmware, mhi_cntrl->fbc_image);
	}

	release_firmware(firmware);

fw_load_ready_state:
	/* Transitioning into MHI RESET->READY state */
	ret = mhi_ready_state_transition(mhi_cntrl);
	if (ret) {
		dev_err(dev, "MHI did not enter READY state\n");
		goto error_ready_state;
	}

	dev_info(dev, "Wait for device to enter SBL or Mission mode\n");
	return;

error_ready_state:
	if (mhi_cntrl->fbc_download) {
		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
		mhi_cntrl->fbc_image = NULL;
	}

error_fw_load:
	mhi_cntrl->pm_state = MHI_PM_FW_DL_ERR;
	wake_up_all(&mhi_cntrl->state_event);
}

int mhi_download_amss_image(struct mhi_controller *mhi_cntrl)
{
	struct image_info *image_info = mhi_cntrl->fbc_image;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret;

	if (!image_info)
		return -EIO;

	ret = mhi_fw_load_bhie(mhi_cntrl,
			       /* Vector table is the last entry */
			       &image_info->mhi_buf[image_info->entries - 1]);
	if (ret) {
		dev_err(dev, "MHI did not load AMSS, ret:%d\n", ret);
		mhi_cntrl->pm_state = MHI_PM_FW_DL_ERR;
		wake_up_all(&mhi_cntrl->state_event);
	}

	return ret;
}