// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Universal Flash Storage Host controller driver Core
 * Copyright (C) 2011-2013 Samsung India Software Operations
 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 */

#include <linux/async.h>
#include <linux/devfreq.h>
#include <linux/nls.h>
#include <linux/of.h>
#include <linux/bitfield.h>
#include <linux/blk-pm.h>
#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
#include <linux/sched/clock.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include "ufshcd-priv.h"
#include <ufs/ufs_quirks.h>
#include <ufs/unipro.h>
#include "ufs-sysfs.h"
#include "ufs-debugfs.h"
#include "ufs-fault-injection.h"
#include "ufs_bsg.h"
#include "ufshcd-crypto.h"
#include "ufshpb.h"
#include <asm/unaligned.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>

#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK)

#define UFSHCD_ENABLE_MCQ_INTRS	(UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK |\
				 MCQ_CQ_EVENT_STATUS)

/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT	500

/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES    10
/* Timeout after 50 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT    50 /* msecs */

/* Query request retries */
#define QUERY_REQ_RETRIES 3
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */

/* Advanced RPMB request timeout */
#define ADVANCED_RPMB_REQ_TIMEOUT  3000 /* 3 seconds */

/* Task management command timeout */
#define TM_CMD_TIMEOUT	100 /* msecs */

/* maximum number of retries for a general UIC command */
#define UFS_UIC_COMMAND_RETRIES 3

/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES 3

/* maximum number of reset retries before giving up */
#define MAX_HOST_RESET_RETRIES 5

/* Maximum number of error handler retries before giving up */
#define MAX_ERR_HANDLER_RETRIES 5

/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF

/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO	0x02

/* default delay of autosuspend: 2000 ms */
#define RPM_AUTOSUSPEND_DELAY_MS 2000

/* Default delay of RPM device flush delayed work */
#define RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS 5000

/* Default value of wait time before gating device ref clock */
#define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF /* microsecs */

/* Polling time to wait for fDeviceInit */
#define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */

/* UFSHC 4.0 compliant HC supports this mode, refer param_set_mcq_mode() */
static bool use_mcq_mode = true;

static bool is_mcq_supported(struct ufs_hba *hba)
{
	return hba->mcq_sup && use_mcq_mode;
}

static int param_set_mcq_mode(const char *val, const struct kernel_param *kp)
{
	int ret;

	ret = param_set_bool(val, kp);
	if (ret)
		return ret;

	return 0;
}

static const struct kernel_param_ops mcq_mode_ops = {
	.set = param_set_mcq_mode,
	.get = param_get_bool,
};

module_param_cb(use_mcq_mode, &mcq_mode_ops, &use_mcq_mode, 0644);
MODULE_PARM_DESC(use_mcq_mode, "Control MCQ mode for controllers starting from UFSHCI 4.0. 1 - enable MCQ, 0 - disable MCQ. MCQ is enabled by default");
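/*
 * Illustrative usage note (not part of the original file): assuming the core
 * is built as a module named "ufshcd_core" (the module name is an assumption
 * about the build), MCQ mode could be disabled at boot with
 * "ufshcd_core.use_mcq_mode=0" on the kernel command line, or toggled at
 * runtime through /sys/module/ufshcd_core/parameters/use_mcq_mode.
 */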
#define ufshcd_toggle_vreg(_dev, _vreg, _on)			\
	({							\
		int _ret;					\
		if (_on)					\
			_ret = ufshcd_enable_vreg(_dev, _vreg);	\
		else						\
			_ret = ufshcd_disable_vreg(_dev, _vreg);\
		_ret;						\
	})

#define ufshcd_hex_dump(prefix_str, buf, len) do {			\
	size_t __len = (len);						\
	print_hex_dump(KERN_ERR, prefix_str,				\
		       __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\
		       16, 4, buf, __len, false);			\
} while (0)

int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
		     const char *prefix)
{
	u32 *regs;
	size_t pos;

	if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
		return -EINVAL;

	regs = kzalloc(len, GFP_ATOMIC);
	if (!regs)
		return -ENOMEM;

	for (pos = 0; pos < len; pos += 4) {
		if (offset == 0 &&
		    pos >= REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER &&
		    pos <= REG_UIC_ERROR_CODE_DME)
			continue;
		regs[pos / 4] = ufshcd_readl(hba, offset + pos);
	}

	ufshcd_hex_dump(prefix, regs, len);
	kfree(regs);

	return 0;
}
EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
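/*
 * Usage example, taken from the event-history dump further below: dump the
 * whole standard UFSHCI register space with an identifying prefix:
 *
 *	ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
 */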
enum {
	UFSHCD_MAX_CHANNEL	= 0,
	UFSHCD_MAX_ID		= 1,
	UFSHCD_NUM_RESERVED	= 1,
	UFSHCD_CMD_PER_LUN	= 32 - UFSHCD_NUM_RESERVED,
	UFSHCD_CAN_QUEUE	= 32 - UFSHCD_NUM_RESERVED,
};

static const char *const ufshcd_state_name[] = {
	[UFSHCD_STATE_RESET]			= "reset",
	[UFSHCD_STATE_OPERATIONAL]		= "operational",
	[UFSHCD_STATE_ERROR]			= "error",
	[UFSHCD_STATE_EH_SCHEDULED_FATAL]	= "eh_fatal",
	[UFSHCD_STATE_EH_SCHEDULED_NON_FATAL]	= "eh_non_fatal",
};

/* UFSHCD error handling flags */
enum {
	UFSHCD_EH_IN_PROGRESS = (1 << 0),
};

/* UFSHCD UIC layer error flags */
enum {
	UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
	UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
	UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
	UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
	UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
	UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
	UFSHCD_UIC_PA_GENERIC_ERROR = (1 << 6), /* Generic PA error */
};

#define ufshcd_set_eh_in_progress(h) \
	((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
#define ufshcd_eh_in_progress(h) \
	((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
#define ufshcd_clear_eh_in_progress(h) \
	((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)

const struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
	[UFS_PM_LVL_0] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	[UFS_PM_LVL_1] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	[UFS_PM_LVL_2] = {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	[UFS_PM_LVL_3] = {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	[UFS_PM_LVL_4] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	[UFS_PM_LVL_5] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
	/*
	 * For DeepSleep, the link is first put in hibern8 and then off.
	 * Leaving the link in hibern8 is not supported.
	 */
	[UFS_PM_LVL_6] = {UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE},
};

static inline enum ufs_dev_pwr_mode
ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].dev_state;
}

static inline enum uic_link_state
ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].link_state;
}

static inline enum ufs_pm_level
ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
					  enum uic_link_state link_state)
{
	enum ufs_pm_level lvl;

	for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
		if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
		    (ufs_pm_lvl_states[lvl].link_state == link_state))
			return lvl;
	}

	/* if no match found, return the level 0 */
	return UFS_PM_LVL_0;
}
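/*
 * Example lookup, read straight off the table above (illustrative only): a
 * request for (UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE) resolves to
 * UFS_PM_LVL_3, while an unsupported combination falls back to UFS_PM_LVL_0:
 *
 *	lvl = ufs_get_desired_pm_lvl_for_dev_link_state(UFS_SLEEP_PWR_MODE,
 *							UIC_LINK_HIBERN8_STATE);
 */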
static const struct ufs_dev_quirk ufs_fixups[] = {
	/* UFS cards deviations table */
	{ .wmanufacturerid = UFS_VENDOR_MICRON,
	  .model = UFS_ANY_MODEL,
	  .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
		   UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ },
	{ .wmanufacturerid = UFS_VENDOR_SAMSUNG,
	  .model = UFS_ANY_MODEL,
	  .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
		   UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
		   UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS },
	{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
	  .model = UFS_ANY_MODEL,
	  .quirk = UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME },
	{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
	  .model = "hB8aL1" /*H28U62301AMR*/,
	  .quirk = UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME },
	{ .wmanufacturerid = UFS_VENDOR_TOSHIBA,
	  .model = UFS_ANY_MODEL,
	  .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
	{ .wmanufacturerid = UFS_VENDOR_TOSHIBA,
	  .model = "THGLF2G9C8KBADG",
	  .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE },
	{ .wmanufacturerid = UFS_VENDOR_TOSHIBA,
	  .model = "THGLF2G9D8KBADG",
	  .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE },
	{}
};

static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
				    struct ufs_pa_layer_attr *pwr_mode);
static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
					 struct ufs_vreg *vreg);
static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba,
						 bool enable);
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);

static inline void ufshcd_enable_irq(struct ufs_hba *hba)
{
	if (!hba->is_irq_enabled) {
		enable_irq(hba->irq);
		hba->is_irq_enabled = true;
	}
}

static inline void ufshcd_disable_irq(struct ufs_hba *hba)
{
	if (hba->is_irq_enabled) {
		disable_irq(hba->irq);
		hba->is_irq_enabled = false;
	}
}

static void ufshcd_configure_wb(struct ufs_hba *hba)
{
	if (!ufshcd_is_wb_allowed(hba))
		return;

	ufshcd_wb_toggle(hba, true);

	ufshcd_wb_toggle_buf_flush_during_h8(hba, true);

	if (ufshcd_is_wb_buf_flush_allowed(hba))
		ufshcd_wb_toggle_buf_flush(hba, true);
}

static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
{
	if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
		scsi_unblock_requests(hba->host);
}

static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
{
	if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
		scsi_block_requests(hba->host);
}

static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
				      enum ufs_trace_str_t str_t)
{
	struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
	struct utp_upiu_header *header;

	if (!trace_ufshcd_upiu_enabled())
		return;

	if (str_t == UFS_CMD_SEND)
		header = &rq->header;
	else
		header = &hba->lrb[tag].ucd_rsp_ptr->header;

	trace_ufshcd_upiu(dev_name(hba->dev), str_t, header, &rq->sc.cdb,
			  UFS_TSF_CDB);
}

static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba,
					enum ufs_trace_str_t str_t,
					struct utp_upiu_req *rq_rsp)
{
	if (!trace_ufshcd_upiu_enabled())
		return;

	trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq_rsp->header,
			  &rq_rsp->qr, UFS_TSF_OSF);
}

static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
				     enum ufs_trace_str_t str_t)
{
	struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[tag];

	if (!trace_ufshcd_upiu_enabled())
		return;

	if (str_t == UFS_TM_SEND)
		trace_ufshcd_upiu(dev_name(hba->dev), str_t,
				  &descp->upiu_req.req_header,
				  &descp->upiu_req.input_param1,
				  UFS_TSF_TM_INPUT);
	else
		trace_ufshcd_upiu(dev_name(hba->dev), str_t,
				  &descp->upiu_rsp.rsp_header,
				  &descp->upiu_rsp.output_param1,
				  UFS_TSF_TM_OUTPUT);
}

static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
					 const struct uic_command *ucmd,
					 enum ufs_trace_str_t str_t)
{
	u32 cmd;

	if (!trace_ufshcd_uic_command_enabled())
		return;

	if (str_t == UFS_CMD_SEND)
		cmd = ucmd->command;
	else
		cmd = ufshcd_readl(hba, REG_UIC_COMMAND);

	trace_ufshcd_uic_command(dev_name(hba->dev), str_t, cmd,
				 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1),
				 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2),
				 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3));
}

static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
				     enum ufs_trace_str_t str_t)
{
	u64 lba = 0;
	u8 opcode = 0, group_id = 0;
	u32 doorbell = 0;
	u32 intr;
	int hwq_id = -1;
	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
	struct scsi_cmnd *cmd = lrbp->cmd;
	struct request *rq = scsi_cmd_to_rq(cmd);
	int transfer_len = -1;

	if (!cmd)
		return;

	/* trace UPIU also */
	ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
	if (!trace_ufshcd_command_enabled())
		return;

	opcode = cmd->cmnd[0];

	if (opcode == READ_10 || opcode == WRITE_10) {
		/*
		 * Currently we only fully trace read(10) and write(10) commands
		 */
		transfer_len =
			be32_to_cpu(lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
		lba = scsi_get_lba(cmd);
		if (opcode == WRITE_10)
			group_id = lrbp->cmd->cmnd[6];
	} else if (opcode == UNMAP) {
		/*
		 * The number of Bytes to be unmapped beginning with the lba.
		 */
		transfer_len = blk_rq_bytes(rq);
		lba = scsi_get_lba(cmd);
	}

	intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);

	if (is_mcq_enabled(hba)) {
		struct ufs_hw_queue *hwq = ufshcd_mcq_req_to_hwq(hba, rq);

		hwq_id = hwq->id;
	} else {
		doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	}
	trace_ufshcd_command(dev_name(hba->dev), str_t, tag,
			doorbell, hwq_id, transfer_len, intr, lba, opcode, group_id);
}

static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
{
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		return;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
				clki->max_freq)
			dev_err(hba->dev, "clk: %s, rate: %u\n",
					clki->name, clki->curr_freq);
	}
}

static void ufshcd_print_evt(struct ufs_hba *hba, u32 id,
			     const char *err_name)
{
	int i;
	bool found = false;
	const struct ufs_event_hist *e;

	if (id >= UFS_EVT_CNT)
		return;

	e = &hba->ufs_stats.event[id];

	for (i = 0; i < UFS_EVENT_HIST_LENGTH; i++) {
		int p = (i + e->pos) % UFS_EVENT_HIST_LENGTH;

		if (e->tstamp[p] == 0)
			continue;
		dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
			e->val[p], div_u64(e->tstamp[p], 1000));
		found = true;
	}

	if (!found)
		dev_err(hba->dev, "No record of %s\n", err_name);
	else
		dev_err(hba->dev, "%s: total cnt=%llu\n", err_name, e->cnt);
}

static void ufshcd_print_evt_hist(struct ufs_hba *hba)
{
	ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");

	ufshcd_print_evt(hba, UFS_EVT_PA_ERR, "pa_err");
	ufshcd_print_evt(hba, UFS_EVT_DL_ERR, "dl_err");
	ufshcd_print_evt(hba, UFS_EVT_NL_ERR, "nl_err");
	ufshcd_print_evt(hba, UFS_EVT_TL_ERR, "tl_err");
	ufshcd_print_evt(hba, UFS_EVT_DME_ERR, "dme_err");
	ufshcd_print_evt(hba, UFS_EVT_AUTO_HIBERN8_ERR,
			 "auto_hibern8_err");
	ufshcd_print_evt(hba, UFS_EVT_FATAL_ERR, "fatal_err");
	ufshcd_print_evt(hba, UFS_EVT_LINK_STARTUP_FAIL,
			 "link_startup_fail");
	ufshcd_print_evt(hba, UFS_EVT_RESUME_ERR, "resume_fail");
	ufshcd_print_evt(hba, UFS_EVT_SUSPEND_ERR,
			 "suspend_fail");
	ufshcd_print_evt(hba, UFS_EVT_WL_RES_ERR, "wlun resume_fail");
	ufshcd_print_evt(hba, UFS_EVT_WL_SUSP_ERR,
			 "wlun suspend_fail");
	ufshcd_print_evt(hba, UFS_EVT_DEV_RESET, "dev_reset");
	ufshcd_print_evt(hba, UFS_EVT_HOST_RESET, "host_reset");
	ufshcd_print_evt(hba, UFS_EVT_ABORT, "task_abort");

	ufshcd_vops_dbg_register_dump(hba);
}

static
void ufshcd_print_tr(struct ufs_hba *hba, int tag, bool pr_prdt)
{
	const struct ufshcd_lrb *lrbp;
	int prdt_length;

	lrbp = &hba->lrb[tag];

	dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
			tag, div_u64(lrbp->issue_time_stamp_local_clock, 1000));
	dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
			tag, div_u64(lrbp->compl_time_stamp_local_clock, 1000));
	dev_err(hba->dev,
		"UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
		tag, (u64)lrbp->utrd_dma_addr);

	ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
			sizeof(struct utp_transfer_req_desc));
	dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
		(u64)lrbp->ucd_req_dma_addr);
	ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
			sizeof(struct utp_upiu_req));
	dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
		(u64)lrbp->ucd_rsp_dma_addr);
	ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
			sizeof(struct utp_upiu_rsp));

	prdt_length = le16_to_cpu(
		lrbp->utr_descriptor_ptr->prd_table_length);
	if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
		prdt_length /= ufshcd_sg_entry_size(hba);

	dev_err(hba->dev,
		"UPIU[%d] - PRDT - %d entries phys@0x%llx\n",
		tag, prdt_length,
		(u64)lrbp->ucd_prdt_dma_addr);

	if (pr_prdt)
		ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
			ufshcd_sg_entry_size(hba) * prdt_length);
}

static bool ufshcd_print_tr_iter(struct request *req, void *priv)
{
	struct scsi_device *sdev = req->q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	struct ufs_hba *hba = shost_priv(shost);

	ufshcd_print_tr(hba, req->tag, *(bool *)priv);

	return true;
}

/**
 * ufshcd_print_trs_all - print trs for all started requests.
 * @hba: per-adapter instance.
 * @pr_prdt: need to print prdt or not.
 */
static void ufshcd_print_trs_all(struct ufs_hba *hba, bool pr_prdt)
{
	blk_mq_tagset_busy_iter(&hba->host->tag_set, ufshcd_print_tr_iter, &pr_prdt);
}

static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
{
	int tag;

	for_each_set_bit(tag, &bitmap, hba->nutmrs) {
		struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];

		dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
		ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp));
	}
}

static void ufshcd_print_host_state(struct ufs_hba *hba)
{
	const struct scsi_device *sdev_ufs = hba->ufs_device_wlun;

	dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
	dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
		hba->outstanding_reqs, hba->outstanding_tasks);
	dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
		hba->saved_err, hba->saved_uic_err);
	dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
		hba->pm_op_in_progress, hba->is_sys_suspended);
	dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
		hba->auto_bkops_enabled, hba->host->host_self_blocked);
	dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
	dev_err(hba->dev,
		"last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n",
		div_u64(hba->ufs_stats.last_hibern8_exit_tstamp, 1000),
		hba->ufs_stats.hibern8_exit_cnt);
	dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n",
		div_u64(hba->ufs_stats.last_intr_ts, 1000),
		hba->ufs_stats.last_intr_status);
	dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
		hba->eh_flags, hba->req_abort_count);
	dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n",
		hba->ufs_version, hba->capabilities, hba->caps);
	dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
		hba->dev_quirks);
	if (sdev_ufs)
		dev_err(hba->dev, "UFS dev info: %.8s %.16s rev %.4s\n",
			sdev_ufs->vendor, sdev_ufs->model, sdev_ufs->rev);

	ufshcd_print_clk_freqs(hba);
}

/**
 * ufshcd_print_pwr_info - print power params as saved in hba
 * power info
 * @hba: per-adapter instance
 */
static void ufshcd_print_pwr_info(struct ufs_hba *hba)
{
	static const char * const names[] = {
		"INVALID MODE",
		"FAST MODE",
		"SLOW_MODE",
		"INVALID MODE",
		"FASTAUTO_MODE",
		"SLOWAUTO_MODE",
		"INVALID MODE",
	};

	/*
	 * Use dev_dbg here to avoid printing messages during runtime PM:
	 * user space writing those messages back to storage would trigger
	 * runtime resume, which would produce more messages, and so on in a
	 * never-ending cycle.
	 */
	dev_dbg(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
		 __func__,
		 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
		 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
		 names[hba->pwr_info.pwr_rx],
		 names[hba->pwr_info.pwr_tx],
		 hba->pwr_info.hs_rate);
}

static void ufshcd_device_reset(struct ufs_hba *hba)
{
	int err;

	err = ufshcd_vops_device_reset(hba);

	if (!err) {
		ufshcd_set_ufs_dev_active(hba);
		if (ufshcd_is_wb_allowed(hba)) {
			hba->dev_info.wb_enabled = false;
			hba->dev_info.wb_buf_flush_enabled = false;
		}
	}
	if (err != -EOPNOTSUPP)
		ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err);
}

void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
{
	if (!us)
		return;

	if (us < 10)
		udelay(us);
	else
		usleep_range(us, us + tolerance);
}
EXPORT_SYMBOL_GPL(ufshcd_delay_us);

/**
 * ufshcd_wait_for_register - wait for register value to change
 * @hba: per-adapter interface
 * @reg: mmio register offset
 * @mask: mask to apply to the read register value
 * @val: value to wait for
 * @interval_us: polling interval in microseconds
 * @timeout_ms: timeout in milliseconds
 *
 * Return:
 * -ETIMEDOUT on error, zero on success.
 */
static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
				    u32 val, unsigned long interval_us,
				    unsigned long timeout_ms)
{
	int err = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	/* ignore bits that we don't intend to wait on */
	val = val & mask;

	while ((ufshcd_readl(hba, reg) & mask) != val) {
		usleep_range(interval_us, interval_us + 50);
		if (time_after(jiffies, timeout)) {
			if ((ufshcd_readl(hba, reg) & mask) != val)
				err = -ETIMEDOUT;
			break;
		}
	}

	return err;
}
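/*
 * Illustrative call (the mask, interval and timeout values here are
 * assumptions, not lifted from this file): wait up to 100 ms, polling every
 * 20 us, for the controller to clear a transfer request doorbell bit:
 *
 *	err = ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
 *				       1U << tag, 0, 20, 100);
 */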
/**
 * ufshcd_get_intr_mask - Get the interrupt bit mask
 * @hba: Pointer to adapter instance
 *
 * Returns interrupt bit mask per version
 */
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
	if (hba->ufs_version == ufshci_version(1, 0))
		return INTERRUPT_MASK_ALL_VER_10;
	if (hba->ufs_version <= ufshci_version(2, 0))
		return INTERRUPT_MASK_ALL_VER_11;

	return INTERRUPT_MASK_ALL_VER_21;
}

/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 * @hba: Pointer to adapter instance
 *
 * Returns UFSHCI version supported by the controller
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
	u32 ufshci_ver;

	if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
		ufshci_ver = ufshcd_vops_get_ufs_hci_version(hba);
	else
		ufshci_ver = ufshcd_readl(hba, REG_UFS_VERSION);

	/*
	 * UFSHCI v1.x uses a different version scheme. In order to allow the
	 * use of comparisons with the ufshci_version function, we convert it
	 * to the same scheme as UFS 2.0+.
	 */
	if (ufshci_ver & 0x00010000)
		return ufshci_version(1, ufshci_ver & 0x00000100);

	return ufshci_ver;
}

/**
 * ufshcd_is_device_present - Check if any device is connected to
 *			      the host controller
 * @hba: pointer to adapter instance
 *
 * Returns true if device present, false if no device detected
 */
static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & DEVICE_PRESENT;
}

/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 * @lrbp: pointer to local command reference block
 * @cqe: pointer to the completion queue entry
 *
 * This function is used to get the OCS field from UTRD
 * Returns the OCS field in the UTRD
 */
static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp,
				      struct cq_entry *cqe)
{
	if (cqe)
		return le32_to_cpu(cqe->status) & MASK_OCS;

	return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
}

/**
 * ufshcd_utrl_clear() - Clear requests from the controller request list.
 * @hba: per adapter instance
 * @mask: mask with one bit set for each request to be cleared
 */
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 mask)
{
	if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
		mask = ~mask;
	/*
	 * From the UFSHCI specification: "UTP Transfer Request List CLear
	 * Register (UTRLCLR): This field is bit significant. Each bit
	 * corresponds to a slot in the UTP Transfer Request List, where bit 0
	 * corresponds to request slot 0. A bit in this field is set to ‘0’
	 * by host software to indicate to the host controller that a transfer
	 * request slot is cleared. The host controller
	 * shall free up any resources associated to the request slot
	 * immediately, and shall set the associated bit in UTRLDBR to ‘0’. The
	 * host software indicates no change to request slots by setting the
	 * associated bits in this field to ‘1’. Bits in this field shall only
	 * be set ‘1’ or ‘0’ by host software when UTRLRSR is set to ‘1’."
	 */
	ufshcd_writel(hba, ~mask, REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}

/**
 * ufshcd_utmrl_clear - Clear a bit in UTMRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
{
	if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
		ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
	else
		ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
}

/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 * @reg: Register value of host controller status
 *
 * Returns integer, 0 on success and positive value if failed
 */
static inline int ufshcd_get_lists_status(u32 reg)
{
	return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
}

/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 * @hba: Pointer to adapter instance
 *
 * This function gets the result of UIC command completion
 * Returns 0 on success, non zero value on error
 */
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
	       MASK_UIC_COMMAND_RESULT;
}

/**
 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 * @hba: Pointer to adapter instance
 *
 * This function gets UIC command argument3
 * Returns 0 on success, non zero value on error
 */
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}

/**
 * ufshcd_get_req_rsp - returns the TR response transaction type
 * @ucd_rsp_ptr: pointer to response UPIU
 */
static inline int
ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
}

/**
 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * This function gets the response status and scsi_status from response UPIU
 * Returns the response result code.
 */
static inline int
ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
}

/*
 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
 *				      from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * Return the data segment length.
 */
static inline unsigned int
ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
		MASK_RSP_UPIU_DATA_SEG_LEN;
}

/**
 * ufshcd_is_exception_event - Check if the device raised an exception event
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * The function checks if the device raised an exception event indicated in
 * the Device Information field of response UPIU.
 *
 * Returns true if exception is raised, false otherwise.
 */
static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
			MASK_RSP_EXCEPTION_EVENT;
}

/**
 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
 * @hba: per adapter instance
 */
static inline void
ufshcd_reset_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE |
		      INT_AGGR_COUNTER_AND_TIMER_RESET,
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
 * @hba: per adapter instance
 * @cnt: Interrupt aggregation counter threshold
 * @tmout: Interrupt aggregation timeout value
 */
static inline void
ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
		      INT_AGGR_COUNTER_THLD_VAL(cnt) |
		      INT_AGGR_TIMEOUT_VAL(tmout),
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 * @hba: per adapter instance
 */
static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers.
 * When the run-stop registers are set to 1, it indicates to the
 * host controller that it can process requests.
 * @hba: per adapter instance
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}

/**
 * ufshcd_hba_start - Start controller initialization sequence
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
	u32 val = CONTROLLER_ENABLE;

	if (ufshcd_crypto_enable(hba))
		val |= CRYPTO_GENERAL_ENABLE;

	ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
}

/**
 * ufshcd_is_hba_active - Get controller state
 * @hba: per adapter instance
 *
 * Returns true if and only if the controller is active.
 */
static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE;
}

u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
{
	/* HCI version 1.0 and 1.1 supports UniPro 1.41 */
	if (hba->ufs_version <= ufshci_version(1, 1))
		return UFS_UNIPRO_VER_1_41;
	else
		return UFS_UNIPRO_VER_1_6;
}
EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);

static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
{
	/*
	 * If both host and device support UniPro ver1.6 or later, PA layer
	 * parameters tuning happens during link startup itself.
	 *
	 * We can manually tune PA layer parameters if either host or device
	 * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
	 * logic simple, we will only do manual tuning if local unipro version
	 * doesn't support ver1.6 or later.
	 */
	return ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6;
}

/**
 * ufshcd_set_clk_freq - set UFS controller clock frequencies
 * @hba: per adapter instance
 * @scale_up: If True, set max possible frequency otherwise set low frequency
 *
 * Returns 0 if successful
 * Returns < 0 for any other errors
 */
static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		goto out;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (scale_up && clki->max_freq) {
				if (clki->curr_freq == clki->max_freq)
					continue;

				ret = clk_set_rate(clki->clk, clki->max_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->max_freq, ret);
					break;
				}
				trace_ufshcd_clk_scaling(dev_name(hba->dev),
						"scaled up", clki->name,
						clki->curr_freq,
						clki->max_freq);

				clki->curr_freq = clki->max_freq;

			} else if (!scale_up && clki->min_freq) {
				if (clki->curr_freq == clki->min_freq)
					continue;

				ret = clk_set_rate(clki->clk, clki->min_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->min_freq, ret);
					break;
				}
				trace_ufshcd_clk_scaling(dev_name(hba->dev),
						"scaled down", clki->name,
						clki->curr_freq,
						clki->min_freq);
				clki->curr_freq = clki->min_freq;
			}
		}
		dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
				clki->name, clk_get_rate(clki->clk));
	}

out:
	return ret;
}

/**
 * ufshcd_scale_clks - scale up or scale down UFS controller clocks
 * @hba: per adapter instance
 * @scale_up: True if scaling up and false if scaling down
 *
 * Returns 0 if successful
 * Returns < 0 for any other errors
 */
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;
	ktime_t start = ktime_get();

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
	if (ret)
		goto out;

	ret = ufshcd_set_clk_freq(hba, scale_up);
	if (ret)
		goto out;

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
	if (ret)
		ufshcd_set_clk_freq(hba, !scale_up);

out:
	trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
			(scale_up ? "up" : "down"),
			ktime_to_us(ktime_sub(ktime_get(), start)), ret);
	return ret;
}

/**
 * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
 * @hba: per adapter instance
 * @scale_up: True if scaling up and false if scaling down
 *
 * Returns true if scaling is required, false otherwise.
 */
static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
					       bool scale_up)
{
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		return false;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (scale_up && clki->max_freq) {
				if (clki->curr_freq == clki->max_freq)
					continue;
				return true;
			} else if (!scale_up && clki->min_freq) {
				if (clki->curr_freq == clki->min_freq)
					continue;
				return true;
			}
		}
	}

	return false;
}

/*
 * Determine the number of pending commands by counting the bits in the SCSI
 * device budget maps. This approach has been selected because a bit is set in
 * the budget map before scsi_host_queue_ready() checks the host_self_blocked
 * flag. The host_self_blocked flag can be modified by calling
 * scsi_block_requests() or scsi_unblock_requests().
 */
static u32 ufshcd_pending_cmds(struct ufs_hba *hba)
{
	const struct scsi_device *sdev;
	u32 pending = 0;

	lockdep_assert_held(hba->host->host_lock);
	__shost_for_each_device(sdev, hba->host)
		pending += sbitmap_weight(&sdev->budget_map);

	return pending;
}

/*
 * Wait until all pending SCSI commands and TMFs have finished or the timeout
 * has expired.
 *
 * Return: 0 upon success; -EBUSY upon timeout.
 */
static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
					u64 wait_timeout_us)
{
	unsigned long flags;
	int ret = 0;
	u32 tm_doorbell;
	u32 tr_pending;
	bool timeout = false, do_last_check = false;
	ktime_t start;

	ufshcd_hold(hba, false);
	spin_lock_irqsave(hba->host->host_lock, flags);
	/*
	 * Wait for all the outstanding tasks/transfer requests.
	 * Verify by checking the doorbell registers are clear.
	 */
	start = ktime_get();
	do {
		if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
			ret = -EBUSY;
			goto out;
		}

		tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
		tr_pending = ufshcd_pending_cmds(hba);
		if (!tm_doorbell && !tr_pending) {
			timeout = false;
			break;
		} else if (do_last_check) {
			break;
		}

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		io_schedule_timeout(msecs_to_jiffies(20));
		if (ktime_to_us(ktime_sub(ktime_get(), start)) >
		    wait_timeout_us) {
			timeout = true;
			/*
			 * We might have scheduled out for a long time so make
			 * sure to check if doorbells are cleared by this time
			 * or not.
			 */
			do_last_check = true;
		}
		spin_lock_irqsave(hba->host->host_lock, flags);
	} while (tm_doorbell || tr_pending);

	if (timeout) {
		dev_err(hba->dev,
			"%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
			__func__, tm_doorbell, tr_pending);
		ret = -EBUSY;
	}
out:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_release(hba);
	return ret;
}

/**
 * ufshcd_scale_gear - scale up/down UFS gear
 * @hba: per adapter instance
 * @scale_up: True for scaling up gear and false for scaling down
 *
 * Returns 0 for success,
 * Returns -EBUSY if scaling can't happen at this time
 * Returns non-zero for any other errors
 */
static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;
	struct ufs_pa_layer_attr new_pwr_info;

	if (scale_up) {
		memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info,
		       sizeof(struct ufs_pa_layer_attr));
	} else {
		memcpy(&new_pwr_info, &hba->pwr_info,
		       sizeof(struct ufs_pa_layer_attr));

		if (hba->pwr_info.gear_tx > hba->clk_scaling.min_gear ||
		    hba->pwr_info.gear_rx > hba->clk_scaling.min_gear) {
			/* save the current power mode */
			memcpy(&hba->clk_scaling.saved_pwr_info,
			       &hba->pwr_info,
			       sizeof(struct ufs_pa_layer_attr));

			/* scale down gear */
			new_pwr_info.gear_tx = hba->clk_scaling.min_gear;
			new_pwr_info.gear_rx = hba->clk_scaling.min_gear;
		}
	}

	/* check if the power mode needs to be changed or not? */
	ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
	if (ret)
		dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
			__func__, ret,
			hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
			new_pwr_info.gear_tx, new_pwr_info.gear_rx);

	return ret;
}

/*
 * Wait until all pending SCSI commands and TMFs have finished or the timeout
 * has expired.
 *
 * Return: 0 upon success; -EBUSY upon timeout.
 */
static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
{
	int ret = 0;
	/*
	 * make sure that there are no outstanding requests when
	 * clock scaling is in progress
	 */
	ufshcd_scsi_block_requests(hba);
	mutex_lock(&hba->wb_mutex);
	down_write(&hba->clk_scaling_lock);

	if (!hba->clk_scaling.is_allowed ||
	    ufshcd_wait_for_doorbell_clr(hba, timeout_us)) {
		ret = -EBUSY;
		up_write(&hba->clk_scaling_lock);
		mutex_unlock(&hba->wb_mutex);
		ufshcd_scsi_unblock_requests(hba);
		goto out;
	}

	/* let's not get into low power until clock scaling is completed */
	ufshcd_hold(hba, false);

out:
	return ret;
}

static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool scale_up)
{
	up_write(&hba->clk_scaling_lock);

	/* Enable Write Booster if we have scaled up else disable it */
	if (ufshcd_enable_wb_if_scaling_up(hba) && !err)
		ufshcd_wb_toggle(hba, scale_up);

	mutex_unlock(&hba->wb_mutex);

	ufshcd_scsi_unblock_requests(hba);
	ufshcd_release(hba);
}

/**
 * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
 * @hba: per adapter instance
 * @scale_up: True for scaling up and false for scaling down
 *
 * Returns 0 for success,
 * Returns -EBUSY if scaling can't happen at this time
 * Returns non-zero for any other errors
 */
static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;

	ret = ufshcd_clock_scaling_prepare(hba, 1 * USEC_PER_SEC);
	if (ret)
		return ret;

	/* scale down the gear before scaling down clocks */
	if (!scale_up) {
		ret = ufshcd_scale_gear(hba, false);
		if (ret)
			goto out_unprepare;
	}

	ret = ufshcd_scale_clks(hba, scale_up);
	if (ret) {
		if (!scale_up)
			ufshcd_scale_gear(hba, true);
		goto out_unprepare;
	}

	/* scale up the gear after scaling up clocks */
	if (scale_up) {
		ret = ufshcd_scale_gear(hba, true);
		if (ret) {
			ufshcd_scale_clks(hba, false);
			goto out_unprepare;
		}
	}

out_unprepare:
	ufshcd_clock_scaling_unprepare(hba, ret, scale_up);
	return ret;
}

static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
					   clk_scaling.suspend_work);
	unsigned long irq_flags;

	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return;
	}
	hba->clk_scaling.is_suspended = true;
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	__ufshcd_suspend_clkscaling(hba);
}

static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
					   clk_scaling.resume_work);
	unsigned long irq_flags;

	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (!hba->clk_scaling.is_suspended) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return;
	}
	hba->clk_scaling.is_suspended = false;
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	devfreq_resume_device(hba->devfreq);
}
static int ufshcd_devfreq_target(struct device *dev,
				 unsigned long *freq, u32 flags)
{
	int ret = 0;
	struct ufs_hba *hba = dev_get_drvdata(dev);
	ktime_t start;
	bool scale_up, sched_clk_scaling_suspend_work = false;
	struct list_head *clk_list = &hba->clk_list_head;
	struct ufs_clk_info *clki;
	unsigned long irq_flags;

	/*
	 * Skip devfreq if UFS initialization is not finished.
	 * Otherwise ufs could be in an inconsistent state.
	 */
	if (!smp_load_acquire(&hba->logical_unit_scan_finished))
		return 0;

	if (!ufshcd_is_clkscaling_supported(hba))
		return -EINVAL;

	clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
	/* Override with the closest supported frequency */
	*freq = (unsigned long) clk_round_rate(clki->clk, *freq);
	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (ufshcd_eh_in_progress(hba)) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return 0;
	}

	if (!hba->clk_scaling.active_reqs)
		sched_clk_scaling_suspend_work = true;

	if (list_empty(clk_list)) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		goto out;
	}

	/* Decide based on the rounded-off frequency and update */
	scale_up = *freq == clki->max_freq;
	if (!scale_up)
		*freq = clki->min_freq;
	/* Update the frequency */
	if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		ret = 0;
		goto out; /* no state change required */
	}
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	start = ktime_get();
	ret = ufshcd_devfreq_scale(hba, scale_up);

	trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
		(scale_up ? "up" : "down"),
		ktime_to_us(ktime_sub(ktime_get(), start)), ret);

out:
	if (sched_clk_scaling_suspend_work)
		queue_work(hba->clk_scaling.workq,
			   &hba->clk_scaling.suspend_work);

	return ret;
}

static int ufshcd_devfreq_get_dev_status(struct device *dev,
		struct devfreq_dev_status *stat)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_clk_scaling *scaling = &hba->clk_scaling;
	unsigned long flags;
	struct list_head *clk_list = &hba->clk_list_head;
	struct ufs_clk_info *clki;
	ktime_t curr_t;

	if (!ufshcd_is_clkscaling_supported(hba))
		return -EINVAL;

	memset(stat, 0, sizeof(*stat));

	spin_lock_irqsave(hba->host->host_lock, flags);
	curr_t = ktime_get();
	if (!scaling->window_start_t)
		goto start_window;

	clki = list_first_entry(clk_list, struct ufs_clk_info, list);
	/*
	 * If current frequency is 0, then the ondemand governor considers
	 * there's no initial frequency set. And it always requests to set
	 * to max. frequency.
	 */
	stat->current_frequency = clki->curr_freq;
	if (scaling->is_busy_started)
		scaling->tot_busy_t += ktime_us_delta(curr_t,
				scaling->busy_start_t);

	stat->total_time = ktime_us_delta(curr_t, scaling->window_start_t);
	stat->busy_time = scaling->tot_busy_t;
start_window:
	scaling->window_start_t = curr_t;
	scaling->tot_busy_t = 0;

	if (scaling->active_reqs) {
		scaling->busy_start_t = curr_t;
		scaling->is_busy_started = true;
	} else {
		scaling->busy_start_t = 0;
		scaling->is_busy_started = false;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return 0;
}

static int ufshcd_devfreq_init(struct ufs_hba *hba)
{
	struct list_head *clk_list = &hba->clk_list_head;
	struct ufs_clk_info *clki;
	struct devfreq *devfreq;
	int ret;

	/* Skip devfreq if we don't have any clocks in the list */
	if (list_empty(clk_list))
		return 0;

	clki = list_first_entry(clk_list, struct ufs_clk_info, list);
	dev_pm_opp_add(hba->dev, clki->min_freq, 0);
	dev_pm_opp_add(hba->dev, clki->max_freq, 0);

	ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile,
					 &hba->vps->ondemand_data);
	devfreq = devfreq_add_device(hba->dev,
			&hba->vps->devfreq_profile,
			DEVFREQ_GOV_SIMPLE_ONDEMAND,
			&hba->vps->ondemand_data);
	if (IS_ERR(devfreq)) {
		ret = PTR_ERR(devfreq);
		dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);

		dev_pm_opp_remove(hba->dev, clki->min_freq);
		dev_pm_opp_remove(hba->dev, clki->max_freq);
		return ret;
	}

	hba->devfreq = devfreq;

	return 0;
}

static void ufshcd_devfreq_remove(struct ufs_hba *hba)
{
	struct list_head *clk_list = &hba->clk_list_head;
	struct ufs_clk_info *clki;

	if (!hba->devfreq)
		return;

	devfreq_remove_device(hba->devfreq);
	hba->devfreq = NULL;

	clki = list_first_entry(clk_list, struct ufs_clk_info, list);
	dev_pm_opp_remove(hba->dev, clki->min_freq);
	dev_pm_opp_remove(hba->dev, clki->max_freq);
}

static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
	unsigned long flags;

	devfreq_suspend_device(hba->devfreq);
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_scaling.window_start_t = 0;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}

static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
	unsigned long flags;
	bool suspend = false;

	cancel_work_sync(&hba->clk_scaling.suspend_work);
	cancel_work_sync(&hba->clk_scaling.resume_work);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (!hba->clk_scaling.is_suspended) {
		suspend = true;
		hba->clk_scaling.is_suspended = true;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (suspend)
		__ufshcd_suspend_clkscaling(hba);
}

static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
{
	unsigned long flags;
	bool resume = false;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_scaling.is_suspended) {
		resume = true;
		hba->clk_scaling.is_suspended = false;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (resume)
		devfreq_resume_device(hba->devfreq);
}
static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", hba->clk_scaling.is_enabled);
}

static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	u32 value;
	int err = 0;

	if (kstrtou32(buf, 0, &value))
		return -EINVAL;

	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		err = -EBUSY;
		goto out;
	}

	value = !!value;
	if (value == hba->clk_scaling.is_enabled)
		goto out;

	ufshcd_rpm_get_sync(hba);
	ufshcd_hold(hba, false);

	hba->clk_scaling.is_enabled = value;

	if (value) {
		ufshcd_resume_clkscaling(hba);
	} else {
		ufshcd_suspend_clkscaling(hba);
		err = ufshcd_devfreq_scale(hba, true);
		if (err)
			dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
					__func__, err);
	}

	ufshcd_release(hba);
	ufshcd_rpm_put_sync(hba);
out:
	up(&hba->host_sem);
	return err ? err : count;
}

static void ufshcd_init_clk_scaling_sysfs(struct ufs_hba *hba)
{
	hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
	hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
	sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
	hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
	hba->clk_scaling.enable_attr.attr.mode = 0644;
	if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
}

static void ufshcd_remove_clk_scaling_sysfs(struct ufs_hba *hba)
{
	if (hba->clk_scaling.enable_attr.attr.name)
		device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
}

static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
{
	char wq_name[sizeof("ufs_clkscaling_00")];

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	if (!hba->clk_scaling.min_gear)
		hba->clk_scaling.min_gear = UFS_HS_G1;

	INIT_WORK(&hba->clk_scaling.suspend_work,
		  ufshcd_clk_scaling_suspend_work);
	INIT_WORK(&hba->clk_scaling.resume_work,
		  ufshcd_clk_scaling_resume_work);

	snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
		 hba->host->host_no);
	hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);

	hba->clk_scaling.is_initialized = true;
}

static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
{
	if (!hba->clk_scaling.is_initialized)
		return;

	ufshcd_remove_clk_scaling_sysfs(hba);
	destroy_workqueue(hba->clk_scaling.workq);
	ufshcd_devfreq_remove(hba);
	hba->clk_scaling.is_initialized = false;
}

static void ufshcd_ungate_work(struct work_struct *work)
{
	int ret;
	unsigned long flags;
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
			clk_gating.ungate_work);

	cancel_delayed_work_sync(&hba->clk_gating.gate_work);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == CLKS_ON) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		goto unblock_reqs;
	}

	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_hba_vreg_set_hpm(hba);
	ufshcd_setup_clocks(hba, true);

	ufshcd_enable_irq(hba);

	/* Exit from hibern8 */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		/* Prevent gating in this path */
		hba->clk_gating.is_suspended = true;
		if (ufshcd_is_link_hibern8(hba)) {
			ret = ufshcd_uic_hibern8_exit(hba);
			if (ret)
				dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
					__func__, ret);
			else
				ufshcd_set_link_active(hba);
		}
		hba->clk_gating.is_suspended = false;
	}
unblock_reqs:
	ufshcd_scsi_unblock_requests(hba);
}

/**
 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
 * Also, exit from hibern8 mode and set the link as active.
 * @hba: per adapter instance
 * @async: This indicates whether caller should ungate clocks asynchronously.
 */
int ufshcd_hold(struct ufs_hba *hba, bool async)
{
	int rc = 0;
	bool flush_result;
	unsigned long flags;

	if (!ufshcd_is_clkgating_allowed(hba) ||
	    !hba->clk_gating.is_initialized)
		goto out;
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_gating.active_reqs++;

start:
	switch (hba->clk_gating.state) {
	case CLKS_ON:
		/*
		 * Wait for the ungate work to complete if in progress.
		 * Though the clocks may be in ON state, the link could
		 * still be in hibern8 state if hibern8 is allowed
		 * during clock gating.
		 * Make sure we exit hibern8 state also in addition to
		 * clocks being ON.
		 */
		if (ufshcd_can_hibern8_during_gating(hba) &&
		    ufshcd_is_link_hibern8(hba)) {
			if (async) {
				rc = -EAGAIN;
				hba->clk_gating.active_reqs--;
				break;
			}
			spin_unlock_irqrestore(hba->host->host_lock, flags);
			flush_result = flush_work(&hba->clk_gating.ungate_work);
			if (hba->clk_gating.is_suspended && !flush_result)
				goto out;
			spin_lock_irqsave(hba->host->host_lock, flags);
			goto start;
		}
		break;
	case REQ_CLKS_OFF:
		if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
			hba->clk_gating.state = CLKS_ON;
			trace_ufshcd_clk_gating(dev_name(hba->dev),
						hba->clk_gating.state);
			break;
		}
		/*
		 * If we are here, it means gating work is either done or
		 * currently running. Hence, fall through to cancel gating
		 * work and to enable clocks.
		 */
		fallthrough;
	case CLKS_OFF:
		hba->clk_gating.state = REQ_CLKS_ON;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
		if (queue_work(hba->clk_gating.clk_gating_workq,
			       &hba->clk_gating.ungate_work))
			ufshcd_scsi_block_requests(hba);
		/*
		 * fall through to check if we should wait for this
		 * work to be done or not.
1843 */
1844 fallthrough;
1845 case REQ_CLKS_ON:
1846 if (async) {
1847 rc = -EAGAIN;
1848 hba->clk_gating.active_reqs--;
1849 break;
1850 }
1851
1852 spin_unlock_irqrestore(hba->host->host_lock, flags);
1853 flush_work(&hba->clk_gating.ungate_work);
1854 /* Make sure state is CLKS_ON before returning */
1855 spin_lock_irqsave(hba->host->host_lock, flags);
1856 goto start;
1857 default:
1858 dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
1859 __func__, hba->clk_gating.state);
1860 break;
1861 }
1862 spin_unlock_irqrestore(hba->host->host_lock, flags);
1863 out:
1864 return rc;
1865 }
1866 EXPORT_SYMBOL_GPL(ufshcd_hold);
1867
1868 static void ufshcd_gate_work(struct work_struct *work)
1869 {
1870 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1871 clk_gating.gate_work.work);
1872 unsigned long flags;
1873 int ret;
1874
1875 spin_lock_irqsave(hba->host->host_lock, flags);
1876 /*
1877 * If we are here while this work is being cancelled, the gating state
1878 * will have been marked as REQ_CLKS_ON. In that case save time by
1879 * skipping the gating work and exiting after changing the clock
1880 * state to CLKS_ON.
1881 */
1882 if (hba->clk_gating.is_suspended ||
1883 (hba->clk_gating.state != REQ_CLKS_OFF)) {
1884 hba->clk_gating.state = CLKS_ON;
1885 trace_ufshcd_clk_gating(dev_name(hba->dev),
1886 hba->clk_gating.state);
1887 goto rel_lock;
1888 }
1889
1890 if (hba->clk_gating.active_reqs
1891 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1892 || hba->outstanding_reqs || hba->outstanding_tasks
1893 || hba->active_uic_cmd || hba->uic_async_done)
1894 goto rel_lock;
1895
1896 spin_unlock_irqrestore(hba->host->host_lock, flags);
1897
1898 /* put the link into hibern8 mode before turning off clocks */
1899 if (ufshcd_can_hibern8_during_gating(hba)) {
1900 ret = ufshcd_uic_hibern8_enter(hba);
1901 if (ret) {
1902 hba->clk_gating.state = CLKS_ON;
1903 dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
1904 __func__, ret);
1905 trace_ufshcd_clk_gating(dev_name(hba->dev),
1906 hba->clk_gating.state);
1907 goto out;
1908 }
1909 ufshcd_set_link_hibern8(hba);
1910 }
1911
1912 ufshcd_disable_irq(hba);
1913
1914 ufshcd_setup_clocks(hba, false);
1915
1916 /* Put the host controller in low power mode if possible */
1917 ufshcd_hba_vreg_set_lpm(hba);
1918 /*
1919 * If we are here while this work is being cancelled, the gating state
1920 * will have been marked as REQ_CLKS_ON. In that case keep the state
1921 * as REQ_CLKS_ON, which still implies that the clocks are off
1922 * and a request to turn them on is pending. This keeps the
1923 * state machine intact and ultimately prevents the cancel work
1924 * from running multiple times when new requests arrive before
1925 * the current cancel work is done.
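 * (A pending REQ_CLKS_ON also means ufshcd_hold() has already queued the
 * ungate work, which will turn the clocks back on after this work returns.)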
1926 */ 1927 spin_lock_irqsave(hba->host->host_lock, flags); 1928 if (hba->clk_gating.state == REQ_CLKS_OFF) { 1929 hba->clk_gating.state = CLKS_OFF; 1930 trace_ufshcd_clk_gating(dev_name(hba->dev), 1931 hba->clk_gating.state); 1932 } 1933 rel_lock: 1934 spin_unlock_irqrestore(hba->host->host_lock, flags); 1935 out: 1936 return; 1937 } 1938 1939 /* host lock must be held before calling this variant */ 1940 static void __ufshcd_release(struct ufs_hba *hba) 1941 { 1942 if (!ufshcd_is_clkgating_allowed(hba)) 1943 return; 1944 1945 hba->clk_gating.active_reqs--; 1946 1947 if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended || 1948 hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL || 1949 hba->outstanding_tasks || !hba->clk_gating.is_initialized || 1950 hba->active_uic_cmd || hba->uic_async_done || 1951 hba->clk_gating.state == CLKS_OFF) 1952 return; 1953 1954 hba->clk_gating.state = REQ_CLKS_OFF; 1955 trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state); 1956 queue_delayed_work(hba->clk_gating.clk_gating_workq, 1957 &hba->clk_gating.gate_work, 1958 msecs_to_jiffies(hba->clk_gating.delay_ms)); 1959 } 1960 1961 void ufshcd_release(struct ufs_hba *hba) 1962 { 1963 unsigned long flags; 1964 1965 spin_lock_irqsave(hba->host->host_lock, flags); 1966 __ufshcd_release(hba); 1967 spin_unlock_irqrestore(hba->host->host_lock, flags); 1968 } 1969 EXPORT_SYMBOL_GPL(ufshcd_release); 1970 1971 static ssize_t ufshcd_clkgate_delay_show(struct device *dev, 1972 struct device_attribute *attr, char *buf) 1973 { 1974 struct ufs_hba *hba = dev_get_drvdata(dev); 1975 1976 return sysfs_emit(buf, "%lu\n", hba->clk_gating.delay_ms); 1977 } 1978 1979 void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value) 1980 { 1981 struct ufs_hba *hba = dev_get_drvdata(dev); 1982 unsigned long flags; 1983 1984 spin_lock_irqsave(hba->host->host_lock, flags); 1985 hba->clk_gating.delay_ms = value; 1986 spin_unlock_irqrestore(hba->host->host_lock, flags); 1987 } 1988 EXPORT_SYMBOL_GPL(ufshcd_clkgate_delay_set); 1989 1990 static ssize_t ufshcd_clkgate_delay_store(struct device *dev, 1991 struct device_attribute *attr, const char *buf, size_t count) 1992 { 1993 unsigned long value; 1994 1995 if (kstrtoul(buf, 0, &value)) 1996 return -EINVAL; 1997 1998 ufshcd_clkgate_delay_set(dev, value); 1999 return count; 2000 } 2001 2002 static ssize_t ufshcd_clkgate_enable_show(struct device *dev, 2003 struct device_attribute *attr, char *buf) 2004 { 2005 struct ufs_hba *hba = dev_get_drvdata(dev); 2006 2007 return sysfs_emit(buf, "%d\n", hba->clk_gating.is_enabled); 2008 } 2009 2010 static ssize_t ufshcd_clkgate_enable_store(struct device *dev, 2011 struct device_attribute *attr, const char *buf, size_t count) 2012 { 2013 struct ufs_hba *hba = dev_get_drvdata(dev); 2014 unsigned long flags; 2015 u32 value; 2016 2017 if (kstrtou32(buf, 0, &value)) 2018 return -EINVAL; 2019 2020 value = !!value; 2021 2022 spin_lock_irqsave(hba->host->host_lock, flags); 2023 if (value == hba->clk_gating.is_enabled) 2024 goto out; 2025 2026 if (value) 2027 __ufshcd_release(hba); 2028 else 2029 hba->clk_gating.active_reqs++; 2030 2031 hba->clk_gating.is_enabled = value; 2032 out: 2033 spin_unlock_irqrestore(hba->host->host_lock, flags); 2034 return count; 2035 } 2036 2037 static void ufshcd_init_clk_gating_sysfs(struct ufs_hba *hba) 2038 { 2039 hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show; 2040 hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store; 2041 sysfs_attr_init(&hba->clk_gating.delay_attr.attr); 2042 
hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms"; 2043 hba->clk_gating.delay_attr.attr.mode = 0644; 2044 if (device_create_file(hba->dev, &hba->clk_gating.delay_attr)) 2045 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n"); 2046 2047 hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show; 2048 hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store; 2049 sysfs_attr_init(&hba->clk_gating.enable_attr.attr); 2050 hba->clk_gating.enable_attr.attr.name = "clkgate_enable"; 2051 hba->clk_gating.enable_attr.attr.mode = 0644; 2052 if (device_create_file(hba->dev, &hba->clk_gating.enable_attr)) 2053 dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n"); 2054 } 2055 2056 static void ufshcd_remove_clk_gating_sysfs(struct ufs_hba *hba) 2057 { 2058 if (hba->clk_gating.delay_attr.attr.name) 2059 device_remove_file(hba->dev, &hba->clk_gating.delay_attr); 2060 if (hba->clk_gating.enable_attr.attr.name) 2061 device_remove_file(hba->dev, &hba->clk_gating.enable_attr); 2062 } 2063 2064 static void ufshcd_init_clk_gating(struct ufs_hba *hba) 2065 { 2066 char wq_name[sizeof("ufs_clk_gating_00")]; 2067 2068 if (!ufshcd_is_clkgating_allowed(hba)) 2069 return; 2070 2071 hba->clk_gating.state = CLKS_ON; 2072 2073 hba->clk_gating.delay_ms = 150; 2074 INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work); 2075 INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work); 2076 2077 snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d", 2078 hba->host->host_no); 2079 hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name, 2080 WQ_MEM_RECLAIM | WQ_HIGHPRI); 2081 2082 ufshcd_init_clk_gating_sysfs(hba); 2083 2084 hba->clk_gating.is_enabled = true; 2085 hba->clk_gating.is_initialized = true; 2086 } 2087 2088 static void ufshcd_exit_clk_gating(struct ufs_hba *hba) 2089 { 2090 if (!hba->clk_gating.is_initialized) 2091 return; 2092 2093 ufshcd_remove_clk_gating_sysfs(hba); 2094 2095 /* Ungate the clock if necessary. 
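 * ufshcd_hold() below brings the clocks back on synchronously; clearing
 * is_initialized before calling ufshcd_release() then keeps the release
 * from queueing new gate work, so the clocks stay ungated from here on.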
*/ 2096 ufshcd_hold(hba, false); 2097 hba->clk_gating.is_initialized = false; 2098 ufshcd_release(hba); 2099 2100 destroy_workqueue(hba->clk_gating.clk_gating_workq); 2101 } 2102 2103 static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba) 2104 { 2105 bool queue_resume_work = false; 2106 ktime_t curr_t = ktime_get(); 2107 unsigned long flags; 2108 2109 if (!ufshcd_is_clkscaling_supported(hba)) 2110 return; 2111 2112 spin_lock_irqsave(hba->host->host_lock, flags); 2113 if (!hba->clk_scaling.active_reqs++) 2114 queue_resume_work = true; 2115 2116 if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) { 2117 spin_unlock_irqrestore(hba->host->host_lock, flags); 2118 return; 2119 } 2120 2121 if (queue_resume_work) 2122 queue_work(hba->clk_scaling.workq, 2123 &hba->clk_scaling.resume_work); 2124 2125 if (!hba->clk_scaling.window_start_t) { 2126 hba->clk_scaling.window_start_t = curr_t; 2127 hba->clk_scaling.tot_busy_t = 0; 2128 hba->clk_scaling.is_busy_started = false; 2129 } 2130 2131 if (!hba->clk_scaling.is_busy_started) { 2132 hba->clk_scaling.busy_start_t = curr_t; 2133 hba->clk_scaling.is_busy_started = true; 2134 } 2135 spin_unlock_irqrestore(hba->host->host_lock, flags); 2136 } 2137 2138 static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba) 2139 { 2140 struct ufs_clk_scaling *scaling = &hba->clk_scaling; 2141 unsigned long flags; 2142 2143 if (!ufshcd_is_clkscaling_supported(hba)) 2144 return; 2145 2146 spin_lock_irqsave(hba->host->host_lock, flags); 2147 hba->clk_scaling.active_reqs--; 2148 if (!scaling->active_reqs && scaling->is_busy_started) { 2149 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(), 2150 scaling->busy_start_t)); 2151 scaling->busy_start_t = 0; 2152 scaling->is_busy_started = false; 2153 } 2154 spin_unlock_irqrestore(hba->host->host_lock, flags); 2155 } 2156 2157 static inline int ufshcd_monitor_opcode2dir(u8 opcode) 2158 { 2159 if (opcode == READ_6 || opcode == READ_10 || opcode == READ_16) 2160 return READ; 2161 else if (opcode == WRITE_6 || opcode == WRITE_10 || opcode == WRITE_16) 2162 return WRITE; 2163 else 2164 return -EINVAL; 2165 } 2166 2167 static inline bool ufshcd_should_inform_monitor(struct ufs_hba *hba, 2168 struct ufshcd_lrb *lrbp) 2169 { 2170 const struct ufs_hba_monitor *m = &hba->monitor; 2171 2172 return (m->enabled && lrbp && lrbp->cmd && 2173 (!m->chunk_size || m->chunk_size == lrbp->cmd->sdb.length) && 2174 ktime_before(hba->monitor.enabled_ts, lrbp->issue_time_stamp)); 2175 } 2176 2177 static void ufshcd_start_monitor(struct ufs_hba *hba, 2178 const struct ufshcd_lrb *lrbp) 2179 { 2180 int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd); 2181 unsigned long flags; 2182 2183 spin_lock_irqsave(hba->host->host_lock, flags); 2184 if (dir >= 0 && hba->monitor.nr_queued[dir]++ == 0) 2185 hba->monitor.busy_start_ts[dir] = ktime_get(); 2186 spin_unlock_irqrestore(hba->host->host_lock, flags); 2187 } 2188 2189 static void ufshcd_update_monitor(struct ufs_hba *hba, const struct ufshcd_lrb *lrbp) 2190 { 2191 int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd); 2192 unsigned long flags; 2193 2194 spin_lock_irqsave(hba->host->host_lock, flags); 2195 if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) { 2196 const struct request *req = scsi_cmd_to_rq(lrbp->cmd); 2197 struct ufs_hba_monitor *m = &hba->monitor; 2198 ktime_t now, inc, lat; 2199 2200 now = lrbp->compl_time_stamp; 2201 inc = ktime_sub(now, m->busy_start_ts[dir]); 2202 m->total_busy[dir] = ktime_add(m->total_busy[dir], inc); 2203 m->nr_sec_rw[dir] += 
blk_rq_sectors(req); 2204 2205 /* Update latencies */ 2206 m->nr_req[dir]++; 2207 lat = ktime_sub(now, lrbp->issue_time_stamp); 2208 m->lat_sum[dir] += lat; 2209 if (m->lat_max[dir] < lat || !m->lat_max[dir]) 2210 m->lat_max[dir] = lat; 2211 if (m->lat_min[dir] > lat || !m->lat_min[dir]) 2212 m->lat_min[dir] = lat; 2213 2214 m->nr_queued[dir]--; 2215 /* Push forward the busy start of monitor */ 2216 m->busy_start_ts[dir] = now; 2217 } 2218 spin_unlock_irqrestore(hba->host->host_lock, flags); 2219 } 2220 2221 /** 2222 * ufshcd_send_command - Send SCSI or device management commands 2223 * @hba: per adapter instance 2224 * @task_tag: Task tag of the command 2225 * @hwq: pointer to hardware queue instance 2226 */ 2227 static inline 2228 void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag, 2229 struct ufs_hw_queue *hwq) 2230 { 2231 struct ufshcd_lrb *lrbp = &hba->lrb[task_tag]; 2232 unsigned long flags; 2233 2234 lrbp->issue_time_stamp = ktime_get(); 2235 lrbp->issue_time_stamp_local_clock = local_clock(); 2236 lrbp->compl_time_stamp = ktime_set(0, 0); 2237 lrbp->compl_time_stamp_local_clock = 0; 2238 ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND); 2239 ufshcd_clk_scaling_start_busy(hba); 2240 if (unlikely(ufshcd_should_inform_monitor(hba, lrbp))) 2241 ufshcd_start_monitor(hba, lrbp); 2242 2243 if (is_mcq_enabled(hba)) { 2244 int utrd_size = sizeof(struct utp_transfer_req_desc); 2245 2246 spin_lock(&hwq->sq_lock); 2247 memcpy(hwq->sqe_base_addr + (hwq->sq_tail_slot * utrd_size), 2248 lrbp->utr_descriptor_ptr, utrd_size); 2249 ufshcd_inc_sq_tail(hwq); 2250 spin_unlock(&hwq->sq_lock); 2251 } else { 2252 spin_lock_irqsave(&hba->outstanding_lock, flags); 2253 if (hba->vops && hba->vops->setup_xfer_req) 2254 hba->vops->setup_xfer_req(hba, lrbp->task_tag, 2255 !!lrbp->cmd); 2256 __set_bit(lrbp->task_tag, &hba->outstanding_reqs); 2257 ufshcd_writel(hba, 1 << lrbp->task_tag, 2258 REG_UTP_TRANSFER_REQ_DOOR_BELL); 2259 spin_unlock_irqrestore(&hba->outstanding_lock, flags); 2260 } 2261 } 2262 2263 /** 2264 * ufshcd_copy_sense_data - Copy sense data in case of check condition 2265 * @lrbp: pointer to local reference block 2266 */ 2267 static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp) 2268 { 2269 u8 *const sense_buffer = lrbp->cmd->sense_buffer; 2270 int len; 2271 2272 if (sense_buffer && 2273 ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) { 2274 int len_to_copy; 2275 2276 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len); 2277 len_to_copy = min_t(int, UFS_SENSE_SIZE, len); 2278 2279 memcpy(sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data, 2280 len_to_copy); 2281 } 2282 } 2283 2284 /** 2285 * ufshcd_copy_query_response() - Copy the Query Response and the data 2286 * descriptor 2287 * @hba: per adapter instance 2288 * @lrbp: pointer to local reference block 2289 */ 2290 static 2291 int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) 2292 { 2293 struct ufs_query_res *query_res = &hba->dev_cmd.query.response; 2294 2295 memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE); 2296 2297 /* Get the descriptor */ 2298 if (hba->dev_cmd.query.descriptor && 2299 lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) { 2300 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + 2301 GENERAL_UPIU_REQUEST_SIZE; 2302 u16 resp_len; 2303 u16 buf_len; 2304 2305 /* data segment length */ 2306 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) & 2307 MASK_QUERY_DATA_SEG_LEN; 2308 buf_len = be16_to_cpu( 2309 
hba->dev_cmd.query.request.upiu_req.length); 2310 if (likely(buf_len >= resp_len)) { 2311 memcpy(hba->dev_cmd.query.descriptor, descp, resp_len); 2312 } else { 2313 dev_warn(hba->dev, 2314 "%s: rsp size %d is bigger than buffer size %d", 2315 __func__, resp_len, buf_len); 2316 return -EINVAL; 2317 } 2318 } 2319 2320 return 0; 2321 } 2322 2323 /** 2324 * ufshcd_hba_capabilities - Read controller capabilities 2325 * @hba: per adapter instance 2326 * 2327 * Return: 0 on success, negative on error. 2328 */ 2329 static inline int ufshcd_hba_capabilities(struct ufs_hba *hba) 2330 { 2331 int err; 2332 2333 hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES); 2334 if (hba->quirks & UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS) 2335 hba->capabilities &= ~MASK_64_ADDRESSING_SUPPORT; 2336 2337 /* nutrs and nutmrs are 0 based values */ 2338 hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1; 2339 hba->nutmrs = 2340 ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1; 2341 hba->reserved_slot = hba->nutrs - 1; 2342 2343 /* Read crypto capabilities */ 2344 err = ufshcd_hba_init_crypto_capabilities(hba); 2345 if (err) 2346 dev_err(hba->dev, "crypto setup failed\n"); 2347 2348 hba->mcq_sup = FIELD_GET(MASK_MCQ_SUPPORT, hba->capabilities); 2349 if (!hba->mcq_sup) 2350 return err; 2351 2352 hba->mcq_capabilities = ufshcd_readl(hba, REG_MCQCAP); 2353 hba->ext_iid_sup = FIELD_GET(MASK_EXT_IID_SUPPORT, 2354 hba->mcq_capabilities); 2355 2356 return err; 2357 } 2358 2359 /** 2360 * ufshcd_ready_for_uic_cmd - Check if controller is ready 2361 * to accept UIC commands 2362 * @hba: per adapter instance 2363 * Return true on success, else false 2364 */ 2365 static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba) 2366 { 2367 return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY; 2368 } 2369 2370 /** 2371 * ufshcd_get_upmcrs - Get the power mode change request status 2372 * @hba: Pointer to adapter instance 2373 * 2374 * This function gets the UPMCRS field of HCS register 2375 * Returns value of UPMCRS field 2376 */ 2377 static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba) 2378 { 2379 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7; 2380 } 2381 2382 /** 2383 * ufshcd_dispatch_uic_cmd - Dispatch an UIC command to the Unipro layer 2384 * @hba: per adapter instance 2385 * @uic_cmd: UIC command 2386 */ 2387 static inline void 2388 ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) 2389 { 2390 lockdep_assert_held(&hba->uic_cmd_mutex); 2391 2392 WARN_ON(hba->active_uic_cmd); 2393 2394 hba->active_uic_cmd = uic_cmd; 2395 2396 /* Write Args */ 2397 ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1); 2398 ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2); 2399 ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3); 2400 2401 ufshcd_add_uic_command_trace(hba, uic_cmd, UFS_CMD_SEND); 2402 2403 /* Write UIC Cmd */ 2404 ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK, 2405 REG_UIC_COMMAND); 2406 } 2407 2408 /** 2409 * ufshcd_wait_for_uic_cmd - Wait for completion of an UIC command 2410 * @hba: per adapter instance 2411 * @uic_cmd: UIC command 2412 * 2413 * Returns 0 only if success. 
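 * On completion the UIC command result is taken from argument2. If no
 * completion arrives within UIC_CMD_TIMEOUT, -ETIMEDOUT is returned unless
 * the interrupt handler already marked the command as completed, in which
 * case the result from argument2 is used after all.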
2414 */ 2415 static int 2416 ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) 2417 { 2418 int ret; 2419 unsigned long flags; 2420 2421 lockdep_assert_held(&hba->uic_cmd_mutex); 2422 2423 if (wait_for_completion_timeout(&uic_cmd->done, 2424 msecs_to_jiffies(UIC_CMD_TIMEOUT))) { 2425 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT; 2426 } else { 2427 ret = -ETIMEDOUT; 2428 dev_err(hba->dev, 2429 "uic cmd 0x%x with arg3 0x%x completion timeout\n", 2430 uic_cmd->command, uic_cmd->argument3); 2431 2432 if (!uic_cmd->cmd_active) { 2433 dev_err(hba->dev, "%s: UIC cmd has been completed, return the result\n", 2434 __func__); 2435 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT; 2436 } 2437 } 2438 2439 spin_lock_irqsave(hba->host->host_lock, flags); 2440 hba->active_uic_cmd = NULL; 2441 spin_unlock_irqrestore(hba->host->host_lock, flags); 2442 2443 return ret; 2444 } 2445 2446 /** 2447 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result 2448 * @hba: per adapter instance 2449 * @uic_cmd: UIC command 2450 * @completion: initialize the completion only if this is set to true 2451 * 2452 * Returns 0 only if success. 2453 */ 2454 static int 2455 __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd, 2456 bool completion) 2457 { 2458 lockdep_assert_held(&hba->uic_cmd_mutex); 2459 lockdep_assert_held(hba->host->host_lock); 2460 2461 if (!ufshcd_ready_for_uic_cmd(hba)) { 2462 dev_err(hba->dev, 2463 "Controller not ready to accept UIC commands\n"); 2464 return -EIO; 2465 } 2466 2467 if (completion) 2468 init_completion(&uic_cmd->done); 2469 2470 uic_cmd->cmd_active = 1; 2471 ufshcd_dispatch_uic_cmd(hba, uic_cmd); 2472 2473 return 0; 2474 } 2475 2476 /** 2477 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result 2478 * @hba: per adapter instance 2479 * @uic_cmd: UIC command 2480 * 2481 * Returns 0 only if success. 
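 * Controllers with UFSHCD_QUIRK_BROKEN_UIC_CMD skip the command entirely and
 * report success.
 *
 * Illustrative usage (sketch of a DME_GET issued through this helper):
 *	struct uic_command uic_cmd = {
 *		.command = UIC_CMD_DME_GET,
 *		.argument1 = UIC_ARG_MIB(PA_GRANULARITY),
 *	};
 *	err = ufshcd_send_uic_cmd(hba, &uic_cmd);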
*/
2483 int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2484 {
2485 int ret;
2486 unsigned long flags;
2487
2488 if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
2489 return 0;
2490
2491 ufshcd_hold(hba, false);
2492 mutex_lock(&hba->uic_cmd_mutex);
2493 ufshcd_add_delay_before_dme_cmd(hba);
2494
2495 spin_lock_irqsave(hba->host->host_lock, flags);
2496 ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
2497 spin_unlock_irqrestore(hba->host->host_lock, flags);
2498 if (!ret)
2499 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
2500
2501 mutex_unlock(&hba->uic_cmd_mutex);
2502
2503 ufshcd_release(hba);
2504 return ret;
2505 }
2506
2507 /**
2508 * ufshcd_sgl_to_prdt - SG list to PRDT (Physical Region Description Table, 4DW format)
2509 * @hba: per-adapter instance
2510 * @lrbp: pointer to local reference block
2511 * @sg_entries: The number of sg entries actually used
2512 * @sg_list: Pointer to SG list
2513 */
2514 static void ufshcd_sgl_to_prdt(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int sg_entries,
2515 struct scatterlist *sg_list)
2516 {
2517 struct ufshcd_sg_entry *prd;
2518 struct scatterlist *sg;
2519 int i;
2520
2521 if (sg_entries) {
2522
2523 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
2524 lrbp->utr_descriptor_ptr->prd_table_length =
2525 cpu_to_le16(sg_entries * ufshcd_sg_entry_size(hba));
2526 else
2527 lrbp->utr_descriptor_ptr->prd_table_length = cpu_to_le16(sg_entries);
2528
2529 prd = lrbp->ucd_prdt_ptr;
2530
2531 for_each_sg(sg_list, sg, sg_entries, i) {
2532 const unsigned int len = sg_dma_len(sg);
2533
2534 /*
2535 * From the UFSHCI spec: "Data Byte Count (DBC): A '0'
2536 * based value that indicates the length, in bytes, of
2537 * the data block. A maximum of length of 256KB may
2538 * exist for any entry. Bits 1:0 of this field shall be
2539 * 11b to indicate Dword granularity. A value of '3'
2540 * indicates 4 bytes, '7' indicates 8 bytes, etc."
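 * Since the DMA segment lengths used here are padded to dword multiples,
 * the (len - 1) value programmed below ends in 11b as required.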
*/
2542 WARN_ONCE(len > 256 * 1024, "len = %#x\n", len);
2543 prd->size = cpu_to_le32(len - 1);
2544 prd->addr = cpu_to_le64(sg->dma_address);
2545 prd->reserved = 0;
2546 prd = (void *)prd + ufshcd_sg_entry_size(hba);
2547 }
2548 } else {
2549 lrbp->utr_descriptor_ptr->prd_table_length = 0;
2550 }
2551 }
2552
2553 /**
2554 * ufshcd_map_sg - Map scatter-gather list to prdt
2555 * @hba: per adapter instance
2556 * @lrbp: pointer to local reference block
2557 *
2558 * Returns 0 in case of success, non-zero value in case of failure
2559 */
2560 static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2561 {
2562 struct scsi_cmnd *cmd = lrbp->cmd;
2563 int sg_segments = scsi_dma_map(cmd);
2564
2565 if (sg_segments < 0)
2566 return sg_segments;
2567
2568 ufshcd_sgl_to_prdt(hba, lrbp, sg_segments, scsi_sglist(cmd));
2569
2570 return 0;
2571 }
2572
2573 /**
2574 * ufshcd_enable_intr - enable interrupts
2575 * @hba: per adapter instance
2576 * @intrs: interrupt bits
2577 */
2578 static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
2579 {
2580 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2581
2582 if (hba->ufs_version == ufshci_version(1, 0)) {
2583 u32 rw;
2584 rw = set & INTERRUPT_MASK_RW_VER_10;
2585 set = rw | ((set ^ intrs) & intrs);
2586 } else {
2587 set |= intrs;
2588 }
2589
2590 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2591 }
2592
2593 /**
2594 * ufshcd_disable_intr - disable interrupts
2595 * @hba: per adapter instance
2596 * @intrs: interrupt bits
2597 */
2598 static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
2599 {
2600 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2601
2602 if (hba->ufs_version == ufshci_version(1, 0)) {
2603 u32 rw;
2604 rw = (set & INTERRUPT_MASK_RW_VER_10) &
2605 ~(intrs & INTERRUPT_MASK_RW_VER_10);
2606 set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
2607
2608 } else {
2609 set &= ~intrs;
2610 }
2611
2612 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2613 }
2614
2615 /**
2616 * ufshcd_prepare_req_desc_hdr - Fill the UTP Transfer request descriptor
2617 * header according to the request
2618 * @lrbp: pointer to local reference block
2619 * @upiu_flags: flags required in the header
2620 * @cmd_dir: request's data direction
2621 * @ehs_length: Total EHS Length (in 32-byte units) of all Extra Header Segments
2622 */
2623 static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp, u8 *upiu_flags,
2624 enum dma_data_direction cmd_dir, int ehs_length)
2625 {
2626 struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
2627 u32 data_direction;
2628 u32 dword_0;
2629 u32 dword_1 = 0;
2630 u32 dword_3 = 0;
2631
2632 if (cmd_dir == DMA_FROM_DEVICE) {
2633 data_direction = UTP_DEVICE_TO_HOST;
2634 *upiu_flags = UPIU_CMD_FLAGS_READ;
2635 } else if (cmd_dir == DMA_TO_DEVICE) {
2636 data_direction = UTP_HOST_TO_DEVICE;
2637 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
2638 } else {
2639 data_direction = UTP_NO_DATA_TRANSFER;
2640 *upiu_flags = UPIU_CMD_FLAGS_NONE;
2641 }
2642
2643 dword_0 = data_direction | (lrbp->command_type << UPIU_COMMAND_TYPE_OFFSET) |
2644 ehs_length << 8;
2645 if (lrbp->intr_cmd)
2646 dword_0 |= UTP_REQ_DESC_INT_CMD;
2647
2648 /* Prepare crypto related dwords */
2649 ufshcd_prepare_req_desc_hdr_crypto(lrbp, &dword_0, &dword_1, &dword_3);
2650
2651 /* Transfer request descriptor header fields */
2652 req_desc->header.dword_0 = cpu_to_le32(dword_0);
2653 req_desc->header.dword_1 = cpu_to_le32(dword_1);
2654 /*
2655 * Assign an invalid value for the command status.
Controller 2656 * updates OCS on command completion, with the command 2657 * status 2658 */ 2659 req_desc->header.dword_2 = 2660 cpu_to_le32(OCS_INVALID_COMMAND_STATUS); 2661 req_desc->header.dword_3 = cpu_to_le32(dword_3); 2662 2663 req_desc->prd_table_length = 0; 2664 } 2665 2666 /** 2667 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc, 2668 * for scsi commands 2669 * @lrbp: local reference block pointer 2670 * @upiu_flags: flags 2671 */ 2672 static 2673 void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags) 2674 { 2675 struct scsi_cmnd *cmd = lrbp->cmd; 2676 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr; 2677 unsigned short cdb_len; 2678 2679 /* command descriptor fields */ 2680 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD( 2681 UPIU_TRANSACTION_COMMAND, upiu_flags, 2682 lrbp->lun, lrbp->task_tag); 2683 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD( 2684 UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0); 2685 2686 /* Total EHS length and Data segment length will be zero */ 2687 ucd_req_ptr->header.dword_2 = 0; 2688 2689 ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length); 2690 2691 cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE); 2692 memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE); 2693 memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len); 2694 2695 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); 2696 } 2697 2698 /** 2699 * ufshcd_prepare_utp_query_req_upiu() - fill the utp_transfer_req_desc for query request 2700 * @hba: UFS hba 2701 * @lrbp: local reference block pointer 2702 * @upiu_flags: flags 2703 */ 2704 static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba, 2705 struct ufshcd_lrb *lrbp, u8 upiu_flags) 2706 { 2707 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr; 2708 struct ufs_query *query = &hba->dev_cmd.query; 2709 u16 len = be16_to_cpu(query->request.upiu_req.length); 2710 2711 /* Query request header */ 2712 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD( 2713 UPIU_TRANSACTION_QUERY_REQ, upiu_flags, 2714 lrbp->lun, lrbp->task_tag); 2715 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD( 2716 0, query->request.query_func, 0, 0); 2717 2718 /* Data segment length only need for WRITE_DESC */ 2719 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC) 2720 ucd_req_ptr->header.dword_2 = 2721 UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len); 2722 else 2723 ucd_req_ptr->header.dword_2 = 0; 2724 2725 /* Copy the Query Request buffer as is */ 2726 memcpy(&ucd_req_ptr->qr, &query->request.upiu_req, 2727 QUERY_OSF_SIZE); 2728 2729 /* Copy the Descriptor */ 2730 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC) 2731 memcpy(ucd_req_ptr + 1, query->descriptor, len); 2732 2733 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); 2734 } 2735 2736 static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp) 2737 { 2738 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr; 2739 2740 memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req)); 2741 2742 /* command descriptor fields */ 2743 ucd_req_ptr->header.dword_0 = 2744 UPIU_HEADER_DWORD( 2745 UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag); 2746 /* clear rest of the fields of basic header */ 2747 ucd_req_ptr->header.dword_1 = 0; 2748 ucd_req_ptr->header.dword_2 = 0; 2749 2750 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); 2751 } 2752 2753 /** 2754 * ufshcd_compose_devman_upiu - UFS Protocol Information Unit(UPIU) 2755 * for Device Management Purposes 2756 * @hba: per adapter instance 2757 * @lrbp: 
pointer to local reference block 2758 */ 2759 static int ufshcd_compose_devman_upiu(struct ufs_hba *hba, 2760 struct ufshcd_lrb *lrbp) 2761 { 2762 u8 upiu_flags; 2763 int ret = 0; 2764 2765 if (hba->ufs_version <= ufshci_version(1, 1)) 2766 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE; 2767 else 2768 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE; 2769 2770 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0); 2771 if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY) 2772 ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags); 2773 else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP) 2774 ufshcd_prepare_utp_nop_upiu(lrbp); 2775 else 2776 ret = -EINVAL; 2777 2778 return ret; 2779 } 2780 2781 /** 2782 * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit(UPIU) 2783 * for SCSI Purposes 2784 * @hba: per adapter instance 2785 * @lrbp: pointer to local reference block 2786 */ 2787 static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) 2788 { 2789 u8 upiu_flags; 2790 int ret = 0; 2791 2792 if (hba->ufs_version <= ufshci_version(1, 1)) 2793 lrbp->command_type = UTP_CMD_TYPE_SCSI; 2794 else 2795 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE; 2796 2797 if (likely(lrbp->cmd)) { 2798 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, lrbp->cmd->sc_data_direction, 0); 2799 ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags); 2800 } else { 2801 ret = -EINVAL; 2802 } 2803 2804 return ret; 2805 } 2806 2807 /** 2808 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID 2809 * @upiu_wlun_id: UPIU W-LUN id 2810 * 2811 * Returns SCSI W-LUN id 2812 */ 2813 static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id) 2814 { 2815 return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE; 2816 } 2817 2818 static inline bool is_device_wlun(struct scsi_device *sdev) 2819 { 2820 return sdev->lun == 2821 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN); 2822 } 2823 2824 /* 2825 * Associate the UFS controller queue with the default and poll HCTX types. 2826 * Initialize the mq_map[] arrays. 
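 * Without MCQ a single hardware queue backs both the default and the poll
 * map (the poll map reuses queue_offset 0); with MCQ the per-type counts in
 * hba->nr_queues[] are used as configured by the MCQ setup code.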
2827 */ 2828 static void ufshcd_map_queues(struct Scsi_Host *shost) 2829 { 2830 struct ufs_hba *hba = shost_priv(shost); 2831 int i, queue_offset = 0; 2832 2833 if (!is_mcq_supported(hba)) { 2834 hba->nr_queues[HCTX_TYPE_DEFAULT] = 1; 2835 hba->nr_queues[HCTX_TYPE_READ] = 0; 2836 hba->nr_queues[HCTX_TYPE_POLL] = 1; 2837 hba->nr_hw_queues = 1; 2838 } 2839 2840 for (i = 0; i < shost->nr_maps; i++) { 2841 struct blk_mq_queue_map *map = &shost->tag_set.map[i]; 2842 2843 map->nr_queues = hba->nr_queues[i]; 2844 if (!map->nr_queues) 2845 continue; 2846 map->queue_offset = queue_offset; 2847 if (i == HCTX_TYPE_POLL && !is_mcq_supported(hba)) 2848 map->queue_offset = 0; 2849 2850 blk_mq_map_queues(map); 2851 queue_offset += map->nr_queues; 2852 } 2853 } 2854 2855 static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i) 2856 { 2857 struct utp_transfer_cmd_desc *cmd_descp = (void *)hba->ucdl_base_addr + 2858 i * sizeof_utp_transfer_cmd_desc(hba); 2859 struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr; 2860 dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr + 2861 i * sizeof_utp_transfer_cmd_desc(hba); 2862 u16 response_offset = offsetof(struct utp_transfer_cmd_desc, 2863 response_upiu); 2864 u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table); 2865 2866 lrb->utr_descriptor_ptr = utrdlp + i; 2867 lrb->utrd_dma_addr = hba->utrdl_dma_addr + 2868 i * sizeof(struct utp_transfer_req_desc); 2869 lrb->ucd_req_ptr = (struct utp_upiu_req *)cmd_descp->command_upiu; 2870 lrb->ucd_req_dma_addr = cmd_desc_element_addr; 2871 lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp->response_upiu; 2872 lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset; 2873 lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp->prd_table; 2874 lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset; 2875 } 2876 2877 /** 2878 * ufshcd_queuecommand - main entry point for SCSI requests 2879 * @host: SCSI host pointer 2880 * @cmd: command from SCSI Midlayer 2881 * 2882 * Returns 0 for success, non-zero in case of failure 2883 */ 2884 static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) 2885 { 2886 struct ufs_hba *hba = shost_priv(host); 2887 int tag = scsi_cmd_to_rq(cmd)->tag; 2888 struct ufshcd_lrb *lrbp; 2889 int err = 0; 2890 struct ufs_hw_queue *hwq = NULL; 2891 2892 WARN_ONCE(tag < 0 || tag >= hba->nutrs, "Invalid tag %d\n", tag); 2893 2894 /* 2895 * Allows the UFS error handler to wait for prior ufshcd_queuecommand() 2896 * calls. 2897 */ 2898 rcu_read_lock(); 2899 2900 switch (hba->ufshcd_state) { 2901 case UFSHCD_STATE_OPERATIONAL: 2902 break; 2903 case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL: 2904 /* 2905 * SCSI error handler can call ->queuecommand() while UFS error 2906 * handler is in progress. Error interrupts could change the 2907 * state from UFSHCD_STATE_RESET to 2908 * UFSHCD_STATE_EH_SCHEDULED_NON_FATAL. Prevent requests 2909 * being issued in that case. 2910 */ 2911 if (ufshcd_eh_in_progress(hba)) { 2912 err = SCSI_MLQUEUE_HOST_BUSY; 2913 goto out; 2914 } 2915 break; 2916 case UFSHCD_STATE_EH_SCHEDULED_FATAL: 2917 /* 2918 * pm_runtime_get_sync() is used at error handling preparation 2919 * stage. If a scsi cmd, e.g. the SSU cmd, is sent from hba's 2920 * PM ops, it can never be finished if we let SCSI layer keep 2921 * retrying it, which gets err handler stuck forever. 
Neither 2922 * can we let the scsi cmd pass through, because UFS is in bad 2923 * state, the scsi cmd may eventually time out, which will get 2924 * err handler blocked for too long. So, just fail the scsi cmd 2925 * sent from PM ops, err handler can recover PM error anyways. 2926 */ 2927 if (hba->pm_op_in_progress) { 2928 hba->force_reset = true; 2929 set_host_byte(cmd, DID_BAD_TARGET); 2930 scsi_done(cmd); 2931 goto out; 2932 } 2933 fallthrough; 2934 case UFSHCD_STATE_RESET: 2935 err = SCSI_MLQUEUE_HOST_BUSY; 2936 goto out; 2937 case UFSHCD_STATE_ERROR: 2938 set_host_byte(cmd, DID_ERROR); 2939 scsi_done(cmd); 2940 goto out; 2941 } 2942 2943 hba->req_abort_count = 0; 2944 2945 err = ufshcd_hold(hba, true); 2946 if (err) { 2947 err = SCSI_MLQUEUE_HOST_BUSY; 2948 goto out; 2949 } 2950 WARN_ON(ufshcd_is_clkgating_allowed(hba) && 2951 (hba->clk_gating.state != CLKS_ON)); 2952 2953 lrbp = &hba->lrb[tag]; 2954 WARN_ON(lrbp->cmd); 2955 lrbp->cmd = cmd; 2956 lrbp->task_tag = tag; 2957 lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun); 2958 lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba); 2959 2960 ufshcd_prepare_lrbp_crypto(scsi_cmd_to_rq(cmd), lrbp); 2961 2962 lrbp->req_abort_skip = false; 2963 2964 ufshpb_prep(hba, lrbp); 2965 2966 ufshcd_comp_scsi_upiu(hba, lrbp); 2967 2968 err = ufshcd_map_sg(hba, lrbp); 2969 if (err) { 2970 lrbp->cmd = NULL; 2971 ufshcd_release(hba); 2972 goto out; 2973 } 2974 2975 if (is_mcq_enabled(hba)) 2976 hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd)); 2977 2978 ufshcd_send_command(hba, tag, hwq); 2979 2980 out: 2981 rcu_read_unlock(); 2982 2983 if (ufs_trigger_eh()) { 2984 unsigned long flags; 2985 2986 spin_lock_irqsave(hba->host->host_lock, flags); 2987 ufshcd_schedule_eh_work(hba); 2988 spin_unlock_irqrestore(hba->host->host_lock, flags); 2989 } 2990 2991 return err; 2992 } 2993 2994 static int ufshcd_compose_dev_cmd(struct ufs_hba *hba, 2995 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag) 2996 { 2997 lrbp->cmd = NULL; 2998 lrbp->task_tag = tag; 2999 lrbp->lun = 0; /* device management cmd is not specific to any LUN */ 3000 lrbp->intr_cmd = true; /* No interrupt aggregation */ 3001 ufshcd_prepare_lrbp_crypto(NULL, lrbp); 3002 hba->dev_cmd.type = cmd_type; 3003 3004 return ufshcd_compose_devman_upiu(hba, lrbp); 3005 } 3006 3007 /* 3008 * Clear all the requests from the controller for which a bit has been set in 3009 * @mask and wait until the controller confirms that these requests have been 3010 * cleared. 3011 */ 3012 static int ufshcd_clear_cmds(struct ufs_hba *hba, u32 mask) 3013 { 3014 unsigned long flags; 3015 3016 /* clear outstanding transaction before retry */ 3017 spin_lock_irqsave(hba->host->host_lock, flags); 3018 ufshcd_utrl_clear(hba, mask); 3019 spin_unlock_irqrestore(hba->host->host_lock, flags); 3020 3021 /* 3022 * wait for h/w to clear corresponding bit in door-bell. 3023 * max. wait is 1 sec. 
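 * (the two trailing 1000s below are the polling interval in microseconds
 * and the timeout in milliseconds passed to ufshcd_wait_for_register())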
3024 */ 3025 return ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL, 3026 mask, ~mask, 1000, 1000); 3027 } 3028 3029 static int 3030 ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) 3031 { 3032 struct ufs_query_res *query_res = &hba->dev_cmd.query.response; 3033 3034 /* Get the UPIU response */ 3035 query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >> 3036 UPIU_RSP_CODE_OFFSET; 3037 return query_res->response; 3038 } 3039 3040 /** 3041 * ufshcd_dev_cmd_completion() - handles device management command responses 3042 * @hba: per adapter instance 3043 * @lrbp: pointer to local reference block 3044 */ 3045 static int 3046 ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) 3047 { 3048 int resp; 3049 int err = 0; 3050 3051 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0); 3052 resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr); 3053 3054 switch (resp) { 3055 case UPIU_TRANSACTION_NOP_IN: 3056 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) { 3057 err = -EINVAL; 3058 dev_err(hba->dev, "%s: unexpected response %x\n", 3059 __func__, resp); 3060 } 3061 break; 3062 case UPIU_TRANSACTION_QUERY_RSP: 3063 err = ufshcd_check_query_response(hba, lrbp); 3064 if (!err) 3065 err = ufshcd_copy_query_response(hba, lrbp); 3066 break; 3067 case UPIU_TRANSACTION_REJECT_UPIU: 3068 /* TODO: handle Reject UPIU Response */ 3069 err = -EPERM; 3070 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n", 3071 __func__); 3072 break; 3073 case UPIU_TRANSACTION_RESPONSE: 3074 if (hba->dev_cmd.type != DEV_CMD_TYPE_RPMB) { 3075 err = -EINVAL; 3076 dev_err(hba->dev, "%s: unexpected response %x\n", __func__, resp); 3077 } 3078 break; 3079 default: 3080 err = -EINVAL; 3081 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n", 3082 __func__, resp); 3083 break; 3084 } 3085 3086 return err; 3087 } 3088 3089 static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba, 3090 struct ufshcd_lrb *lrbp, int max_timeout) 3091 { 3092 unsigned long time_left = msecs_to_jiffies(max_timeout); 3093 unsigned long flags; 3094 bool pending; 3095 int err; 3096 3097 retry: 3098 time_left = wait_for_completion_timeout(hba->dev_cmd.complete, 3099 time_left); 3100 3101 if (likely(time_left)) { 3102 /* 3103 * The completion handler called complete() and the caller of 3104 * this function still owns the @lrbp tag so the code below does 3105 * not trigger any race conditions. 3106 */ 3107 hba->dev_cmd.complete = NULL; 3108 err = ufshcd_get_tr_ocs(lrbp, hba->dev_cmd.cqe); 3109 if (!err) 3110 err = ufshcd_dev_cmd_completion(hba, lrbp); 3111 } else { 3112 err = -ETIMEDOUT; 3113 dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n", 3114 __func__, lrbp->task_tag); 3115 if (ufshcd_clear_cmds(hba, 1U << lrbp->task_tag) == 0) { 3116 /* successfully cleared the command, retry if needed */ 3117 err = -EAGAIN; 3118 /* 3119 * Since clearing the command succeeded we also need to 3120 * clear the task tag bit from the outstanding_reqs 3121 * variable. 3122 */ 3123 spin_lock_irqsave(&hba->outstanding_lock, flags); 3124 pending = test_bit(lrbp->task_tag, 3125 &hba->outstanding_reqs); 3126 if (pending) { 3127 hba->dev_cmd.complete = NULL; 3128 __clear_bit(lrbp->task_tag, 3129 &hba->outstanding_reqs); 3130 } 3131 spin_unlock_irqrestore(&hba->outstanding_lock, flags); 3132 3133 if (!pending) { 3134 /* 3135 * The completion handler ran while we tried to 3136 * clear the command. 
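 * Pretend a little time is left and retry the wait so the result is
 * picked up through the normal completion path.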
3137 */ 3138 time_left = 1; 3139 goto retry; 3140 } 3141 } else { 3142 dev_err(hba->dev, "%s: failed to clear tag %d\n", 3143 __func__, lrbp->task_tag); 3144 3145 spin_lock_irqsave(&hba->outstanding_lock, flags); 3146 pending = test_bit(lrbp->task_tag, 3147 &hba->outstanding_reqs); 3148 if (pending) 3149 hba->dev_cmd.complete = NULL; 3150 spin_unlock_irqrestore(&hba->outstanding_lock, flags); 3151 3152 if (!pending) { 3153 /* 3154 * The completion handler ran while we tried to 3155 * clear the command. 3156 */ 3157 time_left = 1; 3158 goto retry; 3159 } 3160 } 3161 } 3162 3163 return err; 3164 } 3165 3166 /** 3167 * ufshcd_exec_dev_cmd - API for sending device management requests 3168 * @hba: UFS hba 3169 * @cmd_type: specifies the type (NOP, Query...) 3170 * @timeout: timeout in milliseconds 3171 * 3172 * NOTE: Since there is only one available tag for device management commands, 3173 * it is expected you hold the hba->dev_cmd.lock mutex. 3174 */ 3175 static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, 3176 enum dev_cmd_type cmd_type, int timeout) 3177 { 3178 DECLARE_COMPLETION_ONSTACK(wait); 3179 const u32 tag = hba->reserved_slot; 3180 struct ufshcd_lrb *lrbp; 3181 int err; 3182 3183 /* Protects use of hba->reserved_slot. */ 3184 lockdep_assert_held(&hba->dev_cmd.lock); 3185 3186 down_read(&hba->clk_scaling_lock); 3187 3188 lrbp = &hba->lrb[tag]; 3189 WARN_ON(lrbp->cmd); 3190 err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag); 3191 if (unlikely(err)) 3192 goto out; 3193 3194 hba->dev_cmd.complete = &wait; 3195 hba->dev_cmd.cqe = NULL; 3196 3197 ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr); 3198 3199 ufshcd_send_command(hba, tag, hba->dev_cmd_queue); 3200 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout); 3201 ufshcd_add_query_upiu_trace(hba, err ? 
UFS_QUERY_ERR : UFS_QUERY_COMP, 3202 (struct utp_upiu_req *)lrbp->ucd_rsp_ptr); 3203 3204 out: 3205 up_read(&hba->clk_scaling_lock); 3206 return err; 3207 } 3208 3209 /** 3210 * ufshcd_init_query() - init the query response and request parameters 3211 * @hba: per-adapter instance 3212 * @request: address of the request pointer to be initialized 3213 * @response: address of the response pointer to be initialized 3214 * @opcode: operation to perform 3215 * @idn: flag idn to access 3216 * @index: LU number to access 3217 * @selector: query/flag/descriptor further identification 3218 */ 3219 static inline void ufshcd_init_query(struct ufs_hba *hba, 3220 struct ufs_query_req **request, struct ufs_query_res **response, 3221 enum query_opcode opcode, u8 idn, u8 index, u8 selector) 3222 { 3223 *request = &hba->dev_cmd.query.request; 3224 *response = &hba->dev_cmd.query.response; 3225 memset(*request, 0, sizeof(struct ufs_query_req)); 3226 memset(*response, 0, sizeof(struct ufs_query_res)); 3227 (*request)->upiu_req.opcode = opcode; 3228 (*request)->upiu_req.idn = idn; 3229 (*request)->upiu_req.index = index; 3230 (*request)->upiu_req.selector = selector; 3231 } 3232 3233 static int ufshcd_query_flag_retry(struct ufs_hba *hba, 3234 enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res) 3235 { 3236 int ret; 3237 int retries; 3238 3239 for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) { 3240 ret = ufshcd_query_flag(hba, opcode, idn, index, flag_res); 3241 if (ret) 3242 dev_dbg(hba->dev, 3243 "%s: failed with error %d, retries %d\n", 3244 __func__, ret, retries); 3245 else 3246 break; 3247 } 3248 3249 if (ret) 3250 dev_err(hba->dev, 3251 "%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n", 3252 __func__, opcode, idn, ret, retries); 3253 return ret; 3254 } 3255 3256 /** 3257 * ufshcd_query_flag() - API function for sending flag query requests 3258 * @hba: per-adapter instance 3259 * @opcode: flag query to perform 3260 * @idn: flag idn to access 3261 * @index: flag index to access 3262 * @flag_res: the flag value after the query request completes 3263 * 3264 * Returns 0 for success, non-zero in case of failure 3265 */ 3266 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, 3267 enum flag_idn idn, u8 index, bool *flag_res) 3268 { 3269 struct ufs_query_req *request = NULL; 3270 struct ufs_query_res *response = NULL; 3271 int err, selector = 0; 3272 int timeout = QUERY_REQ_TIMEOUT; 3273 3274 BUG_ON(!hba); 3275 3276 ufshcd_hold(hba, false); 3277 mutex_lock(&hba->dev_cmd.lock); 3278 ufshcd_init_query(hba, &request, &response, opcode, idn, index, 3279 selector); 3280 3281 switch (opcode) { 3282 case UPIU_QUERY_OPCODE_SET_FLAG: 3283 case UPIU_QUERY_OPCODE_CLEAR_FLAG: 3284 case UPIU_QUERY_OPCODE_TOGGLE_FLAG: 3285 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST; 3286 break; 3287 case UPIU_QUERY_OPCODE_READ_FLAG: 3288 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST; 3289 if (!flag_res) { 3290 /* No dummy reads */ 3291 dev_err(hba->dev, "%s: Invalid argument for read request\n", 3292 __func__); 3293 err = -EINVAL; 3294 goto out_unlock; 3295 } 3296 break; 3297 default: 3298 dev_err(hba->dev, 3299 "%s: Expected query flag opcode but got = %d\n", 3300 __func__, opcode); 3301 err = -EINVAL; 3302 goto out_unlock; 3303 } 3304 3305 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout); 3306 3307 if (err) { 3308 dev_err(hba->dev, 3309 "%s: Sending flag query for idn %d failed, err = %d\n", 3310 __func__, idn, err); 3311 goto 
out_unlock; 3312 } 3313 3314 if (flag_res) 3315 *flag_res = (be32_to_cpu(response->upiu_res.value) & 3316 MASK_QUERY_UPIU_FLAG_LOC) & 0x1; 3317 3318 out_unlock: 3319 mutex_unlock(&hba->dev_cmd.lock); 3320 ufshcd_release(hba); 3321 return err; 3322 } 3323 3324 /** 3325 * ufshcd_query_attr - API function for sending attribute requests 3326 * @hba: per-adapter instance 3327 * @opcode: attribute opcode 3328 * @idn: attribute idn to access 3329 * @index: index field 3330 * @selector: selector field 3331 * @attr_val: the attribute value after the query request completes 3332 * 3333 * Returns 0 for success, non-zero in case of failure 3334 */ 3335 int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, 3336 enum attr_idn idn, u8 index, u8 selector, u32 *attr_val) 3337 { 3338 struct ufs_query_req *request = NULL; 3339 struct ufs_query_res *response = NULL; 3340 int err; 3341 3342 BUG_ON(!hba); 3343 3344 if (!attr_val) { 3345 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n", 3346 __func__, opcode); 3347 return -EINVAL; 3348 } 3349 3350 ufshcd_hold(hba, false); 3351 3352 mutex_lock(&hba->dev_cmd.lock); 3353 ufshcd_init_query(hba, &request, &response, opcode, idn, index, 3354 selector); 3355 3356 switch (opcode) { 3357 case UPIU_QUERY_OPCODE_WRITE_ATTR: 3358 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST; 3359 request->upiu_req.value = cpu_to_be32(*attr_val); 3360 break; 3361 case UPIU_QUERY_OPCODE_READ_ATTR: 3362 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST; 3363 break; 3364 default: 3365 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n", 3366 __func__, opcode); 3367 err = -EINVAL; 3368 goto out_unlock; 3369 } 3370 3371 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); 3372 3373 if (err) { 3374 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n", 3375 __func__, opcode, idn, index, err); 3376 goto out_unlock; 3377 } 3378 3379 *attr_val = be32_to_cpu(response->upiu_res.value); 3380 3381 out_unlock: 3382 mutex_unlock(&hba->dev_cmd.lock); 3383 ufshcd_release(hba); 3384 return err; 3385 } 3386 3387 /** 3388 * ufshcd_query_attr_retry() - API function for sending query 3389 * attribute with retries 3390 * @hba: per-adapter instance 3391 * @opcode: attribute opcode 3392 * @idn: attribute idn to access 3393 * @index: index field 3394 * @selector: selector field 3395 * @attr_val: the attribute value after the query request 3396 * completes 3397 * 3398 * Returns 0 for success, non-zero in case of failure 3399 */ 3400 int ufshcd_query_attr_retry(struct ufs_hba *hba, 3401 enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector, 3402 u32 *attr_val) 3403 { 3404 int ret = 0; 3405 u32 retries; 3406 3407 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) { 3408 ret = ufshcd_query_attr(hba, opcode, idn, index, 3409 selector, attr_val); 3410 if (ret) 3411 dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n", 3412 __func__, ret, retries); 3413 else 3414 break; 3415 } 3416 3417 if (ret) 3418 dev_err(hba->dev, 3419 "%s: query attribute, idn %d, failed with error %d after %d retries\n", 3420 __func__, idn, ret, QUERY_REQ_RETRIES); 3421 return ret; 3422 } 3423 3424 static int __ufshcd_query_descriptor(struct ufs_hba *hba, 3425 enum query_opcode opcode, enum desc_idn idn, u8 index, 3426 u8 selector, u8 *desc_buf, int *buf_len) 3427 { 3428 struct ufs_query_req *request = NULL; 3429 struct ufs_query_res *response = NULL; 3430 int err; 3431 3432 BUG_ON(!hba); 3433 3434 if 
(!desc_buf) { 3435 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n", 3436 __func__, opcode); 3437 return -EINVAL; 3438 } 3439 3440 if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) { 3441 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n", 3442 __func__, *buf_len); 3443 return -EINVAL; 3444 } 3445 3446 ufshcd_hold(hba, false); 3447 3448 mutex_lock(&hba->dev_cmd.lock); 3449 ufshcd_init_query(hba, &request, &response, opcode, idn, index, 3450 selector); 3451 hba->dev_cmd.query.descriptor = desc_buf; 3452 request->upiu_req.length = cpu_to_be16(*buf_len); 3453 3454 switch (opcode) { 3455 case UPIU_QUERY_OPCODE_WRITE_DESC: 3456 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST; 3457 break; 3458 case UPIU_QUERY_OPCODE_READ_DESC: 3459 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST; 3460 break; 3461 default: 3462 dev_err(hba->dev, 3463 "%s: Expected query descriptor opcode but got = 0x%.2x\n", 3464 __func__, opcode); 3465 err = -EINVAL; 3466 goto out_unlock; 3467 } 3468 3469 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); 3470 3471 if (err) { 3472 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n", 3473 __func__, opcode, idn, index, err); 3474 goto out_unlock; 3475 } 3476 3477 *buf_len = be16_to_cpu(response->upiu_res.length); 3478 3479 out_unlock: 3480 hba->dev_cmd.query.descriptor = NULL; 3481 mutex_unlock(&hba->dev_cmd.lock); 3482 ufshcd_release(hba); 3483 return err; 3484 } 3485 3486 /** 3487 * ufshcd_query_descriptor_retry - API function for sending descriptor requests 3488 * @hba: per-adapter instance 3489 * @opcode: attribute opcode 3490 * @idn: attribute idn to access 3491 * @index: index field 3492 * @selector: selector field 3493 * @desc_buf: the buffer that contains the descriptor 3494 * @buf_len: length parameter passed to the device 3495 * 3496 * Returns 0 for success, non-zero in case of failure. 3497 * The buf_len parameter will contain, on return, the length parameter 3498 * received on the response. 
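 * The request is retried up to QUERY_REQ_RETRIES times; -EINVAL is treated
 * as a final error and is not retried.
 *
 * Illustrative call (sketch), reading the device descriptor:
 *	int len = QUERY_DESC_MAX_SIZE;
 *	err = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
 *					    QUERY_DESC_IDN_DEVICE, 0, 0,
 *					    desc_buf, &len);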
3499 */ 3500 int ufshcd_query_descriptor_retry(struct ufs_hba *hba, 3501 enum query_opcode opcode, 3502 enum desc_idn idn, u8 index, 3503 u8 selector, 3504 u8 *desc_buf, int *buf_len) 3505 { 3506 int err; 3507 int retries; 3508 3509 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) { 3510 err = __ufshcd_query_descriptor(hba, opcode, idn, index, 3511 selector, desc_buf, buf_len); 3512 if (!err || err == -EINVAL) 3513 break; 3514 } 3515 3516 return err; 3517 } 3518 3519 /** 3520 * ufshcd_read_desc_param - read the specified descriptor parameter 3521 * @hba: Pointer to adapter instance 3522 * @desc_id: descriptor idn value 3523 * @desc_index: descriptor index 3524 * @param_offset: offset of the parameter to read 3525 * @param_read_buf: pointer to buffer where parameter would be read 3526 * @param_size: sizeof(param_read_buf) 3527 * 3528 * Return 0 in case of success, non-zero otherwise 3529 */ 3530 int ufshcd_read_desc_param(struct ufs_hba *hba, 3531 enum desc_idn desc_id, 3532 int desc_index, 3533 u8 param_offset, 3534 u8 *param_read_buf, 3535 u8 param_size) 3536 { 3537 int ret; 3538 u8 *desc_buf; 3539 int buff_len = QUERY_DESC_MAX_SIZE; 3540 bool is_kmalloc = true; 3541 3542 /* Safety check */ 3543 if (desc_id >= QUERY_DESC_IDN_MAX || !param_size) 3544 return -EINVAL; 3545 3546 /* Check whether we need temp memory */ 3547 if (param_offset != 0 || param_size < buff_len) { 3548 desc_buf = kzalloc(buff_len, GFP_KERNEL); 3549 if (!desc_buf) 3550 return -ENOMEM; 3551 } else { 3552 desc_buf = param_read_buf; 3553 is_kmalloc = false; 3554 } 3555 3556 /* Request for full descriptor */ 3557 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC, 3558 desc_id, desc_index, 0, 3559 desc_buf, &buff_len); 3560 if (ret) { 3561 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d\n", 3562 __func__, desc_id, desc_index, param_offset, ret); 3563 goto out; 3564 } 3565 3566 /* Update descriptor length */ 3567 buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET]; 3568 3569 if (param_offset >= buff_len) { 3570 dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n", 3571 __func__, param_offset, desc_id, buff_len); 3572 ret = -EINVAL; 3573 goto out; 3574 } 3575 3576 /* Sanity check */ 3577 if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) { 3578 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n", 3579 __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]); 3580 ret = -EINVAL; 3581 goto out; 3582 } 3583 3584 if (is_kmalloc) { 3585 /* Make sure we don't copy more data than available */ 3586 if (param_offset >= buff_len) 3587 ret = -EINVAL; 3588 else 3589 memcpy(param_read_buf, &desc_buf[param_offset], 3590 min_t(u32, param_size, buff_len - param_offset)); 3591 } 3592 out: 3593 if (is_kmalloc) 3594 kfree(desc_buf); 3595 return ret; 3596 } 3597 3598 /** 3599 * struct uc_string_id - unicode string 3600 * 3601 * @len: size of this descriptor inclusive 3602 * @type: descriptor type 3603 * @uc: unicode string character 3604 */ 3605 struct uc_string_id { 3606 u8 len; 3607 u8 type; 3608 wchar_t uc[]; 3609 } __packed; 3610 3611 /* replace non-printable or non-ASCII characters with spaces */ 3612 static inline char ufshcd_remove_non_printable(u8 ch) 3613 { 3614 return (ch >= 0x20 && ch <= 0x7e) ? 
ch : ' '; 3615 } 3616 3617 /** 3618 * ufshcd_read_string_desc - read string descriptor 3619 * @hba: pointer to adapter instance 3620 * @desc_index: descriptor index 3621 * @buf: pointer to buffer where descriptor would be read, 3622 * the caller should free the memory. 3623 * @ascii: if true convert from unicode to ascii characters 3624 * null terminated string. 3625 * 3626 * Return: 3627 * * string size on success. 3628 * * -ENOMEM: on allocation failure 3629 * * -EINVAL: on a wrong parameter 3630 */ 3631 int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index, 3632 u8 **buf, bool ascii) 3633 { 3634 struct uc_string_id *uc_str; 3635 u8 *str; 3636 int ret; 3637 3638 if (!buf) 3639 return -EINVAL; 3640 3641 uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL); 3642 if (!uc_str) 3643 return -ENOMEM; 3644 3645 ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_STRING, desc_index, 0, 3646 (u8 *)uc_str, QUERY_DESC_MAX_SIZE); 3647 if (ret < 0) { 3648 dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n", 3649 QUERY_REQ_RETRIES, ret); 3650 str = NULL; 3651 goto out; 3652 } 3653 3654 if (uc_str->len <= QUERY_DESC_HDR_SIZE) { 3655 dev_dbg(hba->dev, "String Desc is of zero length\n"); 3656 str = NULL; 3657 ret = 0; 3658 goto out; 3659 } 3660 3661 if (ascii) { 3662 ssize_t ascii_len; 3663 int i; 3664 /* remove header and divide by 2 to move from UTF16 to UTF8 */ 3665 ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1; 3666 str = kzalloc(ascii_len, GFP_KERNEL); 3667 if (!str) { 3668 ret = -ENOMEM; 3669 goto out; 3670 } 3671 3672 /* 3673 * the descriptor contains string in UTF16 format 3674 * we need to convert to utf-8 so it can be displayed 3675 */ 3676 ret = utf16s_to_utf8s(uc_str->uc, 3677 uc_str->len - QUERY_DESC_HDR_SIZE, 3678 UTF16_BIG_ENDIAN, str, ascii_len); 3679 3680 /* replace non-printable or non-ASCII characters with spaces */ 3681 for (i = 0; i < ret; i++) 3682 str[i] = ufshcd_remove_non_printable(str[i]); 3683 3684 str[ret++] = '\0'; 3685 3686 } else { 3687 str = kmemdup(uc_str, uc_str->len, GFP_KERNEL); 3688 if (!str) { 3689 ret = -ENOMEM; 3690 goto out; 3691 } 3692 ret = uc_str->len; 3693 } 3694 out: 3695 *buf = str; 3696 kfree(uc_str); 3697 return ret; 3698 } 3699 3700 /** 3701 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter 3702 * @hba: Pointer to adapter instance 3703 * @lun: lun id 3704 * @param_offset: offset of the parameter to read 3705 * @param_read_buf: pointer to buffer where parameter would be read 3706 * @param_size: sizeof(param_read_buf) 3707 * 3708 * Return 0 in case of success, non-zero otherwise 3709 */ 3710 static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba, 3711 int lun, 3712 enum unit_desc_param param_offset, 3713 u8 *param_read_buf, 3714 u32 param_size) 3715 { 3716 /* 3717 * Unit descriptors are only available for general purpose LUs (LUN id 3718 * from 0 to 7) and RPMB Well known LU. 
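 * ufs_is_valid_unit_desc_lun() rejects every other LUN (e.g. the DEVICE and
 * BOOT well known LUs), for which -EOPNOTSUPP is returned below.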
3719 */ 3720 if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun)) 3721 return -EOPNOTSUPP; 3722 3723 return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun, 3724 param_offset, param_read_buf, param_size); 3725 } 3726 3727 static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba *hba) 3728 { 3729 int err = 0; 3730 u32 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US; 3731 3732 if (hba->dev_info.wspecversion >= 0x300) { 3733 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, 3734 QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME, 0, 0, 3735 &gating_wait); 3736 if (err) 3737 dev_err(hba->dev, "Failed reading bRefClkGatingWait. err = %d, use default %uus\n", 3738 err, gating_wait); 3739 3740 if (gating_wait == 0) { 3741 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US; 3742 dev_err(hba->dev, "Undefined ref clk gating wait time, use default %uus\n", 3743 gating_wait); 3744 } 3745 3746 hba->dev_info.clk_gating_wait_us = gating_wait; 3747 } 3748 3749 return err; 3750 } 3751 3752 /** 3753 * ufshcd_memory_alloc - allocate memory for host memory space data structures 3754 * @hba: per adapter instance 3755 * 3756 * 1. Allocate DMA memory for Command Descriptor array 3757 * Each command descriptor consist of Command UPIU, Response UPIU and PRDT 3758 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL). 3759 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List 3760 * (UTMRDL) 3761 * 4. Allocate memory for local reference block(lrb). 3762 * 3763 * Returns 0 for success, non-zero in case of failure 3764 */ 3765 static int ufshcd_memory_alloc(struct ufs_hba *hba) 3766 { 3767 size_t utmrdl_size, utrdl_size, ucdl_size; 3768 3769 /* Allocate memory for UTP command descriptors */ 3770 ucdl_size = sizeof_utp_transfer_cmd_desc(hba) * hba->nutrs; 3771 hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev, 3772 ucdl_size, 3773 &hba->ucdl_dma_addr, 3774 GFP_KERNEL); 3775 3776 /* 3777 * UFSHCI requires UTP command descriptor to be 128 byte aligned. 3778 */ 3779 if (!hba->ucdl_base_addr || 3780 WARN_ON(hba->ucdl_dma_addr & (128 - 1))) { 3781 dev_err(hba->dev, 3782 "Command Descriptor Memory allocation failed\n"); 3783 goto out; 3784 } 3785 3786 /* 3787 * Allocate memory for UTP Transfer descriptors 3788 * UFSHCI requires 1024 byte alignment of UTRD 3789 */ 3790 utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs); 3791 hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev, 3792 utrdl_size, 3793 &hba->utrdl_dma_addr, 3794 GFP_KERNEL); 3795 if (!hba->utrdl_base_addr || 3796 WARN_ON(hba->utrdl_dma_addr & (1024 - 1))) { 3797 dev_err(hba->dev, 3798 "Transfer Descriptor Memory allocation failed\n"); 3799 goto out; 3800 } 3801 3802 /* 3803 * Skip utmrdl allocation; it may have been 3804 * allocated during first pass and not released during 3805 * MCQ memory allocation. 
3806 * See ufshcd_release_sdb_queue() and ufshcd_config_mcq() 3807 */ 3808 if (hba->utmrdl_base_addr) 3809 goto skip_utmrdl; 3810 /* 3811 * Allocate memory for UTP Task Management descriptors 3812 * UFSHCI requires 1024 byte alignment of UTMRD 3813 */ 3814 utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs; 3815 hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev, 3816 utmrdl_size, 3817 &hba->utmrdl_dma_addr, 3818 GFP_KERNEL); 3819 if (!hba->utmrdl_base_addr || 3820 WARN_ON(hba->utmrdl_dma_addr & (1024 - 1))) { 3821 dev_err(hba->dev, 3822 "Task Management Descriptor Memory allocation failed\n"); 3823 goto out; 3824 } 3825 3826 skip_utmrdl: 3827 /* Allocate memory for local reference block */ 3828 hba->lrb = devm_kcalloc(hba->dev, 3829 hba->nutrs, sizeof(struct ufshcd_lrb), 3830 GFP_KERNEL); 3831 if (!hba->lrb) { 3832 dev_err(hba->dev, "LRB Memory allocation failed\n"); 3833 goto out; 3834 } 3835 return 0; 3836 out: 3837 return -ENOMEM; 3838 } 3839 3840 /** 3841 * ufshcd_host_memory_configure - configure local reference block with 3842 * memory offsets 3843 * @hba: per adapter instance 3844 * 3845 * Configure Host memory space 3846 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA 3847 * address. 3848 * 2. Update each UTRD with Response UPIU offset, Response UPIU length 3849 * and PRDT offset. 3850 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT 3851 * into local reference block. 3852 */ 3853 static void ufshcd_host_memory_configure(struct ufs_hba *hba) 3854 { 3855 struct utp_transfer_req_desc *utrdlp; 3856 dma_addr_t cmd_desc_dma_addr; 3857 dma_addr_t cmd_desc_element_addr; 3858 u16 response_offset; 3859 u16 prdt_offset; 3860 int cmd_desc_size; 3861 int i; 3862 3863 utrdlp = hba->utrdl_base_addr; 3864 3865 response_offset = 3866 offsetof(struct utp_transfer_cmd_desc, response_upiu); 3867 prdt_offset = 3868 offsetof(struct utp_transfer_cmd_desc, prd_table); 3869 3870 cmd_desc_size = sizeof_utp_transfer_cmd_desc(hba); 3871 cmd_desc_dma_addr = hba->ucdl_dma_addr; 3872 3873 for (i = 0; i < hba->nutrs; i++) { 3874 /* Configure UTRD with command descriptor base address */ 3875 cmd_desc_element_addr = 3876 (cmd_desc_dma_addr + (cmd_desc_size * i)); 3877 utrdlp[i].command_desc_base_addr_lo = 3878 cpu_to_le32(lower_32_bits(cmd_desc_element_addr)); 3879 utrdlp[i].command_desc_base_addr_hi = 3880 cpu_to_le32(upper_32_bits(cmd_desc_element_addr)); 3881 3882 /* Response upiu and prdt offset should be in double words */ 3883 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) { 3884 utrdlp[i].response_upiu_offset = 3885 cpu_to_le16(response_offset); 3886 utrdlp[i].prd_table_offset = 3887 cpu_to_le16(prdt_offset); 3888 utrdlp[i].response_upiu_length = 3889 cpu_to_le16(ALIGNED_UPIU_SIZE); 3890 } else { 3891 utrdlp[i].response_upiu_offset = 3892 cpu_to_le16(response_offset >> 2); 3893 utrdlp[i].prd_table_offset = 3894 cpu_to_le16(prdt_offset >> 2); 3895 utrdlp[i].response_upiu_length = 3896 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2); 3897 } 3898 3899 ufshcd_init_lrb(hba, &hba->lrb[i], i); 3900 } 3901 } 3902 3903 /** 3904 * ufshcd_dme_link_startup - Notify Unipro to perform link startup 3905 * @hba: per adapter instance 3906 * 3907 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer, 3908 * in order to initialize the Unipro link startup procedure. 3909 * Once the Unipro links are up, the device connected to the controller 3910 * is detected. 
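 *
 * Added note (illustrative, not part of the original kernel-doc): callers are
 * expected to retry on failure. ufshcd_link_startup() below does roughly
 *
 *	int retries = DME_LINKSTARTUP_RETRIES;
 *	do {
 *		ret = ufshcd_dme_link_startup(hba);
 *	} while (ret && retries--);
 *
 * with the vendor notification, device-presence check and error-history
 * bookkeeping omitted from this sketch.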
3911 * 3912 * Returns 0 on success, non-zero value on failure 3913 */ 3914 static int ufshcd_dme_link_startup(struct ufs_hba *hba) 3915 { 3916 struct uic_command uic_cmd = {0}; 3917 int ret; 3918 3919 uic_cmd.command = UIC_CMD_DME_LINK_STARTUP; 3920 3921 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); 3922 if (ret) 3923 dev_dbg(hba->dev, 3924 "dme-link-startup: error code %d\n", ret); 3925 return ret; 3926 } 3927 /** 3928 * ufshcd_dme_reset - UIC command for DME_RESET 3929 * @hba: per adapter instance 3930 * 3931 * DME_RESET command is issued in order to reset UniPro stack. 3932 * This function now deals with cold reset. 3933 * 3934 * Returns 0 on success, non-zero value on failure 3935 */ 3936 static int ufshcd_dme_reset(struct ufs_hba *hba) 3937 { 3938 struct uic_command uic_cmd = {0}; 3939 int ret; 3940 3941 uic_cmd.command = UIC_CMD_DME_RESET; 3942 3943 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); 3944 if (ret) 3945 dev_err(hba->dev, 3946 "dme-reset: error code %d\n", ret); 3947 3948 return ret; 3949 } 3950 3951 int ufshcd_dme_configure_adapt(struct ufs_hba *hba, 3952 int agreed_gear, 3953 int adapt_val) 3954 { 3955 int ret; 3956 3957 if (agreed_gear < UFS_HS_G4) 3958 adapt_val = PA_NO_ADAPT; 3959 3960 ret = ufshcd_dme_set(hba, 3961 UIC_ARG_MIB(PA_TXHSADAPTTYPE), 3962 adapt_val); 3963 return ret; 3964 } 3965 EXPORT_SYMBOL_GPL(ufshcd_dme_configure_adapt); 3966 3967 /** 3968 * ufshcd_dme_enable - UIC command for DME_ENABLE 3969 * @hba: per adapter instance 3970 * 3971 * DME_ENABLE command is issued in order to enable UniPro stack. 3972 * 3973 * Returns 0 on success, non-zero value on failure 3974 */ 3975 static int ufshcd_dme_enable(struct ufs_hba *hba) 3976 { 3977 struct uic_command uic_cmd = {0}; 3978 int ret; 3979 3980 uic_cmd.command = UIC_CMD_DME_ENABLE; 3981 3982 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); 3983 if (ret) 3984 dev_err(hba->dev, 3985 "dme-enable: error code %d\n", ret); 3986 3987 return ret; 3988 } 3989 3990 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba) 3991 { 3992 #define MIN_DELAY_BEFORE_DME_CMDS_US 1000 3993 unsigned long min_sleep_time_us; 3994 3995 if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS)) 3996 return; 3997 3998 /* 3999 * last_dme_cmd_tstamp will be 0 only for 1st call to 4000 * this function 4001 */ 4002 if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) { 4003 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US; 4004 } else { 4005 unsigned long delta = 4006 (unsigned long) ktime_to_us( 4007 ktime_sub(ktime_get(), 4008 hba->last_dme_cmd_tstamp)); 4009 4010 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US) 4011 min_sleep_time_us = 4012 MIN_DELAY_BEFORE_DME_CMDS_US - delta; 4013 else 4014 return; /* no more delay required */ 4015 } 4016 4017 /* allow sleep for extra 50us if needed */ 4018 usleep_range(min_sleep_time_us, min_sleep_time_us + 50); 4019 } 4020 4021 /** 4022 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET 4023 * @hba: per adapter instance 4024 * @attr_sel: uic command argument1 4025 * @attr_set: attribute set type as uic command argument2 4026 * @mib_val: setting value as uic command argument3 4027 * @peer: indicate whether peer or local 4028 * 4029 * Returns 0 on success, non-zero value on failure 4030 */ 4031 int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, 4032 u8 attr_set, u32 mib_val, u8 peer) 4033 { 4034 struct uic_command uic_cmd = {0}; 4035 static const char *const action[] = { 4036 "dme-set", 4037 "dme-peer-set" 4038 }; 4039 const char *set = action[!!peer]; 4040 int ret; 4041 int retries = 
UFS_UIC_COMMAND_RETRIES; 4042 4043 uic_cmd.command = peer ? 4044 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET; 4045 uic_cmd.argument1 = attr_sel; 4046 uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set); 4047 uic_cmd.argument3 = mib_val; 4048 4049 do { 4050 /* for peer attributes we retry upon failure */ 4051 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); 4052 if (ret) 4053 dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n", 4054 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret); 4055 } while (ret && peer && --retries); 4056 4057 if (ret) 4058 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n", 4059 set, UIC_GET_ATTR_ID(attr_sel), mib_val, 4060 UFS_UIC_COMMAND_RETRIES - retries); 4061 4062 return ret; 4063 } 4064 EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr); 4065 4066 /** 4067 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET 4068 * @hba: per adapter instance 4069 * @attr_sel: uic command argument1 4070 * @mib_val: the value of the attribute as returned by the UIC command 4071 * @peer: indicate whether peer or local 4072 * 4073 * Returns 0 on success, non-zero value on failure 4074 */ 4075 int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, 4076 u32 *mib_val, u8 peer) 4077 { 4078 struct uic_command uic_cmd = {0}; 4079 static const char *const action[] = { 4080 "dme-get", 4081 "dme-peer-get" 4082 }; 4083 const char *get = action[!!peer]; 4084 int ret; 4085 int retries = UFS_UIC_COMMAND_RETRIES; 4086 struct ufs_pa_layer_attr orig_pwr_info; 4087 struct ufs_pa_layer_attr temp_pwr_info; 4088 bool pwr_mode_change = false; 4089 4090 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) { 4091 orig_pwr_info = hba->pwr_info; 4092 temp_pwr_info = orig_pwr_info; 4093 4094 if (orig_pwr_info.pwr_tx == FAST_MODE || 4095 orig_pwr_info.pwr_rx == FAST_MODE) { 4096 temp_pwr_info.pwr_tx = FASTAUTO_MODE; 4097 temp_pwr_info.pwr_rx = FASTAUTO_MODE; 4098 pwr_mode_change = true; 4099 } else if (orig_pwr_info.pwr_tx == SLOW_MODE || 4100 orig_pwr_info.pwr_rx == SLOW_MODE) { 4101 temp_pwr_info.pwr_tx = SLOWAUTO_MODE; 4102 temp_pwr_info.pwr_rx = SLOWAUTO_MODE; 4103 pwr_mode_change = true; 4104 } 4105 if (pwr_mode_change) { 4106 ret = ufshcd_change_power_mode(hba, &temp_pwr_info); 4107 if (ret) 4108 goto out; 4109 } 4110 } 4111 4112 uic_cmd.command = peer ? 4113 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET; 4114 uic_cmd.argument1 = attr_sel; 4115 4116 do { 4117 /* for peer attributes we retry upon failure */ 4118 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); 4119 if (ret) 4120 dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n", 4121 get, UIC_GET_ATTR_ID(attr_sel), ret); 4122 } while (ret && peer && --retries); 4123 4124 if (ret) 4125 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n", 4126 get, UIC_GET_ATTR_ID(attr_sel), 4127 UFS_UIC_COMMAND_RETRIES - retries); 4128 4129 if (mib_val && !ret) 4130 *mib_val = uic_cmd.argument3; 4131 4132 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE) 4133 && pwr_mode_change) 4134 ufshcd_change_power_mode(hba, &orig_pwr_info); 4135 out: 4136 return ret; 4137 } 4138 EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr); 4139 4140 /** 4141 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power 4142 * state) and waits for it to take effect. 
4143  *
4144  * @hba: per adapter instance
4145  * @cmd: UIC command to execute
4146  *
4147  * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
4148  * DME_HIBERNATE_EXIT commands take some time to take effect on both the host
4149  * and device UniPro link, and hence their final completion is indicated by
4150  * dedicated status bits in the Interrupt Status register (UPMS, UHES, UHXS)
4151  * in addition to the normal UIC command completion status (UCCS). This
4152  * function only returns after the relevant status bits indicate completion.
4153  *
4154  * Returns 0 on success, non-zero value on failure
4155  */
4156 static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
4157 {
4158 	DECLARE_COMPLETION_ONSTACK(uic_async_done);
4159 	unsigned long flags;
4160 	u8 status;
4161 	int ret;
4162 	bool reenable_intr = false;
4163 
4164 	mutex_lock(&hba->uic_cmd_mutex);
4165 	ufshcd_add_delay_before_dme_cmd(hba);
4166 
4167 	spin_lock_irqsave(hba->host->host_lock, flags);
4168 	if (ufshcd_is_link_broken(hba)) {
4169 		ret = -ENOLINK;
4170 		goto out_unlock;
4171 	}
4172 	hba->uic_async_done = &uic_async_done;
4173 	if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
4174 		ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
4175 		/*
4176 		 * Make sure UIC command completion interrupt is disabled before
4177 		 * issuing UIC command.
4178 		 */
4179 		wmb();
4180 		reenable_intr = true;
4181 	}
4182 	ret = __ufshcd_send_uic_cmd(hba, cmd, false);
4183 	spin_unlock_irqrestore(hba->host->host_lock, flags);
4184 	if (ret) {
4185 		dev_err(hba->dev,
4186 			"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
4187 			cmd->command, cmd->argument3, ret);
4188 		goto out;
4189 	}
4190 
4191 	if (!wait_for_completion_timeout(hba->uic_async_done,
4192 					 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
4193 		dev_err(hba->dev,
4194 			"pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
4195 			cmd->command, cmd->argument3);
4196 
4197 		if (!cmd->cmd_active) {
4198 			dev_err(hba->dev, "%s: Power Mode Change operation has been completed, go check UPMCRS\n",
4199 				__func__);
4200 			goto check_upmcrs;
4201 		}
4202 
4203 		ret = -ETIMEDOUT;
4204 		goto out;
4205 	}
4206 
4207 check_upmcrs:
4208 	status = ufshcd_get_upmcrs(hba);
4209 	if (status != PWR_LOCAL) {
4210 		dev_err(hba->dev,
4211 			"pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
4212 			cmd->command, status);
4213 		ret = (status != PWR_OK) ? status : -1;
4214 	}
4215 out:
4216 	if (ret) {
4217 		ufshcd_print_host_state(hba);
4218 		ufshcd_print_pwr_info(hba);
4219 		ufshcd_print_evt_hist(hba);
4220 	}
4221 
4222 	spin_lock_irqsave(hba->host->host_lock, flags);
4223 	hba->active_uic_cmd = NULL;
4224 	hba->uic_async_done = NULL;
4225 	if (reenable_intr)
4226 		ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
4227 	if (ret) {
4228 		ufshcd_set_link_broken(hba);
4229 		ufshcd_schedule_eh_work(hba);
4230 	}
4231 out_unlock:
4232 	spin_unlock_irqrestore(hba->host->host_lock, flags);
4233 	mutex_unlock(&hba->uic_cmd_mutex);
4234 
4235 	return ret;
4236 }
4237 
4238 /**
4239  * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
4240  * using DME_SET primitives.
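 *
 * Added note (illustrative, not part of the original kernel-doc): @mode packs
 * the RX and TX power modes into a single byte, which is how
 * ufshcd_change_power_mode() further below invokes this helper:
 *
 *	ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
 *					      | pwr_mode->pwr_tx);
 *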
4241  * @hba: per adapter instance
4242  * @mode: power mode value
4243  *
4244  * Returns 0 on success, non-zero value on failure
4245  */
4246 int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
4247 {
4248 	struct uic_command uic_cmd = {0};
4249 	int ret;
4250 
4251 	if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
4252 		ret = ufshcd_dme_set(hba,
4253 				     UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
4254 		if (ret) {
4255 			dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
4256 				__func__, ret);
4257 			goto out;
4258 		}
4259 	}
4260 
4261 	uic_cmd.command = UIC_CMD_DME_SET;
4262 	uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
4263 	uic_cmd.argument3 = mode;
4264 	ufshcd_hold(hba, false);
4265 	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4266 	ufshcd_release(hba);
4267 
4268 out:
4269 	return ret;
4270 }
4271 EXPORT_SYMBOL_GPL(ufshcd_uic_change_pwr_mode);
4272 
4273 int ufshcd_link_recovery(struct ufs_hba *hba)
4274 {
4275 	int ret;
4276 	unsigned long flags;
4277 
4278 	spin_lock_irqsave(hba->host->host_lock, flags);
4279 	hba->ufshcd_state = UFSHCD_STATE_RESET;
4280 	ufshcd_set_eh_in_progress(hba);
4281 	spin_unlock_irqrestore(hba->host->host_lock, flags);
4282 
4283 	/* Reset the attached device */
4284 	ufshcd_device_reset(hba);
4285 
4286 	ret = ufshcd_host_reset_and_restore(hba);
4287 
4288 	spin_lock_irqsave(hba->host->host_lock, flags);
4289 	if (ret)
4290 		hba->ufshcd_state = UFSHCD_STATE_ERROR;
4291 	ufshcd_clear_eh_in_progress(hba);
4292 	spin_unlock_irqrestore(hba->host->host_lock, flags);
4293 
4294 	if (ret)
4295 		dev_err(hba->dev, "%s: link recovery failed, err %d\n",
4296 			__func__, ret);
4297 
4298 	return ret;
4299 }
4300 EXPORT_SYMBOL_GPL(ufshcd_link_recovery);
4301 
4302 int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
4303 {
4304 	int ret;
4305 	struct uic_command uic_cmd = {0};
4306 	ktime_t start = ktime_get();
4307 
4308 	ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
4309 
4310 	uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
4311 	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4312 	trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
4313 				     ktime_to_us(ktime_sub(ktime_get(), start)), ret);
4314 
4315 	if (ret)
4316 		dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
4317 			__func__, ret);
4318 	else
4319 		ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
4320 					   POST_CHANGE);
4321 
4322 	return ret;
4323 }
4324 EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_enter);
4325 
4326 int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
4327 {
4328 	struct uic_command uic_cmd = {0};
4329 	int ret;
4330 	ktime_t start = ktime_get();
4331 
4332 	ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
4333 
4334 	uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
4335 	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4336 	trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
4337 				     ktime_to_us(ktime_sub(ktime_get(), start)), ret);
4338 
4339 	if (ret) {
4340 		dev_err(hba->dev, "%s: hibern8 exit failed.
ret = %d\n", 4341 __func__, ret); 4342 } else { 4343 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, 4344 POST_CHANGE); 4345 hba->ufs_stats.last_hibern8_exit_tstamp = local_clock(); 4346 hba->ufs_stats.hibern8_exit_cnt++; 4347 } 4348 4349 return ret; 4350 } 4351 EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit); 4352 4353 void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit) 4354 { 4355 unsigned long flags; 4356 bool update = false; 4357 4358 if (!ufshcd_is_auto_hibern8_supported(hba)) 4359 return; 4360 4361 spin_lock_irqsave(hba->host->host_lock, flags); 4362 if (hba->ahit != ahit) { 4363 hba->ahit = ahit; 4364 update = true; 4365 } 4366 spin_unlock_irqrestore(hba->host->host_lock, flags); 4367 4368 if (update && 4369 !pm_runtime_suspended(&hba->ufs_device_wlun->sdev_gendev)) { 4370 ufshcd_rpm_get_sync(hba); 4371 ufshcd_hold(hba, false); 4372 ufshcd_auto_hibern8_enable(hba); 4373 ufshcd_release(hba); 4374 ufshcd_rpm_put_sync(hba); 4375 } 4376 } 4377 EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update); 4378 4379 void ufshcd_auto_hibern8_enable(struct ufs_hba *hba) 4380 { 4381 if (!ufshcd_is_auto_hibern8_supported(hba)) 4382 return; 4383 4384 ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER); 4385 } 4386 4387 /** 4388 * ufshcd_init_pwr_info - setting the POR (power on reset) 4389 * values in hba power info 4390 * @hba: per-adapter instance 4391 */ 4392 static void ufshcd_init_pwr_info(struct ufs_hba *hba) 4393 { 4394 hba->pwr_info.gear_rx = UFS_PWM_G1; 4395 hba->pwr_info.gear_tx = UFS_PWM_G1; 4396 hba->pwr_info.lane_rx = 1; 4397 hba->pwr_info.lane_tx = 1; 4398 hba->pwr_info.pwr_rx = SLOWAUTO_MODE; 4399 hba->pwr_info.pwr_tx = SLOWAUTO_MODE; 4400 hba->pwr_info.hs_rate = 0; 4401 } 4402 4403 /** 4404 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device 4405 * @hba: per-adapter instance 4406 */ 4407 static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba) 4408 { 4409 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info; 4410 4411 if (hba->max_pwr_info.is_valid) 4412 return 0; 4413 4414 if (hba->quirks & UFSHCD_QUIRK_HIBERN_FASTAUTO) { 4415 pwr_info->pwr_tx = FASTAUTO_MODE; 4416 pwr_info->pwr_rx = FASTAUTO_MODE; 4417 } else { 4418 pwr_info->pwr_tx = FAST_MODE; 4419 pwr_info->pwr_rx = FAST_MODE; 4420 } 4421 pwr_info->hs_rate = PA_HS_MODE_B; 4422 4423 /* Get the connected lane count */ 4424 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), 4425 &pwr_info->lane_rx); 4426 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), 4427 &pwr_info->lane_tx); 4428 4429 if (!pwr_info->lane_rx || !pwr_info->lane_tx) { 4430 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n", 4431 __func__, 4432 pwr_info->lane_rx, 4433 pwr_info->lane_tx); 4434 return -EINVAL; 4435 } 4436 4437 /* 4438 * First, get the maximum gears of HS speed. 4439 * If a zero value, it means there is no HSGEAR capability. 4440 * Then, get the maximum gears of PWM speed. 
4441 */ 4442 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx); 4443 if (!pwr_info->gear_rx) { 4444 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), 4445 &pwr_info->gear_rx); 4446 if (!pwr_info->gear_rx) { 4447 dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n", 4448 __func__, pwr_info->gear_rx); 4449 return -EINVAL; 4450 } 4451 pwr_info->pwr_rx = SLOW_MODE; 4452 } 4453 4454 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), 4455 &pwr_info->gear_tx); 4456 if (!pwr_info->gear_tx) { 4457 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), 4458 &pwr_info->gear_tx); 4459 if (!pwr_info->gear_tx) { 4460 dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n", 4461 __func__, pwr_info->gear_tx); 4462 return -EINVAL; 4463 } 4464 pwr_info->pwr_tx = SLOW_MODE; 4465 } 4466 4467 hba->max_pwr_info.is_valid = true; 4468 return 0; 4469 } 4470 4471 static int ufshcd_change_power_mode(struct ufs_hba *hba, 4472 struct ufs_pa_layer_attr *pwr_mode) 4473 { 4474 int ret; 4475 4476 /* if already configured to the requested pwr_mode */ 4477 if (!hba->force_pmc && 4478 pwr_mode->gear_rx == hba->pwr_info.gear_rx && 4479 pwr_mode->gear_tx == hba->pwr_info.gear_tx && 4480 pwr_mode->lane_rx == hba->pwr_info.lane_rx && 4481 pwr_mode->lane_tx == hba->pwr_info.lane_tx && 4482 pwr_mode->pwr_rx == hba->pwr_info.pwr_rx && 4483 pwr_mode->pwr_tx == hba->pwr_info.pwr_tx && 4484 pwr_mode->hs_rate == hba->pwr_info.hs_rate) { 4485 dev_dbg(hba->dev, "%s: power already configured\n", __func__); 4486 return 0; 4487 } 4488 4489 /* 4490 * Configure attributes for power mode change with below. 4491 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION, 4492 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION, 4493 * - PA_HSSERIES 4494 */ 4495 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx); 4496 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), 4497 pwr_mode->lane_rx); 4498 if (pwr_mode->pwr_rx == FASTAUTO_MODE || 4499 pwr_mode->pwr_rx == FAST_MODE) 4500 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true); 4501 else 4502 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), false); 4503 4504 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx); 4505 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), 4506 pwr_mode->lane_tx); 4507 if (pwr_mode->pwr_tx == FASTAUTO_MODE || 4508 pwr_mode->pwr_tx == FAST_MODE) 4509 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true); 4510 else 4511 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), false); 4512 4513 if (pwr_mode->pwr_rx == FASTAUTO_MODE || 4514 pwr_mode->pwr_tx == FASTAUTO_MODE || 4515 pwr_mode->pwr_rx == FAST_MODE || 4516 pwr_mode->pwr_tx == FAST_MODE) 4517 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), 4518 pwr_mode->hs_rate); 4519 4520 if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) { 4521 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0), 4522 DL_FC0ProtectionTimeOutVal_Default); 4523 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1), 4524 DL_TC0ReplayTimeOutVal_Default); 4525 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2), 4526 DL_AFC0ReqTimeOutVal_Default); 4527 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3), 4528 DL_FC1ProtectionTimeOutVal_Default); 4529 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4), 4530 DL_TC1ReplayTimeOutVal_Default); 4531 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5), 4532 DL_AFC1ReqTimeOutVal_Default); 4533 4534 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal), 4535 DL_FC0ProtectionTimeOutVal_Default); 4536 
ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal), 4537 DL_TC0ReplayTimeOutVal_Default); 4538 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal), 4539 DL_AFC0ReqTimeOutVal_Default); 4540 } 4541 4542 ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4 4543 | pwr_mode->pwr_tx); 4544 4545 if (ret) { 4546 dev_err(hba->dev, 4547 "%s: power mode change failed %d\n", __func__, ret); 4548 } else { 4549 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL, 4550 pwr_mode); 4551 4552 memcpy(&hba->pwr_info, pwr_mode, 4553 sizeof(struct ufs_pa_layer_attr)); 4554 } 4555 4556 return ret; 4557 } 4558 4559 /** 4560 * ufshcd_config_pwr_mode - configure a new power mode 4561 * @hba: per-adapter instance 4562 * @desired_pwr_mode: desired power configuration 4563 */ 4564 int ufshcd_config_pwr_mode(struct ufs_hba *hba, 4565 struct ufs_pa_layer_attr *desired_pwr_mode) 4566 { 4567 struct ufs_pa_layer_attr final_params = { 0 }; 4568 int ret; 4569 4570 ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE, 4571 desired_pwr_mode, &final_params); 4572 4573 if (ret) 4574 memcpy(&final_params, desired_pwr_mode, sizeof(final_params)); 4575 4576 ret = ufshcd_change_power_mode(hba, &final_params); 4577 4578 return ret; 4579 } 4580 EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode); 4581 4582 /** 4583 * ufshcd_complete_dev_init() - checks device readiness 4584 * @hba: per-adapter instance 4585 * 4586 * Set fDeviceInit flag and poll until device toggles it. 4587 */ 4588 static int ufshcd_complete_dev_init(struct ufs_hba *hba) 4589 { 4590 int err; 4591 bool flag_res = true; 4592 ktime_t timeout; 4593 4594 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG, 4595 QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL); 4596 if (err) { 4597 dev_err(hba->dev, 4598 "%s: setting fDeviceInit flag failed with error %d\n", 4599 __func__, err); 4600 goto out; 4601 } 4602 4603 /* Poll fDeviceInit flag to be cleared */ 4604 timeout = ktime_add_ms(ktime_get(), FDEVICEINIT_COMPL_TIMEOUT); 4605 do { 4606 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG, 4607 QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res); 4608 if (!flag_res) 4609 break; 4610 usleep_range(500, 1000); 4611 } while (ktime_before(ktime_get(), timeout)); 4612 4613 if (err) { 4614 dev_err(hba->dev, 4615 "%s: reading fDeviceInit flag failed with error %d\n", 4616 __func__, err); 4617 } else if (flag_res) { 4618 dev_err(hba->dev, 4619 "%s: fDeviceInit was not cleared by the device\n", 4620 __func__); 4621 err = -EBUSY; 4622 } 4623 out: 4624 return err; 4625 } 4626 4627 /** 4628 * ufshcd_make_hba_operational - Make UFS controller operational 4629 * @hba: per adapter instance 4630 * 4631 * To bring UFS host controller to operational state, 4632 * 1. Enable required interrupts 4633 * 2. Configure interrupt aggregation 4634 * 3. Program UTRL and UTMRL base address 4635 * 4. 
Configure run-stop-registers 4636 * 4637 * Returns 0 on success, non-zero value on failure 4638 */ 4639 int ufshcd_make_hba_operational(struct ufs_hba *hba) 4640 { 4641 int err = 0; 4642 u32 reg; 4643 4644 /* Enable required interrupts */ 4645 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS); 4646 4647 /* Configure interrupt aggregation */ 4648 if (ufshcd_is_intr_aggr_allowed(hba)) 4649 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO); 4650 else 4651 ufshcd_disable_intr_aggr(hba); 4652 4653 /* Configure UTRL and UTMRL base address registers */ 4654 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr), 4655 REG_UTP_TRANSFER_REQ_LIST_BASE_L); 4656 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr), 4657 REG_UTP_TRANSFER_REQ_LIST_BASE_H); 4658 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr), 4659 REG_UTP_TASK_REQ_LIST_BASE_L); 4660 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr), 4661 REG_UTP_TASK_REQ_LIST_BASE_H); 4662 4663 /* 4664 * Make sure base address and interrupt setup are updated before 4665 * enabling the run/stop registers below. 4666 */ 4667 wmb(); 4668 4669 /* 4670 * UCRDY, UTMRLDY and UTRLRDY bits must be 1 4671 */ 4672 reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS); 4673 if (!(ufshcd_get_lists_status(reg))) { 4674 ufshcd_enable_run_stop_reg(hba); 4675 } else { 4676 dev_err(hba->dev, 4677 "Host controller not ready to process requests"); 4678 err = -EIO; 4679 } 4680 4681 return err; 4682 } 4683 EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational); 4684 4685 /** 4686 * ufshcd_hba_stop - Send controller to reset state 4687 * @hba: per adapter instance 4688 */ 4689 void ufshcd_hba_stop(struct ufs_hba *hba) 4690 { 4691 unsigned long flags; 4692 int err; 4693 4694 /* 4695 * Obtain the host lock to prevent that the controller is disabled 4696 * while the UFS interrupt handler is active on another CPU. 4697 */ 4698 spin_lock_irqsave(hba->host->host_lock, flags); 4699 ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE); 4700 spin_unlock_irqrestore(hba->host->host_lock, flags); 4701 4702 err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE, 4703 CONTROLLER_ENABLE, CONTROLLER_DISABLE, 4704 10, 1); 4705 if (err) 4706 dev_err(hba->dev, "%s: Controller disable failed\n", __func__); 4707 } 4708 EXPORT_SYMBOL_GPL(ufshcd_hba_stop); 4709 4710 /** 4711 * ufshcd_hba_execute_hce - initialize the controller 4712 * @hba: per adapter instance 4713 * 4714 * The controller resets itself and controller firmware initialization 4715 * sequence kicks off. When controller is ready it will set 4716 * the Host Controller Enable bit to 1. 4717 * 4718 * Returns 0 on success, non-zero value on failure 4719 */ 4720 static int ufshcd_hba_execute_hce(struct ufs_hba *hba) 4721 { 4722 int retry_outer = 3; 4723 int retry_inner; 4724 4725 start: 4726 if (ufshcd_is_hba_active(hba)) 4727 /* change controller state to "reset state" */ 4728 ufshcd_hba_stop(hba); 4729 4730 /* UniPro link is disabled at this point */ 4731 ufshcd_set_link_off(hba); 4732 4733 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE); 4734 4735 /* start controller initialization sequence */ 4736 ufshcd_hba_start(hba); 4737 4738 /* 4739 * To initialize a UFS host controller HCE bit must be set to 1. 4740 * During initialization the HCE bit value changes from 1->0->1. 4741 * When the host controller completes initialization sequence 4742 * it sets the value of HCE bit to 1. The same HCE bit is read back 4743 * to check if the controller has completed initialization sequence. 
4744 * So without this delay the value HCE = 1, set in the previous 4745 * instruction might be read back. 4746 * This delay can be changed based on the controller. 4747 */ 4748 ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100); 4749 4750 /* wait for the host controller to complete initialization */ 4751 retry_inner = 50; 4752 while (!ufshcd_is_hba_active(hba)) { 4753 if (retry_inner) { 4754 retry_inner--; 4755 } else { 4756 dev_err(hba->dev, 4757 "Controller enable failed\n"); 4758 if (retry_outer) { 4759 retry_outer--; 4760 goto start; 4761 } 4762 return -EIO; 4763 } 4764 usleep_range(1000, 1100); 4765 } 4766 4767 /* enable UIC related interrupts */ 4768 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK); 4769 4770 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE); 4771 4772 return 0; 4773 } 4774 4775 int ufshcd_hba_enable(struct ufs_hba *hba) 4776 { 4777 int ret; 4778 4779 if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) { 4780 ufshcd_set_link_off(hba); 4781 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE); 4782 4783 /* enable UIC related interrupts */ 4784 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK); 4785 ret = ufshcd_dme_reset(hba); 4786 if (ret) { 4787 dev_err(hba->dev, "DME_RESET failed\n"); 4788 return ret; 4789 } 4790 4791 ret = ufshcd_dme_enable(hba); 4792 if (ret) { 4793 dev_err(hba->dev, "Enabling DME failed\n"); 4794 return ret; 4795 } 4796 4797 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE); 4798 } else { 4799 ret = ufshcd_hba_execute_hce(hba); 4800 } 4801 4802 return ret; 4803 } 4804 EXPORT_SYMBOL_GPL(ufshcd_hba_enable); 4805 4806 static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer) 4807 { 4808 int tx_lanes = 0, i, err = 0; 4809 4810 if (!peer) 4811 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), 4812 &tx_lanes); 4813 else 4814 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), 4815 &tx_lanes); 4816 for (i = 0; i < tx_lanes; i++) { 4817 if (!peer) 4818 err = ufshcd_dme_set(hba, 4819 UIC_ARG_MIB_SEL(TX_LCC_ENABLE, 4820 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)), 4821 0); 4822 else 4823 err = ufshcd_dme_peer_set(hba, 4824 UIC_ARG_MIB_SEL(TX_LCC_ENABLE, 4825 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)), 4826 0); 4827 if (err) { 4828 dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d", 4829 __func__, peer, i, err); 4830 break; 4831 } 4832 } 4833 4834 return err; 4835 } 4836 4837 static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba) 4838 { 4839 return ufshcd_disable_tx_lcc(hba, true); 4840 } 4841 4842 void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val) 4843 { 4844 struct ufs_event_hist *e; 4845 4846 if (id >= UFS_EVT_CNT) 4847 return; 4848 4849 e = &hba->ufs_stats.event[id]; 4850 e->val[e->pos] = val; 4851 e->tstamp[e->pos] = local_clock(); 4852 e->cnt += 1; 4853 e->pos = (e->pos + 1) % UFS_EVENT_HIST_LENGTH; 4854 4855 ufshcd_vops_event_notify(hba, id, &val); 4856 } 4857 EXPORT_SYMBOL_GPL(ufshcd_update_evt_hist); 4858 4859 /** 4860 * ufshcd_link_startup - Initialize unipro link startup 4861 * @hba: per adapter instance 4862 * 4863 * Returns 0 for success, non-zero in case of failure 4864 */ 4865 static int ufshcd_link_startup(struct ufs_hba *hba) 4866 { 4867 int ret; 4868 int retries = DME_LINKSTARTUP_RETRIES; 4869 bool link_startup_again = false; 4870 4871 /* 4872 * If UFS device isn't active then we will have to issue link startup 4873 * 2 times to make sure the device state move to active. 
4874 */ 4875 if (!ufshcd_is_ufs_dev_active(hba)) 4876 link_startup_again = true; 4877 4878 link_startup: 4879 do { 4880 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE); 4881 4882 ret = ufshcd_dme_link_startup(hba); 4883 4884 /* check if device is detected by inter-connect layer */ 4885 if (!ret && !ufshcd_is_device_present(hba)) { 4886 ufshcd_update_evt_hist(hba, 4887 UFS_EVT_LINK_STARTUP_FAIL, 4888 0); 4889 dev_err(hba->dev, "%s: Device not present\n", __func__); 4890 ret = -ENXIO; 4891 goto out; 4892 } 4893 4894 /* 4895 * DME link lost indication is only received when link is up, 4896 * but we can't be sure if the link is up until link startup 4897 * succeeds. So reset the local Uni-Pro and try again. 4898 */ 4899 if (ret && retries && ufshcd_hba_enable(hba)) { 4900 ufshcd_update_evt_hist(hba, 4901 UFS_EVT_LINK_STARTUP_FAIL, 4902 (u32)ret); 4903 goto out; 4904 } 4905 } while (ret && retries--); 4906 4907 if (ret) { 4908 /* failed to get the link up... retire */ 4909 ufshcd_update_evt_hist(hba, 4910 UFS_EVT_LINK_STARTUP_FAIL, 4911 (u32)ret); 4912 goto out; 4913 } 4914 4915 if (link_startup_again) { 4916 link_startup_again = false; 4917 retries = DME_LINKSTARTUP_RETRIES; 4918 goto link_startup; 4919 } 4920 4921 /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */ 4922 ufshcd_init_pwr_info(hba); 4923 ufshcd_print_pwr_info(hba); 4924 4925 if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) { 4926 ret = ufshcd_disable_device_tx_lcc(hba); 4927 if (ret) 4928 goto out; 4929 } 4930 4931 /* Include any host controller configuration via UIC commands */ 4932 ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE); 4933 if (ret) 4934 goto out; 4935 4936 /* Clear UECPA once due to LINERESET has happened during LINK_STARTUP */ 4937 ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER); 4938 ret = ufshcd_make_hba_operational(hba); 4939 out: 4940 if (ret) { 4941 dev_err(hba->dev, "link startup failed %d\n", ret); 4942 ufshcd_print_host_state(hba); 4943 ufshcd_print_pwr_info(hba); 4944 ufshcd_print_evt_hist(hba); 4945 } 4946 return ret; 4947 } 4948 4949 /** 4950 * ufshcd_verify_dev_init() - Verify device initialization 4951 * @hba: per-adapter instance 4952 * 4953 * Send NOP OUT UPIU and wait for NOP IN response to check whether the 4954 * device Transport Protocol (UTP) layer is ready after a reset. 4955 * If the UTP layer at the device side is not initialized, it may 4956 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT 4957 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations. 4958 */ 4959 static int ufshcd_verify_dev_init(struct ufs_hba *hba) 4960 { 4961 int err = 0; 4962 int retries; 4963 4964 ufshcd_hold(hba, false); 4965 mutex_lock(&hba->dev_cmd.lock); 4966 for (retries = NOP_OUT_RETRIES; retries > 0; retries--) { 4967 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, 4968 hba->nop_out_timeout); 4969 4970 if (!err || err == -ETIMEDOUT) 4971 break; 4972 4973 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err); 4974 } 4975 mutex_unlock(&hba->dev_cmd.lock); 4976 ufshcd_release(hba); 4977 4978 if (err) 4979 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err); 4980 return err; 4981 } 4982 4983 /** 4984 * ufshcd_setup_links - associate link b/w device wlun and other luns 4985 * @sdev: pointer to SCSI device 4986 * @hba: pointer to ufs hba 4987 */ 4988 static void ufshcd_setup_links(struct ufs_hba *hba, struct scsi_device *sdev) 4989 { 4990 struct device_link *link; 4991 4992 /* 4993 * Device wlun is the supplier & rest of the luns are consumers. 
4994 * This ensures that device wlun suspends after all other luns. 4995 */ 4996 if (hba->ufs_device_wlun) { 4997 link = device_link_add(&sdev->sdev_gendev, 4998 &hba->ufs_device_wlun->sdev_gendev, 4999 DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE); 5000 if (!link) { 5001 dev_err(&sdev->sdev_gendev, "Failed establishing link - %s\n", 5002 dev_name(&hba->ufs_device_wlun->sdev_gendev)); 5003 return; 5004 } 5005 hba->luns_avail--; 5006 /* Ignore REPORT_LUN wlun probing */ 5007 if (hba->luns_avail == 1) { 5008 ufshcd_rpm_put(hba); 5009 return; 5010 } 5011 } else { 5012 /* 5013 * Device wlun is probed. The assumption is that WLUNs are 5014 * scanned before other LUNs. 5015 */ 5016 hba->luns_avail--; 5017 } 5018 } 5019 5020 /** 5021 * ufshcd_lu_init - Initialize the relevant parameters of the LU 5022 * @hba: per-adapter instance 5023 * @sdev: pointer to SCSI device 5024 */ 5025 static void ufshcd_lu_init(struct ufs_hba *hba, struct scsi_device *sdev) 5026 { 5027 int len = QUERY_DESC_MAX_SIZE; 5028 u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun); 5029 u8 lun_qdepth = hba->nutrs; 5030 u8 *desc_buf; 5031 int ret; 5032 5033 desc_buf = kzalloc(len, GFP_KERNEL); 5034 if (!desc_buf) 5035 goto set_qdepth; 5036 5037 ret = ufshcd_read_unit_desc_param(hba, lun, 0, desc_buf, len); 5038 if (ret < 0) { 5039 if (ret == -EOPNOTSUPP) 5040 /* If LU doesn't support unit descriptor, its queue depth is set to 1 */ 5041 lun_qdepth = 1; 5042 kfree(desc_buf); 5043 goto set_qdepth; 5044 } 5045 5046 if (desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH]) { 5047 /* 5048 * In per-LU queueing architecture, bLUQueueDepth will not be 0, then we will 5049 * use the smaller between UFSHCI CAP.NUTRS and UFS LU bLUQueueDepth 5050 */ 5051 lun_qdepth = min_t(int, desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH], hba->nutrs); 5052 } 5053 /* 5054 * According to UFS device specification, the write protection mode is only supported by 5055 * normal LU, not supported by WLUN. 5056 */ 5057 if (hba->dev_info.f_power_on_wp_en && lun < hba->dev_info.max_lu_supported && 5058 !hba->dev_info.is_lu_power_on_wp && 5059 desc_buf[UNIT_DESC_PARAM_LU_WR_PROTECT] == UFS_LU_POWER_ON_WP) 5060 hba->dev_info.is_lu_power_on_wp = true; 5061 5062 /* In case of RPMB LU, check if advanced RPMB mode is enabled */ 5063 if (desc_buf[UNIT_DESC_PARAM_UNIT_INDEX] == UFS_UPIU_RPMB_WLUN && 5064 desc_buf[RPMB_UNIT_DESC_PARAM_REGION_EN] & BIT(4)) 5065 hba->dev_info.b_advanced_rpmb_en = true; 5066 5067 5068 kfree(desc_buf); 5069 set_qdepth: 5070 /* 5071 * For WLUNs that don't support unit descriptor, queue depth is set to 1. For LUs whose 5072 * bLUQueueDepth == 0, the queue depth is set to a maximum value that host can queue. 
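 *
 * Worked example (added; the numbers are hypothetical): with hba->nutrs == 32,
 * a LU reporting bLUQueueDepth == 64 ends up with a queue depth of 32, a LU
 * reporting bLUQueueDepth == 8 keeps 8, and a WLUN without a unit descriptor
 * gets 1.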
5073 */ 5074 dev_dbg(hba->dev, "Set LU %x queue depth %d\n", lun, lun_qdepth); 5075 scsi_change_queue_depth(sdev, lun_qdepth); 5076 } 5077 5078 /** 5079 * ufshcd_slave_alloc - handle initial SCSI device configurations 5080 * @sdev: pointer to SCSI device 5081 * 5082 * Returns success 5083 */ 5084 static int ufshcd_slave_alloc(struct scsi_device *sdev) 5085 { 5086 struct ufs_hba *hba; 5087 5088 hba = shost_priv(sdev->host); 5089 5090 /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */ 5091 sdev->use_10_for_ms = 1; 5092 5093 /* DBD field should be set to 1 in mode sense(10) */ 5094 sdev->set_dbd_for_ms = 1; 5095 5096 /* allow SCSI layer to restart the device in case of errors */ 5097 sdev->allow_restart = 1; 5098 5099 /* REPORT SUPPORTED OPERATION CODES is not supported */ 5100 sdev->no_report_opcodes = 1; 5101 5102 /* WRITE_SAME command is not supported */ 5103 sdev->no_write_same = 1; 5104 5105 ufshcd_lu_init(hba, sdev); 5106 5107 ufshcd_setup_links(hba, sdev); 5108 5109 return 0; 5110 } 5111 5112 /** 5113 * ufshcd_change_queue_depth - change queue depth 5114 * @sdev: pointer to SCSI device 5115 * @depth: required depth to set 5116 * 5117 * Change queue depth and make sure the max. limits are not crossed. 5118 */ 5119 static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth) 5120 { 5121 return scsi_change_queue_depth(sdev, min(depth, sdev->host->can_queue)); 5122 } 5123 5124 static void ufshcd_hpb_destroy(struct ufs_hba *hba, struct scsi_device *sdev) 5125 { 5126 /* skip well-known LU */ 5127 if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) || 5128 !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba)) 5129 return; 5130 5131 ufshpb_destroy_lu(hba, sdev); 5132 } 5133 5134 static void ufshcd_hpb_configure(struct ufs_hba *hba, struct scsi_device *sdev) 5135 { 5136 /* skip well-known LU */ 5137 if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) || 5138 !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba)) 5139 return; 5140 5141 ufshpb_init_hpb_lu(hba, sdev); 5142 } 5143 5144 /** 5145 * ufshcd_slave_configure - adjust SCSI device configurations 5146 * @sdev: pointer to SCSI device 5147 */ 5148 static int ufshcd_slave_configure(struct scsi_device *sdev) 5149 { 5150 struct ufs_hba *hba = shost_priv(sdev->host); 5151 struct request_queue *q = sdev->request_queue; 5152 5153 ufshcd_hpb_configure(hba, sdev); 5154 5155 blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1); 5156 if (hba->quirks & UFSHCD_QUIRK_4KB_DMA_ALIGNMENT) 5157 blk_queue_update_dma_alignment(q, 4096 - 1); 5158 /* 5159 * Block runtime-pm until all consumers are added. 5160 * Refer ufshcd_setup_links(). 5161 */ 5162 if (is_device_wlun(sdev)) 5163 pm_runtime_get_noresume(&sdev->sdev_gendev); 5164 else if (ufshcd_is_rpm_autosuspend_allowed(hba)) 5165 sdev->rpm_autosuspend = 1; 5166 /* 5167 * Do not print messages during runtime PM to avoid never-ending cycles 5168 * of messages written back to storage by user space causing runtime 5169 * resume, causing more messages and so on. 
5170 */ 5171 sdev->silence_suspend = 1; 5172 5173 ufshcd_crypto_register(hba, q); 5174 5175 return 0; 5176 } 5177 5178 /** 5179 * ufshcd_slave_destroy - remove SCSI device configurations 5180 * @sdev: pointer to SCSI device 5181 */ 5182 static void ufshcd_slave_destroy(struct scsi_device *sdev) 5183 { 5184 struct ufs_hba *hba; 5185 unsigned long flags; 5186 5187 hba = shost_priv(sdev->host); 5188 5189 ufshcd_hpb_destroy(hba, sdev); 5190 5191 /* Drop the reference as it won't be needed anymore */ 5192 if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) { 5193 spin_lock_irqsave(hba->host->host_lock, flags); 5194 hba->ufs_device_wlun = NULL; 5195 spin_unlock_irqrestore(hba->host->host_lock, flags); 5196 } else if (hba->ufs_device_wlun) { 5197 struct device *supplier = NULL; 5198 5199 /* Ensure UFS Device WLUN exists and does not disappear */ 5200 spin_lock_irqsave(hba->host->host_lock, flags); 5201 if (hba->ufs_device_wlun) { 5202 supplier = &hba->ufs_device_wlun->sdev_gendev; 5203 get_device(supplier); 5204 } 5205 spin_unlock_irqrestore(hba->host->host_lock, flags); 5206 5207 if (supplier) { 5208 /* 5209 * If a LUN fails to probe (e.g. absent BOOT WLUN), the 5210 * device will not have been registered but can still 5211 * have a device link holding a reference to the device. 5212 */ 5213 device_link_remove(&sdev->sdev_gendev, supplier); 5214 put_device(supplier); 5215 } 5216 } 5217 } 5218 5219 /** 5220 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status 5221 * @lrbp: pointer to local reference block of completed command 5222 * @scsi_status: SCSI command status 5223 * 5224 * Returns value base on SCSI command status 5225 */ 5226 static inline int 5227 ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status) 5228 { 5229 int result = 0; 5230 5231 switch (scsi_status) { 5232 case SAM_STAT_CHECK_CONDITION: 5233 ufshcd_copy_sense_data(lrbp); 5234 fallthrough; 5235 case SAM_STAT_GOOD: 5236 result |= DID_OK << 16 | scsi_status; 5237 break; 5238 case SAM_STAT_TASK_SET_FULL: 5239 case SAM_STAT_BUSY: 5240 case SAM_STAT_TASK_ABORTED: 5241 ufshcd_copy_sense_data(lrbp); 5242 result |= scsi_status; 5243 break; 5244 default: 5245 result |= DID_ERROR << 16; 5246 break; 5247 } /* end of switch */ 5248 5249 return result; 5250 } 5251 5252 /** 5253 * ufshcd_transfer_rsp_status - Get overall status of the response 5254 * @hba: per adapter instance 5255 * @lrbp: pointer to local reference block of completed command 5256 * @cqe: pointer to the completion queue entry 5257 * 5258 * Returns result of the command to notify SCSI midlayer 5259 */ 5260 static inline int 5261 ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, 5262 struct cq_entry *cqe) 5263 { 5264 int result = 0; 5265 int scsi_status; 5266 enum utp_ocs ocs; 5267 5268 scsi_set_resid(lrbp->cmd, 5269 be32_to_cpu(lrbp->ucd_rsp_ptr->sr.residual_transfer_count)); 5270 5271 /* overall command status of utrd */ 5272 ocs = ufshcd_get_tr_ocs(lrbp, cqe); 5273 5274 if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) { 5275 if (be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_1) & 5276 MASK_RSP_UPIU_RESULT) 5277 ocs = OCS_SUCCESS; 5278 } 5279 5280 switch (ocs) { 5281 case OCS_SUCCESS: 5282 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr); 5283 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0); 5284 switch (result) { 5285 case UPIU_TRANSACTION_RESPONSE: 5286 /* 5287 * get the response UPIU result to extract 5288 * the SCSI command status 5289 */ 5290 result = 
ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr); 5291 5292 /* 5293 * get the result based on SCSI status response 5294 * to notify the SCSI midlayer of the command status 5295 */ 5296 scsi_status = result & MASK_SCSI_STATUS; 5297 result = ufshcd_scsi_cmd_status(lrbp, scsi_status); 5298 5299 /* 5300 * Currently we are only supporting BKOPs exception 5301 * events hence we can ignore BKOPs exception event 5302 * during power management callbacks. BKOPs exception 5303 * event is not expected to be raised in runtime suspend 5304 * callback as it allows the urgent bkops. 5305 * During system suspend, we are anyway forcefully 5306 * disabling the bkops and if urgent bkops is needed 5307 * it will be enabled on system resume. Long term 5308 * solution could be to abort the system suspend if 5309 * UFS device needs urgent BKOPs. 5310 */ 5311 if (!hba->pm_op_in_progress && 5312 !ufshcd_eh_in_progress(hba) && 5313 ufshcd_is_exception_event(lrbp->ucd_rsp_ptr)) 5314 /* Flushed in suspend */ 5315 schedule_work(&hba->eeh_work); 5316 5317 if (scsi_status == SAM_STAT_GOOD) 5318 ufshpb_rsp_upiu(hba, lrbp); 5319 break; 5320 case UPIU_TRANSACTION_REJECT_UPIU: 5321 /* TODO: handle Reject UPIU Response */ 5322 result = DID_ERROR << 16; 5323 dev_err(hba->dev, 5324 "Reject UPIU not fully implemented\n"); 5325 break; 5326 default: 5327 dev_err(hba->dev, 5328 "Unexpected request response code = %x\n", 5329 result); 5330 result = DID_ERROR << 16; 5331 break; 5332 } 5333 break; 5334 case OCS_ABORTED: 5335 result |= DID_ABORT << 16; 5336 break; 5337 case OCS_INVALID_COMMAND_STATUS: 5338 result |= DID_REQUEUE << 16; 5339 break; 5340 case OCS_INVALID_CMD_TABLE_ATTR: 5341 case OCS_INVALID_PRDT_ATTR: 5342 case OCS_MISMATCH_DATA_BUF_SIZE: 5343 case OCS_MISMATCH_RESP_UPIU_SIZE: 5344 case OCS_PEER_COMM_FAILURE: 5345 case OCS_FATAL_ERROR: 5346 case OCS_DEVICE_FATAL_ERROR: 5347 case OCS_INVALID_CRYPTO_CONFIG: 5348 case OCS_GENERAL_CRYPTO_ERROR: 5349 default: 5350 result |= DID_ERROR << 16; 5351 dev_err(hba->dev, 5352 "OCS error from controller = %x for tag %d\n", 5353 ocs, lrbp->task_tag); 5354 ufshcd_print_evt_hist(hba); 5355 ufshcd_print_host_state(hba); 5356 break; 5357 } /* end of switch */ 5358 5359 if ((host_byte(result) != DID_OK) && 5360 (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs) 5361 ufshcd_print_tr(hba, lrbp->task_tag, true); 5362 return result; 5363 } 5364 5365 static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba, 5366 u32 intr_mask) 5367 { 5368 if (!ufshcd_is_auto_hibern8_supported(hba) || 5369 !ufshcd_is_auto_hibern8_enabled(hba)) 5370 return false; 5371 5372 if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK)) 5373 return false; 5374 5375 if (hba->active_uic_cmd && 5376 (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER || 5377 hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT)) 5378 return false; 5379 5380 return true; 5381 } 5382 5383 /** 5384 * ufshcd_uic_cmd_compl - handle completion of uic command 5385 * @hba: per adapter instance 5386 * @intr_status: interrupt status generated by the controller 5387 * 5388 * Returns 5389 * IRQ_HANDLED - If interrupt is valid 5390 * IRQ_NONE - If invalid interrupt 5391 */ 5392 static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) 5393 { 5394 irqreturn_t retval = IRQ_NONE; 5395 5396 spin_lock(hba->host->host_lock); 5397 if (ufshcd_is_auto_hibern8_error(hba, intr_status)) 5398 hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status); 5399 5400 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) { 5401 
hba->active_uic_cmd->argument2 |= 5402 ufshcd_get_uic_cmd_result(hba); 5403 hba->active_uic_cmd->argument3 = 5404 ufshcd_get_dme_attr_val(hba); 5405 if (!hba->uic_async_done) 5406 hba->active_uic_cmd->cmd_active = 0; 5407 complete(&hba->active_uic_cmd->done); 5408 retval = IRQ_HANDLED; 5409 } 5410 5411 if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) { 5412 hba->active_uic_cmd->cmd_active = 0; 5413 complete(hba->uic_async_done); 5414 retval = IRQ_HANDLED; 5415 } 5416 5417 if (retval == IRQ_HANDLED) 5418 ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd, 5419 UFS_CMD_COMP); 5420 spin_unlock(hba->host->host_lock); 5421 return retval; 5422 } 5423 5424 /* Release the resources allocated for processing a SCSI command. */ 5425 static void ufshcd_release_scsi_cmd(struct ufs_hba *hba, 5426 struct ufshcd_lrb *lrbp) 5427 { 5428 struct scsi_cmnd *cmd = lrbp->cmd; 5429 5430 scsi_dma_unmap(cmd); 5431 lrbp->cmd = NULL; /* Mark the command as completed. */ 5432 ufshcd_release(hba); 5433 ufshcd_clk_scaling_update_busy(hba); 5434 } 5435 5436 /** 5437 * ufshcd_compl_one_cqe - handle a completion queue entry 5438 * @hba: per adapter instance 5439 * @task_tag: the task tag of the request to be completed 5440 * @cqe: pointer to the completion queue entry 5441 */ 5442 void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag, 5443 struct cq_entry *cqe) 5444 { 5445 struct ufshcd_lrb *lrbp; 5446 struct scsi_cmnd *cmd; 5447 5448 lrbp = &hba->lrb[task_tag]; 5449 lrbp->compl_time_stamp = ktime_get(); 5450 cmd = lrbp->cmd; 5451 if (cmd) { 5452 if (unlikely(ufshcd_should_inform_monitor(hba, lrbp))) 5453 ufshcd_update_monitor(hba, lrbp); 5454 ufshcd_add_command_trace(hba, task_tag, UFS_CMD_COMP); 5455 cmd->result = ufshcd_transfer_rsp_status(hba, lrbp, cqe); 5456 ufshcd_release_scsi_cmd(hba, lrbp); 5457 /* Do not touch lrbp after scsi done */ 5458 scsi_done(cmd); 5459 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE || 5460 lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) { 5461 if (hba->dev_cmd.complete) { 5462 hba->dev_cmd.cqe = cqe; 5463 ufshcd_add_command_trace(hba, task_tag, UFS_DEV_COMP); 5464 complete(hba->dev_cmd.complete); 5465 ufshcd_clk_scaling_update_busy(hba); 5466 } 5467 } 5468 } 5469 5470 /** 5471 * __ufshcd_transfer_req_compl - handle SCSI and query command completion 5472 * @hba: per adapter instance 5473 * @completed_reqs: bitmask that indicates which requests to complete 5474 */ 5475 static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, 5476 unsigned long completed_reqs) 5477 { 5478 int tag; 5479 5480 for_each_set_bit(tag, &completed_reqs, hba->nutrs) 5481 ufshcd_compl_one_cqe(hba, tag, NULL); 5482 } 5483 5484 /* Any value that is not an existing queue number is fine for this constant. */ 5485 enum { 5486 UFSHCD_POLL_FROM_INTERRUPT_CONTEXT = -1 5487 }; 5488 5489 static void ufshcd_clear_polled(struct ufs_hba *hba, 5490 unsigned long *completed_reqs) 5491 { 5492 int tag; 5493 5494 for_each_set_bit(tag, completed_reqs, hba->nutrs) { 5495 struct scsi_cmnd *cmd = hba->lrb[tag].cmd; 5496 5497 if (!cmd) 5498 continue; 5499 if (scsi_cmd_to_rq(cmd)->cmd_flags & REQ_POLLED) 5500 __clear_bit(tag, completed_reqs); 5501 } 5502 } 5503 5504 /* 5505 * Returns > 0 if one or more commands have been completed or 0 if no 5506 * requests have been completed. 
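 *
 * Added note (illustrative): besides serving block layer polling, this routine
 * is also invoked from the interrupt completion path by
 * ufshcd_transfer_req_compl() below as
 *
 *	ufshcd_poll(hba->host, UFSHCD_POLL_FROM_INTERRUPT_CONTEXT);
 *
 * in which case commands submitted with REQ_POLLED are deliberately skipped
 * and left for the poller (see ufshcd_clear_polled() above).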
5507 */ 5508 static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num) 5509 { 5510 struct ufs_hba *hba = shost_priv(shost); 5511 unsigned long completed_reqs, flags; 5512 u32 tr_doorbell; 5513 struct ufs_hw_queue *hwq; 5514 5515 if (is_mcq_enabled(hba)) { 5516 hwq = &hba->uhq[queue_num + UFSHCD_MCQ_IO_QUEUE_OFFSET]; 5517 5518 return ufshcd_mcq_poll_cqe_lock(hba, hwq); 5519 } 5520 5521 spin_lock_irqsave(&hba->outstanding_lock, flags); 5522 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); 5523 completed_reqs = ~tr_doorbell & hba->outstanding_reqs; 5524 WARN_ONCE(completed_reqs & ~hba->outstanding_reqs, 5525 "completed: %#lx; outstanding: %#lx\n", completed_reqs, 5526 hba->outstanding_reqs); 5527 if (queue_num == UFSHCD_POLL_FROM_INTERRUPT_CONTEXT) { 5528 /* Do not complete polled requests from interrupt context. */ 5529 ufshcd_clear_polled(hba, &completed_reqs); 5530 } 5531 hba->outstanding_reqs &= ~completed_reqs; 5532 spin_unlock_irqrestore(&hba->outstanding_lock, flags); 5533 5534 if (completed_reqs) 5535 __ufshcd_transfer_req_compl(hba, completed_reqs); 5536 5537 return completed_reqs != 0; 5538 } 5539 5540 /** 5541 * ufshcd_transfer_req_compl - handle SCSI and query command completion 5542 * @hba: per adapter instance 5543 * 5544 * Returns 5545 * IRQ_HANDLED - If interrupt is valid 5546 * IRQ_NONE - If invalid interrupt 5547 */ 5548 static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba) 5549 { 5550 /* Resetting interrupt aggregation counters first and reading the 5551 * DOOR_BELL afterward allows us to handle all the completed requests. 5552 * In order to prevent other interrupts starvation the DB is read once 5553 * after reset. The down side of this solution is the possibility of 5554 * false interrupt if device completes another request after resetting 5555 * aggregation and before reading the DB. 5556 */ 5557 if (ufshcd_is_intr_aggr_allowed(hba) && 5558 !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR)) 5559 ufshcd_reset_intr_aggr(hba); 5560 5561 if (ufs_fail_completion()) 5562 return IRQ_HANDLED; 5563 5564 /* 5565 * Ignore the ufshcd_poll() return value and return IRQ_HANDLED since we 5566 * do not want polling to trigger spurious interrupt complaints. 
5567 */ 5568 ufshcd_poll(hba->host, UFSHCD_POLL_FROM_INTERRUPT_CONTEXT); 5569 5570 return IRQ_HANDLED; 5571 } 5572 5573 int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask) 5574 { 5575 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, 5576 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, 5577 &ee_ctrl_mask); 5578 } 5579 5580 int ufshcd_write_ee_control(struct ufs_hba *hba) 5581 { 5582 int err; 5583 5584 mutex_lock(&hba->ee_ctrl_mutex); 5585 err = __ufshcd_write_ee_control(hba, hba->ee_ctrl_mask); 5586 mutex_unlock(&hba->ee_ctrl_mutex); 5587 if (err) 5588 dev_err(hba->dev, "%s: failed to write ee control %d\n", 5589 __func__, err); 5590 return err; 5591 } 5592 5593 int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask, 5594 const u16 *other_mask, u16 set, u16 clr) 5595 { 5596 u16 new_mask, ee_ctrl_mask; 5597 int err = 0; 5598 5599 mutex_lock(&hba->ee_ctrl_mutex); 5600 new_mask = (*mask & ~clr) | set; 5601 ee_ctrl_mask = new_mask | *other_mask; 5602 if (ee_ctrl_mask != hba->ee_ctrl_mask) 5603 err = __ufshcd_write_ee_control(hba, ee_ctrl_mask); 5604 /* Still need to update 'mask' even if 'ee_ctrl_mask' was unchanged */ 5605 if (!err) { 5606 hba->ee_ctrl_mask = ee_ctrl_mask; 5607 *mask = new_mask; 5608 } 5609 mutex_unlock(&hba->ee_ctrl_mutex); 5610 return err; 5611 } 5612 5613 /** 5614 * ufshcd_disable_ee - disable exception event 5615 * @hba: per-adapter instance 5616 * @mask: exception event to disable 5617 * 5618 * Disables exception event in the device so that the EVENT_ALERT 5619 * bit is not set. 5620 * 5621 * Returns zero on success, non-zero error value on failure. 5622 */ 5623 static inline int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask) 5624 { 5625 return ufshcd_update_ee_drv_mask(hba, 0, mask); 5626 } 5627 5628 /** 5629 * ufshcd_enable_ee - enable exception event 5630 * @hba: per-adapter instance 5631 * @mask: exception event to enable 5632 * 5633 * Enable corresponding exception event in the device to allow 5634 * device to alert host in critical scenarios. 5635 * 5636 * Returns zero on success, non-zero error value on failure. 5637 */ 5638 static inline int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask) 5639 { 5640 return ufshcd_update_ee_drv_mask(hba, mask, 0); 5641 } 5642 5643 /** 5644 * ufshcd_enable_auto_bkops - Allow device managed BKOPS 5645 * @hba: per-adapter instance 5646 * 5647 * Allow device to manage background operations on its own. Enabling 5648 * this might lead to inconsistent latencies during normal data transfers 5649 * as the device is allowed to manage its own way of handling background 5650 * operations. 5651 * 5652 * Returns zero on success, non-zero on failure. 
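/*
 * Worked example, illustration only, of the arithmetic in
 * ufshcd_update_ee_control() above: the driver-owned and userspace-owned
 * masks are maintained separately and OR-ed together before being written to
 * the exception event control attribute.  The helper and the bit values are
 * made up.
 */
static void __maybe_unused ufshcd_demo_ee_mask_update(void)
{
	u16 drv_mask = 0x0004;			/* driver currently wants bit 2 */
	u16 other_mask = 0x0010;		/* userspace wants bit 4 */
	u16 set = 0x0008, clr = 0x0004;		/* request: set bit 3, clear bit 2 */
	u16 new_mask = (drv_mask & ~clr) | set;		/* 0x0008 */
	u16 ee_ctrl_mask = new_mask | other_mask;	/* 0x0018 goes to the device */

	pr_debug("new drv mask %#x, control attribute %#x\n",
		 new_mask, ee_ctrl_mask);
}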
5653 */ 5654 static int ufshcd_enable_auto_bkops(struct ufs_hba *hba) 5655 { 5656 int err = 0; 5657 5658 if (hba->auto_bkops_enabled) 5659 goto out; 5660 5661 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG, 5662 QUERY_FLAG_IDN_BKOPS_EN, 0, NULL); 5663 if (err) { 5664 dev_err(hba->dev, "%s: failed to enable bkops %d\n", 5665 __func__, err); 5666 goto out; 5667 } 5668 5669 hba->auto_bkops_enabled = true; 5670 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled"); 5671 5672 /* No need of URGENT_BKOPS exception from the device */ 5673 err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS); 5674 if (err) 5675 dev_err(hba->dev, "%s: failed to disable exception event %d\n", 5676 __func__, err); 5677 out: 5678 return err; 5679 } 5680 5681 /** 5682 * ufshcd_disable_auto_bkops - block device in doing background operations 5683 * @hba: per-adapter instance 5684 * 5685 * Disabling background operations improves command response latency but 5686 * has drawback of device moving into critical state where the device is 5687 * not-operable. Make sure to call ufshcd_enable_auto_bkops() whenever the 5688 * host is idle so that BKOPS are managed effectively without any negative 5689 * impacts. 5690 * 5691 * Returns zero on success, non-zero on failure. 5692 */ 5693 static int ufshcd_disable_auto_bkops(struct ufs_hba *hba) 5694 { 5695 int err = 0; 5696 5697 if (!hba->auto_bkops_enabled) 5698 goto out; 5699 5700 /* 5701 * If host assisted BKOPs is to be enabled, make sure 5702 * urgent bkops exception is allowed. 5703 */ 5704 err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS); 5705 if (err) { 5706 dev_err(hba->dev, "%s: failed to enable exception event %d\n", 5707 __func__, err); 5708 goto out; 5709 } 5710 5711 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG, 5712 QUERY_FLAG_IDN_BKOPS_EN, 0, NULL); 5713 if (err) { 5714 dev_err(hba->dev, "%s: failed to disable bkops %d\n", 5715 __func__, err); 5716 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS); 5717 goto out; 5718 } 5719 5720 hba->auto_bkops_enabled = false; 5721 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled"); 5722 hba->is_urgent_bkops_lvl_checked = false; 5723 out: 5724 return err; 5725 } 5726 5727 /** 5728 * ufshcd_force_reset_auto_bkops - force reset auto bkops state 5729 * @hba: per adapter instance 5730 * 5731 * After a device reset the device may toggle the BKOPS_EN flag 5732 * to default value. The s/w tracking variables should be updated 5733 * as well. This function would change the auto-bkops state based on 5734 * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND. 
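/*
 * Usage sketch, not a real call site: how a power-management path could use
 * the two helpers above - let the device run background operations while the
 * host is idle, and take manual control back when predictable latency
 * matters.  The helper is made up; return values are propagated unmodified.
 */
static int __maybe_unused ufshcd_demo_bkops_policy(struct ufs_hba *hba,
						   bool host_idle)
{
	if (host_idle)
		return ufshcd_enable_auto_bkops(hba);

	return ufshcd_disable_auto_bkops(hba);
}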
5735 */ 5736 static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba) 5737 { 5738 if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) { 5739 hba->auto_bkops_enabled = false; 5740 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS; 5741 ufshcd_enable_auto_bkops(hba); 5742 } else { 5743 hba->auto_bkops_enabled = true; 5744 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS; 5745 ufshcd_disable_auto_bkops(hba); 5746 } 5747 hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT; 5748 hba->is_urgent_bkops_lvl_checked = false; 5749 } 5750 5751 static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status) 5752 { 5753 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, 5754 QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status); 5755 } 5756 5757 /** 5758 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status 5759 * @hba: per-adapter instance 5760 * @status: bkops_status value 5761 * 5762 * Read the bkops_status from the UFS device and enable the fBackgroundOpsEn 5763 * flag in the device to permit background operations if the device 5764 * bkops_status is greater than or equal to the "status" argument passed to 5765 * this function; disable it otherwise. 5766 * 5767 * Returns 0 for success, non-zero in case of failure. 5768 * 5769 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag 5770 * to know whether auto bkops is enabled or disabled after this function 5771 * returns control to it. 5772 */ 5773 static int ufshcd_bkops_ctrl(struct ufs_hba *hba, 5774 enum bkops_status status) 5775 { 5776 int err; 5777 u32 curr_status = 0; 5778 5779 err = ufshcd_get_bkops_status(hba, &curr_status); 5780 if (err) { 5781 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n", 5782 __func__, err); 5783 goto out; 5784 } else if (curr_status > BKOPS_STATUS_MAX) { 5785 dev_err(hba->dev, "%s: invalid BKOPS status %d\n", 5786 __func__, curr_status); 5787 err = -EINVAL; 5788 goto out; 5789 } 5790 5791 if (curr_status >= status) 5792 err = ufshcd_enable_auto_bkops(hba); 5793 else 5794 err = ufshcd_disable_auto_bkops(hba); 5795 out: 5796 return err; 5797 } 5798 5799 /** 5800 * ufshcd_urgent_bkops - handle urgent bkops exception event 5801 * @hba: per-adapter instance 5802 * 5803 * Enable fBackgroundOpsEn flag in the device to permit background 5804 * operations. 5805 * 5806 * If BKOPS is enabled, this function returns 0, 1 if BKOPS is not enabled, 5807 * and a negative error value for any other failure. 5808 */ 5809 static int ufshcd_urgent_bkops(struct ufs_hba *hba) 5810 { 5811 return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl); 5812 } 5813 5814 static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status) 5815 { 5816 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, 5817 QUERY_ATTR_IDN_EE_STATUS, 0, 0, status); 5818 } 5819 5820 static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba) 5821 { 5822 int err; 5823 u32 curr_status = 0; 5824 5825 if (hba->is_urgent_bkops_lvl_checked) 5826 goto enable_auto_bkops; 5827 5828 err = ufshcd_get_bkops_status(hba, &curr_status); 5829 if (err) { 5830 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n", 5831 __func__, err); 5832 goto out; 5833 } 5834 5835 /* 5836 * We are seeing that some devices are raising the urgent bkops 5837 * exception events even when the BKOPS status doesn't indicate performance 5838 * impact or a critical level. Handle these devices by determining their urgent 5839 * bkops status at runtime.
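/*
 * Illustration of the ufshcd_bkops_ctrl() policy above: auto-BKOPS is turned
 * on only when the device-reported bBackgroundOpStatus is at or above the
 * requested threshold (0 = none, 1 = non-critical, 2 = performance impact,
 * 3 = critical).  The helper is made up for illustration.
 */
static bool __maybe_unused ufshcd_demo_bkops_needed(u32 curr_status,
						    enum bkops_status threshold)
{
	return curr_status >= threshold;	/* e.g. 2 >= 2 -> enable auto-BKOPS */
}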
5840 */ 5841 if (curr_status < BKOPS_STATUS_PERF_IMPACT) { 5842 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n", 5843 __func__, curr_status); 5844 /* update the current status as the urgent bkops level */ 5845 hba->urgent_bkops_lvl = curr_status; 5846 hba->is_urgent_bkops_lvl_checked = true; 5847 } 5848 5849 enable_auto_bkops: 5850 err = ufshcd_enable_auto_bkops(hba); 5851 out: 5852 if (err < 0) 5853 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n", 5854 __func__, err); 5855 } 5856 5857 static void ufshcd_temp_exception_event_handler(struct ufs_hba *hba, u16 status) 5858 { 5859 u32 value; 5860 5861 if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, 5862 QUERY_ATTR_IDN_CASE_ROUGH_TEMP, 0, 0, &value)) 5863 return; 5864 5865 dev_info(hba->dev, "exception Tcase %d\n", value - 80); 5866 5867 ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP); 5868 5869 /* 5870 * A placeholder for the platform vendors to add whatever additional 5871 * steps required 5872 */ 5873 } 5874 5875 static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn) 5876 { 5877 u8 index; 5878 enum query_opcode opcode = set ? UPIU_QUERY_OPCODE_SET_FLAG : 5879 UPIU_QUERY_OPCODE_CLEAR_FLAG; 5880 5881 index = ufshcd_wb_get_query_index(hba); 5882 return ufshcd_query_flag_retry(hba, opcode, idn, index, NULL); 5883 } 5884 5885 int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable) 5886 { 5887 int ret; 5888 5889 if (!ufshcd_is_wb_allowed(hba) || 5890 hba->dev_info.wb_enabled == enable) 5891 return 0; 5892 5893 ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_EN); 5894 if (ret) { 5895 dev_err(hba->dev, "%s: Write Booster %s failed %d\n", 5896 __func__, enable ? "enabling" : "disabling", ret); 5897 return ret; 5898 } 5899 5900 hba->dev_info.wb_enabled = enable; 5901 dev_dbg(hba->dev, "%s: Write Booster %s\n", 5902 __func__, enable ? "enabled" : "disabled"); 5903 5904 return ret; 5905 } 5906 5907 static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba, 5908 bool enable) 5909 { 5910 int ret; 5911 5912 ret = __ufshcd_wb_toggle(hba, enable, 5913 QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8); 5914 if (ret) { 5915 dev_err(hba->dev, "%s: WB-Buf Flush during H8 %s failed %d\n", 5916 __func__, enable ? "enabling" : "disabling", ret); 5917 return; 5918 } 5919 dev_dbg(hba->dev, "%s: WB-Buf Flush during H8 %s\n", 5920 __func__, enable ? "enabled" : "disabled"); 5921 } 5922 5923 int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable) 5924 { 5925 int ret; 5926 5927 if (!ufshcd_is_wb_allowed(hba) || 5928 hba->dev_info.wb_buf_flush_enabled == enable) 5929 return 0; 5930 5931 ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN); 5932 if (ret) { 5933 dev_err(hba->dev, "%s: WB-Buf Flush %s failed %d\n", 5934 __func__, enable ? "enabling" : "disabling", ret); 5935 return ret; 5936 } 5937 5938 hba->dev_info.wb_buf_flush_enabled = enable; 5939 dev_dbg(hba->dev, "%s: WB-Buf Flush %s\n", 5940 __func__, enable ? 
"enabled" : "disabled"); 5941 5942 return ret; 5943 } 5944 5945 static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba, 5946 u32 avail_buf) 5947 { 5948 u32 cur_buf; 5949 int ret; 5950 u8 index; 5951 5952 index = ufshcd_wb_get_query_index(hba); 5953 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, 5954 QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE, 5955 index, 0, &cur_buf); 5956 if (ret) { 5957 dev_err(hba->dev, "%s: dCurWriteBoosterBufferSize read failed %d\n", 5958 __func__, ret); 5959 return false; 5960 } 5961 5962 if (!cur_buf) { 5963 dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n", 5964 cur_buf); 5965 return false; 5966 } 5967 /* Let it continue to flush when available buffer exceeds threshold */ 5968 return avail_buf < hba->vps->wb_flush_threshold; 5969 } 5970 5971 static void ufshcd_wb_force_disable(struct ufs_hba *hba) 5972 { 5973 if (ufshcd_is_wb_buf_flush_allowed(hba)) 5974 ufshcd_wb_toggle_buf_flush(hba, false); 5975 5976 ufshcd_wb_toggle_buf_flush_during_h8(hba, false); 5977 ufshcd_wb_toggle(hba, false); 5978 hba->caps &= ~UFSHCD_CAP_WB_EN; 5979 5980 dev_info(hba->dev, "%s: WB force disabled\n", __func__); 5981 } 5982 5983 static bool ufshcd_is_wb_buf_lifetime_available(struct ufs_hba *hba) 5984 { 5985 u32 lifetime; 5986 int ret; 5987 u8 index; 5988 5989 index = ufshcd_wb_get_query_index(hba); 5990 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, 5991 QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST, 5992 index, 0, &lifetime); 5993 if (ret) { 5994 dev_err(hba->dev, 5995 "%s: bWriteBoosterBufferLifeTimeEst read failed %d\n", 5996 __func__, ret); 5997 return false; 5998 } 5999 6000 if (lifetime == UFS_WB_EXCEED_LIFETIME) { 6001 dev_err(hba->dev, "%s: WB buf lifetime is exhausted 0x%02X\n", 6002 __func__, lifetime); 6003 return false; 6004 } 6005 6006 dev_dbg(hba->dev, "%s: WB buf lifetime is 0x%02X\n", 6007 __func__, lifetime); 6008 6009 return true; 6010 } 6011 6012 static bool ufshcd_wb_need_flush(struct ufs_hba *hba) 6013 { 6014 int ret; 6015 u32 avail_buf; 6016 u8 index; 6017 6018 if (!ufshcd_is_wb_allowed(hba)) 6019 return false; 6020 6021 if (!ufshcd_is_wb_buf_lifetime_available(hba)) { 6022 ufshcd_wb_force_disable(hba); 6023 return false; 6024 } 6025 6026 /* 6027 * The ufs device needs the vcc to be ON to flush. 6028 * With user-space reduction enabled, it's enough to enable flush 6029 * by checking only the available buffer. The threshold 6030 * defined here is > 90% full. 6031 * With user-space preserved enabled, the current-buffer 6032 * should be checked too because the wb buffer size can reduce 6033 * when disk tends to be full. This info is provided by current 6034 * buffer (dCurrentWriteBoosterBufferSize). There's no point in 6035 * keeping vcc on when current buffer is empty. 
6036 */ 6037 index = ufshcd_wb_get_query_index(hba); 6038 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, 6039 QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE, 6040 index, 0, &avail_buf); 6041 if (ret) { 6042 dev_warn(hba->dev, "%s: dAvailableWriteBoosterBufferSize read failed %d\n", 6043 __func__, ret); 6044 return false; 6045 } 6046 6047 if (!hba->dev_info.b_presrv_uspc_en) 6048 return avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10); 6049 6050 return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf); 6051 } 6052 6053 static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work) 6054 { 6055 struct ufs_hba *hba = container_of(to_delayed_work(work), 6056 struct ufs_hba, 6057 rpm_dev_flush_recheck_work); 6058 /* 6059 * To prevent unnecessary VCC power drain after device finishes 6060 * WriteBooster buffer flush or Auto BKOPs, force runtime resume 6061 * after a certain delay to recheck the threshold by next runtime 6062 * suspend. 6063 */ 6064 ufshcd_rpm_get_sync(hba); 6065 ufshcd_rpm_put_sync(hba); 6066 } 6067 6068 /** 6069 * ufshcd_exception_event_handler - handle exceptions raised by device 6070 * @work: pointer to work data 6071 * 6072 * Read bExceptionEventStatus attribute from the device and handle the 6073 * exception event accordingly. 6074 */ 6075 static void ufshcd_exception_event_handler(struct work_struct *work) 6076 { 6077 struct ufs_hba *hba; 6078 int err; 6079 u32 status = 0; 6080 hba = container_of(work, struct ufs_hba, eeh_work); 6081 6082 ufshcd_scsi_block_requests(hba); 6083 err = ufshcd_get_ee_status(hba, &status); 6084 if (err) { 6085 dev_err(hba->dev, "%s: failed to get exception status %d\n", 6086 __func__, err); 6087 goto out; 6088 } 6089 6090 trace_ufshcd_exception_event(dev_name(hba->dev), status); 6091 6092 if (status & hba->ee_drv_mask & MASK_EE_URGENT_BKOPS) 6093 ufshcd_bkops_exception_event_handler(hba); 6094 6095 if (status & hba->ee_drv_mask & MASK_EE_URGENT_TEMP) 6096 ufshcd_temp_exception_event_handler(hba, status); 6097 6098 ufs_debugfs_exception_event(hba, status); 6099 out: 6100 ufshcd_scsi_unblock_requests(hba); 6101 } 6102 6103 /* Complete requests that have door-bell cleared */ 6104 static void ufshcd_complete_requests(struct ufs_hba *hba) 6105 { 6106 ufshcd_transfer_req_compl(hba); 6107 ufshcd_tmc_handler(hba); 6108 } 6109 6110 /** 6111 * ufshcd_quirk_dl_nac_errors - This function checks if error handling is 6112 * to recover from the DL NAC errors or not. 6113 * @hba: per-adapter instance 6114 * 6115 * Returns true if error handling is required, false otherwise 6116 */ 6117 static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba) 6118 { 6119 unsigned long flags; 6120 bool err_handling = true; 6121 6122 spin_lock_irqsave(hba->host->host_lock, flags); 6123 /* 6124 * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only workaround the 6125 * device fatal error and/or DL NAC & REPLAY timeout errors. 6126 */ 6127 if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR)) 6128 goto out; 6129 6130 if ((hba->saved_err & DEVICE_FATAL_ERROR) || 6131 ((hba->saved_err & UIC_ERROR) && 6132 (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR))) 6133 goto out; 6134 6135 if ((hba->saved_err & UIC_ERROR) && 6136 (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) { 6137 int err; 6138 /* 6139 * wait for 50ms to see if we can get any other errors or not. 
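/*
 * Simplified restatement, illustration only, of the ufshcd_wb_need_flush()
 * policy above: without preserve-user-space mode a flush is wanted once less
 * than 10% of the WriteBooster buffer remains available; with it, the current
 * buffer must also be non-empty and the available space below the vendor
 * flush threshold.  The helper is made up.
 */
static bool __maybe_unused ufshcd_demo_wb_flush_needed(bool presrv_uspc,
						       u32 avail_buf,
						       u32 cur_buf,
						       u32 flush_threshold)
{
	if (!presrv_uspc)
		return avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10);

	return cur_buf && avail_buf < flush_threshold;
}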
6140 */ 6141 spin_unlock_irqrestore(hba->host->host_lock, flags); 6142 msleep(50); 6143 spin_lock_irqsave(hba->host->host_lock, flags); 6144 6145 /* 6146 * now check if we have got any other severe errors other than 6147 * DL NAC error? 6148 */ 6149 if ((hba->saved_err & INT_FATAL_ERRORS) || 6150 ((hba->saved_err & UIC_ERROR) && 6151 (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR))) 6152 goto out; 6153 6154 /* 6155 * As DL NAC is the only error received so far, send out NOP 6156 * command to confirm if link is still active or not. 6157 * - If we don't get any response then do error recovery. 6158 * - If we get response then clear the DL NAC error bit. 6159 */ 6160 6161 spin_unlock_irqrestore(hba->host->host_lock, flags); 6162 err = ufshcd_verify_dev_init(hba); 6163 spin_lock_irqsave(hba->host->host_lock, flags); 6164 6165 if (err) 6166 goto out; 6167 6168 /* Link seems to be alive hence ignore the DL NAC errors */ 6169 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR) 6170 hba->saved_err &= ~UIC_ERROR; 6171 /* clear NAC error */ 6172 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR; 6173 if (!hba->saved_uic_err) 6174 err_handling = false; 6175 } 6176 out: 6177 spin_unlock_irqrestore(hba->host->host_lock, flags); 6178 return err_handling; 6179 } 6180 6181 /* host lock must be held before calling this func */ 6182 static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba) 6183 { 6184 return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) || 6185 (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)); 6186 } 6187 6188 void ufshcd_schedule_eh_work(struct ufs_hba *hba) 6189 { 6190 lockdep_assert_held(hba->host->host_lock); 6191 6192 /* handle fatal errors only when link is not in error state */ 6193 if (hba->ufshcd_state != UFSHCD_STATE_ERROR) { 6194 if (hba->force_reset || ufshcd_is_link_broken(hba) || 6195 ufshcd_is_saved_err_fatal(hba)) 6196 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL; 6197 else 6198 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL; 6199 queue_work(hba->eh_wq, &hba->eh_work); 6200 } 6201 } 6202 6203 static void ufshcd_force_error_recovery(struct ufs_hba *hba) 6204 { 6205 spin_lock_irq(hba->host->host_lock); 6206 hba->force_reset = true; 6207 ufshcd_schedule_eh_work(hba); 6208 spin_unlock_irq(hba->host->host_lock); 6209 } 6210 6211 static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow) 6212 { 6213 mutex_lock(&hba->wb_mutex); 6214 down_write(&hba->clk_scaling_lock); 6215 hba->clk_scaling.is_allowed = allow; 6216 up_write(&hba->clk_scaling_lock); 6217 mutex_unlock(&hba->wb_mutex); 6218 } 6219 6220 static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend) 6221 { 6222 if (suspend) { 6223 if (hba->clk_scaling.is_enabled) 6224 ufshcd_suspend_clkscaling(hba); 6225 ufshcd_clk_scaling_allow(hba, false); 6226 } else { 6227 ufshcd_clk_scaling_allow(hba, true); 6228 if (hba->clk_scaling.is_enabled) 6229 ufshcd_resume_clkscaling(hba); 6230 } 6231 } 6232 6233 static void ufshcd_err_handling_prepare(struct ufs_hba *hba) 6234 { 6235 ufshcd_rpm_get_sync(hba); 6236 if (pm_runtime_status_suspended(&hba->ufs_device_wlun->sdev_gendev) || 6237 hba->is_sys_suspended) { 6238 enum ufs_pm_op pm_op; 6239 6240 /* 6241 * Don't assume anything of resume, if 6242 * resume fails, irq and clocks can be OFF, and powers 6243 * can be OFF or in LPM. 
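/*
 * Illustration of the decision in ufshcd_schedule_eh_work() above: a forced
 * reset, a broken link or a fatal saved error escalates the handler to the
 * fatal scheduling state, anything else runs as non-fatal.  The helper is
 * made up; as in the real function, the caller is assumed to hold the host
 * lock.
 */
static int __maybe_unused ufshcd_demo_next_eh_state(struct ufs_hba *hba)
{
	if (hba->force_reset || ufshcd_is_link_broken(hba) ||
	    ufshcd_is_saved_err_fatal(hba))
		return UFSHCD_STATE_EH_SCHEDULED_FATAL;

	return UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
}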
6244 */ 6245 ufshcd_setup_hba_vreg(hba, true); 6246 ufshcd_enable_irq(hba); 6247 ufshcd_setup_vreg(hba, true); 6248 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq); 6249 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2); 6250 ufshcd_hold(hba, false); 6251 if (!ufshcd_is_clkgating_allowed(hba)) 6252 ufshcd_setup_clocks(hba, true); 6253 ufshcd_release(hba); 6254 pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM; 6255 ufshcd_vops_resume(hba, pm_op); 6256 } else { 6257 ufshcd_hold(hba, false); 6258 if (ufshcd_is_clkscaling_supported(hba) && 6259 hba->clk_scaling.is_enabled) 6260 ufshcd_suspend_clkscaling(hba); 6261 ufshcd_clk_scaling_allow(hba, false); 6262 } 6263 ufshcd_scsi_block_requests(hba); 6264 /* Drain ufshcd_queuecommand() */ 6265 synchronize_rcu(); 6266 cancel_work_sync(&hba->eeh_work); 6267 } 6268 6269 static void ufshcd_err_handling_unprepare(struct ufs_hba *hba) 6270 { 6271 ufshcd_scsi_unblock_requests(hba); 6272 ufshcd_release(hba); 6273 if (ufshcd_is_clkscaling_supported(hba)) 6274 ufshcd_clk_scaling_suspend(hba, false); 6275 ufshcd_rpm_put(hba); 6276 } 6277 6278 static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba) 6279 { 6280 return (!hba->is_powered || hba->shutting_down || 6281 !hba->ufs_device_wlun || 6282 hba->ufshcd_state == UFSHCD_STATE_ERROR || 6283 (!(hba->saved_err || hba->saved_uic_err || hba->force_reset || 6284 ufshcd_is_link_broken(hba)))); 6285 } 6286 6287 #ifdef CONFIG_PM 6288 static void ufshcd_recover_pm_error(struct ufs_hba *hba) 6289 { 6290 struct Scsi_Host *shost = hba->host; 6291 struct scsi_device *sdev; 6292 struct request_queue *q; 6293 int ret; 6294 6295 hba->is_sys_suspended = false; 6296 /* 6297 * Set RPM status of wlun device to RPM_ACTIVE, 6298 * this also clears its runtime error. 6299 */ 6300 ret = pm_runtime_set_active(&hba->ufs_device_wlun->sdev_gendev); 6301 6302 /* hba device might have a runtime error otherwise */ 6303 if (ret) 6304 ret = pm_runtime_set_active(hba->dev); 6305 /* 6306 * If wlun device had runtime error, we also need to resume those 6307 * consumer scsi devices in case any of them has failed to be 6308 * resumed due to supplier runtime resume failure. This is to unblock 6309 * blk_queue_enter in case there are bios waiting inside it. 6310 */ 6311 if (!ret) { 6312 shost_for_each_device(sdev, shost) { 6313 q = sdev->request_queue; 6314 if (q->dev && (q->rpm_status == RPM_SUSPENDED || 6315 q->rpm_status == RPM_SUSPENDING)) 6316 pm_request_resume(q->dev); 6317 } 6318 } 6319 } 6320 #else 6321 static inline void ufshcd_recover_pm_error(struct ufs_hba *hba) 6322 { 6323 } 6324 #endif 6325 6326 static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba) 6327 { 6328 struct ufs_pa_layer_attr *pwr_info = &hba->pwr_info; 6329 u32 mode; 6330 6331 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode); 6332 6333 if (pwr_info->pwr_rx != ((mode >> PWRMODE_RX_OFFSET) & PWRMODE_MASK)) 6334 return true; 6335 6336 if (pwr_info->pwr_tx != (mode & PWRMODE_MASK)) 6337 return true; 6338 6339 return false; 6340 } 6341 6342 static bool ufshcd_abort_all(struct ufs_hba *hba) 6343 { 6344 bool needs_reset = false; 6345 int tag, ret; 6346 6347 /* Clear pending transfer requests */ 6348 for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) { 6349 ret = ufshcd_try_to_abort_task(hba, tag); 6350 dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag, 6351 hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1, 6352 ret ? 
"failed" : "succeeded"); 6353 if (ret) { 6354 needs_reset = true; 6355 goto out; 6356 } 6357 } 6358 6359 /* Clear pending task management requests */ 6360 for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) { 6361 if (ufshcd_clear_tm_cmd(hba, tag)) { 6362 needs_reset = true; 6363 goto out; 6364 } 6365 } 6366 6367 out: 6368 /* Complete the requests that are cleared by s/w */ 6369 ufshcd_complete_requests(hba); 6370 6371 return needs_reset; 6372 } 6373 6374 /** 6375 * ufshcd_err_handler - handle UFS errors that require s/w attention 6376 * @work: pointer to work structure 6377 */ 6378 static void ufshcd_err_handler(struct work_struct *work) 6379 { 6380 int retries = MAX_ERR_HANDLER_RETRIES; 6381 struct ufs_hba *hba; 6382 unsigned long flags; 6383 bool needs_restore; 6384 bool needs_reset; 6385 int pmc_err; 6386 6387 hba = container_of(work, struct ufs_hba, eh_work); 6388 6389 dev_info(hba->dev, 6390 "%s started; HBA state %s; powered %d; shutting down %d; saved_err = %d; saved_uic_err = %d; force_reset = %d%s\n", 6391 __func__, ufshcd_state_name[hba->ufshcd_state], 6392 hba->is_powered, hba->shutting_down, hba->saved_err, 6393 hba->saved_uic_err, hba->force_reset, 6394 ufshcd_is_link_broken(hba) ? "; link is broken" : ""); 6395 6396 down(&hba->host_sem); 6397 spin_lock_irqsave(hba->host->host_lock, flags); 6398 if (ufshcd_err_handling_should_stop(hba)) { 6399 if (hba->ufshcd_state != UFSHCD_STATE_ERROR) 6400 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; 6401 spin_unlock_irqrestore(hba->host->host_lock, flags); 6402 up(&hba->host_sem); 6403 return; 6404 } 6405 ufshcd_set_eh_in_progress(hba); 6406 spin_unlock_irqrestore(hba->host->host_lock, flags); 6407 ufshcd_err_handling_prepare(hba); 6408 /* Complete requests that have door-bell cleared by h/w */ 6409 ufshcd_complete_requests(hba); 6410 spin_lock_irqsave(hba->host->host_lock, flags); 6411 again: 6412 needs_restore = false; 6413 needs_reset = false; 6414 6415 if (hba->ufshcd_state != UFSHCD_STATE_ERROR) 6416 hba->ufshcd_state = UFSHCD_STATE_RESET; 6417 /* 6418 * A full reset and restore might have happened after preparation 6419 * is finished, double check whether we should stop. 
6420 */ 6421 if (ufshcd_err_handling_should_stop(hba)) 6422 goto skip_err_handling; 6423 6424 if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) { 6425 bool ret; 6426 6427 spin_unlock_irqrestore(hba->host->host_lock, flags); 6428 /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */ 6429 ret = ufshcd_quirk_dl_nac_errors(hba); 6430 spin_lock_irqsave(hba->host->host_lock, flags); 6431 if (!ret && ufshcd_err_handling_should_stop(hba)) 6432 goto skip_err_handling; 6433 } 6434 6435 if ((hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) || 6436 (hba->saved_uic_err && 6437 (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) { 6438 bool pr_prdt = !!(hba->saved_err & SYSTEM_BUS_FATAL_ERROR); 6439 6440 spin_unlock_irqrestore(hba->host->host_lock, flags); 6441 ufshcd_print_host_state(hba); 6442 ufshcd_print_pwr_info(hba); 6443 ufshcd_print_evt_hist(hba); 6444 ufshcd_print_tmrs(hba, hba->outstanding_tasks); 6445 ufshcd_print_trs_all(hba, pr_prdt); 6446 spin_lock_irqsave(hba->host->host_lock, flags); 6447 } 6448 6449 /* 6450 * if host reset is required then skip clearing the pending 6451 * transfers forcefully because they will get cleared during 6452 * host reset and restore 6453 */ 6454 if (hba->force_reset || ufshcd_is_link_broken(hba) || 6455 ufshcd_is_saved_err_fatal(hba) || 6456 ((hba->saved_err & UIC_ERROR) && 6457 (hba->saved_uic_err & (UFSHCD_UIC_DL_NAC_RECEIVED_ERROR | 6458 UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))) { 6459 needs_reset = true; 6460 goto do_reset; 6461 } 6462 6463 /* 6464 * If LINERESET was caught, UFS might have been put to PWM mode, 6465 * check if power mode restore is needed. 6466 */ 6467 if (hba->saved_uic_err & UFSHCD_UIC_PA_GENERIC_ERROR) { 6468 hba->saved_uic_err &= ~UFSHCD_UIC_PA_GENERIC_ERROR; 6469 if (!hba->saved_uic_err) 6470 hba->saved_err &= ~UIC_ERROR; 6471 spin_unlock_irqrestore(hba->host->host_lock, flags); 6472 if (ufshcd_is_pwr_mode_restore_needed(hba)) 6473 needs_restore = true; 6474 spin_lock_irqsave(hba->host->host_lock, flags); 6475 if (!hba->saved_err && !needs_restore) 6476 goto skip_err_handling; 6477 } 6478 6479 hba->silence_err_logs = true; 6480 /* release lock as clear command might sleep */ 6481 spin_unlock_irqrestore(hba->host->host_lock, flags); 6482 6483 needs_reset = ufshcd_abort_all(hba); 6484 6485 spin_lock_irqsave(hba->host->host_lock, flags); 6486 hba->silence_err_logs = false; 6487 if (needs_reset) 6488 goto do_reset; 6489 6490 /* 6491 * After all reqs and tasks are cleared from doorbell, 6492 * now it is safe to restore power mode. 6493 */ 6494 if (needs_restore) { 6495 spin_unlock_irqrestore(hba->host->host_lock, flags); 6496 /* 6497 * Hold the scaling lock just in case dev cmds 6498 * are sent via bsg and/or sysfs.
6499 */ 6500 down_write(&hba->clk_scaling_lock); 6501 hba->force_pmc = true; 6502 pmc_err = ufshcd_config_pwr_mode(hba, &(hba->pwr_info)); 6503 if (pmc_err) { 6504 needs_reset = true; 6505 dev_err(hba->dev, "%s: Failed to restore power mode, err = %d\n", 6506 __func__, pmc_err); 6507 } 6508 hba->force_pmc = false; 6509 ufshcd_print_pwr_info(hba); 6510 up_write(&hba->clk_scaling_lock); 6511 spin_lock_irqsave(hba->host->host_lock, flags); 6512 } 6513 6514 do_reset: 6515 /* Fatal errors need reset */ 6516 if (needs_reset) { 6517 int err; 6518 6519 hba->force_reset = false; 6520 spin_unlock_irqrestore(hba->host->host_lock, flags); 6521 err = ufshcd_reset_and_restore(hba); 6522 if (err) 6523 dev_err(hba->dev, "%s: reset and restore failed with err %d\n", 6524 __func__, err); 6525 else 6526 ufshcd_recover_pm_error(hba); 6527 spin_lock_irqsave(hba->host->host_lock, flags); 6528 } 6529 6530 skip_err_handling: 6531 if (!needs_reset) { 6532 if (hba->ufshcd_state == UFSHCD_STATE_RESET) 6533 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; 6534 if (hba->saved_err || hba->saved_uic_err) 6535 dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x", 6536 __func__, hba->saved_err, hba->saved_uic_err); 6537 } 6538 /* Exit in an operational state or dead */ 6539 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL && 6540 hba->ufshcd_state != UFSHCD_STATE_ERROR) { 6541 if (--retries) 6542 goto again; 6543 hba->ufshcd_state = UFSHCD_STATE_ERROR; 6544 } 6545 ufshcd_clear_eh_in_progress(hba); 6546 spin_unlock_irqrestore(hba->host->host_lock, flags); 6547 ufshcd_err_handling_unprepare(hba); 6548 up(&hba->host_sem); 6549 6550 dev_info(hba->dev, "%s finished; HBA state %s\n", __func__, 6551 ufshcd_state_name[hba->ufshcd_state]); 6552 } 6553 6554 /** 6555 * ufshcd_update_uic_error - check and set fatal UIC error flags. 6556 * @hba: per-adapter instance 6557 * 6558 * Returns 6559 * IRQ_HANDLED - If interrupt is valid 6560 * IRQ_NONE - If invalid interrupt 6561 */ 6562 static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba) 6563 { 6564 u32 reg; 6565 irqreturn_t retval = IRQ_NONE; 6566 6567 /* PHY layer error */ 6568 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER); 6569 if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) && 6570 (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) { 6571 ufshcd_update_evt_hist(hba, UFS_EVT_PA_ERR, reg); 6572 /* 6573 * To know whether this error is fatal or not, DB timeout 6574 * must be checked but this error is handled separately. 6575 */ 6576 if (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK) 6577 dev_dbg(hba->dev, "%s: UIC Lane error reported\n", 6578 __func__); 6579 6580 /* Got a LINERESET indication. */ 6581 if (reg & UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR) { 6582 struct uic_command *cmd = NULL; 6583 6584 hba->uic_error |= UFSHCD_UIC_PA_GENERIC_ERROR; 6585 if (hba->uic_async_done && hba->active_uic_cmd) 6586 cmd = hba->active_uic_cmd; 6587 /* 6588 * Ignore the LINERESET during power mode change 6589 * operation via DME_SET command. 
6590 */ 6591 if (cmd && (cmd->command == UIC_CMD_DME_SET)) 6592 hba->uic_error &= ~UFSHCD_UIC_PA_GENERIC_ERROR; 6593 } 6594 retval |= IRQ_HANDLED; 6595 } 6596 6597 /* PA_INIT_ERROR is fatal and needs UIC reset */ 6598 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER); 6599 if ((reg & UIC_DATA_LINK_LAYER_ERROR) && 6600 (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) { 6601 ufshcd_update_evt_hist(hba, UFS_EVT_DL_ERR, reg); 6602 6603 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) 6604 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR; 6605 else if (hba->dev_quirks & 6606 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) { 6607 if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED) 6608 hba->uic_error |= 6609 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR; 6610 else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT) 6611 hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR; 6612 } 6613 retval |= IRQ_HANDLED; 6614 } 6615 6616 /* UIC NL/TL/DME errors needs software retry */ 6617 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER); 6618 if ((reg & UIC_NETWORK_LAYER_ERROR) && 6619 (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) { 6620 ufshcd_update_evt_hist(hba, UFS_EVT_NL_ERR, reg); 6621 hba->uic_error |= UFSHCD_UIC_NL_ERROR; 6622 retval |= IRQ_HANDLED; 6623 } 6624 6625 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER); 6626 if ((reg & UIC_TRANSPORT_LAYER_ERROR) && 6627 (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) { 6628 ufshcd_update_evt_hist(hba, UFS_EVT_TL_ERR, reg); 6629 hba->uic_error |= UFSHCD_UIC_TL_ERROR; 6630 retval |= IRQ_HANDLED; 6631 } 6632 6633 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME); 6634 if ((reg & UIC_DME_ERROR) && 6635 (reg & UIC_DME_ERROR_CODE_MASK)) { 6636 ufshcd_update_evt_hist(hba, UFS_EVT_DME_ERR, reg); 6637 hba->uic_error |= UFSHCD_UIC_DME_ERROR; 6638 retval |= IRQ_HANDLED; 6639 } 6640 6641 dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n", 6642 __func__, hba->uic_error); 6643 return retval; 6644 } 6645 6646 /** 6647 * ufshcd_check_errors - Check for errors that need s/w attention 6648 * @hba: per-adapter instance 6649 * @intr_status: interrupt status generated by the controller 6650 * 6651 * Returns 6652 * IRQ_HANDLED - If interrupt is valid 6653 * IRQ_NONE - If invalid interrupt 6654 */ 6655 static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status) 6656 { 6657 bool queue_eh_work = false; 6658 irqreturn_t retval = IRQ_NONE; 6659 6660 spin_lock(hba->host->host_lock); 6661 hba->errors |= UFSHCD_ERROR_MASK & intr_status; 6662 6663 if (hba->errors & INT_FATAL_ERRORS) { 6664 ufshcd_update_evt_hist(hba, UFS_EVT_FATAL_ERR, 6665 hba->errors); 6666 queue_eh_work = true; 6667 } 6668 6669 if (hba->errors & UIC_ERROR) { 6670 hba->uic_error = 0; 6671 retval = ufshcd_update_uic_error(hba); 6672 if (hba->uic_error) 6673 queue_eh_work = true; 6674 } 6675 6676 if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) { 6677 dev_err(hba->dev, 6678 "%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n", 6679 __func__, (hba->errors & UIC_HIBERNATE_ENTER) ? 6680 "Enter" : "Exit", 6681 hba->errors, ufshcd_get_upmcrs(hba)); 6682 ufshcd_update_evt_hist(hba, UFS_EVT_AUTO_HIBERN8_ERR, 6683 hba->errors); 6684 ufshcd_set_link_broken(hba); 6685 queue_eh_work = true; 6686 } 6687 6688 if (queue_eh_work) { 6689 /* 6690 * update the transfer error masks to sticky bits, let's do this 6691 * irrespective of current ufshcd_state. 
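/*
 * Illustration with a made-up register value: decoding a data link layer
 * error word the way ufshcd_update_uic_error() above does.  The ERROR flag
 * says an error was latched, the code mask extracts the cause, and PA_INIT
 * errors are the fatal subset that force a full reset.
 */
static void __maybe_unused ufshcd_demo_decode_dl_error(void)
{
	u32 reg = UIC_DATA_LINK_LAYER_ERROR | UIC_DATA_LINK_LAYER_ERROR_PA_INIT;

	if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
	    (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK))
		pr_debug("DL error code %#x, fatal PA_INIT %d\n",
			 reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK,
			 !!(reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT));
}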
6692 */ 6693 hba->saved_err |= hba->errors; 6694 hba->saved_uic_err |= hba->uic_error; 6695 6696 /* dump controller state before resetting */ 6697 if ((hba->saved_err & 6698 (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) || 6699 (hba->saved_uic_err && 6700 (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) { 6701 dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n", 6702 __func__, hba->saved_err, 6703 hba->saved_uic_err); 6704 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, 6705 "host_regs: "); 6706 ufshcd_print_pwr_info(hba); 6707 } 6708 ufshcd_schedule_eh_work(hba); 6709 retval |= IRQ_HANDLED; 6710 } 6711 /* 6712 * if (!queue_eh_work) - 6713 * Other errors are either non-fatal where host recovers 6714 * itself without s/w intervention or errors that will be 6715 * handled by the SCSI core layer. 6716 */ 6717 hba->errors = 0; 6718 hba->uic_error = 0; 6719 spin_unlock(hba->host->host_lock); 6720 return retval; 6721 } 6722 6723 /** 6724 * ufshcd_tmc_handler - handle task management function completion 6725 * @hba: per adapter instance 6726 * 6727 * Returns 6728 * IRQ_HANDLED - If interrupt is valid 6729 * IRQ_NONE - If invalid interrupt 6730 */ 6731 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba) 6732 { 6733 unsigned long flags, pending, issued; 6734 irqreturn_t ret = IRQ_NONE; 6735 int tag; 6736 6737 spin_lock_irqsave(hba->host->host_lock, flags); 6738 pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL); 6739 issued = hba->outstanding_tasks & ~pending; 6740 for_each_set_bit(tag, &issued, hba->nutmrs) { 6741 struct request *req = hba->tmf_rqs[tag]; 6742 struct completion *c = req->end_io_data; 6743 6744 complete(c); 6745 ret = IRQ_HANDLED; 6746 } 6747 spin_unlock_irqrestore(hba->host->host_lock, flags); 6748 6749 return ret; 6750 } 6751 6752 /** 6753 * ufshcd_handle_mcq_cq_events - handle MCQ completion queue events 6754 * @hba: per adapter instance 6755 * 6756 * Returns IRQ_HANDLED if interrupt is handled 6757 */ 6758 static irqreturn_t ufshcd_handle_mcq_cq_events(struct ufs_hba *hba) 6759 { 6760 struct ufs_hw_queue *hwq; 6761 unsigned long outstanding_cqs; 6762 unsigned int nr_queues; 6763 int i, ret; 6764 u32 events; 6765 6766 ret = ufshcd_vops_get_outstanding_cqs(hba, &outstanding_cqs); 6767 if (ret) 6768 outstanding_cqs = (1U << hba->nr_hw_queues) - 1; 6769 6770 /* Exclude the poll queues */ 6771 nr_queues = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL]; 6772 for_each_set_bit(i, &outstanding_cqs, nr_queues) { 6773 hwq = &hba->uhq[i]; 6774 6775 events = ufshcd_mcq_read_cqis(hba, i); 6776 if (events) 6777 ufshcd_mcq_write_cqis(hba, events, i); 6778 6779 if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS) 6780 ufshcd_mcq_poll_cqe_nolock(hba, hwq); 6781 } 6782 6783 return IRQ_HANDLED; 6784 } 6785 6786 /** 6787 * ufshcd_sl_intr - Interrupt service routine 6788 * @hba: per adapter instance 6789 * @intr_status: contains interrupts generated by the controller 6790 * 6791 * Returns 6792 * IRQ_HANDLED - If interrupt is valid 6793 * IRQ_NONE - If invalid interrupt 6794 */ 6795 static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) 6796 { 6797 irqreturn_t retval = IRQ_NONE; 6798 6799 if (intr_status & UFSHCD_UIC_MASK) 6800 retval |= ufshcd_uic_cmd_compl(hba, intr_status); 6801 6802 if (intr_status & UFSHCD_ERROR_MASK || hba->errors) 6803 retval |= ufshcd_check_errors(hba, intr_status); 6804 6805 if (intr_status & UTP_TASK_REQ_COMPL) 6806 retval |= ufshcd_tmc_handler(hba); 6807 6808 if (intr_status & UTP_TRANSFER_REQ_COMPL) 6809 retval |= 
ufshcd_transfer_req_compl(hba); 6810 6811 if (intr_status & MCQ_CQ_EVENT_STATUS) 6812 retval |= ufshcd_handle_mcq_cq_events(hba); 6813 6814 return retval; 6815 } 6816 6817 /** 6818 * ufshcd_intr - Main interrupt service routine 6819 * @irq: irq number 6820 * @__hba: pointer to adapter instance 6821 * 6822 * Returns 6823 * IRQ_HANDLED - If interrupt is valid 6824 * IRQ_NONE - If invalid interrupt 6825 */ 6826 static irqreturn_t ufshcd_intr(int irq, void *__hba) 6827 { 6828 u32 intr_status, enabled_intr_status = 0; 6829 irqreturn_t retval = IRQ_NONE; 6830 struct ufs_hba *hba = __hba; 6831 int retries = hba->nutrs; 6832 6833 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); 6834 hba->ufs_stats.last_intr_status = intr_status; 6835 hba->ufs_stats.last_intr_ts = local_clock(); 6836 6837 /* 6838 * There could be a max of hba->nutrs reqs in flight and in worst case 6839 * if the reqs get finished 1 by 1 after the interrupt status is 6840 * read, make sure we handle them by checking the interrupt status 6841 * again in a loop until we process all of the reqs before returning. 6842 */ 6843 while (intr_status && retries--) { 6844 enabled_intr_status = 6845 intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE); 6846 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS); 6847 if (enabled_intr_status) 6848 retval |= ufshcd_sl_intr(hba, enabled_intr_status); 6849 6850 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); 6851 } 6852 6853 if (enabled_intr_status && retval == IRQ_NONE && 6854 (!(enabled_intr_status & UTP_TRANSFER_REQ_COMPL) || 6855 hba->outstanding_reqs) && !ufshcd_eh_in_progress(hba)) { 6856 dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n", 6857 __func__, 6858 intr_status, 6859 hba->ufs_stats.last_intr_status, 6860 enabled_intr_status); 6861 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: "); 6862 } 6863 6864 return retval; 6865 } 6866 6867 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag) 6868 { 6869 int err = 0; 6870 u32 mask = 1 << tag; 6871 unsigned long flags; 6872 6873 if (!test_bit(tag, &hba->outstanding_tasks)) 6874 goto out; 6875 6876 spin_lock_irqsave(hba->host->host_lock, flags); 6877 ufshcd_utmrl_clear(hba, tag); 6878 spin_unlock_irqrestore(hba->host->host_lock, flags); 6879 6880 /* poll for max. 1 sec to clear door bell register by h/w */ 6881 err = ufshcd_wait_for_register(hba, 6882 REG_UTP_TASK_REQ_DOOR_BELL, 6883 mask, 0, 1000, 1000); 6884 6885 dev_err(hba->dev, "Clearing task management function with tag %d %s\n", 6886 tag, err ? "failed" : "succeeded"); 6887 6888 out: 6889 return err; 6890 } 6891 6892 static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba, 6893 struct utp_task_req_desc *treq, u8 tm_function) 6894 { 6895 struct request_queue *q = hba->tmf_queue; 6896 struct Scsi_Host *host = hba->host; 6897 DECLARE_COMPLETION_ONSTACK(wait); 6898 struct request *req; 6899 unsigned long flags; 6900 int task_tag, err; 6901 6902 /* 6903 * blk_mq_alloc_request() is used here only to get a free tag.
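/*
 * Wiring sketch, illustration only: ufshcd_intr() above is the top-level
 * ISR, so a host driver hooks it up as a shared interrupt with the hba as
 * the cookie.  The real registration happens during host initialization
 * elsewhere in this driver; this made-up helper only shows the shape of the
 * call.
 */
static int __maybe_unused ufshcd_demo_register_irq(struct device *dev, int irq,
						   struct ufs_hba *hba)
{
	return devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED,
				"ufshcd", hba);
}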
6904 */ 6905 req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0); 6906 if (IS_ERR(req)) 6907 return PTR_ERR(req); 6908 6909 req->end_io_data = &wait; 6910 ufshcd_hold(hba, false); 6911 6912 spin_lock_irqsave(host->host_lock, flags); 6913 6914 task_tag = req->tag; 6915 WARN_ONCE(task_tag < 0 || task_tag >= hba->nutmrs, "Invalid tag %d\n", 6916 task_tag); 6917 hba->tmf_rqs[req->tag] = req; 6918 treq->upiu_req.req_header.dword_0 |= cpu_to_be32(task_tag); 6919 6920 memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq)); 6921 ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function); 6922 6923 /* send command to the controller */ 6924 __set_bit(task_tag, &hba->outstanding_tasks); 6925 6926 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL); 6927 /* Make sure that doorbell is committed immediately */ 6928 wmb(); 6929 6930 spin_unlock_irqrestore(host->host_lock, flags); 6931 6932 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_SEND); 6933 6934 /* wait until the task management command is completed */ 6935 err = wait_for_completion_io_timeout(&wait, 6936 msecs_to_jiffies(TM_CMD_TIMEOUT)); 6937 if (!err) { 6938 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_ERR); 6939 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n", 6940 __func__, tm_function); 6941 if (ufshcd_clear_tm_cmd(hba, task_tag)) 6942 dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n", 6943 __func__, task_tag); 6944 err = -ETIMEDOUT; 6945 } else { 6946 err = 0; 6947 memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq)); 6948 6949 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_COMP); 6950 } 6951 6952 spin_lock_irqsave(hba->host->host_lock, flags); 6953 hba->tmf_rqs[req->tag] = NULL; 6954 __clear_bit(task_tag, &hba->outstanding_tasks); 6955 spin_unlock_irqrestore(hba->host->host_lock, flags); 6956 6957 ufshcd_release(hba); 6958 blk_mq_free_request(req); 6959 6960 return err; 6961 } 6962 6963 /** 6964 * ufshcd_issue_tm_cmd - issues task management commands to controller 6965 * @hba: per adapter instance 6966 * @lun_id: LUN ID to which TM command is sent 6967 * @task_id: task ID to which the TM command is applicable 6968 * @tm_function: task management function opcode 6969 * @tm_response: task management service response return value 6970 * 6971 * Returns non-zero value on error, zero on success. 6972 */ 6973 static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id, 6974 u8 tm_function, u8 *tm_response) 6975 { 6976 struct utp_task_req_desc treq = { { 0 }, }; 6977 enum utp_ocs ocs_value; 6978 int err; 6979 6980 /* Configure task request descriptor */ 6981 treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD); 6982 treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS); 6983 6984 /* Configure task request UPIU */ 6985 treq.upiu_req.req_header.dword_0 = cpu_to_be32(lun_id << 8) | 6986 cpu_to_be32(UPIU_TRANSACTION_TASK_REQ << 24); 6987 treq.upiu_req.req_header.dword_1 = cpu_to_be32(tm_function << 16); 6988 6989 /* 6990 * The host shall provide the same value for LUN field in the basic 6991 * header and for Input Parameter. 
6992 */ 6993 treq.upiu_req.input_param1 = cpu_to_be32(lun_id); 6994 treq.upiu_req.input_param2 = cpu_to_be32(task_id); 6995 6996 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function); 6997 if (err == -ETIMEDOUT) 6998 return err; 6999 7000 ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS; 7001 if (ocs_value != OCS_SUCCESS) 7002 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", 7003 __func__, ocs_value); 7004 else if (tm_response) 7005 *tm_response = be32_to_cpu(treq.upiu_rsp.output_param1) & 7006 MASK_TM_SERVICE_RESP; 7007 return err; 7008 } 7009 7010 /** 7011 * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests 7012 * @hba: per-adapter instance 7013 * @req_upiu: upiu request 7014 * @rsp_upiu: upiu reply 7015 * @desc_buff: pointer to descriptor buffer, NULL if NA 7016 * @buff_len: descriptor size, 0 if NA 7017 * @cmd_type: specifies the type (NOP, Query...) 7018 * @desc_op: descriptor operation 7019 * 7020 * Those type of requests uses UTP Transfer Request Descriptor - utrd. 7021 * Therefore, it "rides" the device management infrastructure: uses its tag and 7022 * tasks work queues. 7023 * 7024 * Since there is only one available tag for device management commands, 7025 * the caller is expected to hold the hba->dev_cmd.lock mutex. 7026 */ 7027 static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba, 7028 struct utp_upiu_req *req_upiu, 7029 struct utp_upiu_req *rsp_upiu, 7030 u8 *desc_buff, int *buff_len, 7031 enum dev_cmd_type cmd_type, 7032 enum query_opcode desc_op) 7033 { 7034 DECLARE_COMPLETION_ONSTACK(wait); 7035 const u32 tag = hba->reserved_slot; 7036 struct ufshcd_lrb *lrbp; 7037 int err = 0; 7038 u8 upiu_flags; 7039 7040 /* Protects use of hba->reserved_slot. */ 7041 lockdep_assert_held(&hba->dev_cmd.lock); 7042 7043 down_read(&hba->clk_scaling_lock); 7044 7045 lrbp = &hba->lrb[tag]; 7046 WARN_ON(lrbp->cmd); 7047 lrbp->cmd = NULL; 7048 lrbp->task_tag = tag; 7049 lrbp->lun = 0; 7050 lrbp->intr_cmd = true; 7051 ufshcd_prepare_lrbp_crypto(NULL, lrbp); 7052 hba->dev_cmd.type = cmd_type; 7053 7054 if (hba->ufs_version <= ufshci_version(1, 1)) 7055 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE; 7056 else 7057 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE; 7058 7059 /* update the task tag in the request upiu */ 7060 req_upiu->header.dword_0 |= cpu_to_be32(tag); 7061 7062 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0); 7063 7064 /* just copy the upiu request as it is */ 7065 memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr)); 7066 if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) { 7067 /* The Data Segment Area is optional depending upon the query 7068 * function value. for WRITE DESCRIPTOR, the data segment 7069 * follows right after the tsf. 7070 */ 7071 memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len); 7072 *buff_len = 0; 7073 } 7074 7075 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); 7076 7077 hba->dev_cmd.complete = &wait; 7078 7079 ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr); 7080 7081 ufshcd_send_command(hba, tag, hba->dev_cmd_queue); 7082 /* 7083 * ignore the returning value here - ufshcd_check_query_response is 7084 * bound to fail since dev_cmd.query and dev_cmd.type were left empty. 7085 * read the response directly ignoring all errors. 
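/*
 * Usage sketch, illustration only: issuing a logical-unit reset through
 * ufshcd_issue_tm_cmd() above, mirroring what the SCSI error handler further
 * below does.  The helper is made up and 'lun' is already in UPIU encoding.
 */
static int __maybe_unused ufshcd_demo_lu_reset(struct ufs_hba *hba, u8 lun)
{
	u8 resp = 0xF;
	int err;

	err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
	if (!err && resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL)
		err = resp;	/* service response error */
	return err;
}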
7086 */ 7087 ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT); 7088 7089 /* just copy the upiu response as it is */ 7090 memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu)); 7091 if (desc_buff && desc_op == UPIU_QUERY_OPCODE_READ_DESC) { 7092 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu); 7093 u16 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) & 7094 MASK_QUERY_DATA_SEG_LEN; 7095 7096 if (*buff_len >= resp_len) { 7097 memcpy(desc_buff, descp, resp_len); 7098 *buff_len = resp_len; 7099 } else { 7100 dev_warn(hba->dev, 7101 "%s: rsp size %d is bigger than buffer size %d", 7102 __func__, resp_len, *buff_len); 7103 *buff_len = 0; 7104 err = -EINVAL; 7105 } 7106 } 7107 ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP, 7108 (struct utp_upiu_req *)lrbp->ucd_rsp_ptr); 7109 7110 up_read(&hba->clk_scaling_lock); 7111 return err; 7112 } 7113 7114 /** 7115 * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands 7116 * @hba: per-adapter instance 7117 * @req_upiu: upiu request 7118 * @rsp_upiu: upiu reply - only 8 DW as we do not support scsi commands 7119 * @msgcode: message code, one of UPIU Transaction Codes Initiator to Target 7120 * @desc_buff: pointer to descriptor buffer, NULL if NA 7121 * @buff_len: descriptor size, 0 if NA 7122 * @desc_op: descriptor operation 7123 * 7124 * Supports UTP Transfer requests (nop and query), and UTP Task 7125 * Management requests. 7126 * It is up to the caller to fill the upiu conent properly, as it will 7127 * be copied without any further input validations. 7128 */ 7129 int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba, 7130 struct utp_upiu_req *req_upiu, 7131 struct utp_upiu_req *rsp_upiu, 7132 int msgcode, 7133 u8 *desc_buff, int *buff_len, 7134 enum query_opcode desc_op) 7135 { 7136 int err; 7137 enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY; 7138 struct utp_task_req_desc treq = { { 0 }, }; 7139 enum utp_ocs ocs_value; 7140 u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC; 7141 7142 switch (msgcode) { 7143 case UPIU_TRANSACTION_NOP_OUT: 7144 cmd_type = DEV_CMD_TYPE_NOP; 7145 fallthrough; 7146 case UPIU_TRANSACTION_QUERY_REQ: 7147 ufshcd_hold(hba, false); 7148 mutex_lock(&hba->dev_cmd.lock); 7149 err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu, 7150 desc_buff, buff_len, 7151 cmd_type, desc_op); 7152 mutex_unlock(&hba->dev_cmd.lock); 7153 ufshcd_release(hba); 7154 7155 break; 7156 case UPIU_TRANSACTION_TASK_REQ: 7157 treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD); 7158 treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS); 7159 7160 memcpy(&treq.upiu_req, req_upiu, sizeof(*req_upiu)); 7161 7162 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f); 7163 if (err == -ETIMEDOUT) 7164 break; 7165 7166 ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS; 7167 if (ocs_value != OCS_SUCCESS) { 7168 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__, 7169 ocs_value); 7170 break; 7171 } 7172 7173 memcpy(rsp_upiu, &treq.upiu_rsp, sizeof(*rsp_upiu)); 7174 7175 break; 7176 default: 7177 err = -EINVAL; 7178 7179 break; 7180 } 7181 7182 return err; 7183 } 7184 7185 /** 7186 * ufshcd_advanced_rpmb_req_handler - handle advanced RPMB request 7187 * @hba: per adapter instance 7188 * @req_upiu: upiu request 7189 * @rsp_upiu: upiu reply 7190 * @req_ehs: EHS field which contains Advanced RPMB Request Message 7191 * @rsp_ehs: EHS field which returns Advanced RPMB Response Message 7192 * @sg_cnt: The number of sg lists actually used 7193 * @sg_list: Pointer to SG list 
when DATA IN/OUT UPIU is required in ARPMB operation 7194 * @dir: DMA direction 7195 * 7196 * Returns zero on success, non-zero on failure 7197 */ 7198 int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *req_upiu, 7199 struct utp_upiu_req *rsp_upiu, struct ufs_ehs *req_ehs, 7200 struct ufs_ehs *rsp_ehs, int sg_cnt, struct scatterlist *sg_list, 7201 enum dma_data_direction dir) 7202 { 7203 DECLARE_COMPLETION_ONSTACK(wait); 7204 const u32 tag = hba->reserved_slot; 7205 struct ufshcd_lrb *lrbp; 7206 int err = 0; 7207 int result; 7208 u8 upiu_flags; 7209 u8 *ehs_data; 7210 u16 ehs_len; 7211 7212 /* Protects use of hba->reserved_slot. */ 7213 ufshcd_hold(hba, false); 7214 mutex_lock(&hba->dev_cmd.lock); 7215 down_read(&hba->clk_scaling_lock); 7216 7217 lrbp = &hba->lrb[tag]; 7218 WARN_ON(lrbp->cmd); 7219 lrbp->cmd = NULL; 7220 lrbp->task_tag = tag; 7221 lrbp->lun = UFS_UPIU_RPMB_WLUN; 7222 7223 lrbp->intr_cmd = true; 7224 ufshcd_prepare_lrbp_crypto(NULL, lrbp); 7225 hba->dev_cmd.type = DEV_CMD_TYPE_RPMB; 7226 7227 /* Advanced RPMB starts from UFS 4.0, so its command type is UTP_CMD_TYPE_UFS_STORAGE */ 7228 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE; 7229 7230 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 2); 7231 7232 /* update the task tag and LUN in the request upiu */ 7233 req_upiu->header.dword_0 |= cpu_to_be32(upiu_flags << 16 | UFS_UPIU_RPMB_WLUN << 8 | tag); 7234 7235 /* copy the UPIU(contains CDB) request as it is */ 7236 memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr)); 7237 /* Copy EHS, starting with byte32, immediately after the CDB package */ 7238 memcpy(lrbp->ucd_req_ptr + 1, req_ehs, sizeof(*req_ehs)); 7239 7240 if (dir != DMA_NONE && sg_list) 7241 ufshcd_sgl_to_prdt(hba, lrbp, sg_cnt, sg_list); 7242 7243 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); 7244 7245 hba->dev_cmd.complete = &wait; 7246 7247 ufshcd_send_command(hba, tag, hba->dev_cmd_queue); 7248 7249 err = ufshcd_wait_for_dev_cmd(hba, lrbp, ADVANCED_RPMB_REQ_TIMEOUT); 7250 7251 if (!err) { 7252 /* Just copy the upiu response as it is */ 7253 memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu)); 7254 /* Get the response UPIU result */ 7255 result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr); 7256 7257 ehs_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) >> 24; 7258 /* 7259 * Since the bLength in EHS indicates the total size of the EHS Header and EHS Data 7260 * in 32 Byte units, the value of the bLength Request/Response for Advanced RPMB 7261 * Message is 02h 7262 */ 7263 if (ehs_len == 2 && rsp_ehs) { 7264 /* 7265 * ucd_rsp_ptr points to a buffer with a length of 512 bytes 7266 * (ALIGNED_UPIU_SIZE = 512), and the EHS data just starts from byte32 7267 */ 7268 ehs_data = (u8 *)lrbp->ucd_rsp_ptr + EHS_OFFSET_IN_RESPONSE; 7269 memcpy(rsp_ehs, ehs_data, ehs_len * 32); 7270 } 7271 } 7272 7273 up_read(&hba->clk_scaling_lock); 7274 mutex_unlock(&hba->dev_cmd.lock); 7275 ufshcd_release(hba); 7276 return err ? : result; 7277 } 7278 7279 /** 7280 * ufshcd_eh_device_reset_handler() - Reset a single logical unit. 
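/*
 * Usage sketch, illustration only: sending a NOP OUT through
 * ufshcd_exec_raw_upiu_cmd() defined above and receiving the NOP IN reply,
 * roughly what the BSG path does.  The helper is made up; a NOP OUT UPIU is
 * all zeroes and the infrastructure fills in the task tag, so no descriptor
 * payload is involved.
 */
static int __maybe_unused ufshcd_demo_send_nop_upiu(struct ufs_hba *hba)
{
	struct utp_upiu_req req = {};
	struct utp_upiu_req rsp = {};
	int buff_len = 0;

	return ufshcd_exec_raw_upiu_cmd(hba, &req, &rsp,
					UPIU_TRANSACTION_NOP_OUT,
					NULL, &buff_len,
					UPIU_QUERY_OPCODE_NOP);
}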
7281 * @cmd: SCSI command pointer 7282 * 7283 * Returns SUCCESS/FAILED 7284 */ 7285 static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd) 7286 { 7287 unsigned long flags, pending_reqs = 0, not_cleared = 0; 7288 struct Scsi_Host *host; 7289 struct ufs_hba *hba; 7290 u32 pos; 7291 int err; 7292 u8 resp = 0xF, lun; 7293 7294 host = cmd->device->host; 7295 hba = shost_priv(host); 7296 7297 lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun); 7298 err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp); 7299 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) { 7300 if (!err) 7301 err = resp; 7302 goto out; 7303 } 7304 7305 /* clear the commands that were pending for corresponding LUN */ 7306 spin_lock_irqsave(&hba->outstanding_lock, flags); 7307 for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) 7308 if (hba->lrb[pos].lun == lun) 7309 __set_bit(pos, &pending_reqs); 7310 hba->outstanding_reqs &= ~pending_reqs; 7311 spin_unlock_irqrestore(&hba->outstanding_lock, flags); 7312 7313 if (ufshcd_clear_cmds(hba, pending_reqs) < 0) { 7314 spin_lock_irqsave(&hba->outstanding_lock, flags); 7315 not_cleared = pending_reqs & 7316 ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); 7317 hba->outstanding_reqs |= not_cleared; 7318 spin_unlock_irqrestore(&hba->outstanding_lock, flags); 7319 7320 dev_err(hba->dev, "%s: failed to clear requests %#lx\n", 7321 __func__, not_cleared); 7322 } 7323 __ufshcd_transfer_req_compl(hba, pending_reqs & ~not_cleared); 7324 7325 out: 7326 hba->req_abort_count = 0; 7327 ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, (u32)err); 7328 if (!err) { 7329 err = SUCCESS; 7330 } else { 7331 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err); 7332 err = FAILED; 7333 } 7334 return err; 7335 } 7336 7337 static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap) 7338 { 7339 struct ufshcd_lrb *lrbp; 7340 int tag; 7341 7342 for_each_set_bit(tag, &bitmap, hba->nutrs) { 7343 lrbp = &hba->lrb[tag]; 7344 lrbp->req_abort_skip = true; 7345 } 7346 } 7347 7348 /** 7349 * ufshcd_try_to_abort_task - abort a specific task 7350 * @hba: Pointer to adapter instance 7351 * @tag: Task tag/index to be aborted 7352 * 7353 * Abort the pending command in device by sending UFS_ABORT_TASK task management 7354 * command, and in host controller by clearing the door-bell register. There can 7355 * be race between controller sending the command to the device while abort is 7356 * issued. To avoid that, first issue UFS_QUERY_TASK to check if the command is 7357 * really issued and then try to abort it. 7358 * 7359 * Returns zero on success, non-zero on failure 7360 */ 7361 static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag) 7362 { 7363 struct ufshcd_lrb *lrbp = &hba->lrb[tag]; 7364 int err = 0; 7365 int poll_cnt; 7366 u8 resp = 0xF; 7367 u32 reg; 7368 7369 for (poll_cnt = 100; poll_cnt; poll_cnt--) { 7370 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, 7371 UFS_QUERY_TASK, &resp); 7372 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) { 7373 /* cmd pending in the device */ 7374 dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n", 7375 __func__, tag); 7376 break; 7377 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) { 7378 /* 7379 * cmd not pending in the device, check if it is 7380 * in transition. 7381 */ 7382 dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n", 7383 __func__, tag); 7384 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); 7385 if (reg & (1 << tag)) { 7386 /* sleep for max. 
200us to stabilize */ 7387 usleep_range(100, 200); 7388 continue; 7389 } 7390 /* command completed already */ 7391 dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n", 7392 __func__, tag); 7393 goto out; 7394 } else { 7395 dev_err(hba->dev, 7396 "%s: no response from device. tag = %d, err %d\n", 7397 __func__, tag, err); 7398 if (!err) 7399 err = resp; /* service response error */ 7400 goto out; 7401 } 7402 } 7403 7404 if (!poll_cnt) { 7405 err = -EBUSY; 7406 goto out; 7407 } 7408 7409 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, 7410 UFS_ABORT_TASK, &resp); 7411 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) { 7412 if (!err) { 7413 err = resp; /* service response error */ 7414 dev_err(hba->dev, "%s: issued. tag = %d, err %d\n", 7415 __func__, tag, err); 7416 } 7417 goto out; 7418 } 7419 7420 err = ufshcd_clear_cmds(hba, 1U << tag); 7421 if (err) 7422 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n", 7423 __func__, tag, err); 7424 7425 out: 7426 return err; 7427 } 7428 7429 /** 7430 * ufshcd_abort - scsi host template eh_abort_handler callback 7431 * @cmd: SCSI command pointer 7432 * 7433 * Returns SUCCESS/FAILED 7434 */ 7435 static int ufshcd_abort(struct scsi_cmnd *cmd) 7436 { 7437 struct Scsi_Host *host = cmd->device->host; 7438 struct ufs_hba *hba = shost_priv(host); 7439 int tag = scsi_cmd_to_rq(cmd)->tag; 7440 struct ufshcd_lrb *lrbp = &hba->lrb[tag]; 7441 unsigned long flags; 7442 int err = FAILED; 7443 bool outstanding; 7444 u32 reg; 7445 7446 WARN_ONCE(tag < 0, "Invalid tag %d\n", tag); 7447 7448 ufshcd_hold(hba, false); 7449 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); 7450 /* If command is already aborted/completed, return FAILED. */ 7451 if (!(test_bit(tag, &hba->outstanding_reqs))) { 7452 dev_err(hba->dev, 7453 "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n", 7454 __func__, tag, hba->outstanding_reqs, reg); 7455 goto release; 7456 } 7457 7458 /* Print Transfer Request of aborted task */ 7459 dev_info(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag); 7460 7461 /* 7462 * Print detailed info about aborted request. 7463 * As more than one request might get aborted at the same time, 7464 * print full information only for the first aborted request in order 7465 * to reduce repeated printouts. For other aborted requests only print 7466 * basic details. 7467 */ 7468 scsi_print_command(cmd); 7469 if (!hba->req_abort_count) { 7470 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, tag); 7471 ufshcd_print_evt_hist(hba); 7472 ufshcd_print_host_state(hba); 7473 ufshcd_print_pwr_info(hba); 7474 ufshcd_print_tr(hba, tag, true); 7475 } else { 7476 ufshcd_print_tr(hba, tag, false); 7477 } 7478 hba->req_abort_count++; 7479 7480 if (!(reg & (1 << tag))) { 7481 dev_err(hba->dev, 7482 "%s: cmd was completed, but without a notifying intr, tag = %d", 7483 __func__, tag); 7484 __ufshcd_transfer_req_compl(hba, 1UL << tag); 7485 goto release; 7486 } 7487 7488 /* 7489 * Task abort to the device W-LUN is illegal. When this command 7490 * will fail, due to spec violation, scsi err handling next step 7491 * will be to send LU reset which, again, is a spec violation. 7492 * To avoid these unnecessary/illegal steps, first we clean up 7493 * the lrb taken by this cmd and re-set it in outstanding_reqs, 7494 * then queue the eh_work and bail. 
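 * The scheduled error handler performs a full reset and restore, which
 * also takes care of completing the command outstanding on the W-LUN.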
7495 */ 7496 if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) { 7497 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun); 7498 7499 spin_lock_irqsave(host->host_lock, flags); 7500 hba->force_reset = true; 7501 ufshcd_schedule_eh_work(hba); 7502 spin_unlock_irqrestore(host->host_lock, flags); 7503 goto release; 7504 } 7505 7506 /* Skip task abort in case previous aborts failed and report failure */ 7507 if (lrbp->req_abort_skip) { 7508 dev_err(hba->dev, "%s: skipping abort\n", __func__); 7509 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs); 7510 goto release; 7511 } 7512 7513 err = ufshcd_try_to_abort_task(hba, tag); 7514 if (err) { 7515 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err); 7516 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs); 7517 err = FAILED; 7518 goto release; 7519 } 7520 7521 /* 7522 * Clear the corresponding bit from outstanding_reqs since the command 7523 * has been aborted successfully. 7524 */ 7525 spin_lock_irqsave(&hba->outstanding_lock, flags); 7526 outstanding = __test_and_clear_bit(tag, &hba->outstanding_reqs); 7527 spin_unlock_irqrestore(&hba->outstanding_lock, flags); 7528 7529 if (outstanding) 7530 ufshcd_release_scsi_cmd(hba, lrbp); 7531 7532 err = SUCCESS; 7533 7534 release: 7535 /* Matches the ufshcd_hold() call at the start of this function. */ 7536 ufshcd_release(hba); 7537 return err; 7538 } 7539 7540 /** 7541 * ufshcd_host_reset_and_restore - reset and restore host controller 7542 * @hba: per-adapter instance 7543 * 7544 * Note that host controller reset may issue DME_RESET to 7545 * local and remote (device) Uni-Pro stack and the attributes 7546 * are reset to default state. 7547 * 7548 * Returns zero on success, non-zero on failure 7549 */ 7550 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) 7551 { 7552 int err; 7553 7554 /* 7555 * Stop the host controller and complete the requests 7556 * cleared by h/w 7557 */ 7558 ufshpb_toggle_state(hba, HPB_PRESENT, HPB_RESET); 7559 ufshcd_hba_stop(hba); 7560 hba->silence_err_logs = true; 7561 ufshcd_complete_requests(hba); 7562 hba->silence_err_logs = false; 7563 7564 /* scale up clocks to max frequency before full reinitialization */ 7565 ufshcd_scale_clks(hba, true); 7566 7567 err = ufshcd_hba_enable(hba); 7568 7569 /* Establish the link again and restore the device */ 7570 if (!err) 7571 err = ufshcd_probe_hba(hba, false); 7572 7573 if (err) 7574 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err); 7575 ufshcd_update_evt_hist(hba, UFS_EVT_HOST_RESET, (u32)err); 7576 return err; 7577 } 7578 7579 /** 7580 * ufshcd_reset_and_restore - reset and re-initialize host/device 7581 * @hba: per-adapter instance 7582 * 7583 * Reset and recover device, host and re-establish link. This 7584 * is helpful to recover the communication in fatal error conditions. 7585 * 7586 * Returns zero on success, non-zero on failure 7587 */ 7588 static int ufshcd_reset_and_restore(struct ufs_hba *hba) 7589 { 7590 u32 saved_err = 0; 7591 u32 saved_uic_err = 0; 7592 int err = 0; 7593 unsigned long flags; 7594 int retries = MAX_HOST_RESET_RETRIES; 7595 7596 spin_lock_irqsave(hba->host->host_lock, flags); 7597 do { 7598 /* 7599 * This is a fresh start, cache and clear saved error first, 7600 * in case new error generated during reset and restore. 
7601 */ 7602 saved_err |= hba->saved_err; 7603 saved_uic_err |= hba->saved_uic_err; 7604 hba->saved_err = 0; 7605 hba->saved_uic_err = 0; 7606 hba->force_reset = false; 7607 hba->ufshcd_state = UFSHCD_STATE_RESET; 7608 spin_unlock_irqrestore(hba->host->host_lock, flags); 7609 7610 /* Reset the attached device */ 7611 ufshcd_device_reset(hba); 7612 7613 err = ufshcd_host_reset_and_restore(hba); 7614 7615 spin_lock_irqsave(hba->host->host_lock, flags); 7616 if (err) 7617 continue; 7618 /* Do not exit unless operational or dead */ 7619 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL && 7620 hba->ufshcd_state != UFSHCD_STATE_ERROR && 7621 hba->ufshcd_state != UFSHCD_STATE_EH_SCHEDULED_NON_FATAL) 7622 err = -EAGAIN; 7623 } while (err && --retries); 7624 7625 /* 7626 * Inform scsi mid-layer that we did reset and allow to handle 7627 * Unit Attention properly. 7628 */ 7629 scsi_report_bus_reset(hba->host, 0); 7630 if (err) { 7631 hba->ufshcd_state = UFSHCD_STATE_ERROR; 7632 hba->saved_err |= saved_err; 7633 hba->saved_uic_err |= saved_uic_err; 7634 } 7635 spin_unlock_irqrestore(hba->host->host_lock, flags); 7636 7637 return err; 7638 } 7639 7640 /** 7641 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer 7642 * @cmd: SCSI command pointer 7643 * 7644 * Returns SUCCESS/FAILED 7645 */ 7646 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd) 7647 { 7648 int err = SUCCESS; 7649 unsigned long flags; 7650 struct ufs_hba *hba; 7651 7652 hba = shost_priv(cmd->device->host); 7653 7654 spin_lock_irqsave(hba->host->host_lock, flags); 7655 hba->force_reset = true; 7656 ufshcd_schedule_eh_work(hba); 7657 dev_err(hba->dev, "%s: reset in progress - 1\n", __func__); 7658 spin_unlock_irqrestore(hba->host->host_lock, flags); 7659 7660 flush_work(&hba->eh_work); 7661 7662 spin_lock_irqsave(hba->host->host_lock, flags); 7663 if (hba->ufshcd_state == UFSHCD_STATE_ERROR) 7664 err = FAILED; 7665 spin_unlock_irqrestore(hba->host->host_lock, flags); 7666 7667 return err; 7668 } 7669 7670 /** 7671 * ufshcd_get_max_icc_level - calculate the ICC level 7672 * @sup_curr_uA: max. current supported by the regulator 7673 * @start_scan: row at the desc table to start scan from 7674 * @buff: power descriptor buffer 7675 * 7676 * Returns calculated max ICC level for specific regulator 7677 */ 7678 static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, 7679 const char *buff) 7680 { 7681 int i; 7682 int curr_uA; 7683 u16 data; 7684 u16 unit; 7685 7686 for (i = start_scan; i >= 0; i--) { 7687 data = get_unaligned_be16(&buff[2 * i]); 7688 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >> 7689 ATTR_ICC_LVL_UNIT_OFFSET; 7690 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK; 7691 switch (unit) { 7692 case UFSHCD_NANO_AMP: 7693 curr_uA = curr_uA / 1000; 7694 break; 7695 case UFSHCD_MILI_AMP: 7696 curr_uA = curr_uA * 1000; 7697 break; 7698 case UFSHCD_AMP: 7699 curr_uA = curr_uA * 1000 * 1000; 7700 break; 7701 case UFSHCD_MICRO_AMP: 7702 default: 7703 break; 7704 } 7705 if (sup_curr_uA >= curr_uA) 7706 break; 7707 } 7708 if (i < 0) { 7709 i = 0; 7710 pr_err("%s: Couldn't find valid icc_level = %d", __func__, i); 7711 } 7712 7713 return (u32)i; 7714 } 7715 7716 /** 7717 * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level 7718 * In case regulators are not initialized we'll return 0 7719 * @hba: per-adapter instance 7720 * @desc_buf: power descriptor buffer to extract ICC levels from. 
7721 * 7722 * Returns calculated ICC level 7723 */ 7724 static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba, 7725 const u8 *desc_buf) 7726 { 7727 u32 icc_level = 0; 7728 7729 if (!hba->vreg_info.vcc || !hba->vreg_info.vccq || 7730 !hba->vreg_info.vccq2) { 7731 /* 7732 * Using dev_dbg to avoid messages during runtime PM to avoid 7733 * never-ending cycles of messages written back to storage by 7734 * user space causing runtime resume, causing more messages and 7735 * so on. 7736 */ 7737 dev_dbg(hba->dev, 7738 "%s: Regulator capability was not set, actvIccLevel=%d", 7739 __func__, icc_level); 7740 goto out; 7741 } 7742 7743 if (hba->vreg_info.vcc->max_uA) 7744 icc_level = ufshcd_get_max_icc_level( 7745 hba->vreg_info.vcc->max_uA, 7746 POWER_DESC_MAX_ACTV_ICC_LVLS - 1, 7747 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]); 7748 7749 if (hba->vreg_info.vccq->max_uA) 7750 icc_level = ufshcd_get_max_icc_level( 7751 hba->vreg_info.vccq->max_uA, 7752 icc_level, 7753 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]); 7754 7755 if (hba->vreg_info.vccq2->max_uA) 7756 icc_level = ufshcd_get_max_icc_level( 7757 hba->vreg_info.vccq2->max_uA, 7758 icc_level, 7759 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]); 7760 out: 7761 return icc_level; 7762 } 7763 7764 static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba) 7765 { 7766 int ret; 7767 u8 *desc_buf; 7768 u32 icc_level; 7769 7770 desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL); 7771 if (!desc_buf) 7772 return; 7773 7774 ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, 0, 7775 desc_buf, QUERY_DESC_MAX_SIZE); 7776 if (ret) { 7777 dev_err(hba->dev, 7778 "%s: Failed reading power descriptor ret = %d", 7779 __func__, ret); 7780 goto out; 7781 } 7782 7783 icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf); 7784 dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level); 7785 7786 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, 7787 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level); 7788 7789 if (ret) 7790 dev_err(hba->dev, 7791 "%s: Failed configuring bActiveICCLevel = %d ret = %d", 7792 __func__, icc_level, ret); 7793 7794 out: 7795 kfree(desc_buf); 7796 } 7797 7798 static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev) 7799 { 7800 scsi_autopm_get_device(sdev); 7801 blk_pm_runtime_init(sdev->request_queue, &sdev->sdev_gendev); 7802 if (sdev->rpm_autosuspend) 7803 pm_runtime_set_autosuspend_delay(&sdev->sdev_gendev, 7804 RPM_AUTOSUSPEND_DELAY_MS); 7805 scsi_autopm_put_device(sdev); 7806 } 7807 7808 /** 7809 * ufshcd_scsi_add_wlus - Adds required W-LUs 7810 * @hba: per-adapter instance 7811 * 7812 * UFS device specification requires the UFS devices to support 4 well known 7813 * logical units: 7814 * "REPORT_LUNS" (address: 01h) 7815 * "UFS Device" (address: 50h) 7816 * "RPMB" (address: 44h) 7817 * "BOOT" (address: 30h) 7818 * UFS device's power management needs to be controlled by "POWER CONDITION" 7819 * field of SSU (START STOP UNIT) command. But this "power condition" field 7820 * will take effect only when its sent to "UFS device" well known logical unit 7821 * hence we require the scsi_device instance to represent this logical unit in 7822 * order for the UFS host driver to send the SSU command for power management. 7823 * 7824 * We also require the scsi_device instance for "RPMB" (Replay Protected Memory 7825 * Block) LU so user space process can control this LU. User space may also 7826 * want to have access to BOOT LU. 
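 * The addresses above are UFS UPIU W-LUN values; they are mapped to the
 * corresponding SCSI well-known LUNs by ufshcd_upiu_wlun_to_scsi_wlun()
 * before being passed to __scsi_add_device().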
7827 * 7828 * This function adds scsi device instances for each of all well known LUs 7829 * (except "REPORT LUNS" LU). 7830 * 7831 * Returns zero on success (all required W-LUs are added successfully), 7832 * non-zero error value on failure (if failed to add any of the required W-LU). 7833 */ 7834 static int ufshcd_scsi_add_wlus(struct ufs_hba *hba) 7835 { 7836 int ret = 0; 7837 struct scsi_device *sdev_boot, *sdev_rpmb; 7838 7839 hba->ufs_device_wlun = __scsi_add_device(hba->host, 0, 0, 7840 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL); 7841 if (IS_ERR(hba->ufs_device_wlun)) { 7842 ret = PTR_ERR(hba->ufs_device_wlun); 7843 hba->ufs_device_wlun = NULL; 7844 goto out; 7845 } 7846 scsi_device_put(hba->ufs_device_wlun); 7847 7848 sdev_rpmb = __scsi_add_device(hba->host, 0, 0, 7849 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL); 7850 if (IS_ERR(sdev_rpmb)) { 7851 ret = PTR_ERR(sdev_rpmb); 7852 goto remove_ufs_device_wlun; 7853 } 7854 ufshcd_blk_pm_runtime_init(sdev_rpmb); 7855 scsi_device_put(sdev_rpmb); 7856 7857 sdev_boot = __scsi_add_device(hba->host, 0, 0, 7858 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL); 7859 if (IS_ERR(sdev_boot)) { 7860 dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__); 7861 } else { 7862 ufshcd_blk_pm_runtime_init(sdev_boot); 7863 scsi_device_put(sdev_boot); 7864 } 7865 goto out; 7866 7867 remove_ufs_device_wlun: 7868 scsi_remove_device(hba->ufs_device_wlun); 7869 out: 7870 return ret; 7871 } 7872 7873 static void ufshcd_wb_probe(struct ufs_hba *hba, const u8 *desc_buf) 7874 { 7875 struct ufs_dev_info *dev_info = &hba->dev_info; 7876 u8 lun; 7877 u32 d_lu_wb_buf_alloc; 7878 u32 ext_ufs_feature; 7879 7880 if (!ufshcd_is_wb_allowed(hba)) 7881 return; 7882 7883 /* 7884 * Probe WB only for UFS-2.2 and UFS-3.1 (and later) devices or 7885 * UFS devices with quirk UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES 7886 * enabled 7887 */ 7888 if (!(dev_info->wspecversion >= 0x310 || 7889 dev_info->wspecversion == 0x220 || 7890 (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES))) 7891 goto wb_disabled; 7892 7893 ext_ufs_feature = get_unaligned_be32(desc_buf + 7894 DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP); 7895 7896 if (!(ext_ufs_feature & UFS_DEV_WRITE_BOOSTER_SUP)) 7897 goto wb_disabled; 7898 7899 /* 7900 * WB may be supported but not configured while provisioning. The spec 7901 * says, in dedicated wb buffer mode, a max of 1 lun would have wb 7902 * buffer configured. 
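 * Below, the shared-buffer case only checks the shared allocation units
 * in the device descriptor, while the dedicated case scans the unit
 * descriptors (UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS) for the first LU with
 * a non-zero allocation and records it as wb_dedicated_lu.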
7903 */ 7904 dev_info->wb_buffer_type = desc_buf[DEVICE_DESC_PARAM_WB_TYPE]; 7905 7906 dev_info->b_presrv_uspc_en = 7907 desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN]; 7908 7909 if (dev_info->wb_buffer_type == WB_BUF_MODE_SHARED) { 7910 if (!get_unaligned_be32(desc_buf + 7911 DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS)) 7912 goto wb_disabled; 7913 } else { 7914 for (lun = 0; lun < UFS_UPIU_MAX_WB_LUN_ID; lun++) { 7915 d_lu_wb_buf_alloc = 0; 7916 ufshcd_read_unit_desc_param(hba, 7917 lun, 7918 UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS, 7919 (u8 *)&d_lu_wb_buf_alloc, 7920 sizeof(d_lu_wb_buf_alloc)); 7921 if (d_lu_wb_buf_alloc) { 7922 dev_info->wb_dedicated_lu = lun; 7923 break; 7924 } 7925 } 7926 7927 if (!d_lu_wb_buf_alloc) 7928 goto wb_disabled; 7929 } 7930 7931 if (!ufshcd_is_wb_buf_lifetime_available(hba)) 7932 goto wb_disabled; 7933 7934 return; 7935 7936 wb_disabled: 7937 hba->caps &= ~UFSHCD_CAP_WB_EN; 7938 } 7939 7940 static void ufshcd_temp_notif_probe(struct ufs_hba *hba, const u8 *desc_buf) 7941 { 7942 struct ufs_dev_info *dev_info = &hba->dev_info; 7943 u32 ext_ufs_feature; 7944 u8 mask = 0; 7945 7946 if (!(hba->caps & UFSHCD_CAP_TEMP_NOTIF) || dev_info->wspecversion < 0x300) 7947 return; 7948 7949 ext_ufs_feature = get_unaligned_be32(desc_buf + DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP); 7950 7951 if (ext_ufs_feature & UFS_DEV_LOW_TEMP_NOTIF) 7952 mask |= MASK_EE_TOO_LOW_TEMP; 7953 7954 if (ext_ufs_feature & UFS_DEV_HIGH_TEMP_NOTIF) 7955 mask |= MASK_EE_TOO_HIGH_TEMP; 7956 7957 if (mask) { 7958 ufshcd_enable_ee(hba, mask); 7959 ufs_hwmon_probe(hba, mask); 7960 } 7961 } 7962 7963 static void ufshcd_ext_iid_probe(struct ufs_hba *hba, u8 *desc_buf) 7964 { 7965 struct ufs_dev_info *dev_info = &hba->dev_info; 7966 u32 ext_ufs_feature; 7967 u32 ext_iid_en = 0; 7968 int err; 7969 7970 /* Only UFS-4.0 and above may support EXT_IID */ 7971 if (dev_info->wspecversion < 0x400) 7972 goto out; 7973 7974 ext_ufs_feature = get_unaligned_be32(desc_buf + 7975 DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP); 7976 if (!(ext_ufs_feature & UFS_DEV_EXT_IID_SUP)) 7977 goto out; 7978 7979 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, 7980 QUERY_ATTR_IDN_EXT_IID_EN, 0, 0, &ext_iid_en); 7981 if (err) 7982 dev_err(hba->dev, "failed reading bEXTIIDEn. 
err = %d\n", err); 7983 7984 out: 7985 dev_info->b_ext_iid_en = ext_iid_en; 7986 } 7987 7988 void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, 7989 const struct ufs_dev_quirk *fixups) 7990 { 7991 const struct ufs_dev_quirk *f; 7992 struct ufs_dev_info *dev_info = &hba->dev_info; 7993 7994 if (!fixups) 7995 return; 7996 7997 for (f = fixups; f->quirk; f++) { 7998 if ((f->wmanufacturerid == dev_info->wmanufacturerid || 7999 f->wmanufacturerid == UFS_ANY_VENDOR) && 8000 ((dev_info->model && 8001 STR_PRFX_EQUAL(f->model, dev_info->model)) || 8002 !strcmp(f->model, UFS_ANY_MODEL))) 8003 hba->dev_quirks |= f->quirk; 8004 } 8005 } 8006 EXPORT_SYMBOL_GPL(ufshcd_fixup_dev_quirks); 8007 8008 static void ufs_fixup_device_setup(struct ufs_hba *hba) 8009 { 8010 /* fix by general quirk table */ 8011 ufshcd_fixup_dev_quirks(hba, ufs_fixups); 8012 8013 /* allow vendors to fix quirks */ 8014 ufshcd_vops_fixup_dev_quirks(hba); 8015 } 8016 8017 static int ufs_get_device_desc(struct ufs_hba *hba) 8018 { 8019 int err; 8020 u8 model_index; 8021 u8 b_ufs_feature_sup; 8022 u8 *desc_buf; 8023 struct ufs_dev_info *dev_info = &hba->dev_info; 8024 8025 desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL); 8026 if (!desc_buf) { 8027 err = -ENOMEM; 8028 goto out; 8029 } 8030 8031 err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf, 8032 QUERY_DESC_MAX_SIZE); 8033 if (err) { 8034 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n", 8035 __func__, err); 8036 goto out; 8037 } 8038 8039 /* 8040 * getting vendor (manufacturerID) and Bank Index in big endian 8041 * format 8042 */ 8043 dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 | 8044 desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1]; 8045 8046 /* getting Specification Version in big endian format */ 8047 dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 | 8048 desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1]; 8049 dev_info->bqueuedepth = desc_buf[DEVICE_DESC_PARAM_Q_DPTH]; 8050 b_ufs_feature_sup = desc_buf[DEVICE_DESC_PARAM_UFS_FEAT]; 8051 8052 model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME]; 8053 8054 if (dev_info->wspecversion >= UFS_DEV_HPB_SUPPORT_VERSION && 8055 (b_ufs_feature_sup & UFS_DEV_HPB_SUPPORT)) { 8056 bool hpb_en = false; 8057 8058 ufshpb_get_dev_info(hba, desc_buf); 8059 8060 if (!ufshpb_is_legacy(hba)) 8061 err = ufshcd_query_flag_retry(hba, 8062 UPIU_QUERY_OPCODE_READ_FLAG, 8063 QUERY_FLAG_IDN_HPB_EN, 0, 8064 &hpb_en); 8065 8066 if (ufshpb_is_legacy(hba) || (!err && hpb_en)) 8067 dev_info->hpb_enabled = true; 8068 } 8069 8070 err = ufshcd_read_string_desc(hba, model_index, 8071 &dev_info->model, SD_ASCII_STD); 8072 if (err < 0) { 8073 dev_err(hba->dev, "%s: Failed reading Product Name. 
err = %d\n", 8074 __func__, err); 8075 goto out; 8076 } 8077 8078 hba->luns_avail = desc_buf[DEVICE_DESC_PARAM_NUM_LU] + 8079 desc_buf[DEVICE_DESC_PARAM_NUM_WLU]; 8080 8081 ufs_fixup_device_setup(hba); 8082 8083 ufshcd_wb_probe(hba, desc_buf); 8084 8085 ufshcd_temp_notif_probe(hba, desc_buf); 8086 8087 if (hba->ext_iid_sup) 8088 ufshcd_ext_iid_probe(hba, desc_buf); 8089 8090 /* 8091 * ufshcd_read_string_desc returns size of the string 8092 * reset the error value 8093 */ 8094 err = 0; 8095 8096 out: 8097 kfree(desc_buf); 8098 return err; 8099 } 8100 8101 static void ufs_put_device_desc(struct ufs_hba *hba) 8102 { 8103 struct ufs_dev_info *dev_info = &hba->dev_info; 8104 8105 kfree(dev_info->model); 8106 dev_info->model = NULL; 8107 } 8108 8109 /** 8110 * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro 8111 * @hba: per-adapter instance 8112 * 8113 * PA_TActivate parameter can be tuned manually if UniPro version is less than 8114 * 1.61. PA_TActivate needs to be greater than or equal to peerM-PHY's 8115 * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce 8116 * the hibern8 exit latency. 8117 * 8118 * Returns zero on success, non-zero error value on failure. 8119 */ 8120 static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba) 8121 { 8122 int ret = 0; 8123 u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate; 8124 8125 ret = ufshcd_dme_peer_get(hba, 8126 UIC_ARG_MIB_SEL( 8127 RX_MIN_ACTIVATETIME_CAPABILITY, 8128 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)), 8129 &peer_rx_min_activatetime); 8130 if (ret) 8131 goto out; 8132 8133 /* make sure proper unit conversion is applied */ 8134 tuned_pa_tactivate = 8135 ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US) 8136 / PA_TACTIVATE_TIME_UNIT_US); 8137 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 8138 tuned_pa_tactivate); 8139 8140 out: 8141 return ret; 8142 } 8143 8144 /** 8145 * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro 8146 * @hba: per-adapter instance 8147 * 8148 * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than 8149 * 1.61. PA_Hibern8Time needs to be maximum of local M-PHY's 8150 * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY. 8151 * This optimal value can help reduce the hibern8 exit latency. 8152 * 8153 * Returns zero on success, non-zero error value on failure. 8154 */ 8155 static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba) 8156 { 8157 int ret = 0; 8158 u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0; 8159 u32 max_hibern8_time, tuned_pa_hibern8time; 8160 8161 ret = ufshcd_dme_get(hba, 8162 UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY, 8163 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)), 8164 &local_tx_hibern8_time_cap); 8165 if (ret) 8166 goto out; 8167 8168 ret = ufshcd_dme_peer_get(hba, 8169 UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY, 8170 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)), 8171 &peer_rx_hibern8_time_cap); 8172 if (ret) 8173 goto out; 8174 8175 max_hibern8_time = max(local_tx_hibern8_time_cap, 8176 peer_rx_hibern8_time_cap); 8177 /* make sure proper unit conversion is applied */ 8178 tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US) 8179 / PA_HIBERN8_TIME_UNIT_US); 8180 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 8181 tuned_pa_hibern8time); 8182 out: 8183 return ret; 8184 } 8185 8186 /** 8187 * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is 8188 * less than device PA_TACTIVATE time. 
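 * Both attributes are converted to microseconds first: PA_GRANULARITY
 * selects the step size (values 1-6 map to 1, 4, 8, 16, 32 and 100 us,
 * see gran_to_us_table[]), and the peer PA_TACTIVATE is raised to just
 * above the host value when needed.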
8189 * @hba: per-adapter instance 8190 * 8191 * Some UFS devices require host PA_TACTIVATE to be lower than device 8192 * PA_TACTIVATE, we need to enable UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk 8193 * for such devices. 8194 * 8195 * Returns zero on success, non-zero error value on failure. 8196 */ 8197 static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba) 8198 { 8199 int ret = 0; 8200 u32 granularity, peer_granularity; 8201 u32 pa_tactivate, peer_pa_tactivate; 8202 u32 pa_tactivate_us, peer_pa_tactivate_us; 8203 static const u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100}; 8204 8205 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY), 8206 &granularity); 8207 if (ret) 8208 goto out; 8209 8210 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY), 8211 &peer_granularity); 8212 if (ret) 8213 goto out; 8214 8215 if ((granularity < PA_GRANULARITY_MIN_VAL) || 8216 (granularity > PA_GRANULARITY_MAX_VAL)) { 8217 dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d", 8218 __func__, granularity); 8219 return -EINVAL; 8220 } 8221 8222 if ((peer_granularity < PA_GRANULARITY_MIN_VAL) || 8223 (peer_granularity > PA_GRANULARITY_MAX_VAL)) { 8224 dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d", 8225 __func__, peer_granularity); 8226 return -EINVAL; 8227 } 8228 8229 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate); 8230 if (ret) 8231 goto out; 8232 8233 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE), 8234 &peer_pa_tactivate); 8235 if (ret) 8236 goto out; 8237 8238 pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1]; 8239 peer_pa_tactivate_us = peer_pa_tactivate * 8240 gran_to_us_table[peer_granularity - 1]; 8241 8242 if (pa_tactivate_us >= peer_pa_tactivate_us) { 8243 u32 new_peer_pa_tactivate; 8244 8245 new_peer_pa_tactivate = pa_tactivate_us / 8246 gran_to_us_table[peer_granularity - 1]; 8247 new_peer_pa_tactivate++; 8248 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 8249 new_peer_pa_tactivate); 8250 } 8251 8252 out: 8253 return ret; 8254 } 8255 8256 static void ufshcd_tune_unipro_params(struct ufs_hba *hba) 8257 { 8258 if (ufshcd_is_unipro_pa_params_tuning_req(hba)) { 8259 ufshcd_tune_pa_tactivate(hba); 8260 ufshcd_tune_pa_hibern8time(hba); 8261 } 8262 8263 ufshcd_vops_apply_dev_quirks(hba); 8264 8265 if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE) 8266 /* set 1ms timeout for PA_TACTIVATE */ 8267 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10); 8268 8269 if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE) 8270 ufshcd_quirk_tune_host_pa_tactivate(hba); 8271 } 8272 8273 static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba) 8274 { 8275 hba->ufs_stats.hibern8_exit_cnt = 0; 8276 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0); 8277 hba->req_abort_count = 0; 8278 } 8279 8280 static int ufshcd_device_geo_params_init(struct ufs_hba *hba) 8281 { 8282 int err; 8283 u8 *desc_buf; 8284 8285 desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL); 8286 if (!desc_buf) { 8287 err = -ENOMEM; 8288 goto out; 8289 } 8290 8291 err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, 0, 8292 desc_buf, QUERY_DESC_MAX_SIZE); 8293 if (err) { 8294 dev_err(hba->dev, "%s: Failed reading Geometry Desc. 
err = %d\n", 8295 __func__, err); 8296 goto out; 8297 } 8298 8299 if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 1) 8300 hba->dev_info.max_lu_supported = 32; 8301 else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0) 8302 hba->dev_info.max_lu_supported = 8; 8303 8304 if (desc_buf[QUERY_DESC_LENGTH_OFFSET] >= 8305 GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS) 8306 ufshpb_get_geo_info(hba, desc_buf); 8307 8308 out: 8309 kfree(desc_buf); 8310 return err; 8311 } 8312 8313 struct ufs_ref_clk { 8314 unsigned long freq_hz; 8315 enum ufs_ref_clk_freq val; 8316 }; 8317 8318 static const struct ufs_ref_clk ufs_ref_clk_freqs[] = { 8319 {19200000, REF_CLK_FREQ_19_2_MHZ}, 8320 {26000000, REF_CLK_FREQ_26_MHZ}, 8321 {38400000, REF_CLK_FREQ_38_4_MHZ}, 8322 {52000000, REF_CLK_FREQ_52_MHZ}, 8323 {0, REF_CLK_FREQ_INVAL}, 8324 }; 8325 8326 static enum ufs_ref_clk_freq 8327 ufs_get_bref_clk_from_hz(unsigned long freq) 8328 { 8329 int i; 8330 8331 for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++) 8332 if (ufs_ref_clk_freqs[i].freq_hz == freq) 8333 return ufs_ref_clk_freqs[i].val; 8334 8335 return REF_CLK_FREQ_INVAL; 8336 } 8337 8338 void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk) 8339 { 8340 unsigned long freq; 8341 8342 freq = clk_get_rate(refclk); 8343 8344 hba->dev_ref_clk_freq = 8345 ufs_get_bref_clk_from_hz(freq); 8346 8347 if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL) 8348 dev_err(hba->dev, 8349 "invalid ref_clk setting = %ld\n", freq); 8350 } 8351 8352 static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba) 8353 { 8354 int err; 8355 u32 ref_clk; 8356 u32 freq = hba->dev_ref_clk_freq; 8357 8358 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, 8359 QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk); 8360 8361 if (err) { 8362 dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n", 8363 err); 8364 goto out; 8365 } 8366 8367 if (ref_clk == freq) 8368 goto out; /* nothing to update */ 8369 8370 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, 8371 QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &freq); 8372 8373 if (err) { 8374 dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n", 8375 ufs_ref_clk_freqs[freq].freq_hz); 8376 goto out; 8377 } 8378 8379 dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n", 8380 ufs_ref_clk_freqs[freq].freq_hz); 8381 8382 out: 8383 return err; 8384 } 8385 8386 static int ufshcd_device_params_init(struct ufs_hba *hba) 8387 { 8388 bool flag; 8389 int ret; 8390 8391 /* Init UFS geometry descriptor related parameters */ 8392 ret = ufshcd_device_geo_params_init(hba); 8393 if (ret) 8394 goto out; 8395 8396 /* Check and apply UFS device quirks */ 8397 ret = ufs_get_device_desc(hba); 8398 if (ret) { 8399 dev_err(hba->dev, "%s: Failed getting device info. 
err = %d\n", 8400 __func__, ret); 8401 goto out; 8402 } 8403 8404 ufshcd_get_ref_clk_gating_wait(hba); 8405 8406 if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG, 8407 QUERY_FLAG_IDN_PWR_ON_WPE, 0, &flag)) 8408 hba->dev_info.f_power_on_wp_en = flag; 8409 8410 /* Probe maximum power mode co-supported by both UFS host and device */ 8411 if (ufshcd_get_max_pwr_mode(hba)) 8412 dev_err(hba->dev, 8413 "%s: Failed getting max supported power mode\n", 8414 __func__); 8415 out: 8416 return ret; 8417 } 8418 8419 /** 8420 * ufshcd_add_lus - probe and add UFS logical units 8421 * @hba: per-adapter instance 8422 */ 8423 static int ufshcd_add_lus(struct ufs_hba *hba) 8424 { 8425 int ret; 8426 8427 /* Add required well known logical units to scsi mid layer */ 8428 ret = ufshcd_scsi_add_wlus(hba); 8429 if (ret) 8430 goto out; 8431 8432 ufs_bsg_probe(hba); 8433 ufshpb_init(hba); 8434 scsi_scan_host(hba->host); 8435 pm_runtime_put_sync(hba->dev); 8436 8437 out: 8438 return ret; 8439 } 8440 8441 /* SDB - Single Doorbell */ 8442 static void ufshcd_release_sdb_queue(struct ufs_hba *hba, int nutrs) 8443 { 8444 size_t ucdl_size, utrdl_size; 8445 8446 ucdl_size = sizeof(struct utp_transfer_cmd_desc) * nutrs; 8447 dmam_free_coherent(hba->dev, ucdl_size, hba->ucdl_base_addr, 8448 hba->ucdl_dma_addr); 8449 8450 utrdl_size = sizeof(struct utp_transfer_req_desc) * nutrs; 8451 dmam_free_coherent(hba->dev, utrdl_size, hba->utrdl_base_addr, 8452 hba->utrdl_dma_addr); 8453 8454 devm_kfree(hba->dev, hba->lrb); 8455 } 8456 8457 static int ufshcd_alloc_mcq(struct ufs_hba *hba) 8458 { 8459 int ret; 8460 int old_nutrs = hba->nutrs; 8461 8462 ret = ufshcd_mcq_decide_queue_depth(hba); 8463 if (ret < 0) 8464 return ret; 8465 8466 hba->nutrs = ret; 8467 ret = ufshcd_mcq_init(hba); 8468 if (ret) 8469 goto err; 8470 8471 /* 8472 * Previously allocated memory for nutrs may not be enough in MCQ mode. 8473 * Number of supported tags in MCQ mode may be larger than SDB mode. 8474 */ 8475 if (hba->nutrs != old_nutrs) { 8476 ufshcd_release_sdb_queue(hba, old_nutrs); 8477 ret = ufshcd_memory_alloc(hba); 8478 if (ret) 8479 goto err; 8480 ufshcd_host_memory_configure(hba); 8481 } 8482 8483 ret = ufshcd_mcq_memory_alloc(hba); 8484 if (ret) 8485 goto err; 8486 8487 return 0; 8488 err: 8489 hba->nutrs = old_nutrs; 8490 return ret; 8491 } 8492 8493 static void ufshcd_config_mcq(struct ufs_hba *hba) 8494 { 8495 int ret; 8496 8497 ret = ufshcd_mcq_vops_config_esi(hba); 8498 dev_info(hba->dev, "ESI %sconfigured\n", ret ? 
"is not " : ""); 8499 8500 ufshcd_enable_intr(hba, UFSHCD_ENABLE_MCQ_INTRS); 8501 ufshcd_mcq_make_queues_operational(hba); 8502 ufshcd_mcq_config_mac(hba, hba->nutrs); 8503 8504 hba->host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED; 8505 hba->reserved_slot = hba->nutrs - UFSHCD_NUM_RESERVED; 8506 8507 /* Select MCQ mode */ 8508 ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x1, 8509 REG_UFS_MEM_CFG); 8510 hba->mcq_enabled = true; 8511 8512 dev_info(hba->dev, "MCQ configured, nr_queues=%d, io_queues=%d, read_queue=%d, poll_queues=%d, queue_depth=%d\n", 8513 hba->nr_hw_queues, hba->nr_queues[HCTX_TYPE_DEFAULT], 8514 hba->nr_queues[HCTX_TYPE_READ], hba->nr_queues[HCTX_TYPE_POLL], 8515 hba->nutrs); 8516 } 8517 8518 static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params) 8519 { 8520 int ret; 8521 struct Scsi_Host *host = hba->host; 8522 8523 hba->ufshcd_state = UFSHCD_STATE_RESET; 8524 8525 ret = ufshcd_link_startup(hba); 8526 if (ret) 8527 return ret; 8528 8529 if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION) 8530 return ret; 8531 8532 /* Debug counters initialization */ 8533 ufshcd_clear_dbg_ufs_stats(hba); 8534 8535 /* UniPro link is active now */ 8536 ufshcd_set_link_active(hba); 8537 8538 /* Reconfigure MCQ upon reset */ 8539 if (is_mcq_enabled(hba) && !init_dev_params) 8540 ufshcd_config_mcq(hba); 8541 8542 /* Verify device initialization by sending NOP OUT UPIU */ 8543 ret = ufshcd_verify_dev_init(hba); 8544 if (ret) 8545 return ret; 8546 8547 /* Initiate UFS initialization, and waiting until completion */ 8548 ret = ufshcd_complete_dev_init(hba); 8549 if (ret) 8550 return ret; 8551 8552 /* 8553 * Initialize UFS device parameters used by driver, these 8554 * parameters are associated with UFS descriptors. 8555 */ 8556 if (init_dev_params) { 8557 ret = ufshcd_device_params_init(hba); 8558 if (ret) 8559 return ret; 8560 if (is_mcq_supported(hba) && !hba->scsi_host_added) { 8561 ret = ufshcd_alloc_mcq(hba); 8562 if (!ret) { 8563 ufshcd_config_mcq(hba); 8564 } else { 8565 /* Continue with SDB mode */ 8566 use_mcq_mode = false; 8567 dev_err(hba->dev, "MCQ mode is disabled, err=%d\n", 8568 ret); 8569 } 8570 ret = scsi_add_host(host, hba->dev); 8571 if (ret) { 8572 dev_err(hba->dev, "scsi_add_host failed\n"); 8573 return ret; 8574 } 8575 hba->scsi_host_added = true; 8576 } else if (is_mcq_supported(hba)) { 8577 /* UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH is set */ 8578 ufshcd_config_mcq(hba); 8579 } 8580 } 8581 8582 ufshcd_tune_unipro_params(hba); 8583 8584 /* UFS device is also active now */ 8585 ufshcd_set_ufs_dev_active(hba); 8586 ufshcd_force_reset_auto_bkops(hba); 8587 8588 /* Gear up to HS gear if supported */ 8589 if (hba->max_pwr_info.is_valid) { 8590 /* 8591 * Set the right value to bRefClkFreq before attempting to 8592 * switch to HS gears. 8593 */ 8594 if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL) 8595 ufshcd_set_dev_ref_clk(hba); 8596 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info); 8597 if (ret) { 8598 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n", 8599 __func__, ret); 8600 return ret; 8601 } 8602 } 8603 8604 return 0; 8605 } 8606 8607 /** 8608 * ufshcd_probe_hba - probe hba to detect device and initialize it 8609 * @hba: per-adapter instance 8610 * @init_dev_params: whether or not to call ufshcd_device_params_init(). 
8611 * 8612 * Execute link-startup and verify device initialization 8613 */ 8614 static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params) 8615 { 8616 ktime_t start = ktime_get(); 8617 unsigned long flags; 8618 int ret; 8619 8620 ret = ufshcd_device_init(hba, init_dev_params); 8621 if (ret) 8622 goto out; 8623 8624 if (hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH) { 8625 /* Reset the device and controller before doing reinit */ 8626 ufshcd_device_reset(hba); 8627 ufshcd_hba_stop(hba); 8628 ufshcd_vops_reinit_notify(hba); 8629 ret = ufshcd_hba_enable(hba); 8630 if (ret) { 8631 dev_err(hba->dev, "Host controller enable failed\n"); 8632 ufshcd_print_evt_hist(hba); 8633 ufshcd_print_host_state(hba); 8634 goto out; 8635 } 8636 8637 /* Reinit the device */ 8638 ret = ufshcd_device_init(hba, init_dev_params); 8639 if (ret) 8640 goto out; 8641 } 8642 8643 ufshcd_print_pwr_info(hba); 8644 8645 /* 8646 * bActiveICCLevel is volatile for UFS device (as per latest v2.1 spec) 8647 * and for removable UFS card as well, hence always set the parameter. 8648 * Note: Error handler may issue the device reset hence resetting 8649 * bActiveICCLevel as well so it is always safe to set this here. 8650 */ 8651 ufshcd_set_active_icc_lvl(hba); 8652 8653 /* Enable UFS Write Booster if supported */ 8654 ufshcd_configure_wb(hba); 8655 8656 if (hba->ee_usr_mask) 8657 ufshcd_write_ee_control(hba); 8658 /* Enable Auto-Hibernate if configured */ 8659 ufshcd_auto_hibern8_enable(hba); 8660 8661 ufshpb_toggle_state(hba, HPB_RESET, HPB_PRESENT); 8662 out: 8663 spin_lock_irqsave(hba->host->host_lock, flags); 8664 if (ret) 8665 hba->ufshcd_state = UFSHCD_STATE_ERROR; 8666 else if (hba->ufshcd_state == UFSHCD_STATE_RESET) 8667 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; 8668 spin_unlock_irqrestore(hba->host->host_lock, flags); 8669 8670 trace_ufshcd_init(dev_name(hba->dev), ret, 8671 ktime_to_us(ktime_sub(ktime_get(), start)), 8672 hba->curr_dev_pwr_mode, hba->uic_link_state); 8673 return ret; 8674 } 8675 8676 /** 8677 * ufshcd_async_scan - asynchronous execution for probing hba 8678 * @data: data pointer to pass to this function 8679 * @cookie: cookie data 8680 */ 8681 static void ufshcd_async_scan(void *data, async_cookie_t cookie) 8682 { 8683 struct ufs_hba *hba = (struct ufs_hba *)data; 8684 int ret; 8685 8686 down(&hba->host_sem); 8687 /* Initialize hba, detect and initialize UFS device */ 8688 ret = ufshcd_probe_hba(hba, true); 8689 up(&hba->host_sem); 8690 if (ret) 8691 goto out; 8692 8693 /* Probe and add UFS logical units */ 8694 ret = ufshcd_add_lus(hba); 8695 out: 8696 /* 8697 * If we failed to initialize the device or the device is not 8698 * present, turn off the power/clocks etc. 8699 */ 8700 if (ret) { 8701 pm_runtime_put_sync(hba->dev); 8702 ufshcd_hba_exit(hba); 8703 } else { 8704 /* 8705 * Make sure that when reader code sees UFS initialization has finished, 8706 * all initialization steps have really been executed. 8707 */ 8708 smp_store_release(&hba->logical_unit_scan_finished, true); 8709 } 8710 } 8711 8712 static enum scsi_timeout_action ufshcd_eh_timed_out(struct scsi_cmnd *scmd) 8713 { 8714 struct ufs_hba *hba = shost_priv(scmd->device->host); 8715 8716 if (!hba->system_suspending) { 8717 /* Activate the error handler in the SCSI core. */ 8718 return SCSI_EH_NOT_HANDLED; 8719 } 8720 8721 /* 8722 * If we get here we know that no TMFs are outstanding and also that 8723 * the only pending command is a START STOP UNIT command. 
Handle the 8724 * timeout of that command directly to prevent a deadlock between 8725 * ufshcd_set_dev_pwr_mode() and ufshcd_err_handler(). 8726 */ 8727 ufshcd_link_recovery(hba); 8728 dev_info(hba->dev, "%s() finished; outstanding_tasks = %#lx.\n", 8729 __func__, hba->outstanding_tasks); 8730 8731 return hba->outstanding_reqs ? SCSI_EH_RESET_TIMER : SCSI_EH_DONE; 8732 } 8733 8734 static const struct attribute_group *ufshcd_driver_groups[] = { 8735 &ufs_sysfs_unit_descriptor_group, 8736 &ufs_sysfs_lun_attributes_group, 8737 #ifdef CONFIG_SCSI_UFS_HPB 8738 &ufs_sysfs_hpb_stat_group, 8739 &ufs_sysfs_hpb_param_group, 8740 #endif 8741 NULL, 8742 }; 8743 8744 static struct ufs_hba_variant_params ufs_hba_vps = { 8745 .hba_enable_delay_us = 1000, 8746 .wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(40), 8747 .devfreq_profile.polling_ms = 100, 8748 .devfreq_profile.target = ufshcd_devfreq_target, 8749 .devfreq_profile.get_dev_status = ufshcd_devfreq_get_dev_status, 8750 .ondemand_data.upthreshold = 70, 8751 .ondemand_data.downdifferential = 5, 8752 }; 8753 8754 static const struct scsi_host_template ufshcd_driver_template = { 8755 .module = THIS_MODULE, 8756 .name = UFSHCD, 8757 .proc_name = UFSHCD, 8758 .map_queues = ufshcd_map_queues, 8759 .queuecommand = ufshcd_queuecommand, 8760 .mq_poll = ufshcd_poll, 8761 .slave_alloc = ufshcd_slave_alloc, 8762 .slave_configure = ufshcd_slave_configure, 8763 .slave_destroy = ufshcd_slave_destroy, 8764 .change_queue_depth = ufshcd_change_queue_depth, 8765 .eh_abort_handler = ufshcd_abort, 8766 .eh_device_reset_handler = ufshcd_eh_device_reset_handler, 8767 .eh_host_reset_handler = ufshcd_eh_host_reset_handler, 8768 .eh_timed_out = ufshcd_eh_timed_out, 8769 .this_id = -1, 8770 .sg_tablesize = SG_ALL, 8771 .cmd_per_lun = UFSHCD_CMD_PER_LUN, 8772 .can_queue = UFSHCD_CAN_QUEUE, 8773 .max_segment_size = PRDT_DATA_BYTE_COUNT_MAX, 8774 .max_sectors = (1 << 20) / SECTOR_SIZE, /* 1 MiB */ 8775 .max_host_blocked = 1, 8776 .track_queue_depth = 1, 8777 .skip_settle_delay = 1, 8778 .sdev_groups = ufshcd_driver_groups, 8779 .rpm_autosuspend_delay = RPM_AUTOSUSPEND_DELAY_MS, 8780 }; 8781 8782 static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg, 8783 int ua) 8784 { 8785 int ret; 8786 8787 if (!vreg) 8788 return 0; 8789 8790 /* 8791 * "set_load" operation shall be required on those regulators 8792 * which specifically configured current limitation. Otherwise 8793 * zero max_uA may cause unexpected behavior when regulator is 8794 * enabled or set as high power mode. 8795 */ 8796 if (!vreg->max_uA) 8797 return 0; 8798 8799 ret = regulator_set_load(vreg->reg, ua); 8800 if (ret < 0) { 8801 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n", 8802 __func__, vreg->name, ua, ret); 8803 } 8804 8805 return ret; 8806 } 8807 8808 static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba, 8809 struct ufs_vreg *vreg) 8810 { 8811 return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA); 8812 } 8813 8814 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba, 8815 struct ufs_vreg *vreg) 8816 { 8817 if (!vreg) 8818 return 0; 8819 8820 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA); 8821 } 8822 8823 static int ufshcd_config_vreg(struct device *dev, 8824 struct ufs_vreg *vreg, bool on) 8825 { 8826 if (regulator_count_voltages(vreg->reg) <= 0) 8827 return 0; 8828 8829 return ufshcd_config_vreg_load(dev, vreg, on ? 
vreg->max_uA : 0); 8830 } 8831 8832 static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg) 8833 { 8834 int ret = 0; 8835 8836 if (!vreg || vreg->enabled) 8837 goto out; 8838 8839 ret = ufshcd_config_vreg(dev, vreg, true); 8840 if (!ret) 8841 ret = regulator_enable(vreg->reg); 8842 8843 if (!ret) 8844 vreg->enabled = true; 8845 else 8846 dev_err(dev, "%s: %s enable failed, err=%d\n", 8847 __func__, vreg->name, ret); 8848 out: 8849 return ret; 8850 } 8851 8852 static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg) 8853 { 8854 int ret = 0; 8855 8856 if (!vreg || !vreg->enabled || vreg->always_on) 8857 goto out; 8858 8859 ret = regulator_disable(vreg->reg); 8860 8861 if (!ret) { 8862 /* ignore errors on applying disable config */ 8863 ufshcd_config_vreg(dev, vreg, false); 8864 vreg->enabled = false; 8865 } else { 8866 dev_err(dev, "%s: %s disable failed, err=%d\n", 8867 __func__, vreg->name, ret); 8868 } 8869 out: 8870 return ret; 8871 } 8872 8873 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on) 8874 { 8875 int ret = 0; 8876 struct device *dev = hba->dev; 8877 struct ufs_vreg_info *info = &hba->vreg_info; 8878 8879 ret = ufshcd_toggle_vreg(dev, info->vcc, on); 8880 if (ret) 8881 goto out; 8882 8883 ret = ufshcd_toggle_vreg(dev, info->vccq, on); 8884 if (ret) 8885 goto out; 8886 8887 ret = ufshcd_toggle_vreg(dev, info->vccq2, on); 8888 8889 out: 8890 if (ret) { 8891 ufshcd_toggle_vreg(dev, info->vccq2, false); 8892 ufshcd_toggle_vreg(dev, info->vccq, false); 8893 ufshcd_toggle_vreg(dev, info->vcc, false); 8894 } 8895 return ret; 8896 } 8897 8898 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on) 8899 { 8900 struct ufs_vreg_info *info = &hba->vreg_info; 8901 8902 return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on); 8903 } 8904 8905 int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg) 8906 { 8907 int ret = 0; 8908 8909 if (!vreg) 8910 goto out; 8911 8912 vreg->reg = devm_regulator_get(dev, vreg->name); 8913 if (IS_ERR(vreg->reg)) { 8914 ret = PTR_ERR(vreg->reg); 8915 dev_err(dev, "%s: %s get failed, err=%d\n", 8916 __func__, vreg->name, ret); 8917 } 8918 out: 8919 return ret; 8920 } 8921 EXPORT_SYMBOL_GPL(ufshcd_get_vreg); 8922 8923 static int ufshcd_init_vreg(struct ufs_hba *hba) 8924 { 8925 int ret = 0; 8926 struct device *dev = hba->dev; 8927 struct ufs_vreg_info *info = &hba->vreg_info; 8928 8929 ret = ufshcd_get_vreg(dev, info->vcc); 8930 if (ret) 8931 goto out; 8932 8933 ret = ufshcd_get_vreg(dev, info->vccq); 8934 if (!ret) 8935 ret = ufshcd_get_vreg(dev, info->vccq2); 8936 out: 8937 return ret; 8938 } 8939 8940 static int ufshcd_init_hba_vreg(struct ufs_hba *hba) 8941 { 8942 struct ufs_vreg_info *info = &hba->vreg_info; 8943 8944 return ufshcd_get_vreg(hba->dev, info->vdd_hba); 8945 } 8946 8947 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on) 8948 { 8949 int ret = 0; 8950 struct ufs_clk_info *clki; 8951 struct list_head *head = &hba->clk_list_head; 8952 unsigned long flags; 8953 ktime_t start = ktime_get(); 8954 bool clk_state_changed = false; 8955 8956 if (list_empty(head)) 8957 goto out; 8958 8959 ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE); 8960 if (ret) 8961 return ret; 8962 8963 list_for_each_entry(clki, head, list) { 8964 if (!IS_ERR_OR_NULL(clki->clk)) { 8965 /* 8966 * Don't disable clocks which are needed 8967 * to keep the link active. 
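 * Such clocks are flagged with clki->keep_link_active (typically by the
 * platform glue driver) and are simply skipped here while the link is
 * active.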
8968 */ 8969 if (ufshcd_is_link_active(hba) && 8970 clki->keep_link_active) 8971 continue; 8972 8973 clk_state_changed = on ^ clki->enabled; 8974 if (on && !clki->enabled) { 8975 ret = clk_prepare_enable(clki->clk); 8976 if (ret) { 8977 dev_err(hba->dev, "%s: %s prepare enable failed, %d\n", 8978 __func__, clki->name, ret); 8979 goto out; 8980 } 8981 } else if (!on && clki->enabled) { 8982 clk_disable_unprepare(clki->clk); 8983 } 8984 clki->enabled = on; 8985 dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__, 8986 clki->name, on ? "en" : "dis"); 8987 } 8988 } 8989 8990 ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE); 8991 if (ret) 8992 return ret; 8993 8994 out: 8995 if (ret) { 8996 list_for_each_entry(clki, head, list) { 8997 if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled) 8998 clk_disable_unprepare(clki->clk); 8999 } 9000 } else if (!ret && on) { 9001 spin_lock_irqsave(hba->host->host_lock, flags); 9002 hba->clk_gating.state = CLKS_ON; 9003 trace_ufshcd_clk_gating(dev_name(hba->dev), 9004 hba->clk_gating.state); 9005 spin_unlock_irqrestore(hba->host->host_lock, flags); 9006 } 9007 9008 if (clk_state_changed) 9009 trace_ufshcd_profile_clk_gating(dev_name(hba->dev), 9010 (on ? "on" : "off"), 9011 ktime_to_us(ktime_sub(ktime_get(), start)), ret); 9012 return ret; 9013 } 9014 9015 static enum ufs_ref_clk_freq ufshcd_parse_ref_clk_property(struct ufs_hba *hba) 9016 { 9017 u32 freq; 9018 int ret = device_property_read_u32(hba->dev, "ref-clk-freq", &freq); 9019 9020 if (ret) { 9021 dev_dbg(hba->dev, "Cannot query 'ref-clk-freq' property = %d", ret); 9022 return REF_CLK_FREQ_INVAL; 9023 } 9024 9025 return ufs_get_bref_clk_from_hz(freq); 9026 } 9027 9028 static int ufshcd_init_clocks(struct ufs_hba *hba) 9029 { 9030 int ret = 0; 9031 struct ufs_clk_info *clki; 9032 struct device *dev = hba->dev; 9033 struct list_head *head = &hba->clk_list_head; 9034 9035 if (list_empty(head)) 9036 goto out; 9037 9038 list_for_each_entry(clki, head, list) { 9039 if (!clki->name) 9040 continue; 9041 9042 clki->clk = devm_clk_get(dev, clki->name); 9043 if (IS_ERR(clki->clk)) { 9044 ret = PTR_ERR(clki->clk); 9045 dev_err(dev, "%s: %s clk get failed, %d\n", 9046 __func__, clki->name, ret); 9047 goto out; 9048 } 9049 9050 /* 9051 * Parse device ref clk freq as per device tree "ref_clk". 9052 * Default dev_ref_clk_freq is set to REF_CLK_FREQ_INVAL 9053 * in ufshcd_alloc_host(). 
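 * The rate is matched against ufs_ref_clk_freqs[] (19.2, 26, 38.4 or
 * 52 MHz); a valid value is later programmed into the device's
 * bRefClkFreq attribute by ufshcd_set_dev_ref_clk().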
9054 */ 9055 if (!strcmp(clki->name, "ref_clk")) 9056 ufshcd_parse_dev_ref_clk_freq(hba, clki->clk); 9057 9058 if (clki->max_freq) { 9059 ret = clk_set_rate(clki->clk, clki->max_freq); 9060 if (ret) { 9061 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", 9062 __func__, clki->name, 9063 clki->max_freq, ret); 9064 goto out; 9065 } 9066 clki->curr_freq = clki->max_freq; 9067 } 9068 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__, 9069 clki->name, clk_get_rate(clki->clk)); 9070 } 9071 out: 9072 return ret; 9073 } 9074 9075 static int ufshcd_variant_hba_init(struct ufs_hba *hba) 9076 { 9077 int err = 0; 9078 9079 if (!hba->vops) 9080 goto out; 9081 9082 err = ufshcd_vops_init(hba); 9083 if (err) 9084 dev_err(hba->dev, "%s: variant %s init failed err %d\n", 9085 __func__, ufshcd_get_var_name(hba), err); 9086 out: 9087 return err; 9088 } 9089 9090 static void ufshcd_variant_hba_exit(struct ufs_hba *hba) 9091 { 9092 if (!hba->vops) 9093 return; 9094 9095 ufshcd_vops_exit(hba); 9096 } 9097 9098 static int ufshcd_hba_init(struct ufs_hba *hba) 9099 { 9100 int err; 9101 9102 /* 9103 * Handle host controller power separately from the UFS device power 9104 * rails as it will help controlling the UFS host controller power 9105 * collapse easily which is different than UFS device power collapse. 9106 * Also, enable the host controller power before we go ahead with rest 9107 * of the initialization here. 9108 */ 9109 err = ufshcd_init_hba_vreg(hba); 9110 if (err) 9111 goto out; 9112 9113 err = ufshcd_setup_hba_vreg(hba, true); 9114 if (err) 9115 goto out; 9116 9117 err = ufshcd_init_clocks(hba); 9118 if (err) 9119 goto out_disable_hba_vreg; 9120 9121 if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL) 9122 hba->dev_ref_clk_freq = ufshcd_parse_ref_clk_property(hba); 9123 9124 err = ufshcd_setup_clocks(hba, true); 9125 if (err) 9126 goto out_disable_hba_vreg; 9127 9128 err = ufshcd_init_vreg(hba); 9129 if (err) 9130 goto out_disable_clks; 9131 9132 err = ufshcd_setup_vreg(hba, true); 9133 if (err) 9134 goto out_disable_clks; 9135 9136 err = ufshcd_variant_hba_init(hba); 9137 if (err) 9138 goto out_disable_vreg; 9139 9140 ufs_debugfs_hba_init(hba); 9141 9142 hba->is_powered = true; 9143 goto out; 9144 9145 out_disable_vreg: 9146 ufshcd_setup_vreg(hba, false); 9147 out_disable_clks: 9148 ufshcd_setup_clocks(hba, false); 9149 out_disable_hba_vreg: 9150 ufshcd_setup_hba_vreg(hba, false); 9151 out: 9152 return err; 9153 } 9154 9155 static void ufshcd_hba_exit(struct ufs_hba *hba) 9156 { 9157 if (hba->is_powered) { 9158 ufshcd_exit_clk_scaling(hba); 9159 ufshcd_exit_clk_gating(hba); 9160 if (hba->eh_wq) 9161 destroy_workqueue(hba->eh_wq); 9162 ufs_debugfs_hba_exit(hba); 9163 ufshcd_variant_hba_exit(hba); 9164 ufshcd_setup_vreg(hba, false); 9165 ufshcd_setup_clocks(hba, false); 9166 ufshcd_setup_hba_vreg(hba, false); 9167 hba->is_powered = false; 9168 ufs_put_device_desc(hba); 9169 } 9170 } 9171 9172 static int ufshcd_execute_start_stop(struct scsi_device *sdev, 9173 enum ufs_dev_pwr_mode pwr_mode, 9174 struct scsi_sense_hdr *sshdr) 9175 { 9176 const unsigned char cdb[6] = { START_STOP, 0, 0, 0, pwr_mode << 4, 0 }; 9177 const struct scsi_exec_args args = { 9178 .sshdr = sshdr, 9179 .req_flags = BLK_MQ_REQ_PM, 9180 .scmd_flags = SCMD_FAIL_IF_RECOVERING, 9181 }; 9182 9183 return scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, /*buffer=*/NULL, 9184 /*bufflen=*/0, /*timeout=*/HZ, /*retries=*/0, &args); 9185 } 9186 9187 /** 9188 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device 9189 * power 
mode 9190 * @hba: per adapter instance 9191 * @pwr_mode: device power mode to set 9192 * 9193 * Returns 0 if requested power mode is set successfully 9194 * Returns < 0 if failed to set the requested power mode 9195 */ 9196 static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba, 9197 enum ufs_dev_pwr_mode pwr_mode) 9198 { 9199 struct scsi_sense_hdr sshdr; 9200 struct scsi_device *sdp; 9201 unsigned long flags; 9202 int ret, retries; 9203 9204 spin_lock_irqsave(hba->host->host_lock, flags); 9205 sdp = hba->ufs_device_wlun; 9206 if (sdp && scsi_device_online(sdp)) 9207 ret = scsi_device_get(sdp); 9208 else 9209 ret = -ENODEV; 9210 spin_unlock_irqrestore(hba->host->host_lock, flags); 9211 9212 if (ret) 9213 return ret; 9214 9215 /* 9216 * If scsi commands fail, the scsi mid-layer schedules scsi error- 9217 * handling, which would wait for host to be resumed. Since we know 9218 * we are functional while we are here, skip host resume in error 9219 * handling context. 9220 */ 9221 hba->host->eh_noresume = 1; 9222 9223 /* 9224 * Current function would be generally called from the power management 9225 * callbacks hence set the RQF_PM flag so that it doesn't resume the 9226 * already suspended childs. 9227 */ 9228 for (retries = 3; retries > 0; --retries) { 9229 ret = ufshcd_execute_start_stop(sdp, pwr_mode, &sshdr); 9230 /* 9231 * scsi_execute() only returns a negative value if the request 9232 * queue is dying. 9233 */ 9234 if (ret <= 0) 9235 break; 9236 } 9237 if (ret) { 9238 sdev_printk(KERN_WARNING, sdp, 9239 "START_STOP failed for power mode: %d, result %x\n", 9240 pwr_mode, ret); 9241 if (ret > 0) { 9242 if (scsi_sense_valid(&sshdr)) 9243 scsi_print_sense_hdr(sdp, NULL, &sshdr); 9244 ret = -EIO; 9245 } 9246 } else { 9247 hba->curr_dev_pwr_mode = pwr_mode; 9248 } 9249 9250 scsi_device_put(sdp); 9251 hba->host->eh_noresume = 0; 9252 return ret; 9253 } 9254 9255 static int ufshcd_link_state_transition(struct ufs_hba *hba, 9256 enum uic_link_state req_link_state, 9257 bool check_for_bkops) 9258 { 9259 int ret = 0; 9260 9261 if (req_link_state == hba->uic_link_state) 9262 return 0; 9263 9264 if (req_link_state == UIC_LINK_HIBERN8_STATE) { 9265 ret = ufshcd_uic_hibern8_enter(hba); 9266 if (!ret) { 9267 ufshcd_set_link_hibern8(hba); 9268 } else { 9269 dev_err(hba->dev, "%s: hibern8 enter failed %d\n", 9270 __func__, ret); 9271 goto out; 9272 } 9273 } 9274 /* 9275 * If autobkops is enabled, link can't be turned off because 9276 * turning off the link would also turn off the device, except in the 9277 * case of DeepSleep where the device is expected to remain powered. 9278 */ 9279 else if ((req_link_state == UIC_LINK_OFF_STATE) && 9280 (!check_for_bkops || !hba->auto_bkops_enabled)) { 9281 /* 9282 * Let's make sure that link is in low power mode, we are doing 9283 * this currently by putting the link in Hibern8. Otherway to 9284 * put the link in low power mode is to send the DME end point 9285 * to device and then send the DME reset command to local 9286 * unipro. But putting the link in hibern8 is much faster. 9287 * 9288 * Note also that putting the link in Hibern8 is a requirement 9289 * for entering DeepSleep. 
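 * That is why the code below first enters Hibern8 and then stops the
 * host controller, leaving the link in the off/reset state.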
9290 */ 9291 ret = ufshcd_uic_hibern8_enter(hba); 9292 if (ret) { 9293 dev_err(hba->dev, "%s: hibern8 enter failed %d\n", 9294 __func__, ret); 9295 goto out; 9296 } 9297 /* 9298 * Change controller state to "reset state" which 9299 * should also put the link in off/reset state 9300 */ 9301 ufshcd_hba_stop(hba); 9302 /* 9303 * TODO: Check if we need any delay to make sure that 9304 * controller is reset 9305 */ 9306 ufshcd_set_link_off(hba); 9307 } 9308 9309 out: 9310 return ret; 9311 } 9312 9313 static void ufshcd_vreg_set_lpm(struct ufs_hba *hba) 9314 { 9315 bool vcc_off = false; 9316 9317 /* 9318 * It seems some UFS devices may keep drawing more than sleep current 9319 * (atleast for 500us) from UFS rails (especially from VCCQ rail). 9320 * To avoid this situation, add 2ms delay before putting these UFS 9321 * rails in LPM mode. 9322 */ 9323 if (!ufshcd_is_link_active(hba) && 9324 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM) 9325 usleep_range(2000, 2100); 9326 9327 /* 9328 * If UFS device is either in UFS_Sleep turn off VCC rail to save some 9329 * power. 9330 * 9331 * If UFS device and link is in OFF state, all power supplies (VCC, 9332 * VCCQ, VCCQ2) can be turned off if power on write protect is not 9333 * required. If UFS link is inactive (Hibern8 or OFF state) and device 9334 * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode. 9335 * 9336 * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway 9337 * in low power state which would save some power. 9338 * 9339 * If Write Booster is enabled and the device needs to flush the WB 9340 * buffer OR if bkops status is urgent for WB, keep Vcc on. 9341 */ 9342 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) && 9343 !hba->dev_info.is_lu_power_on_wp) { 9344 ufshcd_setup_vreg(hba, false); 9345 vcc_off = true; 9346 } else if (!ufshcd_is_ufs_dev_active(hba)) { 9347 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false); 9348 vcc_off = true; 9349 if (ufshcd_is_link_hibern8(hba) || ufshcd_is_link_off(hba)) { 9350 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq); 9351 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2); 9352 } 9353 } 9354 9355 /* 9356 * Some UFS devices require delay after VCC power rail is turned-off. 
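 * The UFS_DEVICE_QUIRK_DELAY_AFTER_LPM check below inserts roughly a
 * 5 ms pause for such devices.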
9357 */ 9358 if (vcc_off && hba->vreg_info.vcc && 9359 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM) 9360 usleep_range(5000, 5100); 9361 } 9362 9363 #ifdef CONFIG_PM 9364 static int ufshcd_vreg_set_hpm(struct ufs_hba *hba) 9365 { 9366 int ret = 0; 9367 9368 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) && 9369 !hba->dev_info.is_lu_power_on_wp) { 9370 ret = ufshcd_setup_vreg(hba, true); 9371 } else if (!ufshcd_is_ufs_dev_active(hba)) { 9372 if (!ufshcd_is_link_active(hba)) { 9373 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq); 9374 if (ret) 9375 goto vcc_disable; 9376 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2); 9377 if (ret) 9378 goto vccq_lpm; 9379 } 9380 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true); 9381 } 9382 goto out; 9383 9384 vccq_lpm: 9385 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq); 9386 vcc_disable: 9387 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false); 9388 out: 9389 return ret; 9390 } 9391 #endif /* CONFIG_PM */ 9392 9393 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba) 9394 { 9395 if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba)) 9396 ufshcd_setup_hba_vreg(hba, false); 9397 } 9398 9399 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba) 9400 { 9401 if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba)) 9402 ufshcd_setup_hba_vreg(hba, true); 9403 } 9404 9405 static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) 9406 { 9407 int ret = 0; 9408 bool check_for_bkops; 9409 enum ufs_pm_level pm_lvl; 9410 enum ufs_dev_pwr_mode req_dev_pwr_mode; 9411 enum uic_link_state req_link_state; 9412 9413 hba->pm_op_in_progress = true; 9414 if (pm_op != UFS_SHUTDOWN_PM) { 9415 pm_lvl = pm_op == UFS_RUNTIME_PM ? 9416 hba->rpm_lvl : hba->spm_lvl; 9417 req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl); 9418 req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl); 9419 } else { 9420 req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE; 9421 req_link_state = UIC_LINK_OFF_STATE; 9422 } 9423 9424 ufshpb_suspend(hba); 9425 9426 /* 9427 * If we can't transition into any of the low power modes 9428 * just gate the clocks. 9429 */ 9430 ufshcd_hold(hba, false); 9431 hba->clk_gating.is_suspended = true; 9432 9433 if (ufshcd_is_clkscaling_supported(hba)) 9434 ufshcd_clk_scaling_suspend(hba, true); 9435 9436 if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE && 9437 req_link_state == UIC_LINK_ACTIVE_STATE) { 9438 goto vops_suspend; 9439 } 9440 9441 if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) && 9442 (req_link_state == hba->uic_link_state)) 9443 goto enable_scaling; 9444 9445 /* UFS device & link must be active before we enter in this function */ 9446 if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) { 9447 ret = -EINVAL; 9448 goto enable_scaling; 9449 } 9450 9451 if (pm_op == UFS_RUNTIME_PM) { 9452 if (ufshcd_can_autobkops_during_suspend(hba)) { 9453 /* 9454 * The device is idle with no requests in the queue, 9455 * allow background operations if bkops status shows 9456 * that performance might be impacted. 9457 */ 9458 ret = ufshcd_urgent_bkops(hba); 9459 if (ret) 9460 goto enable_scaling; 9461 } else { 9462 /* make sure that auto bkops is disabled */ 9463 ufshcd_disable_auto_bkops(hba); 9464 } 9465 /* 9466 * If device needs to do BKOP or WB buffer flush during 9467 * Hibern8, keep device power mode as "active power mode" 9468 * and VCC supply. 
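 * The decision is recorded in b_rpm_dev_flush_capable below and re-checked
 * later by rpm_dev_flush_recheck_work.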
9469 */ 9470 hba->dev_info.b_rpm_dev_flush_capable = 9471 hba->auto_bkops_enabled || 9472 (((req_link_state == UIC_LINK_HIBERN8_STATE) || 9473 ((req_link_state == UIC_LINK_ACTIVE_STATE) && 9474 ufshcd_is_auto_hibern8_enabled(hba))) && 9475 ufshcd_wb_need_flush(hba)); 9476 } 9477 9478 flush_work(&hba->eeh_work); 9479 9480 ret = ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE); 9481 if (ret) 9482 goto enable_scaling; 9483 9484 if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) { 9485 if (pm_op != UFS_RUNTIME_PM) 9486 /* ensure that bkops is disabled */ 9487 ufshcd_disable_auto_bkops(hba); 9488 9489 if (!hba->dev_info.b_rpm_dev_flush_capable) { 9490 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode); 9491 if (ret && pm_op != UFS_SHUTDOWN_PM) { 9492 /* 9493 * If return err in suspend flow, IO will hang. 9494 * Trigger error handler and break suspend for 9495 * error recovery. 9496 */ 9497 ufshcd_force_error_recovery(hba); 9498 ret = -EBUSY; 9499 } 9500 if (ret) 9501 goto enable_scaling; 9502 } 9503 } 9504 9505 /* 9506 * In the case of DeepSleep, the device is expected to remain powered 9507 * with the link off, so do not check for bkops. 9508 */ 9509 check_for_bkops = !ufshcd_is_ufs_dev_deepsleep(hba); 9510 ret = ufshcd_link_state_transition(hba, req_link_state, check_for_bkops); 9511 if (ret && pm_op != UFS_SHUTDOWN_PM) { 9512 /* 9513 * If return err in suspend flow, IO will hang. 9514 * Trigger error handler and break suspend for 9515 * error recovery. 9516 */ 9517 ufshcd_force_error_recovery(hba); 9518 ret = -EBUSY; 9519 } 9520 if (ret) 9521 goto set_dev_active; 9522 9523 vops_suspend: 9524 /* 9525 * Call vendor specific suspend callback. As these callbacks may access 9526 * vendor specific host controller register space call them before the 9527 * host clocks are ON. 9528 */ 9529 ret = ufshcd_vops_suspend(hba, pm_op, POST_CHANGE); 9530 if (ret) 9531 goto set_link_active; 9532 goto out; 9533 9534 set_link_active: 9535 /* 9536 * Device hardware reset is required to exit DeepSleep. Also, for 9537 * DeepSleep, the link is off so host reset and restore will be done 9538 * further below. 
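 * (The link-off case triggers ufshcd_host_reset_and_restore() in the code
 * just below.)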
9539 */ 9540 if (ufshcd_is_ufs_dev_deepsleep(hba)) { 9541 ufshcd_device_reset(hba); 9542 WARN_ON(!ufshcd_is_link_off(hba)); 9543 } 9544 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba)) 9545 ufshcd_set_link_active(hba); 9546 else if (ufshcd_is_link_off(hba)) 9547 ufshcd_host_reset_and_restore(hba); 9548 set_dev_active: 9549 /* Can also get here needing to exit DeepSleep */ 9550 if (ufshcd_is_ufs_dev_deepsleep(hba)) { 9551 ufshcd_device_reset(hba); 9552 ufshcd_host_reset_and_restore(hba); 9553 } 9554 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE)) 9555 ufshcd_disable_auto_bkops(hba); 9556 enable_scaling: 9557 if (ufshcd_is_clkscaling_supported(hba)) 9558 ufshcd_clk_scaling_suspend(hba, false); 9559 9560 hba->dev_info.b_rpm_dev_flush_capable = false; 9561 out: 9562 if (hba->dev_info.b_rpm_dev_flush_capable) { 9563 schedule_delayed_work(&hba->rpm_dev_flush_recheck_work, 9564 msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS)); 9565 } 9566 9567 if (ret) { 9568 ufshcd_update_evt_hist(hba, UFS_EVT_WL_SUSP_ERR, (u32)ret); 9569 hba->clk_gating.is_suspended = false; 9570 ufshcd_release(hba); 9571 ufshpb_resume(hba); 9572 } 9573 hba->pm_op_in_progress = false; 9574 return ret; 9575 } 9576 9577 #ifdef CONFIG_PM 9578 static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) 9579 { 9580 int ret; 9581 enum uic_link_state old_link_state = hba->uic_link_state; 9582 9583 hba->pm_op_in_progress = true; 9584 9585 /* 9586 * Call vendor specific resume callback. As these callbacks may access 9587 * vendor specific host controller register space call them when the 9588 * host clocks are ON. 9589 */ 9590 ret = ufshcd_vops_resume(hba, pm_op); 9591 if (ret) 9592 goto out; 9593 9594 /* For DeepSleep, the only supported option is to have the link off */ 9595 WARN_ON(ufshcd_is_ufs_dev_deepsleep(hba) && !ufshcd_is_link_off(hba)); 9596 9597 if (ufshcd_is_link_hibern8(hba)) { 9598 ret = ufshcd_uic_hibern8_exit(hba); 9599 if (!ret) { 9600 ufshcd_set_link_active(hba); 9601 } else { 9602 dev_err(hba->dev, "%s: hibern8 exit failed %d\n", 9603 __func__, ret); 9604 goto vendor_suspend; 9605 } 9606 } else if (ufshcd_is_link_off(hba)) { 9607 /* 9608 * A full initialization of the host and the device is 9609 * required since the link was put to off during suspend. 9610 * Note, in the case of DeepSleep, the device will exit 9611 * DeepSleep due to device reset. 9612 */ 9613 ret = ufshcd_reset_and_restore(hba); 9614 /* 9615 * ufshcd_reset_and_restore() should have already 9616 * set the link state as active 9617 */ 9618 if (ret || !ufshcd_is_link_active(hba)) 9619 goto vendor_suspend; 9620 } 9621 9622 if (!ufshcd_is_ufs_dev_active(hba)) { 9623 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE); 9624 if (ret) 9625 goto set_old_link_state; 9626 } 9627 9628 if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) 9629 ufshcd_enable_auto_bkops(hba); 9630 else 9631 /* 9632 * If BKOPs operations are urgently needed at this moment then 9633 * keep auto-bkops enabled or else disable it. 
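 * ufshcd_urgent_bkops() checks the device's BKOPS status against the urgent
 * threshold and enables or disables auto-bkops accordingly.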
9634 */ 9635 ufshcd_urgent_bkops(hba); 9636 9637 if (hba->ee_usr_mask) 9638 ufshcd_write_ee_control(hba); 9639 9640 if (ufshcd_is_clkscaling_supported(hba)) 9641 ufshcd_clk_scaling_suspend(hba, false); 9642 9643 if (hba->dev_info.b_rpm_dev_flush_capable) { 9644 hba->dev_info.b_rpm_dev_flush_capable = false; 9645 cancel_delayed_work(&hba->rpm_dev_flush_recheck_work); 9646 } 9647 9648 /* Enable Auto-Hibernate if configured */ 9649 ufshcd_auto_hibern8_enable(hba); 9650 9651 ufshpb_resume(hba); 9652 goto out; 9653 9654 set_old_link_state: 9655 ufshcd_link_state_transition(hba, old_link_state, 0); 9656 vendor_suspend: 9657 ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE); 9658 ufshcd_vops_suspend(hba, pm_op, POST_CHANGE); 9659 out: 9660 if (ret) 9661 ufshcd_update_evt_hist(hba, UFS_EVT_WL_RES_ERR, (u32)ret); 9662 hba->clk_gating.is_suspended = false; 9663 ufshcd_release(hba); 9664 hba->pm_op_in_progress = false; 9665 return ret; 9666 } 9667 9668 static int ufshcd_wl_runtime_suspend(struct device *dev) 9669 { 9670 struct scsi_device *sdev = to_scsi_device(dev); 9671 struct ufs_hba *hba; 9672 int ret; 9673 ktime_t start = ktime_get(); 9674 9675 hba = shost_priv(sdev->host); 9676 9677 ret = __ufshcd_wl_suspend(hba, UFS_RUNTIME_PM); 9678 if (ret) 9679 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret); 9680 9681 trace_ufshcd_wl_runtime_suspend(dev_name(dev), ret, 9682 ktime_to_us(ktime_sub(ktime_get(), start)), 9683 hba->curr_dev_pwr_mode, hba->uic_link_state); 9684 9685 return ret; 9686 } 9687 9688 static int ufshcd_wl_runtime_resume(struct device *dev) 9689 { 9690 struct scsi_device *sdev = to_scsi_device(dev); 9691 struct ufs_hba *hba; 9692 int ret = 0; 9693 ktime_t start = ktime_get(); 9694 9695 hba = shost_priv(sdev->host); 9696 9697 ret = __ufshcd_wl_resume(hba, UFS_RUNTIME_PM); 9698 if (ret) 9699 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret); 9700 9701 trace_ufshcd_wl_runtime_resume(dev_name(dev), ret, 9702 ktime_to_us(ktime_sub(ktime_get(), start)), 9703 hba->curr_dev_pwr_mode, hba->uic_link_state); 9704 9705 return ret; 9706 } 9707 #endif 9708 9709 #ifdef CONFIG_PM_SLEEP 9710 static int ufshcd_wl_suspend(struct device *dev) 9711 { 9712 struct scsi_device *sdev = to_scsi_device(dev); 9713 struct ufs_hba *hba; 9714 int ret = 0; 9715 ktime_t start = ktime_get(); 9716 9717 hba = shost_priv(sdev->host); 9718 down(&hba->host_sem); 9719 hba->system_suspending = true; 9720 9721 if (pm_runtime_suspended(dev)) 9722 goto out; 9723 9724 ret = __ufshcd_wl_suspend(hba, UFS_SYSTEM_PM); 9725 if (ret) { 9726 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret); 9727 up(&hba->host_sem); 9728 } 9729 9730 out: 9731 if (!ret) 9732 hba->is_sys_suspended = true; 9733 trace_ufshcd_wl_suspend(dev_name(dev), ret, 9734 ktime_to_us(ktime_sub(ktime_get(), start)), 9735 hba->curr_dev_pwr_mode, hba->uic_link_state); 9736 9737 return ret; 9738 } 9739 9740 static int ufshcd_wl_resume(struct device *dev) 9741 { 9742 struct scsi_device *sdev = to_scsi_device(dev); 9743 struct ufs_hba *hba; 9744 int ret = 0; 9745 ktime_t start = ktime_get(); 9746 9747 hba = shost_priv(sdev->host); 9748 9749 if (pm_runtime_suspended(dev)) 9750 goto out; 9751 9752 ret = __ufshcd_wl_resume(hba, UFS_SYSTEM_PM); 9753 if (ret) 9754 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret); 9755 out: 9756 trace_ufshcd_wl_resume(dev_name(dev), ret, 9757 ktime_to_us(ktime_sub(ktime_get(), start)), 9758 hba->curr_dev_pwr_mode, hba->uic_link_state); 9759 if (!ret) 9760 hba->is_sys_suspended = false; 9761 
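	/*
	 * System-level resume is complete: clear the flag set in
	 * ufshcd_wl_suspend() and release host_sem taken there.
	 */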
	hba->system_suspending = false;
	up(&hba->host_sem);
	return ret;
}
#endif

static void ufshcd_wl_shutdown(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ufs_hba *hba;

	hba = shost_priv(sdev->host);

	down(&hba->host_sem);
	hba->shutting_down = true;
	up(&hba->host_sem);

	/* Turn on everything while shutting down */
	ufshcd_rpm_get_sync(hba);
	scsi_device_quiesce(sdev);
	shost_for_each_device(sdev, hba->host) {
		if (sdev == hba->ufs_device_wlun)
			continue;
		scsi_device_quiesce(sdev);
	}
	__ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
}

/**
 * ufshcd_suspend - helper function for suspend operations
 * @hba: per adapter instance
 *
 * This function disables irqs, turns off clocks and puts the vreg and
 * hba-vreg supplies in LPM mode.
 */
static int ufshcd_suspend(struct ufs_hba *hba)
{
	int ret;

	if (!hba->is_powered)
		return 0;
	/*
	 * Disable the host irq as there won't be any host controller
	 * transaction expected till resume.
	 */
	ufshcd_disable_irq(hba);
	ret = ufshcd_setup_clocks(hba, false);
	if (ret) {
		ufshcd_enable_irq(hba);
		return ret;
	}
	if (ufshcd_is_clkgating_allowed(hba)) {
		hba->clk_gating.state = CLKS_OFF;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
	}

	ufshcd_vreg_set_lpm(hba);
	/* Put the host controller in low power mode if possible */
	ufshcd_hba_vreg_set_lpm(hba);
	return ret;
}

#ifdef CONFIG_PM
/**
 * ufshcd_resume - helper function for resume operations
 * @hba: per adapter instance
 *
 * This function basically turns on the regulators, clocks and
 * irqs of the hba.
 *
 * Returns 0 for success and non-zero for failure
 */
static int ufshcd_resume(struct ufs_hba *hba)
{
	int ret;

	if (!hba->is_powered)
		return 0;

	ufshcd_hba_vreg_set_hpm(hba);
	ret = ufshcd_vreg_set_hpm(hba);
	if (ret)
		goto out;

	/* Make sure clocks are enabled before accessing controller */
	ret = ufshcd_setup_clocks(hba, true);
	if (ret)
		goto disable_vreg;

	/* enable the host irq as host controller would be active soon */
	ufshcd_enable_irq(hba);

	goto out;

disable_vreg:
	ufshcd_vreg_set_lpm(hba);
out:
	if (ret)
		ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)ret);
	return ret;
}
#endif /* CONFIG_PM */

#ifdef CONFIG_PM_SLEEP
/**
 * ufshcd_system_suspend - system suspend callback
 * @dev: Device associated with the UFS controller.
 *
 * Executed before putting the system into a sleep state in which the contents
 * of main memory are preserved.
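 * If the controller is already runtime suspended this is a no-op; otherwise
 * ufshcd_suspend() is called to gate the clocks and put the regulators in
 * low power mode.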
9872 * 9873 * Returns 0 for success and non-zero for failure 9874 */ 9875 int ufshcd_system_suspend(struct device *dev) 9876 { 9877 struct ufs_hba *hba = dev_get_drvdata(dev); 9878 int ret = 0; 9879 ktime_t start = ktime_get(); 9880 9881 if (pm_runtime_suspended(hba->dev)) 9882 goto out; 9883 9884 ret = ufshcd_suspend(hba); 9885 out: 9886 trace_ufshcd_system_suspend(dev_name(hba->dev), ret, 9887 ktime_to_us(ktime_sub(ktime_get(), start)), 9888 hba->curr_dev_pwr_mode, hba->uic_link_state); 9889 return ret; 9890 } 9891 EXPORT_SYMBOL(ufshcd_system_suspend); 9892 9893 /** 9894 * ufshcd_system_resume - system resume callback 9895 * @dev: Device associated with the UFS controller. 9896 * 9897 * Executed after waking the system up from a sleep state in which the contents 9898 * of main memory were preserved. 9899 * 9900 * Returns 0 for success and non-zero for failure 9901 */ 9902 int ufshcd_system_resume(struct device *dev) 9903 { 9904 struct ufs_hba *hba = dev_get_drvdata(dev); 9905 ktime_t start = ktime_get(); 9906 int ret = 0; 9907 9908 if (pm_runtime_suspended(hba->dev)) 9909 goto out; 9910 9911 ret = ufshcd_resume(hba); 9912 9913 out: 9914 trace_ufshcd_system_resume(dev_name(hba->dev), ret, 9915 ktime_to_us(ktime_sub(ktime_get(), start)), 9916 hba->curr_dev_pwr_mode, hba->uic_link_state); 9917 9918 return ret; 9919 } 9920 EXPORT_SYMBOL(ufshcd_system_resume); 9921 #endif /* CONFIG_PM_SLEEP */ 9922 9923 #ifdef CONFIG_PM 9924 /** 9925 * ufshcd_runtime_suspend - runtime suspend callback 9926 * @dev: Device associated with the UFS controller. 9927 * 9928 * Check the description of ufshcd_suspend() function for more details. 9929 * 9930 * Returns 0 for success and non-zero for failure 9931 */ 9932 int ufshcd_runtime_suspend(struct device *dev) 9933 { 9934 struct ufs_hba *hba = dev_get_drvdata(dev); 9935 int ret; 9936 ktime_t start = ktime_get(); 9937 9938 ret = ufshcd_suspend(hba); 9939 9940 trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret, 9941 ktime_to_us(ktime_sub(ktime_get(), start)), 9942 hba->curr_dev_pwr_mode, hba->uic_link_state); 9943 return ret; 9944 } 9945 EXPORT_SYMBOL(ufshcd_runtime_suspend); 9946 9947 /** 9948 * ufshcd_runtime_resume - runtime resume routine 9949 * @dev: Device associated with the UFS controller. 9950 * 9951 * This function basically brings controller 9952 * to active state. Following operations are done in this function: 9953 * 9954 * 1. Turn on all the controller related clocks 9955 * 2. Turn ON VCC rail 9956 */ 9957 int ufshcd_runtime_resume(struct device *dev) 9958 { 9959 struct ufs_hba *hba = dev_get_drvdata(dev); 9960 int ret; 9961 ktime_t start = ktime_get(); 9962 9963 ret = ufshcd_resume(hba); 9964 9965 trace_ufshcd_runtime_resume(dev_name(hba->dev), ret, 9966 ktime_to_us(ktime_sub(ktime_get(), start)), 9967 hba->curr_dev_pwr_mode, hba->uic_link_state); 9968 return ret; 9969 } 9970 EXPORT_SYMBOL(ufshcd_runtime_resume); 9971 #endif /* CONFIG_PM */ 9972 9973 /** 9974 * ufshcd_shutdown - shutdown routine 9975 * @hba: per adapter instance 9976 * 9977 * This function would turn off both UFS device and UFS hba 9978 * regulators. It would also disable clocks. 9979 * 9980 * Returns 0 always to allow force shutdown even in case of errors. 
9981 */ 9982 int ufshcd_shutdown(struct ufs_hba *hba) 9983 { 9984 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba)) 9985 ufshcd_suspend(hba); 9986 9987 hba->is_powered = false; 9988 /* allow force shutdown even in case of errors */ 9989 return 0; 9990 } 9991 EXPORT_SYMBOL(ufshcd_shutdown); 9992 9993 /** 9994 * ufshcd_remove - de-allocate SCSI host and host memory space 9995 * data structure memory 9996 * @hba: per adapter instance 9997 */ 9998 void ufshcd_remove(struct ufs_hba *hba) 9999 { 10000 if (hba->ufs_device_wlun) 10001 ufshcd_rpm_get_sync(hba); 10002 ufs_hwmon_remove(hba); 10003 ufs_bsg_remove(hba); 10004 ufshpb_remove(hba); 10005 ufs_sysfs_remove_nodes(hba->dev); 10006 blk_mq_destroy_queue(hba->tmf_queue); 10007 blk_put_queue(hba->tmf_queue); 10008 blk_mq_free_tag_set(&hba->tmf_tag_set); 10009 scsi_remove_host(hba->host); 10010 /* disable interrupts */ 10011 ufshcd_disable_intr(hba, hba->intr_mask); 10012 ufshcd_hba_stop(hba); 10013 ufshcd_hba_exit(hba); 10014 } 10015 EXPORT_SYMBOL_GPL(ufshcd_remove); 10016 10017 #ifdef CONFIG_PM_SLEEP 10018 int ufshcd_system_freeze(struct device *dev) 10019 { 10020 10021 return ufshcd_system_suspend(dev); 10022 10023 } 10024 EXPORT_SYMBOL_GPL(ufshcd_system_freeze); 10025 10026 int ufshcd_system_restore(struct device *dev) 10027 { 10028 10029 struct ufs_hba *hba = dev_get_drvdata(dev); 10030 int ret; 10031 10032 ret = ufshcd_system_resume(dev); 10033 if (ret) 10034 return ret; 10035 10036 /* Configure UTRL and UTMRL base address registers */ 10037 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr), 10038 REG_UTP_TRANSFER_REQ_LIST_BASE_L); 10039 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr), 10040 REG_UTP_TRANSFER_REQ_LIST_BASE_H); 10041 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr), 10042 REG_UTP_TASK_REQ_LIST_BASE_L); 10043 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr), 10044 REG_UTP_TASK_REQ_LIST_BASE_H); 10045 /* 10046 * Make sure that UTRL and UTMRL base address registers 10047 * are updated with the latest queue addresses. Only after 10048 * updating these addresses, we can queue the new commands. 
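 * (The controller state is typically not preserved across hibernation,
 * which is why these registers have to be reprogrammed here.)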
10049 */ 10050 mb(); 10051 10052 /* Resuming from hibernate, assume that link was OFF */ 10053 ufshcd_set_link_off(hba); 10054 10055 return 0; 10056 10057 } 10058 EXPORT_SYMBOL_GPL(ufshcd_system_restore); 10059 10060 int ufshcd_system_thaw(struct device *dev) 10061 { 10062 return ufshcd_system_resume(dev); 10063 } 10064 EXPORT_SYMBOL_GPL(ufshcd_system_thaw); 10065 #endif /* CONFIG_PM_SLEEP */ 10066 10067 /** 10068 * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA) 10069 * @hba: pointer to Host Bus Adapter (HBA) 10070 */ 10071 void ufshcd_dealloc_host(struct ufs_hba *hba) 10072 { 10073 scsi_host_put(hba->host); 10074 } 10075 EXPORT_SYMBOL_GPL(ufshcd_dealloc_host); 10076 10077 /** 10078 * ufshcd_set_dma_mask - Set dma mask based on the controller 10079 * addressing capability 10080 * @hba: per adapter instance 10081 * 10082 * Returns 0 for success, non-zero for failure 10083 */ 10084 static int ufshcd_set_dma_mask(struct ufs_hba *hba) 10085 { 10086 if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) { 10087 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64))) 10088 return 0; 10089 } 10090 return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32)); 10091 } 10092 10093 /** 10094 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA) 10095 * @dev: pointer to device handle 10096 * @hba_handle: driver private handle 10097 * Returns 0 on success, non-zero value on failure 10098 */ 10099 int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle) 10100 { 10101 struct Scsi_Host *host; 10102 struct ufs_hba *hba; 10103 int err = 0; 10104 10105 if (!dev) { 10106 dev_err(dev, 10107 "Invalid memory reference for dev is NULL\n"); 10108 err = -ENODEV; 10109 goto out_error; 10110 } 10111 10112 host = scsi_host_alloc(&ufshcd_driver_template, 10113 sizeof(struct ufs_hba)); 10114 if (!host) { 10115 dev_err(dev, "scsi_host_alloc failed\n"); 10116 err = -ENOMEM; 10117 goto out_error; 10118 } 10119 host->nr_maps = HCTX_TYPE_POLL + 1; 10120 hba = shost_priv(host); 10121 hba->host = host; 10122 hba->dev = dev; 10123 hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL; 10124 hba->nop_out_timeout = NOP_OUT_TIMEOUT; 10125 ufshcd_set_sg_entry_size(hba, sizeof(struct ufshcd_sg_entry)); 10126 INIT_LIST_HEAD(&hba->clk_list_head); 10127 spin_lock_init(&hba->outstanding_lock); 10128 10129 *hba_handle = hba; 10130 10131 out_error: 10132 return err; 10133 } 10134 EXPORT_SYMBOL(ufshcd_alloc_host); 10135 10136 /* This function exists because blk_mq_alloc_tag_set() requires this. */ 10137 static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx, 10138 const struct blk_mq_queue_data *qd) 10139 { 10140 WARN_ON_ONCE(true); 10141 return BLK_STS_NOTSUPP; 10142 } 10143 10144 static const struct blk_mq_ops ufshcd_tmf_ops = { 10145 .queue_rq = ufshcd_queue_tmf, 10146 }; 10147 10148 /** 10149 * ufshcd_init - Driver initialization routine 10150 * @hba: per-adapter instance 10151 * @mmio_base: base register address 10152 * @irq: Interrupt line of device 10153 * Returns 0 on success, non-zero value on failure 10154 */ 10155 int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) 10156 { 10157 int err; 10158 struct Scsi_Host *host = hba->host; 10159 struct device *dev = hba->dev; 10160 char eh_wq_name[sizeof("ufs_eh_wq_00")]; 10161 10162 /* 10163 * dev_set_drvdata() must be called before any callbacks are registered 10164 * that use dev_get_drvdata() (frequency scaling, clock scaling, hwmon, 10165 * sysfs). 
10166 */ 10167 dev_set_drvdata(dev, hba); 10168 10169 if (!mmio_base) { 10170 dev_err(hba->dev, 10171 "Invalid memory reference for mmio_base is NULL\n"); 10172 err = -ENODEV; 10173 goto out_error; 10174 } 10175 10176 hba->mmio_base = mmio_base; 10177 hba->irq = irq; 10178 hba->vps = &ufs_hba_vps; 10179 10180 err = ufshcd_hba_init(hba); 10181 if (err) 10182 goto out_error; 10183 10184 /* Read capabilities registers */ 10185 err = ufshcd_hba_capabilities(hba); 10186 if (err) 10187 goto out_disable; 10188 10189 /* Get UFS version supported by the controller */ 10190 hba->ufs_version = ufshcd_get_ufs_version(hba); 10191 10192 /* Get Interrupt bit mask per version */ 10193 hba->intr_mask = ufshcd_get_intr_mask(hba); 10194 10195 err = ufshcd_set_dma_mask(hba); 10196 if (err) { 10197 dev_err(hba->dev, "set dma mask failed\n"); 10198 goto out_disable; 10199 } 10200 10201 /* Allocate memory for host memory space */ 10202 err = ufshcd_memory_alloc(hba); 10203 if (err) { 10204 dev_err(hba->dev, "Memory allocation failed\n"); 10205 goto out_disable; 10206 } 10207 10208 /* Configure LRB */ 10209 ufshcd_host_memory_configure(hba); 10210 10211 host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED; 10212 host->cmd_per_lun = hba->nutrs - UFSHCD_NUM_RESERVED; 10213 host->max_id = UFSHCD_MAX_ID; 10214 host->max_lun = UFS_MAX_LUNS; 10215 host->max_channel = UFSHCD_MAX_CHANNEL; 10216 host->unique_id = host->host_no; 10217 host->max_cmd_len = UFS_CDB_SIZE; 10218 10219 hba->max_pwr_info.is_valid = false; 10220 10221 /* Initialize work queues */ 10222 snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d", 10223 hba->host->host_no); 10224 hba->eh_wq = create_singlethread_workqueue(eh_wq_name); 10225 if (!hba->eh_wq) { 10226 dev_err(hba->dev, "%s: failed to create eh workqueue\n", 10227 __func__); 10228 err = -ENOMEM; 10229 goto out_disable; 10230 } 10231 INIT_WORK(&hba->eh_work, ufshcd_err_handler); 10232 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler); 10233 10234 sema_init(&hba->host_sem, 1); 10235 10236 /* Initialize UIC command mutex */ 10237 mutex_init(&hba->uic_cmd_mutex); 10238 10239 /* Initialize mutex for device management commands */ 10240 mutex_init(&hba->dev_cmd.lock); 10241 10242 /* Initialize mutex for exception event control */ 10243 mutex_init(&hba->ee_ctrl_mutex); 10244 10245 mutex_init(&hba->wb_mutex); 10246 init_rwsem(&hba->clk_scaling_lock); 10247 10248 ufshcd_init_clk_gating(hba); 10249 10250 ufshcd_init_clk_scaling(hba); 10251 10252 /* 10253 * In order to avoid any spurious interrupt immediately after 10254 * registering UFS controller interrupt handler, clear any pending UFS 10255 * interrupt status and disable all the UFS interrupts. 10256 */ 10257 ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS), 10258 REG_INTERRUPT_STATUS); 10259 ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE); 10260 /* 10261 * Make sure that UFS interrupts are disabled and any pending interrupt 10262 * status is cleared before registering UFS interrupt handler. 
10263 */ 10264 mb(); 10265 10266 /* IRQ registration */ 10267 err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba); 10268 if (err) { 10269 dev_err(hba->dev, "request irq failed\n"); 10270 goto out_disable; 10271 } else { 10272 hba->is_irq_enabled = true; 10273 } 10274 10275 if (!is_mcq_supported(hba)) { 10276 err = scsi_add_host(host, hba->dev); 10277 if (err) { 10278 dev_err(hba->dev, "scsi_add_host failed\n"); 10279 goto out_disable; 10280 } 10281 } 10282 10283 hba->tmf_tag_set = (struct blk_mq_tag_set) { 10284 .nr_hw_queues = 1, 10285 .queue_depth = hba->nutmrs, 10286 .ops = &ufshcd_tmf_ops, 10287 .flags = BLK_MQ_F_NO_SCHED, 10288 }; 10289 err = blk_mq_alloc_tag_set(&hba->tmf_tag_set); 10290 if (err < 0) 10291 goto out_remove_scsi_host; 10292 hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set); 10293 if (IS_ERR(hba->tmf_queue)) { 10294 err = PTR_ERR(hba->tmf_queue); 10295 goto free_tmf_tag_set; 10296 } 10297 hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs, 10298 sizeof(*hba->tmf_rqs), GFP_KERNEL); 10299 if (!hba->tmf_rqs) { 10300 err = -ENOMEM; 10301 goto free_tmf_queue; 10302 } 10303 10304 /* Reset the attached device */ 10305 ufshcd_device_reset(hba); 10306 10307 ufshcd_init_crypto(hba); 10308 10309 /* Host controller enable */ 10310 err = ufshcd_hba_enable(hba); 10311 if (err) { 10312 dev_err(hba->dev, "Host controller enable failed\n"); 10313 ufshcd_print_evt_hist(hba); 10314 ufshcd_print_host_state(hba); 10315 goto free_tmf_queue; 10316 } 10317 10318 /* 10319 * Set the default power management level for runtime and system PM. 10320 * Default power saving mode is to keep UFS link in Hibern8 state 10321 * and UFS device in sleep state. 10322 */ 10323 hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state( 10324 UFS_SLEEP_PWR_MODE, 10325 UIC_LINK_HIBERN8_STATE); 10326 hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state( 10327 UFS_SLEEP_PWR_MODE, 10328 UIC_LINK_HIBERN8_STATE); 10329 10330 INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work, 10331 ufshcd_rpm_dev_flush_recheck_work); 10332 10333 /* Set the default auto-hiberate idle timer value to 150 ms */ 10334 if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) { 10335 hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) | 10336 FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3); 10337 } 10338 10339 /* Hold auto suspend until async scan completes */ 10340 pm_runtime_get_sync(dev); 10341 atomic_set(&hba->scsi_block_reqs_cnt, 0); 10342 /* 10343 * We are assuming that device wasn't put in sleep/power-down 10344 * state exclusively during the boot stage before kernel. 10345 * This assumption helps avoid doing link startup twice during 10346 * ufshcd_probe_hba(). 
10347 */ 10348 ufshcd_set_ufs_dev_active(hba); 10349 10350 /* Initialize devfreq */ 10351 if (ufshcd_is_clkscaling_supported(hba)) { 10352 memcpy(&hba->clk_scaling.saved_pwr_info, 10353 &hba->pwr_info, 10354 sizeof(struct ufs_pa_layer_attr)); 10355 hba->clk_scaling.is_allowed = true; 10356 10357 err = ufshcd_devfreq_init(hba); 10358 if (err) 10359 goto rpm_put_sync; 10360 10361 hba->clk_scaling.is_enabled = true; 10362 ufshcd_init_clk_scaling_sysfs(hba); 10363 } 10364 10365 async_schedule(ufshcd_async_scan, hba); 10366 ufs_sysfs_add_nodes(hba->dev); 10367 10368 device_enable_async_suspend(dev); 10369 return 0; 10370 10371 rpm_put_sync: 10372 pm_runtime_put_sync(dev); 10373 free_tmf_queue: 10374 blk_mq_destroy_queue(hba->tmf_queue); 10375 blk_put_queue(hba->tmf_queue); 10376 free_tmf_tag_set: 10377 blk_mq_free_tag_set(&hba->tmf_tag_set); 10378 out_remove_scsi_host: 10379 scsi_remove_host(hba->host); 10380 out_disable: 10381 hba->is_irq_enabled = false; 10382 ufshcd_hba_exit(hba); 10383 out_error: 10384 return err; 10385 } 10386 EXPORT_SYMBOL_GPL(ufshcd_init); 10387 10388 void ufshcd_resume_complete(struct device *dev) 10389 { 10390 struct ufs_hba *hba = dev_get_drvdata(dev); 10391 10392 if (hba->complete_put) { 10393 ufshcd_rpm_put(hba); 10394 hba->complete_put = false; 10395 } 10396 } 10397 EXPORT_SYMBOL_GPL(ufshcd_resume_complete); 10398 10399 static bool ufshcd_rpm_ok_for_spm(struct ufs_hba *hba) 10400 { 10401 struct device *dev = &hba->ufs_device_wlun->sdev_gendev; 10402 enum ufs_dev_pwr_mode dev_pwr_mode; 10403 enum uic_link_state link_state; 10404 unsigned long flags; 10405 bool res; 10406 10407 spin_lock_irqsave(&dev->power.lock, flags); 10408 dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl); 10409 link_state = ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl); 10410 res = pm_runtime_suspended(dev) && 10411 hba->curr_dev_pwr_mode == dev_pwr_mode && 10412 hba->uic_link_state == link_state && 10413 !hba->dev_info.b_rpm_dev_flush_capable; 10414 spin_unlock_irqrestore(&dev->power.lock, flags); 10415 10416 return res; 10417 } 10418 10419 int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm) 10420 { 10421 struct ufs_hba *hba = dev_get_drvdata(dev); 10422 int ret; 10423 10424 /* 10425 * SCSI assumes that runtime-pm and system-pm for scsi drivers 10426 * are same. And it doesn't wake up the device for system-suspend 10427 * if it's runtime suspended. But ufs doesn't follow that. 10428 * Refer ufshcd_resume_complete() 10429 */ 10430 if (hba->ufs_device_wlun) { 10431 /* Prevent runtime suspend */ 10432 ufshcd_rpm_get_noresume(hba); 10433 /* 10434 * Check if already runtime suspended in same state as system 10435 * suspend would be. 
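 * If it is, the runtime resume below is skipped and the current RPM state
 * is reused for system suspend.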
10436 */ 10437 if (!rpm_ok_for_spm || !ufshcd_rpm_ok_for_spm(hba)) { 10438 /* RPM state is not ok for SPM, so runtime resume */ 10439 ret = ufshcd_rpm_resume(hba); 10440 if (ret < 0 && ret != -EACCES) { 10441 ufshcd_rpm_put(hba); 10442 return ret; 10443 } 10444 } 10445 hba->complete_put = true; 10446 } 10447 return 0; 10448 } 10449 EXPORT_SYMBOL_GPL(__ufshcd_suspend_prepare); 10450 10451 int ufshcd_suspend_prepare(struct device *dev) 10452 { 10453 return __ufshcd_suspend_prepare(dev, true); 10454 } 10455 EXPORT_SYMBOL_GPL(ufshcd_suspend_prepare); 10456 10457 #ifdef CONFIG_PM_SLEEP 10458 static int ufshcd_wl_poweroff(struct device *dev) 10459 { 10460 struct scsi_device *sdev = to_scsi_device(dev); 10461 struct ufs_hba *hba = shost_priv(sdev->host); 10462 10463 __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM); 10464 return 0; 10465 } 10466 #endif 10467 10468 static int ufshcd_wl_probe(struct device *dev) 10469 { 10470 struct scsi_device *sdev = to_scsi_device(dev); 10471 10472 if (!is_device_wlun(sdev)) 10473 return -ENODEV; 10474 10475 blk_pm_runtime_init(sdev->request_queue, dev); 10476 pm_runtime_set_autosuspend_delay(dev, 0); 10477 pm_runtime_allow(dev); 10478 10479 return 0; 10480 } 10481 10482 static int ufshcd_wl_remove(struct device *dev) 10483 { 10484 pm_runtime_forbid(dev); 10485 return 0; 10486 } 10487 10488 static const struct dev_pm_ops ufshcd_wl_pm_ops = { 10489 #ifdef CONFIG_PM_SLEEP 10490 .suspend = ufshcd_wl_suspend, 10491 .resume = ufshcd_wl_resume, 10492 .freeze = ufshcd_wl_suspend, 10493 .thaw = ufshcd_wl_resume, 10494 .poweroff = ufshcd_wl_poweroff, 10495 .restore = ufshcd_wl_resume, 10496 #endif 10497 SET_RUNTIME_PM_OPS(ufshcd_wl_runtime_suspend, ufshcd_wl_runtime_resume, NULL) 10498 }; 10499 10500 /* 10501 * ufs_dev_wlun_template - describes ufs device wlun 10502 * ufs-device wlun - used to send pm commands 10503 * All luns are consumers of ufs-device wlun. 10504 * 10505 * Currently, no sd driver is present for wluns. 10506 * Hence the no specific pm operations are performed. 10507 * With ufs design, SSU should be sent to ufs-device wlun. 10508 * Hence register a scsi driver for ufs wluns only. 10509 */ 10510 static struct scsi_driver ufs_dev_wlun_template = { 10511 .gendrv = { 10512 .name = "ufs_device_wlun", 10513 .owner = THIS_MODULE, 10514 .probe = ufshcd_wl_probe, 10515 .remove = ufshcd_wl_remove, 10516 .pm = &ufshcd_wl_pm_ops, 10517 .shutdown = ufshcd_wl_shutdown, 10518 }, 10519 }; 10520 10521 static int __init ufshcd_core_init(void) 10522 { 10523 int ret; 10524 10525 ufs_debugfs_init(); 10526 10527 ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv); 10528 if (ret) 10529 ufs_debugfs_exit(); 10530 return ret; 10531 } 10532 10533 static void __exit ufshcd_core_exit(void) 10534 { 10535 ufs_debugfs_exit(); 10536 scsi_unregister_driver(&ufs_dev_wlun_template.gendrv); 10537 } 10538 10539 module_init(ufshcd_core_init); 10540 module_exit(ufshcd_core_exit); 10541 10542 MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>"); 10543 MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>"); 10544 MODULE_DESCRIPTION("Generic UFS host controller driver Core"); 10545 MODULE_SOFTDEP("pre: governor_simpleondemand"); 10546 MODULE_LICENSE("GPL"); 10547