1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Universal Flash Storage Host controller driver Core 4 * Copyright (C) 2011-2013 Samsung India Software Operations 5 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. 6 * 7 * Authors: 8 * Santosh Yaraganavi <santosh.sy@samsung.com> 9 * Vinayak Holikatti <h.vinayak@samsung.com> 10 */ 11 12 #include <linux/async.h> 13 #include <linux/devfreq.h> 14 #include <linux/nls.h> 15 #include <linux/of.h> 16 #include <linux/bitfield.h> 17 #include <linux/blk-pm.h> 18 #include <linux/blkdev.h> 19 #include <linux/clk.h> 20 #include <linux/delay.h> 21 #include <linux/interrupt.h> 22 #include <linux/module.h> 23 #include <linux/regulator/consumer.h> 24 #include <linux/sched/clock.h> 25 #include <scsi/scsi_cmnd.h> 26 #include <scsi/scsi_dbg.h> 27 #include <scsi/scsi_driver.h> 28 #include <scsi/scsi_eh.h> 29 #include "ufshcd-priv.h" 30 #include <ufs/ufs_quirks.h> 31 #include <ufs/unipro.h> 32 #include "ufs-sysfs.h" 33 #include "ufs-debugfs.h" 34 #include "ufs-fault-injection.h" 35 #include "ufs_bsg.h" 36 #include "ufshcd-crypto.h" 37 #include <asm/unaligned.h> 38 39 #define CREATE_TRACE_POINTS 40 #include <trace/events/ufs.h> 41 42 #define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\ 43 UTP_TASK_REQ_COMPL |\ 44 UFSHCD_ERROR_MASK) 45 46 #define UFSHCD_ENABLE_MCQ_INTRS (UTP_TASK_REQ_COMPL |\ 47 UFSHCD_ERROR_MASK |\ 48 MCQ_CQ_EVENT_STATUS) 49 50 51 /* UIC command timeout, unit: ms */ 52 #define UIC_CMD_TIMEOUT 500 53 54 /* NOP OUT retries waiting for NOP IN response */ 55 #define NOP_OUT_RETRIES 10 56 /* Timeout after 50 msecs if NOP OUT hangs without response */ 57 #define NOP_OUT_TIMEOUT 50 /* msecs */ 58 59 /* Query request retries */ 60 #define QUERY_REQ_RETRIES 3 61 /* Query request timeout */ 62 #define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */ 63 64 /* Advanced RPMB request timeout */ 65 #define ADVANCED_RPMB_REQ_TIMEOUT 3000 /* 3 seconds */ 66 67 /* Task management command timeout */ 68 #define TM_CMD_TIMEOUT 100 /* msecs */ 69 70 /* maximum number of retries for a general UIC command */ 71 #define UFS_UIC_COMMAND_RETRIES 3 72 73 /* maximum number of link-startup retries */ 74 #define DME_LINKSTARTUP_RETRIES 3 75 76 /* maximum number of reset retries before giving up */ 77 #define MAX_HOST_RESET_RETRIES 5 78 79 /* Maximum number of error handler retries before giving up */ 80 #define MAX_ERR_HANDLER_RETRIES 5 81 82 /* Expose the flag value from utp_upiu_query.value */ 83 #define MASK_QUERY_UPIU_FLAG_LOC 0xFF 84 85 /* Interrupt aggregation default timeout, unit: 40us */ 86 #define INT_AGGR_DEF_TO 0x02 87 88 /* default delay of autosuspend: 2000 ms */ 89 #define RPM_AUTOSUSPEND_DELAY_MS 2000 90 91 /* Default delay of RPM device flush delayed work */ 92 #define RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS 5000 93 94 /* Default value of wait time before gating device ref clock */ 95 #define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF /* microsecs */ 96 97 /* Polling time to wait for fDeviceInit */ 98 #define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */ 99 100 /* UFSHC 4.0 compliant HC support this mode. */ 101 static bool use_mcq_mode = true; 102 103 static bool is_mcq_supported(struct ufs_hba *hba) 104 { 105 return hba->mcq_sup && use_mcq_mode; 106 } 107 108 module_param(use_mcq_mode, bool, 0644); 109 MODULE_PARM_DESC(use_mcq_mode, "Control MCQ mode for controllers starting from UFSHCI 4.0. 1 - enable MCQ, 0 - disable MCQ. 
MCQ is enabled by default"); 110 111 #define ufshcd_toggle_vreg(_dev, _vreg, _on) \ 112 ({ \ 113 int _ret; \ 114 if (_on) \ 115 _ret = ufshcd_enable_vreg(_dev, _vreg); \ 116 else \ 117 _ret = ufshcd_disable_vreg(_dev, _vreg); \ 118 _ret; \ 119 }) 120 121 #define ufshcd_hex_dump(prefix_str, buf, len) do { \ 122 size_t __len = (len); \ 123 print_hex_dump(KERN_ERR, prefix_str, \ 124 __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\ 125 16, 4, buf, __len, false); \ 126 } while (0) 127 128 int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len, 129 const char *prefix) 130 { 131 u32 *regs; 132 size_t pos; 133 134 if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */ 135 return -EINVAL; 136 137 regs = kzalloc(len, GFP_ATOMIC); 138 if (!regs) 139 return -ENOMEM; 140 141 for (pos = 0; pos < len; pos += 4) { 142 if (offset == 0 && 143 pos >= REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER && 144 pos <= REG_UIC_ERROR_CODE_DME) 145 continue; 146 regs[pos / 4] = ufshcd_readl(hba, offset + pos); 147 } 148 149 ufshcd_hex_dump(prefix, regs, len); 150 kfree(regs); 151 152 return 0; 153 } 154 EXPORT_SYMBOL_GPL(ufshcd_dump_regs); 155 156 enum { 157 UFSHCD_MAX_CHANNEL = 0, 158 UFSHCD_MAX_ID = 1, 159 UFSHCD_CMD_PER_LUN = 32 - UFSHCD_NUM_RESERVED, 160 UFSHCD_CAN_QUEUE = 32 - UFSHCD_NUM_RESERVED, 161 }; 162 163 static const char *const ufshcd_state_name[] = { 164 [UFSHCD_STATE_RESET] = "reset", 165 [UFSHCD_STATE_OPERATIONAL] = "operational", 166 [UFSHCD_STATE_ERROR] = "error", 167 [UFSHCD_STATE_EH_SCHEDULED_FATAL] = "eh_fatal", 168 [UFSHCD_STATE_EH_SCHEDULED_NON_FATAL] = "eh_non_fatal", 169 }; 170 171 /* UFSHCD error handling flags */ 172 enum { 173 UFSHCD_EH_IN_PROGRESS = (1 << 0), 174 }; 175 176 /* UFSHCD UIC layer error flags */ 177 enum { 178 UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */ 179 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */ 180 UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */ 181 UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */ 182 UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */ 183 UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */ 184 UFSHCD_UIC_PA_GENERIC_ERROR = (1 << 6), /* Generic PA error */ 185 }; 186 187 #define ufshcd_set_eh_in_progress(h) \ 188 ((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS) 189 #define ufshcd_eh_in_progress(h) \ 190 ((h)->eh_flags & UFSHCD_EH_IN_PROGRESS) 191 #define ufshcd_clear_eh_in_progress(h) \ 192 ((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS) 193 194 const struct ufs_pm_lvl_states ufs_pm_lvl_states[] = { 195 [UFS_PM_LVL_0] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE}, 196 [UFS_PM_LVL_1] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE}, 197 [UFS_PM_LVL_2] = {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE}, 198 [UFS_PM_LVL_3] = {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE}, 199 [UFS_PM_LVL_4] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE}, 200 [UFS_PM_LVL_5] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE}, 201 /* 202 * For DeepSleep, the link is first put in hibern8 and then off. 203 * Leaving the link in hibern8 is not supported. 
204 */ 205 [UFS_PM_LVL_6] = {UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE}, 206 }; 207 208 static inline enum ufs_dev_pwr_mode 209 ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl) 210 { 211 return ufs_pm_lvl_states[lvl].dev_state; 212 } 213 214 static inline enum uic_link_state 215 ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl) 216 { 217 return ufs_pm_lvl_states[lvl].link_state; 218 } 219 220 static inline enum ufs_pm_level 221 ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state, 222 enum uic_link_state link_state) 223 { 224 enum ufs_pm_level lvl; 225 226 for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) { 227 if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) && 228 (ufs_pm_lvl_states[lvl].link_state == link_state)) 229 return lvl; 230 } 231 232 /* if no match found, return the level 0 */ 233 return UFS_PM_LVL_0; 234 } 235 236 static const struct ufs_dev_quirk ufs_fixups[] = { 237 /* UFS cards deviations table */ 238 { .wmanufacturerid = UFS_VENDOR_MICRON, 239 .model = UFS_ANY_MODEL, 240 .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM }, 241 { .wmanufacturerid = UFS_VENDOR_SAMSUNG, 242 .model = UFS_ANY_MODEL, 243 .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM | 244 UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE | 245 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS }, 246 { .wmanufacturerid = UFS_VENDOR_SKHYNIX, 247 .model = UFS_ANY_MODEL, 248 .quirk = UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME }, 249 { .wmanufacturerid = UFS_VENDOR_SKHYNIX, 250 .model = "hB8aL1" /*H28U62301AMR*/, 251 .quirk = UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME }, 252 { .wmanufacturerid = UFS_VENDOR_TOSHIBA, 253 .model = UFS_ANY_MODEL, 254 .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM }, 255 { .wmanufacturerid = UFS_VENDOR_TOSHIBA, 256 .model = "THGLF2G9C8KBADG", 257 .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE }, 258 { .wmanufacturerid = UFS_VENDOR_TOSHIBA, 259 .model = "THGLF2G9D8KBADG", 260 .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE }, 261 {} 262 }; 263 264 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba); 265 static void ufshcd_async_scan(void *data, async_cookie_t cookie); 266 static int ufshcd_reset_and_restore(struct ufs_hba *hba); 267 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd); 268 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag); 269 static void ufshcd_hba_exit(struct ufs_hba *hba); 270 static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params); 271 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on); 272 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba); 273 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba); 274 static void ufshcd_resume_clkscaling(struct ufs_hba *hba); 275 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba); 276 static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba); 277 static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up); 278 static irqreturn_t ufshcd_intr(int irq, void *__hba); 279 static int ufshcd_change_power_mode(struct ufs_hba *hba, 280 struct ufs_pa_layer_attr *pwr_mode); 281 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on); 282 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on); 283 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba, 284 struct ufs_vreg *vreg); 285 static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba, 286 bool enable); 287 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba); 288 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba); 289 290 static inline void ufshcd_enable_irq(struct 
ufs_hba *hba) 291 { 292 if (!hba->is_irq_enabled) { 293 enable_irq(hba->irq); 294 hba->is_irq_enabled = true; 295 } 296 } 297 298 static inline void ufshcd_disable_irq(struct ufs_hba *hba) 299 { 300 if (hba->is_irq_enabled) { 301 disable_irq(hba->irq); 302 hba->is_irq_enabled = false; 303 } 304 } 305 306 static void ufshcd_configure_wb(struct ufs_hba *hba) 307 { 308 if (!ufshcd_is_wb_allowed(hba)) 309 return; 310 311 ufshcd_wb_toggle(hba, true); 312 313 ufshcd_wb_toggle_buf_flush_during_h8(hba, true); 314 315 if (ufshcd_is_wb_buf_flush_allowed(hba)) 316 ufshcd_wb_toggle_buf_flush(hba, true); 317 } 318 319 static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba) 320 { 321 if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt)) 322 scsi_unblock_requests(hba->host); 323 } 324 325 static void ufshcd_scsi_block_requests(struct ufs_hba *hba) 326 { 327 if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1) 328 scsi_block_requests(hba->host); 329 } 330 331 static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag, 332 enum ufs_trace_str_t str_t) 333 { 334 struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr; 335 struct utp_upiu_header *header; 336 337 if (!trace_ufshcd_upiu_enabled()) 338 return; 339 340 if (str_t == UFS_CMD_SEND) 341 header = &rq->header; 342 else 343 header = &hba->lrb[tag].ucd_rsp_ptr->header; 344 345 trace_ufshcd_upiu(dev_name(hba->dev), str_t, header, &rq->sc.cdb, 346 UFS_TSF_CDB); 347 } 348 349 static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, 350 enum ufs_trace_str_t str_t, 351 struct utp_upiu_req *rq_rsp) 352 { 353 if (!trace_ufshcd_upiu_enabled()) 354 return; 355 356 trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq_rsp->header, 357 &rq_rsp->qr, UFS_TSF_OSF); 358 } 359 360 static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag, 361 enum ufs_trace_str_t str_t) 362 { 363 struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[tag]; 364 365 if (!trace_ufshcd_upiu_enabled()) 366 return; 367 368 if (str_t == UFS_TM_SEND) 369 trace_ufshcd_upiu(dev_name(hba->dev), str_t, 370 &descp->upiu_req.req_header, 371 &descp->upiu_req.input_param1, 372 UFS_TSF_TM_INPUT); 373 else 374 trace_ufshcd_upiu(dev_name(hba->dev), str_t, 375 &descp->upiu_rsp.rsp_header, 376 &descp->upiu_rsp.output_param1, 377 UFS_TSF_TM_OUTPUT); 378 } 379 380 static void ufshcd_add_uic_command_trace(struct ufs_hba *hba, 381 const struct uic_command *ucmd, 382 enum ufs_trace_str_t str_t) 383 { 384 u32 cmd; 385 386 if (!trace_ufshcd_uic_command_enabled()) 387 return; 388 389 if (str_t == UFS_CMD_SEND) 390 cmd = ucmd->command; 391 else 392 cmd = ufshcd_readl(hba, REG_UIC_COMMAND); 393 394 trace_ufshcd_uic_command(dev_name(hba->dev), str_t, cmd, 395 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1), 396 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2), 397 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3)); 398 } 399 400 static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag, 401 enum ufs_trace_str_t str_t) 402 { 403 u64 lba = 0; 404 u8 opcode = 0, group_id = 0; 405 u32 doorbell = 0; 406 u32 intr; 407 int hwq_id = -1; 408 struct ufshcd_lrb *lrbp = &hba->lrb[tag]; 409 struct scsi_cmnd *cmd = lrbp->cmd; 410 struct request *rq = scsi_cmd_to_rq(cmd); 411 int transfer_len = -1; 412 413 if (!cmd) 414 return; 415 416 /* trace UPIU also */ 417 ufshcd_add_cmd_upiu_trace(hba, tag, str_t); 418 if (!trace_ufshcd_command_enabled()) 419 return; 420 421 opcode = cmd->cmnd[0]; 422 423 if (opcode == READ_10 || opcode == WRITE_10) { 424 /* 425 * Currently we only fully trace read(10) and 
write(10) commands 426 */ 427 transfer_len = 428 be32_to_cpu(lrbp->ucd_req_ptr->sc.exp_data_transfer_len); 429 lba = scsi_get_lba(cmd); 430 if (opcode == WRITE_10) 431 group_id = lrbp->cmd->cmnd[6]; 432 } else if (opcode == UNMAP) { 433 /* 434 * The number of Bytes to be unmapped beginning with the lba. 435 */ 436 transfer_len = blk_rq_bytes(rq); 437 lba = scsi_get_lba(cmd); 438 } 439 440 intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS); 441 442 if (is_mcq_enabled(hba)) { 443 struct ufs_hw_queue *hwq = ufshcd_mcq_req_to_hwq(hba, rq); 444 445 hwq_id = hwq->id; 446 } else { 447 doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); 448 } 449 trace_ufshcd_command(dev_name(hba->dev), str_t, tag, 450 doorbell, hwq_id, transfer_len, intr, lba, opcode, group_id); 451 } 452 453 static void ufshcd_print_clk_freqs(struct ufs_hba *hba) 454 { 455 struct ufs_clk_info *clki; 456 struct list_head *head = &hba->clk_list_head; 457 458 if (list_empty(head)) 459 return; 460 461 list_for_each_entry(clki, head, list) { 462 if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq && 463 clki->max_freq) 464 dev_err(hba->dev, "clk: %s, rate: %u\n", 465 clki->name, clki->curr_freq); 466 } 467 } 468 469 static void ufshcd_print_evt(struct ufs_hba *hba, u32 id, 470 const char *err_name) 471 { 472 int i; 473 bool found = false; 474 const struct ufs_event_hist *e; 475 476 if (id >= UFS_EVT_CNT) 477 return; 478 479 e = &hba->ufs_stats.event[id]; 480 481 for (i = 0; i < UFS_EVENT_HIST_LENGTH; i++) { 482 int p = (i + e->pos) % UFS_EVENT_HIST_LENGTH; 483 484 if (e->tstamp[p] == 0) 485 continue; 486 dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p, 487 e->val[p], div_u64(e->tstamp[p], 1000)); 488 found = true; 489 } 490 491 if (!found) 492 dev_err(hba->dev, "No record of %s\n", err_name); 493 else 494 dev_err(hba->dev, "%s: total cnt=%llu\n", err_name, e->cnt); 495 } 496 497 static void ufshcd_print_evt_hist(struct ufs_hba *hba) 498 { 499 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: "); 500 501 ufshcd_print_evt(hba, UFS_EVT_PA_ERR, "pa_err"); 502 ufshcd_print_evt(hba, UFS_EVT_DL_ERR, "dl_err"); 503 ufshcd_print_evt(hba, UFS_EVT_NL_ERR, "nl_err"); 504 ufshcd_print_evt(hba, UFS_EVT_TL_ERR, "tl_err"); 505 ufshcd_print_evt(hba, UFS_EVT_DME_ERR, "dme_err"); 506 ufshcd_print_evt(hba, UFS_EVT_AUTO_HIBERN8_ERR, 507 "auto_hibern8_err"); 508 ufshcd_print_evt(hba, UFS_EVT_FATAL_ERR, "fatal_err"); 509 ufshcd_print_evt(hba, UFS_EVT_LINK_STARTUP_FAIL, 510 "link_startup_fail"); 511 ufshcd_print_evt(hba, UFS_EVT_RESUME_ERR, "resume_fail"); 512 ufshcd_print_evt(hba, UFS_EVT_SUSPEND_ERR, 513 "suspend_fail"); 514 ufshcd_print_evt(hba, UFS_EVT_WL_RES_ERR, "wlun resume_fail"); 515 ufshcd_print_evt(hba, UFS_EVT_WL_SUSP_ERR, 516 "wlun suspend_fail"); 517 ufshcd_print_evt(hba, UFS_EVT_DEV_RESET, "dev_reset"); 518 ufshcd_print_evt(hba, UFS_EVT_HOST_RESET, "host_reset"); 519 ufshcd_print_evt(hba, UFS_EVT_ABORT, "task_abort"); 520 521 ufshcd_vops_dbg_register_dump(hba); 522 } 523 524 static 525 void ufshcd_print_tr(struct ufs_hba *hba, int tag, bool pr_prdt) 526 { 527 const struct ufshcd_lrb *lrbp; 528 int prdt_length; 529 530 lrbp = &hba->lrb[tag]; 531 532 dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n", 533 tag, div_u64(lrbp->issue_time_stamp_local_clock, 1000)); 534 dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n", 535 tag, div_u64(lrbp->compl_time_stamp_local_clock, 1000)); 536 dev_err(hba->dev, 537 "UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n", 538 tag, (u64)lrbp->utrd_dma_addr); 539 540 
ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr, 541 sizeof(struct utp_transfer_req_desc)); 542 dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag, 543 (u64)lrbp->ucd_req_dma_addr); 544 ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr, 545 sizeof(struct utp_upiu_req)); 546 dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag, 547 (u64)lrbp->ucd_rsp_dma_addr); 548 ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr, 549 sizeof(struct utp_upiu_rsp)); 550 551 prdt_length = le16_to_cpu( 552 lrbp->utr_descriptor_ptr->prd_table_length); 553 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) 554 prdt_length /= ufshcd_sg_entry_size(hba); 555 556 dev_err(hba->dev, 557 "UPIU[%d] - PRDT - %d entries phys@0x%llx\n", 558 tag, prdt_length, 559 (u64)lrbp->ucd_prdt_dma_addr); 560 561 if (pr_prdt) 562 ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr, 563 ufshcd_sg_entry_size(hba) * prdt_length); 564 } 565 566 static bool ufshcd_print_tr_iter(struct request *req, void *priv) 567 { 568 struct scsi_device *sdev = req->q->queuedata; 569 struct Scsi_Host *shost = sdev->host; 570 struct ufs_hba *hba = shost_priv(shost); 571 572 ufshcd_print_tr(hba, req->tag, *(bool *)priv); 573 574 return true; 575 } 576 577 /** 578 * ufshcd_print_trs_all - print trs for all started requests. 579 * @hba: per-adapter instance. 580 * @pr_prdt: need to print prdt or not. 581 */ 582 static void ufshcd_print_trs_all(struct ufs_hba *hba, bool pr_prdt) 583 { 584 blk_mq_tagset_busy_iter(&hba->host->tag_set, ufshcd_print_tr_iter, &pr_prdt); 585 } 586 587 static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap) 588 { 589 int tag; 590 591 for_each_set_bit(tag, &bitmap, hba->nutmrs) { 592 struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag]; 593 594 dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag); 595 ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp)); 596 } 597 } 598 599 static void ufshcd_print_host_state(struct ufs_hba *hba) 600 { 601 const struct scsi_device *sdev_ufs = hba->ufs_device_wlun; 602 603 dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state); 604 dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n", 605 hba->outstanding_reqs, hba->outstanding_tasks); 606 dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n", 607 hba->saved_err, hba->saved_uic_err); 608 dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n", 609 hba->curr_dev_pwr_mode, hba->uic_link_state); 610 dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n", 611 hba->pm_op_in_progress, hba->is_sys_suspended); 612 dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n", 613 hba->auto_bkops_enabled, hba->host->host_self_blocked); 614 dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state); 615 dev_err(hba->dev, 616 "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n", 617 div_u64(hba->ufs_stats.last_hibern8_exit_tstamp, 1000), 618 hba->ufs_stats.hibern8_exit_cnt); 619 dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n", 620 div_u64(hba->ufs_stats.last_intr_ts, 1000), 621 hba->ufs_stats.last_intr_status); 622 dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n", 623 hba->eh_flags, hba->req_abort_count); 624 dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n", 625 hba->ufs_version, hba->capabilities, hba->caps); 626 dev_err(hba->dev, "quirks=0x%x, dev. 
quirks=0x%x\n", hba->quirks, 627 hba->dev_quirks); 628 if (sdev_ufs) 629 dev_err(hba->dev, "UFS dev info: %.8s %.16s rev %.4s\n", 630 sdev_ufs->vendor, sdev_ufs->model, sdev_ufs->rev); 631 632 ufshcd_print_clk_freqs(hba); 633 } 634 635 /** 636 * ufshcd_print_pwr_info - print power params as saved in hba 637 * power info 638 * @hba: per-adapter instance 639 */ 640 static void ufshcd_print_pwr_info(struct ufs_hba *hba) 641 { 642 static const char * const names[] = { 643 "INVALID MODE", 644 "FAST MODE", 645 "SLOW_MODE", 646 "INVALID MODE", 647 "FASTAUTO_MODE", 648 "SLOWAUTO_MODE", 649 "INVALID MODE", 650 }; 651 652 /* 653 * Using dev_dbg to avoid messages during runtime PM to avoid 654 * never-ending cycles of messages written back to storage by user space 655 * causing runtime resume, causing more messages and so on. 656 */ 657 dev_dbg(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n", 658 __func__, 659 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx, 660 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx, 661 names[hba->pwr_info.pwr_rx], 662 names[hba->pwr_info.pwr_tx], 663 hba->pwr_info.hs_rate); 664 } 665 666 static void ufshcd_device_reset(struct ufs_hba *hba) 667 { 668 int err; 669 670 err = ufshcd_vops_device_reset(hba); 671 672 if (!err) { 673 ufshcd_set_ufs_dev_active(hba); 674 if (ufshcd_is_wb_allowed(hba)) { 675 hba->dev_info.wb_enabled = false; 676 hba->dev_info.wb_buf_flush_enabled = false; 677 } 678 } 679 if (err != -EOPNOTSUPP) 680 ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err); 681 } 682 683 void ufshcd_delay_us(unsigned long us, unsigned long tolerance) 684 { 685 if (!us) 686 return; 687 688 if (us < 10) 689 udelay(us); 690 else 691 usleep_range(us, us + tolerance); 692 } 693 EXPORT_SYMBOL_GPL(ufshcd_delay_us); 694 695 /** 696 * ufshcd_wait_for_register - wait for register value to change 697 * @hba: per-adapter interface 698 * @reg: mmio register offset 699 * @mask: mask to apply to the read register value 700 * @val: value to wait for 701 * @interval_us: polling interval in microseconds 702 * @timeout_ms: timeout in milliseconds 703 * 704 * Return: -ETIMEDOUT on error, zero on success. 
705 */ 706 static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, 707 u32 val, unsigned long interval_us, 708 unsigned long timeout_ms) 709 { 710 int err = 0; 711 unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); 712 713 /* ignore bits that we don't intend to wait on */ 714 val = val & mask; 715 716 while ((ufshcd_readl(hba, reg) & mask) != val) { 717 usleep_range(interval_us, interval_us + 50); 718 if (time_after(jiffies, timeout)) { 719 if ((ufshcd_readl(hba, reg) & mask) != val) 720 err = -ETIMEDOUT; 721 break; 722 } 723 } 724 725 return err; 726 } 727 728 /** 729 * ufshcd_get_intr_mask - Get the interrupt bit mask 730 * @hba: Pointer to adapter instance 731 * 732 * Return: interrupt bit mask per version 733 */ 734 static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba) 735 { 736 if (hba->ufs_version == ufshci_version(1, 0)) 737 return INTERRUPT_MASK_ALL_VER_10; 738 if (hba->ufs_version <= ufshci_version(2, 0)) 739 return INTERRUPT_MASK_ALL_VER_11; 740 741 return INTERRUPT_MASK_ALL_VER_21; 742 } 743 744 /** 745 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA 746 * @hba: Pointer to adapter instance 747 * 748 * Return: UFSHCI version supported by the controller 749 */ 750 static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba) 751 { 752 u32 ufshci_ver; 753 754 if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION) 755 ufshci_ver = ufshcd_vops_get_ufs_hci_version(hba); 756 else 757 ufshci_ver = ufshcd_readl(hba, REG_UFS_VERSION); 758 759 /* 760 * UFSHCI v1.x uses a different version scheme, in order 761 * to allow the use of comparisons with the ufshci_version 762 * function, we convert it to the same scheme as ufs 2.0+. 763 */ 764 if (ufshci_ver & 0x00010000) 765 return ufshci_version(1, ufshci_ver & 0x00000100); 766 767 return ufshci_ver; 768 } 769 770 /** 771 * ufshcd_is_device_present - Check if any device connected to 772 * the host controller 773 * @hba: pointer to adapter instance 774 * 775 * Return: true if device present, false if no device detected 776 */ 777 static inline bool ufshcd_is_device_present(struct ufs_hba *hba) 778 { 779 return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & DEVICE_PRESENT; 780 } 781 782 /** 783 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status 784 * @lrbp: pointer to local command reference block 785 * @cqe: pointer to the completion queue entry 786 * 787 * This function is used to get the OCS field from UTRD 788 * 789 * Return: the OCS field in the UTRD. 790 */ 791 static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp, 792 struct cq_entry *cqe) 793 { 794 if (cqe) 795 return le32_to_cpu(cqe->status) & MASK_OCS; 796 797 return lrbp->utr_descriptor_ptr->header.ocs & MASK_OCS; 798 } 799 800 /** 801 * ufshcd_utrl_clear() - Clear requests from the controller request list. 802 * @hba: per adapter instance 803 * @mask: mask with one bit set for each request to be cleared 804 */ 805 static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 mask) 806 { 807 if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR) 808 mask = ~mask; 809 /* 810 * From the UFSHCI specification: "UTP Transfer Request List CLear 811 * Register (UTRLCLR): This field is bit significant. Each bit 812 * corresponds to a slot in the UTP Transfer Request List, where bit 0 813 * corresponds to request slot 0. A bit in this field is set to ‘0’ 814 * by host software to indicate to the host controller that a transfer 815 * request slot is cleared. 
The host controller 816 * shall free up any resources associated to the request slot 817 * immediately, and shall set the associated bit in UTRLDBR to ‘0’. The 818 * host software indicates no change to request slots by setting the 819 * associated bits in this field to ‘1’. Bits in this field shall only 820 * be set ‘1’ or ‘0’ by host software when UTRLRSR is set to ‘1’." 821 */ 822 ufshcd_writel(hba, ~mask, REG_UTP_TRANSFER_REQ_LIST_CLEAR); 823 } 824 825 /** 826 * ufshcd_utmrl_clear - Clear a bit in UTMRLCLR register 827 * @hba: per adapter instance 828 * @pos: position of the bit to be cleared 829 */ 830 static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos) 831 { 832 if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR) 833 ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR); 834 else 835 ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR); 836 } 837 838 /** 839 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY 840 * @reg: Register value of host controller status 841 * 842 * Return: 0 on success; a positive value if failed. 843 */ 844 static inline int ufshcd_get_lists_status(u32 reg) 845 { 846 return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY); 847 } 848 849 /** 850 * ufshcd_get_uic_cmd_result - Get the UIC command result 851 * @hba: Pointer to adapter instance 852 * 853 * This function gets the result of UIC command completion 854 * 855 * Return: 0 on success; non-zero value on error. 856 */ 857 static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba) 858 { 859 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) & 860 MASK_UIC_COMMAND_RESULT; 861 } 862 863 /** 864 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command 865 * @hba: Pointer to adapter instance 866 * 867 * This function gets UIC command argument3 868 * 869 * Return: 0 on success; non-zero value on error. 870 */ 871 static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba) 872 { 873 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3); 874 } 875 876 /** 877 * ufshcd_get_req_rsp - returns the TR response transaction type 878 * @ucd_rsp_ptr: pointer to response UPIU 879 * 880 * Return: UPIU type. 881 */ 882 static inline enum upiu_response_transaction 883 ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr) 884 { 885 return ucd_rsp_ptr->header.transaction_code; 886 } 887 888 /** 889 * ufshcd_is_exception_event - Check if the device raised an exception event 890 * @ucd_rsp_ptr: pointer to response UPIU 891 * 892 * The function checks if the device raised an exception event indicated in 893 * the Device Information field of response UPIU. 894 * 895 * Return: true if exception is raised, false otherwise. 896 */ 897 static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr) 898 { 899 return ucd_rsp_ptr->header.device_information & 1; 900 } 901 902 /** 903 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values. 904 * @hba: per adapter instance 905 */ 906 static inline void 907 ufshcd_reset_intr_aggr(struct ufs_hba *hba) 908 { 909 ufshcd_writel(hba, INT_AGGR_ENABLE | 910 INT_AGGR_COUNTER_AND_TIMER_RESET, 911 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL); 912 } 913 914 /** 915 * ufshcd_config_intr_aggr - Configure interrupt aggregation values. 
916 * @hba: per adapter instance 917 * @cnt: Interrupt aggregation counter threshold 918 * @tmout: Interrupt aggregation timeout value 919 */ 920 static inline void 921 ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout) 922 { 923 ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE | 924 INT_AGGR_COUNTER_THLD_VAL(cnt) | 925 INT_AGGR_TIMEOUT_VAL(tmout), 926 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL); 927 } 928 929 /** 930 * ufshcd_disable_intr_aggr - Disables interrupt aggregation. 931 * @hba: per adapter instance 932 */ 933 static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba) 934 { 935 ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL); 936 } 937 938 /** 939 * ufshcd_enable_run_stop_reg - Enable run-stop registers, 940 * When run-stop registers are set to 1, it indicates the 941 * host controller that it can process the requests 942 * @hba: per adapter instance 943 */ 944 static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba) 945 { 946 ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT, 947 REG_UTP_TASK_REQ_LIST_RUN_STOP); 948 ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT, 949 REG_UTP_TRANSFER_REQ_LIST_RUN_STOP); 950 } 951 952 /** 953 * ufshcd_hba_start - Start controller initialization sequence 954 * @hba: per adapter instance 955 */ 956 static inline void ufshcd_hba_start(struct ufs_hba *hba) 957 { 958 u32 val = CONTROLLER_ENABLE; 959 960 if (ufshcd_crypto_enable(hba)) 961 val |= CRYPTO_GENERAL_ENABLE; 962 963 ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE); 964 } 965 966 /** 967 * ufshcd_is_hba_active - Get controller state 968 * @hba: per adapter instance 969 * 970 * Return: true if and only if the controller is active. 971 */ 972 bool ufshcd_is_hba_active(struct ufs_hba *hba) 973 { 974 return ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE; 975 } 976 EXPORT_SYMBOL_GPL(ufshcd_is_hba_active); 977 978 u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba) 979 { 980 /* HCI version 1.0 and 1.1 supports UniPro 1.41 */ 981 if (hba->ufs_version <= ufshci_version(1, 1)) 982 return UFS_UNIPRO_VER_1_41; 983 else 984 return UFS_UNIPRO_VER_1_6; 985 } 986 EXPORT_SYMBOL(ufshcd_get_local_unipro_ver); 987 988 static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba) 989 { 990 /* 991 * If both host and device support UniPro ver1.6 or later, PA layer 992 * parameters tuning happens during link startup itself. 993 * 994 * We can manually tune PA layer parameters if either host or device 995 * doesn't support UniPro ver 1.6 or later. But to keep manual tuning 996 * logic simple, we will only do manual tuning if local unipro version 997 * doesn't support ver1.6 or later. 998 */ 999 return ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6; 1000 } 1001 1002 /** 1003 * ufshcd_set_clk_freq - set UFS controller clock frequencies 1004 * @hba: per adapter instance 1005 * @scale_up: If True, set max possible frequency othewise set low frequency 1006 * 1007 * Return: 0 if successful; < 0 upon failure. 
1008 */ 1009 static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up) 1010 { 1011 int ret = 0; 1012 struct ufs_clk_info *clki; 1013 struct list_head *head = &hba->clk_list_head; 1014 1015 if (list_empty(head)) 1016 goto out; 1017 1018 list_for_each_entry(clki, head, list) { 1019 if (!IS_ERR_OR_NULL(clki->clk)) { 1020 if (scale_up && clki->max_freq) { 1021 if (clki->curr_freq == clki->max_freq) 1022 continue; 1023 1024 ret = clk_set_rate(clki->clk, clki->max_freq); 1025 if (ret) { 1026 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", 1027 __func__, clki->name, 1028 clki->max_freq, ret); 1029 break; 1030 } 1031 trace_ufshcd_clk_scaling(dev_name(hba->dev), 1032 "scaled up", clki->name, 1033 clki->curr_freq, 1034 clki->max_freq); 1035 1036 clki->curr_freq = clki->max_freq; 1037 1038 } else if (!scale_up && clki->min_freq) { 1039 if (clki->curr_freq == clki->min_freq) 1040 continue; 1041 1042 ret = clk_set_rate(clki->clk, clki->min_freq); 1043 if (ret) { 1044 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", 1045 __func__, clki->name, 1046 clki->min_freq, ret); 1047 break; 1048 } 1049 trace_ufshcd_clk_scaling(dev_name(hba->dev), 1050 "scaled down", clki->name, 1051 clki->curr_freq, 1052 clki->min_freq); 1053 clki->curr_freq = clki->min_freq; 1054 } 1055 } 1056 dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__, 1057 clki->name, clk_get_rate(clki->clk)); 1058 } 1059 1060 out: 1061 return ret; 1062 } 1063 1064 /** 1065 * ufshcd_scale_clks - scale up or scale down UFS controller clocks 1066 * @hba: per adapter instance 1067 * @scale_up: True if scaling up and false if scaling down 1068 * 1069 * Return: 0 if successful; < 0 upon failure. 1070 */ 1071 static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up) 1072 { 1073 int ret = 0; 1074 ktime_t start = ktime_get(); 1075 1076 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE); 1077 if (ret) 1078 goto out; 1079 1080 ret = ufshcd_set_clk_freq(hba, scale_up); 1081 if (ret) 1082 goto out; 1083 1084 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE); 1085 if (ret) 1086 ufshcd_set_clk_freq(hba, !scale_up); 1087 1088 out: 1089 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev), 1090 (scale_up ? "up" : "down"), 1091 ktime_to_us(ktime_sub(ktime_get(), start)), ret); 1092 return ret; 1093 } 1094 1095 /** 1096 * ufshcd_is_devfreq_scaling_required - check if scaling is required or not 1097 * @hba: per adapter instance 1098 * @scale_up: True if scaling up and false if scaling down 1099 * 1100 * Return: true if scaling is required, false otherwise. 1101 */ 1102 static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba, 1103 bool scale_up) 1104 { 1105 struct ufs_clk_info *clki; 1106 struct list_head *head = &hba->clk_list_head; 1107 1108 if (list_empty(head)) 1109 return false; 1110 1111 list_for_each_entry(clki, head, list) { 1112 if (!IS_ERR_OR_NULL(clki->clk)) { 1113 if (scale_up && clki->max_freq) { 1114 if (clki->curr_freq == clki->max_freq) 1115 continue; 1116 return true; 1117 } else if (!scale_up && clki->min_freq) { 1118 if (clki->curr_freq == clki->min_freq) 1119 continue; 1120 return true; 1121 } 1122 } 1123 } 1124 1125 return false; 1126 } 1127 1128 /* 1129 * Determine the number of pending commands by counting the bits in the SCSI 1130 * device budget maps. This approach has been selected because a bit is set in 1131 * the budget map before scsi_host_queue_ready() checks the host_self_blocked 1132 * flag. 
The host_self_blocked flag can be modified by calling 1133 * scsi_block_requests() or scsi_unblock_requests(). 1134 */ 1135 static u32 ufshcd_pending_cmds(struct ufs_hba *hba) 1136 { 1137 const struct scsi_device *sdev; 1138 u32 pending = 0; 1139 1140 lockdep_assert_held(hba->host->host_lock); 1141 __shost_for_each_device(sdev, hba->host) 1142 pending += sbitmap_weight(&sdev->budget_map); 1143 1144 return pending; 1145 } 1146 1147 /* 1148 * Wait until all pending SCSI commands and TMFs have finished or the timeout 1149 * has expired. 1150 * 1151 * Return: 0 upon success; -EBUSY upon timeout. 1152 */ 1153 static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, 1154 u64 wait_timeout_us) 1155 { 1156 unsigned long flags; 1157 int ret = 0; 1158 u32 tm_doorbell; 1159 u32 tr_pending; 1160 bool timeout = false, do_last_check = false; 1161 ktime_t start; 1162 1163 ufshcd_hold(hba); 1164 spin_lock_irqsave(hba->host->host_lock, flags); 1165 /* 1166 * Wait for all the outstanding tasks/transfer requests. 1167 * Verify by checking the doorbell registers are clear. 1168 */ 1169 start = ktime_get(); 1170 do { 1171 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) { 1172 ret = -EBUSY; 1173 goto out; 1174 } 1175 1176 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL); 1177 tr_pending = ufshcd_pending_cmds(hba); 1178 if (!tm_doorbell && !tr_pending) { 1179 timeout = false; 1180 break; 1181 } else if (do_last_check) { 1182 break; 1183 } 1184 1185 spin_unlock_irqrestore(hba->host->host_lock, flags); 1186 io_schedule_timeout(msecs_to_jiffies(20)); 1187 if (ktime_to_us(ktime_sub(ktime_get(), start)) > 1188 wait_timeout_us) { 1189 timeout = true; 1190 /* 1191 * We might have scheduled out for long time so make 1192 * sure to check if doorbells are cleared by this time 1193 * or not. 1194 */ 1195 do_last_check = true; 1196 } 1197 spin_lock_irqsave(hba->host->host_lock, flags); 1198 } while (tm_doorbell || tr_pending); 1199 1200 if (timeout) { 1201 dev_err(hba->dev, 1202 "%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n", 1203 __func__, tm_doorbell, tr_pending); 1204 ret = -EBUSY; 1205 } 1206 out: 1207 spin_unlock_irqrestore(hba->host->host_lock, flags); 1208 ufshcd_release(hba); 1209 return ret; 1210 } 1211 1212 /** 1213 * ufshcd_scale_gear - scale up/down UFS gear 1214 * @hba: per adapter instance 1215 * @scale_up: True for scaling up gear and false for scaling down 1216 * 1217 * Return: 0 for success; -EBUSY if scaling can't happen at this time; 1218 * non-zero for any other errors. 1219 */ 1220 static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up) 1221 { 1222 int ret = 0; 1223 struct ufs_pa_layer_attr new_pwr_info; 1224 1225 if (scale_up) { 1226 memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info, 1227 sizeof(struct ufs_pa_layer_attr)); 1228 } else { 1229 memcpy(&new_pwr_info, &hba->pwr_info, 1230 sizeof(struct ufs_pa_layer_attr)); 1231 1232 if (hba->pwr_info.gear_tx > hba->clk_scaling.min_gear || 1233 hba->pwr_info.gear_rx > hba->clk_scaling.min_gear) { 1234 /* save the current power mode */ 1235 memcpy(&hba->clk_scaling.saved_pwr_info, 1236 &hba->pwr_info, 1237 sizeof(struct ufs_pa_layer_attr)); 1238 1239 /* scale down gear */ 1240 new_pwr_info.gear_tx = hba->clk_scaling.min_gear; 1241 new_pwr_info.gear_rx = hba->clk_scaling.min_gear; 1242 } 1243 } 1244 1245 /* check if the power mode needs to be changed or not? 
*/ 1246 ret = ufshcd_config_pwr_mode(hba, &new_pwr_info); 1247 if (ret) 1248 dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)", 1249 __func__, ret, 1250 hba->pwr_info.gear_tx, hba->pwr_info.gear_rx, 1251 new_pwr_info.gear_tx, new_pwr_info.gear_rx); 1252 1253 return ret; 1254 } 1255 1256 /* 1257 * Wait until all pending SCSI commands and TMFs have finished or the timeout 1258 * has expired. 1259 * 1260 * Return: 0 upon success; -EBUSY upon timeout. 1261 */ 1262 static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us) 1263 { 1264 int ret = 0; 1265 /* 1266 * make sure that there are no outstanding requests when 1267 * clock scaling is in progress 1268 */ 1269 ufshcd_scsi_block_requests(hba); 1270 mutex_lock(&hba->wb_mutex); 1271 down_write(&hba->clk_scaling_lock); 1272 1273 if (!hba->clk_scaling.is_allowed || 1274 ufshcd_wait_for_doorbell_clr(hba, timeout_us)) { 1275 ret = -EBUSY; 1276 up_write(&hba->clk_scaling_lock); 1277 mutex_unlock(&hba->wb_mutex); 1278 ufshcd_scsi_unblock_requests(hba); 1279 goto out; 1280 } 1281 1282 /* let's not get into low power until clock scaling is completed */ 1283 ufshcd_hold(hba); 1284 1285 out: 1286 return ret; 1287 } 1288 1289 static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool scale_up) 1290 { 1291 up_write(&hba->clk_scaling_lock); 1292 1293 /* Enable Write Booster if we have scaled up else disable it */ 1294 if (ufshcd_enable_wb_if_scaling_up(hba) && !err) 1295 ufshcd_wb_toggle(hba, scale_up); 1296 1297 mutex_unlock(&hba->wb_mutex); 1298 1299 ufshcd_scsi_unblock_requests(hba); 1300 ufshcd_release(hba); 1301 } 1302 1303 /** 1304 * ufshcd_devfreq_scale - scale up/down UFS clocks and gear 1305 * @hba: per adapter instance 1306 * @scale_up: True for scaling up and false for scalin down 1307 * 1308 * Return: 0 for success; -EBUSY if scaling can't happen at this time; non-zero 1309 * for any other errors. 
1310 */ 1311 static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up) 1312 { 1313 int ret = 0; 1314 1315 ret = ufshcd_clock_scaling_prepare(hba, 1 * USEC_PER_SEC); 1316 if (ret) 1317 return ret; 1318 1319 /* scale down the gear before scaling down clocks */ 1320 if (!scale_up) { 1321 ret = ufshcd_scale_gear(hba, false); 1322 if (ret) 1323 goto out_unprepare; 1324 } 1325 1326 ret = ufshcd_scale_clks(hba, scale_up); 1327 if (ret) { 1328 if (!scale_up) 1329 ufshcd_scale_gear(hba, true); 1330 goto out_unprepare; 1331 } 1332 1333 /* scale up the gear after scaling up clocks */ 1334 if (scale_up) { 1335 ret = ufshcd_scale_gear(hba, true); 1336 if (ret) { 1337 ufshcd_scale_clks(hba, false); 1338 goto out_unprepare; 1339 } 1340 } 1341 1342 out_unprepare: 1343 ufshcd_clock_scaling_unprepare(hba, ret, scale_up); 1344 return ret; 1345 } 1346 1347 static void ufshcd_clk_scaling_suspend_work(struct work_struct *work) 1348 { 1349 struct ufs_hba *hba = container_of(work, struct ufs_hba, 1350 clk_scaling.suspend_work); 1351 unsigned long irq_flags; 1352 1353 spin_lock_irqsave(hba->host->host_lock, irq_flags); 1354 if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) { 1355 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); 1356 return; 1357 } 1358 hba->clk_scaling.is_suspended = true; 1359 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); 1360 1361 __ufshcd_suspend_clkscaling(hba); 1362 } 1363 1364 static void ufshcd_clk_scaling_resume_work(struct work_struct *work) 1365 { 1366 struct ufs_hba *hba = container_of(work, struct ufs_hba, 1367 clk_scaling.resume_work); 1368 unsigned long irq_flags; 1369 1370 spin_lock_irqsave(hba->host->host_lock, irq_flags); 1371 if (!hba->clk_scaling.is_suspended) { 1372 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); 1373 return; 1374 } 1375 hba->clk_scaling.is_suspended = false; 1376 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); 1377 1378 devfreq_resume_device(hba->devfreq); 1379 } 1380 1381 static int ufshcd_devfreq_target(struct device *dev, 1382 unsigned long *freq, u32 flags) 1383 { 1384 int ret = 0; 1385 struct ufs_hba *hba = dev_get_drvdata(dev); 1386 ktime_t start; 1387 bool scale_up, sched_clk_scaling_suspend_work = false; 1388 struct list_head *clk_list = &hba->clk_list_head; 1389 struct ufs_clk_info *clki; 1390 unsigned long irq_flags; 1391 1392 if (!ufshcd_is_clkscaling_supported(hba)) 1393 return -EINVAL; 1394 1395 clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list); 1396 /* Override with the closest supported frequency */ 1397 *freq = (unsigned long) clk_round_rate(clki->clk, *freq); 1398 spin_lock_irqsave(hba->host->host_lock, irq_flags); 1399 if (ufshcd_eh_in_progress(hba)) { 1400 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); 1401 return 0; 1402 } 1403 1404 if (!hba->clk_scaling.active_reqs) 1405 sched_clk_scaling_suspend_work = true; 1406 1407 if (list_empty(clk_list)) { 1408 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); 1409 goto out; 1410 } 1411 1412 /* Decide based on the rounded-off frequency and update */ 1413 scale_up = *freq == clki->max_freq; 1414 if (!scale_up) 1415 *freq = clki->min_freq; 1416 /* Update the frequency */ 1417 if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) { 1418 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); 1419 ret = 0; 1420 goto out; /* no state change required */ 1421 } 1422 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); 1423 1424 start = ktime_get(); 1425 ret = ufshcd_devfreq_scale(hba, scale_up); 
1426 1427 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev), 1428 (scale_up ? "up" : "down"), 1429 ktime_to_us(ktime_sub(ktime_get(), start)), ret); 1430 1431 out: 1432 if (sched_clk_scaling_suspend_work) 1433 queue_work(hba->clk_scaling.workq, 1434 &hba->clk_scaling.suspend_work); 1435 1436 return ret; 1437 } 1438 1439 static int ufshcd_devfreq_get_dev_status(struct device *dev, 1440 struct devfreq_dev_status *stat) 1441 { 1442 struct ufs_hba *hba = dev_get_drvdata(dev); 1443 struct ufs_clk_scaling *scaling = &hba->clk_scaling; 1444 unsigned long flags; 1445 struct list_head *clk_list = &hba->clk_list_head; 1446 struct ufs_clk_info *clki; 1447 ktime_t curr_t; 1448 1449 if (!ufshcd_is_clkscaling_supported(hba)) 1450 return -EINVAL; 1451 1452 memset(stat, 0, sizeof(*stat)); 1453 1454 spin_lock_irqsave(hba->host->host_lock, flags); 1455 curr_t = ktime_get(); 1456 if (!scaling->window_start_t) 1457 goto start_window; 1458 1459 clki = list_first_entry(clk_list, struct ufs_clk_info, list); 1460 /* 1461 * If current frequency is 0, then the ondemand governor considers 1462 * there's no initial frequency set. And it always requests to set 1463 * to max. frequency. 1464 */ 1465 stat->current_frequency = clki->curr_freq; 1466 if (scaling->is_busy_started) 1467 scaling->tot_busy_t += ktime_us_delta(curr_t, 1468 scaling->busy_start_t); 1469 1470 stat->total_time = ktime_us_delta(curr_t, scaling->window_start_t); 1471 stat->busy_time = scaling->tot_busy_t; 1472 start_window: 1473 scaling->window_start_t = curr_t; 1474 scaling->tot_busy_t = 0; 1475 1476 if (scaling->active_reqs) { 1477 scaling->busy_start_t = curr_t; 1478 scaling->is_busy_started = true; 1479 } else { 1480 scaling->busy_start_t = 0; 1481 scaling->is_busy_started = false; 1482 } 1483 spin_unlock_irqrestore(hba->host->host_lock, flags); 1484 return 0; 1485 } 1486 1487 static int ufshcd_devfreq_init(struct ufs_hba *hba) 1488 { 1489 struct list_head *clk_list = &hba->clk_list_head; 1490 struct ufs_clk_info *clki; 1491 struct devfreq *devfreq; 1492 int ret; 1493 1494 /* Skip devfreq if we don't have any clocks in the list */ 1495 if (list_empty(clk_list)) 1496 return 0; 1497 1498 clki = list_first_entry(clk_list, struct ufs_clk_info, list); 1499 dev_pm_opp_add(hba->dev, clki->min_freq, 0); 1500 dev_pm_opp_add(hba->dev, clki->max_freq, 0); 1501 1502 ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile, 1503 &hba->vps->ondemand_data); 1504 devfreq = devfreq_add_device(hba->dev, 1505 &hba->vps->devfreq_profile, 1506 DEVFREQ_GOV_SIMPLE_ONDEMAND, 1507 &hba->vps->ondemand_data); 1508 if (IS_ERR(devfreq)) { 1509 ret = PTR_ERR(devfreq); 1510 dev_err(hba->dev, "Unable to register with devfreq %d\n", ret); 1511 1512 dev_pm_opp_remove(hba->dev, clki->min_freq); 1513 dev_pm_opp_remove(hba->dev, clki->max_freq); 1514 return ret; 1515 } 1516 1517 hba->devfreq = devfreq; 1518 1519 return 0; 1520 } 1521 1522 static void ufshcd_devfreq_remove(struct ufs_hba *hba) 1523 { 1524 struct list_head *clk_list = &hba->clk_list_head; 1525 struct ufs_clk_info *clki; 1526 1527 if (!hba->devfreq) 1528 return; 1529 1530 devfreq_remove_device(hba->devfreq); 1531 hba->devfreq = NULL; 1532 1533 clki = list_first_entry(clk_list, struct ufs_clk_info, list); 1534 dev_pm_opp_remove(hba->dev, clki->min_freq); 1535 dev_pm_opp_remove(hba->dev, clki->max_freq); 1536 } 1537 1538 static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba) 1539 { 1540 unsigned long flags; 1541 1542 devfreq_suspend_device(hba->devfreq); 1543 spin_lock_irqsave(hba->host->host_lock, 
flags); 1544 hba->clk_scaling.window_start_t = 0; 1545 spin_unlock_irqrestore(hba->host->host_lock, flags); 1546 } 1547 1548 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba) 1549 { 1550 unsigned long flags; 1551 bool suspend = false; 1552 1553 cancel_work_sync(&hba->clk_scaling.suspend_work); 1554 cancel_work_sync(&hba->clk_scaling.resume_work); 1555 1556 spin_lock_irqsave(hba->host->host_lock, flags); 1557 if (!hba->clk_scaling.is_suspended) { 1558 suspend = true; 1559 hba->clk_scaling.is_suspended = true; 1560 } 1561 spin_unlock_irqrestore(hba->host->host_lock, flags); 1562 1563 if (suspend) 1564 __ufshcd_suspend_clkscaling(hba); 1565 } 1566 1567 static void ufshcd_resume_clkscaling(struct ufs_hba *hba) 1568 { 1569 unsigned long flags; 1570 bool resume = false; 1571 1572 spin_lock_irqsave(hba->host->host_lock, flags); 1573 if (hba->clk_scaling.is_suspended) { 1574 resume = true; 1575 hba->clk_scaling.is_suspended = false; 1576 } 1577 spin_unlock_irqrestore(hba->host->host_lock, flags); 1578 1579 if (resume) 1580 devfreq_resume_device(hba->devfreq); 1581 } 1582 1583 static ssize_t ufshcd_clkscale_enable_show(struct device *dev, 1584 struct device_attribute *attr, char *buf) 1585 { 1586 struct ufs_hba *hba = dev_get_drvdata(dev); 1587 1588 return sysfs_emit(buf, "%d\n", hba->clk_scaling.is_enabled); 1589 } 1590 1591 static ssize_t ufshcd_clkscale_enable_store(struct device *dev, 1592 struct device_attribute *attr, const char *buf, size_t count) 1593 { 1594 struct ufs_hba *hba = dev_get_drvdata(dev); 1595 u32 value; 1596 int err = 0; 1597 1598 if (kstrtou32(buf, 0, &value)) 1599 return -EINVAL; 1600 1601 down(&hba->host_sem); 1602 if (!ufshcd_is_user_access_allowed(hba)) { 1603 err = -EBUSY; 1604 goto out; 1605 } 1606 1607 value = !!value; 1608 if (value == hba->clk_scaling.is_enabled) 1609 goto out; 1610 1611 ufshcd_rpm_get_sync(hba); 1612 ufshcd_hold(hba); 1613 1614 hba->clk_scaling.is_enabled = value; 1615 1616 if (value) { 1617 ufshcd_resume_clkscaling(hba); 1618 } else { 1619 ufshcd_suspend_clkscaling(hba); 1620 err = ufshcd_devfreq_scale(hba, true); 1621 if (err) 1622 dev_err(hba->dev, "%s: failed to scale clocks up %d\n", 1623 __func__, err); 1624 } 1625 1626 ufshcd_release(hba); 1627 ufshcd_rpm_put_sync(hba); 1628 out: 1629 up(&hba->host_sem); 1630 return err ? 
err : count; 1631 } 1632 1633 static void ufshcd_init_clk_scaling_sysfs(struct ufs_hba *hba) 1634 { 1635 hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show; 1636 hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store; 1637 sysfs_attr_init(&hba->clk_scaling.enable_attr.attr); 1638 hba->clk_scaling.enable_attr.attr.name = "clkscale_enable"; 1639 hba->clk_scaling.enable_attr.attr.mode = 0644; 1640 if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr)) 1641 dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n"); 1642 } 1643 1644 static void ufshcd_remove_clk_scaling_sysfs(struct ufs_hba *hba) 1645 { 1646 if (hba->clk_scaling.enable_attr.attr.name) 1647 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr); 1648 } 1649 1650 static void ufshcd_init_clk_scaling(struct ufs_hba *hba) 1651 { 1652 char wq_name[sizeof("ufs_clkscaling_00")]; 1653 1654 if (!ufshcd_is_clkscaling_supported(hba)) 1655 return; 1656 1657 if (!hba->clk_scaling.min_gear) 1658 hba->clk_scaling.min_gear = UFS_HS_G1; 1659 1660 INIT_WORK(&hba->clk_scaling.suspend_work, 1661 ufshcd_clk_scaling_suspend_work); 1662 INIT_WORK(&hba->clk_scaling.resume_work, 1663 ufshcd_clk_scaling_resume_work); 1664 1665 snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d", 1666 hba->host->host_no); 1667 hba->clk_scaling.workq = create_singlethread_workqueue(wq_name); 1668 1669 hba->clk_scaling.is_initialized = true; 1670 } 1671 1672 static void ufshcd_exit_clk_scaling(struct ufs_hba *hba) 1673 { 1674 if (!hba->clk_scaling.is_initialized) 1675 return; 1676 1677 ufshcd_remove_clk_scaling_sysfs(hba); 1678 destroy_workqueue(hba->clk_scaling.workq); 1679 ufshcd_devfreq_remove(hba); 1680 hba->clk_scaling.is_initialized = false; 1681 } 1682 1683 static void ufshcd_ungate_work(struct work_struct *work) 1684 { 1685 int ret; 1686 unsigned long flags; 1687 struct ufs_hba *hba = container_of(work, struct ufs_hba, 1688 clk_gating.ungate_work); 1689 1690 cancel_delayed_work_sync(&hba->clk_gating.gate_work); 1691 1692 spin_lock_irqsave(hba->host->host_lock, flags); 1693 if (hba->clk_gating.state == CLKS_ON) { 1694 spin_unlock_irqrestore(hba->host->host_lock, flags); 1695 return; 1696 } 1697 1698 spin_unlock_irqrestore(hba->host->host_lock, flags); 1699 ufshcd_hba_vreg_set_hpm(hba); 1700 ufshcd_setup_clocks(hba, true); 1701 1702 ufshcd_enable_irq(hba); 1703 1704 /* Exit from hibern8 */ 1705 if (ufshcd_can_hibern8_during_gating(hba)) { 1706 /* Prevent gating in this path */ 1707 hba->clk_gating.is_suspended = true; 1708 if (ufshcd_is_link_hibern8(hba)) { 1709 ret = ufshcd_uic_hibern8_exit(hba); 1710 if (ret) 1711 dev_err(hba->dev, "%s: hibern8 exit failed %d\n", 1712 __func__, ret); 1713 else 1714 ufshcd_set_link_active(hba); 1715 } 1716 hba->clk_gating.is_suspended = false; 1717 } 1718 } 1719 1720 /** 1721 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release. 1722 * Also, exit from hibern8 mode and set the link as active. 1723 * @hba: per adapter instance 1724 */ 1725 void ufshcd_hold(struct ufs_hba *hba) 1726 { 1727 bool flush_result; 1728 unsigned long flags; 1729 1730 if (!ufshcd_is_clkgating_allowed(hba) || 1731 !hba->clk_gating.is_initialized) 1732 return; 1733 spin_lock_irqsave(hba->host->host_lock, flags); 1734 hba->clk_gating.active_reqs++; 1735 1736 start: 1737 switch (hba->clk_gating.state) { 1738 case CLKS_ON: 1739 /* 1740 * Wait for the ungate work to complete if in progress. 
1741 * Though the clocks may be in ON state, the link could 1742 * still be in hibner8 state if hibern8 is allowed 1743 * during clock gating. 1744 * Make sure we exit hibern8 state also in addition to 1745 * clocks being ON. 1746 */ 1747 if (ufshcd_can_hibern8_during_gating(hba) && 1748 ufshcd_is_link_hibern8(hba)) { 1749 spin_unlock_irqrestore(hba->host->host_lock, flags); 1750 flush_result = flush_work(&hba->clk_gating.ungate_work); 1751 if (hba->clk_gating.is_suspended && !flush_result) 1752 return; 1753 spin_lock_irqsave(hba->host->host_lock, flags); 1754 goto start; 1755 } 1756 break; 1757 case REQ_CLKS_OFF: 1758 if (cancel_delayed_work(&hba->clk_gating.gate_work)) { 1759 hba->clk_gating.state = CLKS_ON; 1760 trace_ufshcd_clk_gating(dev_name(hba->dev), 1761 hba->clk_gating.state); 1762 break; 1763 } 1764 /* 1765 * If we are here, it means gating work is either done or 1766 * currently running. Hence, fall through to cancel gating 1767 * work and to enable clocks. 1768 */ 1769 fallthrough; 1770 case CLKS_OFF: 1771 hba->clk_gating.state = REQ_CLKS_ON; 1772 trace_ufshcd_clk_gating(dev_name(hba->dev), 1773 hba->clk_gating.state); 1774 queue_work(hba->clk_gating.clk_gating_workq, 1775 &hba->clk_gating.ungate_work); 1776 /* 1777 * fall through to check if we should wait for this 1778 * work to be done or not. 1779 */ 1780 fallthrough; 1781 case REQ_CLKS_ON: 1782 spin_unlock_irqrestore(hba->host->host_lock, flags); 1783 flush_work(&hba->clk_gating.ungate_work); 1784 /* Make sure state is CLKS_ON before returning */ 1785 spin_lock_irqsave(hba->host->host_lock, flags); 1786 goto start; 1787 default: 1788 dev_err(hba->dev, "%s: clk gating is in invalid state %d\n", 1789 __func__, hba->clk_gating.state); 1790 break; 1791 } 1792 spin_unlock_irqrestore(hba->host->host_lock, flags); 1793 } 1794 EXPORT_SYMBOL_GPL(ufshcd_hold); 1795 1796 static void ufshcd_gate_work(struct work_struct *work) 1797 { 1798 struct ufs_hba *hba = container_of(work, struct ufs_hba, 1799 clk_gating.gate_work.work); 1800 unsigned long flags; 1801 int ret; 1802 1803 spin_lock_irqsave(hba->host->host_lock, flags); 1804 /* 1805 * In case you are here to cancel this work the gating state 1806 * would be marked as REQ_CLKS_ON. In this case save time by 1807 * skipping the gating work and exit after changing the clock 1808 * state to CLKS_ON. 
1809 */ 1810 if (hba->clk_gating.is_suspended || 1811 (hba->clk_gating.state != REQ_CLKS_OFF)) { 1812 hba->clk_gating.state = CLKS_ON; 1813 trace_ufshcd_clk_gating(dev_name(hba->dev), 1814 hba->clk_gating.state); 1815 goto rel_lock; 1816 } 1817 1818 if (hba->clk_gating.active_reqs 1819 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL 1820 || hba->outstanding_reqs || hba->outstanding_tasks 1821 || hba->active_uic_cmd || hba->uic_async_done) 1822 goto rel_lock; 1823 1824 spin_unlock_irqrestore(hba->host->host_lock, flags); 1825 1826 /* put the link into hibern8 mode before turning off clocks */ 1827 if (ufshcd_can_hibern8_during_gating(hba)) { 1828 ret = ufshcd_uic_hibern8_enter(hba); 1829 if (ret) { 1830 hba->clk_gating.state = CLKS_ON; 1831 dev_err(hba->dev, "%s: hibern8 enter failed %d\n", 1832 __func__, ret); 1833 trace_ufshcd_clk_gating(dev_name(hba->dev), 1834 hba->clk_gating.state); 1835 goto out; 1836 } 1837 ufshcd_set_link_hibern8(hba); 1838 } 1839 1840 ufshcd_disable_irq(hba); 1841 1842 ufshcd_setup_clocks(hba, false); 1843 1844 /* Put the host controller in low power mode if possible */ 1845 ufshcd_hba_vreg_set_lpm(hba); 1846 /* 1847 * In case you are here to cancel this work the gating state 1848 * would be marked as REQ_CLKS_ON. In this case keep the state 1849 * as REQ_CLKS_ON which would anyway imply that clocks are off 1850 * and a request to turn them on is pending. By doing this way, 1851 * we keep the state machine in tact and this would ultimately 1852 * prevent from doing cancel work multiple times when there are 1853 * new requests arriving before the current cancel work is done. 1854 */ 1855 spin_lock_irqsave(hba->host->host_lock, flags); 1856 if (hba->clk_gating.state == REQ_CLKS_OFF) { 1857 hba->clk_gating.state = CLKS_OFF; 1858 trace_ufshcd_clk_gating(dev_name(hba->dev), 1859 hba->clk_gating.state); 1860 } 1861 rel_lock: 1862 spin_unlock_irqrestore(hba->host->host_lock, flags); 1863 out: 1864 return; 1865 } 1866 1867 /* host lock must be held before calling this variant */ 1868 static void __ufshcd_release(struct ufs_hba *hba) 1869 { 1870 if (!ufshcd_is_clkgating_allowed(hba)) 1871 return; 1872 1873 hba->clk_gating.active_reqs--; 1874 1875 if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended || 1876 hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL || 1877 hba->outstanding_tasks || !hba->clk_gating.is_initialized || 1878 hba->active_uic_cmd || hba->uic_async_done || 1879 hba->clk_gating.state == CLKS_OFF) 1880 return; 1881 1882 hba->clk_gating.state = REQ_CLKS_OFF; 1883 trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state); 1884 queue_delayed_work(hba->clk_gating.clk_gating_workq, 1885 &hba->clk_gating.gate_work, 1886 msecs_to_jiffies(hba->clk_gating.delay_ms)); 1887 } 1888 1889 void ufshcd_release(struct ufs_hba *hba) 1890 { 1891 unsigned long flags; 1892 1893 spin_lock_irqsave(hba->host->host_lock, flags); 1894 __ufshcd_release(hba); 1895 spin_unlock_irqrestore(hba->host->host_lock, flags); 1896 } 1897 EXPORT_SYMBOL_GPL(ufshcd_release); 1898 1899 static ssize_t ufshcd_clkgate_delay_show(struct device *dev, 1900 struct device_attribute *attr, char *buf) 1901 { 1902 struct ufs_hba *hba = dev_get_drvdata(dev); 1903 1904 return sysfs_emit(buf, "%lu\n", hba->clk_gating.delay_ms); 1905 } 1906 1907 void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value) 1908 { 1909 struct ufs_hba *hba = dev_get_drvdata(dev); 1910 unsigned long flags; 1911 1912 spin_lock_irqsave(hba->host->host_lock, flags); 1913 hba->clk_gating.delay_ms = value; 1914 
spin_unlock_irqrestore(hba->host->host_lock, flags); 1915 } 1916 EXPORT_SYMBOL_GPL(ufshcd_clkgate_delay_set); 1917 1918 static ssize_t ufshcd_clkgate_delay_store(struct device *dev, 1919 struct device_attribute *attr, const char *buf, size_t count) 1920 { 1921 unsigned long value; 1922 1923 if (kstrtoul(buf, 0, &value)) 1924 return -EINVAL; 1925 1926 ufshcd_clkgate_delay_set(dev, value); 1927 return count; 1928 } 1929 1930 static ssize_t ufshcd_clkgate_enable_show(struct device *dev, 1931 struct device_attribute *attr, char *buf) 1932 { 1933 struct ufs_hba *hba = dev_get_drvdata(dev); 1934 1935 return sysfs_emit(buf, "%d\n", hba->clk_gating.is_enabled); 1936 } 1937 1938 static ssize_t ufshcd_clkgate_enable_store(struct device *dev, 1939 struct device_attribute *attr, const char *buf, size_t count) 1940 { 1941 struct ufs_hba *hba = dev_get_drvdata(dev); 1942 unsigned long flags; 1943 u32 value; 1944 1945 if (kstrtou32(buf, 0, &value)) 1946 return -EINVAL; 1947 1948 value = !!value; 1949 1950 spin_lock_irqsave(hba->host->host_lock, flags); 1951 if (value == hba->clk_gating.is_enabled) 1952 goto out; 1953 1954 if (value) 1955 __ufshcd_release(hba); 1956 else 1957 hba->clk_gating.active_reqs++; 1958 1959 hba->clk_gating.is_enabled = value; 1960 out: 1961 spin_unlock_irqrestore(hba->host->host_lock, flags); 1962 return count; 1963 } 1964 1965 static void ufshcd_init_clk_gating_sysfs(struct ufs_hba *hba) 1966 { 1967 hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show; 1968 hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store; 1969 sysfs_attr_init(&hba->clk_gating.delay_attr.attr); 1970 hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms"; 1971 hba->clk_gating.delay_attr.attr.mode = 0644; 1972 if (device_create_file(hba->dev, &hba->clk_gating.delay_attr)) 1973 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n"); 1974 1975 hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show; 1976 hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store; 1977 sysfs_attr_init(&hba->clk_gating.enable_attr.attr); 1978 hba->clk_gating.enable_attr.attr.name = "clkgate_enable"; 1979 hba->clk_gating.enable_attr.attr.mode = 0644; 1980 if (device_create_file(hba->dev, &hba->clk_gating.enable_attr)) 1981 dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n"); 1982 } 1983 1984 static void ufshcd_remove_clk_gating_sysfs(struct ufs_hba *hba) 1985 { 1986 if (hba->clk_gating.delay_attr.attr.name) 1987 device_remove_file(hba->dev, &hba->clk_gating.delay_attr); 1988 if (hba->clk_gating.enable_attr.attr.name) 1989 device_remove_file(hba->dev, &hba->clk_gating.enable_attr); 1990 } 1991 1992 static void ufshcd_init_clk_gating(struct ufs_hba *hba) 1993 { 1994 char wq_name[sizeof("ufs_clk_gating_00")]; 1995 1996 if (!ufshcd_is_clkgating_allowed(hba)) 1997 return; 1998 1999 hba->clk_gating.state = CLKS_ON; 2000 2001 hba->clk_gating.delay_ms = 150; 2002 INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work); 2003 INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work); 2004 2005 snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d", 2006 hba->host->host_no); 2007 hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name, 2008 WQ_MEM_RECLAIM | WQ_HIGHPRI); 2009 2010 ufshcd_init_clk_gating_sysfs(hba); 2011 2012 hba->clk_gating.is_enabled = true; 2013 hba->clk_gating.is_initialized = true; 2014 } 2015 2016 static void ufshcd_exit_clk_gating(struct ufs_hba *hba) 2017 { 2018 if (!hba->clk_gating.is_initialized) 2019 return; 2020 2021 
ufshcd_remove_clk_gating_sysfs(hba); 2022 2023 /* Ungate the clock if necessary. */ 2024 ufshcd_hold(hba); 2025 hba->clk_gating.is_initialized = false; 2026 ufshcd_release(hba); 2027 2028 destroy_workqueue(hba->clk_gating.clk_gating_workq); 2029 } 2030 2031 static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba) 2032 { 2033 bool queue_resume_work = false; 2034 ktime_t curr_t = ktime_get(); 2035 unsigned long flags; 2036 2037 if (!ufshcd_is_clkscaling_supported(hba)) 2038 return; 2039 2040 spin_lock_irqsave(hba->host->host_lock, flags); 2041 if (!hba->clk_scaling.active_reqs++) 2042 queue_resume_work = true; 2043 2044 if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) { 2045 spin_unlock_irqrestore(hba->host->host_lock, flags); 2046 return; 2047 } 2048 2049 if (queue_resume_work) 2050 queue_work(hba->clk_scaling.workq, 2051 &hba->clk_scaling.resume_work); 2052 2053 if (!hba->clk_scaling.window_start_t) { 2054 hba->clk_scaling.window_start_t = curr_t; 2055 hba->clk_scaling.tot_busy_t = 0; 2056 hba->clk_scaling.is_busy_started = false; 2057 } 2058 2059 if (!hba->clk_scaling.is_busy_started) { 2060 hba->clk_scaling.busy_start_t = curr_t; 2061 hba->clk_scaling.is_busy_started = true; 2062 } 2063 spin_unlock_irqrestore(hba->host->host_lock, flags); 2064 } 2065 2066 static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba) 2067 { 2068 struct ufs_clk_scaling *scaling = &hba->clk_scaling; 2069 unsigned long flags; 2070 2071 if (!ufshcd_is_clkscaling_supported(hba)) 2072 return; 2073 2074 spin_lock_irqsave(hba->host->host_lock, flags); 2075 hba->clk_scaling.active_reqs--; 2076 if (!scaling->active_reqs && scaling->is_busy_started) { 2077 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(), 2078 scaling->busy_start_t)); 2079 scaling->busy_start_t = 0; 2080 scaling->is_busy_started = false; 2081 } 2082 spin_unlock_irqrestore(hba->host->host_lock, flags); 2083 } 2084 2085 static inline int ufshcd_monitor_opcode2dir(u8 opcode) 2086 { 2087 if (opcode == READ_6 || opcode == READ_10 || opcode == READ_16) 2088 return READ; 2089 else if (opcode == WRITE_6 || opcode == WRITE_10 || opcode == WRITE_16) 2090 return WRITE; 2091 else 2092 return -EINVAL; 2093 } 2094 2095 static inline bool ufshcd_should_inform_monitor(struct ufs_hba *hba, 2096 struct ufshcd_lrb *lrbp) 2097 { 2098 const struct ufs_hba_monitor *m = &hba->monitor; 2099 2100 return (m->enabled && lrbp && lrbp->cmd && 2101 (!m->chunk_size || m->chunk_size == lrbp->cmd->sdb.length) && 2102 ktime_before(hba->monitor.enabled_ts, lrbp->issue_time_stamp)); 2103 } 2104 2105 static void ufshcd_start_monitor(struct ufs_hba *hba, 2106 const struct ufshcd_lrb *lrbp) 2107 { 2108 int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd); 2109 unsigned long flags; 2110 2111 spin_lock_irqsave(hba->host->host_lock, flags); 2112 if (dir >= 0 && hba->monitor.nr_queued[dir]++ == 0) 2113 hba->monitor.busy_start_ts[dir] = ktime_get(); 2114 spin_unlock_irqrestore(hba->host->host_lock, flags); 2115 } 2116 2117 static void ufshcd_update_monitor(struct ufs_hba *hba, const struct ufshcd_lrb *lrbp) 2118 { 2119 int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd); 2120 unsigned long flags; 2121 2122 spin_lock_irqsave(hba->host->host_lock, flags); 2123 if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) { 2124 const struct request *req = scsi_cmd_to_rq(lrbp->cmd); 2125 struct ufs_hba_monitor *m = &hba->monitor; 2126 ktime_t now, inc, lat; 2127 2128 now = lrbp->compl_time_stamp; 2129 inc = ktime_sub(now, m->busy_start_ts[dir]); 2130 m->total_busy[dir] = 
ktime_add(m->total_busy[dir], inc); 2131 m->nr_sec_rw[dir] += blk_rq_sectors(req); 2132 2133 /* Update latencies */ 2134 m->nr_req[dir]++; 2135 lat = ktime_sub(now, lrbp->issue_time_stamp); 2136 m->lat_sum[dir] += lat; 2137 if (m->lat_max[dir] < lat || !m->lat_max[dir]) 2138 m->lat_max[dir] = lat; 2139 if (m->lat_min[dir] > lat || !m->lat_min[dir]) 2140 m->lat_min[dir] = lat; 2141 2142 m->nr_queued[dir]--; 2143 /* Push forward the busy start of monitor */ 2144 m->busy_start_ts[dir] = now; 2145 } 2146 spin_unlock_irqrestore(hba->host->host_lock, flags); 2147 } 2148 2149 /** 2150 * ufshcd_send_command - Send SCSI or device management commands 2151 * @hba: per adapter instance 2152 * @task_tag: Task tag of the command 2153 * @hwq: pointer to hardware queue instance 2154 */ 2155 static inline 2156 void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag, 2157 struct ufs_hw_queue *hwq) 2158 { 2159 struct ufshcd_lrb *lrbp = &hba->lrb[task_tag]; 2160 unsigned long flags; 2161 2162 lrbp->issue_time_stamp = ktime_get(); 2163 lrbp->issue_time_stamp_local_clock = local_clock(); 2164 lrbp->compl_time_stamp = ktime_set(0, 0); 2165 lrbp->compl_time_stamp_local_clock = 0; 2166 ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND); 2167 ufshcd_clk_scaling_start_busy(hba); 2168 if (unlikely(ufshcd_should_inform_monitor(hba, lrbp))) 2169 ufshcd_start_monitor(hba, lrbp); 2170 2171 if (is_mcq_enabled(hba)) { 2172 int utrd_size = sizeof(struct utp_transfer_req_desc); 2173 struct utp_transfer_req_desc *src = lrbp->utr_descriptor_ptr; 2174 struct utp_transfer_req_desc *dest = hwq->sqe_base_addr + hwq->sq_tail_slot; 2175 2176 spin_lock(&hwq->sq_lock); 2177 memcpy(dest, src, utrd_size); 2178 ufshcd_inc_sq_tail(hwq); 2179 spin_unlock(&hwq->sq_lock); 2180 } else { 2181 spin_lock_irqsave(&hba->outstanding_lock, flags); 2182 if (hba->vops && hba->vops->setup_xfer_req) 2183 hba->vops->setup_xfer_req(hba, lrbp->task_tag, 2184 !!lrbp->cmd); 2185 __set_bit(lrbp->task_tag, &hba->outstanding_reqs); 2186 ufshcd_writel(hba, 1 << lrbp->task_tag, 2187 REG_UTP_TRANSFER_REQ_DOOR_BELL); 2188 spin_unlock_irqrestore(&hba->outstanding_lock, flags); 2189 } 2190 } 2191 2192 /** 2193 * ufshcd_copy_sense_data - Copy sense data in case of check condition 2194 * @lrbp: pointer to local reference block 2195 */ 2196 static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp) 2197 { 2198 u8 *const sense_buffer = lrbp->cmd->sense_buffer; 2199 u16 resp_len; 2200 int len; 2201 2202 resp_len = be16_to_cpu(lrbp->ucd_rsp_ptr->header.data_segment_length); 2203 if (sense_buffer && resp_len) { 2204 int len_to_copy; 2205 2206 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len); 2207 len_to_copy = min_t(int, UFS_SENSE_SIZE, len); 2208 2209 memcpy(sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data, 2210 len_to_copy); 2211 } 2212 } 2213 2214 /** 2215 * ufshcd_copy_query_response() - Copy the Query Response and the data 2216 * descriptor 2217 * @hba: per adapter instance 2218 * @lrbp: pointer to local reference block 2219 * 2220 * Return: 0 upon success; < 0 upon failure. 
2221 */ 2222 static 2223 int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) 2224 { 2225 struct ufs_query_res *query_res = &hba->dev_cmd.query.response; 2226 2227 memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE); 2228 2229 /* Get the descriptor */ 2230 if (hba->dev_cmd.query.descriptor && 2231 lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) { 2232 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + 2233 GENERAL_UPIU_REQUEST_SIZE; 2234 u16 resp_len; 2235 u16 buf_len; 2236 2237 /* data segment length */ 2238 resp_len = be16_to_cpu(lrbp->ucd_rsp_ptr->header 2239 .data_segment_length); 2240 buf_len = be16_to_cpu( 2241 hba->dev_cmd.query.request.upiu_req.length); 2242 if (likely(buf_len >= resp_len)) { 2243 memcpy(hba->dev_cmd.query.descriptor, descp, resp_len); 2244 } else { 2245 dev_warn(hba->dev, 2246 "%s: rsp size %d is bigger than buffer size %d", 2247 __func__, resp_len, buf_len); 2248 return -EINVAL; 2249 } 2250 } 2251 2252 return 0; 2253 } 2254 2255 /** 2256 * ufshcd_hba_capabilities - Read controller capabilities 2257 * @hba: per adapter instance 2258 * 2259 * Return: 0 on success, negative on error. 2260 */ 2261 static inline int ufshcd_hba_capabilities(struct ufs_hba *hba) 2262 { 2263 int err; 2264 2265 hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES); 2266 if (hba->quirks & UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS) 2267 hba->capabilities &= ~MASK_64_ADDRESSING_SUPPORT; 2268 2269 /* nutrs and nutmrs are 0 based values */ 2270 hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1; 2271 hba->nutmrs = 2272 ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1; 2273 hba->reserved_slot = hba->nutrs - 1; 2274 2275 /* Read crypto capabilities */ 2276 err = ufshcd_hba_init_crypto_capabilities(hba); 2277 if (err) { 2278 dev_err(hba->dev, "crypto setup failed\n"); 2279 return err; 2280 } 2281 2282 hba->mcq_sup = FIELD_GET(MASK_MCQ_SUPPORT, hba->capabilities); 2283 if (!hba->mcq_sup) 2284 return 0; 2285 2286 hba->mcq_capabilities = ufshcd_readl(hba, REG_MCQCAP); 2287 hba->ext_iid_sup = FIELD_GET(MASK_EXT_IID_SUPPORT, 2288 hba->mcq_capabilities); 2289 2290 return 0; 2291 } 2292 2293 /** 2294 * ufshcd_ready_for_uic_cmd - Check if controller is ready 2295 * to accept UIC commands 2296 * @hba: per adapter instance 2297 * 2298 * Return: true on success, else false. 2299 */ 2300 static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba) 2301 { 2302 return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY; 2303 } 2304 2305 /** 2306 * ufshcd_get_upmcrs - Get the power mode change request status 2307 * @hba: Pointer to adapter instance 2308 * 2309 * This function gets the UPMCRS field of HCS register 2310 * 2311 * Return: value of UPMCRS field. 
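 *
 * A minimal decode sketch (illustrative only, not code from this driver;
 * the PWR_* codes are the UPMCRS values defined with the host controller
 * status register bits):
 *
 *	u8 upmcrs = ufshcd_get_upmcrs(hba);
 *
 *	if (upmcrs == PWR_LOCAL)
 *		;	/* local power mode change completed successfully */
 *	else if (upmcrs == PWR_BUSY)
 *		;	/* a power mode change request is still in progress */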
2312 */ 2313 static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba) 2314 { 2315 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7; 2316 } 2317 2318 /** 2319 * ufshcd_dispatch_uic_cmd - Dispatch an UIC command to the Unipro layer 2320 * @hba: per adapter instance 2321 * @uic_cmd: UIC command 2322 */ 2323 static inline void 2324 ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) 2325 { 2326 lockdep_assert_held(&hba->uic_cmd_mutex); 2327 2328 WARN_ON(hba->active_uic_cmd); 2329 2330 hba->active_uic_cmd = uic_cmd; 2331 2332 /* Write Args */ 2333 ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1); 2334 ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2); 2335 ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3); 2336 2337 ufshcd_add_uic_command_trace(hba, uic_cmd, UFS_CMD_SEND); 2338 2339 /* Write UIC Cmd */ 2340 ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK, 2341 REG_UIC_COMMAND); 2342 } 2343 2344 /** 2345 * ufshcd_wait_for_uic_cmd - Wait for completion of an UIC command 2346 * @hba: per adapter instance 2347 * @uic_cmd: UIC command 2348 * 2349 * Return: 0 only if success. 2350 */ 2351 static int 2352 ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) 2353 { 2354 int ret; 2355 unsigned long flags; 2356 2357 lockdep_assert_held(&hba->uic_cmd_mutex); 2358 2359 if (wait_for_completion_timeout(&uic_cmd->done, 2360 msecs_to_jiffies(UIC_CMD_TIMEOUT))) { 2361 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT; 2362 } else { 2363 ret = -ETIMEDOUT; 2364 dev_err(hba->dev, 2365 "uic cmd 0x%x with arg3 0x%x completion timeout\n", 2366 uic_cmd->command, uic_cmd->argument3); 2367 2368 if (!uic_cmd->cmd_active) { 2369 dev_err(hba->dev, "%s: UIC cmd has been completed, return the result\n", 2370 __func__); 2371 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT; 2372 } 2373 } 2374 2375 spin_lock_irqsave(hba->host->host_lock, flags); 2376 hba->active_uic_cmd = NULL; 2377 spin_unlock_irqrestore(hba->host->host_lock, flags); 2378 2379 return ret; 2380 } 2381 2382 /** 2383 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result 2384 * @hba: per adapter instance 2385 * @uic_cmd: UIC command 2386 * @completion: initialize the completion only if this is set to true 2387 * 2388 * Return: 0 only if success. 2389 */ 2390 static int 2391 __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd, 2392 bool completion) 2393 { 2394 lockdep_assert_held(&hba->uic_cmd_mutex); 2395 lockdep_assert_held(hba->host->host_lock); 2396 2397 if (!ufshcd_ready_for_uic_cmd(hba)) { 2398 dev_err(hba->dev, 2399 "Controller not ready to accept UIC commands\n"); 2400 return -EIO; 2401 } 2402 2403 if (completion) 2404 init_completion(&uic_cmd->done); 2405 2406 uic_cmd->cmd_active = 1; 2407 ufshcd_dispatch_uic_cmd(hba, uic_cmd); 2408 2409 return 0; 2410 } 2411 2412 /** 2413 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result 2414 * @hba: per adapter instance 2415 * @uic_cmd: UIC command 2416 * 2417 * Return: 0 only if success. 
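 *
 * A minimal usage sketch (illustrative only; it mirrors what the
 * ufshcd_dme_* helpers further below do with this function):
 *
 *	struct uic_command uic_cmd = {0};
 *	u32 mib_val = 0;
 *
 *	uic_cmd.command = UIC_CMD_DME_GET;
 *	uic_cmd.argument1 = UIC_ARG_MIB(PA_MAXRXHSGEAR);
 *	if (!ufshcd_send_uic_cmd(hba, &uic_cmd))
 *		mib_val = uic_cmd.argument3;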
2418 */ 2419 int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) 2420 { 2421 int ret; 2422 unsigned long flags; 2423 2424 if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD) 2425 return 0; 2426 2427 ufshcd_hold(hba); 2428 mutex_lock(&hba->uic_cmd_mutex); 2429 ufshcd_add_delay_before_dme_cmd(hba); 2430 2431 spin_lock_irqsave(hba->host->host_lock, flags); 2432 ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true); 2433 spin_unlock_irqrestore(hba->host->host_lock, flags); 2434 if (!ret) 2435 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd); 2436 2437 mutex_unlock(&hba->uic_cmd_mutex); 2438 2439 ufshcd_release(hba); 2440 return ret; 2441 } 2442 2443 /** 2444 * ufshcd_sgl_to_prdt - SG list to PRDT (Physical Region Description Table, 4DW format) 2445 * @hba: per-adapter instance 2446 * @lrbp: pointer to local reference block 2447 * @sg_entries: The number of sg entries actually used 2448 * @sg_list: Pointer to SG list 2449 */ 2450 static void ufshcd_sgl_to_prdt(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int sg_entries, 2451 struct scatterlist *sg_list) 2452 { 2453 struct ufshcd_sg_entry *prd; 2454 struct scatterlist *sg; 2455 int i; 2456 2457 if (sg_entries) { 2458 2459 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) 2460 lrbp->utr_descriptor_ptr->prd_table_length = 2461 cpu_to_le16(sg_entries * ufshcd_sg_entry_size(hba)); 2462 else 2463 lrbp->utr_descriptor_ptr->prd_table_length = cpu_to_le16(sg_entries); 2464 2465 prd = lrbp->ucd_prdt_ptr; 2466 2467 for_each_sg(sg_list, sg, sg_entries, i) { 2468 const unsigned int len = sg_dma_len(sg); 2469 2470 /* 2471 * From the UFSHCI spec: "Data Byte Count (DBC): A '0' 2472 * based value that indicates the length, in bytes, of 2473 * the data block. A maximum of length of 256KB may 2474 * exist for any entry. Bits 1:0 of this field shall be 2475 * 11b to indicate Dword granularity. A value of '3' 2476 * indicates 4 bytes, '7' indicates 8 bytes, etc." 2477 */ 2478 WARN_ONCE(len > SZ_256K, "len = %#x\n", len); 2479 prd->size = cpu_to_le32(len - 1); 2480 prd->addr = cpu_to_le64(sg->dma_address); 2481 prd->reserved = 0; 2482 prd = (void *)prd + ufshcd_sg_entry_size(hba); 2483 } 2484 } else { 2485 lrbp->utr_descriptor_ptr->prd_table_length = 0; 2486 } 2487 } 2488 2489 /** 2490 * ufshcd_map_sg - Map scatter-gather list to prdt 2491 * @hba: per adapter instance 2492 * @lrbp: pointer to local reference block 2493 * 2494 * Return: 0 in case of success, non-zero value in case of failure.
2495 */ 2496 static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) 2497 { 2498 struct scsi_cmnd *cmd = lrbp->cmd; 2499 int sg_segments = scsi_dma_map(cmd); 2500 2501 if (sg_segments < 0) 2502 return sg_segments; 2503 2504 ufshcd_sgl_to_prdt(hba, lrbp, sg_segments, scsi_sglist(cmd)); 2505 2506 return 0; 2507 } 2508 2509 /** 2510 * ufshcd_enable_intr - enable interrupts 2511 * @hba: per adapter instance 2512 * @intrs: interrupt bits 2513 */ 2514 static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs) 2515 { 2516 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE); 2517 2518 if (hba->ufs_version == ufshci_version(1, 0)) { 2519 u32 rw; 2520 rw = set & INTERRUPT_MASK_RW_VER_10; 2521 set = rw | ((set ^ intrs) & intrs); 2522 } else { 2523 set |= intrs; 2524 } 2525 2526 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE); 2527 } 2528 2529 /** 2530 * ufshcd_disable_intr - disable interrupts 2531 * @hba: per adapter instance 2532 * @intrs: interrupt bits 2533 */ 2534 static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs) 2535 { 2536 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE); 2537 2538 if (hba->ufs_version == ufshci_version(1, 0)) { 2539 u32 rw; 2540 rw = (set & INTERRUPT_MASK_RW_VER_10) & 2541 ~(intrs & INTERRUPT_MASK_RW_VER_10); 2542 set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10); 2543 2544 } else { 2545 set &= ~intrs; 2546 } 2547 2548 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE); 2549 } 2550 2551 /** 2552 * ufshcd_prepare_req_desc_hdr - Fill the UTP Transfer request descriptor 2553 * header according to the request 2554 * @lrbp: pointer to local reference block 2555 * @upiu_flags: flags required in the header 2556 * @cmd_dir: requested data direction 2557 * @ehs_length: Total EHS Length (in 32-byte units) of all Extra Header Segments 2558 */ 2559 static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp, u8 *upiu_flags, 2560 enum dma_data_direction cmd_dir, int ehs_length) 2561 { 2562 struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr; 2563 struct request_desc_header *h = &req_desc->header; 2564 enum utp_data_direction data_direction; 2565 2566 *h = (typeof(*h)){ }; 2567 2568 if (cmd_dir == DMA_FROM_DEVICE) { 2569 data_direction = UTP_DEVICE_TO_HOST; 2570 *upiu_flags = UPIU_CMD_FLAGS_READ; 2571 } else if (cmd_dir == DMA_TO_DEVICE) { 2572 data_direction = UTP_HOST_TO_DEVICE; 2573 *upiu_flags = UPIU_CMD_FLAGS_WRITE; 2574 } else { 2575 data_direction = UTP_NO_DATA_TRANSFER; 2576 *upiu_flags = UPIU_CMD_FLAGS_NONE; 2577 } 2578 2579 h->command_type = lrbp->command_type; 2580 h->data_direction = data_direction; 2581 h->ehs_length = ehs_length; 2582 2583 if (lrbp->intr_cmd) 2584 h->interrupt = 1; 2585 2586 /* Prepare crypto related dwords */ 2587 ufshcd_prepare_req_desc_hdr_crypto(lrbp, h); 2588 2589 /* 2590 * assigning invalid value for command status.
Controller 2591 * updates OCS on command completion, with the command 2592 * status 2593 */ 2594 h->ocs = OCS_INVALID_COMMAND_STATUS; 2595 2596 req_desc->prd_table_length = 0; 2597 } 2598 2599 /** 2600 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc, 2601 * for scsi commands 2602 * @lrbp: local reference block pointer 2603 * @upiu_flags: flags 2604 */ 2605 static 2606 void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags) 2607 { 2608 struct scsi_cmnd *cmd = lrbp->cmd; 2609 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr; 2610 unsigned short cdb_len; 2611 2612 ucd_req_ptr->header = (struct utp_upiu_header){ 2613 .transaction_code = UPIU_TRANSACTION_COMMAND, 2614 .flags = upiu_flags, 2615 .lun = lrbp->lun, 2616 .task_tag = lrbp->task_tag, 2617 .command_set_type = UPIU_COMMAND_SET_TYPE_SCSI, 2618 }; 2619 2620 ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length); 2621 2622 cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE); 2623 memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE); 2624 memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len); 2625 2626 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); 2627 } 2628 2629 /** 2630 * ufshcd_prepare_utp_query_req_upiu() - fill the utp_transfer_req_desc for query request 2631 * @hba: UFS hba 2632 * @lrbp: local reference block pointer 2633 * @upiu_flags: flags 2634 */ 2635 static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba, 2636 struct ufshcd_lrb *lrbp, u8 upiu_flags) 2637 { 2638 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr; 2639 struct ufs_query *query = &hba->dev_cmd.query; 2640 u16 len = be16_to_cpu(query->request.upiu_req.length); 2641 2642 /* Query request header */ 2643 ucd_req_ptr->header = (struct utp_upiu_header){ 2644 .transaction_code = UPIU_TRANSACTION_QUERY_REQ, 2645 .flags = upiu_flags, 2646 .lun = lrbp->lun, 2647 .task_tag = lrbp->task_tag, 2648 .query_function = query->request.query_func, 2649 /* Data segment length only need for WRITE_DESC */ 2650 .data_segment_length = 2651 query->request.upiu_req.opcode == 2652 UPIU_QUERY_OPCODE_WRITE_DESC ? 2653 cpu_to_be16(len) : 2654 0, 2655 }; 2656 2657 /* Copy the Query Request buffer as is */ 2658 memcpy(&ucd_req_ptr->qr, &query->request.upiu_req, 2659 QUERY_OSF_SIZE); 2660 2661 /* Copy the Descriptor */ 2662 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC) 2663 memcpy(ucd_req_ptr + 1, query->descriptor, len); 2664 2665 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); 2666 } 2667 2668 static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp) 2669 { 2670 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr; 2671 2672 memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req)); 2673 2674 ucd_req_ptr->header = (struct utp_upiu_header){ 2675 .transaction_code = UPIU_TRANSACTION_NOP_OUT, 2676 .task_tag = lrbp->task_tag, 2677 }; 2678 2679 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); 2680 } 2681 2682 /** 2683 * ufshcd_compose_devman_upiu - UFS Protocol Information Unit(UPIU) 2684 * for Device Management Purposes 2685 * @hba: per adapter instance 2686 * @lrbp: pointer to local reference block 2687 * 2688 * Return: 0 upon success; < 0 upon failure. 
2689 */ 2690 static int ufshcd_compose_devman_upiu(struct ufs_hba *hba, 2691 struct ufshcd_lrb *lrbp) 2692 { 2693 u8 upiu_flags; 2694 int ret = 0; 2695 2696 if (hba->ufs_version <= ufshci_version(1, 1)) 2697 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE; 2698 else 2699 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE; 2700 2701 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0); 2702 if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY) 2703 ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags); 2704 else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP) 2705 ufshcd_prepare_utp_nop_upiu(lrbp); 2706 else 2707 ret = -EINVAL; 2708 2709 return ret; 2710 } 2711 2712 /** 2713 * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit(UPIU) 2714 * for SCSI Purposes 2715 * @hba: per adapter instance 2716 * @lrbp: pointer to local reference block 2717 * 2718 * Return: 0 upon success; < 0 upon failure. 2719 */ 2720 static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) 2721 { 2722 u8 upiu_flags; 2723 int ret = 0; 2724 2725 if (hba->ufs_version <= ufshci_version(1, 1)) 2726 lrbp->command_type = UTP_CMD_TYPE_SCSI; 2727 else 2728 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE; 2729 2730 if (likely(lrbp->cmd)) { 2731 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, lrbp->cmd->sc_data_direction, 0); 2732 ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags); 2733 } else { 2734 ret = -EINVAL; 2735 } 2736 2737 return ret; 2738 } 2739 2740 /** 2741 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID 2742 * @upiu_wlun_id: UPIU W-LUN id 2743 * 2744 * Return: SCSI W-LUN id. 2745 */ 2746 static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id) 2747 { 2748 return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE; 2749 } 2750 2751 static inline bool is_device_wlun(struct scsi_device *sdev) 2752 { 2753 return sdev->lun == 2754 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN); 2755 } 2756 2757 /* 2758 * Associate the UFS controller queue with the default and poll HCTX types. 2759 * Initialize the mq_map[] arrays. 
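 *
 * For example, on a controller without MCQ support this results in one
 * default queue and one poll queue, both with queue_offset 0 (the poll
 * map shares the tag space of the single hardware queue); with MCQ the
 * per-type offsets accumulate across hba->nr_queues[].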
2760 */ 2761 static void ufshcd_map_queues(struct Scsi_Host *shost) 2762 { 2763 struct ufs_hba *hba = shost_priv(shost); 2764 int i, queue_offset = 0; 2765 2766 if (!is_mcq_supported(hba)) { 2767 hba->nr_queues[HCTX_TYPE_DEFAULT] = 1; 2768 hba->nr_queues[HCTX_TYPE_READ] = 0; 2769 hba->nr_queues[HCTX_TYPE_POLL] = 1; 2770 hba->nr_hw_queues = 1; 2771 } 2772 2773 for (i = 0; i < shost->nr_maps; i++) { 2774 struct blk_mq_queue_map *map = &shost->tag_set.map[i]; 2775 2776 map->nr_queues = hba->nr_queues[i]; 2777 if (!map->nr_queues) 2778 continue; 2779 map->queue_offset = queue_offset; 2780 if (i == HCTX_TYPE_POLL && !is_mcq_supported(hba)) 2781 map->queue_offset = 0; 2782 2783 blk_mq_map_queues(map); 2784 queue_offset += map->nr_queues; 2785 } 2786 } 2787 2788 static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i) 2789 { 2790 struct utp_transfer_cmd_desc *cmd_descp = (void *)hba->ucdl_base_addr + 2791 i * ufshcd_get_ucd_size(hba); 2792 struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr; 2793 dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr + 2794 i * ufshcd_get_ucd_size(hba); 2795 u16 response_offset = offsetof(struct utp_transfer_cmd_desc, 2796 response_upiu); 2797 u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table); 2798 2799 lrb->utr_descriptor_ptr = utrdlp + i; 2800 lrb->utrd_dma_addr = hba->utrdl_dma_addr + 2801 i * sizeof(struct utp_transfer_req_desc); 2802 lrb->ucd_req_ptr = (struct utp_upiu_req *)cmd_descp->command_upiu; 2803 lrb->ucd_req_dma_addr = cmd_desc_element_addr; 2804 lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp->response_upiu; 2805 lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset; 2806 lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp->prd_table; 2807 lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset; 2808 } 2809 2810 /** 2811 * ufshcd_queuecommand - main entry point for SCSI requests 2812 * @host: SCSI host pointer 2813 * @cmd: command from SCSI Midlayer 2814 * 2815 * Return: 0 for success, non-zero in case of failure. 2816 */ 2817 static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) 2818 { 2819 struct ufs_hba *hba = shost_priv(host); 2820 int tag = scsi_cmd_to_rq(cmd)->tag; 2821 struct ufshcd_lrb *lrbp; 2822 int err = 0; 2823 struct ufs_hw_queue *hwq = NULL; 2824 2825 WARN_ONCE(tag < 0 || tag >= hba->nutrs, "Invalid tag %d\n", tag); 2826 2827 switch (hba->ufshcd_state) { 2828 case UFSHCD_STATE_OPERATIONAL: 2829 break; 2830 case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL: 2831 /* 2832 * SCSI error handler can call ->queuecommand() while UFS error 2833 * handler is in progress. Error interrupts could change the 2834 * state from UFSHCD_STATE_RESET to 2835 * UFSHCD_STATE_EH_SCHEDULED_NON_FATAL. Prevent requests 2836 * being issued in that case. 2837 */ 2838 if (ufshcd_eh_in_progress(hba)) { 2839 err = SCSI_MLQUEUE_HOST_BUSY; 2840 goto out; 2841 } 2842 break; 2843 case UFSHCD_STATE_EH_SCHEDULED_FATAL: 2844 /* 2845 * pm_runtime_get_sync() is used at error handling preparation 2846 * stage. If a scsi cmd, e.g. the SSU cmd, is sent from hba's 2847 * PM ops, it can never be finished if we let SCSI layer keep 2848 * retrying it, which gets err handler stuck forever. Neither 2849 * can we let the scsi cmd pass through, because UFS is in bad 2850 * state, the scsi cmd may eventually time out, which will get 2851 * err handler blocked for too long. So, just fail the scsi cmd 2852 * sent from PM ops, err handler can recover PM error anyways. 
2853 */ 2854 if (hba->pm_op_in_progress) { 2855 hba->force_reset = true; 2856 set_host_byte(cmd, DID_BAD_TARGET); 2857 scsi_done(cmd); 2858 goto out; 2859 } 2860 fallthrough; 2861 case UFSHCD_STATE_RESET: 2862 err = SCSI_MLQUEUE_HOST_BUSY; 2863 goto out; 2864 case UFSHCD_STATE_ERROR: 2865 set_host_byte(cmd, DID_ERROR); 2866 scsi_done(cmd); 2867 goto out; 2868 } 2869 2870 hba->req_abort_count = 0; 2871 2872 ufshcd_hold(hba); 2873 2874 lrbp = &hba->lrb[tag]; 2875 lrbp->cmd = cmd; 2876 lrbp->task_tag = tag; 2877 lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun); 2878 lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba); 2879 2880 ufshcd_prepare_lrbp_crypto(scsi_cmd_to_rq(cmd), lrbp); 2881 2882 lrbp->req_abort_skip = false; 2883 2884 ufshcd_comp_scsi_upiu(hba, lrbp); 2885 2886 err = ufshcd_map_sg(hba, lrbp); 2887 if (err) { 2888 ufshcd_release(hba); 2889 goto out; 2890 } 2891 2892 if (is_mcq_enabled(hba)) 2893 hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd)); 2894 2895 ufshcd_send_command(hba, tag, hwq); 2896 2897 out: 2898 if (ufs_trigger_eh()) { 2899 unsigned long flags; 2900 2901 spin_lock_irqsave(hba->host->host_lock, flags); 2902 ufshcd_schedule_eh_work(hba); 2903 spin_unlock_irqrestore(hba->host->host_lock, flags); 2904 } 2905 2906 return err; 2907 } 2908 2909 static int ufshcd_compose_dev_cmd(struct ufs_hba *hba, 2910 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag) 2911 { 2912 lrbp->cmd = NULL; 2913 lrbp->task_tag = tag; 2914 lrbp->lun = 0; /* device management cmd is not specific to any LUN */ 2915 lrbp->intr_cmd = true; /* No interrupt aggregation */ 2916 ufshcd_prepare_lrbp_crypto(NULL, lrbp); 2917 hba->dev_cmd.type = cmd_type; 2918 2919 return ufshcd_compose_devman_upiu(hba, lrbp); 2920 } 2921 2922 /* 2923 * Check with the block layer if the command is inflight 2924 * @cmd: command to check. 2925 * 2926 * Return: true if command is inflight; false if not. 2927 */ 2928 bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd) 2929 { 2930 struct request *rq; 2931 2932 if (!cmd) 2933 return false; 2934 2935 rq = scsi_cmd_to_rq(cmd); 2936 if (!blk_mq_request_started(rq)) 2937 return false; 2938 2939 return true; 2940 } 2941 2942 /* 2943 * Clear the pending command in the controller and wait until 2944 * the controller confirms that the command has been cleared. 2945 * @hba: per adapter instance 2946 * @task_tag: The tag number of the command to be cleared. 2947 */ 2948 static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag) 2949 { 2950 u32 mask = 1U << task_tag; 2951 unsigned long flags; 2952 int err; 2953 2954 if (is_mcq_enabled(hba)) { 2955 /* 2956 * MCQ mode. Clean up the MCQ resources similar to 2957 * what the ufshcd_utrl_clear() does for SDB mode. 2958 */ 2959 err = ufshcd_mcq_sq_cleanup(hba, task_tag); 2960 if (err) { 2961 dev_err(hba->dev, "%s: failed tag=%d. err=%d\n", 2962 __func__, task_tag, err); 2963 return err; 2964 } 2965 return 0; 2966 } 2967 2968 /* clear outstanding transaction before retry */ 2969 spin_lock_irqsave(hba->host->host_lock, flags); 2970 ufshcd_utrl_clear(hba, mask); 2971 spin_unlock_irqrestore(hba->host->host_lock, flags); 2972 2973 /* 2974 * wait for h/w to clear corresponding bit in door-bell. 2975 * max. wait is 1 sec. 
2976 */ 2977 return ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL, 2978 mask, ~mask, 1000, 1000); 2979 } 2980 2981 /** 2982 * ufshcd_dev_cmd_completion() - handles device management command responses 2983 * @hba: per adapter instance 2984 * @lrbp: pointer to local reference block 2985 * 2986 * Return: 0 upon success; < 0 upon failure. 2987 */ 2988 static int 2989 ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) 2990 { 2991 enum upiu_response_transaction resp; 2992 int err = 0; 2993 2994 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0); 2995 resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr); 2996 2997 switch (resp) { 2998 case UPIU_TRANSACTION_NOP_IN: 2999 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) { 3000 err = -EINVAL; 3001 dev_err(hba->dev, "%s: unexpected response %x\n", 3002 __func__, resp); 3003 } 3004 break; 3005 case UPIU_TRANSACTION_QUERY_RSP: { 3006 u8 response = lrbp->ucd_rsp_ptr->header.response; 3007 3008 if (response == 0) 3009 err = ufshcd_copy_query_response(hba, lrbp); 3010 break; 3011 } 3012 case UPIU_TRANSACTION_REJECT_UPIU: 3013 /* TODO: handle Reject UPIU Response */ 3014 err = -EPERM; 3015 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n", 3016 __func__); 3017 break; 3018 case UPIU_TRANSACTION_RESPONSE: 3019 if (hba->dev_cmd.type != DEV_CMD_TYPE_RPMB) { 3020 err = -EINVAL; 3021 dev_err(hba->dev, "%s: unexpected response %x\n", __func__, resp); 3022 } 3023 break; 3024 default: 3025 err = -EINVAL; 3026 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n", 3027 __func__, resp); 3028 break; 3029 } 3030 3031 return err; 3032 } 3033 3034 static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba, 3035 struct ufshcd_lrb *lrbp, int max_timeout) 3036 { 3037 unsigned long time_left = msecs_to_jiffies(max_timeout); 3038 unsigned long flags; 3039 bool pending; 3040 int err; 3041 3042 retry: 3043 time_left = wait_for_completion_timeout(hba->dev_cmd.complete, 3044 time_left); 3045 3046 if (likely(time_left)) { 3047 /* 3048 * The completion handler called complete() and the caller of 3049 * this function still owns the @lrbp tag so the code below does 3050 * not trigger any race conditions. 3051 */ 3052 hba->dev_cmd.complete = NULL; 3053 err = ufshcd_get_tr_ocs(lrbp, NULL); 3054 if (!err) 3055 err = ufshcd_dev_cmd_completion(hba, lrbp); 3056 } else { 3057 err = -ETIMEDOUT; 3058 dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n", 3059 __func__, lrbp->task_tag); 3060 3061 /* MCQ mode */ 3062 if (is_mcq_enabled(hba)) { 3063 err = ufshcd_clear_cmd(hba, lrbp->task_tag); 3064 hba->dev_cmd.complete = NULL; 3065 return err; 3066 } 3067 3068 /* SDB mode */ 3069 if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0) { 3070 /* successfully cleared the command, retry if needed */ 3071 err = -EAGAIN; 3072 /* 3073 * Since clearing the command succeeded we also need to 3074 * clear the task tag bit from the outstanding_reqs 3075 * variable. 3076 */ 3077 spin_lock_irqsave(&hba->outstanding_lock, flags); 3078 pending = test_bit(lrbp->task_tag, 3079 &hba->outstanding_reqs); 3080 if (pending) { 3081 hba->dev_cmd.complete = NULL; 3082 __clear_bit(lrbp->task_tag, 3083 &hba->outstanding_reqs); 3084 } 3085 spin_unlock_irqrestore(&hba->outstanding_lock, flags); 3086 3087 if (!pending) { 3088 /* 3089 * The completion handler ran while we tried to 3090 * clear the command. 
3091 */ 3092 time_left = 1; 3093 goto retry; 3094 } 3095 } else { 3096 dev_err(hba->dev, "%s: failed to clear tag %d\n", 3097 __func__, lrbp->task_tag); 3098 3099 spin_lock_irqsave(&hba->outstanding_lock, flags); 3100 pending = test_bit(lrbp->task_tag, 3101 &hba->outstanding_reqs); 3102 if (pending) 3103 hba->dev_cmd.complete = NULL; 3104 spin_unlock_irqrestore(&hba->outstanding_lock, flags); 3105 3106 if (!pending) { 3107 /* 3108 * The completion handler ran while we tried to 3109 * clear the command. 3110 */ 3111 time_left = 1; 3112 goto retry; 3113 } 3114 } 3115 } 3116 3117 return err; 3118 } 3119 3120 /** 3121 * ufshcd_exec_dev_cmd - API for sending device management requests 3122 * @hba: UFS hba 3123 * @cmd_type: specifies the type (NOP, Query...) 3124 * @timeout: timeout in milliseconds 3125 * 3126 * Return: 0 upon success; < 0 upon failure. 3127 * 3128 * NOTE: Since there is only one available tag for device management commands, 3129 * it is expected you hold the hba->dev_cmd.lock mutex. 3130 */ 3131 static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, 3132 enum dev_cmd_type cmd_type, int timeout) 3133 { 3134 DECLARE_COMPLETION_ONSTACK(wait); 3135 const u32 tag = hba->reserved_slot; 3136 struct ufshcd_lrb *lrbp; 3137 int err; 3138 3139 /* Protects use of hba->reserved_slot. */ 3140 lockdep_assert_held(&hba->dev_cmd.lock); 3141 3142 down_read(&hba->clk_scaling_lock); 3143 3144 lrbp = &hba->lrb[tag]; 3145 lrbp->cmd = NULL; 3146 err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag); 3147 if (unlikely(err)) 3148 goto out; 3149 3150 hba->dev_cmd.complete = &wait; 3151 3152 ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr); 3153 3154 ufshcd_send_command(hba, tag, hba->dev_cmd_queue); 3155 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout); 3156 ufshcd_add_query_upiu_trace(hba, err ? 
UFS_QUERY_ERR : UFS_QUERY_COMP, 3157 (struct utp_upiu_req *)lrbp->ucd_rsp_ptr); 3158 3159 out: 3160 up_read(&hba->clk_scaling_lock); 3161 return err; 3162 } 3163 3164 /** 3165 * ufshcd_init_query() - init the query response and request parameters 3166 * @hba: per-adapter instance 3167 * @request: address of the request pointer to be initialized 3168 * @response: address of the response pointer to be initialized 3169 * @opcode: operation to perform 3170 * @idn: flag idn to access 3171 * @index: LU number to access 3172 * @selector: query/flag/descriptor further identification 3173 */ 3174 static inline void ufshcd_init_query(struct ufs_hba *hba, 3175 struct ufs_query_req **request, struct ufs_query_res **response, 3176 enum query_opcode opcode, u8 idn, u8 index, u8 selector) 3177 { 3178 *request = &hba->dev_cmd.query.request; 3179 *response = &hba->dev_cmd.query.response; 3180 memset(*request, 0, sizeof(struct ufs_query_req)); 3181 memset(*response, 0, sizeof(struct ufs_query_res)); 3182 (*request)->upiu_req.opcode = opcode; 3183 (*request)->upiu_req.idn = idn; 3184 (*request)->upiu_req.index = index; 3185 (*request)->upiu_req.selector = selector; 3186 } 3187 3188 static int ufshcd_query_flag_retry(struct ufs_hba *hba, 3189 enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res) 3190 { 3191 int ret; 3192 int retries; 3193 3194 for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) { 3195 ret = ufshcd_query_flag(hba, opcode, idn, index, flag_res); 3196 if (ret) 3197 dev_dbg(hba->dev, 3198 "%s: failed with error %d, retries %d\n", 3199 __func__, ret, retries); 3200 else 3201 break; 3202 } 3203 3204 if (ret) 3205 dev_err(hba->dev, 3206 "%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n", 3207 __func__, opcode, idn, ret, retries); 3208 return ret; 3209 } 3210 3211 /** 3212 * ufshcd_query_flag() - API function for sending flag query requests 3213 * @hba: per-adapter instance 3214 * @opcode: flag query to perform 3215 * @idn: flag idn to access 3216 * @index: flag index to access 3217 * @flag_res: the flag value after the query request completes 3218 * 3219 * Return: 0 for success, non-zero in case of failure. 
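 *
 * A minimal usage sketch (illustrative only; the flag IDN below is just
 * one example from the UFS flag definitions):
 *
 *	bool flag_res;
 *	int err;
 *
 *	err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 *				QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
 *	if (!err && !flag_res)
 *		;	/* fDeviceInit is cleared, device initialization done */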
3220 */ 3221 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, 3222 enum flag_idn idn, u8 index, bool *flag_res) 3223 { 3224 struct ufs_query_req *request = NULL; 3225 struct ufs_query_res *response = NULL; 3226 int err, selector = 0; 3227 int timeout = QUERY_REQ_TIMEOUT; 3228 3229 BUG_ON(!hba); 3230 3231 ufshcd_hold(hba); 3232 mutex_lock(&hba->dev_cmd.lock); 3233 ufshcd_init_query(hba, &request, &response, opcode, idn, index, 3234 selector); 3235 3236 switch (opcode) { 3237 case UPIU_QUERY_OPCODE_SET_FLAG: 3238 case UPIU_QUERY_OPCODE_CLEAR_FLAG: 3239 case UPIU_QUERY_OPCODE_TOGGLE_FLAG: 3240 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST; 3241 break; 3242 case UPIU_QUERY_OPCODE_READ_FLAG: 3243 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST; 3244 if (!flag_res) { 3245 /* No dummy reads */ 3246 dev_err(hba->dev, "%s: Invalid argument for read request\n", 3247 __func__); 3248 err = -EINVAL; 3249 goto out_unlock; 3250 } 3251 break; 3252 default: 3253 dev_err(hba->dev, 3254 "%s: Expected query flag opcode but got = %d\n", 3255 __func__, opcode); 3256 err = -EINVAL; 3257 goto out_unlock; 3258 } 3259 3260 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout); 3261 3262 if (err) { 3263 dev_err(hba->dev, 3264 "%s: Sending flag query for idn %d failed, err = %d\n", 3265 __func__, idn, err); 3266 goto out_unlock; 3267 } 3268 3269 if (flag_res) 3270 *flag_res = (be32_to_cpu(response->upiu_res.value) & 3271 MASK_QUERY_UPIU_FLAG_LOC) & 0x1; 3272 3273 out_unlock: 3274 mutex_unlock(&hba->dev_cmd.lock); 3275 ufshcd_release(hba); 3276 return err; 3277 } 3278 3279 /** 3280 * ufshcd_query_attr - API function for sending attribute requests 3281 * @hba: per-adapter instance 3282 * @opcode: attribute opcode 3283 * @idn: attribute idn to access 3284 * @index: index field 3285 * @selector: selector field 3286 * @attr_val: the attribute value after the query request completes 3287 * 3288 * Return: 0 for success, non-zero in case of failure. 
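 *
 * A minimal usage sketch (illustrative only; the attribute IDN below is
 * just one example from the UFS attribute definitions):
 *
 *	u32 boot_lun_en;
 *	int err;
 *
 *	err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
 *				QUERY_ATTR_IDN_BOOT_LU_EN, 0, 0, &boot_lun_en);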
3289 */ 3290 int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, 3291 enum attr_idn idn, u8 index, u8 selector, u32 *attr_val) 3292 { 3293 struct ufs_query_req *request = NULL; 3294 struct ufs_query_res *response = NULL; 3295 int err; 3296 3297 BUG_ON(!hba); 3298 3299 if (!attr_val) { 3300 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n", 3301 __func__, opcode); 3302 return -EINVAL; 3303 } 3304 3305 ufshcd_hold(hba); 3306 3307 mutex_lock(&hba->dev_cmd.lock); 3308 ufshcd_init_query(hba, &request, &response, opcode, idn, index, 3309 selector); 3310 3311 switch (opcode) { 3312 case UPIU_QUERY_OPCODE_WRITE_ATTR: 3313 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST; 3314 request->upiu_req.value = cpu_to_be32(*attr_val); 3315 break; 3316 case UPIU_QUERY_OPCODE_READ_ATTR: 3317 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST; 3318 break; 3319 default: 3320 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n", 3321 __func__, opcode); 3322 err = -EINVAL; 3323 goto out_unlock; 3324 } 3325 3326 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); 3327 3328 if (err) { 3329 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n", 3330 __func__, opcode, idn, index, err); 3331 goto out_unlock; 3332 } 3333 3334 *attr_val = be32_to_cpu(response->upiu_res.value); 3335 3336 out_unlock: 3337 mutex_unlock(&hba->dev_cmd.lock); 3338 ufshcd_release(hba); 3339 return err; 3340 } 3341 3342 /** 3343 * ufshcd_query_attr_retry() - API function for sending query 3344 * attribute with retries 3345 * @hba: per-adapter instance 3346 * @opcode: attribute opcode 3347 * @idn: attribute idn to access 3348 * @index: index field 3349 * @selector: selector field 3350 * @attr_val: the attribute value after the query request 3351 * completes 3352 * 3353 * Return: 0 for success, non-zero in case of failure. 
3354 */ 3355 int ufshcd_query_attr_retry(struct ufs_hba *hba, 3356 enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector, 3357 u32 *attr_val) 3358 { 3359 int ret = 0; 3360 u32 retries; 3361 3362 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) { 3363 ret = ufshcd_query_attr(hba, opcode, idn, index, 3364 selector, attr_val); 3365 if (ret) 3366 dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n", 3367 __func__, ret, retries); 3368 else 3369 break; 3370 } 3371 3372 if (ret) 3373 dev_err(hba->dev, 3374 "%s: query attribute, idn %d, failed with error %d after %d retries\n", 3375 __func__, idn, ret, QUERY_REQ_RETRIES); 3376 return ret; 3377 } 3378 3379 static int __ufshcd_query_descriptor(struct ufs_hba *hba, 3380 enum query_opcode opcode, enum desc_idn idn, u8 index, 3381 u8 selector, u8 *desc_buf, int *buf_len) 3382 { 3383 struct ufs_query_req *request = NULL; 3384 struct ufs_query_res *response = NULL; 3385 int err; 3386 3387 BUG_ON(!hba); 3388 3389 if (!desc_buf) { 3390 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n", 3391 __func__, opcode); 3392 return -EINVAL; 3393 } 3394 3395 if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) { 3396 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n", 3397 __func__, *buf_len); 3398 return -EINVAL; 3399 } 3400 3401 ufshcd_hold(hba); 3402 3403 mutex_lock(&hba->dev_cmd.lock); 3404 ufshcd_init_query(hba, &request, &response, opcode, idn, index, 3405 selector); 3406 hba->dev_cmd.query.descriptor = desc_buf; 3407 request->upiu_req.length = cpu_to_be16(*buf_len); 3408 3409 switch (opcode) { 3410 case UPIU_QUERY_OPCODE_WRITE_DESC: 3411 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST; 3412 break; 3413 case UPIU_QUERY_OPCODE_READ_DESC: 3414 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST; 3415 break; 3416 default: 3417 dev_err(hba->dev, 3418 "%s: Expected query descriptor opcode but got = 0x%.2x\n", 3419 __func__, opcode); 3420 err = -EINVAL; 3421 goto out_unlock; 3422 } 3423 3424 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); 3425 3426 if (err) { 3427 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n", 3428 __func__, opcode, idn, index, err); 3429 goto out_unlock; 3430 } 3431 3432 *buf_len = be16_to_cpu(response->upiu_res.length); 3433 3434 out_unlock: 3435 hba->dev_cmd.query.descriptor = NULL; 3436 mutex_unlock(&hba->dev_cmd.lock); 3437 ufshcd_release(hba); 3438 return err; 3439 } 3440 3441 /** 3442 * ufshcd_query_descriptor_retry - API function for sending descriptor requests 3443 * @hba: per-adapter instance 3444 * @opcode: attribute opcode 3445 * @idn: attribute idn to access 3446 * @index: index field 3447 * @selector: selector field 3448 * @desc_buf: the buffer that contains the descriptor 3449 * @buf_len: length parameter passed to the device 3450 * 3451 * The buf_len parameter will contain, on return, the length parameter 3452 * received on the response. 3453 * 3454 * Return: 0 for success, non-zero in case of failure. 
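 *
 * A minimal usage sketch (illustrative only; reads the device descriptor
 * into a caller-provided buffer and leaves the returned length in buf_len):
 *
 *	u8 desc_buf[QUERY_DESC_MAX_SIZE];
 *	int buf_len = QUERY_DESC_MAX_SIZE;
 *	int err;
 *
 *	err = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
 *					    QUERY_DESC_IDN_DEVICE, 0, 0,
 *					    desc_buf, &buf_len);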
3455 */ 3456 int ufshcd_query_descriptor_retry(struct ufs_hba *hba, 3457 enum query_opcode opcode, 3458 enum desc_idn idn, u8 index, 3459 u8 selector, 3460 u8 *desc_buf, int *buf_len) 3461 { 3462 int err; 3463 int retries; 3464 3465 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) { 3466 err = __ufshcd_query_descriptor(hba, opcode, idn, index, 3467 selector, desc_buf, buf_len); 3468 if (!err || err == -EINVAL) 3469 break; 3470 } 3471 3472 return err; 3473 } 3474 3475 /** 3476 * ufshcd_read_desc_param - read the specified descriptor parameter 3477 * @hba: Pointer to adapter instance 3478 * @desc_id: descriptor idn value 3479 * @desc_index: descriptor index 3480 * @param_offset: offset of the parameter to read 3481 * @param_read_buf: pointer to buffer where parameter would be read 3482 * @param_size: sizeof(param_read_buf) 3483 * 3484 * Return: 0 in case of success, non-zero otherwise. 3485 */ 3486 int ufshcd_read_desc_param(struct ufs_hba *hba, 3487 enum desc_idn desc_id, 3488 int desc_index, 3489 u8 param_offset, 3490 u8 *param_read_buf, 3491 u8 param_size) 3492 { 3493 int ret; 3494 u8 *desc_buf; 3495 int buff_len = QUERY_DESC_MAX_SIZE; 3496 bool is_kmalloc = true; 3497 3498 /* Safety check */ 3499 if (desc_id >= QUERY_DESC_IDN_MAX || !param_size) 3500 return -EINVAL; 3501 3502 /* Check whether we need temp memory */ 3503 if (param_offset != 0 || param_size < buff_len) { 3504 desc_buf = kzalloc(buff_len, GFP_KERNEL); 3505 if (!desc_buf) 3506 return -ENOMEM; 3507 } else { 3508 desc_buf = param_read_buf; 3509 is_kmalloc = false; 3510 } 3511 3512 /* Request for full descriptor */ 3513 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC, 3514 desc_id, desc_index, 0, 3515 desc_buf, &buff_len); 3516 if (ret) { 3517 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d\n", 3518 __func__, desc_id, desc_index, param_offset, ret); 3519 goto out; 3520 } 3521 3522 /* Update descriptor length */ 3523 buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET]; 3524 3525 if (param_offset >= buff_len) { 3526 dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n", 3527 __func__, param_offset, desc_id, buff_len); 3528 ret = -EINVAL; 3529 goto out; 3530 } 3531 3532 /* Sanity check */ 3533 if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) { 3534 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n", 3535 __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]); 3536 ret = -EINVAL; 3537 goto out; 3538 } 3539 3540 if (is_kmalloc) { 3541 /* Make sure we don't copy more data than available */ 3542 if (param_offset >= buff_len) 3543 ret = -EINVAL; 3544 else 3545 memcpy(param_read_buf, &desc_buf[param_offset], 3546 min_t(u32, param_size, buff_len - param_offset)); 3547 } 3548 out: 3549 if (is_kmalloc) 3550 kfree(desc_buf); 3551 return ret; 3552 } 3553 3554 /** 3555 * struct uc_string_id - unicode string 3556 * 3557 * @len: size of this descriptor inclusive 3558 * @type: descriptor type 3559 * @uc: unicode string character 3560 */ 3561 struct uc_string_id { 3562 u8 len; 3563 u8 type; 3564 wchar_t uc[]; 3565 } __packed; 3566 3567 /* replace non-printable or non-ASCII characters with spaces */ 3568 static inline char ufshcd_remove_non_printable(u8 ch) 3569 { 3570 return (ch >= 0x20 && ch <= 0x7e) ? 
ch : ' '; 3571 } 3572 3573 /** 3574 * ufshcd_read_string_desc - read string descriptor 3575 * @hba: pointer to adapter instance 3576 * @desc_index: descriptor index 3577 * @buf: pointer to buffer where descriptor would be read, 3578 * the caller should free the memory. 3579 * @ascii: if true convert from unicode to ascii characters 3580 * null terminated string. 3581 * 3582 * Return: 3583 * * string size on success. 3584 * * -ENOMEM: on allocation failure 3585 * * -EINVAL: on a wrong parameter 3586 */ 3587 int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index, 3588 u8 **buf, bool ascii) 3589 { 3590 struct uc_string_id *uc_str; 3591 u8 *str; 3592 int ret; 3593 3594 if (!buf) 3595 return -EINVAL; 3596 3597 uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL); 3598 if (!uc_str) 3599 return -ENOMEM; 3600 3601 ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_STRING, desc_index, 0, 3602 (u8 *)uc_str, QUERY_DESC_MAX_SIZE); 3603 if (ret < 0) { 3604 dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n", 3605 QUERY_REQ_RETRIES, ret); 3606 str = NULL; 3607 goto out; 3608 } 3609 3610 if (uc_str->len <= QUERY_DESC_HDR_SIZE) { 3611 dev_dbg(hba->dev, "String Desc is of zero length\n"); 3612 str = NULL; 3613 ret = 0; 3614 goto out; 3615 } 3616 3617 if (ascii) { 3618 ssize_t ascii_len; 3619 int i; 3620 /* remove header and divide by 2 to move from UTF16 to UTF8 */ 3621 ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1; 3622 str = kzalloc(ascii_len, GFP_KERNEL); 3623 if (!str) { 3624 ret = -ENOMEM; 3625 goto out; 3626 } 3627 3628 /* 3629 * the descriptor contains string in UTF16 format 3630 * we need to convert to utf-8 so it can be displayed 3631 */ 3632 ret = utf16s_to_utf8s(uc_str->uc, 3633 uc_str->len - QUERY_DESC_HDR_SIZE, 3634 UTF16_BIG_ENDIAN, str, ascii_len); 3635 3636 /* replace non-printable or non-ASCII characters with spaces */ 3637 for (i = 0; i < ret; i++) 3638 str[i] = ufshcd_remove_non_printable(str[i]); 3639 3640 str[ret++] = '\0'; 3641 3642 } else { 3643 str = kmemdup(uc_str, uc_str->len, GFP_KERNEL); 3644 if (!str) { 3645 ret = -ENOMEM; 3646 goto out; 3647 } 3648 ret = uc_str->len; 3649 } 3650 out: 3651 *buf = str; 3652 kfree(uc_str); 3653 return ret; 3654 } 3655 3656 /** 3657 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter 3658 * @hba: Pointer to adapter instance 3659 * @lun: lun id 3660 * @param_offset: offset of the parameter to read 3661 * @param_read_buf: pointer to buffer where parameter would be read 3662 * @param_size: sizeof(param_read_buf) 3663 * 3664 * Return: 0 in case of success, non-zero otherwise. 3665 */ 3666 static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba, 3667 int lun, 3668 enum unit_desc_param param_offset, 3669 u8 *param_read_buf, 3670 u32 param_size) 3671 { 3672 /* 3673 * Unit descriptors are only available for general purpose LUs (LUN id 3674 * from 0 to 7) and RPMB Well known LU. 
3675 */ 3676 if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun)) 3677 return -EOPNOTSUPP; 3678 3679 return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun, 3680 param_offset, param_read_buf, param_size); 3681 } 3682 3683 static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba *hba) 3684 { 3685 int err = 0; 3686 u32 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US; 3687 3688 if (hba->dev_info.wspecversion >= 0x300) { 3689 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, 3690 QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME, 0, 0, 3691 &gating_wait); 3692 if (err) 3693 dev_err(hba->dev, "Failed reading bRefClkGatingWait. err = %d, use default %uus\n", 3694 err, gating_wait); 3695 3696 if (gating_wait == 0) { 3697 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US; 3698 dev_err(hba->dev, "Undefined ref clk gating wait time, use default %uus\n", 3699 gating_wait); 3700 } 3701 3702 hba->dev_info.clk_gating_wait_us = gating_wait; 3703 } 3704 3705 return err; 3706 } 3707 3708 /** 3709 * ufshcd_memory_alloc - allocate memory for host memory space data structures 3710 * @hba: per adapter instance 3711 * 3712 * 1. Allocate DMA memory for Command Descriptor array 3713 * Each command descriptor consist of Command UPIU, Response UPIU and PRDT 3714 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL). 3715 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List 3716 * (UTMRDL) 3717 * 4. Allocate memory for local reference block(lrb). 3718 * 3719 * Return: 0 for success, non-zero in case of failure. 3720 */ 3721 static int ufshcd_memory_alloc(struct ufs_hba *hba) 3722 { 3723 size_t utmrdl_size, utrdl_size, ucdl_size; 3724 3725 /* Allocate memory for UTP command descriptors */ 3726 ucdl_size = ufshcd_get_ucd_size(hba) * hba->nutrs; 3727 hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev, 3728 ucdl_size, 3729 &hba->ucdl_dma_addr, 3730 GFP_KERNEL); 3731 3732 /* 3733 * UFSHCI requires UTP command descriptor to be 128 byte aligned. 3734 */ 3735 if (!hba->ucdl_base_addr || 3736 WARN_ON(hba->ucdl_dma_addr & (128 - 1))) { 3737 dev_err(hba->dev, 3738 "Command Descriptor Memory allocation failed\n"); 3739 goto out; 3740 } 3741 3742 /* 3743 * Allocate memory for UTP Transfer descriptors 3744 * UFSHCI requires 1KB alignment of UTRD 3745 */ 3746 utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs); 3747 hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev, 3748 utrdl_size, 3749 &hba->utrdl_dma_addr, 3750 GFP_KERNEL); 3751 if (!hba->utrdl_base_addr || 3752 WARN_ON(hba->utrdl_dma_addr & (SZ_1K - 1))) { 3753 dev_err(hba->dev, 3754 "Transfer Descriptor Memory allocation failed\n"); 3755 goto out; 3756 } 3757 3758 /* 3759 * Skip utmrdl allocation; it may have been 3760 * allocated during first pass and not released during 3761 * MCQ memory allocation. 
3762 * See ufshcd_release_sdb_queue() and ufshcd_config_mcq() 3763 */ 3764 if (hba->utmrdl_base_addr) 3765 goto skip_utmrdl; 3766 /* 3767 * Allocate memory for UTP Task Management descriptors 3768 * UFSHCI requires 1KB alignment of UTMRD 3769 */ 3770 utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs; 3771 hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev, 3772 utmrdl_size, 3773 &hba->utmrdl_dma_addr, 3774 GFP_KERNEL); 3775 if (!hba->utmrdl_base_addr || 3776 WARN_ON(hba->utmrdl_dma_addr & (SZ_1K - 1))) { 3777 dev_err(hba->dev, 3778 "Task Management Descriptor Memory allocation failed\n"); 3779 goto out; 3780 } 3781 3782 skip_utmrdl: 3783 /* Allocate memory for local reference block */ 3784 hba->lrb = devm_kcalloc(hba->dev, 3785 hba->nutrs, sizeof(struct ufshcd_lrb), 3786 GFP_KERNEL); 3787 if (!hba->lrb) { 3788 dev_err(hba->dev, "LRB Memory allocation failed\n"); 3789 goto out; 3790 } 3791 return 0; 3792 out: 3793 return -ENOMEM; 3794 } 3795 3796 /** 3797 * ufshcd_host_memory_configure - configure local reference block with 3798 * memory offsets 3799 * @hba: per adapter instance 3800 * 3801 * Configure Host memory space 3802 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA 3803 * address. 3804 * 2. Update each UTRD with Response UPIU offset, Response UPIU length 3805 * and PRDT offset. 3806 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT 3807 * into local reference block. 3808 */ 3809 static void ufshcd_host_memory_configure(struct ufs_hba *hba) 3810 { 3811 struct utp_transfer_req_desc *utrdlp; 3812 dma_addr_t cmd_desc_dma_addr; 3813 dma_addr_t cmd_desc_element_addr; 3814 u16 response_offset; 3815 u16 prdt_offset; 3816 int cmd_desc_size; 3817 int i; 3818 3819 utrdlp = hba->utrdl_base_addr; 3820 3821 response_offset = 3822 offsetof(struct utp_transfer_cmd_desc, response_upiu); 3823 prdt_offset = 3824 offsetof(struct utp_transfer_cmd_desc, prd_table); 3825 3826 cmd_desc_size = ufshcd_get_ucd_size(hba); 3827 cmd_desc_dma_addr = hba->ucdl_dma_addr; 3828 3829 for (i = 0; i < hba->nutrs; i++) { 3830 /* Configure UTRD with command descriptor base address */ 3831 cmd_desc_element_addr = 3832 (cmd_desc_dma_addr + (cmd_desc_size * i)); 3833 utrdlp[i].command_desc_base_addr = 3834 cpu_to_le64(cmd_desc_element_addr); 3835 3836 /* Response upiu and prdt offset should be in double words */ 3837 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) { 3838 utrdlp[i].response_upiu_offset = 3839 cpu_to_le16(response_offset); 3840 utrdlp[i].prd_table_offset = 3841 cpu_to_le16(prdt_offset); 3842 utrdlp[i].response_upiu_length = 3843 cpu_to_le16(ALIGNED_UPIU_SIZE); 3844 } else { 3845 utrdlp[i].response_upiu_offset = 3846 cpu_to_le16(response_offset >> 2); 3847 utrdlp[i].prd_table_offset = 3848 cpu_to_le16(prdt_offset >> 2); 3849 utrdlp[i].response_upiu_length = 3850 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2); 3851 } 3852 3853 ufshcd_init_lrb(hba, &hba->lrb[i], i); 3854 } 3855 } 3856 3857 /** 3858 * ufshcd_dme_link_startup - Notify Unipro to perform link startup 3859 * @hba: per adapter instance 3860 * 3861 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer, 3862 * in order to initialize the Unipro link startup procedure. 3863 * Once the Unipro links are up, the device connected to the controller 3864 * is detected. 3865 * 3866 * Return: 0 on success, non-zero value on failure. 
3867 */ 3868 static int ufshcd_dme_link_startup(struct ufs_hba *hba) 3869 { 3870 struct uic_command uic_cmd = {0}; 3871 int ret; 3872 3873 uic_cmd.command = UIC_CMD_DME_LINK_STARTUP; 3874 3875 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); 3876 if (ret) 3877 dev_dbg(hba->dev, 3878 "dme-link-startup: error code %d\n", ret); 3879 return ret; 3880 } 3881 /** 3882 * ufshcd_dme_reset - UIC command for DME_RESET 3883 * @hba: per adapter instance 3884 * 3885 * DME_RESET command is issued in order to reset UniPro stack. 3886 * This function now deals with cold reset. 3887 * 3888 * Return: 0 on success, non-zero value on failure. 3889 */ 3890 static int ufshcd_dme_reset(struct ufs_hba *hba) 3891 { 3892 struct uic_command uic_cmd = {0}; 3893 int ret; 3894 3895 uic_cmd.command = UIC_CMD_DME_RESET; 3896 3897 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); 3898 if (ret) 3899 dev_err(hba->dev, 3900 "dme-reset: error code %d\n", ret); 3901 3902 return ret; 3903 } 3904 3905 int ufshcd_dme_configure_adapt(struct ufs_hba *hba, 3906 int agreed_gear, 3907 int adapt_val) 3908 { 3909 int ret; 3910 3911 if (agreed_gear < UFS_HS_G4) 3912 adapt_val = PA_NO_ADAPT; 3913 3914 ret = ufshcd_dme_set(hba, 3915 UIC_ARG_MIB(PA_TXHSADAPTTYPE), 3916 adapt_val); 3917 return ret; 3918 } 3919 EXPORT_SYMBOL_GPL(ufshcd_dme_configure_adapt); 3920 3921 /** 3922 * ufshcd_dme_enable - UIC command for DME_ENABLE 3923 * @hba: per adapter instance 3924 * 3925 * DME_ENABLE command is issued in order to enable UniPro stack. 3926 * 3927 * Return: 0 on success, non-zero value on failure. 3928 */ 3929 static int ufshcd_dme_enable(struct ufs_hba *hba) 3930 { 3931 struct uic_command uic_cmd = {0}; 3932 int ret; 3933 3934 uic_cmd.command = UIC_CMD_DME_ENABLE; 3935 3936 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); 3937 if (ret) 3938 dev_err(hba->dev, 3939 "dme-enable: error code %d\n", ret); 3940 3941 return ret; 3942 } 3943 3944 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba) 3945 { 3946 #define MIN_DELAY_BEFORE_DME_CMDS_US 1000 3947 unsigned long min_sleep_time_us; 3948 3949 if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS)) 3950 return; 3951 3952 /* 3953 * last_dme_cmd_tstamp will be 0 only for 1st call to 3954 * this function 3955 */ 3956 if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) { 3957 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US; 3958 } else { 3959 unsigned long delta = 3960 (unsigned long) ktime_to_us( 3961 ktime_sub(ktime_get(), 3962 hba->last_dme_cmd_tstamp)); 3963 3964 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US) 3965 min_sleep_time_us = 3966 MIN_DELAY_BEFORE_DME_CMDS_US - delta; 3967 else 3968 return; /* no more delay required */ 3969 } 3970 3971 /* allow sleep for extra 50us if needed */ 3972 usleep_range(min_sleep_time_us, min_sleep_time_us + 50); 3973 } 3974 3975 /** 3976 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET 3977 * @hba: per adapter instance 3978 * @attr_sel: uic command argument1 3979 * @attr_set: attribute set type as uic command argument2 3980 * @mib_val: setting value as uic command argument3 3981 * @peer: indicate whether peer or local 3982 * 3983 * Return: 0 on success, non-zero value on failure. 3984 */ 3985 int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, 3986 u8 attr_set, u32 mib_val, u8 peer) 3987 { 3988 struct uic_command uic_cmd = {0}; 3989 static const char *const action[] = { 3990 "dme-set", 3991 "dme-peer-set" 3992 }; 3993 const char *set = action[!!peer]; 3994 int ret; 3995 int retries = UFS_UIC_COMMAND_RETRIES; 3996 3997 uic_cmd.command = peer ? 
3998 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET; 3999 uic_cmd.argument1 = attr_sel; 4000 uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set); 4001 uic_cmd.argument3 = mib_val; 4002 4003 do { 4004 /* for peer attributes we retry upon failure */ 4005 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); 4006 if (ret) 4007 dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n", 4008 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret); 4009 } while (ret && peer && --retries); 4010 4011 if (ret) 4012 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n", 4013 set, UIC_GET_ATTR_ID(attr_sel), mib_val, 4014 UFS_UIC_COMMAND_RETRIES - retries); 4015 4016 return ret; 4017 } 4018 EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr); 4019 4020 /** 4021 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET 4022 * @hba: per adapter instance 4023 * @attr_sel: uic command argument1 4024 * @mib_val: the value of the attribute as returned by the UIC command 4025 * @peer: indicate whether peer or local 4026 * 4027 * Return: 0 on success, non-zero value on failure. 4028 */ 4029 int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, 4030 u32 *mib_val, u8 peer) 4031 { 4032 struct uic_command uic_cmd = {0}; 4033 static const char *const action[] = { 4034 "dme-get", 4035 "dme-peer-get" 4036 }; 4037 const char *get = action[!!peer]; 4038 int ret; 4039 int retries = UFS_UIC_COMMAND_RETRIES; 4040 struct ufs_pa_layer_attr orig_pwr_info; 4041 struct ufs_pa_layer_attr temp_pwr_info; 4042 bool pwr_mode_change = false; 4043 4044 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) { 4045 orig_pwr_info = hba->pwr_info; 4046 temp_pwr_info = orig_pwr_info; 4047 4048 if (orig_pwr_info.pwr_tx == FAST_MODE || 4049 orig_pwr_info.pwr_rx == FAST_MODE) { 4050 temp_pwr_info.pwr_tx = FASTAUTO_MODE; 4051 temp_pwr_info.pwr_rx = FASTAUTO_MODE; 4052 pwr_mode_change = true; 4053 } else if (orig_pwr_info.pwr_tx == SLOW_MODE || 4054 orig_pwr_info.pwr_rx == SLOW_MODE) { 4055 temp_pwr_info.pwr_tx = SLOWAUTO_MODE; 4056 temp_pwr_info.pwr_rx = SLOWAUTO_MODE; 4057 pwr_mode_change = true; 4058 } 4059 if (pwr_mode_change) { 4060 ret = ufshcd_change_power_mode(hba, &temp_pwr_info); 4061 if (ret) 4062 goto out; 4063 } 4064 } 4065 4066 uic_cmd.command = peer ? 4067 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET; 4068 uic_cmd.argument1 = attr_sel; 4069 4070 do { 4071 /* for peer attributes we retry upon failure */ 4072 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); 4073 if (ret) 4074 dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n", 4075 get, UIC_GET_ATTR_ID(attr_sel), ret); 4076 } while (ret && peer && --retries); 4077 4078 if (ret) 4079 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n", 4080 get, UIC_GET_ATTR_ID(attr_sel), 4081 UFS_UIC_COMMAND_RETRIES - retries); 4082 4083 if (mib_val && !ret) 4084 *mib_val = uic_cmd.argument3; 4085 4086 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE) 4087 && pwr_mode_change) 4088 ufshcd_change_power_mode(hba, &orig_pwr_info); 4089 out: 4090 return ret; 4091 } 4092 EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr); 4093 4094 /** 4095 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power 4096 * state) and waits for it to take effect. 
4097 * 4098 * @hba: per adapter instance 4099 * @cmd: UIC command to execute 4100 * 4101 * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER & 4102 * DME_HIBERNATE_EXIT commands take some time to take effect on both the host 4103 * and device UniPro link, and hence their final completion is indicated by 4104 * dedicated status bits in the Interrupt Status register (UPMS, UHES, UHXS) in 4105 * addition to the normal UIC command completion status (UCCS). This function only 4106 * returns after the relevant status bits indicate completion. 4107 * 4108 * Return: 0 on success, non-zero value on failure. 4109 */ 4110 static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd) 4111 { 4112 DECLARE_COMPLETION_ONSTACK(uic_async_done); 4113 unsigned long flags; 4114 u8 status; 4115 int ret; 4116 bool reenable_intr = false; 4117 4118 mutex_lock(&hba->uic_cmd_mutex); 4119 ufshcd_add_delay_before_dme_cmd(hba); 4120 4121 spin_lock_irqsave(hba->host->host_lock, flags); 4122 if (ufshcd_is_link_broken(hba)) { 4123 ret = -ENOLINK; 4124 goto out_unlock; 4125 } 4126 hba->uic_async_done = &uic_async_done; 4127 if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) { 4128 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL); 4129 /* 4130 * Make sure UIC command completion interrupt is disabled before 4131 * issuing UIC command. 4132 */ 4133 wmb(); 4134 reenable_intr = true; 4135 } 4136 ret = __ufshcd_send_uic_cmd(hba, cmd, false); 4137 spin_unlock_irqrestore(hba->host->host_lock, flags); 4138 if (ret) { 4139 dev_err(hba->dev, 4140 "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n", 4141 cmd->command, cmd->argument3, ret); 4142 goto out; 4143 } 4144 4145 if (!wait_for_completion_timeout(hba->uic_async_done, 4146 msecs_to_jiffies(UIC_CMD_TIMEOUT))) { 4147 dev_err(hba->dev, 4148 "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n", 4149 cmd->command, cmd->argument3); 4150 4151 if (!cmd->cmd_active) { 4152 dev_err(hba->dev, "%s: Power Mode Change operation has been completed, go check UPMCRS\n", 4153 __func__); 4154 goto check_upmcrs; 4155 } 4156 4157 ret = -ETIMEDOUT; 4158 goto out; 4159 } 4160 4161 check_upmcrs: 4162 status = ufshcd_get_upmcrs(hba); 4163 if (status != PWR_LOCAL) { 4164 dev_err(hba->dev, 4165 "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n", 4166 cmd->command, status); 4167 ret = (status != PWR_OK) ? status : -1; 4168 } 4169 out: 4170 if (ret) { 4171 ufshcd_print_host_state(hba); 4172 ufshcd_print_pwr_info(hba); 4173 ufshcd_print_evt_hist(hba); 4174 } 4175 4176 spin_lock_irqsave(hba->host->host_lock, flags); 4177 hba->active_uic_cmd = NULL; 4178 hba->uic_async_done = NULL; 4179 if (reenable_intr) 4180 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL); 4181 if (ret) { 4182 ufshcd_set_link_broken(hba); 4183 ufshcd_schedule_eh_work(hba); 4184 } 4185 out_unlock: 4186 spin_unlock_irqrestore(hba->host->host_lock, flags); 4187 mutex_unlock(&hba->uic_cmd_mutex); 4188 4189 return ret; 4190 } 4191 4192 /** 4193 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change 4194 * using DME_SET primitives. 4195 * @hba: per adapter instance 4196 * @mode: power mode value 4197 * 4198 * Return: 0 on success, non-zero value on failure.
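* * Note: @mode packs the RX power mode in the upper nibble and the TX power * mode in the lower nibble; see ufshcd_change_power_mode(), which builds it * as (pwr_rx << 4 | pwr_tx).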
4199 */ 4200 int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode) 4201 { 4202 struct uic_command uic_cmd = {0}; 4203 int ret; 4204 4205 if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) { 4206 ret = ufshcd_dme_set(hba, 4207 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1); 4208 if (ret) { 4209 dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n", 4210 __func__, ret); 4211 goto out; 4212 } 4213 } 4214 4215 uic_cmd.command = UIC_CMD_DME_SET; 4216 uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE); 4217 uic_cmd.argument3 = mode; 4218 ufshcd_hold(hba); 4219 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); 4220 ufshcd_release(hba); 4221 4222 out: 4223 return ret; 4224 } 4225 EXPORT_SYMBOL_GPL(ufshcd_uic_change_pwr_mode); 4226 4227 int ufshcd_link_recovery(struct ufs_hba *hba) 4228 { 4229 int ret; 4230 unsigned long flags; 4231 4232 spin_lock_irqsave(hba->host->host_lock, flags); 4233 hba->ufshcd_state = UFSHCD_STATE_RESET; 4234 ufshcd_set_eh_in_progress(hba); 4235 spin_unlock_irqrestore(hba->host->host_lock, flags); 4236 4237 /* Reset the attached device */ 4238 ufshcd_device_reset(hba); 4239 4240 ret = ufshcd_host_reset_and_restore(hba); 4241 4242 spin_lock_irqsave(hba->host->host_lock, flags); 4243 if (ret) 4244 hba->ufshcd_state = UFSHCD_STATE_ERROR; 4245 ufshcd_clear_eh_in_progress(hba); 4246 spin_unlock_irqrestore(hba->host->host_lock, flags); 4247 4248 if (ret) 4249 dev_err(hba->dev, "%s: link recovery failed, err %d", 4250 __func__, ret); 4251 4252 return ret; 4253 } 4254 EXPORT_SYMBOL_GPL(ufshcd_link_recovery); 4255 4256 int ufshcd_uic_hibern8_enter(struct ufs_hba *hba) 4257 { 4258 int ret; 4259 struct uic_command uic_cmd = {0}; 4260 ktime_t start = ktime_get(); 4261 4262 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE); 4263 4264 uic_cmd.command = UIC_CMD_DME_HIBER_ENTER; 4265 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); 4266 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter", 4267 ktime_to_us(ktime_sub(ktime_get(), start)), ret); 4268 4269 if (ret) 4270 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n", 4271 __func__, ret); 4272 else 4273 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, 4274 POST_CHANGE); 4275 4276 return ret; 4277 } 4278 EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_enter); 4279 4280 int ufshcd_uic_hibern8_exit(struct ufs_hba *hba) 4281 { 4282 struct uic_command uic_cmd = {0}; 4283 int ret; 4284 ktime_t start = ktime_get(); 4285 4286 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE); 4287 4288 uic_cmd.command = UIC_CMD_DME_HIBER_EXIT; 4289 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); 4290 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit", 4291 ktime_to_us(ktime_sub(ktime_get(), start)), ret); 4292 4293 if (ret) { 4294 dev_err(hba->dev, "%s: hibern8 exit failed. 
ret = %d\n", 4295 __func__, ret); 4296 } else { 4297 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, 4298 POST_CHANGE); 4299 hba->ufs_stats.last_hibern8_exit_tstamp = local_clock(); 4300 hba->ufs_stats.hibern8_exit_cnt++; 4301 } 4302 4303 return ret; 4304 } 4305 EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit); 4306 4307 void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit) 4308 { 4309 unsigned long flags; 4310 bool update = false; 4311 4312 if (!ufshcd_is_auto_hibern8_supported(hba)) 4313 return; 4314 4315 spin_lock_irqsave(hba->host->host_lock, flags); 4316 if (hba->ahit != ahit) { 4317 hba->ahit = ahit; 4318 update = true; 4319 } 4320 spin_unlock_irqrestore(hba->host->host_lock, flags); 4321 4322 if (update && 4323 !pm_runtime_suspended(&hba->ufs_device_wlun->sdev_gendev)) { 4324 ufshcd_rpm_get_sync(hba); 4325 ufshcd_hold(hba); 4326 ufshcd_auto_hibern8_enable(hba); 4327 ufshcd_release(hba); 4328 ufshcd_rpm_put_sync(hba); 4329 } 4330 } 4331 EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update); 4332 4333 void ufshcd_auto_hibern8_enable(struct ufs_hba *hba) 4334 { 4335 if (!ufshcd_is_auto_hibern8_supported(hba)) 4336 return; 4337 4338 ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER); 4339 } 4340 4341 /** 4342 * ufshcd_init_pwr_info - setting the POR (power on reset) 4343 * values in hba power info 4344 * @hba: per-adapter instance 4345 */ 4346 static void ufshcd_init_pwr_info(struct ufs_hba *hba) 4347 { 4348 hba->pwr_info.gear_rx = UFS_PWM_G1; 4349 hba->pwr_info.gear_tx = UFS_PWM_G1; 4350 hba->pwr_info.lane_rx = UFS_LANE_1; 4351 hba->pwr_info.lane_tx = UFS_LANE_1; 4352 hba->pwr_info.pwr_rx = SLOWAUTO_MODE; 4353 hba->pwr_info.pwr_tx = SLOWAUTO_MODE; 4354 hba->pwr_info.hs_rate = 0; 4355 } 4356 4357 /** 4358 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device 4359 * @hba: per-adapter instance 4360 * 4361 * Return: 0 upon success; < 0 upon failure. 4362 */ 4363 static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba) 4364 { 4365 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info; 4366 4367 if (hba->max_pwr_info.is_valid) 4368 return 0; 4369 4370 if (hba->quirks & UFSHCD_QUIRK_HIBERN_FASTAUTO) { 4371 pwr_info->pwr_tx = FASTAUTO_MODE; 4372 pwr_info->pwr_rx = FASTAUTO_MODE; 4373 } else { 4374 pwr_info->pwr_tx = FAST_MODE; 4375 pwr_info->pwr_rx = FAST_MODE; 4376 } 4377 pwr_info->hs_rate = PA_HS_MODE_B; 4378 4379 /* Get the connected lane count */ 4380 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), 4381 &pwr_info->lane_rx); 4382 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), 4383 &pwr_info->lane_tx); 4384 4385 if (!pwr_info->lane_rx || !pwr_info->lane_tx) { 4386 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n", 4387 __func__, 4388 pwr_info->lane_rx, 4389 pwr_info->lane_tx); 4390 return -EINVAL; 4391 } 4392 4393 /* 4394 * First, get the maximum gears of HS speed. 4395 * If a zero value, it means there is no HSGEAR capability. 4396 * Then, get the maximum gears of PWM speed. 
4397 */ 4398 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx); 4399 if (!pwr_info->gear_rx) { 4400 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), 4401 &pwr_info->gear_rx); 4402 if (!pwr_info->gear_rx) { 4403 dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n", 4404 __func__, pwr_info->gear_rx); 4405 return -EINVAL; 4406 } 4407 pwr_info->pwr_rx = SLOW_MODE; 4408 } 4409 4410 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), 4411 &pwr_info->gear_tx); 4412 if (!pwr_info->gear_tx) { 4413 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), 4414 &pwr_info->gear_tx); 4415 if (!pwr_info->gear_tx) { 4416 dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n", 4417 __func__, pwr_info->gear_tx); 4418 return -EINVAL; 4419 } 4420 pwr_info->pwr_tx = SLOW_MODE; 4421 } 4422 4423 hba->max_pwr_info.is_valid = true; 4424 return 0; 4425 } 4426 4427 static int ufshcd_change_power_mode(struct ufs_hba *hba, 4428 struct ufs_pa_layer_attr *pwr_mode) 4429 { 4430 int ret; 4431 4432 /* if already configured to the requested pwr_mode */ 4433 if (!hba->force_pmc && 4434 pwr_mode->gear_rx == hba->pwr_info.gear_rx && 4435 pwr_mode->gear_tx == hba->pwr_info.gear_tx && 4436 pwr_mode->lane_rx == hba->pwr_info.lane_rx && 4437 pwr_mode->lane_tx == hba->pwr_info.lane_tx && 4438 pwr_mode->pwr_rx == hba->pwr_info.pwr_rx && 4439 pwr_mode->pwr_tx == hba->pwr_info.pwr_tx && 4440 pwr_mode->hs_rate == hba->pwr_info.hs_rate) { 4441 dev_dbg(hba->dev, "%s: power already configured\n", __func__); 4442 return 0; 4443 } 4444 4445 /* 4446 * Configure attributes for power mode change with below. 4447 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION, 4448 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION, 4449 * - PA_HSSERIES 4450 */ 4451 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx); 4452 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), 4453 pwr_mode->lane_rx); 4454 if (pwr_mode->pwr_rx == FASTAUTO_MODE || 4455 pwr_mode->pwr_rx == FAST_MODE) 4456 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true); 4457 else 4458 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), false); 4459 4460 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx); 4461 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), 4462 pwr_mode->lane_tx); 4463 if (pwr_mode->pwr_tx == FASTAUTO_MODE || 4464 pwr_mode->pwr_tx == FAST_MODE) 4465 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true); 4466 else 4467 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), false); 4468 4469 if (pwr_mode->pwr_rx == FASTAUTO_MODE || 4470 pwr_mode->pwr_tx == FASTAUTO_MODE || 4471 pwr_mode->pwr_rx == FAST_MODE || 4472 pwr_mode->pwr_tx == FAST_MODE) 4473 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), 4474 pwr_mode->hs_rate); 4475 4476 if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) { 4477 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0), 4478 DL_FC0ProtectionTimeOutVal_Default); 4479 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1), 4480 DL_TC0ReplayTimeOutVal_Default); 4481 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2), 4482 DL_AFC0ReqTimeOutVal_Default); 4483 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3), 4484 DL_FC1ProtectionTimeOutVal_Default); 4485 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4), 4486 DL_TC1ReplayTimeOutVal_Default); 4487 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5), 4488 DL_AFC1ReqTimeOutVal_Default); 4489 4490 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal), 4491 DL_FC0ProtectionTimeOutVal_Default); 4492 
ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal), 4493 DL_TC0ReplayTimeOutVal_Default); 4494 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal), 4495 DL_AFC0ReqTimeOutVal_Default); 4496 } 4497 4498 ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4 4499 | pwr_mode->pwr_tx); 4500 4501 if (ret) { 4502 dev_err(hba->dev, 4503 "%s: power mode change failed %d\n", __func__, ret); 4504 } else { 4505 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL, 4506 pwr_mode); 4507 4508 memcpy(&hba->pwr_info, pwr_mode, 4509 sizeof(struct ufs_pa_layer_attr)); 4510 } 4511 4512 return ret; 4513 } 4514 4515 /** 4516 * ufshcd_config_pwr_mode - configure a new power mode 4517 * @hba: per-adapter instance 4518 * @desired_pwr_mode: desired power configuration 4519 * 4520 * Return: 0 upon success; < 0 upon failure. 4521 */ 4522 int ufshcd_config_pwr_mode(struct ufs_hba *hba, 4523 struct ufs_pa_layer_attr *desired_pwr_mode) 4524 { 4525 struct ufs_pa_layer_attr final_params = { 0 }; 4526 int ret; 4527 4528 ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE, 4529 desired_pwr_mode, &final_params); 4530 4531 if (ret) 4532 memcpy(&final_params, desired_pwr_mode, sizeof(final_params)); 4533 4534 ret = ufshcd_change_power_mode(hba, &final_params); 4535 4536 return ret; 4537 } 4538 EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode); 4539 4540 /** 4541 * ufshcd_complete_dev_init() - checks device readiness 4542 * @hba: per-adapter instance 4543 * 4544 * Set fDeviceInit flag and poll until device toggles it. 4545 * 4546 * Return: 0 upon success; < 0 upon failure. 4547 */ 4548 static int ufshcd_complete_dev_init(struct ufs_hba *hba) 4549 { 4550 int err; 4551 bool flag_res = true; 4552 ktime_t timeout; 4553 4554 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG, 4555 QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL); 4556 if (err) { 4557 dev_err(hba->dev, 4558 "%s: setting fDeviceInit flag failed with error %d\n", 4559 __func__, err); 4560 goto out; 4561 } 4562 4563 /* Poll fDeviceInit flag to be cleared */ 4564 timeout = ktime_add_ms(ktime_get(), FDEVICEINIT_COMPL_TIMEOUT); 4565 do { 4566 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG, 4567 QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res); 4568 if (!flag_res) 4569 break; 4570 usleep_range(500, 1000); 4571 } while (ktime_before(ktime_get(), timeout)); 4572 4573 if (err) { 4574 dev_err(hba->dev, 4575 "%s: reading fDeviceInit flag failed with error %d\n", 4576 __func__, err); 4577 } else if (flag_res) { 4578 dev_err(hba->dev, 4579 "%s: fDeviceInit was not cleared by the device\n", 4580 __func__); 4581 err = -EBUSY; 4582 } 4583 out: 4584 return err; 4585 } 4586 4587 /** 4588 * ufshcd_make_hba_operational - Make UFS controller operational 4589 * @hba: per adapter instance 4590 * 4591 * To bring UFS host controller to operational state, 4592 * 1. Enable required interrupts 4593 * 2. Configure interrupt aggregation 4594 * 3. Program UTRL and UTMRL base address 4595 * 4. Configure run-stop-registers 4596 * 4597 * Return: 0 on success, non-zero value on failure. 
4598 */ 4599 int ufshcd_make_hba_operational(struct ufs_hba *hba) 4600 { 4601 int err = 0; 4602 u32 reg; 4603 4604 /* Enable required interrupts */ 4605 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS); 4606 4607 /* Configure interrupt aggregation */ 4608 if (ufshcd_is_intr_aggr_allowed(hba)) 4609 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO); 4610 else 4611 ufshcd_disable_intr_aggr(hba); 4612 4613 /* Configure UTRL and UTMRL base address registers */ 4614 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr), 4615 REG_UTP_TRANSFER_REQ_LIST_BASE_L); 4616 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr), 4617 REG_UTP_TRANSFER_REQ_LIST_BASE_H); 4618 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr), 4619 REG_UTP_TASK_REQ_LIST_BASE_L); 4620 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr), 4621 REG_UTP_TASK_REQ_LIST_BASE_H); 4622 4623 /* 4624 * Make sure base address and interrupt setup are updated before 4625 * enabling the run/stop registers below. 4626 */ 4627 wmb(); 4628 4629 /* 4630 * UCRDY, UTMRLDY and UTRLRDY bits must be 1 4631 */ 4632 reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS); 4633 if (!(ufshcd_get_lists_status(reg))) { 4634 ufshcd_enable_run_stop_reg(hba); 4635 } else { 4636 dev_err(hba->dev, 4637 "Host controller not ready to process requests"); 4638 err = -EIO; 4639 } 4640 4641 return err; 4642 } 4643 EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational); 4644 4645 /** 4646 * ufshcd_hba_stop - Send controller to reset state 4647 * @hba: per adapter instance 4648 */ 4649 void ufshcd_hba_stop(struct ufs_hba *hba) 4650 { 4651 unsigned long flags; 4652 int err; 4653 4654 /* 4655 * Obtain the host lock to prevent that the controller is disabled 4656 * while the UFS interrupt handler is active on another CPU. 4657 */ 4658 spin_lock_irqsave(hba->host->host_lock, flags); 4659 ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE); 4660 spin_unlock_irqrestore(hba->host->host_lock, flags); 4661 4662 err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE, 4663 CONTROLLER_ENABLE, CONTROLLER_DISABLE, 4664 10, 1); 4665 if (err) 4666 dev_err(hba->dev, "%s: Controller disable failed\n", __func__); 4667 } 4668 EXPORT_SYMBOL_GPL(ufshcd_hba_stop); 4669 4670 /** 4671 * ufshcd_hba_execute_hce - initialize the controller 4672 * @hba: per adapter instance 4673 * 4674 * The controller resets itself and controller firmware initialization 4675 * sequence kicks off. When controller is ready it will set 4676 * the Host Controller Enable bit to 1. 4677 * 4678 * Return: 0 on success, non-zero value on failure. 4679 */ 4680 static int ufshcd_hba_execute_hce(struct ufs_hba *hba) 4681 { 4682 int retry_outer = 3; 4683 int retry_inner; 4684 4685 start: 4686 if (ufshcd_is_hba_active(hba)) 4687 /* change controller state to "reset state" */ 4688 ufshcd_hba_stop(hba); 4689 4690 /* UniPro link is disabled at this point */ 4691 ufshcd_set_link_off(hba); 4692 4693 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE); 4694 4695 /* start controller initialization sequence */ 4696 ufshcd_hba_start(hba); 4697 4698 /* 4699 * To initialize a UFS host controller HCE bit must be set to 1. 4700 * During initialization the HCE bit value changes from 1->0->1. 4701 * When the host controller completes initialization sequence 4702 * it sets the value of HCE bit to 1. The same HCE bit is read back 4703 * to check if the controller has completed initialization sequence. 4704 * So without this delay the value HCE = 1, set in the previous 4705 * instruction might be read back. 
4706 * This delay can be changed based on the controller. 4707 */ 4708 ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100); 4709 4710 /* wait for the host controller to complete initialization */ 4711 retry_inner = 50; 4712 while (!ufshcd_is_hba_active(hba)) { 4713 if (retry_inner) { 4714 retry_inner--; 4715 } else { 4716 dev_err(hba->dev, 4717 "Controller enable failed\n"); 4718 if (retry_outer) { 4719 retry_outer--; 4720 goto start; 4721 } 4722 return -EIO; 4723 } 4724 usleep_range(1000, 1100); 4725 } 4726 4727 /* enable UIC related interrupts */ 4728 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK); 4729 4730 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE); 4731 4732 return 0; 4733 } 4734 4735 int ufshcd_hba_enable(struct ufs_hba *hba) 4736 { 4737 int ret; 4738 4739 if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) { 4740 ufshcd_set_link_off(hba); 4741 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE); 4742 4743 /* enable UIC related interrupts */ 4744 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK); 4745 ret = ufshcd_dme_reset(hba); 4746 if (ret) { 4747 dev_err(hba->dev, "DME_RESET failed\n"); 4748 return ret; 4749 } 4750 4751 ret = ufshcd_dme_enable(hba); 4752 if (ret) { 4753 dev_err(hba->dev, "Enabling DME failed\n"); 4754 return ret; 4755 } 4756 4757 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE); 4758 } else { 4759 ret = ufshcd_hba_execute_hce(hba); 4760 } 4761 4762 return ret; 4763 } 4764 EXPORT_SYMBOL_GPL(ufshcd_hba_enable); 4765 4766 static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer) 4767 { 4768 int tx_lanes = 0, i, err = 0; 4769 4770 if (!peer) 4771 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), 4772 &tx_lanes); 4773 else 4774 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), 4775 &tx_lanes); 4776 for (i = 0; i < tx_lanes; i++) { 4777 if (!peer) 4778 err = ufshcd_dme_set(hba, 4779 UIC_ARG_MIB_SEL(TX_LCC_ENABLE, 4780 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)), 4781 0); 4782 else 4783 err = ufshcd_dme_peer_set(hba, 4784 UIC_ARG_MIB_SEL(TX_LCC_ENABLE, 4785 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)), 4786 0); 4787 if (err) { 4788 dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d", 4789 __func__, peer, i, err); 4790 break; 4791 } 4792 } 4793 4794 return err; 4795 } 4796 4797 static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba) 4798 { 4799 return ufshcd_disable_tx_lcc(hba, true); 4800 } 4801 4802 void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val) 4803 { 4804 struct ufs_event_hist *e; 4805 4806 if (id >= UFS_EVT_CNT) 4807 return; 4808 4809 e = &hba->ufs_stats.event[id]; 4810 e->val[e->pos] = val; 4811 e->tstamp[e->pos] = local_clock(); 4812 e->cnt += 1; 4813 e->pos = (e->pos + 1) % UFS_EVENT_HIST_LENGTH; 4814 4815 ufshcd_vops_event_notify(hba, id, &val); 4816 } 4817 EXPORT_SYMBOL_GPL(ufshcd_update_evt_hist); 4818 4819 /** 4820 * ufshcd_link_startup - Initialize unipro link startup 4821 * @hba: per adapter instance 4822 * 4823 * Return: 0 for success, non-zero in case of failure. 4824 */ 4825 static int ufshcd_link_startup(struct ufs_hba *hba) 4826 { 4827 int ret; 4828 int retries = DME_LINKSTARTUP_RETRIES; 4829 bool link_startup_again = false; 4830 4831 /* 4832 * If UFS device isn't active then we will have to issue link startup 4833 * 2 times to make sure the device state move to active. 
4834 */ 4835 if (!ufshcd_is_ufs_dev_active(hba)) 4836 link_startup_again = true; 4837 4838 link_startup: 4839 do { 4840 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE); 4841 4842 ret = ufshcd_dme_link_startup(hba); 4843 4844 /* check if device is detected by inter-connect layer */ 4845 if (!ret && !ufshcd_is_device_present(hba)) { 4846 ufshcd_update_evt_hist(hba, 4847 UFS_EVT_LINK_STARTUP_FAIL, 4848 0); 4849 dev_err(hba->dev, "%s: Device not present\n", __func__); 4850 ret = -ENXIO; 4851 goto out; 4852 } 4853 4854 /* 4855 * DME link lost indication is only received when link is up, 4856 * but we can't be sure if the link is up until link startup 4857 * succeeds. So reset the local Uni-Pro and try again. 4858 */ 4859 if (ret && retries && ufshcd_hba_enable(hba)) { 4860 ufshcd_update_evt_hist(hba, 4861 UFS_EVT_LINK_STARTUP_FAIL, 4862 (u32)ret); 4863 goto out; 4864 } 4865 } while (ret && retries--); 4866 4867 if (ret) { 4868 /* failed to get the link up... retire */ 4869 ufshcd_update_evt_hist(hba, 4870 UFS_EVT_LINK_STARTUP_FAIL, 4871 (u32)ret); 4872 goto out; 4873 } 4874 4875 if (link_startup_again) { 4876 link_startup_again = false; 4877 retries = DME_LINKSTARTUP_RETRIES; 4878 goto link_startup; 4879 } 4880 4881 /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */ 4882 ufshcd_init_pwr_info(hba); 4883 ufshcd_print_pwr_info(hba); 4884 4885 if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) { 4886 ret = ufshcd_disable_device_tx_lcc(hba); 4887 if (ret) 4888 goto out; 4889 } 4890 4891 /* Include any host controller configuration via UIC commands */ 4892 ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE); 4893 if (ret) 4894 goto out; 4895 4896 /* Clear UECPA once due to LINERESET has happened during LINK_STARTUP */ 4897 ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER); 4898 ret = ufshcd_make_hba_operational(hba); 4899 out: 4900 if (ret) { 4901 dev_err(hba->dev, "link startup failed %d\n", ret); 4902 ufshcd_print_host_state(hba); 4903 ufshcd_print_pwr_info(hba); 4904 ufshcd_print_evt_hist(hba); 4905 } 4906 return ret; 4907 } 4908 4909 /** 4910 * ufshcd_verify_dev_init() - Verify device initialization 4911 * @hba: per-adapter instance 4912 * 4913 * Send NOP OUT UPIU and wait for NOP IN response to check whether the 4914 * device Transport Protocol (UTP) layer is ready after a reset. 4915 * If the UTP layer at the device side is not initialized, it may 4916 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT 4917 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations. 4918 * 4919 * Return: 0 upon success; < 0 upon failure. 
4920 */ 4921 static int ufshcd_verify_dev_init(struct ufs_hba *hba) 4922 { 4923 int err = 0; 4924 int retries; 4925 4926 ufshcd_hold(hba); 4927 mutex_lock(&hba->dev_cmd.lock); 4928 for (retries = NOP_OUT_RETRIES; retries > 0; retries--) { 4929 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, 4930 hba->nop_out_timeout); 4931 4932 if (!err || err == -ETIMEDOUT) 4933 break; 4934 4935 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err); 4936 } 4937 mutex_unlock(&hba->dev_cmd.lock); 4938 ufshcd_release(hba); 4939 4940 if (err) 4941 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err); 4942 return err; 4943 } 4944 4945 /** 4946 * ufshcd_setup_links - associate link b/w device wlun and other luns 4947 * @sdev: pointer to SCSI device 4948 * @hba: pointer to ufs hba 4949 */ 4950 static void ufshcd_setup_links(struct ufs_hba *hba, struct scsi_device *sdev) 4951 { 4952 struct device_link *link; 4953 4954 /* 4955 * Device wlun is the supplier & rest of the luns are consumers. 4956 * This ensures that device wlun suspends after all other luns. 4957 */ 4958 if (hba->ufs_device_wlun) { 4959 link = device_link_add(&sdev->sdev_gendev, 4960 &hba->ufs_device_wlun->sdev_gendev, 4961 DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE); 4962 if (!link) { 4963 dev_err(&sdev->sdev_gendev, "Failed establishing link - %s\n", 4964 dev_name(&hba->ufs_device_wlun->sdev_gendev)); 4965 return; 4966 } 4967 hba->luns_avail--; 4968 /* Ignore REPORT_LUN wlun probing */ 4969 if (hba->luns_avail == 1) { 4970 ufshcd_rpm_put(hba); 4971 return; 4972 } 4973 } else { 4974 /* 4975 * Device wlun is probed. The assumption is that WLUNs are 4976 * scanned before other LUNs. 4977 */ 4978 hba->luns_avail--; 4979 } 4980 } 4981 4982 /** 4983 * ufshcd_lu_init - Initialize the relevant parameters of the LU 4984 * @hba: per-adapter instance 4985 * @sdev: pointer to SCSI device 4986 */ 4987 static void ufshcd_lu_init(struct ufs_hba *hba, struct scsi_device *sdev) 4988 { 4989 int len = QUERY_DESC_MAX_SIZE; 4990 u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun); 4991 u8 lun_qdepth = hba->nutrs; 4992 u8 *desc_buf; 4993 int ret; 4994 4995 desc_buf = kzalloc(len, GFP_KERNEL); 4996 if (!desc_buf) 4997 goto set_qdepth; 4998 4999 ret = ufshcd_read_unit_desc_param(hba, lun, 0, desc_buf, len); 5000 if (ret < 0) { 5001 if (ret == -EOPNOTSUPP) 5002 /* If LU doesn't support unit descriptor, its queue depth is set to 1 */ 5003 lun_qdepth = 1; 5004 kfree(desc_buf); 5005 goto set_qdepth; 5006 } 5007 5008 if (desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH]) { 5009 /* 5010 * In per-LU queueing architecture, bLUQueueDepth will not be 0, then we will 5011 * use the smaller between UFSHCI CAP.NUTRS and UFS LU bLUQueueDepth 5012 */ 5013 lun_qdepth = min_t(int, desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH], hba->nutrs); 5014 } 5015 /* 5016 * According to UFS device specification, the write protection mode is only supported by 5017 * normal LU, not supported by WLUN. 5018 */ 5019 if (hba->dev_info.f_power_on_wp_en && lun < hba->dev_info.max_lu_supported && 5020 !hba->dev_info.is_lu_power_on_wp && 5021 desc_buf[UNIT_DESC_PARAM_LU_WR_PROTECT] == UFS_LU_POWER_ON_WP) 5022 hba->dev_info.is_lu_power_on_wp = true; 5023 5024 /* In case of RPMB LU, check if advanced RPMB mode is enabled */ 5025 if (desc_buf[UNIT_DESC_PARAM_UNIT_INDEX] == UFS_UPIU_RPMB_WLUN && 5026 desc_buf[RPMB_UNIT_DESC_PARAM_REGION_EN] & BIT(4)) 5027 hba->dev_info.b_advanced_rpmb_en = true; 5028 5029 5030 kfree(desc_buf); 5031 set_qdepth: 5032 /* 5033 * For WLUNs that don't support unit descriptor, queue depth is set to 1. 
For LUs whose 5034 * bLUQueueDepth == 0, the queue depth is set to a maximum value that host can queue. 5035 */ 5036 dev_dbg(hba->dev, "Set LU %x queue depth %d\n", lun, lun_qdepth); 5037 scsi_change_queue_depth(sdev, lun_qdepth); 5038 } 5039 5040 /** 5041 * ufshcd_slave_alloc - handle initial SCSI device configurations 5042 * @sdev: pointer to SCSI device 5043 * 5044 * Return: success. 5045 */ 5046 static int ufshcd_slave_alloc(struct scsi_device *sdev) 5047 { 5048 struct ufs_hba *hba; 5049 5050 hba = shost_priv(sdev->host); 5051 5052 /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */ 5053 sdev->use_10_for_ms = 1; 5054 5055 /* DBD field should be set to 1 in mode sense(10) */ 5056 sdev->set_dbd_for_ms = 1; 5057 5058 /* allow SCSI layer to restart the device in case of errors */ 5059 sdev->allow_restart = 1; 5060 5061 /* REPORT SUPPORTED OPERATION CODES is not supported */ 5062 sdev->no_report_opcodes = 1; 5063 5064 /* WRITE_SAME command is not supported */ 5065 sdev->no_write_same = 1; 5066 5067 ufshcd_lu_init(hba, sdev); 5068 5069 ufshcd_setup_links(hba, sdev); 5070 5071 return 0; 5072 } 5073 5074 /** 5075 * ufshcd_change_queue_depth - change queue depth 5076 * @sdev: pointer to SCSI device 5077 * @depth: required depth to set 5078 * 5079 * Change queue depth and make sure the max. limits are not crossed. 5080 * 5081 * Return: new queue depth. 5082 */ 5083 static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth) 5084 { 5085 return scsi_change_queue_depth(sdev, min(depth, sdev->host->can_queue)); 5086 } 5087 5088 /** 5089 * ufshcd_slave_configure - adjust SCSI device configurations 5090 * @sdev: pointer to SCSI device 5091 * 5092 * Return: 0 (success). 5093 */ 5094 static int ufshcd_slave_configure(struct scsi_device *sdev) 5095 { 5096 struct ufs_hba *hba = shost_priv(sdev->host); 5097 struct request_queue *q = sdev->request_queue; 5098 5099 blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1); 5100 if (hba->quirks & UFSHCD_QUIRK_4KB_DMA_ALIGNMENT) 5101 blk_queue_update_dma_alignment(q, SZ_4K - 1); 5102 /* 5103 * Block runtime-pm until all consumers are added. 5104 * Refer ufshcd_setup_links(). 5105 */ 5106 if (is_device_wlun(sdev)) 5107 pm_runtime_get_noresume(&sdev->sdev_gendev); 5108 else if (ufshcd_is_rpm_autosuspend_allowed(hba)) 5109 sdev->rpm_autosuspend = 1; 5110 /* 5111 * Do not print messages during runtime PM to avoid never-ending cycles 5112 * of messages written back to storage by user space causing runtime 5113 * resume, causing more messages and so on. 
5114 */ 5115 sdev->silence_suspend = 1; 5116 5117 ufshcd_crypto_register(hba, q); 5118 5119 return 0; 5120 } 5121 5122 /** 5123 * ufshcd_slave_destroy - remove SCSI device configurations 5124 * @sdev: pointer to SCSI device 5125 */ 5126 static void ufshcd_slave_destroy(struct scsi_device *sdev) 5127 { 5128 struct ufs_hba *hba; 5129 unsigned long flags; 5130 5131 hba = shost_priv(sdev->host); 5132 5133 /* Drop the reference as it won't be needed anymore */ 5134 if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) { 5135 spin_lock_irqsave(hba->host->host_lock, flags); 5136 hba->ufs_device_wlun = NULL; 5137 spin_unlock_irqrestore(hba->host->host_lock, flags); 5138 } else if (hba->ufs_device_wlun) { 5139 struct device *supplier = NULL; 5140 5141 /* Ensure UFS Device WLUN exists and does not disappear */ 5142 spin_lock_irqsave(hba->host->host_lock, flags); 5143 if (hba->ufs_device_wlun) { 5144 supplier = &hba->ufs_device_wlun->sdev_gendev; 5145 get_device(supplier); 5146 } 5147 spin_unlock_irqrestore(hba->host->host_lock, flags); 5148 5149 if (supplier) { 5150 /* 5151 * If a LUN fails to probe (e.g. absent BOOT WLUN), the 5152 * device will not have been registered but can still 5153 * have a device link holding a reference to the device. 5154 */ 5155 device_link_remove(&sdev->sdev_gendev, supplier); 5156 put_device(supplier); 5157 } 5158 } 5159 } 5160 5161 /** 5162 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status 5163 * @lrbp: pointer to local reference block of completed command 5164 * @scsi_status: SCSI command status 5165 * 5166 * Return: value base on SCSI command status. 5167 */ 5168 static inline int 5169 ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status) 5170 { 5171 int result = 0; 5172 5173 switch (scsi_status) { 5174 case SAM_STAT_CHECK_CONDITION: 5175 ufshcd_copy_sense_data(lrbp); 5176 fallthrough; 5177 case SAM_STAT_GOOD: 5178 result |= DID_OK << 16 | scsi_status; 5179 break; 5180 case SAM_STAT_TASK_SET_FULL: 5181 case SAM_STAT_BUSY: 5182 case SAM_STAT_TASK_ABORTED: 5183 ufshcd_copy_sense_data(lrbp); 5184 result |= scsi_status; 5185 break; 5186 default: 5187 result |= DID_ERROR << 16; 5188 break; 5189 } /* end of switch */ 5190 5191 return result; 5192 } 5193 5194 /** 5195 * ufshcd_transfer_rsp_status - Get overall status of the response 5196 * @hba: per adapter instance 5197 * @lrbp: pointer to local reference block of completed command 5198 * @cqe: pointer to the completion queue entry 5199 * 5200 * Return: result of the command to notify SCSI midlayer. 5201 */ 5202 static inline int 5203 ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, 5204 struct cq_entry *cqe) 5205 { 5206 int result = 0; 5207 int scsi_status; 5208 enum utp_ocs ocs; 5209 u8 upiu_flags; 5210 u32 resid; 5211 5212 upiu_flags = lrbp->ucd_rsp_ptr->header.flags; 5213 resid = be32_to_cpu(lrbp->ucd_rsp_ptr->sr.residual_transfer_count); 5214 /* 5215 * Test !overflow instead of underflow to support UFS devices that do 5216 * not set either flag. 
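* In other words, a non-zero residual_transfer_count is honoured unless the * device explicitly reports an overflow.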
5217 */ 5218 if (resid && !(upiu_flags & UPIU_RSP_FLAG_OVERFLOW)) 5219 scsi_set_resid(lrbp->cmd, resid); 5220 5221 /* overall command status of utrd */ 5222 ocs = ufshcd_get_tr_ocs(lrbp, cqe); 5223 5224 if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) { 5225 if (lrbp->ucd_rsp_ptr->header.response || 5226 lrbp->ucd_rsp_ptr->header.status) 5227 ocs = OCS_SUCCESS; 5228 } 5229 5230 switch (ocs) { 5231 case OCS_SUCCESS: 5232 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0); 5233 switch (ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr)) { 5234 case UPIU_TRANSACTION_RESPONSE: 5235 /* 5236 * get the result based on SCSI status response 5237 * to notify the SCSI midlayer of the command status 5238 */ 5239 scsi_status = lrbp->ucd_rsp_ptr->header.status; 5240 result = ufshcd_scsi_cmd_status(lrbp, scsi_status); 5241 5242 /* 5243 * Currently we are only supporting BKOPs exception 5244 * events hence we can ignore BKOPs exception event 5245 * during power management callbacks. BKOPs exception 5246 * event is not expected to be raised in runtime suspend 5247 * callback as it allows the urgent bkops. 5248 * During system suspend, we are anyway forcefully 5249 * disabling the bkops and if urgent bkops is needed 5250 * it will be enabled on system resume. Long term 5251 * solution could be to abort the system suspend if 5252 * UFS device needs urgent BKOPs. 5253 */ 5254 if (!hba->pm_op_in_progress && 5255 !ufshcd_eh_in_progress(hba) && 5256 ufshcd_is_exception_event(lrbp->ucd_rsp_ptr)) 5257 /* Flushed in suspend */ 5258 schedule_work(&hba->eeh_work); 5259 break; 5260 case UPIU_TRANSACTION_REJECT_UPIU: 5261 /* TODO: handle Reject UPIU Response */ 5262 result = DID_ERROR << 16; 5263 dev_err(hba->dev, 5264 "Reject UPIU not fully implemented\n"); 5265 break; 5266 default: 5267 dev_err(hba->dev, 5268 "Unexpected request response code = %x\n", 5269 result); 5270 result = DID_ERROR << 16; 5271 break; 5272 } 5273 break; 5274 case OCS_ABORTED: 5275 result |= DID_ABORT << 16; 5276 break; 5277 case OCS_INVALID_COMMAND_STATUS: 5278 result |= DID_REQUEUE << 16; 5279 break; 5280 case OCS_INVALID_CMD_TABLE_ATTR: 5281 case OCS_INVALID_PRDT_ATTR: 5282 case OCS_MISMATCH_DATA_BUF_SIZE: 5283 case OCS_MISMATCH_RESP_UPIU_SIZE: 5284 case OCS_PEER_COMM_FAILURE: 5285 case OCS_FATAL_ERROR: 5286 case OCS_DEVICE_FATAL_ERROR: 5287 case OCS_INVALID_CRYPTO_CONFIG: 5288 case OCS_GENERAL_CRYPTO_ERROR: 5289 default: 5290 result |= DID_ERROR << 16; 5291 dev_err(hba->dev, 5292 "OCS error from controller = %x for tag %d\n", 5293 ocs, lrbp->task_tag); 5294 ufshcd_print_evt_hist(hba); 5295 ufshcd_print_host_state(hba); 5296 break; 5297 } /* end of switch */ 5298 5299 if ((host_byte(result) != DID_OK) && 5300 (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs) 5301 ufshcd_print_tr(hba, lrbp->task_tag, true); 5302 return result; 5303 } 5304 5305 static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba, 5306 u32 intr_mask) 5307 { 5308 if (!ufshcd_is_auto_hibern8_supported(hba) || 5309 !ufshcd_is_auto_hibern8_enabled(hba)) 5310 return false; 5311 5312 if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK)) 5313 return false; 5314 5315 if (hba->active_uic_cmd && 5316 (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER || 5317 hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT)) 5318 return false; 5319 5320 return true; 5321 } 5322 5323 /** 5324 * ufshcd_uic_cmd_compl - handle completion of uic command 5325 * @hba: per adapter instance 5326 * @intr_status: interrupt status generated by the controller 5327 * 5328 * Return: 5329 * 
IRQ_HANDLED - If interrupt is valid 5330 * IRQ_NONE - If invalid interrupt 5331 */ 5332 static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) 5333 { 5334 irqreturn_t retval = IRQ_NONE; 5335 5336 spin_lock(hba->host->host_lock); 5337 if (ufshcd_is_auto_hibern8_error(hba, intr_status)) 5338 hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status); 5339 5340 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) { 5341 hba->active_uic_cmd->argument2 |= 5342 ufshcd_get_uic_cmd_result(hba); 5343 hba->active_uic_cmd->argument3 = 5344 ufshcd_get_dme_attr_val(hba); 5345 if (!hba->uic_async_done) 5346 hba->active_uic_cmd->cmd_active = 0; 5347 complete(&hba->active_uic_cmd->done); 5348 retval = IRQ_HANDLED; 5349 } 5350 5351 if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) { 5352 hba->active_uic_cmd->cmd_active = 0; 5353 complete(hba->uic_async_done); 5354 retval = IRQ_HANDLED; 5355 } 5356 5357 if (retval == IRQ_HANDLED) 5358 ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd, 5359 UFS_CMD_COMP); 5360 spin_unlock(hba->host->host_lock); 5361 return retval; 5362 } 5363 5364 /* Release the resources allocated for processing a SCSI command. */ 5365 void ufshcd_release_scsi_cmd(struct ufs_hba *hba, 5366 struct ufshcd_lrb *lrbp) 5367 { 5368 struct scsi_cmnd *cmd = lrbp->cmd; 5369 5370 scsi_dma_unmap(cmd); 5371 ufshcd_release(hba); 5372 ufshcd_clk_scaling_update_busy(hba); 5373 } 5374 5375 /** 5376 * ufshcd_compl_one_cqe - handle a completion queue entry 5377 * @hba: per adapter instance 5378 * @task_tag: the task tag of the request to be completed 5379 * @cqe: pointer to the completion queue entry 5380 */ 5381 void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag, 5382 struct cq_entry *cqe) 5383 { 5384 struct ufshcd_lrb *lrbp; 5385 struct scsi_cmnd *cmd; 5386 enum utp_ocs ocs; 5387 5388 lrbp = &hba->lrb[task_tag]; 5389 lrbp->compl_time_stamp = ktime_get(); 5390 cmd = lrbp->cmd; 5391 if (cmd) { 5392 if (unlikely(ufshcd_should_inform_monitor(hba, lrbp))) 5393 ufshcd_update_monitor(hba, lrbp); 5394 ufshcd_add_command_trace(hba, task_tag, UFS_CMD_COMP); 5395 cmd->result = ufshcd_transfer_rsp_status(hba, lrbp, cqe); 5396 ufshcd_release_scsi_cmd(hba, lrbp); 5397 /* Do not touch lrbp after scsi done */ 5398 scsi_done(cmd); 5399 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE || 5400 lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) { 5401 if (hba->dev_cmd.complete) { 5402 if (cqe) { 5403 ocs = le32_to_cpu(cqe->status) & MASK_OCS; 5404 lrbp->utr_descriptor_ptr->header.ocs = ocs; 5405 } 5406 complete(hba->dev_cmd.complete); 5407 ufshcd_clk_scaling_update_busy(hba); 5408 } 5409 } 5410 } 5411 5412 /** 5413 * __ufshcd_transfer_req_compl - handle SCSI and query command completion 5414 * @hba: per adapter instance 5415 * @completed_reqs: bitmask that indicates which requests to complete 5416 */ 5417 static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, 5418 unsigned long completed_reqs) 5419 { 5420 int tag; 5421 5422 for_each_set_bit(tag, &completed_reqs, hba->nutrs) 5423 ufshcd_compl_one_cqe(hba, tag, NULL); 5424 } 5425 5426 /* Any value that is not an existing queue number is fine for this constant. 
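* A value of -1 is used because real blk-mq hardware queue indices start at 0 * and never reach it, so the sentinel cannot collide with an actual poll queue.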
*/ 5427 enum { 5428 UFSHCD_POLL_FROM_INTERRUPT_CONTEXT = -1 5429 }; 5430 5431 static void ufshcd_clear_polled(struct ufs_hba *hba, 5432 unsigned long *completed_reqs) 5433 { 5434 int tag; 5435 5436 for_each_set_bit(tag, completed_reqs, hba->nutrs) { 5437 struct scsi_cmnd *cmd = hba->lrb[tag].cmd; 5438 5439 if (!cmd) 5440 continue; 5441 if (scsi_cmd_to_rq(cmd)->cmd_flags & REQ_POLLED) 5442 __clear_bit(tag, completed_reqs); 5443 } 5444 } 5445 5446 /* 5447 * Return: > 0 if one or more commands have been completed or 0 if no 5448 * requests have been completed. 5449 */ 5450 static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num) 5451 { 5452 struct ufs_hba *hba = shost_priv(shost); 5453 unsigned long completed_reqs, flags; 5454 u32 tr_doorbell; 5455 struct ufs_hw_queue *hwq; 5456 5457 if (is_mcq_enabled(hba)) { 5458 hwq = &hba->uhq[queue_num]; 5459 5460 return ufshcd_mcq_poll_cqe_lock(hba, hwq); 5461 } 5462 5463 spin_lock_irqsave(&hba->outstanding_lock, flags); 5464 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); 5465 completed_reqs = ~tr_doorbell & hba->outstanding_reqs; 5466 WARN_ONCE(completed_reqs & ~hba->outstanding_reqs, 5467 "completed: %#lx; outstanding: %#lx\n", completed_reqs, 5468 hba->outstanding_reqs); 5469 if (queue_num == UFSHCD_POLL_FROM_INTERRUPT_CONTEXT) { 5470 /* Do not complete polled requests from interrupt context. */ 5471 ufshcd_clear_polled(hba, &completed_reqs); 5472 } 5473 hba->outstanding_reqs &= ~completed_reqs; 5474 spin_unlock_irqrestore(&hba->outstanding_lock, flags); 5475 5476 if (completed_reqs) 5477 __ufshcd_transfer_req_compl(hba, completed_reqs); 5478 5479 return completed_reqs != 0; 5480 } 5481 5482 /** 5483 * ufshcd_mcq_compl_pending_transfer - MCQ mode function. It is 5484 * invoked from the error handler context or ufshcd_host_reset_and_restore() 5485 * to complete the pending transfers and free the resources associated with 5486 * the scsi command. 5487 * 5488 * @hba: per adapter instance 5489 * @force_compl: This flag is set to true when invoked 5490 * from ufshcd_host_reset_and_restore() in which case it requires special 5491 * handling because the host controller has been reset by ufshcd_hba_stop(). 5492 */ 5493 static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba, 5494 bool force_compl) 5495 { 5496 struct ufs_hw_queue *hwq; 5497 struct ufshcd_lrb *lrbp; 5498 struct scsi_cmnd *cmd; 5499 unsigned long flags; 5500 u32 hwq_num, utag; 5501 int tag; 5502 5503 for (tag = 0; tag < hba->nutrs; tag++) { 5504 lrbp = &hba->lrb[tag]; 5505 cmd = lrbp->cmd; 5506 if (!ufshcd_cmd_inflight(cmd) || 5507 test_bit(SCMD_STATE_COMPLETE, &cmd->state)) 5508 continue; 5509 5510 utag = blk_mq_unique_tag(scsi_cmd_to_rq(cmd)); 5511 hwq_num = blk_mq_unique_tag_to_hwq(utag); 5512 hwq = &hba->uhq[hwq_num]; 5513 5514 if (force_compl) { 5515 ufshcd_mcq_compl_all_cqes_lock(hba, hwq); 5516 /* 5517 * For those cmds of which the cqes are not present 5518 * in the cq, complete them explicitly. 
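* They are finished with DID_REQUEUE so that the SCSI midlayer resubmits them * once the controller is brought back up.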
5519 */ 5520 if (cmd && !test_bit(SCMD_STATE_COMPLETE, &cmd->state)) { 5521 spin_lock_irqsave(&hwq->cq_lock, flags); 5522 set_host_byte(cmd, DID_REQUEUE); 5523 ufshcd_release_scsi_cmd(hba, lrbp); 5524 scsi_done(cmd); 5525 spin_unlock_irqrestore(&hwq->cq_lock, flags); 5526 } 5527 } else { 5528 ufshcd_mcq_poll_cqe_lock(hba, hwq); 5529 } 5530 } 5531 } 5532 5533 /** 5534 * ufshcd_transfer_req_compl - handle SCSI and query command completion 5535 * @hba: per adapter instance 5536 * 5537 * Return: 5538 * IRQ_HANDLED - If interrupt is valid 5539 * IRQ_NONE - If invalid interrupt 5540 */ 5541 static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba) 5542 { 5543 /* Resetting interrupt aggregation counters first and reading the 5544 * DOOR_BELL afterward allows us to handle all the completed requests. 5545 * In order to prevent other interrupts starvation the DB is read once 5546 * after reset. The down side of this solution is the possibility of 5547 * false interrupt if device completes another request after resetting 5548 * aggregation and before reading the DB. 5549 */ 5550 if (ufshcd_is_intr_aggr_allowed(hba) && 5551 !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR)) 5552 ufshcd_reset_intr_aggr(hba); 5553 5554 if (ufs_fail_completion()) 5555 return IRQ_HANDLED; 5556 5557 /* 5558 * Ignore the ufshcd_poll() return value and return IRQ_HANDLED since we 5559 * do not want polling to trigger spurious interrupt complaints. 5560 */ 5561 ufshcd_poll(hba->host, UFSHCD_POLL_FROM_INTERRUPT_CONTEXT); 5562 5563 return IRQ_HANDLED; 5564 } 5565 5566 int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask) 5567 { 5568 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, 5569 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, 5570 &ee_ctrl_mask); 5571 } 5572 5573 int ufshcd_write_ee_control(struct ufs_hba *hba) 5574 { 5575 int err; 5576 5577 mutex_lock(&hba->ee_ctrl_mutex); 5578 err = __ufshcd_write_ee_control(hba, hba->ee_ctrl_mask); 5579 mutex_unlock(&hba->ee_ctrl_mutex); 5580 if (err) 5581 dev_err(hba->dev, "%s: failed to write ee control %d\n", 5582 __func__, err); 5583 return err; 5584 } 5585 5586 int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask, 5587 const u16 *other_mask, u16 set, u16 clr) 5588 { 5589 u16 new_mask, ee_ctrl_mask; 5590 int err = 0; 5591 5592 mutex_lock(&hba->ee_ctrl_mutex); 5593 new_mask = (*mask & ~clr) | set; 5594 ee_ctrl_mask = new_mask | *other_mask; 5595 if (ee_ctrl_mask != hba->ee_ctrl_mask) 5596 err = __ufshcd_write_ee_control(hba, ee_ctrl_mask); 5597 /* Still need to update 'mask' even if 'ee_ctrl_mask' was unchanged */ 5598 if (!err) { 5599 hba->ee_ctrl_mask = ee_ctrl_mask; 5600 *mask = new_mask; 5601 } 5602 mutex_unlock(&hba->ee_ctrl_mutex); 5603 return err; 5604 } 5605 5606 /** 5607 * ufshcd_disable_ee - disable exception event 5608 * @hba: per-adapter instance 5609 * @mask: exception event to disable 5610 * 5611 * Disables exception event in the device so that the EVENT_ALERT 5612 * bit is not set. 5613 * 5614 * Return: zero on success, non-zero error value on failure. 5615 */ 5616 static inline int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask) 5617 { 5618 return ufshcd_update_ee_drv_mask(hba, 0, mask); 5619 } 5620 5621 /** 5622 * ufshcd_enable_ee - enable exception event 5623 * @hba: per-adapter instance 5624 * @mask: exception event to enable 5625 * 5626 * Enable corresponding exception event in the device to allow 5627 * device to alert host in critical scenarios. 5628 * 5629 * Return: zero on success, non-zero error value on failure. 
5630 */ 5631 static inline int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask) 5632 { 5633 return ufshcd_update_ee_drv_mask(hba, mask, 0); 5634 } 5635 5636 /** 5637 * ufshcd_enable_auto_bkops - Allow device managed BKOPS 5638 * @hba: per-adapter instance 5639 * 5640 * Allow device to manage background operations on its own. Enabling 5641 * this might lead to inconsistent latencies during normal data transfers 5642 * as the device is allowed to manage its own way of handling background 5643 * operations. 5644 * 5645 * Return: zero on success, non-zero on failure. 5646 */ 5647 static int ufshcd_enable_auto_bkops(struct ufs_hba *hba) 5648 { 5649 int err = 0; 5650 5651 if (hba->auto_bkops_enabled) 5652 goto out; 5653 5654 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG, 5655 QUERY_FLAG_IDN_BKOPS_EN, 0, NULL); 5656 if (err) { 5657 dev_err(hba->dev, "%s: failed to enable bkops %d\n", 5658 __func__, err); 5659 goto out; 5660 } 5661 5662 hba->auto_bkops_enabled = true; 5663 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled"); 5664 5665 /* No need of URGENT_BKOPS exception from the device */ 5666 err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS); 5667 if (err) 5668 dev_err(hba->dev, "%s: failed to disable exception event %d\n", 5669 __func__, err); 5670 out: 5671 return err; 5672 } 5673 5674 /** 5675 * ufshcd_disable_auto_bkops - block device in doing background operations 5676 * @hba: per-adapter instance 5677 * 5678 * Disabling background operations improves command response latency but 5679 * has drawback of device moving into critical state where the device is 5680 * not-operable. Make sure to call ufshcd_enable_auto_bkops() whenever the 5681 * host is idle so that BKOPS are managed effectively without any negative 5682 * impacts. 5683 * 5684 * Return: zero on success, non-zero on failure. 5685 */ 5686 static int ufshcd_disable_auto_bkops(struct ufs_hba *hba) 5687 { 5688 int err = 0; 5689 5690 if (!hba->auto_bkops_enabled) 5691 goto out; 5692 5693 /* 5694 * If host assisted BKOPs is to be enabled, make sure 5695 * urgent bkops exception is allowed. 5696 */ 5697 err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS); 5698 if (err) { 5699 dev_err(hba->dev, "%s: failed to enable exception event %d\n", 5700 __func__, err); 5701 goto out; 5702 } 5703 5704 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG, 5705 QUERY_FLAG_IDN_BKOPS_EN, 0, NULL); 5706 if (err) { 5707 dev_err(hba->dev, "%s: failed to disable bkops %d\n", 5708 __func__, err); 5709 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS); 5710 goto out; 5711 } 5712 5713 hba->auto_bkops_enabled = false; 5714 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled"); 5715 hba->is_urgent_bkops_lvl_checked = false; 5716 out: 5717 return err; 5718 } 5719 5720 /** 5721 * ufshcd_force_reset_auto_bkops - force reset auto bkops state 5722 * @hba: per adapter instance 5723 * 5724 * After a device reset the device may toggle the BKOPS_EN flag 5725 * to default value. The s/w tracking variables should be updated 5726 * as well. This function would change the auto-bkops state based on 5727 * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND. 
5728 */ 5729 static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba) 5730 { 5731 if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) { 5732 hba->auto_bkops_enabled = false; 5733 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS; 5734 ufshcd_enable_auto_bkops(hba); 5735 } else { 5736 hba->auto_bkops_enabled = true; 5737 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS; 5738 ufshcd_disable_auto_bkops(hba); 5739 } 5740 hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT; 5741 hba->is_urgent_bkops_lvl_checked = false; 5742 } 5743 5744 static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status) 5745 { 5746 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, 5747 QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status); 5748 } 5749 5750 /** 5751 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status 5752 * @hba: per-adapter instance 5753 * @status: bkops_status value 5754 * 5755 * Read the bkops_status from the UFS device and enable the fBackgroundOpsEn 5756 * flag in the device to permit background operations if the device 5757 * bkops_status is greater than or equal to the "status" argument passed to 5758 * this function, and disable it otherwise. 5759 * 5760 * Return: 0 for success, non-zero in case of failure. 5761 * 5762 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag 5763 * to know whether auto bkops is enabled or disabled after this function 5764 * returns control to it. 5765 */ 5766 static int ufshcd_bkops_ctrl(struct ufs_hba *hba, 5767 enum bkops_status status) 5768 { 5769 int err; 5770 u32 curr_status = 0; 5771 5772 err = ufshcd_get_bkops_status(hba, &curr_status); 5773 if (err) { 5774 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n", 5775 __func__, err); 5776 goto out; 5777 } else if (curr_status > BKOPS_STATUS_MAX) { 5778 dev_err(hba->dev, "%s: invalid BKOPS status %d\n", 5779 __func__, curr_status); 5780 err = -EINVAL; 5781 goto out; 5782 } 5783 5784 if (curr_status >= status) 5785 err = ufshcd_enable_auto_bkops(hba); 5786 else 5787 err = ufshcd_disable_auto_bkops(hba); 5788 out: 5789 return err; 5790 } 5791 5792 /** 5793 * ufshcd_urgent_bkops - handle urgent bkops exception event 5794 * @hba: per-adapter instance 5795 * 5796 * Enable the fBackgroundOpsEn flag in the device to permit background 5797 * operations. 5798 * 5799 * Auto bkops is enabled if the current bkops status is at or above the 5800 * urgent bkops level, and disabled otherwise. 5801 * 5802 * Return: 0 upon success; < 0 upon failure. 5803 */ 5804 static int ufshcd_urgent_bkops(struct ufs_hba *hba) 5805 { 5806 return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl); 5807 } 5808 5809 static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status) 5810 { 5811 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, 5812 QUERY_ATTR_IDN_EE_STATUS, 0, 0, status); 5813 } 5814 5815 static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba) 5816 { 5817 int err; 5818 u32 curr_status = 0; 5819 5820 if (hba->is_urgent_bkops_lvl_checked) 5821 goto enable_auto_bkops; 5822 5823 err = ufshcd_get_bkops_status(hba, &curr_status); 5824 if (err) { 5825 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n", 5826 __func__, err); 5827 goto out; 5828 } 5829 5830 /* 5831 * We are seeing that some devices are raising the urgent bkops 5832 * exception events even when the BKOPS status doesn't indicate performance 5833 * impacted or critical. Handle these devices by determining their urgent 5834 * bkops status at runtime.
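 * In that case the reported status is latched as the urgent bkops level and
 * is_urgent_bkops_lvl_checked is set, so the query is not repeated on every
 * subsequent exception event.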
5835 */ 5836 if (curr_status < BKOPS_STATUS_PERF_IMPACT) { 5837 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n", 5838 __func__, curr_status); 5839 /* update the current status as the urgent bkops level */ 5840 hba->urgent_bkops_lvl = curr_status; 5841 hba->is_urgent_bkops_lvl_checked = true; 5842 } 5843 5844 enable_auto_bkops: 5845 err = ufshcd_enable_auto_bkops(hba); 5846 out: 5847 if (err < 0) 5848 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n", 5849 __func__, err); 5850 } 5851 5852 static void ufshcd_temp_exception_event_handler(struct ufs_hba *hba, u16 status) 5853 { 5854 u32 value; 5855 5856 if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, 5857 QUERY_ATTR_IDN_CASE_ROUGH_TEMP, 0, 0, &value)) 5858 return; 5859 5860 dev_info(hba->dev, "exception Tcase %d\n", value - 80); 5861 5862 ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP); 5863 5864 /* 5865 * A placeholder for the platform vendors to add whatever additional 5866 * steps required 5867 */ 5868 } 5869 5870 static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn) 5871 { 5872 u8 index; 5873 enum query_opcode opcode = set ? UPIU_QUERY_OPCODE_SET_FLAG : 5874 UPIU_QUERY_OPCODE_CLEAR_FLAG; 5875 5876 index = ufshcd_wb_get_query_index(hba); 5877 return ufshcd_query_flag_retry(hba, opcode, idn, index, NULL); 5878 } 5879 5880 int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable) 5881 { 5882 int ret; 5883 5884 if (!ufshcd_is_wb_allowed(hba) || 5885 hba->dev_info.wb_enabled == enable) 5886 return 0; 5887 5888 ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_EN); 5889 if (ret) { 5890 dev_err(hba->dev, "%s: Write Booster %s failed %d\n", 5891 __func__, enable ? "enabling" : "disabling", ret); 5892 return ret; 5893 } 5894 5895 hba->dev_info.wb_enabled = enable; 5896 dev_dbg(hba->dev, "%s: Write Booster %s\n", 5897 __func__, enable ? "enabled" : "disabled"); 5898 5899 return ret; 5900 } 5901 5902 static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba, 5903 bool enable) 5904 { 5905 int ret; 5906 5907 ret = __ufshcd_wb_toggle(hba, enable, 5908 QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8); 5909 if (ret) { 5910 dev_err(hba->dev, "%s: WB-Buf Flush during H8 %s failed %d\n", 5911 __func__, enable ? "enabling" : "disabling", ret); 5912 return; 5913 } 5914 dev_dbg(hba->dev, "%s: WB-Buf Flush during H8 %s\n", 5915 __func__, enable ? "enabled" : "disabled"); 5916 } 5917 5918 int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable) 5919 { 5920 int ret; 5921 5922 if (!ufshcd_is_wb_allowed(hba) || 5923 hba->dev_info.wb_buf_flush_enabled == enable) 5924 return 0; 5925 5926 ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN); 5927 if (ret) { 5928 dev_err(hba->dev, "%s: WB-Buf Flush %s failed %d\n", 5929 __func__, enable ? "enabling" : "disabling", ret); 5930 return ret; 5931 } 5932 5933 hba->dev_info.wb_buf_flush_enabled = enable; 5934 dev_dbg(hba->dev, "%s: WB-Buf Flush %s\n", 5935 __func__, enable ? 
"enabled" : "disabled"); 5936 5937 return ret; 5938 } 5939 5940 static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba, 5941 u32 avail_buf) 5942 { 5943 u32 cur_buf; 5944 int ret; 5945 u8 index; 5946 5947 index = ufshcd_wb_get_query_index(hba); 5948 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, 5949 QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE, 5950 index, 0, &cur_buf); 5951 if (ret) { 5952 dev_err(hba->dev, "%s: dCurWriteBoosterBufferSize read failed %d\n", 5953 __func__, ret); 5954 return false; 5955 } 5956 5957 if (!cur_buf) { 5958 dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n", 5959 cur_buf); 5960 return false; 5961 } 5962 /* Let it continue to flush when available buffer exceeds threshold */ 5963 return avail_buf < hba->vps->wb_flush_threshold; 5964 } 5965 5966 static void ufshcd_wb_force_disable(struct ufs_hba *hba) 5967 { 5968 if (ufshcd_is_wb_buf_flush_allowed(hba)) 5969 ufshcd_wb_toggle_buf_flush(hba, false); 5970 5971 ufshcd_wb_toggle_buf_flush_during_h8(hba, false); 5972 ufshcd_wb_toggle(hba, false); 5973 hba->caps &= ~UFSHCD_CAP_WB_EN; 5974 5975 dev_info(hba->dev, "%s: WB force disabled\n", __func__); 5976 } 5977 5978 static bool ufshcd_is_wb_buf_lifetime_available(struct ufs_hba *hba) 5979 { 5980 u32 lifetime; 5981 int ret; 5982 u8 index; 5983 5984 index = ufshcd_wb_get_query_index(hba); 5985 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, 5986 QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST, 5987 index, 0, &lifetime); 5988 if (ret) { 5989 dev_err(hba->dev, 5990 "%s: bWriteBoosterBufferLifeTimeEst read failed %d\n", 5991 __func__, ret); 5992 return false; 5993 } 5994 5995 if (lifetime == UFS_WB_EXCEED_LIFETIME) { 5996 dev_err(hba->dev, "%s: WB buf lifetime is exhausted 0x%02X\n", 5997 __func__, lifetime); 5998 return false; 5999 } 6000 6001 dev_dbg(hba->dev, "%s: WB buf lifetime is 0x%02X\n", 6002 __func__, lifetime); 6003 6004 return true; 6005 } 6006 6007 static bool ufshcd_wb_need_flush(struct ufs_hba *hba) 6008 { 6009 int ret; 6010 u32 avail_buf; 6011 u8 index; 6012 6013 if (!ufshcd_is_wb_allowed(hba)) 6014 return false; 6015 6016 if (!ufshcd_is_wb_buf_lifetime_available(hba)) { 6017 ufshcd_wb_force_disable(hba); 6018 return false; 6019 } 6020 6021 /* 6022 * The ufs device needs the vcc to be ON to flush. 6023 * With user-space reduction enabled, it's enough to enable flush 6024 * by checking only the available buffer. The threshold 6025 * defined here is > 90% full. 6026 * With user-space preserved enabled, the current-buffer 6027 * should be checked too because the wb buffer size can reduce 6028 * when disk tends to be full. This info is provided by current 6029 * buffer (dCurrentWriteBoosterBufferSize). There's no point in 6030 * keeping vcc on when current buffer is empty. 
6031 */ 6032 index = ufshcd_wb_get_query_index(hba); 6033 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, 6034 QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE, 6035 index, 0, &avail_buf); 6036 if (ret) { 6037 dev_warn(hba->dev, "%s: dAvailableWriteBoosterBufferSize read failed %d\n", 6038 __func__, ret); 6039 return false; 6040 } 6041 6042 if (!hba->dev_info.b_presrv_uspc_en) 6043 return avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10); 6044 6045 return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf); 6046 } 6047 6048 static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work) 6049 { 6050 struct ufs_hba *hba = container_of(to_delayed_work(work), 6051 struct ufs_hba, 6052 rpm_dev_flush_recheck_work); 6053 /* 6054 * To prevent unnecessary VCC power drain after device finishes 6055 * WriteBooster buffer flush or Auto BKOPs, force runtime resume 6056 * after a certain delay to recheck the threshold by next runtime 6057 * suspend. 6058 */ 6059 ufshcd_rpm_get_sync(hba); 6060 ufshcd_rpm_put_sync(hba); 6061 } 6062 6063 /** 6064 * ufshcd_exception_event_handler - handle exceptions raised by device 6065 * @work: pointer to work data 6066 * 6067 * Read bExceptionEventStatus attribute from the device and handle the 6068 * exception event accordingly. 6069 */ 6070 static void ufshcd_exception_event_handler(struct work_struct *work) 6071 { 6072 struct ufs_hba *hba; 6073 int err; 6074 u32 status = 0; 6075 hba = container_of(work, struct ufs_hba, eeh_work); 6076 6077 ufshcd_scsi_block_requests(hba); 6078 err = ufshcd_get_ee_status(hba, &status); 6079 if (err) { 6080 dev_err(hba->dev, "%s: failed to get exception status %d\n", 6081 __func__, err); 6082 goto out; 6083 } 6084 6085 trace_ufshcd_exception_event(dev_name(hba->dev), status); 6086 6087 if (status & hba->ee_drv_mask & MASK_EE_URGENT_BKOPS) 6088 ufshcd_bkops_exception_event_handler(hba); 6089 6090 if (status & hba->ee_drv_mask & MASK_EE_URGENT_TEMP) 6091 ufshcd_temp_exception_event_handler(hba, status); 6092 6093 ufs_debugfs_exception_event(hba, status); 6094 out: 6095 ufshcd_scsi_unblock_requests(hba); 6096 } 6097 6098 /* Complete requests that have door-bell cleared */ 6099 static void ufshcd_complete_requests(struct ufs_hba *hba, bool force_compl) 6100 { 6101 if (is_mcq_enabled(hba)) 6102 ufshcd_mcq_compl_pending_transfer(hba, force_compl); 6103 else 6104 ufshcd_transfer_req_compl(hba); 6105 6106 ufshcd_tmc_handler(hba); 6107 } 6108 6109 /** 6110 * ufshcd_quirk_dl_nac_errors - This function checks if error handling is 6111 * to recover from the DL NAC errors or not. 6112 * @hba: per-adapter instance 6113 * 6114 * Return: true if error handling is required, false otherwise. 6115 */ 6116 static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba) 6117 { 6118 unsigned long flags; 6119 bool err_handling = true; 6120 6121 spin_lock_irqsave(hba->host->host_lock, flags); 6122 /* 6123 * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only workaround the 6124 * device fatal error and/or DL NAC & REPLAY timeout errors. 6125 */ 6126 if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR)) 6127 goto out; 6128 6129 if ((hba->saved_err & DEVICE_FATAL_ERROR) || 6130 ((hba->saved_err & UIC_ERROR) && 6131 (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR))) 6132 goto out; 6133 6134 if ((hba->saved_err & UIC_ERROR) && 6135 (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) { 6136 int err; 6137 /* 6138 * wait for 50ms to see if we can get any other errors or not. 
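 * The host lock is dropped across the sleep and re-acquired before the
 * saved error flags are examined again.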
6139 */ 6140 spin_unlock_irqrestore(hba->host->host_lock, flags); 6141 msleep(50); 6142 spin_lock_irqsave(hba->host->host_lock, flags); 6143 6144 /* 6145 * now check if we have got any other severe errors other than 6146 * DL NAC error? 6147 */ 6148 if ((hba->saved_err & INT_FATAL_ERRORS) || 6149 ((hba->saved_err & UIC_ERROR) && 6150 (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR))) 6151 goto out; 6152 6153 /* 6154 * As DL NAC is the only error received so far, send out NOP 6155 * command to confirm if link is still active or not. 6156 * - If we don't get any response then do error recovery. 6157 * - If we get response then clear the DL NAC error bit. 6158 */ 6159 6160 spin_unlock_irqrestore(hba->host->host_lock, flags); 6161 err = ufshcd_verify_dev_init(hba); 6162 spin_lock_irqsave(hba->host->host_lock, flags); 6163 6164 if (err) 6165 goto out; 6166 6167 /* Link seems to be alive hence ignore the DL NAC errors */ 6168 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR) 6169 hba->saved_err &= ~UIC_ERROR; 6170 /* clear NAC error */ 6171 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR; 6172 if (!hba->saved_uic_err) 6173 err_handling = false; 6174 } 6175 out: 6176 spin_unlock_irqrestore(hba->host->host_lock, flags); 6177 return err_handling; 6178 } 6179 6180 /* host lock must be held before calling this func */ 6181 static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba) 6182 { 6183 return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) || 6184 (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)); 6185 } 6186 6187 void ufshcd_schedule_eh_work(struct ufs_hba *hba) 6188 { 6189 lockdep_assert_held(hba->host->host_lock); 6190 6191 /* handle fatal errors only when link is not in error state */ 6192 if (hba->ufshcd_state != UFSHCD_STATE_ERROR) { 6193 if (hba->force_reset || ufshcd_is_link_broken(hba) || 6194 ufshcd_is_saved_err_fatal(hba)) 6195 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL; 6196 else 6197 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL; 6198 queue_work(hba->eh_wq, &hba->eh_work); 6199 } 6200 } 6201 6202 static void ufshcd_force_error_recovery(struct ufs_hba *hba) 6203 { 6204 spin_lock_irq(hba->host->host_lock); 6205 hba->force_reset = true; 6206 ufshcd_schedule_eh_work(hba); 6207 spin_unlock_irq(hba->host->host_lock); 6208 } 6209 6210 static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow) 6211 { 6212 mutex_lock(&hba->wb_mutex); 6213 down_write(&hba->clk_scaling_lock); 6214 hba->clk_scaling.is_allowed = allow; 6215 up_write(&hba->clk_scaling_lock); 6216 mutex_unlock(&hba->wb_mutex); 6217 } 6218 6219 static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend) 6220 { 6221 if (suspend) { 6222 if (hba->clk_scaling.is_enabled) 6223 ufshcd_suspend_clkscaling(hba); 6224 ufshcd_clk_scaling_allow(hba, false); 6225 } else { 6226 ufshcd_clk_scaling_allow(hba, true); 6227 if (hba->clk_scaling.is_enabled) 6228 ufshcd_resume_clkscaling(hba); 6229 } 6230 } 6231 6232 static void ufshcd_err_handling_prepare(struct ufs_hba *hba) 6233 { 6234 ufshcd_rpm_get_sync(hba); 6235 if (pm_runtime_status_suspended(&hba->ufs_device_wlun->sdev_gendev) || 6236 hba->is_sys_suspended) { 6237 enum ufs_pm_op pm_op; 6238 6239 /* 6240 * Don't assume anything of resume, if 6241 * resume fails, irq and clocks can be OFF, and powers 6242 * can be OFF or in LPM. 
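 * Therefore the power-up sequence below (regulators, IRQ, clocks and the
 * vendor resume hook) is replayed explicitly.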
6243 */ 6244 ufshcd_setup_hba_vreg(hba, true); 6245 ufshcd_enable_irq(hba); 6246 ufshcd_setup_vreg(hba, true); 6247 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq); 6248 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2); 6249 ufshcd_hold(hba); 6250 if (!ufshcd_is_clkgating_allowed(hba)) 6251 ufshcd_setup_clocks(hba, true); 6252 ufshcd_release(hba); 6253 pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM; 6254 ufshcd_vops_resume(hba, pm_op); 6255 } else { 6256 ufshcd_hold(hba); 6257 if (ufshcd_is_clkscaling_supported(hba) && 6258 hba->clk_scaling.is_enabled) 6259 ufshcd_suspend_clkscaling(hba); 6260 ufshcd_clk_scaling_allow(hba, false); 6261 } 6262 ufshcd_scsi_block_requests(hba); 6263 /* Wait for ongoing ufshcd_queuecommand() calls to finish. */ 6264 blk_mq_wait_quiesce_done(&hba->host->tag_set); 6265 cancel_work_sync(&hba->eeh_work); 6266 } 6267 6268 static void ufshcd_err_handling_unprepare(struct ufs_hba *hba) 6269 { 6270 ufshcd_scsi_unblock_requests(hba); 6271 ufshcd_release(hba); 6272 if (ufshcd_is_clkscaling_supported(hba)) 6273 ufshcd_clk_scaling_suspend(hba, false); 6274 ufshcd_rpm_put(hba); 6275 } 6276 6277 static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba) 6278 { 6279 return (!hba->is_powered || hba->shutting_down || 6280 !hba->ufs_device_wlun || 6281 hba->ufshcd_state == UFSHCD_STATE_ERROR || 6282 (!(hba->saved_err || hba->saved_uic_err || hba->force_reset || 6283 ufshcd_is_link_broken(hba)))); 6284 } 6285 6286 #ifdef CONFIG_PM 6287 static void ufshcd_recover_pm_error(struct ufs_hba *hba) 6288 { 6289 struct Scsi_Host *shost = hba->host; 6290 struct scsi_device *sdev; 6291 struct request_queue *q; 6292 int ret; 6293 6294 hba->is_sys_suspended = false; 6295 /* 6296 * Set RPM status of wlun device to RPM_ACTIVE, 6297 * this also clears its runtime error. 6298 */ 6299 ret = pm_runtime_set_active(&hba->ufs_device_wlun->sdev_gendev); 6300 6301 /* hba device might have a runtime error otherwise */ 6302 if (ret) 6303 ret = pm_runtime_set_active(hba->dev); 6304 /* 6305 * If wlun device had runtime error, we also need to resume those 6306 * consumer scsi devices in case any of them has failed to be 6307 * resumed due to supplier runtime resume failure. This is to unblock 6308 * blk_queue_enter in case there are bios waiting inside it. 6309 */ 6310 if (!ret) { 6311 shost_for_each_device(sdev, shost) { 6312 q = sdev->request_queue; 6313 if (q->dev && (q->rpm_status == RPM_SUSPENDED || 6314 q->rpm_status == RPM_SUSPENDING)) 6315 pm_request_resume(q->dev); 6316 } 6317 } 6318 } 6319 #else 6320 static inline void ufshcd_recover_pm_error(struct ufs_hba *hba) 6321 { 6322 } 6323 #endif 6324 6325 static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba) 6326 { 6327 struct ufs_pa_layer_attr *pwr_info = &hba->pwr_info; 6328 u32 mode; 6329 6330 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode); 6331 6332 if (pwr_info->pwr_rx != ((mode >> PWRMODE_RX_OFFSET) & PWRMODE_MASK)) 6333 return true; 6334 6335 if (pwr_info->pwr_tx != (mode & PWRMODE_MASK)) 6336 return true; 6337 6338 return false; 6339 } 6340 6341 static bool ufshcd_abort_one(struct request *rq, void *priv) 6342 { 6343 int *ret = priv; 6344 u32 tag = rq->tag; 6345 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); 6346 struct scsi_device *sdev = cmd->device; 6347 struct Scsi_Host *shost = sdev->host; 6348 struct ufs_hba *hba = shost_priv(shost); 6349 6350 *ret = ufshcd_try_to_abort_task(hba, tag); 6351 dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag, 6352 hba->lrb[tag].cmd ? 
hba->lrb[tag].cmd->cmnd[0] : -1, 6353 *ret ? "failed" : "succeeded"); 6354 return *ret == 0; 6355 } 6356 6357 /** 6358 * ufshcd_abort_all - Abort all pending commands. 6359 * @hba: Host bus adapter pointer. 6360 * 6361 * Return: true if and only if the host controller needs to be reset. 6362 */ 6363 static bool ufshcd_abort_all(struct ufs_hba *hba) 6364 { 6365 int tag, ret = 0; 6366 6367 blk_mq_tagset_busy_iter(&hba->host->tag_set, ufshcd_abort_one, &ret); 6368 if (ret) 6369 goto out; 6370 6371 /* Clear pending task management requests */ 6372 for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) { 6373 ret = ufshcd_clear_tm_cmd(hba, tag); 6374 if (ret) 6375 goto out; 6376 } 6377 6378 out: 6379 /* Complete the requests that are cleared by s/w */ 6380 ufshcd_complete_requests(hba, false); 6381 6382 return ret != 0; 6383 } 6384 6385 /** 6386 * ufshcd_err_handler - handle UFS errors that require s/w attention 6387 * @work: pointer to work structure 6388 */ 6389 static void ufshcd_err_handler(struct work_struct *work) 6390 { 6391 int retries = MAX_ERR_HANDLER_RETRIES; 6392 struct ufs_hba *hba; 6393 unsigned long flags; 6394 bool needs_restore; 6395 bool needs_reset; 6396 int pmc_err; 6397 6398 hba = container_of(work, struct ufs_hba, eh_work); 6399 6400 dev_info(hba->dev, 6401 "%s started; HBA state %s; powered %d; shutting down %d; saved_err = %d; saved_uic_err = %d; force_reset = %d%s\n", 6402 __func__, ufshcd_state_name[hba->ufshcd_state], 6403 hba->is_powered, hba->shutting_down, hba->saved_err, 6404 hba->saved_uic_err, hba->force_reset, 6405 ufshcd_is_link_broken(hba) ? "; link is broken" : ""); 6406 6407 down(&hba->host_sem); 6408 spin_lock_irqsave(hba->host->host_lock, flags); 6409 if (ufshcd_err_handling_should_stop(hba)) { 6410 if (hba->ufshcd_state != UFSHCD_STATE_ERROR) 6411 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; 6412 spin_unlock_irqrestore(hba->host->host_lock, flags); 6413 up(&hba->host_sem); 6414 return; 6415 } 6416 ufshcd_set_eh_in_progress(hba); 6417 spin_unlock_irqrestore(hba->host->host_lock, flags); 6418 ufshcd_err_handling_prepare(hba); 6419 /* Complete requests that have door-bell cleared by h/w */ 6420 ufshcd_complete_requests(hba, false); 6421 spin_lock_irqsave(hba->host->host_lock, flags); 6422 again: 6423 needs_restore = false; 6424 needs_reset = false; 6425 6426 if (hba->ufshcd_state != UFSHCD_STATE_ERROR) 6427 hba->ufshcd_state = UFSHCD_STATE_RESET; 6428 /* 6429 * A full reset and restore might have happened after preparation 6430 * is finished, double check whether we should stop. 
6431 */ 6432 if (ufshcd_err_handling_should_stop(hba)) 6433 goto skip_err_handling; 6434 6435 if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) { 6436 bool ret; 6437 6438 spin_unlock_irqrestore(hba->host->host_lock, flags); 6439 /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */ 6440 ret = ufshcd_quirk_dl_nac_errors(hba); 6441 spin_lock_irqsave(hba->host->host_lock, flags); 6442 if (!ret && ufshcd_err_handling_should_stop(hba)) 6443 goto skip_err_handling; 6444 } 6445 6446 if ((hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) || 6447 (hba->saved_uic_err && 6448 (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) { 6449 bool pr_prdt = !!(hba->saved_err & SYSTEM_BUS_FATAL_ERROR); 6450 6451 spin_unlock_irqrestore(hba->host->host_lock, flags); 6452 ufshcd_print_host_state(hba); 6453 ufshcd_print_pwr_info(hba); 6454 ufshcd_print_evt_hist(hba); 6455 ufshcd_print_tmrs(hba, hba->outstanding_tasks); 6456 ufshcd_print_trs_all(hba, pr_prdt); 6457 spin_lock_irqsave(hba->host->host_lock, flags); 6458 } 6459 6460 /* 6461 * If a host reset is required then skip forcefully clearing the pending 6462 * transfers because they will be cleared during the host reset and 6463 * restore. 6464 */ 6465 if (hba->force_reset || ufshcd_is_link_broken(hba) || 6466 ufshcd_is_saved_err_fatal(hba) || 6467 ((hba->saved_err & UIC_ERROR) && 6468 (hba->saved_uic_err & (UFSHCD_UIC_DL_NAC_RECEIVED_ERROR | 6469 UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))) { 6470 needs_reset = true; 6471 goto do_reset; 6472 } 6473 6474 /* 6475 * If LINERESET was caught, UFS might have been put to PWM mode, 6476 * check if power mode restore is needed. 6477 */ 6478 if (hba->saved_uic_err & UFSHCD_UIC_PA_GENERIC_ERROR) { 6479 hba->saved_uic_err &= ~UFSHCD_UIC_PA_GENERIC_ERROR; 6480 if (!hba->saved_uic_err) 6481 hba->saved_err &= ~UIC_ERROR; 6482 spin_unlock_irqrestore(hba->host->host_lock, flags); 6483 if (ufshcd_is_pwr_mode_restore_needed(hba)) 6484 needs_restore = true; 6485 spin_lock_irqsave(hba->host->host_lock, flags); 6486 if (!hba->saved_err && !needs_restore) 6487 goto skip_err_handling; 6488 } 6489 6490 hba->silence_err_logs = true; 6491 /* release lock as clear command might sleep */ 6492 spin_unlock_irqrestore(hba->host->host_lock, flags); 6493 6494 needs_reset = ufshcd_abort_all(hba); 6495 6496 spin_lock_irqsave(hba->host->host_lock, flags); 6497 hba->silence_err_logs = false; 6498 if (needs_reset) 6499 goto do_reset; 6500 6501 /* 6502 * After all reqs and tasks are cleared from the doorbell, it is now 6503 * safe to restore the power mode. 6504 */ 6505 if (needs_restore) { 6506 spin_unlock_irqrestore(hba->host->host_lock, flags); 6507 /* 6508 * Hold the scaling lock just in case dev cmds 6509 * are sent via bsg and/or sysfs.
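 * Setting force_pmc makes the power mode change go through even though the
 * requested parameters match the cached pwr_info.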
6510 */ 6511 down_write(&hba->clk_scaling_lock); 6512 hba->force_pmc = true; 6513 pmc_err = ufshcd_config_pwr_mode(hba, &(hba->pwr_info)); 6514 if (pmc_err) { 6515 needs_reset = true; 6516 dev_err(hba->dev, "%s: Failed to restore power mode, err = %d\n", 6517 __func__, pmc_err); 6518 } 6519 hba->force_pmc = false; 6520 ufshcd_print_pwr_info(hba); 6521 up_write(&hba->clk_scaling_lock); 6522 spin_lock_irqsave(hba->host->host_lock, flags); 6523 } 6524 6525 do_reset: 6526 /* Fatal errors need reset */ 6527 if (needs_reset) { 6528 int err; 6529 6530 hba->force_reset = false; 6531 spin_unlock_irqrestore(hba->host->host_lock, flags); 6532 err = ufshcd_reset_and_restore(hba); 6533 if (err) 6534 dev_err(hba->dev, "%s: reset and restore failed with err %d\n", 6535 __func__, err); 6536 else 6537 ufshcd_recover_pm_error(hba); 6538 spin_lock_irqsave(hba->host->host_lock, flags); 6539 } 6540 6541 skip_err_handling: 6542 if (!needs_reset) { 6543 if (hba->ufshcd_state == UFSHCD_STATE_RESET) 6544 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; 6545 if (hba->saved_err || hba->saved_uic_err) 6546 dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x", 6547 __func__, hba->saved_err, hba->saved_uic_err); 6548 } 6549 /* Exit in an operational state or dead */ 6550 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL && 6551 hba->ufshcd_state != UFSHCD_STATE_ERROR) { 6552 if (--retries) 6553 goto again; 6554 hba->ufshcd_state = UFSHCD_STATE_ERROR; 6555 } 6556 ufshcd_clear_eh_in_progress(hba); 6557 spin_unlock_irqrestore(hba->host->host_lock, flags); 6558 ufshcd_err_handling_unprepare(hba); 6559 up(&hba->host_sem); 6560 6561 dev_info(hba->dev, "%s finished; HBA state %s\n", __func__, 6562 ufshcd_state_name[hba->ufshcd_state]); 6563 } 6564 6565 /** 6566 * ufshcd_update_uic_error - check and set fatal UIC error flags. 6567 * @hba: per-adapter instance 6568 * 6569 * Return: 6570 * IRQ_HANDLED - If interrupt is valid 6571 * IRQ_NONE - If invalid interrupt 6572 */ 6573 static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba) 6574 { 6575 u32 reg; 6576 irqreturn_t retval = IRQ_NONE; 6577 6578 /* PHY layer error */ 6579 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER); 6580 if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) && 6581 (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) { 6582 ufshcd_update_evt_hist(hba, UFS_EVT_PA_ERR, reg); 6583 /* 6584 * To know whether this error is fatal or not, DB timeout 6585 * must be checked but this error is handled separately. 6586 */ 6587 if (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK) 6588 dev_dbg(hba->dev, "%s: UIC Lane error reported\n", 6589 __func__); 6590 6591 /* Got a LINERESET indication. */ 6592 if (reg & UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR) { 6593 struct uic_command *cmd = NULL; 6594 6595 hba->uic_error |= UFSHCD_UIC_PA_GENERIC_ERROR; 6596 if (hba->uic_async_done && hba->active_uic_cmd) 6597 cmd = hba->active_uic_cmd; 6598 /* 6599 * Ignore the LINERESET during power mode change 6600 * operation via DME_SET command. 
6601 */ 6602 if (cmd && (cmd->command == UIC_CMD_DME_SET)) 6603 hba->uic_error &= ~UFSHCD_UIC_PA_GENERIC_ERROR; 6604 } 6605 retval |= IRQ_HANDLED; 6606 } 6607 6608 /* PA_INIT_ERROR is fatal and needs UIC reset */ 6609 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER); 6610 if ((reg & UIC_DATA_LINK_LAYER_ERROR) && 6611 (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) { 6612 ufshcd_update_evt_hist(hba, UFS_EVT_DL_ERR, reg); 6613 6614 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) 6615 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR; 6616 else if (hba->dev_quirks & 6617 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) { 6618 if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED) 6619 hba->uic_error |= 6620 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR; 6621 else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT) 6622 hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR; 6623 } 6624 retval |= IRQ_HANDLED; 6625 } 6626 6627 /* UIC NL/TL/DME errors needs software retry */ 6628 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER); 6629 if ((reg & UIC_NETWORK_LAYER_ERROR) && 6630 (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) { 6631 ufshcd_update_evt_hist(hba, UFS_EVT_NL_ERR, reg); 6632 hba->uic_error |= UFSHCD_UIC_NL_ERROR; 6633 retval |= IRQ_HANDLED; 6634 } 6635 6636 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER); 6637 if ((reg & UIC_TRANSPORT_LAYER_ERROR) && 6638 (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) { 6639 ufshcd_update_evt_hist(hba, UFS_EVT_TL_ERR, reg); 6640 hba->uic_error |= UFSHCD_UIC_TL_ERROR; 6641 retval |= IRQ_HANDLED; 6642 } 6643 6644 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME); 6645 if ((reg & UIC_DME_ERROR) && 6646 (reg & UIC_DME_ERROR_CODE_MASK)) { 6647 ufshcd_update_evt_hist(hba, UFS_EVT_DME_ERR, reg); 6648 hba->uic_error |= UFSHCD_UIC_DME_ERROR; 6649 retval |= IRQ_HANDLED; 6650 } 6651 6652 dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n", 6653 __func__, hba->uic_error); 6654 return retval; 6655 } 6656 6657 /** 6658 * ufshcd_check_errors - Check for errors that need s/w attention 6659 * @hba: per-adapter instance 6660 * @intr_status: interrupt status generated by the controller 6661 * 6662 * Return: 6663 * IRQ_HANDLED - If interrupt is valid 6664 * IRQ_NONE - If invalid interrupt 6665 */ 6666 static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status) 6667 { 6668 bool queue_eh_work = false; 6669 irqreturn_t retval = IRQ_NONE; 6670 6671 spin_lock(hba->host->host_lock); 6672 hba->errors |= UFSHCD_ERROR_MASK & intr_status; 6673 6674 if (hba->errors & INT_FATAL_ERRORS) { 6675 ufshcd_update_evt_hist(hba, UFS_EVT_FATAL_ERR, 6676 hba->errors); 6677 queue_eh_work = true; 6678 } 6679 6680 if (hba->errors & UIC_ERROR) { 6681 hba->uic_error = 0; 6682 retval = ufshcd_update_uic_error(hba); 6683 if (hba->uic_error) 6684 queue_eh_work = true; 6685 } 6686 6687 if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) { 6688 dev_err(hba->dev, 6689 "%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n", 6690 __func__, (hba->errors & UIC_HIBERNATE_ENTER) ? 6691 "Enter" : "Exit", 6692 hba->errors, ufshcd_get_upmcrs(hba)); 6693 ufshcd_update_evt_hist(hba, UFS_EVT_AUTO_HIBERN8_ERR, 6694 hba->errors); 6695 ufshcd_set_link_broken(hba); 6696 queue_eh_work = true; 6697 } 6698 6699 if (queue_eh_work) { 6700 /* 6701 * update the transfer error masks to sticky bits, let's do this 6702 * irrespective of current ufshcd_state. 
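 * saved_err and saved_uic_err accumulate across interrupts and are acted
 * upon later by the error handler.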
6703 */ 6704 hba->saved_err |= hba->errors; 6705 hba->saved_uic_err |= hba->uic_error; 6706 6707 /* dump controller state before resetting */ 6708 if ((hba->saved_err & 6709 (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) || 6710 (hba->saved_uic_err && 6711 (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) { 6712 dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n", 6713 __func__, hba->saved_err, 6714 hba->saved_uic_err); 6715 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, 6716 "host_regs: "); 6717 ufshcd_print_pwr_info(hba); 6718 } 6719 ufshcd_schedule_eh_work(hba); 6720 retval |= IRQ_HANDLED; 6721 } 6722 /* 6723 * if (!queue_eh_work) - 6724 * Other errors are either non-fatal where host recovers 6725 * itself without s/w intervention or errors that will be 6726 * handled by the SCSI core layer. 6727 */ 6728 hba->errors = 0; 6729 hba->uic_error = 0; 6730 spin_unlock(hba->host->host_lock); 6731 return retval; 6732 } 6733 6734 /** 6735 * ufshcd_tmc_handler - handle task management function completion 6736 * @hba: per adapter instance 6737 * 6738 * Return: 6739 * IRQ_HANDLED - If interrupt is valid 6740 * IRQ_NONE - If invalid interrupt 6741 */ 6742 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba) 6743 { 6744 unsigned long flags, pending, issued; 6745 irqreturn_t ret = IRQ_NONE; 6746 int tag; 6747 6748 spin_lock_irqsave(hba->host->host_lock, flags); 6749 pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL); 6750 issued = hba->outstanding_tasks & ~pending; 6751 for_each_set_bit(tag, &issued, hba->nutmrs) { 6752 struct request *req = hba->tmf_rqs[tag]; 6753 struct completion *c = req->end_io_data; 6754 6755 complete(c); 6756 ret = IRQ_HANDLED; 6757 } 6758 spin_unlock_irqrestore(hba->host->host_lock, flags); 6759 6760 return ret; 6761 } 6762 6763 /** 6764 * ufshcd_handle_mcq_cq_events - handle MCQ completion queue events 6765 * @hba: per adapter instance 6766 * 6767 * Return: IRQ_HANDLED if interrupt is handled. 
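 * If the vendor hook cannot report which completion queues have events, all
 * non-poll queues are scanned.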
6768 */ 6769 static irqreturn_t ufshcd_handle_mcq_cq_events(struct ufs_hba *hba) 6770 { 6771 struct ufs_hw_queue *hwq; 6772 unsigned long outstanding_cqs; 6773 unsigned int nr_queues; 6774 int i, ret; 6775 u32 events; 6776 6777 ret = ufshcd_vops_get_outstanding_cqs(hba, &outstanding_cqs); 6778 if (ret) 6779 outstanding_cqs = (1U << hba->nr_hw_queues) - 1; 6780 6781 /* Exclude the poll queues */ 6782 nr_queues = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL]; 6783 for_each_set_bit(i, &outstanding_cqs, nr_queues) { 6784 hwq = &hba->uhq[i]; 6785 6786 events = ufshcd_mcq_read_cqis(hba, i); 6787 if (events) 6788 ufshcd_mcq_write_cqis(hba, events, i); 6789 6790 if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS) 6791 ufshcd_mcq_poll_cqe_lock(hba, hwq); 6792 } 6793 6794 return IRQ_HANDLED; 6795 } 6796 6797 /** 6798 * ufshcd_sl_intr - Interrupt service routine 6799 * @hba: per adapter instance 6800 * @intr_status: contains interrupts generated by the controller 6801 * 6802 * Return: 6803 * IRQ_HANDLED - If interrupt is valid 6804 * IRQ_NONE - If invalid interrupt 6805 */ 6806 static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) 6807 { 6808 irqreturn_t retval = IRQ_NONE; 6809 6810 if (intr_status & UFSHCD_UIC_MASK) 6811 retval |= ufshcd_uic_cmd_compl(hba, intr_status); 6812 6813 if (intr_status & UFSHCD_ERROR_MASK || hba->errors) 6814 retval |= ufshcd_check_errors(hba, intr_status); 6815 6816 if (intr_status & UTP_TASK_REQ_COMPL) 6817 retval |= ufshcd_tmc_handler(hba); 6818 6819 if (intr_status & UTP_TRANSFER_REQ_COMPL) 6820 retval |= ufshcd_transfer_req_compl(hba); 6821 6822 if (intr_status & MCQ_CQ_EVENT_STATUS) 6823 retval |= ufshcd_handle_mcq_cq_events(hba); 6824 6825 return retval; 6826 } 6827 6828 /** 6829 * ufshcd_intr - Main interrupt service routine 6830 * @irq: irq number 6831 * @__hba: pointer to adapter instance 6832 * 6833 * Return: 6834 * IRQ_HANDLED - If interrupt is valid 6835 * IRQ_NONE - If invalid interrupt 6836 */ 6837 static irqreturn_t ufshcd_intr(int irq, void *__hba) 6838 { 6839 u32 intr_status, enabled_intr_status = 0; 6840 irqreturn_t retval = IRQ_NONE; 6841 struct ufs_hba *hba = __hba; 6842 int retries = hba->nutrs; 6843 6844 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); 6845 hba->ufs_stats.last_intr_status = intr_status; 6846 hba->ufs_stats.last_intr_ts = local_clock(); 6847 6848 /* 6849 * There could be max of hba->nutrs reqs in flight and in worst case 6850 * if the reqs get finished 1 by 1 after the interrupt status is 6851 * read, make sure we handle them by checking the interrupt status 6852 * again in a loop until we process all of the reqs before returning. 
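 * The loop below is bounded by hba->nutrs iterations so it cannot spin
 * indefinitely if the controller keeps raising new status bits.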
6853 */ 6854 while (intr_status && retries--) { 6855 enabled_intr_status = 6856 intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE); 6857 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS); 6858 if (enabled_intr_status) 6859 retval |= ufshcd_sl_intr(hba, enabled_intr_status); 6860 6861 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); 6862 } 6863 6864 if (enabled_intr_status && retval == IRQ_NONE && 6865 (!(enabled_intr_status & UTP_TRANSFER_REQ_COMPL) || 6866 hba->outstanding_reqs) && !ufshcd_eh_in_progress(hba)) { 6867 dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n", 6868 __func__, 6869 intr_status, 6870 hba->ufs_stats.last_intr_status, 6871 enabled_intr_status); 6872 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: "); 6873 } 6874 6875 return retval; 6876 } 6877 6878 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag) 6879 { 6880 int err = 0; 6881 u32 mask = 1 << tag; 6882 unsigned long flags; 6883 6884 if (!test_bit(tag, &hba->outstanding_tasks)) 6885 goto out; 6886 6887 spin_lock_irqsave(hba->host->host_lock, flags); 6888 ufshcd_utmrl_clear(hba, tag); 6889 spin_unlock_irqrestore(hba->host->host_lock, flags); 6890 6891 /* poll for max. 1 sec to clear door bell register by h/w */ 6892 err = ufshcd_wait_for_register(hba, 6893 REG_UTP_TASK_REQ_DOOR_BELL, 6894 mask, 0, 1000, 1000); 6895 6896 dev_err(hba->dev, "Clearing task management function with tag %d %s\n", 6897 tag, err ? "failed" : "succeeded"); 6898 6899 out: 6900 return err; 6901 } 6902 6903 static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba, 6904 struct utp_task_req_desc *treq, u8 tm_function) 6905 { 6906 struct request_queue *q = hba->tmf_queue; 6907 struct Scsi_Host *host = hba->host; 6908 DECLARE_COMPLETION_ONSTACK(wait); 6909 struct request *req; 6910 unsigned long flags; 6911 int task_tag, err; 6912 6913 /* 6914 * blk_mq_alloc_request() is used here only to get a free tag.
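 * The request is never queued to the block layer; its tag indexes the UTP
 * task management request list and its end_io_data carries the completion
 * that the interrupt handler signals.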
6915 */ 6916 req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0); 6917 if (IS_ERR(req)) 6918 return PTR_ERR(req); 6919 6920 req->end_io_data = &wait; 6921 ufshcd_hold(hba); 6922 6923 spin_lock_irqsave(host->host_lock, flags); 6924 6925 task_tag = req->tag; 6926 WARN_ONCE(task_tag < 0 || task_tag >= hba->nutmrs, "Invalid tag %d\n", 6927 task_tag); 6928 hba->tmf_rqs[req->tag] = req; 6929 treq->upiu_req.req_header.task_tag = task_tag; 6930 6931 memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq)); 6932 ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function); 6933 6934 /* send command to the controller */ 6935 __set_bit(task_tag, &hba->outstanding_tasks); 6936 6937 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL); 6938 /* Make sure that doorbell is committed immediately */ 6939 wmb(); 6940 6941 spin_unlock_irqrestore(host->host_lock, flags); 6942 6943 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_SEND); 6944 6945 /* wait until the task management command is completed */ 6946 err = wait_for_completion_io_timeout(&wait, 6947 msecs_to_jiffies(TM_CMD_TIMEOUT)); 6948 if (!err) { 6949 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_ERR); 6950 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n", 6951 __func__, tm_function); 6952 if (ufshcd_clear_tm_cmd(hba, task_tag)) 6953 dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n", 6954 __func__, task_tag); 6955 err = -ETIMEDOUT; 6956 } else { 6957 err = 0; 6958 memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq)); 6959 6960 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_COMP); 6961 } 6962 6963 spin_lock_irqsave(hba->host->host_lock, flags); 6964 hba->tmf_rqs[req->tag] = NULL; 6965 __clear_bit(task_tag, &hba->outstanding_tasks); 6966 spin_unlock_irqrestore(hba->host->host_lock, flags); 6967 6968 ufshcd_release(hba); 6969 blk_mq_free_request(req); 6970 6971 return err; 6972 } 6973 6974 /** 6975 * ufshcd_issue_tm_cmd - issues task management commands to controller 6976 * @hba: per adapter instance 6977 * @lun_id: LUN ID to which TM command is sent 6978 * @task_id: task ID to which the TM command is applicable 6979 * @tm_function: task management function opcode 6980 * @tm_response: task management service response return value 6981 * 6982 * Return: non-zero value on error, zero on success. 6983 */ 6984 static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id, 6985 u8 tm_function, u8 *tm_response) 6986 { 6987 struct utp_task_req_desc treq = { }; 6988 enum utp_ocs ocs_value; 6989 int err; 6990 6991 /* Configure task request descriptor */ 6992 treq.header.interrupt = 1; 6993 treq.header.ocs = OCS_INVALID_COMMAND_STATUS; 6994 6995 /* Configure task request UPIU */ 6996 treq.upiu_req.req_header.transaction_code = UPIU_TRANSACTION_TASK_REQ; 6997 treq.upiu_req.req_header.lun = lun_id; 6998 treq.upiu_req.req_header.tm_function = tm_function; 6999 7000 /* 7001 * The host shall provide the same value for LUN field in the basic 7002 * header and for Input Parameter. 
7003 */ 7004 treq.upiu_req.input_param1 = cpu_to_be32(lun_id); 7005 treq.upiu_req.input_param2 = cpu_to_be32(task_id); 7006 7007 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function); 7008 if (err == -ETIMEDOUT) 7009 return err; 7010 7011 ocs_value = treq.header.ocs & MASK_OCS; 7012 if (ocs_value != OCS_SUCCESS) 7013 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", 7014 __func__, ocs_value); 7015 else if (tm_response) 7016 *tm_response = be32_to_cpu(treq.upiu_rsp.output_param1) & 7017 MASK_TM_SERVICE_RESP; 7018 return err; 7019 } 7020 7021 /** 7022 * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests 7023 * @hba: per-adapter instance 7024 * @req_upiu: upiu request 7025 * @rsp_upiu: upiu reply 7026 * @desc_buff: pointer to descriptor buffer, NULL if NA 7027 * @buff_len: descriptor size, 0 if NA 7028 * @cmd_type: specifies the type (NOP, Query...) 7029 * @desc_op: descriptor operation 7030 * 7031 * These types of requests use the UTP Transfer Request Descriptor (utrd). 7032 * Therefore, they "ride" the device management infrastructure: they use its 7033 * tag and work queues. 7034 * 7035 * Since there is only one available tag for device management commands, 7036 * the caller is expected to hold the hba->dev_cmd.lock mutex. 7037 * 7038 * Return: 0 upon success; < 0 upon failure. 7039 */ 7040 static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba, 7041 struct utp_upiu_req *req_upiu, 7042 struct utp_upiu_req *rsp_upiu, 7043 u8 *desc_buff, int *buff_len, 7044 enum dev_cmd_type cmd_type, 7045 enum query_opcode desc_op) 7046 { 7047 DECLARE_COMPLETION_ONSTACK(wait); 7048 const u32 tag = hba->reserved_slot; 7049 struct ufshcd_lrb *lrbp; 7050 int err = 0; 7051 u8 upiu_flags; 7052 7053 /* Protects use of hba->reserved_slot. */ 7054 lockdep_assert_held(&hba->dev_cmd.lock); 7055 7056 down_read(&hba->clk_scaling_lock); 7057 7058 lrbp = &hba->lrb[tag]; 7059 lrbp->cmd = NULL; 7060 lrbp->task_tag = tag; 7061 lrbp->lun = 0; 7062 lrbp->intr_cmd = true; 7063 ufshcd_prepare_lrbp_crypto(NULL, lrbp); 7064 hba->dev_cmd.type = cmd_type; 7065 7066 if (hba->ufs_version <= ufshci_version(1, 1)) 7067 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE; 7068 else 7069 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE; 7070 7071 /* update the task tag in the request upiu */ 7072 req_upiu->header.task_tag = tag; 7073 7074 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0); 7075 7076 /* just copy the upiu request as it is */ 7077 memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr)); 7078 if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) { 7079 /* The Data Segment Area is optional depending upon the query 7080 * function value. For WRITE DESCRIPTOR, the data segment 7081 * follows right after the tsf. 7082 */ 7083 memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len); 7084 *buff_len = 0; 7085 } 7086 7087 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); 7088 7089 hba->dev_cmd.complete = &wait; 7090 7091 ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr); 7092 7093 ufshcd_send_command(hba, tag, hba->dev_cmd_queue); 7094 /* 7095 * Ignore the return value here - ufshcd_check_query_response() is 7096 * bound to fail since dev_cmd.query and dev_cmd.type were left empty. 7097 * Read the response directly, ignoring all errors.
7098 */ 7099 ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT); 7100 7101 /* just copy the upiu response as it is */ 7102 memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu)); 7103 if (desc_buff && desc_op == UPIU_QUERY_OPCODE_READ_DESC) { 7104 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu); 7105 u16 resp_len = be16_to_cpu(lrbp->ucd_rsp_ptr->header 7106 .data_segment_length); 7107 7108 if (*buff_len >= resp_len) { 7109 memcpy(desc_buff, descp, resp_len); 7110 *buff_len = resp_len; 7111 } else { 7112 dev_warn(hba->dev, 7113 "%s: rsp size %d is bigger than buffer size %d", 7114 __func__, resp_len, *buff_len); 7115 *buff_len = 0; 7116 err = -EINVAL; 7117 } 7118 } 7119 ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP, 7120 (struct utp_upiu_req *)lrbp->ucd_rsp_ptr); 7121 7122 up_read(&hba->clk_scaling_lock); 7123 return err; 7124 } 7125 7126 /** 7127 * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands 7128 * @hba: per-adapter instance 7129 * @req_upiu: upiu request 7130 * @rsp_upiu: upiu reply - only 8 DW as we do not support scsi commands 7131 * @msgcode: message code, one of UPIU Transaction Codes Initiator to Target 7132 * @desc_buff: pointer to descriptor buffer, NULL if NA 7133 * @buff_len: descriptor size, 0 if NA 7134 * @desc_op: descriptor operation 7135 * 7136 * Supports UTP Transfer requests (NOP and query) and UTP Task 7137 * Management requests. 7138 * It is up to the caller to fill the UPIU content properly, as it will 7139 * be copied without any further input validation. 7140 * 7141 * Return: 0 upon success; < 0 upon failure. 7142 */ 7143 int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba, 7144 struct utp_upiu_req *req_upiu, 7145 struct utp_upiu_req *rsp_upiu, 7146 enum upiu_request_transaction msgcode, 7147 u8 *desc_buff, int *buff_len, 7148 enum query_opcode desc_op) 7149 { 7150 int err; 7151 enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY; 7152 struct utp_task_req_desc treq = { }; 7153 enum utp_ocs ocs_value; 7154 u8 tm_f = req_upiu->header.tm_function; 7155 7156 switch (msgcode) { 7157 case UPIU_TRANSACTION_NOP_OUT: 7158 cmd_type = DEV_CMD_TYPE_NOP; 7159 fallthrough; 7160 case UPIU_TRANSACTION_QUERY_REQ: 7161 ufshcd_hold(hba); 7162 mutex_lock(&hba->dev_cmd.lock); 7163 err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu, 7164 desc_buff, buff_len, 7165 cmd_type, desc_op); 7166 mutex_unlock(&hba->dev_cmd.lock); 7167 ufshcd_release(hba); 7168 7169 break; 7170 case UPIU_TRANSACTION_TASK_REQ: 7171 treq.header.interrupt = 1; 7172 treq.header.ocs = OCS_INVALID_COMMAND_STATUS; 7173 7174 memcpy(&treq.upiu_req, req_upiu, sizeof(*req_upiu)); 7175 7176 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f); 7177 if (err == -ETIMEDOUT) 7178 break; 7179 7180 ocs_value = treq.header.ocs & MASK_OCS; 7181 if (ocs_value != OCS_SUCCESS) { 7182 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__, 7183 ocs_value); 7184 break; 7185 } 7186 7187 memcpy(rsp_upiu, &treq.upiu_rsp, sizeof(*rsp_upiu)); 7188 7189 break; 7190 default: 7191 err = -EINVAL; 7192 7193 break; 7194 } 7195 7196 return err; 7197 } 7198 7199 /** 7200 * ufshcd_advanced_rpmb_req_handler - handle advanced RPMB request 7201 * @hba: per adapter instance 7202 * @req_upiu: upiu request 7203 * @rsp_upiu: upiu reply 7204 * @req_ehs: EHS field which contains Advanced RPMB Request Message 7205 * @rsp_ehs: EHS field which returns Advanced RPMB Response Message 7206 * @sg_cnt: The number of sg lists actually used 7207 * @sg_list: Pointer to SG list when DATA IN/OUT UPIU is required in ARPMB
operation 7208 * @dir: DMA direction 7209 * 7210 * Return: zero on success, non-zero on failure. 7211 */ 7212 int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *req_upiu, 7213 struct utp_upiu_req *rsp_upiu, struct ufs_ehs *req_ehs, 7214 struct ufs_ehs *rsp_ehs, int sg_cnt, struct scatterlist *sg_list, 7215 enum dma_data_direction dir) 7216 { 7217 DECLARE_COMPLETION_ONSTACK(wait); 7218 const u32 tag = hba->reserved_slot; 7219 struct ufshcd_lrb *lrbp; 7220 int err = 0; 7221 int result; 7222 u8 upiu_flags; 7223 u8 *ehs_data; 7224 u16 ehs_len; 7225 7226 /* Protects use of hba->reserved_slot. */ 7227 ufshcd_hold(hba); 7228 mutex_lock(&hba->dev_cmd.lock); 7229 down_read(&hba->clk_scaling_lock); 7230 7231 lrbp = &hba->lrb[tag]; 7232 lrbp->cmd = NULL; 7233 lrbp->task_tag = tag; 7234 lrbp->lun = UFS_UPIU_RPMB_WLUN; 7235 7236 lrbp->intr_cmd = true; 7237 ufshcd_prepare_lrbp_crypto(NULL, lrbp); 7238 hba->dev_cmd.type = DEV_CMD_TYPE_RPMB; 7239 7240 /* Advanced RPMB starts from UFS 4.0, so its command type is UTP_CMD_TYPE_UFS_STORAGE */ 7241 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE; 7242 7243 /* 7244 * According to UFSHCI 4.0 specification page 24, if EHSLUTRDS is 0, host controller takes 7245 * EHS length from CMD UPIU, and SW driver use EHS Length field in CMD UPIU. if it is 1, 7246 * HW controller takes EHS length from UTRD. 7247 */ 7248 if (hba->capabilities & MASK_EHSLUTRD_SUPPORTED) 7249 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 2); 7250 else 7251 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 0); 7252 7253 /* update the task tag */ 7254 req_upiu->header.task_tag = tag; 7255 7256 /* copy the UPIU(contains CDB) request as it is */ 7257 memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr)); 7258 /* Copy EHS, starting with byte32, immediately after the CDB package */ 7259 memcpy(lrbp->ucd_req_ptr + 1, req_ehs, sizeof(*req_ehs)); 7260 7261 if (dir != DMA_NONE && sg_list) 7262 ufshcd_sgl_to_prdt(hba, lrbp, sg_cnt, sg_list); 7263 7264 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); 7265 7266 hba->dev_cmd.complete = &wait; 7267 7268 ufshcd_send_command(hba, tag, hba->dev_cmd_queue); 7269 7270 err = ufshcd_wait_for_dev_cmd(hba, lrbp, ADVANCED_RPMB_REQ_TIMEOUT); 7271 7272 if (!err) { 7273 /* Just copy the upiu response as it is */ 7274 memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu)); 7275 /* Get the response UPIU result */ 7276 result = (lrbp->ucd_rsp_ptr->header.response << 8) | 7277 lrbp->ucd_rsp_ptr->header.status; 7278 7279 ehs_len = lrbp->ucd_rsp_ptr->header.ehs_length; 7280 /* 7281 * Since the bLength in EHS indicates the total size of the EHS Header and EHS Data 7282 * in 32 Byte units, the value of the bLength Request/Response for Advanced RPMB 7283 * Message is 02h 7284 */ 7285 if (ehs_len == 2 && rsp_ehs) { 7286 /* 7287 * ucd_rsp_ptr points to a buffer with a length of 512 bytes 7288 * (ALIGNED_UPIU_SIZE = 512), and the EHS data just starts from byte32 7289 */ 7290 ehs_data = (u8 *)lrbp->ucd_rsp_ptr + EHS_OFFSET_IN_RESPONSE; 7291 memcpy(rsp_ehs, ehs_data, ehs_len * 32); 7292 } 7293 } 7294 7295 up_read(&hba->clk_scaling_lock); 7296 mutex_unlock(&hba->dev_cmd.lock); 7297 ufshcd_release(hba); 7298 return err ? : result; 7299 } 7300 7301 /** 7302 * ufshcd_eh_device_reset_handler() - Reset a single logical unit. 7303 * @cmd: SCSI command pointer 7304 * 7305 * Return: SUCCESS or FAILED. 
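 * In MCQ mode the pending commands for the LUN are cleared one at a time and
 * their completion queues are polled; in legacy mode the corresponding
 * doorbell bits are cleared in bulk.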
7306 */ 7307 static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd) 7308 { 7309 unsigned long flags, pending_reqs = 0, not_cleared = 0; 7310 struct Scsi_Host *host; 7311 struct ufs_hba *hba; 7312 struct ufs_hw_queue *hwq; 7313 struct ufshcd_lrb *lrbp; 7314 u32 pos, not_cleared_mask = 0; 7315 int err; 7316 u8 resp = 0xF, lun; 7317 7318 host = cmd->device->host; 7319 hba = shost_priv(host); 7320 7321 lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun); 7322 err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp); 7323 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) { 7324 if (!err) 7325 err = resp; 7326 goto out; 7327 } 7328 7329 if (is_mcq_enabled(hba)) { 7330 for (pos = 0; pos < hba->nutrs; pos++) { 7331 lrbp = &hba->lrb[pos]; 7332 if (ufshcd_cmd_inflight(lrbp->cmd) && 7333 lrbp->lun == lun) { 7334 ufshcd_clear_cmd(hba, pos); 7335 hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd)); 7336 ufshcd_mcq_poll_cqe_lock(hba, hwq); 7337 } 7338 } 7339 err = 0; 7340 goto out; 7341 } 7342 7343 /* clear the commands that were pending for corresponding LUN */ 7344 spin_lock_irqsave(&hba->outstanding_lock, flags); 7345 for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) 7346 if (hba->lrb[pos].lun == lun) 7347 __set_bit(pos, &pending_reqs); 7348 hba->outstanding_reqs &= ~pending_reqs; 7349 spin_unlock_irqrestore(&hba->outstanding_lock, flags); 7350 7351 for_each_set_bit(pos, &pending_reqs, hba->nutrs) { 7352 if (ufshcd_clear_cmd(hba, pos) < 0) { 7353 spin_lock_irqsave(&hba->outstanding_lock, flags); 7354 not_cleared = 1U << pos & 7355 ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); 7356 hba->outstanding_reqs |= not_cleared; 7357 not_cleared_mask |= not_cleared; 7358 spin_unlock_irqrestore(&hba->outstanding_lock, flags); 7359 7360 dev_err(hba->dev, "%s: failed to clear request %d\n", 7361 __func__, pos); 7362 } 7363 } 7364 __ufshcd_transfer_req_compl(hba, pending_reqs & ~not_cleared_mask); 7365 7366 out: 7367 hba->req_abort_count = 0; 7368 ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, (u32)err); 7369 if (!err) { 7370 err = SUCCESS; 7371 } else { 7372 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err); 7373 err = FAILED; 7374 } 7375 return err; 7376 } 7377 7378 static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap) 7379 { 7380 struct ufshcd_lrb *lrbp; 7381 int tag; 7382 7383 for_each_set_bit(tag, &bitmap, hba->nutrs) { 7384 lrbp = &hba->lrb[tag]; 7385 lrbp->req_abort_skip = true; 7386 } 7387 } 7388 7389 /** 7390 * ufshcd_try_to_abort_task - abort a specific task 7391 * @hba: Pointer to adapter instance 7392 * @tag: Task tag/index to be aborted 7393 * 7394 * Abort the pending command in device by sending UFS_ABORT_TASK task management 7395 * command, and in host controller by clearing the door-bell register. There can 7396 * be race between controller sending the command to the device while abort is 7397 * issued. To avoid that, first issue UFS_QUERY_TASK to check if the command is 7398 * really issued and then try to abort it. 7399 * 7400 * Return: zero on success, non-zero on failure. 
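 * UFS_QUERY_TASK is retried up to 100 times before the abort is given up
 * with -EBUSY.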
7401 */ 7402 int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag) 7403 { 7404 struct ufshcd_lrb *lrbp = &hba->lrb[tag]; 7405 int err = 0; 7406 int poll_cnt; 7407 u8 resp = 0xF; 7408 u32 reg; 7409 7410 for (poll_cnt = 100; poll_cnt; poll_cnt--) { 7411 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, 7412 UFS_QUERY_TASK, &resp); 7413 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) { 7414 /* cmd pending in the device */ 7415 dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n", 7416 __func__, tag); 7417 break; 7418 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) { 7419 /* 7420 * cmd not pending in the device, check if it is 7421 * in transition. 7422 */ 7423 dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n", 7424 __func__, tag); 7425 if (is_mcq_enabled(hba)) { 7426 /* MCQ mode */ 7427 if (ufshcd_cmd_inflight(lrbp->cmd)) { 7428 /* sleep for max. 200us same delay as in SDB mode */ 7429 usleep_range(100, 200); 7430 continue; 7431 } 7432 /* command completed already */ 7433 dev_err(hba->dev, "%s: cmd at tag=%d is cleared.\n", 7434 __func__, tag); 7435 goto out; 7436 } 7437 7438 /* Single Doorbell Mode */ 7439 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); 7440 if (reg & (1 << tag)) { 7441 /* sleep for max. 200us to stabilize */ 7442 usleep_range(100, 200); 7443 continue; 7444 } 7445 /* command completed already */ 7446 dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n", 7447 __func__, tag); 7448 goto out; 7449 } else { 7450 dev_err(hba->dev, 7451 "%s: no response from device. tag = %d, err %d\n", 7452 __func__, tag, err); 7453 if (!err) 7454 err = resp; /* service response error */ 7455 goto out; 7456 } 7457 } 7458 7459 if (!poll_cnt) { 7460 err = -EBUSY; 7461 goto out; 7462 } 7463 7464 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, 7465 UFS_ABORT_TASK, &resp); 7466 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) { 7467 if (!err) { 7468 err = resp; /* service response error */ 7469 dev_err(hba->dev, "%s: issued. tag = %d, err %d\n", 7470 __func__, tag, err); 7471 } 7472 goto out; 7473 } 7474 7475 err = ufshcd_clear_cmd(hba, tag); 7476 if (err) 7477 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n", 7478 __func__, tag, err); 7479 7480 out: 7481 return err; 7482 } 7483 7484 /** 7485 * ufshcd_abort - scsi host template eh_abort_handler callback 7486 * @cmd: SCSI command pointer 7487 * 7488 * Return: SUCCESS or FAILED. 7489 */ 7490 static int ufshcd_abort(struct scsi_cmnd *cmd) 7491 { 7492 struct Scsi_Host *host = cmd->device->host; 7493 struct ufs_hba *hba = shost_priv(host); 7494 int tag = scsi_cmd_to_rq(cmd)->tag; 7495 struct ufshcd_lrb *lrbp = &hba->lrb[tag]; 7496 unsigned long flags; 7497 int err = FAILED; 7498 bool outstanding; 7499 u32 reg; 7500 7501 WARN_ONCE(tag < 0, "Invalid tag %d\n", tag); 7502 7503 ufshcd_hold(hba); 7504 7505 if (!is_mcq_enabled(hba)) { 7506 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); 7507 if (!test_bit(tag, &hba->outstanding_reqs)) { 7508 /* If command is already aborted/completed, return FAILED. */ 7509 dev_err(hba->dev, 7510 "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n", 7511 __func__, tag, hba->outstanding_reqs, reg); 7512 goto release; 7513 } 7514 } 7515 7516 /* Print Transfer Request of aborted task */ 7517 dev_info(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag); 7518 7519 /* 7520 * Print detailed info about aborted request. 
7521 * As more than one request might get aborted at the same time, 7522 * print full information only for the first aborted request in order 7523 * to reduce repeated printouts. For other aborted requests only print 7524 * basic details. 7525 */ 7526 scsi_print_command(cmd); 7527 if (!hba->req_abort_count) { 7528 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, tag); 7529 ufshcd_print_evt_hist(hba); 7530 ufshcd_print_host_state(hba); 7531 ufshcd_print_pwr_info(hba); 7532 ufshcd_print_tr(hba, tag, true); 7533 } else { 7534 ufshcd_print_tr(hba, tag, false); 7535 } 7536 hba->req_abort_count++; 7537 7538 if (!is_mcq_enabled(hba) && !(reg & (1 << tag))) { 7539 /* only execute this code in single doorbell mode */ 7540 dev_err(hba->dev, 7541 "%s: cmd was completed, but without a notifying intr, tag = %d", 7542 __func__, tag); 7543 __ufshcd_transfer_req_compl(hba, 1UL << tag); 7544 goto release; 7545 } 7546 7547 /* 7548 * Task abort to the device W-LUN is illegal. When this command 7549 * will fail, due to spec violation, scsi err handling next step 7550 * will be to send LU reset which, again, is a spec violation. 7551 * To avoid these unnecessary/illegal steps, first we clean up 7552 * the lrb taken by this cmd and re-set it in outstanding_reqs, 7553 * then queue the eh_work and bail. 7554 */ 7555 if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) { 7556 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun); 7557 7558 spin_lock_irqsave(host->host_lock, flags); 7559 hba->force_reset = true; 7560 ufshcd_schedule_eh_work(hba); 7561 spin_unlock_irqrestore(host->host_lock, flags); 7562 goto release; 7563 } 7564 7565 if (is_mcq_enabled(hba)) { 7566 /* MCQ mode. Branch off to handle abort for mcq mode */ 7567 err = ufshcd_mcq_abort(cmd); 7568 goto release; 7569 } 7570 7571 /* Skip task abort in case previous aborts failed and report failure */ 7572 if (lrbp->req_abort_skip) { 7573 dev_err(hba->dev, "%s: skipping abort\n", __func__); 7574 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs); 7575 goto release; 7576 } 7577 7578 err = ufshcd_try_to_abort_task(hba, tag); 7579 if (err) { 7580 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err); 7581 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs); 7582 err = FAILED; 7583 goto release; 7584 } 7585 7586 /* 7587 * Clear the corresponding bit from outstanding_reqs since the command 7588 * has been aborted successfully. 7589 */ 7590 spin_lock_irqsave(&hba->outstanding_lock, flags); 7591 outstanding = __test_and_clear_bit(tag, &hba->outstanding_reqs); 7592 spin_unlock_irqrestore(&hba->outstanding_lock, flags); 7593 7594 if (outstanding) 7595 ufshcd_release_scsi_cmd(hba, lrbp); 7596 7597 err = SUCCESS; 7598 7599 release: 7600 /* Matches the ufshcd_hold() call at the start of this function. */ 7601 ufshcd_release(hba); 7602 return err; 7603 } 7604 7605 /** 7606 * ufshcd_host_reset_and_restore - reset and restore host controller 7607 * @hba: per-adapter instance 7608 * 7609 * Note that host controller reset may issue DME_RESET to 7610 * local and remote (device) Uni-Pro stack and the attributes 7611 * are reset to default state. 7612 * 7613 * Return: zero on success, non-zero on failure. 
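 *
 * In short (see the body below): stop the controller, complete whatever
 * requests the hardware already cleared, scale the clocks back up to
 * maximum, re-enable the host and then re-probe the device via
 * ufshcd_probe_hba().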
7614 */ 7615 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) 7616 { 7617 int err; 7618 7619 /* 7620 * Stop the host controller and complete the requests 7621 * cleared by h/w 7622 */ 7623 ufshcd_hba_stop(hba); 7624 hba->silence_err_logs = true; 7625 ufshcd_complete_requests(hba, true); 7626 hba->silence_err_logs = false; 7627 7628 /* scale up clocks to max frequency before full reinitialization */ 7629 ufshcd_scale_clks(hba, true); 7630 7631 err = ufshcd_hba_enable(hba); 7632 7633 /* Establish the link again and restore the device */ 7634 if (!err) 7635 err = ufshcd_probe_hba(hba, false); 7636 7637 if (err) 7638 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err); 7639 ufshcd_update_evt_hist(hba, UFS_EVT_HOST_RESET, (u32)err); 7640 return err; 7641 } 7642 7643 /** 7644 * ufshcd_reset_and_restore - reset and re-initialize host/device 7645 * @hba: per-adapter instance 7646 * 7647 * Reset and recover device, host and re-establish link. This 7648 * is helpful to recover the communication in fatal error conditions. 7649 * 7650 * Return: zero on success, non-zero on failure. 7651 */ 7652 static int ufshcd_reset_and_restore(struct ufs_hba *hba) 7653 { 7654 u32 saved_err = 0; 7655 u32 saved_uic_err = 0; 7656 int err = 0; 7657 unsigned long flags; 7658 int retries = MAX_HOST_RESET_RETRIES; 7659 7660 spin_lock_irqsave(hba->host->host_lock, flags); 7661 do { 7662 /* 7663 * This is a fresh start, cache and clear saved error first, 7664 * in case new error generated during reset and restore. 7665 */ 7666 saved_err |= hba->saved_err; 7667 saved_uic_err |= hba->saved_uic_err; 7668 hba->saved_err = 0; 7669 hba->saved_uic_err = 0; 7670 hba->force_reset = false; 7671 hba->ufshcd_state = UFSHCD_STATE_RESET; 7672 spin_unlock_irqrestore(hba->host->host_lock, flags); 7673 7674 /* Reset the attached device */ 7675 ufshcd_device_reset(hba); 7676 7677 err = ufshcd_host_reset_and_restore(hba); 7678 7679 spin_lock_irqsave(hba->host->host_lock, flags); 7680 if (err) 7681 continue; 7682 /* Do not exit unless operational or dead */ 7683 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL && 7684 hba->ufshcd_state != UFSHCD_STATE_ERROR && 7685 hba->ufshcd_state != UFSHCD_STATE_EH_SCHEDULED_NON_FATAL) 7686 err = -EAGAIN; 7687 } while (err && --retries); 7688 7689 /* 7690 * Inform scsi mid-layer that we did reset and allow to handle 7691 * Unit Attention properly. 7692 */ 7693 scsi_report_bus_reset(hba->host, 0); 7694 if (err) { 7695 hba->ufshcd_state = UFSHCD_STATE_ERROR; 7696 hba->saved_err |= saved_err; 7697 hba->saved_uic_err |= saved_uic_err; 7698 } 7699 spin_unlock_irqrestore(hba->host->host_lock, flags); 7700 7701 return err; 7702 } 7703 7704 /** 7705 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer 7706 * @cmd: SCSI command pointer 7707 * 7708 * Return: SUCCESS or FAILED. 
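 *
 * This handler only requests a full reset: it sets force_reset, schedules
 * eh_work and waits for it with flush_work(); the actual recovery happens
 * in the error handler. FAILED is returned only if the HBA ends up in
 * UFSHCD_STATE_ERROR afterwards.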
7709 */ 7710 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd) 7711 { 7712 int err = SUCCESS; 7713 unsigned long flags; 7714 struct ufs_hba *hba; 7715 7716 hba = shost_priv(cmd->device->host); 7717 7718 spin_lock_irqsave(hba->host->host_lock, flags); 7719 hba->force_reset = true; 7720 ufshcd_schedule_eh_work(hba); 7721 dev_err(hba->dev, "%s: reset in progress - 1\n", __func__); 7722 spin_unlock_irqrestore(hba->host->host_lock, flags); 7723 7724 flush_work(&hba->eh_work); 7725 7726 spin_lock_irqsave(hba->host->host_lock, flags); 7727 if (hba->ufshcd_state == UFSHCD_STATE_ERROR) 7728 err = FAILED; 7729 spin_unlock_irqrestore(hba->host->host_lock, flags); 7730 7731 return err; 7732 } 7733 7734 /** 7735 * ufshcd_get_max_icc_level - calculate the ICC level 7736 * @sup_curr_uA: max. current supported by the regulator 7737 * @start_scan: row at the desc table to start scan from 7738 * @buff: power descriptor buffer 7739 * 7740 * Return: calculated max ICC level for specific regulator. 7741 */ 7742 static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, 7743 const char *buff) 7744 { 7745 int i; 7746 int curr_uA; 7747 u16 data; 7748 u16 unit; 7749 7750 for (i = start_scan; i >= 0; i--) { 7751 data = get_unaligned_be16(&buff[2 * i]); 7752 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >> 7753 ATTR_ICC_LVL_UNIT_OFFSET; 7754 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK; 7755 switch (unit) { 7756 case UFSHCD_NANO_AMP: 7757 curr_uA = curr_uA / 1000; 7758 break; 7759 case UFSHCD_MILI_AMP: 7760 curr_uA = curr_uA * 1000; 7761 break; 7762 case UFSHCD_AMP: 7763 curr_uA = curr_uA * 1000 * 1000; 7764 break; 7765 case UFSHCD_MICRO_AMP: 7766 default: 7767 break; 7768 } 7769 if (sup_curr_uA >= curr_uA) 7770 break; 7771 } 7772 if (i < 0) { 7773 i = 0; 7774 pr_err("%s: Couldn't find valid icc_level = %d", __func__, i); 7775 } 7776 7777 return (u32)i; 7778 } 7779 7780 /** 7781 * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level 7782 * In case regulators are not initialized we'll return 0 7783 * @hba: per-adapter instance 7784 * @desc_buf: power descriptor buffer to extract ICC levels from. 7785 * 7786 * Return: calculated ICC level. 7787 */ 7788 static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba, 7789 const u8 *desc_buf) 7790 { 7791 u32 icc_level = 0; 7792 7793 if (!hba->vreg_info.vcc || !hba->vreg_info.vccq || 7794 !hba->vreg_info.vccq2) { 7795 /* 7796 * Using dev_dbg to avoid messages during runtime PM to avoid 7797 * never-ending cycles of messages written back to storage by 7798 * user space causing runtime resume, causing more messages and 7799 * so on. 
7800 */ 7801 dev_dbg(hba->dev, 7802 "%s: Regulator capability was not set, actvIccLevel=%d", 7803 __func__, icc_level); 7804 goto out; 7805 } 7806 7807 if (hba->vreg_info.vcc->max_uA) 7808 icc_level = ufshcd_get_max_icc_level( 7809 hba->vreg_info.vcc->max_uA, 7810 POWER_DESC_MAX_ACTV_ICC_LVLS - 1, 7811 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]); 7812 7813 if (hba->vreg_info.vccq->max_uA) 7814 icc_level = ufshcd_get_max_icc_level( 7815 hba->vreg_info.vccq->max_uA, 7816 icc_level, 7817 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]); 7818 7819 if (hba->vreg_info.vccq2->max_uA) 7820 icc_level = ufshcd_get_max_icc_level( 7821 hba->vreg_info.vccq2->max_uA, 7822 icc_level, 7823 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]); 7824 out: 7825 return icc_level; 7826 } 7827 7828 static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba) 7829 { 7830 int ret; 7831 u8 *desc_buf; 7832 u32 icc_level; 7833 7834 desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL); 7835 if (!desc_buf) 7836 return; 7837 7838 ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, 0, 7839 desc_buf, QUERY_DESC_MAX_SIZE); 7840 if (ret) { 7841 dev_err(hba->dev, 7842 "%s: Failed reading power descriptor ret = %d", 7843 __func__, ret); 7844 goto out; 7845 } 7846 7847 icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf); 7848 dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level); 7849 7850 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, 7851 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level); 7852 7853 if (ret) 7854 dev_err(hba->dev, 7855 "%s: Failed configuring bActiveICCLevel = %d ret = %d", 7856 __func__, icc_level, ret); 7857 7858 out: 7859 kfree(desc_buf); 7860 } 7861 7862 static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev) 7863 { 7864 scsi_autopm_get_device(sdev); 7865 blk_pm_runtime_init(sdev->request_queue, &sdev->sdev_gendev); 7866 if (sdev->rpm_autosuspend) 7867 pm_runtime_set_autosuspend_delay(&sdev->sdev_gendev, 7868 RPM_AUTOSUSPEND_DELAY_MS); 7869 scsi_autopm_put_device(sdev); 7870 } 7871 7872 /** 7873 * ufshcd_scsi_add_wlus - Adds required W-LUs 7874 * @hba: per-adapter instance 7875 * 7876 * UFS device specification requires the UFS devices to support 4 well known 7877 * logical units: 7878 * "REPORT_LUNS" (address: 01h) 7879 * "UFS Device" (address: 50h) 7880 * "RPMB" (address: 44h) 7881 * "BOOT" (address: 30h) 7882 * UFS device's power management needs to be controlled by "POWER CONDITION" 7883 * field of SSU (START STOP UNIT) command. But this "power condition" field 7884 * will take effect only when its sent to "UFS device" well known logical unit 7885 * hence we require the scsi_device instance to represent this logical unit in 7886 * order for the UFS host driver to send the SSU command for power management. 7887 * 7888 * We also require the scsi_device instance for "RPMB" (Replay Protected Memory 7889 * Block) LU so user space process can control this LU. User space may also 7890 * want to have access to BOOT LU. 7891 * 7892 * This function adds scsi device instances for each of all well known LUs 7893 * (except "REPORT LUNS" LU). 7894 * 7895 * Return: zero on success (all required W-LUs are added successfully), 7896 * non-zero error value on failure (if failed to add any of the required W-LU). 
7897 */ 7898 static int ufshcd_scsi_add_wlus(struct ufs_hba *hba) 7899 { 7900 int ret = 0; 7901 struct scsi_device *sdev_boot, *sdev_rpmb; 7902 7903 hba->ufs_device_wlun = __scsi_add_device(hba->host, 0, 0, 7904 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL); 7905 if (IS_ERR(hba->ufs_device_wlun)) { 7906 ret = PTR_ERR(hba->ufs_device_wlun); 7907 hba->ufs_device_wlun = NULL; 7908 goto out; 7909 } 7910 scsi_device_put(hba->ufs_device_wlun); 7911 7912 sdev_rpmb = __scsi_add_device(hba->host, 0, 0, 7913 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL); 7914 if (IS_ERR(sdev_rpmb)) { 7915 ret = PTR_ERR(sdev_rpmb); 7916 goto remove_ufs_device_wlun; 7917 } 7918 ufshcd_blk_pm_runtime_init(sdev_rpmb); 7919 scsi_device_put(sdev_rpmb); 7920 7921 sdev_boot = __scsi_add_device(hba->host, 0, 0, 7922 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL); 7923 if (IS_ERR(sdev_boot)) { 7924 dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__); 7925 } else { 7926 ufshcd_blk_pm_runtime_init(sdev_boot); 7927 scsi_device_put(sdev_boot); 7928 } 7929 goto out; 7930 7931 remove_ufs_device_wlun: 7932 scsi_remove_device(hba->ufs_device_wlun); 7933 out: 7934 return ret; 7935 } 7936 7937 static void ufshcd_wb_probe(struct ufs_hba *hba, const u8 *desc_buf) 7938 { 7939 struct ufs_dev_info *dev_info = &hba->dev_info; 7940 u8 lun; 7941 u32 d_lu_wb_buf_alloc; 7942 u32 ext_ufs_feature; 7943 7944 if (!ufshcd_is_wb_allowed(hba)) 7945 return; 7946 7947 /* 7948 * Probe WB only for UFS-2.2 and UFS-3.1 (and later) devices or 7949 * UFS devices with quirk UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES 7950 * enabled 7951 */ 7952 if (!(dev_info->wspecversion >= 0x310 || 7953 dev_info->wspecversion == 0x220 || 7954 (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES))) 7955 goto wb_disabled; 7956 7957 ext_ufs_feature = get_unaligned_be32(desc_buf + 7958 DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP); 7959 7960 if (!(ext_ufs_feature & UFS_DEV_WRITE_BOOSTER_SUP)) 7961 goto wb_disabled; 7962 7963 /* 7964 * WB may be supported but not configured while provisioning. The spec 7965 * says, in dedicated wb buffer mode, a max of 1 lun would have wb 7966 * buffer configured. 
7967 */ 7968 dev_info->wb_buffer_type = desc_buf[DEVICE_DESC_PARAM_WB_TYPE]; 7969 7970 dev_info->b_presrv_uspc_en = 7971 desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN]; 7972 7973 if (dev_info->wb_buffer_type == WB_BUF_MODE_SHARED) { 7974 if (!get_unaligned_be32(desc_buf + 7975 DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS)) 7976 goto wb_disabled; 7977 } else { 7978 for (lun = 0; lun < UFS_UPIU_MAX_WB_LUN_ID; lun++) { 7979 d_lu_wb_buf_alloc = 0; 7980 ufshcd_read_unit_desc_param(hba, 7981 lun, 7982 UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS, 7983 (u8 *)&d_lu_wb_buf_alloc, 7984 sizeof(d_lu_wb_buf_alloc)); 7985 if (d_lu_wb_buf_alloc) { 7986 dev_info->wb_dedicated_lu = lun; 7987 break; 7988 } 7989 } 7990 7991 if (!d_lu_wb_buf_alloc) 7992 goto wb_disabled; 7993 } 7994 7995 if (!ufshcd_is_wb_buf_lifetime_available(hba)) 7996 goto wb_disabled; 7997 7998 return; 7999 8000 wb_disabled: 8001 hba->caps &= ~UFSHCD_CAP_WB_EN; 8002 } 8003 8004 static void ufshcd_temp_notif_probe(struct ufs_hba *hba, const u8 *desc_buf) 8005 { 8006 struct ufs_dev_info *dev_info = &hba->dev_info; 8007 u32 ext_ufs_feature; 8008 u8 mask = 0; 8009 8010 if (!(hba->caps & UFSHCD_CAP_TEMP_NOTIF) || dev_info->wspecversion < 0x300) 8011 return; 8012 8013 ext_ufs_feature = get_unaligned_be32(desc_buf + DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP); 8014 8015 if (ext_ufs_feature & UFS_DEV_LOW_TEMP_NOTIF) 8016 mask |= MASK_EE_TOO_LOW_TEMP; 8017 8018 if (ext_ufs_feature & UFS_DEV_HIGH_TEMP_NOTIF) 8019 mask |= MASK_EE_TOO_HIGH_TEMP; 8020 8021 if (mask) { 8022 ufshcd_enable_ee(hba, mask); 8023 ufs_hwmon_probe(hba, mask); 8024 } 8025 } 8026 8027 static void ufshcd_ext_iid_probe(struct ufs_hba *hba, u8 *desc_buf) 8028 { 8029 struct ufs_dev_info *dev_info = &hba->dev_info; 8030 u32 ext_ufs_feature; 8031 u32 ext_iid_en = 0; 8032 int err; 8033 8034 /* Only UFS-4.0 and above may support EXT_IID */ 8035 if (dev_info->wspecversion < 0x400) 8036 goto out; 8037 8038 ext_ufs_feature = get_unaligned_be32(desc_buf + 8039 DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP); 8040 if (!(ext_ufs_feature & UFS_DEV_EXT_IID_SUP)) 8041 goto out; 8042 8043 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, 8044 QUERY_ATTR_IDN_EXT_IID_EN, 0, 0, &ext_iid_en); 8045 if (err) 8046 dev_err(hba->dev, "failed reading bEXTIIDEn. 
err = %d\n", err); 8047 8048 out: 8049 dev_info->b_ext_iid_en = ext_iid_en; 8050 } 8051 8052 void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, 8053 const struct ufs_dev_quirk *fixups) 8054 { 8055 const struct ufs_dev_quirk *f; 8056 struct ufs_dev_info *dev_info = &hba->dev_info; 8057 8058 if (!fixups) 8059 return; 8060 8061 for (f = fixups; f->quirk; f++) { 8062 if ((f->wmanufacturerid == dev_info->wmanufacturerid || 8063 f->wmanufacturerid == UFS_ANY_VENDOR) && 8064 ((dev_info->model && 8065 STR_PRFX_EQUAL(f->model, dev_info->model)) || 8066 !strcmp(f->model, UFS_ANY_MODEL))) 8067 hba->dev_quirks |= f->quirk; 8068 } 8069 } 8070 EXPORT_SYMBOL_GPL(ufshcd_fixup_dev_quirks); 8071 8072 static void ufs_fixup_device_setup(struct ufs_hba *hba) 8073 { 8074 /* fix by general quirk table */ 8075 ufshcd_fixup_dev_quirks(hba, ufs_fixups); 8076 8077 /* allow vendors to fix quirks */ 8078 ufshcd_vops_fixup_dev_quirks(hba); 8079 } 8080 8081 static int ufs_get_device_desc(struct ufs_hba *hba) 8082 { 8083 int err; 8084 u8 model_index; 8085 u8 *desc_buf; 8086 struct ufs_dev_info *dev_info = &hba->dev_info; 8087 8088 desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL); 8089 if (!desc_buf) { 8090 err = -ENOMEM; 8091 goto out; 8092 } 8093 8094 err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf, 8095 QUERY_DESC_MAX_SIZE); 8096 if (err) { 8097 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n", 8098 __func__, err); 8099 goto out; 8100 } 8101 8102 /* 8103 * getting vendor (manufacturerID) and Bank Index in big endian 8104 * format 8105 */ 8106 dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 | 8107 desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1]; 8108 8109 /* getting Specification Version in big endian format */ 8110 dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 | 8111 desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1]; 8112 dev_info->bqueuedepth = desc_buf[DEVICE_DESC_PARAM_Q_DPTH]; 8113 8114 model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME]; 8115 8116 err = ufshcd_read_string_desc(hba, model_index, 8117 &dev_info->model, SD_ASCII_STD); 8118 if (err < 0) { 8119 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n", 8120 __func__, err); 8121 goto out; 8122 } 8123 8124 hba->luns_avail = desc_buf[DEVICE_DESC_PARAM_NUM_LU] + 8125 desc_buf[DEVICE_DESC_PARAM_NUM_WLU]; 8126 8127 ufs_fixup_device_setup(hba); 8128 8129 ufshcd_wb_probe(hba, desc_buf); 8130 8131 ufshcd_temp_notif_probe(hba, desc_buf); 8132 8133 if (hba->ext_iid_sup) 8134 ufshcd_ext_iid_probe(hba, desc_buf); 8135 8136 /* 8137 * ufshcd_read_string_desc returns size of the string 8138 * reset the error value 8139 */ 8140 err = 0; 8141 8142 out: 8143 kfree(desc_buf); 8144 return err; 8145 } 8146 8147 static void ufs_put_device_desc(struct ufs_hba *hba) 8148 { 8149 struct ufs_dev_info *dev_info = &hba->dev_info; 8150 8151 kfree(dev_info->model); 8152 dev_info->model = NULL; 8153 } 8154 8155 /** 8156 * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro 8157 * @hba: per-adapter instance 8158 * 8159 * PA_TActivate parameter can be tuned manually if UniPro version is less than 8160 * 1.61. PA_TActivate needs to be greater than or equal to peerM-PHY's 8161 * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce 8162 * the hibern8 exit latency. 8163 * 8164 * Return: zero on success, non-zero error value on failure. 
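 *
 * The conversion below is simply
 *   PA_TActivate = RX_MIN_ACTIVATETIME_CAPABILITY *
 *                  RX_MIN_ACTIVATETIME_UNIT_US / PA_TACTIVATE_TIME_UNIT_US
 * i.e. the peer capability is rescaled from its own time unit into the
 * unit used by PA_TActivate before being written back.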
8165 */ 8166 static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba) 8167 { 8168 int ret = 0; 8169 u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate; 8170 8171 ret = ufshcd_dme_peer_get(hba, 8172 UIC_ARG_MIB_SEL( 8173 RX_MIN_ACTIVATETIME_CAPABILITY, 8174 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)), 8175 &peer_rx_min_activatetime); 8176 if (ret) 8177 goto out; 8178 8179 /* make sure proper unit conversion is applied */ 8180 tuned_pa_tactivate = 8181 ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US) 8182 / PA_TACTIVATE_TIME_UNIT_US); 8183 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 8184 tuned_pa_tactivate); 8185 8186 out: 8187 return ret; 8188 } 8189 8190 /** 8191 * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro 8192 * @hba: per-adapter instance 8193 * 8194 * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than 8195 * 1.61. PA_Hibern8Time needs to be maximum of local M-PHY's 8196 * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY. 8197 * This optimal value can help reduce the hibern8 exit latency. 8198 * 8199 * Return: zero on success, non-zero error value on failure. 8200 */ 8201 static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba) 8202 { 8203 int ret = 0; 8204 u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0; 8205 u32 max_hibern8_time, tuned_pa_hibern8time; 8206 8207 ret = ufshcd_dme_get(hba, 8208 UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY, 8209 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)), 8210 &local_tx_hibern8_time_cap); 8211 if (ret) 8212 goto out; 8213 8214 ret = ufshcd_dme_peer_get(hba, 8215 UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY, 8216 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)), 8217 &peer_rx_hibern8_time_cap); 8218 if (ret) 8219 goto out; 8220 8221 max_hibern8_time = max(local_tx_hibern8_time_cap, 8222 peer_rx_hibern8_time_cap); 8223 /* make sure proper unit conversion is applied */ 8224 tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US) 8225 / PA_HIBERN8_TIME_UNIT_US); 8226 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 8227 tuned_pa_hibern8time); 8228 out: 8229 return ret; 8230 } 8231 8232 /** 8233 * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is 8234 * less than device PA_TACTIVATE time. 8235 * @hba: per-adapter instance 8236 * 8237 * Some UFS devices require host PA_TACTIVATE to be lower than device 8238 * PA_TACTIVATE, we need to enable UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk 8239 * for such devices. 8240 * 8241 * Return: zero on success, non-zero error value on failure. 
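 *
 * Both PA_TACTIVATE values are first converted to microseconds using the
 * per-side PA_GRANULARITY (via gran_to_us_table[]) and compared; if the
 * host value is not smaller, the peer PA_TACTIVATE is recomputed from the
 * host's microsecond value and bumped by one unit so that it ends up
 * strictly larger than the host's.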
8242 */ 8243 static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba) 8244 { 8245 int ret = 0; 8246 u32 granularity, peer_granularity; 8247 u32 pa_tactivate, peer_pa_tactivate; 8248 u32 pa_tactivate_us, peer_pa_tactivate_us; 8249 static const u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100}; 8250 8251 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY), 8252 &granularity); 8253 if (ret) 8254 goto out; 8255 8256 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY), 8257 &peer_granularity); 8258 if (ret) 8259 goto out; 8260 8261 if ((granularity < PA_GRANULARITY_MIN_VAL) || 8262 (granularity > PA_GRANULARITY_MAX_VAL)) { 8263 dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d", 8264 __func__, granularity); 8265 return -EINVAL; 8266 } 8267 8268 if ((peer_granularity < PA_GRANULARITY_MIN_VAL) || 8269 (peer_granularity > PA_GRANULARITY_MAX_VAL)) { 8270 dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d", 8271 __func__, peer_granularity); 8272 return -EINVAL; 8273 } 8274 8275 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate); 8276 if (ret) 8277 goto out; 8278 8279 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE), 8280 &peer_pa_tactivate); 8281 if (ret) 8282 goto out; 8283 8284 pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1]; 8285 peer_pa_tactivate_us = peer_pa_tactivate * 8286 gran_to_us_table[peer_granularity - 1]; 8287 8288 if (pa_tactivate_us >= peer_pa_tactivate_us) { 8289 u32 new_peer_pa_tactivate; 8290 8291 new_peer_pa_tactivate = pa_tactivate_us / 8292 gran_to_us_table[peer_granularity - 1]; 8293 new_peer_pa_tactivate++; 8294 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 8295 new_peer_pa_tactivate); 8296 } 8297 8298 out: 8299 return ret; 8300 } 8301 8302 static void ufshcd_tune_unipro_params(struct ufs_hba *hba) 8303 { 8304 if (ufshcd_is_unipro_pa_params_tuning_req(hba)) { 8305 ufshcd_tune_pa_tactivate(hba); 8306 ufshcd_tune_pa_hibern8time(hba); 8307 } 8308 8309 ufshcd_vops_apply_dev_quirks(hba); 8310 8311 if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE) 8312 /* set 1ms timeout for PA_TACTIVATE */ 8313 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10); 8314 8315 if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE) 8316 ufshcd_quirk_tune_host_pa_tactivate(hba); 8317 } 8318 8319 static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba) 8320 { 8321 hba->ufs_stats.hibern8_exit_cnt = 0; 8322 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0); 8323 hba->req_abort_count = 0; 8324 } 8325 8326 static int ufshcd_device_geo_params_init(struct ufs_hba *hba) 8327 { 8328 int err; 8329 u8 *desc_buf; 8330 8331 desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL); 8332 if (!desc_buf) { 8333 err = -ENOMEM; 8334 goto out; 8335 } 8336 8337 err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, 0, 8338 desc_buf, QUERY_DESC_MAX_SIZE); 8339 if (err) { 8340 dev_err(hba->dev, "%s: Failed reading Geometry Desc. 
err = %d\n", 8341 __func__, err); 8342 goto out; 8343 } 8344 8345 if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 1) 8346 hba->dev_info.max_lu_supported = 32; 8347 else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0) 8348 hba->dev_info.max_lu_supported = 8; 8349 8350 out: 8351 kfree(desc_buf); 8352 return err; 8353 } 8354 8355 struct ufs_ref_clk { 8356 unsigned long freq_hz; 8357 enum ufs_ref_clk_freq val; 8358 }; 8359 8360 static const struct ufs_ref_clk ufs_ref_clk_freqs[] = { 8361 {19200000, REF_CLK_FREQ_19_2_MHZ}, 8362 {26000000, REF_CLK_FREQ_26_MHZ}, 8363 {38400000, REF_CLK_FREQ_38_4_MHZ}, 8364 {52000000, REF_CLK_FREQ_52_MHZ}, 8365 {0, REF_CLK_FREQ_INVAL}, 8366 }; 8367 8368 static enum ufs_ref_clk_freq 8369 ufs_get_bref_clk_from_hz(unsigned long freq) 8370 { 8371 int i; 8372 8373 for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++) 8374 if (ufs_ref_clk_freqs[i].freq_hz == freq) 8375 return ufs_ref_clk_freqs[i].val; 8376 8377 return REF_CLK_FREQ_INVAL; 8378 } 8379 8380 void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk) 8381 { 8382 unsigned long freq; 8383 8384 freq = clk_get_rate(refclk); 8385 8386 hba->dev_ref_clk_freq = 8387 ufs_get_bref_clk_from_hz(freq); 8388 8389 if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL) 8390 dev_err(hba->dev, 8391 "invalid ref_clk setting = %ld\n", freq); 8392 } 8393 8394 static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba) 8395 { 8396 int err; 8397 u32 ref_clk; 8398 u32 freq = hba->dev_ref_clk_freq; 8399 8400 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, 8401 QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk); 8402 8403 if (err) { 8404 dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n", 8405 err); 8406 goto out; 8407 } 8408 8409 if (ref_clk == freq) 8410 goto out; /* nothing to update */ 8411 8412 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, 8413 QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &freq); 8414 8415 if (err) { 8416 dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n", 8417 ufs_ref_clk_freqs[freq].freq_hz); 8418 goto out; 8419 } 8420 8421 dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n", 8422 ufs_ref_clk_freqs[freq].freq_hz); 8423 8424 out: 8425 return err; 8426 } 8427 8428 static int ufshcd_device_params_init(struct ufs_hba *hba) 8429 { 8430 bool flag; 8431 int ret; 8432 8433 /* Init UFS geometry descriptor related parameters */ 8434 ret = ufshcd_device_geo_params_init(hba); 8435 if (ret) 8436 goto out; 8437 8438 /* Check and apply UFS device quirks */ 8439 ret = ufs_get_device_desc(hba); 8440 if (ret) { 8441 dev_err(hba->dev, "%s: Failed getting device info. 
err = %d\n", 8442 __func__, ret); 8443 goto out; 8444 } 8445 8446 ufshcd_get_ref_clk_gating_wait(hba); 8447 8448 if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG, 8449 QUERY_FLAG_IDN_PWR_ON_WPE, 0, &flag)) 8450 hba->dev_info.f_power_on_wp_en = flag; 8451 8452 /* Probe maximum power mode co-supported by both UFS host and device */ 8453 if (ufshcd_get_max_pwr_mode(hba)) 8454 dev_err(hba->dev, 8455 "%s: Failed getting max supported power mode\n", 8456 __func__); 8457 out: 8458 return ret; 8459 } 8460 8461 static void ufshcd_set_timestamp_attr(struct ufs_hba *hba) 8462 { 8463 int err; 8464 struct ufs_query_req *request = NULL; 8465 struct ufs_query_res *response = NULL; 8466 struct ufs_dev_info *dev_info = &hba->dev_info; 8467 struct utp_upiu_query_v4_0 *upiu_data; 8468 8469 if (dev_info->wspecversion < 0x400) 8470 return; 8471 8472 ufshcd_hold(hba); 8473 8474 mutex_lock(&hba->dev_cmd.lock); 8475 8476 ufshcd_init_query(hba, &request, &response, 8477 UPIU_QUERY_OPCODE_WRITE_ATTR, 8478 QUERY_ATTR_IDN_TIMESTAMP, 0, 0); 8479 8480 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST; 8481 8482 upiu_data = (struct utp_upiu_query_v4_0 *)&request->upiu_req; 8483 8484 put_unaligned_be64(ktime_get_real_ns(), &upiu_data->osf3); 8485 8486 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); 8487 8488 if (err) 8489 dev_err(hba->dev, "%s: failed to set timestamp %d\n", 8490 __func__, err); 8491 8492 mutex_unlock(&hba->dev_cmd.lock); 8493 ufshcd_release(hba); 8494 } 8495 8496 /** 8497 * ufshcd_add_lus - probe and add UFS logical units 8498 * @hba: per-adapter instance 8499 * 8500 * Return: 0 upon success; < 0 upon failure. 8501 */ 8502 static int ufshcd_add_lus(struct ufs_hba *hba) 8503 { 8504 int ret; 8505 8506 /* Add required well known logical units to scsi mid layer */ 8507 ret = ufshcd_scsi_add_wlus(hba); 8508 if (ret) 8509 goto out; 8510 8511 /* Initialize devfreq after UFS device is detected */ 8512 if (ufshcd_is_clkscaling_supported(hba)) { 8513 memcpy(&hba->clk_scaling.saved_pwr_info, 8514 &hba->pwr_info, 8515 sizeof(struct ufs_pa_layer_attr)); 8516 hba->clk_scaling.is_allowed = true; 8517 8518 ret = ufshcd_devfreq_init(hba); 8519 if (ret) 8520 goto out; 8521 8522 hba->clk_scaling.is_enabled = true; 8523 ufshcd_init_clk_scaling_sysfs(hba); 8524 } 8525 8526 ufs_bsg_probe(hba); 8527 scsi_scan_host(hba->host); 8528 pm_runtime_put_sync(hba->dev); 8529 8530 out: 8531 return ret; 8532 } 8533 8534 /* SDB - Single Doorbell */ 8535 static void ufshcd_release_sdb_queue(struct ufs_hba *hba, int nutrs) 8536 { 8537 size_t ucdl_size, utrdl_size; 8538 8539 ucdl_size = ufshcd_get_ucd_size(hba) * nutrs; 8540 dmam_free_coherent(hba->dev, ucdl_size, hba->ucdl_base_addr, 8541 hba->ucdl_dma_addr); 8542 8543 utrdl_size = sizeof(struct utp_transfer_req_desc) * nutrs; 8544 dmam_free_coherent(hba->dev, utrdl_size, hba->utrdl_base_addr, 8545 hba->utrdl_dma_addr); 8546 8547 devm_kfree(hba->dev, hba->lrb); 8548 } 8549 8550 static int ufshcd_alloc_mcq(struct ufs_hba *hba) 8551 { 8552 int ret; 8553 int old_nutrs = hba->nutrs; 8554 8555 ret = ufshcd_mcq_decide_queue_depth(hba); 8556 if (ret < 0) 8557 return ret; 8558 8559 hba->nutrs = ret; 8560 ret = ufshcd_mcq_init(hba); 8561 if (ret) 8562 goto err; 8563 8564 /* 8565 * Previously allocated memory for nutrs may not be enough in MCQ mode. 8566 * Number of supported tags in MCQ mode may be larger than SDB mode. 
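 * If the queue depth changed, the SDB-mode UCD/UTRD buffers and the lrb
 * array are released and reallocated for the new hba->nutrs before the
 * MCQ-specific memory is allocated.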
8567 */ 8568 if (hba->nutrs != old_nutrs) { 8569 ufshcd_release_sdb_queue(hba, old_nutrs); 8570 ret = ufshcd_memory_alloc(hba); 8571 if (ret) 8572 goto err; 8573 ufshcd_host_memory_configure(hba); 8574 } 8575 8576 ret = ufshcd_mcq_memory_alloc(hba); 8577 if (ret) 8578 goto err; 8579 8580 return 0; 8581 err: 8582 hba->nutrs = old_nutrs; 8583 return ret; 8584 } 8585 8586 static void ufshcd_config_mcq(struct ufs_hba *hba) 8587 { 8588 int ret; 8589 u32 intrs; 8590 8591 ret = ufshcd_mcq_vops_config_esi(hba); 8592 dev_info(hba->dev, "ESI %sconfigured\n", ret ? "is not " : ""); 8593 8594 intrs = UFSHCD_ENABLE_MCQ_INTRS; 8595 if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_INTR) 8596 intrs &= ~MCQ_CQ_EVENT_STATUS; 8597 ufshcd_enable_intr(hba, intrs); 8598 ufshcd_mcq_make_queues_operational(hba); 8599 ufshcd_mcq_config_mac(hba, hba->nutrs); 8600 8601 hba->host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED; 8602 hba->reserved_slot = hba->nutrs - UFSHCD_NUM_RESERVED; 8603 8604 /* Select MCQ mode */ 8605 ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x1, 8606 REG_UFS_MEM_CFG); 8607 hba->mcq_enabled = true; 8608 8609 dev_info(hba->dev, "MCQ configured, nr_queues=%d, io_queues=%d, read_queue=%d, poll_queues=%d, queue_depth=%d\n", 8610 hba->nr_hw_queues, hba->nr_queues[HCTX_TYPE_DEFAULT], 8611 hba->nr_queues[HCTX_TYPE_READ], hba->nr_queues[HCTX_TYPE_POLL], 8612 hba->nutrs); 8613 } 8614 8615 static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params) 8616 { 8617 int ret; 8618 struct Scsi_Host *host = hba->host; 8619 8620 hba->ufshcd_state = UFSHCD_STATE_RESET; 8621 8622 ret = ufshcd_link_startup(hba); 8623 if (ret) 8624 return ret; 8625 8626 if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION) 8627 return ret; 8628 8629 /* Debug counters initialization */ 8630 ufshcd_clear_dbg_ufs_stats(hba); 8631 8632 /* UniPro link is active now */ 8633 ufshcd_set_link_active(hba); 8634 8635 /* Reconfigure MCQ upon reset */ 8636 if (is_mcq_enabled(hba) && !init_dev_params) 8637 ufshcd_config_mcq(hba); 8638 8639 /* Verify device initialization by sending NOP OUT UPIU */ 8640 ret = ufshcd_verify_dev_init(hba); 8641 if (ret) 8642 return ret; 8643 8644 /* Initiate UFS initialization, and waiting until completion */ 8645 ret = ufshcd_complete_dev_init(hba); 8646 if (ret) 8647 return ret; 8648 8649 /* 8650 * Initialize UFS device parameters used by driver, these 8651 * parameters are associated with UFS descriptors. 8652 */ 8653 if (init_dev_params) { 8654 ret = ufshcd_device_params_init(hba); 8655 if (ret) 8656 return ret; 8657 if (is_mcq_supported(hba) && !hba->scsi_host_added) { 8658 ret = ufshcd_alloc_mcq(hba); 8659 if (!ret) { 8660 ufshcd_config_mcq(hba); 8661 } else { 8662 /* Continue with SDB mode */ 8663 use_mcq_mode = false; 8664 dev_err(hba->dev, "MCQ mode is disabled, err=%d\n", 8665 ret); 8666 } 8667 ret = scsi_add_host(host, hba->dev); 8668 if (ret) { 8669 dev_err(hba->dev, "scsi_add_host failed\n"); 8670 return ret; 8671 } 8672 hba->scsi_host_added = true; 8673 } else if (is_mcq_supported(hba)) { 8674 /* UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH is set */ 8675 ufshcd_config_mcq(hba); 8676 } 8677 } 8678 8679 ufshcd_tune_unipro_params(hba); 8680 8681 /* UFS device is also active now */ 8682 ufshcd_set_ufs_dev_active(hba); 8683 ufshcd_force_reset_auto_bkops(hba); 8684 8685 ufshcd_set_timestamp_attr(hba); 8686 8687 /* Gear up to HS gear if supported */ 8688 if (hba->max_pwr_info.is_valid) { 8689 /* 8690 * Set the right value to bRefClkFreq before attempting to 8691 * switch to HS gears. 
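 * (bRefClkFreq is written first, when it is known, so the device sees the
 * correct reference clock frequency before ufshcd_config_pwr_mode()
 * switches to the higher gear.)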
8692 */ 8693 if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL) 8694 ufshcd_set_dev_ref_clk(hba); 8695 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info); 8696 if (ret) { 8697 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n", 8698 __func__, ret); 8699 return ret; 8700 } 8701 } 8702 8703 return 0; 8704 } 8705 8706 /** 8707 * ufshcd_probe_hba - probe hba to detect device and initialize it 8708 * @hba: per-adapter instance 8709 * @init_dev_params: whether or not to call ufshcd_device_params_init(). 8710 * 8711 * Execute link-startup and verify device initialization 8712 * 8713 * Return: 0 upon success; < 0 upon failure. 8714 */ 8715 static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params) 8716 { 8717 ktime_t start = ktime_get(); 8718 unsigned long flags; 8719 int ret; 8720 8721 ret = ufshcd_device_init(hba, init_dev_params); 8722 if (ret) 8723 goto out; 8724 8725 if (hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH) { 8726 /* Reset the device and controller before doing reinit */ 8727 ufshcd_device_reset(hba); 8728 ufshcd_hba_stop(hba); 8729 ufshcd_vops_reinit_notify(hba); 8730 ret = ufshcd_hba_enable(hba); 8731 if (ret) { 8732 dev_err(hba->dev, "Host controller enable failed\n"); 8733 ufshcd_print_evt_hist(hba); 8734 ufshcd_print_host_state(hba); 8735 goto out; 8736 } 8737 8738 /* Reinit the device */ 8739 ret = ufshcd_device_init(hba, init_dev_params); 8740 if (ret) 8741 goto out; 8742 } 8743 8744 ufshcd_print_pwr_info(hba); 8745 8746 /* 8747 * bActiveICCLevel is volatile for UFS device (as per latest v2.1 spec) 8748 * and for removable UFS card as well, hence always set the parameter. 8749 * Note: Error handler may issue the device reset hence resetting 8750 * bActiveICCLevel as well so it is always safe to set this here. 8751 */ 8752 ufshcd_set_active_icc_lvl(hba); 8753 8754 /* Enable UFS Write Booster if supported */ 8755 ufshcd_configure_wb(hba); 8756 8757 if (hba->ee_usr_mask) 8758 ufshcd_write_ee_control(hba); 8759 /* Enable Auto-Hibernate if configured */ 8760 ufshcd_auto_hibern8_enable(hba); 8761 8762 out: 8763 spin_lock_irqsave(hba->host->host_lock, flags); 8764 if (ret) 8765 hba->ufshcd_state = UFSHCD_STATE_ERROR; 8766 else if (hba->ufshcd_state == UFSHCD_STATE_RESET) 8767 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; 8768 spin_unlock_irqrestore(hba->host->host_lock, flags); 8769 8770 trace_ufshcd_init(dev_name(hba->dev), ret, 8771 ktime_to_us(ktime_sub(ktime_get(), start)), 8772 hba->curr_dev_pwr_mode, hba->uic_link_state); 8773 return ret; 8774 } 8775 8776 /** 8777 * ufshcd_async_scan - asynchronous execution for probing hba 8778 * @data: data pointer to pass to this function 8779 * @cookie: cookie data 8780 */ 8781 static void ufshcd_async_scan(void *data, async_cookie_t cookie) 8782 { 8783 struct ufs_hba *hba = (struct ufs_hba *)data; 8784 int ret; 8785 8786 down(&hba->host_sem); 8787 /* Initialize hba, detect and initialize UFS device */ 8788 ret = ufshcd_probe_hba(hba, true); 8789 up(&hba->host_sem); 8790 if (ret) 8791 goto out; 8792 8793 /* Probe and add UFS logical units */ 8794 ret = ufshcd_add_lus(hba); 8795 out: 8796 /* 8797 * If we failed to initialize the device or the device is not 8798 * present, turn off the power/clocks etc. 
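 * The pm_runtime_put_sync()/ufshcd_hba_exit() calls below undo the power,
 * clock and regulator setup done during host initialization.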
8799 */ 8800 if (ret) { 8801 pm_runtime_put_sync(hba->dev); 8802 ufshcd_hba_exit(hba); 8803 } 8804 } 8805 8806 static enum scsi_timeout_action ufshcd_eh_timed_out(struct scsi_cmnd *scmd) 8807 { 8808 struct ufs_hba *hba = shost_priv(scmd->device->host); 8809 8810 if (!hba->system_suspending) { 8811 /* Activate the error handler in the SCSI core. */ 8812 return SCSI_EH_NOT_HANDLED; 8813 } 8814 8815 /* 8816 * If we get here we know that no TMFs are outstanding and also that 8817 * the only pending command is a START STOP UNIT command. Handle the 8818 * timeout of that command directly to prevent a deadlock between 8819 * ufshcd_set_dev_pwr_mode() and ufshcd_err_handler(). 8820 */ 8821 ufshcd_link_recovery(hba); 8822 dev_info(hba->dev, "%s() finished; outstanding_tasks = %#lx.\n", 8823 __func__, hba->outstanding_tasks); 8824 8825 return hba->outstanding_reqs ? SCSI_EH_RESET_TIMER : SCSI_EH_DONE; 8826 } 8827 8828 static const struct attribute_group *ufshcd_driver_groups[] = { 8829 &ufs_sysfs_unit_descriptor_group, 8830 &ufs_sysfs_lun_attributes_group, 8831 NULL, 8832 }; 8833 8834 static struct ufs_hba_variant_params ufs_hba_vps = { 8835 .hba_enable_delay_us = 1000, 8836 .wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(40), 8837 .devfreq_profile.polling_ms = 100, 8838 .devfreq_profile.target = ufshcd_devfreq_target, 8839 .devfreq_profile.get_dev_status = ufshcd_devfreq_get_dev_status, 8840 .ondemand_data.upthreshold = 70, 8841 .ondemand_data.downdifferential = 5, 8842 }; 8843 8844 static const struct scsi_host_template ufshcd_driver_template = { 8845 .module = THIS_MODULE, 8846 .name = UFSHCD, 8847 .proc_name = UFSHCD, 8848 .map_queues = ufshcd_map_queues, 8849 .queuecommand = ufshcd_queuecommand, 8850 .mq_poll = ufshcd_poll, 8851 .slave_alloc = ufshcd_slave_alloc, 8852 .slave_configure = ufshcd_slave_configure, 8853 .slave_destroy = ufshcd_slave_destroy, 8854 .change_queue_depth = ufshcd_change_queue_depth, 8855 .eh_abort_handler = ufshcd_abort, 8856 .eh_device_reset_handler = ufshcd_eh_device_reset_handler, 8857 .eh_host_reset_handler = ufshcd_eh_host_reset_handler, 8858 .eh_timed_out = ufshcd_eh_timed_out, 8859 .this_id = -1, 8860 .sg_tablesize = SG_ALL, 8861 .cmd_per_lun = UFSHCD_CMD_PER_LUN, 8862 .can_queue = UFSHCD_CAN_QUEUE, 8863 .max_segment_size = PRDT_DATA_BYTE_COUNT_MAX, 8864 .max_sectors = SZ_1M / SECTOR_SIZE, 8865 .max_host_blocked = 1, 8866 .track_queue_depth = 1, 8867 .skip_settle_delay = 1, 8868 .sdev_groups = ufshcd_driver_groups, 8869 .rpm_autosuspend_delay = RPM_AUTOSUSPEND_DELAY_MS, 8870 }; 8871 8872 static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg, 8873 int ua) 8874 { 8875 int ret; 8876 8877 if (!vreg) 8878 return 0; 8879 8880 /* 8881 * "set_load" operation shall be required on those regulators 8882 * which specifically configured current limitation. Otherwise 8883 * zero max_uA may cause unexpected behavior when regulator is 8884 * enabled or set as high power mode. 
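 * In other words: regulators with max_uA == 0 are left untouched here, and
 * only those with an explicit current limit get regulator_set_load() calls
 * (UFS_VREG_LPM_LOAD_UA for LPM, vreg->max_uA for HPM).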
8885 */ 8886 if (!vreg->max_uA) 8887 return 0; 8888 8889 ret = regulator_set_load(vreg->reg, ua); 8890 if (ret < 0) { 8891 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n", 8892 __func__, vreg->name, ua, ret); 8893 } 8894 8895 return ret; 8896 } 8897 8898 static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba, 8899 struct ufs_vreg *vreg) 8900 { 8901 return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA); 8902 } 8903 8904 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba, 8905 struct ufs_vreg *vreg) 8906 { 8907 if (!vreg) 8908 return 0; 8909 8910 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA); 8911 } 8912 8913 static int ufshcd_config_vreg(struct device *dev, 8914 struct ufs_vreg *vreg, bool on) 8915 { 8916 if (regulator_count_voltages(vreg->reg) <= 0) 8917 return 0; 8918 8919 return ufshcd_config_vreg_load(dev, vreg, on ? vreg->max_uA : 0); 8920 } 8921 8922 static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg) 8923 { 8924 int ret = 0; 8925 8926 if (!vreg || vreg->enabled) 8927 goto out; 8928 8929 ret = ufshcd_config_vreg(dev, vreg, true); 8930 if (!ret) 8931 ret = regulator_enable(vreg->reg); 8932 8933 if (!ret) 8934 vreg->enabled = true; 8935 else 8936 dev_err(dev, "%s: %s enable failed, err=%d\n", 8937 __func__, vreg->name, ret); 8938 out: 8939 return ret; 8940 } 8941 8942 static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg) 8943 { 8944 int ret = 0; 8945 8946 if (!vreg || !vreg->enabled || vreg->always_on) 8947 goto out; 8948 8949 ret = regulator_disable(vreg->reg); 8950 8951 if (!ret) { 8952 /* ignore errors on applying disable config */ 8953 ufshcd_config_vreg(dev, vreg, false); 8954 vreg->enabled = false; 8955 } else { 8956 dev_err(dev, "%s: %s disable failed, err=%d\n", 8957 __func__, vreg->name, ret); 8958 } 8959 out: 8960 return ret; 8961 } 8962 8963 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on) 8964 { 8965 int ret = 0; 8966 struct device *dev = hba->dev; 8967 struct ufs_vreg_info *info = &hba->vreg_info; 8968 8969 ret = ufshcd_toggle_vreg(dev, info->vcc, on); 8970 if (ret) 8971 goto out; 8972 8973 ret = ufshcd_toggle_vreg(dev, info->vccq, on); 8974 if (ret) 8975 goto out; 8976 8977 ret = ufshcd_toggle_vreg(dev, info->vccq2, on); 8978 8979 out: 8980 if (ret) { 8981 ufshcd_toggle_vreg(dev, info->vccq2, false); 8982 ufshcd_toggle_vreg(dev, info->vccq, false); 8983 ufshcd_toggle_vreg(dev, info->vcc, false); 8984 } 8985 return ret; 8986 } 8987 8988 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on) 8989 { 8990 struct ufs_vreg_info *info = &hba->vreg_info; 8991 8992 return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on); 8993 } 8994 8995 int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg) 8996 { 8997 int ret = 0; 8998 8999 if (!vreg) 9000 goto out; 9001 9002 vreg->reg = devm_regulator_get(dev, vreg->name); 9003 if (IS_ERR(vreg->reg)) { 9004 ret = PTR_ERR(vreg->reg); 9005 dev_err(dev, "%s: %s get failed, err=%d\n", 9006 __func__, vreg->name, ret); 9007 } 9008 out: 9009 return ret; 9010 } 9011 EXPORT_SYMBOL_GPL(ufshcd_get_vreg); 9012 9013 static int ufshcd_init_vreg(struct ufs_hba *hba) 9014 { 9015 int ret = 0; 9016 struct device *dev = hba->dev; 9017 struct ufs_vreg_info *info = &hba->vreg_info; 9018 9019 ret = ufshcd_get_vreg(dev, info->vcc); 9020 if (ret) 9021 goto out; 9022 9023 ret = ufshcd_get_vreg(dev, info->vccq); 9024 if (!ret) 9025 ret = ufshcd_get_vreg(dev, info->vccq2); 9026 out: 9027 return ret; 9028 } 9029 9030 static int ufshcd_init_hba_vreg(struct 
ufs_hba *hba) 9031 { 9032 struct ufs_vreg_info *info = &hba->vreg_info; 9033 9034 return ufshcd_get_vreg(hba->dev, info->vdd_hba); 9035 } 9036 9037 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on) 9038 { 9039 int ret = 0; 9040 struct ufs_clk_info *clki; 9041 struct list_head *head = &hba->clk_list_head; 9042 unsigned long flags; 9043 ktime_t start = ktime_get(); 9044 bool clk_state_changed = false; 9045 9046 if (list_empty(head)) 9047 goto out; 9048 9049 ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE); 9050 if (ret) 9051 return ret; 9052 9053 list_for_each_entry(clki, head, list) { 9054 if (!IS_ERR_OR_NULL(clki->clk)) { 9055 /* 9056 * Don't disable clocks which are needed 9057 * to keep the link active. 9058 */ 9059 if (ufshcd_is_link_active(hba) && 9060 clki->keep_link_active) 9061 continue; 9062 9063 clk_state_changed = on ^ clki->enabled; 9064 if (on && !clki->enabled) { 9065 ret = clk_prepare_enable(clki->clk); 9066 if (ret) { 9067 dev_err(hba->dev, "%s: %s prepare enable failed, %d\n", 9068 __func__, clki->name, ret); 9069 goto out; 9070 } 9071 } else if (!on && clki->enabled) { 9072 clk_disable_unprepare(clki->clk); 9073 } 9074 clki->enabled = on; 9075 dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__, 9076 clki->name, on ? "en" : "dis"); 9077 } 9078 } 9079 9080 ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE); 9081 if (ret) 9082 return ret; 9083 9084 out: 9085 if (ret) { 9086 list_for_each_entry(clki, head, list) { 9087 if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled) 9088 clk_disable_unprepare(clki->clk); 9089 } 9090 } else if (!ret && on) { 9091 spin_lock_irqsave(hba->host->host_lock, flags); 9092 hba->clk_gating.state = CLKS_ON; 9093 trace_ufshcd_clk_gating(dev_name(hba->dev), 9094 hba->clk_gating.state); 9095 spin_unlock_irqrestore(hba->host->host_lock, flags); 9096 } 9097 9098 if (clk_state_changed) 9099 trace_ufshcd_profile_clk_gating(dev_name(hba->dev), 9100 (on ? "on" : "off"), 9101 ktime_to_us(ktime_sub(ktime_get(), start)), ret); 9102 return ret; 9103 } 9104 9105 static enum ufs_ref_clk_freq ufshcd_parse_ref_clk_property(struct ufs_hba *hba) 9106 { 9107 u32 freq; 9108 int ret = device_property_read_u32(hba->dev, "ref-clk-freq", &freq); 9109 9110 if (ret) { 9111 dev_dbg(hba->dev, "Cannot query 'ref-clk-freq' property = %d", ret); 9112 return REF_CLK_FREQ_INVAL; 9113 } 9114 9115 return ufs_get_bref_clk_from_hz(freq); 9116 } 9117 9118 static int ufshcd_init_clocks(struct ufs_hba *hba) 9119 { 9120 int ret = 0; 9121 struct ufs_clk_info *clki; 9122 struct device *dev = hba->dev; 9123 struct list_head *head = &hba->clk_list_head; 9124 9125 if (list_empty(head)) 9126 goto out; 9127 9128 list_for_each_entry(clki, head, list) { 9129 if (!clki->name) 9130 continue; 9131 9132 clki->clk = devm_clk_get(dev, clki->name); 9133 if (IS_ERR(clki->clk)) { 9134 ret = PTR_ERR(clki->clk); 9135 dev_err(dev, "%s: %s clk get failed, %d\n", 9136 __func__, clki->name, ret); 9137 goto out; 9138 } 9139 9140 /* 9141 * Parse device ref clk freq as per device tree "ref_clk". 9142 * Default dev_ref_clk_freq is set to REF_CLK_FREQ_INVAL 9143 * in ufshcd_alloc_host(). 
9144 */ 9145 if (!strcmp(clki->name, "ref_clk")) 9146 ufshcd_parse_dev_ref_clk_freq(hba, clki->clk); 9147 9148 if (clki->max_freq) { 9149 ret = clk_set_rate(clki->clk, clki->max_freq); 9150 if (ret) { 9151 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", 9152 __func__, clki->name, 9153 clki->max_freq, ret); 9154 goto out; 9155 } 9156 clki->curr_freq = clki->max_freq; 9157 } 9158 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__, 9159 clki->name, clk_get_rate(clki->clk)); 9160 } 9161 out: 9162 return ret; 9163 } 9164 9165 static int ufshcd_variant_hba_init(struct ufs_hba *hba) 9166 { 9167 int err = 0; 9168 9169 if (!hba->vops) 9170 goto out; 9171 9172 err = ufshcd_vops_init(hba); 9173 if (err) 9174 dev_err_probe(hba->dev, err, 9175 "%s: variant %s init failed with err %d\n", 9176 __func__, ufshcd_get_var_name(hba), err); 9177 out: 9178 return err; 9179 } 9180 9181 static void ufshcd_variant_hba_exit(struct ufs_hba *hba) 9182 { 9183 if (!hba->vops) 9184 return; 9185 9186 ufshcd_vops_exit(hba); 9187 } 9188 9189 static int ufshcd_hba_init(struct ufs_hba *hba) 9190 { 9191 int err; 9192 9193 /* 9194 * Handle host controller power separately from the UFS device power 9195 * rails as it will help controlling the UFS host controller power 9196 * collapse easily which is different than UFS device power collapse. 9197 * Also, enable the host controller power before we go ahead with rest 9198 * of the initialization here. 9199 */ 9200 err = ufshcd_init_hba_vreg(hba); 9201 if (err) 9202 goto out; 9203 9204 err = ufshcd_setup_hba_vreg(hba, true); 9205 if (err) 9206 goto out; 9207 9208 err = ufshcd_init_clocks(hba); 9209 if (err) 9210 goto out_disable_hba_vreg; 9211 9212 if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL) 9213 hba->dev_ref_clk_freq = ufshcd_parse_ref_clk_property(hba); 9214 9215 err = ufshcd_setup_clocks(hba, true); 9216 if (err) 9217 goto out_disable_hba_vreg; 9218 9219 err = ufshcd_init_vreg(hba); 9220 if (err) 9221 goto out_disable_clks; 9222 9223 err = ufshcd_setup_vreg(hba, true); 9224 if (err) 9225 goto out_disable_clks; 9226 9227 err = ufshcd_variant_hba_init(hba); 9228 if (err) 9229 goto out_disable_vreg; 9230 9231 ufs_debugfs_hba_init(hba); 9232 9233 hba->is_powered = true; 9234 goto out; 9235 9236 out_disable_vreg: 9237 ufshcd_setup_vreg(hba, false); 9238 out_disable_clks: 9239 ufshcd_setup_clocks(hba, false); 9240 out_disable_hba_vreg: 9241 ufshcd_setup_hba_vreg(hba, false); 9242 out: 9243 return err; 9244 } 9245 9246 static void ufshcd_hba_exit(struct ufs_hba *hba) 9247 { 9248 if (hba->is_powered) { 9249 ufshcd_exit_clk_scaling(hba); 9250 ufshcd_exit_clk_gating(hba); 9251 if (hba->eh_wq) 9252 destroy_workqueue(hba->eh_wq); 9253 ufs_debugfs_hba_exit(hba); 9254 ufshcd_variant_hba_exit(hba); 9255 ufshcd_setup_vreg(hba, false); 9256 ufshcd_setup_clocks(hba, false); 9257 ufshcd_setup_hba_vreg(hba, false); 9258 hba->is_powered = false; 9259 ufs_put_device_desc(hba); 9260 } 9261 } 9262 9263 static int ufshcd_execute_start_stop(struct scsi_device *sdev, 9264 enum ufs_dev_pwr_mode pwr_mode, 9265 struct scsi_sense_hdr *sshdr) 9266 { 9267 const unsigned char cdb[6] = { START_STOP, 0, 0, 0, pwr_mode << 4, 0 }; 9268 const struct scsi_exec_args args = { 9269 .sshdr = sshdr, 9270 .req_flags = BLK_MQ_REQ_PM, 9271 .scmd_flags = SCMD_FAIL_IF_RECOVERING, 9272 }; 9273 9274 return scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, /*buffer=*/NULL, 9275 /*bufflen=*/0, /*timeout=*/10 * HZ, /*retries=*/0, 9276 &args); 9277 } 9278 9279 /** 9280 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT 
command to set device 9281 * power mode 9282 * @hba: per adapter instance 9283 * @pwr_mode: device power mode to set 9284 * 9285 * Return: 0 if requested power mode is set successfully; 9286 * < 0 if failed to set the requested power mode. 9287 */ 9288 static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba, 9289 enum ufs_dev_pwr_mode pwr_mode) 9290 { 9291 struct scsi_sense_hdr sshdr; 9292 struct scsi_device *sdp; 9293 unsigned long flags; 9294 int ret, retries; 9295 9296 spin_lock_irqsave(hba->host->host_lock, flags); 9297 sdp = hba->ufs_device_wlun; 9298 if (sdp && scsi_device_online(sdp)) 9299 ret = scsi_device_get(sdp); 9300 else 9301 ret = -ENODEV; 9302 spin_unlock_irqrestore(hba->host->host_lock, flags); 9303 9304 if (ret) 9305 return ret; 9306 9307 /* 9308 * If scsi commands fail, the scsi mid-layer schedules scsi error- 9309 * handling, which would wait for host to be resumed. Since we know 9310 * we are functional while we are here, skip host resume in error 9311 * handling context. 9312 */ 9313 hba->host->eh_noresume = 1; 9314 9315 /* 9316 * Current function would be generally called from the power management 9317 * callbacks hence set the RQF_PM flag so that it doesn't resume the 9318 * already suspended childs. 9319 */ 9320 for (retries = 3; retries > 0; --retries) { 9321 ret = ufshcd_execute_start_stop(sdp, pwr_mode, &sshdr); 9322 /* 9323 * scsi_execute() only returns a negative value if the request 9324 * queue is dying. 9325 */ 9326 if (ret <= 0) 9327 break; 9328 } 9329 if (ret) { 9330 sdev_printk(KERN_WARNING, sdp, 9331 "START_STOP failed for power mode: %d, result %x\n", 9332 pwr_mode, ret); 9333 if (ret > 0) { 9334 if (scsi_sense_valid(&sshdr)) 9335 scsi_print_sense_hdr(sdp, NULL, &sshdr); 9336 ret = -EIO; 9337 } 9338 } else { 9339 hba->curr_dev_pwr_mode = pwr_mode; 9340 } 9341 9342 scsi_device_put(sdp); 9343 hba->host->eh_noresume = 0; 9344 return ret; 9345 } 9346 9347 static int ufshcd_link_state_transition(struct ufs_hba *hba, 9348 enum uic_link_state req_link_state, 9349 bool check_for_bkops) 9350 { 9351 int ret = 0; 9352 9353 if (req_link_state == hba->uic_link_state) 9354 return 0; 9355 9356 if (req_link_state == UIC_LINK_HIBERN8_STATE) { 9357 ret = ufshcd_uic_hibern8_enter(hba); 9358 if (!ret) { 9359 ufshcd_set_link_hibern8(hba); 9360 } else { 9361 dev_err(hba->dev, "%s: hibern8 enter failed %d\n", 9362 __func__, ret); 9363 goto out; 9364 } 9365 } 9366 /* 9367 * If autobkops is enabled, link can't be turned off because 9368 * turning off the link would also turn off the device, except in the 9369 * case of DeepSleep where the device is expected to remain powered. 9370 */ 9371 else if ((req_link_state == UIC_LINK_OFF_STATE) && 9372 (!check_for_bkops || !hba->auto_bkops_enabled)) { 9373 /* 9374 * Let's make sure that link is in low power mode, we are doing 9375 * this currently by putting the link in Hibern8. Otherway to 9376 * put the link in low power mode is to send the DME end point 9377 * to device and then send the DME reset command to local 9378 * unipro. But putting the link in hibern8 is much faster. 9379 * 9380 * Note also that putting the link in Hibern8 is a requirement 9381 * for entering DeepSleep. 
9382 */ 9383 ret = ufshcd_uic_hibern8_enter(hba); 9384 if (ret) { 9385 dev_err(hba->dev, "%s: hibern8 enter failed %d\n", 9386 __func__, ret); 9387 goto out; 9388 } 9389 /* 9390 * Change controller state to "reset state" which 9391 * should also put the link in off/reset state 9392 */ 9393 ufshcd_hba_stop(hba); 9394 /* 9395 * TODO: Check if we need any delay to make sure that 9396 * controller is reset 9397 */ 9398 ufshcd_set_link_off(hba); 9399 } 9400 9401 out: 9402 return ret; 9403 } 9404 9405 static void ufshcd_vreg_set_lpm(struct ufs_hba *hba) 9406 { 9407 bool vcc_off = false; 9408 9409 /* 9410 * It seems some UFS devices may keep drawing more than sleep current 9411 * (at least for 500us) from UFS rails (especially from VCCQ rail). 9412 * To avoid this situation, add 2ms delay before putting these UFS 9413 * rails in LPM mode. 9414 */ 9415 if (!ufshcd_is_link_active(hba) && 9416 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM) 9417 usleep_range(2000, 2100); 9418 9419 /* 9420 * If the UFS device is in UFS_Sleep, turn off the VCC rail to save some 9421 * power. 9422 * 9423 * If the UFS device and link are in the OFF state, all power supplies (VCC, 9424 * VCCQ, VCCQ2) can be turned off if power on write protect is not 9425 * required. If the UFS link is inactive (Hibern8 or OFF state) and the device 9426 * is in sleep state, put the VCCQ & VCCQ2 rails in LPM mode. 9427 * 9428 * Ignore the error returned by ufshcd_toggle_vreg() as the device is anyway 9429 * in a low power state, which saves some power. 9430 * 9431 * If Write Booster is enabled and the device needs to flush the WB 9432 * buffer OR if bkops status is urgent for WB, keep Vcc on. 9433 */ 9434 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) && 9435 !hba->dev_info.is_lu_power_on_wp) { 9436 ufshcd_setup_vreg(hba, false); 9437 vcc_off = true; 9438 } else if (!ufshcd_is_ufs_dev_active(hba)) { 9439 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false); 9440 vcc_off = true; 9441 if (ufshcd_is_link_hibern8(hba) || ufshcd_is_link_off(hba)) { 9442 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq); 9443 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2); 9444 } 9445 } 9446 9447 /* 9448 * Some UFS devices require a delay after the VCC power rail is turned off.
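 * That delay is gated below by UFS_DEVICE_QUIRK_DELAY_AFTER_LPM and only
 * applies when VCC was actually switched off above.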
9449 */ 9450 if (vcc_off && hba->vreg_info.vcc && 9451 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM) 9452 usleep_range(5000, 5100); 9453 } 9454 9455 #ifdef CONFIG_PM 9456 static int ufshcd_vreg_set_hpm(struct ufs_hba *hba) 9457 { 9458 int ret = 0; 9459 9460 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) && 9461 !hba->dev_info.is_lu_power_on_wp) { 9462 ret = ufshcd_setup_vreg(hba, true); 9463 } else if (!ufshcd_is_ufs_dev_active(hba)) { 9464 if (!ufshcd_is_link_active(hba)) { 9465 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq); 9466 if (ret) 9467 goto vcc_disable; 9468 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2); 9469 if (ret) 9470 goto vccq_lpm; 9471 } 9472 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true); 9473 } 9474 goto out; 9475 9476 vccq_lpm: 9477 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq); 9478 vcc_disable: 9479 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false); 9480 out: 9481 return ret; 9482 } 9483 #endif /* CONFIG_PM */ 9484 9485 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba) 9486 { 9487 if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba)) 9488 ufshcd_setup_hba_vreg(hba, false); 9489 } 9490 9491 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba) 9492 { 9493 if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba)) 9494 ufshcd_setup_hba_vreg(hba, true); 9495 } 9496 9497 static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) 9498 { 9499 int ret = 0; 9500 bool check_for_bkops; 9501 enum ufs_pm_level pm_lvl; 9502 enum ufs_dev_pwr_mode req_dev_pwr_mode; 9503 enum uic_link_state req_link_state; 9504 9505 hba->pm_op_in_progress = true; 9506 if (pm_op != UFS_SHUTDOWN_PM) { 9507 pm_lvl = pm_op == UFS_RUNTIME_PM ? 9508 hba->rpm_lvl : hba->spm_lvl; 9509 req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl); 9510 req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl); 9511 } else { 9512 req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE; 9513 req_link_state = UIC_LINK_OFF_STATE; 9514 } 9515 9516 /* 9517 * If we can't transition into any of the low power modes 9518 * just gate the clocks. 9519 */ 9520 ufshcd_hold(hba); 9521 hba->clk_gating.is_suspended = true; 9522 9523 if (ufshcd_is_clkscaling_supported(hba)) 9524 ufshcd_clk_scaling_suspend(hba, true); 9525 9526 if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE && 9527 req_link_state == UIC_LINK_ACTIVE_STATE) { 9528 goto vops_suspend; 9529 } 9530 9531 if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) && 9532 (req_link_state == hba->uic_link_state)) 9533 goto enable_scaling; 9534 9535 /* UFS device & link must be active before we enter in this function */ 9536 if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) { 9537 ret = -EINVAL; 9538 goto enable_scaling; 9539 } 9540 9541 if (pm_op == UFS_RUNTIME_PM) { 9542 if (ufshcd_can_autobkops_during_suspend(hba)) { 9543 /* 9544 * The device is idle with no requests in the queue, 9545 * allow background operations if bkops status shows 9546 * that performance might be impacted. 9547 */ 9548 ret = ufshcd_urgent_bkops(hba); 9549 if (ret) { 9550 /* 9551 * If return err in suspend flow, IO will hang. 9552 * Trigger error handler and break suspend for 9553 * error recovery. 
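 * Returning -EBUSY makes the suspend attempt fail and be backed out while
 * the scheduled error handler recovers the device.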
9554 */ 9555 ufshcd_force_error_recovery(hba); 9556 ret = -EBUSY; 9557 goto enable_scaling; 9558 } 9559 } else { 9560 /* make sure that auto bkops is disabled */ 9561 ufshcd_disable_auto_bkops(hba); 9562 } 9563 /* 9564 * If device needs to do BKOP or WB buffer flush during 9565 * Hibern8, keep device power mode as "active power mode" 9566 * and VCC supply. 9567 */ 9568 hba->dev_info.b_rpm_dev_flush_capable = 9569 hba->auto_bkops_enabled || 9570 (((req_link_state == UIC_LINK_HIBERN8_STATE) || 9571 ((req_link_state == UIC_LINK_ACTIVE_STATE) && 9572 ufshcd_is_auto_hibern8_enabled(hba))) && 9573 ufshcd_wb_need_flush(hba)); 9574 } 9575 9576 flush_work(&hba->eeh_work); 9577 9578 ret = ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE); 9579 if (ret) 9580 goto enable_scaling; 9581 9582 if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) { 9583 if (pm_op != UFS_RUNTIME_PM) 9584 /* ensure that bkops is disabled */ 9585 ufshcd_disable_auto_bkops(hba); 9586 9587 if (!hba->dev_info.b_rpm_dev_flush_capable) { 9588 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode); 9589 if (ret && pm_op != UFS_SHUTDOWN_PM) { 9590 /* 9591 * If return err in suspend flow, IO will hang. 9592 * Trigger error handler and break suspend for 9593 * error recovery. 9594 */ 9595 ufshcd_force_error_recovery(hba); 9596 ret = -EBUSY; 9597 } 9598 if (ret) 9599 goto enable_scaling; 9600 } 9601 } 9602 9603 /* 9604 * In the case of DeepSleep, the device is expected to remain powered 9605 * with the link off, so do not check for bkops. 9606 */ 9607 check_for_bkops = !ufshcd_is_ufs_dev_deepsleep(hba); 9608 ret = ufshcd_link_state_transition(hba, req_link_state, check_for_bkops); 9609 if (ret && pm_op != UFS_SHUTDOWN_PM) { 9610 /* 9611 * If return err in suspend flow, IO will hang. 9612 * Trigger error handler and break suspend for 9613 * error recovery. 9614 */ 9615 ufshcd_force_error_recovery(hba); 9616 ret = -EBUSY; 9617 } 9618 if (ret) 9619 goto set_dev_active; 9620 9621 vops_suspend: 9622 /* 9623 * Call vendor specific suspend callback. As these callbacks may access 9624 * vendor specific host controller register space call them before the 9625 * host clocks are ON. 9626 */ 9627 ret = ufshcd_vops_suspend(hba, pm_op, POST_CHANGE); 9628 if (ret) 9629 goto set_link_active; 9630 goto out; 9631 9632 set_link_active: 9633 /* 9634 * Device hardware reset is required to exit DeepSleep. Also, for 9635 * DeepSleep, the link is off so host reset and restore will be done 9636 * further below. 
9637 */ 9638 if (ufshcd_is_ufs_dev_deepsleep(hba)) { 9639 ufshcd_device_reset(hba); 9640 WARN_ON(!ufshcd_is_link_off(hba)); 9641 } 9642 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba)) 9643 ufshcd_set_link_active(hba); 9644 else if (ufshcd_is_link_off(hba)) 9645 ufshcd_host_reset_and_restore(hba); 9646 set_dev_active: 9647 /* Can also get here needing to exit DeepSleep */ 9648 if (ufshcd_is_ufs_dev_deepsleep(hba)) { 9649 ufshcd_device_reset(hba); 9650 ufshcd_host_reset_and_restore(hba); 9651 } 9652 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE)) 9653 ufshcd_disable_auto_bkops(hba); 9654 enable_scaling: 9655 if (ufshcd_is_clkscaling_supported(hba)) 9656 ufshcd_clk_scaling_suspend(hba, false); 9657 9658 hba->dev_info.b_rpm_dev_flush_capable = false; 9659 out: 9660 if (hba->dev_info.b_rpm_dev_flush_capable) { 9661 schedule_delayed_work(&hba->rpm_dev_flush_recheck_work, 9662 msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS)); 9663 } 9664 9665 if (ret) { 9666 ufshcd_update_evt_hist(hba, UFS_EVT_WL_SUSP_ERR, (u32)ret); 9667 hba->clk_gating.is_suspended = false; 9668 ufshcd_release(hba); 9669 } 9670 hba->pm_op_in_progress = false; 9671 return ret; 9672 } 9673 9674 #ifdef CONFIG_PM 9675 static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) 9676 { 9677 int ret; 9678 enum uic_link_state old_link_state = hba->uic_link_state; 9679 9680 hba->pm_op_in_progress = true; 9681 9682 /* 9683 * Call vendor specific resume callback. As these callbacks may access 9684 * vendor specific host controller register space call them when the 9685 * host clocks are ON. 9686 */ 9687 ret = ufshcd_vops_resume(hba, pm_op); 9688 if (ret) 9689 goto out; 9690 9691 /* For DeepSleep, the only supported option is to have the link off */ 9692 WARN_ON(ufshcd_is_ufs_dev_deepsleep(hba) && !ufshcd_is_link_off(hba)); 9693 9694 if (ufshcd_is_link_hibern8(hba)) { 9695 ret = ufshcd_uic_hibern8_exit(hba); 9696 if (!ret) { 9697 ufshcd_set_link_active(hba); 9698 } else { 9699 dev_err(hba->dev, "%s: hibern8 exit failed %d\n", 9700 __func__, ret); 9701 goto vendor_suspend; 9702 } 9703 } else if (ufshcd_is_link_off(hba)) { 9704 /* 9705 * A full initialization of the host and the device is 9706 * required since the link was put to off during suspend. 9707 * Note, in the case of DeepSleep, the device will exit 9708 * DeepSleep due to device reset. 9709 */ 9710 ret = ufshcd_reset_and_restore(hba); 9711 /* 9712 * ufshcd_reset_and_restore() should have already 9713 * set the link state as active 9714 */ 9715 if (ret || !ufshcd_is_link_active(hba)) 9716 goto vendor_suspend; 9717 } 9718 9719 if (!ufshcd_is_ufs_dev_active(hba)) { 9720 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE); 9721 if (ret) 9722 goto set_old_link_state; 9723 ufshcd_set_timestamp_attr(hba); 9724 } 9725 9726 if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) 9727 ufshcd_enable_auto_bkops(hba); 9728 else 9729 /* 9730 * If BKOPs operations are urgently needed at this moment then 9731 * keep auto-bkops enabled or else disable it. 
9732 */ 9733 ufshcd_urgent_bkops(hba); 9734 9735 if (hba->ee_usr_mask) 9736 ufshcd_write_ee_control(hba); 9737 9738 if (ufshcd_is_clkscaling_supported(hba)) 9739 ufshcd_clk_scaling_suspend(hba, false); 9740 9741 if (hba->dev_info.b_rpm_dev_flush_capable) { 9742 hba->dev_info.b_rpm_dev_flush_capable = false; 9743 cancel_delayed_work(&hba->rpm_dev_flush_recheck_work); 9744 } 9745 9746 /* Enable Auto-Hibernate if configured */ 9747 ufshcd_auto_hibern8_enable(hba); 9748 9749 goto out; 9750 9751 set_old_link_state: 9752 ufshcd_link_state_transition(hba, old_link_state, 0); 9753 vendor_suspend: 9754 ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE); 9755 ufshcd_vops_suspend(hba, pm_op, POST_CHANGE); 9756 out: 9757 if (ret) 9758 ufshcd_update_evt_hist(hba, UFS_EVT_WL_RES_ERR, (u32)ret); 9759 hba->clk_gating.is_suspended = false; 9760 ufshcd_release(hba); 9761 hba->pm_op_in_progress = false; 9762 return ret; 9763 } 9764 9765 static int ufshcd_wl_runtime_suspend(struct device *dev) 9766 { 9767 struct scsi_device *sdev = to_scsi_device(dev); 9768 struct ufs_hba *hba; 9769 int ret; 9770 ktime_t start = ktime_get(); 9771 9772 hba = shost_priv(sdev->host); 9773 9774 ret = __ufshcd_wl_suspend(hba, UFS_RUNTIME_PM); 9775 if (ret) 9776 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret); 9777 9778 trace_ufshcd_wl_runtime_suspend(dev_name(dev), ret, 9779 ktime_to_us(ktime_sub(ktime_get(), start)), 9780 hba->curr_dev_pwr_mode, hba->uic_link_state); 9781 9782 return ret; 9783 } 9784 9785 static int ufshcd_wl_runtime_resume(struct device *dev) 9786 { 9787 struct scsi_device *sdev = to_scsi_device(dev); 9788 struct ufs_hba *hba; 9789 int ret = 0; 9790 ktime_t start = ktime_get(); 9791 9792 hba = shost_priv(sdev->host); 9793 9794 ret = __ufshcd_wl_resume(hba, UFS_RUNTIME_PM); 9795 if (ret) 9796 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret); 9797 9798 trace_ufshcd_wl_runtime_resume(dev_name(dev), ret, 9799 ktime_to_us(ktime_sub(ktime_get(), start)), 9800 hba->curr_dev_pwr_mode, hba->uic_link_state); 9801 9802 return ret; 9803 } 9804 #endif 9805 9806 #ifdef CONFIG_PM_SLEEP 9807 static int ufshcd_wl_suspend(struct device *dev) 9808 { 9809 struct scsi_device *sdev = to_scsi_device(dev); 9810 struct ufs_hba *hba; 9811 int ret = 0; 9812 ktime_t start = ktime_get(); 9813 9814 hba = shost_priv(sdev->host); 9815 down(&hba->host_sem); 9816 hba->system_suspending = true; 9817 9818 if (pm_runtime_suspended(dev)) 9819 goto out; 9820 9821 ret = __ufshcd_wl_suspend(hba, UFS_SYSTEM_PM); 9822 if (ret) { 9823 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret); 9824 up(&hba->host_sem); 9825 } 9826 9827 out: 9828 if (!ret) 9829 hba->is_sys_suspended = true; 9830 trace_ufshcd_wl_suspend(dev_name(dev), ret, 9831 ktime_to_us(ktime_sub(ktime_get(), start)), 9832 hba->curr_dev_pwr_mode, hba->uic_link_state); 9833 9834 return ret; 9835 } 9836 9837 static int ufshcd_wl_resume(struct device *dev) 9838 { 9839 struct scsi_device *sdev = to_scsi_device(dev); 9840 struct ufs_hba *hba; 9841 int ret = 0; 9842 ktime_t start = ktime_get(); 9843 9844 hba = shost_priv(sdev->host); 9845 9846 if (pm_runtime_suspended(dev)) 9847 goto out; 9848 9849 ret = __ufshcd_wl_resume(hba, UFS_SYSTEM_PM); 9850 if (ret) 9851 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret); 9852 out: 9853 trace_ufshcd_wl_resume(dev_name(dev), ret, 9854 ktime_to_us(ktime_sub(ktime_get(), start)), 9855 hba->curr_dev_pwr_mode, hba->uic_link_state); 9856 if (!ret) 9857 hba->is_sys_suspended = false; 9858 hba->system_suspending = false; 9859 
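	/* Release host_sem taken in ufshcd_wl_suspend() now that system resume is done. */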
up(&hba->host_sem);
9860 return ret;
9861 }
9862 #endif
9863
9864 /**
9865 * ufshcd_suspend - helper function for suspend operations
9866 * @hba: per adapter instance
9867 *
9868 * This function disables irqs, turns off clocks
9869 * and puts vreg and hba-vreg in LPM mode.
9870 *
9871 * Return: 0 upon success; < 0 upon failure.
9872 */
9873 static int ufshcd_suspend(struct ufs_hba *hba)
9874 {
9875 int ret;
9876
9877 if (!hba->is_powered)
9878 return 0;
9879 /*
9880 * Disable the host irq as there won't be any
9881 * host controller transaction expected till resume.
9882 */
9883 ufshcd_disable_irq(hba);
9884 ret = ufshcd_setup_clocks(hba, false);
9885 if (ret) {
9886 ufshcd_enable_irq(hba);
9887 return ret;
9888 }
9889 if (ufshcd_is_clkgating_allowed(hba)) {
9890 hba->clk_gating.state = CLKS_OFF;
9891 trace_ufshcd_clk_gating(dev_name(hba->dev),
9892 hba->clk_gating.state);
9893 }
9894
9895 ufshcd_vreg_set_lpm(hba);
9896 /* Put the host controller in low power mode if possible */
9897 ufshcd_hba_vreg_set_lpm(hba);
9898 return ret;
9899 }
9900
9901 #ifdef CONFIG_PM
9902 /**
9903 * ufshcd_resume - helper function for resume operations
9904 * @hba: per adapter instance
9905 *
9906 * This function basically turns on the regulators, clocks and
9907 * irqs of the hba.
9908 *
9909 * Return: 0 for success and non-zero for failure.
9910 */
9911 static int ufshcd_resume(struct ufs_hba *hba)
9912 {
9913 int ret;
9914
9915 if (!hba->is_powered)
9916 return 0;
9917
9918 ufshcd_hba_vreg_set_hpm(hba);
9919 ret = ufshcd_vreg_set_hpm(hba);
9920 if (ret)
9921 goto out;
9922
9923 /* Make sure clocks are enabled before accessing controller */
9924 ret = ufshcd_setup_clocks(hba, true);
9925 if (ret)
9926 goto disable_vreg;
9927
9928 /* enable the host irq as host controller would be active soon */
9929 ufshcd_enable_irq(hba);
9930
9931 goto out;
9932
9933 disable_vreg:
9934 ufshcd_vreg_set_lpm(hba);
9935 out:
9936 if (ret)
9937 ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)ret);
9938 return ret;
9939 }
9940 #endif /* CONFIG_PM */
9941
9942 #ifdef CONFIG_PM_SLEEP
9943 /**
9944 * ufshcd_system_suspend - system suspend callback
9945 * @dev: Device associated with the UFS controller.
9946 *
9947 * Executed before putting the system into a sleep state in which the contents
9948 * of main memory are preserved.
9949 *
9950 * Return: 0 for success and non-zero for failure.
9951 */
9952 int ufshcd_system_suspend(struct device *dev)
9953 {
9954 struct ufs_hba *hba = dev_get_drvdata(dev);
9955 int ret = 0;
9956 ktime_t start = ktime_get();
9957
9958 if (pm_runtime_suspended(hba->dev))
9959 goto out;
9960
9961 ret = ufshcd_suspend(hba);
9962 out:
9963 trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
9964 ktime_to_us(ktime_sub(ktime_get(), start)),
9965 hba->curr_dev_pwr_mode, hba->uic_link_state);
9966 return ret;
9967 }
9968 EXPORT_SYMBOL(ufshcd_system_suspend);
9969
9970 /**
9971 * ufshcd_system_resume - system resume callback
9972 * @dev: Device associated with the UFS controller.
9973 *
9974 * Executed after waking the system up from a sleep state in which the contents
9975 * of main memory were preserved.
9976 *
9977 * Return: 0 for success and non-zero for failure.
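 *
 * A bus glue driver typically plugs this callback, together with
 * ufshcd_system_suspend() and the runtime PM helpers below, into its
 * struct dev_pm_ops. A minimal sketch, assuming a hypothetical glue
 * driver (names are illustrative, not from any specific platform driver):
 *
 *	static const struct dev_pm_ops example_ufs_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
 *		SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
 *		.prepare	= ufshcd_suspend_prepare,
 *		.complete	= ufshcd_resume_complete,
 *	};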
9978 */ 9979 int ufshcd_system_resume(struct device *dev) 9980 { 9981 struct ufs_hba *hba = dev_get_drvdata(dev); 9982 ktime_t start = ktime_get(); 9983 int ret = 0; 9984 9985 if (pm_runtime_suspended(hba->dev)) 9986 goto out; 9987 9988 ret = ufshcd_resume(hba); 9989 9990 out: 9991 trace_ufshcd_system_resume(dev_name(hba->dev), ret, 9992 ktime_to_us(ktime_sub(ktime_get(), start)), 9993 hba->curr_dev_pwr_mode, hba->uic_link_state); 9994 9995 return ret; 9996 } 9997 EXPORT_SYMBOL(ufshcd_system_resume); 9998 #endif /* CONFIG_PM_SLEEP */ 9999 10000 #ifdef CONFIG_PM 10001 /** 10002 * ufshcd_runtime_suspend - runtime suspend callback 10003 * @dev: Device associated with the UFS controller. 10004 * 10005 * Check the description of ufshcd_suspend() function for more details. 10006 * 10007 * Return: 0 for success and non-zero for failure. 10008 */ 10009 int ufshcd_runtime_suspend(struct device *dev) 10010 { 10011 struct ufs_hba *hba = dev_get_drvdata(dev); 10012 int ret; 10013 ktime_t start = ktime_get(); 10014 10015 ret = ufshcd_suspend(hba); 10016 10017 trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret, 10018 ktime_to_us(ktime_sub(ktime_get(), start)), 10019 hba->curr_dev_pwr_mode, hba->uic_link_state); 10020 return ret; 10021 } 10022 EXPORT_SYMBOL(ufshcd_runtime_suspend); 10023 10024 /** 10025 * ufshcd_runtime_resume - runtime resume routine 10026 * @dev: Device associated with the UFS controller. 10027 * 10028 * This function basically brings controller 10029 * to active state. Following operations are done in this function: 10030 * 10031 * 1. Turn on all the controller related clocks 10032 * 2. Turn ON VCC rail 10033 * 10034 * Return: 0 upon success; < 0 upon failure. 10035 */ 10036 int ufshcd_runtime_resume(struct device *dev) 10037 { 10038 struct ufs_hba *hba = dev_get_drvdata(dev); 10039 int ret; 10040 ktime_t start = ktime_get(); 10041 10042 ret = ufshcd_resume(hba); 10043 10044 trace_ufshcd_runtime_resume(dev_name(hba->dev), ret, 10045 ktime_to_us(ktime_sub(ktime_get(), start)), 10046 hba->curr_dev_pwr_mode, hba->uic_link_state); 10047 return ret; 10048 } 10049 EXPORT_SYMBOL(ufshcd_runtime_resume); 10050 #endif /* CONFIG_PM */ 10051 10052 static void ufshcd_wl_shutdown(struct device *dev) 10053 { 10054 struct scsi_device *sdev = to_scsi_device(dev); 10055 struct ufs_hba *hba = shost_priv(sdev->host); 10056 10057 down(&hba->host_sem); 10058 hba->shutting_down = true; 10059 up(&hba->host_sem); 10060 10061 /* Turn on everything while shutting down */ 10062 ufshcd_rpm_get_sync(hba); 10063 scsi_device_quiesce(sdev); 10064 shost_for_each_device(sdev, hba->host) { 10065 if (sdev == hba->ufs_device_wlun) 10066 continue; 10067 scsi_device_quiesce(sdev); 10068 } 10069 __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM); 10070 10071 /* 10072 * Next, turn off the UFS controller and the UFS regulators. Disable 10073 * clocks. 
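 * Only do this if the wlun suspend above actually brought the device to
 * the powered-off state with the link off, hence the check below.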
10074 */ 10075 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba)) 10076 ufshcd_suspend(hba); 10077 10078 hba->is_powered = false; 10079 } 10080 10081 /** 10082 * ufshcd_remove - de-allocate SCSI host and host memory space 10083 * data structure memory 10084 * @hba: per adapter instance 10085 */ 10086 void ufshcd_remove(struct ufs_hba *hba) 10087 { 10088 if (hba->ufs_device_wlun) 10089 ufshcd_rpm_get_sync(hba); 10090 ufs_hwmon_remove(hba); 10091 ufs_bsg_remove(hba); 10092 ufs_sysfs_remove_nodes(hba->dev); 10093 blk_mq_destroy_queue(hba->tmf_queue); 10094 blk_put_queue(hba->tmf_queue); 10095 blk_mq_free_tag_set(&hba->tmf_tag_set); 10096 scsi_remove_host(hba->host); 10097 /* disable interrupts */ 10098 ufshcd_disable_intr(hba, hba->intr_mask); 10099 ufshcd_hba_stop(hba); 10100 ufshcd_hba_exit(hba); 10101 } 10102 EXPORT_SYMBOL_GPL(ufshcd_remove); 10103 10104 #ifdef CONFIG_PM_SLEEP 10105 int ufshcd_system_freeze(struct device *dev) 10106 { 10107 10108 return ufshcd_system_suspend(dev); 10109 10110 } 10111 EXPORT_SYMBOL_GPL(ufshcd_system_freeze); 10112 10113 int ufshcd_system_restore(struct device *dev) 10114 { 10115 10116 struct ufs_hba *hba = dev_get_drvdata(dev); 10117 int ret; 10118 10119 ret = ufshcd_system_resume(dev); 10120 if (ret) 10121 return ret; 10122 10123 /* Configure UTRL and UTMRL base address registers */ 10124 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr), 10125 REG_UTP_TRANSFER_REQ_LIST_BASE_L); 10126 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr), 10127 REG_UTP_TRANSFER_REQ_LIST_BASE_H); 10128 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr), 10129 REG_UTP_TASK_REQ_LIST_BASE_L); 10130 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr), 10131 REG_UTP_TASK_REQ_LIST_BASE_H); 10132 /* 10133 * Make sure that UTRL and UTMRL base address registers 10134 * are updated with the latest queue addresses. Only after 10135 * updating these addresses, we can queue the new commands. 10136 */ 10137 mb(); 10138 10139 /* Resuming from hibernate, assume that link was OFF */ 10140 ufshcd_set_link_off(hba); 10141 10142 return 0; 10143 10144 } 10145 EXPORT_SYMBOL_GPL(ufshcd_system_restore); 10146 10147 int ufshcd_system_thaw(struct device *dev) 10148 { 10149 return ufshcd_system_resume(dev); 10150 } 10151 EXPORT_SYMBOL_GPL(ufshcd_system_thaw); 10152 #endif /* CONFIG_PM_SLEEP */ 10153 10154 /** 10155 * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA) 10156 * @hba: pointer to Host Bus Adapter (HBA) 10157 */ 10158 void ufshcd_dealloc_host(struct ufs_hba *hba) 10159 { 10160 scsi_host_put(hba->host); 10161 } 10162 EXPORT_SYMBOL_GPL(ufshcd_dealloc_host); 10163 10164 /** 10165 * ufshcd_set_dma_mask - Set dma mask based on the controller 10166 * addressing capability 10167 * @hba: per adapter instance 10168 * 10169 * Return: 0 for success, non-zero for failure. 10170 */ 10171 static int ufshcd_set_dma_mask(struct ufs_hba *hba) 10172 { 10173 if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) { 10174 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64))) 10175 return 0; 10176 } 10177 return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32)); 10178 } 10179 10180 /** 10181 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA) 10182 * @dev: pointer to device handle 10183 * @hba_handle: driver private handle 10184 * 10185 * Return: 0 on success, non-zero value on failure. 
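 *
 * A minimal usage sketch for a bus glue driver is shown below (hypothetical
 * variable names, error handling trimmed); ufshcd_init() completes the setup
 * once the MMIO region and IRQ are known:
 *
 *	struct ufs_hba *hba;
 *	int err;
 *
 *	err = ufshcd_alloc_host(dev, &hba);
 *	if (err)
 *		return err;
 *	err = ufshcd_init(hba, mmio_base, irq);
 *	if (err)
 *		ufshcd_dealloc_host(hba);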
10186 */ 10187 int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle) 10188 { 10189 struct Scsi_Host *host; 10190 struct ufs_hba *hba; 10191 int err = 0; 10192 10193 if (!dev) { 10194 dev_err(dev, 10195 "Invalid memory reference for dev is NULL\n"); 10196 err = -ENODEV; 10197 goto out_error; 10198 } 10199 10200 host = scsi_host_alloc(&ufshcd_driver_template, 10201 sizeof(struct ufs_hba)); 10202 if (!host) { 10203 dev_err(dev, "scsi_host_alloc failed\n"); 10204 err = -ENOMEM; 10205 goto out_error; 10206 } 10207 host->nr_maps = HCTX_TYPE_POLL + 1; 10208 hba = shost_priv(host); 10209 hba->host = host; 10210 hba->dev = dev; 10211 hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL; 10212 hba->nop_out_timeout = NOP_OUT_TIMEOUT; 10213 ufshcd_set_sg_entry_size(hba, sizeof(struct ufshcd_sg_entry)); 10214 INIT_LIST_HEAD(&hba->clk_list_head); 10215 spin_lock_init(&hba->outstanding_lock); 10216 10217 *hba_handle = hba; 10218 10219 out_error: 10220 return err; 10221 } 10222 EXPORT_SYMBOL(ufshcd_alloc_host); 10223 10224 /* This function exists because blk_mq_alloc_tag_set() requires this. */ 10225 static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx, 10226 const struct blk_mq_queue_data *qd) 10227 { 10228 WARN_ON_ONCE(true); 10229 return BLK_STS_NOTSUPP; 10230 } 10231 10232 static const struct blk_mq_ops ufshcd_tmf_ops = { 10233 .queue_rq = ufshcd_queue_tmf, 10234 }; 10235 10236 /** 10237 * ufshcd_init - Driver initialization routine 10238 * @hba: per-adapter instance 10239 * @mmio_base: base register address 10240 * @irq: Interrupt line of device 10241 * 10242 * Return: 0 on success, non-zero value on failure. 10243 */ 10244 int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) 10245 { 10246 int err; 10247 struct Scsi_Host *host = hba->host; 10248 struct device *dev = hba->dev; 10249 char eh_wq_name[sizeof("ufs_eh_wq_00")]; 10250 10251 /* 10252 * dev_set_drvdata() must be called before any callbacks are registered 10253 * that use dev_get_drvdata() (frequency scaling, clock scaling, hwmon, 10254 * sysfs). 
10255 */ 10256 dev_set_drvdata(dev, hba); 10257 10258 if (!mmio_base) { 10259 dev_err(hba->dev, 10260 "Invalid memory reference for mmio_base is NULL\n"); 10261 err = -ENODEV; 10262 goto out_error; 10263 } 10264 10265 hba->mmio_base = mmio_base; 10266 hba->irq = irq; 10267 hba->vps = &ufs_hba_vps; 10268 10269 err = ufshcd_hba_init(hba); 10270 if (err) 10271 goto out_error; 10272 10273 /* Read capabilities registers */ 10274 err = ufshcd_hba_capabilities(hba); 10275 if (err) 10276 goto out_disable; 10277 10278 /* Get UFS version supported by the controller */ 10279 hba->ufs_version = ufshcd_get_ufs_version(hba); 10280 10281 /* Get Interrupt bit mask per version */ 10282 hba->intr_mask = ufshcd_get_intr_mask(hba); 10283 10284 err = ufshcd_set_dma_mask(hba); 10285 if (err) { 10286 dev_err(hba->dev, "set dma mask failed\n"); 10287 goto out_disable; 10288 } 10289 10290 /* Allocate memory for host memory space */ 10291 err = ufshcd_memory_alloc(hba); 10292 if (err) { 10293 dev_err(hba->dev, "Memory allocation failed\n"); 10294 goto out_disable; 10295 } 10296 10297 /* Configure LRB */ 10298 ufshcd_host_memory_configure(hba); 10299 10300 host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED; 10301 host->cmd_per_lun = hba->nutrs - UFSHCD_NUM_RESERVED; 10302 host->max_id = UFSHCD_MAX_ID; 10303 host->max_lun = UFS_MAX_LUNS; 10304 host->max_channel = UFSHCD_MAX_CHANNEL; 10305 host->unique_id = host->host_no; 10306 host->max_cmd_len = UFS_CDB_SIZE; 10307 host->queuecommand_may_block = !!(hba->caps & UFSHCD_CAP_CLK_GATING); 10308 10309 hba->max_pwr_info.is_valid = false; 10310 10311 /* Initialize work queues */ 10312 snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d", 10313 hba->host->host_no); 10314 hba->eh_wq = create_singlethread_workqueue(eh_wq_name); 10315 if (!hba->eh_wq) { 10316 dev_err(hba->dev, "%s: failed to create eh workqueue\n", 10317 __func__); 10318 err = -ENOMEM; 10319 goto out_disable; 10320 } 10321 INIT_WORK(&hba->eh_work, ufshcd_err_handler); 10322 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler); 10323 10324 sema_init(&hba->host_sem, 1); 10325 10326 /* Initialize UIC command mutex */ 10327 mutex_init(&hba->uic_cmd_mutex); 10328 10329 /* Initialize mutex for device management commands */ 10330 mutex_init(&hba->dev_cmd.lock); 10331 10332 /* Initialize mutex for exception event control */ 10333 mutex_init(&hba->ee_ctrl_mutex); 10334 10335 mutex_init(&hba->wb_mutex); 10336 init_rwsem(&hba->clk_scaling_lock); 10337 10338 ufshcd_init_clk_gating(hba); 10339 10340 ufshcd_init_clk_scaling(hba); 10341 10342 /* 10343 * In order to avoid any spurious interrupt immediately after 10344 * registering UFS controller interrupt handler, clear any pending UFS 10345 * interrupt status and disable all the UFS interrupts. 10346 */ 10347 ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS), 10348 REG_INTERRUPT_STATUS); 10349 ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE); 10350 /* 10351 * Make sure that UFS interrupts are disabled and any pending interrupt 10352 * status is cleared before registering UFS interrupt handler. 
10353 */
10354 mb();
10355
10356 /* IRQ registration */
10357 err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
10358 if (err) {
10359 dev_err(hba->dev, "request irq failed\n");
10360 goto out_disable;
10361 } else {
10362 hba->is_irq_enabled = true;
10363 }
10364
10365 if (!is_mcq_supported(hba)) {
10366 err = scsi_add_host(host, hba->dev);
10367 if (err) {
10368 dev_err(hba->dev, "scsi_add_host failed\n");
10369 goto out_disable;
10370 }
10371 }
10372
10373 hba->tmf_tag_set = (struct blk_mq_tag_set) {
10374 .nr_hw_queues = 1,
10375 .queue_depth = hba->nutmrs,
10376 .ops = &ufshcd_tmf_ops,
10377 .flags = BLK_MQ_F_NO_SCHED,
10378 };
10379 err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
10380 if (err < 0)
10381 goto out_remove_scsi_host;
10382 hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
10383 if (IS_ERR(hba->tmf_queue)) {
10384 err = PTR_ERR(hba->tmf_queue);
10385 goto free_tmf_tag_set;
10386 }
10387 hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs,
10388 sizeof(*hba->tmf_rqs), GFP_KERNEL);
10389 if (!hba->tmf_rqs) {
10390 err = -ENOMEM;
10391 goto free_tmf_queue;
10392 }
10393
10394 /* Reset the attached device */
10395 ufshcd_device_reset(hba);
10396
10397 ufshcd_init_crypto(hba);
10398
10399 /* Host controller enable */
10400 err = ufshcd_hba_enable(hba);
10401 if (err) {
10402 dev_err(hba->dev, "Host controller enable failed\n");
10403 ufshcd_print_evt_hist(hba);
10404 ufshcd_print_host_state(hba);
10405 goto free_tmf_queue;
10406 }
10407
10408 /*
10409 * Set the default power management level for runtime and system PM.
10410 * Default power saving mode is to keep UFS link in Hibern8 state
10411 * and UFS device in sleep state.
10412 */
10413 hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
10414 UFS_SLEEP_PWR_MODE,
10415 UIC_LINK_HIBERN8_STATE);
10416 hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
10417 UFS_SLEEP_PWR_MODE,
10418 UIC_LINK_HIBERN8_STATE);
10419
10420 INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work,
10421 ufshcd_rpm_dev_flush_recheck_work);
10422
10423 /* Set the default auto-hibernate idle timer value to 150 ms */
10424 if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
10425 hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
10426 FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
10427 }
10428
10429 /* Hold auto suspend until async scan completes */
10430 pm_runtime_get_sync(dev);
10431 atomic_set(&hba->scsi_block_reqs_cnt, 0);
10432 /*
10433 * We are assuming that device wasn't put in sleep/power-down
10434 * state exclusively during the boot stage before the kernel.
10435 * This assumption helps avoid doing link startup twice during
10436 * ufshcd_probe_hba().
10437 */ 10438 ufshcd_set_ufs_dev_active(hba); 10439 10440 async_schedule(ufshcd_async_scan, hba); 10441 ufs_sysfs_add_nodes(hba->dev); 10442 10443 device_enable_async_suspend(dev); 10444 return 0; 10445 10446 free_tmf_queue: 10447 blk_mq_destroy_queue(hba->tmf_queue); 10448 blk_put_queue(hba->tmf_queue); 10449 free_tmf_tag_set: 10450 blk_mq_free_tag_set(&hba->tmf_tag_set); 10451 out_remove_scsi_host: 10452 scsi_remove_host(hba->host); 10453 out_disable: 10454 hba->is_irq_enabled = false; 10455 ufshcd_hba_exit(hba); 10456 out_error: 10457 return err; 10458 } 10459 EXPORT_SYMBOL_GPL(ufshcd_init); 10460 10461 void ufshcd_resume_complete(struct device *dev) 10462 { 10463 struct ufs_hba *hba = dev_get_drvdata(dev); 10464 10465 if (hba->complete_put) { 10466 ufshcd_rpm_put(hba); 10467 hba->complete_put = false; 10468 } 10469 } 10470 EXPORT_SYMBOL_GPL(ufshcd_resume_complete); 10471 10472 static bool ufshcd_rpm_ok_for_spm(struct ufs_hba *hba) 10473 { 10474 struct device *dev = &hba->ufs_device_wlun->sdev_gendev; 10475 enum ufs_dev_pwr_mode dev_pwr_mode; 10476 enum uic_link_state link_state; 10477 unsigned long flags; 10478 bool res; 10479 10480 spin_lock_irqsave(&dev->power.lock, flags); 10481 dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl); 10482 link_state = ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl); 10483 res = pm_runtime_suspended(dev) && 10484 hba->curr_dev_pwr_mode == dev_pwr_mode && 10485 hba->uic_link_state == link_state && 10486 !hba->dev_info.b_rpm_dev_flush_capable; 10487 spin_unlock_irqrestore(&dev->power.lock, flags); 10488 10489 return res; 10490 } 10491 10492 int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm) 10493 { 10494 struct ufs_hba *hba = dev_get_drvdata(dev); 10495 int ret; 10496 10497 /* 10498 * SCSI assumes that runtime-pm and system-pm for scsi drivers 10499 * are same. And it doesn't wake up the device for system-suspend 10500 * if it's runtime suspended. But ufs doesn't follow that. 10501 * Refer ufshcd_resume_complete() 10502 */ 10503 if (hba->ufs_device_wlun) { 10504 /* Prevent runtime suspend */ 10505 ufshcd_rpm_get_noresume(hba); 10506 /* 10507 * Check if already runtime suspended in same state as system 10508 * suspend would be. 
10509 */ 10510 if (!rpm_ok_for_spm || !ufshcd_rpm_ok_for_spm(hba)) { 10511 /* RPM state is not ok for SPM, so runtime resume */ 10512 ret = ufshcd_rpm_resume(hba); 10513 if (ret < 0 && ret != -EACCES) { 10514 ufshcd_rpm_put(hba); 10515 return ret; 10516 } 10517 } 10518 hba->complete_put = true; 10519 } 10520 return 0; 10521 } 10522 EXPORT_SYMBOL_GPL(__ufshcd_suspend_prepare); 10523 10524 int ufshcd_suspend_prepare(struct device *dev) 10525 { 10526 return __ufshcd_suspend_prepare(dev, true); 10527 } 10528 EXPORT_SYMBOL_GPL(ufshcd_suspend_prepare); 10529 10530 #ifdef CONFIG_PM_SLEEP 10531 static int ufshcd_wl_poweroff(struct device *dev) 10532 { 10533 struct scsi_device *sdev = to_scsi_device(dev); 10534 struct ufs_hba *hba = shost_priv(sdev->host); 10535 10536 __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM); 10537 return 0; 10538 } 10539 #endif 10540 10541 static int ufshcd_wl_probe(struct device *dev) 10542 { 10543 struct scsi_device *sdev = to_scsi_device(dev); 10544 10545 if (!is_device_wlun(sdev)) 10546 return -ENODEV; 10547 10548 blk_pm_runtime_init(sdev->request_queue, dev); 10549 pm_runtime_set_autosuspend_delay(dev, 0); 10550 pm_runtime_allow(dev); 10551 10552 return 0; 10553 } 10554 10555 static int ufshcd_wl_remove(struct device *dev) 10556 { 10557 pm_runtime_forbid(dev); 10558 return 0; 10559 } 10560 10561 static const struct dev_pm_ops ufshcd_wl_pm_ops = { 10562 #ifdef CONFIG_PM_SLEEP 10563 .suspend = ufshcd_wl_suspend, 10564 .resume = ufshcd_wl_resume, 10565 .freeze = ufshcd_wl_suspend, 10566 .thaw = ufshcd_wl_resume, 10567 .poweroff = ufshcd_wl_poweroff, 10568 .restore = ufshcd_wl_resume, 10569 #endif 10570 SET_RUNTIME_PM_OPS(ufshcd_wl_runtime_suspend, ufshcd_wl_runtime_resume, NULL) 10571 }; 10572 10573 static void ufshcd_check_header_layout(void) 10574 { 10575 /* 10576 * gcc compilers before version 10 cannot do constant-folding for 10577 * sub-byte bitfields. Hence skip the layout checks for gcc 9 and 10578 * before. 10579 */ 10580 if (IS_ENABLED(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION < 100000) 10581 return; 10582 10583 BUILD_BUG_ON(((u8 *)&(struct request_desc_header){ 10584 .cci = 3})[0] != 3); 10585 10586 BUILD_BUG_ON(((u8 *)&(struct request_desc_header){ 10587 .ehs_length = 2})[1] != 2); 10588 10589 BUILD_BUG_ON(((u8 *)&(struct request_desc_header){ 10590 .enable_crypto = 1})[2] 10591 != 0x80); 10592 10593 BUILD_BUG_ON((((u8 *)&(struct request_desc_header){ 10594 .command_type = 5, 10595 .data_direction = 3, 10596 .interrupt = 1, 10597 })[3]) != ((5 << 4) | (3 << 1) | 1)); 10598 10599 BUILD_BUG_ON(((__le32 *)&(struct request_desc_header){ 10600 .dunl = cpu_to_le32(0xdeadbeef)})[1] != 10601 cpu_to_le32(0xdeadbeef)); 10602 10603 BUILD_BUG_ON(((u8 *)&(struct request_desc_header){ 10604 .ocs = 4})[8] != 4); 10605 10606 BUILD_BUG_ON(((u8 *)&(struct request_desc_header){ 10607 .cds = 5})[9] != 5); 10608 10609 BUILD_BUG_ON(((__le32 *)&(struct request_desc_header){ 10610 .dunu = cpu_to_le32(0xbadcafe)})[3] != 10611 cpu_to_le32(0xbadcafe)); 10612 10613 BUILD_BUG_ON(((u8 *)&(struct utp_upiu_header){ 10614 .iid = 0xf })[4] != 0xf0); 10615 10616 BUILD_BUG_ON(((u8 *)&(struct utp_upiu_header){ 10617 .command_set_type = 0xf })[4] != 0xf); 10618 } 10619 10620 /* 10621 * ufs_dev_wlun_template - describes ufs device wlun 10622 * ufs-device wlun - used to send pm commands 10623 * All luns are consumers of ufs-device wlun. 10624 * 10625 * Currently, no sd driver is present for wluns. 10626 * Hence the no specific pm operations are performed. 
* With the UFS design, SSU (START STOP UNIT) should be sent to the ufs-device wlun.
10628 * Hence register a scsi driver for ufs wluns only.
10629 */
10630 static struct scsi_driver ufs_dev_wlun_template = {
10631 .gendrv = {
10632 .name = "ufs_device_wlun",
10633 .owner = THIS_MODULE,
10634 .probe = ufshcd_wl_probe,
10635 .remove = ufshcd_wl_remove,
10636 .pm = &ufshcd_wl_pm_ops,
10637 .shutdown = ufshcd_wl_shutdown,
10638 },
10639 };
10640
10641 static int __init ufshcd_core_init(void)
10642 {
10643 int ret;
10644
10645 ufshcd_check_header_layout();
10646
10647 ufs_debugfs_init();
10648
10649 ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv);
10650 if (ret)
10651 ufs_debugfs_exit();
10652 return ret;
10653 }
10654
10655 static void __exit ufshcd_core_exit(void)
10656 {
10657 ufs_debugfs_exit();
10658 scsi_unregister_driver(&ufs_dev_wlun_template.gendrv);
10659 }
10660
10661 module_init(ufshcd_core_init);
10662 module_exit(ufshcd_core_exit);
10663
10664 MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
10665 MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
10666 MODULE_DESCRIPTION("Generic UFS host controller driver Core");
10667 MODULE_SOFTDEP("pre: governor_simpleondemand");
10668 MODULE_LICENSE("GPL");
10669