1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Universal Flash Storage Host controller driver Core
4 * Copyright (C) 2011-2013 Samsung India Software Operations
5 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
6 *
7 * Authors:
8 * Santosh Yaraganavi <santosh.sy@samsung.com>
9 * Vinayak Holikatti <h.vinayak@samsung.com>
10 */
11
12 #include <linux/async.h>
13 #include <linux/devfreq.h>
14 #include <linux/nls.h>
15 #include <linux/of.h>
16 #include <linux/bitfield.h>
17 #include <linux/blk-pm.h>
18 #include <linux/blkdev.h>
19 #include <linux/clk.h>
20 #include <linux/delay.h>
21 #include <linux/interrupt.h>
22 #include <linux/module.h>
23 #include <linux/regulator/consumer.h>
24 #include <linux/sched/clock.h>
25 #include <linux/iopoll.h>
26 #include <scsi/scsi_cmnd.h>
27 #include <scsi/scsi_dbg.h>
28 #include <scsi/scsi_driver.h>
29 #include <scsi/scsi_eh.h>
30 #include "ufshcd-priv.h"
31 #include <ufs/ufs_quirks.h>
32 #include <ufs/unipro.h>
33 #include "ufs-sysfs.h"
34 #include "ufs-debugfs.h"
35 #include "ufs-fault-injection.h"
36 #include "ufs_bsg.h"
37 #include "ufshcd-crypto.h"
38 #include <asm/unaligned.h>
39
40 #define CREATE_TRACE_POINTS
41 #include <trace/events/ufs.h>
42
43 #define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
44 UTP_TASK_REQ_COMPL |\
45 UFSHCD_ERROR_MASK)
46
47 #define UFSHCD_ENABLE_MCQ_INTRS (UTP_TASK_REQ_COMPL |\
48 UFSHCD_ERROR_MASK |\
49 MCQ_CQ_EVENT_STATUS)
50
51
52 /* UIC command timeout, unit: ms */
53 #define UIC_CMD_TIMEOUT 500
54
55 /* NOP OUT retries waiting for NOP IN response */
56 #define NOP_OUT_RETRIES 10
57 /* Timeout after 50 msecs if NOP OUT hangs without response */
58 #define NOP_OUT_TIMEOUT 50 /* msecs */
59
60 /* Query request retries */
61 #define QUERY_REQ_RETRIES 3
62 /* Query request timeout */
63 #define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
64
65 /* Advanced RPMB request timeout */
66 #define ADVANCED_RPMB_REQ_TIMEOUT 3000 /* 3 seconds */
67
68 /* Task management command timeout */
69 #define TM_CMD_TIMEOUT 100 /* msecs */
70
71 /* maximum number of retries for a general UIC command */
72 #define UFS_UIC_COMMAND_RETRIES 3
73
74 /* maximum number of link-startup retries */
75 #define DME_LINKSTARTUP_RETRIES 3
76
77 /* maximum number of reset retries before giving up */
78 #define MAX_HOST_RESET_RETRIES 5
79
80 /* Maximum number of error handler retries before giving up */
81 #define MAX_ERR_HANDLER_RETRIES 5
82
83 /* Expose the flag value from utp_upiu_query.value */
84 #define MASK_QUERY_UPIU_FLAG_LOC 0xFF
85
86 /* Interrupt aggregation default timeout, unit: 40us */
87 #define INT_AGGR_DEF_TO 0x02
88
89 /* default delay of autosuspend: 2000 ms */
90 #define RPM_AUTOSUSPEND_DELAY_MS 2000
91
92 /* Default delay of RPM device flush delayed work */
93 #define RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS 5000
94
95 /* Default value of wait time before gating device ref clock */
96 #define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF /* microsecs */
97
98 /* Polling time to wait for fDeviceInit */
99 #define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */
100
101 /* Default RTC update every 10 seconds */
102 #define UFS_RTC_UPDATE_INTERVAL_MS (10 * MSEC_PER_SEC)
103
104 /* UFSHC 4.0 compliant HCs support this mode. */
105 static bool use_mcq_mode = true;
106
107 static bool is_mcq_supported(struct ufs_hba *hba)
108 {
109 return hba->mcq_sup && use_mcq_mode;
110 }
111
112 module_param(use_mcq_mode, bool, 0644);
113 MODULE_PARM_DESC(use_mcq_mode, "Control MCQ mode for controllers starting from UFSHCI 4.0. 1 - enable MCQ, 0 - disable MCQ. MCQ is enabled by default");
114
115 #define ufshcd_toggle_vreg(_dev, _vreg, _on) \
116 ({ \
117 int _ret; \
118 if (_on) \
119 _ret = ufshcd_enable_vreg(_dev, _vreg); \
120 else \
121 _ret = ufshcd_disable_vreg(_dev, _vreg); \
122 _ret; \
123 })
124
125 #define ufshcd_hex_dump(prefix_str, buf, len) do { \
126 size_t __len = (len); \
127 print_hex_dump(KERN_ERR, prefix_str, \
128 __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\
129 16, 4, buf, __len, false); \
130 } while (0)
131
132 int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
133 const char *prefix)
134 {
135 u32 *regs;
136 size_t pos;
137
138 if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
139 return -EINVAL;
140
141 regs = kzalloc(len, GFP_ATOMIC);
142 if (!regs)
143 return -ENOMEM;
144
145 for (pos = 0; pos < len; pos += 4) {
146 if (offset == 0 &&
147 pos >= REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER &&
148 pos <= REG_UIC_ERROR_CODE_DME)
149 continue;
150 regs[pos / 4] = ufshcd_readl(hba, offset + pos);
151 }
152
153 ufshcd_hex_dump(prefix, regs, len);
154 kfree(regs);
155
156 return 0;
157 }
158 EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
159
160 enum {
161 UFSHCD_MAX_CHANNEL = 0,
162 UFSHCD_MAX_ID = 1,
163 UFSHCD_CMD_PER_LUN = 32 - UFSHCD_NUM_RESERVED,
164 UFSHCD_CAN_QUEUE = 32 - UFSHCD_NUM_RESERVED,
165 };
166
167 static const char *const ufshcd_state_name[] = {
168 [UFSHCD_STATE_RESET] = "reset",
169 [UFSHCD_STATE_OPERATIONAL] = "operational",
170 [UFSHCD_STATE_ERROR] = "error",
171 [UFSHCD_STATE_EH_SCHEDULED_FATAL] = "eh_fatal",
172 [UFSHCD_STATE_EH_SCHEDULED_NON_FATAL] = "eh_non_fatal",
173 };
174
175 /* UFSHCD error handling flags */
176 enum {
177 UFSHCD_EH_IN_PROGRESS = (1 << 0),
178 };
179
180 /* UFSHCD UIC layer error flags */
181 enum {
182 UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
183 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
184 UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
185 UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
186 UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
187 UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
188 UFSHCD_UIC_PA_GENERIC_ERROR = (1 << 6), /* Generic PA error */
189 };
190
191 #define ufshcd_set_eh_in_progress(h) \
192 ((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
193 #define ufshcd_eh_in_progress(h) \
194 ((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
195 #define ufshcd_clear_eh_in_progress(h) \
196 ((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
197
198 const struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
199 [UFS_PM_LVL_0] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
200 [UFS_PM_LVL_1] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
201 [UFS_PM_LVL_2] = {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
202 [UFS_PM_LVL_3] = {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
203 [UFS_PM_LVL_4] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
204 [UFS_PM_LVL_5] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
205 /*
206 * For DeepSleep, the link is first put in hibern8 and then off.
207 * Leaving the link in hibern8 is not supported.
208 */
209 [UFS_PM_LVL_6] = {UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE},
210 };
211
212 static inline enum ufs_dev_pwr_mode
213 ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
214 {
215 return ufs_pm_lvl_states[lvl].dev_state;
216 }
217
218 static inline enum uic_link_state
219 ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
220 {
221 return ufs_pm_lvl_states[lvl].link_state;
222 }
223
224 static inline enum ufs_pm_level
225 ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
226 enum uic_link_state link_state)
227 {
228 enum ufs_pm_level lvl;
229
230 for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
231 if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
232 (ufs_pm_lvl_states[lvl].link_state == link_state))
233 return lvl;
234 }
235
236 /* if no match found, return the level 0 */
237 return UFS_PM_LVL_0;
238 }
239
240 static bool ufshcd_has_pending_tasks(struct ufs_hba *hba)
241 {
242 return hba->outstanding_tasks || hba->active_uic_cmd ||
243 hba->uic_async_done;
244 }
245
246 static bool ufshcd_is_ufs_dev_busy(struct ufs_hba *hba)
247 {
248 return scsi_host_busy(hba->host) || ufshcd_has_pending_tasks(hba);
249 }
250
251 static const struct ufs_dev_quirk ufs_fixups[] = {
252 /* UFS cards deviations table */
253 { .wmanufacturerid = UFS_VENDOR_MICRON,
254 .model = UFS_ANY_MODEL,
255 .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
256 { .wmanufacturerid = UFS_VENDOR_SAMSUNG,
257 .model = UFS_ANY_MODEL,
258 .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
259 UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
260 UFS_DEVICE_QUIRK_PA_HIBER8TIME |
261 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS },
262 { .wmanufacturerid = UFS_VENDOR_SKHYNIX,
263 .model = UFS_ANY_MODEL,
264 .quirk = UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME },
265 { .wmanufacturerid = UFS_VENDOR_SKHYNIX,
266 .model = "hB8aL1" /*H28U62301AMR*/,
267 .quirk = UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME },
268 { .wmanufacturerid = UFS_VENDOR_TOSHIBA,
269 .model = UFS_ANY_MODEL,
270 .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
271 { .wmanufacturerid = UFS_VENDOR_TOSHIBA,
272 .model = "THGLF2G9C8KBADG",
273 .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE },
274 { .wmanufacturerid = UFS_VENDOR_TOSHIBA,
275 .model = "THGLF2G9D8KBADG",
276 .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE },
277 {}
278 };
279
280 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
281 static void ufshcd_async_scan(void *data, async_cookie_t cookie);
282 static int ufshcd_reset_and_restore(struct ufs_hba *hba);
283 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
284 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
285 static void ufshcd_hba_exit(struct ufs_hba *hba);
286 static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params);
287 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
288 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
289 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
290 static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
291 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
292 static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
293 static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
294 static irqreturn_t ufshcd_intr(int irq, void *__hba);
295 static int ufshcd_change_power_mode(struct ufs_hba *hba,
296 struct ufs_pa_layer_attr *pwr_mode);
297 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
298 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
299 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
300 struct ufs_vreg *vreg);
301 static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba,
302 bool enable);
303 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
304 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
305
306 static inline void ufshcd_enable_irq(struct ufs_hba *hba)
307 {
308 if (!hba->is_irq_enabled) {
309 enable_irq(hba->irq);
310 hba->is_irq_enabled = true;
311 }
312 }
313
314 static inline void ufshcd_disable_irq(struct ufs_hba *hba)
315 {
316 if (hba->is_irq_enabled) {
317 disable_irq(hba->irq);
318 hba->is_irq_enabled = false;
319 }
320 }
321
322 static void ufshcd_configure_wb(struct ufs_hba *hba)
323 {
324 if (!ufshcd_is_wb_allowed(hba))
325 return;
326
327 ufshcd_wb_toggle(hba, true);
328
329 ufshcd_wb_toggle_buf_flush_during_h8(hba, true);
330
331 if (ufshcd_is_wb_buf_flush_allowed(hba))
332 ufshcd_wb_toggle_buf_flush(hba, true);
333 }
334
335 static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
336 {
337 if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
338 scsi_unblock_requests(hba->host);
339 }
340
341 static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
342 {
343 if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
344 scsi_block_requests(hba->host);
345 }
346
347 static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
348 enum ufs_trace_str_t str_t)
349 {
350 struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
351 struct utp_upiu_header *header;
352
353 if (!trace_ufshcd_upiu_enabled())
354 return;
355
356 if (str_t == UFS_CMD_SEND)
357 header = &rq->header;
358 else
359 header = &hba->lrb[tag].ucd_rsp_ptr->header;
360
361 trace_ufshcd_upiu(dev_name(hba->dev), str_t, header, &rq->sc.cdb,
362 UFS_TSF_CDB);
363 }
364
365 static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba,
366 enum ufs_trace_str_t str_t,
367 struct utp_upiu_req *rq_rsp)
368 {
369 if (!trace_ufshcd_upiu_enabled())
370 return;
371
372 trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq_rsp->header,
373 &rq_rsp->qr, UFS_TSF_OSF);
374 }
375
376 static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
377 enum ufs_trace_str_t str_t)
378 {
379 struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[tag];
380
381 if (!trace_ufshcd_upiu_enabled())
382 return;
383
384 if (str_t == UFS_TM_SEND)
385 trace_ufshcd_upiu(dev_name(hba->dev), str_t,
386 &descp->upiu_req.req_header,
387 &descp->upiu_req.input_param1,
388 UFS_TSF_TM_INPUT);
389 else
390 trace_ufshcd_upiu(dev_name(hba->dev), str_t,
391 &descp->upiu_rsp.rsp_header,
392 &descp->upiu_rsp.output_param1,
393 UFS_TSF_TM_OUTPUT);
394 }
395
396 static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
397 const struct uic_command *ucmd,
398 enum ufs_trace_str_t str_t)
399 {
400 u32 cmd;
401
402 if (!trace_ufshcd_uic_command_enabled())
403 return;
404
405 if (str_t == UFS_CMD_SEND)
406 cmd = ucmd->command;
407 else
408 cmd = ufshcd_readl(hba, REG_UIC_COMMAND);
409
410 trace_ufshcd_uic_command(dev_name(hba->dev), str_t, cmd,
411 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1),
412 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2),
413 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3));
414 }
415
416 static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
417 enum ufs_trace_str_t str_t)
418 {
419 u64 lba = 0;
420 u8 opcode = 0, group_id = 0;
421 u32 doorbell = 0;
422 u32 intr;
423 int hwq_id = -1;
424 struct ufshcd_lrb *lrbp = &hba->lrb[tag];
425 struct scsi_cmnd *cmd = lrbp->cmd;
426 struct request *rq = scsi_cmd_to_rq(cmd);
427 int transfer_len = -1;
428
429 if (!cmd)
430 return;
431
432 /* trace UPIU also */
433 ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
434 if (!trace_ufshcd_command_enabled())
435 return;
436
437 opcode = cmd->cmnd[0];
438
439 if (opcode == READ_10 || opcode == WRITE_10) {
440 /*
441 * Currently we only fully trace read(10) and write(10) commands
442 */
443 transfer_len =
444 be32_to_cpu(lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
445 lba = scsi_get_lba(cmd);
446 if (opcode == WRITE_10)
447 group_id = lrbp->cmd->cmnd[6];
448 } else if (opcode == UNMAP) {
449 /*
450 * The number of Bytes to be unmapped beginning with the lba.
451 */
452 transfer_len = blk_rq_bytes(rq);
453 lba = scsi_get_lba(cmd);
454 }
455
456 intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
457
458 if (is_mcq_enabled(hba)) {
459 struct ufs_hw_queue *hwq = ufshcd_mcq_req_to_hwq(hba, rq);
460
461 hwq_id = hwq->id;
462 } else {
463 doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
464 }
465 trace_ufshcd_command(dev_name(hba->dev), str_t, tag,
466 doorbell, hwq_id, transfer_len, intr, lba, opcode, group_id);
467 }
468
469 static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
470 {
471 struct ufs_clk_info *clki;
472 struct list_head *head = &hba->clk_list_head;
473
474 if (list_empty(head))
475 return;
476
477 list_for_each_entry(clki, head, list) {
478 if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
479 clki->max_freq)
480 dev_err(hba->dev, "clk: %s, rate: %u\n",
481 clki->name, clki->curr_freq);
482 }
483 }
484
485 static void ufshcd_print_evt(struct ufs_hba *hba, u32 id,
486 const char *err_name)
487 {
488 int i;
489 bool found = false;
490 const struct ufs_event_hist *e;
491
492 if (id >= UFS_EVT_CNT)
493 return;
494
495 e = &hba->ufs_stats.event[id];
496
497 for (i = 0; i < UFS_EVENT_HIST_LENGTH; i++) {
498 int p = (i + e->pos) % UFS_EVENT_HIST_LENGTH;
499
500 if (e->tstamp[p] == 0)
501 continue;
502 dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
503 e->val[p], div_u64(e->tstamp[p], 1000));
504 found = true;
505 }
506
507 if (!found)
508 dev_err(hba->dev, "No record of %s\n", err_name);
509 else
510 dev_err(hba->dev, "%s: total cnt=%llu\n", err_name, e->cnt);
511 }
512
513 static void ufshcd_print_evt_hist(struct ufs_hba *hba)
514 {
515 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
516
517 ufshcd_print_evt(hba, UFS_EVT_PA_ERR, "pa_err");
518 ufshcd_print_evt(hba, UFS_EVT_DL_ERR, "dl_err");
519 ufshcd_print_evt(hba, UFS_EVT_NL_ERR, "nl_err");
520 ufshcd_print_evt(hba, UFS_EVT_TL_ERR, "tl_err");
521 ufshcd_print_evt(hba, UFS_EVT_DME_ERR, "dme_err");
522 ufshcd_print_evt(hba, UFS_EVT_AUTO_HIBERN8_ERR,
523 "auto_hibern8_err");
524 ufshcd_print_evt(hba, UFS_EVT_FATAL_ERR, "fatal_err");
525 ufshcd_print_evt(hba, UFS_EVT_LINK_STARTUP_FAIL,
526 "link_startup_fail");
527 ufshcd_print_evt(hba, UFS_EVT_RESUME_ERR, "resume_fail");
528 ufshcd_print_evt(hba, UFS_EVT_SUSPEND_ERR,
529 "suspend_fail");
530 ufshcd_print_evt(hba, UFS_EVT_WL_RES_ERR, "wlun resume_fail");
531 ufshcd_print_evt(hba, UFS_EVT_WL_SUSP_ERR,
532 "wlun suspend_fail");
533 ufshcd_print_evt(hba, UFS_EVT_DEV_RESET, "dev_reset");
534 ufshcd_print_evt(hba, UFS_EVT_HOST_RESET, "host_reset");
535 ufshcd_print_evt(hba, UFS_EVT_ABORT, "task_abort");
536
537 ufshcd_vops_dbg_register_dump(hba);
538 }
539
540 static
541 void ufshcd_print_tr(struct ufs_hba *hba, int tag, bool pr_prdt)
542 {
543 const struct ufshcd_lrb *lrbp;
544 int prdt_length;
545
546 lrbp = &hba->lrb[tag];
547
548 dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
549 tag, div_u64(lrbp->issue_time_stamp_local_clock, 1000));
550 dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
551 tag, div_u64(lrbp->compl_time_stamp_local_clock, 1000));
552 dev_err(hba->dev,
553 "UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
554 tag, (u64)lrbp->utrd_dma_addr);
555
556 ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
557 sizeof(struct utp_transfer_req_desc));
558 dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
559 (u64)lrbp->ucd_req_dma_addr);
560 ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
561 sizeof(struct utp_upiu_req));
562 dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
563 (u64)lrbp->ucd_rsp_dma_addr);
564 ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
565 sizeof(struct utp_upiu_rsp));
566
567 prdt_length = le16_to_cpu(
568 lrbp->utr_descriptor_ptr->prd_table_length);
569 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
570 prdt_length /= ufshcd_sg_entry_size(hba);
571
572 dev_err(hba->dev,
573 "UPIU[%d] - PRDT - %d entries phys@0x%llx\n",
574 tag, prdt_length,
575 (u64)lrbp->ucd_prdt_dma_addr);
576
577 if (pr_prdt)
578 ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
579 ufshcd_sg_entry_size(hba) * prdt_length);
580 }
581
582 static bool ufshcd_print_tr_iter(struct request *req, void *priv)
583 {
584 struct scsi_device *sdev = req->q->queuedata;
585 struct Scsi_Host *shost = sdev->host;
586 struct ufs_hba *hba = shost_priv(shost);
587
588 ufshcd_print_tr(hba, req->tag, *(bool *)priv);
589
590 return true;
591 }
592
593 /**
594 * ufshcd_print_trs_all - print trs for all started requests.
595 * @hba: per-adapter instance.
596 * @pr_prdt: need to print prdt or not.
597 */
598 static void ufshcd_print_trs_all(struct ufs_hba *hba, bool pr_prdt)
599 {
600 blk_mq_tagset_busy_iter(&hba->host->tag_set, ufshcd_print_tr_iter, &pr_prdt);
601 }
602
603 static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
604 {
605 int tag;
606
607 for_each_set_bit(tag, &bitmap, hba->nutmrs) {
608 struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];
609
610 dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
611 ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp));
612 }
613 }
614
615 static void ufshcd_print_host_state(struct ufs_hba *hba)
616 {
617 const struct scsi_device *sdev_ufs = hba->ufs_device_wlun;
618
619 dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
620 dev_err(hba->dev, "%d outstanding reqs, tasks=0x%lx\n",
621 scsi_host_busy(hba->host), hba->outstanding_tasks);
622 dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
623 hba->saved_err, hba->saved_uic_err);
624 dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
625 hba->curr_dev_pwr_mode, hba->uic_link_state);
626 dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
627 hba->pm_op_in_progress, hba->is_sys_suspended);
628 dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
629 hba->auto_bkops_enabled, hba->host->host_self_blocked);
630 dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
631 dev_err(hba->dev,
632 "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n",
633 div_u64(hba->ufs_stats.last_hibern8_exit_tstamp, 1000),
634 hba->ufs_stats.hibern8_exit_cnt);
635 dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n",
636 div_u64(hba->ufs_stats.last_intr_ts, 1000),
637 hba->ufs_stats.last_intr_status);
638 dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
639 hba->eh_flags, hba->req_abort_count);
640 dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n",
641 hba->ufs_version, hba->capabilities, hba->caps);
642 dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
643 hba->dev_quirks);
644 if (sdev_ufs)
645 dev_err(hba->dev, "UFS dev info: %.8s %.16s rev %.4s\n",
646 sdev_ufs->vendor, sdev_ufs->model, sdev_ufs->rev);
647
648 ufshcd_print_clk_freqs(hba);
649 }
650
651 /**
652 * ufshcd_print_pwr_info - print power params as saved in hba
653 * power info
654 * @hba: per-adapter instance
655 */
656 static void ufshcd_print_pwr_info(struct ufs_hba *hba)
657 {
658 static const char * const names[] = {
659 "INVALID MODE",
660 "FAST MODE",
661 "SLOW_MODE",
662 "INVALID MODE",
663 "FASTAUTO_MODE",
664 "SLOWAUTO_MODE",
665 "INVALID MODE",
666 };
667
668 /*
669 * Using dev_dbg to avoid messages during runtime PM; otherwise messages
670 * written back to storage by user space would cause runtime resume,
671 * which would cause more messages, and so on in a never-ending cycle.
672 */
673 dev_dbg(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
674 __func__,
675 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
676 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
677 names[hba->pwr_info.pwr_rx],
678 names[hba->pwr_info.pwr_tx],
679 hba->pwr_info.hs_rate);
680 }
681
682 static void ufshcd_device_reset(struct ufs_hba *hba)
683 {
684 int err;
685
686 err = ufshcd_vops_device_reset(hba);
687
688 if (!err) {
689 ufshcd_set_ufs_dev_active(hba);
690 if (ufshcd_is_wb_allowed(hba)) {
691 hba->dev_info.wb_enabled = false;
692 hba->dev_info.wb_buf_flush_enabled = false;
693 }
694 if (hba->dev_info.rtc_type == UFS_RTC_RELATIVE)
695 hba->dev_info.rtc_time_baseline = 0;
696 }
697 if (err != -EOPNOTSUPP)
698 ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err);
699 }
700
701 void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
702 {
703 if (!us)
704 return;
705
706 if (us < 10)
707 udelay(us);
708 else
709 usleep_range(us, us + tolerance);
710 }
711 EXPORT_SYMBOL_GPL(ufshcd_delay_us);
712
713 /**
714 * ufshcd_wait_for_register - wait for register value to change
715 * @hba: per-adapter interface
716 * @reg: mmio register offset
717 * @mask: mask to apply to the read register value
718 * @val: value to wait for
719 * @interval_us: polling interval in microseconds
720 * @timeout_ms: timeout in milliseconds
721 *
722 * Return: -ETIMEDOUT on error, zero on success.
723 */
724 static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
725 u32 val, unsigned long interval_us,
726 unsigned long timeout_ms)
727 {
728 int err = 0;
729 unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
730
731 /* ignore bits that we don't intend to wait on */
732 val = val & mask;
733
734 while ((ufshcd_readl(hba, reg) & mask) != val) {
735 usleep_range(interval_us, interval_us + 50);
736 if (time_after(jiffies, timeout)) {
737 if ((ufshcd_readl(hba, reg) & mask) != val)
738 err = -ETIMEDOUT;
739 break;
740 }
741 }
742
743 return err;
744 }
745
746 /**
747 * ufshcd_get_intr_mask - Get the interrupt bit mask
748 * @hba: Pointer to adapter instance
749 *
750 * Return: interrupt bit mask per version
751 */
752 static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
753 {
754 if (hba->ufs_version == ufshci_version(1, 0))
755 return INTERRUPT_MASK_ALL_VER_10;
756 if (hba->ufs_version <= ufshci_version(2, 0))
757 return INTERRUPT_MASK_ALL_VER_11;
758
759 return INTERRUPT_MASK_ALL_VER_21;
760 }
761
762 /**
763 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
764 * @hba: Pointer to adapter instance
765 *
766 * Return: UFSHCI version supported by the controller
767 */
768 static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
769 {
770 u32 ufshci_ver;
771
772 if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
773 ufshci_ver = ufshcd_vops_get_ufs_hci_version(hba);
774 else
775 ufshci_ver = ufshcd_readl(hba, REG_UFS_VERSION);
776
777 /*
778 * UFSHCI v1.x uses a different version scheme. In order
779 * to allow the use of comparisons with the ufshci_version
780 * function, we convert it to the same scheme as UFS 2.0+.
781 */
782 if (ufshci_ver & 0x00010000)
783 return ufshci_version(1, ufshci_ver & 0x00000100);
784
785 return ufshci_ver;
786 }
787
788 /**
789 * ufshcd_is_device_present - Check if any device is connected to
790 * the host controller
791 * @hba: pointer to adapter instance
792 *
793 * Return: true if device present, false if no device detected
794 */
795 static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
796 {
797 return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & DEVICE_PRESENT;
798 }
799
800 /**
801 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
802 * @lrbp: pointer to local command reference block
803 * @cqe: pointer to the completion queue entry
804 *
805 * This function is used to get the OCS field from UTRD
806 *
807 * Return: the OCS field in the UTRD.
808 */
809 static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp,
810 struct cq_entry *cqe)
811 {
812 if (cqe)
813 return le32_to_cpu(cqe->status) & MASK_OCS;
814
815 return lrbp->utr_descriptor_ptr->header.ocs & MASK_OCS;
816 }
817
818 /**
819 * ufshcd_utrl_clear() - Clear requests from the controller request list.
820 * @hba: per adapter instance
821 * @mask: mask with one bit set for each request to be cleared
822 */
823 static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 mask)
824 {
825 if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
826 mask = ~mask;
827 /*
828 * From the UFSHCI specification: "UTP Transfer Request List CLear
829 * Register (UTRLCLR): This field is bit significant. Each bit
830 * corresponds to a slot in the UTP Transfer Request List, where bit 0
831 * corresponds to request slot 0. A bit in this field is set to ‘0’
832 * by host software to indicate to the host controller that a transfer
833 * request slot is cleared. The host controller
834 * shall free up any resources associated to the request slot
835 * immediately, and shall set the associated bit in UTRLDBR to ‘0’. The
836 * host software indicates no change to request slots by setting the
837 * associated bits in this field to ‘1’. Bits in this field shall only
838 * be set ‘1’ or ‘0’ by host software when UTRLRSR is set to ‘1’."
839 */
840 ufshcd_writel(hba, ~mask, REG_UTP_TRANSFER_REQ_LIST_CLEAR);
841 }
842
843 /**
844 * ufshcd_utmrl_clear - Clear a bit in UTMRLCLR register
845 * @hba: per adapter instance
846 * @pos: position of the bit to be cleared
847 */
848 static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
849 {
850 if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
851 ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
852 else
853 ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
854 }
855
856 /**
857 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
858 * @reg: Register value of host controller status
859 *
860 * Return: 0 on success; a positive value if failed.
861 */
862 static inline int ufshcd_get_lists_status(u32 reg)
863 {
864 return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
865 }
866
867 /**
868 * ufshcd_get_uic_cmd_result - Get the UIC command result
869 * @hba: Pointer to adapter instance
870 *
871 * This function gets the result of UIC command completion
872 *
873 * Return: 0 on success; non-zero value on error.
874 */
875 static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
876 {
877 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
878 MASK_UIC_COMMAND_RESULT;
879 }
880
881 /**
882 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
883 * @hba: Pointer to adapter instance
884 *
885 * This function gets UIC command argument3
886 *
887 * Return: the value of UIC command argument 3.
888 */
889 static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
890 {
891 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
892 }
893
894 /**
895 * ufshcd_get_req_rsp - returns the TR response transaction type
896 * @ucd_rsp_ptr: pointer to response UPIU
897 *
898 * Return: UPIU type.
899 */
900 static inline enum upiu_response_transaction
901 ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
902 {
903 return ucd_rsp_ptr->header.transaction_code;
904 }
905
906 /**
907 * ufshcd_is_exception_event - Check if the device raised an exception event
908 * @ucd_rsp_ptr: pointer to response UPIU
909 *
910 * The function checks if the device raised an exception event indicated in
911 * the Device Information field of response UPIU.
912 *
913 * Return: true if exception is raised, false otherwise.
914 */
915 static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
916 {
917 return ucd_rsp_ptr->header.device_information & 1;
918 }
919
920 /**
921 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
922 * @hba: per adapter instance
923 */
924 static inline void
925 ufshcd_reset_intr_aggr(struct ufs_hba *hba)
926 {
927 ufshcd_writel(hba, INT_AGGR_ENABLE |
928 INT_AGGR_COUNTER_AND_TIMER_RESET,
929 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
930 }
931
932 /**
933 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
934 * @hba: per adapter instance
935 * @cnt: Interrupt aggregation counter threshold
936 * @tmout: Interrupt aggregation timeout value
937 */
938 static inline void
939 ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
940 {
941 ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
942 INT_AGGR_COUNTER_THLD_VAL(cnt) |
943 INT_AGGR_TIMEOUT_VAL(tmout),
944 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
945 }
946
947 /**
948 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
949 * @hba: per adapter instance
950 */
951 static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
952 {
953 ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
954 }
955
956 /**
957 * ufshcd_enable_run_stop_reg - Enable run-stop registers.
958 * When the run-stop registers are set to 1, it indicates to the
959 * host controller that it can process requests.
960 * @hba: per adapter instance
961 */
962 static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
963 {
964 ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
965 REG_UTP_TASK_REQ_LIST_RUN_STOP);
966 ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
967 REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
968 }
969
970 /**
971 * ufshcd_hba_start - Start controller initialization sequence
972 * @hba: per adapter instance
973 */
974 static inline void ufshcd_hba_start(struct ufs_hba *hba)
975 {
976 u32 val = CONTROLLER_ENABLE;
977
978 if (ufshcd_crypto_enable(hba))
979 val |= CRYPTO_GENERAL_ENABLE;
980
981 ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
982 }
983
984 /**
985 * ufshcd_is_hba_active - Get controller state
986 * @hba: per adapter instance
987 *
988 * Return: true if and only if the controller is active.
989 */
990 bool ufshcd_is_hba_active(struct ufs_hba *hba)
991 {
992 return ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE;
993 }
994 EXPORT_SYMBOL_GPL(ufshcd_is_hba_active);
995
996 u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
997 {
998 /* HCI version 1.0 and 1.1 supports UniPro 1.41 */
999 if (hba->ufs_version <= ufshci_version(1, 1))
1000 return UFS_UNIPRO_VER_1_41;
1001 else
1002 return UFS_UNIPRO_VER_1_6;
1003 }
1004 EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
1005
1006 static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
1007 {
1008 /*
1009 * If both host and device support UniPro ver1.6 or later, PA layer
1010 * parameters tuning happens during link startup itself.
1011 *
1012 * We can manually tune PA layer parameters if either host or device
1013 * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
1014 * logic simple, we will only do manual tuning if local unipro version
1015 * doesn't support ver1.6 or later.
1016 */
1017 return ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6;
1018 }
1019
1020 /**
1021 * ufshcd_set_clk_freq - set UFS controller clock frequencies
1022 * @hba: per adapter instance
1023 * @scale_up: If True, set max possible frequency otherwise set low frequency
1024 *
1025 * Return: 0 if successful; < 0 upon failure.
1026 */
1027 static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
1028 {
1029 int ret = 0;
1030 struct ufs_clk_info *clki;
1031 struct list_head *head = &hba->clk_list_head;
1032
1033 if (list_empty(head))
1034 goto out;
1035
1036 list_for_each_entry(clki, head, list) {
1037 if (!IS_ERR_OR_NULL(clki->clk)) {
1038 if (scale_up && clki->max_freq) {
1039 if (clki->curr_freq == clki->max_freq)
1040 continue;
1041
1042 ret = clk_set_rate(clki->clk, clki->max_freq);
1043 if (ret) {
1044 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
1045 __func__, clki->name,
1046 clki->max_freq, ret);
1047 break;
1048 }
1049 trace_ufshcd_clk_scaling(dev_name(hba->dev),
1050 "scaled up", clki->name,
1051 clki->curr_freq,
1052 clki->max_freq);
1053
1054 clki->curr_freq = clki->max_freq;
1055
1056 } else if (!scale_up && clki->min_freq) {
1057 if (clki->curr_freq == clki->min_freq)
1058 continue;
1059
1060 ret = clk_set_rate(clki->clk, clki->min_freq);
1061 if (ret) {
1062 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
1063 __func__, clki->name,
1064 clki->min_freq, ret);
1065 break;
1066 }
1067 trace_ufshcd_clk_scaling(dev_name(hba->dev),
1068 "scaled down", clki->name,
1069 clki->curr_freq,
1070 clki->min_freq);
1071 clki->curr_freq = clki->min_freq;
1072 }
1073 }
1074 dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
1075 clki->name, clk_get_rate(clki->clk));
1076 }
1077
1078 out:
1079 return ret;
1080 }
1081
1082 /**
1083 * ufshcd_scale_clks - scale up or scale down UFS controller clocks
1084 * @hba: per adapter instance
1085 * @scale_up: True if scaling up and false if scaling down
1086 *
1087 * Return: 0 if successful; < 0 upon failure.
1088 */
1089 static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
1090 {
1091 int ret = 0;
1092 ktime_t start = ktime_get();
1093
1094 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
1095 if (ret)
1096 goto out;
1097
1098 ret = ufshcd_set_clk_freq(hba, scale_up);
1099 if (ret)
1100 goto out;
1101
1102 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
1103 if (ret)
1104 ufshcd_set_clk_freq(hba, !scale_up);
1105
1106 out:
1107 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
1108 (scale_up ? "up" : "down"),
1109 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
1110 return ret;
1111 }
1112
1113 /**
1114 * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
1115 * @hba: per adapter instance
1116 * @scale_up: True if scaling up and false if scaling down
1117 *
1118 * Return: true if scaling is required, false otherwise.
1119 */
1120 static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
1121 bool scale_up)
1122 {
1123 struct ufs_clk_info *clki;
1124 struct list_head *head = &hba->clk_list_head;
1125
1126 if (list_empty(head))
1127 return false;
1128
1129 list_for_each_entry(clki, head, list) {
1130 if (!IS_ERR_OR_NULL(clki->clk)) {
1131 if (scale_up && clki->max_freq) {
1132 if (clki->curr_freq == clki->max_freq)
1133 continue;
1134 return true;
1135 } else if (!scale_up && clki->min_freq) {
1136 if (clki->curr_freq == clki->min_freq)
1137 continue;
1138 return true;
1139 }
1140 }
1141 }
1142
1143 return false;
1144 }
1145
1146 /*
1147 * Determine the number of pending commands by counting the bits in the SCSI
1148 * device budget maps. This approach has been selected because a bit is set in
1149 * the budget map before scsi_host_queue_ready() checks the host_self_blocked
1150 * flag. The host_self_blocked flag can be modified by calling
1151 * scsi_block_requests() or scsi_unblock_requests().
1152 */
1153 static u32 ufshcd_pending_cmds(struct ufs_hba *hba)
1154 {
1155 const struct scsi_device *sdev;
1156 u32 pending = 0;
1157
1158 lockdep_assert_held(hba->host->host_lock);
1159 __shost_for_each_device(sdev, hba->host)
1160 pending += sbitmap_weight(&sdev->budget_map);
1161
1162 return pending;
1163 }
1164
1165 /*
1166 * Wait until all pending SCSI commands and TMFs have finished or the timeout
1167 * has expired.
1168 *
1169 * Return: 0 upon success; -EBUSY upon timeout.
1170 */
1171 static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
1172 u64 wait_timeout_us)
1173 {
1174 unsigned long flags;
1175 int ret = 0;
1176 u32 tm_doorbell;
1177 u32 tr_pending;
1178 bool timeout = false, do_last_check = false;
1179 ktime_t start;
1180
1181 ufshcd_hold(hba);
1182 spin_lock_irqsave(hba->host->host_lock, flags);
1183 /*
1184 * Wait for all the outstanding tasks/transfer requests.
1185 * Verify by checking the doorbell registers are clear.
1186 */
1187 start = ktime_get();
1188 do {
1189 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
1190 ret = -EBUSY;
1191 goto out;
1192 }
1193
1194 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
1195 tr_pending = ufshcd_pending_cmds(hba);
1196 if (!tm_doorbell && !tr_pending) {
1197 timeout = false;
1198 break;
1199 } else if (do_last_check) {
1200 break;
1201 }
1202
1203 spin_unlock_irqrestore(hba->host->host_lock, flags);
1204 io_schedule_timeout(msecs_to_jiffies(20));
1205 if (ktime_to_us(ktime_sub(ktime_get(), start)) >
1206 wait_timeout_us) {
1207 timeout = true;
1208 /*
1209 * We might have scheduled out for long time so make
1210 * sure to check if doorbells are cleared by this time
1211 * or not.
1212 */
1213 do_last_check = true;
1214 }
1215 spin_lock_irqsave(hba->host->host_lock, flags);
1216 } while (tm_doorbell || tr_pending);
1217
1218 if (timeout) {
1219 dev_err(hba->dev,
1220 "%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
1221 __func__, tm_doorbell, tr_pending);
1222 ret = -EBUSY;
1223 }
1224 out:
1225 spin_unlock_irqrestore(hba->host->host_lock, flags);
1226 ufshcd_release(hba);
1227 return ret;
1228 }
1229
1230 /**
1231 * ufshcd_scale_gear - scale up/down UFS gear
1232 * @hba: per adapter instance
1233 * @scale_up: True for scaling up gear and false for scaling down
1234 *
1235 * Return: 0 for success; -EBUSY if scaling can't happen at this time;
1236 * non-zero for any other errors.
1237 */
1238 static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
1239 {
1240 int ret = 0;
1241 struct ufs_pa_layer_attr new_pwr_info;
1242
1243 if (scale_up) {
1244 memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info,
1245 sizeof(struct ufs_pa_layer_attr));
1246 } else {
1247 memcpy(&new_pwr_info, &hba->pwr_info,
1248 sizeof(struct ufs_pa_layer_attr));
1249
1250 if (hba->pwr_info.gear_tx > hba->clk_scaling.min_gear ||
1251 hba->pwr_info.gear_rx > hba->clk_scaling.min_gear) {
1252 /* save the current power mode */
1253 memcpy(&hba->clk_scaling.saved_pwr_info,
1254 &hba->pwr_info,
1255 sizeof(struct ufs_pa_layer_attr));
1256
1257 /* scale down gear */
1258 new_pwr_info.gear_tx = hba->clk_scaling.min_gear;
1259 new_pwr_info.gear_rx = hba->clk_scaling.min_gear;
1260 }
1261 }
1262
1263 /* check if the power mode needs to be changed or not? */
1264 ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
1265 if (ret)
1266 dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
1267 __func__, ret,
1268 hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
1269 new_pwr_info.gear_tx, new_pwr_info.gear_rx);
1270
1271 return ret;
1272 }
1273
1274 /*
1275 * Wait until all pending SCSI commands and TMFs have finished or the timeout
1276 * has expired.
1277 *
1278 * Return: 0 upon success; -EBUSY upon timeout.
1279 */
1280 static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
1281 {
1282 int ret = 0;
1283 /*
1284 * make sure that there are no outstanding requests when
1285 * clock scaling is in progress
1286 */
1287 blk_mq_quiesce_tagset(&hba->host->tag_set);
1288 mutex_lock(&hba->wb_mutex);
1289 down_write(&hba->clk_scaling_lock);
1290
1291 if (!hba->clk_scaling.is_allowed ||
1292 ufshcd_wait_for_doorbell_clr(hba, timeout_us)) {
1293 ret = -EBUSY;
1294 up_write(&hba->clk_scaling_lock);
1295 mutex_unlock(&hba->wb_mutex);
1296 blk_mq_unquiesce_tagset(&hba->host->tag_set);
1297 goto out;
1298 }
1299
1300 /* let's not get into low power until clock scaling is completed */
1301 ufshcd_hold(hba);
1302
1303 out:
1304 return ret;
1305 }
1306
1307 static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool scale_up)
1308 {
1309 up_write(&hba->clk_scaling_lock);
1310
1311 /* Enable Write Booster if we have scaled up else disable it */
1312 if (ufshcd_enable_wb_if_scaling_up(hba) && !err)
1313 ufshcd_wb_toggle(hba, scale_up);
1314
1315 mutex_unlock(&hba->wb_mutex);
1316
1317 blk_mq_unquiesce_tagset(&hba->host->tag_set);
1318 ufshcd_release(hba);
1319 }
1320
1321 /**
1322 * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
1323 * @hba: per adapter instance
1324 * @scale_up: True for scaling up and false for scaling down
1325 *
1326 * Return: 0 for success; -EBUSY if scaling can't happen at this time; non-zero
1327 * for any other errors.
1328 */
1329 static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
1330 {
1331 int ret = 0;
1332
1333 ret = ufshcd_clock_scaling_prepare(hba, 1 * USEC_PER_SEC);
1334 if (ret)
1335 return ret;
1336
1337 /* scale down the gear before scaling down clocks */
1338 if (!scale_up) {
1339 ret = ufshcd_scale_gear(hba, false);
1340 if (ret)
1341 goto out_unprepare;
1342 }
1343
1344 ret = ufshcd_scale_clks(hba, scale_up);
1345 if (ret) {
1346 if (!scale_up)
1347 ufshcd_scale_gear(hba, true);
1348 goto out_unprepare;
1349 }
1350
1351 /* scale up the gear after scaling up clocks */
1352 if (scale_up) {
1353 ret = ufshcd_scale_gear(hba, true);
1354 if (ret) {
1355 ufshcd_scale_clks(hba, false);
1356 goto out_unprepare;
1357 }
1358 }
1359
1360 out_unprepare:
1361 ufshcd_clock_scaling_unprepare(hba, ret, scale_up);
1362 return ret;
1363 }
1364
1365 static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
1366 {
1367 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1368 clk_scaling.suspend_work);
1369 unsigned long irq_flags;
1370
1371 spin_lock_irqsave(hba->host->host_lock, irq_flags);
1372 if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
1373 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1374 return;
1375 }
1376 hba->clk_scaling.is_suspended = true;
1377 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1378
1379 __ufshcd_suspend_clkscaling(hba);
1380 }
1381
1382 static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
1383 {
1384 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1385 clk_scaling.resume_work);
1386 unsigned long irq_flags;
1387
1388 spin_lock_irqsave(hba->host->host_lock, irq_flags);
1389 if (!hba->clk_scaling.is_suspended) {
1390 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1391 return;
1392 }
1393 hba->clk_scaling.is_suspended = false;
1394 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1395
1396 devfreq_resume_device(hba->devfreq);
1397 }
1398
1399 static int ufshcd_devfreq_target(struct device *dev,
1400 unsigned long *freq, u32 flags)
1401 {
1402 int ret = 0;
1403 struct ufs_hba *hba = dev_get_drvdata(dev);
1404 ktime_t start;
1405 bool scale_up, sched_clk_scaling_suspend_work = false;
1406 struct list_head *clk_list = &hba->clk_list_head;
1407 struct ufs_clk_info *clki;
1408 unsigned long irq_flags;
1409
1410 if (!ufshcd_is_clkscaling_supported(hba))
1411 return -EINVAL;
1412
1413 clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
1414 /* Override with the closest supported frequency */
1415 *freq = (unsigned long) clk_round_rate(clki->clk, *freq);
1416 spin_lock_irqsave(hba->host->host_lock, irq_flags);
1417 if (ufshcd_eh_in_progress(hba)) {
1418 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1419 return 0;
1420 }
1421
1422 if (!hba->clk_scaling.active_reqs)
1423 sched_clk_scaling_suspend_work = true;
1424
1425 if (list_empty(clk_list)) {
1426 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1427 goto out;
1428 }
1429
1430 /* Decide based on the rounded-off frequency and update */
1431 scale_up = *freq == clki->max_freq;
1432 if (!scale_up)
1433 *freq = clki->min_freq;
1434 /* Update the frequency */
1435 if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
1436 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1437 ret = 0;
1438 goto out; /* no state change required */
1439 }
1440 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1441
1442 start = ktime_get();
1443 ret = ufshcd_devfreq_scale(hba, scale_up);
1444
1445 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
1446 (scale_up ? "up" : "down"),
1447 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
1448
1449 out:
1450 if (sched_clk_scaling_suspend_work)
1451 queue_work(hba->clk_scaling.workq,
1452 &hba->clk_scaling.suspend_work);
1453
1454 return ret;
1455 }
1456
1457 static int ufshcd_devfreq_get_dev_status(struct device *dev,
1458 struct devfreq_dev_status *stat)
1459 {
1460 struct ufs_hba *hba = dev_get_drvdata(dev);
1461 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1462 unsigned long flags;
1463 struct list_head *clk_list = &hba->clk_list_head;
1464 struct ufs_clk_info *clki;
1465 ktime_t curr_t;
1466
1467 if (!ufshcd_is_clkscaling_supported(hba))
1468 return -EINVAL;
1469
1470 memset(stat, 0, sizeof(*stat));
1471
1472 spin_lock_irqsave(hba->host->host_lock, flags);
1473 curr_t = ktime_get();
1474 if (!scaling->window_start_t)
1475 goto start_window;
1476
1477 clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1478 /*
1479 * If the current frequency is 0, the ondemand governor considers
1480 * that no initial frequency has been set and always requests the
1481 * max. frequency.
1482 */
1483 stat->current_frequency = clki->curr_freq;
1484 if (scaling->is_busy_started)
1485 scaling->tot_busy_t += ktime_us_delta(curr_t,
1486 scaling->busy_start_t);
1487
1488 stat->total_time = ktime_us_delta(curr_t, scaling->window_start_t);
1489 stat->busy_time = scaling->tot_busy_t;
1490 start_window:
1491 scaling->window_start_t = curr_t;
1492 scaling->tot_busy_t = 0;
1493
1494 if (scaling->active_reqs) {
1495 scaling->busy_start_t = curr_t;
1496 scaling->is_busy_started = true;
1497 } else {
1498 scaling->busy_start_t = 0;
1499 scaling->is_busy_started = false;
1500 }
1501 spin_unlock_irqrestore(hba->host->host_lock, flags);
1502 return 0;
1503 }
1504
1505 static int ufshcd_devfreq_init(struct ufs_hba *hba)
1506 {
1507 struct list_head *clk_list = &hba->clk_list_head;
1508 struct ufs_clk_info *clki;
1509 struct devfreq *devfreq;
1510 int ret;
1511
1512 /* Skip devfreq if we don't have any clocks in the list */
1513 if (list_empty(clk_list))
1514 return 0;
1515
1516 clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1517 dev_pm_opp_add(hba->dev, clki->min_freq, 0);
1518 dev_pm_opp_add(hba->dev, clki->max_freq, 0);
1519
1520 ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile,
1521 &hba->vps->ondemand_data);
1522 devfreq = devfreq_add_device(hba->dev,
1523 &hba->vps->devfreq_profile,
1524 DEVFREQ_GOV_SIMPLE_ONDEMAND,
1525 &hba->vps->ondemand_data);
1526 if (IS_ERR(devfreq)) {
1527 ret = PTR_ERR(devfreq);
1528 dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);
1529
1530 dev_pm_opp_remove(hba->dev, clki->min_freq);
1531 dev_pm_opp_remove(hba->dev, clki->max_freq);
1532 return ret;
1533 }
1534
1535 hba->devfreq = devfreq;
1536
1537 return 0;
1538 }
1539
1540 static void ufshcd_devfreq_remove(struct ufs_hba *hba)
1541 {
1542 struct list_head *clk_list = &hba->clk_list_head;
1543 struct ufs_clk_info *clki;
1544
1545 if (!hba->devfreq)
1546 return;
1547
1548 devfreq_remove_device(hba->devfreq);
1549 hba->devfreq = NULL;
1550
1551 clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1552 dev_pm_opp_remove(hba->dev, clki->min_freq);
1553 dev_pm_opp_remove(hba->dev, clki->max_freq);
1554 }
1555
1556 static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1557 {
1558 unsigned long flags;
1559
1560 devfreq_suspend_device(hba->devfreq);
1561 spin_lock_irqsave(hba->host->host_lock, flags);
1562 hba->clk_scaling.window_start_t = 0;
1563 spin_unlock_irqrestore(hba->host->host_lock, flags);
1564 }
1565
1566 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1567 {
1568 unsigned long flags;
1569 bool suspend = false;
1570
1571 cancel_work_sync(&hba->clk_scaling.suspend_work);
1572 cancel_work_sync(&hba->clk_scaling.resume_work);
1573
1574 spin_lock_irqsave(hba->host->host_lock, flags);
1575 if (!hba->clk_scaling.is_suspended) {
1576 suspend = true;
1577 hba->clk_scaling.is_suspended = true;
1578 }
1579 spin_unlock_irqrestore(hba->host->host_lock, flags);
1580
1581 if (suspend)
1582 __ufshcd_suspend_clkscaling(hba);
1583 }
1584
1585 static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
1586 {
1587 unsigned long flags;
1588 bool resume = false;
1589
1590 spin_lock_irqsave(hba->host->host_lock, flags);
1591 if (hba->clk_scaling.is_suspended) {
1592 resume = true;
1593 hba->clk_scaling.is_suspended = false;
1594 }
1595 spin_unlock_irqrestore(hba->host->host_lock, flags);
1596
1597 if (resume)
1598 devfreq_resume_device(hba->devfreq);
1599 }
1600
1601 static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
1602 struct device_attribute *attr, char *buf)
1603 {
1604 struct ufs_hba *hba = dev_get_drvdata(dev);
1605
1606 return sysfs_emit(buf, "%d\n", hba->clk_scaling.is_enabled);
1607 }
1608
1609 static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
1610 struct device_attribute *attr, const char *buf, size_t count)
1611 {
1612 struct ufs_hba *hba = dev_get_drvdata(dev);
1613 u32 value;
1614 int err = 0;
1615
1616 if (kstrtou32(buf, 0, &value))
1617 return -EINVAL;
1618
1619 down(&hba->host_sem);
1620 if (!ufshcd_is_user_access_allowed(hba)) {
1621 err = -EBUSY;
1622 goto out;
1623 }
1624
1625 value = !!value;
1626 if (value == hba->clk_scaling.is_enabled)
1627 goto out;
1628
1629 ufshcd_rpm_get_sync(hba);
1630 ufshcd_hold(hba);
1631
1632 hba->clk_scaling.is_enabled = value;
1633
1634 if (value) {
1635 ufshcd_resume_clkscaling(hba);
1636 } else {
1637 ufshcd_suspend_clkscaling(hba);
1638 err = ufshcd_devfreq_scale(hba, true);
1639 if (err)
1640 dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
1641 __func__, err);
1642 }
1643
1644 ufshcd_release(hba);
1645 ufshcd_rpm_put_sync(hba);
1646 out:
1647 up(&hba->host_sem);
1648 return err ? err : count;
1649 }
1650
1651 static void ufshcd_init_clk_scaling_sysfs(struct ufs_hba *hba)
1652 {
1653 hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
1654 hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
1655 sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
1656 hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
1657 hba->clk_scaling.enable_attr.attr.mode = 0644;
1658 if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
1659 dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
1660 }
1661
1662 static void ufshcd_remove_clk_scaling_sysfs(struct ufs_hba *hba)
1663 {
1664 if (hba->clk_scaling.enable_attr.attr.name)
1665 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
1666 }
1667
1668 static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
1669 {
1670 char wq_name[sizeof("ufs_clkscaling_00")];
1671
1672 if (!ufshcd_is_clkscaling_supported(hba))
1673 return;
1674
1675 if (!hba->clk_scaling.min_gear)
1676 hba->clk_scaling.min_gear = UFS_HS_G1;
1677
1678 INIT_WORK(&hba->clk_scaling.suspend_work,
1679 ufshcd_clk_scaling_suspend_work);
1680 INIT_WORK(&hba->clk_scaling.resume_work,
1681 ufshcd_clk_scaling_resume_work);
1682
1683 snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
1684 hba->host->host_no);
1685 hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
1686
1687 hba->clk_scaling.is_initialized = true;
1688 }
1689
1690 static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
1691 {
1692 if (!hba->clk_scaling.is_initialized)
1693 return;
1694
1695 ufshcd_remove_clk_scaling_sysfs(hba);
1696 destroy_workqueue(hba->clk_scaling.workq);
1697 ufshcd_devfreq_remove(hba);
1698 hba->clk_scaling.is_initialized = false;
1699 }
1700
1701 static void ufshcd_ungate_work(struct work_struct *work)
1702 {
1703 int ret;
1704 unsigned long flags;
1705 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1706 clk_gating.ungate_work);
1707
1708 cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1709
1710 spin_lock_irqsave(hba->host->host_lock, flags);
1711 if (hba->clk_gating.state == CLKS_ON) {
1712 spin_unlock_irqrestore(hba->host->host_lock, flags);
1713 return;
1714 }
1715
1716 spin_unlock_irqrestore(hba->host->host_lock, flags);
1717 ufshcd_hba_vreg_set_hpm(hba);
1718 ufshcd_setup_clocks(hba, true);
1719
1720 ufshcd_enable_irq(hba);
1721
1722 /* Exit from hibern8 */
1723 if (ufshcd_can_hibern8_during_gating(hba)) {
1724 /* Prevent gating in this path */
1725 hba->clk_gating.is_suspended = true;
1726 if (ufshcd_is_link_hibern8(hba)) {
1727 ret = ufshcd_uic_hibern8_exit(hba);
1728 if (ret)
1729 dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
1730 __func__, ret);
1731 else
1732 ufshcd_set_link_active(hba);
1733 }
1734 hba->clk_gating.is_suspended = false;
1735 }
1736 }
1737
1738 /**
1739 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
1740 * Also, exit from hibern8 mode and set the link as active.
1741 * @hba: per adapter instance
1742 */
1743 void ufshcd_hold(struct ufs_hba *hba)
1744 {
1745 bool flush_result;
1746 unsigned long flags;
1747
1748 if (!ufshcd_is_clkgating_allowed(hba) ||
1749 !hba->clk_gating.is_initialized)
1750 return;
1751 spin_lock_irqsave(hba->host->host_lock, flags);
1752 hba->clk_gating.active_reqs++;
1753
1754 start:
1755 switch (hba->clk_gating.state) {
1756 case CLKS_ON:
1757 /*
1758 * Wait for the ungate work to complete if in progress.
1759 * Though the clocks may be in the ON state, the link could
1760 * still be in hibern8 state if hibern8 is allowed
1761 * during clock gating.
1762 * Make sure we exit hibern8 state in addition to the
1763 * clocks being ON.
1764 */
1765 if (ufshcd_can_hibern8_during_gating(hba) &&
1766 ufshcd_is_link_hibern8(hba)) {
1767 spin_unlock_irqrestore(hba->host->host_lock, flags);
1768 flush_result = flush_work(&hba->clk_gating.ungate_work);
1769 if (hba->clk_gating.is_suspended && !flush_result)
1770 return;
1771 spin_lock_irqsave(hba->host->host_lock, flags);
1772 goto start;
1773 }
1774 break;
1775 case REQ_CLKS_OFF:
1776 if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
1777 hba->clk_gating.state = CLKS_ON;
1778 trace_ufshcd_clk_gating(dev_name(hba->dev),
1779 hba->clk_gating.state);
1780 break;
1781 }
1782 /*
1783 * If we are here, it means gating work is either done or
1784 * currently running. Hence, fall through to cancel gating
1785 * work and to enable clocks.
1786 */
1787 fallthrough;
1788 case CLKS_OFF:
1789 hba->clk_gating.state = REQ_CLKS_ON;
1790 trace_ufshcd_clk_gating(dev_name(hba->dev),
1791 hba->clk_gating.state);
1792 queue_work(hba->clk_gating.clk_gating_workq,
1793 &hba->clk_gating.ungate_work);
1794 /*
1795 * fall through to check if we should wait for this
1796 * work to be done or not.
1797 */
1798 fallthrough;
1799 case REQ_CLKS_ON:
1800 spin_unlock_irqrestore(hba->host->host_lock, flags);
1801 flush_work(&hba->clk_gating.ungate_work);
1802 /* Make sure state is CLKS_ON before returning */
1803 spin_lock_irqsave(hba->host->host_lock, flags);
1804 goto start;
1805 default:
1806 dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
1807 __func__, hba->clk_gating.state);
1808 break;
1809 }
1810 spin_unlock_irqrestore(hba->host->host_lock, flags);
1811 }
1812 EXPORT_SYMBOL_GPL(ufshcd_hold);
1813
1814 static void ufshcd_gate_work(struct work_struct *work)
1815 {
1816 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1817 clk_gating.gate_work.work);
1818 unsigned long flags;
1819 int ret;
1820
1821 spin_lock_irqsave(hba->host->host_lock, flags);
1822 /*
1823 * In case you are here to cancel this work the gating state
1824 * would be marked as REQ_CLKS_ON. In this case save time by
1825 * skipping the gating work and exit after changing the clock
1826 * state to CLKS_ON.
1827 */
1828 if (hba->clk_gating.is_suspended ||
1829 (hba->clk_gating.state != REQ_CLKS_OFF)) {
1830 hba->clk_gating.state = CLKS_ON;
1831 trace_ufshcd_clk_gating(dev_name(hba->dev),
1832 hba->clk_gating.state);
1833 goto rel_lock;
1834 }
1835
1836 if (ufshcd_is_ufs_dev_busy(hba) ||
1837 hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
1838 hba->clk_gating.active_reqs)
1839 goto rel_lock;
1840
1841 spin_unlock_irqrestore(hba->host->host_lock, flags);
1842
1843 /* put the link into hibern8 mode before turning off clocks */
1844 if (ufshcd_can_hibern8_during_gating(hba)) {
1845 ret = ufshcd_uic_hibern8_enter(hba);
1846 if (ret) {
1847 hba->clk_gating.state = CLKS_ON;
1848 dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
1849 __func__, ret);
1850 trace_ufshcd_clk_gating(dev_name(hba->dev),
1851 hba->clk_gating.state);
1852 goto out;
1853 }
1854 ufshcd_set_link_hibern8(hba);
1855 }
1856
1857 ufshcd_disable_irq(hba);
1858
1859 ufshcd_setup_clocks(hba, false);
1860
1861 /* Put the host controller in low power mode if possible */
1862 ufshcd_hba_vreg_set_lpm(hba);
1863 /*
1864 * In case you are here to cancel this work the gating state
1865 * would be marked as REQ_CLKS_ON. In that case keep the state
1866 * as REQ_CLKS_ON, which anyway implies that clocks are off
1867 * and a request to turn them on is pending. Doing it this way
1868 * keeps the state machine intact and ultimately prevents the
1869 * cancel work from being run multiple times when new requests
1870 * arrive before the current cancel work is done.
1871 */
1872 spin_lock_irqsave(hba->host->host_lock, flags);
1873 if (hba->clk_gating.state == REQ_CLKS_OFF) {
1874 hba->clk_gating.state = CLKS_OFF;
1875 trace_ufshcd_clk_gating(dev_name(hba->dev),
1876 hba->clk_gating.state);
1877 }
1878 rel_lock:
1879 spin_unlock_irqrestore(hba->host->host_lock, flags);
1880 out:
1881 return;
1882 }
1883
1884 /* host lock must be held before calling this variant */
1885 static void __ufshcd_release(struct ufs_hba *hba)
1886 {
1887 if (!ufshcd_is_clkgating_allowed(hba))
1888 return;
1889
1890 hba->clk_gating.active_reqs--;
1891
1892 if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
1893 hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
1894 ufshcd_has_pending_tasks(hba) || !hba->clk_gating.is_initialized ||
1895 hba->clk_gating.state == CLKS_OFF)
1896 return;
1897
1898 hba->clk_gating.state = REQ_CLKS_OFF;
1899 trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
1900 queue_delayed_work(hba->clk_gating.clk_gating_workq,
1901 &hba->clk_gating.gate_work,
1902 msecs_to_jiffies(hba->clk_gating.delay_ms));
1903 }
1904
1905 void ufshcd_release(struct ufs_hba *hba)
1906 {
1907 unsigned long flags;
1908
1909 spin_lock_irqsave(hba->host->host_lock, flags);
1910 __ufshcd_release(hba);
1911 spin_unlock_irqrestore(hba->host->host_lock, flags);
1912 }
1913 EXPORT_SYMBOL_GPL(ufshcd_release);
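/*
 * Usage sketch (illustrative only, not an additional call site in this
 * driver): a hypothetical caller that needs the clocks ungated around a
 * register access would bracket it with ufshcd_hold()/ufshcd_release():
 *
 *	ufshcd_hold(hba);
 *	val = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
 *	ufshcd_release(hba);
 *
 * The hold keeps gate_work from turning the clocks off; the release drops the
 * active request count and, once it reaches zero, schedules gate_work after
 * clk_gating.delay_ms.
 */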
1914
1915 static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
1916 struct device_attribute *attr, char *buf)
1917 {
1918 struct ufs_hba *hba = dev_get_drvdata(dev);
1919
1920 return sysfs_emit(buf, "%lu\n", hba->clk_gating.delay_ms);
1921 }
1922
1923 void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value)
1924 {
1925 struct ufs_hba *hba = dev_get_drvdata(dev);
1926 unsigned long flags;
1927
1928 spin_lock_irqsave(hba->host->host_lock, flags);
1929 hba->clk_gating.delay_ms = value;
1930 spin_unlock_irqrestore(hba->host->host_lock, flags);
1931 }
1932 EXPORT_SYMBOL_GPL(ufshcd_clkgate_delay_set);
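/*
 * Usage sketch (illustrative; the 200 ms value is hypothetical): a host
 * driver that wants a longer idle window before gating could call
 *
 *	ufshcd_clkgate_delay_set(hba->dev, 200);
 *
 * which has the same effect as writing 200 to the clkgate_delay_ms sysfs
 * attribute created below.
 */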
1933
1934 static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
1935 struct device_attribute *attr, const char *buf, size_t count)
1936 {
1937 unsigned long value;
1938
1939 if (kstrtoul(buf, 0, &value))
1940 return -EINVAL;
1941
1942 ufshcd_clkgate_delay_set(dev, value);
1943 return count;
1944 }
1945
1946 static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
1947 struct device_attribute *attr, char *buf)
1948 {
1949 struct ufs_hba *hba = dev_get_drvdata(dev);
1950
1951 return sysfs_emit(buf, "%d\n", hba->clk_gating.is_enabled);
1952 }
1953
1954 static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
1955 struct device_attribute *attr, const char *buf, size_t count)
1956 {
1957 struct ufs_hba *hba = dev_get_drvdata(dev);
1958 unsigned long flags;
1959 u32 value;
1960
1961 if (kstrtou32(buf, 0, &value))
1962 return -EINVAL;
1963
1964 value = !!value;
1965
1966 spin_lock_irqsave(hba->host->host_lock, flags);
1967 if (value == hba->clk_gating.is_enabled)
1968 goto out;
1969
1970 if (value)
1971 __ufshcd_release(hba);
1972 else
1973 hba->clk_gating.active_reqs++;
1974
1975 hba->clk_gating.is_enabled = value;
1976 out:
1977 spin_unlock_irqrestore(hba->host->host_lock, flags);
1978 return count;
1979 }
1980
1981 static void ufshcd_init_clk_gating_sysfs(struct ufs_hba *hba)
1982 {
1983 hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
1984 hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
1985 sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
1986 hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
1987 hba->clk_gating.delay_attr.attr.mode = 0644;
1988 if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
1989 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
1990
1991 hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
1992 hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
1993 sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
1994 hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
1995 hba->clk_gating.enable_attr.attr.mode = 0644;
1996 if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
1997 dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
1998 }
1999
2000 static void ufshcd_remove_clk_gating_sysfs(struct ufs_hba *hba)
2001 {
2002 if (hba->clk_gating.delay_attr.attr.name)
2003 device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
2004 if (hba->clk_gating.enable_attr.attr.name)
2005 device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
2006 }
2007
2008 static void ufshcd_init_clk_gating(struct ufs_hba *hba)
2009 {
2010 char wq_name[sizeof("ufs_clk_gating_00")];
2011
2012 if (!ufshcd_is_clkgating_allowed(hba))
2013 return;
2014
2015 hba->clk_gating.state = CLKS_ON;
2016
2017 hba->clk_gating.delay_ms = 150;
2018 INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
2019 INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
2020
2021 snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
2022 hba->host->host_no);
2023 hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
2024 WQ_MEM_RECLAIM | WQ_HIGHPRI);
2025
2026 ufshcd_init_clk_gating_sysfs(hba);
2027
2028 hba->clk_gating.is_enabled = true;
2029 hba->clk_gating.is_initialized = true;
2030 }
2031
2032 static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
2033 {
2034 if (!hba->clk_gating.is_initialized)
2035 return;
2036
2037 ufshcd_remove_clk_gating_sysfs(hba);
2038
2039 /* Ungate the clock if necessary. */
2040 ufshcd_hold(hba);
2041 hba->clk_gating.is_initialized = false;
2042 ufshcd_release(hba);
2043
2044 destroy_workqueue(hba->clk_gating.clk_gating_workq);
2045 }
2046
2047 static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
2048 {
2049 bool queue_resume_work = false;
2050 ktime_t curr_t = ktime_get();
2051 unsigned long flags;
2052
2053 if (!ufshcd_is_clkscaling_supported(hba))
2054 return;
2055
2056 spin_lock_irqsave(hba->host->host_lock, flags);
2057 if (!hba->clk_scaling.active_reqs++)
2058 queue_resume_work = true;
2059
2060 if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) {
2061 spin_unlock_irqrestore(hba->host->host_lock, flags);
2062 return;
2063 }
2064
2065 if (queue_resume_work)
2066 queue_work(hba->clk_scaling.workq,
2067 &hba->clk_scaling.resume_work);
2068
2069 if (!hba->clk_scaling.window_start_t) {
2070 hba->clk_scaling.window_start_t = curr_t;
2071 hba->clk_scaling.tot_busy_t = 0;
2072 hba->clk_scaling.is_busy_started = false;
2073 }
2074
2075 if (!hba->clk_scaling.is_busy_started) {
2076 hba->clk_scaling.busy_start_t = curr_t;
2077 hba->clk_scaling.is_busy_started = true;
2078 }
2079 spin_unlock_irqrestore(hba->host->host_lock, flags);
2080 }
2081
2082 static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
2083 {
2084 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
2085 unsigned long flags;
2086
2087 if (!ufshcd_is_clkscaling_supported(hba))
2088 return;
2089
2090 spin_lock_irqsave(hba->host->host_lock, flags);
2091 hba->clk_scaling.active_reqs--;
2092 if (!scaling->active_reqs && scaling->is_busy_started) {
2093 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
2094 scaling->busy_start_t));
2095 scaling->busy_start_t = 0;
2096 scaling->is_busy_started = false;
2097 }
2098 spin_unlock_irqrestore(hba->host->host_lock, flags);
2099 }
2100
2101 static inline int ufshcd_monitor_opcode2dir(u8 opcode)
2102 {
2103 if (opcode == READ_6 || opcode == READ_10 || opcode == READ_16)
2104 return READ;
2105 else if (opcode == WRITE_6 || opcode == WRITE_10 || opcode == WRITE_16)
2106 return WRITE;
2107 else
2108 return -EINVAL;
2109 }
2110
2111 static inline bool ufshcd_should_inform_monitor(struct ufs_hba *hba,
2112 struct ufshcd_lrb *lrbp)
2113 {
2114 const struct ufs_hba_monitor *m = &hba->monitor;
2115
2116 return (m->enabled && lrbp && lrbp->cmd &&
2117 (!m->chunk_size || m->chunk_size == lrbp->cmd->sdb.length) &&
2118 ktime_before(hba->monitor.enabled_ts, lrbp->issue_time_stamp));
2119 }
2120
2121 static void ufshcd_start_monitor(struct ufs_hba *hba,
2122 const struct ufshcd_lrb *lrbp)
2123 {
2124 int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
2125 unsigned long flags;
2126
2127 spin_lock_irqsave(hba->host->host_lock, flags);
2128 if (dir >= 0 && hba->monitor.nr_queued[dir]++ == 0)
2129 hba->monitor.busy_start_ts[dir] = ktime_get();
2130 spin_unlock_irqrestore(hba->host->host_lock, flags);
2131 }
2132
2133 static void ufshcd_update_monitor(struct ufs_hba *hba, const struct ufshcd_lrb *lrbp)
2134 {
2135 int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
2136 unsigned long flags;
2137
2138 spin_lock_irqsave(hba->host->host_lock, flags);
2139 if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) {
2140 const struct request *req = scsi_cmd_to_rq(lrbp->cmd);
2141 struct ufs_hba_monitor *m = &hba->monitor;
2142 ktime_t now, inc, lat;
2143
2144 now = lrbp->compl_time_stamp;
2145 inc = ktime_sub(now, m->busy_start_ts[dir]);
2146 m->total_busy[dir] = ktime_add(m->total_busy[dir], inc);
2147 m->nr_sec_rw[dir] += blk_rq_sectors(req);
2148
2149 /* Update latencies */
2150 m->nr_req[dir]++;
2151 lat = ktime_sub(now, lrbp->issue_time_stamp);
2152 m->lat_sum[dir] += lat;
2153 if (m->lat_max[dir] < lat || !m->lat_max[dir])
2154 m->lat_max[dir] = lat;
2155 if (m->lat_min[dir] > lat || !m->lat_min[dir])
2156 m->lat_min[dir] = lat;
2157
2158 m->nr_queued[dir]--;
2159 /* Push forward the busy start of monitor */
2160 m->busy_start_ts[dir] = now;
2161 }
2162 spin_unlock_irqrestore(hba->host->host_lock, flags);
2163 }
2164
2165 /**
2166 * ufshcd_send_command - Send SCSI or device management commands
2167 * @hba: per adapter instance
2168 * @task_tag: Task tag of the command
2169 * @hwq: pointer to hardware queue instance
2170 */
2171 static inline
2172 void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag,
2173 struct ufs_hw_queue *hwq)
2174 {
2175 struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
2176 unsigned long flags;
2177
2178 lrbp->issue_time_stamp = ktime_get();
2179 lrbp->issue_time_stamp_local_clock = local_clock();
2180 lrbp->compl_time_stamp = ktime_set(0, 0);
2181 lrbp->compl_time_stamp_local_clock = 0;
2182 ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND);
2183 ufshcd_clk_scaling_start_busy(hba);
2184 if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
2185 ufshcd_start_monitor(hba, lrbp);
2186
2187 if (is_mcq_enabled(hba)) {
2188 int utrd_size = sizeof(struct utp_transfer_req_desc);
2189 struct utp_transfer_req_desc *src = lrbp->utr_descriptor_ptr;
2190 struct utp_transfer_req_desc *dest;
2191
2192 spin_lock(&hwq->sq_lock);
2193 dest = hwq->sqe_base_addr + hwq->sq_tail_slot;
2194 memcpy(dest, src, utrd_size);
2195 ufshcd_inc_sq_tail(hwq);
2196 spin_unlock(&hwq->sq_lock);
2197 } else {
2198 spin_lock_irqsave(&hba->outstanding_lock, flags);
2199 if (hba->vops && hba->vops->setup_xfer_req)
2200 hba->vops->setup_xfer_req(hba, lrbp->task_tag,
2201 !!lrbp->cmd);
2202 __set_bit(lrbp->task_tag, &hba->outstanding_reqs);
2203 ufshcd_writel(hba, 1 << lrbp->task_tag,
2204 REG_UTP_TRANSFER_REQ_DOOR_BELL);
2205 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
2206 }
2207 }
2208
2209 /**
2210 * ufshcd_copy_sense_data - Copy sense data in case of check condition
2211 * @lrbp: pointer to local reference block
2212 */
2213 static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
2214 {
2215 u8 *const sense_buffer = lrbp->cmd->sense_buffer;
2216 u16 resp_len;
2217 int len;
2218
2219 resp_len = be16_to_cpu(lrbp->ucd_rsp_ptr->header.data_segment_length);
2220 if (sense_buffer && resp_len) {
2221 int len_to_copy;
2222
2223 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
2224 len_to_copy = min_t(int, UFS_SENSE_SIZE, len);
2225
2226 memcpy(sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data,
2227 len_to_copy);
2228 }
2229 }
2230
2231 /**
2232 * ufshcd_copy_query_response() - Copy the Query Response and the data
2233 * descriptor
2234 * @hba: per adapter instance
2235 * @lrbp: pointer to local reference block
2236 *
2237 * Return: 0 upon success; < 0 upon failure.
2238 */
2239 static
2240 int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2241 {
2242 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2243
2244 memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
2245
2246 /* Get the descriptor */
2247 if (hba->dev_cmd.query.descriptor &&
2248 lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
2249 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
2250 GENERAL_UPIU_REQUEST_SIZE;
2251 u16 resp_len;
2252 u16 buf_len;
2253
2254 /* data segment length */
2255 resp_len = be16_to_cpu(lrbp->ucd_rsp_ptr->header
2256 .data_segment_length);
2257 buf_len = be16_to_cpu(
2258 hba->dev_cmd.query.request.upiu_req.length);
2259 if (likely(buf_len >= resp_len)) {
2260 memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
2261 } else {
2262 dev_warn(hba->dev,
2263 "%s: rsp size %d is bigger than buffer size %d",
2264 __func__, resp_len, buf_len);
2265 return -EINVAL;
2266 }
2267 }
2268
2269 return 0;
2270 }
2271
2272 /**
2273 * ufshcd_hba_capabilities - Read controller capabilities
2274 * @hba: per adapter instance
2275 *
2276 * Return: 0 on success, negative on error.
2277 */
2278 static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
2279 {
2280 int err;
2281
2282 hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
2283
2284 /* nutrs and nutmrs are 0 based values */
2285 hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
2286 hba->nutmrs =
2287 ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
2288 hba->reserved_slot = hba->nutrs - 1;
2289
2290 /* Read crypto capabilities */
2291 err = ufshcd_hba_init_crypto_capabilities(hba);
2292 if (err) {
2293 dev_err(hba->dev, "crypto setup failed\n");
2294 return err;
2295 }
2296
2297 /*
2298 * The UFSHCI 3.0 specification does not define MCQ_SUPPORT and
2299 * LSDB_SUPPORT, but instead defines bits [31:29] as reserved with a
2300 * reset value of 0, so these fields can be read regardless of version.
2301 */
2302 hba->mcq_sup = FIELD_GET(MASK_MCQ_SUPPORT, hba->capabilities);
2303 /*
2304 * 0h: legacy single doorbell support is available
2305 * 1h: indicate that legacy single doorbell support has been removed
2306 */
2307 hba->lsdb_sup = !FIELD_GET(MASK_LSDB_SUPPORT, hba->capabilities);
2308 if (!hba->mcq_sup)
2309 return 0;
2310
2311 hba->mcq_capabilities = ufshcd_readl(hba, REG_MCQCAP);
2312 hba->ext_iid_sup = FIELD_GET(MASK_EXT_IID_SUPPORT,
2313 hba->mcq_capabilities);
2314
2315 return 0;
2316 }
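/*
 * Worked example (illustrative): the slot counts above are 0-based, so a
 * controller reporting 31 in the transfer-request-slots field and 7 in the
 * task-management-slots field ends up with nutrs = 32 and nutmrs = 8, and
 * the last transfer slot (tag 31) is set aside as reserved_slot for device
 * management commands.
 */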
2317
2318 /**
2319 * ufshcd_ready_for_uic_cmd - Check if controller is ready
2320 * to accept UIC commands
2321 * @hba: per adapter instance
2322 *
2323 * Return: true on success, else false.
2324 */
2325 static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
2326 {
2327 u32 val;
2328 int ret = read_poll_timeout(ufshcd_readl, val, val & UIC_COMMAND_READY,
2329 500, UIC_CMD_TIMEOUT * 1000, false, hba,
2330 REG_CONTROLLER_STATUS);
2331 return ret == 0;
2332 }
2333
2334 /**
2335 * ufshcd_get_upmcrs - Get the power mode change request status
2336 * @hba: Pointer to adapter instance
2337 *
2338 * This function gets the UPMCRS field of HCS register
2339 *
2340 * Return: value of UPMCRS field.
2341 */
2342 static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
2343 {
2344 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
2345 }
2346
2347 /**
2348 * ufshcd_dispatch_uic_cmd - Dispatch a UIC command to the Unipro layer
2349 * @hba: per adapter instance
2350 * @uic_cmd: UIC command
2351 */
2352 static inline void
2353 ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2354 {
2355 lockdep_assert_held(&hba->uic_cmd_mutex);
2356
2357 WARN_ON(hba->active_uic_cmd);
2358
2359 hba->active_uic_cmd = uic_cmd;
2360
2361 /* Write Args */
2362 ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
2363 ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
2364 ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
2365
2366 ufshcd_add_uic_command_trace(hba, uic_cmd, UFS_CMD_SEND);
2367
2368 /* Write UIC Cmd */
2369 ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
2370 REG_UIC_COMMAND);
2371 }
2372
2373 /**
2374 * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
2375 * @hba: per adapter instance
2376 * @uic_cmd: UIC command
2377 *
2378 * Return: 0 only if success.
2379 */
2380 static int
2381 ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2382 {
2383 int ret;
2384 unsigned long flags;
2385
2386 lockdep_assert_held(&hba->uic_cmd_mutex);
2387
2388 if (wait_for_completion_timeout(&uic_cmd->done,
2389 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
2390 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2391 } else {
2392 ret = -ETIMEDOUT;
2393 dev_err(hba->dev,
2394 "uic cmd 0x%x with arg3 0x%x completion timeout\n",
2395 uic_cmd->command, uic_cmd->argument3);
2396
2397 if (!uic_cmd->cmd_active) {
2398 dev_err(hba->dev, "%s: UIC cmd has been completed, return the result\n",
2399 __func__);
2400 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2401 }
2402 }
2403
2404 spin_lock_irqsave(hba->host->host_lock, flags);
2405 hba->active_uic_cmd = NULL;
2406 spin_unlock_irqrestore(hba->host->host_lock, flags);
2407
2408 return ret;
2409 }
2410
2411 /**
2412 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2413 * @hba: per adapter instance
2414 * @uic_cmd: UIC command
2415 *
2416 * Return: 0 only if success.
2417 */
2418 static int
2419 __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2420 {
2421 lockdep_assert_held(&hba->uic_cmd_mutex);
2422
2423 if (!ufshcd_ready_for_uic_cmd(hba)) {
2424 dev_err(hba->dev,
2425 "Controller not ready to accept UIC commands\n");
2426 return -EIO;
2427 }
2428
2429 init_completion(&uic_cmd->done);
2430
2431 uic_cmd->cmd_active = 1;
2432 ufshcd_dispatch_uic_cmd(hba, uic_cmd);
2433
2434 return 0;
2435 }
2436
2437 /**
2438 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2439 * @hba: per adapter instance
2440 * @uic_cmd: UIC command
2441 *
2442 * Return: 0 only if success.
2443 */
2444 int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2445 {
2446 int ret;
2447
2448 if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
2449 return 0;
2450
2451 ufshcd_hold(hba);
2452 mutex_lock(&hba->uic_cmd_mutex);
2453 ufshcd_add_delay_before_dme_cmd(hba);
2454
2455 ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
2456 if (!ret)
2457 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
2458
2459 mutex_unlock(&hba->uic_cmd_mutex);
2460
2461 ufshcd_release(hba);
2462 return ret;
2463 }
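/*
 * Usage sketch (illustrative; assumes the UIC_CMD_DME_GET opcode and the
 * UIC_ARG_MIB() helper from the UniPro headers): reading a DME attribute
 * through this helper could look like
 *
 *	struct uic_command uic_cmd = {
 *		.command = UIC_CMD_DME_GET,
 *		.argument1 = UIC_ARG_MIB(attr_sel),
 *	};
 *
 *	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
 *	if (!ret)
 *		val = uic_cmd.argument3;
 *
 * which mirrors what the DME get/set wrappers elsewhere in this file do.
 */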
2464
2465 /**
2466 * ufshcd_sgl_to_prdt - SG list to PRDT (Physical Region Description Table, 4DW format)
2467 * @hba: per-adapter instance
2468 * @lrbp: pointer to local reference block
2469 * @sg_entries: The number of SG entries actually used
2470 * @sg_list: Pointer to SG list
2471 */
2472 static void ufshcd_sgl_to_prdt(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int sg_entries,
2473 struct scatterlist *sg_list)
2474 {
2475 struct ufshcd_sg_entry *prd;
2476 struct scatterlist *sg;
2477 int i;
2478
2479 if (sg_entries) {
2480
2481 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
2482 lrbp->utr_descriptor_ptr->prd_table_length =
2483 cpu_to_le16(sg_entries * ufshcd_sg_entry_size(hba));
2484 else
2485 lrbp->utr_descriptor_ptr->prd_table_length = cpu_to_le16(sg_entries);
2486
2487 prd = lrbp->ucd_prdt_ptr;
2488
2489 for_each_sg(sg_list, sg, sg_entries, i) {
2490 const unsigned int len = sg_dma_len(sg);
2491
2492 /*
2493 * From the UFSHCI spec: "Data Byte Count (DBC): A '0'
2494 * based value that indicates the length, in bytes, of
2495 * the data block. A maximum of length of 256KB may
2496 * exist for any entry. Bits 1:0 of this field shall be
2497 * 11b to indicate Dword granularity. A value of '3'
2498 * indicates 4 bytes, '7' indicates 8 bytes, etc."
2499 */
2500 WARN_ONCE(len > SZ_256K, "len = %#x\n", len);
2501 prd->size = cpu_to_le32(len - 1);
2502 prd->addr = cpu_to_le64(sg->dma_address);
2503 prd->reserved = 0;
2504 prd = (void *)prd + ufshcd_sg_entry_size(hba);
2505 }
2506 } else {
2507 lrbp->utr_descriptor_ptr->prd_table_length = 0;
2508 }
2509 }
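/*
 * Illustrative encoding note: for a 4 KiB scatterlist segment the loop above
 * stores prd->size = cpu_to_le32(4096 - 1) = 0xFFF, whose two least
 * significant bits are 11b, matching the Dword-granularity rule quoted from
 * the UFSHCI spec.
 */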
2510
2511 /**
2512 * ufshcd_map_sg - Map scatter-gather list to prdt
2513 * @hba: per adapter instance
2514 * @lrbp: pointer to local reference block
2515 *
2516 * Return: 0 in case of success, non-zero value in case of failure.
2517 */
2518 static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2519 {
2520 struct scsi_cmnd *cmd = lrbp->cmd;
2521 int sg_segments = scsi_dma_map(cmd);
2522
2523 if (sg_segments < 0)
2524 return sg_segments;
2525
2526 ufshcd_sgl_to_prdt(hba, lrbp, sg_segments, scsi_sglist(cmd));
2527
2528 return 0;
2529 }
2530
2531 /**
2532 * ufshcd_enable_intr - enable interrupts
2533 * @hba: per adapter instance
2534 * @intrs: interrupt bits
2535 */
2536 static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
2537 {
2538 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2539
2540 if (hba->ufs_version == ufshci_version(1, 0)) {
2541 u32 rw;
2542 rw = set & INTERRUPT_MASK_RW_VER_10;
2543 set = rw | ((set ^ intrs) & intrs);
2544 } else {
2545 set |= intrs;
2546 }
2547
2548 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2549 }
2550
2551 /**
2552 * ufshcd_disable_intr - disable interrupts
2553 * @hba: per adapter instance
2554 * @intrs: interrupt bits
2555 */
2556 static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
2557 {
2558 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2559
2560 if (hba->ufs_version == ufshci_version(1, 0)) {
2561 u32 rw;
2562 rw = (set & INTERRUPT_MASK_RW_VER_10) &
2563 ~(intrs & INTERRUPT_MASK_RW_VER_10);
2564 set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
2565
2566 } else {
2567 set &= ~intrs;
2568 }
2569
2570 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2571 }
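/*
 * Usage sketch (illustrative; exact call sites vary, this only shows the
 * calling convention): callers pass a mask of interrupt status bits, e.g.
 *
 *	ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
 *
 * to enable the default set, and a single source can later be masked again
 * with
 *
 *	ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
 */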
2572
2573 /**
2574 * ufshcd_prepare_req_desc_hdr - Fill the UTP Transfer request descriptor
2575 * header according to the request
2576 * @lrbp: pointer to local reference block
2577 * @upiu_flags: flags required in the header
2578 * @cmd_dir: requests data direction
2579 * @ehs_length: Total EHS Length (in 32-byte units) of all Extra Header Segments
2580 */
2581 static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp, u8 *upiu_flags,
2582 enum dma_data_direction cmd_dir, int ehs_length)
2583 {
2584 struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
2585 struct request_desc_header *h = &req_desc->header;
2586 enum utp_data_direction data_direction;
2587
2588 *h = (typeof(*h)){ };
2589
2590 if (cmd_dir == DMA_FROM_DEVICE) {
2591 data_direction = UTP_DEVICE_TO_HOST;
2592 *upiu_flags = UPIU_CMD_FLAGS_READ;
2593 } else if (cmd_dir == DMA_TO_DEVICE) {
2594 data_direction = UTP_HOST_TO_DEVICE;
2595 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
2596 } else {
2597 data_direction = UTP_NO_DATA_TRANSFER;
2598 *upiu_flags = UPIU_CMD_FLAGS_NONE;
2599 }
2600
2601 h->command_type = lrbp->command_type;
2602 h->data_direction = data_direction;
2603 h->ehs_length = ehs_length;
2604
2605 if (lrbp->intr_cmd)
2606 h->interrupt = 1;
2607
2608 /* Prepare crypto related dwords */
2609 ufshcd_prepare_req_desc_hdr_crypto(lrbp, h);
2610
2611 /*
2612 * Assign an invalid value to the command status. The controller
2613 * updates OCS with the actual command status upon command
2614 * completion.
2615 */
2616 h->ocs = OCS_INVALID_COMMAND_STATUS;
2617
2618 req_desc->prd_table_length = 0;
2619 }
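/*
 * Illustrative call (not an additional call site): preparing the header for a
 * SCSI READ would be
 *
 *	ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_FROM_DEVICE, 0);
 *
 * which selects UTP_DEVICE_TO_HOST as the data direction and returns
 * UPIU_CMD_FLAGS_READ in upiu_flags, exactly as the DMA_FROM_DEVICE branch
 * above encodes.
 */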
2620
2621 /**
2622 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc
2623 * for SCSI commands
2624 * @lrbp: local reference block pointer
2625 * @upiu_flags: flags
2626 */
2627 static
2628 void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags)
2629 {
2630 struct scsi_cmnd *cmd = lrbp->cmd;
2631 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2632 unsigned short cdb_len;
2633
2634 ucd_req_ptr->header = (struct utp_upiu_header){
2635 .transaction_code = UPIU_TRANSACTION_COMMAND,
2636 .flags = upiu_flags,
2637 .lun = lrbp->lun,
2638 .task_tag = lrbp->task_tag,
2639 .command_set_type = UPIU_COMMAND_SET_TYPE_SCSI,
2640 };
2641
2642 ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);
2643
2644 cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE);
2645 memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
2646 memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len);
2647
2648 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2649 }
2650
2651 /**
2652 * ufshcd_prepare_utp_query_req_upiu() - fill the utp_transfer_req_desc for query request
2653 * @hba: UFS hba
2654 * @lrbp: local reference block pointer
2655 * @upiu_flags: flags
2656 */
2657 static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
2658 struct ufshcd_lrb *lrbp, u8 upiu_flags)
2659 {
2660 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2661 struct ufs_query *query = &hba->dev_cmd.query;
2662 u16 len = be16_to_cpu(query->request.upiu_req.length);
2663
2664 /* Query request header */
2665 ucd_req_ptr->header = (struct utp_upiu_header){
2666 .transaction_code = UPIU_TRANSACTION_QUERY_REQ,
2667 .flags = upiu_flags,
2668 .lun = lrbp->lun,
2669 .task_tag = lrbp->task_tag,
2670 .query_function = query->request.query_func,
2671 /* Data segment length is only needed for WRITE_DESC */
2672 .data_segment_length =
2673 query->request.upiu_req.opcode ==
2674 UPIU_QUERY_OPCODE_WRITE_DESC ?
2675 cpu_to_be16(len) :
2676 0,
2677 };
2678
2679 /* Copy the Query Request buffer as is */
2680 memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
2681 QUERY_OSF_SIZE);
2682
2683 /* Copy the Descriptor */
2684 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2685 memcpy(ucd_req_ptr + 1, query->descriptor, len);
2686
2687 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2688 }
2689
2690 static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
2691 {
2692 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2693
2694 memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
2695
2696 ucd_req_ptr->header = (struct utp_upiu_header){
2697 .transaction_code = UPIU_TRANSACTION_NOP_OUT,
2698 .task_tag = lrbp->task_tag,
2699 };
2700
2701 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2702 }
2703
2704 /**
2705 * ufshcd_compose_devman_upiu - UFS Protocol Information Unit (UPIU)
2706 * for Device Management Purposes
2707 * @hba: per adapter instance
2708 * @lrbp: pointer to local reference block
2709 *
2710 * Return: 0 upon success; < 0 upon failure.
2711 */
2712 static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
2713 struct ufshcd_lrb *lrbp)
2714 {
2715 u8 upiu_flags;
2716 int ret = 0;
2717
2718 if (hba->ufs_version <= ufshci_version(1, 1))
2719 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
2720 else
2721 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2722
2723 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0);
2724 if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
2725 ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
2726 else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
2727 ufshcd_prepare_utp_nop_upiu(lrbp);
2728 else
2729 ret = -EINVAL;
2730
2731 return ret;
2732 }
2733
2734 /**
2735 * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit (UPIU)
2736 * for SCSI Purposes
2737 * @hba: per adapter instance
2738 * @lrbp: pointer to local reference block
2739 *
2740 * Return: 0 upon success; < 0 upon failure.
2741 */
2742 static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2743 {
2744 u8 upiu_flags;
2745 int ret = 0;
2746
2747 if (hba->ufs_version <= ufshci_version(1, 1))
2748 lrbp->command_type = UTP_CMD_TYPE_SCSI;
2749 else
2750 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2751
2752 if (likely(lrbp->cmd)) {
2753 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, lrbp->cmd->sc_data_direction, 0);
2754 ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
2755 } else {
2756 ret = -EINVAL;
2757 }
2758
2759 return ret;
2760 }
2761
2762 /**
2763 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
2764 * @upiu_wlun_id: UPIU W-LUN id
2765 *
2766 * Return: SCSI W-LUN id.
2767 */
2768 static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
2769 {
2770 return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
2771 }
2772
2773 static inline bool is_device_wlun(struct scsi_device *sdev)
2774 {
2775 return sdev->lun ==
2776 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN);
2777 }
2778
2779 /*
2780 * Associate the UFS controller queue with the default and poll HCTX types.
2781 * Initialize the mq_map[] arrays.
2782 */
2783 static void ufshcd_map_queues(struct Scsi_Host *shost)
2784 {
2785 struct ufs_hba *hba = shost_priv(shost);
2786 int i, queue_offset = 0;
2787
2788 if (!is_mcq_supported(hba)) {
2789 hba->nr_queues[HCTX_TYPE_DEFAULT] = 1;
2790 hba->nr_queues[HCTX_TYPE_READ] = 0;
2791 hba->nr_queues[HCTX_TYPE_POLL] = 1;
2792 hba->nr_hw_queues = 1;
2793 }
2794
2795 for (i = 0; i < shost->nr_maps; i++) {
2796 struct blk_mq_queue_map *map = &shost->tag_set.map[i];
2797
2798 map->nr_queues = hba->nr_queues[i];
2799 if (!map->nr_queues)
2800 continue;
2801 map->queue_offset = queue_offset;
2802 if (i == HCTX_TYPE_POLL && !is_mcq_supported(hba))
2803 map->queue_offset = 0;
2804
2805 blk_mq_map_queues(map);
2806 queue_offset += map->nr_queues;
2807 }
2808 }
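/*
 * Illustrative result: on a legacy single-doorbell controller (MCQ not
 * supported) this leaves one default queue and one poll queue, both with
 * queue_offset 0, so blk-mq maps every CPU onto the single hardware doorbell
 * for both hctx types.
 */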
2809
2810 static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
2811 {
2812 struct utp_transfer_cmd_desc *cmd_descp = (void *)hba->ucdl_base_addr +
2813 i * ufshcd_get_ucd_size(hba);
2814 struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
2815 dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
2816 i * ufshcd_get_ucd_size(hba);
2817 u16 response_offset = le16_to_cpu(utrdlp[i].response_upiu_offset);
2818 u16 prdt_offset = le16_to_cpu(utrdlp[i].prd_table_offset);
2819
2820 lrb->utr_descriptor_ptr = utrdlp + i;
2821 lrb->utrd_dma_addr = hba->utrdl_dma_addr +
2822 i * sizeof(struct utp_transfer_req_desc);
2823 lrb->ucd_req_ptr = (struct utp_upiu_req *)cmd_descp->command_upiu;
2824 lrb->ucd_req_dma_addr = cmd_desc_element_addr;
2825 lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp->response_upiu;
2826 lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
2827 lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp->prd_table;
2828 lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
2829 }
2830
2831 /**
2832 * ufshcd_queuecommand - main entry point for SCSI requests
2833 * @host: SCSI host pointer
2834 * @cmd: command from SCSI Midlayer
2835 *
2836 * Return: 0 for success, non-zero in case of failure.
2837 */
2838 static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2839 {
2840 struct ufs_hba *hba = shost_priv(host);
2841 int tag = scsi_cmd_to_rq(cmd)->tag;
2842 struct ufshcd_lrb *lrbp;
2843 int err = 0;
2844 struct ufs_hw_queue *hwq = NULL;
2845
2846 WARN_ONCE(tag < 0 || tag >= hba->nutrs, "Invalid tag %d\n", tag);
2847
2848 switch (hba->ufshcd_state) {
2849 case UFSHCD_STATE_OPERATIONAL:
2850 break;
2851 case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL:
2852 /*
2853 * SCSI error handler can call ->queuecommand() while UFS error
2854 * handler is in progress. Error interrupts could change the
2855 * state from UFSHCD_STATE_RESET to
2856 * UFSHCD_STATE_EH_SCHEDULED_NON_FATAL. Prevent requests
2857 * being issued in that case.
2858 */
2859 if (ufshcd_eh_in_progress(hba)) {
2860 err = SCSI_MLQUEUE_HOST_BUSY;
2861 goto out;
2862 }
2863 break;
2864 case UFSHCD_STATE_EH_SCHEDULED_FATAL:
2865 /*
2866 * pm_runtime_get_sync() is used at error handling preparation
2867 * stage. If a scsi cmd, e.g. the SSU cmd, is sent from hba's
2868 * PM ops, it can never be finished if we let SCSI layer keep
2869 * retrying it, which gets err handler stuck forever. Neither
2870 * can we let the scsi cmd pass through, because UFS is in bad
2871 * state, the scsi cmd may eventually time out, which will get
2872 * err handler blocked for too long. So, just fail the scsi cmd
2873 * sent from PM ops, err handler can recover PM error anyways.
2874 */
2875 if (hba->pm_op_in_progress) {
2876 hba->force_reset = true;
2877 set_host_byte(cmd, DID_BAD_TARGET);
2878 scsi_done(cmd);
2879 goto out;
2880 }
2881 fallthrough;
2882 case UFSHCD_STATE_RESET:
2883 err = SCSI_MLQUEUE_HOST_BUSY;
2884 goto out;
2885 case UFSHCD_STATE_ERROR:
2886 set_host_byte(cmd, DID_ERROR);
2887 scsi_done(cmd);
2888 goto out;
2889 }
2890
2891 hba->req_abort_count = 0;
2892
2893 ufshcd_hold(hba);
2894
2895 lrbp = &hba->lrb[tag];
2896 lrbp->cmd = cmd;
2897 lrbp->task_tag = tag;
2898 lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
2899 lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba);
2900
2901 ufshcd_prepare_lrbp_crypto(scsi_cmd_to_rq(cmd), lrbp);
2902
2903 lrbp->req_abort_skip = false;
2904
2905 ufshcd_comp_scsi_upiu(hba, lrbp);
2906
2907 err = ufshcd_map_sg(hba, lrbp);
2908 if (err) {
2909 ufshcd_release(hba);
2910 goto out;
2911 }
2912
2913 if (is_mcq_enabled(hba))
2914 hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
2915
2916 ufshcd_send_command(hba, tag, hwq);
2917
2918 out:
2919 if (ufs_trigger_eh()) {
2920 unsigned long flags;
2921
2922 spin_lock_irqsave(hba->host->host_lock, flags);
2923 ufshcd_schedule_eh_work(hba);
2924 spin_unlock_irqrestore(hba->host->host_lock, flags);
2925 }
2926
2927 return err;
2928 }
2929
2930 static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
2931 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
2932 {
2933 lrbp->cmd = NULL;
2934 lrbp->task_tag = tag;
2935 lrbp->lun = 0; /* device management cmd is not specific to any LUN */
2936 lrbp->intr_cmd = true; /* No interrupt aggregation */
2937 ufshcd_prepare_lrbp_crypto(NULL, lrbp);
2938 hba->dev_cmd.type = cmd_type;
2939
2940 return ufshcd_compose_devman_upiu(hba, lrbp);
2941 }
2942
2943 /*
2944 * Check with the block layer if the command is inflight
2945 * @cmd: command to check.
2946 *
2947 * Return: true if command is inflight; false if not.
2948 */
2949 bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd)
2950 {
2951 struct request *rq;
2952
2953 if (!cmd)
2954 return false;
2955
2956 rq = scsi_cmd_to_rq(cmd);
2957 if (!blk_mq_request_started(rq))
2958 return false;
2959
2960 return true;
2961 }
2962
2963 /*
2964 * Clear the pending command in the controller and wait until
2965 * the controller confirms that the command has been cleared.
2966 * @hba: per adapter instance
2967 * @task_tag: The tag number of the command to be cleared.
2968 */
2969 static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag)
2970 {
2971 u32 mask;
2972 unsigned long flags;
2973 int err;
2974
2975 if (is_mcq_enabled(hba)) {
2976 /*
2977 * MCQ mode. Clean up the MCQ resources similar to
2978 * what the ufshcd_utrl_clear() does for SDB mode.
2979 */
2980 err = ufshcd_mcq_sq_cleanup(hba, task_tag);
2981 if (err) {
2982 dev_err(hba->dev, "%s: failed tag=%d. err=%d\n",
2983 __func__, task_tag, err);
2984 return err;
2985 }
2986 return 0;
2987 }
2988
2989 mask = 1U << task_tag;
2990
2991 /* clear outstanding transaction before retry */
2992 spin_lock_irqsave(hba->host->host_lock, flags);
2993 ufshcd_utrl_clear(hba, mask);
2994 spin_unlock_irqrestore(hba->host->host_lock, flags);
2995
2996 /*
2997 * wait for h/w to clear corresponding bit in door-bell.
2998 * max. wait is 1 sec.
2999 */
3000 return ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
3001 mask, ~mask, 1000, 1000);
3002 }
3003
3004 /**
3005 * ufshcd_dev_cmd_completion() - handles device management command responses
3006 * @hba: per adapter instance
3007 * @lrbp: pointer to local reference block
3008 *
3009 * Return: 0 upon success; < 0 upon failure.
3010 */
3011 static int
3012 ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
3013 {
3014 enum upiu_response_transaction resp;
3015 int err = 0;
3016
3017 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
3018 resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
3019
3020 switch (resp) {
3021 case UPIU_TRANSACTION_NOP_IN:
3022 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
3023 err = -EINVAL;
3024 dev_err(hba->dev, "%s: unexpected response %x\n",
3025 __func__, resp);
3026 }
3027 break;
3028 case UPIU_TRANSACTION_QUERY_RSP: {
3029 u8 response = lrbp->ucd_rsp_ptr->header.response;
3030
3031 if (response == 0) {
3032 err = ufshcd_copy_query_response(hba, lrbp);
3033 } else {
3034 err = -EINVAL;
3035 dev_err(hba->dev, "%s: unexpected response in Query RSP: %x\n",
3036 __func__, response);
3037 }
3038 break;
3039 }
3040 case UPIU_TRANSACTION_REJECT_UPIU:
3041 /* TODO: handle Reject UPIU Response */
3042 err = -EPERM;
3043 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
3044 __func__);
3045 break;
3046 case UPIU_TRANSACTION_RESPONSE:
3047 if (hba->dev_cmd.type != DEV_CMD_TYPE_RPMB) {
3048 err = -EINVAL;
3049 dev_err(hba->dev, "%s: unexpected response %x\n", __func__, resp);
3050 }
3051 break;
3052 default:
3053 err = -EINVAL;
3054 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
3055 __func__, resp);
3056 break;
3057 }
3058
3059 return err;
3060 }
3061
3062 static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
3063 struct ufshcd_lrb *lrbp, int max_timeout)
3064 {
3065 unsigned long time_left = msecs_to_jiffies(max_timeout);
3066 unsigned long flags;
3067 bool pending;
3068 int err;
3069
3070 retry:
3071 time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
3072 time_left);
3073
3074 if (likely(time_left)) {
3075 /*
3076 * The completion handler called complete() and the caller of
3077 * this function still owns the @lrbp tag so the code below does
3078 * not trigger any race conditions.
3079 */
3080 hba->dev_cmd.complete = NULL;
3081 err = ufshcd_get_tr_ocs(lrbp, NULL);
3082 if (!err)
3083 err = ufshcd_dev_cmd_completion(hba, lrbp);
3084 } else {
3085 err = -ETIMEDOUT;
3086 dev_dbg(hba->dev, "%s: dev_cmd request timed out, tag %d\n",
3087 __func__, lrbp->task_tag);
3088
3089 /* MCQ mode */
3090 if (is_mcq_enabled(hba)) {
3091 /* successfully cleared the command, retry if needed */
3092 if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0)
3093 err = -EAGAIN;
3094 hba->dev_cmd.complete = NULL;
3095 return err;
3096 }
3097
3098 /* SDB mode */
3099 if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0) {
3100 /* successfully cleared the command, retry if needed */
3101 err = -EAGAIN;
3102 /*
3103 * Since clearing the command succeeded we also need to
3104 * clear the task tag bit from the outstanding_reqs
3105 * variable.
3106 */
3107 spin_lock_irqsave(&hba->outstanding_lock, flags);
3108 pending = test_bit(lrbp->task_tag,
3109 &hba->outstanding_reqs);
3110 if (pending) {
3111 hba->dev_cmd.complete = NULL;
3112 __clear_bit(lrbp->task_tag,
3113 &hba->outstanding_reqs);
3114 }
3115 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
3116
3117 if (!pending) {
3118 /*
3119 * The completion handler ran while we tried to
3120 * clear the command.
3121 */
3122 time_left = 1;
3123 goto retry;
3124 }
3125 } else {
3126 dev_err(hba->dev, "%s: failed to clear tag %d\n",
3127 __func__, lrbp->task_tag);
3128
3129 spin_lock_irqsave(&hba->outstanding_lock, flags);
3130 pending = test_bit(lrbp->task_tag,
3131 &hba->outstanding_reqs);
3132 if (pending)
3133 hba->dev_cmd.complete = NULL;
3134 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
3135
3136 if (!pending) {
3137 /*
3138 * The completion handler ran while we tried to
3139 * clear the command.
3140 */
3141 time_left = 1;
3142 goto retry;
3143 }
3144 }
3145 }
3146
3147 return err;
3148 }
3149
3150 /**
3151 * ufshcd_exec_dev_cmd - API for sending device management requests
3152 * @hba: UFS hba
3153 * @cmd_type: specifies the type (NOP, Query...)
3154 * @timeout: timeout in milliseconds
3155 *
3156 * Return: 0 upon success; < 0 upon failure.
3157 *
3158 * NOTE: Since there is only one available tag for device management commands,
3159 * the caller is expected to hold the hba->dev_cmd.lock mutex.
3160 */
3161 static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
3162 enum dev_cmd_type cmd_type, int timeout)
3163 {
3164 DECLARE_COMPLETION_ONSTACK(wait);
3165 const u32 tag = hba->reserved_slot;
3166 struct ufshcd_lrb *lrbp;
3167 int err;
3168
3169 /* Protects use of hba->reserved_slot. */
3170 lockdep_assert_held(&hba->dev_cmd.lock);
3171
3172 down_read(&hba->clk_scaling_lock);
3173
3174 lrbp = &hba->lrb[tag];
3175 lrbp->cmd = NULL;
3176 err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
3177 if (unlikely(err))
3178 goto out;
3179
3180 hba->dev_cmd.complete = &wait;
3181
3182 ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
3183
3184 ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
3185 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
3186 ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
3187 (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
3188
3189 out:
3190 up_read(&hba->clk_scaling_lock);
3191 return err;
3192 }
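/*
 * Usage sketch (illustrative; mirrors how the NOP OUT verification elsewhere
 * in this file uses this helper): callers serialize on the device-management
 * lock around the call, e.g.
 *
 *	mutex_lock(&hba->dev_cmd.lock);
 *	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, NOP_OUT_TIMEOUT);
 *	mutex_unlock(&hba->dev_cmd.lock);
 *
 * since the single reserved_slot tag is the only resource available for
 * device management commands.
 */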
3193
3194 /**
3195 * ufshcd_init_query() - init the query response and request parameters
3196 * @hba: per-adapter instance
3197 * @request: address of the request pointer to be initialized
3198 * @response: address of the response pointer to be initialized
3199 * @opcode: operation to perform
3200 * @idn: flag idn to access
3201 * @index: LU number to access
3202 * @selector: query/flag/descriptor further identification
3203 */
3204 static inline void ufshcd_init_query(struct ufs_hba *hba,
3205 struct ufs_query_req **request, struct ufs_query_res **response,
3206 enum query_opcode opcode, u8 idn, u8 index, u8 selector)
3207 {
3208 *request = &hba->dev_cmd.query.request;
3209 *response = &hba->dev_cmd.query.response;
3210 memset(*request, 0, sizeof(struct ufs_query_req));
3211 memset(*response, 0, sizeof(struct ufs_query_res));
3212 (*request)->upiu_req.opcode = opcode;
3213 (*request)->upiu_req.idn = idn;
3214 (*request)->upiu_req.index = index;
3215 (*request)->upiu_req.selector = selector;
3216 }
3217
3218 static int ufshcd_query_flag_retry(struct ufs_hba *hba,
3219 enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res)
3220 {
3221 int ret;
3222 int retries;
3223
3224 for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
3225 ret = ufshcd_query_flag(hba, opcode, idn, index, flag_res);
3226 if (ret)
3227 dev_dbg(hba->dev,
3228 "%s: failed with error %d, retries %d\n",
3229 __func__, ret, retries);
3230 else
3231 break;
3232 }
3233
3234 if (ret)
3235 dev_err(hba->dev,
3236 "%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
3237 __func__, opcode, idn, ret, retries);
3238 return ret;
3239 }
3240
3241 /**
3242 * ufshcd_query_flag() - API function for sending flag query requests
3243 * @hba: per-adapter instance
3244 * @opcode: flag query to perform
3245 * @idn: flag idn to access
3246 * @index: flag index to access
3247 * @flag_res: the flag value after the query request completes
3248 *
3249 * Return: 0 for success, non-zero in case of failure.
3250 */
3251 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
3252 enum flag_idn idn, u8 index, bool *flag_res)
3253 {
3254 struct ufs_query_req *request = NULL;
3255 struct ufs_query_res *response = NULL;
3256 int err, selector = 0;
3257 int timeout = QUERY_REQ_TIMEOUT;
3258
3259 BUG_ON(!hba);
3260
3261 ufshcd_hold(hba);
3262 mutex_lock(&hba->dev_cmd.lock);
3263 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3264 selector);
3265
3266 switch (opcode) {
3267 case UPIU_QUERY_OPCODE_SET_FLAG:
3268 case UPIU_QUERY_OPCODE_CLEAR_FLAG:
3269 case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
3270 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3271 break;
3272 case UPIU_QUERY_OPCODE_READ_FLAG:
3273 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3274 if (!flag_res) {
3275 /* No dummy reads */
3276 dev_err(hba->dev, "%s: Invalid argument for read request\n",
3277 __func__);
3278 err = -EINVAL;
3279 goto out_unlock;
3280 }
3281 break;
3282 default:
3283 dev_err(hba->dev,
3284 "%s: Expected query flag opcode but got = %d\n",
3285 __func__, opcode);
3286 err = -EINVAL;
3287 goto out_unlock;
3288 }
3289
3290 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
3291
3292 if (err) {
3293 dev_err(hba->dev,
3294 "%s: Sending flag query for idn %d failed, err = %d\n",
3295 __func__, idn, err);
3296 goto out_unlock;
3297 }
3298
3299 if (flag_res)
3300 *flag_res = (be32_to_cpu(response->upiu_res.value) &
3301 MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
3302
3303 out_unlock:
3304 mutex_unlock(&hba->dev_cmd.lock);
3305 ufshcd_release(hba);
3306 return err;
3307 }
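/*
 * Usage sketch (illustrative; QUERY_FLAG_IDN_FDEVICEINIT comes from the UFS
 * headers): reading the fDeviceInit flag could look like
 *
 *	bool flag_res;
 *	int err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 *				    QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
 *
 * with flag_res holding the flag value on success; ufshcd_query_flag_retry()
 * above wraps the same call in QUERY_REQ_RETRIES attempts.
 */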
3308
3309 /**
3310 * ufshcd_query_attr - API function for sending attribute requests
3311 * @hba: per-adapter instance
3312 * @opcode: attribute opcode
3313 * @idn: attribute idn to access
3314 * @index: index field
3315 * @selector: selector field
3316 * @attr_val: the attribute value after the query request completes
3317 *
3318 * Return: 0 for success, non-zero in case of failure.
3319 */
3320 int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
3321 enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
3322 {
3323 struct ufs_query_req *request = NULL;
3324 struct ufs_query_res *response = NULL;
3325 int err;
3326
3327 BUG_ON(!hba);
3328
3329 if (!attr_val) {
3330 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
3331 __func__, opcode);
3332 return -EINVAL;
3333 }
3334
3335 ufshcd_hold(hba);
3336
3337 mutex_lock(&hba->dev_cmd.lock);
3338 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3339 selector);
3340
3341 switch (opcode) {
3342 case UPIU_QUERY_OPCODE_WRITE_ATTR:
3343 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3344 request->upiu_req.value = cpu_to_be32(*attr_val);
3345 break;
3346 case UPIU_QUERY_OPCODE_READ_ATTR:
3347 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3348 break;
3349 default:
3350 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
3351 __func__, opcode);
3352 err = -EINVAL;
3353 goto out_unlock;
3354 }
3355
3356 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
3357
3358 if (err) {
3359 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3360 __func__, opcode, idn, index, err);
3361 goto out_unlock;
3362 }
3363
3364 *attr_val = be32_to_cpu(response->upiu_res.value);
3365
3366 out_unlock:
3367 mutex_unlock(&hba->dev_cmd.lock);
3368 ufshcd_release(hba);
3369 return err;
3370 }
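/*
 * Usage sketch (illustrative; QUERY_ATTR_IDN_ACTIVE_ICC_LVL comes from the
 * UFS headers): writing an attribute goes through the same helper, e.g.
 *
 *	u32 icc_level = 0;
 *
 *	err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
 *				QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
 *
 * For reads, pass UPIU_QUERY_OPCODE_READ_ATTR and the read value is returned
 * through the same attr_val pointer.
 */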
3371
3372 /**
3373 * ufshcd_query_attr_retry() - API function for sending query
3374 * attribute with retries
3375 * @hba: per-adapter instance
3376 * @opcode: attribute opcode
3377 * @idn: attribute idn to access
3378 * @index: index field
3379 * @selector: selector field
3380 * @attr_val: the attribute value after the query request
3381 * completes
3382 *
3383 * Return: 0 for success, non-zero in case of failure.
3384 */
3385 int ufshcd_query_attr_retry(struct ufs_hba *hba,
3386 enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
3387 u32 *attr_val)
3388 {
3389 int ret = 0;
3390 u32 retries;
3391
3392 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3393 ret = ufshcd_query_attr(hba, opcode, idn, index,
3394 selector, attr_val);
3395 if (ret)
3396 dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
3397 __func__, ret, retries);
3398 else
3399 break;
3400 }
3401
3402 if (ret)
3403 dev_err(hba->dev,
3404 "%s: query attribute, idn %d, failed with error %d after %d retries\n",
3405 __func__, idn, ret, QUERY_REQ_RETRIES);
3406 return ret;
3407 }
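/*
 * Illustrative usage sketch (not part of the driver): a caller holding a
 * valid hba could read a device attribute such as bBootLunEn through the
 * retrying helper above. The attribute IDN chosen here is only an example
 * and error handling is minimal.
 *
 *	u32 boot_lun_en = 0;
 *	int err;
 *
 *	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
 *				      QUERY_ATTR_IDN_BOOT_LU_EN, 0, 0,
 *				      &boot_lun_en);
 *	if (!err)
 *		dev_info(hba->dev, "bBootLunEn = 0x%x\n", boot_lun_en);
 */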
3408
3409 static int __ufshcd_query_descriptor(struct ufs_hba *hba,
3410 enum query_opcode opcode, enum desc_idn idn, u8 index,
3411 u8 selector, u8 *desc_buf, int *buf_len)
3412 {
3413 struct ufs_query_req *request = NULL;
3414 struct ufs_query_res *response = NULL;
3415 int err;
3416
3417 BUG_ON(!hba);
3418
3419 if (!desc_buf) {
3420 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
3421 __func__, opcode);
3422 return -EINVAL;
3423 }
3424
3425 if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
3426 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
3427 __func__, *buf_len);
3428 return -EINVAL;
3429 }
3430
3431 ufshcd_hold(hba);
3432
3433 mutex_lock(&hba->dev_cmd.lock);
3434 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3435 selector);
3436 hba->dev_cmd.query.descriptor = desc_buf;
3437 request->upiu_req.length = cpu_to_be16(*buf_len);
3438
3439 switch (opcode) {
3440 case UPIU_QUERY_OPCODE_WRITE_DESC:
3441 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3442 break;
3443 case UPIU_QUERY_OPCODE_READ_DESC:
3444 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3445 break;
3446 default:
3447 dev_err(hba->dev,
3448 "%s: Expected query descriptor opcode but got = 0x%.2x\n",
3449 __func__, opcode);
3450 err = -EINVAL;
3451 goto out_unlock;
3452 }
3453
3454 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
3455
3456 if (err) {
3457 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3458 __func__, opcode, idn, index, err);
3459 goto out_unlock;
3460 }
3461
3462 *buf_len = be16_to_cpu(response->upiu_res.length);
3463
3464 out_unlock:
3465 hba->dev_cmd.query.descriptor = NULL;
3466 mutex_unlock(&hba->dev_cmd.lock);
3467 ufshcd_release(hba);
3468 return err;
3469 }
3470
3471 /**
3472 * ufshcd_query_descriptor_retry - API function for sending descriptor requests
3473 * @hba: per-adapter instance
3474 * @opcode: attribute opcode
3475 * @idn: attribute idn to access
3476 * @index: index field
3477 * @selector: selector field
3478 * @desc_buf: the buffer that contains the descriptor
3479 * @buf_len: length parameter passed to the device
3480 *
3481 * The buf_len parameter will contain, on return, the length parameter
3482 * received on the response.
3483 *
3484 * Return: 0 for success, non-zero in case of failure.
3485 */
3486 int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
3487 enum query_opcode opcode,
3488 enum desc_idn idn, u8 index,
3489 u8 selector,
3490 u8 *desc_buf, int *buf_len)
3491 {
3492 int err;
3493 int retries;
3494
3495 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3496 err = __ufshcd_query_descriptor(hba, opcode, idn, index,
3497 selector, desc_buf, buf_len);
3498 if (!err || err == -EINVAL)
3499 break;
3500 }
3501
3502 return err;
3503 }
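/*
 * Illustrative usage sketch (assumption, not driver code): reading the raw
 * Device descriptor. Note that buf_len is both an input (buffer size) and an
 * output (the length reported by the device in the response).
 *
 *	u8 desc_buf[QUERY_DESC_MAX_SIZE];
 *	int buf_len = QUERY_DESC_MAX_SIZE;
 *	int err;
 *
 *	err = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
 *					    QUERY_DESC_IDN_DEVICE, 0, 0,
 *					    desc_buf, &buf_len);
 *	if (!err)
 *		dev_dbg(hba->dev, "device descriptor length = %d\n", buf_len);
 */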
3504
3505 /**
3506 * ufshcd_read_desc_param - read the specified descriptor parameter
3507 * @hba: Pointer to adapter instance
3508 * @desc_id: descriptor idn value
3509 * @desc_index: descriptor index
3510 * @param_offset: offset of the parameter to read
3511 * @param_read_buf: pointer to buffer where parameter would be read
3512 * @param_size: sizeof(param_read_buf)
3513 *
3514 * Return: 0 in case of success, non-zero otherwise.
3515 */
3516 int ufshcd_read_desc_param(struct ufs_hba *hba,
3517 enum desc_idn desc_id,
3518 int desc_index,
3519 u8 param_offset,
3520 u8 *param_read_buf,
3521 u8 param_size)
3522 {
3523 int ret;
3524 u8 *desc_buf;
3525 int buff_len = QUERY_DESC_MAX_SIZE;
3526 bool is_kmalloc = true;
3527
3528 /* Safety check */
3529 if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
3530 return -EINVAL;
3531
3532 /* Check whether we need temp memory */
3533 if (param_offset != 0 || param_size < buff_len) {
3534 desc_buf = kzalloc(buff_len, GFP_KERNEL);
3535 if (!desc_buf)
3536 return -ENOMEM;
3537 } else {
3538 desc_buf = param_read_buf;
3539 is_kmalloc = false;
3540 }
3541
3542 /* Request for full descriptor */
3543 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
3544 desc_id, desc_index, 0,
3545 desc_buf, &buff_len);
3546 if (ret) {
3547 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d\n",
3548 __func__, desc_id, desc_index, param_offset, ret);
3549 goto out;
3550 }
3551
3552 /* Update descriptor length */
3553 buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET];
3554
3555 if (param_offset >= buff_len) {
3556 dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n",
3557 __func__, param_offset, desc_id, buff_len);
3558 ret = -EINVAL;
3559 goto out;
3560 }
3561
3562 /* Sanity check */
3563 if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
3564 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n",
3565 __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
3566 ret = -EINVAL;
3567 goto out;
3568 }
3569
3570 if (is_kmalloc) {
3571 /* Make sure we don't copy more data than available */
3572 if (param_offset >= buff_len)
3573 ret = -EINVAL;
3574 else
3575 memcpy(param_read_buf, &desc_buf[param_offset],
3576 min_t(u32, param_size, buff_len - param_offset));
3577 }
3578 out:
3579 if (is_kmalloc)
3580 kfree(desc_buf);
3581 return ret;
3582 }
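/*
 * Illustrative usage sketch (assumption): reading a single byte of the
 * Geometry descriptor. The parameter offset name below is assumed to match
 * the geometry descriptor offsets declared in ufs.h and is used for
 * demonstration only.
 *
 *	u8 max_luns = 0;
 *
 *	if (!ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0,
 *				    GEOMETRY_DESC_PARAM_MAX_NUM_LUN,
 *				    &max_luns, sizeof(max_luns)))
 *		dev_dbg(hba->dev, "bMaxNumberLU = %u\n", max_luns);
 */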
3583
3584 /**
3585 * struct uc_string_id - unicode string
3586 *
3587 * @len: size of this descriptor inclusive
3588 * @type: descriptor type
3589 * @uc: unicode string character
3590 */
3591 struct uc_string_id {
3592 u8 len;
3593 u8 type;
3594 wchar_t uc[];
3595 } __packed;
3596
3597 /* replace non-printable or non-ASCII characters with spaces */
3598 static inline char ufshcd_remove_non_printable(u8 ch)
3599 {
3600 return (ch >= 0x20 && ch <= 0x7e) ? ch : ' ';
3601 }
3602
3603 /**
3604 * ufshcd_read_string_desc - read string descriptor
3605 * @hba: pointer to adapter instance
3606 * @desc_index: descriptor index
3607 * @buf: pointer to buffer where descriptor would be read,
3608 * the caller should free the memory.
3609 * @ascii: if true convert from unicode to ascii characters
3610 * null terminated string.
3611 *
3612 * Return:
3613 * * string size on success.
3614 * * -ENOMEM: on allocation failure
3615 * * -EINVAL: on a wrong parameter
3616 */
3617 int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
3618 u8 **buf, bool ascii)
3619 {
3620 struct uc_string_id *uc_str;
3621 u8 *str;
3622 int ret;
3623
3624 if (!buf)
3625 return -EINVAL;
3626
3627 uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
3628 if (!uc_str)
3629 return -ENOMEM;
3630
3631 ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_STRING, desc_index, 0,
3632 (u8 *)uc_str, QUERY_DESC_MAX_SIZE);
3633 if (ret < 0) {
3634 dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n",
3635 QUERY_REQ_RETRIES, ret);
3636 str = NULL;
3637 goto out;
3638 }
3639
3640 if (uc_str->len <= QUERY_DESC_HDR_SIZE) {
3641 dev_dbg(hba->dev, "String Desc is of zero length\n");
3642 str = NULL;
3643 ret = 0;
3644 goto out;
3645 }
3646
3647 if (ascii) {
3648 ssize_t ascii_len;
3649 int i;
3650 /* remove header and divide by 2 to move from UTF16 to UTF8 */
3651 ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1;
3652 str = kzalloc(ascii_len, GFP_KERNEL);
3653 if (!str) {
3654 ret = -ENOMEM;
3655 goto out;
3656 }
3657
3658 /*
3659 * the descriptor contains string in UTF16 format
3660 * we need to convert to utf-8 so it can be displayed
3661 */
3662 ret = utf16s_to_utf8s(uc_str->uc,
3663 uc_str->len - QUERY_DESC_HDR_SIZE,
3664 UTF16_BIG_ENDIAN, str, ascii_len - 1);
3665
3666 /* replace non-printable or non-ASCII characters with spaces */
3667 for (i = 0; i < ret; i++)
3668 str[i] = ufshcd_remove_non_printable(str[i]);
3669
3670 str[ret++] = '\0';
3671
3672 } else {
3673 str = kmemdup(uc_str, uc_str->len, GFP_KERNEL);
3674 if (!str) {
3675 ret = -ENOMEM;
3676 goto out;
3677 }
3678 ret = uc_str->len;
3679 }
3680 out:
3681 *buf = str;
3682 kfree(uc_str);
3683 return ret;
3684 }
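/*
 * Illustrative usage sketch (assumption): reading a string descriptor in
 * ASCII form. On success the helper above allocates the buffer, so the
 * caller must kfree() it; desc_index would typically come from the device
 * descriptor (e.g. the iProductName field).
 *
 *	u8 *model = NULL;
 *	int len;
 *
 *	len = ufshcd_read_string_desc(hba, desc_index, &model, true);
 *	if (len > 0)
 *		dev_info(hba->dev, "product name: %s\n", model);
 *	kfree(model);
 */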
3685
3686 /**
3687 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
3688 * @hba: Pointer to adapter instance
3689 * @lun: lun id
3690 * @param_offset: offset of the parameter to read
3691 * @param_read_buf: pointer to buffer where parameter would be read
3692 * @param_size: sizeof(param_read_buf)
3693 *
3694 * Return: 0 in case of success, non-zero otherwise.
3695 */
3696 static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
3697 int lun,
3698 enum unit_desc_param param_offset,
3699 u8 *param_read_buf,
3700 u32 param_size)
3701 {
3702 /*
3703 * Unit descriptors are only available for general purpose LUs (LUN id
3704 * from 0 to 7) and RPMB Well known LU.
3705 */
3706 if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun))
3707 return -EOPNOTSUPP;
3708
3709 return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
3710 param_offset, param_read_buf, param_size);
3711 }
3712
3713 static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba *hba)
3714 {
3715 int err = 0;
3716 u32 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
3717
3718 if (hba->dev_info.wspecversion >= 0x300) {
3719 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
3720 QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME, 0, 0,
3721 &gating_wait);
3722 if (err)
3723 dev_err(hba->dev, "Failed reading bRefClkGatingWait. err = %d, use default %uus\n",
3724 err, gating_wait);
3725
3726 if (gating_wait == 0) {
3727 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
3728 dev_err(hba->dev, "Undefined ref clk gating wait time, use default %uus\n",
3729 gating_wait);
3730 }
3731
3732 hba->dev_info.clk_gating_wait_us = gating_wait;
3733 }
3734
3735 return err;
3736 }
3737
3738 /**
3739 * ufshcd_memory_alloc - allocate memory for host memory space data structures
3740 * @hba: per adapter instance
3741 *
3742 * 1. Allocate DMA memory for Command Descriptor array
3743  *	Each command descriptor consists of Command UPIU, Response UPIU and PRDT
3744 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
3745 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
3746 * (UTMRDL)
3747 * 4. Allocate memory for local reference block(lrb).
3748 *
3749 * Return: 0 for success, non-zero in case of failure.
3750 */
3751 static int ufshcd_memory_alloc(struct ufs_hba *hba)
3752 {
3753 size_t utmrdl_size, utrdl_size, ucdl_size;
3754
3755 /* Allocate memory for UTP command descriptors */
3756 ucdl_size = ufshcd_get_ucd_size(hba) * hba->nutrs;
3757 hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
3758 ucdl_size,
3759 &hba->ucdl_dma_addr,
3760 GFP_KERNEL);
3761
3762 /*
3763 * UFSHCI requires UTP command descriptor to be 128 byte aligned.
3764 */
3765 if (!hba->ucdl_base_addr ||
3766 WARN_ON(hba->ucdl_dma_addr & (128 - 1))) {
3767 dev_err(hba->dev,
3768 "Command Descriptor Memory allocation failed\n");
3769 goto out;
3770 }
3771
3772 /*
3773 * Allocate memory for UTP Transfer descriptors
3774 * UFSHCI requires 1KB alignment of UTRD
3775 */
3776 utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
3777 hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
3778 utrdl_size,
3779 &hba->utrdl_dma_addr,
3780 GFP_KERNEL);
3781 if (!hba->utrdl_base_addr ||
3782 WARN_ON(hba->utrdl_dma_addr & (SZ_1K - 1))) {
3783 dev_err(hba->dev,
3784 "Transfer Descriptor Memory allocation failed\n");
3785 goto out;
3786 }
3787
3788 /*
3789 * Skip utmrdl allocation; it may have been
3790 * allocated during first pass and not released during
3791 * MCQ memory allocation.
3792 * See ufshcd_release_sdb_queue() and ufshcd_config_mcq()
3793 */
3794 if (hba->utmrdl_base_addr)
3795 goto skip_utmrdl;
3796 /*
3797 * Allocate memory for UTP Task Management descriptors
3798 * UFSHCI requires 1KB alignment of UTMRD
3799 */
3800 utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
3801 hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
3802 utmrdl_size,
3803 &hba->utmrdl_dma_addr,
3804 GFP_KERNEL);
3805 if (!hba->utmrdl_base_addr ||
3806 WARN_ON(hba->utmrdl_dma_addr & (SZ_1K - 1))) {
3807 dev_err(hba->dev,
3808 "Task Management Descriptor Memory allocation failed\n");
3809 goto out;
3810 }
3811
3812 skip_utmrdl:
3813 /* Allocate memory for local reference block */
3814 hba->lrb = devm_kcalloc(hba->dev,
3815 hba->nutrs, sizeof(struct ufshcd_lrb),
3816 GFP_KERNEL);
3817 if (!hba->lrb) {
3818 dev_err(hba->dev, "LRB Memory allocation failed\n");
3819 goto out;
3820 }
3821 return 0;
3822 out:
3823 return -ENOMEM;
3824 }
3825
3826 /**
3827 * ufshcd_host_memory_configure - configure local reference block with
3828 * memory offsets
3829 * @hba: per adapter instance
3830 *
3831 * Configure Host memory space
3832 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
3833 * address.
3834 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
3835 * and PRDT offset.
3836 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
3837 * into local reference block.
3838 */
3839 static void ufshcd_host_memory_configure(struct ufs_hba *hba)
3840 {
3841 struct utp_transfer_req_desc *utrdlp;
3842 dma_addr_t cmd_desc_dma_addr;
3843 dma_addr_t cmd_desc_element_addr;
3844 u16 response_offset;
3845 u16 prdt_offset;
3846 int cmd_desc_size;
3847 int i;
3848
3849 utrdlp = hba->utrdl_base_addr;
3850
3851 response_offset =
3852 offsetof(struct utp_transfer_cmd_desc, response_upiu);
3853 prdt_offset =
3854 offsetof(struct utp_transfer_cmd_desc, prd_table);
3855
3856 cmd_desc_size = ufshcd_get_ucd_size(hba);
3857 cmd_desc_dma_addr = hba->ucdl_dma_addr;
3858
3859 for (i = 0; i < hba->nutrs; i++) {
3860 /* Configure UTRD with command descriptor base address */
3861 cmd_desc_element_addr =
3862 (cmd_desc_dma_addr + (cmd_desc_size * i));
3863 utrdlp[i].command_desc_base_addr =
3864 cpu_to_le64(cmd_desc_element_addr);
3865
3866 /* Response upiu and prdt offset should be in double words */
3867 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
3868 utrdlp[i].response_upiu_offset =
3869 cpu_to_le16(response_offset);
3870 utrdlp[i].prd_table_offset =
3871 cpu_to_le16(prdt_offset);
3872 utrdlp[i].response_upiu_length =
3873 cpu_to_le16(ALIGNED_UPIU_SIZE);
3874 } else {
3875 utrdlp[i].response_upiu_offset =
3876 cpu_to_le16(response_offset >> 2);
3877 utrdlp[i].prd_table_offset =
3878 cpu_to_le16(prdt_offset >> 2);
3879 utrdlp[i].response_upiu_length =
3880 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
3881 }
3882
3883 ufshcd_init_lrb(hba, &hba->lrb[i], i);
3884 }
3885 }
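/*
 * Worked example (illustration only) of the offset conversion above,
 * assuming ALIGNED_UPIU_SIZE is 512 bytes: the response UPIU then starts at
 * byte offset 512 within the command descriptor and, without the
 * UFSHCD_QUIRK_PRDT_BYTE_GRAN quirk, is programmed into the UTRD as
 * 512 >> 2 = 128 double words.
 */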
3886
3887 /**
3888 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
3889 * @hba: per adapter instance
3890 *
3891 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
3892 * in order to initialize the Unipro link startup procedure.
3893 * Once the Unipro links are up, the device connected to the controller
3894 * is detected.
3895 *
3896 * Return: 0 on success, non-zero value on failure.
3897 */
3898 static int ufshcd_dme_link_startup(struct ufs_hba *hba)
3899 {
3900 struct uic_command uic_cmd = {0};
3901 int ret;
3902
3903 uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
3904
3905 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3906 if (ret)
3907 dev_dbg(hba->dev,
3908 "dme-link-startup: error code %d\n", ret);
3909 return ret;
3910 }
3911 /**
3912 * ufshcd_dme_reset - UIC command for DME_RESET
3913 * @hba: per adapter instance
3914 *
3915 * DME_RESET command is issued in order to reset UniPro stack.
3916 * This function now deals with cold reset.
3917 *
3918 * Return: 0 on success, non-zero value on failure.
3919 */
3920 static int ufshcd_dme_reset(struct ufs_hba *hba)
3921 {
3922 struct uic_command uic_cmd = {0};
3923 int ret;
3924
3925 uic_cmd.command = UIC_CMD_DME_RESET;
3926
3927 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3928 if (ret)
3929 dev_err(hba->dev,
3930 "dme-reset: error code %d\n", ret);
3931
3932 return ret;
3933 }
3934
3935 int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
3936 int agreed_gear,
3937 int adapt_val)
3938 {
3939 int ret;
3940
3941 if (agreed_gear < UFS_HS_G4)
3942 adapt_val = PA_NO_ADAPT;
3943
3944 ret = ufshcd_dme_set(hba,
3945 UIC_ARG_MIB(PA_TXHSADAPTTYPE),
3946 adapt_val);
3947 return ret;
3948 }
3949 EXPORT_SYMBOL_GPL(ufshcd_dme_configure_adapt);
3950
3951 /**
3952 * ufshcd_dme_enable - UIC command for DME_ENABLE
3953 * @hba: per adapter instance
3954 *
3955 * DME_ENABLE command is issued in order to enable UniPro stack.
3956 *
3957 * Return: 0 on success, non-zero value on failure.
3958 */
3959 static int ufshcd_dme_enable(struct ufs_hba *hba)
3960 {
3961 struct uic_command uic_cmd = {0};
3962 int ret;
3963
3964 uic_cmd.command = UIC_CMD_DME_ENABLE;
3965
3966 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3967 if (ret)
3968 dev_err(hba->dev,
3969 "dme-enable: error code %d\n", ret);
3970
3971 return ret;
3972 }
3973
3974 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
3975 {
3976 #define MIN_DELAY_BEFORE_DME_CMDS_US 1000
3977 unsigned long min_sleep_time_us;
3978
3979 if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
3980 return;
3981
3982 /*
3983 * last_dme_cmd_tstamp will be 0 only for 1st call to
3984 * this function
3985 */
3986 if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
3987 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
3988 } else {
3989 unsigned long delta =
3990 (unsigned long) ktime_to_us(
3991 ktime_sub(ktime_get(),
3992 hba->last_dme_cmd_tstamp));
3993
3994 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
3995 min_sleep_time_us =
3996 MIN_DELAY_BEFORE_DME_CMDS_US - delta;
3997 else
3998 min_sleep_time_us = 0; /* no more delay required */
3999 }
4000
4001 if (min_sleep_time_us > 0) {
4002 /* allow sleep for extra 50us if needed */
4003 usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
4004 }
4005
4006 /* update the last_dme_cmd_tstamp */
4007 hba->last_dme_cmd_tstamp = ktime_get();
4008 }
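/*
 * Worked example (illustration only): with UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
 * set, if the previous DME command finished 300 us ago then delta = 300 and
 * the function sleeps for roughly 700-750 us, so that at least
 * MIN_DELAY_BEFORE_DME_CMDS_US (1000 us) elapses between consecutive
 * DME commands.
 */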
4009
4010 /**
4011 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
4012 * @hba: per adapter instance
4013 * @attr_sel: uic command argument1
4014 * @attr_set: attribute set type as uic command argument2
4015 * @mib_val: setting value as uic command argument3
4016 * @peer: indicate whether peer or local
4017 *
4018 * Return: 0 on success, non-zero value on failure.
4019 */
4020 int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
4021 u8 attr_set, u32 mib_val, u8 peer)
4022 {
4023 struct uic_command uic_cmd = {0};
4024 static const char *const action[] = {
4025 "dme-set",
4026 "dme-peer-set"
4027 };
4028 const char *set = action[!!peer];
4029 int ret;
4030 int retries = UFS_UIC_COMMAND_RETRIES;
4031
4032 uic_cmd.command = peer ?
4033 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
4034 uic_cmd.argument1 = attr_sel;
4035 uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
4036 uic_cmd.argument3 = mib_val;
4037
4038 do {
4039 /* for peer attributes we retry upon failure */
4040 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
4041 if (ret)
4042 dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
4043 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
4044 } while (ret && peer && --retries);
4045
4046 if (ret)
4047 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
4048 set, UIC_GET_ATTR_ID(attr_sel), mib_val,
4049 UFS_UIC_COMMAND_RETRIES - retries);
4050
4051 return ret;
4052 }
4053 EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
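/*
 * Illustrative usage sketch (assumption): the ufshcd_dme_set() and
 * ufshcd_dme_peer_set() wrappers from ufshcd.h are the usual way to call the
 * helper above, e.g. when programming a local attribute ahead of a power
 * mode change:
 *
 *	int err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), UFS_HS_G3);
 *	if (err)
 *		dev_warn(hba->dev, "PA_TXGEAR update failed: %d\n", err);
 */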
4054
4055 /**
4056 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
4057 * @hba: per adapter instance
4058 * @attr_sel: uic command argument1
4059 * @mib_val: the value of the attribute as returned by the UIC command
4060 * @peer: indicate whether peer or local
4061 *
4062 * Return: 0 on success, non-zero value on failure.
4063 */
4064 int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
4065 u32 *mib_val, u8 peer)
4066 {
4067 struct uic_command uic_cmd = {0};
4068 static const char *const action[] = {
4069 "dme-get",
4070 "dme-peer-get"
4071 };
4072 const char *get = action[!!peer];
4073 int ret;
4074 int retries = UFS_UIC_COMMAND_RETRIES;
4075 struct ufs_pa_layer_attr orig_pwr_info;
4076 struct ufs_pa_layer_attr temp_pwr_info;
4077 bool pwr_mode_change = false;
4078
4079 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
4080 orig_pwr_info = hba->pwr_info;
4081 temp_pwr_info = orig_pwr_info;
4082
4083 if (orig_pwr_info.pwr_tx == FAST_MODE ||
4084 orig_pwr_info.pwr_rx == FAST_MODE) {
4085 temp_pwr_info.pwr_tx = FASTAUTO_MODE;
4086 temp_pwr_info.pwr_rx = FASTAUTO_MODE;
4087 pwr_mode_change = true;
4088 } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
4089 orig_pwr_info.pwr_rx == SLOW_MODE) {
4090 temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
4091 temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
4092 pwr_mode_change = true;
4093 }
4094 if (pwr_mode_change) {
4095 ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
4096 if (ret)
4097 goto out;
4098 }
4099 }
4100
4101 uic_cmd.command = peer ?
4102 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
4103 uic_cmd.argument1 = attr_sel;
4104
4105 do {
4106 /* for peer attributes we retry upon failure */
4107 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
4108 if (ret)
4109 dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
4110 get, UIC_GET_ATTR_ID(attr_sel), ret);
4111 } while (ret && peer && --retries);
4112
4113 if (ret)
4114 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
4115 get, UIC_GET_ATTR_ID(attr_sel),
4116 UFS_UIC_COMMAND_RETRIES - retries);
4117
4118 if (mib_val && !ret)
4119 *mib_val = uic_cmd.argument3;
4120
4121 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
4122 && pwr_mode_change)
4123 ufshcd_change_power_mode(hba, &orig_pwr_info);
4124 out:
4125 return ret;
4126 }
4127 EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
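/*
 * Illustrative usage sketch (assumption): the ufshcd_dme_get() and
 * ufshcd_dme_peer_get() wrappers are typically used to read local and peer
 * attributes, as done elsewhere in this file:
 *
 *	u32 rx_lanes = 0, peer_tx_lanes = 0;
 *
 *	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), &rx_lanes);
 *	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
 *			    &peer_tx_lanes);
 */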
4128
4129 /**
4130 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
4131 * state) and waits for it to take effect.
4132 *
4133 * @hba: per adapter instance
4134 * @cmd: UIC command to execute
4135 *
4136  * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER and
4137  * DME_HIBERNATE_EXIT take some time to take effect on both the host and
4138  * device UniPro links, so their final completion is indicated by dedicated
4139  * status bits in the Interrupt Status register (UPMS, UHES, UHXS) in
4140  * addition to the normal UIC command completion status (UCCS). This function
4141  * only returns after the relevant status bits indicate completion.
4142 *
4143 * Return: 0 on success, non-zero value on failure.
4144 */
4145 static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
4146 {
4147 DECLARE_COMPLETION_ONSTACK(uic_async_done);
4148 unsigned long flags;
4149 u8 status;
4150 int ret;
4151 bool reenable_intr = false;
4152
4153 mutex_lock(&hba->uic_cmd_mutex);
4154 ufshcd_add_delay_before_dme_cmd(hba);
4155
4156 spin_lock_irqsave(hba->host->host_lock, flags);
4157 if (ufshcd_is_link_broken(hba)) {
4158 ret = -ENOLINK;
4159 goto out_unlock;
4160 }
4161 hba->uic_async_done = &uic_async_done;
4162 if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
4163 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
4164 /*
4165 * Make sure UIC command completion interrupt is disabled before
4166 * issuing UIC command.
4167 */
4168 ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
4169 reenable_intr = true;
4170 }
4171 spin_unlock_irqrestore(hba->host->host_lock, flags);
4172 ret = __ufshcd_send_uic_cmd(hba, cmd);
4173 if (ret) {
4174 dev_err(hba->dev,
4175 "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
4176 cmd->command, cmd->argument3, ret);
4177 goto out;
4178 }
4179
4180 if (!wait_for_completion_timeout(hba->uic_async_done,
4181 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
4182 dev_err(hba->dev,
4183 "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
4184 cmd->command, cmd->argument3);
4185
4186 if (!cmd->cmd_active) {
4187 dev_err(hba->dev, "%s: Power Mode Change operation has been completed, go check UPMCRS\n",
4188 __func__);
4189 goto check_upmcrs;
4190 }
4191
4192 ret = -ETIMEDOUT;
4193 goto out;
4194 }
4195
4196 check_upmcrs:
4197 status = ufshcd_get_upmcrs(hba);
4198 if (status != PWR_LOCAL) {
4199 dev_err(hba->dev,
4200 "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
4201 cmd->command, status);
4202 ret = (status != PWR_OK) ? status : -1;
4203 }
4204 out:
4205 if (ret) {
4206 ufshcd_print_host_state(hba);
4207 ufshcd_print_pwr_info(hba);
4208 ufshcd_print_evt_hist(hba);
4209 }
4210
4211 spin_lock_irqsave(hba->host->host_lock, flags);
4212 hba->active_uic_cmd = NULL;
4213 hba->uic_async_done = NULL;
4214 if (reenable_intr)
4215 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
4216 if (ret) {
4217 ufshcd_set_link_broken(hba);
4218 ufshcd_schedule_eh_work(hba);
4219 }
4220 out_unlock:
4221 spin_unlock_irqrestore(hba->host->host_lock, flags);
4222 mutex_unlock(&hba->uic_cmd_mutex);
4223
4224 return ret;
4225 }
4226
4227 /**
4228 * ufshcd_send_bsg_uic_cmd - Send UIC commands requested via BSG layer and retrieve the result
4229 * @hba: per adapter instance
4230 * @uic_cmd: UIC command
4231 *
4232 * Return: 0 only if success.
4233 */
4234 int ufshcd_send_bsg_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
4235 {
4236 int ret;
4237
4238 if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
4239 return 0;
4240
4241 ufshcd_hold(hba);
4242
4243 if (uic_cmd->argument1 == UIC_ARG_MIB(PA_PWRMODE) &&
4244 uic_cmd->command == UIC_CMD_DME_SET) {
4245 ret = ufshcd_uic_pwr_ctrl(hba, uic_cmd);
4246 goto out;
4247 }
4248
4249 mutex_lock(&hba->uic_cmd_mutex);
4250 ufshcd_add_delay_before_dme_cmd(hba);
4251
4252 ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
4253 if (!ret)
4254 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
4255
4256 mutex_unlock(&hba->uic_cmd_mutex);
4257
4258 out:
4259 ufshcd_release(hba);
4260 return ret;
4261 }
4262
4263 /**
4264  * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
4265 * using DME_SET primitives.
4266 * @hba: per adapter instance
4267  * @mode: power mode value
4268 *
4269 * Return: 0 on success, non-zero value on failure.
4270 */
4271 int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
4272 {
4273 struct uic_command uic_cmd = {0};
4274 int ret;
4275
4276 if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
4277 ret = ufshcd_dme_set(hba,
4278 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
4279 if (ret) {
4280 dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
4281 __func__, ret);
4282 goto out;
4283 }
4284 }
4285
4286 uic_cmd.command = UIC_CMD_DME_SET;
4287 uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
4288 uic_cmd.argument3 = mode;
4289 ufshcd_hold(hba);
4290 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4291 ufshcd_release(hba);
4292
4293 out:
4294 return ret;
4295 }
4296 EXPORT_SYMBOL_GPL(ufshcd_uic_change_pwr_mode);
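/*
 * Illustrative note (assumption): the mode byte passed above packs the RX
 * power mode into the upper nibble and the TX power mode into the lower
 * nibble, matching how ufshcd_change_power_mode() below builds it:
 *
 *	u8 mode = FAST_MODE << 4 | FAST_MODE;	(HS in both directions)
 *	int err = ufshcd_uic_change_pwr_mode(hba, mode);
 */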
4297
4298 int ufshcd_link_recovery(struct ufs_hba *hba)
4299 {
4300 int ret;
4301 unsigned long flags;
4302
4303 spin_lock_irqsave(hba->host->host_lock, flags);
4304 hba->ufshcd_state = UFSHCD_STATE_RESET;
4305 ufshcd_set_eh_in_progress(hba);
4306 spin_unlock_irqrestore(hba->host->host_lock, flags);
4307
4308 /* Reset the attached device */
4309 ufshcd_device_reset(hba);
4310
4311 ret = ufshcd_host_reset_and_restore(hba);
4312
4313 spin_lock_irqsave(hba->host->host_lock, flags);
4314 if (ret)
4315 hba->ufshcd_state = UFSHCD_STATE_ERROR;
4316 ufshcd_clear_eh_in_progress(hba);
4317 spin_unlock_irqrestore(hba->host->host_lock, flags);
4318
4319 if (ret)
4320 dev_err(hba->dev, "%s: link recovery failed, err %d",
4321 __func__, ret);
4322
4323 return ret;
4324 }
4325 EXPORT_SYMBOL_GPL(ufshcd_link_recovery);
4326
4327 int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
4328 {
4329 int ret;
4330 struct uic_command uic_cmd = {0};
4331 ktime_t start = ktime_get();
4332
4333 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
4334
4335 uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
4336 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4337 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
4338 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
4339
4340 if (ret)
4341 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
4342 __func__, ret);
4343 else
4344 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
4345 POST_CHANGE);
4346
4347 return ret;
4348 }
4349 EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_enter);
4350
4351 int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
4352 {
4353 struct uic_command uic_cmd = {0};
4354 int ret;
4355 ktime_t start = ktime_get();
4356
4357 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
4358
4359 uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
4360 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4361 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
4362 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
4363
4364 if (ret) {
4365 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
4366 __func__, ret);
4367 } else {
4368 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
4369 POST_CHANGE);
4370 hba->ufs_stats.last_hibern8_exit_tstamp = local_clock();
4371 hba->ufs_stats.hibern8_exit_cnt++;
4372 }
4373
4374 return ret;
4375 }
4376 EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit);
4377
4378 void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
4379 {
4380 unsigned long flags;
4381 bool update = false;
4382
4383 if (!ufshcd_is_auto_hibern8_supported(hba))
4384 return;
4385
4386 spin_lock_irqsave(hba->host->host_lock, flags);
4387 if (hba->ahit != ahit) {
4388 hba->ahit = ahit;
4389 update = true;
4390 }
4391 spin_unlock_irqrestore(hba->host->host_lock, flags);
4392
4393 if (update &&
4394 !pm_runtime_suspended(&hba->ufs_device_wlun->sdev_gendev)) {
4395 ufshcd_rpm_get_sync(hba);
4396 ufshcd_hold(hba);
4397 ufshcd_auto_hibern8_enable(hba);
4398 ufshcd_release(hba);
4399 ufshcd_rpm_put_sync(hba);
4400 }
4401 }
4402 EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);
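/*
 * Illustrative usage sketch (assumption): a vendor driver or sysfs handler
 * could update the Auto-Hibernate Idle Timer with a pre-encoded AHIT value.
 * The FIELD_PREP()-based encoding below assumes the UFSHCI_AHIBERN8_* masks
 * declared in ufshci.h; the 150 us value is an arbitrary example.
 *
 *	u32 ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
 *		   FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK,
 *			      UFSHCI_AHIBERN8_SCALE_US);
 *
 *	ufshcd_auto_hibern8_update(hba, ahit);
 */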
4403
4404 void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
4405 {
4406 if (!ufshcd_is_auto_hibern8_supported(hba))
4407 return;
4408
4409 ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
4410 }
4411
4412 /**
4413 * ufshcd_init_pwr_info - setting the POR (power on reset)
4414 * values in hba power info
4415 * @hba: per-adapter instance
4416 */
4417 static void ufshcd_init_pwr_info(struct ufs_hba *hba)
4418 {
4419 hba->pwr_info.gear_rx = UFS_PWM_G1;
4420 hba->pwr_info.gear_tx = UFS_PWM_G1;
4421 hba->pwr_info.lane_rx = UFS_LANE_1;
4422 hba->pwr_info.lane_tx = UFS_LANE_1;
4423 hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
4424 hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
4425 hba->pwr_info.hs_rate = 0;
4426 }
4427
4428 /**
4429 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
4430 * @hba: per-adapter instance
4431 *
4432 * Return: 0 upon success; < 0 upon failure.
4433 */
4434 static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
4435 {
4436 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
4437
4438 if (hba->max_pwr_info.is_valid)
4439 return 0;
4440
4441 if (hba->quirks & UFSHCD_QUIRK_HIBERN_FASTAUTO) {
4442 pwr_info->pwr_tx = FASTAUTO_MODE;
4443 pwr_info->pwr_rx = FASTAUTO_MODE;
4444 } else {
4445 pwr_info->pwr_tx = FAST_MODE;
4446 pwr_info->pwr_rx = FAST_MODE;
4447 }
4448 pwr_info->hs_rate = PA_HS_MODE_B;
4449
4450 /* Get the connected lane count */
4451 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
4452 &pwr_info->lane_rx);
4453 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4454 &pwr_info->lane_tx);
4455
4456 if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
4457 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
4458 __func__,
4459 pwr_info->lane_rx,
4460 pwr_info->lane_tx);
4461 return -EINVAL;
4462 }
4463
4464 /*
4465 * First, get the maximum gears of HS speed.
4466 * If a zero value, it means there is no HSGEAR capability.
4467 * Then, get the maximum gears of PWM speed.
4468 */
4469 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
4470 if (!pwr_info->gear_rx) {
4471 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4472 &pwr_info->gear_rx);
4473 if (!pwr_info->gear_rx) {
4474 dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
4475 __func__, pwr_info->gear_rx);
4476 return -EINVAL;
4477 }
4478 pwr_info->pwr_rx = SLOW_MODE;
4479 }
4480
4481 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
4482 &pwr_info->gear_tx);
4483 if (!pwr_info->gear_tx) {
4484 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4485 &pwr_info->gear_tx);
4486 if (!pwr_info->gear_tx) {
4487 dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
4488 __func__, pwr_info->gear_tx);
4489 return -EINVAL;
4490 }
4491 pwr_info->pwr_tx = SLOW_MODE;
4492 }
4493
4494 hba->max_pwr_info.is_valid = true;
4495 return 0;
4496 }
4497
4498 static int ufshcd_change_power_mode(struct ufs_hba *hba,
4499 struct ufs_pa_layer_attr *pwr_mode)
4500 {
4501 int ret;
4502
4503 /* if already configured to the requested pwr_mode */
4504 if (!hba->force_pmc &&
4505 pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
4506 pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
4507 pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
4508 pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
4509 pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
4510 pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
4511 pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
4512 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
4513 return 0;
4514 }
4515
4516 /*
4517 * Configure attributes for power mode change with below.
4518 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
4519 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
4520 * - PA_HSSERIES
4521 */
4522 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
4523 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
4524 pwr_mode->lane_rx);
4525 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4526 pwr_mode->pwr_rx == FAST_MODE)
4527 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true);
4528 else
4529 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), false);
4530
4531 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
4532 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
4533 pwr_mode->lane_tx);
4534 if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
4535 pwr_mode->pwr_tx == FAST_MODE)
4536 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
4537 else
4538 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), false);
4539
4540 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4541 pwr_mode->pwr_tx == FASTAUTO_MODE ||
4542 pwr_mode->pwr_rx == FAST_MODE ||
4543 pwr_mode->pwr_tx == FAST_MODE)
4544 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
4545 pwr_mode->hs_rate);
4546
4547 if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) {
4548 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
4549 DL_FC0ProtectionTimeOutVal_Default);
4550 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
4551 DL_TC0ReplayTimeOutVal_Default);
4552 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
4553 DL_AFC0ReqTimeOutVal_Default);
4554 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
4555 DL_FC1ProtectionTimeOutVal_Default);
4556 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
4557 DL_TC1ReplayTimeOutVal_Default);
4558 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
4559 DL_AFC1ReqTimeOutVal_Default);
4560
4561 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
4562 DL_FC0ProtectionTimeOutVal_Default);
4563 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
4564 DL_TC0ReplayTimeOutVal_Default);
4565 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
4566 DL_AFC0ReqTimeOutVal_Default);
4567 }
4568
4569 ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
4570 | pwr_mode->pwr_tx);
4571
4572 if (ret) {
4573 dev_err(hba->dev,
4574 "%s: power mode change failed %d\n", __func__, ret);
4575 } else {
4576 memcpy(&hba->pwr_info, pwr_mode,
4577 sizeof(struct ufs_pa_layer_attr));
4578 }
4579
4580 return ret;
4581 }
4582
4583 /**
4584 * ufshcd_config_pwr_mode - configure a new power mode
4585 * @hba: per-adapter instance
4586 * @desired_pwr_mode: desired power configuration
4587 *
4588 * Return: 0 upon success; < 0 upon failure.
4589 */
4590 int ufshcd_config_pwr_mode(struct ufs_hba *hba,
4591 struct ufs_pa_layer_attr *desired_pwr_mode)
4592 {
4593 struct ufs_pa_layer_attr final_params = { 0 };
4594 int ret;
4595
4596 ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
4597 desired_pwr_mode, &final_params);
4598
4599 if (ret)
4600 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
4601
4602 ret = ufshcd_change_power_mode(hba, &final_params);
4603
4604 if (!ret)
4605 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
4606 &final_params);
4607
4608 return ret;
4609 }
4610 EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
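/*
 * Illustrative usage sketch (assumption): once the maximum capabilities have
 * been read into hba->max_pwr_info, initialization or gear-scaling code can
 * request that configuration, roughly as the probe path does later in this
 * file:
 *
 *	if (!ufshcd_get_max_pwr_mode(hba))
 *		ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
 */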
4611
4612 /**
4613 * ufshcd_complete_dev_init() - checks device readiness
4614 * @hba: per-adapter instance
4615 *
4616 * Set fDeviceInit flag and poll until device toggles it.
4617 *
4618 * Return: 0 upon success; < 0 upon failure.
4619 */
4620 static int ufshcd_complete_dev_init(struct ufs_hba *hba)
4621 {
4622 int err;
4623 bool flag_res = true;
4624 ktime_t timeout;
4625
4626 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
4627 QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL);
4628 if (err) {
4629 dev_err(hba->dev,
4630 "%s: setting fDeviceInit flag failed with error %d\n",
4631 __func__, err);
4632 goto out;
4633 }
4634
4635 /* Poll fDeviceInit flag to be cleared */
4636 timeout = ktime_add_ms(ktime_get(), FDEVICEINIT_COMPL_TIMEOUT);
4637 do {
4638 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
4639 QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
4640 if (!flag_res)
4641 break;
4642 usleep_range(500, 1000);
4643 } while (ktime_before(ktime_get(), timeout));
4644
4645 if (err) {
4646 dev_err(hba->dev,
4647 "%s: reading fDeviceInit flag failed with error %d\n",
4648 __func__, err);
4649 } else if (flag_res) {
4650 dev_err(hba->dev,
4651 "%s: fDeviceInit was not cleared by the device\n",
4652 __func__);
4653 err = -EBUSY;
4654 }
4655 out:
4656 return err;
4657 }
4658
4659 /**
4660 * ufshcd_make_hba_operational - Make UFS controller operational
4661 * @hba: per adapter instance
4662 *
4663 * To bring UFS host controller to operational state,
4664 * 1. Enable required interrupts
4665 * 2. Configure interrupt aggregation
4666 * 3. Program UTRL and UTMRL base address
4667 * 4. Configure run-stop-registers
4668 *
4669 * Return: 0 on success, non-zero value on failure.
4670 */
4671 int ufshcd_make_hba_operational(struct ufs_hba *hba)
4672 {
4673 int err = 0;
4674 u32 reg;
4675
4676 /* Enable required interrupts */
4677 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
4678
4679 /* Configure interrupt aggregation */
4680 if (ufshcd_is_intr_aggr_allowed(hba))
4681 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
4682 else
4683 ufshcd_disable_intr_aggr(hba);
4684
4685 /* Configure UTRL and UTMRL base address registers */
4686 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
4687 REG_UTP_TRANSFER_REQ_LIST_BASE_L);
4688 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
4689 REG_UTP_TRANSFER_REQ_LIST_BASE_H);
4690 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
4691 REG_UTP_TASK_REQ_LIST_BASE_L);
4692 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
4693 REG_UTP_TASK_REQ_LIST_BASE_H);
4694
4695 /*
4696 * Make sure base address and interrupt setup are updated before
4697 * enabling the run/stop registers below.
4698 */
4699 wmb();
4700
4701 /*
4702 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
4703 */
4704 reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
4705 if (!(ufshcd_get_lists_status(reg))) {
4706 ufshcd_enable_run_stop_reg(hba);
4707 } else {
4708 dev_err(hba->dev,
4709 "Host controller not ready to process requests");
4710 err = -EIO;
4711 }
4712
4713 return err;
4714 }
4715 EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
4716
4717 /**
4718 * ufshcd_hba_stop - Send controller to reset state
4719 * @hba: per adapter instance
4720 */
4721 void ufshcd_hba_stop(struct ufs_hba *hba)
4722 {
4723 unsigned long flags;
4724 int err;
4725
4726 /*
4727 	 * Obtain the host lock to prevent the controller from being disabled
4728 	 * while the UFS interrupt handler is active on another CPU.
4729 */
4730 spin_lock_irqsave(hba->host->host_lock, flags);
4731 ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
4732 spin_unlock_irqrestore(hba->host->host_lock, flags);
4733
4734 err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
4735 CONTROLLER_ENABLE, CONTROLLER_DISABLE,
4736 10, 1);
4737 if (err)
4738 dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
4739 }
4740 EXPORT_SYMBOL_GPL(ufshcd_hba_stop);
4741
4742 /**
4743 * ufshcd_hba_execute_hce - initialize the controller
4744 * @hba: per adapter instance
4745 *
4746  * The controller resets itself and the controller firmware initialization
4747  * sequence kicks off. When the controller is ready, it sets
4748  * the Host Controller Enable bit to 1.
4749 *
4750 * Return: 0 on success, non-zero value on failure.
4751 */
4752 static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
4753 {
4754 int retry_outer = 3;
4755 int retry_inner;
4756
4757 start:
4758 if (ufshcd_is_hba_active(hba))
4759 /* change controller state to "reset state" */
4760 ufshcd_hba_stop(hba);
4761
4762 /* UniPro link is disabled at this point */
4763 ufshcd_set_link_off(hba);
4764
4765 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4766
4767 /* start controller initialization sequence */
4768 ufshcd_hba_start(hba);
4769
4770 /*
4771 * To initialize a UFS host controller HCE bit must be set to 1.
4772 * During initialization the HCE bit value changes from 1->0->1.
4773 * When the host controller completes initialization sequence
4774 * it sets the value of HCE bit to 1. The same HCE bit is read back
4775 * to check if the controller has completed initialization sequence.
4776 	 * So without this delay, the HCE = 1 value set by the previous
4777 	 * instruction might be read back.
4778 * This delay can be changed based on the controller.
4779 */
4780 ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100);
4781
4782 /* wait for the host controller to complete initialization */
4783 retry_inner = 50;
4784 while (!ufshcd_is_hba_active(hba)) {
4785 if (retry_inner) {
4786 retry_inner--;
4787 } else {
4788 dev_err(hba->dev,
4789 "Controller enable failed\n");
4790 if (retry_outer) {
4791 retry_outer--;
4792 goto start;
4793 }
4794 return -EIO;
4795 }
4796 usleep_range(1000, 1100);
4797 }
4798
4799 /* enable UIC related interrupts */
4800 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4801
4802 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4803
4804 return 0;
4805 }
4806
4807 int ufshcd_hba_enable(struct ufs_hba *hba)
4808 {
4809 int ret;
4810
4811 if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
4812 ufshcd_set_link_off(hba);
4813 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4814
4815 /* enable UIC related interrupts */
4816 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4817 ret = ufshcd_dme_reset(hba);
4818 if (ret) {
4819 dev_err(hba->dev, "DME_RESET failed\n");
4820 return ret;
4821 }
4822
4823 ret = ufshcd_dme_enable(hba);
4824 if (ret) {
4825 dev_err(hba->dev, "Enabling DME failed\n");
4826 return ret;
4827 }
4828
4829 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4830 } else {
4831 ret = ufshcd_hba_execute_hce(hba);
4832 }
4833
4834 return ret;
4835 }
4836 EXPORT_SYMBOL_GPL(ufshcd_hba_enable);
4837
4838 static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
4839 {
4840 int tx_lanes = 0, i, err = 0;
4841
4842 if (!peer)
4843 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4844 &tx_lanes);
4845 else
4846 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4847 &tx_lanes);
4848 for (i = 0; i < tx_lanes; i++) {
4849 if (!peer)
4850 err = ufshcd_dme_set(hba,
4851 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4852 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4853 0);
4854 else
4855 err = ufshcd_dme_peer_set(hba,
4856 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4857 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4858 0);
4859 if (err) {
4860 dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
4861 __func__, peer, i, err);
4862 break;
4863 }
4864 }
4865
4866 return err;
4867 }
4868
4869 static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
4870 {
4871 return ufshcd_disable_tx_lcc(hba, true);
4872 }
4873
4874 void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val)
4875 {
4876 struct ufs_event_hist *e;
4877
4878 if (id >= UFS_EVT_CNT)
4879 return;
4880
4881 e = &hba->ufs_stats.event[id];
4882 e->val[e->pos] = val;
4883 e->tstamp[e->pos] = local_clock();
4884 e->cnt += 1;
4885 e->pos = (e->pos + 1) % UFS_EVENT_HIST_LENGTH;
4886
4887 ufshcd_vops_event_notify(hba, id, &val);
4888 }
4889 EXPORT_SYMBOL_GPL(ufshcd_update_evt_hist);
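/*
 * Illustrative usage sketch (assumption): error paths record interesting
 * register values in the event history ring buffer, for example:
 *
 *	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
 *	ufshcd_update_evt_hist(hba, UFS_EVT_PA_ERR, reg);
 */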
4890
4891 /**
4892 * ufshcd_link_startup - Initialize unipro link startup
4893 * @hba: per adapter instance
4894 *
4895 * Return: 0 for success, non-zero in case of failure.
4896 */
4897 static int ufshcd_link_startup(struct ufs_hba *hba)
4898 {
4899 int ret;
4900 int retries = DME_LINKSTARTUP_RETRIES;
4901 bool link_startup_again = false;
4902
4903 /*
4904 	 * If the UFS device isn't active then we will have to issue link startup
4905 	 * twice to make sure the device state moves to active.
4906 */
4907 if (!ufshcd_is_ufs_dev_active(hba))
4908 link_startup_again = true;
4909
4910 link_startup:
4911 do {
4912 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
4913
4914 ret = ufshcd_dme_link_startup(hba);
4915
4916 /* check if device is detected by inter-connect layer */
4917 if (!ret && !ufshcd_is_device_present(hba)) {
4918 ufshcd_update_evt_hist(hba,
4919 UFS_EVT_LINK_STARTUP_FAIL,
4920 0);
4921 dev_err(hba->dev, "%s: Device not present\n", __func__);
4922 ret = -ENXIO;
4923 goto out;
4924 }
4925
4926 /*
4927 * DME link lost indication is only received when link is up,
4928 * but we can't be sure if the link is up until link startup
4929 * succeeds. So reset the local Uni-Pro and try again.
4930 */
4931 if (ret && retries && ufshcd_hba_enable(hba)) {
4932 ufshcd_update_evt_hist(hba,
4933 UFS_EVT_LINK_STARTUP_FAIL,
4934 (u32)ret);
4935 goto out;
4936 }
4937 } while (ret && retries--);
4938
4939 if (ret) {
4940 /* failed to get the link up... retire */
4941 ufshcd_update_evt_hist(hba,
4942 UFS_EVT_LINK_STARTUP_FAIL,
4943 (u32)ret);
4944 goto out;
4945 }
4946
4947 if (link_startup_again) {
4948 link_startup_again = false;
4949 retries = DME_LINKSTARTUP_RETRIES;
4950 goto link_startup;
4951 }
4952
4953 /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
4954 ufshcd_init_pwr_info(hba);
4955 ufshcd_print_pwr_info(hba);
4956
4957 if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
4958 ret = ufshcd_disable_device_tx_lcc(hba);
4959 if (ret)
4960 goto out;
4961 }
4962
4963 /* Include any host controller configuration via UIC commands */
4964 ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
4965 if (ret)
4966 goto out;
4967
4968 	/* Clear UECPA once, in case a LINERESET happened during LINK_STARTUP */
4969 ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
4970 ret = ufshcd_make_hba_operational(hba);
4971 out:
4972 if (ret) {
4973 dev_err(hba->dev, "link startup failed %d\n", ret);
4974 ufshcd_print_host_state(hba);
4975 ufshcd_print_pwr_info(hba);
4976 ufshcd_print_evt_hist(hba);
4977 }
4978 return ret;
4979 }
4980
4981 /**
4982 * ufshcd_verify_dev_init() - Verify device initialization
4983 * @hba: per-adapter instance
4984 *
4985 * Send NOP OUT UPIU and wait for NOP IN response to check whether the
4986 * device Transport Protocol (UTP) layer is ready after a reset.
4987 * If the UTP layer at the device side is not initialized, it may
4988 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
4989 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
4990 *
4991 * Return: 0 upon success; < 0 upon failure.
4992 */
4993 static int ufshcd_verify_dev_init(struct ufs_hba *hba)
4994 {
4995 int err = 0;
4996 int retries;
4997
4998 ufshcd_hold(hba);
4999 mutex_lock(&hba->dev_cmd.lock);
5000 for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
5001 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
5002 hba->nop_out_timeout);
5003
5004 if (!err || err == -ETIMEDOUT)
5005 break;
5006
5007 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
5008 }
5009 mutex_unlock(&hba->dev_cmd.lock);
5010 ufshcd_release(hba);
5011
5012 if (err)
5013 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
5014 return err;
5015 }
5016
5017 /**
5018 * ufshcd_setup_links - associate link b/w device wlun and other luns
5019 * @sdev: pointer to SCSI device
5020 * @hba: pointer to ufs hba
5021 */
5022 static void ufshcd_setup_links(struct ufs_hba *hba, struct scsi_device *sdev)
5023 {
5024 struct device_link *link;
5025
5026 /*
5027 * Device wlun is the supplier & rest of the luns are consumers.
5028 * This ensures that device wlun suspends after all other luns.
5029 */
5030 if (hba->ufs_device_wlun) {
5031 link = device_link_add(&sdev->sdev_gendev,
5032 &hba->ufs_device_wlun->sdev_gendev,
5033 DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
5034 if (!link) {
5035 dev_err(&sdev->sdev_gendev, "Failed establishing link - %s\n",
5036 dev_name(&hba->ufs_device_wlun->sdev_gendev));
5037 return;
5038 }
5039 hba->luns_avail--;
5040 /* Ignore REPORT_LUN wlun probing */
5041 if (hba->luns_avail == 1) {
5042 ufshcd_rpm_put(hba);
5043 return;
5044 }
5045 } else {
5046 /*
5047 * Device wlun is probed. The assumption is that WLUNs are
5048 * scanned before other LUNs.
5049 */
5050 hba->luns_avail--;
5051 }
5052 }
5053
5054 /**
5055 * ufshcd_lu_init - Initialize the relevant parameters of the LU
5056 * @hba: per-adapter instance
5057 * @sdev: pointer to SCSI device
5058 */
5059 static void ufshcd_lu_init(struct ufs_hba *hba, struct scsi_device *sdev)
5060 {
5061 int len = QUERY_DESC_MAX_SIZE;
5062 u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);
5063 u8 lun_qdepth = hba->nutrs;
5064 u8 *desc_buf;
5065 int ret;
5066
5067 desc_buf = kzalloc(len, GFP_KERNEL);
5068 if (!desc_buf)
5069 goto set_qdepth;
5070
5071 ret = ufshcd_read_unit_desc_param(hba, lun, 0, desc_buf, len);
5072 if (ret < 0) {
5073 if (ret == -EOPNOTSUPP)
5074 /* If LU doesn't support unit descriptor, its queue depth is set to 1 */
5075 lun_qdepth = 1;
5076 kfree(desc_buf);
5077 goto set_qdepth;
5078 }
5079
5080 if (desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH]) {
5081 /*
5082 		 * In the per-LU queueing architecture bLUQueueDepth will not be 0, so use
5083 		 * the smaller of UFSHCI CAP.NUTRS and the UFS LU bLUQueueDepth
5084 */
5085 lun_qdepth = min_t(int, desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH], hba->nutrs);
5086 }
5087 /*
5088 * According to UFS device specification, the write protection mode is only supported by
5089 * normal LU, not supported by WLUN.
5090 */
5091 if (hba->dev_info.f_power_on_wp_en && lun < hba->dev_info.max_lu_supported &&
5092 !hba->dev_info.is_lu_power_on_wp &&
5093 desc_buf[UNIT_DESC_PARAM_LU_WR_PROTECT] == UFS_LU_POWER_ON_WP)
5094 hba->dev_info.is_lu_power_on_wp = true;
5095
5096 /* In case of RPMB LU, check if advanced RPMB mode is enabled */
5097 if (desc_buf[UNIT_DESC_PARAM_UNIT_INDEX] == UFS_UPIU_RPMB_WLUN &&
5098 desc_buf[RPMB_UNIT_DESC_PARAM_REGION_EN] & BIT(4))
5099 hba->dev_info.b_advanced_rpmb_en = true;
5100
5101
5102 kfree(desc_buf);
5103 set_qdepth:
5104 /*
5105 	 * For WLUNs that don't support a unit descriptor, the queue depth is set to 1. For LUs
5106 	 * whose bLUQueueDepth == 0, the queue depth is set to the maximum number of requests the host can queue.
5107 */
5108 dev_dbg(hba->dev, "Set LU %x queue depth %d\n", lun, lun_qdepth);
5109 scsi_change_queue_depth(sdev, lun_qdepth);
5110 }
5111
5112 /**
5113 * ufshcd_slave_alloc - handle initial SCSI device configurations
5114 * @sdev: pointer to SCSI device
5115 *
5116 * Return: success.
5117 */
5118 static int ufshcd_slave_alloc(struct scsi_device *sdev)
5119 {
5120 struct ufs_hba *hba;
5121
5122 hba = shost_priv(sdev->host);
5123
5124 /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
5125 sdev->use_10_for_ms = 1;
5126
5127 /* DBD field should be set to 1 in mode sense(10) */
5128 sdev->set_dbd_for_ms = 1;
5129
5130 /* allow SCSI layer to restart the device in case of errors */
5131 sdev->allow_restart = 1;
5132
5133 /* REPORT SUPPORTED OPERATION CODES is not supported */
5134 sdev->no_report_opcodes = 1;
5135
5136 /* WRITE_SAME command is not supported */
5137 sdev->no_write_same = 1;
5138
5139 ufshcd_lu_init(hba, sdev);
5140
5141 ufshcd_setup_links(hba, sdev);
5142
5143 return 0;
5144 }
5145
5146 /**
5147 * ufshcd_change_queue_depth - change queue depth
5148 * @sdev: pointer to SCSI device
5149 * @depth: required depth to set
5150 *
5151 * Change queue depth and make sure the max. limits are not crossed.
5152 *
5153 * Return: new queue depth.
5154 */
5155 static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
5156 {
5157 return scsi_change_queue_depth(sdev, min(depth, sdev->host->can_queue));
5158 }
5159
5160 /**
5161 * ufshcd_slave_configure - adjust SCSI device configurations
5162 * @sdev: pointer to SCSI device
5163 *
5164 * Return: 0 (success).
5165 */
5166 static int ufshcd_slave_configure(struct scsi_device *sdev)
5167 {
5168 struct ufs_hba *hba = shost_priv(sdev->host);
5169 struct request_queue *q = sdev->request_queue;
5170
5171 blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
5172 if (hba->quirks & UFSHCD_QUIRK_4KB_DMA_ALIGNMENT)
5173 blk_queue_update_dma_alignment(q, SZ_4K - 1);
5174 /*
5175 * Block runtime-pm until all consumers are added.
5176 * Refer ufshcd_setup_links().
5177 */
5178 if (is_device_wlun(sdev))
5179 pm_runtime_get_noresume(&sdev->sdev_gendev);
5180 else if (ufshcd_is_rpm_autosuspend_allowed(hba))
5181 sdev->rpm_autosuspend = 1;
5182 /*
5183 * Do not print messages during runtime PM to avoid never-ending cycles
5184 * of messages written back to storage by user space causing runtime
5185 * resume, causing more messages and so on.
5186 */
5187 sdev->silence_suspend = 1;
5188
5189 ufshcd_crypto_register(hba, q);
5190
5191 return 0;
5192 }
5193
5194 /**
5195 * ufshcd_slave_destroy - remove SCSI device configurations
5196 * @sdev: pointer to SCSI device
5197 */
5198 static void ufshcd_slave_destroy(struct scsi_device *sdev)
5199 {
5200 struct ufs_hba *hba;
5201 unsigned long flags;
5202
5203 hba = shost_priv(sdev->host);
5204
5205 /* Drop the reference as it won't be needed anymore */
5206 if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
5207 spin_lock_irqsave(hba->host->host_lock, flags);
5208 hba->ufs_device_wlun = NULL;
5209 spin_unlock_irqrestore(hba->host->host_lock, flags);
5210 } else if (hba->ufs_device_wlun) {
5211 struct device *supplier = NULL;
5212
5213 /* Ensure UFS Device WLUN exists and does not disappear */
5214 spin_lock_irqsave(hba->host->host_lock, flags);
5215 if (hba->ufs_device_wlun) {
5216 supplier = &hba->ufs_device_wlun->sdev_gendev;
5217 get_device(supplier);
5218 }
5219 spin_unlock_irqrestore(hba->host->host_lock, flags);
5220
5221 if (supplier) {
5222 /*
5223 * If a LUN fails to probe (e.g. absent BOOT WLUN), the
5224 * device will not have been registered but can still
5225 * have a device link holding a reference to the device.
5226 */
5227 device_link_remove(&sdev->sdev_gendev, supplier);
5228 put_device(supplier);
5229 }
5230 }
5231 }
5232
5233 /**
5234 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
5235 * @lrbp: pointer to local reference block of completed command
5236 * @scsi_status: SCSI command status
5237 *
5238 * Return: value based on SCSI command status.
5239 */
5240 static inline int
5241 ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
5242 {
5243 int result = 0;
5244
5245 switch (scsi_status) {
5246 case SAM_STAT_CHECK_CONDITION:
5247 ufshcd_copy_sense_data(lrbp);
5248 fallthrough;
5249 case SAM_STAT_GOOD:
5250 result |= DID_OK << 16 | scsi_status;
5251 break;
5252 case SAM_STAT_TASK_SET_FULL:
5253 case SAM_STAT_BUSY:
5254 case SAM_STAT_TASK_ABORTED:
5255 ufshcd_copy_sense_data(lrbp);
5256 result |= scsi_status;
5257 break;
5258 default:
5259 result |= DID_ERROR << 16;
5260 break;
5261 } /* end of switch */
5262
5263 return result;
5264 }
5265
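/*
 * Illustrative example (not driver code): the result word built above
 * packs the host byte into bits 23:16 and the SAM status into the low
 * byte. For a CHECK CONDITION completion:
 *
 *	result = DID_OK << 16 | SAM_STAT_CHECK_CONDITION;
 *
 * so host_byte(result) is DID_OK and the low byte carries
 * SAM_STAT_CHECK_CONDITION, which the SCSI midlayer decodes together
 * with the sense data copied by ufshcd_copy_sense_data().
 */
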
5266 /**
5267 * ufshcd_transfer_rsp_status - Get overall status of the response
5268 * @hba: per adapter instance
5269 * @lrbp: pointer to local reference block of completed command
5270 * @cqe: pointer to the completion queue entry
5271 *
5272 * Return: result of the command to notify SCSI midlayer.
5273 */
5274 static inline int
5275 ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
5276 struct cq_entry *cqe)
5277 {
5278 int result = 0;
5279 int scsi_status;
5280 enum utp_ocs ocs;
5281 u8 upiu_flags;
5282 u32 resid;
5283
5284 upiu_flags = lrbp->ucd_rsp_ptr->header.flags;
5285 resid = be32_to_cpu(lrbp->ucd_rsp_ptr->sr.residual_transfer_count);
5286 /*
5287 * Test !overflow instead of underflow to support UFS devices that do
5288 * not set either flag.
5289 */
5290 if (resid && !(upiu_flags & UPIU_RSP_FLAG_OVERFLOW))
5291 scsi_set_resid(lrbp->cmd, resid);
5292
5293 /* overall command status of utrd */
5294 ocs = ufshcd_get_tr_ocs(lrbp, cqe);
5295
5296 if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) {
5297 if (lrbp->ucd_rsp_ptr->header.response ||
5298 lrbp->ucd_rsp_ptr->header.status)
5299 ocs = OCS_SUCCESS;
5300 }
5301
5302 switch (ocs) {
5303 case OCS_SUCCESS:
5304 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
5305 switch (ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr)) {
5306 case UPIU_TRANSACTION_RESPONSE:
5307 /*
5308 * get the result based on SCSI status response
5309 * to notify the SCSI midlayer of the command status
5310 */
5311 scsi_status = lrbp->ucd_rsp_ptr->header.status;
5312 result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
5313
5314 /*
5315 * Currently we are only supporting BKOPs exception
5316 * events hence we can ignore BKOPs exception event
5317 * during power management callbacks. BKOPs exception
5318 * event is not expected to be raised in runtime suspend
5319 * callback as it allows the urgent bkops.
5320 * During system suspend, we are anyway forcefully
5321 * disabling the bkops and if urgent bkops is needed
5322 * it will be enabled on system resume. Long term
5323 * solution could be to abort the system suspend if
5324 * UFS device needs urgent BKOPs.
5325 */
5326 if (!hba->pm_op_in_progress &&
5327 !ufshcd_eh_in_progress(hba) &&
5328 ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
5329 /* Flushed in suspend */
5330 schedule_work(&hba->eeh_work);
5331 break;
5332 case UPIU_TRANSACTION_REJECT_UPIU:
5333 /* TODO: handle Reject UPIU Response */
5334 result = DID_ERROR << 16;
5335 dev_err(hba->dev,
5336 "Reject UPIU not fully implemented\n");
5337 break;
5338 default:
5339 dev_err(hba->dev,
5340 "Unexpected request response code = %x\n",
5341 result);
5342 result = DID_ERROR << 16;
5343 break;
5344 }
5345 break;
5346 case OCS_ABORTED:
5347 result |= DID_ABORT << 16;
5348 break;
5349 case OCS_INVALID_COMMAND_STATUS:
5350 result |= DID_REQUEUE << 16;
5351 break;
5352 case OCS_INVALID_CMD_TABLE_ATTR:
5353 case OCS_INVALID_PRDT_ATTR:
5354 case OCS_MISMATCH_DATA_BUF_SIZE:
5355 case OCS_MISMATCH_RESP_UPIU_SIZE:
5356 case OCS_PEER_COMM_FAILURE:
5357 case OCS_FATAL_ERROR:
5358 case OCS_DEVICE_FATAL_ERROR:
5359 case OCS_INVALID_CRYPTO_CONFIG:
5360 case OCS_GENERAL_CRYPTO_ERROR:
5361 default:
5362 result |= DID_ERROR << 16;
5363 dev_err(hba->dev,
5364 "OCS error from controller = %x for tag %d\n",
5365 ocs, lrbp->task_tag);
5366 ufshcd_print_evt_hist(hba);
5367 ufshcd_print_host_state(hba);
5368 break;
5369 } /* end of switch */
5370
5371 if ((host_byte(result) != DID_OK) &&
5372 (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs)
5373 ufshcd_print_tr(hba, lrbp->task_tag, true);
5374 return result;
5375 }
5376
5377 static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
5378 u32 intr_mask)
5379 {
5380 if (!ufshcd_is_auto_hibern8_supported(hba) ||
5381 !ufshcd_is_auto_hibern8_enabled(hba))
5382 return false;
5383
5384 if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
5385 return false;
5386
5387 if (hba->active_uic_cmd &&
5388 (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
5389 hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
5390 return false;
5391
5392 return true;
5393 }
5394
5395 /**
5396 * ufshcd_uic_cmd_compl - handle completion of uic command
5397 * @hba: per adapter instance
5398 * @intr_status: interrupt status generated by the controller
5399 *
5400 * Return:
5401 * IRQ_HANDLED - If interrupt is valid
5402 * IRQ_NONE - If invalid interrupt
5403 */
5404 static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
5405 {
5406 irqreturn_t retval = IRQ_NONE;
5407
5408 spin_lock(hba->host->host_lock);
5409 if (ufshcd_is_auto_hibern8_error(hba, intr_status))
5410 hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
5411
5412 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
5413 hba->active_uic_cmd->argument2 |=
5414 ufshcd_get_uic_cmd_result(hba);
5415 hba->active_uic_cmd->argument3 =
5416 ufshcd_get_dme_attr_val(hba);
5417 if (!hba->uic_async_done)
5418 hba->active_uic_cmd->cmd_active = 0;
5419 complete(&hba->active_uic_cmd->done);
5420 retval = IRQ_HANDLED;
5421 }
5422
5423 if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
5424 hba->active_uic_cmd->cmd_active = 0;
5425 complete(hba->uic_async_done);
5426 retval = IRQ_HANDLED;
5427 }
5428
5429 if (retval == IRQ_HANDLED)
5430 ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd,
5431 UFS_CMD_COMP);
5432 spin_unlock(hba->host->host_lock);
5433 return retval;
5434 }
5435
5436 /* Release the resources allocated for processing a SCSI command. */
5437 void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
5438 struct ufshcd_lrb *lrbp)
5439 {
5440 struct scsi_cmnd *cmd = lrbp->cmd;
5441
5442 scsi_dma_unmap(cmd);
5443 ufshcd_release(hba);
5444 ufshcd_clk_scaling_update_busy(hba);
5445 }
5446
5447 /**
5448 * ufshcd_compl_one_cqe - handle a completion queue entry
5449 * @hba: per adapter instance
5450 * @task_tag: the task tag of the request to be completed
5451 * @cqe: pointer to the completion queue entry
5452 */
5453 void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
5454 struct cq_entry *cqe)
5455 {
5456 struct ufshcd_lrb *lrbp;
5457 struct scsi_cmnd *cmd;
5458 enum utp_ocs ocs;
5459
5460 lrbp = &hba->lrb[task_tag];
5461 lrbp->compl_time_stamp = ktime_get();
5462 lrbp->compl_time_stamp_local_clock = local_clock();
5463 cmd = lrbp->cmd;
5464 if (cmd) {
5465 if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
5466 ufshcd_update_monitor(hba, lrbp);
5467 ufshcd_add_command_trace(hba, task_tag, UFS_CMD_COMP);
5468 cmd->result = ufshcd_transfer_rsp_status(hba, lrbp, cqe);
5469 ufshcd_release_scsi_cmd(hba, lrbp);
5470 /* Do not touch lrbp after scsi done */
5471 scsi_done(cmd);
5472 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
5473 lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
5474 if (hba->dev_cmd.complete) {
5475 if (cqe) {
5476 ocs = le32_to_cpu(cqe->status) & MASK_OCS;
5477 lrbp->utr_descriptor_ptr->header.ocs = ocs;
5478 }
5479 complete(hba->dev_cmd.complete);
5480 ufshcd_clk_scaling_update_busy(hba);
5481 }
5482 }
5483 }
5484
5485 /**
5486 * __ufshcd_transfer_req_compl - handle SCSI and query command completion
5487 * @hba: per adapter instance
5488 * @completed_reqs: bitmask that indicates which requests to complete
5489 */
5490 static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
5491 unsigned long completed_reqs)
5492 {
5493 int tag;
5494
5495 for_each_set_bit(tag, &completed_reqs, hba->nutrs)
5496 ufshcd_compl_one_cqe(hba, tag, NULL);
5497 }
5498
5499 /* Any value that is not an existing queue number is fine for this constant. */
5500 enum {
5501 UFSHCD_POLL_FROM_INTERRUPT_CONTEXT = -1
5502 };
5503
5504 static void ufshcd_clear_polled(struct ufs_hba *hba,
5505 unsigned long *completed_reqs)
5506 {
5507 int tag;
5508
5509 for_each_set_bit(tag, completed_reqs, hba->nutrs) {
5510 struct scsi_cmnd *cmd = hba->lrb[tag].cmd;
5511
5512 if (!cmd)
5513 continue;
5514 if (scsi_cmd_to_rq(cmd)->cmd_flags & REQ_POLLED)
5515 __clear_bit(tag, completed_reqs);
5516 }
5517 }
5518
5519 /*
5520 * Return: > 0 if one or more commands have been completed or 0 if no
5521 * requests have been completed.
5522 */
5523 static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
5524 {
5525 struct ufs_hba *hba = shost_priv(shost);
5526 unsigned long completed_reqs, flags;
5527 u32 tr_doorbell;
5528 struct ufs_hw_queue *hwq;
5529
5530 if (is_mcq_enabled(hba)) {
5531 hwq = &hba->uhq[queue_num];
5532
5533 return ufshcd_mcq_poll_cqe_lock(hba, hwq);
5534 }
5535
5536 spin_lock_irqsave(&hba->outstanding_lock, flags);
5537 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
5538 completed_reqs = ~tr_doorbell & hba->outstanding_reqs;
5539 WARN_ONCE(completed_reqs & ~hba->outstanding_reqs,
5540 "completed: %#lx; outstanding: %#lx\n", completed_reqs,
5541 hba->outstanding_reqs);
5542 if (queue_num == UFSHCD_POLL_FROM_INTERRUPT_CONTEXT) {
5543 /* Do not complete polled requests from interrupt context. */
5544 ufshcd_clear_polled(hba, &completed_reqs);
5545 }
5546 hba->outstanding_reqs &= ~completed_reqs;
5547 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
5548
5549 if (completed_reqs)
5550 __ufshcd_transfer_req_compl(hba, completed_reqs);
5551
5552 return completed_reqs != 0;
5553 }
5554
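/*
 * Illustrative example (not driver code) of the doorbell arithmetic in
 * ufshcd_poll(): with hba->outstanding_reqs = 0b1011 and a doorbell
 * readback of tr_doorbell = 0b0010 (only tag 1 still owned by the
 * controller),
 *
 *	completed_reqs = ~tr_doorbell & hba->outstanding_reqs;	// 0b1001
 *
 * i.e. tags 0 and 3 have completed while tag 1 remains outstanding.
 */
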
5555 /**
5556 * ufshcd_mcq_compl_pending_transfer - MCQ mode function. It is
5557 * invoked from the error handler context or ufshcd_host_reset_and_restore()
5558 * to complete the pending transfers and free the resources associated with
5559 * the scsi command.
5560 *
5561 * @hba: per adapter instance
5562 * @force_compl: This flag is set to true when invoked
5563 * from ufshcd_host_reset_and_restore() in which case it requires special
5564 * handling because the host controller has been reset by ufshcd_hba_stop().
5565 */
5566 static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba,
5567 bool force_compl)
5568 {
5569 struct ufs_hw_queue *hwq;
5570 struct ufshcd_lrb *lrbp;
5571 struct scsi_cmnd *cmd;
5572 unsigned long flags;
5573 u32 hwq_num, utag;
5574 int tag;
5575
5576 for (tag = 0; tag < hba->nutrs; tag++) {
5577 lrbp = &hba->lrb[tag];
5578 cmd = lrbp->cmd;
5579 if (!ufshcd_cmd_inflight(cmd) ||
5580 test_bit(SCMD_STATE_COMPLETE, &cmd->state))
5581 continue;
5582
5583 utag = blk_mq_unique_tag(scsi_cmd_to_rq(cmd));
5584 hwq_num = blk_mq_unique_tag_to_hwq(utag);
5585 hwq = &hba->uhq[hwq_num];
5586
5587 if (force_compl) {
5588 ufshcd_mcq_compl_all_cqes_lock(hba, hwq);
5589 /*
5590 * For those cmds of which the cqes are not present
5591 * in the cq, complete them explicitly.
5592 */
5593 if (cmd && !test_bit(SCMD_STATE_COMPLETE, &cmd->state)) {
5594 spin_lock_irqsave(&hwq->cq_lock, flags);
5595 set_host_byte(cmd, DID_REQUEUE);
5596 ufshcd_release_scsi_cmd(hba, lrbp);
5597 scsi_done(cmd);
5598 spin_unlock_irqrestore(&hwq->cq_lock, flags);
5599 }
5600 } else {
5601 ufshcd_mcq_poll_cqe_lock(hba, hwq);
5602 }
5603 }
5604 }
5605
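/*
 * Illustrative example (not driver code): the hardware-queue lookup used
 * above. blk_mq_unique_tag() encodes the hardware queue index in the
 * upper half of the value, so for a command dispatched on hardware
 * queue 2 with tag 7:
 *
 *	utag = blk_mq_unique_tag(scsi_cmd_to_rq(cmd));
 *	blk_mq_unique_tag_to_hwq(utag);		// 2
 *	blk_mq_unique_tag_to_tag(utag);		// 7
 */
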
5606 /**
5607 * ufshcd_transfer_req_compl - handle SCSI and query command completion
5608 * @hba: per adapter instance
5609 *
5610 * Return:
5611 * IRQ_HANDLED - If interrupt is valid
5612 * IRQ_NONE - If invalid interrupt
5613 */
5614 static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
5615 {
5616 /* Resetting interrupt aggregation counters first and reading the
5617 * DOOR_BELL afterward allows us to handle all the completed requests.
5618 * In order to prevent other interrupts starvation the DB is read once
5619 * after reset. The down side of this solution is the possibility of
5620 * false interrupt if device completes another request after resetting
5621 * aggregation and before reading the DB.
5622 */
5623 if (ufshcd_is_intr_aggr_allowed(hba) &&
5624 !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
5625 ufshcd_reset_intr_aggr(hba);
5626
5627 if (ufs_fail_completion())
5628 return IRQ_HANDLED;
5629
5630 /*
5631 * Ignore the ufshcd_poll() return value and return IRQ_HANDLED since we
5632 * do not want polling to trigger spurious interrupt complaints.
5633 */
5634 ufshcd_poll(hba->host, UFSHCD_POLL_FROM_INTERRUPT_CONTEXT);
5635
5636 return IRQ_HANDLED;
5637 }
5638
5639 int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask)
5640 {
5641 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
5642 QUERY_ATTR_IDN_EE_CONTROL, 0, 0,
5643 &ee_ctrl_mask);
5644 }
5645
5646 int ufshcd_write_ee_control(struct ufs_hba *hba)
5647 {
5648 int err;
5649
5650 mutex_lock(&hba->ee_ctrl_mutex);
5651 err = __ufshcd_write_ee_control(hba, hba->ee_ctrl_mask);
5652 mutex_unlock(&hba->ee_ctrl_mutex);
5653 if (err)
5654 dev_err(hba->dev, "%s: failed to write ee control %d\n",
5655 __func__, err);
5656 return err;
5657 }
5658
5659 int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask,
5660 const u16 *other_mask, u16 set, u16 clr)
5661 {
5662 u16 new_mask, ee_ctrl_mask;
5663 int err = 0;
5664
5665 mutex_lock(&hba->ee_ctrl_mutex);
5666 new_mask = (*mask & ~clr) | set;
5667 ee_ctrl_mask = new_mask | *other_mask;
5668 if (ee_ctrl_mask != hba->ee_ctrl_mask)
5669 err = __ufshcd_write_ee_control(hba, ee_ctrl_mask);
5670 /* Still need to update 'mask' even if 'ee_ctrl_mask' was unchanged */
5671 if (!err) {
5672 hba->ee_ctrl_mask = ee_ctrl_mask;
5673 *mask = new_mask;
5674 }
5675 mutex_unlock(&hba->ee_ctrl_mutex);
5676 return err;
5677 }
5678
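/*
 * Illustrative example (not driver code): how the two exception event
 * masks combine before being written to the EE control attribute
 * (QUERY_ATTR_IDN_EE_CONTROL). With *mask = 0x04, *other_mask = 0x10,
 * set = 0x08 and clr = 0x04:
 *
 *	new_mask     = (0x04 & ~0x04) | 0x08;	// 0x08
 *	ee_ctrl_mask = 0x08 | 0x10;		// 0x18
 *
 * The attribute write is only issued when ee_ctrl_mask differs from the
 * cached hba->ee_ctrl_mask; *mask is refreshed either way on success.
 */
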
5679 /**
5680 * ufshcd_disable_ee - disable exception event
5681 * @hba: per-adapter instance
5682 * @mask: exception event to disable
5683 *
5684 * Disables exception event in the device so that the EVENT_ALERT
5685 * bit is not set.
5686 *
5687 * Return: zero on success, non-zero error value on failure.
5688 */
5689 static inline int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
5690 {
5691 return ufshcd_update_ee_drv_mask(hba, 0, mask);
5692 }
5693
5694 /**
5695 * ufshcd_enable_ee - enable exception event
5696 * @hba: per-adapter instance
5697 * @mask: exception event to enable
5698 *
5699 * Enable corresponding exception event in the device to allow
5700 * device to alert host in critical scenarios.
5701 *
5702 * Return: zero on success, non-zero error value on failure.
5703 */
5704 static inline int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
5705 {
5706 return ufshcd_update_ee_drv_mask(hba, mask, 0);
5707 }
5708
5709 /**
5710 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
5711 * @hba: per-adapter instance
5712 *
5713 * Allow device to manage background operations on its own. Enabling
5714 * this might lead to inconsistent latencies during normal data transfers
5715 * as the device is allowed to manage its own way of handling background
5716 * operations.
5717 *
5718 * Return: zero on success, non-zero on failure.
5719 */
5720 static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
5721 {
5722 int err = 0;
5723
5724 if (hba->auto_bkops_enabled)
5725 goto out;
5726
5727 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
5728 QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
5729 if (err) {
5730 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
5731 __func__, err);
5732 goto out;
5733 }
5734
5735 hba->auto_bkops_enabled = true;
5736 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
5737
5738 /* No need of URGENT_BKOPS exception from the device */
5739 err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5740 if (err)
5741 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
5742 __func__, err);
5743 out:
5744 return err;
5745 }
5746
5747 /**
5748 * ufshcd_disable_auto_bkops - block device in doing background operations
5749 * @hba: per-adapter instance
5750 *
5751 * Disabling background operations improves command response latency but
5752 * has the drawback of the device moving into a critical state where it is
5753 * not operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
5754 * host is idle so that BKOPS are managed effectively without any negative
5755 * impacts.
5756 *
5757 * Return: zero on success, non-zero on failure.
5758 */
5759 static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
5760 {
5761 int err = 0;
5762
5763 if (!hba->auto_bkops_enabled)
5764 goto out;
5765
5766 /*
5767 * If host assisted BKOPs is to be enabled, make sure
5768 * urgent bkops exception is allowed.
5769 */
5770 err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
5771 if (err) {
5772 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
5773 __func__, err);
5774 goto out;
5775 }
5776
5777 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
5778 QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
5779 if (err) {
5780 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
5781 __func__, err);
5782 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5783 goto out;
5784 }
5785
5786 hba->auto_bkops_enabled = false;
5787 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
5788 hba->is_urgent_bkops_lvl_checked = false;
5789 out:
5790 return err;
5791 }
5792
5793 /**
5794 * ufshcd_force_reset_auto_bkops - force reset auto bkops state
5795 * @hba: per adapter instance
5796 *
5797 * After a device reset the device may toggle the BKOPS_EN flag
5798 * to its default value. The s/w tracking variables should be updated
5799 * as well. This function changes the auto-bkops state based on
5800 * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
5801 */
5802 static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
5803 {
5804 if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
5805 hba->auto_bkops_enabled = false;
5806 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
5807 ufshcd_enable_auto_bkops(hba);
5808 } else {
5809 hba->auto_bkops_enabled = true;
5810 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
5811 ufshcd_disable_auto_bkops(hba);
5812 }
5813 hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
5814 hba->is_urgent_bkops_lvl_checked = false;
5815 }
5816
5817 static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
5818 {
5819 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5820 QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
5821 }
5822
5823 /**
5824 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
5825 * @hba: per-adapter instance
5826 * @status: bkops_status value
5827 *
5828 * Read the bkops_status from the UFS device and enable the fBackgroundOpsEn
5829 * flag in the device to permit background operations if the device's
5830 * bkops_status is greater than or equal to the "status" argument passed to
5831 * this function; disable it otherwise.
5832 *
5833 * Return: 0 for success, non-zero in case of failure.
5834 *
5835 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
5836 * to know whether auto bkops is enabled or disabled after this function
5837 * returns control to it.
5838 */
5839 static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
5840 enum bkops_status status)
5841 {
5842 int err;
5843 u32 curr_status = 0;
5844
5845 err = ufshcd_get_bkops_status(hba, &curr_status);
5846 if (err) {
5847 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5848 __func__, err);
5849 goto out;
5850 } else if (curr_status > BKOPS_STATUS_MAX) {
5851 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
5852 __func__, curr_status);
5853 err = -EINVAL;
5854 goto out;
5855 }
5856
5857 if (curr_status >= status)
5858 err = ufshcd_enable_auto_bkops(hba);
5859 else
5860 err = ufshcd_disable_auto_bkops(hba);
5861 out:
5862 return err;
5863 }
5864
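/*
 * Illustrative example (not driver code): with the threshold passed in
 * as BKOPS_STATUS_PERF_IMPACT, a device reporting a bkops status of
 * BKOPS_STATUS_CRITICAL (>= threshold) keeps auto-bkops enabled,
 * whereas BKOPS_STATUS_NO_OP (< threshold) disables it:
 *
 *	err = ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT);
 *	if (!err && hba->auto_bkops_enabled)
 *		;	// device is allowed to keep running background ops
 */
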
5865 /**
5866 * ufshcd_urgent_bkops - handle urgent bkops exception event
5867 * @hba: per-adapter instance
5868 *
5869 * Enable fBackgroundOpsEn flag in the device to permit background
5870 * operations.
5871 *
5872 * If BKOPs is enabled, this function returns 0; it returns 1 if bkops is not
5873 * enabled, and a negative error value for any other failure.
5874 *
5875 * Return: 0 upon success; < 0 upon failure.
5876 */
5877 static int ufshcd_urgent_bkops(struct ufs_hba *hba)
5878 {
5879 return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
5880 }
5881
5882 static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
5883 {
5884 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5885 QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
5886 }
5887
5888 static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
5889 {
5890 int err;
5891 u32 curr_status = 0;
5892
5893 if (hba->is_urgent_bkops_lvl_checked)
5894 goto enable_auto_bkops;
5895
5896 err = ufshcd_get_bkops_status(hba, &curr_status);
5897 if (err) {
5898 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5899 __func__, err);
5900 goto out;
5901 }
5902
5903 /*
5904 * We are seeing that some devices are raising the urgent bkops
5905 * exception events even when the BKOPS status doesn't indicate performance
5906 * impacted or critical. Handle such devices by determining their urgent
5907 * bkops status at runtime.
5908 */
5909 if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
5910 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
5911 __func__, curr_status);
5912 /* update the current status as the urgent bkops level */
5913 hba->urgent_bkops_lvl = curr_status;
5914 hba->is_urgent_bkops_lvl_checked = true;
5915 }
5916
5917 enable_auto_bkops:
5918 err = ufshcd_enable_auto_bkops(hba);
5919 out:
5920 if (err < 0)
5921 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
5922 __func__, err);
5923 }
5924
5925 static void ufshcd_temp_exception_event_handler(struct ufs_hba *hba, u16 status)
5926 {
5927 u32 value;
5928
5929 if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5930 QUERY_ATTR_IDN_CASE_ROUGH_TEMP, 0, 0, &value))
5931 return;
5932
5933 dev_info(hba->dev, "exception Tcase %d\n", value - 80);
5934
5935 ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP);
5936
5937 /*
5938 * A placeholder for the platform vendors to add whatever additional
5939 * steps required
5940 */
5941 }
5942
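/*
 * Illustrative example (not driver code): the rough case temperature
 * attribute read above is reported with an offset of 80, so an
 * attribute value of 105 corresponds to an exception Tcase of
 * 105 - 80 = 25 degrees, which is what the dev_info() message prints.
 */
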
5943 static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn)
5944 {
5945 u8 index;
5946 enum query_opcode opcode = set ? UPIU_QUERY_OPCODE_SET_FLAG :
5947 UPIU_QUERY_OPCODE_CLEAR_FLAG;
5948
5949 index = ufshcd_wb_get_query_index(hba);
5950 return ufshcd_query_flag_retry(hba, opcode, idn, index, NULL);
5951 }
5952
5953 int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable)
5954 {
5955 int ret;
5956
5957 if (!ufshcd_is_wb_allowed(hba) ||
5958 hba->dev_info.wb_enabled == enable)
5959 return 0;
5960
5961 ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_EN);
5962 if (ret) {
5963 dev_err(hba->dev, "%s: Write Booster %s failed %d\n",
5964 __func__, enable ? "enabling" : "disabling", ret);
5965 return ret;
5966 }
5967
5968 hba->dev_info.wb_enabled = enable;
5969 dev_dbg(hba->dev, "%s: Write Booster %s\n",
5970 __func__, enable ? "enabled" : "disabled");
5971
5972 return ret;
5973 }
5974
5975 static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba,
5976 bool enable)
5977 {
5978 int ret;
5979
5980 ret = __ufshcd_wb_toggle(hba, enable,
5981 QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8);
5982 if (ret) {
5983 dev_err(hba->dev, "%s: WB-Buf Flush during H8 %s failed %d\n",
5984 __func__, enable ? "enabling" : "disabling", ret);
5985 return;
5986 }
5987 dev_dbg(hba->dev, "%s: WB-Buf Flush during H8 %s\n",
5988 __func__, enable ? "enabled" : "disabled");
5989 }
5990
5991 int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable)
5992 {
5993 int ret;
5994
5995 if (!ufshcd_is_wb_allowed(hba) ||
5996 hba->dev_info.wb_buf_flush_enabled == enable)
5997 return 0;
5998
5999 ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN);
6000 if (ret) {
6001 dev_err(hba->dev, "%s: WB-Buf Flush %s failed %d\n",
6002 __func__, enable ? "enabling" : "disabling", ret);
6003 return ret;
6004 }
6005
6006 hba->dev_info.wb_buf_flush_enabled = enable;
6007 dev_dbg(hba->dev, "%s: WB-Buf Flush %s\n",
6008 __func__, enable ? "enabled" : "disabled");
6009
6010 return ret;
6011 }
6012
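/*
 * Illustrative sketch (not part of the driver): how a caller that
 * already holds an active PM reference might turn WriteBooster and its
 * buffer flush on together. The error handling shown is an assumption
 * for illustration only.
 *
 *	int ret = ufshcd_wb_toggle(hba, true);
 *
 *	if (!ret && ufshcd_is_wb_buf_flush_allowed(hba))
 *		ret = ufshcd_wb_toggle_buf_flush(hba, true);
 *	if (ret)
 *		dev_warn(hba->dev, "WB enable failed: %d\n", ret);
 */
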
6013 static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
6014 u32 avail_buf)
6015 {
6016 u32 cur_buf;
6017 int ret;
6018 u8 index;
6019
6020 index = ufshcd_wb_get_query_index(hba);
6021 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
6022 QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE,
6023 index, 0, &cur_buf);
6024 if (ret) {
6025 dev_err(hba->dev, "%s: dCurWriteBoosterBufferSize read failed %d\n",
6026 __func__, ret);
6027 return false;
6028 }
6029
6030 if (!cur_buf) {
6031 dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n",
6032 cur_buf);
6033 return false;
6034 }
6035 /* Let it continue to flush when available buffer exceeds threshold */
6036 return avail_buf < hba->vps->wb_flush_threshold;
6037 }
6038
6039 static void ufshcd_wb_force_disable(struct ufs_hba *hba)
6040 {
6041 if (ufshcd_is_wb_buf_flush_allowed(hba))
6042 ufshcd_wb_toggle_buf_flush(hba, false);
6043
6044 ufshcd_wb_toggle_buf_flush_during_h8(hba, false);
6045 ufshcd_wb_toggle(hba, false);
6046 hba->caps &= ~UFSHCD_CAP_WB_EN;
6047
6048 dev_info(hba->dev, "%s: WB force disabled\n", __func__);
6049 }
6050
6051 static bool ufshcd_is_wb_buf_lifetime_available(struct ufs_hba *hba)
6052 {
6053 u32 lifetime;
6054 int ret;
6055 u8 index;
6056
6057 index = ufshcd_wb_get_query_index(hba);
6058 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
6059 QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST,
6060 index, 0, &lifetime);
6061 if (ret) {
6062 dev_err(hba->dev,
6063 "%s: bWriteBoosterBufferLifeTimeEst read failed %d\n",
6064 __func__, ret);
6065 return false;
6066 }
6067
6068 if (lifetime == UFS_WB_EXCEED_LIFETIME) {
6069 dev_err(hba->dev, "%s: WB buf lifetime is exhausted 0x%02X\n",
6070 __func__, lifetime);
6071 return false;
6072 }
6073
6074 dev_dbg(hba->dev, "%s: WB buf lifetime is 0x%02X\n",
6075 __func__, lifetime);
6076
6077 return true;
6078 }
6079
6080 static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
6081 {
6082 int ret;
6083 u32 avail_buf;
6084 u8 index;
6085
6086 if (!ufshcd_is_wb_allowed(hba))
6087 return false;
6088
6089 if (!ufshcd_is_wb_buf_lifetime_available(hba)) {
6090 ufshcd_wb_force_disable(hba);
6091 return false;
6092 }
6093
6094 /*
6095 * The UFS device needs VCC to be ON to flush.
6096 * With user-space reduction enabled, it's enough to enable flushing
6097 * by checking only the available buffer. The threshold
6098 * defined here is > 90% full.
6099 * With user-space preservation enabled, the current buffer
6100 * should be checked too because the WB buffer size can shrink
6101 * as the disk fills up. This info is provided by the current
6102 * buffer size (dCurrentWriteBoosterBufferSize). There's no point in
6103 * keeping VCC on when the current buffer is empty.
6104 */
6105 index = ufshcd_wb_get_query_index(hba);
6106 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
6107 QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE,
6108 index, 0, &avail_buf);
6109 if (ret) {
6110 dev_warn(hba->dev, "%s: dAvailableWriteBoosterBufferSize read failed %d\n",
6111 __func__, ret);
6112 return false;
6113 }
6114
6115 if (!hba->dev_info.b_presrv_uspc_en)
6116 return avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10);
6117
6118 return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf);
6119 }
6120
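/*
 * Illustrative example (not driver code): without user-space
 * preservation the flush decision above reduces to a single threshold
 * check on dAvailableWriteBoosterBufferSize (reported in 10% units):
 *
 *	avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10)	// true -> flush
 *
 * i.e. a flush is requested once 10% or less of the WriteBooster buffer
 * remains available; above that the flush stays off.
 */
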
6121 static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
6122 {
6123 struct ufs_hba *hba = container_of(to_delayed_work(work),
6124 struct ufs_hba,
6125 rpm_dev_flush_recheck_work);
6126 /*
6127 * To prevent unnecessary VCC power drain after the device finishes a
6128 * WriteBooster buffer flush or Auto BKOPs, force a runtime resume
6129 * after a certain delay so that the threshold is rechecked by the next
6130 * runtime suspend.
6131 */
6132 ufshcd_rpm_get_sync(hba);
6133 ufshcd_rpm_put_sync(hba);
6134 }
6135
6136 /**
6137 * ufshcd_exception_event_handler - handle exceptions raised by device
6138 * @work: pointer to work data
6139 *
6140 * Read bExceptionEventStatus attribute from the device and handle the
6141 * exception event accordingly.
6142 */
6143 static void ufshcd_exception_event_handler(struct work_struct *work)
6144 {
6145 struct ufs_hba *hba;
6146 int err;
6147 u32 status = 0;
6148 hba = container_of(work, struct ufs_hba, eeh_work);
6149
6150 ufshcd_scsi_block_requests(hba);
6151 err = ufshcd_get_ee_status(hba, &status);
6152 if (err) {
6153 dev_err(hba->dev, "%s: failed to get exception status %d\n",
6154 __func__, err);
6155 goto out;
6156 }
6157
6158 trace_ufshcd_exception_event(dev_name(hba->dev), status);
6159
6160 if (status & hba->ee_drv_mask & MASK_EE_URGENT_BKOPS)
6161 ufshcd_bkops_exception_event_handler(hba);
6162
6163 if (status & hba->ee_drv_mask & MASK_EE_URGENT_TEMP)
6164 ufshcd_temp_exception_event_handler(hba, status);
6165
6166 ufs_debugfs_exception_event(hba, status);
6167 out:
6168 ufshcd_scsi_unblock_requests(hba);
6169 }
6170
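/*
 * Illustrative example (not driver code): the exception event status
 * attribute is a bitmask, so a readout with both the urgent BKOPS and
 * urgent temperature bits set, with ee_drv_mask covering both, dispatches
 * both handlers in ufshcd_exception_event_handler() above:
 *
 *	status = MASK_EE_URGENT_BKOPS | MASK_EE_URGENT_TEMP;
 *	if (status & hba->ee_drv_mask & MASK_EE_URGENT_BKOPS)	// true
 *		ufshcd_bkops_exception_event_handler(hba);
 */
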
6171 /* Complete requests that have door-bell cleared */
6172 static void ufshcd_complete_requests(struct ufs_hba *hba, bool force_compl)
6173 {
6174 if (is_mcq_enabled(hba))
6175 ufshcd_mcq_compl_pending_transfer(hba, force_compl);
6176 else
6177 ufshcd_transfer_req_compl(hba);
6178
6179 ufshcd_tmc_handler(hba);
6180 }
6181
6182 /**
6183 * ufshcd_quirk_dl_nac_errors - check whether error handling is required
6184 * to recover from DL NAC errors.
6185 * @hba: per-adapter instance
6186 *
6187 * Return: true if error handling is required, false otherwise.
6188 */
6189 static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
6190 {
6191 unsigned long flags;
6192 bool err_handling = true;
6193
6194 spin_lock_irqsave(hba->host->host_lock, flags);
6195 /*
6196 * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around
6197 * device fatal errors and/or DL NAC & REPLAY timeout errors.
6198 */
6199 if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
6200 goto out;
6201
6202 if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
6203 ((hba->saved_err & UIC_ERROR) &&
6204 (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
6205 goto out;
6206
6207 if ((hba->saved_err & UIC_ERROR) &&
6208 (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
6209 int err;
6210 /*
6211 * Wait for 50 ms to see whether any other errors show up.
6212 */
6213 spin_unlock_irqrestore(hba->host->host_lock, flags);
6214 msleep(50);
6215 spin_lock_irqsave(hba->host->host_lock, flags);
6216
6217 /*
6218 * Now check whether we have received any other severe errors
6219 * besides the DL NAC error.
6220 */
6221 if ((hba->saved_err & INT_FATAL_ERRORS) ||
6222 ((hba->saved_err & UIC_ERROR) &&
6223 (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
6224 goto out;
6225
6226 /*
6227 * As DL NAC is the only error received so far, send out NOP
6228 * command to confirm if link is still active or not.
6229 * - If we don't get any response then do error recovery.
6230 * - If we get response then clear the DL NAC error bit.
6231 */
6232
6233 spin_unlock_irqrestore(hba->host->host_lock, flags);
6234 err = ufshcd_verify_dev_init(hba);
6235 spin_lock_irqsave(hba->host->host_lock, flags);
6236
6237 if (err)
6238 goto out;
6239
6240 /* Link seems to be alive hence ignore the DL NAC errors */
6241 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
6242 hba->saved_err &= ~UIC_ERROR;
6243 /* clear NAC error */
6244 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
6245 if (!hba->saved_uic_err)
6246 err_handling = false;
6247 }
6248 out:
6249 spin_unlock_irqrestore(hba->host->host_lock, flags);
6250 return err_handling;
6251 }
6252
6253 /* host lock must be held before calling this func */
6254 static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
6255 {
6256 return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
6257 (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
6258 }
6259
6260 void ufshcd_schedule_eh_work(struct ufs_hba *hba)
6261 {
6262 lockdep_assert_held(hba->host->host_lock);
6263
6264 /* handle fatal errors only when link is not in error state */
6265 if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
6266 if (hba->force_reset || ufshcd_is_link_broken(hba) ||
6267 ufshcd_is_saved_err_fatal(hba))
6268 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
6269 else
6270 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
6271 queue_work(hba->eh_wq, &hba->eh_work);
6272 }
6273 }
6274
6275 static void ufshcd_force_error_recovery(struct ufs_hba *hba)
6276 {
6277 spin_lock_irq(hba->host->host_lock);
6278 hba->force_reset = true;
6279 ufshcd_schedule_eh_work(hba);
6280 spin_unlock_irq(hba->host->host_lock);
6281 }
6282
6283 static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
6284 {
6285 mutex_lock(&hba->wb_mutex);
6286 down_write(&hba->clk_scaling_lock);
6287 hba->clk_scaling.is_allowed = allow;
6288 up_write(&hba->clk_scaling_lock);
6289 mutex_unlock(&hba->wb_mutex);
6290 }
6291
6292 static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
6293 {
6294 if (suspend) {
6295 if (hba->clk_scaling.is_enabled)
6296 ufshcd_suspend_clkscaling(hba);
6297 ufshcd_clk_scaling_allow(hba, false);
6298 } else {
6299 ufshcd_clk_scaling_allow(hba, true);
6300 if (hba->clk_scaling.is_enabled)
6301 ufshcd_resume_clkscaling(hba);
6302 }
6303 }
6304
6305 static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
6306 {
6307 ufshcd_rpm_get_sync(hba);
6308 if (pm_runtime_status_suspended(&hba->ufs_device_wlun->sdev_gendev) ||
6309 hba->is_sys_suspended) {
6310 enum ufs_pm_op pm_op;
6311
6312 /*
6313 * Don't assume anything about the resume state: if
6314 * resume failed, IRQ and clocks can be OFF, and the power
6315 * supplies can be OFF or in LPM.
6316 */
6317 ufshcd_setup_hba_vreg(hba, true);
6318 ufshcd_enable_irq(hba);
6319 ufshcd_setup_vreg(hba, true);
6320 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
6321 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
6322 ufshcd_hold(hba);
6323 if (!ufshcd_is_clkgating_allowed(hba))
6324 ufshcd_setup_clocks(hba, true);
6325 pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM;
6326 ufshcd_vops_resume(hba, pm_op);
6327 } else {
6328 ufshcd_hold(hba);
6329 if (ufshcd_is_clkscaling_supported(hba) &&
6330 hba->clk_scaling.is_enabled)
6331 ufshcd_suspend_clkscaling(hba);
6332 ufshcd_clk_scaling_allow(hba, false);
6333 }
6334 ufshcd_scsi_block_requests(hba);
6335 /* Wait for ongoing ufshcd_queuecommand() calls to finish. */
6336 blk_mq_wait_quiesce_done(&hba->host->tag_set);
6337 cancel_work_sync(&hba->eeh_work);
6338 }
6339
6340 static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
6341 {
6342 ufshcd_scsi_unblock_requests(hba);
6343 ufshcd_release(hba);
6344 if (ufshcd_is_clkscaling_supported(hba))
6345 ufshcd_clk_scaling_suspend(hba, false);
6346 ufshcd_rpm_put(hba);
6347 }
6348
6349 static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
6350 {
6351 return (!hba->is_powered || hba->shutting_down ||
6352 !hba->ufs_device_wlun ||
6353 hba->ufshcd_state == UFSHCD_STATE_ERROR ||
6354 (!(hba->saved_err || hba->saved_uic_err || hba->force_reset ||
6355 ufshcd_is_link_broken(hba))));
6356 }
6357
6358 #ifdef CONFIG_PM
6359 static void ufshcd_recover_pm_error(struct ufs_hba *hba)
6360 {
6361 struct Scsi_Host *shost = hba->host;
6362 struct scsi_device *sdev;
6363 struct request_queue *q;
6364 int ret;
6365
6366 hba->is_sys_suspended = false;
6367 /*
6368 * Set RPM status of wlun device to RPM_ACTIVE,
6369 * this also clears its runtime error.
6370 */
6371 ret = pm_runtime_set_active(&hba->ufs_device_wlun->sdev_gendev);
6372
6373 /* hba device might have a runtime error otherwise */
6374 if (ret)
6375 ret = pm_runtime_set_active(hba->dev);
6376 /*
6377 * If wlun device had runtime error, we also need to resume those
6378 * consumer scsi devices in case any of them has failed to be
6379 * resumed due to supplier runtime resume failure. This is to unblock
6380 * blk_queue_enter in case there are bios waiting inside it.
6381 */
6382 if (!ret) {
6383 shost_for_each_device(sdev, shost) {
6384 q = sdev->request_queue;
6385 if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
6386 q->rpm_status == RPM_SUSPENDING))
6387 pm_request_resume(q->dev);
6388 }
6389 }
6390 }
6391 #else
6392 static inline void ufshcd_recover_pm_error(struct ufs_hba *hba)
6393 {
6394 }
6395 #endif
6396
6397 static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba)
6398 {
6399 struct ufs_pa_layer_attr *pwr_info = &hba->pwr_info;
6400 u32 mode;
6401
6402 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode);
6403
6404 if (pwr_info->pwr_rx != ((mode >> PWRMODE_RX_OFFSET) & PWRMODE_MASK))
6405 return true;
6406
6407 if (pwr_info->pwr_tx != (mode & PWRMODE_MASK))
6408 return true;
6409
6410 return false;
6411 }
6412
6413 static bool ufshcd_abort_one(struct request *rq, void *priv)
6414 {
6415 int *ret = priv;
6416 u32 tag = rq->tag;
6417 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
6418 struct scsi_device *sdev = cmd->device;
6419 struct Scsi_Host *shost = sdev->host;
6420 struct ufs_hba *hba = shost_priv(shost);
6421 struct ufshcd_lrb *lrbp = &hba->lrb[tag];
6422 struct ufs_hw_queue *hwq;
6423 unsigned long flags;
6424
6425 *ret = ufshcd_try_to_abort_task(hba, tag);
6426 dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
6427 hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
6428 *ret ? "failed" : "succeeded");
6429
6430 /* Release cmd in MCQ mode if abort succeeds */
6431 if (is_mcq_enabled(hba) && (*ret == 0)) {
6432 hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd));
6433 if (!hwq)
6434 return 0;
6435 spin_lock_irqsave(&hwq->cq_lock, flags);
6436 if (ufshcd_cmd_inflight(lrbp->cmd))
6437 ufshcd_release_scsi_cmd(hba, lrbp);
6438 spin_unlock_irqrestore(&hwq->cq_lock, flags);
6439 }
6440
6441 return *ret == 0;
6442 }
6443
6444 /**
6445 * ufshcd_abort_all - Abort all pending commands.
6446 * @hba: Host bus adapter pointer.
6447 *
6448 * Return: true if and only if the host controller needs to be reset.
6449 */
6450 static bool ufshcd_abort_all(struct ufs_hba *hba)
6451 {
6452 int tag, ret = 0;
6453
6454 blk_mq_tagset_busy_iter(&hba->host->tag_set, ufshcd_abort_one, &ret);
6455 if (ret)
6456 goto out;
6457
6458 /* Clear pending task management requests */
6459 for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
6460 ret = ufshcd_clear_tm_cmd(hba, tag);
6461 if (ret)
6462 goto out;
6463 }
6464
6465 out:
6466 /* Complete the requests that are cleared by s/w */
6467 ufshcd_complete_requests(hba, false);
6468
6469 return ret != 0;
6470 }
6471
6472 /**
6473 * ufshcd_err_handler - handle UFS errors that require s/w attention
6474 * @work: pointer to work structure
6475 */
6476 static void ufshcd_err_handler(struct work_struct *work)
6477 {
6478 int retries = MAX_ERR_HANDLER_RETRIES;
6479 struct ufs_hba *hba;
6480 unsigned long flags;
6481 bool needs_restore;
6482 bool needs_reset;
6483 int pmc_err;
6484
6485 hba = container_of(work, struct ufs_hba, eh_work);
6486
6487 dev_info(hba->dev,
6488 "%s started; HBA state %s; powered %d; shutting down %d; saved_err = %d; saved_uic_err = %d; force_reset = %d%s\n",
6489 __func__, ufshcd_state_name[hba->ufshcd_state],
6490 hba->is_powered, hba->shutting_down, hba->saved_err,
6491 hba->saved_uic_err, hba->force_reset,
6492 ufshcd_is_link_broken(hba) ? "; link is broken" : "");
6493
6494 down(&hba->host_sem);
6495 spin_lock_irqsave(hba->host->host_lock, flags);
6496 if (ufshcd_err_handling_should_stop(hba)) {
6497 if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
6498 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6499 spin_unlock_irqrestore(hba->host->host_lock, flags);
6500 up(&hba->host_sem);
6501 return;
6502 }
6503 spin_unlock_irqrestore(hba->host->host_lock, flags);
6504
6505 ufshcd_err_handling_prepare(hba);
6506
6507 spin_lock_irqsave(hba->host->host_lock, flags);
6508 ufshcd_set_eh_in_progress(hba);
6509 spin_unlock_irqrestore(hba->host->host_lock, flags);
6510
6511 /* Complete requests that have door-bell cleared by h/w */
6512 ufshcd_complete_requests(hba, false);
6513 spin_lock_irqsave(hba->host->host_lock, flags);
6514 again:
6515 needs_restore = false;
6516 needs_reset = false;
6517
6518 if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
6519 hba->ufshcd_state = UFSHCD_STATE_RESET;
6520 /*
6521 * A full reset and restore might have happened after preparation
6522 * is finished, double check whether we should stop.
6523 */
6524 if (ufshcd_err_handling_should_stop(hba))
6525 goto skip_err_handling;
6526
6527 if ((hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) &&
6528 !hba->force_reset) {
6529 bool ret;
6530
6531 spin_unlock_irqrestore(hba->host->host_lock, flags);
6532 /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
6533 ret = ufshcd_quirk_dl_nac_errors(hba);
6534 spin_lock_irqsave(hba->host->host_lock, flags);
6535 if (!ret && ufshcd_err_handling_should_stop(hba))
6536 goto skip_err_handling;
6537 }
6538
6539 if ((hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
6540 (hba->saved_uic_err &&
6541 (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
6542 bool pr_prdt = !!(hba->saved_err & SYSTEM_BUS_FATAL_ERROR);
6543
6544 spin_unlock_irqrestore(hba->host->host_lock, flags);
6545 ufshcd_print_host_state(hba);
6546 ufshcd_print_pwr_info(hba);
6547 ufshcd_print_evt_hist(hba);
6548 ufshcd_print_tmrs(hba, hba->outstanding_tasks);
6549 ufshcd_print_trs_all(hba, pr_prdt);
6550 spin_lock_irqsave(hba->host->host_lock, flags);
6551 }
6552
6553 /*
6554 * if host reset is required then skip clearing the pending
6555 * transfers forcefully because they will get cleared during
6556 * host reset and restore
6557 */
6558 if (hba->force_reset || ufshcd_is_link_broken(hba) ||
6559 ufshcd_is_saved_err_fatal(hba) ||
6560 ((hba->saved_err & UIC_ERROR) &&
6561 (hba->saved_uic_err & (UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
6562 UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))) {
6563 needs_reset = true;
6564 goto do_reset;
6565 }
6566
6567 /*
6568 * If LINERESET was caught, UFS might have been put to PWM mode,
6569 * check if power mode restore is needed.
6570 */
6571 if (hba->saved_uic_err & UFSHCD_UIC_PA_GENERIC_ERROR) {
6572 hba->saved_uic_err &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
6573 if (!hba->saved_uic_err)
6574 hba->saved_err &= ~UIC_ERROR;
6575 spin_unlock_irqrestore(hba->host->host_lock, flags);
6576 if (ufshcd_is_pwr_mode_restore_needed(hba))
6577 needs_restore = true;
6578 spin_lock_irqsave(hba->host->host_lock, flags);
6579 if (!hba->saved_err && !needs_restore)
6580 goto skip_err_handling;
6581 }
6582
6583 hba->silence_err_logs = true;
6584 /* release lock as clear command might sleep */
6585 spin_unlock_irqrestore(hba->host->host_lock, flags);
6586
6587 needs_reset = ufshcd_abort_all(hba);
6588
6589 spin_lock_irqsave(hba->host->host_lock, flags);
6590 hba->silence_err_logs = false;
6591 if (needs_reset)
6592 goto do_reset;
6593
6594 /*
6595 * After all requests and tasks have been cleared from the doorbell,
6596 * it is now safe to restore the power mode.
6597 */
6598 if (needs_restore) {
6599 spin_unlock_irqrestore(hba->host->host_lock, flags);
6600 /*
6601 * Hold the scaling lock just in case dev cmds
6602 * are sent via bsg and/or sysfs.
6603 */
6604 down_write(&hba->clk_scaling_lock);
6605 hba->force_pmc = true;
6606 pmc_err = ufshcd_config_pwr_mode(hba, &(hba->pwr_info));
6607 if (pmc_err) {
6608 needs_reset = true;
6609 dev_err(hba->dev, "%s: Failed to restore power mode, err = %d\n",
6610 __func__, pmc_err);
6611 }
6612 hba->force_pmc = false;
6613 ufshcd_print_pwr_info(hba);
6614 up_write(&hba->clk_scaling_lock);
6615 spin_lock_irqsave(hba->host->host_lock, flags);
6616 }
6617
6618 do_reset:
6619 /* Fatal errors need reset */
6620 if (needs_reset) {
6621 int err;
6622
6623 hba->force_reset = false;
6624 spin_unlock_irqrestore(hba->host->host_lock, flags);
6625 err = ufshcd_reset_and_restore(hba);
6626 if (err)
6627 dev_err(hba->dev, "%s: reset and restore failed with err %d\n",
6628 __func__, err);
6629 else
6630 ufshcd_recover_pm_error(hba);
6631 spin_lock_irqsave(hba->host->host_lock, flags);
6632 }
6633
6634 skip_err_handling:
6635 if (!needs_reset) {
6636 if (hba->ufshcd_state == UFSHCD_STATE_RESET)
6637 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6638 if (hba->saved_err || hba->saved_uic_err)
6639 dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
6640 __func__, hba->saved_err, hba->saved_uic_err);
6641 }
6642 /* Exit in an operational state or dead */
6643 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL &&
6644 hba->ufshcd_state != UFSHCD_STATE_ERROR) {
6645 if (--retries)
6646 goto again;
6647 hba->ufshcd_state = UFSHCD_STATE_ERROR;
6648 }
6649 ufshcd_clear_eh_in_progress(hba);
6650 spin_unlock_irqrestore(hba->host->host_lock, flags);
6651 ufshcd_err_handling_unprepare(hba);
6652 up(&hba->host_sem);
6653
6654 dev_info(hba->dev, "%s finished; HBA state %s\n", __func__,
6655 ufshcd_state_name[hba->ufshcd_state]);
6656 }
6657
6658 /**
6659 * ufshcd_update_uic_error - check and set fatal UIC error flags.
6660 * @hba: per-adapter instance
6661 *
6662 * Return:
6663 * IRQ_HANDLED - If interrupt is valid
6664 * IRQ_NONE - If invalid interrupt
6665 */
6666 static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
6667 {
6668 u32 reg;
6669 irqreturn_t retval = IRQ_NONE;
6670
6671 /* PHY layer error */
6672 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
6673 if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
6674 (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
6675 ufshcd_update_evt_hist(hba, UFS_EVT_PA_ERR, reg);
6676 /*
6677 * To know whether this error is fatal or not, DB timeout
6678 * must be checked but this error is handled separately.
6679 */
6680 if (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)
6681 dev_dbg(hba->dev, "%s: UIC Lane error reported\n",
6682 __func__);
6683
6684 /* Got a LINERESET indication. */
6685 if (reg & UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR) {
6686 struct uic_command *cmd = NULL;
6687
6688 hba->uic_error |= UFSHCD_UIC_PA_GENERIC_ERROR;
6689 if (hba->uic_async_done && hba->active_uic_cmd)
6690 cmd = hba->active_uic_cmd;
6691 /*
6692 * Ignore the LINERESET during power mode change
6693 * operation via DME_SET command.
6694 */
6695 if (cmd && (cmd->command == UIC_CMD_DME_SET))
6696 hba->uic_error &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
6697 }
6698 retval |= IRQ_HANDLED;
6699 }
6700
6701 /* PA_INIT_ERROR is fatal and needs UIC reset */
6702 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
6703 if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
6704 (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
6705 ufshcd_update_evt_hist(hba, UFS_EVT_DL_ERR, reg);
6706
6707 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
6708 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
6709 else if (hba->dev_quirks &
6710 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
6711 if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
6712 hba->uic_error |=
6713 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
6714 else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
6715 hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
6716 }
6717 retval |= IRQ_HANDLED;
6718 }
6719
6720 /* UIC NL/TL/DME errors needs software retry */
6721 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
6722 if ((reg & UIC_NETWORK_LAYER_ERROR) &&
6723 (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
6724 ufshcd_update_evt_hist(hba, UFS_EVT_NL_ERR, reg);
6725 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
6726 retval |= IRQ_HANDLED;
6727 }
6728
6729 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
6730 if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
6731 (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
6732 ufshcd_update_evt_hist(hba, UFS_EVT_TL_ERR, reg);
6733 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
6734 retval |= IRQ_HANDLED;
6735 }
6736
6737 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
6738 if ((reg & UIC_DME_ERROR) &&
6739 (reg & UIC_DME_ERROR_CODE_MASK)) {
6740 ufshcd_update_evt_hist(hba, UFS_EVT_DME_ERR, reg);
6741 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
6742 retval |= IRQ_HANDLED;
6743 }
6744
6745 dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
6746 __func__, hba->uic_error);
6747 return retval;
6748 }
6749
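/*
 * Illustrative example (not driver code): decoding a data link layer
 * error readout. A register value with both the error bit and the
 * PA_INIT code bit set, e.g.
 *
 *	reg = UIC_DATA_LINK_LAYER_ERROR | UIC_DATA_LINK_LAYER_ERROR_PA_INIT;
 *
 * takes the PA_INIT branch above and records UFSHCD_UIC_DL_PA_INIT_ERROR
 * in hba->uic_error, which ufshcd_check_errors() saves and
 * ufshcd_is_saved_err_fatal() later treats as fatal.
 */
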
6750 /**
6751 * ufshcd_check_errors - Check for errors that need s/w attention
6752 * @hba: per-adapter instance
6753 * @intr_status: interrupt status generated by the controller
6754 *
6755 * Return:
6756 * IRQ_HANDLED - If interrupt is valid
6757 * IRQ_NONE - If invalid interrupt
6758 */
6759 static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
6760 {
6761 bool queue_eh_work = false;
6762 irqreturn_t retval = IRQ_NONE;
6763
6764 spin_lock(hba->host->host_lock);
6765 hba->errors |= UFSHCD_ERROR_MASK & intr_status;
6766
6767 if (hba->errors & INT_FATAL_ERRORS) {
6768 ufshcd_update_evt_hist(hba, UFS_EVT_FATAL_ERR,
6769 hba->errors);
6770 queue_eh_work = true;
6771 }
6772
6773 if (hba->errors & UIC_ERROR) {
6774 hba->uic_error = 0;
6775 retval = ufshcd_update_uic_error(hba);
6776 if (hba->uic_error)
6777 queue_eh_work = true;
6778 }
6779
6780 if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) {
6781 dev_err(hba->dev,
6782 "%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
6783 __func__, (hba->errors & UIC_HIBERNATE_ENTER) ?
6784 "Enter" : "Exit",
6785 hba->errors, ufshcd_get_upmcrs(hba));
6786 ufshcd_update_evt_hist(hba, UFS_EVT_AUTO_HIBERN8_ERR,
6787 hba->errors);
6788 ufshcd_set_link_broken(hba);
6789 queue_eh_work = true;
6790 }
6791
6792 if (queue_eh_work) {
6793 /*
6794 * update the transfer error masks to sticky bits, let's do this
6795 * irrespective of current ufshcd_state.
6796 */
6797 hba->saved_err |= hba->errors;
6798 hba->saved_uic_err |= hba->uic_error;
6799
6800 /* dump controller state before resetting */
6801 if ((hba->saved_err &
6802 (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
6803 (hba->saved_uic_err &&
6804 (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
6805 dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
6806 __func__, hba->saved_err,
6807 hba->saved_uic_err);
6808 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE,
6809 "host_regs: ");
6810 ufshcd_print_pwr_info(hba);
6811 }
6812 ufshcd_schedule_eh_work(hba);
6813 retval |= IRQ_HANDLED;
6814 }
6815 /*
6816 * if (!queue_eh_work) -
6817 * Other errors are either non-fatal where host recovers
6818 * itself without s/w intervention or errors that will be
6819 * handled by the SCSI core layer.
6820 */
6821 hba->errors = 0;
6822 hba->uic_error = 0;
6823 spin_unlock(hba->host->host_lock);
6824 return retval;
6825 }
6826
6827 /**
6828 * ufshcd_tmc_handler - handle task management function completion
6829 * @hba: per adapter instance
6830 *
6831 * Return:
6832 * IRQ_HANDLED - If interrupt is valid
6833 * IRQ_NONE - If invalid interrupt
6834 */
6835 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
6836 {
6837 unsigned long flags, pending, issued;
6838 irqreturn_t ret = IRQ_NONE;
6839 int tag;
6840
6841 spin_lock_irqsave(hba->host->host_lock, flags);
6842 pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
6843 issued = hba->outstanding_tasks & ~pending;
6844 for_each_set_bit(tag, &issued, hba->nutmrs) {
6845 struct request *req = hba->tmf_rqs[tag];
6846 struct completion *c = req->end_io_data;
6847
6848 complete(c);
6849 ret = IRQ_HANDLED;
6850 }
6851 spin_unlock_irqrestore(hba->host->host_lock, flags);
6852
6853 return ret;
6854 }
6855
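/*
 * Illustrative example (not driver code) of the bookkeeping above: with
 * hba->outstanding_tasks = 0b0110 and a task doorbell readback of
 * pending = 0b0100,
 *
 *	issued = hba->outstanding_tasks & ~pending;	// 0b0010
 *
 * i.e. only TM slot 1 has completed, and its waiter is woken through the
 * request's end_io_data completion.
 */
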
6856 /**
6857 * ufshcd_handle_mcq_cq_events - handle MCQ completion queue events
6858 * @hba: per adapter instance
6859 *
6860 * Return: IRQ_HANDLED if interrupt is handled.
6861 */
6862 static irqreturn_t ufshcd_handle_mcq_cq_events(struct ufs_hba *hba)
6863 {
6864 struct ufs_hw_queue *hwq;
6865 unsigned long outstanding_cqs;
6866 unsigned int nr_queues;
6867 int i, ret;
6868 u32 events;
6869
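/*
* Ask the vendor driver which completion queues have pending events;
* if that is not supported, fall back to scanning all hardware queues.
*/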
6870 ret = ufshcd_vops_get_outstanding_cqs(hba, &outstanding_cqs);
6871 if (ret)
6872 outstanding_cqs = (1U << hba->nr_hw_queues) - 1;
6873
6874 /* Exclude the poll queues */
6875 nr_queues = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
6876 for_each_set_bit(i, &outstanding_cqs, nr_queues) {
6877 hwq = &hba->uhq[i];
6878
6879 events = ufshcd_mcq_read_cqis(hba, i);
6880 if (events)
6881 ufshcd_mcq_write_cqis(hba, events, i);
6882
6883 if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS)
6884 ufshcd_mcq_poll_cqe_lock(hba, hwq);
6885 }
6886
6887 return IRQ_HANDLED;
6888 }
6889
6890 /**
6891 * ufshcd_sl_intr - Interrupt service routine
6892 * @hba: per adapter instance
6893 * @intr_status: contains interrupts generated by the controller
6894 *
6895 * Return:
6896 * IRQ_HANDLED - If interrupt is valid
6897 * IRQ_NONE - If invalid interrupt
6898 */
6899 static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
6900 {
6901 irqreturn_t retval = IRQ_NONE;
6902
6903 if (intr_status & UFSHCD_UIC_MASK)
6904 retval |= ufshcd_uic_cmd_compl(hba, intr_status);
6905
6906 if (intr_status & UFSHCD_ERROR_MASK || hba->errors)
6907 retval |= ufshcd_check_errors(hba, intr_status);
6908
6909 if (intr_status & UTP_TASK_REQ_COMPL)
6910 retval |= ufshcd_tmc_handler(hba);
6911
6912 if (intr_status & UTP_TRANSFER_REQ_COMPL)
6913 retval |= ufshcd_transfer_req_compl(hba);
6914
6915 if (intr_status & MCQ_CQ_EVENT_STATUS)
6916 retval |= ufshcd_handle_mcq_cq_events(hba);
6917
6918 return retval;
6919 }
6920
6921 /**
6922 * ufshcd_intr - Main interrupt service routine
6923 * @irq: irq number
6924 * @__hba: pointer to adapter instance
6925 *
6926 * Return:
6927 * IRQ_HANDLED - If interrupt is valid
6928 * IRQ_NONE - If invalid interrupt
6929 */
6930 static irqreturn_t ufshcd_intr(int irq, void *__hba)
6931 {
6932 u32 intr_status, enabled_intr_status = 0;
6933 irqreturn_t retval = IRQ_NONE;
6934 struct ufs_hba *hba = __hba;
6935 int retries = hba->nutrs;
6936
6937 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
6938 hba->ufs_stats.last_intr_status = intr_status;
6939 hba->ufs_stats.last_intr_ts = local_clock();
6940
6941 /*
6942 * Up to hba->nutrs requests can be in flight. In the worst case they
6943 * complete one by one right after the interrupt status is read, so
6944 * re-read and handle the interrupt status in a loop (bounded by the
6945 * retry count) until all of the requests have been processed.
6946 */
6947 while (intr_status && retries--) {
6948 enabled_intr_status =
6949 intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
6950 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
6951 if (enabled_intr_status)
6952 retval |= ufshcd_sl_intr(hba, enabled_intr_status);
6953
6954 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
6955 }
6956
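/*
* If an enabled interrupt was not claimed by any handler, log it and dump
* the host registers - unless it is only a transfer-completion interrupt
* with no outstanding requests, or error handling is already in progress.
*/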
6957 if (enabled_intr_status && retval == IRQ_NONE &&
6958 (!(enabled_intr_status & UTP_TRANSFER_REQ_COMPL) ||
6959 hba->outstanding_reqs) && !ufshcd_eh_in_progress(hba)) {
6960 dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
6961 __func__,
6962 intr_status,
6963 hba->ufs_stats.last_intr_status,
6964 enabled_intr_status);
6965 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
6966 }
6967
6968 return retval;
6969 }
6970
6971 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
6972 {
6973 int err = 0;
6974 u32 mask = 1 << tag;
6975 unsigned long flags;
6976
6977 if (!test_bit(tag, &hba->outstanding_tasks))
6978 goto out;
6979
6980 spin_lock_irqsave(hba->host->host_lock, flags);
6981 ufshcd_utmrl_clear(hba, tag);
6982 spin_unlock_irqrestore(hba->host->host_lock, flags);
6983
6984 /* poll for max. 1 sec to clear door bell register by h/w */
6985 err = ufshcd_wait_for_register(hba,
6986 REG_UTP_TASK_REQ_DOOR_BELL,
6987 mask, 0, 1000, 1000);
6988
6989 dev_err(hba->dev, "Clearing task management function with tag %d %s\n",
6990 tag, err < 0 ? "failed" : "succeeded");
6991
6992 out:
6993 return err;
6994 }
6995
6996 static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
6997 struct utp_task_req_desc *treq, u8 tm_function)
6998 {
6999 struct request_queue *q = hba->tmf_queue;
7000 struct Scsi_Host *host = hba->host;
7001 DECLARE_COMPLETION_ONSTACK(wait);
7002 struct request *req;
7003 unsigned long flags;
7004 int task_tag, err;
7005
7006 /*
7007 * blk_mq_alloc_request() is used here only to get a free tag.
7008 */
7009 req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
7010 if (IS_ERR(req))
7011 return PTR_ERR(req);
7012
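/* ufshcd_tmc_handler() completes this once the corresponding TMR doorbell bit clears. */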
7013 req->end_io_data = &wait;
7014 ufshcd_hold(hba);
7015
7016 spin_lock_irqsave(host->host_lock, flags);
7017
7018 task_tag = req->tag;
7019 WARN_ONCE(task_tag < 0 || task_tag >= hba->nutmrs, "Invalid tag %d\n",
7020 task_tag);
7021 hba->tmf_rqs[req->tag] = req;
7022 treq->upiu_req.req_header.task_tag = task_tag;
7023
7024 memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
7025 ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function);
7026
7027 /* send command to the controller */
7028 __set_bit(task_tag, &hba->outstanding_tasks);
7029
7030 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);
7031 /* Make sure that doorbell is committed immediately */
7032 wmb();
7033
7034 spin_unlock_irqrestore(host->host_lock, flags);
7035
7036 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_SEND);
7037
7038 /* wait until the task management command is completed */
7039 err = wait_for_completion_io_timeout(&wait,
7040 msecs_to_jiffies(TM_CMD_TIMEOUT));
7041 if (!err) {
7042 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_ERR);
7043 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
7044 __func__, tm_function);
7045 if (ufshcd_clear_tm_cmd(hba, task_tag))
7046 dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
7047 __func__, task_tag);
7048 err = -ETIMEDOUT;
7049 } else {
7050 err = 0;
7051 memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq));
7052
7053 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_COMP);
7054 }
7055
7056 spin_lock_irqsave(hba->host->host_lock, flags);
7057 hba->tmf_rqs[req->tag] = NULL;
7058 __clear_bit(task_tag, &hba->outstanding_tasks);
7059 spin_unlock_irqrestore(hba->host->host_lock, flags);
7060
7061 ufshcd_release(hba);
7062 blk_mq_free_request(req);
7063
7064 return err;
7065 }
7066
7067 /**
7068 * ufshcd_issue_tm_cmd - issues task management commands to controller
7069 * @hba: per adapter instance
7070 * @lun_id: LUN ID to which TM command is sent
7071 * @task_id: task ID to which the TM command is applicable
7072 * @tm_function: task management function opcode
7073 * @tm_response: task management service response return value
7074 *
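* A typical call, mirroring ufshcd_eh_device_reset_handler(), checks both the
* return value and the service response:
*
*	err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
*	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
*		if (!err)
*			err = resp;
*	}
*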
7075 * Return: non-zero value on error, zero on success.
7076 */
7077 static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
7078 u8 tm_function, u8 *tm_response)
7079 {
7080 struct utp_task_req_desc treq = { };
7081 enum utp_ocs ocs_value;
7082 int err;
7083
7084 /* Configure task request descriptor */
7085 treq.header.interrupt = 1;
7086 treq.header.ocs = OCS_INVALID_COMMAND_STATUS;
7087
7088 /* Configure task request UPIU */
7089 treq.upiu_req.req_header.transaction_code = UPIU_TRANSACTION_TASK_REQ;
7090 treq.upiu_req.req_header.lun = lun_id;
7091 treq.upiu_req.req_header.tm_function = tm_function;
7092
7093 /*
7094 * The host shall provide the same value for LUN field in the basic
7095 * header and for Input Parameter.
7096 */
7097 treq.upiu_req.input_param1 = cpu_to_be32(lun_id);
7098 treq.upiu_req.input_param2 = cpu_to_be32(task_id);
7099
7100 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function);
7101 if (err == -ETIMEDOUT)
7102 return err;
7103
7104 ocs_value = treq.header.ocs & MASK_OCS;
7105 if (ocs_value != OCS_SUCCESS)
7106 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
7107 __func__, ocs_value);
7108 else if (tm_response)
7109 *tm_response = be32_to_cpu(treq.upiu_rsp.output_param1) &
7110 MASK_TM_SERVICE_RESP;
7111 return err;
7112 }
7113
7114 /**
7115 * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests
7116 * @hba: per-adapter instance
7117 * @req_upiu: upiu request
7118 * @rsp_upiu: upiu reply
7119 * @desc_buff: pointer to descriptor buffer, NULL if NA
7120 * @buff_len: descriptor size, 0 if NA
7121 * @cmd_type: specifies the type (NOP, Query...)
7122 * @desc_op: descriptor operation
7123 *
7124 * These types of requests use a UTP Transfer Request Descriptor (UTRD).
7125 * Therefore, they "ride" the device management infrastructure: they use its
7126 * tag and its work queues.
7127 *
7128 * Since there is only one available tag for device management commands,
7129 * the caller is expected to hold the hba->dev_cmd.lock mutex.
7130 *
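* A minimal calling sketch, mirroring the in-file caller
* ufshcd_exec_raw_upiu_cmd():
*
*	ufshcd_hold(hba);
*	mutex_lock(&hba->dev_cmd.lock);
*	err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
*					   desc_buff, buff_len,
*					   cmd_type, desc_op);
*	mutex_unlock(&hba->dev_cmd.lock);
*	ufshcd_release(hba);
*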
7131 * Return: 0 upon success; < 0 upon failure.
7132 */
7133 static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
7134 struct utp_upiu_req *req_upiu,
7135 struct utp_upiu_req *rsp_upiu,
7136 u8 *desc_buff, int *buff_len,
7137 enum dev_cmd_type cmd_type,
7138 enum query_opcode desc_op)
7139 {
7140 DECLARE_COMPLETION_ONSTACK(wait);
7141 const u32 tag = hba->reserved_slot;
7142 struct ufshcd_lrb *lrbp;
7143 int err = 0;
7144 u8 upiu_flags;
7145
7146 /* Protects use of hba->reserved_slot. */
7147 lockdep_assert_held(&hba->dev_cmd.lock);
7148
7149 down_read(&hba->clk_scaling_lock);
7150
7151 lrbp = &hba->lrb[tag];
7152 lrbp->cmd = NULL;
7153 lrbp->task_tag = tag;
7154 lrbp->lun = 0;
7155 lrbp->intr_cmd = true;
7156 ufshcd_prepare_lrbp_crypto(NULL, lrbp);
7157 hba->dev_cmd.type = cmd_type;
7158
7159 if (hba->ufs_version <= ufshci_version(1, 1))
7160 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
7161 else
7162 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
7163
7164 /* update the task tag in the request upiu */
7165 req_upiu->header.task_tag = tag;
7166
7167 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0);
7168
7169 /* just copy the upiu request as it is */
7170 memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
7171 if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) {
7172 /* The Data Segment Area is optional, depending on the query
7173 * function value. For WRITE DESCRIPTOR, the data segment
7174 * follows right after the Transaction Specific Fields (TSF).
7175 */
7176 memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len);
7177 *buff_len = 0;
7178 }
7179
7180 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
7181
7182 hba->dev_cmd.complete = &wait;
7183
7184 ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
7185
7186 ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
7187 /*
7188 * Ignore the return value here: ufshcd_check_query_response() is
7189 * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
7190 * Read the response directly, ignoring all errors.
7191 */
7192 ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);
7193
7194 /* just copy the upiu response as it is */
7195 memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
7196 if (desc_buff && desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
7197 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu);
7198 u16 resp_len = be16_to_cpu(lrbp->ucd_rsp_ptr->header
7199 .data_segment_length);
7200
7201 if (*buff_len >= resp_len) {
7202 memcpy(desc_buff, descp, resp_len);
7203 *buff_len = resp_len;
7204 } else {
7205 dev_warn(hba->dev,
7206 "%s: rsp size %d is bigger than buffer size %d",
7207 __func__, resp_len, *buff_len);
7208 *buff_len = 0;
7209 err = -EINVAL;
7210 }
7211 }
7212 ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
7213 (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
7214
7215 up_read(&hba->clk_scaling_lock);
7216 return err;
7217 }
7218
7219 /**
7220 * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands
7221 * @hba: per-adapter instance
7222 * @req_upiu: upiu request
7223 * @rsp_upiu: upiu reply - only 8 DW as we do not support scsi commands
7224 * @msgcode: message code, one of UPIU Transaction Codes Initiator to Target
7225 * @desc_buff: pointer to descriptor buffer, NULL if NA
7226 * @buff_len: descriptor size, 0 if NA
7227 * @desc_op: descriptor operation
7228 *
7229 * Supports UTP Transfer requests (nop and query), and UTP Task
7230 * Management requests.
7231 * It is up to the caller to fill the UPIU content properly, as it will
7232 * be copied without any further input validation.
7233 *
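* NOP OUT and Query requests are dispatched through the device management
* path (ufshcd_issue_devman_upiu_cmd()); Task Management requests are sent
* through __ufshcd_issue_tm_cmd().
*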
7234 * Return: 0 upon success; < 0 upon failure.
7235 */
7236 int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
7237 struct utp_upiu_req *req_upiu,
7238 struct utp_upiu_req *rsp_upiu,
7239 enum upiu_request_transaction msgcode,
7240 u8 *desc_buff, int *buff_len,
7241 enum query_opcode desc_op)
7242 {
7243 int err;
7244 enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY;
7245 struct utp_task_req_desc treq = { };
7246 enum utp_ocs ocs_value;
7247 u8 tm_f = req_upiu->header.tm_function;
7248
7249 switch (msgcode) {
7250 case UPIU_TRANSACTION_NOP_OUT:
7251 cmd_type = DEV_CMD_TYPE_NOP;
7252 fallthrough;
7253 case UPIU_TRANSACTION_QUERY_REQ:
7254 ufshcd_hold(hba);
7255 mutex_lock(&hba->dev_cmd.lock);
7256 err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
7257 desc_buff, buff_len,
7258 cmd_type, desc_op);
7259 mutex_unlock(&hba->dev_cmd.lock);
7260 ufshcd_release(hba);
7261
7262 break;
7263 case UPIU_TRANSACTION_TASK_REQ:
7264 treq.header.interrupt = 1;
7265 treq.header.ocs = OCS_INVALID_COMMAND_STATUS;
7266
7267 memcpy(&treq.upiu_req, req_upiu, sizeof(*req_upiu));
7268
7269 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
7270 if (err == -ETIMEDOUT)
7271 break;
7272
7273 ocs_value = treq.header.ocs & MASK_OCS;
7274 if (ocs_value != OCS_SUCCESS) {
7275 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
7276 ocs_value);
7277 break;
7278 }
7279
7280 memcpy(rsp_upiu, &treq.upiu_rsp, sizeof(*rsp_upiu));
7281
7282 break;
7283 default:
7284 err = -EINVAL;
7285
7286 break;
7287 }
7288
7289 return err;
7290 }
7291
7292 /**
7293 * ufshcd_advanced_rpmb_req_handler - handle advanced RPMB request
7294 * @hba: per adapter instance
7295 * @req_upiu: upiu request
7296 * @rsp_upiu: upiu reply
7297 * @req_ehs: EHS field which contains Advanced RPMB Request Message
7298 * @rsp_ehs: EHS field which returns Advanced RPMB Response Message
7299 * @sg_cnt: The number of sg lists actually used
7300 * @sg_list: Pointer to SG list when DATA IN/OUT UPIU is required in ARPMB operation
7301 * @dir: DMA direction
7302 *
7303 * Return: zero on success, non-zero on failure.
7304 */
7305 int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *req_upiu,
7306 struct utp_upiu_req *rsp_upiu, struct ufs_ehs *req_ehs,
7307 struct ufs_ehs *rsp_ehs, int sg_cnt, struct scatterlist *sg_list,
7308 enum dma_data_direction dir)
7309 {
7310 DECLARE_COMPLETION_ONSTACK(wait);
7311 const u32 tag = hba->reserved_slot;
7312 struct ufshcd_lrb *lrbp;
7313 int err = 0;
7314 int result;
7315 u8 upiu_flags;
7316 u8 *ehs_data;
7317 u16 ehs_len;
7318
7319 /* Protects use of hba->reserved_slot. */
7320 ufshcd_hold(hba);
7321 mutex_lock(&hba->dev_cmd.lock);
7322 down_read(&hba->clk_scaling_lock);
7323
7324 lrbp = &hba->lrb[tag];
7325 lrbp->cmd = NULL;
7326 lrbp->task_tag = tag;
7327 lrbp->lun = UFS_UPIU_RPMB_WLUN;
7328
7329 lrbp->intr_cmd = true;
7330 ufshcd_prepare_lrbp_crypto(NULL, lrbp);
7331 hba->dev_cmd.type = DEV_CMD_TYPE_RPMB;
7332
7333 /* Advanced RPMB starts from UFS 4.0, so its command type is UTP_CMD_TYPE_UFS_STORAGE */
7334 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
7335
7336 /*
7337 * According to the UFSHCI 4.0 specification (page 24): if EHSLUTRDS is 0, the host
7338 * controller takes the EHS length from the CMD UPIU, and the driver uses the EHS Length
7339 * field in the CMD UPIU; if it is 1, the host controller takes the EHS length from the UTRD.
7340 */
7341 if (hba->capabilities & MASK_EHSLUTRD_SUPPORTED)
7342 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 2);
7343 else
7344 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 0);
7345
7346 /* update the task tag */
7347 req_upiu->header.task_tag = tag;
7348
7349 /* copy the UPIU (which contains the CDB) request as is */
7350 memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
7351 /* Copy EHS, starting with byte32, immediately after the CDB package */
7352 memcpy(lrbp->ucd_req_ptr + 1, req_ehs, sizeof(*req_ehs));
7353
7354 if (dir != DMA_NONE && sg_list)
7355 ufshcd_sgl_to_prdt(hba, lrbp, sg_cnt, sg_list);
7356
7357 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
7358
7359 hba->dev_cmd.complete = &wait;
7360
7361 ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
7362
7363 err = ufshcd_wait_for_dev_cmd(hba, lrbp, ADVANCED_RPMB_REQ_TIMEOUT);
7364
7365 if (!err) {
7366 /* Just copy the upiu response as it is */
7367 memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
7368 /* Get the response UPIU result */
7369 result = (lrbp->ucd_rsp_ptr->header.response << 8) |
7370 lrbp->ucd_rsp_ptr->header.status;
7371
7372 ehs_len = lrbp->ucd_rsp_ptr->header.ehs_length;
7373 /*
7374 * Since the bLength in EHS indicates the total size of the EHS Header and EHS Data
7375 * in 32 Byte units, the value of the bLength Request/Response for Advanced RPMB
7376 * Message is 02h
7377 */
7378 if (ehs_len == 2 && rsp_ehs) {
7379 /*
7380 * ucd_rsp_ptr points to a buffer with a length of 512 bytes
7381 * (ALIGNED_UPIU_SIZE = 512), and the EHS data just starts from byte32
7382 */
7383 ehs_data = (u8 *)lrbp->ucd_rsp_ptr + EHS_OFFSET_IN_RESPONSE;
7384 memcpy(rsp_ehs, ehs_data, ehs_len * 32);
7385 }
7386 }
7387
7388 up_read(&hba->clk_scaling_lock);
7389 mutex_unlock(&hba->dev_cmd.lock);
7390 ufshcd_release(hba);
7391 return err ? : result;
7392 }
7393
7394 /**
7395 * ufshcd_eh_device_reset_handler() - Reset a single logical unit.
7396 * @cmd: SCSI command pointer
7397 *
7398 * Return: SUCCESS or FAILED.
7399 */
7400 static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
7401 {
7402 unsigned long flags, pending_reqs = 0, not_cleared = 0;
7403 struct Scsi_Host *host;
7404 struct ufs_hba *hba;
7405 struct ufs_hw_queue *hwq;
7406 struct ufshcd_lrb *lrbp;
7407 u32 pos, not_cleared_mask = 0;
7408 int err;
7409 u8 resp = 0xF, lun;
7410
7411 host = cmd->device->host;
7412 hba = shost_priv(host);
7413
7414 lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
7415 err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
7416 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
7417 if (!err)
7418 err = resp;
7419 goto out;
7420 }
7421
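/*
* In MCQ mode there is no doorbell register to scan; walk the LRBs,
* clear every command still in flight for this LUN and poll its
* completion queue for the resulting completions.
*/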
7422 if (is_mcq_enabled(hba)) {
7423 for (pos = 0; pos < hba->nutrs; pos++) {
7424 lrbp = &hba->lrb[pos];
7425 if (ufshcd_cmd_inflight(lrbp->cmd) &&
7426 lrbp->lun == lun) {
7427 ufshcd_clear_cmd(hba, pos);
7428 hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd));
7429 ufshcd_mcq_poll_cqe_lock(hba, hwq);
7430 }
7431 }
7432 err = 0;
7433 goto out;
7434 }
7435
7436 /* clear the commands that were pending for corresponding LUN */
7437 spin_lock_irqsave(&hba->outstanding_lock, flags);
7438 for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs)
7439 if (hba->lrb[pos].lun == lun)
7440 __set_bit(pos, &pending_reqs);
7441 hba->outstanding_reqs &= ~pending_reqs;
7442 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
7443
7444 for_each_set_bit(pos, &pending_reqs, hba->nutrs) {
7445 if (ufshcd_clear_cmd(hba, pos) < 0) {
7446 spin_lock_irqsave(&hba->outstanding_lock, flags);
7447 not_cleared = 1U << pos &
7448 ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
7449 hba->outstanding_reqs |= not_cleared;
7450 not_cleared_mask |= not_cleared;
7451 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
7452
7453 dev_err(hba->dev, "%s: failed to clear request %d\n",
7454 __func__, pos);
7455 }
7456 }
7457 __ufshcd_transfer_req_compl(hba, pending_reqs & ~not_cleared_mask);
7458
7459 out:
7460 hba->req_abort_count = 0;
7461 ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, (u32)err);
7462 if (!err) {
7463 err = SUCCESS;
7464 } else {
7465 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
7466 err = FAILED;
7467 }
7468 return err;
7469 }
7470
7471 static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
7472 {
7473 struct ufshcd_lrb *lrbp;
7474 int tag;
7475
7476 for_each_set_bit(tag, &bitmap, hba->nutrs) {
7477 lrbp = &hba->lrb[tag];
7478 lrbp->req_abort_skip = true;
7479 }
7480 }
7481
7482 /**
7483 * ufshcd_try_to_abort_task - abort a specific task
7484 * @hba: Pointer to adapter instance
7485 * @tag: Task tag/index to be aborted
7486 *
7487 * Abort the pending command in device by sending UFS_ABORT_TASK task management
7488 * command, and in host controller by clearing the door-bell register. There can
7489 * be race between controller sending the command to the device while abort is
7490 * issued. To avoid that, first issue UFS_QUERY_TASK to check if the command is
7491 * really issued and then try to abort it.
7492 *
7493 * Return: zero on success, non-zero on failure.
7494 */
7495 int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
7496 {
7497 struct ufshcd_lrb *lrbp = &hba->lrb[tag];
7498 int err = 0;
7499 int poll_cnt;
7500 u8 resp = 0xF;
7501 u32 reg;
7502
7503 for (poll_cnt = 100; poll_cnt; poll_cnt--) {
7504 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
7505 UFS_QUERY_TASK, &resp);
7506 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
7507 /* cmd pending in the device */
7508 dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
7509 __func__, tag);
7510 break;
7511 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
7512 /*
7513 * cmd not pending in the device, check if it is
7514 * in transition.
7515 */
7516 dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
7517 __func__, tag);
7518 if (is_mcq_enabled(hba)) {
7519 /* MCQ mode */
7520 if (ufshcd_cmd_inflight(lrbp->cmd)) {
7521 /* sleep for max. 200us same delay as in SDB mode */
7522 usleep_range(100, 200);
7523 continue;
7524 }
7525 /* command completed already */
7526 dev_err(hba->dev, "%s: cmd at tag=%d is cleared.\n",
7527 __func__, tag);
7528 goto out;
7529 }
7530
7531 /* Single Doorbell Mode */
7532 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
7533 if (reg & (1 << tag)) {
7534 /* sleep for max. 200us to stabilize */
7535 usleep_range(100, 200);
7536 continue;
7537 }
7538 /* command completed already */
7539 dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
7540 __func__, tag);
7541 goto out;
7542 } else {
7543 dev_err(hba->dev,
7544 "%s: no response from device. tag = %d, err %d\n",
7545 __func__, tag, err);
7546 if (!err)
7547 err = resp; /* service response error */
7548 goto out;
7549 }
7550 }
7551
7552 if (!poll_cnt) {
7553 err = -EBUSY;
7554 goto out;
7555 }
7556
7557 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
7558 UFS_ABORT_TASK, &resp);
7559 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
7560 if (!err) {
7561 err = resp; /* service response error */
7562 dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
7563 __func__, tag, err);
7564 }
7565 goto out;
7566 }
7567
7568 err = ufshcd_clear_cmd(hba, tag);
7569 if (err)
7570 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
7571 __func__, tag, err);
7572
7573 out:
7574 return err;
7575 }
7576
7577 /**
7578 * ufshcd_abort - scsi host template eh_abort_handler callback
7579 * @cmd: SCSI command pointer
7580 *
7581 * Return: SUCCESS or FAILED.
7582 */
7583 static int ufshcd_abort(struct scsi_cmnd *cmd)
7584 {
7585 struct Scsi_Host *host = cmd->device->host;
7586 struct ufs_hba *hba = shost_priv(host);
7587 int tag = scsi_cmd_to_rq(cmd)->tag;
7588 struct ufshcd_lrb *lrbp = &hba->lrb[tag];
7589 unsigned long flags;
7590 int err = FAILED;
7591 bool outstanding;
7592 u32 reg;
7593
7594 WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
7595
7596 ufshcd_hold(hba);
7597
7598 if (!is_mcq_enabled(hba)) {
7599 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
7600 if (!test_bit(tag, &hba->outstanding_reqs)) {
7601 /* If command is already aborted/completed, return FAILED. */
7602 dev_err(hba->dev,
7603 "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
7604 __func__, tag, hba->outstanding_reqs, reg);
7605 goto release;
7606 }
7607 }
7608
7609 /* Print Transfer Request of aborted task */
7610 dev_info(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
7611
7612 /*
7613 * Print detailed info about aborted request.
7614 * As more than one request might get aborted at the same time,
7615 * print full information only for the first aborted request in order
7616 * to reduce repeated printouts. For other aborted requests only print
7617 * basic details.
7618 */
7619 scsi_print_command(cmd);
7620 if (!hba->req_abort_count) {
7621 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, tag);
7622 ufshcd_print_evt_hist(hba);
7623 ufshcd_print_host_state(hba);
7624 ufshcd_print_pwr_info(hba);
7625 ufshcd_print_tr(hba, tag, true);
7626 } else {
7627 ufshcd_print_tr(hba, tag, false);
7628 }
7629 hba->req_abort_count++;
7630
7631 if (!is_mcq_enabled(hba) && !(reg & (1 << tag))) {
7632 /* only execute this code in single doorbell mode */
7633 dev_err(hba->dev,
7634 "%s: cmd was completed, but without a notifying intr, tag = %d",
7635 __func__, tag);
7636 __ufshcd_transfer_req_compl(hba, 1UL << tag);
7637 goto release;
7638 }
7639
7640 /*
7641 * Task abort to the device W-LUN is illegal. When this command
7642 * fails, due to the spec violation, the next step of SCSI error
7643 * handling would be to send a LU reset which, again, is a spec
7644 * violation. To avoid these unnecessary/illegal steps, force a
7645 * full host reset by setting hba->force_reset, then queue the
7646 * eh_work and bail.
7647 */
7648 if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) {
7649 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun);
7650
7651 spin_lock_irqsave(host->host_lock, flags);
7652 hba->force_reset = true;
7653 ufshcd_schedule_eh_work(hba);
7654 spin_unlock_irqrestore(host->host_lock, flags);
7655 goto release;
7656 }
7657
7658 if (is_mcq_enabled(hba)) {
7659 /* MCQ mode. Branch off to handle abort for mcq mode */
7660 err = ufshcd_mcq_abort(cmd);
7661 goto release;
7662 }
7663
7664 /* Skip task abort in case previous aborts failed and report failure */
7665 if (lrbp->req_abort_skip) {
7666 dev_err(hba->dev, "%s: skipping abort\n", __func__);
7667 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
7668 goto release;
7669 }
7670
7671 err = ufshcd_try_to_abort_task(hba, tag);
7672 if (err) {
7673 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
7674 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
7675 err = FAILED;
7676 goto release;
7677 }
7678
7679 /*
7680 * Clear the corresponding bit from outstanding_reqs since the command
7681 * has been aborted successfully.
7682 */
7683 spin_lock_irqsave(&hba->outstanding_lock, flags);
7684 outstanding = __test_and_clear_bit(tag, &hba->outstanding_reqs);
7685 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
7686
7687 if (outstanding)
7688 ufshcd_release_scsi_cmd(hba, lrbp);
7689
7690 err = SUCCESS;
7691
7692 release:
7693 /* Matches the ufshcd_hold() call at the start of this function. */
7694 ufshcd_release(hba);
7695 return err;
7696 }
7697
7698 /**
7699 * ufshcd_host_reset_and_restore - reset and restore host controller
7700 * @hba: per-adapter instance
7701 *
7702 * Note that host controller reset may issue DME_RESET to
7703 * local and remote (device) Uni-Pro stack and the attributes
7704 * are reset to default state.
7705 *
7706 * Return: zero on success, non-zero on failure.
7707 */
7708 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
7709 {
7710 int err;
7711
7712 /*
7713 * Stop the host controller and complete the requests
7714 * cleared by h/w
7715 */
7716 ufshcd_hba_stop(hba);
7717 hba->silence_err_logs = true;
7718 ufshcd_complete_requests(hba, true);
7719 hba->silence_err_logs = false;
7720
7721 /* scale up clocks to max frequency before full reinitialization */
7722 ufshcd_scale_clks(hba, true);
7723
7724 err = ufshcd_hba_enable(hba);
7725
7726 /* Establish the link again and restore the device */
7727 if (!err)
7728 err = ufshcd_probe_hba(hba, false);
7729
7730 if (err)
7731 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
7732 ufshcd_update_evt_hist(hba, UFS_EVT_HOST_RESET, (u32)err);
7733 return err;
7734 }
7735
7736 /**
7737 * ufshcd_reset_and_restore - reset and re-initialize host/device
7738 * @hba: per-adapter instance
7739 *
7740 * Reset and recover device, host and re-establish link. This
7741 * is helpful to recover the communication in fatal error conditions.
7742 *
7743 * Return: zero on success, non-zero on failure.
7744 */
7745 static int ufshcd_reset_and_restore(struct ufs_hba *hba)
7746 {
7747 u32 saved_err = 0;
7748 u32 saved_uic_err = 0;
7749 int err = 0;
7750 unsigned long flags;
7751 int retries = MAX_HOST_RESET_RETRIES;
7752
7753 spin_lock_irqsave(hba->host->host_lock, flags);
7754 do {
7755 /*
7756 * This is a fresh start, cache and clear saved error first,
7757 * in case new error generated during reset and restore.
7758 */
7759 saved_err |= hba->saved_err;
7760 saved_uic_err |= hba->saved_uic_err;
7761 hba->saved_err = 0;
7762 hba->saved_uic_err = 0;
7763 hba->force_reset = false;
7764 hba->ufshcd_state = UFSHCD_STATE_RESET;
7765 spin_unlock_irqrestore(hba->host->host_lock, flags);
7766
7767 /* Reset the attached device */
7768 ufshcd_device_reset(hba);
7769
7770 err = ufshcd_host_reset_and_restore(hba);
7771
7772 spin_lock_irqsave(hba->host->host_lock, flags);
7773 if (err)
7774 continue;
7775 /* Do not exit unless operational or dead */
7776 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL &&
7777 hba->ufshcd_state != UFSHCD_STATE_ERROR &&
7778 hba->ufshcd_state != UFSHCD_STATE_EH_SCHEDULED_NON_FATAL)
7779 err = -EAGAIN;
7780 } while (err && --retries);
7781
7782 /*
7783 * Inform scsi mid-layer that we did reset and allow to handle
7784 * Unit Attention properly.
7785 */
7786 scsi_report_bus_reset(hba->host, 0);
7787 if (err) {
7788 hba->ufshcd_state = UFSHCD_STATE_ERROR;
7789 hba->saved_err |= saved_err;
7790 hba->saved_uic_err |= saved_uic_err;
7791 }
7792 spin_unlock_irqrestore(hba->host->host_lock, flags);
7793
7794 return err;
7795 }
7796
7797 /**
7798 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
7799 * @cmd: SCSI command pointer
7800 *
7801 * Return: SUCCESS or FAILED.
7802 */
7803 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
7804 {
7805 int err = SUCCESS;
7806 unsigned long flags;
7807 struct ufs_hba *hba;
7808
7809 hba = shost_priv(cmd->device->host);
7810
7811 spin_lock_irqsave(hba->host->host_lock, flags);
7812 hba->force_reset = true;
7813 ufshcd_schedule_eh_work(hba);
7814 dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
7815 spin_unlock_irqrestore(hba->host->host_lock, flags);
7816
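/*
* Wait for the error handler to finish; it performs the actual reset and
* leaves hba->ufshcd_state indicating whether it succeeded.
*/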
7817 flush_work(&hba->eh_work);
7818
7819 spin_lock_irqsave(hba->host->host_lock, flags);
7820 if (hba->ufshcd_state == UFSHCD_STATE_ERROR)
7821 err = FAILED;
7822 spin_unlock_irqrestore(hba->host->host_lock, flags);
7823
7824 return err;
7825 }
7826
7827 /**
7828 * ufshcd_get_max_icc_level - calculate the ICC level
7829 * @sup_curr_uA: max. current supported by the regulator
7830 * @start_scan: row at the desc table to start scan from
7831 * @buff: power descriptor buffer
7832 *
7833 * Return: calculated max ICC level for specific regulator.
7834 */
7835 static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan,
7836 const char *buff)
7837 {
7838 int i;
7839 int curr_uA;
7840 u16 data;
7841 u16 unit;
7842
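/*
* Each 16-bit entry in the power descriptor encodes a unit field and a
* value; normalize the value to microamps before comparing it against
* the regulator's supported current. For example, an entry with unit
* UFSHCD_MILI_AMP and value 100 is treated as 100000 uA.
*/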
7843 for (i = start_scan; i >= 0; i--) {
7844 data = get_unaligned_be16(&buff[2 * i]);
7845 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
7846 ATTR_ICC_LVL_UNIT_OFFSET;
7847 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
7848 switch (unit) {
7849 case UFSHCD_NANO_AMP:
7850 curr_uA = curr_uA / 1000;
7851 break;
7852 case UFSHCD_MILI_AMP:
7853 curr_uA = curr_uA * 1000;
7854 break;
7855 case UFSHCD_AMP:
7856 curr_uA = curr_uA * 1000 * 1000;
7857 break;
7858 case UFSHCD_MICRO_AMP:
7859 default:
7860 break;
7861 }
7862 if (sup_curr_uA >= curr_uA)
7863 break;
7864 }
7865 if (i < 0) {
7866 i = 0;
7867 pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
7868 }
7869
7870 return (u32)i;
7871 }
7872
7873 /**
7874 * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level
7875 * @hba: per-adapter instance
7876 * @desc_buf: power descriptor buffer to extract ICC levels from.
7877 *
7878 * Return: calculated ICC level; 0 in case the regulators are not
7879 * initialized.
7880 */
7881 static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
7882 const u8 *desc_buf)
7883 {
7884 u32 icc_level = 0;
7885
7886 if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
7887 !hba->vreg_info.vccq2) {
7888 /*
7889 * Using dev_dbg to avoid messages during runtime PM to avoid
7890 * never-ending cycles of messages written back to storage by
7891 * user space causing runtime resume, causing more messages and
7892 * so on.
7893 */
7894 dev_dbg(hba->dev,
7895 "%s: Regulator capability was not set, actvIccLevel=%d",
7896 __func__, icc_level);
7897 goto out;
7898 }
7899
7900 if (hba->vreg_info.vcc->max_uA)
7901 icc_level = ufshcd_get_max_icc_level(
7902 hba->vreg_info.vcc->max_uA,
7903 POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
7904 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
7905
7906 if (hba->vreg_info.vccq->max_uA)
7907 icc_level = ufshcd_get_max_icc_level(
7908 hba->vreg_info.vccq->max_uA,
7909 icc_level,
7910 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
7911
7912 if (hba->vreg_info.vccq2->max_uA)
7913 icc_level = ufshcd_get_max_icc_level(
7914 hba->vreg_info.vccq2->max_uA,
7915 icc_level,
7916 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
7917 out:
7918 return icc_level;
7919 }
7920
7921 static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba)
7922 {
7923 int ret;
7924 u8 *desc_buf;
7925 u32 icc_level;
7926
7927 desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
7928 if (!desc_buf)
7929 return;
7930
7931 ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, 0,
7932 desc_buf, QUERY_DESC_MAX_SIZE);
7933 if (ret) {
7934 dev_err(hba->dev,
7935 "%s: Failed reading power descriptor ret = %d",
7936 __func__, ret);
7937 goto out;
7938 }
7939
7940 icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf);
7941 dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level);
7942
7943 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
7944 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
7945
7946 if (ret)
7947 dev_err(hba->dev,
7948 "%s: Failed configuring bActiveICCLevel = %d ret = %d",
7949 __func__, icc_level, ret);
7950
7951 out:
7952 kfree(desc_buf);
7953 }
7954
7955 static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev)
7956 {
7957 scsi_autopm_get_device(sdev);
7958 blk_pm_runtime_init(sdev->request_queue, &sdev->sdev_gendev);
7959 if (sdev->rpm_autosuspend)
7960 pm_runtime_set_autosuspend_delay(&sdev->sdev_gendev,
7961 RPM_AUTOSUSPEND_DELAY_MS);
7962 scsi_autopm_put_device(sdev);
7963 }
7964
7965 /**
7966 * ufshcd_scsi_add_wlus - Adds required W-LUs
7967 * @hba: per-adapter instance
7968 *
7969 * UFS device specification requires the UFS devices to support 4 well known
7970 * logical units:
7971 * "REPORT_LUNS" (address: 01h)
7972 * "UFS Device" (address: 50h)
7973 * "RPMB" (address: 44h)
7974 * "BOOT" (address: 30h)
7975 * The UFS device's power management needs to be controlled by the "POWER
7976 * CONDITION" field of the SSU (START STOP UNIT) command. But this field takes
7977 * effect only when it is sent to the "UFS device" well known logical unit,
7978 * hence we require a scsi_device instance to represent this logical unit in
7979 * order for the UFS host driver to send the SSU command for power management.
7980 *
7981 * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
7982 * Block) LU so user space process can control this LU. User space may also
7983 * want to have access to BOOT LU.
7984 *
7985 * This function adds scsi_device instances for each of the well known LUs
7986 * (except the "REPORT LUNS" LU).
7987 *
7988 * Return: zero on success (all required W-LUs are added successfully),
7989 * non-zero error value on failure (if failed to add any of the required W-LU).
7990 */
7991 static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
7992 {
7993 int ret = 0;
7994 struct scsi_device *sdev_boot, *sdev_rpmb;
7995
7996 hba->ufs_device_wlun = __scsi_add_device(hba->host, 0, 0,
7997 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
7998 if (IS_ERR(hba->ufs_device_wlun)) {
7999 ret = PTR_ERR(hba->ufs_device_wlun);
8000 hba->ufs_device_wlun = NULL;
8001 goto out;
8002 }
8003 scsi_device_put(hba->ufs_device_wlun);
8004
8005 sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
8006 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
8007 if (IS_ERR(sdev_rpmb)) {
8008 ret = PTR_ERR(sdev_rpmb);
8009 goto remove_ufs_device_wlun;
8010 }
8011 ufshcd_blk_pm_runtime_init(sdev_rpmb);
8012 scsi_device_put(sdev_rpmb);
8013
8014 sdev_boot = __scsi_add_device(hba->host, 0, 0,
8015 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
8016 if (IS_ERR(sdev_boot)) {
8017 dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
8018 } else {
8019 ufshcd_blk_pm_runtime_init(sdev_boot);
8020 scsi_device_put(sdev_boot);
8021 }
8022 goto out;
8023
8024 remove_ufs_device_wlun:
8025 scsi_remove_device(hba->ufs_device_wlun);
8026 out:
8027 return ret;
8028 }
8029
8030 static void ufshcd_wb_probe(struct ufs_hba *hba, const u8 *desc_buf)
8031 {
8032 struct ufs_dev_info *dev_info = &hba->dev_info;
8033 u8 lun;
8034 u32 d_lu_wb_buf_alloc;
8035 u32 ext_ufs_feature;
8036
8037 if (!ufshcd_is_wb_allowed(hba))
8038 return;
8039
8040 /*
8041 * Probe WB only for UFS-2.2 and UFS-3.1 (and later) devices or
8042 * UFS devices with quirk UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES
8043 * enabled
8044 */
8045 if (!(dev_info->wspecversion >= 0x310 ||
8046 dev_info->wspecversion == 0x220 ||
8047 (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES)))
8048 goto wb_disabled;
8049
8050 ext_ufs_feature = get_unaligned_be32(desc_buf +
8051 DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
8052
8053 if (!(ext_ufs_feature & UFS_DEV_WRITE_BOOSTER_SUP))
8054 goto wb_disabled;
8055
8056 /*
8057 * WB may be supported but not configured while provisioning. The spec
8058 * says, in dedicated wb buffer mode, a max of 1 lun would have wb
8059 * buffer configured.
8060 */
8061 dev_info->wb_buffer_type = desc_buf[DEVICE_DESC_PARAM_WB_TYPE];
8062
8063 dev_info->b_presrv_uspc_en =
8064 desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN];
8065
8066 if (dev_info->wb_buffer_type == WB_BUF_MODE_SHARED) {
8067 if (!get_unaligned_be32(desc_buf +
8068 DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS))
8069 goto wb_disabled;
8070 } else {
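/*
* Dedicated buffer mode: scan the LUNs and use the first one that was
* provisioned with a WriteBooster buffer allocation.
*/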
8071 for (lun = 0; lun < UFS_UPIU_MAX_WB_LUN_ID; lun++) {
8072 d_lu_wb_buf_alloc = 0;
8073 ufshcd_read_unit_desc_param(hba,
8074 lun,
8075 UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS,
8076 (u8 *)&d_lu_wb_buf_alloc,
8077 sizeof(d_lu_wb_buf_alloc));
8078 if (d_lu_wb_buf_alloc) {
8079 dev_info->wb_dedicated_lu = lun;
8080 break;
8081 }
8082 }
8083
8084 if (!d_lu_wb_buf_alloc)
8085 goto wb_disabled;
8086 }
8087
8088 if (!ufshcd_is_wb_buf_lifetime_available(hba))
8089 goto wb_disabled;
8090
8091 return;
8092
8093 wb_disabled:
8094 hba->caps &= ~UFSHCD_CAP_WB_EN;
8095 }
8096
8097 static void ufshcd_temp_notif_probe(struct ufs_hba *hba, const u8 *desc_buf)
8098 {
8099 struct ufs_dev_info *dev_info = &hba->dev_info;
8100 u32 ext_ufs_feature;
8101 u8 mask = 0;
8102
8103 if (!(hba->caps & UFSHCD_CAP_TEMP_NOTIF) || dev_info->wspecversion < 0x300)
8104 return;
8105
8106 ext_ufs_feature = get_unaligned_be32(desc_buf + DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
8107
8108 if (ext_ufs_feature & UFS_DEV_LOW_TEMP_NOTIF)
8109 mask |= MASK_EE_TOO_LOW_TEMP;
8110
8111 if (ext_ufs_feature & UFS_DEV_HIGH_TEMP_NOTIF)
8112 mask |= MASK_EE_TOO_HIGH_TEMP;
8113
8114 if (mask) {
8115 ufshcd_enable_ee(hba, mask);
8116 ufs_hwmon_probe(hba, mask);
8117 }
8118 }
8119
8120 static void ufshcd_ext_iid_probe(struct ufs_hba *hba, u8 *desc_buf)
8121 {
8122 struct ufs_dev_info *dev_info = &hba->dev_info;
8123 u32 ext_ufs_feature;
8124 u32 ext_iid_en = 0;
8125 int err;
8126
8127 /* Only UFS-4.0 and above may support EXT_IID */
8128 if (dev_info->wspecversion < 0x400)
8129 goto out;
8130
8131 ext_ufs_feature = get_unaligned_be32(desc_buf +
8132 DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
8133 if (!(ext_ufs_feature & UFS_DEV_EXT_IID_SUP))
8134 goto out;
8135
8136 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
8137 QUERY_ATTR_IDN_EXT_IID_EN, 0, 0, &ext_iid_en);
8138 if (err)
8139 dev_err(hba->dev, "failed reading bEXTIIDEn. err = %d\n", err);
8140
8141 out:
8142 dev_info->b_ext_iid_en = ext_iid_en;
8143 }
8144
8145 void ufshcd_fixup_dev_quirks(struct ufs_hba *hba,
8146 const struct ufs_dev_quirk *fixups)
8147 {
8148 const struct ufs_dev_quirk *f;
8149 struct ufs_dev_info *dev_info = &hba->dev_info;
8150
8151 if (!fixups)
8152 return;
8153
8154 for (f = fixups; f->quirk; f++) {
8155 if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
8156 f->wmanufacturerid == UFS_ANY_VENDOR) &&
8157 ((dev_info->model &&
8158 STR_PRFX_EQUAL(f->model, dev_info->model)) ||
8159 !strcmp(f->model, UFS_ANY_MODEL)))
8160 hba->dev_quirks |= f->quirk;
8161 }
8162 }
8163 EXPORT_SYMBOL_GPL(ufshcd_fixup_dev_quirks);
8164
8165 static void ufs_fixup_device_setup(struct ufs_hba *hba)
8166 {
8167 /* fix by general quirk table */
8168 ufshcd_fixup_dev_quirks(hba, ufs_fixups);
8169
8170 /* allow vendors to fix quirks */
8171 ufshcd_vops_fixup_dev_quirks(hba);
8172 }
8173
8174 static void ufshcd_update_rtc(struct ufs_hba *hba)
8175 {
8176 struct timespec64 ts64;
8177 int err;
8178 u32 val;
8179
8180 ktime_get_real_ts64(&ts64);
8181
8182 if (ts64.tv_sec < hba->dev_info.rtc_time_baseline) {
8183 dev_warn_once(hba->dev, "%s: Current time precedes previous setting!\n", __func__);
8184 return;
8185 }
8186
8187 /*
8188 * The Absolute RTC mode has a 136-year limit, spanning from 2010 to 2146. If a time beyond
8189 * 2146 is required, it is recommended to choose the relative RTC mode.
8190 */
8191 val = ts64.tv_sec - hba->dev_info.rtc_time_baseline;
8192
8193 /* Skip update RTC if RPM state is not RPM_ACTIVE */
8194 if (ufshcd_rpm_get_if_active(hba) <= 0)
8195 return;
8196
8197 err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, QUERY_ATTR_IDN_SECONDS_PASSED,
8198 0, 0, &val);
8199 ufshcd_rpm_put(hba);
8200
8201 if (err)
8202 dev_err(hba->dev, "%s: Failed to update rtc %d\n", __func__, err);
8203 else if (hba->dev_info.rtc_type == UFS_RTC_RELATIVE)
8204 hba->dev_info.rtc_time_baseline = ts64.tv_sec;
8205 }
8206
8207 static void ufshcd_rtc_work(struct work_struct *work)
8208 {
8209 struct ufs_hba *hba;
8210
8211 hba = container_of(to_delayed_work(work), struct ufs_hba, ufs_rtc_update_work);
8212
8213 /* Update RTC only when there are no requests in progress and UFSHCI is operational */
8214 if (!ufshcd_is_ufs_dev_busy(hba) &&
8215 hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL &&
8216 !hba->clk_gating.active_reqs)
8217 ufshcd_update_rtc(hba);
8218
8219 if (ufshcd_is_ufs_dev_active(hba))
8220 schedule_delayed_work(&hba->ufs_rtc_update_work,
8221 msecs_to_jiffies(UFS_RTC_UPDATE_INTERVAL_MS));
8222 }
8223
8224 static void ufs_init_rtc(struct ufs_hba *hba, u8 *desc_buf)
8225 {
8226 u16 periodic_rtc_update = get_unaligned_be16(&desc_buf[DEVICE_DESC_PARAM_FRQ_RTC]);
8227 struct ufs_dev_info *dev_info = &hba->dev_info;
8228
8229 if (periodic_rtc_update & UFS_RTC_TIME_BASELINE) {
8230 dev_info->rtc_type = UFS_RTC_ABSOLUTE;
8231
8232 /*
8233 * Linux measures time as the number of seconds elapsed since 00:00:00 UTC on
8234 * January 1, 1970, whereas the UFS absolute RTC counts seconds elapsed since
8235 * 00:00 on January 1, 2010, so adjust the absolute baseline by that offset.
8236 */
8237 dev_info->rtc_time_baseline = mktime64(2010, 1, 1, 0, 0, 0) -
8238 mktime64(1970, 1, 1, 0, 0, 0);
8239 } else {
8240 dev_info->rtc_type = UFS_RTC_RELATIVE;
8241 dev_info->rtc_time_baseline = 0;
8242 }
8243 }
8244
8245 static int ufs_get_device_desc(struct ufs_hba *hba)
8246 {
8247 int err;
8248 u8 model_index;
8249 u8 *desc_buf;
8250 struct ufs_dev_info *dev_info = &hba->dev_info;
8251
8252 desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
8253 if (!desc_buf) {
8254 err = -ENOMEM;
8255 goto out;
8256 }
8257
8258 err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf,
8259 QUERY_DESC_MAX_SIZE);
8260 if (err) {
8261 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
8262 __func__, err);
8263 goto out;
8264 }
8265
8266 /*
8267 * getting vendor (manufacturerID) and Bank Index in big endian
8268 * format
8269 */
8270 dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
8271 desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
8272
8273 /* getting Specification Version in big endian format */
8274 dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
8275 desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
8276 dev_info->bqueuedepth = desc_buf[DEVICE_DESC_PARAM_Q_DPTH];
8277
8278 model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
8279
8280 err = ufshcd_read_string_desc(hba, model_index,
8281 &dev_info->model, SD_ASCII_STD);
8282 if (err < 0) {
8283 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
8284 __func__, err);
8285 goto out;
8286 }
8287
8288 hba->luns_avail = desc_buf[DEVICE_DESC_PARAM_NUM_LU] +
8289 desc_buf[DEVICE_DESC_PARAM_NUM_WLU];
8290
8291 ufs_fixup_device_setup(hba);
8292
8293 ufshcd_wb_probe(hba, desc_buf);
8294
8295 ufshcd_temp_notif_probe(hba, desc_buf);
8296
8297 ufs_init_rtc(hba, desc_buf);
8298
8299 if (hba->ext_iid_sup)
8300 ufshcd_ext_iid_probe(hba, desc_buf);
8301
8302 /*
8303 * ufshcd_read_string_desc() returns the size of the string on success;
8304 * reset the error value to zero.
8305 */
8306 err = 0;
8307
8308 out:
8309 kfree(desc_buf);
8310 return err;
8311 }
8312
8313 static void ufs_put_device_desc(struct ufs_hba *hba)
8314 {
8315 struct ufs_dev_info *dev_info = &hba->dev_info;
8316
8317 kfree(dev_info->model);
8318 dev_info->model = NULL;
8319 }
8320
8321 /**
8322 * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
8323 * @hba: per-adapter instance
8324 *
8325 * PA_TActivate parameter can be tuned manually if UniPro version is less than
8326 * 1.61. PA_TActivate needs to be greater than or equal to the peer M-PHY's
8327 * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
8328 * the hibern8 exit latency.
8329 *
8330 * Return: zero on success, non-zero error value on failure.
8331 */
8332 static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
8333 {
8334 int ret = 0;
8335 u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
8336
8337 ret = ufshcd_dme_peer_get(hba,
8338 UIC_ARG_MIB_SEL(
8339 RX_MIN_ACTIVATETIME_CAPABILITY,
8340 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
8341 &peer_rx_min_activatetime);
8342 if (ret)
8343 goto out;
8344
8345 /* make sure proper unit conversion is applied */
8346 tuned_pa_tactivate =
8347 ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
8348 / PA_TACTIVATE_TIME_UNIT_US);
8349 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
8350 tuned_pa_tactivate);
8351
8352 out:
8353 return ret;
8354 }
8355
8356 /**
8357 * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
8358 * @hba: per-adapter instance
8359 *
8360 * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
8361 * 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's
8362 * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
8363 * This optimal value can help reduce the hibern8 exit latency.
8364 *
8365 * Return: zero on success, non-zero error value on failure.
8366 */
8367 static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
8368 {
8369 int ret = 0;
8370 u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
8371 u32 max_hibern8_time, tuned_pa_hibern8time;
8372
8373 ret = ufshcd_dme_get(hba,
8374 UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
8375 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
8376 &local_tx_hibern8_time_cap);
8377 if (ret)
8378 goto out;
8379
8380 ret = ufshcd_dme_peer_get(hba,
8381 UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
8382 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
8383 &peer_rx_hibern8_time_cap);
8384 if (ret)
8385 goto out;
8386
8387 max_hibern8_time = max(local_tx_hibern8_time_cap,
8388 peer_rx_hibern8_time_cap);
8389 /* make sure proper unit conversion is applied */
8390 tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
8391 / PA_HIBERN8_TIME_UNIT_US);
8392 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
8393 tuned_pa_hibern8time);
8394 out:
8395 return ret;
8396 }
8397
8398 /**
8399 * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
8400 * less than device PA_TACTIVATE time.
8401 * @hba: per-adapter instance
8402 *
8403 * Some UFS devices require host PA_TACTIVATE to be lower than device
8404 * PA_TACTIVATE, we need to enable UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk
8405 * for such devices.
8406 *
8407 * Return: zero on success, non-zero error value on failure.
8408 */
8409 static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
8410 {
8411 int ret = 0;
8412 u32 granularity, peer_granularity;
8413 u32 pa_tactivate, peer_pa_tactivate;
8414 u32 pa_tactivate_us, peer_pa_tactivate_us;
8415 static const u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
8416
8417 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
8418 &granularity);
8419 if (ret)
8420 goto out;
8421
8422 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
8423 &peer_granularity);
8424 if (ret)
8425 goto out;
8426
8427 if ((granularity < PA_GRANULARITY_MIN_VAL) ||
8428 (granularity > PA_GRANULARITY_MAX_VAL)) {
8429 dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
8430 __func__, granularity);
8431 return -EINVAL;
8432 }
8433
8434 if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
8435 (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
8436 dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
8437 __func__, peer_granularity);
8438 return -EINVAL;
8439 }
8440
8441 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
8442 if (ret)
8443 goto out;
8444
8445 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
8446 &peer_pa_tactivate);
8447 if (ret)
8448 goto out;
8449
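/*
* Convert both PA_TACTIVATE values to microseconds using each side's
* PA_GRANULARITY step size before comparing them.
*/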
8450 pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
8451 peer_pa_tactivate_us = peer_pa_tactivate *
8452 gran_to_us_table[peer_granularity - 1];
8453
8454 if (pa_tactivate_us >= peer_pa_tactivate_us) {
8455 u32 new_peer_pa_tactivate;
8456
8457 new_peer_pa_tactivate = pa_tactivate_us /
8458 gran_to_us_table[peer_granularity - 1];
8459 new_peer_pa_tactivate++;
8460 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
8461 new_peer_pa_tactivate);
8462 }
8463
8464 out:
8465 return ret;
8466 }
8467
8468 /**
8469 * ufshcd_quirk_override_pa_h8time - Ensures proper adjustment of PA_HIBERN8TIME.
8470 * @hba: per-adapter instance
8471 *
8472 * Some UFS devices require specific adjustments to the PA_HIBERN8TIME parameter
8473 * to ensure proper hibernation timing. This function retrieves the current
8474 * PA_HIBERN8TIME value and increments it by 100us.
8475 */
8476 static void ufshcd_quirk_override_pa_h8time(struct ufs_hba *hba)
8477 {
8478 u32 pa_h8time;
8479 int ret;
8480
8481 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_HIBERN8TIME), &pa_h8time);
8482 if (ret) {
8483 dev_err(hba->dev, "Failed to get PA_HIBERN8TIME: %d\n", ret);
8484 return;
8485 }
8486
8487 /* Increment by 1 to increase hibernation time by 100 µs */
8488 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), pa_h8time + 1);
8489 if (ret)
8490 dev_err(hba->dev, "Failed updating PA_HIBERN8TIME: %d\n", ret);
8491 }
8492
8493 static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
8494 {
8495 if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
8496 ufshcd_tune_pa_tactivate(hba);
8497 ufshcd_tune_pa_hibern8time(hba);
8498 }
8499
8500 ufshcd_vops_apply_dev_quirks(hba);
8501
8502 if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
8503 /* set 1ms timeout for PA_TACTIVATE */
8504 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
8505
8506 if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
8507 ufshcd_quirk_tune_host_pa_tactivate(hba);
8508
8509 if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_HIBER8TIME)
8510 ufshcd_quirk_override_pa_h8time(hba);
8511 }
8512
8513 static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
8514 {
8515 hba->ufs_stats.hibern8_exit_cnt = 0;
8516 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
8517 hba->req_abort_count = 0;
8518 }
8519
8520 static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
8521 {
8522 int err;
8523 u8 *desc_buf;
8524
8525 desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
8526 if (!desc_buf) {
8527 err = -ENOMEM;
8528 goto out;
8529 }
8530
8531 err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, 0,
8532 desc_buf, QUERY_DESC_MAX_SIZE);
8533 if (err) {
8534 dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n",
8535 __func__, err);
8536 goto out;
8537 }
8538
8539 if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 1)
8540 hba->dev_info.max_lu_supported = 32;
8541 else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
8542 hba->dev_info.max_lu_supported = 8;
8543
8544 out:
8545 kfree(desc_buf);
8546 return err;
8547 }
8548
8549 struct ufs_ref_clk {
8550 unsigned long freq_hz;
8551 enum ufs_ref_clk_freq val;
8552 };
8553
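/*
* Map of reference clock rates (Hz) to the bRefClkFreq attribute encoding;
* the zero-rate entry terminates the table.
*/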
8554 static const struct ufs_ref_clk ufs_ref_clk_freqs[] = {
8555 {19200000, REF_CLK_FREQ_19_2_MHZ},
8556 {26000000, REF_CLK_FREQ_26_MHZ},
8557 {38400000, REF_CLK_FREQ_38_4_MHZ},
8558 {52000000, REF_CLK_FREQ_52_MHZ},
8559 {0, REF_CLK_FREQ_INVAL},
8560 };
8561
8562 static enum ufs_ref_clk_freq
8563 ufs_get_bref_clk_from_hz(unsigned long freq)
8564 {
8565 int i;
8566
8567 for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++)
8568 if (ufs_ref_clk_freqs[i].freq_hz == freq)
8569 return ufs_ref_clk_freqs[i].val;
8570
8571 return REF_CLK_FREQ_INVAL;
8572 }
8573
8574 void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk)
8575 {
8576 unsigned long freq;
8577
8578 freq = clk_get_rate(refclk);
8579
8580 hba->dev_ref_clk_freq =
8581 ufs_get_bref_clk_from_hz(freq);
8582
8583 if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
8584 dev_err(hba->dev,
8585 "invalid ref_clk setting = %ld\n", freq);
8586 }
8587
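/**
 * ufshcd_set_dev_ref_clk - program the bRefClkFreq attribute on the device
 * @hba: per-adapter instance
 *
 * Read the bRefClkFreq attribute and update it if it does not already match
 * the reference clock frequency detected by the host.
 *
 * Return: 0 upon success; < 0 upon failure.
 */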
8588 static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba)
8589 {
8590 int err;
8591 u32 ref_clk;
8592 u32 freq = hba->dev_ref_clk_freq;
8593
8594 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
8595 QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk);
8596
8597 if (err) {
8598 dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n",
8599 err);
8600 goto out;
8601 }
8602
8603 if (ref_clk == freq)
8604 goto out; /* nothing to update */
8605
8606 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
8607 QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &freq);
8608
8609 if (err) {
8610 dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n",
8611 ufs_ref_clk_freqs[freq].freq_hz);
8612 goto out;
8613 }
8614
8615 dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n",
8616 ufs_ref_clk_freqs[freq].freq_hz);
8617
8618 out:
8619 return err;
8620 }
8621
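/**
 * ufshcd_device_params_init - initialize UFS device parameters
 * @hba: per-adapter instance
 *
 * Read the geometry and device descriptors, apply device quirks, read the
 * power-on write protect flag and probe the maximum power mode supported by
 * both the host and the device.
 *
 * Return: 0 upon success; < 0 upon failure.
 */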
8622 static int ufshcd_device_params_init(struct ufs_hba *hba)
8623 {
8624 bool flag;
8625 int ret;
8626
8627 /* Init UFS geometry descriptor related parameters */
8628 ret = ufshcd_device_geo_params_init(hba);
8629 if (ret)
8630 goto out;
8631
8632 /* Check and apply UFS device quirks */
8633 ret = ufs_get_device_desc(hba);
8634 if (ret) {
8635 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
8636 __func__, ret);
8637 goto out;
8638 }
8639
8640 ufshcd_get_ref_clk_gating_wait(hba);
8641
8642 if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
8643 QUERY_FLAG_IDN_PWR_ON_WPE, 0, &flag))
8644 hba->dev_info.f_power_on_wp_en = flag;
8645
8646 /* Probe maximum power mode co-supported by both UFS host and device */
8647 if (ufshcd_get_max_pwr_mode(hba))
8648 dev_err(hba->dev,
8649 "%s: Failed getting max supported power mode\n",
8650 __func__);
8651 out:
8652 return ret;
8653 }
8654
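/**
 * ufshcd_set_timestamp_attr - send the current time to the UFS device
 * @hba: per-adapter instance
 *
 * Write the current time in nanoseconds to the timestamp attribute using a
 * device management command. Only done for devices compliant with UFS 4.0
 * or later.
 */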
8655 static void ufshcd_set_timestamp_attr(struct ufs_hba *hba)
8656 {
8657 int err;
8658 struct ufs_query_req *request = NULL;
8659 struct ufs_query_res *response = NULL;
8660 struct ufs_dev_info *dev_info = &hba->dev_info;
8661 struct utp_upiu_query_v4_0 *upiu_data;
8662
8663 if (dev_info->wspecversion < 0x400)
8664 return;
8665
8666 ufshcd_hold(hba);
8667
8668 mutex_lock(&hba->dev_cmd.lock);
8669
8670 ufshcd_init_query(hba, &request, &response,
8671 UPIU_QUERY_OPCODE_WRITE_ATTR,
8672 QUERY_ATTR_IDN_TIMESTAMP, 0, 0);
8673
8674 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
8675
8676 upiu_data = (struct utp_upiu_query_v4_0 *)&request->upiu_req;
8677
8678 put_unaligned_be64(ktime_get_real_ns(), &upiu_data->osf3);
8679
8680 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
8681
8682 if (err)
8683 dev_err(hba->dev, "%s: failed to set timestamp %d\n",
8684 __func__, err);
8685
8686 mutex_unlock(&hba->dev_cmd.lock);
8687 ufshcd_release(hba);
8688 }
8689
8690 /**
8691 * ufshcd_add_lus - probe and add UFS logical units
8692 * @hba: per-adapter instance
8693 *
8694 * Return: 0 upon success; < 0 upon failure.
8695 */
8696 static int ufshcd_add_lus(struct ufs_hba *hba)
8697 {
8698 int ret;
8699
8700 /* Add required well known logical units to scsi mid layer */
8701 ret = ufshcd_scsi_add_wlus(hba);
8702 if (ret)
8703 goto out;
8704
8705 /* Initialize devfreq after UFS device is detected */
8706 if (ufshcd_is_clkscaling_supported(hba)) {
8707 memcpy(&hba->clk_scaling.saved_pwr_info,
8708 &hba->pwr_info,
8709 sizeof(struct ufs_pa_layer_attr));
8710 hba->clk_scaling.is_allowed = true;
8711
8712 ret = ufshcd_devfreq_init(hba);
8713 if (ret)
8714 goto out;
8715
8716 hba->clk_scaling.is_enabled = true;
8717 ufshcd_init_clk_scaling_sysfs(hba);
8718 }
8719
8720 /*
8721 * The RTC update code accesses the hba->ufs_device_wlun->sdev_gendev
8722 * pointer and hence must only be started after the WLUN pointer has
8723 * been initialized by ufshcd_scsi_add_wlus().
8724 */
8725 schedule_delayed_work(&hba->ufs_rtc_update_work,
8726 msecs_to_jiffies(UFS_RTC_UPDATE_INTERVAL_MS));
8727
8728 ufs_bsg_probe(hba);
8729 scsi_scan_host(hba->host);
8730
8731 out:
8732 return ret;
8733 }
8734
8735 /* SDB - Single Doorbell */
8736 static void ufshcd_release_sdb_queue(struct ufs_hba *hba, int nutrs)
8737 {
8738 size_t ucdl_size, utrdl_size;
8739
8740 ucdl_size = ufshcd_get_ucd_size(hba) * nutrs;
8741 dmam_free_coherent(hba->dev, ucdl_size, hba->ucdl_base_addr,
8742 hba->ucdl_dma_addr);
8743
8744 utrdl_size = sizeof(struct utp_transfer_req_desc) * nutrs;
8745 dmam_free_coherent(hba->dev, utrdl_size, hba->utrdl_base_addr,
8746 hba->utrdl_dma_addr);
8747
8748 devm_kfree(hba->dev, hba->lrb);
8749 }
8750
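/**
 * ufshcd_alloc_mcq - allocate resources for MCQ mode
 * @hba: per-adapter instance
 *
 * Determine the MCQ queue depth, reallocate the host memory if the number of
 * supported tags differs from SDB mode and allocate the MCQ resources.
 *
 * Return: 0 upon success; < 0 upon failure.
 */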
8751 static int ufshcd_alloc_mcq(struct ufs_hba *hba)
8752 {
8753 int ret;
8754 int old_nutrs = hba->nutrs;
8755
8756 ret = ufshcd_mcq_decide_queue_depth(hba);
8757 if (ret < 0)
8758 return ret;
8759
8760 hba->nutrs = ret;
8761 ret = ufshcd_mcq_init(hba);
8762 if (ret)
8763 goto err;
8764
8765 /*
8766 * Previously allocated memory for nutrs may not be enough in MCQ mode.
8767 * The number of supported tags in MCQ mode may be larger than in SDB mode.
8768 */
8769 if (hba->nutrs != old_nutrs) {
8770 ufshcd_release_sdb_queue(hba, old_nutrs);
8771 ret = ufshcd_memory_alloc(hba);
8772 if (ret)
8773 goto err;
8774 ufshcd_host_memory_configure(hba);
8775 }
8776
8777 ret = ufshcd_mcq_memory_alloc(hba);
8778 if (ret)
8779 goto err;
8780
8781 return 0;
8782 err:
8783 hba->nutrs = old_nutrs;
8784 return ret;
8785 }
8786
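/**
 * ufshcd_config_mcq - configure the controller for MCQ operation
 * @hba: per-adapter instance
 *
 * Configure ESI if available, enable the MCQ interrupts, make the queues
 * operational and switch the controller into MCQ mode.
 */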
8787 static void ufshcd_config_mcq(struct ufs_hba *hba)
8788 {
8789 int ret;
8790 u32 intrs;
8791
8792 ret = ufshcd_mcq_vops_config_esi(hba);
8793 dev_info(hba->dev, "ESI %sconfigured\n", ret ? "is not " : "");
8794
8795 intrs = UFSHCD_ENABLE_MCQ_INTRS;
8796 if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_INTR)
8797 intrs &= ~MCQ_CQ_EVENT_STATUS;
8798 ufshcd_enable_intr(hba, intrs);
8799 ufshcd_mcq_make_queues_operational(hba);
8800 ufshcd_mcq_config_mac(hba, hba->nutrs);
8801
8802 hba->host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
8803 hba->reserved_slot = hba->nutrs - UFSHCD_NUM_RESERVED;
8804
8805 /* Select MCQ mode */
8806 ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x1,
8807 REG_UFS_MEM_CFG);
8808 hba->mcq_enabled = true;
8809
8810 dev_info(hba->dev, "MCQ configured, nr_queues=%d, io_queues=%d, read_queue=%d, poll_queues=%d, queue_depth=%d\n",
8811 hba->nr_hw_queues, hba->nr_queues[HCTX_TYPE_DEFAULT],
8812 hba->nr_queues[HCTX_TYPE_READ], hba->nr_queues[HCTX_TYPE_POLL],
8813 hba->nutrs);
8814 }
8815
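/**
 * ufshcd_device_init - initialize the link and the UFS device
 * @hba: per-adapter instance
 * @init_dev_params: whether to (re)initialize the device parameters
 *
 * Perform link startup, verify device initialization with a NOP OUT UPIU,
 * wait for fDeviceInit to complete, configure MCQ when supported and switch
 * to the maximum supported power mode.
 *
 * Return: 0 upon success; < 0 upon failure.
 */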
8816 static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
8817 {
8818 int ret;
8819 struct Scsi_Host *host = hba->host;
8820
8821 hba->ufshcd_state = UFSHCD_STATE_RESET;
8822
8823 ret = ufshcd_link_startup(hba);
8824 if (ret)
8825 return ret;
8826
8827 if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION)
8828 return ret;
8829
8830 /* Debug counters initialization */
8831 ufshcd_clear_dbg_ufs_stats(hba);
8832
8833 /* UniPro link is active now */
8834 ufshcd_set_link_active(hba);
8835
8836 /* Reconfigure MCQ upon reset */
8837 if (is_mcq_enabled(hba) && !init_dev_params)
8838 ufshcd_config_mcq(hba);
8839
8840 /* Verify device initialization by sending NOP OUT UPIU */
8841 ret = ufshcd_verify_dev_init(hba);
8842 if (ret)
8843 return ret;
8844
8845 /* Initiate UFS initialization and wait until it completes */
8846 ret = ufshcd_complete_dev_init(hba);
8847 if (ret)
8848 return ret;
8849
8850 /*
8851 * Initialize the UFS device parameters used by the driver; these
8852 * parameters are associated with UFS descriptors.
8853 */
8854 if (init_dev_params) {
8855 ret = ufshcd_device_params_init(hba);
8856 if (ret)
8857 return ret;
8858 if (is_mcq_supported(hba) && !hba->scsi_host_added) {
8859 ret = ufshcd_alloc_mcq(hba);
8860 if (!ret) {
8861 ufshcd_config_mcq(hba);
8862 } else {
8863 /* Continue with SDB mode */
8864 use_mcq_mode = false;
8865 dev_err(hba->dev, "MCQ mode is disabled, err=%d\n",
8866 ret);
8867 }
8868 ret = scsi_add_host(host, hba->dev);
8869 if (ret) {
8870 dev_err(hba->dev, "scsi_add_host failed\n");
8871 return ret;
8872 }
8873 hba->scsi_host_added = true;
8874 } else if (is_mcq_supported(hba)) {
8875 /* UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH is set */
8876 ufshcd_config_mcq(hba);
8877 }
8878 }
8879
8880 ufshcd_tune_unipro_params(hba);
8881
8882 /* UFS device is also active now */
8883 ufshcd_set_ufs_dev_active(hba);
8884 ufshcd_force_reset_auto_bkops(hba);
8885
8886 ufshcd_set_timestamp_attr(hba);
8887
8888 /* Gear up to HS gear if supported */
8889 if (hba->max_pwr_info.is_valid) {
8890 /*
8891 * Set the right value to bRefClkFreq before attempting to
8892 * switch to HS gears.
8893 */
8894 if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
8895 ufshcd_set_dev_ref_clk(hba);
8896 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
8897 if (ret) {
8898 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
8899 __func__, ret);
8900 return ret;
8901 }
8902 }
8903
8904 return 0;
8905 }
8906
8907 /**
8908 * ufshcd_probe_hba - probe hba to detect device and initialize it
8909 * @hba: per-adapter instance
8910 * @init_dev_params: whether or not to call ufshcd_device_params_init().
8911 *
8912 * Execute link-startup and verify device initialization
8913 *
8914 * Return: 0 upon success; < 0 upon failure.
8915 */
8916 static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
8917 {
8918 ktime_t start = ktime_get();
8919 unsigned long flags;
8920 int ret;
8921
8922 ret = ufshcd_device_init(hba, init_dev_params);
8923 if (ret)
8924 goto out;
8925
8926 if (!hba->pm_op_in_progress &&
8927 (hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH)) {
8928 /* Reset the device and controller before doing reinit */
8929 ufshcd_device_reset(hba);
8930 ufs_put_device_desc(hba);
8931 ufshcd_hba_stop(hba);
8932 ret = ufshcd_hba_enable(hba);
8933 if (ret) {
8934 dev_err(hba->dev, "Host controller enable failed\n");
8935 ufshcd_print_evt_hist(hba);
8936 ufshcd_print_host_state(hba);
8937 goto out;
8938 }
8939
8940 /* Reinit the device */
8941 ret = ufshcd_device_init(hba, init_dev_params);
8942 if (ret)
8943 goto out;
8944 }
8945
8946 ufshcd_print_pwr_info(hba);
8947
8948 /*
8949 * bActiveICCLevel is volatile for UFS device (as per latest v2.1 spec)
8950 * and for removable UFS card as well, hence always set the parameter.
8951 * Note: Error handler may issue the device reset hence resetting
8952 * bActiveICCLevel as well so it is always safe to set this here.
8953 */
8954 ufshcd_set_active_icc_lvl(hba);
8955
8956 /* Enable UFS Write Booster if supported */
8957 ufshcd_configure_wb(hba);
8958
8959 if (hba->ee_usr_mask)
8960 ufshcd_write_ee_control(hba);
8961 /* Enable Auto-Hibernate if configured */
8962 ufshcd_auto_hibern8_enable(hba);
8963
8964 out:
8965 spin_lock_irqsave(hba->host->host_lock, flags);
8966 if (ret)
8967 hba->ufshcd_state = UFSHCD_STATE_ERROR;
8968 else if (hba->ufshcd_state == UFSHCD_STATE_RESET)
8969 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
8970 spin_unlock_irqrestore(hba->host->host_lock, flags);
8971
8972 trace_ufshcd_init(dev_name(hba->dev), ret,
8973 ktime_to_us(ktime_sub(ktime_get(), start)),
8974 hba->curr_dev_pwr_mode, hba->uic_link_state);
8975 return ret;
8976 }
8977
8978 /**
8979 * ufshcd_async_scan - asynchronous execution for probing hba
8980 * @data: data pointer to pass to this function
8981 * @cookie: cookie data
8982 */
8983 static void ufshcd_async_scan(void *data, async_cookie_t cookie)
8984 {
8985 struct ufs_hba *hba = (struct ufs_hba *)data;
8986 int ret;
8987
8988 down(&hba->host_sem);
8989 /* Initialize hba, detect and initialize UFS device */
8990 ret = ufshcd_probe_hba(hba, true);
8991 up(&hba->host_sem);
8992 if (ret)
8993 goto out;
8994
8995 /* Probe and add UFS logical units */
8996 ret = ufshcd_add_lus(hba);
8997
8998 out:
8999 pm_runtime_put_sync(hba->dev);
9000
9001 if (ret)
9002 dev_err(hba->dev, "%s failed: %d\n", __func__, ret);
9003 }
9004
9005 static enum scsi_timeout_action ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
9006 {
9007 struct ufs_hba *hba = shost_priv(scmd->device->host);
9008
9009 if (!hba->system_suspending) {
9010 /* Activate the error handler in the SCSI core. */
9011 return SCSI_EH_NOT_HANDLED;
9012 }
9013
9014 /*
9015 * If we get here we know that no TMFs are outstanding and also that
9016 * the only pending command is a START STOP UNIT command. Handle the
9017 * timeout of that command directly to prevent a deadlock between
9018 * ufshcd_set_dev_pwr_mode() and ufshcd_err_handler().
9019 */
9020 ufshcd_link_recovery(hba);
9021 dev_info(hba->dev, "%s() finished; outstanding_tasks = %#lx.\n",
9022 __func__, hba->outstanding_tasks);
9023
9024 return scsi_host_busy(hba->host) ? SCSI_EH_RESET_TIMER : SCSI_EH_DONE;
9025 }
9026
9027 static const struct attribute_group *ufshcd_driver_groups[] = {
9028 &ufs_sysfs_unit_descriptor_group,
9029 &ufs_sysfs_lun_attributes_group,
9030 NULL,
9031 };
9032
9033 static struct ufs_hba_variant_params ufs_hba_vps = {
9034 .hba_enable_delay_us = 1000,
9035 .wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(40),
9036 .devfreq_profile.polling_ms = 100,
9037 .devfreq_profile.target = ufshcd_devfreq_target,
9038 .devfreq_profile.get_dev_status = ufshcd_devfreq_get_dev_status,
9039 .ondemand_data.upthreshold = 70,
9040 .ondemand_data.downdifferential = 5,
9041 };
9042
9043 static const struct scsi_host_template ufshcd_driver_template = {
9044 .module = THIS_MODULE,
9045 .name = UFSHCD,
9046 .proc_name = UFSHCD,
9047 .map_queues = ufshcd_map_queues,
9048 .queuecommand = ufshcd_queuecommand,
9049 .mq_poll = ufshcd_poll,
9050 .slave_alloc = ufshcd_slave_alloc,
9051 .slave_configure = ufshcd_slave_configure,
9052 .slave_destroy = ufshcd_slave_destroy,
9053 .change_queue_depth = ufshcd_change_queue_depth,
9054 .eh_abort_handler = ufshcd_abort,
9055 .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
9056 .eh_host_reset_handler = ufshcd_eh_host_reset_handler,
9057 .eh_timed_out = ufshcd_eh_timed_out,
9058 .this_id = -1,
9059 .sg_tablesize = SG_ALL,
9060 .cmd_per_lun = UFSHCD_CMD_PER_LUN,
9061 .can_queue = UFSHCD_CAN_QUEUE,
9062 .max_segment_size = PRDT_DATA_BYTE_COUNT_MAX,
9063 .max_sectors = SZ_1M / SECTOR_SIZE,
9064 .max_host_blocked = 1,
9065 .track_queue_depth = 1,
9066 .skip_settle_delay = 1,
9067 .sdev_groups = ufshcd_driver_groups,
9068 .rpm_autosuspend_delay = RPM_AUTOSUSPEND_DELAY_MS,
9069 };
9070
9071 static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
9072 int ua)
9073 {
9074 int ret;
9075
9076 if (!vreg)
9077 return 0;
9078
9079 /*
9080 * "set_load" operation shall be required on those regulators
9081 * which specifically configured current limitation. Otherwise
9082 * zero max_uA may cause unexpected behavior when regulator is
9083 * enabled or set as high power mode.
9084 */
9085 if (!vreg->max_uA)
9086 return 0;
9087
9088 ret = regulator_set_load(vreg->reg, ua);
9089 if (ret < 0) {
9090 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
9091 __func__, vreg->name, ua, ret);
9092 }
9093
9094 return ret;
9095 }
9096
9097 static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
9098 struct ufs_vreg *vreg)
9099 {
9100 return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
9101 }
9102
9103 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
9104 struct ufs_vreg *vreg)
9105 {
9106 if (!vreg)
9107 return 0;
9108
9109 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
9110 }
9111
9112 static int ufshcd_config_vreg(struct device *dev,
9113 struct ufs_vreg *vreg, bool on)
9114 {
9115 if (regulator_count_voltages(vreg->reg) <= 0)
9116 return 0;
9117
9118 return ufshcd_config_vreg_load(dev, vreg, on ? vreg->max_uA : 0);
9119 }
9120
9121 static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
9122 {
9123 int ret = 0;
9124
9125 if (!vreg || vreg->enabled)
9126 goto out;
9127
9128 ret = ufshcd_config_vreg(dev, vreg, true);
9129 if (!ret)
9130 ret = regulator_enable(vreg->reg);
9131
9132 if (!ret)
9133 vreg->enabled = true;
9134 else
9135 dev_err(dev, "%s: %s enable failed, err=%d\n",
9136 __func__, vreg->name, ret);
9137 out:
9138 return ret;
9139 }
9140
9141 static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
9142 {
9143 int ret = 0;
9144
9145 if (!vreg || !vreg->enabled || vreg->always_on)
9146 goto out;
9147
9148 ret = regulator_disable(vreg->reg);
9149
9150 if (!ret) {
9151 /* ignore errors on applying disable config */
9152 ufshcd_config_vreg(dev, vreg, false);
9153 vreg->enabled = false;
9154 } else {
9155 dev_err(dev, "%s: %s disable failed, err=%d\n",
9156 __func__, vreg->name, ret);
9157 }
9158 out:
9159 return ret;
9160 }
9161
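/**
 * ufshcd_setup_vreg - enable or disable the UFS device power rails
 * @hba: per-adapter instance
 * @on: true to enable VCC, VCCQ and VCCQ2, false to disable them
 *
 * On failure, any rail that was already enabled is turned off again.
 *
 * Return: 0 upon success; < 0 upon failure.
 */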
9162 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
9163 {
9164 int ret = 0;
9165 struct device *dev = hba->dev;
9166 struct ufs_vreg_info *info = &hba->vreg_info;
9167
9168 ret = ufshcd_toggle_vreg(dev, info->vcc, on);
9169 if (ret)
9170 goto out;
9171
9172 ret = ufshcd_toggle_vreg(dev, info->vccq, on);
9173 if (ret)
9174 goto out;
9175
9176 ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
9177
9178 out:
9179 if (ret) {
9180 ufshcd_toggle_vreg(dev, info->vccq2, false);
9181 ufshcd_toggle_vreg(dev, info->vccq, false);
9182 ufshcd_toggle_vreg(dev, info->vcc, false);
9183 }
9184 return ret;
9185 }
9186
9187 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
9188 {
9189 struct ufs_vreg_info *info = &hba->vreg_info;
9190
9191 return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
9192 }
9193
9194 int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
9195 {
9196 int ret = 0;
9197
9198 if (!vreg)
9199 goto out;
9200
9201 vreg->reg = devm_regulator_get(dev, vreg->name);
9202 if (IS_ERR(vreg->reg)) {
9203 ret = PTR_ERR(vreg->reg);
9204 dev_err(dev, "%s: %s get failed, err=%d\n",
9205 __func__, vreg->name, ret);
9206 }
9207 out:
9208 return ret;
9209 }
9210 EXPORT_SYMBOL_GPL(ufshcd_get_vreg);
9211
9212 static int ufshcd_init_vreg(struct ufs_hba *hba)
9213 {
9214 int ret = 0;
9215 struct device *dev = hba->dev;
9216 struct ufs_vreg_info *info = &hba->vreg_info;
9217
9218 ret = ufshcd_get_vreg(dev, info->vcc);
9219 if (ret)
9220 goto out;
9221
9222 ret = ufshcd_get_vreg(dev, info->vccq);
9223 if (!ret)
9224 ret = ufshcd_get_vreg(dev, info->vccq2);
9225 out:
9226 return ret;
9227 }
9228
9229 static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
9230 {
9231 struct ufs_vreg_info *info = &hba->vreg_info;
9232
9233 return ufshcd_get_vreg(hba->dev, info->vdd_hba);
9234 }
9235
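/**
 * ufshcd_setup_clocks - enable or disable the UFS host controller clocks
 * @hba: per-adapter instance
 * @on: true to enable the clocks, false to disable them
 *
 * Clocks that are needed to keep the link active are left untouched while
 * the link is active. The vendor setup_clocks hooks are invoked before and
 * after the clock state change.
 *
 * Return: 0 upon success; < 0 upon failure.
 */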
9236 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
9237 {
9238 int ret = 0;
9239 struct ufs_clk_info *clki;
9240 struct list_head *head = &hba->clk_list_head;
9241 unsigned long flags;
9242 ktime_t start = ktime_get();
9243 bool clk_state_changed = false;
9244
9245 if (list_empty(head))
9246 goto out;
9247
9248 ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
9249 if (ret)
9250 return ret;
9251
9252 list_for_each_entry(clki, head, list) {
9253 if (!IS_ERR_OR_NULL(clki->clk)) {
9254 /*
9255 * Don't disable clocks which are needed
9256 * to keep the link active.
9257 */
9258 if (ufshcd_is_link_active(hba) &&
9259 clki->keep_link_active)
9260 continue;
9261
9262 clk_state_changed = on ^ clki->enabled;
9263 if (on && !clki->enabled) {
9264 ret = clk_prepare_enable(clki->clk);
9265 if (ret) {
9266 dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
9267 __func__, clki->name, ret);
9268 goto out;
9269 }
9270 } else if (!on && clki->enabled) {
9271 clk_disable_unprepare(clki->clk);
9272 }
9273 clki->enabled = on;
9274 dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
9275 clki->name, on ? "en" : "dis");
9276 }
9277 }
9278
9279 ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
9280 if (ret)
9281 return ret;
9282
9283 out:
9284 if (ret) {
9285 list_for_each_entry(clki, head, list) {
9286 if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
9287 clk_disable_unprepare(clki->clk);
9288 }
9289 } else if (!ret && on) {
9290 spin_lock_irqsave(hba->host->host_lock, flags);
9291 hba->clk_gating.state = CLKS_ON;
9292 trace_ufshcd_clk_gating(dev_name(hba->dev),
9293 hba->clk_gating.state);
9294 spin_unlock_irqrestore(hba->host->host_lock, flags);
9295 }
9296
9297 if (clk_state_changed)
9298 trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
9299 (on ? "on" : "off"),
9300 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
9301 return ret;
9302 }
9303
9304 static enum ufs_ref_clk_freq ufshcd_parse_ref_clk_property(struct ufs_hba *hba)
9305 {
9306 u32 freq;
9307 int ret = device_property_read_u32(hba->dev, "ref-clk-freq", &freq);
9308
9309 if (ret) {
9310 dev_dbg(hba->dev, "Cannot query 'ref-clk-freq' property = %d", ret);
9311 return REF_CLK_FREQ_INVAL;
9312 }
9313
9314 return ufs_get_bref_clk_from_hz(freq);
9315 }
9316
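/**
 * ufshcd_init_clocks - look up and configure the UFS host controller clocks
 * @hba: per-adapter instance
 *
 * Acquire each clock listed in the clock list, parse the device reference
 * clock frequency from "ref_clk" and set each clock to its maximum frequency.
 *
 * Return: 0 upon success; < 0 upon failure.
 */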
9317 static int ufshcd_init_clocks(struct ufs_hba *hba)
9318 {
9319 int ret = 0;
9320 struct ufs_clk_info *clki;
9321 struct device *dev = hba->dev;
9322 struct list_head *head = &hba->clk_list_head;
9323
9324 if (list_empty(head))
9325 goto out;
9326
9327 list_for_each_entry(clki, head, list) {
9328 if (!clki->name)
9329 continue;
9330
9331 clki->clk = devm_clk_get(dev, clki->name);
9332 if (IS_ERR(clki->clk)) {
9333 ret = PTR_ERR(clki->clk);
9334 dev_err(dev, "%s: %s clk get failed, %d\n",
9335 __func__, clki->name, ret);
9336 goto out;
9337 }
9338
9339 /*
9340 * Parse device ref clk freq as per device tree "ref_clk".
9341 * Default dev_ref_clk_freq is set to REF_CLK_FREQ_INVAL
9342 * in ufshcd_alloc_host().
9343 */
9344 if (!strcmp(clki->name, "ref_clk"))
9345 ufshcd_parse_dev_ref_clk_freq(hba, clki->clk);
9346
9347 if (clki->max_freq) {
9348 ret = clk_set_rate(clki->clk, clki->max_freq);
9349 if (ret) {
9350 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
9351 __func__, clki->name,
9352 clki->max_freq, ret);
9353 goto out;
9354 }
9355 clki->curr_freq = clki->max_freq;
9356 }
9357 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
9358 clki->name, clk_get_rate(clki->clk));
9359 }
9360 out:
9361 return ret;
9362 }
9363
9364 static int ufshcd_variant_hba_init(struct ufs_hba *hba)
9365 {
9366 int err = 0;
9367
9368 if (!hba->vops)
9369 goto out;
9370
9371 err = ufshcd_vops_init(hba);
9372 if (err)
9373 dev_err_probe(hba->dev, err,
9374 "%s: variant %s init failed with err %d\n",
9375 __func__, ufshcd_get_var_name(hba), err);
9376 out:
9377 return err;
9378 }
9379
9380 static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
9381 {
9382 if (!hba->vops)
9383 return;
9384
9385 ufshcd_vops_exit(hba);
9386 }
9387
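/**
 * ufshcd_hba_init - power up the host controller
 * @hba: per-adapter instance
 *
 * Initialize and enable the host controller regulator, the clocks, the UFS
 * device regulators and the vendor-specific (variant) driver.
 *
 * Return: 0 upon success; < 0 upon failure.
 */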
9388 static int ufshcd_hba_init(struct ufs_hba *hba)
9389 {
9390 int err;
9391
9392 /*
9393 * Handle host controller power separately from the UFS device power
9394 * rails, as this makes it easier to control UFS host controller power
9395 * collapse, which is different from UFS device power collapse.
9396 * Also, enable the host controller power before we go ahead with rest
9397 * of the initialization here.
9398 */
9399 err = ufshcd_init_hba_vreg(hba);
9400 if (err)
9401 goto out;
9402
9403 err = ufshcd_setup_hba_vreg(hba, true);
9404 if (err)
9405 goto out;
9406
9407 err = ufshcd_init_clocks(hba);
9408 if (err)
9409 goto out_disable_hba_vreg;
9410
9411 if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
9412 hba->dev_ref_clk_freq = ufshcd_parse_ref_clk_property(hba);
9413
9414 err = ufshcd_setup_clocks(hba, true);
9415 if (err)
9416 goto out_disable_hba_vreg;
9417
9418 err = ufshcd_init_vreg(hba);
9419 if (err)
9420 goto out_disable_clks;
9421
9422 err = ufshcd_setup_vreg(hba, true);
9423 if (err)
9424 goto out_disable_clks;
9425
9426 err = ufshcd_variant_hba_init(hba);
9427 if (err)
9428 goto out_disable_vreg;
9429
9430 ufs_debugfs_hba_init(hba);
9431
9432 hba->is_powered = true;
9433 goto out;
9434
9435 out_disable_vreg:
9436 ufshcd_setup_vreg(hba, false);
9437 out_disable_clks:
9438 ufshcd_setup_clocks(hba, false);
9439 out_disable_hba_vreg:
9440 ufshcd_setup_hba_vreg(hba, false);
9441 out:
9442 return err;
9443 }
9444
9445 static void ufshcd_hba_exit(struct ufs_hba *hba)
9446 {
9447 if (hba->is_powered) {
9448 ufshcd_exit_clk_scaling(hba);
9449 ufshcd_exit_clk_gating(hba);
9450 if (hba->eh_wq)
9451 destroy_workqueue(hba->eh_wq);
9452 ufs_debugfs_hba_exit(hba);
9453 ufshcd_variant_hba_exit(hba);
9454 ufshcd_setup_vreg(hba, false);
9455 ufshcd_setup_clocks(hba, false);
9456 ufshcd_setup_hba_vreg(hba, false);
9457 hba->is_powered = false;
9458 ufs_put_device_desc(hba);
9459 }
9460 }
9461
9462 static int ufshcd_execute_start_stop(struct scsi_device *sdev,
9463 enum ufs_dev_pwr_mode pwr_mode,
9464 struct scsi_sense_hdr *sshdr)
9465 {
9466 const unsigned char cdb[6] = { START_STOP, 0, 0, 0, pwr_mode << 4, 0 };
9467 const struct scsi_exec_args args = {
9468 .sshdr = sshdr,
9469 .req_flags = BLK_MQ_REQ_PM,
9470 .scmd_flags = SCMD_FAIL_IF_RECOVERING,
9471 };
9472
9473 return scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, /*buffer=*/NULL,
9474 /*bufflen=*/0, /*timeout=*/10 * HZ, /*retries=*/0,
9475 &args);
9476 }
9477
9478 /**
9479 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
9480 * power mode
9481 * @hba: per adapter instance
9482 * @pwr_mode: device power mode to set
9483 *
9484 * Return: 0 if requested power mode is set successfully;
9485 * < 0 if failed to set the requested power mode.
9486 */
9487 static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
9488 enum ufs_dev_pwr_mode pwr_mode)
9489 {
9490 struct scsi_sense_hdr sshdr;
9491 struct scsi_device *sdp;
9492 unsigned long flags;
9493 int ret, retries;
9494
9495 spin_lock_irqsave(hba->host->host_lock, flags);
9496 sdp = hba->ufs_device_wlun;
9497 if (sdp && scsi_device_online(sdp))
9498 ret = scsi_device_get(sdp);
9499 else
9500 ret = -ENODEV;
9501 spin_unlock_irqrestore(hba->host->host_lock, flags);
9502
9503 if (ret)
9504 return ret;
9505
9506 /*
9507 * If scsi commands fail, the scsi mid-layer schedules scsi error-
9508 * handling, which would wait for host to be resumed. Since we know
9509 * we are functional while we are here, skip host resume in error
9510 * handling context.
9511 */
9512 hba->host->eh_noresume = 1;
9513
9514 /*
9515 * This function is generally called from the power management
9516 * callbacks, hence the RQF_PM flag is set so that it doesn't resume the
9517 * already suspended children.
9518 */
9519 for (retries = 3; retries > 0; --retries) {
9520 ret = ufshcd_execute_start_stop(sdp, pwr_mode, &sshdr);
9521 /*
9522 * scsi_execute_cmd() only returns a negative value if the request
9523 * queue is dying.
9524 */
9525 if (ret <= 0)
9526 break;
9527 }
9528 if (ret) {
9529 sdev_printk(KERN_WARNING, sdp,
9530 "START_STOP failed for power mode: %d, result %x\n",
9531 pwr_mode, ret);
9532 if (ret > 0) {
9533 if (scsi_sense_valid(&sshdr))
9534 scsi_print_sense_hdr(sdp, NULL, &sshdr);
9535 ret = -EIO;
9536 }
9537 } else {
9538 hba->curr_dev_pwr_mode = pwr_mode;
9539 }
9540
9541 scsi_device_put(sdp);
9542 hba->host->eh_noresume = 0;
9543 return ret;
9544 }
9545
9546 static int ufshcd_link_state_transition(struct ufs_hba *hba,
9547 enum uic_link_state req_link_state,
9548 bool check_for_bkops)
9549 {
9550 int ret = 0;
9551
9552 if (req_link_state == hba->uic_link_state)
9553 return 0;
9554
9555 if (req_link_state == UIC_LINK_HIBERN8_STATE) {
9556 ret = ufshcd_uic_hibern8_enter(hba);
9557 if (!ret) {
9558 ufshcd_set_link_hibern8(hba);
9559 } else {
9560 dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
9561 __func__, ret);
9562 goto out;
9563 }
9564 }
9565 /*
9566 * If autobkops is enabled, link can't be turned off because
9567 * turning off the link would also turn off the device, except in the
9568 * case of DeepSleep where the device is expected to remain powered.
9569 */
9570 else if ((req_link_state == UIC_LINK_OFF_STATE) &&
9571 (!check_for_bkops || !hba->auto_bkops_enabled)) {
9572 /*
9573 * Let's make sure that the link is in low power mode. We are doing
9574 * this currently by putting the link in Hibern8. Another way to
9575 * put the link in low power mode is to send the DME end point
9576 * reset command to the device and then send the DME reset command
9577 * to the local UniPro. But putting the link in Hibern8 is much faster.
9578 *
9579 * Note also that putting the link in Hibern8 is a requirement
9580 * for entering DeepSleep.
9581 */
9582 ret = ufshcd_uic_hibern8_enter(hba);
9583 if (ret) {
9584 dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
9585 __func__, ret);
9586 goto out;
9587 }
9588 /*
9589 * Change controller state to "reset state" which
9590 * should also put the link in off/reset state
9591 */
9592 ufshcd_hba_stop(hba);
9593 /*
9594 * TODO: Check if we need any delay to make sure that
9595 * controller is reset
9596 */
9597 ufshcd_set_link_off(hba);
9598 }
9599
9600 out:
9601 return ret;
9602 }
9603
9604 static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
9605 {
9606 bool vcc_off = false;
9607
9608 /*
9609 * It seems some UFS devices may keep drawing more than sleep current
9610 * (at least for 500us) from the UFS rails (especially from the VCCQ rail).
9611 * To avoid this situation, add 2ms delay before putting these UFS
9612 * rails in LPM mode.
9613 */
9614 if (!ufshcd_is_link_active(hba) &&
9615 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
9616 usleep_range(2000, 2100);
9617
9618 /*
9619 * If the UFS device is in UFS_Sleep state, turn off the VCC rail to save some
9620 * power.
9621 *
9622 * If UFS device and link is in OFF state, all power supplies (VCC,
9623 * VCCQ, VCCQ2) can be turned off if power on write protect is not
9624 * required. If UFS link is inactive (Hibern8 or OFF state) and device
9625 * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
9626 *
9627 * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
9628 * in low power state which would save some power.
9629 *
9630 * If Write Booster is enabled and the device needs to flush the WB
9631 * buffer OR if bkops status is urgent for WB, keep Vcc on.
9632 */
9633 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
9634 !hba->dev_info.is_lu_power_on_wp) {
9635 ufshcd_setup_vreg(hba, false);
9636 vcc_off = true;
9637 } else if (!ufshcd_is_ufs_dev_active(hba)) {
9638 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
9639 vcc_off = true;
9640 if (ufshcd_is_link_hibern8(hba) || ufshcd_is_link_off(hba)) {
9641 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
9642 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
9643 }
9644 }
9645
9646 /*
9647 * Some UFS devices require a delay after the VCC power rail is turned off.
9648 */
9649 if (vcc_off && hba->vreg_info.vcc &&
9650 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)
9651 usleep_range(5000, 5100);
9652 }
9653
9654 #ifdef CONFIG_PM
9655 static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
9656 {
9657 int ret = 0;
9658
9659 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
9660 !hba->dev_info.is_lu_power_on_wp) {
9661 ret = ufshcd_setup_vreg(hba, true);
9662 } else if (!ufshcd_is_ufs_dev_active(hba)) {
9663 if (!ufshcd_is_link_active(hba)) {
9664 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
9665 if (ret)
9666 goto vcc_disable;
9667 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
9668 if (ret)
9669 goto vccq_lpm;
9670 }
9671 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
9672 }
9673 goto out;
9674
9675 vccq_lpm:
9676 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
9677 vcc_disable:
9678 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
9679 out:
9680 return ret;
9681 }
9682 #endif /* CONFIG_PM */
9683
9684 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
9685 {
9686 if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
9687 ufshcd_setup_hba_vreg(hba, false);
9688 }
9689
9690 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
9691 {
9692 if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
9693 ufshcd_setup_hba_vreg(hba, true);
9694 }
9695
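/**
 * __ufshcd_wl_suspend - suspend the UFS device WLUN
 * @hba: per-adapter instance
 * @pm_op: runtime PM, system PM or shutdown
 *
 * Transition the UFS device and the UniPro link into the low power states
 * selected by the runtime or system PM level, taking BKOPs and Write Booster
 * buffer flush requirements into account.
 *
 * Return: 0 upon success; < 0 upon failure.
 */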
9696 static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
9697 {
9698 int ret = 0;
9699 bool check_for_bkops;
9700 enum ufs_pm_level pm_lvl;
9701 enum ufs_dev_pwr_mode req_dev_pwr_mode;
9702 enum uic_link_state req_link_state;
9703
9704 hba->pm_op_in_progress = true;
9705 if (pm_op != UFS_SHUTDOWN_PM) {
9706 pm_lvl = pm_op == UFS_RUNTIME_PM ?
9707 hba->rpm_lvl : hba->spm_lvl;
9708 req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
9709 req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
9710 } else {
9711 req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
9712 req_link_state = UIC_LINK_OFF_STATE;
9713 }
9714
9715 /*
9716 * If we can't transition into any of the low power modes
9717 * just gate the clocks.
9718 */
9719 ufshcd_hold(hba);
9720 hba->clk_gating.is_suspended = true;
9721
9722 if (ufshcd_is_clkscaling_supported(hba))
9723 ufshcd_clk_scaling_suspend(hba, true);
9724
9725 if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
9726 req_link_state == UIC_LINK_ACTIVE_STATE) {
9727 goto vops_suspend;
9728 }
9729
9730 if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
9731 (req_link_state == hba->uic_link_state))
9732 goto enable_scaling;
9733
9734 /* UFS device & link must be active before we enter in this function */
9735 if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
9736 /* Wait err handler finish or trigger err recovery */
9737 if (!ufshcd_eh_in_progress(hba))
9738 ufshcd_force_error_recovery(hba);
9739 ret = -EBUSY;
9740 goto enable_scaling;
9741 }
9742
9743 if (pm_op == UFS_RUNTIME_PM) {
9744 if (ufshcd_can_autobkops_during_suspend(hba)) {
9745 /*
9746 * The device is idle with no requests in the queue,
9747 * allow background operations if bkops status shows
9748 * that performance might be impacted.
9749 */
9750 ret = ufshcd_urgent_bkops(hba);
9751 if (ret) {
9752 /*
9753 * If return err in suspend flow, IO will hang.
9754 * Trigger error handler and break suspend for
9755 * error recovery.
9756 */
9757 ufshcd_force_error_recovery(hba);
9758 ret = -EBUSY;
9759 goto enable_scaling;
9760 }
9761 } else {
9762 /* make sure that auto bkops is disabled */
9763 ufshcd_disable_auto_bkops(hba);
9764 }
9765 /*
9766 * If the device needs to do BKOPs or a WB buffer flush during
9767 * Hibern8, keep the device power mode as "active power mode"
9768 * and keep the VCC supply on.
9769 */
9770 hba->dev_info.b_rpm_dev_flush_capable =
9771 hba->auto_bkops_enabled ||
9772 (((req_link_state == UIC_LINK_HIBERN8_STATE) ||
9773 ((req_link_state == UIC_LINK_ACTIVE_STATE) &&
9774 ufshcd_is_auto_hibern8_enabled(hba))) &&
9775 ufshcd_wb_need_flush(hba));
9776 }
9777
9778 flush_work(&hba->eeh_work);
9779
9780 ret = ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE);
9781 if (ret)
9782 goto enable_scaling;
9783
9784 if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
9785 if (pm_op != UFS_RUNTIME_PM)
9786 /* ensure that bkops is disabled */
9787 ufshcd_disable_auto_bkops(hba);
9788
9789 if (!hba->dev_info.b_rpm_dev_flush_capable) {
9790 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
9791 if (ret && pm_op != UFS_SHUTDOWN_PM) {
9792 /*
9793 * If return err in suspend flow, IO will hang.
9794 * Trigger error handler and break suspend for
9795 * error recovery.
9796 */
9797 ufshcd_force_error_recovery(hba);
9798 ret = -EBUSY;
9799 }
9800 if (ret)
9801 goto enable_scaling;
9802 }
9803 }
9804
9805 /*
9806 * In the case of DeepSleep, the device is expected to remain powered
9807 * with the link off, so do not check for bkops.
9808 */
9809 check_for_bkops = !ufshcd_is_ufs_dev_deepsleep(hba);
9810 ret = ufshcd_link_state_transition(hba, req_link_state, check_for_bkops);
9811 if (ret && pm_op != UFS_SHUTDOWN_PM) {
9812 /*
9813 * If return err in suspend flow, IO will hang.
9814 * Trigger error handler and break suspend for
9815 * error recovery.
9816 */
9817 ufshcd_force_error_recovery(hba);
9818 ret = -EBUSY;
9819 }
9820 if (ret)
9821 goto set_dev_active;
9822
9823 vops_suspend:
9824 /*
9825 * Call vendor specific suspend callback. As these callbacks may access
9826 * vendor specific host controller register space call them before the
9827 * host clocks are ON.
9828 */
9829 ret = ufshcd_vops_suspend(hba, pm_op, POST_CHANGE);
9830 if (ret)
9831 goto set_link_active;
9832
9833 cancel_delayed_work_sync(&hba->ufs_rtc_update_work);
9834 goto out;
9835
9836 set_link_active:
9837 /*
9838 * Device hardware reset is required to exit DeepSleep. Also, for
9839 * DeepSleep, the link is off so host reset and restore will be done
9840 * further below.
9841 */
9842 if (ufshcd_is_ufs_dev_deepsleep(hba)) {
9843 ufshcd_device_reset(hba);
9844 WARN_ON(!ufshcd_is_link_off(hba));
9845 }
9846 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
9847 ufshcd_set_link_active(hba);
9848 else if (ufshcd_is_link_off(hba))
9849 ufshcd_host_reset_and_restore(hba);
9850 set_dev_active:
9851 /* Can also get here needing to exit DeepSleep */
9852 if (ufshcd_is_ufs_dev_deepsleep(hba)) {
9853 ufshcd_device_reset(hba);
9854 ufshcd_host_reset_and_restore(hba);
9855 }
9856 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
9857 ufshcd_disable_auto_bkops(hba);
9858 enable_scaling:
9859 if (ufshcd_is_clkscaling_supported(hba))
9860 ufshcd_clk_scaling_suspend(hba, false);
9861
9862 hba->dev_info.b_rpm_dev_flush_capable = false;
9863 out:
9864 if (hba->dev_info.b_rpm_dev_flush_capable) {
9865 schedule_delayed_work(&hba->rpm_dev_flush_recheck_work,
9866 msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS));
9867 }
9868
9869 if (ret) {
9870 ufshcd_update_evt_hist(hba, UFS_EVT_WL_SUSP_ERR, (u32)ret);
9871 hba->clk_gating.is_suspended = false;
9872 ufshcd_release(hba);
9873 }
9874 hba->pm_op_in_progress = false;
9875 return ret;
9876 }
9877
9878 #ifdef CONFIG_PM
9879 static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
9880 {
9881 int ret;
9882 enum uic_link_state old_link_state = hba->uic_link_state;
9883
9884 hba->pm_op_in_progress = true;
9885
9886 /*
9887 * Call vendor specific resume callback. As these callbacks may access
9888 * vendor specific host controller register space call them when the
9889 * host clocks are ON.
9890 */
9891 ret = ufshcd_vops_resume(hba, pm_op);
9892 if (ret)
9893 goto out;
9894
9895 /* For DeepSleep, the only supported option is to have the link off */
9896 WARN_ON(ufshcd_is_ufs_dev_deepsleep(hba) && !ufshcd_is_link_off(hba));
9897
9898 if (ufshcd_is_link_hibern8(hba)) {
9899 ret = ufshcd_uic_hibern8_exit(hba);
9900 if (!ret) {
9901 ufshcd_set_link_active(hba);
9902 } else {
9903 dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
9904 __func__, ret);
9905 goto vendor_suspend;
9906 }
9907 } else if (ufshcd_is_link_off(hba)) {
9908 /*
9909 * A full initialization of the host and the device is
9910 * required since the link was put to off during suspend.
9911 * Note, in the case of DeepSleep, the device will exit
9912 * DeepSleep due to device reset.
9913 */
9914 ret = ufshcd_reset_and_restore(hba);
9915 /*
9916 * ufshcd_reset_and_restore() should have already
9917 * set the link state as active
9918 */
9919 if (ret || !ufshcd_is_link_active(hba))
9920 goto vendor_suspend;
9921 }
9922
9923 if (!ufshcd_is_ufs_dev_active(hba)) {
9924 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
9925 if (ret)
9926 goto set_old_link_state;
9927 ufshcd_set_timestamp_attr(hba);
9928 schedule_delayed_work(&hba->ufs_rtc_update_work,
9929 msecs_to_jiffies(UFS_RTC_UPDATE_INTERVAL_MS));
9930 }
9931
9932 if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
9933 ufshcd_enable_auto_bkops(hba);
9934 else
9935 /*
9936 * If BKOPs operations are urgently needed at this moment then
9937 * keep auto-bkops enabled or else disable it.
9938 */
9939 ufshcd_urgent_bkops(hba);
9940
9941 if (hba->ee_usr_mask)
9942 ufshcd_write_ee_control(hba);
9943
9944 if (ufshcd_is_clkscaling_supported(hba))
9945 ufshcd_clk_scaling_suspend(hba, false);
9946
9947 if (hba->dev_info.b_rpm_dev_flush_capable) {
9948 hba->dev_info.b_rpm_dev_flush_capable = false;
9949 cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
9950 }
9951
9952 /* Enable Auto-Hibernate if configured */
9953 ufshcd_auto_hibern8_enable(hba);
9954
9955 goto out;
9956
9957 set_old_link_state:
9958 ufshcd_link_state_transition(hba, old_link_state, 0);
9959 vendor_suspend:
9960 ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE);
9961 ufshcd_vops_suspend(hba, pm_op, POST_CHANGE);
9962 out:
9963 if (ret)
9964 ufshcd_update_evt_hist(hba, UFS_EVT_WL_RES_ERR, (u32)ret);
9965 hba->clk_gating.is_suspended = false;
9966 ufshcd_release(hba);
9967 hba->pm_op_in_progress = false;
9968 return ret;
9969 }
9970
9971 static int ufshcd_wl_runtime_suspend(struct device *dev)
9972 {
9973 struct scsi_device *sdev = to_scsi_device(dev);
9974 struct ufs_hba *hba;
9975 int ret;
9976 ktime_t start = ktime_get();
9977
9978 hba = shost_priv(sdev->host);
9979
9980 ret = __ufshcd_wl_suspend(hba, UFS_RUNTIME_PM);
9981 if (ret)
9982 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
9983
9984 trace_ufshcd_wl_runtime_suspend(dev_name(dev), ret,
9985 ktime_to_us(ktime_sub(ktime_get(), start)),
9986 hba->curr_dev_pwr_mode, hba->uic_link_state);
9987
9988 return ret;
9989 }
9990
9991 static int ufshcd_wl_runtime_resume(struct device *dev)
9992 {
9993 struct scsi_device *sdev = to_scsi_device(dev);
9994 struct ufs_hba *hba;
9995 int ret = 0;
9996 ktime_t start = ktime_get();
9997
9998 hba = shost_priv(sdev->host);
9999
10000 ret = __ufshcd_wl_resume(hba, UFS_RUNTIME_PM);
10001 if (ret)
10002 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
10003
10004 trace_ufshcd_wl_runtime_resume(dev_name(dev), ret,
10005 ktime_to_us(ktime_sub(ktime_get(), start)),
10006 hba->curr_dev_pwr_mode, hba->uic_link_state);
10007
10008 return ret;
10009 }
10010 #endif
10011
10012 #ifdef CONFIG_PM_SLEEP
10013 static int ufshcd_wl_suspend(struct device *dev)
10014 {
10015 struct scsi_device *sdev = to_scsi_device(dev);
10016 struct ufs_hba *hba;
10017 int ret = 0;
10018 ktime_t start = ktime_get();
10019
10020 hba = shost_priv(sdev->host);
10021 down(&hba->host_sem);
10022 hba->system_suspending = true;
10023
10024 if (pm_runtime_suspended(dev))
10025 goto out;
10026
10027 ret = __ufshcd_wl_suspend(hba, UFS_SYSTEM_PM);
10028 if (ret) {
10029 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
10030 up(&hba->host_sem);
10031 }
10032
10033 out:
10034 if (!ret)
10035 hba->is_sys_suspended = true;
10036 trace_ufshcd_wl_suspend(dev_name(dev), ret,
10037 ktime_to_us(ktime_sub(ktime_get(), start)),
10038 hba->curr_dev_pwr_mode, hba->uic_link_state);
10039
10040 return ret;
10041 }
10042
10043 static int ufshcd_wl_resume(struct device *dev)
10044 {
10045 struct scsi_device *sdev = to_scsi_device(dev);
10046 struct ufs_hba *hba;
10047 int ret = 0;
10048 ktime_t start = ktime_get();
10049
10050 hba = shost_priv(sdev->host);
10051
10052 if (pm_runtime_suspended(dev))
10053 goto out;
10054
10055 ret = __ufshcd_wl_resume(hba, UFS_SYSTEM_PM);
10056 if (ret)
10057 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
10058 out:
10059 trace_ufshcd_wl_resume(dev_name(dev), ret,
10060 ktime_to_us(ktime_sub(ktime_get(), start)),
10061 hba->curr_dev_pwr_mode, hba->uic_link_state);
10062 if (!ret)
10063 hba->is_sys_suspended = false;
10064 hba->system_suspending = false;
10065 up(&hba->host_sem);
10066 return ret;
10067 }
10068 #endif
10069
10070 /**
10071 * ufshcd_suspend - helper function for suspend operations
10072 * @hba: per adapter instance
10073 *
10074 * This function disables the IRQs, turns off the clocks and puts the
10075 * device regulators and the HBA regulator into LPM mode.
10076 *
10077 * Return: 0 upon success; < 0 upon failure.
10078 */
10079 static int ufshcd_suspend(struct ufs_hba *hba)
10080 {
10081 int ret;
10082
10083 if (!hba->is_powered)
10084 return 0;
10085 /*
10086 * Disable the host IRQ as there won't be any host controller
10087 * transaction expected till resume.
10088 */
10089 ufshcd_disable_irq(hba);
10090 ret = ufshcd_setup_clocks(hba, false);
10091 if (ret) {
10092 ufshcd_enable_irq(hba);
10093 return ret;
10094 }
10095 if (ufshcd_is_clkgating_allowed(hba)) {
10096 hba->clk_gating.state = CLKS_OFF;
10097 trace_ufshcd_clk_gating(dev_name(hba->dev),
10098 hba->clk_gating.state);
10099 }
10100
10101 ufshcd_vreg_set_lpm(hba);
10102 /* Put the host controller in low power mode if possible */
10103 ufshcd_hba_vreg_set_lpm(hba);
10104 return ret;
10105 }
10106
10107 #ifdef CONFIG_PM
10108 /**
10109 * ufshcd_resume - helper function for resume operations
10110 * @hba: per adapter instance
10111 *
10112 * This function basically turns on the regulators, clocks and
10113 * irqs of the hba.
10114 *
10115 * Return: 0 for success and non-zero for failure.
10116 */
10117 static int ufshcd_resume(struct ufs_hba *hba)
10118 {
10119 int ret;
10120
10121 if (!hba->is_powered)
10122 return 0;
10123
10124 ufshcd_hba_vreg_set_hpm(hba);
10125 ret = ufshcd_vreg_set_hpm(hba);
10126 if (ret)
10127 goto out;
10128
10129 /* Make sure clocks are enabled before accessing controller */
10130 ret = ufshcd_setup_clocks(hba, true);
10131 if (ret)
10132 goto disable_vreg;
10133
10134 /* enable the host irq as host controller would be active soon */
10135 ufshcd_enable_irq(hba);
10136
10137 goto out;
10138
10139 disable_vreg:
10140 ufshcd_vreg_set_lpm(hba);
10141 out:
10142 if (ret)
10143 ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)ret);
10144 return ret;
10145 }
10146 #endif /* CONFIG_PM */
10147
10148 #ifdef CONFIG_PM_SLEEP
10149 /**
10150 * ufshcd_system_suspend - system suspend callback
10151 * @dev: Device associated with the UFS controller.
10152 *
10153 * Executed before putting the system into a sleep state in which the contents
10154 * of main memory are preserved.
10155 *
10156 * Return: 0 for success and non-zero for failure.
10157 */
10158 int ufshcd_system_suspend(struct device *dev)
10159 {
10160 struct ufs_hba *hba = dev_get_drvdata(dev);
10161 int ret = 0;
10162 ktime_t start = ktime_get();
10163
10164 if (pm_runtime_suspended(hba->dev))
10165 goto out;
10166
10167 ret = ufshcd_suspend(hba);
10168 out:
10169 trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
10170 ktime_to_us(ktime_sub(ktime_get(), start)),
10171 hba->curr_dev_pwr_mode, hba->uic_link_state);
10172 return ret;
10173 }
10174 EXPORT_SYMBOL(ufshcd_system_suspend);
10175
10176 /**
10177 * ufshcd_system_resume - system resume callback
10178 * @dev: Device associated with the UFS controller.
10179 *
10180 * Executed after waking the system up from a sleep state in which the contents
10181 * of main memory were preserved.
10182 *
10183 * Return: 0 for success and non-zero for failure.
10184 */
10185 int ufshcd_system_resume(struct device *dev)
10186 {
10187 struct ufs_hba *hba = dev_get_drvdata(dev);
10188 ktime_t start = ktime_get();
10189 int ret = 0;
10190
10191 if (pm_runtime_suspended(hba->dev))
10192 goto out;
10193
10194 ret = ufshcd_resume(hba);
10195
10196 out:
10197 trace_ufshcd_system_resume(dev_name(hba->dev), ret,
10198 ktime_to_us(ktime_sub(ktime_get(), start)),
10199 hba->curr_dev_pwr_mode, hba->uic_link_state);
10200
10201 return ret;
10202 }
10203 EXPORT_SYMBOL(ufshcd_system_resume);
10204 #endif /* CONFIG_PM_SLEEP */
10205
10206 #ifdef CONFIG_PM
10207 /**
10208 * ufshcd_runtime_suspend - runtime suspend callback
10209 * @dev: Device associated with the UFS controller.
10210 *
10211 * Check the description of ufshcd_suspend() function for more details.
10212 *
10213 * Return: 0 for success and non-zero for failure.
10214 */
10215 int ufshcd_runtime_suspend(struct device *dev)
10216 {
10217 struct ufs_hba *hba = dev_get_drvdata(dev);
10218 int ret;
10219 ktime_t start = ktime_get();
10220
10221 ret = ufshcd_suspend(hba);
10222
10223 trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
10224 ktime_to_us(ktime_sub(ktime_get(), start)),
10225 hba->curr_dev_pwr_mode, hba->uic_link_state);
10226 return ret;
10227 }
10228 EXPORT_SYMBOL(ufshcd_runtime_suspend);
10229
10230 /**
10231 * ufshcd_runtime_resume - runtime resume routine
10232 * @dev: Device associated with the UFS controller.
10233 *
10234 * This function basically brings the controller
10235 * to the active state. The following operations are done in this function:
10236 *
10237 * 1. Turn on all the controller related clocks
10238 * 2. Turn ON VCC rail
10239 *
10240 * Return: 0 upon success; < 0 upon failure.
10241 */
10242 int ufshcd_runtime_resume(struct device *dev)
10243 {
10244 struct ufs_hba *hba = dev_get_drvdata(dev);
10245 int ret;
10246 ktime_t start = ktime_get();
10247
10248 ret = ufshcd_resume(hba);
10249
10250 trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
10251 ktime_to_us(ktime_sub(ktime_get(), start)),
10252 hba->curr_dev_pwr_mode, hba->uic_link_state);
10253 return ret;
10254 }
10255 EXPORT_SYMBOL(ufshcd_runtime_resume);
10256 #endif /* CONFIG_PM */
10257
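/**
 * ufshcd_wl_shutdown - shutdown handler for the UFS device WLUN
 * @dev: pointer to the WLUN device
 *
 * Quiesce the WLUN, set the remaining SCSI devices offline, power down the
 * UFS device and, when the device and link are fully off, the host
 * controller as well.
 */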
10258 static void ufshcd_wl_shutdown(struct device *dev)
10259 {
10260 struct scsi_device *sdev = to_scsi_device(dev);
10261 struct ufs_hba *hba = shost_priv(sdev->host);
10262
10263 down(&hba->host_sem);
10264 hba->shutting_down = true;
10265 up(&hba->host_sem);
10266
10267 /* Turn on everything while shutting down */
10268 ufshcd_rpm_get_sync(hba);
10269 scsi_device_quiesce(sdev);
10270 shost_for_each_device(sdev, hba->host) {
10271 if (sdev == hba->ufs_device_wlun)
10272 continue;
10273 mutex_lock(&sdev->state_mutex);
10274 scsi_device_set_state(sdev, SDEV_OFFLINE);
10275 mutex_unlock(&sdev->state_mutex);
10276 }
10277 __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
10278
10279 /*
10280 * Next, turn off the UFS controller and the UFS regulators. Disable
10281 * clocks.
10282 */
10283 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
10284 ufshcd_suspend(hba);
10285
10286 hba->is_powered = false;
10287 }
10288
10289 /**
10290 * ufshcd_remove - de-allocate SCSI host and host memory space
10291 * data structure memory
10292 * @hba: per adapter instance
10293 */
10294 void ufshcd_remove(struct ufs_hba *hba)
10295 {
10296 if (hba->ufs_device_wlun)
10297 ufshcd_rpm_get_sync(hba);
10298 ufs_hwmon_remove(hba);
10299 ufs_bsg_remove(hba);
10300 ufs_sysfs_remove_nodes(hba->dev);
10301 cancel_delayed_work_sync(&hba->ufs_rtc_update_work);
10302 blk_mq_destroy_queue(hba->tmf_queue);
10303 blk_put_queue(hba->tmf_queue);
10304 blk_mq_free_tag_set(&hba->tmf_tag_set);
10305 if (hba->scsi_host_added)
10306 scsi_remove_host(hba->host);
10307 /* disable interrupts */
10308 ufshcd_disable_intr(hba, hba->intr_mask);
10309 ufshcd_hba_stop(hba);
10310 ufshcd_hba_exit(hba);
10311 }
10312 EXPORT_SYMBOL_GPL(ufshcd_remove);
10313
10314 #ifdef CONFIG_PM_SLEEP
10315 int ufshcd_system_freeze(struct device *dev)
10316 {
10317
10318 return ufshcd_system_suspend(dev);
10319
10320 }
10321 EXPORT_SYMBOL_GPL(ufshcd_system_freeze);
10322
10323 int ufshcd_system_restore(struct device *dev)
10324 {
10325
10326 struct ufs_hba *hba = dev_get_drvdata(dev);
10327 int ret;
10328
10329 ret = ufshcd_system_resume(dev);
10330 if (ret)
10331 return ret;
10332
10333 /* Configure UTRL and UTMRL base address registers */
10334 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
10335 REG_UTP_TRANSFER_REQ_LIST_BASE_L);
10336 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
10337 REG_UTP_TRANSFER_REQ_LIST_BASE_H);
10338 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
10339 REG_UTP_TASK_REQ_LIST_BASE_L);
10340 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
10341 REG_UTP_TASK_REQ_LIST_BASE_H);
10342 /*
10343 * Make sure that UTRL and UTMRL base address registers
10344 * are updated with the latest queue addresses. Only after
10345 * updating these addresses can we queue the new commands.
10346 */
10347 ufshcd_readl(hba, REG_UTP_TASK_REQ_LIST_BASE_H);
10348
10349 return 0;
10350
10351 }
10352 EXPORT_SYMBOL_GPL(ufshcd_system_restore);
10353
10354 int ufshcd_system_thaw(struct device *dev)
10355 {
10356 return ufshcd_system_resume(dev);
10357 }
10358 EXPORT_SYMBOL_GPL(ufshcd_system_thaw);
10359 #endif /* CONFIG_PM_SLEEP */
10360
10361 /**
10362 * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
10363 * @hba: pointer to Host Bus Adapter (HBA)
10364 */
10365 void ufshcd_dealloc_host(struct ufs_hba *hba)
10366 {
10367 scsi_host_put(hba->host);
10368 }
10369 EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
10370
10371 /**
10372 * ufshcd_set_dma_mask - Set dma mask based on the controller
10373 * addressing capability
10374 * @hba: per adapter instance
10375 *
10376 * Return: 0 for success, non-zero for failure.
10377 */
10378 static int ufshcd_set_dma_mask(struct ufs_hba *hba)
10379 {
10380 if (hba->vops && hba->vops->set_dma_mask)
10381 return hba->vops->set_dma_mask(hba);
10382 if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
10383 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
10384 return 0;
10385 }
10386 return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
10387 }
10388
10389 /**
10390 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
10391 * @dev: pointer to device handle
10392 * @hba_handle: driver private handle
10393 *
10394 * Return: 0 on success, non-zero value on failure.
10395 */
10396 int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
10397 {
10398 struct Scsi_Host *host;
10399 struct ufs_hba *hba;
10400 int err = 0;
10401
10402 if (!dev) {
10403 dev_err(dev,
10404 "Invalid memory reference for dev is NULL\n");
10405 err = -ENODEV;
10406 goto out_error;
10407 }
10408
10409 host = scsi_host_alloc(&ufshcd_driver_template,
10410 sizeof(struct ufs_hba));
10411 if (!host) {
10412 dev_err(dev, "scsi_host_alloc failed\n");
10413 err = -ENOMEM;
10414 goto out_error;
10415 }
10416 host->nr_maps = HCTX_TYPE_POLL + 1;
10417 hba = shost_priv(host);
10418 hba->host = host;
10419 hba->dev = dev;
10420 hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
10421 hba->nop_out_timeout = NOP_OUT_TIMEOUT;
10422 ufshcd_set_sg_entry_size(hba, sizeof(struct ufshcd_sg_entry));
10423 INIT_LIST_HEAD(&hba->clk_list_head);
10424 spin_lock_init(&hba->outstanding_lock);
10425
10426 *hba_handle = hba;
10427
10428 out_error:
10429 return err;
10430 }
10431 EXPORT_SYMBOL(ufshcd_alloc_host);
10432
10433 /* This function exists because blk_mq_alloc_tag_set() requires this. */
10434 static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx,
10435 const struct blk_mq_queue_data *qd)
10436 {
10437 WARN_ON_ONCE(true);
10438 return BLK_STS_NOTSUPP;
10439 }
10440
10441 static const struct blk_mq_ops ufshcd_tmf_ops = {
10442 .queue_rq = ufshcd_queue_tmf,
10443 };
10444
10445 /**
10446 * ufshcd_init - Driver initialization routine
10447 * @hba: per-adapter instance
10448 * @mmio_base: base register address
10449 * @irq: Interrupt line of device
10450 *
10451 * Return: 0 on success, non-zero value on failure.
10452 */
10453 int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
10454 {
10455 int err;
10456 struct Scsi_Host *host = hba->host;
10457 struct device *dev = hba->dev;
10458 char eh_wq_name[sizeof("ufs_eh_wq_00")];
10459
10460 /*
10461 * dev_set_drvdata() must be called before any callbacks are registered
10462 * that use dev_get_drvdata() (frequency scaling, clock scaling, hwmon,
10463 * sysfs).
10464 */
10465 dev_set_drvdata(dev, hba);
10466
10467 if (!mmio_base) {
10468 		dev_err(hba->dev,
10469 			"Invalid reference: mmio_base is NULL\n");
10470 err = -ENODEV;
10471 goto out_error;
10472 }
10473
10474 hba->mmio_base = mmio_base;
10475 hba->irq = irq;
10476 hba->vps = &ufs_hba_vps;
10477
10478 err = ufshcd_hba_init(hba);
10479 if (err)
10480 goto out_error;
10481
10482 /* Read capabilities registers */
10483 err = ufshcd_hba_capabilities(hba);
10484 if (err)
10485 goto out_disable;
10486
10487 /* Get UFS version supported by the controller */
10488 hba->ufs_version = ufshcd_get_ufs_version(hba);
10489
10490 /* Get Interrupt bit mask per version */
10491 hba->intr_mask = ufshcd_get_intr_mask(hba);
10492
10493 err = ufshcd_set_dma_mask(hba);
10494 if (err) {
10495 dev_err(hba->dev, "set dma mask failed\n");
10496 goto out_disable;
10497 }
10498
10499 /* Allocate memory for host memory space */
10500 err = ufshcd_memory_alloc(hba);
10501 if (err) {
10502 dev_err(hba->dev, "Memory allocation failed\n");
10503 goto out_disable;
10504 }
10505
10506 /* Configure LRB */
10507 ufshcd_host_memory_configure(hba);
10508
10509 host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
10510 host->cmd_per_lun = hba->nutrs - UFSHCD_NUM_RESERVED;
10511 host->max_id = UFSHCD_MAX_ID;
10512 host->max_lun = UFS_MAX_LUNS;
10513 host->max_channel = UFSHCD_MAX_CHANNEL;
10514 host->unique_id = host->host_no;
10515 host->max_cmd_len = UFS_CDB_SIZE;
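	/*
	 * queuecommand() may need to block when clock gating is enabled,
	 * since ungating the clocks before issuing a command can sleep.
	 */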
10516 host->queuecommand_may_block = !!(hba->caps & UFSHCD_CAP_CLK_GATING);
10517
10518 hba->max_pwr_info.is_valid = false;
10519
10520 /* Initialize work queues */
10521 snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d",
10522 hba->host->host_no);
10523 hba->eh_wq = create_singlethread_workqueue(eh_wq_name);
10524 if (!hba->eh_wq) {
10525 dev_err(hba->dev, "%s: failed to create eh workqueue\n",
10526 __func__);
10527 err = -ENOMEM;
10528 goto out_disable;
10529 }
10530 INIT_WORK(&hba->eh_work, ufshcd_err_handler);
10531 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
10532
10533 sema_init(&hba->host_sem, 1);
10534
10535 /* Initialize UIC command mutex */
10536 mutex_init(&hba->uic_cmd_mutex);
10537
10538 /* Initialize mutex for device management commands */
10539 mutex_init(&hba->dev_cmd.lock);
10540
10541 /* Initialize mutex for exception event control */
10542 mutex_init(&hba->ee_ctrl_mutex);
10543
10544 mutex_init(&hba->wb_mutex);
10545 init_rwsem(&hba->clk_scaling_lock);
10546
10547 ufshcd_init_clk_gating(hba);
10548
10549 ufshcd_init_clk_scaling(hba);
10550
10551 /*
10552 * In order to avoid any spurious interrupt immediately after
10553 * registering UFS controller interrupt handler, clear any pending UFS
10554 * interrupt status and disable all the UFS interrupts.
10555 */
10556 ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
10557 REG_INTERRUPT_STATUS);
10558 ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
10559 /*
10560 * Make sure that UFS interrupts are disabled and any pending interrupt
10561 * status is cleared before registering UFS interrupt handler.
10562 */
10563 ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
10564
10565 /* IRQ registration */
10566 err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
10567 if (err) {
10568 dev_err(hba->dev, "request irq failed\n");
10569 goto out_disable;
10570 } else {
10571 hba->is_irq_enabled = true;
10572 }
10573
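	/*
	 * In MCQ mode, registration of the SCSI host is deferred until the
	 * MCQ queues have been configured later during device probe; only
	 * the legacy single-doorbell path adds the SCSI host here.
	 */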
10574 if (!is_mcq_supported(hba)) {
10575 if (!hba->lsdb_sup) {
10576 dev_err(hba->dev, "%s: failed to initialize (legacy doorbell mode not supported)\n",
10577 __func__);
10578 err = -EINVAL;
10579 goto out_disable;
10580 }
10581 err = scsi_add_host(host, hba->dev);
10582 if (err) {
10583 dev_err(hba->dev, "scsi_add_host failed\n");
10584 goto out_disable;
10585 }
10586 hba->scsi_host_added = true;
10587 }
10588
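	/*
	 * Task-management requests use a private blk-mq tag set sized to
	 * nutmrs so that TM slots can be allocated and tracked as blk-mq
	 * tags; this queue never carries real I/O (see ufshcd_queue_tmf()).
	 */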
10589 hba->tmf_tag_set = (struct blk_mq_tag_set) {
10590 .nr_hw_queues = 1,
10591 .queue_depth = hba->nutmrs,
10592 .ops = &ufshcd_tmf_ops,
10593 .flags = BLK_MQ_F_NO_SCHED,
10594 };
10595 err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
10596 if (err < 0)
10597 goto out_remove_scsi_host;
10598 hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
10599 if (IS_ERR(hba->tmf_queue)) {
10600 err = PTR_ERR(hba->tmf_queue);
10601 goto free_tmf_tag_set;
10602 }
10603 hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs,
10604 sizeof(*hba->tmf_rqs), GFP_KERNEL);
10605 if (!hba->tmf_rqs) {
10606 err = -ENOMEM;
10607 goto free_tmf_queue;
10608 }
10609
10610 /* Reset the attached device */
10611 ufshcd_device_reset(hba);
10612
10613 ufshcd_init_crypto(hba);
10614
10615 /* Host controller enable */
10616 err = ufshcd_hba_enable(hba);
10617 if (err) {
10618 dev_err(hba->dev, "Host controller enable failed\n");
10619 ufshcd_print_evt_hist(hba);
10620 ufshcd_print_host_state(hba);
10621 goto free_tmf_queue;
10622 }
10623
10624 /*
10625 * Set the default power management level for runtime and system PM if
10626 * not set by the host controller drivers.
10627 * Default power saving mode is to keep UFS link in Hibern8 state
10628 * and UFS device in sleep state.
10629 */
10630 if (!hba->rpm_lvl)
10631 hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
10632 UFS_SLEEP_PWR_MODE,
10633 UIC_LINK_HIBERN8_STATE);
10634 if (!hba->spm_lvl)
10635 hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
10636 UFS_SLEEP_PWR_MODE,
10637 UIC_LINK_HIBERN8_STATE);
10638
10639 INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work, ufshcd_rpm_dev_flush_recheck_work);
10640 INIT_DELAYED_WORK(&hba->ufs_rtc_update_work, ufshcd_rtc_work);
10641
10642 	/* Set the default auto-hibernate idle timer value to 150 ms */
10643 if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
10644 hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
10645 FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
10646 }
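	/*
	 * AHIT encoding sketch: TIMER = 150 with SCALE = 3 selects 1 ms
	 * units per UFSHCI, i.e. 150 * 1 ms = 150 ms of idle time before
	 * the host autonomously enters Hibern8.
	 */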
10647
10648 /* Hold auto suspend until async scan completes */
10649 pm_runtime_get_sync(dev);
10650 atomic_set(&hba->scsi_block_reqs_cnt, 0);
10651 	/*
10652 	 * We assume that the device was not put into a sleep/power-down
10653 	 * state by the boot firmware before the kernel started. This
10654 	 * assumption helps avoid doing link startup twice during
10655 	 * ufshcd_probe_hba().
10656 	 */
10657 ufshcd_set_ufs_dev_active(hba);
10658
10659 async_schedule(ufshcd_async_scan, hba);
10660 ufs_sysfs_add_nodes(hba->dev);
10661
10662 device_enable_async_suspend(dev);
10663 return 0;
10664
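/* Error unwind: release resources in the reverse order of their setup above. */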
10665 free_tmf_queue:
10666 blk_mq_destroy_queue(hba->tmf_queue);
10667 blk_put_queue(hba->tmf_queue);
10668 free_tmf_tag_set:
10669 blk_mq_free_tag_set(&hba->tmf_tag_set);
10670 out_remove_scsi_host:
10671 if (hba->scsi_host_added)
10672 scsi_remove_host(hba->host);
10673 out_disable:
10674 hba->is_irq_enabled = false;
10675 ufshcd_hba_exit(hba);
10676 out_error:
10677 return err;
10678 }
10679 EXPORT_SYMBOL_GPL(ufshcd_init);
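/*
 * Illustrative sketch (not part of this file): a bus glue driver is expected
 * to pair ufshcd_alloc_host() with ufshcd_init() roughly as follows; the
 * probe function and resource lookups shown here are hypothetical
 * placeholders, not the actual platform/PCI glue code.
 *
 *	static int my_ufs_probe(struct platform_device *pdev)
 *	{
 *		struct device *dev = &pdev->dev;
 *		struct ufs_hba *hba;
 *		void __iomem *mmio;
 *		int irq, err;
 *
 *		err = ufshcd_alloc_host(dev, &hba);
 *		if (err)
 *			return err;
 *
 *		mmio = devm_platform_ioremap_resource(pdev, 0);
 *		if (IS_ERR(mmio)) {
 *			err = PTR_ERR(mmio);
 *			goto dealloc;
 *		}
 *
 *		irq = platform_get_irq(pdev, 0);
 *		if (irq < 0) {
 *			err = irq;
 *			goto dealloc;
 *		}
 *
 *		err = ufshcd_init(hba, mmio, irq);
 *		if (err)
 *			goto dealloc;
 *		return 0;
 *
 *	dealloc:
 *		ufshcd_dealloc_host(hba);
 *		return err;
 *	}
 */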
10680
10681 void ufshcd_resume_complete(struct device *dev)
10682 {
10683 struct ufs_hba *hba = dev_get_drvdata(dev);
10684
10685 if (hba->complete_put) {
10686 ufshcd_rpm_put(hba);
10687 hba->complete_put = false;
10688 }
10689 }
10690 EXPORT_SYMBOL_GPL(ufshcd_resume_complete);
10691
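/*
 * ufshcd_rpm_ok_for_spm - check whether the current runtime-PM state of the
 * device W-LUN already matches the device power mode and link state that the
 * configured spm_lvl would program, so that system suspend can skip a
 * runtime resume/suspend cycle.
 */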
10692 static bool ufshcd_rpm_ok_for_spm(struct ufs_hba *hba)
10693 {
10694 struct device *dev = &hba->ufs_device_wlun->sdev_gendev;
10695 enum ufs_dev_pwr_mode dev_pwr_mode;
10696 enum uic_link_state link_state;
10697 unsigned long flags;
10698 bool res;
10699
10700 spin_lock_irqsave(&dev->power.lock, flags);
10701 dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl);
10702 link_state = ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl);
10703 res = pm_runtime_suspended(dev) &&
10704 hba->curr_dev_pwr_mode == dev_pwr_mode &&
10705 hba->uic_link_state == link_state &&
10706 !hba->dev_info.b_rpm_dev_flush_capable;
10707 spin_unlock_irqrestore(&dev->power.lock, flags);
10708
10709 return res;
10710 }
10711
10712 int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm)
10713 {
10714 struct ufs_hba *hba = dev_get_drvdata(dev);
10715 int ret;
10716
10717 	/*
10718 	 * SCSI assumes that runtime PM and system PM for SCSI drivers are
10719 	 * the same, and it does not wake up a runtime-suspended device for
10720 	 * system suspend. UFS does not follow that convention; see
10721 	 * ufshcd_resume_complete().
10722 	 */
10723 if (hba->ufs_device_wlun) {
10724 /* Prevent runtime suspend */
10725 ufshcd_rpm_get_noresume(hba);
10726 		/*
10727 		 * Check whether the device is already runtime suspended in
10728 		 * the same state that system suspend would use.
10729 		 */
10730 if (!rpm_ok_for_spm || !ufshcd_rpm_ok_for_spm(hba)) {
10731 /* RPM state is not ok for SPM, so runtime resume */
10732 ret = ufshcd_rpm_resume(hba);
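			/*
			 * -EACCES here typically means runtime PM is disabled
			 * for the device; that is not treated as a failure
			 * for system suspend, so only other errors abort the
			 * prepare step.
			 */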
10733 if (ret < 0 && ret != -EACCES) {
10734 ufshcd_rpm_put(hba);
10735 return ret;
10736 }
10737 }
10738 hba->complete_put = true;
10739 }
10740 return 0;
10741 }
10742 EXPORT_SYMBOL_GPL(__ufshcd_suspend_prepare);
10743
10744 int ufshcd_suspend_prepare(struct device *dev)
10745 {
10746 return __ufshcd_suspend_prepare(dev, true);
10747 }
10748 EXPORT_SYMBOL_GPL(ufshcd_suspend_prepare);
10749
10750 #ifdef CONFIG_PM_SLEEP
10751 static int ufshcd_wl_poweroff(struct device *dev)
10752 {
10753 struct scsi_device *sdev = to_scsi_device(dev);
10754 struct ufs_hba *hba = shost_priv(sdev->host);
10755
10756 __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
10757 return 0;
10758 }
10759 #endif
10760
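/*
 * ufshcd_wl_probe - bind only to the UFS device W-LUN and set it up for
 * block-layer runtime PM with a zero autosuspend delay, so that the W-LUN
 * (and with it the HBA) can runtime suspend as soon as it becomes idle.
 */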
10761 static int ufshcd_wl_probe(struct device *dev)
10762 {
10763 struct scsi_device *sdev = to_scsi_device(dev);
10764
10765 if (!is_device_wlun(sdev))
10766 return -ENODEV;
10767
10768 blk_pm_runtime_init(sdev->request_queue, dev);
10769 pm_runtime_set_autosuspend_delay(dev, 0);
10770 pm_runtime_allow(dev);
10771
10772 return 0;
10773 }
10774
10775 static int ufshcd_wl_remove(struct device *dev)
10776 {
10777 pm_runtime_forbid(dev);
10778 return 0;
10779 }
10780
10781 static const struct dev_pm_ops ufshcd_wl_pm_ops = {
10782 #ifdef CONFIG_PM_SLEEP
10783 .suspend = ufshcd_wl_suspend,
10784 .resume = ufshcd_wl_resume,
10785 .freeze = ufshcd_wl_suspend,
10786 .thaw = ufshcd_wl_resume,
10787 .poweroff = ufshcd_wl_poweroff,
10788 .restore = ufshcd_wl_resume,
10789 #endif
10790 SET_RUNTIME_PM_OPS(ufshcd_wl_runtime_suspend, ufshcd_wl_runtime_resume, NULL)
10791 };
10792
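/*
 * ufshcd_check_header_layout - verify at build time that the C bitfield
 * layout of struct request_desc_header and struct utp_upiu_header matches
 * the byte/bit layout defined by the UFSHCI and UFS specifications.
 */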
10793 static void ufshcd_check_header_layout(void)
10794 {
10795 /*
10796 * gcc compilers before version 10 cannot do constant-folding for
10797 * sub-byte bitfields. Hence skip the layout checks for gcc 9 and
10798 * before.
10799 */
10800 if (IS_ENABLED(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION < 100000)
10801 return;
10802
10803 BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
10804 .cci = 3})[0] != 3);
10805
10806 BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
10807 .ehs_length = 2})[1] != 2);
10808
10809 BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
10810 .enable_crypto = 1})[2]
10811 != 0x80);
10812
10813 BUILD_BUG_ON((((u8 *)&(struct request_desc_header){
10814 .command_type = 5,
10815 .data_direction = 3,
10816 .interrupt = 1,
10817 })[3]) != ((5 << 4) | (3 << 1) | 1));
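	/* i.e. byte 3 of the header must hold 0x57 = (5 << 4) | (3 << 1) | 1. */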
10818
10819 BUILD_BUG_ON(((__le32 *)&(struct request_desc_header){
10820 .dunl = cpu_to_le32(0xdeadbeef)})[1] !=
10821 cpu_to_le32(0xdeadbeef));
10822
10823 BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
10824 .ocs = 4})[8] != 4);
10825
10826 BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
10827 .cds = 5})[9] != 5);
10828
10829 BUILD_BUG_ON(((__le32 *)&(struct request_desc_header){
10830 .dunu = cpu_to_le32(0xbadcafe)})[3] !=
10831 cpu_to_le32(0xbadcafe));
10832
10833 BUILD_BUG_ON(((u8 *)&(struct utp_upiu_header){
10834 .iid = 0xf })[4] != 0xf0);
10835
10836 BUILD_BUG_ON(((u8 *)&(struct utp_upiu_header){
10837 .command_set_type = 0xf })[4] != 0xf);
10838 }
10839
10840 /*
10841  * ufs_dev_wlun_template - describes the UFS device W-LUN
10842  * The UFS device W-LUN is used to send PM commands;
10843  * all other LUNs are consumers of the UFS device W-LUN.
10844  *
10845  * Currently no sd driver is bound to W-LUNs, so no LUN-specific PM
10846  * operations are performed for them. Per the UFS design, START STOP UNIT
10847  * (SSU) must be sent to the UFS device W-LUN, hence a SCSI driver is
10848  * registered for UFS W-LUNs only.
10849  */
10850 static struct scsi_driver ufs_dev_wlun_template = {
10851 .gendrv = {
10852 .name = "ufs_device_wlun",
10853 .owner = THIS_MODULE,
10854 .probe = ufshcd_wl_probe,
10855 .remove = ufshcd_wl_remove,
10856 .pm = &ufshcd_wl_pm_ops,
10857 .shutdown = ufshcd_wl_shutdown,
10858 },
10859 };
10860
10861 static int __init ufshcd_core_init(void)
10862 {
10863 int ret;
10864
10865 ufshcd_check_header_layout();
10866
10867 ufs_debugfs_init();
10868
10869 ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv);
10870 if (ret)
10871 ufs_debugfs_exit();
10872 return ret;
10873 }
10874
10875 static void __exit ufshcd_core_exit(void)
10876 {
10877 ufs_debugfs_exit();
10878 scsi_unregister_driver(&ufs_dev_wlun_template.gendrv);
10879 }
10880
10881 module_init(ufshcd_core_init);
10882 module_exit(ufshcd_core_exit);
10883
10884 MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
10885 MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
10886 MODULE_DESCRIPTION("Generic UFS host controller driver Core");
10887 MODULE_SOFTDEP("pre: governor_simpleondemand");
10888 MODULE_LICENSE("GPL");
10889