// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Universal Flash Storage Host controller driver Core
 * Copyright (C) 2011-2013 Samsung India Software Operations
 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 */

#include <linux/async.h>
#include <linux/devfreq.h>
#include <linux/nls.h>
#include <linux/of.h>
#include <linux/bitfield.h>
#include <linux/blk-pm.h>
#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
#include <linux/sched/clock.h>
#include <linux/iopoll.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include "ufshcd-priv.h"
#include <ufs/ufs_quirks.h>
#include <ufs/unipro.h>
#include "ufs-sysfs.h"
#include "ufs-debugfs.h"
#include "ufs-fault-injection.h"
#include "ufs_bsg.h"
#include "ufshcd-crypto.h"
#include <asm/unaligned.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>

#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK)

#define UFSHCD_ENABLE_MCQ_INTRS	(UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK |\
				 MCQ_CQ_EVENT_STATUS)

/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT	500

/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES    10
/* Timeout after 50 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT    50 /* msecs */

/* Query request retries */
#define QUERY_REQ_RETRIES 3
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */

/* Advanced RPMB request timeout */
#define ADVANCED_RPMB_REQ_TIMEOUT  3000 /* 3 seconds */

/* Task management command timeout */
#define TM_CMD_TIMEOUT	100 /* msecs */

/* maximum number of retries for a general UIC command */
#define UFS_UIC_COMMAND_RETRIES 3

/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES 3

/* maximum number of reset retries before giving up */
#define MAX_HOST_RESET_RETRIES 5

/* Maximum number of error handler retries before giving up */
#define MAX_ERR_HANDLER_RETRIES 5

/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF

/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO	0x02

/* default delay of autosuspend: 2000 ms */
#define RPM_AUTOSUSPEND_DELAY_MS 2000

/* Default delay of RPM device flush delayed work */
#define RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS 5000

/* Default value of wait time before gating device ref clock */
#define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF /* microsecs */

/* Polling time to wait for fDeviceInit */
#define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */

/* UFSHC 4.0 compliant HCs support this mode. */
static bool use_mcq_mode = true;

static bool is_mcq_supported(struct ufs_hba *hba)
{
	return hba->mcq_sup && use_mcq_mode;
}

module_param(use_mcq_mode, bool, 0644);
MODULE_PARM_DESC(use_mcq_mode, "Control MCQ mode for controllers starting from UFSHCI 4.0. 1 - enable MCQ, 0 - disable MCQ. MCQ is enabled by default");

#define ufshcd_toggle_vreg(_dev, _vreg, _on)				\
	({								\
		int _ret;						\
		if (_on)						\
			_ret = ufshcd_enable_vreg(_dev, _vreg);		\
		else							\
			_ret = ufshcd_disable_vreg(_dev, _vreg);	\
		_ret;							\
	})
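
/*
 * Illustrative use of ufshcd_toggle_vreg() above (sketch only, not part of
 * the driver flow; "hba->vreg_info.vcc" is just an example regulator handle):
 *
 *	int ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
 *	if (ret)
 *		dev_err(hba->dev, "failed to toggle vcc: %d\n", ret);
 */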

#define ufshcd_hex_dump(prefix_str, buf, len) do {			\
	size_t __len = (len);						\
	print_hex_dump(KERN_ERR, prefix_str,				\
		       __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\
		       16, 4, buf, __len, false);			\
} while (0)

int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
		     const char *prefix)
{
	u32 *regs;
	size_t pos;

	if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
		return -EINVAL;

	regs = kzalloc(len, GFP_ATOMIC);
	if (!regs)
		return -ENOMEM;

	for (pos = 0; pos < len; pos += 4) {
		if (offset == 0 &&
		    pos >= REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER &&
		    pos <= REG_UIC_ERROR_CODE_DME)
			continue;
		regs[pos / 4] = ufshcd_readl(hba, offset + pos);
	}

	ufshcd_hex_dump(prefix, regs, len);
	kfree(regs);

	return 0;
}
EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
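
/*
 * Illustrative only: a caller (for example a vendor dbg_register_dump hook)
 * can dump the whole standard register space like this; offset and len are
 * in bytes and must be 4-byte aligned, as checked above:
 *
 *	ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
 */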

enum {
	UFSHCD_MAX_CHANNEL	= 0,
	UFSHCD_MAX_ID		= 1,
	UFSHCD_CMD_PER_LUN	= 32 - UFSHCD_NUM_RESERVED,
	UFSHCD_CAN_QUEUE	= 32 - UFSHCD_NUM_RESERVED,
};

static const char *const ufshcd_state_name[] = {
	[UFSHCD_STATE_RESET]			= "reset",
	[UFSHCD_STATE_OPERATIONAL]		= "operational",
	[UFSHCD_STATE_ERROR]			= "error",
	[UFSHCD_STATE_EH_SCHEDULED_FATAL]	= "eh_fatal",
	[UFSHCD_STATE_EH_SCHEDULED_NON_FATAL]	= "eh_non_fatal",
};

/* UFSHCD error handling flags */
enum {
	UFSHCD_EH_IN_PROGRESS = (1 << 0),
};

/* UFSHCD UIC layer error flags */
enum {
	UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
	UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
	UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
	UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
	UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
	UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
	UFSHCD_UIC_PA_GENERIC_ERROR = (1 << 6), /* Generic PA error */
};

#define ufshcd_set_eh_in_progress(h) \
	((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
#define ufshcd_eh_in_progress(h) \
	((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
#define ufshcd_clear_eh_in_progress(h) \
	((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)

const struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
	[UFS_PM_LVL_0] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	[UFS_PM_LVL_1] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	[UFS_PM_LVL_2] = {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	[UFS_PM_LVL_3] = {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	[UFS_PM_LVL_4] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	[UFS_PM_LVL_5] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
	/*
	 * For DeepSleep, the link is first put in hibern8 and then off.
	 * Leaving the link in hibern8 is not supported.
	 */
	[UFS_PM_LVL_6] = {UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE},
};

static inline enum ufs_dev_pwr_mode
ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].dev_state;
}

static inline enum uic_link_state
ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].link_state;
}

static inline enum ufs_pm_level
ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
					  enum uic_link_state link_state)
{
	enum ufs_pm_level lvl;

	for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
		if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
		    (ufs_pm_lvl_states[lvl].link_state == link_state))
			return lvl;
	}

	/* if no match found, return the level 0 */
	return UFS_PM_LVL_0;
}
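
/*
 * Illustrative only: with the table above,
 * ufs_get_desired_pm_lvl_for_dev_link_state(UFS_SLEEP_PWR_MODE,
 * UIC_LINK_HIBERN8_STATE) resolves to UFS_PM_LVL_3, while any unsupported
 * device/link combination falls back to UFS_PM_LVL_0.
 */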

static const struct ufs_dev_quirk ufs_fixups[] = {
	/* UFS cards deviations table */
	{ .wmanufacturerid = UFS_VENDOR_MICRON,
	  .model = UFS_ANY_MODEL,
	  .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
	{ .wmanufacturerid = UFS_VENDOR_SAMSUNG,
	  .model = UFS_ANY_MODEL,
	  .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
		   UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
		   UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS },
	{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
	  .model = UFS_ANY_MODEL,
	  .quirk = UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME },
	{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
	  .model = "hB8aL1" /*H28U62301AMR*/,
	  .quirk = UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME },
	{ .wmanufacturerid = UFS_VENDOR_TOSHIBA,
	  .model = UFS_ANY_MODEL,
	  .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
	{ .wmanufacturerid = UFS_VENDOR_TOSHIBA,
	  .model = "THGLF2G9C8KBADG",
	  .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE },
	{ .wmanufacturerid = UFS_VENDOR_TOSHIBA,
	  .model = "THGLF2G9D8KBADG",
	  .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE },
	{}
};

static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
				    struct ufs_pa_layer_attr *pwr_mode);
static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
					 struct ufs_vreg *vreg);
static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba,
						 bool enable);
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);

static inline void ufshcd_enable_irq(struct ufs_hba *hba)
{
	if (!hba->is_irq_enabled) {
		enable_irq(hba->irq);
		hba->is_irq_enabled = true;
	}
}

static inline void ufshcd_disable_irq(struct ufs_hba *hba)
{
	if (hba->is_irq_enabled) {
		disable_irq(hba->irq);
		hba->is_irq_enabled = false;
	}
}

static void ufshcd_configure_wb(struct ufs_hba *hba)
{
	if (!ufshcd_is_wb_allowed(hba))
		return;

	ufshcd_wb_toggle(hba, true);

	ufshcd_wb_toggle_buf_flush_during_h8(hba, true);

	if (ufshcd_is_wb_buf_flush_allowed(hba))
		ufshcd_wb_toggle_buf_flush(hba, true);
}

static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
{
	if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
		scsi_unblock_requests(hba->host);
}

static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
{
	if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
		scsi_block_requests(hba->host);
}

static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
				      enum ufs_trace_str_t str_t)
{
	struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
	struct utp_upiu_header *header;

	if (!trace_ufshcd_upiu_enabled())
		return;

	if (str_t == UFS_CMD_SEND)
		header = &rq->header;
	else
		header = &hba->lrb[tag].ucd_rsp_ptr->header;

	trace_ufshcd_upiu(dev_name(hba->dev), str_t, header, &rq->sc.cdb,
			  UFS_TSF_CDB);
}

static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba,
					enum ufs_trace_str_t str_t,
					struct utp_upiu_req *rq_rsp)
{
	if (!trace_ufshcd_upiu_enabled())
		return;

	trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq_rsp->header,
			  &rq_rsp->qr, UFS_TSF_OSF);
}

static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
				     enum ufs_trace_str_t str_t)
{
	struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[tag];

	if (!trace_ufshcd_upiu_enabled())
		return;

	if (str_t == UFS_TM_SEND)
		trace_ufshcd_upiu(dev_name(hba->dev), str_t,
				  &descp->upiu_req.req_header,
				  &descp->upiu_req.input_param1,
				  UFS_TSF_TM_INPUT);
	else
		trace_ufshcd_upiu(dev_name(hba->dev), str_t,
				  &descp->upiu_rsp.rsp_header,
				  &descp->upiu_rsp.output_param1,
				  UFS_TSF_TM_OUTPUT);
}

static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
					 const struct uic_command *ucmd,
					 enum ufs_trace_str_t str_t)
{
	u32 cmd;

	if (!trace_ufshcd_uic_command_enabled())
		return;

	if (str_t == UFS_CMD_SEND)
		cmd = ucmd->command;
	else
		cmd = ufshcd_readl(hba, REG_UIC_COMMAND);

	trace_ufshcd_uic_command(dev_name(hba->dev), str_t, cmd,
				 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1),
				 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2),
				 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3));
}

static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
				     enum ufs_trace_str_t str_t)
{
	u64 lba = 0;
	u8 opcode = 0, group_id = 0;
	u32 doorbell = 0;
	u32 intr;
	int hwq_id = -1;
	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
	struct scsi_cmnd *cmd = lrbp->cmd;
	struct request *rq = scsi_cmd_to_rq(cmd);
	int transfer_len = -1;

	if (!cmd)
		return;

	/* trace UPIU also */
	ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
	if (!trace_ufshcd_command_enabled())
		return;

	opcode = cmd->cmnd[0];

	if (opcode == READ_10 || opcode == WRITE_10) {
		/*
		 * Currently we only fully trace read(10) and write(10) commands
		 */
		transfer_len =
			be32_to_cpu(lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
		lba = scsi_get_lba(cmd);
		if (opcode == WRITE_10)
			group_id = lrbp->cmd->cmnd[6];
	} else if (opcode == UNMAP) {
		/*
		 * The number of Bytes to be unmapped beginning with the lba.
		 */
		transfer_len = blk_rq_bytes(rq);
		lba = scsi_get_lba(cmd);
	}

	intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);

	if (is_mcq_enabled(hba)) {
		struct ufs_hw_queue *hwq = ufshcd_mcq_req_to_hwq(hba, rq);

		hwq_id = hwq->id;
	} else {
		doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	}
	trace_ufshcd_command(dev_name(hba->dev), str_t, tag,
			doorbell, hwq_id, transfer_len, intr, lba, opcode, group_id);
}

static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
{
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		return;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
		    clki->max_freq)
			dev_err(hba->dev, "clk: %s, rate: %u\n",
				clki->name, clki->curr_freq);
	}
}

static void ufshcd_print_evt(struct ufs_hba *hba, u32 id,
			     const char *err_name)
{
	int i;
	bool found = false;
	const struct ufs_event_hist *e;

	if (id >= UFS_EVT_CNT)
		return;

	e = &hba->ufs_stats.event[id];

	for (i = 0; i < UFS_EVENT_HIST_LENGTH; i++) {
		int p = (i + e->pos) % UFS_EVENT_HIST_LENGTH;

		if (e->tstamp[p] == 0)
			continue;
		dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
			e->val[p], div_u64(e->tstamp[p], 1000));
		found = true;
	}

	if (!found)
		dev_err(hba->dev, "No record of %s\n", err_name);
	else
		dev_err(hba->dev, "%s: total cnt=%llu\n", err_name, e->cnt);
}

static void ufshcd_print_evt_hist(struct ufs_hba *hba)
{
	ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");

	ufshcd_print_evt(hba, UFS_EVT_PA_ERR, "pa_err");
	ufshcd_print_evt(hba, UFS_EVT_DL_ERR, "dl_err");
	ufshcd_print_evt(hba, UFS_EVT_NL_ERR, "nl_err");
	ufshcd_print_evt(hba, UFS_EVT_TL_ERR, "tl_err");
	ufshcd_print_evt(hba, UFS_EVT_DME_ERR, "dme_err");
	ufshcd_print_evt(hba, UFS_EVT_AUTO_HIBERN8_ERR,
			 "auto_hibern8_err");
	ufshcd_print_evt(hba, UFS_EVT_FATAL_ERR, "fatal_err");
	ufshcd_print_evt(hba, UFS_EVT_LINK_STARTUP_FAIL,
			 "link_startup_fail");
	ufshcd_print_evt(hba, UFS_EVT_RESUME_ERR, "resume_fail");
	ufshcd_print_evt(hba, UFS_EVT_SUSPEND_ERR,
			 "suspend_fail");
	ufshcd_print_evt(hba, UFS_EVT_WL_RES_ERR, "wlun resume_fail");
	ufshcd_print_evt(hba, UFS_EVT_WL_SUSP_ERR,
			 "wlun suspend_fail");
	ufshcd_print_evt(hba, UFS_EVT_DEV_RESET, "dev_reset");
	ufshcd_print_evt(hba, UFS_EVT_HOST_RESET, "host_reset");
	ufshcd_print_evt(hba, UFS_EVT_ABORT, "task_abort");

	ufshcd_vops_dbg_register_dump(hba);
}

static
void ufshcd_print_tr(struct ufs_hba *hba, int tag, bool pr_prdt)
{
	const struct ufshcd_lrb *lrbp;
	int prdt_length;

	lrbp = &hba->lrb[tag];

	dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
		tag, div_u64(lrbp->issue_time_stamp_local_clock, 1000));
	dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
		tag, div_u64(lrbp->compl_time_stamp_local_clock, 1000));
	dev_err(hba->dev,
		"UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
		tag, (u64)lrbp->utrd_dma_addr);

	ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
			sizeof(struct utp_transfer_req_desc));
	dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
		(u64)lrbp->ucd_req_dma_addr);
	ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
			sizeof(struct utp_upiu_req));
	dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
		(u64)lrbp->ucd_rsp_dma_addr);
	ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
			sizeof(struct utp_upiu_rsp));

	prdt_length = le16_to_cpu(
		lrbp->utr_descriptor_ptr->prd_table_length);
	if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
		prdt_length /= ufshcd_sg_entry_size(hba);

	dev_err(hba->dev,
		"UPIU[%d] - PRDT - %d entries phys@0x%llx\n",
		tag, prdt_length,
		(u64)lrbp->ucd_prdt_dma_addr);

	if (pr_prdt)
		ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
				ufshcd_sg_entry_size(hba) * prdt_length);
}

static bool ufshcd_print_tr_iter(struct request *req, void *priv)
{
	struct scsi_device *sdev = req->q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	struct ufs_hba *hba = shost_priv(shost);

	ufshcd_print_tr(hba, req->tag, *(bool *)priv);

	return true;
}

/**
 * ufshcd_print_trs_all - print trs for all started requests.
 * @hba: per-adapter instance.
 * @pr_prdt: need to print prdt or not.
 */
static void ufshcd_print_trs_all(struct ufs_hba *hba, bool pr_prdt)
{
	blk_mq_tagset_busy_iter(&hba->host->tag_set, ufshcd_print_tr_iter, &pr_prdt);
}

static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
{
	int tag;

	for_each_set_bit(tag, &bitmap, hba->nutmrs) {
		struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];

		dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
		ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp));
	}
}

static void ufshcd_print_host_state(struct ufs_hba *hba)
{
	const struct scsi_device *sdev_ufs = hba->ufs_device_wlun;

	dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
	dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
		hba->outstanding_reqs, hba->outstanding_tasks);
	dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
		hba->saved_err, hba->saved_uic_err);
	dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
		hba->pm_op_in_progress, hba->is_sys_suspended);
	dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
		hba->auto_bkops_enabled, hba->host->host_self_blocked);
	dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
	dev_err(hba->dev,
		"last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n",
		div_u64(hba->ufs_stats.last_hibern8_exit_tstamp, 1000),
		hba->ufs_stats.hibern8_exit_cnt);
	dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n",
		div_u64(hba->ufs_stats.last_intr_ts, 1000),
		hba->ufs_stats.last_intr_status);
	dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
		hba->eh_flags, hba->req_abort_count);
	dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n",
		hba->ufs_version, hba->capabilities, hba->caps);
	dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
		hba->dev_quirks);
	if (sdev_ufs)
		dev_err(hba->dev, "UFS dev info: %.8s %.16s rev %.4s\n",
			sdev_ufs->vendor, sdev_ufs->model, sdev_ufs->rev);

	ufshcd_print_clk_freqs(hba);
}

/**
 * ufshcd_print_pwr_info - print power params as saved in hba
 * power info
 * @hba: per-adapter instance
 */
static void ufshcd_print_pwr_info(struct ufs_hba *hba)
{
	static const char * const names[] = {
		"INVALID MODE",
		"FAST MODE",
		"SLOW_MODE",
		"INVALID MODE",
		"FASTAUTO_MODE",
		"SLOWAUTO_MODE",
		"INVALID MODE",
	};

	/*
	 * Using dev_dbg to avoid messages during runtime PM to avoid
	 * never-ending cycles of messages written back to storage by user space
	 * causing runtime resume, causing more messages and so on.
	 */
	dev_dbg(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
		__func__,
		hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
		hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
		names[hba->pwr_info.pwr_rx],
		names[hba->pwr_info.pwr_tx],
		hba->pwr_info.hs_rate);
}

static void ufshcd_device_reset(struct ufs_hba *hba)
{
	int err;

	err = ufshcd_vops_device_reset(hba);

	if (!err) {
		ufshcd_set_ufs_dev_active(hba);
		if (ufshcd_is_wb_allowed(hba)) {
			hba->dev_info.wb_enabled = false;
			hba->dev_info.wb_buf_flush_enabled = false;
		}
	}
	if (err != -EOPNOTSUPP)
		ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err);
}

void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
{
	if (!us)
		return;

	if (us < 10)
		udelay(us);
	else
		usleep_range(us, us + tolerance);
}
EXPORT_SYMBOL_GPL(ufshcd_delay_us);

/**
 * ufshcd_wait_for_register - wait for register value to change
 * @hba: per-adapter interface
 * @reg: mmio register offset
 * @mask: mask to apply to the read register value
 * @val: value to wait for
 * @interval_us: polling interval in microseconds
 * @timeout_ms: timeout in milliseconds
 *
 * Return: -ETIMEDOUT on error, zero on success.
 */
static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
				    u32 val, unsigned long interval_us,
				    unsigned long timeout_ms)
{
	int err = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	/* ignore bits that we don't intend to wait on */
	val = val & mask;

	while ((ufshcd_readl(hba, reg) & mask) != val) {
		usleep_range(interval_us, interval_us + 50);
		if (time_after(jiffies, timeout)) {
			if ((ufshcd_readl(hba, reg) & mask) != val)
				err = -ETIMEDOUT;
			break;
		}
	}

	return err;
}
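
/*
 * Illustrative only (not an actual call site in this file): polling for a
 * tag's doorbell bit to clear could look like the sketch below; the register,
 * 100 us poll interval and 1000 ms timeout are merely example parameters.
 *
 *	err = ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
 *				       1U << tag, 0, 100, 1000);
 */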

/**
 * ufshcd_get_intr_mask - Get the interrupt bit mask
 * @hba: Pointer to adapter instance
 *
 * Return: interrupt bit mask per version
 */
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
	if (hba->ufs_version == ufshci_version(1, 0))
		return INTERRUPT_MASK_ALL_VER_10;
	if (hba->ufs_version <= ufshci_version(2, 0))
		return INTERRUPT_MASK_ALL_VER_11;

	return INTERRUPT_MASK_ALL_VER_21;
}

/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 * @hba: Pointer to adapter instance
 *
 * Return: UFSHCI version supported by the controller
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
	u32 ufshci_ver;

	if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
		ufshci_ver = ufshcd_vops_get_ufs_hci_version(hba);
	else
		ufshci_ver = ufshcd_readl(hba, REG_UFS_VERSION);

	/*
	 * UFSHCI v1.x uses a different version scheme, in order
	 * to allow the use of comparisons with the ufshci_version
	 * function, we convert it to the same scheme as ufs 2.0+.
	 */
	if (ufshci_ver & 0x00010000)
		return ufshci_version(1, ufshci_ver & 0x00000100);

	return ufshci_ver;
}

/**
 * ufshcd_is_device_present - Check if any device connected to
 *			      the host controller
 * @hba: pointer to adapter instance
 *
 * Return: true if device present, false if no device detected
 */
static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & DEVICE_PRESENT;
}

/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 * @lrbp: pointer to local command reference block
 * @cqe: pointer to the completion queue entry
 *
 * This function is used to get the OCS field from UTRD
 *
 * Return: the OCS field in the UTRD.
 */
static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp,
				      struct cq_entry *cqe)
{
	if (cqe)
		return le32_to_cpu(cqe->status) & MASK_OCS;

	return lrbp->utr_descriptor_ptr->header.ocs & MASK_OCS;
}

/**
 * ufshcd_utrl_clear() - Clear requests from the controller request list.
 * @hba: per adapter instance
 * @mask: mask with one bit set for each request to be cleared
 */
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 mask)
{
	if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
		mask = ~mask;
	/*
	 * From the UFSHCI specification: "UTP Transfer Request List CLear
	 * Register (UTRLCLR): This field is bit significant. Each bit
	 * corresponds to a slot in the UTP Transfer Request List, where bit 0
	 * corresponds to request slot 0. A bit in this field is set to ‘0’
	 * by host software to indicate to the host controller that a transfer
	 * request slot is cleared. The host controller
	 * shall free up any resources associated to the request slot
	 * immediately, and shall set the associated bit in UTRLDBR to ‘0’. The
	 * host software indicates no change to request slots by setting the
	 * associated bits in this field to ‘1’. Bits in this field shall only
	 * be set ‘1’ or ‘0’ by host software when UTRLRSR is set to ‘1’."
	 */
	ufshcd_writel(hba, ~mask, REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}
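
/*
 * Illustrative only: to clear request slot 5 a caller passes mask = BIT(5);
 * the function then writes ~BIT(5), i.e. every bit set except bit 5, which
 * per the spec text above tells the controller to clear exactly that slot.
 * The mask is inverted first when UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR is set,
 * for controllers that expect the opposite polarity.
 */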

/**
 * ufshcd_utmrl_clear - Clear a bit in UTMRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
{
	if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
		ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
	else
		ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
}

/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 * @reg: Register value of host controller status
 *
 * Return: 0 on success; a positive value if failed.
 */
static inline int ufshcd_get_lists_status(u32 reg)
{
	return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
}

/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 * @hba: Pointer to adapter instance
 *
 * This function gets the result of UIC command completion
 *
 * Return: 0 on success; non-zero value on error.
 */
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
	       MASK_UIC_COMMAND_RESULT;
}

/**
 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 * @hba: Pointer to adapter instance
 *
 * This function gets UIC command argument3
 *
 * Return: 0 on success; non-zero value on error.
 */
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}

/**
 * ufshcd_get_req_rsp - returns the TR response transaction type
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * Return: UPIU type.
 */
static inline enum upiu_response_transaction
ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return ucd_rsp_ptr->header.transaction_code;
}

/**
 * ufshcd_is_exception_event - Check if the device raised an exception event
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * The function checks if the device raised an exception event indicated in
 * the Device Information field of response UPIU.
 *
 * Return: true if exception is raised, false otherwise.
 */
static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return ucd_rsp_ptr->header.device_information & 1;
}

/**
 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
 * @hba: per adapter instance
 */
static inline void
ufshcd_reset_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE |
		      INT_AGGR_COUNTER_AND_TIMER_RESET,
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
 * @hba: per adapter instance
 * @cnt: Interrupt aggregation counter threshold
 * @tmout: Interrupt aggregation timeout value
 */
static inline void
ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
		      INT_AGGR_COUNTER_THLD_VAL(cnt) |
		      INT_AGGR_TIMEOUT_VAL(tmout),
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 * @hba: per adapter instance
 */
static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers.
 *	Setting the run-stop registers to 1 indicates to the host controller
 *	that it can start processing requests.
 * @hba: per adapter instance
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}

/**
 * ufshcd_hba_start - Start controller initialization sequence
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
	u32 val = CONTROLLER_ENABLE;

	if (ufshcd_crypto_enable(hba))
		val |= CRYPTO_GENERAL_ENABLE;

	ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
}

/**
 * ufshcd_is_hba_active - Get controller state
 * @hba: per adapter instance
 *
 * Return: true if and only if the controller is active.
 */
bool ufshcd_is_hba_active(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE;
}
EXPORT_SYMBOL_GPL(ufshcd_is_hba_active);

u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
{
	/* HCI version 1.0 and 1.1 supports UniPro 1.41 */
	if (hba->ufs_version <= ufshci_version(1, 1))
		return UFS_UNIPRO_VER_1_41;
	else
		return UFS_UNIPRO_VER_1_6;
}
EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);

static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
{
	/*
	 * If both host and device support UniPro ver1.6 or later, PA layer
	 * parameters tuning happens during link startup itself.
	 *
	 * We can manually tune PA layer parameters if either host or device
	 * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
	 * logic simple, we will only do manual tuning if local unipro version
	 * doesn't support ver1.6 or later.
	 */
	return ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6;
}

/**
 * ufshcd_set_clk_freq - set UFS controller clock frequencies
 * @hba: per adapter instance
 * @scale_up: If True, set max possible frequency otherwise set low frequency
 *
 * Return: 0 if successful; < 0 upon failure.
 */
static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		goto out;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (scale_up && clki->max_freq) {
				if (clki->curr_freq == clki->max_freq)
					continue;

				ret = clk_set_rate(clki->clk, clki->max_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->max_freq, ret);
					break;
				}
				trace_ufshcd_clk_scaling(dev_name(hba->dev),
						"scaled up", clki->name,
						clki->curr_freq,
						clki->max_freq);

				clki->curr_freq = clki->max_freq;

			} else if (!scale_up && clki->min_freq) {
				if (clki->curr_freq == clki->min_freq)
					continue;

				ret = clk_set_rate(clki->clk, clki->min_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->min_freq, ret);
					break;
				}
				trace_ufshcd_clk_scaling(dev_name(hba->dev),
						"scaled down", clki->name,
						clki->curr_freq,
						clki->min_freq);
				clki->curr_freq = clki->min_freq;
			}
		}
		dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
			clki->name, clk_get_rate(clki->clk));
	}

out:
	return ret;
}

/**
 * ufshcd_scale_clks - scale up or scale down UFS controller clocks
 * @hba: per adapter instance
 * @scale_up: True if scaling up and false if scaling down
 *
 * Return: 0 if successful; < 0 upon failure.
 */
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;
	ktime_t start = ktime_get();

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
	if (ret)
		goto out;

	ret = ufshcd_set_clk_freq(hba, scale_up);
	if (ret)
		goto out;

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
	if (ret)
		ufshcd_set_clk_freq(hba, !scale_up);

out:
	trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
			(scale_up ? "up" : "down"),
			ktime_to_us(ktime_sub(ktime_get(), start)), ret);
	return ret;
}

/**
 * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
 * @hba: per adapter instance
 * @scale_up: True if scaling up and false if scaling down
 *
 * Return: true if scaling is required, false otherwise.
 */
static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
					       bool scale_up)
{
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		return false;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (scale_up && clki->max_freq) {
				if (clki->curr_freq == clki->max_freq)
					continue;
				return true;
			} else if (!scale_up && clki->min_freq) {
				if (clki->curr_freq == clki->min_freq)
					continue;
				return true;
			}
		}
	}

	return false;
}

/*
 * Determine the number of pending commands by counting the bits in the SCSI
 * device budget maps. This approach has been selected because a bit is set in
 * the budget map before scsi_host_queue_ready() checks the host_self_blocked
 * flag. The host_self_blocked flag can be modified by calling
 * scsi_block_requests() or scsi_unblock_requests().
 */
static u32 ufshcd_pending_cmds(struct ufs_hba *hba)
{
	const struct scsi_device *sdev;
	u32 pending = 0;

	lockdep_assert_held(hba->host->host_lock);
	__shost_for_each_device(sdev, hba->host)
		pending += sbitmap_weight(&sdev->budget_map);

	return pending;
}

/*
 * Wait until all pending SCSI commands and TMFs have finished or the timeout
 * has expired.
 *
 * Return: 0 upon success; -EBUSY upon timeout.
 */
static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
					u64 wait_timeout_us)
{
	unsigned long flags;
	int ret = 0;
	u32 tm_doorbell;
	u32 tr_pending;
	bool timeout = false, do_last_check = false;
	ktime_t start;

	ufshcd_hold(hba);
	spin_lock_irqsave(hba->host->host_lock, flags);
	/*
	 * Wait for all the outstanding tasks/transfer requests.
	 * Verify by checking the doorbell registers are clear.
	 */
	start = ktime_get();
	do {
		if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
			ret = -EBUSY;
			goto out;
		}

		tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
		tr_pending = ufshcd_pending_cmds(hba);
		if (!tm_doorbell && !tr_pending) {
			timeout = false;
			break;
		} else if (do_last_check) {
			break;
		}

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		io_schedule_timeout(msecs_to_jiffies(20));
		if (ktime_to_us(ktime_sub(ktime_get(), start)) >
		    wait_timeout_us) {
			timeout = true;
			/*
			 * We might have scheduled out for long time so make
			 * sure to check if doorbells are cleared by this time
			 * or not.
			 */
			do_last_check = true;
		}
		spin_lock_irqsave(hba->host->host_lock, flags);
	} while (tm_doorbell || tr_pending);

	if (timeout) {
		dev_err(hba->dev,
			"%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
			__func__, tm_doorbell, tr_pending);
		ret = -EBUSY;
	}
out:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_release(hba);
	return ret;
}

/**
 * ufshcd_scale_gear - scale up/down UFS gear
 * @hba: per adapter instance
 * @scale_up: True for scaling up gear and false for scaling down
 *
 * Return: 0 for success; -EBUSY if scaling can't happen at this time;
 * non-zero for any other errors.
 */
static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;
	struct ufs_pa_layer_attr new_pwr_info;

	if (scale_up) {
		memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info,
		       sizeof(struct ufs_pa_layer_attr));
	} else {
		memcpy(&new_pwr_info, &hba->pwr_info,
		       sizeof(struct ufs_pa_layer_attr));

		if (hba->pwr_info.gear_tx > hba->clk_scaling.min_gear ||
		    hba->pwr_info.gear_rx > hba->clk_scaling.min_gear) {
			/* save the current power mode */
			memcpy(&hba->clk_scaling.saved_pwr_info,
			       &hba->pwr_info,
			       sizeof(struct ufs_pa_layer_attr));

			/* scale down gear */
			new_pwr_info.gear_tx = hba->clk_scaling.min_gear;
			new_pwr_info.gear_rx = hba->clk_scaling.min_gear;
		}
	}

	/* check if the power mode needs to be changed or not? */
	ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
	if (ret)
		dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
			__func__, ret,
			hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
			new_pwr_info.gear_tx, new_pwr_info.gear_rx);

	return ret;
}

/*
 * Wait until all pending SCSI commands and TMFs have finished or the timeout
 * has expired.
 *
 * Return: 0 upon success; -EBUSY upon timeout.
 */
static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
{
	int ret = 0;
	/*
	 * make sure that there are no outstanding requests when
	 * clock scaling is in progress
	 */
	blk_mq_quiesce_tagset(&hba->host->tag_set);
	mutex_lock(&hba->wb_mutex);
	down_write(&hba->clk_scaling_lock);

	if (!hba->clk_scaling.is_allowed ||
	    ufshcd_wait_for_doorbell_clr(hba, timeout_us)) {
		ret = -EBUSY;
		up_write(&hba->clk_scaling_lock);
		mutex_unlock(&hba->wb_mutex);
		blk_mq_unquiesce_tagset(&hba->host->tag_set);
		goto out;
	}

	/* let's not get into low power until clock scaling is completed */
	ufshcd_hold(hba);

out:
	return ret;
}

static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool scale_up)
{
	up_write(&hba->clk_scaling_lock);

	/* Enable Write Booster if we have scaled up else disable it */
	if (ufshcd_enable_wb_if_scaling_up(hba) && !err)
		ufshcd_wb_toggle(hba, scale_up);

	mutex_unlock(&hba->wb_mutex);

	blk_mq_unquiesce_tagset(&hba->host->tag_set);
	ufshcd_release(hba);
}

/**
 * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
 * @hba: per adapter instance
 * @scale_up: True for scaling up and false for scaling down
 *
 * Return: 0 for success; -EBUSY if scaling can't happen at this time; non-zero
 * for any other errors.
 */
static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;

	ret = ufshcd_clock_scaling_prepare(hba, 1 * USEC_PER_SEC);
	if (ret)
		return ret;

	/* scale down the gear before scaling down clocks */
	if (!scale_up) {
		ret = ufshcd_scale_gear(hba, false);
		if (ret)
			goto out_unprepare;
	}

	ret = ufshcd_scale_clks(hba, scale_up);
	if (ret) {
		if (!scale_up)
			ufshcd_scale_gear(hba, true);
		goto out_unprepare;
	}

	/* scale up the gear after scaling up clocks */
	if (scale_up) {
		ret = ufshcd_scale_gear(hba, true);
		if (ret) {
			ufshcd_scale_clks(hba, false);
			goto out_unprepare;
		}
	}

out_unprepare:
	ufshcd_clock_scaling_unprepare(hba, ret, scale_up);
	return ret;
}

static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
					   clk_scaling.suspend_work);
	unsigned long irq_flags;

	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return;
	}
	hba->clk_scaling.is_suspended = true;
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	__ufshcd_suspend_clkscaling(hba);
}

static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
					   clk_scaling.resume_work);
	unsigned long irq_flags;

	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (!hba->clk_scaling.is_suspended) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return;
	}
	hba->clk_scaling.is_suspended = false;
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	devfreq_resume_device(hba->devfreq);
}

static int ufshcd_devfreq_target(struct device *dev,
				 unsigned long *freq, u32 flags)
{
	int ret = 0;
	struct ufs_hba *hba = dev_get_drvdata(dev);
	ktime_t start;
	bool scale_up, sched_clk_scaling_suspend_work = false;
	struct list_head *clk_list = &hba->clk_list_head;
	struct ufs_clk_info *clki;
	unsigned long irq_flags;

	if (!ufshcd_is_clkscaling_supported(hba))
		return -EINVAL;

	clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
	/* Override with the closest supported frequency */
	*freq = (unsigned long) clk_round_rate(clki->clk, *freq);
	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (ufshcd_eh_in_progress(hba)) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return 0;
	}

	if (!hba->clk_scaling.active_reqs)
		sched_clk_scaling_suspend_work = true;

	if (list_empty(clk_list)) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		goto out;
	}

	/* Decide based on the rounded-off frequency and update */
	scale_up = *freq == clki->max_freq;
	if (!scale_up)
		*freq = clki->min_freq;
	/* Update the frequency */
	if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		ret = 0;
		goto out; /* no state change required */
	}
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	start = ktime_get();
	ret = ufshcd_devfreq_scale(hba, scale_up);

	trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
		(scale_up ? "up" : "down"),
		ktime_to_us(ktime_sub(ktime_get(), start)), ret);

out:
	if (sched_clk_scaling_suspend_work)
		queue_work(hba->clk_scaling.workq,
			   &hba->clk_scaling.suspend_work);

	return ret;
}

static int ufshcd_devfreq_get_dev_status(struct device *dev,
					 struct devfreq_dev_status *stat)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_clk_scaling *scaling = &hba->clk_scaling;
	unsigned long flags;
	struct list_head *clk_list = &hba->clk_list_head;
	struct ufs_clk_info *clki;
	ktime_t curr_t;

	if (!ufshcd_is_clkscaling_supported(hba))
		return -EINVAL;

	memset(stat, 0, sizeof(*stat));

	spin_lock_irqsave(hba->host->host_lock, flags);
	curr_t = ktime_get();
	if (!scaling->window_start_t)
		goto start_window;

	clki = list_first_entry(clk_list, struct ufs_clk_info, list);
	/*
	 * If the current frequency is 0, the ondemand governor assumes that
	 * no initial frequency has been set and always requests the maximum
	 * frequency.
	 */
	stat->current_frequency = clki->curr_freq;
	if (scaling->is_busy_started)
		scaling->tot_busy_t += ktime_us_delta(curr_t,
				scaling->busy_start_t);

	stat->total_time = ktime_us_delta(curr_t, scaling->window_start_t);
	stat->busy_time = scaling->tot_busy_t;
start_window:
	scaling->window_start_t = curr_t;
	scaling->tot_busy_t = 0;

	if (scaling->active_reqs) {
		scaling->busy_start_t = curr_t;
		scaling->is_busy_started = true;
	} else {
		scaling->busy_start_t = 0;
		scaling->is_busy_started = false;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return 0;
}

static int ufshcd_devfreq_init(struct ufs_hba *hba)
{
	struct list_head *clk_list = &hba->clk_list_head;
	struct ufs_clk_info *clki;
	struct devfreq *devfreq;
	int ret;

	/* Skip devfreq if we don't have any clocks in the list */
	if (list_empty(clk_list))
		return 0;

	clki = list_first_entry(clk_list, struct ufs_clk_info, list);
	dev_pm_opp_add(hba->dev, clki->min_freq, 0);
	dev_pm_opp_add(hba->dev, clki->max_freq, 0);

	ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile,
					 &hba->vps->ondemand_data);
	devfreq = devfreq_add_device(hba->dev,
			&hba->vps->devfreq_profile,
			DEVFREQ_GOV_SIMPLE_ONDEMAND,
			&hba->vps->ondemand_data);
	if (IS_ERR(devfreq)) {
		ret = PTR_ERR(devfreq);
		dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);

		dev_pm_opp_remove(hba->dev, clki->min_freq);
		dev_pm_opp_remove(hba->dev, clki->max_freq);
		return ret;
	}

	hba->devfreq = devfreq;

	return 0;
}

static void ufshcd_devfreq_remove(struct ufs_hba *hba)
{
	struct list_head *clk_list = &hba->clk_list_head;
	struct ufs_clk_info *clki;

	if (!hba->devfreq)
		return;

	devfreq_remove_device(hba->devfreq);
	hba->devfreq = NULL;

	clki = list_first_entry(clk_list, struct ufs_clk_info, list);
	dev_pm_opp_remove(hba->dev, clki->min_freq);
	dev_pm_opp_remove(hba->dev, clki->max_freq);
}

static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
	unsigned long flags;

	devfreq_suspend_device(hba->devfreq);
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_scaling.window_start_t = 0;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}

static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
	unsigned long flags;
	bool suspend = false;

	cancel_work_sync(&hba->clk_scaling.suspend_work);
	cancel_work_sync(&hba->clk_scaling.resume_work);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (!hba->clk_scaling.is_suspended) {
		suspend = true;
		hba->clk_scaling.is_suspended = true;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (suspend)
		__ufshcd_suspend_clkscaling(hba);
}

static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
{
	unsigned long flags;
	bool resume = false;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_scaling.is_suspended) {
		resume = true;
		hba->clk_scaling.is_suspended = false;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (resume)
		devfreq_resume_device(hba->devfreq);
}

static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", hba->clk_scaling.is_enabled);
}

static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	u32 value;
	int err = 0;

	if (kstrtou32(buf, 0, &value))
		return -EINVAL;

	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		err = -EBUSY;
		goto out;
	}

	value = !!value;
	if (value == hba->clk_scaling.is_enabled)
		goto out;

	ufshcd_rpm_get_sync(hba);
	ufshcd_hold(hba);

	hba->clk_scaling.is_enabled = value;

	if (value) {
		ufshcd_resume_clkscaling(hba);
	} else {
		ufshcd_suspend_clkscaling(hba);
		err = ufshcd_devfreq_scale(hba, true);
		if (err)
			dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
				__func__, err);
	}

	ufshcd_release(hba);
	ufshcd_rpm_put_sync(hba);
out:
	up(&hba->host_sem);
	return err ? err : count;
}

static void ufshcd_init_clk_scaling_sysfs(struct ufs_hba *hba)
{
	hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
	hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
	sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
	hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
	hba->clk_scaling.enable_attr.attr.mode = 0644;
	if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
}

static void ufshcd_remove_clk_scaling_sysfs(struct ufs_hba *hba)
{
	if (hba->clk_scaling.enable_attr.attr.name)
		device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
}

static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
{
	char wq_name[sizeof("ufs_clkscaling_00")];

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	if (!hba->clk_scaling.min_gear)
		hba->clk_scaling.min_gear = UFS_HS_G1;

	INIT_WORK(&hba->clk_scaling.suspend_work,
		  ufshcd_clk_scaling_suspend_work);
	INIT_WORK(&hba->clk_scaling.resume_work,
		  ufshcd_clk_scaling_resume_work);

	snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
		 hba->host->host_no);
	hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);

	hba->clk_scaling.is_initialized = true;
}

static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
{
	if (!hba->clk_scaling.is_initialized)
		return;

	ufshcd_remove_clk_scaling_sysfs(hba);
	destroy_workqueue(hba->clk_scaling.workq);
	ufshcd_devfreq_remove(hba);
	hba->clk_scaling.is_initialized = false;
}

static void ufshcd_ungate_work(struct work_struct *work)
{
	int ret;
	unsigned long flags;
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
					   clk_gating.ungate_work);

	cancel_delayed_work_sync(&hba->clk_gating.gate_work);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == CLKS_ON) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		return;
	}

	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_hba_vreg_set_hpm(hba);
	ufshcd_setup_clocks(hba, true);

	ufshcd_enable_irq(hba);

	/* Exit from hibern8 */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		/* Prevent gating in this path */
		hba->clk_gating.is_suspended = true;
		if (ufshcd_is_link_hibern8(hba)) {
			ret = ufshcd_uic_hibern8_exit(hba);
			if (ret)
				dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
					__func__, ret);
			else
				ufshcd_set_link_active(hba);
		}
		hba->clk_gating.is_suspended = false;
	}
}
1720
1721 /**
1722 * ufshcd_hold - Enable clocks that were gated earlier by ufshcd_release().
1723 * Also exit hibern8 mode and set the link active.
1724 * @hba: per adapter instance
1725 */
ufshcd_hold(struct ufs_hba * hba)1726 void ufshcd_hold(struct ufs_hba *hba)
1727 {
1728 bool flush_result;
1729 unsigned long flags;
1730
1731 if (!ufshcd_is_clkgating_allowed(hba) ||
1732 !hba->clk_gating.is_initialized)
1733 return;
1734 spin_lock_irqsave(hba->host->host_lock, flags);
1735 hba->clk_gating.active_reqs++;
1736
1737 start:
1738 switch (hba->clk_gating.state) {
1739 case CLKS_ON:
1740 /*
1741 * Wait for the ungate work to complete if in progress.
1742 * Though the clocks may be in the ON state, the link could
1743 * still be in the hibern8 state if hibern8 is allowed
1744 * during clock gating.
1745 * Make sure we exit the hibern8 state in addition to the
1746 * clocks being ON.
1747 */
1748 if (ufshcd_can_hibern8_during_gating(hba) &&
1749 ufshcd_is_link_hibern8(hba)) {
1750 spin_unlock_irqrestore(hba->host->host_lock, flags);
1751 flush_result = flush_work(&hba->clk_gating.ungate_work);
1752 if (hba->clk_gating.is_suspended && !flush_result)
1753 return;
1754 spin_lock_irqsave(hba->host->host_lock, flags);
1755 goto start;
1756 }
1757 break;
1758 case REQ_CLKS_OFF:
1759 if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
1760 hba->clk_gating.state = CLKS_ON;
1761 trace_ufshcd_clk_gating(dev_name(hba->dev),
1762 hba->clk_gating.state);
1763 break;
1764 }
1765 /*
1766 * If we are here, it means gating work is either done or
1767 * currently running. Hence, fall through to cancel gating
1768 * work and to enable clocks.
1769 */
1770 fallthrough;
1771 case CLKS_OFF:
1772 hba->clk_gating.state = REQ_CLKS_ON;
1773 trace_ufshcd_clk_gating(dev_name(hba->dev),
1774 hba->clk_gating.state);
1775 queue_work(hba->clk_gating.clk_gating_workq,
1776 &hba->clk_gating.ungate_work);
1777 /*
1778 * fall through to check if we should wait for this
1779 * work to be done or not.
1780 */
1781 fallthrough;
1782 case REQ_CLKS_ON:
1783 spin_unlock_irqrestore(hba->host->host_lock, flags);
1784 flush_work(&hba->clk_gating.ungate_work);
1785 /* Make sure state is CLKS_ON before returning */
1786 spin_lock_irqsave(hba->host->host_lock, flags);
1787 goto start;
1788 default:
1789 dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
1790 __func__, hba->clk_gating.state);
1791 break;
1792 }
1793 spin_unlock_irqrestore(hba->host->host_lock, flags);
1794 }
1795 EXPORT_SYMBOL_GPL(ufshcd_hold);
1796
ufshcd_gate_work(struct work_struct * work)1797 static void ufshcd_gate_work(struct work_struct *work)
1798 {
1799 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1800 clk_gating.gate_work.work);
1801 unsigned long flags;
1802 int ret;
1803
1804 spin_lock_irqsave(hba->host->host_lock, flags);
1805 /*
1806 * If this work is being canceled, the gating state will have
1807 * been marked as REQ_CLKS_ON. In that case save time by
1808 * skipping the gating work and exiting after setting the clock
1809 * state to CLKS_ON.
1810 */
1811 if (hba->clk_gating.is_suspended ||
1812 (hba->clk_gating.state != REQ_CLKS_OFF)) {
1813 hba->clk_gating.state = CLKS_ON;
1814 trace_ufshcd_clk_gating(dev_name(hba->dev),
1815 hba->clk_gating.state);
1816 goto rel_lock;
1817 }
1818
1819 if (hba->clk_gating.active_reqs
1820 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1821 || hba->outstanding_reqs || hba->outstanding_tasks
1822 || hba->active_uic_cmd || hba->uic_async_done)
1823 goto rel_lock;
1824
1825 spin_unlock_irqrestore(hba->host->host_lock, flags);
1826
1827 /* put the link into hibern8 mode before turning off clocks */
1828 if (ufshcd_can_hibern8_during_gating(hba)) {
1829 ret = ufshcd_uic_hibern8_enter(hba);
1830 if (ret) {
1831 hba->clk_gating.state = CLKS_ON;
1832 dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
1833 __func__, ret);
1834 trace_ufshcd_clk_gating(dev_name(hba->dev),
1835 hba->clk_gating.state);
1836 goto out;
1837 }
1838 ufshcd_set_link_hibern8(hba);
1839 }
1840
1841 ufshcd_disable_irq(hba);
1842
1843 ufshcd_setup_clocks(hba, false);
1844
1845 /* Put the host controller in low power mode if possible */
1846 ufshcd_hba_vreg_set_lpm(hba);
1847 /*
1848 * If this work is being canceled, the gating state will have
1849 * been marked as REQ_CLKS_ON. In that case keep the state as
1850 * REQ_CLKS_ON, which still implies that the clocks are off and
1851 * a request to turn them on is pending. This keeps the state
1852 * machine intact and ultimately prevents the cancel work from
1853 * being done multiple times when new requests arrive before the
1854 * current cancel work has finished.
1855 */
1856 spin_lock_irqsave(hba->host->host_lock, flags);
1857 if (hba->clk_gating.state == REQ_CLKS_OFF) {
1858 hba->clk_gating.state = CLKS_OFF;
1859 trace_ufshcd_clk_gating(dev_name(hba->dev),
1860 hba->clk_gating.state);
1861 }
1862 rel_lock:
1863 spin_unlock_irqrestore(hba->host->host_lock, flags);
1864 out:
1865 return;
1866 }
1867
1868 /* host lock must be held before calling this variant */
__ufshcd_release(struct ufs_hba * hba)1869 static void __ufshcd_release(struct ufs_hba *hba)
1870 {
1871 if (!ufshcd_is_clkgating_allowed(hba))
1872 return;
1873
1874 hba->clk_gating.active_reqs--;
1875
1876 if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
1877 hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
1878 hba->outstanding_tasks || !hba->clk_gating.is_initialized ||
1879 hba->active_uic_cmd || hba->uic_async_done ||
1880 hba->clk_gating.state == CLKS_OFF)
1881 return;
1882
1883 hba->clk_gating.state = REQ_CLKS_OFF;
1884 trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
1885 queue_delayed_work(hba->clk_gating.clk_gating_workq,
1886 &hba->clk_gating.gate_work,
1887 msecs_to_jiffies(hba->clk_gating.delay_ms));
1888 }
1889
ufshcd_release(struct ufs_hba * hba)1890 void ufshcd_release(struct ufs_hba *hba)
1891 {
1892 unsigned long flags;
1893
1894 spin_lock_irqsave(hba->host->host_lock, flags);
1895 __ufshcd_release(hba);
1896 spin_unlock_irqrestore(hba->host->host_lock, flags);
1897 }
1898 EXPORT_SYMBOL_GPL(ufshcd_release);
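
/*
 * Illustrative sketch only, not part of the driver: callers that need the
 * controller clocks ungated bracket their work with ufshcd_hold() and
 * ufshcd_release(), as ufshcd_send_uic_cmd() and the query helpers below do.
 * The helper name here is hypothetical.
 */
static void __maybe_unused ufshcd_example_clk_held_access(struct ufs_hba *hba)
{
	/* Ungate the clocks (and exit hibern8 if needed) before touching HW */
	ufshcd_hold(hba);

	/* ... issue commands or access UFSHCI registers here ... */

	/* Only arms the delayed gate_work; gating happens after delay_ms */
	ufshcd_release(hba);
}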
1899
ufshcd_clkgate_delay_show(struct device * dev,struct device_attribute * attr,char * buf)1900 static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
1901 struct device_attribute *attr, char *buf)
1902 {
1903 struct ufs_hba *hba = dev_get_drvdata(dev);
1904
1905 return sysfs_emit(buf, "%lu\n", hba->clk_gating.delay_ms);
1906 }
1907
ufshcd_clkgate_delay_set(struct device * dev,unsigned long value)1908 void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value)
1909 {
1910 struct ufs_hba *hba = dev_get_drvdata(dev);
1911 unsigned long flags;
1912
1913 spin_lock_irqsave(hba->host->host_lock, flags);
1914 hba->clk_gating.delay_ms = value;
1915 spin_unlock_irqrestore(hba->host->host_lock, flags);
1916 }
1917 EXPORT_SYMBOL_GPL(ufshcd_clkgate_delay_set);
1918
ufshcd_clkgate_delay_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)1919 static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
1920 struct device_attribute *attr, const char *buf, size_t count)
1921 {
1922 unsigned long value;
1923
1924 if (kstrtoul(buf, 0, &value))
1925 return -EINVAL;
1926
1927 ufshcd_clkgate_delay_set(dev, value);
1928 return count;
1929 }
1930
ufshcd_clkgate_enable_show(struct device * dev,struct device_attribute * attr,char * buf)1931 static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
1932 struct device_attribute *attr, char *buf)
1933 {
1934 struct ufs_hba *hba = dev_get_drvdata(dev);
1935
1936 return sysfs_emit(buf, "%d\n", hba->clk_gating.is_enabled);
1937 }
1938
ufshcd_clkgate_enable_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)1939 static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
1940 struct device_attribute *attr, const char *buf, size_t count)
1941 {
1942 struct ufs_hba *hba = dev_get_drvdata(dev);
1943 unsigned long flags;
1944 u32 value;
1945
1946 if (kstrtou32(buf, 0, &value))
1947 return -EINVAL;
1948
1949 value = !!value;
1950
1951 spin_lock_irqsave(hba->host->host_lock, flags);
1952 if (value == hba->clk_gating.is_enabled)
1953 goto out;
1954
1955 if (value)
1956 __ufshcd_release(hba);
1957 else
1958 hba->clk_gating.active_reqs++;
1959
1960 hba->clk_gating.is_enabled = value;
1961 out:
1962 spin_unlock_irqrestore(hba->host->host_lock, flags);
1963 return count;
1964 }
1965
ufshcd_init_clk_gating_sysfs(struct ufs_hba * hba)1966 static void ufshcd_init_clk_gating_sysfs(struct ufs_hba *hba)
1967 {
1968 hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
1969 hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
1970 sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
1971 hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
1972 hba->clk_gating.delay_attr.attr.mode = 0644;
1973 if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
1974 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
1975
1976 hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
1977 hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
1978 sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
1979 hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
1980 hba->clk_gating.enable_attr.attr.mode = 0644;
1981 if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
1982 dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
1983 }
1984
ufshcd_remove_clk_gating_sysfs(struct ufs_hba * hba)1985 static void ufshcd_remove_clk_gating_sysfs(struct ufs_hba *hba)
1986 {
1987 if (hba->clk_gating.delay_attr.attr.name)
1988 device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
1989 if (hba->clk_gating.enable_attr.attr.name)
1990 device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
1991 }
1992
ufshcd_init_clk_gating(struct ufs_hba * hba)1993 static void ufshcd_init_clk_gating(struct ufs_hba *hba)
1994 {
1995 char wq_name[sizeof("ufs_clk_gating_00")];
1996
1997 if (!ufshcd_is_clkgating_allowed(hba))
1998 return;
1999
2000 hba->clk_gating.state = CLKS_ON;
2001
2002 hba->clk_gating.delay_ms = 150;
2003 INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
2004 INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
2005
2006 snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
2007 hba->host->host_no);
2008 hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
2009 WQ_MEM_RECLAIM | WQ_HIGHPRI);
2010
2011 ufshcd_init_clk_gating_sysfs(hba);
2012
2013 hba->clk_gating.is_enabled = true;
2014 hba->clk_gating.is_initialized = true;
2015 }
2016
ufshcd_exit_clk_gating(struct ufs_hba * hba)2017 static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
2018 {
2019 if (!hba->clk_gating.is_initialized)
2020 return;
2021
2022 ufshcd_remove_clk_gating_sysfs(hba);
2023
2024 /* Ungate the clock if necessary. */
2025 ufshcd_hold(hba);
2026 hba->clk_gating.is_initialized = false;
2027 ufshcd_release(hba);
2028
2029 destroy_workqueue(hba->clk_gating.clk_gating_workq);
2030 }
2031
ufshcd_clk_scaling_start_busy(struct ufs_hba * hba)2032 static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
2033 {
2034 bool queue_resume_work = false;
2035 ktime_t curr_t = ktime_get();
2036 unsigned long flags;
2037
2038 if (!ufshcd_is_clkscaling_supported(hba))
2039 return;
2040
2041 spin_lock_irqsave(hba->host->host_lock, flags);
2042 if (!hba->clk_scaling.active_reqs++)
2043 queue_resume_work = true;
2044
2045 if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) {
2046 spin_unlock_irqrestore(hba->host->host_lock, flags);
2047 return;
2048 }
2049
2050 if (queue_resume_work)
2051 queue_work(hba->clk_scaling.workq,
2052 &hba->clk_scaling.resume_work);
2053
2054 if (!hba->clk_scaling.window_start_t) {
2055 hba->clk_scaling.window_start_t = curr_t;
2056 hba->clk_scaling.tot_busy_t = 0;
2057 hba->clk_scaling.is_busy_started = false;
2058 }
2059
2060 if (!hba->clk_scaling.is_busy_started) {
2061 hba->clk_scaling.busy_start_t = curr_t;
2062 hba->clk_scaling.is_busy_started = true;
2063 }
2064 spin_unlock_irqrestore(hba->host->host_lock, flags);
2065 }
2066
ufshcd_clk_scaling_update_busy(struct ufs_hba * hba)2067 static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
2068 {
2069 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
2070 unsigned long flags;
2071
2072 if (!ufshcd_is_clkscaling_supported(hba))
2073 return;
2074
2075 spin_lock_irqsave(hba->host->host_lock, flags);
2076 hba->clk_scaling.active_reqs--;
2077 if (!scaling->active_reqs && scaling->is_busy_started) {
2078 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
2079 scaling->busy_start_t));
2080 scaling->busy_start_t = 0;
2081 scaling->is_busy_started = false;
2082 }
2083 spin_unlock_irqrestore(hba->host->host_lock, flags);
2084 }
2085
ufshcd_monitor_opcode2dir(u8 opcode)2086 static inline int ufshcd_monitor_opcode2dir(u8 opcode)
2087 {
2088 if (opcode == READ_6 || opcode == READ_10 || opcode == READ_16)
2089 return READ;
2090 else if (opcode == WRITE_6 || opcode == WRITE_10 || opcode == WRITE_16)
2091 return WRITE;
2092 else
2093 return -EINVAL;
2094 }
2095
ufshcd_should_inform_monitor(struct ufs_hba * hba,struct ufshcd_lrb * lrbp)2096 static inline bool ufshcd_should_inform_monitor(struct ufs_hba *hba,
2097 struct ufshcd_lrb *lrbp)
2098 {
2099 const struct ufs_hba_monitor *m = &hba->monitor;
2100
2101 return (m->enabled && lrbp && lrbp->cmd &&
2102 (!m->chunk_size || m->chunk_size == lrbp->cmd->sdb.length) &&
2103 ktime_before(hba->monitor.enabled_ts, lrbp->issue_time_stamp));
2104 }
2105
ufshcd_start_monitor(struct ufs_hba * hba,const struct ufshcd_lrb * lrbp)2106 static void ufshcd_start_monitor(struct ufs_hba *hba,
2107 const struct ufshcd_lrb *lrbp)
2108 {
2109 int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
2110 unsigned long flags;
2111
2112 spin_lock_irqsave(hba->host->host_lock, flags);
2113 if (dir >= 0 && hba->monitor.nr_queued[dir]++ == 0)
2114 hba->monitor.busy_start_ts[dir] = ktime_get();
2115 spin_unlock_irqrestore(hba->host->host_lock, flags);
2116 }
2117
ufshcd_update_monitor(struct ufs_hba * hba,const struct ufshcd_lrb * lrbp)2118 static void ufshcd_update_monitor(struct ufs_hba *hba, const struct ufshcd_lrb *lrbp)
2119 {
2120 int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
2121 unsigned long flags;
2122
2123 spin_lock_irqsave(hba->host->host_lock, flags);
2124 if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) {
2125 const struct request *req = scsi_cmd_to_rq(lrbp->cmd);
2126 struct ufs_hba_monitor *m = &hba->monitor;
2127 ktime_t now, inc, lat;
2128
2129 now = lrbp->compl_time_stamp;
2130 inc = ktime_sub(now, m->busy_start_ts[dir]);
2131 m->total_busy[dir] = ktime_add(m->total_busy[dir], inc);
2132 m->nr_sec_rw[dir] += blk_rq_sectors(req);
2133
2134 /* Update latencies */
2135 m->nr_req[dir]++;
2136 lat = ktime_sub(now, lrbp->issue_time_stamp);
2137 m->lat_sum[dir] += lat;
2138 if (m->lat_max[dir] < lat || !m->lat_max[dir])
2139 m->lat_max[dir] = lat;
2140 if (m->lat_min[dir] > lat || !m->lat_min[dir])
2141 m->lat_min[dir] = lat;
2142
2143 m->nr_queued[dir]--;
2144 /* Push the monitor's busy-start timestamp forward */
2145 m->busy_start_ts[dir] = now;
2146 }
2147 spin_unlock_irqrestore(hba->host->host_lock, flags);
2148 }
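
/*
 * Illustrative sketch only (hypothetical helper): the average per-direction
 * request latency follows from the counters updated above, e.g. as the
 * monitor sysfs interface derives it. div_u64() is used because lat_sum is
 * a 64-bit ktime value.
 */
static u64 __maybe_unused ufshcd_example_avg_lat_us(const struct ufs_hba_monitor *m,
						    int dir)
{
	if (!m->nr_req[dir])
		return 0;

	return div_u64(ktime_to_us(m->lat_sum[dir]), m->nr_req[dir]);
}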
2149
2150 /**
2151 * ufshcd_send_command - Send SCSI or device management commands
2152 * @hba: per adapter instance
2153 * @task_tag: Task tag of the command
2154 * @hwq: pointer to hardware queue instance
2155 */
2156 static inline
ufshcd_send_command(struct ufs_hba * hba,unsigned int task_tag,struct ufs_hw_queue * hwq)2157 void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag,
2158 struct ufs_hw_queue *hwq)
2159 {
2160 struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
2161 unsigned long flags;
2162
2163 lrbp->issue_time_stamp = ktime_get();
2164 lrbp->issue_time_stamp_local_clock = local_clock();
2165 lrbp->compl_time_stamp = ktime_set(0, 0);
2166 lrbp->compl_time_stamp_local_clock = 0;
2167 ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND);
2168 ufshcd_clk_scaling_start_busy(hba);
2169 if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
2170 ufshcd_start_monitor(hba, lrbp);
2171
2172 if (is_mcq_enabled(hba)) {
2173 int utrd_size = sizeof(struct utp_transfer_req_desc);
2174 struct utp_transfer_req_desc *src = lrbp->utr_descriptor_ptr;
2175 struct utp_transfer_req_desc *dest;
2176
2177 spin_lock(&hwq->sq_lock);
2178 dest = hwq->sqe_base_addr + hwq->sq_tail_slot;
2179 memcpy(dest, src, utrd_size);
2180 ufshcd_inc_sq_tail(hwq);
2181 spin_unlock(&hwq->sq_lock);
2182 } else {
2183 spin_lock_irqsave(&hba->outstanding_lock, flags);
2184 if (hba->vops && hba->vops->setup_xfer_req)
2185 hba->vops->setup_xfer_req(hba, lrbp->task_tag,
2186 !!lrbp->cmd);
2187 __set_bit(lrbp->task_tag, &hba->outstanding_reqs);
2188 ufshcd_writel(hba, 1 << lrbp->task_tag,
2189 REG_UTP_TRANSFER_REQ_DOOR_BELL);
2190 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
2191 }
2192 }
2193
2194 /**
2195 * ufshcd_copy_sense_data - Copy sense data in case of check condition
2196 * @lrbp: pointer to local reference block
2197 */
ufshcd_copy_sense_data(struct ufshcd_lrb * lrbp)2198 static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
2199 {
2200 u8 *const sense_buffer = lrbp->cmd->sense_buffer;
2201 u16 resp_len;
2202 int len;
2203
2204 resp_len = be16_to_cpu(lrbp->ucd_rsp_ptr->header.data_segment_length);
2205 if (sense_buffer && resp_len) {
2206 int len_to_copy;
2207
2208 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
2209 len_to_copy = min_t(int, UFS_SENSE_SIZE, len);
2210
2211 memcpy(sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data,
2212 len_to_copy);
2213 }
2214 }
2215
2216 /**
2217 * ufshcd_copy_query_response() - Copy the Query Response and the data
2218 * descriptor
2219 * @hba: per adapter instance
2220 * @lrbp: pointer to local reference block
2221 *
2222 * Return: 0 upon success; < 0 upon failure.
2223 */
2224 static
ufshcd_copy_query_response(struct ufs_hba * hba,struct ufshcd_lrb * lrbp)2225 int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2226 {
2227 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2228
2229 memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
2230
2231 /* Get the descriptor */
2232 if (hba->dev_cmd.query.descriptor &&
2233 lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
2234 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
2235 GENERAL_UPIU_REQUEST_SIZE;
2236 u16 resp_len;
2237 u16 buf_len;
2238
2239 /* data segment length */
2240 resp_len = be16_to_cpu(lrbp->ucd_rsp_ptr->header
2241 .data_segment_length);
2242 buf_len = be16_to_cpu(
2243 hba->dev_cmd.query.request.upiu_req.length);
2244 if (likely(buf_len >= resp_len)) {
2245 memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
2246 } else {
2247 dev_warn(hba->dev,
2248 "%s: rsp size %d is bigger than buffer size %d",
2249 __func__, resp_len, buf_len);
2250 return -EINVAL;
2251 }
2252 }
2253
2254 return 0;
2255 }
2256
2257 /**
2258 * ufshcd_hba_capabilities - Read controller capabilities
2259 * @hba: per adapter instance
2260 *
2261 * Return: 0 on success, negative on error.
2262 */
ufshcd_hba_capabilities(struct ufs_hba * hba)2263 static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
2264 {
2265 int err;
2266
2267 hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
2268
2269 /* nutrs and nutmrs are 0 based values */
2270 hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
2271 hba->nutmrs =
2272 ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
2273 hba->reserved_slot = hba->nutrs - 1;
2274
2275 /* Read crypto capabilities */
2276 err = ufshcd_hba_init_crypto_capabilities(hba);
2277 if (err) {
2278 dev_err(hba->dev, "crypto setup failed\n");
2279 return err;
2280 }
2281
2282 /*
2283 * The UFSHCI 3.0 specification does not define MCQ_SUPPORT and
2284 * LSDB_SUPPORT, but it defines bits [31:29] as reserved with a reset
2285 * value of 0, so these fields can safely be read regardless of version.
2286 */
2287 hba->mcq_sup = FIELD_GET(MASK_MCQ_SUPPORT, hba->capabilities);
2288 /*
2289 * 0h: legacy single doorbell support is available
2290 * 1h: indicates that legacy single doorbell support has been removed
2291 */
2292 hba->lsdb_sup = !FIELD_GET(MASK_LSDB_SUPPORT, hba->capabilities);
2293 if (!hba->mcq_sup)
2294 return 0;
2295
2296 hba->mcq_capabilities = ufshcd_readl(hba, REG_MCQCAP);
2297 hba->ext_iid_sup = FIELD_GET(MASK_EXT_IID_SUPPORT,
2298 hba->mcq_capabilities);
2299
2300 return 0;
2301 }
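
/*
 * Illustrative sketch only (hypothetical helper with a made-up register
 * value): how the 0-based capability fields read above translate into slot
 * counts.
 */
static void __maybe_unused ufshcd_example_decode_caps(struct device *dev)
{
	u32 capabilities = 0x0707001f;	/* hypothetical example value */
	int nutrs = (capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
	int nutmrs = ((capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;

	dev_info(dev, "%d transfer request slots, %d task management slots\n",
		 nutrs, nutmrs);
}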
2302
2303 /**
2304 * ufshcd_ready_for_uic_cmd - Check if controller is ready
2305 * to accept UIC commands
2306 * @hba: per adapter instance
2307 *
2308 * Return: true on success, else false.
2309 */
ufshcd_ready_for_uic_cmd(struct ufs_hba * hba)2310 static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
2311 {
2312 u32 val;
2313 int ret = read_poll_timeout(ufshcd_readl, val, val & UIC_COMMAND_READY,
2314 500, UIC_CMD_TIMEOUT * 1000, false, hba,
2315 REG_CONTROLLER_STATUS);
2316 return ret == 0;
2317 }
2318
2319 /**
2320 * ufshcd_get_upmcrs - Get the power mode change request status
2321 * @hba: Pointer to adapter instance
2322 *
2323 * This function gets the UPMCRS field of HCS register
2324 *
2325 * Return: value of UPMCRS field.
2326 */
ufshcd_get_upmcrs(struct ufs_hba * hba)2327 static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
2328 {
2329 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
2330 }
2331
2332 /**
2333 * ufshcd_dispatch_uic_cmd - Dispatch a UIC command to the UniPro layer
2334 * @hba: per adapter instance
2335 * @uic_cmd: UIC command
2336 */
2337 static inline void
ufshcd_dispatch_uic_cmd(struct ufs_hba * hba,struct uic_command * uic_cmd)2338 ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2339 {
2340 lockdep_assert_held(&hba->uic_cmd_mutex);
2341
2342 WARN_ON(hba->active_uic_cmd);
2343
2344 hba->active_uic_cmd = uic_cmd;
2345
2346 /* Write Args */
2347 ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
2348 ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
2349 ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
2350
2351 ufshcd_add_uic_command_trace(hba, uic_cmd, UFS_CMD_SEND);
2352
2353 /* Write UIC Cmd */
2354 ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
2355 REG_UIC_COMMAND);
2356 }
2357
2358 /**
2359 * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
2360 * @hba: per adapter instance
2361 * @uic_cmd: UIC command
2362 *
2363 * Return: 0 only if success.
2364 */
2365 static int
ufshcd_wait_for_uic_cmd(struct ufs_hba * hba,struct uic_command * uic_cmd)2366 ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2367 {
2368 int ret;
2369 unsigned long flags;
2370
2371 lockdep_assert_held(&hba->uic_cmd_mutex);
2372
2373 if (wait_for_completion_timeout(&uic_cmd->done,
2374 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
2375 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2376 } else {
2377 ret = -ETIMEDOUT;
2378 dev_err(hba->dev,
2379 "uic cmd 0x%x with arg3 0x%x completion timeout\n",
2380 uic_cmd->command, uic_cmd->argument3);
2381
2382 if (!uic_cmd->cmd_active) {
2383 dev_err(hba->dev, "%s: UIC cmd has been completed, return the result\n",
2384 __func__);
2385 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2386 }
2387 }
2388
2389 spin_lock_irqsave(hba->host->host_lock, flags);
2390 hba->active_uic_cmd = NULL;
2391 spin_unlock_irqrestore(hba->host->host_lock, flags);
2392
2393 return ret;
2394 }
2395
2396 /**
2397 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2398 * @hba: per adapter instance
2399 * @uic_cmd: UIC command
2400 *
2401 * Return: 0 only if success.
2402 */
2403 static int
__ufshcd_send_uic_cmd(struct ufs_hba * hba,struct uic_command * uic_cmd)2404 __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2405 {
2406 lockdep_assert_held(&hba->uic_cmd_mutex);
2407
2408 if (!ufshcd_ready_for_uic_cmd(hba)) {
2409 dev_err(hba->dev,
2410 "Controller not ready to accept UIC commands\n");
2411 return -EIO;
2412 }
2413
2414 init_completion(&uic_cmd->done);
2415
2416 uic_cmd->cmd_active = 1;
2417 ufshcd_dispatch_uic_cmd(hba, uic_cmd);
2418
2419 return 0;
2420 }
2421
2422 /**
2423 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2424 * @hba: per adapter instance
2425 * @uic_cmd: UIC command
2426 *
2427 * Return: 0 only if success.
2428 */
ufshcd_send_uic_cmd(struct ufs_hba * hba,struct uic_command * uic_cmd)2429 int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2430 {
2431 int ret;
2432
2433 if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
2434 return 0;
2435
2436 ufshcd_hold(hba);
2437 mutex_lock(&hba->uic_cmd_mutex);
2438 ufshcd_add_delay_before_dme_cmd(hba);
2439
2440 ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
2441 if (!ret)
2442 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
2443
2444 mutex_unlock(&hba->uic_cmd_mutex);
2445
2446 ufshcd_release(hba);
2447 return ret;
2448 }
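
/*
 * Illustrative sketch only (hypothetical helper): issuing a single DME_GET
 * through ufshcd_send_uic_cmd(). On success the value read from the attribute
 * is returned in argument3, which is how the driver's DME accessors use this
 * interface.
 */
static int __maybe_unused ufshcd_example_dme_get(struct ufs_hba *hba,
						 u32 attr_sel, u32 *mib_val)
{
	struct uic_command uic_cmd = {
		.command = UIC_CMD_DME_GET,
		.argument1 = attr_sel,
	};
	int ret;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (!ret && mib_val)
		*mib_val = uic_cmd.argument3;

	return ret;
}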
2449
2450 /**
2451 * ufshcd_sgl_to_prdt - SG list to PRDT (Physical Region Description Table, 4DW format)
2452 * @hba: per-adapter instance
2453 * @lrbp: pointer to local reference block
2454 * @sg_entries: The number of sg entries actually used
2455 * @sg_list: Pointer to SG list
2456 */
ufshcd_sgl_to_prdt(struct ufs_hba * hba,struct ufshcd_lrb * lrbp,int sg_entries,struct scatterlist * sg_list)2457 static void ufshcd_sgl_to_prdt(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int sg_entries,
2458 struct scatterlist *sg_list)
2459 {
2460 struct ufshcd_sg_entry *prd;
2461 struct scatterlist *sg;
2462 int i;
2463
2464 if (sg_entries) {
2465
2466 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
2467 lrbp->utr_descriptor_ptr->prd_table_length =
2468 cpu_to_le16(sg_entries * ufshcd_sg_entry_size(hba));
2469 else
2470 lrbp->utr_descriptor_ptr->prd_table_length = cpu_to_le16(sg_entries);
2471
2472 prd = lrbp->ucd_prdt_ptr;
2473
2474 for_each_sg(sg_list, sg, sg_entries, i) {
2475 const unsigned int len = sg_dma_len(sg);
2476
2477 /*
2478 * From the UFSHCI spec: "Data Byte Count (DBC): A '0'
2479 * based value that indicates the length, in bytes, of
2480 * the data block. A maximum length of 256KB may
2481 * exist for any entry. Bits 1:0 of this field shall be
2482 * 11b to indicate Dword granularity. A value of '3'
2483 * indicates 4 bytes, '7' indicates 8 bytes, etc."
2484 */
2485 WARN_ONCE(len > SZ_256K, "len = %#x\n", len);
2486 prd->size = cpu_to_le32(len - 1);
2487 prd->addr = cpu_to_le64(sg->dma_address);
2488 prd->reserved = 0;
2489 prd = (void *)prd + ufshcd_sg_entry_size(hba);
2490 }
2491 } else {
2492 lrbp->utr_descriptor_ptr->prd_table_length = 0;
2493 }
2494 }
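
/*
 * Illustrative sketch only (hypothetical helper): the 0-based Data Byte Count
 * encoding used above; e.g. a 4 KiB DMA segment is programmed as 0xFFF.
 */
static __le32 __maybe_unused ufshcd_example_prd_size(unsigned int len)
{
	/* len is expected to be a DWord multiple of at most SZ_256K */
	return cpu_to_le32(len - 1);
}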
2495
2496 /**
2497 * ufshcd_map_sg - Map scatter-gather list to prdt
2498 * @hba: per adapter instance
2499 * @lrbp: pointer to local reference block
2500 *
2501 * Return: 0 in case of success, non-zero value in case of failure.
2502 */
ufshcd_map_sg(struct ufs_hba * hba,struct ufshcd_lrb * lrbp)2503 static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2504 {
2505 struct scsi_cmnd *cmd = lrbp->cmd;
2506 int sg_segments = scsi_dma_map(cmd);
2507
2508 if (sg_segments < 0)
2509 return sg_segments;
2510
2511 ufshcd_sgl_to_prdt(hba, lrbp, sg_segments, scsi_sglist(cmd));
2512
2513 return 0;
2514 }
2515
2516 /**
2517 * ufshcd_enable_intr - enable interrupts
2518 * @hba: per adapter instance
2519 * @intrs: interrupt bits
2520 */
ufshcd_enable_intr(struct ufs_hba * hba,u32 intrs)2521 static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
2522 {
2523 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2524
2525 if (hba->ufs_version == ufshci_version(1, 0)) {
2526 u32 rw;
2527 rw = set & INTERRUPT_MASK_RW_VER_10;
2528 set = rw | ((set ^ intrs) & intrs);
2529 } else {
2530 set |= intrs;
2531 }
2532
2533 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2534 }
2535
2536 /**
2537 * ufshcd_disable_intr - disable interrupts
2538 * @hba: per adapter instance
2539 * @intrs: interrupt bits
2540 */
ufshcd_disable_intr(struct ufs_hba * hba,u32 intrs)2541 static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
2542 {
2543 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2544
2545 if (hba->ufs_version == ufshci_version(1, 0)) {
2546 u32 rw;
2547 rw = (set & INTERRUPT_MASK_RW_VER_10) &
2548 ~(intrs & INTERRUPT_MASK_RW_VER_10);
2549 set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
2550
2551 } else {
2552 set &= ~intrs;
2553 }
2554
2555 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2556 }
2557
2558 /**
2559 * ufshcd_prepare_req_desc_hdr - Fill the UTP transfer request descriptor
2560 * header according to the request
2561 * @lrbp: pointer to local reference block
2562 * @upiu_flags: flags required in the header
2563 * @cmd_dir: requests data direction
2564 * @ehs_length: Total EHS Length (in 32-byte units) of all Extra Header Segments
2565 */
ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb * lrbp,u8 * upiu_flags,enum dma_data_direction cmd_dir,int ehs_length)2566 static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp, u8 *upiu_flags,
2567 enum dma_data_direction cmd_dir, int ehs_length)
2568 {
2569 struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
2570 struct request_desc_header *h = &req_desc->header;
2571 enum utp_data_direction data_direction;
2572
2573 *h = (typeof(*h)){ };
2574
2575 if (cmd_dir == DMA_FROM_DEVICE) {
2576 data_direction = UTP_DEVICE_TO_HOST;
2577 *upiu_flags = UPIU_CMD_FLAGS_READ;
2578 } else if (cmd_dir == DMA_TO_DEVICE) {
2579 data_direction = UTP_HOST_TO_DEVICE;
2580 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
2581 } else {
2582 data_direction = UTP_NO_DATA_TRANSFER;
2583 *upiu_flags = UPIU_CMD_FLAGS_NONE;
2584 }
2585
2586 h->command_type = lrbp->command_type;
2587 h->data_direction = data_direction;
2588 h->ehs_length = ehs_length;
2589
2590 if (lrbp->intr_cmd)
2591 h->interrupt = 1;
2592
2593 /* Prepare crypto related dwords */
2594 ufshcd_prepare_req_desc_hdr_crypto(lrbp, h);
2595
2596 /*
2597 * Assign an invalid value to the command status (OCS). The
2598 * controller updates the OCS with the actual status on
2599 * command completion.
2600 */
2601 h->ocs = OCS_INVALID_COMMAND_STATUS;
2602
2603 req_desc->prd_table_length = 0;
2604 }
2605
2606 /**
2607 * ufshcd_prepare_utp_scsi_cmd_upiu() - fill the utp_transfer_req_desc
2608 * for SCSI commands
2609 * @lrbp: local reference block pointer
2610 * @upiu_flags: flags
2611 */
2612 static
ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb * lrbp,u8 upiu_flags)2613 void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags)
2614 {
2615 struct scsi_cmnd *cmd = lrbp->cmd;
2616 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2617 unsigned short cdb_len;
2618
2619 ucd_req_ptr->header = (struct utp_upiu_header){
2620 .transaction_code = UPIU_TRANSACTION_COMMAND,
2621 .flags = upiu_flags,
2622 .lun = lrbp->lun,
2623 .task_tag = lrbp->task_tag,
2624 .command_set_type = UPIU_COMMAND_SET_TYPE_SCSI,
2625 };
2626
2627 ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);
2628
2629 cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE);
2630 memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
2631 memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len);
2632
2633 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2634 }
2635
2636 /**
2637 * ufshcd_prepare_utp_query_req_upiu() - fill the utp_transfer_req_desc for a query request
2638 * @hba: UFS hba
2639 * @lrbp: local reference block pointer
2640 * @upiu_flags: flags
2641 */
ufshcd_prepare_utp_query_req_upiu(struct ufs_hba * hba,struct ufshcd_lrb * lrbp,u8 upiu_flags)2642 static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
2643 struct ufshcd_lrb *lrbp, u8 upiu_flags)
2644 {
2645 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2646 struct ufs_query *query = &hba->dev_cmd.query;
2647 u16 len = be16_to_cpu(query->request.upiu_req.length);
2648
2649 /* Query request header */
2650 ucd_req_ptr->header = (struct utp_upiu_header){
2651 .transaction_code = UPIU_TRANSACTION_QUERY_REQ,
2652 .flags = upiu_flags,
2653 .lun = lrbp->lun,
2654 .task_tag = lrbp->task_tag,
2655 .query_function = query->request.query_func,
2656 /* Data segment length is only needed for WRITE_DESC */
2657 .data_segment_length =
2658 query->request.upiu_req.opcode ==
2659 UPIU_QUERY_OPCODE_WRITE_DESC ?
2660 cpu_to_be16(len) :
2661 0,
2662 };
2663
2664 /* Copy the Query Request buffer as is */
2665 memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
2666 QUERY_OSF_SIZE);
2667
2668 /* Copy the Descriptor */
2669 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2670 memcpy(ucd_req_ptr + 1, query->descriptor, len);
2671
2672 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2673 }
2674
ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb * lrbp)2675 static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
2676 {
2677 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2678
2679 memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
2680
2681 ucd_req_ptr->header = (struct utp_upiu_header){
2682 .transaction_code = UPIU_TRANSACTION_NOP_OUT,
2683 .task_tag = lrbp->task_tag,
2684 };
2685
2686 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2687 }
2688
2689 /**
2690 * ufshcd_compose_devman_upiu - compose a UFS Protocol Information Unit (UPIU)
2691 * for device management purposes
2692 * @hba: per adapter instance
2693 * @lrbp: pointer to local reference block
2694 *
2695 * Return: 0 upon success; < 0 upon failure.
2696 */
ufshcd_compose_devman_upiu(struct ufs_hba * hba,struct ufshcd_lrb * lrbp)2697 static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
2698 struct ufshcd_lrb *lrbp)
2699 {
2700 u8 upiu_flags;
2701 int ret = 0;
2702
2703 if (hba->ufs_version <= ufshci_version(1, 1))
2704 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
2705 else
2706 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2707
2708 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0);
2709 if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
2710 ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
2711 else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
2712 ufshcd_prepare_utp_nop_upiu(lrbp);
2713 else
2714 ret = -EINVAL;
2715
2716 return ret;
2717 }
2718
2719 /**
2720 * ufshcd_comp_scsi_upiu - compose a UFS Protocol Information Unit (UPIU)
2721 * for SCSI purposes
2722 * @hba: per adapter instance
2723 * @lrbp: pointer to local reference block
2724 *
2725 * Return: 0 upon success; < 0 upon failure.
2726 */
ufshcd_comp_scsi_upiu(struct ufs_hba * hba,struct ufshcd_lrb * lrbp)2727 static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2728 {
2729 u8 upiu_flags;
2730 int ret = 0;
2731
2732 if (hba->ufs_version <= ufshci_version(1, 1))
2733 lrbp->command_type = UTP_CMD_TYPE_SCSI;
2734 else
2735 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2736
2737 if (likely(lrbp->cmd)) {
2738 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, lrbp->cmd->sc_data_direction, 0);
2739 ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
2740 } else {
2741 ret = -EINVAL;
2742 }
2743
2744 return ret;
2745 }
2746
2747 /**
2748 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
2749 * @upiu_wlun_id: UPIU W-LUN id
2750 *
2751 * Return: SCSI W-LUN id.
2752 */
ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)2753 static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
2754 {
2755 return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
2756 }
2757
is_device_wlun(struct scsi_device * sdev)2758 static inline bool is_device_wlun(struct scsi_device *sdev)
2759 {
2760 return sdev->lun ==
2761 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN);
2762 }
2763
2764 /*
2765 * Associate the UFS controller queue with the default and poll HCTX types.
2766 * Initialize the mq_map[] arrays.
2767 */
ufshcd_map_queues(struct Scsi_Host * shost)2768 static void ufshcd_map_queues(struct Scsi_Host *shost)
2769 {
2770 struct ufs_hba *hba = shost_priv(shost);
2771 int i, queue_offset = 0;
2772
2773 if (!is_mcq_supported(hba)) {
2774 hba->nr_queues[HCTX_TYPE_DEFAULT] = 1;
2775 hba->nr_queues[HCTX_TYPE_READ] = 0;
2776 hba->nr_queues[HCTX_TYPE_POLL] = 1;
2777 hba->nr_hw_queues = 1;
2778 }
2779
2780 for (i = 0; i < shost->nr_maps; i++) {
2781 struct blk_mq_queue_map *map = &shost->tag_set.map[i];
2782
2783 map->nr_queues = hba->nr_queues[i];
2784 if (!map->nr_queues)
2785 continue;
2786 map->queue_offset = queue_offset;
2787 if (i == HCTX_TYPE_POLL && !is_mcq_supported(hba))
2788 map->queue_offset = 0;
2789
2790 blk_mq_map_queues(map);
2791 queue_offset += map->nr_queues;
2792 }
2793 }
2794
ufshcd_init_lrb(struct ufs_hba * hba,struct ufshcd_lrb * lrb,int i)2795 static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
2796 {
2797 struct utp_transfer_cmd_desc *cmd_descp = (void *)hba->ucdl_base_addr +
2798 i * ufshcd_get_ucd_size(hba);
2799 struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
2800 dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
2801 i * ufshcd_get_ucd_size(hba);
2802 u16 response_offset = le16_to_cpu(utrdlp[i].response_upiu_offset);
2803 u16 prdt_offset = le16_to_cpu(utrdlp[i].prd_table_offset);
2804
2805 lrb->utr_descriptor_ptr = utrdlp + i;
2806 lrb->utrd_dma_addr = hba->utrdl_dma_addr +
2807 i * sizeof(struct utp_transfer_req_desc);
2808 lrb->ucd_req_ptr = (struct utp_upiu_req *)cmd_descp->command_upiu;
2809 lrb->ucd_req_dma_addr = cmd_desc_element_addr;
2810 lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp->response_upiu;
2811 lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
2812 lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp->prd_table;
2813 lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
2814 }
2815
2816 /**
2817 * ufshcd_queuecommand - main entry point for SCSI requests
2818 * @host: SCSI host pointer
2819 * @cmd: command from SCSI Midlayer
2820 *
2821 * Return: 0 for success, non-zero in case of failure.
2822 */
ufshcd_queuecommand(struct Scsi_Host * host,struct scsi_cmnd * cmd)2823 static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2824 {
2825 struct ufs_hba *hba = shost_priv(host);
2826 int tag = scsi_cmd_to_rq(cmd)->tag;
2827 struct ufshcd_lrb *lrbp;
2828 int err = 0;
2829 struct ufs_hw_queue *hwq = NULL;
2830
2831 WARN_ONCE(tag < 0 || tag >= hba->nutrs, "Invalid tag %d\n", tag);
2832
2833 switch (hba->ufshcd_state) {
2834 case UFSHCD_STATE_OPERATIONAL:
2835 break;
2836 case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL:
2837 /*
2838 * SCSI error handler can call ->queuecommand() while UFS error
2839 * handler is in progress. Error interrupts could change the
2840 * state from UFSHCD_STATE_RESET to
2841 * UFSHCD_STATE_EH_SCHEDULED_NON_FATAL. Prevent requests
2842 * being issued in that case.
2843 */
2844 if (ufshcd_eh_in_progress(hba)) {
2845 err = SCSI_MLQUEUE_HOST_BUSY;
2846 goto out;
2847 }
2848 break;
2849 case UFSHCD_STATE_EH_SCHEDULED_FATAL:
2850 /*
2851 * pm_runtime_get_sync() is used at error handling preparation
2852 * stage. If a scsi cmd, e.g. the SSU cmd, is sent from hba's
2853 * PM ops, it can never be finished if we let SCSI layer keep
2854 * retrying it, which gets err handler stuck forever. Neither
2855 * can we let the scsi cmd pass through, because UFS is in bad
2856 * state, the scsi cmd may eventually time out, which will get
2857 * err handler blocked for too long. So, just fail the scsi cmd
2858 * sent from PM ops; the err handler can recover the PM error anyway.
2859 */
2860 if (hba->pm_op_in_progress) {
2861 hba->force_reset = true;
2862 set_host_byte(cmd, DID_BAD_TARGET);
2863 scsi_done(cmd);
2864 goto out;
2865 }
2866 fallthrough;
2867 case UFSHCD_STATE_RESET:
2868 err = SCSI_MLQUEUE_HOST_BUSY;
2869 goto out;
2870 case UFSHCD_STATE_ERROR:
2871 set_host_byte(cmd, DID_ERROR);
2872 scsi_done(cmd);
2873 goto out;
2874 }
2875
2876 hba->req_abort_count = 0;
2877
2878 ufshcd_hold(hba);
2879
2880 lrbp = &hba->lrb[tag];
2881 lrbp->cmd = cmd;
2882 lrbp->task_tag = tag;
2883 lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
2884 lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba);
2885
2886 ufshcd_prepare_lrbp_crypto(scsi_cmd_to_rq(cmd), lrbp);
2887
2888 lrbp->req_abort_skip = false;
2889
2890 ufshcd_comp_scsi_upiu(hba, lrbp);
2891
2892 err = ufshcd_map_sg(hba, lrbp);
2893 if (err) {
2894 ufshcd_release(hba);
2895 goto out;
2896 }
2897
2898 if (is_mcq_enabled(hba))
2899 hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
2900
2901 ufshcd_send_command(hba, tag, hwq);
2902
2903 out:
2904 if (ufs_trigger_eh()) {
2905 unsigned long flags;
2906
2907 spin_lock_irqsave(hba->host->host_lock, flags);
2908 ufshcd_schedule_eh_work(hba);
2909 spin_unlock_irqrestore(hba->host->host_lock, flags);
2910 }
2911
2912 return err;
2913 }
2914
ufshcd_compose_dev_cmd(struct ufs_hba * hba,struct ufshcd_lrb * lrbp,enum dev_cmd_type cmd_type,int tag)2915 static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
2916 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
2917 {
2918 lrbp->cmd = NULL;
2919 lrbp->task_tag = tag;
2920 lrbp->lun = 0; /* device management cmd is not specific to any LUN */
2921 lrbp->intr_cmd = true; /* No interrupt aggregation */
2922 ufshcd_prepare_lrbp_crypto(NULL, lrbp);
2923 hba->dev_cmd.type = cmd_type;
2924
2925 return ufshcd_compose_devman_upiu(hba, lrbp);
2926 }
2927
2928 /*
2929 * Check with the block layer if the command is inflight
2930 * @cmd: command to check.
2931 *
2932 * Return: true if command is inflight; false if not.
2933 */
ufshcd_cmd_inflight(struct scsi_cmnd * cmd)2934 bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd)
2935 {
2936 struct request *rq;
2937
2938 if (!cmd)
2939 return false;
2940
2941 rq = scsi_cmd_to_rq(cmd);
2942 if (!blk_mq_request_started(rq))
2943 return false;
2944
2945 return true;
2946 }
2947
2948 /*
2949 * Clear the pending command in the controller and wait until
2950 * the controller confirms that the command has been cleared.
2951 * @hba: per adapter instance
2952 * @task_tag: The tag number of the command to be cleared.
2953 */
ufshcd_clear_cmd(struct ufs_hba * hba,u32 task_tag)2954 static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag)
2955 {
2956 u32 mask;
2957 unsigned long flags;
2958 int err;
2959
2960 if (is_mcq_enabled(hba)) {
2961 /*
2962 * MCQ mode. Clean up the MCQ resources similarly to
2963 * what ufshcd_utrl_clear() does for SDB mode.
2964 */
2965 err = ufshcd_mcq_sq_cleanup(hba, task_tag);
2966 if (err) {
2967 dev_err(hba->dev, "%s: failed tag=%d. err=%d\n",
2968 __func__, task_tag, err);
2969 return err;
2970 }
2971 return 0;
2972 }
2973
2974 mask = 1U << task_tag;
2975
2976 /* clear outstanding transaction before retry */
2977 spin_lock_irqsave(hba->host->host_lock, flags);
2978 ufshcd_utrl_clear(hba, mask);
2979 spin_unlock_irqrestore(hba->host->host_lock, flags);
2980
2981 /*
2982 * Wait for the hardware to clear the corresponding doorbell bit.
2983 * The maximum wait is 1 second.
2984 */
2985 return ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
2986 mask, ~mask, 1000, 1000);
2987 }
2988
2989 /**
2990 * ufshcd_dev_cmd_completion() - handles device management command responses
2991 * @hba: per adapter instance
2992 * @lrbp: pointer to local reference block
2993 *
2994 * Return: 0 upon success; < 0 upon failure.
2995 */
2996 static int
ufshcd_dev_cmd_completion(struct ufs_hba * hba,struct ufshcd_lrb * lrbp)2997 ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2998 {
2999 enum upiu_response_transaction resp;
3000 int err = 0;
3001
3002 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
3003 resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
3004
3005 switch (resp) {
3006 case UPIU_TRANSACTION_NOP_IN:
3007 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
3008 err = -EINVAL;
3009 dev_err(hba->dev, "%s: unexpected response %x\n",
3010 __func__, resp);
3011 }
3012 break;
3013 case UPIU_TRANSACTION_QUERY_RSP: {
3014 u8 response = lrbp->ucd_rsp_ptr->header.response;
3015
3016 if (response == 0)
3017 err = ufshcd_copy_query_response(hba, lrbp);
3018 break;
3019 }
3020 case UPIU_TRANSACTION_REJECT_UPIU:
3021 /* TODO: handle Reject UPIU Response */
3022 err = -EPERM;
3023 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
3024 __func__);
3025 break;
3026 case UPIU_TRANSACTION_RESPONSE:
3027 if (hba->dev_cmd.type != DEV_CMD_TYPE_RPMB) {
3028 err = -EINVAL;
3029 dev_err(hba->dev, "%s: unexpected response %x\n", __func__, resp);
3030 }
3031 break;
3032 default:
3033 err = -EINVAL;
3034 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
3035 __func__, resp);
3036 break;
3037 }
3038
3039 return err;
3040 }
3041
ufshcd_wait_for_dev_cmd(struct ufs_hba * hba,struct ufshcd_lrb * lrbp,int max_timeout)3042 static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
3043 struct ufshcd_lrb *lrbp, int max_timeout)
3044 {
3045 unsigned long time_left = msecs_to_jiffies(max_timeout);
3046 unsigned long flags;
3047 bool pending;
3048 int err;
3049
3050 retry:
3051 time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
3052 time_left);
3053
3054 if (likely(time_left)) {
3055 /*
3056 * The completion handler called complete() and the caller of
3057 * this function still owns the @lrbp tag so the code below does
3058 * not trigger any race conditions.
3059 */
3060 hba->dev_cmd.complete = NULL;
3061 err = ufshcd_get_tr_ocs(lrbp, NULL);
3062 if (!err)
3063 err = ufshcd_dev_cmd_completion(hba, lrbp);
3064 } else {
3065 err = -ETIMEDOUT;
3066 dev_dbg(hba->dev, "%s: dev_cmd request timed out, tag %d\n",
3067 __func__, lrbp->task_tag);
3068
3069 /* MCQ mode */
3070 if (is_mcq_enabled(hba)) {
3071 /* successfully cleared the command, retry if needed */
3072 if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0)
3073 err = -EAGAIN;
3074 hba->dev_cmd.complete = NULL;
3075 return err;
3076 }
3077
3078 /* SDB mode */
3079 if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0) {
3080 /* successfully cleared the command, retry if needed */
3081 err = -EAGAIN;
3082 /*
3083 * Since clearing the command succeeded we also need to
3084 * clear the task tag bit from the outstanding_reqs
3085 * variable.
3086 */
3087 spin_lock_irqsave(&hba->outstanding_lock, flags);
3088 pending = test_bit(lrbp->task_tag,
3089 &hba->outstanding_reqs);
3090 if (pending) {
3091 hba->dev_cmd.complete = NULL;
3092 __clear_bit(lrbp->task_tag,
3093 &hba->outstanding_reqs);
3094 }
3095 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
3096
3097 if (!pending) {
3098 /*
3099 * The completion handler ran while we tried to
3100 * clear the command.
3101 */
3102 time_left = 1;
3103 goto retry;
3104 }
3105 } else {
3106 dev_err(hba->dev, "%s: failed to clear tag %d\n",
3107 __func__, lrbp->task_tag);
3108
3109 spin_lock_irqsave(&hba->outstanding_lock, flags);
3110 pending = test_bit(lrbp->task_tag,
3111 &hba->outstanding_reqs);
3112 if (pending)
3113 hba->dev_cmd.complete = NULL;
3114 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
3115
3116 if (!pending) {
3117 /*
3118 * The completion handler ran while we tried to
3119 * clear the command.
3120 */
3121 time_left = 1;
3122 goto retry;
3123 }
3124 }
3125 }
3126
3127 return err;
3128 }
3129
3130 /**
3131 * ufshcd_exec_dev_cmd - API for sending device management requests
3132 * @hba: UFS hba
3133 * @cmd_type: specifies the type (NOP, Query...)
3134 * @timeout: timeout in milliseconds
3135 *
3136 * Return: 0 upon success; < 0 upon failure.
3137 *
3138 * NOTE: Since there is only one available tag for device management commands,
3139 * the caller is expected to hold the hba->dev_cmd.lock mutex.
3140 */
ufshcd_exec_dev_cmd(struct ufs_hba * hba,enum dev_cmd_type cmd_type,int timeout)3141 static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
3142 enum dev_cmd_type cmd_type, int timeout)
3143 {
3144 DECLARE_COMPLETION_ONSTACK(wait);
3145 const u32 tag = hba->reserved_slot;
3146 struct ufshcd_lrb *lrbp;
3147 int err;
3148
3149 /* Protects use of hba->reserved_slot. */
3150 lockdep_assert_held(&hba->dev_cmd.lock);
3151
3152 down_read(&hba->clk_scaling_lock);
3153
3154 lrbp = &hba->lrb[tag];
3155 lrbp->cmd = NULL;
3156 err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
3157 if (unlikely(err))
3158 goto out;
3159
3160 hba->dev_cmd.complete = &wait;
3161
3162 ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
3163
3164 ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
3165 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
3166 ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
3167 (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
3168
3169 out:
3170 up_read(&hba->clk_scaling_lock);
3171 return err;
3172 }
3173
3174 /**
3175 * ufshcd_init_query() - init the query response and request parameters
3176 * @hba: per-adapter instance
3177 * @request: address of the request pointer to be initialized
3178 * @response: address of the response pointer to be initialized
3179 * @opcode: operation to perform
3180 * @idn: idn of the flag, attribute or descriptor to access
3181 * @index: index field (e.g. the LU number)
3182 * @selector: query/flag/descriptor further identification
3183 */
ufshcd_init_query(struct ufs_hba * hba,struct ufs_query_req ** request,struct ufs_query_res ** response,enum query_opcode opcode,u8 idn,u8 index,u8 selector)3184 static inline void ufshcd_init_query(struct ufs_hba *hba,
3185 struct ufs_query_req **request, struct ufs_query_res **response,
3186 enum query_opcode opcode, u8 idn, u8 index, u8 selector)
3187 {
3188 *request = &hba->dev_cmd.query.request;
3189 *response = &hba->dev_cmd.query.response;
3190 memset(*request, 0, sizeof(struct ufs_query_req));
3191 memset(*response, 0, sizeof(struct ufs_query_res));
3192 (*request)->upiu_req.opcode = opcode;
3193 (*request)->upiu_req.idn = idn;
3194 (*request)->upiu_req.index = index;
3195 (*request)->upiu_req.selector = selector;
3196 }
3197
ufshcd_query_flag_retry(struct ufs_hba * hba,enum query_opcode opcode,enum flag_idn idn,u8 index,bool * flag_res)3198 static int ufshcd_query_flag_retry(struct ufs_hba *hba,
3199 enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res)
3200 {
3201 int ret;
3202 int retries;
3203
3204 for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
3205 ret = ufshcd_query_flag(hba, opcode, idn, index, flag_res);
3206 if (ret)
3207 dev_dbg(hba->dev,
3208 "%s: failed with error %d, retries %d\n",
3209 __func__, ret, retries);
3210 else
3211 break;
3212 }
3213
3214 if (ret)
3215 dev_err(hba->dev,
3216 "%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
3217 __func__, opcode, idn, ret, retries);
3218 return ret;
3219 }
3220
3221 /**
3222 * ufshcd_query_flag() - API function for sending flag query requests
3223 * @hba: per-adapter instance
3224 * @opcode: flag query to perform
3225 * @idn: flag idn to access
3226 * @index: flag index to access
3227 * @flag_res: the flag value after the query request completes
3228 *
3229 * Return: 0 for success, non-zero in case of failure.
3230 */
ufshcd_query_flag(struct ufs_hba * hba,enum query_opcode opcode,enum flag_idn idn,u8 index,bool * flag_res)3231 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
3232 enum flag_idn idn, u8 index, bool *flag_res)
3233 {
3234 struct ufs_query_req *request = NULL;
3235 struct ufs_query_res *response = NULL;
3236 int err, selector = 0;
3237 int timeout = QUERY_REQ_TIMEOUT;
3238
3239 BUG_ON(!hba);
3240
3241 ufshcd_hold(hba);
3242 mutex_lock(&hba->dev_cmd.lock);
3243 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3244 selector);
3245
3246 switch (opcode) {
3247 case UPIU_QUERY_OPCODE_SET_FLAG:
3248 case UPIU_QUERY_OPCODE_CLEAR_FLAG:
3249 case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
3250 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3251 break;
3252 case UPIU_QUERY_OPCODE_READ_FLAG:
3253 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3254 if (!flag_res) {
3255 /* No dummy reads */
3256 dev_err(hba->dev, "%s: Invalid argument for read request\n",
3257 __func__);
3258 err = -EINVAL;
3259 goto out_unlock;
3260 }
3261 break;
3262 default:
3263 dev_err(hba->dev,
3264 "%s: Expected query flag opcode but got = %d\n",
3265 __func__, opcode);
3266 err = -EINVAL;
3267 goto out_unlock;
3268 }
3269
3270 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
3271
3272 if (err) {
3273 dev_err(hba->dev,
3274 "%s: Sending flag query for idn %d failed, err = %d\n",
3275 __func__, idn, err);
3276 goto out_unlock;
3277 }
3278
3279 if (flag_res)
3280 *flag_res = (be32_to_cpu(response->upiu_res.value) &
3281 MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
3282
3283 out_unlock:
3284 mutex_unlock(&hba->dev_cmd.lock);
3285 ufshcd_release(hba);
3286 return err;
3287 }
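
/*
 * Illustrative sketch only (hypothetical helper): reading a boolean device
 * flag through the query API above. The flag IDN is just an example.
 */
static int __maybe_unused ufshcd_example_read_flag(struct ufs_hba *hba, bool *res)
{
	return ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
				       QUERY_FLAG_IDN_FDEVICEINIT, 0, res);
}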
3288
3289 /**
3290 * ufshcd_query_attr - API function for sending attribute requests
3291 * @hba: per-adapter instance
3292 * @opcode: attribute opcode
3293 * @idn: attribute idn to access
3294 * @index: index field
3295 * @selector: selector field
3296 * @attr_val: the attribute value after the query request completes
3297 *
3298 * Return: 0 for success, non-zero in case of failure.
3299 */
ufshcd_query_attr(struct ufs_hba * hba,enum query_opcode opcode,enum attr_idn idn,u8 index,u8 selector,u32 * attr_val)3300 int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
3301 enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
3302 {
3303 struct ufs_query_req *request = NULL;
3304 struct ufs_query_res *response = NULL;
3305 int err;
3306
3307 BUG_ON(!hba);
3308
3309 if (!attr_val) {
3310 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
3311 __func__, opcode);
3312 return -EINVAL;
3313 }
3314
3315 ufshcd_hold(hba);
3316
3317 mutex_lock(&hba->dev_cmd.lock);
3318 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3319 selector);
3320
3321 switch (opcode) {
3322 case UPIU_QUERY_OPCODE_WRITE_ATTR:
3323 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3324 request->upiu_req.value = cpu_to_be32(*attr_val);
3325 break;
3326 case UPIU_QUERY_OPCODE_READ_ATTR:
3327 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3328 break;
3329 default:
3330 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
3331 __func__, opcode);
3332 err = -EINVAL;
3333 goto out_unlock;
3334 }
3335
3336 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
3337
3338 if (err) {
3339 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3340 __func__, opcode, idn, index, err);
3341 goto out_unlock;
3342 }
3343
3344 *attr_val = be32_to_cpu(response->upiu_res.value);
3345
3346 out_unlock:
3347 mutex_unlock(&hba->dev_cmd.lock);
3348 ufshcd_release(hba);
3349 return err;
3350 }
3351
3352 /**
3353 * ufshcd_query_attr_retry() - API function for sending a query attribute
3354 * request with retries
3355 * @hba: per-adapter instance
3356 * @opcode: attribute opcode
3357 * @idn: attribute idn to access
3358 * @index: index field
3359 * @selector: selector field
3360 * @attr_val: the attribute value after the query request
3361 * completes
3362 *
3363 * Return: 0 for success, non-zero in case of failure.
3364 */
ufshcd_query_attr_retry(struct ufs_hba * hba,enum query_opcode opcode,enum attr_idn idn,u8 index,u8 selector,u32 * attr_val)3365 int ufshcd_query_attr_retry(struct ufs_hba *hba,
3366 enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
3367 u32 *attr_val)
3368 {
3369 int ret = 0;
3370 u32 retries;
3371
3372 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3373 ret = ufshcd_query_attr(hba, opcode, idn, index,
3374 selector, attr_val);
3375 if (ret)
3376 dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
3377 __func__, ret, retries);
3378 else
3379 break;
3380 }
3381
3382 if (ret)
3383 dev_err(hba->dev,
3384 "%s: query attribute, idn %d, failed with error %d after %d retries\n",
3385 __func__, idn, ret, QUERY_REQ_RETRIES);
3386 return ret;
3387 }
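
/*
 * Illustrative sketch only (hypothetical helper): reading a device attribute
 * with retries. The attribute IDN is just an example.
 */
static int __maybe_unused ufshcd_example_read_attr(struct ufs_hba *hba, u32 *val)
{
	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				       QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, val);
}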
3388
__ufshcd_query_descriptor(struct ufs_hba * hba,enum query_opcode opcode,enum desc_idn idn,u8 index,u8 selector,u8 * desc_buf,int * buf_len)3389 static int __ufshcd_query_descriptor(struct ufs_hba *hba,
3390 enum query_opcode opcode, enum desc_idn idn, u8 index,
3391 u8 selector, u8 *desc_buf, int *buf_len)
3392 {
3393 struct ufs_query_req *request = NULL;
3394 struct ufs_query_res *response = NULL;
3395 int err;
3396
3397 BUG_ON(!hba);
3398
3399 if (!desc_buf) {
3400 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
3401 __func__, opcode);
3402 return -EINVAL;
3403 }
3404
3405 if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
3406 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
3407 __func__, *buf_len);
3408 return -EINVAL;
3409 }
3410
3411 ufshcd_hold(hba);
3412
3413 mutex_lock(&hba->dev_cmd.lock);
3414 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3415 selector);
3416 hba->dev_cmd.query.descriptor = desc_buf;
3417 request->upiu_req.length = cpu_to_be16(*buf_len);
3418
3419 switch (opcode) {
3420 case UPIU_QUERY_OPCODE_WRITE_DESC:
3421 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3422 break;
3423 case UPIU_QUERY_OPCODE_READ_DESC:
3424 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3425 break;
3426 default:
3427 dev_err(hba->dev,
3428 "%s: Expected query descriptor opcode but got = 0x%.2x\n",
3429 __func__, opcode);
3430 err = -EINVAL;
3431 goto out_unlock;
3432 }
3433
3434 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
3435
3436 if (err) {
3437 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3438 __func__, opcode, idn, index, err);
3439 goto out_unlock;
3440 }
3441
3442 *buf_len = be16_to_cpu(response->upiu_res.length);
3443
3444 out_unlock:
3445 hba->dev_cmd.query.descriptor = NULL;
3446 mutex_unlock(&hba->dev_cmd.lock);
3447 ufshcd_release(hba);
3448 return err;
3449 }
3450
3451 /**
3452 * ufshcd_query_descriptor_retry - API function for sending descriptor requests
3453 * @hba: per-adapter instance
3454 * @opcode: attribute opcode
3455 * @idn: attribute idn to access
3456 * @index: index field
3457 * @selector: selector field
3458 * @desc_buf: the buffer that contains the descriptor
3459 * @buf_len: length parameter passed to the device
3460 *
3461 * The buf_len parameter will contain, on return, the length parameter
3462 * received on the response.
3463 *
3464 * Return: 0 for success, non-zero in case of failure.
3465 */
3466 int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
3467 enum query_opcode opcode,
3468 enum desc_idn idn, u8 index,
3469 u8 selector,
3470 u8 *desc_buf, int *buf_len)
3471 {
3472 int err;
3473 int retries;
3474
3475 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3476 err = __ufshcd_query_descriptor(hba, opcode, idn, index,
3477 selector, desc_buf, buf_len);
3478 if (!err || err == -EINVAL)
3479 break;
3480 }
3481
3482 return err;
3483 }
3484
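/*
 * Illustrative sketch (not part of the driver): reading the complete Device
 * descriptor with ufshcd_query_descriptor_retry(). Note that buf_len is an
 * in/out parameter and holds the length returned by the device on success.
 */
#if 0
static int example_read_device_desc(struct ufs_hba *hba, u8 *buf)
{
	int buf_len = QUERY_DESC_MAX_SIZE;

	return ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
					     QUERY_DESC_IDN_DEVICE, 0, 0,
					     buf, &buf_len);
}
#endif
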
3485 /**
3486 * ufshcd_read_desc_param - read the specified descriptor parameter
3487 * @hba: Pointer to adapter instance
3488 * @desc_id: descriptor idn value
3489 * @desc_index: descriptor index
3490 * @param_offset: offset of the parameter to read
3491 * @param_read_buf: pointer to buffer where parameter would be read
3492 * @param_size: sizeof(param_read_buf)
3493 *
3494 * Return: 0 in case of success, non-zero otherwise.
3495 */
3496 int ufshcd_read_desc_param(struct ufs_hba *hba,
3497 enum desc_idn desc_id,
3498 int desc_index,
3499 u8 param_offset,
3500 u8 *param_read_buf,
3501 u8 param_size)
3502 {
3503 int ret;
3504 u8 *desc_buf;
3505 int buff_len = QUERY_DESC_MAX_SIZE;
3506 bool is_kmalloc = true;
3507
3508 /* Safety check */
3509 if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
3510 return -EINVAL;
3511
3512 /* Check whether we need temp memory */
3513 if (param_offset != 0 || param_size < buff_len) {
3514 desc_buf = kzalloc(buff_len, GFP_KERNEL);
3515 if (!desc_buf)
3516 return -ENOMEM;
3517 } else {
3518 desc_buf = param_read_buf;
3519 is_kmalloc = false;
3520 }
3521
3522 /* Request for full descriptor */
3523 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
3524 desc_id, desc_index, 0,
3525 desc_buf, &buff_len);
3526 if (ret) {
3527 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d\n",
3528 __func__, desc_id, desc_index, param_offset, ret);
3529 goto out;
3530 }
3531
3532 /* Update descriptor length */
3533 buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET];
3534
3535 if (param_offset >= buff_len) {
3536 dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n",
3537 __func__, param_offset, desc_id, buff_len);
3538 ret = -EINVAL;
3539 goto out;
3540 }
3541
3542 /* Sanity check */
3543 if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
3544 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n",
3545 __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
3546 ret = -EINVAL;
3547 goto out;
3548 }
3549
3550 if (is_kmalloc) {
3551 /* Make sure we don't copy more data than available */
3552 if (param_offset >= buff_len)
3553 ret = -EINVAL;
3554 else
3555 memcpy(param_read_buf, &desc_buf[param_offset],
3556 min_t(u32, param_size, buff_len - param_offset));
3557 }
3558 out:
3559 if (is_kmalloc)
3560 kfree(desc_buf);
3561 return ret;
3562 }
3563
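/*
 * Illustrative sketch (not part of the driver): reading one byte of the
 * Device descriptor through ufshcd_read_desc_param(). The helper name and
 * the chosen parameter offset (bDeviceSubClass) are example assumptions.
 */
#if 0
static int example_read_device_sub_class(struct ufs_hba *hba, u8 *sub_class)
{
	return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0,
				      DEVICE_DESC_PARAM_DEVICE_SUB_CLASS,
				      sub_class, sizeof(*sub_class));
}
#endif
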
3564 /**
3565 * struct uc_string_id - unicode string
3566 *
3567 * @len: size of this descriptor inclusive
3568 * @type: descriptor type
3569 * @uc: unicode string character
3570 */
3571 struct uc_string_id {
3572 u8 len;
3573 u8 type;
3574 wchar_t uc[];
3575 } __packed;
3576
3577 /* replace non-printable or non-ASCII characters with spaces */
3578 static inline char ufshcd_remove_non_printable(u8 ch)
3579 {
3580 return (ch >= 0x20 && ch <= 0x7e) ? ch : ' ';
3581 }
3582
3583 /**
3584 * ufshcd_read_string_desc - read string descriptor
3585 * @hba: pointer to adapter instance
3586 * @desc_index: descriptor index
3587 * @buf: pointer to buffer where descriptor would be read,
3588 * the caller should free the memory.
3589 * @ascii: if true convert from unicode to ascii characters
3590 * null terminated string.
3591 *
3592 * Return:
3593 * * string size on success.
3594 * * -ENOMEM: on allocation failure
3595 * * -EINVAL: on a wrong parameter
3596 */
3597 int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
3598 u8 **buf, bool ascii)
3599 {
3600 struct uc_string_id *uc_str;
3601 u8 *str;
3602 int ret;
3603
3604 if (!buf)
3605 return -EINVAL;
3606
3607 uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
3608 if (!uc_str)
3609 return -ENOMEM;
3610
3611 ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_STRING, desc_index, 0,
3612 (u8 *)uc_str, QUERY_DESC_MAX_SIZE);
3613 if (ret < 0) {
3614 dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n",
3615 QUERY_REQ_RETRIES, ret);
3616 str = NULL;
3617 goto out;
3618 }
3619
3620 if (uc_str->len <= QUERY_DESC_HDR_SIZE) {
3621 dev_dbg(hba->dev, "String Desc is of zero length\n");
3622 str = NULL;
3623 ret = 0;
3624 goto out;
3625 }
3626
3627 if (ascii) {
3628 ssize_t ascii_len;
3629 int i;
3630 /* remove header and divide by 2 to move from UTF16 to UTF8 */
3631 ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1;
3632 str = kzalloc(ascii_len, GFP_KERNEL);
3633 if (!str) {
3634 ret = -ENOMEM;
3635 goto out;
3636 }
3637
3638 /*
3639 * The descriptor contains a string in UTF-16 format;
3640 * convert it to UTF-8 so it can be displayed.
3641 */
3642 ret = utf16s_to_utf8s(uc_str->uc,
3643 uc_str->len - QUERY_DESC_HDR_SIZE,
3644 UTF16_BIG_ENDIAN, str, ascii_len - 1);
3645
3646 /* replace non-printable or non-ASCII characters with spaces */
3647 for (i = 0; i < ret; i++)
3648 str[i] = ufshcd_remove_non_printable(str[i]);
3649
3650 str[ret++] = '\0';
3651
3652 } else {
3653 str = kmemdup(uc_str, uc_str->len, GFP_KERNEL);
3654 if (!str) {
3655 ret = -ENOMEM;
3656 goto out;
3657 }
3658 ret = uc_str->len;
3659 }
3660 out:
3661 *buf = str;
3662 kfree(uc_str);
3663 return ret;
3664 }
3665
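/*
 * Illustrative sketch (not part of the driver): reading a string descriptor
 * as a NUL-terminated ASCII string. The returned buffer is owned by the
 * caller and must be freed; the descriptor index is an example parameter.
 */
#if 0
static void example_print_string_desc(struct ufs_hba *hba, u8 index)
{
	u8 *str = NULL;
	int ret;

	ret = ufshcd_read_string_desc(hba, index, &str, true /* ascii */);
	if (ret > 0)
		dev_info(hba->dev, "string descriptor %u: %s\n", index, str);
	kfree(str);
}
#endif
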
3666 /**
3667 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
3668 * @hba: Pointer to adapter instance
3669 * @lun: lun id
3670 * @param_offset: offset of the parameter to read
3671 * @param_read_buf: pointer to buffer where parameter would be read
3672 * @param_size: sizeof(param_read_buf)
3673 *
3674 * Return: 0 in case of success, non-zero otherwise.
3675 */
3676 static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
3677 int lun,
3678 enum unit_desc_param param_offset,
3679 u8 *param_read_buf,
3680 u32 param_size)
3681 {
3682 /*
3683 * Unit descriptors are only available for general purpose LUs (LUN id
3684 * from 0 to 7) and RPMB Well known LU.
3685 */
3686 if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun))
3687 return -EOPNOTSUPP;
3688
3689 return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
3690 param_offset, param_read_buf, param_size);
3691 }
3692
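/*
 * Illustrative sketch (not part of the driver): reading bLUQueueDepth of a
 * LUN through the unit descriptor wrapper above; the same offset
 * (UNIT_DESC_PARAM_LU_Q_DEPTH) is used later by ufshcd_lu_init().
 */
#if 0
static int example_read_lu_queue_depth(struct ufs_hba *hba, int lun, u8 *qdepth)
{
	return ufshcd_read_unit_desc_param(hba, lun,
					   UNIT_DESC_PARAM_LU_Q_DEPTH,
					   qdepth, sizeof(*qdepth));
}
#endif
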
3693 static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba *hba)
3694 {
3695 int err = 0;
3696 u32 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
3697
3698 if (hba->dev_info.wspecversion >= 0x300) {
3699 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
3700 QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME, 0, 0,
3701 &gating_wait);
3702 if (err)
3703 dev_err(hba->dev, "Failed reading bRefClkGatingWait. err = %d, use default %uus\n",
3704 err, gating_wait);
3705
3706 if (gating_wait == 0) {
3707 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
3708 dev_err(hba->dev, "Undefined ref clk gating wait time, use default %uus\n",
3709 gating_wait);
3710 }
3711
3712 hba->dev_info.clk_gating_wait_us = gating_wait;
3713 }
3714
3715 return err;
3716 }
3717
3718 /**
3719 * ufshcd_memory_alloc - allocate memory for host memory space data structures
3720 * @hba: per adapter instance
3721 *
3722 * 1. Allocate DMA memory for Command Descriptor array
3723 * Each command descriptor consists of Command UPIU, Response UPIU and PRDT
3724 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
3725 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
3726 * (UTMRDL)
3727 * 4. Allocate memory for local reference block (lrb).
3728 *
3729 * Return: 0 for success, non-zero in case of failure.
3730 */
3731 static int ufshcd_memory_alloc(struct ufs_hba *hba)
3732 {
3733 size_t utmrdl_size, utrdl_size, ucdl_size;
3734
3735 /* Allocate memory for UTP command descriptors */
3736 ucdl_size = ufshcd_get_ucd_size(hba) * hba->nutrs;
3737 hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
3738 ucdl_size,
3739 &hba->ucdl_dma_addr,
3740 GFP_KERNEL);
3741
3742 /*
3743 * UFSHCI requires UTP command descriptor to be 128 byte aligned.
3744 */
3745 if (!hba->ucdl_base_addr ||
3746 WARN_ON(hba->ucdl_dma_addr & (128 - 1))) {
3747 dev_err(hba->dev,
3748 "Command Descriptor Memory allocation failed\n");
3749 goto out;
3750 }
3751
3752 /*
3753 * Allocate memory for UTP Transfer descriptors
3754 * UFSHCI requires 1KB alignment of UTRD
3755 */
3756 utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
3757 hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
3758 utrdl_size,
3759 &hba->utrdl_dma_addr,
3760 GFP_KERNEL);
3761 if (!hba->utrdl_base_addr ||
3762 WARN_ON(hba->utrdl_dma_addr & (SZ_1K - 1))) {
3763 dev_err(hba->dev,
3764 "Transfer Descriptor Memory allocation failed\n");
3765 goto out;
3766 }
3767
3768 /*
3769 * Skip utmrdl allocation; it may have been
3770 * allocated during first pass and not released during
3771 * MCQ memory allocation.
3772 * See ufshcd_release_sdb_queue() and ufshcd_config_mcq()
3773 */
3774 if (hba->utmrdl_base_addr)
3775 goto skip_utmrdl;
3776 /*
3777 * Allocate memory for UTP Task Management descriptors
3778 * UFSHCI requires 1KB alignment of UTMRD
3779 */
3780 utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
3781 hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
3782 utmrdl_size,
3783 &hba->utmrdl_dma_addr,
3784 GFP_KERNEL);
3785 if (!hba->utmrdl_base_addr ||
3786 WARN_ON(hba->utmrdl_dma_addr & (SZ_1K - 1))) {
3787 dev_err(hba->dev,
3788 "Task Management Descriptor Memory allocation failed\n");
3789 goto out;
3790 }
3791
3792 skip_utmrdl:
3793 /* Allocate memory for local reference block */
3794 hba->lrb = devm_kcalloc(hba->dev,
3795 hba->nutrs, sizeof(struct ufshcd_lrb),
3796 GFP_KERNEL);
3797 if (!hba->lrb) {
3798 dev_err(hba->dev, "LRB Memory allocation failed\n");
3799 goto out;
3800 }
3801 return 0;
3802 out:
3803 return -ENOMEM;
3804 }
3805
3806 /**
3807 * ufshcd_host_memory_configure - configure local reference block with
3808 * memory offsets
3809 * @hba: per adapter instance
3810 *
3811 * Configure Host memory space
3812 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
3813 * address.
3814 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
3815 * and PRDT offset.
3816 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
3817 * into local reference block.
3818 */
3819 static void ufshcd_host_memory_configure(struct ufs_hba *hba)
3820 {
3821 struct utp_transfer_req_desc *utrdlp;
3822 dma_addr_t cmd_desc_dma_addr;
3823 dma_addr_t cmd_desc_element_addr;
3824 u16 response_offset;
3825 u16 prdt_offset;
3826 int cmd_desc_size;
3827 int i;
3828
3829 utrdlp = hba->utrdl_base_addr;
3830
3831 response_offset =
3832 offsetof(struct utp_transfer_cmd_desc, response_upiu);
3833 prdt_offset =
3834 offsetof(struct utp_transfer_cmd_desc, prd_table);
3835
3836 cmd_desc_size = ufshcd_get_ucd_size(hba);
3837 cmd_desc_dma_addr = hba->ucdl_dma_addr;
3838
3839 for (i = 0; i < hba->nutrs; i++) {
3840 /* Configure UTRD with command descriptor base address */
3841 cmd_desc_element_addr =
3842 (cmd_desc_dma_addr + (cmd_desc_size * i));
3843 utrdlp[i].command_desc_base_addr =
3844 cpu_to_le64(cmd_desc_element_addr);
3845
3846 /* Response upiu and prdt offset should be in double words */
3847 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
3848 utrdlp[i].response_upiu_offset =
3849 cpu_to_le16(response_offset);
3850 utrdlp[i].prd_table_offset =
3851 cpu_to_le16(prdt_offset);
3852 utrdlp[i].response_upiu_length =
3853 cpu_to_le16(ALIGNED_UPIU_SIZE);
3854 } else {
3855 utrdlp[i].response_upiu_offset =
3856 cpu_to_le16(response_offset >> 2);
3857 utrdlp[i].prd_table_offset =
3858 cpu_to_le16(prdt_offset >> 2);
3859 utrdlp[i].response_upiu_length =
3860 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
3861 }
3862
3863 ufshcd_init_lrb(hba, &hba->lrb[i], i);
3864 }
3865 }
3866
3867 /**
3868 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
3869 * @hba: per adapter instance
3870 *
3871 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
3872 * in order to initialize the Unipro link startup procedure.
3873 * Once the Unipro links are up, the device connected to the controller
3874 * is detected.
3875 *
3876 * Return: 0 on success, non-zero value on failure.
3877 */
3878 static int ufshcd_dme_link_startup(struct ufs_hba *hba)
3879 {
3880 struct uic_command uic_cmd = {0};
3881 int ret;
3882
3883 uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
3884
3885 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3886 if (ret)
3887 dev_dbg(hba->dev,
3888 "dme-link-startup: error code %d\n", ret);
3889 return ret;
3890 }
3891 /**
3892 * ufshcd_dme_reset - UIC command for DME_RESET
3893 * @hba: per adapter instance
3894 *
3895 * DME_RESET command is issued in order to reset UniPro stack.
3896 * This function now deals with cold reset.
3897 *
3898 * Return: 0 on success, non-zero value on failure.
3899 */
3900 static int ufshcd_dme_reset(struct ufs_hba *hba)
3901 {
3902 struct uic_command uic_cmd = {0};
3903 int ret;
3904
3905 uic_cmd.command = UIC_CMD_DME_RESET;
3906
3907 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3908 if (ret)
3909 dev_err(hba->dev,
3910 "dme-reset: error code %d\n", ret);
3911
3912 return ret;
3913 }
3914
3915 int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
3916 int agreed_gear,
3917 int adapt_val)
3918 {
3919 int ret;
3920
3921 if (agreed_gear < UFS_HS_G4)
3922 adapt_val = PA_NO_ADAPT;
3923
3924 ret = ufshcd_dme_set(hba,
3925 UIC_ARG_MIB(PA_TXHSADAPTTYPE),
3926 adapt_val);
3927 return ret;
3928 }
3929 EXPORT_SYMBOL_GPL(ufshcd_dme_configure_adapt);
3930
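/*
 * Illustrative sketch (not part of the driver): a vendor driver's power mode
 * notify path might request initial ADAPT for the agreed gear. The use of
 * PA_INITIAL_ADAPT here is an example assumption; ufshcd_dme_configure_adapt()
 * silently downgrades to PA_NO_ADAPT for gears below HS-G4.
 */
#if 0
static int example_setup_adapt(struct ufs_hba *hba, int agreed_gear)
{
	return ufshcd_dme_configure_adapt(hba, agreed_gear, PA_INITIAL_ADAPT);
}
#endif
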
3931 /**
3932 * ufshcd_dme_enable - UIC command for DME_ENABLE
3933 * @hba: per adapter instance
3934 *
3935 * DME_ENABLE command is issued in order to enable UniPro stack.
3936 *
3937 * Return: 0 on success, non-zero value on failure.
3938 */
3939 static int ufshcd_dme_enable(struct ufs_hba *hba)
3940 {
3941 struct uic_command uic_cmd = {0};
3942 int ret;
3943
3944 uic_cmd.command = UIC_CMD_DME_ENABLE;
3945
3946 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3947 if (ret)
3948 dev_err(hba->dev,
3949 "dme-enable: error code %d\n", ret);
3950
3951 return ret;
3952 }
3953
3954 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
3955 {
3956 #define MIN_DELAY_BEFORE_DME_CMDS_US 1000
3957 unsigned long min_sleep_time_us;
3958
3959 if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
3960 return;
3961
3962 /*
3963 * last_dme_cmd_tstamp will be 0 only for 1st call to
3964 * this function
3965 */
3966 if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
3967 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
3968 } else {
3969 unsigned long delta =
3970 (unsigned long) ktime_to_us(
3971 ktime_sub(ktime_get(),
3972 hba->last_dme_cmd_tstamp));
3973
3974 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
3975 min_sleep_time_us =
3976 MIN_DELAY_BEFORE_DME_CMDS_US - delta;
3977 else
3978 min_sleep_time_us = 0; /* no more delay required */
3979 }
3980
3981 if (min_sleep_time_us > 0) {
3982 /* allow sleep for extra 50us if needed */
3983 usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
3984 }
3985
3986 /* update the last_dme_cmd_tstamp */
3987 hba->last_dme_cmd_tstamp = ktime_get();
3988 }
3989
3990 /**
3991 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
3992 * @hba: per adapter instance
3993 * @attr_sel: uic command argument1
3994 * @attr_set: attribute set type as uic command argument2
3995 * @mib_val: setting value as uic command argument3
3996 * @peer: indicate whether peer or local
3997 *
3998 * Return: 0 on success, non-zero value on failure.
3999 */
4000 int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
4001 u8 attr_set, u32 mib_val, u8 peer)
4002 {
4003 struct uic_command uic_cmd = {0};
4004 static const char *const action[] = {
4005 "dme-set",
4006 "dme-peer-set"
4007 };
4008 const char *set = action[!!peer];
4009 int ret;
4010 int retries = UFS_UIC_COMMAND_RETRIES;
4011
4012 uic_cmd.command = peer ?
4013 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
4014 uic_cmd.argument1 = attr_sel;
4015 uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
4016 uic_cmd.argument3 = mib_val;
4017
4018 do {
4019 /* for peer attributes we retry upon failure */
4020 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
4021 if (ret)
4022 dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
4023 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
4024 } while (ret && peer && --retries);
4025
4026 if (ret)
4027 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
4028 set, UIC_GET_ATTR_ID(attr_sel), mib_val,
4029 UFS_UIC_COMMAND_RETRIES - retries);
4030
4031 return ret;
4032 }
4033 EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
4034
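/*
 * Illustrative sketch (not part of the driver): the ufshcd_dme_set() wrapper
 * around ufshcd_dme_set_attr() is the usual way to program a single local
 * MIB attribute; ufshcd_dme_peer_set() does the same on the device side.
 * The helper name and the attributes used are example assumptions.
 */
#if 0
static int example_set_active_lanes(struct ufs_hba *hba, u32 lanes)
{
	int ret;

	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), lanes);
	if (ret)
		return ret;

	return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), lanes);
}
#endif
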
4035 /**
4036 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
4037 * @hba: per adapter instance
4038 * @attr_sel: uic command argument1
4039 * @mib_val: the value of the attribute as returned by the UIC command
4040 * @peer: indicate whether peer or local
4041 *
4042 * Return: 0 on success, non-zero value on failure.
4043 */
4044 int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
4045 u32 *mib_val, u8 peer)
4046 {
4047 struct uic_command uic_cmd = {0};
4048 static const char *const action[] = {
4049 "dme-get",
4050 "dme-peer-get"
4051 };
4052 const char *get = action[!!peer];
4053 int ret;
4054 int retries = UFS_UIC_COMMAND_RETRIES;
4055 struct ufs_pa_layer_attr orig_pwr_info;
4056 struct ufs_pa_layer_attr temp_pwr_info;
4057 bool pwr_mode_change = false;
4058
4059 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
4060 orig_pwr_info = hba->pwr_info;
4061 temp_pwr_info = orig_pwr_info;
4062
4063 if (orig_pwr_info.pwr_tx == FAST_MODE ||
4064 orig_pwr_info.pwr_rx == FAST_MODE) {
4065 temp_pwr_info.pwr_tx = FASTAUTO_MODE;
4066 temp_pwr_info.pwr_rx = FASTAUTO_MODE;
4067 pwr_mode_change = true;
4068 } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
4069 orig_pwr_info.pwr_rx == SLOW_MODE) {
4070 temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
4071 temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
4072 pwr_mode_change = true;
4073 }
4074 if (pwr_mode_change) {
4075 ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
4076 if (ret)
4077 goto out;
4078 }
4079 }
4080
4081 uic_cmd.command = peer ?
4082 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
4083 uic_cmd.argument1 = attr_sel;
4084
4085 do {
4086 /* for peer attributes we retry upon failure */
4087 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
4088 if (ret)
4089 dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
4090 get, UIC_GET_ATTR_ID(attr_sel), ret);
4091 } while (ret && peer && --retries);
4092
4093 if (ret)
4094 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
4095 get, UIC_GET_ATTR_ID(attr_sel),
4096 UFS_UIC_COMMAND_RETRIES - retries);
4097
4098 if (mib_val && !ret)
4099 *mib_val = uic_cmd.argument3;
4100
4101 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
4102 && pwr_mode_change)
4103 ufshcd_change_power_mode(hba, &orig_pwr_info);
4104 out:
4105 return ret;
4106 }
4107 EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
4108
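/*
 * Illustrative sketch (not part of the driver): reading the number of
 * connected RX data lanes through the ufshcd_dme_get() wrapper, mirroring
 * what ufshcd_get_max_pwr_mode() does further below.
 */
#if 0
static int example_get_connected_rx_lanes(struct ufs_hba *hba, u32 *lanes)
{
	return ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), lanes);
}
#endif
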
4109 /**
4110 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
4111 * state) and waits for it to take effect.
4112 *
4113 * @hba: per adapter instance
4114 * @cmd: UIC command to execute
4115 *
4116 * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
4117 * DME_HIBERNATE_EXIT commands take some time to take effect on both the host
4118 * and device UniPro link, and hence their final completion is indicated by
4119 * dedicated status bits in Interrupt Status register (UPMS, UHES, UHXS) in
4120 * addition to normal UIC command completion Status (UCCS). This function only
4121 * returns after the relevant status bits indicate the completion.
4122 *
4123 * Return: 0 on success, non-zero value on failure.
4124 */
4125 static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
4126 {
4127 DECLARE_COMPLETION_ONSTACK(uic_async_done);
4128 unsigned long flags;
4129 u8 status;
4130 int ret;
4131 bool reenable_intr = false;
4132
4133 mutex_lock(&hba->uic_cmd_mutex);
4134 ufshcd_add_delay_before_dme_cmd(hba);
4135
4136 spin_lock_irqsave(hba->host->host_lock, flags);
4137 if (ufshcd_is_link_broken(hba)) {
4138 ret = -ENOLINK;
4139 goto out_unlock;
4140 }
4141 hba->uic_async_done = &uic_async_done;
4142 if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
4143 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
4144 /*
4145 * Make sure UIC command completion interrupt is disabled before
4146 * issuing UIC command.
4147 */
4148 ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
4149 reenable_intr = true;
4150 }
4151 spin_unlock_irqrestore(hba->host->host_lock, flags);
4152 ret = __ufshcd_send_uic_cmd(hba, cmd);
4153 if (ret) {
4154 dev_err(hba->dev,
4155 "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
4156 cmd->command, cmd->argument3, ret);
4157 goto out;
4158 }
4159
4160 if (!wait_for_completion_timeout(hba->uic_async_done,
4161 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
4162 dev_err(hba->dev,
4163 "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
4164 cmd->command, cmd->argument3);
4165
4166 if (!cmd->cmd_active) {
4167 dev_err(hba->dev, "%s: Power Mode Change operation has been completed, go check UPMCRS\n",
4168 __func__);
4169 goto check_upmcrs;
4170 }
4171
4172 ret = -ETIMEDOUT;
4173 goto out;
4174 }
4175
4176 check_upmcrs:
4177 status = ufshcd_get_upmcrs(hba);
4178 if (status != PWR_LOCAL) {
4179 dev_err(hba->dev,
4180 "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
4181 cmd->command, status);
4182 ret = (status != PWR_OK) ? status : -1;
4183 }
4184 out:
4185 if (ret) {
4186 ufshcd_print_host_state(hba);
4187 ufshcd_print_pwr_info(hba);
4188 ufshcd_print_evt_hist(hba);
4189 }
4190
4191 spin_lock_irqsave(hba->host->host_lock, flags);
4192 hba->active_uic_cmd = NULL;
4193 hba->uic_async_done = NULL;
4194 if (reenable_intr)
4195 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
4196 if (ret) {
4197 ufshcd_set_link_broken(hba);
4198 ufshcd_schedule_eh_work(hba);
4199 }
4200 out_unlock:
4201 spin_unlock_irqrestore(hba->host->host_lock, flags);
4202 mutex_unlock(&hba->uic_cmd_mutex);
4203
4204 return ret;
4205 }
4206
4207 /**
4208 * ufshcd_send_bsg_uic_cmd - Send UIC commands requested via BSG layer and retrieve the result
4209 * @hba: per adapter instance
4210 * @uic_cmd: UIC command
4211 *
4212 * Return: 0 only if success.
4213 */
4214 int ufshcd_send_bsg_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
4215 {
4216 int ret;
4217
4218 if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
4219 return 0;
4220
4221 ufshcd_hold(hba);
4222
4223 if (uic_cmd->argument1 == UIC_ARG_MIB(PA_PWRMODE) &&
4224 uic_cmd->command == UIC_CMD_DME_SET) {
4225 ret = ufshcd_uic_pwr_ctrl(hba, uic_cmd);
4226 goto out;
4227 }
4228
4229 mutex_lock(&hba->uic_cmd_mutex);
4230 ufshcd_add_delay_before_dme_cmd(hba);
4231
4232 ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
4233 if (!ret)
4234 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
4235
4236 mutex_unlock(&hba->uic_cmd_mutex);
4237
4238 out:
4239 ufshcd_release(hba);
4240 return ret;
4241 }
4242
4243 /**
4244 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
4245 * using DME_SET primitives.
4246 * @hba: per adapter instance
4247 * @mode: power mode value
4248 *
4249 * Return: 0 on success, non-zero value on failure.
4250 */
4251 int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
4252 {
4253 struct uic_command uic_cmd = {0};
4254 int ret;
4255
4256 if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
4257 ret = ufshcd_dme_set(hba,
4258 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
4259 if (ret) {
4260 dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
4261 __func__, ret);
4262 goto out;
4263 }
4264 }
4265
4266 uic_cmd.command = UIC_CMD_DME_SET;
4267 uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
4268 uic_cmd.argument3 = mode;
4269 ufshcd_hold(hba);
4270 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4271 ufshcd_release(hba);
4272
4273 out:
4274 return ret;
4275 }
4276 EXPORT_SYMBOL_GPL(ufshcd_uic_change_pwr_mode);
4277
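/*
 * Illustrative sketch (not part of the driver): the mode argument packs the
 * RX power mode in the upper nibble and the TX power mode in the lower
 * nibble, exactly as ufshcd_change_power_mode() composes it further below.
 */
#if 0
static int example_enter_fast_mode(struct ufs_hba *hba)
{
	return ufshcd_uic_change_pwr_mode(hba, (FAST_MODE << 4) | FAST_MODE);
}
#endif
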
4278 int ufshcd_link_recovery(struct ufs_hba *hba)
4279 {
4280 int ret;
4281 unsigned long flags;
4282
4283 spin_lock_irqsave(hba->host->host_lock, flags);
4284 hba->ufshcd_state = UFSHCD_STATE_RESET;
4285 ufshcd_set_eh_in_progress(hba);
4286 spin_unlock_irqrestore(hba->host->host_lock, flags);
4287
4288 /* Reset the attached device */
4289 ufshcd_device_reset(hba);
4290
4291 ret = ufshcd_host_reset_and_restore(hba);
4292
4293 spin_lock_irqsave(hba->host->host_lock, flags);
4294 if (ret)
4295 hba->ufshcd_state = UFSHCD_STATE_ERROR;
4296 ufshcd_clear_eh_in_progress(hba);
4297 spin_unlock_irqrestore(hba->host->host_lock, flags);
4298
4299 if (ret)
4300 dev_err(hba->dev, "%s: link recovery failed, err %d",
4301 __func__, ret);
4302
4303 return ret;
4304 }
4305 EXPORT_SYMBOL_GPL(ufshcd_link_recovery);
4306
4307 int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
4308 {
4309 int ret;
4310 struct uic_command uic_cmd = {0};
4311 ktime_t start = ktime_get();
4312
4313 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
4314
4315 uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
4316 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4317 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
4318 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
4319
4320 if (ret)
4321 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
4322 __func__, ret);
4323 else
4324 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
4325 POST_CHANGE);
4326
4327 return ret;
4328 }
4329 EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_enter);
4330
4331 int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
4332 {
4333 struct uic_command uic_cmd = {0};
4334 int ret;
4335 ktime_t start = ktime_get();
4336
4337 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
4338
4339 uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
4340 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4341 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
4342 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
4343
4344 if (ret) {
4345 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
4346 __func__, ret);
4347 } else {
4348 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
4349 POST_CHANGE);
4350 hba->ufs_stats.last_hibern8_exit_tstamp = local_clock();
4351 hba->ufs_stats.hibern8_exit_cnt++;
4352 }
4353
4354 return ret;
4355 }
4356 EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit);
4357
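/*
 * Illustrative sketch (not part of the driver): hibern8 enter and exit are
 * typically paired around a period during which the link may stay idle.
 */
#if 0
static int example_hibern8_roundtrip(struct ufs_hba *hba)
{
	int ret;

	ret = ufshcd_uic_hibern8_enter(hba);
	if (ret)
		return ret;

	/* ... link remains in HIBERN8 while idle ... */

	return ufshcd_uic_hibern8_exit(hba);
}
#endif
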
4358 void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
4359 {
4360 unsigned long flags;
4361 bool update = false;
4362
4363 if (!ufshcd_is_auto_hibern8_supported(hba))
4364 return;
4365
4366 spin_lock_irqsave(hba->host->host_lock, flags);
4367 if (hba->ahit != ahit) {
4368 hba->ahit = ahit;
4369 update = true;
4370 }
4371 spin_unlock_irqrestore(hba->host->host_lock, flags);
4372
4373 if (update &&
4374 !pm_runtime_suspended(&hba->ufs_device_wlun->sdev_gendev)) {
4375 ufshcd_rpm_get_sync(hba);
4376 ufshcd_hold(hba);
4377 ufshcd_auto_hibern8_enable(hba);
4378 ufshcd_release(hba);
4379 ufshcd_rpm_put_sync(hba);
4380 }
4381 }
4382 EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);
4383
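/*
 * Illustrative sketch (not part of the driver): composing an AHIT value with
 * timer value 150 at scale 3 (1 ms granularity, i.e. roughly 150 ms) and
 * applying it. The mask names come from ufshci.h; the chosen timer and scale
 * values are example assumptions.
 */
#if 0
static void example_set_auto_hibern8(struct ufs_hba *hba)
{
	u32 ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
		   FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);

	ufshcd_auto_hibern8_update(hba, ahit);
}
#endif
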
4384 void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
4385 {
4386 if (!ufshcd_is_auto_hibern8_supported(hba))
4387 return;
4388
4389 ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
4390 }
4391
4392 /**
4393 * ufshcd_init_pwr_info - setting the POR (power on reset)
4394 * values in hba power info
4395 * @hba: per-adapter instance
4396 */
4397 static void ufshcd_init_pwr_info(struct ufs_hba *hba)
4398 {
4399 hba->pwr_info.gear_rx = UFS_PWM_G1;
4400 hba->pwr_info.gear_tx = UFS_PWM_G1;
4401 hba->pwr_info.lane_rx = UFS_LANE_1;
4402 hba->pwr_info.lane_tx = UFS_LANE_1;
4403 hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
4404 hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
4405 hba->pwr_info.hs_rate = 0;
4406 }
4407
4408 /**
4409 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
4410 * @hba: per-adapter instance
4411 *
4412 * Return: 0 upon success; < 0 upon failure.
4413 */
4414 static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
4415 {
4416 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
4417
4418 if (hba->max_pwr_info.is_valid)
4419 return 0;
4420
4421 if (hba->quirks & UFSHCD_QUIRK_HIBERN_FASTAUTO) {
4422 pwr_info->pwr_tx = FASTAUTO_MODE;
4423 pwr_info->pwr_rx = FASTAUTO_MODE;
4424 } else {
4425 pwr_info->pwr_tx = FAST_MODE;
4426 pwr_info->pwr_rx = FAST_MODE;
4427 }
4428 pwr_info->hs_rate = PA_HS_MODE_B;
4429
4430 /* Get the connected lane count */
4431 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
4432 &pwr_info->lane_rx);
4433 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4434 &pwr_info->lane_tx);
4435
4436 if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
4437 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
4438 __func__,
4439 pwr_info->lane_rx,
4440 pwr_info->lane_tx);
4441 return -EINVAL;
4442 }
4443
4444 /*
4445 * First, get the maximum gears of HS speed.
4446 * If it is zero, there is no HSGEAR capability.
4447 * Then, get the maximum gears of PWM speed.
4448 */
4449 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
4450 if (!pwr_info->gear_rx) {
4451 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4452 &pwr_info->gear_rx);
4453 if (!pwr_info->gear_rx) {
4454 dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
4455 __func__, pwr_info->gear_rx);
4456 return -EINVAL;
4457 }
4458 pwr_info->pwr_rx = SLOW_MODE;
4459 }
4460
4461 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
4462 &pwr_info->gear_tx);
4463 if (!pwr_info->gear_tx) {
4464 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4465 &pwr_info->gear_tx);
4466 if (!pwr_info->gear_tx) {
4467 dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
4468 __func__, pwr_info->gear_tx);
4469 return -EINVAL;
4470 }
4471 pwr_info->pwr_tx = SLOW_MODE;
4472 }
4473
4474 hba->max_pwr_info.is_valid = true;
4475 return 0;
4476 }
4477
4478 static int ufshcd_change_power_mode(struct ufs_hba *hba,
4479 struct ufs_pa_layer_attr *pwr_mode)
4480 {
4481 int ret;
4482
4483 /* if already configured to the requested pwr_mode */
4484 if (!hba->force_pmc &&
4485 pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
4486 pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
4487 pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
4488 pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
4489 pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
4490 pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
4491 pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
4492 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
4493 return 0;
4494 }
4495
4496 /*
4497 * Configure attributes for power mode change with below.
4498 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
4499 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
4500 * - PA_HSSERIES
4501 */
4502 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
4503 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
4504 pwr_mode->lane_rx);
4505 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4506 pwr_mode->pwr_rx == FAST_MODE)
4507 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true);
4508 else
4509 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), false);
4510
4511 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
4512 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
4513 pwr_mode->lane_tx);
4514 if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
4515 pwr_mode->pwr_tx == FAST_MODE)
4516 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
4517 else
4518 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), false);
4519
4520 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4521 pwr_mode->pwr_tx == FASTAUTO_MODE ||
4522 pwr_mode->pwr_rx == FAST_MODE ||
4523 pwr_mode->pwr_tx == FAST_MODE)
4524 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
4525 pwr_mode->hs_rate);
4526
4527 if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) {
4528 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
4529 DL_FC0ProtectionTimeOutVal_Default);
4530 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
4531 DL_TC0ReplayTimeOutVal_Default);
4532 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
4533 DL_AFC0ReqTimeOutVal_Default);
4534 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
4535 DL_FC1ProtectionTimeOutVal_Default);
4536 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
4537 DL_TC1ReplayTimeOutVal_Default);
4538 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
4539 DL_AFC1ReqTimeOutVal_Default);
4540
4541 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
4542 DL_FC0ProtectionTimeOutVal_Default);
4543 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
4544 DL_TC0ReplayTimeOutVal_Default);
4545 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
4546 DL_AFC0ReqTimeOutVal_Default);
4547 }
4548
4549 ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
4550 | pwr_mode->pwr_tx);
4551
4552 if (ret) {
4553 dev_err(hba->dev,
4554 "%s: power mode change failed %d\n", __func__, ret);
4555 } else {
4556 memcpy(&hba->pwr_info, pwr_mode,
4557 sizeof(struct ufs_pa_layer_attr));
4558 }
4559
4560 return ret;
4561 }
4562
4563 /**
4564 * ufshcd_config_pwr_mode - configure a new power mode
4565 * @hba: per-adapter instance
4566 * @desired_pwr_mode: desired power configuration
4567 *
4568 * Return: 0 upon success; < 0 upon failure.
4569 */
4570 int ufshcd_config_pwr_mode(struct ufs_hba *hba,
4571 struct ufs_pa_layer_attr *desired_pwr_mode)
4572 {
4573 struct ufs_pa_layer_attr final_params = { 0 };
4574 int ret;
4575
4576 ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
4577 desired_pwr_mode, &final_params);
4578
4579 if (ret)
4580 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
4581
4582 ret = ufshcd_change_power_mode(hba, &final_params);
4583
4584 if (!ret)
4585 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
4586 &final_params);
4587
4588 return ret;
4589 }
4590 EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
4591
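/*
 * Illustrative sketch (not part of the driver): once the negotiated maximum
 * has been cached by ufshcd_get_max_pwr_mode(), it can be applied through
 * ufshcd_config_pwr_mode(), similar to what the probe path does.
 */
#if 0
static int example_switch_to_max_pwr_mode(struct ufs_hba *hba)
{
	int ret;

	ret = ufshcd_get_max_pwr_mode(hba);
	if (ret)
		return ret;

	return ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
}
#endif
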
4592 /**
4593 * ufshcd_complete_dev_init() - checks device readiness
4594 * @hba: per-adapter instance
4595 *
4596 * Set fDeviceInit flag and poll until device toggles it.
4597 *
4598 * Return: 0 upon success; < 0 upon failure.
4599 */
4600 static int ufshcd_complete_dev_init(struct ufs_hba *hba)
4601 {
4602 int err;
4603 bool flag_res = true;
4604 ktime_t timeout;
4605
4606 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
4607 QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL);
4608 if (err) {
4609 dev_err(hba->dev,
4610 "%s: setting fDeviceInit flag failed with error %d\n",
4611 __func__, err);
4612 goto out;
4613 }
4614
4615 /* Poll fDeviceInit flag to be cleared */
4616 timeout = ktime_add_ms(ktime_get(), FDEVICEINIT_COMPL_TIMEOUT);
4617 do {
4618 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
4619 QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
4620 if (!flag_res)
4621 break;
4622 usleep_range(500, 1000);
4623 } while (ktime_before(ktime_get(), timeout));
4624
4625 if (err) {
4626 dev_err(hba->dev,
4627 "%s: reading fDeviceInit flag failed with error %d\n",
4628 __func__, err);
4629 } else if (flag_res) {
4630 dev_err(hba->dev,
4631 "%s: fDeviceInit was not cleared by the device\n",
4632 __func__);
4633 err = -EBUSY;
4634 }
4635 out:
4636 return err;
4637 }
4638
4639 /**
4640 * ufshcd_make_hba_operational - Make UFS controller operational
4641 * @hba: per adapter instance
4642 *
4643 * To bring UFS host controller to operational state,
4644 * 1. Enable required interrupts
4645 * 2. Configure interrupt aggregation
4646 * 3. Program UTRL and UTMRL base address
4647 * 4. Configure run-stop-registers
4648 *
4649 * Return: 0 on success, non-zero value on failure.
4650 */
4651 int ufshcd_make_hba_operational(struct ufs_hba *hba)
4652 {
4653 int err = 0;
4654 u32 reg;
4655
4656 /* Enable required interrupts */
4657 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
4658
4659 /* Configure interrupt aggregation */
4660 if (ufshcd_is_intr_aggr_allowed(hba))
4661 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
4662 else
4663 ufshcd_disable_intr_aggr(hba);
4664
4665 /* Configure UTRL and UTMRL base address registers */
4666 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
4667 REG_UTP_TRANSFER_REQ_LIST_BASE_L);
4668 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
4669 REG_UTP_TRANSFER_REQ_LIST_BASE_H);
4670 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
4671 REG_UTP_TASK_REQ_LIST_BASE_L);
4672 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
4673 REG_UTP_TASK_REQ_LIST_BASE_H);
4674
4675 /*
4676 * Make sure base address and interrupt setup are updated before
4677 * enabling the run/stop registers below.
4678 */
4679 wmb();
4680
4681 /*
4682 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
4683 */
4684 reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
4685 if (!(ufshcd_get_lists_status(reg))) {
4686 ufshcd_enable_run_stop_reg(hba);
4687 } else {
4688 dev_err(hba->dev,
4689 "Host controller not ready to process requests");
4690 err = -EIO;
4691 }
4692
4693 return err;
4694 }
4695 EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
4696
4697 /**
4698 * ufshcd_hba_stop - Send controller to reset state
4699 * @hba: per adapter instance
4700 */
4701 void ufshcd_hba_stop(struct ufs_hba *hba)
4702 {
4703 unsigned long flags;
4704 int err;
4705
4706 /*
4707 * Obtain the host lock to prevent the controller from being disabled
4708 * while the UFS interrupt handler is active on another CPU.
4709 */
4710 spin_lock_irqsave(hba->host->host_lock, flags);
4711 ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
4712 spin_unlock_irqrestore(hba->host->host_lock, flags);
4713
4714 err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
4715 CONTROLLER_ENABLE, CONTROLLER_DISABLE,
4716 10, 1);
4717 if (err)
4718 dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
4719 }
4720 EXPORT_SYMBOL_GPL(ufshcd_hba_stop);
4721
4722 /**
4723 * ufshcd_hba_execute_hce - initialize the controller
4724 * @hba: per adapter instance
4725 *
4726 * The controller resets itself and the controller firmware initialization
4727 * sequence kicks off. When the controller is ready it sets
4728 * the Host Controller Enable bit to 1.
4729 *
4730 * Return: 0 on success, non-zero value on failure.
4731 */
4732 static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
4733 {
4734 int retry_outer = 3;
4735 int retry_inner;
4736
4737 start:
4738 if (ufshcd_is_hba_active(hba))
4739 /* change controller state to "reset state" */
4740 ufshcd_hba_stop(hba);
4741
4742 /* UniPro link is disabled at this point */
4743 ufshcd_set_link_off(hba);
4744
4745 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4746
4747 /* start controller initialization sequence */
4748 ufshcd_hba_start(hba);
4749
4750 /*
4751 * To initialize a UFS host controller, the HCE bit must be set to 1.
4752 * During initialization the HCE bit value changes from 1->0->1.
4753 * When the host controller completes the initialization sequence
4754 * it sets the value of the HCE bit back to 1. The same HCE bit is read back
4755 * to check if the controller has completed the initialization sequence.
4756 * So without this delay the value HCE = 1, set by the previous
4757 * instruction, might be read back prematurely.
4758 * This delay can be changed based on the controller.
4759 */
4760 ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100);
4761
4762 /* wait for the host controller to complete initialization */
4763 retry_inner = 50;
4764 while (!ufshcd_is_hba_active(hba)) {
4765 if (retry_inner) {
4766 retry_inner--;
4767 } else {
4768 dev_err(hba->dev,
4769 "Controller enable failed\n");
4770 if (retry_outer) {
4771 retry_outer--;
4772 goto start;
4773 }
4774 return -EIO;
4775 }
4776 usleep_range(1000, 1100);
4777 }
4778
4779 /* enable UIC related interrupts */
4780 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4781
4782 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4783
4784 return 0;
4785 }
4786
4787 int ufshcd_hba_enable(struct ufs_hba *hba)
4788 {
4789 int ret;
4790
4791 if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
4792 ufshcd_set_link_off(hba);
4793 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4794
4795 /* enable UIC related interrupts */
4796 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4797 ret = ufshcd_dme_reset(hba);
4798 if (ret) {
4799 dev_err(hba->dev, "DME_RESET failed\n");
4800 return ret;
4801 }
4802
4803 ret = ufshcd_dme_enable(hba);
4804 if (ret) {
4805 dev_err(hba->dev, "Enabling DME failed\n");
4806 return ret;
4807 }
4808
4809 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4810 } else {
4811 ret = ufshcd_hba_execute_hce(hba);
4812 }
4813
4814 return ret;
4815 }
4816 EXPORT_SYMBOL_GPL(ufshcd_hba_enable);
4817
4818 static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
4819 {
4820 int tx_lanes = 0, i, err = 0;
4821
4822 if (!peer)
4823 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4824 &tx_lanes);
4825 else
4826 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4827 &tx_lanes);
4828 for (i = 0; i < tx_lanes; i++) {
4829 if (!peer)
4830 err = ufshcd_dme_set(hba,
4831 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4832 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4833 0);
4834 else
4835 err = ufshcd_dme_peer_set(hba,
4836 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4837 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4838 0);
4839 if (err) {
4840 dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
4841 __func__, peer, i, err);
4842 break;
4843 }
4844 }
4845
4846 return err;
4847 }
4848
4849 static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
4850 {
4851 return ufshcd_disable_tx_lcc(hba, true);
4852 }
4853
4854 void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val)
4855 {
4856 struct ufs_event_hist *e;
4857
4858 if (id >= UFS_EVT_CNT)
4859 return;
4860
4861 e = &hba->ufs_stats.event[id];
4862 e->val[e->pos] = val;
4863 e->tstamp[e->pos] = local_clock();
4864 e->cnt += 1;
4865 e->pos = (e->pos + 1) % UFS_EVENT_HIST_LENGTH;
4866
4867 ufshcd_vops_event_notify(hba, id, &val);
4868 }
4869 EXPORT_SYMBOL_GPL(ufshcd_update_evt_hist);
4870
4871 /**
4872 * ufshcd_link_startup - Initialize unipro link startup
4873 * @hba: per adapter instance
4874 *
4875 * Return: 0 for success, non-zero in case of failure.
4876 */
4877 static int ufshcd_link_startup(struct ufs_hba *hba)
4878 {
4879 int ret;
4880 int retries = DME_LINKSTARTUP_RETRIES;
4881 bool link_startup_again = false;
4882
4883 /*
4884 * If the UFS device isn't active then we have to issue link startup
4885 * twice to make sure the device state moves to active.
4886 */
4887 if (!ufshcd_is_ufs_dev_active(hba))
4888 link_startup_again = true;
4889
4890 link_startup:
4891 do {
4892 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
4893
4894 ret = ufshcd_dme_link_startup(hba);
4895
4896 /* check if device is detected by inter-connect layer */
4897 if (!ret && !ufshcd_is_device_present(hba)) {
4898 ufshcd_update_evt_hist(hba,
4899 UFS_EVT_LINK_STARTUP_FAIL,
4900 0);
4901 dev_err(hba->dev, "%s: Device not present\n", __func__);
4902 ret = -ENXIO;
4903 goto out;
4904 }
4905
4906 /*
4907 * DME link lost indication is only received when link is up,
4908 * but we can't be sure if the link is up until link startup
4909 * succeeds. So reset the local Uni-Pro and try again.
4910 */
4911 if (ret && retries && ufshcd_hba_enable(hba)) {
4912 ufshcd_update_evt_hist(hba,
4913 UFS_EVT_LINK_STARTUP_FAIL,
4914 (u32)ret);
4915 goto out;
4916 }
4917 } while (ret && retries--);
4918
4919 if (ret) {
4920 /* failed to get the link up... retire */
4921 ufshcd_update_evt_hist(hba,
4922 UFS_EVT_LINK_STARTUP_FAIL,
4923 (u32)ret);
4924 goto out;
4925 }
4926
4927 if (link_startup_again) {
4928 link_startup_again = false;
4929 retries = DME_LINKSTARTUP_RETRIES;
4930 goto link_startup;
4931 }
4932
4933 /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
4934 ufshcd_init_pwr_info(hba);
4935 ufshcd_print_pwr_info(hba);
4936
4937 if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
4938 ret = ufshcd_disable_device_tx_lcc(hba);
4939 if (ret)
4940 goto out;
4941 }
4942
4943 /* Include any host controller configuration via UIC commands */
4944 ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
4945 if (ret)
4946 goto out;
4947
4948 /* Clear UECPA once, in case a LINERESET happened during LINK_STARTUP */
4949 ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
4950 ret = ufshcd_make_hba_operational(hba);
4951 out:
4952 if (ret) {
4953 dev_err(hba->dev, "link startup failed %d\n", ret);
4954 ufshcd_print_host_state(hba);
4955 ufshcd_print_pwr_info(hba);
4956 ufshcd_print_evt_hist(hba);
4957 }
4958 return ret;
4959 }
4960
4961 /**
4962 * ufshcd_verify_dev_init() - Verify device initialization
4963 * @hba: per-adapter instance
4964 *
4965 * Send NOP OUT UPIU and wait for NOP IN response to check whether the
4966 * device Transport Protocol (UTP) layer is ready after a reset.
4967 * If the UTP layer at the device side is not initialized, it may
4968 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
4969 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
4970 *
4971 * Return: 0 upon success; < 0 upon failure.
4972 */
4973 static int ufshcd_verify_dev_init(struct ufs_hba *hba)
4974 {
4975 int err = 0;
4976 int retries;
4977
4978 ufshcd_hold(hba);
4979 mutex_lock(&hba->dev_cmd.lock);
4980 for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
4981 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
4982 hba->nop_out_timeout);
4983
4984 if (!err || err == -ETIMEDOUT)
4985 break;
4986
4987 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
4988 }
4989 mutex_unlock(&hba->dev_cmd.lock);
4990 ufshcd_release(hba);
4991
4992 if (err)
4993 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
4994 return err;
4995 }
4996
4997 /**
4998 * ufshcd_setup_links - associate links between the device wlun and other luns
4999 * @sdev: pointer to SCSI device
5000 * @hba: pointer to ufs hba
5001 */
5002 static void ufshcd_setup_links(struct ufs_hba *hba, struct scsi_device *sdev)
5003 {
5004 struct device_link *link;
5005
5006 /*
5007 * Device wlun is the supplier & rest of the luns are consumers.
5008 * This ensures that device wlun suspends after all other luns.
5009 */
5010 if (hba->ufs_device_wlun) {
5011 link = device_link_add(&sdev->sdev_gendev,
5012 &hba->ufs_device_wlun->sdev_gendev,
5013 DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
5014 if (!link) {
5015 dev_err(&sdev->sdev_gendev, "Failed establishing link - %s\n",
5016 dev_name(&hba->ufs_device_wlun->sdev_gendev));
5017 return;
5018 }
5019 hba->luns_avail--;
5020 /* Ignore REPORT_LUN wlun probing */
5021 if (hba->luns_avail == 1) {
5022 ufshcd_rpm_put(hba);
5023 return;
5024 }
5025 } else {
5026 /*
5027 * Device wlun is probed. The assumption is that WLUNs are
5028 * scanned before other LUNs.
5029 */
5030 hba->luns_avail--;
5031 }
5032 }
5033
5034 /**
5035 * ufshcd_lu_init - Initialize the relevant parameters of the LU
5036 * @hba: per-adapter instance
5037 * @sdev: pointer to SCSI device
5038 */
5039 static void ufshcd_lu_init(struct ufs_hba *hba, struct scsi_device *sdev)
5040 {
5041 int len = QUERY_DESC_MAX_SIZE;
5042 u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);
5043 u8 lun_qdepth = hba->nutrs;
5044 u8 *desc_buf;
5045 int ret;
5046
5047 desc_buf = kzalloc(len, GFP_KERNEL);
5048 if (!desc_buf)
5049 goto set_qdepth;
5050
5051 ret = ufshcd_read_unit_desc_param(hba, lun, 0, desc_buf, len);
5052 if (ret < 0) {
5053 if (ret == -EOPNOTSUPP)
5054 /* If LU doesn't support unit descriptor, its queue depth is set to 1 */
5055 lun_qdepth = 1;
5056 kfree(desc_buf);
5057 goto set_qdepth;
5058 }
5059
5060 if (desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH]) {
5061 /*
5062 * In a per-LU queueing architecture, bLUQueueDepth will not be 0, so we
5063 * use the smaller of UFSHCI CAP.NUTRS and the UFS LU's bLUQueueDepth
5064 */
5065 lun_qdepth = min_t(int, desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH], hba->nutrs);
5066 }
5067 /*
5068 * According to UFS device specification, the write protection mode is only supported by
5069 * normal LU, not supported by WLUN.
5070 */
5071 if (hba->dev_info.f_power_on_wp_en && lun < hba->dev_info.max_lu_supported &&
5072 !hba->dev_info.is_lu_power_on_wp &&
5073 desc_buf[UNIT_DESC_PARAM_LU_WR_PROTECT] == UFS_LU_POWER_ON_WP)
5074 hba->dev_info.is_lu_power_on_wp = true;
5075
5076 /* In case of RPMB LU, check if advanced RPMB mode is enabled */
5077 if (desc_buf[UNIT_DESC_PARAM_UNIT_INDEX] == UFS_UPIU_RPMB_WLUN &&
5078 desc_buf[RPMB_UNIT_DESC_PARAM_REGION_EN] & BIT(4))
5079 hba->dev_info.b_advanced_rpmb_en = true;
5080
5081
5082 kfree(desc_buf);
5083 set_qdepth:
5084 /*
5085 * For WLUNs that don't support a unit descriptor, the queue depth is set to 1. For LUs whose
5086 * bLUQueueDepth == 0, the queue depth is set to the maximum value that the host can queue.
5087 */
5088 dev_dbg(hba->dev, "Set LU %x queue depth %d\n", lun, lun_qdepth);
5089 scsi_change_queue_depth(sdev, lun_qdepth);
5090 }
5091
5092 /**
5093 * ufshcd_slave_alloc - handle initial SCSI device configurations
5094 * @sdev: pointer to SCSI device
5095 *
5096 * Return: 0 (success).
5097 */
5098 static int ufshcd_slave_alloc(struct scsi_device *sdev)
5099 {
5100 struct ufs_hba *hba;
5101
5102 hba = shost_priv(sdev->host);
5103
5104 /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
5105 sdev->use_10_for_ms = 1;
5106
5107 /* DBD field should be set to 1 in mode sense(10) */
5108 sdev->set_dbd_for_ms = 1;
5109
5110 /* allow SCSI layer to restart the device in case of errors */
5111 sdev->allow_restart = 1;
5112
5113 /* REPORT SUPPORTED OPERATION CODES is not supported */
5114 sdev->no_report_opcodes = 1;
5115
5116 /* WRITE_SAME command is not supported */
5117 sdev->no_write_same = 1;
5118
5119 ufshcd_lu_init(hba, sdev);
5120
5121 ufshcd_setup_links(hba, sdev);
5122
5123 return 0;
5124 }
5125
5126 /**
5127 * ufshcd_change_queue_depth - change queue depth
5128 * @sdev: pointer to SCSI device
5129 * @depth: required depth to set
5130 *
5131 * Change queue depth and make sure the max. limits are not crossed.
5132 *
5133 * Return: new queue depth.
5134 */
5135 static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
5136 {
5137 return scsi_change_queue_depth(sdev, min(depth, sdev->host->can_queue));
5138 }
5139
5140 /**
5141 * ufshcd_slave_configure - adjust SCSI device configurations
5142 * @sdev: pointer to SCSI device
5143 *
5144 * Return: 0 (success).
5145 */
5146 static int ufshcd_slave_configure(struct scsi_device *sdev)
5147 {
5148 struct ufs_hba *hba = shost_priv(sdev->host);
5149 struct request_queue *q = sdev->request_queue;
5150
5151 blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
5152 if (hba->quirks & UFSHCD_QUIRK_4KB_DMA_ALIGNMENT)
5153 blk_queue_update_dma_alignment(q, SZ_4K - 1);
5154 /*
5155 * Block runtime-pm until all consumers are added.
5156 * Refer ufshcd_setup_links().
5157 */
5158 if (is_device_wlun(sdev))
5159 pm_runtime_get_noresume(&sdev->sdev_gendev);
5160 else if (ufshcd_is_rpm_autosuspend_allowed(hba))
5161 sdev->rpm_autosuspend = 1;
5162 /*
5163 * Do not print messages during runtime PM to avoid never-ending cycles
5164 * of messages written back to storage by user space causing runtime
5165 * resume, causing more messages and so on.
5166 */
5167 sdev->silence_suspend = 1;
5168
5169 ufshcd_crypto_register(hba, q);
5170
5171 return 0;
5172 }
5173
5174 /**
5175 * ufshcd_slave_destroy - remove SCSI device configurations
5176 * @sdev: pointer to SCSI device
5177 */
5178 static void ufshcd_slave_destroy(struct scsi_device *sdev)
5179 {
5180 struct ufs_hba *hba;
5181 unsigned long flags;
5182
5183 hba = shost_priv(sdev->host);
5184
5185 /* Drop the reference as it won't be needed anymore */
5186 if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
5187 spin_lock_irqsave(hba->host->host_lock, flags);
5188 hba->ufs_device_wlun = NULL;
5189 spin_unlock_irqrestore(hba->host->host_lock, flags);
5190 } else if (hba->ufs_device_wlun) {
5191 struct device *supplier = NULL;
5192
5193 /* Ensure UFS Device WLUN exists and does not disappear */
5194 spin_lock_irqsave(hba->host->host_lock, flags);
5195 if (hba->ufs_device_wlun) {
5196 supplier = &hba->ufs_device_wlun->sdev_gendev;
5197 get_device(supplier);
5198 }
5199 spin_unlock_irqrestore(hba->host->host_lock, flags);
5200
5201 if (supplier) {
5202 /*
5203 * If a LUN fails to probe (e.g. absent BOOT WLUN), the
5204 * device will not have been registered but can still
5205 * have a device link holding a reference to the device.
5206 */
5207 device_link_remove(&sdev->sdev_gendev, supplier);
5208 put_device(supplier);
5209 }
5210 }
5211 }
5212
5213 /**
5214 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
5215 * @lrbp: pointer to local reference block of completed command
5216 * @scsi_status: SCSI command status
5217 *
5218 * Return: value based on SCSI command status.
5219 */
5220 static inline int
5221 ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
5222 {
5223 int result = 0;
5224
5225 switch (scsi_status) {
5226 case SAM_STAT_CHECK_CONDITION:
5227 ufshcd_copy_sense_data(lrbp);
5228 fallthrough;
5229 case SAM_STAT_GOOD:
5230 result |= DID_OK << 16 | scsi_status;
5231 break;
5232 case SAM_STAT_TASK_SET_FULL:
5233 case SAM_STAT_BUSY:
5234 case SAM_STAT_TASK_ABORTED:
5235 ufshcd_copy_sense_data(lrbp);
5236 result |= scsi_status;
5237 break;
5238 default:
5239 result |= DID_ERROR << 16;
5240 break;
5241 } /* end of switch */
5242
5243 return result;
5244 }
5245
5246 /**
5247 * ufshcd_transfer_rsp_status - Get overall status of the response
5248 * @hba: per adapter instance
5249 * @lrbp: pointer to local reference block of completed command
5250 * @cqe: pointer to the completion queue entry
5251 *
5252 * Return: result of the command to notify SCSI midlayer.
5253 */
5254 static inline int
5255 ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
5256 struct cq_entry *cqe)
5257 {
5258 int result = 0;
5259 int scsi_status;
5260 enum utp_ocs ocs;
5261 u8 upiu_flags;
5262 u32 resid;
5263
5264 upiu_flags = lrbp->ucd_rsp_ptr->header.flags;
5265 resid = be32_to_cpu(lrbp->ucd_rsp_ptr->sr.residual_transfer_count);
5266 /*
5267 * Test !overflow instead of underflow to support UFS devices that do
5268 * not set either flag.
5269 */
5270 if (resid && !(upiu_flags & UPIU_RSP_FLAG_OVERFLOW))
5271 scsi_set_resid(lrbp->cmd, resid);
5272
5273 /* overall command status of utrd */
5274 ocs = ufshcd_get_tr_ocs(lrbp, cqe);
5275
5276 if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) {
5277 if (lrbp->ucd_rsp_ptr->header.response ||
5278 lrbp->ucd_rsp_ptr->header.status)
5279 ocs = OCS_SUCCESS;
5280 }
5281
5282 switch (ocs) {
5283 case OCS_SUCCESS:
5284 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
5285 switch (ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr)) {
5286 case UPIU_TRANSACTION_RESPONSE:
5287 /*
5288 * get the result based on SCSI status response
5289 * to notify the SCSI midlayer of the command status
5290 */
5291 scsi_status = lrbp->ucd_rsp_ptr->header.status;
5292 result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
5293
5294 /*
5295 * Currently we are only supporting BKOPs exception
5296 * events hence we can ignore BKOPs exception event
5297 * during power management callbacks. BKOPs exception
5298 * event is not expected to be raised in runtime suspend
5299 * callback as it allows the urgent bkops.
5300 * During system suspend, we are anyway forcefully
5301 * disabling the bkops and if urgent bkops is needed
5302 * it will be enabled on system resume. Long term
5303 * solution could be to abort the system suspend if
5304 * UFS device needs urgent BKOPs.
5305 */
5306 if (!hba->pm_op_in_progress &&
5307 !ufshcd_eh_in_progress(hba) &&
5308 ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
5309 /* Flushed in suspend */
5310 schedule_work(&hba->eeh_work);
5311 break;
5312 case UPIU_TRANSACTION_REJECT_UPIU:
5313 /* TODO: handle Reject UPIU Response */
5314 result = DID_ERROR << 16;
5315 dev_err(hba->dev,
5316 "Reject UPIU not fully implemented\n");
5317 break;
5318 default:
5319 dev_err(hba->dev,
5320 "Unexpected request response code = %x\n",
5321 result);
5322 result = DID_ERROR << 16;
5323 break;
5324 }
5325 break;
5326 case OCS_ABORTED:
5327 result |= DID_ABORT << 16;
5328 break;
5329 case OCS_INVALID_COMMAND_STATUS:
5330 result |= DID_REQUEUE << 16;
5331 break;
5332 case OCS_INVALID_CMD_TABLE_ATTR:
5333 case OCS_INVALID_PRDT_ATTR:
5334 case OCS_MISMATCH_DATA_BUF_SIZE:
5335 case OCS_MISMATCH_RESP_UPIU_SIZE:
5336 case OCS_PEER_COMM_FAILURE:
5337 case OCS_FATAL_ERROR:
5338 case OCS_DEVICE_FATAL_ERROR:
5339 case OCS_INVALID_CRYPTO_CONFIG:
5340 case OCS_GENERAL_CRYPTO_ERROR:
5341 default:
5342 result |= DID_ERROR << 16;
5343 dev_err(hba->dev,
5344 "OCS error from controller = %x for tag %d\n",
5345 ocs, lrbp->task_tag);
5346 ufshcd_print_evt_hist(hba);
5347 ufshcd_print_host_state(hba);
5348 break;
5349 } /* end of switch */
5350
5351 if ((host_byte(result) != DID_OK) &&
5352 (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs)
5353 ufshcd_print_tr(hba, lrbp->task_tag, true);
5354 return result;
5355 }
5356
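/*
 * Check whether the given interrupt status indicates an Auto-Hibernate
 * enter/exit failure that was not caused by a manual DME hibernate command.
 */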
5357 static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
5358 u32 intr_mask)
5359 {
5360 if (!ufshcd_is_auto_hibern8_supported(hba) ||
5361 !ufshcd_is_auto_hibern8_enabled(hba))
5362 return false;
5363
5364 if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
5365 return false;
5366
5367 if (hba->active_uic_cmd &&
5368 (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
5369 hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
5370 return false;
5371
5372 return true;
5373 }
5374
5375 /**
5376 * ufshcd_uic_cmd_compl - handle completion of uic command
5377 * @hba: per adapter instance
5378 * @intr_status: interrupt status generated by the controller
5379 *
5380 * Return:
5381 * IRQ_HANDLED - If interrupt is valid
5382 * IRQ_NONE - If invalid interrupt
5383 */
5384 static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
5385 {
5386 irqreturn_t retval = IRQ_NONE;
5387
5388 spin_lock(hba->host->host_lock);
5389 if (ufshcd_is_auto_hibern8_error(hba, intr_status))
5390 hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
5391
5392 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
5393 hba->active_uic_cmd->argument2 |=
5394 ufshcd_get_uic_cmd_result(hba);
5395 hba->active_uic_cmd->argument3 =
5396 ufshcd_get_dme_attr_val(hba);
5397 if (!hba->uic_async_done)
5398 hba->active_uic_cmd->cmd_active = 0;
5399 complete(&hba->active_uic_cmd->done);
5400 retval = IRQ_HANDLED;
5401 }
5402
5403 if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
5404 hba->active_uic_cmd->cmd_active = 0;
5405 complete(hba->uic_async_done);
5406 retval = IRQ_HANDLED;
5407 }
5408
5409 if (retval == IRQ_HANDLED)
5410 ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd,
5411 UFS_CMD_COMP);
5412 spin_unlock(hba->host->host_lock);
5413 return retval;
5414 }
5415
5416 /* Release the resources allocated for processing a SCSI command. */
5417 void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
5418 struct ufshcd_lrb *lrbp)
5419 {
5420 struct scsi_cmnd *cmd = lrbp->cmd;
5421
5422 scsi_dma_unmap(cmd);
5423 ufshcd_release(hba);
5424 ufshcd_clk_scaling_update_busy(hba);
5425 }
5426
5427 /**
5428 * ufshcd_compl_one_cqe - handle a completion queue entry
5429 * @hba: per adapter instance
5430 * @task_tag: the task tag of the request to be completed
5431 * @cqe: pointer to the completion queue entry
5432 */
5433 void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
5434 struct cq_entry *cqe)
5435 {
5436 struct ufshcd_lrb *lrbp;
5437 struct scsi_cmnd *cmd;
5438 enum utp_ocs ocs;
5439
5440 lrbp = &hba->lrb[task_tag];
5441 lrbp->compl_time_stamp = ktime_get();
5442 cmd = lrbp->cmd;
5443 if (cmd) {
5444 if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
5445 ufshcd_update_monitor(hba, lrbp);
5446 ufshcd_add_command_trace(hba, task_tag, UFS_CMD_COMP);
5447 cmd->result = ufshcd_transfer_rsp_status(hba, lrbp, cqe);
5448 ufshcd_release_scsi_cmd(hba, lrbp);
5449 /* Do not touch lrbp after scsi done */
5450 scsi_done(cmd);
5451 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
5452 lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
5453 if (hba->dev_cmd.complete) {
5454 if (cqe) {
5455 ocs = le32_to_cpu(cqe->status) & MASK_OCS;
5456 lrbp->utr_descriptor_ptr->header.ocs = ocs;
5457 }
5458 complete(hba->dev_cmd.complete);
5459 ufshcd_clk_scaling_update_busy(hba);
5460 }
5461 }
5462 }
5463
5464 /**
5465 * __ufshcd_transfer_req_compl - handle SCSI and query command completion
5466 * @hba: per adapter instance
5467 * @completed_reqs: bitmask that indicates which requests to complete
5468 */
5469 static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
5470 unsigned long completed_reqs)
5471 {
5472 int tag;
5473
5474 for_each_set_bit(tag, &completed_reqs, hba->nutrs)
5475 ufshcd_compl_one_cqe(hba, tag, NULL);
5476 }
5477
5478 /* Any value that is not an existing queue number is fine for this constant. */
5479 enum {
5480 UFSHCD_POLL_FROM_INTERRUPT_CONTEXT = -1
5481 };
5482
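/*
 * Clear the bits of requests that were submitted with REQ_POLLED from
 * @completed_reqs so that polled requests are not completed from interrupt
 * context.
 */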
5483 static void ufshcd_clear_polled(struct ufs_hba *hba,
5484 unsigned long *completed_reqs)
5485 {
5486 int tag;
5487
5488 for_each_set_bit(tag, completed_reqs, hba->nutrs) {
5489 struct scsi_cmnd *cmd = hba->lrb[tag].cmd;
5490
5491 if (!cmd)
5492 continue;
5493 if (scsi_cmd_to_rq(cmd)->cmd_flags & REQ_POLLED)
5494 __clear_bit(tag, completed_reqs);
5495 }
5496 }
5497
5498 /*
5499 * Return: > 0 if one or more commands have been completed or 0 if no
5500 * requests have been completed.
5501 */
5502 static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
5503 {
5504 struct ufs_hba *hba = shost_priv(shost);
5505 unsigned long completed_reqs, flags;
5506 u32 tr_doorbell;
5507 struct ufs_hw_queue *hwq;
5508
5509 if (is_mcq_enabled(hba)) {
5510 hwq = &hba->uhq[queue_num];
5511
5512 return ufshcd_mcq_poll_cqe_lock(hba, hwq);
5513 }
5514
5515 spin_lock_irqsave(&hba->outstanding_lock, flags);
5516 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
5517 completed_reqs = ~tr_doorbell & hba->outstanding_reqs;
5518 WARN_ONCE(completed_reqs & ~hba->outstanding_reqs,
5519 "completed: %#lx; outstanding: %#lx\n", completed_reqs,
5520 hba->outstanding_reqs);
5521 if (queue_num == UFSHCD_POLL_FROM_INTERRUPT_CONTEXT) {
5522 /* Do not complete polled requests from interrupt context. */
5523 ufshcd_clear_polled(hba, &completed_reqs);
5524 }
5525 hba->outstanding_reqs &= ~completed_reqs;
5526 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
5527
5528 if (completed_reqs)
5529 __ufshcd_transfer_req_compl(hba, completed_reqs);
5530
5531 return completed_reqs != 0;
5532 }
5533
5534 /**
5535 * ufshcd_mcq_compl_pending_transfer - MCQ mode function. It is
5536 * invoked from the error handler context or ufshcd_host_reset_and_restore()
5537 * to complete the pending transfers and free the resources associated with
5538 * the scsi command.
5539 *
5540 * @hba: per adapter instance
5541 * @force_compl: This flag is set to true when invoked
5542 * from ufshcd_host_reset_and_restore() in which case it requires special
5543 * handling because the host controller has been reset by ufshcd_hba_stop().
5544 */
5545 static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba,
5546 bool force_compl)
5547 {
5548 struct ufs_hw_queue *hwq;
5549 struct ufshcd_lrb *lrbp;
5550 struct scsi_cmnd *cmd;
5551 unsigned long flags;
5552 u32 hwq_num, utag;
5553 int tag;
5554
5555 for (tag = 0; tag < hba->nutrs; tag++) {
5556 lrbp = &hba->lrb[tag];
5557 cmd = lrbp->cmd;
5558 if (!ufshcd_cmd_inflight(cmd) ||
5559 test_bit(SCMD_STATE_COMPLETE, &cmd->state))
5560 continue;
5561
5562 utag = blk_mq_unique_tag(scsi_cmd_to_rq(cmd));
5563 hwq_num = blk_mq_unique_tag_to_hwq(utag);
5564 hwq = &hba->uhq[hwq_num];
5565
5566 if (force_compl) {
5567 ufshcd_mcq_compl_all_cqes_lock(hba, hwq);
5568 /*
5569 * For those cmds of which the cqes are not present
5570 * in the cq, complete them explicitly.
5571 */
5572 if (cmd && !test_bit(SCMD_STATE_COMPLETE, &cmd->state)) {
5573 spin_lock_irqsave(&hwq->cq_lock, flags);
5574 set_host_byte(cmd, DID_REQUEUE);
5575 ufshcd_release_scsi_cmd(hba, lrbp);
5576 scsi_done(cmd);
5577 spin_unlock_irqrestore(&hwq->cq_lock, flags);
5578 }
5579 } else {
5580 ufshcd_mcq_poll_cqe_lock(hba, hwq);
5581 }
5582 }
5583 }
5584
5585 /**
5586 * ufshcd_transfer_req_compl - handle SCSI and query command completion
5587 * @hba: per adapter instance
5588 *
5589 * Return:
5590 * IRQ_HANDLED - If interrupt is valid
5591 * IRQ_NONE - If invalid interrupt
5592 */
5593 static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
5594 {
5595 /* Resetting interrupt aggregation counters first and reading the
5596 * DOOR_BELL afterward allows us to handle all the completed requests.
5597 * In order to prevent other interrupts starvation the DB is read once
5598 * after reset. The down side of this solution is the possibility of
5599 * false interrupt if device completes another request after resetting
5600 * aggregation and before reading the DB.
5601 */
5602 if (ufshcd_is_intr_aggr_allowed(hba) &&
5603 !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
5604 ufshcd_reset_intr_aggr(hba);
5605
5606 if (ufs_fail_completion())
5607 return IRQ_HANDLED;
5608
5609 /*
5610 * Ignore the ufshcd_poll() return value and return IRQ_HANDLED since we
5611 * do not want polling to trigger spurious interrupt complaints.
5612 */
5613 ufshcd_poll(hba->host, UFSHCD_POLL_FROM_INTERRUPT_CONTEXT);
5614
5615 return IRQ_HANDLED;
5616 }
5617
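/*
 * Write the exception event control mask to the wExceptionEventControl
 * attribute of the device.
 */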
5618 int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask)
5619 {
5620 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
5621 QUERY_ATTR_IDN_EE_CONTROL, 0, 0,
5622 &ee_ctrl_mask);
5623 }
5624
5625 int ufshcd_write_ee_control(struct ufs_hba *hba)
5626 {
5627 int err;
5628
5629 mutex_lock(&hba->ee_ctrl_mutex);
5630 err = __ufshcd_write_ee_control(hba, hba->ee_ctrl_mask);
5631 mutex_unlock(&hba->ee_ctrl_mutex);
5632 if (err)
5633 dev_err(hba->dev, "%s: failed to write ee control %d\n",
5634 __func__, err);
5635 return err;
5636 }
5637
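/**
 * ufshcd_update_ee_control - update an exception event mask and apply it
 * @hba: per-adapter instance
 * @mask: exception event mask to update
 * @other_mask: the other exception event mask that is OR-ed in
 * @set: bits to set in @mask
 * @clr: bits to clear in @mask
 *
 * Return: zero on success, non-zero error value on failure.
 */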
5638 int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask,
5639 const u16 *other_mask, u16 set, u16 clr)
5640 {
5641 u16 new_mask, ee_ctrl_mask;
5642 int err = 0;
5643
5644 mutex_lock(&hba->ee_ctrl_mutex);
5645 new_mask = (*mask & ~clr) | set;
5646 ee_ctrl_mask = new_mask | *other_mask;
5647 if (ee_ctrl_mask != hba->ee_ctrl_mask)
5648 err = __ufshcd_write_ee_control(hba, ee_ctrl_mask);
5649 /* Still need to update 'mask' even if 'ee_ctrl_mask' was unchanged */
5650 if (!err) {
5651 hba->ee_ctrl_mask = ee_ctrl_mask;
5652 *mask = new_mask;
5653 }
5654 mutex_unlock(&hba->ee_ctrl_mutex);
5655 return err;
5656 }
5657
5658 /**
5659 * ufshcd_disable_ee - disable exception event
5660 * @hba: per-adapter instance
5661 * @mask: exception event to disable
5662 *
5663 * Disables exception event in the device so that the EVENT_ALERT
5664 * bit is not set.
5665 *
5666 * Return: zero on success, non-zero error value on failure.
5667 */
5668 static inline int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
5669 {
5670 return ufshcd_update_ee_drv_mask(hba, 0, mask);
5671 }
5672
5673 /**
5674 * ufshcd_enable_ee - enable exception event
5675 * @hba: per-adapter instance
5676 * @mask: exception event to enable
5677 *
5678 * Enable corresponding exception event in the device to allow
5679 * device to alert host in critical scenarios.
5680 *
5681 * Return: zero on success, non-zero error value on failure.
5682 */
5683 static inline int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
5684 {
5685 return ufshcd_update_ee_drv_mask(hba, mask, 0);
5686 }
5687
5688 /**
5689 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
5690 * @hba: per-adapter instance
5691 *
5692 * Allow device to manage background operations on its own. Enabling
5693 * this might lead to inconsistent latencies during normal data transfers
5694 * as the device is allowed to manage its own way of handling background
5695 * operations.
5696 *
5697 * Return: zero on success, non-zero on failure.
5698 */
5699 static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
5700 {
5701 int err = 0;
5702
5703 if (hba->auto_bkops_enabled)
5704 goto out;
5705
5706 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
5707 QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
5708 if (err) {
5709 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
5710 __func__, err);
5711 goto out;
5712 }
5713
5714 hba->auto_bkops_enabled = true;
5715 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
5716
5717 /* No need of URGENT_BKOPS exception from the device */
5718 err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5719 if (err)
5720 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
5721 __func__, err);
5722 out:
5723 return err;
5724 }
5725
5726 /**
5727 * ufshcd_disable_auto_bkops - block device in doing background operations
5728 * @hba: per-adapter instance
5729 *
5730 * Disabling background operations improves command response latency but
5731 * has the drawback of the device moving into a critical state where it is
5732 * not operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
5733 * host is idle so that BKOPS are managed effectively without any negative
5734 * impacts.
5735 *
5736 * Return: zero on success, non-zero on failure.
5737 */
5738 static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
5739 {
5740 int err = 0;
5741
5742 if (!hba->auto_bkops_enabled)
5743 goto out;
5744
5745 /*
5746 * If host assisted BKOPs is to be enabled, make sure
5747 * urgent bkops exception is allowed.
5748 */
5749 err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
5750 if (err) {
5751 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
5752 __func__, err);
5753 goto out;
5754 }
5755
5756 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
5757 QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
5758 if (err) {
5759 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
5760 __func__, err);
5761 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5762 goto out;
5763 }
5764
5765 hba->auto_bkops_enabled = false;
5766 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
5767 hba->is_urgent_bkops_lvl_checked = false;
5768 out:
5769 return err;
5770 }
5771
5772 /**
5773 * ufshcd_force_reset_auto_bkops - force reset auto bkops state
5774 * @hba: per adapter instance
5775 *
5776 * After a device reset the device may toggle the BKOPS_EN flag
5777 * to its default value. The s/w tracking variables should be updated
5778 * as well. This function changes the auto-bkops state based on
5779 * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
5780 */
5781 static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
5782 {
5783 if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
5784 hba->auto_bkops_enabled = false;
5785 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
5786 ufshcd_enable_auto_bkops(hba);
5787 } else {
5788 hba->auto_bkops_enabled = true;
5789 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
5790 ufshcd_disable_auto_bkops(hba);
5791 }
5792 hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
5793 hba->is_urgent_bkops_lvl_checked = false;
5794 }
5795
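/* Read the bBackgroundOpStatus attribute from the device. */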
5796 static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
5797 {
5798 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5799 QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
5800 }
5801
5802 /**
5803 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
5804 * @hba: per-adapter instance
5805 * @status: bkops_status value
5806 *
5807 * Read the bkops_status from the UFS device and enable the fBackgroundOpsEn
5808 * flag in the device to permit background operations if the device
5809 * bkops_status is greater than or equal to the "status" argument passed to
5810 * this function; disable it otherwise.
5811 *
5812 * Return: 0 for success, non-zero in case of failure.
5813 *
5814 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
5815 * to know whether auto bkops is enabled or disabled after this function
5816 * returns control to it.
5817 */
5818 static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
5819 enum bkops_status status)
5820 {
5821 int err;
5822 u32 curr_status = 0;
5823
5824 err = ufshcd_get_bkops_status(hba, &curr_status);
5825 if (err) {
5826 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5827 __func__, err);
5828 goto out;
5829 } else if (curr_status > BKOPS_STATUS_MAX) {
5830 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
5831 __func__, curr_status);
5832 err = -EINVAL;
5833 goto out;
5834 }
5835
5836 if (curr_status >= status)
5837 err = ufshcd_enable_auto_bkops(hba);
5838 else
5839 err = ufshcd_disable_auto_bkops(hba);
5840 out:
5841 return err;
5842 }
5843
5844 /**
5845 * ufshcd_urgent_bkops - handle urgent bkops exception event
5846 * @hba: per-adapter instance
5847 *
5848 * Enable fBackgroundOpsEn flag in the device to permit background
5849 * operations.
5850 *
5851 * If BKOPs is enabled, this function returns 0; it returns 1 if bkops is not
5852 * enabled, and a negative error value for any other failure.
5853 *
5854 * Return: 0 upon success; < 0 upon failure.
5855 */
5856 static int ufshcd_urgent_bkops(struct ufs_hba *hba)
5857 {
5858 return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
5859 }
5860
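/* Read the wExceptionEventStatus attribute from the device. */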
5861 static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
5862 {
5863 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5864 QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
5865 }
5866
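/*
 * Handle an urgent BKOPS exception event: if the reported BKOPS status is
 * below the performance-impact level, lower the urgent BKOPS level to the
 * current status, then enable auto-BKOPS.
 */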
5867 static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
5868 {
5869 int err;
5870 u32 curr_status = 0;
5871
5872 if (hba->is_urgent_bkops_lvl_checked)
5873 goto enable_auto_bkops;
5874
5875 err = ufshcd_get_bkops_status(hba, &curr_status);
5876 if (err) {
5877 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5878 __func__, err);
5879 goto out;
5880 }
5881
5882 /*
5883 * We are seeing that some devices are raising the urgent bkops
5884 * exception events even when the BKOPS status doesn't indicate performance
5885 * impacted or critical. Handle such devices by determining their urgent
5886 * bkops status at runtime.
5887 */
5888 if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
5889 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
5890 __func__, curr_status);
5891 /* update the current status as the urgent bkops level */
5892 hba->urgent_bkops_lvl = curr_status;
5893 hba->is_urgent_bkops_lvl_checked = true;
5894 }
5895
5896 enable_auto_bkops:
5897 err = ufshcd_enable_auto_bkops(hba);
5898 out:
5899 if (err < 0)
5900 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
5901 __func__, err);
5902 }
5903
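/*
 * Handle a temperature exception event: read the rough case temperature
 * attribute and notify the hwmon subsystem.
 */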
5904 static void ufshcd_temp_exception_event_handler(struct ufs_hba *hba, u16 status)
5905 {
5906 u32 value;
5907
5908 if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5909 QUERY_ATTR_IDN_CASE_ROUGH_TEMP, 0, 0, &value))
5910 return;
5911
5912 dev_info(hba->dev, "exception Tcase %d\n", value - 80);
5913
5914 ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP);
5915
5916 /*
5917 * A placeholder for the platform vendors to add whatever additional
5918 * steps required
5919 */
5920 }
5921
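/* Set or clear a WriteBooster related flag for the WriteBooster query index. */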
5922 static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn)
5923 {
5924 u8 index;
5925 enum query_opcode opcode = set ? UPIU_QUERY_OPCODE_SET_FLAG :
5926 UPIU_QUERY_OPCODE_CLEAR_FLAG;
5927
5928 index = ufshcd_wb_get_query_index(hba);
5929 return ufshcd_query_flag_retry(hba, opcode, idn, index, NULL);
5930 }
5931
5932 int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable)
5933 {
5934 int ret;
5935
5936 if (!ufshcd_is_wb_allowed(hba) ||
5937 hba->dev_info.wb_enabled == enable)
5938 return 0;
5939
5940 ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_EN);
5941 if (ret) {
5942 dev_err(hba->dev, "%s: Write Booster %s failed %d\n",
5943 __func__, enable ? "enabling" : "disabling", ret);
5944 return ret;
5945 }
5946
5947 hba->dev_info.wb_enabled = enable;
5948 dev_dbg(hba->dev, "%s: Write Booster %s\n",
5949 __func__, enable ? "enabled" : "disabled");
5950
5951 return ret;
5952 }
5953
5954 static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba,
5955 bool enable)
5956 {
5957 int ret;
5958
5959 ret = __ufshcd_wb_toggle(hba, enable,
5960 QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8);
5961 if (ret) {
5962 dev_err(hba->dev, "%s: WB-Buf Flush during H8 %s failed %d\n",
5963 __func__, enable ? "enabling" : "disabling", ret);
5964 return;
5965 }
5966 dev_dbg(hba->dev, "%s: WB-Buf Flush during H8 %s\n",
5967 __func__, enable ? "enabled" : "disabled");
5968 }
5969
5970 int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable)
5971 {
5972 int ret;
5973
5974 if (!ufshcd_is_wb_allowed(hba) ||
5975 hba->dev_info.wb_buf_flush_enabled == enable)
5976 return 0;
5977
5978 ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN);
5979 if (ret) {
5980 dev_err(hba->dev, "%s: WB-Buf Flush %s failed %d\n",
5981 __func__, enable ? "enabling" : "disabling", ret);
5982 return ret;
5983 }
5984
5985 hba->dev_info.wb_buf_flush_enabled = enable;
5986 dev_dbg(hba->dev, "%s: WB-Buf Flush %s\n",
5987 __func__, enable ? "enabled" : "disabled");
5988
5989 return ret;
5990 }
5991
5992 static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
5993 u32 avail_buf)
5994 {
5995 u32 cur_buf;
5996 int ret;
5997 u8 index;
5998
5999 index = ufshcd_wb_get_query_index(hba);
6000 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
6001 QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE,
6002 index, 0, &cur_buf);
6003 if (ret) {
6004 dev_err(hba->dev, "%s: dCurWriteBoosterBufferSize read failed %d\n",
6005 __func__, ret);
6006 return false;
6007 }
6008
6009 if (!cur_buf) {
6010 dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n",
6011 cur_buf);
6012 return false;
6013 }
6014 /* Keep VCC on for flushing only while the available buffer is below the flush threshold */
6015 return avail_buf < hba->vps->wb_flush_threshold;
6016 }
6017
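/*
 * Disable WriteBooster entirely and clear UFSHCD_CAP_WB_EN, e.g. when the
 * WriteBooster buffer lifetime has been exhausted.
 */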
6018 static void ufshcd_wb_force_disable(struct ufs_hba *hba)
6019 {
6020 if (ufshcd_is_wb_buf_flush_allowed(hba))
6021 ufshcd_wb_toggle_buf_flush(hba, false);
6022
6023 ufshcd_wb_toggle_buf_flush_during_h8(hba, false);
6024 ufshcd_wb_toggle(hba, false);
6025 hba->caps &= ~UFSHCD_CAP_WB_EN;
6026
6027 dev_info(hba->dev, "%s: WB force disabled\n", __func__);
6028 }
6029
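/*
 * Return true if bWriteBoosterBufferLifeTimeEst indicates that the
 * WriteBooster buffer lifetime has not been exhausted yet.
 */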
6030 static bool ufshcd_is_wb_buf_lifetime_available(struct ufs_hba *hba)
6031 {
6032 u32 lifetime;
6033 int ret;
6034 u8 index;
6035
6036 index = ufshcd_wb_get_query_index(hba);
6037 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
6038 QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST,
6039 index, 0, &lifetime);
6040 if (ret) {
6041 dev_err(hba->dev,
6042 "%s: bWriteBoosterBufferLifeTimeEst read failed %d\n",
6043 __func__, ret);
6044 return false;
6045 }
6046
6047 if (lifetime == UFS_WB_EXCEED_LIFETIME) {
6048 dev_err(hba->dev, "%s: WB buf lifetime is exhausted 0x%02X\n",
6049 __func__, lifetime);
6050 return false;
6051 }
6052
6053 dev_dbg(hba->dev, "%s: WB buf lifetime is 0x%02X\n",
6054 __func__, lifetime);
6055
6056 return true;
6057 }
6058
6059 static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
6060 {
6061 int ret;
6062 u32 avail_buf;
6063 u8 index;
6064
6065 if (!ufshcd_is_wb_allowed(hba))
6066 return false;
6067
6068 if (!ufshcd_is_wb_buf_lifetime_available(hba)) {
6069 ufshcd_wb_force_disable(hba);
6070 return false;
6071 }
6072
6073 /*
6074 * The ufs device needs the vcc to be ON to flush.
6075 * With user-space reduction enabled, it's enough to enable flush
6076 * by checking only the available buffer. The threshold
6077 * defined here is > 90% full.
6078 * With user-space preserved enabled, the current-buffer
6079 * should be checked too because the wb buffer size can reduce
6080 * when disk tends to be full. This info is provided by current
6081 * buffer (dCurrentWriteBoosterBufferSize). There's no point in
6082 * keeping vcc on when current buffer is empty.
6083 */
6084 index = ufshcd_wb_get_query_index(hba);
6085 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
6086 QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE,
6087 index, 0, &avail_buf);
6088 if (ret) {
6089 dev_warn(hba->dev, "%s: dAvailableWriteBoosterBufferSize read failed %d\n",
6090 __func__, ret);
6091 return false;
6092 }
6093
6094 if (!hba->dev_info.b_presrv_uspc_en)
6095 return avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10);
6096
6097 return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf);
6098 }
6099
6100 static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
6101 {
6102 struct ufs_hba *hba = container_of(to_delayed_work(work),
6103 struct ufs_hba,
6104 rpm_dev_flush_recheck_work);
6105 /*
6106 * To prevent unnecessary VCC power drain after device finishes
6107 * WriteBooster buffer flush or Auto BKOPs, force runtime resume
6108 * after a certain delay to recheck the threshold by next runtime
6109 * suspend.
6110 */
6111 ufshcd_rpm_get_sync(hba);
6112 ufshcd_rpm_put_sync(hba);
6113 }
6114
6115 /**
6116 * ufshcd_exception_event_handler - handle exceptions raised by device
6117 * @work: pointer to work data
6118 *
6119 * Read bExceptionEventStatus attribute from the device and handle the
6120 * exception event accordingly.
6121 */
6122 static void ufshcd_exception_event_handler(struct work_struct *work)
6123 {
6124 struct ufs_hba *hba;
6125 int err;
6126 u32 status = 0;
6127 hba = container_of(work, struct ufs_hba, eeh_work);
6128
6129 ufshcd_scsi_block_requests(hba);
6130 err = ufshcd_get_ee_status(hba, &status);
6131 if (err) {
6132 dev_err(hba->dev, "%s: failed to get exception status %d\n",
6133 __func__, err);
6134 goto out;
6135 }
6136
6137 trace_ufshcd_exception_event(dev_name(hba->dev), status);
6138
6139 if (status & hba->ee_drv_mask & MASK_EE_URGENT_BKOPS)
6140 ufshcd_bkops_exception_event_handler(hba);
6141
6142 if (status & hba->ee_drv_mask & MASK_EE_URGENT_TEMP)
6143 ufshcd_temp_exception_event_handler(hba, status);
6144
6145 ufs_debugfs_exception_event(hba, status);
6146 out:
6147 ufshcd_scsi_unblock_requests(hba);
6148 }
6149
6150 /* Complete requests that have door-bell cleared */
6151 static void ufshcd_complete_requests(struct ufs_hba *hba, bool force_compl)
6152 {
6153 if (is_mcq_enabled(hba))
6154 ufshcd_mcq_compl_pending_transfer(hba, force_compl);
6155 else
6156 ufshcd_transfer_req_compl(hba);
6157
6158 ufshcd_tmc_handler(hba);
6159 }
6160
6161 /**
6162 * ufshcd_quirk_dl_nac_errors - check whether error handling is required to
6163 * recover from the DL NAC errors.
6164 * @hba: per-adapter instance
6165 *
6166 * Return: true if error handling is required, false otherwise.
6167 */
6168 static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
6169 {
6170 unsigned long flags;
6171 bool err_handling = true;
6172
6173 spin_lock_irqsave(hba->host->host_lock, flags);
6174 /*
6175 * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around the
6176 * device fatal error and/or DL NAC & REPLAY timeout errors.
6177 */
6178 if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
6179 goto out;
6180
6181 if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
6182 ((hba->saved_err & UIC_ERROR) &&
6183 (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
6184 goto out;
6185
6186 if ((hba->saved_err & UIC_ERROR) &&
6187 (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
6188 int err;
6189 /*
6190 * wait for 50ms to see if we can get any other errors or not.
6191 */
6192 spin_unlock_irqrestore(hba->host->host_lock, flags);
6193 msleep(50);
6194 spin_lock_irqsave(hba->host->host_lock, flags);
6195
6196 /*
6197 * now check whether we have got any severe errors other than the
6198 * DL NAC error.
6199 */
6200 if ((hba->saved_err & INT_FATAL_ERRORS) ||
6201 ((hba->saved_err & UIC_ERROR) &&
6202 (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
6203 goto out;
6204
6205 /*
6206 * As DL NAC is the only error received so far, send out NOP
6207 * command to confirm if link is still active or not.
6208 * - If we don't get any response then do error recovery.
6209 * - If we get response then clear the DL NAC error bit.
6210 */
6211
6212 spin_unlock_irqrestore(hba->host->host_lock, flags);
6213 err = ufshcd_verify_dev_init(hba);
6214 spin_lock_irqsave(hba->host->host_lock, flags);
6215
6216 if (err)
6217 goto out;
6218
6219 /* Link seems to be alive hence ignore the DL NAC errors */
6220 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
6221 hba->saved_err &= ~UIC_ERROR;
6222 /* clear NAC error */
6223 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
6224 if (!hba->saved_uic_err)
6225 err_handling = false;
6226 }
6227 out:
6228 spin_unlock_irqrestore(hba->host->host_lock, flags);
6229 return err_handling;
6230 }
6231
6232 /* host lock must be held before calling this func */
6233 static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
6234 {
6235 return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
6236 (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
6237 }
6238
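/*
 * Schedule the error handler work and update the UFSHCD state accordingly.
 * Must be called with the SCSI host lock held.
 */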
6239 void ufshcd_schedule_eh_work(struct ufs_hba *hba)
6240 {
6241 lockdep_assert_held(hba->host->host_lock);
6242
6243 /* handle fatal errors only when link is not in error state */
6244 if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
6245 if (hba->force_reset || ufshcd_is_link_broken(hba) ||
6246 ufshcd_is_saved_err_fatal(hba))
6247 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
6248 else
6249 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
6250 queue_work(hba->eh_wq, &hba->eh_work);
6251 }
6252 }
6253
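/*
 * Schedule the error handler with hba->force_reset set so that a full reset
 * and restore is performed.
 */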
6254 static void ufshcd_force_error_recovery(struct ufs_hba *hba)
6255 {
6256 spin_lock_irq(hba->host->host_lock);
6257 hba->force_reset = true;
6258 ufshcd_schedule_eh_work(hba);
6259 spin_unlock_irq(hba->host->host_lock);
6260 }
6261
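/*
 * Allow or forbid clock scaling while holding the wb_mutex and the
 * clk_scaling_lock.
 */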
6262 static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
6263 {
6264 mutex_lock(&hba->wb_mutex);
6265 down_write(&hba->clk_scaling_lock);
6266 hba->clk_scaling.is_allowed = allow;
6267 up_write(&hba->clk_scaling_lock);
6268 mutex_unlock(&hba->wb_mutex);
6269 }
6270
6271 static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
6272 {
6273 if (suspend) {
6274 if (hba->clk_scaling.is_enabled)
6275 ufshcd_suspend_clkscaling(hba);
6276 ufshcd_clk_scaling_allow(hba, false);
6277 } else {
6278 ufshcd_clk_scaling_allow(hba, true);
6279 if (hba->clk_scaling.is_enabled)
6280 ufshcd_resume_clkscaling(hba);
6281 }
6282 }
6283
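/*
 * Prepare for error handling: make sure the device is resumed and powered,
 * block SCSI requests and wait for ongoing ufshcd_queuecommand() calls to
 * finish.
 */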
6284 static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
6285 {
6286 ufshcd_rpm_get_sync(hba);
6287 if (pm_runtime_status_suspended(&hba->ufs_device_wlun->sdev_gendev) ||
6288 hba->is_sys_suspended) {
6289 enum ufs_pm_op pm_op;
6290
6291 /*
6292 * Don't assume anything about the outcome of resume: if
6293 * resume fails, the IRQ and clocks can be OFF, and the power
6294 * supplies can be OFF or in LPM.
6295 */
6296 ufshcd_setup_hba_vreg(hba, true);
6297 ufshcd_enable_irq(hba);
6298 ufshcd_setup_vreg(hba, true);
6299 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
6300 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
6301 ufshcd_hold(hba);
6302 if (!ufshcd_is_clkgating_allowed(hba))
6303 ufshcd_setup_clocks(hba, true);
6304 pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM;
6305 ufshcd_vops_resume(hba, pm_op);
6306 } else {
6307 ufshcd_hold(hba);
6308 if (ufshcd_is_clkscaling_supported(hba) &&
6309 hba->clk_scaling.is_enabled)
6310 ufshcd_suspend_clkscaling(hba);
6311 ufshcd_clk_scaling_allow(hba, false);
6312 }
6313 ufshcd_scsi_block_requests(hba);
6314 /* Wait for ongoing ufshcd_queuecommand() calls to finish. */
6315 blk_mq_wait_quiesce_done(&hba->host->tag_set);
6316 cancel_work_sync(&hba->eeh_work);
6317 }
6318
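/* Undo the effects of ufshcd_err_handling_prepare(). */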
6319 static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
6320 {
6321 ufshcd_scsi_unblock_requests(hba);
6322 ufshcd_release(hba);
6323 if (ufshcd_is_clkscaling_supported(hba))
6324 ufshcd_clk_scaling_suspend(hba, false);
6325 ufshcd_rpm_put(hba);
6326 }
6327
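/* Return true if there is nothing left for the error handler to do. */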
6328 static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
6329 {
6330 return (!hba->is_powered || hba->shutting_down ||
6331 !hba->ufs_device_wlun ||
6332 hba->ufshcd_state == UFSHCD_STATE_ERROR ||
6333 (!(hba->saved_err || hba->saved_uic_err || hba->force_reset ||
6334 ufshcd_is_link_broken(hba))));
6335 }
6336
6337 #ifdef CONFIG_PM
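/*
 * Clear the runtime PM error status of the device WLUN and the hba device
 * and resume any consumer SCSI devices that got stuck because of it.
 */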
6338 static void ufshcd_recover_pm_error(struct ufs_hba *hba)
6339 {
6340 struct Scsi_Host *shost = hba->host;
6341 struct scsi_device *sdev;
6342 struct request_queue *q;
6343 int ret;
6344
6345 hba->is_sys_suspended = false;
6346 /*
6347 * Set RPM status of wlun device to RPM_ACTIVE,
6348 * this also clears its runtime error.
6349 */
6350 ret = pm_runtime_set_active(&hba->ufs_device_wlun->sdev_gendev);
6351
6352 /* hba device might have a runtime error otherwise */
6353 if (ret)
6354 ret = pm_runtime_set_active(hba->dev);
6355 /*
6356 * If wlun device had runtime error, we also need to resume those
6357 * consumer scsi devices in case any of them has failed to be
6358 * resumed due to supplier runtime resume failure. This is to unblock
6359 * blk_queue_enter in case there are bios waiting inside it.
6360 */
6361 if (!ret) {
6362 shost_for_each_device(sdev, shost) {
6363 q = sdev->request_queue;
6364 if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
6365 q->rpm_status == RPM_SUSPENDING))
6366 pm_request_resume(q->dev);
6367 }
6368 }
6369 }
6370 #else
6371 static inline void ufshcd_recover_pm_error(struct ufs_hba *hba)
6372 {
6373 }
6374 #endif
6375
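/*
 * Return true if the power mode reported by PA_PWRMODE differs from the
 * power mode stored in hba->pwr_info.
 */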
6376 static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba)
6377 {
6378 struct ufs_pa_layer_attr *pwr_info = &hba->pwr_info;
6379 u32 mode;
6380
6381 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode);
6382
6383 if (pwr_info->pwr_rx != ((mode >> PWRMODE_RX_OFFSET) & PWRMODE_MASK))
6384 return true;
6385
6386 if (pwr_info->pwr_tx != (mode & PWRMODE_MASK))
6387 return true;
6388
6389 return false;
6390 }
6391
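/*
 * blk_mq_tagset_busy_iter() callback that aborts one pending request.
 * Returns true, i.e. keeps iterating, only if the abort succeeded.
 */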
6392 static bool ufshcd_abort_one(struct request *rq, void *priv)
6393 {
6394 int *ret = priv;
6395 u32 tag = rq->tag;
6396 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
6397 struct scsi_device *sdev = cmd->device;
6398 struct Scsi_Host *shost = sdev->host;
6399 struct ufs_hba *hba = shost_priv(shost);
6400 struct ufshcd_lrb *lrbp = &hba->lrb[tag];
6401 struct ufs_hw_queue *hwq;
6402 unsigned long flags;
6403
6404 *ret = ufshcd_try_to_abort_task(hba, tag);
6405 dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
6406 hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
6407 *ret ? "failed" : "succeeded");
6408
6409 /* Release cmd in MCQ mode if abort succeeds */
6410 if (is_mcq_enabled(hba) && (*ret == 0)) {
6411 hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd));
6412 if (!hwq)
6413 return 0;
6414 spin_lock_irqsave(&hwq->cq_lock, flags);
6415 if (ufshcd_cmd_inflight(lrbp->cmd))
6416 ufshcd_release_scsi_cmd(hba, lrbp);
6417 spin_unlock_irqrestore(&hwq->cq_lock, flags);
6418 }
6419
6420 return *ret == 0;
6421 }
6422
6423 /**
6424 * ufshcd_abort_all - Abort all pending commands.
6425 * @hba: Host bus adapter pointer.
6426 *
6427 * Return: true if and only if the host controller needs to be reset.
6428 */
6429 static bool ufshcd_abort_all(struct ufs_hba *hba)
6430 {
6431 int tag, ret = 0;
6432
6433 blk_mq_tagset_busy_iter(&hba->host->tag_set, ufshcd_abort_one, &ret);
6434 if (ret)
6435 goto out;
6436
6437 /* Clear pending task management requests */
6438 for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
6439 ret = ufshcd_clear_tm_cmd(hba, tag);
6440 if (ret)
6441 goto out;
6442 }
6443
6444 out:
6445 /* Complete the requests that are cleared by s/w */
6446 ufshcd_complete_requests(hba, false);
6447
6448 return ret != 0;
6449 }
6450
6451 /**
6452 * ufshcd_err_handler - handle UFS errors that require s/w attention
6453 * @work: pointer to work structure
6454 */
6455 static void ufshcd_err_handler(struct work_struct *work)
6456 {
6457 int retries = MAX_ERR_HANDLER_RETRIES;
6458 struct ufs_hba *hba;
6459 unsigned long flags;
6460 bool needs_restore;
6461 bool needs_reset;
6462 int pmc_err;
6463
6464 hba = container_of(work, struct ufs_hba, eh_work);
6465
6466 dev_info(hba->dev,
6467 "%s started; HBA state %s; powered %d; shutting down %d; saved_err = %d; saved_uic_err = %d; force_reset = %d%s\n",
6468 __func__, ufshcd_state_name[hba->ufshcd_state],
6469 hba->is_powered, hba->shutting_down, hba->saved_err,
6470 hba->saved_uic_err, hba->force_reset,
6471 ufshcd_is_link_broken(hba) ? "; link is broken" : "");
6472
6473 down(&hba->host_sem);
6474 spin_lock_irqsave(hba->host->host_lock, flags);
6475 if (ufshcd_err_handling_should_stop(hba)) {
6476 if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
6477 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6478 spin_unlock_irqrestore(hba->host->host_lock, flags);
6479 up(&hba->host_sem);
6480 return;
6481 }
6482 ufshcd_set_eh_in_progress(hba);
6483 spin_unlock_irqrestore(hba->host->host_lock, flags);
6484 ufshcd_err_handling_prepare(hba);
6485 /* Complete requests that have door-bell cleared by h/w */
6486 ufshcd_complete_requests(hba, false);
6487 spin_lock_irqsave(hba->host->host_lock, flags);
6488 again:
6489 needs_restore = false;
6490 needs_reset = false;
6491
6492 if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
6493 hba->ufshcd_state = UFSHCD_STATE_RESET;
6494 /*
6495 * A full reset and restore might have happened after preparation
6496 * is finished, double check whether we should stop.
6497 */
6498 if (ufshcd_err_handling_should_stop(hba))
6499 goto skip_err_handling;
6500
6501 if ((hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) &&
6502 !hba->force_reset) {
6503 bool ret;
6504
6505 spin_unlock_irqrestore(hba->host->host_lock, flags);
6506 /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
6507 ret = ufshcd_quirk_dl_nac_errors(hba);
6508 spin_lock_irqsave(hba->host->host_lock, flags);
6509 if (!ret && ufshcd_err_handling_should_stop(hba))
6510 goto skip_err_handling;
6511 }
6512
6513 if ((hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
6514 (hba->saved_uic_err &&
6515 (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
6516 bool pr_prdt = !!(hba->saved_err & SYSTEM_BUS_FATAL_ERROR);
6517
6518 spin_unlock_irqrestore(hba->host->host_lock, flags);
6519 ufshcd_print_host_state(hba);
6520 ufshcd_print_pwr_info(hba);
6521 ufshcd_print_evt_hist(hba);
6522 ufshcd_print_tmrs(hba, hba->outstanding_tasks);
6523 ufshcd_print_trs_all(hba, pr_prdt);
6524 spin_lock_irqsave(hba->host->host_lock, flags);
6525 }
6526
6527 /*
6528 * if host reset is required then skip clearing the pending
6529 * transfers forcefully because they will get cleared during
6530 * host reset and restore
6531 */
6532 if (hba->force_reset || ufshcd_is_link_broken(hba) ||
6533 ufshcd_is_saved_err_fatal(hba) ||
6534 ((hba->saved_err & UIC_ERROR) &&
6535 (hba->saved_uic_err & (UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
6536 UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))) {
6537 needs_reset = true;
6538 goto do_reset;
6539 }
6540
6541 /*
6542 * If LINERESET was caught, UFS might have been put to PWM mode,
6543 * check if power mode restore is needed.
6544 */
6545 if (hba->saved_uic_err & UFSHCD_UIC_PA_GENERIC_ERROR) {
6546 hba->saved_uic_err &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
6547 if (!hba->saved_uic_err)
6548 hba->saved_err &= ~UIC_ERROR;
6549 spin_unlock_irqrestore(hba->host->host_lock, flags);
6550 if (ufshcd_is_pwr_mode_restore_needed(hba))
6551 needs_restore = true;
6552 spin_lock_irqsave(hba->host->host_lock, flags);
6553 if (!hba->saved_err && !needs_restore)
6554 goto skip_err_handling;
6555 }
6556
6557 hba->silence_err_logs = true;
6558 /* release lock as clear command might sleep */
6559 spin_unlock_irqrestore(hba->host->host_lock, flags);
6560
6561 needs_reset = ufshcd_abort_all(hba);
6562
6563 spin_lock_irqsave(hba->host->host_lock, flags);
6564 hba->silence_err_logs = false;
6565 if (needs_reset)
6566 goto do_reset;
6567
6568 /*
6569 * After all reqs and tasks are cleared from doorbell,
6570 * now it is safe to restore the power mode.
6571 */
6572 if (needs_restore) {
6573 spin_unlock_irqrestore(hba->host->host_lock, flags);
6574 /*
6575 * Hold the scaling lock just in case dev cmds
6576 * are sent via bsg and/or sysfs.
6577 */
6578 down_write(&hba->clk_scaling_lock);
6579 hba->force_pmc = true;
6580 pmc_err = ufshcd_config_pwr_mode(hba, &(hba->pwr_info));
6581 if (pmc_err) {
6582 needs_reset = true;
6583 dev_err(hba->dev, "%s: Failed to restore power mode, err = %d\n",
6584 __func__, pmc_err);
6585 }
6586 hba->force_pmc = false;
6587 ufshcd_print_pwr_info(hba);
6588 up_write(&hba->clk_scaling_lock);
6589 spin_lock_irqsave(hba->host->host_lock, flags);
6590 }
6591
6592 do_reset:
6593 /* Fatal errors need reset */
6594 if (needs_reset) {
6595 int err;
6596
6597 hba->force_reset = false;
6598 spin_unlock_irqrestore(hba->host->host_lock, flags);
6599 err = ufshcd_reset_and_restore(hba);
6600 if (err)
6601 dev_err(hba->dev, "%s: reset and restore failed with err %d\n",
6602 __func__, err);
6603 else
6604 ufshcd_recover_pm_error(hba);
6605 spin_lock_irqsave(hba->host->host_lock, flags);
6606 }
6607
6608 skip_err_handling:
6609 if (!needs_reset) {
6610 if (hba->ufshcd_state == UFSHCD_STATE_RESET)
6611 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6612 if (hba->saved_err || hba->saved_uic_err)
6613 dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
6614 __func__, hba->saved_err, hba->saved_uic_err);
6615 }
6616 /* Exit in an operational state or dead */
6617 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL &&
6618 hba->ufshcd_state != UFSHCD_STATE_ERROR) {
6619 if (--retries)
6620 goto again;
6621 hba->ufshcd_state = UFSHCD_STATE_ERROR;
6622 }
6623 ufshcd_clear_eh_in_progress(hba);
6624 spin_unlock_irqrestore(hba->host->host_lock, flags);
6625 ufshcd_err_handling_unprepare(hba);
6626 up(&hba->host_sem);
6627
6628 dev_info(hba->dev, "%s finished; HBA state %s\n", __func__,
6629 ufshcd_state_name[hba->ufshcd_state]);
6630 }
6631
6632 /**
6633 * ufshcd_update_uic_error - check and set fatal UIC error flags.
6634 * @hba: per-adapter instance
6635 *
6636 * Return:
6637 * IRQ_HANDLED - If interrupt is valid
6638 * IRQ_NONE - If invalid interrupt
6639 */
6640 static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
6641 {
6642 u32 reg;
6643 irqreturn_t retval = IRQ_NONE;
6644
6645 /* PHY layer error */
6646 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
6647 if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
6648 (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
6649 ufshcd_update_evt_hist(hba, UFS_EVT_PA_ERR, reg);
6650 /*
6651 * To know whether this error is fatal or not, DB timeout
6652 * must be checked but this error is handled separately.
6653 */
6654 if (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)
6655 dev_dbg(hba->dev, "%s: UIC Lane error reported\n",
6656 __func__);
6657
6658 /* Got a LINERESET indication. */
6659 if (reg & UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR) {
6660 struct uic_command *cmd = NULL;
6661
6662 hba->uic_error |= UFSHCD_UIC_PA_GENERIC_ERROR;
6663 if (hba->uic_async_done && hba->active_uic_cmd)
6664 cmd = hba->active_uic_cmd;
6665 /*
6666 * Ignore the LINERESET during power mode change
6667 * operation via DME_SET command.
6668 */
6669 if (cmd && (cmd->command == UIC_CMD_DME_SET))
6670 hba->uic_error &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
6671 }
6672 retval |= IRQ_HANDLED;
6673 }
6674
6675 /* PA_INIT_ERROR is fatal and needs UIC reset */
6676 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
6677 if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
6678 (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
6679 ufshcd_update_evt_hist(hba, UFS_EVT_DL_ERR, reg);
6680
6681 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
6682 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
6683 else if (hba->dev_quirks &
6684 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
6685 if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
6686 hba->uic_error |=
6687 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
6688 else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
6689 hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
6690 }
6691 retval |= IRQ_HANDLED;
6692 }
6693
6694 /* UIC NL/TL/DME errors needs software retry */
6695 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
6696 if ((reg & UIC_NETWORK_LAYER_ERROR) &&
6697 (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
6698 ufshcd_update_evt_hist(hba, UFS_EVT_NL_ERR, reg);
6699 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
6700 retval |= IRQ_HANDLED;
6701 }
6702
6703 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
6704 if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
6705 (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
6706 ufshcd_update_evt_hist(hba, UFS_EVT_TL_ERR, reg);
6707 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
6708 retval |= IRQ_HANDLED;
6709 }
6710
6711 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
6712 if ((reg & UIC_DME_ERROR) &&
6713 (reg & UIC_DME_ERROR_CODE_MASK)) {
6714 ufshcd_update_evt_hist(hba, UFS_EVT_DME_ERR, reg);
6715 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
6716 retval |= IRQ_HANDLED;
6717 }
6718
6719 dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
6720 __func__, hba->uic_error);
6721 return retval;
6722 }
6723
6724 /**
6725 * ufshcd_check_errors - Check for errors that need s/w attention
6726 * @hba: per-adapter instance
6727 * @intr_status: interrupt status generated by the controller
6728 *
6729 * Return:
6730 * IRQ_HANDLED - If interrupt is valid
6731 * IRQ_NONE - If invalid interrupt
6732 */
6733 static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
6734 {
6735 bool queue_eh_work = false;
6736 irqreturn_t retval = IRQ_NONE;
6737
6738 spin_lock(hba->host->host_lock);
6739 hba->errors |= UFSHCD_ERROR_MASK & intr_status;
6740
6741 if (hba->errors & INT_FATAL_ERRORS) {
6742 ufshcd_update_evt_hist(hba, UFS_EVT_FATAL_ERR,
6743 hba->errors);
6744 queue_eh_work = true;
6745 }
6746
6747 if (hba->errors & UIC_ERROR) {
6748 hba->uic_error = 0;
6749 retval = ufshcd_update_uic_error(hba);
6750 if (hba->uic_error)
6751 queue_eh_work = true;
6752 }
6753
6754 if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) {
6755 dev_err(hba->dev,
6756 "%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
6757 __func__, (hba->errors & UIC_HIBERNATE_ENTER) ?
6758 "Enter" : "Exit",
6759 hba->errors, ufshcd_get_upmcrs(hba));
6760 ufshcd_update_evt_hist(hba, UFS_EVT_AUTO_HIBERN8_ERR,
6761 hba->errors);
6762 ufshcd_set_link_broken(hba);
6763 queue_eh_work = true;
6764 }
6765
6766 if (queue_eh_work) {
6767 /*
6768 * update the transfer error masks to sticky bits, let's do this
6769 * irrespective of current ufshcd_state.
6770 */
6771 hba->saved_err |= hba->errors;
6772 hba->saved_uic_err |= hba->uic_error;
6773
6774 /* dump controller state before resetting */
6775 if ((hba->saved_err &
6776 (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
6777 (hba->saved_uic_err &&
6778 (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
6779 dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
6780 __func__, hba->saved_err,
6781 hba->saved_uic_err);
6782 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE,
6783 "host_regs: ");
6784 ufshcd_print_pwr_info(hba);
6785 }
6786 ufshcd_schedule_eh_work(hba);
6787 retval |= IRQ_HANDLED;
6788 }
6789 /*
6790 * if (!queue_eh_work) -
6791 * Other errors are either non-fatal where host recovers
6792 * itself without s/w intervention or errors that will be
6793 * handled by the SCSI core layer.
6794 */
6795 hba->errors = 0;
6796 hba->uic_error = 0;
6797 spin_unlock(hba->host->host_lock);
6798 return retval;
6799 }
6800
6801 /**
6802 * ufshcd_tmc_handler - handle task management function completion
6803 * @hba: per adapter instance
6804 *
6805 * Return:
6806 * IRQ_HANDLED - If interrupt is valid
6807 * IRQ_NONE - If invalid interrupt
6808 */
6809 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
6810 {
6811 unsigned long flags, pending, issued;
6812 irqreturn_t ret = IRQ_NONE;
6813 int tag;
6814
6815 spin_lock_irqsave(hba->host->host_lock, flags);
6816 pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
6817 issued = hba->outstanding_tasks & ~pending;
6818 for_each_set_bit(tag, &issued, hba->nutmrs) {
6819 struct request *req = hba->tmf_rqs[tag];
6820 struct completion *c = req->end_io_data;
6821
6822 complete(c);
6823 ret = IRQ_HANDLED;
6824 }
6825 spin_unlock_irqrestore(hba->host->host_lock, flags);
6826
6827 return ret;
6828 }
6829
6830 /**
6831 * ufshcd_handle_mcq_cq_events - handle MCQ completion queue events
6832 * @hba: per adapter instance
6833 *
6834 * Return: IRQ_HANDLED if interrupt is handled.
6835 */
6836 static irqreturn_t ufshcd_handle_mcq_cq_events(struct ufs_hba *hba)
6837 {
6838 struct ufs_hw_queue *hwq;
6839 unsigned long outstanding_cqs;
6840 unsigned int nr_queues;
6841 int i, ret;
6842 u32 events;
6843
6844 ret = ufshcd_vops_get_outstanding_cqs(hba, &outstanding_cqs);
6845 if (ret)
6846 outstanding_cqs = (1U << hba->nr_hw_queues) - 1;
6847
6848 /* Exclude the poll queues */
6849 nr_queues = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
6850 for_each_set_bit(i, &outstanding_cqs, nr_queues) {
6851 hwq = &hba->uhq[i];
6852
6853 events = ufshcd_mcq_read_cqis(hba, i);
6854 if (events)
6855 ufshcd_mcq_write_cqis(hba, events, i);
6856
6857 if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS)
6858 ufshcd_mcq_poll_cqe_lock(hba, hwq);
6859 }
6860
6861 return IRQ_HANDLED;
6862 }
6863
6864 /**
6865 * ufshcd_sl_intr - Interrupt service routine
6866 * @hba: per adapter instance
6867 * @intr_status: contains interrupts generated by the controller
6868 *
6869 * Return:
6870 * IRQ_HANDLED - If interrupt is valid
6871 * IRQ_NONE - If invalid interrupt
6872 */
6873 static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
6874 {
6875 irqreturn_t retval = IRQ_NONE;
6876
6877 if (intr_status & UFSHCD_UIC_MASK)
6878 retval |= ufshcd_uic_cmd_compl(hba, intr_status);
6879
6880 if (intr_status & UFSHCD_ERROR_MASK || hba->errors)
6881 retval |= ufshcd_check_errors(hba, intr_status);
6882
6883 if (intr_status & UTP_TASK_REQ_COMPL)
6884 retval |= ufshcd_tmc_handler(hba);
6885
6886 if (intr_status & UTP_TRANSFER_REQ_COMPL)
6887 retval |= ufshcd_transfer_req_compl(hba);
6888
6889 if (intr_status & MCQ_CQ_EVENT_STATUS)
6890 retval |= ufshcd_handle_mcq_cq_events(hba);
6891
6892 return retval;
6893 }
6894
6895 /**
6896 * ufshcd_intr - Main interrupt service routine
6897 * @irq: irq number
6898 * @__hba: pointer to adapter instance
6899 *
6900 * Return:
6901 * IRQ_HANDLED - If interrupt is valid
6902 * IRQ_NONE - If invalid interrupt
6903 */
6904 static irqreturn_t ufshcd_intr(int irq, void *__hba)
6905 {
6906 u32 intr_status, enabled_intr_status = 0;
6907 irqreturn_t retval = IRQ_NONE;
6908 struct ufs_hba *hba = __hba;
6909 int retries = hba->nutrs;
6910
6911 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
6912 hba->ufs_stats.last_intr_status = intr_status;
6913 hba->ufs_stats.last_intr_ts = local_clock();
6914
6915 /*
6916 * Up to hba->nutrs requests can be in flight. In the worst case they
6917 * finish one by one after the interrupt status has been read, so make
6918 * sure they are handled by re-reading the interrupt status in a loop
6919 * until all of the requests have been processed before returning.
6920 */
6921 while (intr_status && retries--) {
6922 enabled_intr_status =
6923 intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
6924 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
6925 if (enabled_intr_status)
6926 retval |= ufshcd_sl_intr(hba, enabled_intr_status);
6927
6928 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
6929 }
6930
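/*
 * Warn about an enabled interrupt that was left unhandled, except for a
 * transfer-completion event with no outstanding requests or while the
 * error handler is already in progress.
 */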
6931 if (enabled_intr_status && retval == IRQ_NONE &&
6932 (!(enabled_intr_status & UTP_TRANSFER_REQ_COMPL) ||
6933 hba->outstanding_reqs) && !ufshcd_eh_in_progress(hba)) {
6934 dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
6935 __func__,
6936 intr_status,
6937 hba->ufs_stats.last_intr_status,
6938 enabled_intr_status);
6939 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
6940 }
6941
6942 return retval;
6943 }
6944
6945 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
6946 {
6947 int err = 0;
6948 u32 mask = 1 << tag;
6949 unsigned long flags;
6950
6951 if (!test_bit(tag, &hba->outstanding_tasks))
6952 goto out;
6953
6954 spin_lock_irqsave(hba->host->host_lock, flags);
6955 ufshcd_utmrl_clear(hba, tag);
6956 spin_unlock_irqrestore(hba->host->host_lock, flags);
6957
6958 /* poll for max. 1 sec to clear door bell register by h/w */
6959 err = ufshcd_wait_for_register(hba,
6960 REG_UTP_TASK_REQ_DOOR_BELL,
6961 mask, 0, 1000, 1000);
6962
6963 dev_err(hba->dev, "Clearing task management function with tag %d %s\n",
6964 tag, err < 0 ? "failed" : "succeeded");
6965
6966 out:
6967 return err;
6968 }
6969
6970 static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
6971 struct utp_task_req_desc *treq, u8 tm_function)
6972 {
6973 struct request_queue *q = hba->tmf_queue;
6974 struct Scsi_Host *host = hba->host;
6975 DECLARE_COMPLETION_ONSTACK(wait);
6976 struct request *req;
6977 unsigned long flags;
6978 int task_tag, err;
6979
6980 /*
6981 * blk_mq_alloc_request() is used here only to get a free tag.
6982 */
6983 req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
6984 if (IS_ERR(req))
6985 return PTR_ERR(req);
6986
6987 req->end_io_data = &wait;
6988 ufshcd_hold(hba);
6989
6990 spin_lock_irqsave(host->host_lock, flags);
6991
6992 task_tag = req->tag;
6993 WARN_ONCE(task_tag < 0 || task_tag >= hba->nutmrs, "Invalid tag %d\n",
6994 task_tag);
6995 hba->tmf_rqs[req->tag] = req;
6996 treq->upiu_req.req_header.task_tag = task_tag;
6997
6998 memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
6999 ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function);
7000
7001 /* send command to the controller */
7002 __set_bit(task_tag, &hba->outstanding_tasks);
7003
7004 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);
7005 /* Make sure that doorbell is committed immediately */
7006 wmb();
7007
7008 spin_unlock_irqrestore(host->host_lock, flags);
7009
7010 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_SEND);
7011
7012 /* wait until the task management command is completed */
7013 err = wait_for_completion_io_timeout(&wait,
7014 msecs_to_jiffies(TM_CMD_TIMEOUT));
7015 if (!err) {
7016 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_ERR);
7017 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
7018 __func__, tm_function);
7019 if (ufshcd_clear_tm_cmd(hba, task_tag))
7020 dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
7021 __func__, task_tag);
7022 err = -ETIMEDOUT;
7023 } else {
7024 err = 0;
7025 memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq));
7026
7027 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_COMP);
7028 }
7029
7030 spin_lock_irqsave(hba->host->host_lock, flags);
7031 hba->tmf_rqs[req->tag] = NULL;
7032 __clear_bit(task_tag, &hba->outstanding_tasks);
7033 spin_unlock_irqrestore(hba->host->host_lock, flags);
7034
7035 ufshcd_release(hba);
7036 blk_mq_free_request(req);
7037
7038 return err;
7039 }
7040
7041 /**
7042 * ufshcd_issue_tm_cmd - issues task management commands to controller
7043 * @hba: per adapter instance
7044 * @lun_id: LUN ID to which TM command is sent
7045 * @task_id: task ID to which the TM command is applicable
7046 * @tm_function: task management function opcode
7047 * @tm_response: task management service response return value
7048 *
7049 * Return: non-zero value on error, zero on success.
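*
* For example, ufshcd_eh_device_reset_handler() below issues a logical unit
* reset with:
*
*	err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
*
* and treats it as successful only when err is zero and resp is
* UPIU_TASK_MANAGEMENT_FUNC_COMPL.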
7050 */
7051 static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
7052 u8 tm_function, u8 *tm_response)
7053 {
7054 struct utp_task_req_desc treq = { };
7055 enum utp_ocs ocs_value;
7056 int err;
7057
7058 /* Configure task request descriptor */
7059 treq.header.interrupt = 1;
7060 treq.header.ocs = OCS_INVALID_COMMAND_STATUS;
7061
7062 /* Configure task request UPIU */
7063 treq.upiu_req.req_header.transaction_code = UPIU_TRANSACTION_TASK_REQ;
7064 treq.upiu_req.req_header.lun = lun_id;
7065 treq.upiu_req.req_header.tm_function = tm_function;
7066
7067 /*
7068 * The host shall provide the same value for LUN field in the basic
7069 * header and for Input Parameter.
7070 */
7071 treq.upiu_req.input_param1 = cpu_to_be32(lun_id);
7072 treq.upiu_req.input_param2 = cpu_to_be32(task_id);
7073
7074 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function);
7075 if (err == -ETIMEDOUT)
7076 return err;
7077
7078 ocs_value = treq.header.ocs & MASK_OCS;
7079 if (ocs_value != OCS_SUCCESS)
7080 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
7081 __func__, ocs_value);
7082 else if (tm_response)
7083 *tm_response = be32_to_cpu(treq.upiu_rsp.output_param1) &
7084 MASK_TM_SERVICE_RESP;
7085 return err;
7086 }
7087
7088 /**
7089 * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests
7090 * @hba: per-adapter instance
7091 * @req_upiu: upiu request
7092 * @rsp_upiu: upiu reply
7093 * @desc_buff: pointer to descriptor buffer, NULL if NA
7094 * @buff_len: descriptor size, 0 if NA
7095 * @cmd_type: specifies the type (NOP, Query...)
7096 * @desc_op: descriptor operation
7097 *
7098 * These types of requests use the UTP Transfer Request Descriptor (UTRD).
7099 * Therefore, they "ride" the device management infrastructure: they use its
7100 * tag and task work queues.
7101 *
7102 * Since there is only one available tag for device management commands,
7103 * the caller is expected to hold the hba->dev_cmd.lock mutex.
7104 *
7105 * Return: 0 upon success; < 0 upon failure.
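*
* Typical call pattern, as used by ufshcd_exec_raw_upiu_cmd() below:
*
*	ufshcd_hold(hba);
*	mutex_lock(&hba->dev_cmd.lock);
*	err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
*					   desc_buff, buff_len,
*					   cmd_type, desc_op);
*	mutex_unlock(&hba->dev_cmd.lock);
*	ufshcd_release(hba);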
7106 */
7107 static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
7108 struct utp_upiu_req *req_upiu,
7109 struct utp_upiu_req *rsp_upiu,
7110 u8 *desc_buff, int *buff_len,
7111 enum dev_cmd_type cmd_type,
7112 enum query_opcode desc_op)
7113 {
7114 DECLARE_COMPLETION_ONSTACK(wait);
7115 const u32 tag = hba->reserved_slot;
7116 struct ufshcd_lrb *lrbp;
7117 int err = 0;
7118 u8 upiu_flags;
7119
7120 /* Protects use of hba->reserved_slot. */
7121 lockdep_assert_held(&hba->dev_cmd.lock);
7122
7123 down_read(&hba->clk_scaling_lock);
7124
7125 lrbp = &hba->lrb[tag];
7126 lrbp->cmd = NULL;
7127 lrbp->task_tag = tag;
7128 lrbp->lun = 0;
7129 lrbp->intr_cmd = true;
7130 ufshcd_prepare_lrbp_crypto(NULL, lrbp);
7131 hba->dev_cmd.type = cmd_type;
7132
7133 if (hba->ufs_version <= ufshci_version(1, 1))
7134 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
7135 else
7136 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
7137
7138 /* update the task tag in the request upiu */
7139 req_upiu->header.task_tag = tag;
7140
7141 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0);
7142
7143 /* just copy the upiu request as it is */
7144 memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
7145 if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) {
7146 /* The Data Segment Area is optional depending upon the query
7147 * function value. For WRITE DESCRIPTOR, the data segment
7148 * follows right after the Transaction Specific Fields (TSF).
7149 */
7150 memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len);
7151 *buff_len = 0;
7152 }
7153
7154 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
7155
7156 hba->dev_cmd.complete = &wait;
7157
7158 ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
7159
7160 ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
7161 /*
7162 * Ignore the return value here - ufshcd_check_query_response is
7163 * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
7164 * Read the response directly, ignoring all errors.
7165 */
7166 ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);
7167
7168 /* just copy the upiu response as it is */
7169 memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
7170 if (desc_buff && desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
7171 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu);
7172 u16 resp_len = be16_to_cpu(lrbp->ucd_rsp_ptr->header
7173 .data_segment_length);
7174
7175 if (*buff_len >= resp_len) {
7176 memcpy(desc_buff, descp, resp_len);
7177 *buff_len = resp_len;
7178 } else {
7179 dev_warn(hba->dev,
7180 "%s: rsp size %d is bigger than buffer size %d",
7181 __func__, resp_len, *buff_len);
7182 *buff_len = 0;
7183 err = -EINVAL;
7184 }
7185 }
7186 ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
7187 (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
7188
7189 up_read(&hba->clk_scaling_lock);
7190 return err;
7191 }
7192
7193 /**
7194 * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands
7195 * @hba: per-adapter instance
7196 * @req_upiu: upiu request
7197 * @rsp_upiu: upiu reply - only 8 DW as we do not support scsi commands
7198 * @msgcode: message code, one of UPIU Transaction Codes Initiator to Target
7199 * @desc_buff: pointer to descriptor buffer, NULL if NA
7200 * @buff_len: descriptor size, 0 if NA
7201 * @desc_op: descriptor operation
7202 *
7203 * Supports UTP Transfer requests (nop and query), and UTP Task
7204 * Management requests.
7205 * It is up to the caller to fill the UPIU content properly, as it will
7206 * be copied without any further input validation.
7207 *
7208 * Return: 0 upon success; < 0 upon failure.
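*
* Illustrative sketch only: a caller (e.g. the BSG path) sending a raw
* NOP OUT UPIU, with the request and response buffers provided by the caller:
*
*	struct utp_upiu_req req = { };	/* header filled by the caller */
*	struct utp_upiu_req rsp = { };
*	int len = 0;
*
*	err = ufshcd_exec_raw_upiu_cmd(hba, &req, &rsp,
*				       UPIU_TRANSACTION_NOP_OUT,
*				       NULL, &len, UPIU_QUERY_OPCODE_NOP);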
7209 */
7210 int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
7211 struct utp_upiu_req *req_upiu,
7212 struct utp_upiu_req *rsp_upiu,
7213 enum upiu_request_transaction msgcode,
7214 u8 *desc_buff, int *buff_len,
7215 enum query_opcode desc_op)
7216 {
7217 int err;
7218 enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY;
7219 struct utp_task_req_desc treq = { };
7220 enum utp_ocs ocs_value;
7221 u8 tm_f = req_upiu->header.tm_function;
7222
7223 switch (msgcode) {
7224 case UPIU_TRANSACTION_NOP_OUT:
7225 cmd_type = DEV_CMD_TYPE_NOP;
7226 fallthrough;
7227 case UPIU_TRANSACTION_QUERY_REQ:
7228 ufshcd_hold(hba);
7229 mutex_lock(&hba->dev_cmd.lock);
7230 err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
7231 desc_buff, buff_len,
7232 cmd_type, desc_op);
7233 mutex_unlock(&hba->dev_cmd.lock);
7234 ufshcd_release(hba);
7235
7236 break;
7237 case UPIU_TRANSACTION_TASK_REQ:
7238 treq.header.interrupt = 1;
7239 treq.header.ocs = OCS_INVALID_COMMAND_STATUS;
7240
7241 memcpy(&treq.upiu_req, req_upiu, sizeof(*req_upiu));
7242
7243 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
7244 if (err == -ETIMEDOUT)
7245 break;
7246
7247 ocs_value = treq.header.ocs & MASK_OCS;
7248 if (ocs_value != OCS_SUCCESS) {
7249 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
7250 ocs_value);
7251 break;
7252 }
7253
7254 memcpy(rsp_upiu, &treq.upiu_rsp, sizeof(*rsp_upiu));
7255
7256 break;
7257 default:
7258 err = -EINVAL;
7259
7260 break;
7261 }
7262
7263 return err;
7264 }
7265
7266 /**
7267 * ufshcd_advanced_rpmb_req_handler - handle advanced RPMB request
7268 * @hba: per adapter instance
7269 * @req_upiu: upiu request
7270 * @rsp_upiu: upiu reply
7271 * @req_ehs: EHS field which contains Advanced RPMB Request Message
7272 * @rsp_ehs: EHS field which returns Advanced RPMB Response Message
7273 * @sg_cnt: The number of sg lists actually used
7274 * @sg_list: Pointer to SG list when DATA IN/OUT UPIU is required in ARPMB operation
7275 * @dir: DMA direction
7276 *
7277 * Return: zero on success, non-zero on failure.
7278 */
7279 int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *req_upiu,
7280 struct utp_upiu_req *rsp_upiu, struct ufs_ehs *req_ehs,
7281 struct ufs_ehs *rsp_ehs, int sg_cnt, struct scatterlist *sg_list,
7282 enum dma_data_direction dir)
7283 {
7284 DECLARE_COMPLETION_ONSTACK(wait);
7285 const u32 tag = hba->reserved_slot;
7286 struct ufshcd_lrb *lrbp;
7287 int err = 0;
7288 int result;
7289 u8 upiu_flags;
7290 u8 *ehs_data;
7291 u16 ehs_len;
7292
7293 /* Protects use of hba->reserved_slot. */
7294 ufshcd_hold(hba);
7295 mutex_lock(&hba->dev_cmd.lock);
7296 down_read(&hba->clk_scaling_lock);
7297
7298 lrbp = &hba->lrb[tag];
7299 lrbp->cmd = NULL;
7300 lrbp->task_tag = tag;
7301 lrbp->lun = UFS_UPIU_RPMB_WLUN;
7302
7303 lrbp->intr_cmd = true;
7304 ufshcd_prepare_lrbp_crypto(NULL, lrbp);
7305 hba->dev_cmd.type = DEV_CMD_TYPE_RPMB;
7306
7307 /* Advanced RPMB starts from UFS 4.0, so its command type is UTP_CMD_TYPE_UFS_STORAGE */
7308 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
7309
7310 /*
7311 * According to the UFSHCI 4.0 specification (page 24), if EHSLUTRDS is 0, the host
7312 * controller takes the EHS length from the CMD UPIU, so the driver uses the EHS Length
7313 * field in the CMD UPIU. If it is 1, the host controller takes the EHS length from the UTRD.
7314 */
7315 if (hba->capabilities & MASK_EHSLUTRD_SUPPORTED)
7316 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 2);
7317 else
7318 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 0);
7319
7320 /* update the task tag */
7321 req_upiu->header.task_tag = tag;
7322
7323 /* copy the UPIU (which contains the CDB) request as it is */
7324 memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
7325 /* Copy EHS, starting with byte32, immediately after the CDB package */
7326 memcpy(lrbp->ucd_req_ptr + 1, req_ehs, sizeof(*req_ehs));
7327
7328 if (dir != DMA_NONE && sg_list)
7329 ufshcd_sgl_to_prdt(hba, lrbp, sg_cnt, sg_list);
7330
7331 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
7332
7333 hba->dev_cmd.complete = &wait;
7334
7335 ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
7336
7337 err = ufshcd_wait_for_dev_cmd(hba, lrbp, ADVANCED_RPMB_REQ_TIMEOUT);
7338
7339 if (!err) {
7340 /* Just copy the upiu response as it is */
7341 memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
7342 /* Get the response UPIU result */
7343 result = (lrbp->ucd_rsp_ptr->header.response << 8) |
7344 lrbp->ucd_rsp_ptr->header.status;
7345
7346 ehs_len = lrbp->ucd_rsp_ptr->header.ehs_length;
7347 /*
7348 * Since the bLength in EHS indicates the total size of the EHS Header and EHS Data
7349 * in 32 Byte units, the value of the bLength Request/Response for Advanced RPMB
7350 * Message is 02h
7351 */
7352 if (ehs_len == 2 && rsp_ehs) {
7353 /*
7354 * ucd_rsp_ptr points to a buffer with a length of 512 bytes
7355 * (ALIGNED_UPIU_SIZE = 512), and the EHS data just starts from byte32
7356 */
7357 ehs_data = (u8 *)lrbp->ucd_rsp_ptr + EHS_OFFSET_IN_RESPONSE;
7358 memcpy(rsp_ehs, ehs_data, ehs_len * 32);
7359 }
7360 }
7361
7362 up_read(&hba->clk_scaling_lock);
7363 mutex_unlock(&hba->dev_cmd.lock);
7364 ufshcd_release(hba);
7365 return err ? : result;
7366 }
7367
7368 /**
7369 * ufshcd_eh_device_reset_handler() - Reset a single logical unit.
7370 * @cmd: SCSI command pointer
7371 *
7372 * Return: SUCCESS or FAILED.
7373 */
7374 static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
7375 {
7376 unsigned long flags, pending_reqs = 0, not_cleared = 0;
7377 struct Scsi_Host *host;
7378 struct ufs_hba *hba;
7379 struct ufs_hw_queue *hwq;
7380 struct ufshcd_lrb *lrbp;
7381 u32 pos, not_cleared_mask = 0;
7382 int err;
7383 u8 resp = 0xF, lun;
7384
7385 host = cmd->device->host;
7386 hba = shost_priv(host);
7387
7388 lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
7389 err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
7390 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
7391 if (!err)
7392 err = resp;
7393 goto out;
7394 }
7395
7396 if (is_mcq_enabled(hba)) {
7397 for (pos = 0; pos < hba->nutrs; pos++) {
7398 lrbp = &hba->lrb[pos];
7399 if (ufshcd_cmd_inflight(lrbp->cmd) &&
7400 lrbp->lun == lun) {
7401 ufshcd_clear_cmd(hba, pos);
7402 hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd));
7403 ufshcd_mcq_poll_cqe_lock(hba, hwq);
7404 }
7405 }
7406 err = 0;
7407 goto out;
7408 }
7409
7410 /* clear the commands that were pending for corresponding LUN */
7411 spin_lock_irqsave(&hba->outstanding_lock, flags);
7412 for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs)
7413 if (hba->lrb[pos].lun == lun)
7414 __set_bit(pos, &pending_reqs);
7415 hba->outstanding_reqs &= ~pending_reqs;
7416 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
7417
7418 for_each_set_bit(pos, &pending_reqs, hba->nutrs) {
7419 if (ufshcd_clear_cmd(hba, pos) < 0) {
7420 spin_lock_irqsave(&hba->outstanding_lock, flags);
7421 not_cleared = 1U << pos &
7422 ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
7423 hba->outstanding_reqs |= not_cleared;
7424 not_cleared_mask |= not_cleared;
7425 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
7426
7427 dev_err(hba->dev, "%s: failed to clear request %d\n",
7428 __func__, pos);
7429 }
7430 }
7431 __ufshcd_transfer_req_compl(hba, pending_reqs & ~not_cleared_mask);
7432
7433 out:
7434 hba->req_abort_count = 0;
7435 ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, (u32)err);
7436 if (!err) {
7437 err = SUCCESS;
7438 } else {
7439 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
7440 err = FAILED;
7441 }
7442 return err;
7443 }
7444
7445 static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
7446 {
7447 struct ufshcd_lrb *lrbp;
7448 int tag;
7449
7450 for_each_set_bit(tag, &bitmap, hba->nutrs) {
7451 lrbp = &hba->lrb[tag];
7452 lrbp->req_abort_skip = true;
7453 }
7454 }
7455
7456 /**
7457 * ufshcd_try_to_abort_task - abort a specific task
7458 * @hba: Pointer to adapter instance
7459 * @tag: Task tag/index to be aborted
7460 *
7461 * Abort the pending command in the device by sending the UFS_ABORT_TASK task management
7462 * command, and in the host controller by clearing the door-bell register. There can
7463 * be a race between the controller sending the command to the device and the abort
7464 * being issued. To avoid that, first issue UFS_QUERY_TASK to check whether the command
7465 * was really issued to the device and only then try to abort it.
7466 *
7467 * Return: zero on success, non-zero on failure.
7468 */
7469 int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
7470 {
7471 struct ufshcd_lrb *lrbp = &hba->lrb[tag];
7472 int err = 0;
7473 int poll_cnt;
7474 u8 resp = 0xF;
7475 u32 reg;
7476
7477 for (poll_cnt = 100; poll_cnt; poll_cnt--) {
7478 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
7479 UFS_QUERY_TASK, &resp);
7480 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
7481 /* cmd pending in the device */
7482 dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
7483 __func__, tag);
7484 break;
7485 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
7486 /*
7487 * cmd not pending in the device, check if it is
7488 * in transition.
7489 */
7490 dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
7491 __func__, tag);
7492 if (is_mcq_enabled(hba)) {
7493 /* MCQ mode */
7494 if (ufshcd_cmd_inflight(lrbp->cmd)) {
7495 /* sleep for max. 200us same delay as in SDB mode */
7496 usleep_range(100, 200);
7497 continue;
7498 }
7499 /* command completed already */
7500 dev_err(hba->dev, "%s: cmd at tag=%d is cleared.\n",
7501 __func__, tag);
7502 goto out;
7503 }
7504
7505 /* Single Doorbell Mode */
7506 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
7507 if (reg & (1 << tag)) {
7508 /* sleep for max. 200us to stabilize */
7509 usleep_range(100, 200);
7510 continue;
7511 }
7512 /* command completed already */
7513 dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
7514 __func__, tag);
7515 goto out;
7516 } else {
7517 dev_err(hba->dev,
7518 "%s: no response from device. tag = %d, err %d\n",
7519 __func__, tag, err);
7520 if (!err)
7521 err = resp; /* service response error */
7522 goto out;
7523 }
7524 }
7525
7526 if (!poll_cnt) {
7527 err = -EBUSY;
7528 goto out;
7529 }
7530
7531 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
7532 UFS_ABORT_TASK, &resp);
7533 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
7534 if (!err) {
7535 err = resp; /* service response error */
7536 dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
7537 __func__, tag, err);
7538 }
7539 goto out;
7540 }
7541
7542 err = ufshcd_clear_cmd(hba, tag);
7543 if (err)
7544 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
7545 __func__, tag, err);
7546
7547 out:
7548 return err;
7549 }
7550
7551 /**
7552 * ufshcd_abort - scsi host template eh_abort_handler callback
7553 * @cmd: SCSI command pointer
7554 *
7555 * Return: SUCCESS or FAILED.
7556 */
7557 static int ufshcd_abort(struct scsi_cmnd *cmd)
7558 {
7559 struct Scsi_Host *host = cmd->device->host;
7560 struct ufs_hba *hba = shost_priv(host);
7561 int tag = scsi_cmd_to_rq(cmd)->tag;
7562 struct ufshcd_lrb *lrbp = &hba->lrb[tag];
7563 unsigned long flags;
7564 int err = FAILED;
7565 bool outstanding;
7566 u32 reg;
7567
7568 WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
7569
7570 ufshcd_hold(hba);
7571
7572 if (!is_mcq_enabled(hba)) {
7573 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
7574 if (!test_bit(tag, &hba->outstanding_reqs)) {
7575 /* If command is already aborted/completed, return FAILED. */
7576 dev_err(hba->dev,
7577 "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
7578 __func__, tag, hba->outstanding_reqs, reg);
7579 goto release;
7580 }
7581 }
7582
7583 /* Print Transfer Request of aborted task */
7584 dev_info(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
7585
7586 /*
7587 * Print detailed info about aborted request.
7588 * As more than one request might get aborted at the same time,
7589 * print full information only for the first aborted request in order
7590 * to reduce repeated printouts. For other aborted requests only print
7591 * basic details.
7592 */
7593 scsi_print_command(cmd);
7594 if (!hba->req_abort_count) {
7595 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, tag);
7596 ufshcd_print_evt_hist(hba);
7597 ufshcd_print_host_state(hba);
7598 ufshcd_print_pwr_info(hba);
7599 ufshcd_print_tr(hba, tag, true);
7600 } else {
7601 ufshcd_print_tr(hba, tag, false);
7602 }
7603 hba->req_abort_count++;
7604
7605 if (!is_mcq_enabled(hba) && !(reg & (1 << tag))) {
7606 /* only execute this code in single doorbell mode */
7607 dev_err(hba->dev,
7608 "%s: cmd was completed, but without a notifying intr, tag = %d",
7609 __func__, tag);
7610 __ufshcd_transfer_req_compl(hba, 1UL << tag);
7611 goto release;
7612 }
7613
7614 /*
7615 * A task abort to the device W-LUN is illegal. When this command
7616 * fails due to that spec violation, the next step of SCSI error
7617 * handling is to send a LU reset which, again, is a spec violation.
7618 * To avoid these unnecessary/illegal steps, first we clean up
7619 * the lrb taken by this cmd and re-set it in outstanding_reqs,
7620 * then queue the eh_work and bail.
7621 */
7622 if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) {
7623 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun);
7624
7625 spin_lock_irqsave(host->host_lock, flags);
7626 hba->force_reset = true;
7627 ufshcd_schedule_eh_work(hba);
7628 spin_unlock_irqrestore(host->host_lock, flags);
7629 goto release;
7630 }
7631
7632 if (is_mcq_enabled(hba)) {
7633 /* MCQ mode. Branch off to handle abort for mcq mode */
7634 err = ufshcd_mcq_abort(cmd);
7635 goto release;
7636 }
7637
7638 /* Skip task abort in case previous aborts failed and report failure */
7639 if (lrbp->req_abort_skip) {
7640 dev_err(hba->dev, "%s: skipping abort\n", __func__);
7641 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
7642 goto release;
7643 }
7644
7645 err = ufshcd_try_to_abort_task(hba, tag);
7646 if (err) {
7647 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
7648 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
7649 err = FAILED;
7650 goto release;
7651 }
7652
7653 /*
7654 * Clear the corresponding bit from outstanding_reqs since the command
7655 * has been aborted successfully.
7656 */
7657 spin_lock_irqsave(&hba->outstanding_lock, flags);
7658 outstanding = __test_and_clear_bit(tag, &hba->outstanding_reqs);
7659 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
7660
7661 if (outstanding)
7662 ufshcd_release_scsi_cmd(hba, lrbp);
7663
7664 err = SUCCESS;
7665
7666 release:
7667 /* Matches the ufshcd_hold() call at the start of this function. */
7668 ufshcd_release(hba);
7669 return err;
7670 }
7671
7672 /**
7673 * ufshcd_host_reset_and_restore - reset and restore host controller
7674 * @hba: per-adapter instance
7675 *
7676 * Note that host controller reset may issue DME_RESET to
7677 * local and remote (device) Uni-Pro stack and the attributes
7678 * are reset to default state.
7679 *
7680 * Return: zero on success, non-zero on failure.
7681 */
7682 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
7683 {
7684 int err;
7685
7686 /*
7687 * Stop the host controller and complete the requests
7688 * cleared by h/w
7689 */
7690 ufshcd_hba_stop(hba);
7691 hba->silence_err_logs = true;
7692 ufshcd_complete_requests(hba, true);
7693 hba->silence_err_logs = false;
7694
7695 /* scale up clocks to max frequency before full reinitialization */
7696 ufshcd_scale_clks(hba, true);
7697
7698 err = ufshcd_hba_enable(hba);
7699
7700 /* Establish the link again and restore the device */
7701 if (!err)
7702 err = ufshcd_probe_hba(hba, false);
7703
7704 if (err)
7705 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
7706 ufshcd_update_evt_hist(hba, UFS_EVT_HOST_RESET, (u32)err);
7707 return err;
7708 }
7709
7710 /**
7711 * ufshcd_reset_and_restore - reset and re-initialize host/device
7712 * @hba: per-adapter instance
7713 *
7714 * Reset and recover device, host and re-establish link. This
7715 * is helpful to recover the communication in fatal error conditions.
7716 *
7717 * Return: zero on success, non-zero on failure.
7718 */
7719 static int ufshcd_reset_and_restore(struct ufs_hba *hba)
7720 {
7721 u32 saved_err = 0;
7722 u32 saved_uic_err = 0;
7723 int err = 0;
7724 unsigned long flags;
7725 int retries = MAX_HOST_RESET_RETRIES;
7726
7727 spin_lock_irqsave(hba->host->host_lock, flags);
7728 do {
7729 /*
7730 * This is a fresh start, cache and clear saved error first,
7731 * in case new error generated during reset and restore.
7732 */
7733 saved_err |= hba->saved_err;
7734 saved_uic_err |= hba->saved_uic_err;
7735 hba->saved_err = 0;
7736 hba->saved_uic_err = 0;
7737 hba->force_reset = false;
7738 hba->ufshcd_state = UFSHCD_STATE_RESET;
7739 spin_unlock_irqrestore(hba->host->host_lock, flags);
7740
7741 /* Reset the attached device */
7742 ufshcd_device_reset(hba);
7743
7744 err = ufshcd_host_reset_and_restore(hba);
7745
7746 spin_lock_irqsave(hba->host->host_lock, flags);
7747 if (err)
7748 continue;
7749 /* Do not exit unless operational or dead */
7750 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL &&
7751 hba->ufshcd_state != UFSHCD_STATE_ERROR &&
7752 hba->ufshcd_state != UFSHCD_STATE_EH_SCHEDULED_NON_FATAL)
7753 err = -EAGAIN;
7754 } while (err && --retries);
7755
7756 /*
7757 * Inform scsi mid-layer that we did reset and allow to handle
7758 * Unit Attention properly.
7759 */
7760 scsi_report_bus_reset(hba->host, 0);
7761 if (err) {
7762 hba->ufshcd_state = UFSHCD_STATE_ERROR;
7763 hba->saved_err |= saved_err;
7764 hba->saved_uic_err |= saved_uic_err;
7765 }
7766 spin_unlock_irqrestore(hba->host->host_lock, flags);
7767
7768 return err;
7769 }
7770
7771 /**
7772 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
7773 * @cmd: SCSI command pointer
7774 *
7775 * Return: SUCCESS or FAILED.
7776 */
7777 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
7778 {
7779 int err = SUCCESS;
7780 unsigned long flags;
7781 struct ufs_hba *hba;
7782
7783 hba = shost_priv(cmd->device->host);
7784
7785 spin_lock_irqsave(hba->host->host_lock, flags);
7786 hba->force_reset = true;
7787 ufshcd_schedule_eh_work(hba);
7788 dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
7789 spin_unlock_irqrestore(hba->host->host_lock, flags);
7790
7791 flush_work(&hba->eh_work);
7792
7793 spin_lock_irqsave(hba->host->host_lock, flags);
7794 if (hba->ufshcd_state == UFSHCD_STATE_ERROR)
7795 err = FAILED;
7796 spin_unlock_irqrestore(hba->host->host_lock, flags);
7797
7798 return err;
7799 }
7800
7801 /**
7802 * ufshcd_get_max_icc_level - calculate the ICC level
7803 * @sup_curr_uA: max. current supported by the regulator
7804 * @start_scan: row at the desc table to start scan from
7805 * @buff: power descriptor buffer
7806 *
7807 * Return: calculated max ICC level for specific regulator.
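*
* Each 16-bit entry in @buff encodes a unit and a value; for example, an
* entry that decodes to UFSHCD_MILI_AMP with a value of 150 represents
* 150000 uA. The table is scanned downwards from @start_scan and the scan
* stops at the first level whose current fits within @sup_curr_uA.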
7808 */
7809 static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan,
7810 const char *buff)
7811 {
7812 int i;
7813 int curr_uA;
7814 u16 data;
7815 u16 unit;
7816
7817 for (i = start_scan; i >= 0; i--) {
7818 data = get_unaligned_be16(&buff[2 * i]);
7819 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
7820 ATTR_ICC_LVL_UNIT_OFFSET;
7821 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
7822 switch (unit) {
7823 case UFSHCD_NANO_AMP:
7824 curr_uA = curr_uA / 1000;
7825 break;
7826 case UFSHCD_MILI_AMP:
7827 curr_uA = curr_uA * 1000;
7828 break;
7829 case UFSHCD_AMP:
7830 curr_uA = curr_uA * 1000 * 1000;
7831 break;
7832 case UFSHCD_MICRO_AMP:
7833 default:
7834 break;
7835 }
7836 if (sup_curr_uA >= curr_uA)
7837 break;
7838 }
7839 if (i < 0) {
7840 i = 0;
7841 pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
7842 }
7843
7844 return (u32)i;
7845 }
7846
7847 /**
7848 * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level
7849 * In case the regulators are not initialized, 0 is returned.
7850 * @hba: per-adapter instance
7851 * @desc_buf: power descriptor buffer to extract ICC levels from.
7852 *
7853 * Return: calculated ICC level.
7854 */
7855 static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
7856 const u8 *desc_buf)
7857 {
7858 u32 icc_level = 0;
7859
7860 if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
7861 !hba->vreg_info.vccq2) {
7862 /*
7863 * Use dev_dbg to avoid messages during runtime PM; otherwise user
7864 * space would write these messages back to storage, triggering a
7865 * runtime resume, which would cause more messages to be logged, and
7866 * so on in a never-ending cycle.
7867 */
7868 dev_dbg(hba->dev,
7869 "%s: Regulator capability was not set, actvIccLevel=%d",
7870 __func__, icc_level);
7871 goto out;
7872 }
7873
7874 if (hba->vreg_info.vcc->max_uA)
7875 icc_level = ufshcd_get_max_icc_level(
7876 hba->vreg_info.vcc->max_uA,
7877 POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
7878 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
7879
7880 if (hba->vreg_info.vccq->max_uA)
7881 icc_level = ufshcd_get_max_icc_level(
7882 hba->vreg_info.vccq->max_uA,
7883 icc_level,
7884 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
7885
7886 if (hba->vreg_info.vccq2->max_uA)
7887 icc_level = ufshcd_get_max_icc_level(
7888 hba->vreg_info.vccq2->max_uA,
7889 icc_level,
7890 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
7891 out:
7892 return icc_level;
7893 }
7894
7895 static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba)
7896 {
7897 int ret;
7898 u8 *desc_buf;
7899 u32 icc_level;
7900
7901 desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
7902 if (!desc_buf)
7903 return;
7904
7905 ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, 0,
7906 desc_buf, QUERY_DESC_MAX_SIZE);
7907 if (ret) {
7908 dev_err(hba->dev,
7909 "%s: Failed reading power descriptor ret = %d",
7910 __func__, ret);
7911 goto out;
7912 }
7913
7914 icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf);
7915 dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level);
7916
7917 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
7918 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
7919
7920 if (ret)
7921 dev_err(hba->dev,
7922 "%s: Failed configuring bActiveICCLevel = %d ret = %d",
7923 __func__, icc_level, ret);
7924
7925 out:
7926 kfree(desc_buf);
7927 }
7928
7929 static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev)
7930 {
7931 scsi_autopm_get_device(sdev);
7932 blk_pm_runtime_init(sdev->request_queue, &sdev->sdev_gendev);
7933 if (sdev->rpm_autosuspend)
7934 pm_runtime_set_autosuspend_delay(&sdev->sdev_gendev,
7935 RPM_AUTOSUSPEND_DELAY_MS);
7936 scsi_autopm_put_device(sdev);
7937 }
7938
7939 /**
7940 * ufshcd_scsi_add_wlus - Adds required W-LUs
7941 * @hba: per-adapter instance
7942 *
7943 * UFS device specification requires the UFS devices to support 4 well known
7944 * logical units:
7945 * "REPORT_LUNS" (address: 01h)
7946 * "UFS Device" (address: 50h)
7947 * "RPMB" (address: 44h)
7948 * "BOOT" (address: 30h)
7949 * UFS device's power management needs to be controlled by "POWER CONDITION"
7950 * field of SSU (START STOP UNIT) command. But this "power condition" field
7951 * will take effect only when it is sent to the "UFS device" well known logical unit,
7952 * hence we require the scsi_device instance to represent this logical unit in
7953 * order for the UFS host driver to send the SSU command for power management.
7954 *
7955 * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
7956 * Block) LU so user space process can control this LU. User space may also
7957 * want to have access to BOOT LU.
7958 *
7959 * This function adds scsi device instances for all of the well known LUs
7960 * (except the "REPORT LUNS" LU).
7961 *
7962 * Return: zero on success (all required W-LUs are added successfully),
7963 * non-zero error value on failure (if failed to add any of the required W-LU).
7964 */
7965 static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
7966 {
7967 int ret = 0;
7968 struct scsi_device *sdev_boot, *sdev_rpmb;
7969
7970 hba->ufs_device_wlun = __scsi_add_device(hba->host, 0, 0,
7971 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
7972 if (IS_ERR(hba->ufs_device_wlun)) {
7973 ret = PTR_ERR(hba->ufs_device_wlun);
7974 hba->ufs_device_wlun = NULL;
7975 goto out;
7976 }
7977 scsi_device_put(hba->ufs_device_wlun);
7978
7979 sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
7980 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
7981 if (IS_ERR(sdev_rpmb)) {
7982 ret = PTR_ERR(sdev_rpmb);
7983 goto remove_ufs_device_wlun;
7984 }
7985 ufshcd_blk_pm_runtime_init(sdev_rpmb);
7986 scsi_device_put(sdev_rpmb);
7987
7988 sdev_boot = __scsi_add_device(hba->host, 0, 0,
7989 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
7990 if (IS_ERR(sdev_boot)) {
7991 dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
7992 } else {
7993 ufshcd_blk_pm_runtime_init(sdev_boot);
7994 scsi_device_put(sdev_boot);
7995 }
7996 goto out;
7997
7998 remove_ufs_device_wlun:
7999 scsi_remove_device(hba->ufs_device_wlun);
8000 out:
8001 return ret;
8002 }
8003
8004 static void ufshcd_wb_probe(struct ufs_hba *hba, const u8 *desc_buf)
8005 {
8006 struct ufs_dev_info *dev_info = &hba->dev_info;
8007 u8 lun;
8008 u32 d_lu_wb_buf_alloc;
8009 u32 ext_ufs_feature;
8010
8011 if (!ufshcd_is_wb_allowed(hba))
8012 return;
8013
8014 /*
8015 * Probe WB only for UFS-2.2 and UFS-3.1 (and later) devices or
8016 * UFS devices with quirk UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES
8017 * enabled
8018 */
8019 if (!(dev_info->wspecversion >= 0x310 ||
8020 dev_info->wspecversion == 0x220 ||
8021 (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES)))
8022 goto wb_disabled;
8023
8024 ext_ufs_feature = get_unaligned_be32(desc_buf +
8025 DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
8026
8027 if (!(ext_ufs_feature & UFS_DEV_WRITE_BOOSTER_SUP))
8028 goto wb_disabled;
8029
8030 /*
8031 * WB may be supported but not configured while provisioning. The spec
8032 * says that in dedicated WB buffer mode at most one LUN will have a
8033 * WB buffer configured.
8034 */
8035 dev_info->wb_buffer_type = desc_buf[DEVICE_DESC_PARAM_WB_TYPE];
8036
8037 dev_info->b_presrv_uspc_en =
8038 desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN];
8039
8040 if (dev_info->wb_buffer_type == WB_BUF_MODE_SHARED) {
8041 if (!get_unaligned_be32(desc_buf +
8042 DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS))
8043 goto wb_disabled;
8044 } else {
8045 for (lun = 0; lun < UFS_UPIU_MAX_WB_LUN_ID; lun++) {
8046 d_lu_wb_buf_alloc = 0;
8047 ufshcd_read_unit_desc_param(hba,
8048 lun,
8049 UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS,
8050 (u8 *)&d_lu_wb_buf_alloc,
8051 sizeof(d_lu_wb_buf_alloc));
8052 if (d_lu_wb_buf_alloc) {
8053 dev_info->wb_dedicated_lu = lun;
8054 break;
8055 }
8056 }
8057
8058 if (!d_lu_wb_buf_alloc)
8059 goto wb_disabled;
8060 }
8061
8062 if (!ufshcd_is_wb_buf_lifetime_available(hba))
8063 goto wb_disabled;
8064
8065 return;
8066
8067 wb_disabled:
8068 hba->caps &= ~UFSHCD_CAP_WB_EN;
8069 }
8070
8071 static void ufshcd_temp_notif_probe(struct ufs_hba *hba, const u8 *desc_buf)
8072 {
8073 struct ufs_dev_info *dev_info = &hba->dev_info;
8074 u32 ext_ufs_feature;
8075 u8 mask = 0;
8076
8077 if (!(hba->caps & UFSHCD_CAP_TEMP_NOTIF) || dev_info->wspecversion < 0x300)
8078 return;
8079
8080 ext_ufs_feature = get_unaligned_be32(desc_buf + DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
8081
8082 if (ext_ufs_feature & UFS_DEV_LOW_TEMP_NOTIF)
8083 mask |= MASK_EE_TOO_LOW_TEMP;
8084
8085 if (ext_ufs_feature & UFS_DEV_HIGH_TEMP_NOTIF)
8086 mask |= MASK_EE_TOO_HIGH_TEMP;
8087
8088 if (mask) {
8089 ufshcd_enable_ee(hba, mask);
8090 ufs_hwmon_probe(hba, mask);
8091 }
8092 }
8093
8094 static void ufshcd_ext_iid_probe(struct ufs_hba *hba, u8 *desc_buf)
8095 {
8096 struct ufs_dev_info *dev_info = &hba->dev_info;
8097 u32 ext_ufs_feature;
8098 u32 ext_iid_en = 0;
8099 int err;
8100
8101 /* Only UFS-4.0 and above may support EXT_IID */
8102 if (dev_info->wspecversion < 0x400)
8103 goto out;
8104
8105 ext_ufs_feature = get_unaligned_be32(desc_buf +
8106 DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
8107 if (!(ext_ufs_feature & UFS_DEV_EXT_IID_SUP))
8108 goto out;
8109
8110 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
8111 QUERY_ATTR_IDN_EXT_IID_EN, 0, 0, &ext_iid_en);
8112 if (err)
8113 dev_err(hba->dev, "failed reading bEXTIIDEn. err = %d\n", err);
8114
8115 out:
8116 dev_info->b_ext_iid_en = ext_iid_en;
8117 }
8118
8119 void ufshcd_fixup_dev_quirks(struct ufs_hba *hba,
8120 const struct ufs_dev_quirk *fixups)
8121 {
8122 const struct ufs_dev_quirk *f;
8123 struct ufs_dev_info *dev_info = &hba->dev_info;
8124
8125 if (!fixups)
8126 return;
8127
8128 for (f = fixups; f->quirk; f++) {
8129 if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
8130 f->wmanufacturerid == UFS_ANY_VENDOR) &&
8131 ((dev_info->model &&
8132 STR_PRFX_EQUAL(f->model, dev_info->model)) ||
8133 !strcmp(f->model, UFS_ANY_MODEL)))
8134 hba->dev_quirks |= f->quirk;
8135 }
8136 }
8137 EXPORT_SYMBOL_GPL(ufshcd_fixup_dev_quirks);
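
/*
 * A minimal, hypothetical fixup table that a vendor driver might pass to
 * ufshcd_fixup_dev_quirks(); the entries below are illustrative only:
 *
 *	static const struct ufs_dev_quirk my_vendor_fixups[] = {
 *		{ .wmanufacturerid = UFS_VENDOR_SAMSUNG,
 *		  .model = UFS_ANY_MODEL,
 *		  .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE },
 *		{}
 *	};
 *
 * The table must be terminated by an entry whose .quirk is 0.
 */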
8138
8139 static void ufs_fixup_device_setup(struct ufs_hba *hba)
8140 {
8141 /* fix by general quirk table */
8142 ufshcd_fixup_dev_quirks(hba, ufs_fixups);
8143
8144 /* allow vendors to fix quirks */
8145 ufshcd_vops_fixup_dev_quirks(hba);
8146 }
8147
8148 static int ufs_get_device_desc(struct ufs_hba *hba)
8149 {
8150 int err;
8151 u8 model_index;
8152 u8 *desc_buf;
8153 struct ufs_dev_info *dev_info = &hba->dev_info;
8154
8155 desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
8156 if (!desc_buf) {
8157 err = -ENOMEM;
8158 goto out;
8159 }
8160
8161 err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf,
8162 QUERY_DESC_MAX_SIZE);
8163 if (err) {
8164 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
8165 __func__, err);
8166 goto out;
8167 }
8168
8169 /*
8170 * getting vendor (manufacturerID) and Bank Index in big endian
8171 * format
8172 */
8173 dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
8174 desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
8175
8176 /* getting Specification Version in big endian format */
8177 dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
8178 desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
8179 dev_info->bqueuedepth = desc_buf[DEVICE_DESC_PARAM_Q_DPTH];
8180
8181 model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
8182
8183 err = ufshcd_read_string_desc(hba, model_index,
8184 &dev_info->model, SD_ASCII_STD);
8185 if (err < 0) {
8186 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
8187 __func__, err);
8188 goto out;
8189 }
8190
8191 hba->luns_avail = desc_buf[DEVICE_DESC_PARAM_NUM_LU] +
8192 desc_buf[DEVICE_DESC_PARAM_NUM_WLU];
8193
8194 ufs_fixup_device_setup(hba);
8195
8196 ufshcd_wb_probe(hba, desc_buf);
8197
8198 ufshcd_temp_notif_probe(hba, desc_buf);
8199
8200 if (hba->ext_iid_sup)
8201 ufshcd_ext_iid_probe(hba, desc_buf);
8202
8203 /*
8204 * ufshcd_read_string_desc returns the size of the string on success,
8205 * so reset the error value.
8206 */
8207 err = 0;
8208
8209 out:
8210 kfree(desc_buf);
8211 return err;
8212 }
8213
8214 static void ufs_put_device_desc(struct ufs_hba *hba)
8215 {
8216 struct ufs_dev_info *dev_info = &hba->dev_info;
8217
8218 kfree(dev_info->model);
8219 dev_info->model = NULL;
8220 }
8221
8222 /**
8223 * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
8224 * @hba: per-adapter instance
8225 *
8226 * The PA_TActivate parameter can be tuned manually if the UniPro version is less than
8227 * 1.61. PA_TActivate needs to be greater than or equal to the peer M-PHY's
8228 * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
8229 * the hibern8 exit latency.
8230 *
8231 * Return: zero on success, non-zero error value on failure.
8232 */
8233 static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
8234 {
8235 int ret = 0;
8236 u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
8237
8238 ret = ufshcd_dme_peer_get(hba,
8239 UIC_ARG_MIB_SEL(
8240 RX_MIN_ACTIVATETIME_CAPABILITY,
8241 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
8242 &peer_rx_min_activatetime);
8243 if (ret)
8244 goto out;
8245
8246 /* make sure proper unit conversion is applied */
8247 tuned_pa_tactivate =
8248 ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
8249 / PA_TACTIVATE_TIME_UNIT_US);
8250 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
8251 tuned_pa_tactivate);
8252
8253 out:
8254 return ret;
8255 }
8256
8257 /**
8258 * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
8259 * @hba: per-adapter instance
8260 *
8261 * The PA_Hibern8Time parameter can be tuned manually if the UniPro version is less than
8262 * 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's
8263 * TX_HIBERN8TIME_CAPABILITY and the peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
8264 * This optimal value can help reduce the hibern8 exit latency.
8265 *
8266 * Return: zero on success, non-zero error value on failure.
8267 */
8268 static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
8269 {
8270 int ret = 0;
8271 u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
8272 u32 max_hibern8_time, tuned_pa_hibern8time;
8273
8274 ret = ufshcd_dme_get(hba,
8275 UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
8276 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
8277 &local_tx_hibern8_time_cap);
8278 if (ret)
8279 goto out;
8280
8281 ret = ufshcd_dme_peer_get(hba,
8282 UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
8283 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
8284 &peer_rx_hibern8_time_cap);
8285 if (ret)
8286 goto out;
8287
8288 max_hibern8_time = max(local_tx_hibern8_time_cap,
8289 peer_rx_hibern8_time_cap);
8290 /* make sure proper unit conversion is applied */
8291 tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
8292 / PA_HIBERN8_TIME_UNIT_US);
8293 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
8294 tuned_pa_hibern8time);
8295 out:
8296 return ret;
8297 }
8298
8299 /**
8300 * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
8301 * less than device PA_TACTIVATE time.
8302 * @hba: per-adapter instance
8303 *
8304 * Some UFS devices require host PA_TACTIVATE to be lower than device
8305 * PA_TACTIVATE; the UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk needs to be enabled
8306 * for such devices.
8307 *
8308 * Return: zero on success, non-zero error value on failure.
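*
* The granularity values index gran_to_us_table[] below; for example, with a
* peer granularity of 3 (8 us per unit) and a host PA_TACTIVATE of 16 us, the
* peer PA_TACTIVATE is bumped to 16 / 8 + 1 = 3 units (24 us), keeping the
* device value strictly greater than the host value.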
8309 */
8310 static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
8311 {
8312 int ret = 0;
8313 u32 granularity, peer_granularity;
8314 u32 pa_tactivate, peer_pa_tactivate;
8315 u32 pa_tactivate_us, peer_pa_tactivate_us;
8316 static const u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
8317
8318 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
8319 &granularity);
8320 if (ret)
8321 goto out;
8322
8323 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
8324 &peer_granularity);
8325 if (ret)
8326 goto out;
8327
8328 if ((granularity < PA_GRANULARITY_MIN_VAL) ||
8329 (granularity > PA_GRANULARITY_MAX_VAL)) {
8330 dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
8331 __func__, granularity);
8332 return -EINVAL;
8333 }
8334
8335 if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
8336 (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
8337 dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
8338 __func__, peer_granularity);
8339 return -EINVAL;
8340 }
8341
8342 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
8343 if (ret)
8344 goto out;
8345
8346 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
8347 &peer_pa_tactivate);
8348 if (ret)
8349 goto out;
8350
8351 pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
8352 peer_pa_tactivate_us = peer_pa_tactivate *
8353 gran_to_us_table[peer_granularity - 1];
8354
8355 if (pa_tactivate_us >= peer_pa_tactivate_us) {
8356 u32 new_peer_pa_tactivate;
8357
8358 new_peer_pa_tactivate = pa_tactivate_us /
8359 gran_to_us_table[peer_granularity - 1];
8360 new_peer_pa_tactivate++;
8361 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
8362 new_peer_pa_tactivate);
8363 }
8364
8365 out:
8366 return ret;
8367 }
8368
8369 static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
8370 {
8371 if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
8372 ufshcd_tune_pa_tactivate(hba);
8373 ufshcd_tune_pa_hibern8time(hba);
8374 }
8375
8376 ufshcd_vops_apply_dev_quirks(hba);
8377
8378 if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
8379 /* set 1ms timeout for PA_TACTIVATE */
8380 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
8381
8382 if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
8383 ufshcd_quirk_tune_host_pa_tactivate(hba);
8384 }
8385
8386 static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
8387 {
8388 hba->ufs_stats.hibern8_exit_cnt = 0;
8389 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
8390 hba->req_abort_count = 0;
8391 }
8392
8393 static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
8394 {
8395 int err;
8396 u8 *desc_buf;
8397
8398 desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
8399 if (!desc_buf) {
8400 err = -ENOMEM;
8401 goto out;
8402 }
8403
8404 err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, 0,
8405 desc_buf, QUERY_DESC_MAX_SIZE);
8406 if (err) {
8407 dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n",
8408 __func__, err);
8409 goto out;
8410 }
8411
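/*
 * bMaxNumberLU in the geometry descriptor: 0 means the device supports
 * 8 logical units, 1 means it supports 32 logical units.
 */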
8412 if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 1)
8413 hba->dev_info.max_lu_supported = 32;
8414 else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
8415 hba->dev_info.max_lu_supported = 8;
8416
8417 out:
8418 kfree(desc_buf);
8419 return err;
8420 }
8421
8422 struct ufs_ref_clk {
8423 unsigned long freq_hz;
8424 enum ufs_ref_clk_freq val;
8425 };
8426
8427 static const struct ufs_ref_clk ufs_ref_clk_freqs[] = {
8428 {19200000, REF_CLK_FREQ_19_2_MHZ},
8429 {26000000, REF_CLK_FREQ_26_MHZ},
8430 {38400000, REF_CLK_FREQ_38_4_MHZ},
8431 {52000000, REF_CLK_FREQ_52_MHZ},
8432 {0, REF_CLK_FREQ_INVAL},
8433 };
8434
8435 static enum ufs_ref_clk_freq
8436 ufs_get_bref_clk_from_hz(unsigned long freq)
8437 {
8438 int i;
8439
8440 for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++)
8441 if (ufs_ref_clk_freqs[i].freq_hz == freq)
8442 return ufs_ref_clk_freqs[i].val;
8443
8444 return REF_CLK_FREQ_INVAL;
8445 }
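
/*
 * For example, a reference clock running at 26000000 Hz maps to
 * REF_CLK_FREQ_26_MHZ; any rate not listed in ufs_ref_clk_freqs[] yields
 * REF_CLK_FREQ_INVAL.
 */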
8446
8447 void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk)
8448 {
8449 unsigned long freq;
8450
8451 freq = clk_get_rate(refclk);
8452
8453 hba->dev_ref_clk_freq =
8454 ufs_get_bref_clk_from_hz(freq);
8455
8456 if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
8457 dev_err(hba->dev,
8458 "invalid ref_clk setting = %ld\n", freq);
8459 }
8460
8461 static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba)
8462 {
8463 int err;
8464 u32 ref_clk;
8465 u32 freq = hba->dev_ref_clk_freq;
8466
8467 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
8468 QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk);
8469
8470 if (err) {
8471 dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n",
8472 err);
8473 goto out;
8474 }
8475
8476 if (ref_clk == freq)
8477 goto out; /* nothing to update */
8478
8479 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
8480 QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &freq);
8481
8482 if (err) {
8483 dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n",
8484 ufs_ref_clk_freqs[freq].freq_hz);
8485 goto out;
8486 }
8487
8488 dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n",
8489 ufs_ref_clk_freqs[freq].freq_hz);
8490
8491 out:
8492 return err;
8493 }
8494
8495 static int ufshcd_device_params_init(struct ufs_hba *hba)
8496 {
8497 bool flag;
8498 int ret;
8499
8500 /* Init UFS geometry descriptor related parameters */
8501 ret = ufshcd_device_geo_params_init(hba);
8502 if (ret)
8503 goto out;
8504
8505 /* Check and apply UFS device quirks */
8506 ret = ufs_get_device_desc(hba);
8507 if (ret) {
8508 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
8509 __func__, ret);
8510 goto out;
8511 }
8512
8513 ufshcd_get_ref_clk_gating_wait(hba);
8514
8515 if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
8516 QUERY_FLAG_IDN_PWR_ON_WPE, 0, &flag))
8517 hba->dev_info.f_power_on_wp_en = flag;
8518
8519 /* Probe maximum power mode co-supported by both UFS host and device */
8520 if (ufshcd_get_max_pwr_mode(hba))
8521 dev_err(hba->dev,
8522 "%s: Failed getting max supported power mode\n",
8523 __func__);
8524 out:
8525 return ret;
8526 }
8527
8528 static void ufshcd_set_timestamp_attr(struct ufs_hba *hba)
8529 {
8530 int err;
8531 struct ufs_query_req *request = NULL;
8532 struct ufs_query_res *response = NULL;
8533 struct ufs_dev_info *dev_info = &hba->dev_info;
8534 struct utp_upiu_query_v4_0 *upiu_data;
8535
8536 if (dev_info->wspecversion < 0x400)
8537 return;
8538
8539 ufshcd_hold(hba);
8540
8541 mutex_lock(&hba->dev_cmd.lock);
8542
8543 ufshcd_init_query(hba, &request, &response,
8544 UPIU_QUERY_OPCODE_WRITE_ATTR,
8545 QUERY_ATTR_IDN_TIMESTAMP, 0, 0);
8546
8547 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
8548
8549 upiu_data = (struct utp_upiu_query_v4_0 *)&request->upiu_req;
8550
8551 put_unaligned_be64(ktime_get_real_ns(), &upiu_data->osf3);
8552
8553 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
8554
8555 if (err)
8556 dev_err(hba->dev, "%s: failed to set timestamp %d\n",
8557 __func__, err);
8558
8559 mutex_unlock(&hba->dev_cmd.lock);
8560 ufshcd_release(hba);
8561 }
8562
8563 /**
8564 * ufshcd_add_lus - probe and add UFS logical units
8565 * @hba: per-adapter instance
8566 *
8567 * Return: 0 upon success; < 0 upon failure.
8568 */
8569 static int ufshcd_add_lus(struct ufs_hba *hba)
8570 {
8571 int ret;
8572
8573 /* Add required well known logical units to scsi mid layer */
8574 ret = ufshcd_scsi_add_wlus(hba);
8575 if (ret)
8576 goto out;
8577
8578 /* Initialize devfreq after UFS device is detected */
8579 if (ufshcd_is_clkscaling_supported(hba)) {
8580 memcpy(&hba->clk_scaling.saved_pwr_info,
8581 &hba->pwr_info,
8582 sizeof(struct ufs_pa_layer_attr));
8583 hba->clk_scaling.is_allowed = true;
8584
8585 ret = ufshcd_devfreq_init(hba);
8586 if (ret)
8587 goto out;
8588
8589 hba->clk_scaling.is_enabled = true;
8590 ufshcd_init_clk_scaling_sysfs(hba);
8591 }
8592
8593 ufs_bsg_probe(hba);
8594 scsi_scan_host(hba->host);
8595
8596 out:
8597 return ret;
8598 }
8599
8600 /* SDB - Single Doorbell */
8601 static void ufshcd_release_sdb_queue(struct ufs_hba *hba, int nutrs)
8602 {
8603 size_t ucdl_size, utrdl_size;
8604
8605 ucdl_size = ufshcd_get_ucd_size(hba) * nutrs;
8606 dmam_free_coherent(hba->dev, ucdl_size, hba->ucdl_base_addr,
8607 hba->ucdl_dma_addr);
8608
8609 utrdl_size = sizeof(struct utp_transfer_req_desc) * nutrs;
8610 dmam_free_coherent(hba->dev, utrdl_size, hba->utrdl_base_addr,
8611 hba->utrdl_dma_addr);
8612
8613 devm_kfree(hba->dev, hba->lrb);
8614 }
8615
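/**
 * ufshcd_alloc_mcq - allocate the memory needed to operate in MCQ mode
 * @hba: per-adapter instance
 *
 * Determine the MCQ queue depth, initialize the MCQ data structures and, if
 * the queue depth differs from the single doorbell queue depth, release the
 * SDB resources and re-allocate the host memory before allocating the MCQ
 * specific memory. On failure the original queue depth is restored.
 *
 * Return: 0 upon success; < 0 upon failure.
 */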
8616 static int ufshcd_alloc_mcq(struct ufs_hba *hba)
8617 {
8618 int ret;
8619 int old_nutrs = hba->nutrs;
8620
8621 ret = ufshcd_mcq_decide_queue_depth(hba);
8622 if (ret < 0)
8623 return ret;
8624
8625 hba->nutrs = ret;
8626 ret = ufshcd_mcq_init(hba);
8627 if (ret)
8628 goto err;
8629
8630 /*
8631 * Previously allocated memory for nutrs may not be enough in MCQ mode.
8632 	 * The number of supported tags in MCQ mode may be larger than in SDB mode.
8633 */
8634 if (hba->nutrs != old_nutrs) {
8635 ufshcd_release_sdb_queue(hba, old_nutrs);
8636 ret = ufshcd_memory_alloc(hba);
8637 if (ret)
8638 goto err;
8639 ufshcd_host_memory_configure(hba);
8640 }
8641
8642 ret = ufshcd_mcq_memory_alloc(hba);
8643 if (ret)
8644 goto err;
8645
8646 return 0;
8647 err:
8648 hba->nutrs = old_nutrs;
8649 return ret;
8650 }
8651
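/**
 * ufshcd_config_mcq - configure the host controller for MCQ operation
 * @hba: per-adapter instance
 *
 * Configure ESI if supported, enable the MCQ related interrupts, make the
 * submission/completion queues operational, program the maximum number of
 * active commands and switch the controller into MCQ mode.
 */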
8652 static void ufshcd_config_mcq(struct ufs_hba *hba)
8653 {
8654 int ret;
8655 u32 intrs;
8656
8657 ret = ufshcd_mcq_vops_config_esi(hba);
8658 dev_info(hba->dev, "ESI %sconfigured\n", ret ? "is not " : "");
8659
8660 intrs = UFSHCD_ENABLE_MCQ_INTRS;
8661 if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_INTR)
8662 intrs &= ~MCQ_CQ_EVENT_STATUS;
8663 ufshcd_enable_intr(hba, intrs);
8664 ufshcd_mcq_make_queues_operational(hba);
8665 ufshcd_mcq_config_mac(hba, hba->nutrs);
8666
8667 hba->host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
8668 hba->reserved_slot = hba->nutrs - UFSHCD_NUM_RESERVED;
8669
8670 /* Select MCQ mode */
8671 ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x1,
8672 REG_UFS_MEM_CFG);
8673 hba->mcq_enabled = true;
8674
8675 dev_info(hba->dev, "MCQ configured, nr_queues=%d, io_queues=%d, read_queue=%d, poll_queues=%d, queue_depth=%d\n",
8676 hba->nr_hw_queues, hba->nr_queues[HCTX_TYPE_DEFAULT],
8677 hba->nr_queues[HCTX_TYPE_READ], hba->nr_queues[HCTX_TYPE_POLL],
8678 hba->nutrs);
8679 }
8680
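/**
 * ufshcd_device_init - perform link startup and UFS device initialization
 * @hba: per-adapter instance
 * @init_dev_params: whether to (re)initialize the device parameters
 *
 * Start the UniPro link, verify device initialization with NOP OUT, complete
 * fDeviceInit, optionally read the device parameters, configure MCQ when
 * supported and, if the maximum power mode is known, switch to it.
 *
 * Return: 0 upon success; < 0 upon failure.
 */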
8681 static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
8682 {
8683 int ret;
8684 struct Scsi_Host *host = hba->host;
8685
8686 hba->ufshcd_state = UFSHCD_STATE_RESET;
8687
8688 ret = ufshcd_link_startup(hba);
8689 if (ret)
8690 return ret;
8691
8692 if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION)
8693 return ret;
8694
8695 /* Debug counters initialization */
8696 ufshcd_clear_dbg_ufs_stats(hba);
8697
8698 /* UniPro link is active now */
8699 ufshcd_set_link_active(hba);
8700
8701 /* Reconfigure MCQ upon reset */
8702 if (is_mcq_enabled(hba) && !init_dev_params)
8703 ufshcd_config_mcq(hba);
8704
8705 /* Verify device initialization by sending NOP OUT UPIU */
8706 ret = ufshcd_verify_dev_init(hba);
8707 if (ret)
8708 return ret;
8709
8710 	/* Initiate UFS initialization, and wait until it completes */
8711 ret = ufshcd_complete_dev_init(hba);
8712 if (ret)
8713 return ret;
8714
8715 /*
8716 	 * Initialize UFS device parameters used by the driver; these
8717 	 * parameters are associated with UFS descriptors.
8718 */
8719 if (init_dev_params) {
8720 ret = ufshcd_device_params_init(hba);
8721 if (ret)
8722 return ret;
8723 if (is_mcq_supported(hba) && !hba->scsi_host_added) {
8724 ret = ufshcd_alloc_mcq(hba);
8725 if (!ret) {
8726 ufshcd_config_mcq(hba);
8727 } else {
8728 /* Continue with SDB mode */
8729 use_mcq_mode = false;
8730 dev_err(hba->dev, "MCQ mode is disabled, err=%d\n",
8731 ret);
8732 }
8733 ret = scsi_add_host(host, hba->dev);
8734 if (ret) {
8735 dev_err(hba->dev, "scsi_add_host failed\n");
8736 return ret;
8737 }
8738 hba->scsi_host_added = true;
8739 } else if (is_mcq_supported(hba)) {
8740 /* UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH is set */
8741 ufshcd_config_mcq(hba);
8742 }
8743 }
8744
8745 ufshcd_tune_unipro_params(hba);
8746
8747 /* UFS device is also active now */
8748 ufshcd_set_ufs_dev_active(hba);
8749 ufshcd_force_reset_auto_bkops(hba);
8750
8751 ufshcd_set_timestamp_attr(hba);
8752
8753 /* Gear up to HS gear if supported */
8754 if (hba->max_pwr_info.is_valid) {
8755 /*
8756 * Set the right value to bRefClkFreq before attempting to
8757 * switch to HS gears.
8758 */
8759 if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
8760 ufshcd_set_dev_ref_clk(hba);
8761 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
8762 if (ret) {
8763 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
8764 __func__, ret);
8765 return ret;
8766 }
8767 }
8768
8769 return 0;
8770 }
8771
8772 /**
8773 * ufshcd_probe_hba - probe hba to detect device and initialize it
8774 * @hba: per-adapter instance
8775 * @init_dev_params: whether or not to call ufshcd_device_params_init().
8776 *
8777 * Execute link-startup and verify device initialization
8778 *
8779 * Return: 0 upon success; < 0 upon failure.
8780 */
8781 static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
8782 {
8783 ktime_t start = ktime_get();
8784 unsigned long flags;
8785 int ret;
8786
8787 ret = ufshcd_device_init(hba, init_dev_params);
8788 if (ret)
8789 goto out;
8790
8791 if (!hba->pm_op_in_progress &&
8792 (hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH)) {
8793 /* Reset the device and controller before doing reinit */
8794 ufshcd_device_reset(hba);
8795 ufs_put_device_desc(hba);
8796 ufshcd_hba_stop(hba);
8797 ufshcd_vops_reinit_notify(hba);
8798 ret = ufshcd_hba_enable(hba);
8799 if (ret) {
8800 dev_err(hba->dev, "Host controller enable failed\n");
8801 ufshcd_print_evt_hist(hba);
8802 ufshcd_print_host_state(hba);
8803 goto out;
8804 }
8805
8806 /* Reinit the device */
8807 ret = ufshcd_device_init(hba, init_dev_params);
8808 if (ret)
8809 goto out;
8810 }
8811
8812 ufshcd_print_pwr_info(hba);
8813
8814 /*
8815 * bActiveICCLevel is volatile for UFS device (as per latest v2.1 spec)
8816 * and for removable UFS card as well, hence always set the parameter.
8817 * Note: Error handler may issue the device reset hence resetting
8818 * bActiveICCLevel as well so it is always safe to set this here.
8819 */
8820 ufshcd_set_active_icc_lvl(hba);
8821
8822 /* Enable UFS Write Booster if supported */
8823 ufshcd_configure_wb(hba);
8824
8825 if (hba->ee_usr_mask)
8826 ufshcd_write_ee_control(hba);
8827 /* Enable Auto-Hibernate if configured */
8828 ufshcd_auto_hibern8_enable(hba);
8829
8830 out:
8831 spin_lock_irqsave(hba->host->host_lock, flags);
8832 if (ret)
8833 hba->ufshcd_state = UFSHCD_STATE_ERROR;
8834 else if (hba->ufshcd_state == UFSHCD_STATE_RESET)
8835 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
8836 spin_unlock_irqrestore(hba->host->host_lock, flags);
8837
8838 trace_ufshcd_init(dev_name(hba->dev), ret,
8839 ktime_to_us(ktime_sub(ktime_get(), start)),
8840 hba->curr_dev_pwr_mode, hba->uic_link_state);
8841 return ret;
8842 }
8843
8844 /**
8845 * ufshcd_async_scan - asynchronous execution for probing hba
8846 * @data: data pointer to pass to this function
8847 * @cookie: cookie data
8848 */
8849 static void ufshcd_async_scan(void *data, async_cookie_t cookie)
8850 {
8851 struct ufs_hba *hba = (struct ufs_hba *)data;
8852 int ret;
8853
8854 down(&hba->host_sem);
8855 /* Initialize hba, detect and initialize UFS device */
8856 ret = ufshcd_probe_hba(hba, true);
8857 up(&hba->host_sem);
8858 if (ret)
8859 goto out;
8860
8861 /* Probe and add UFS logical units */
8862 ret = ufshcd_add_lus(hba);
8863
8864 out:
8865 pm_runtime_put_sync(hba->dev);
8866
8867 if (ret)
8868 dev_err(hba->dev, "%s failed: %d\n", __func__, ret);
8869 }
8870
8871 static enum scsi_timeout_action ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
8872 {
8873 struct ufs_hba *hba = shost_priv(scmd->device->host);
8874
8875 if (!hba->system_suspending) {
8876 /* Activate the error handler in the SCSI core. */
8877 return SCSI_EH_NOT_HANDLED;
8878 }
8879
8880 /*
8881 * If we get here we know that no TMFs are outstanding and also that
8882 * the only pending command is a START STOP UNIT command. Handle the
8883 * timeout of that command directly to prevent a deadlock between
8884 * ufshcd_set_dev_pwr_mode() and ufshcd_err_handler().
8885 */
8886 ufshcd_link_recovery(hba);
8887 dev_info(hba->dev, "%s() finished; outstanding_tasks = %#lx.\n",
8888 __func__, hba->outstanding_tasks);
8889
8890 return hba->outstanding_reqs ? SCSI_EH_RESET_TIMER : SCSI_EH_DONE;
8891 }
8892
8893 static const struct attribute_group *ufshcd_driver_groups[] = {
8894 &ufs_sysfs_unit_descriptor_group,
8895 &ufs_sysfs_lun_attributes_group,
8896 NULL,
8897 };
8898
8899 static struct ufs_hba_variant_params ufs_hba_vps = {
8900 .hba_enable_delay_us = 1000,
8901 .wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(40),
8902 .devfreq_profile.polling_ms = 100,
8903 .devfreq_profile.target = ufshcd_devfreq_target,
8904 .devfreq_profile.get_dev_status = ufshcd_devfreq_get_dev_status,
8905 .ondemand_data.upthreshold = 70,
8906 .ondemand_data.downdifferential = 5,
8907 };
8908
8909 static const struct scsi_host_template ufshcd_driver_template = {
8910 .module = THIS_MODULE,
8911 .name = UFSHCD,
8912 .proc_name = UFSHCD,
8913 .map_queues = ufshcd_map_queues,
8914 .queuecommand = ufshcd_queuecommand,
8915 .mq_poll = ufshcd_poll,
8916 .slave_alloc = ufshcd_slave_alloc,
8917 .slave_configure = ufshcd_slave_configure,
8918 .slave_destroy = ufshcd_slave_destroy,
8919 .change_queue_depth = ufshcd_change_queue_depth,
8920 .eh_abort_handler = ufshcd_abort,
8921 .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
8922 .eh_host_reset_handler = ufshcd_eh_host_reset_handler,
8923 .eh_timed_out = ufshcd_eh_timed_out,
8924 .this_id = -1,
8925 .sg_tablesize = SG_ALL,
8926 .cmd_per_lun = UFSHCD_CMD_PER_LUN,
8927 .can_queue = UFSHCD_CAN_QUEUE,
8928 .max_segment_size = PRDT_DATA_BYTE_COUNT_MAX,
8929 .max_sectors = SZ_1M / SECTOR_SIZE,
8930 .max_host_blocked = 1,
8931 .track_queue_depth = 1,
8932 .skip_settle_delay = 1,
8933 .sdev_groups = ufshcd_driver_groups,
8934 .rpm_autosuspend_delay = RPM_AUTOSUSPEND_DELAY_MS,
8935 };
8936
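/**
 * ufshcd_config_vreg_load - set the expected load on a UFS regulator
 * @dev: device used for error reporting
 * @vreg: regulator to configure; may be NULL
 * @ua: load in microamperes
 *
 * Regulators without a configured current limit (max_uA == 0) are skipped.
 *
 * Return: 0 upon success; < 0 upon failure.
 */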
8937 static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
8938 int ua)
8939 {
8940 int ret;
8941
8942 if (!vreg)
8943 return 0;
8944
8945 /*
8946 	 * The "set_load" operation is only required for regulators that have
8947 	 * a configured current limitation. Otherwise a zero max_uA may cause
8948 	 * unexpected behavior when the regulator is enabled or set to high
8949 	 * power mode.
8950 */
8951 if (!vreg->max_uA)
8952 return 0;
8953
8954 ret = regulator_set_load(vreg->reg, ua);
8955 if (ret < 0) {
8956 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
8957 __func__, vreg->name, ua, ret);
8958 }
8959
8960 return ret;
8961 }
8962
8963 static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
8964 struct ufs_vreg *vreg)
8965 {
8966 return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
8967 }
8968
8969 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
8970 struct ufs_vreg *vreg)
8971 {
8972 if (!vreg)
8973 return 0;
8974
8975 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
8976 }
8977
8978 static int ufshcd_config_vreg(struct device *dev,
8979 struct ufs_vreg *vreg, bool on)
8980 {
8981 if (regulator_count_voltages(vreg->reg) <= 0)
8982 return 0;
8983
8984 return ufshcd_config_vreg_load(dev, vreg, on ? vreg->max_uA : 0);
8985 }
8986
8987 static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
8988 {
8989 int ret = 0;
8990
8991 if (!vreg || vreg->enabled)
8992 goto out;
8993
8994 ret = ufshcd_config_vreg(dev, vreg, true);
8995 if (!ret)
8996 ret = regulator_enable(vreg->reg);
8997
8998 if (!ret)
8999 vreg->enabled = true;
9000 else
9001 dev_err(dev, "%s: %s enable failed, err=%d\n",
9002 __func__, vreg->name, ret);
9003 out:
9004 return ret;
9005 }
9006
9007 static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
9008 {
9009 int ret = 0;
9010
9011 if (!vreg || !vreg->enabled || vreg->always_on)
9012 goto out;
9013
9014 ret = regulator_disable(vreg->reg);
9015
9016 if (!ret) {
9017 /* ignore errors on applying disable config */
9018 ufshcd_config_vreg(dev, vreg, false);
9019 vreg->enabled = false;
9020 } else {
9021 dev_err(dev, "%s: %s disable failed, err=%d\n",
9022 __func__, vreg->name, ret);
9023 }
9024 out:
9025 return ret;
9026 }
9027
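/**
 * ufshcd_setup_vreg - enable or disable the UFS device power supplies
 * @hba: per-adapter instance
 * @on: true to enable VCC, VCCQ and VCCQ2; false to disable them
 *
 * On any failure, all three supplies are turned off again.
 *
 * Return: 0 upon success; < 0 upon failure.
 */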
9028 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
9029 {
9030 int ret = 0;
9031 struct device *dev = hba->dev;
9032 struct ufs_vreg_info *info = &hba->vreg_info;
9033
9034 ret = ufshcd_toggle_vreg(dev, info->vcc, on);
9035 if (ret)
9036 goto out;
9037
9038 ret = ufshcd_toggle_vreg(dev, info->vccq, on);
9039 if (ret)
9040 goto out;
9041
9042 ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
9043
9044 out:
9045 if (ret) {
9046 ufshcd_toggle_vreg(dev, info->vccq2, false);
9047 ufshcd_toggle_vreg(dev, info->vccq, false);
9048 ufshcd_toggle_vreg(dev, info->vcc, false);
9049 }
9050 return ret;
9051 }
9052
9053 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
9054 {
9055 struct ufs_vreg_info *info = &hba->vreg_info;
9056
9057 return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
9058 }
9059
9060 int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
9061 {
9062 int ret = 0;
9063
9064 if (!vreg)
9065 goto out;
9066
9067 vreg->reg = devm_regulator_get(dev, vreg->name);
9068 if (IS_ERR(vreg->reg)) {
9069 ret = PTR_ERR(vreg->reg);
9070 dev_err(dev, "%s: %s get failed, err=%d\n",
9071 __func__, vreg->name, ret);
9072 }
9073 out:
9074 return ret;
9075 }
9076 EXPORT_SYMBOL_GPL(ufshcd_get_vreg);
9077
9078 static int ufshcd_init_vreg(struct ufs_hba *hba)
9079 {
9080 int ret = 0;
9081 struct device *dev = hba->dev;
9082 struct ufs_vreg_info *info = &hba->vreg_info;
9083
9084 ret = ufshcd_get_vreg(dev, info->vcc);
9085 if (ret)
9086 goto out;
9087
9088 ret = ufshcd_get_vreg(dev, info->vccq);
9089 if (!ret)
9090 ret = ufshcd_get_vreg(dev, info->vccq2);
9091 out:
9092 return ret;
9093 }
9094
9095 static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
9096 {
9097 struct ufs_vreg_info *info = &hba->vreg_info;
9098
9099 return ufshcd_get_vreg(hba->dev, info->vdd_hba);
9100 }
9101
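/**
 * ufshcd_setup_clocks - enable or disable the UFS host controller clocks
 * @hba: per-adapter instance
 * @on: true to enable the clocks, false to disable them
 *
 * Clocks marked keep_link_active are left untouched while the link is active.
 * The vendor specific setup_clocks callback is invoked before and after the
 * clock state change. On failure all clocks that are still enabled are
 * disabled again.
 *
 * Return: 0 upon success; < 0 upon failure.
 */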
9102 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
9103 {
9104 int ret = 0;
9105 struct ufs_clk_info *clki;
9106 struct list_head *head = &hba->clk_list_head;
9107 unsigned long flags;
9108 ktime_t start = ktime_get();
9109 bool clk_state_changed = false;
9110
9111 if (list_empty(head))
9112 goto out;
9113
9114 ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
9115 if (ret)
9116 return ret;
9117
9118 list_for_each_entry(clki, head, list) {
9119 if (!IS_ERR_OR_NULL(clki->clk)) {
9120 /*
9121 * Don't disable clocks which are needed
9122 * to keep the link active.
9123 */
9124 if (ufshcd_is_link_active(hba) &&
9125 clki->keep_link_active)
9126 continue;
9127
9128 clk_state_changed = on ^ clki->enabled;
9129 if (on && !clki->enabled) {
9130 ret = clk_prepare_enable(clki->clk);
9131 if (ret) {
9132 dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
9133 __func__, clki->name, ret);
9134 goto out;
9135 }
9136 } else if (!on && clki->enabled) {
9137 clk_disable_unprepare(clki->clk);
9138 }
9139 clki->enabled = on;
9140 dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
9141 clki->name, on ? "en" : "dis");
9142 }
9143 }
9144
9145 ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
9146 if (ret)
9147 return ret;
9148
9149 out:
9150 if (ret) {
9151 list_for_each_entry(clki, head, list) {
9152 if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
9153 clk_disable_unprepare(clki->clk);
9154 }
9155 } else if (!ret && on) {
9156 spin_lock_irqsave(hba->host->host_lock, flags);
9157 hba->clk_gating.state = CLKS_ON;
9158 trace_ufshcd_clk_gating(dev_name(hba->dev),
9159 hba->clk_gating.state);
9160 spin_unlock_irqrestore(hba->host->host_lock, flags);
9161 }
9162
9163 if (clk_state_changed)
9164 trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
9165 (on ? "on" : "off"),
9166 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
9167 return ret;
9168 }
9169
9170 static enum ufs_ref_clk_freq ufshcd_parse_ref_clk_property(struct ufs_hba *hba)
9171 {
9172 u32 freq;
9173 int ret = device_property_read_u32(hba->dev, "ref-clk-freq", &freq);
9174
9175 if (ret) {
9176 dev_dbg(hba->dev, "Cannot query 'ref-clk-freq' property = %d", ret);
9177 return REF_CLK_FREQ_INVAL;
9178 }
9179
9180 return ufs_get_bref_clk_from_hz(freq);
9181 }
9182
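/**
 * ufshcd_init_clocks - acquire and set up the clocks listed for this host
 * @hba: per-adapter instance
 *
 * Get each clock in hba->clk_list_head from the clock framework, parse the
 * device reference clock frequency from the "ref_clk" entry and set each
 * clock to its maximum frequency when one is specified.
 *
 * Return: 0 upon success; < 0 upon failure.
 */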
9183 static int ufshcd_init_clocks(struct ufs_hba *hba)
9184 {
9185 int ret = 0;
9186 struct ufs_clk_info *clki;
9187 struct device *dev = hba->dev;
9188 struct list_head *head = &hba->clk_list_head;
9189
9190 if (list_empty(head))
9191 goto out;
9192
9193 list_for_each_entry(clki, head, list) {
9194 if (!clki->name)
9195 continue;
9196
9197 clki->clk = devm_clk_get(dev, clki->name);
9198 if (IS_ERR(clki->clk)) {
9199 ret = PTR_ERR(clki->clk);
9200 dev_err(dev, "%s: %s clk get failed, %d\n",
9201 __func__, clki->name, ret);
9202 goto out;
9203 }
9204
9205 /*
9206 * Parse device ref clk freq as per device tree "ref_clk".
9207 * Default dev_ref_clk_freq is set to REF_CLK_FREQ_INVAL
9208 * in ufshcd_alloc_host().
9209 */
9210 if (!strcmp(clki->name, "ref_clk"))
9211 ufshcd_parse_dev_ref_clk_freq(hba, clki->clk);
9212
9213 if (clki->max_freq) {
9214 ret = clk_set_rate(clki->clk, clki->max_freq);
9215 if (ret) {
9216 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
9217 __func__, clki->name,
9218 clki->max_freq, ret);
9219 goto out;
9220 }
9221 clki->curr_freq = clki->max_freq;
9222 }
9223 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
9224 clki->name, clk_get_rate(clki->clk));
9225 }
9226 out:
9227 return ret;
9228 }
9229
9230 static int ufshcd_variant_hba_init(struct ufs_hba *hba)
9231 {
9232 int err = 0;
9233
9234 if (!hba->vops)
9235 goto out;
9236
9237 err = ufshcd_vops_init(hba);
9238 if (err)
9239 dev_err_probe(hba->dev, err,
9240 "%s: variant %s init failed with err %d\n",
9241 __func__, ufshcd_get_var_name(hba), err);
9242 out:
9243 return err;
9244 }
9245
9246 static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
9247 {
9248 if (!hba->vops)
9249 return;
9250
9251 ufshcd_vops_exit(hba);
9252 }
9253
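/**
 * ufshcd_hba_init - power up and initialize the host controller resources
 * @hba: per-adapter instance
 *
 * Enable the host controller supply, initialize and enable the clocks and
 * the UFS device supplies, and run the vendor specific initialization. On
 * success hba->is_powered is set; on failure everything that was enabled is
 * undone.
 *
 * Return: 0 upon success; < 0 upon failure.
 */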
9254 static int ufshcd_hba_init(struct ufs_hba *hba)
9255 {
9256 int err;
9257
9258 /*
9259 * Handle host controller power separately from the UFS device power
9260 * rails as it will help controlling the UFS host controller power
9261 * collapse easily which is different than UFS device power collapse.
9262 * Also, enable the host controller power before we go ahead with rest
9263 * of the initialization here.
9264 */
9265 err = ufshcd_init_hba_vreg(hba);
9266 if (err)
9267 goto out;
9268
9269 err = ufshcd_setup_hba_vreg(hba, true);
9270 if (err)
9271 goto out;
9272
9273 err = ufshcd_init_clocks(hba);
9274 if (err)
9275 goto out_disable_hba_vreg;
9276
9277 if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
9278 hba->dev_ref_clk_freq = ufshcd_parse_ref_clk_property(hba);
9279
9280 err = ufshcd_setup_clocks(hba, true);
9281 if (err)
9282 goto out_disable_hba_vreg;
9283
9284 err = ufshcd_init_vreg(hba);
9285 if (err)
9286 goto out_disable_clks;
9287
9288 err = ufshcd_setup_vreg(hba, true);
9289 if (err)
9290 goto out_disable_clks;
9291
9292 err = ufshcd_variant_hba_init(hba);
9293 if (err)
9294 goto out_disable_vreg;
9295
9296 ufs_debugfs_hba_init(hba);
9297
9298 hba->is_powered = true;
9299 goto out;
9300
9301 out_disable_vreg:
9302 ufshcd_setup_vreg(hba, false);
9303 out_disable_clks:
9304 ufshcd_setup_clocks(hba, false);
9305 out_disable_hba_vreg:
9306 ufshcd_setup_hba_vreg(hba, false);
9307 out:
9308 return err;
9309 }
9310
9311 static void ufshcd_hba_exit(struct ufs_hba *hba)
9312 {
9313 if (hba->is_powered) {
9314 ufshcd_exit_clk_scaling(hba);
9315 ufshcd_exit_clk_gating(hba);
9316 if (hba->eh_wq)
9317 destroy_workqueue(hba->eh_wq);
9318 ufs_debugfs_hba_exit(hba);
9319 ufshcd_variant_hba_exit(hba);
9320 ufshcd_setup_vreg(hba, false);
9321 ufshcd_setup_clocks(hba, false);
9322 ufshcd_setup_hba_vreg(hba, false);
9323 hba->is_powered = false;
9324 ufs_put_device_desc(hba);
9325 }
9326 }
9327
9328 static int ufshcd_execute_start_stop(struct scsi_device *sdev,
9329 enum ufs_dev_pwr_mode pwr_mode,
9330 struct scsi_sense_hdr *sshdr)
9331 {
9332 const unsigned char cdb[6] = { START_STOP, 0, 0, 0, pwr_mode << 4, 0 };
9333 const struct scsi_exec_args args = {
9334 .sshdr = sshdr,
9335 .req_flags = BLK_MQ_REQ_PM,
9336 .scmd_flags = SCMD_FAIL_IF_RECOVERING,
9337 };
9338
9339 return scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, /*buffer=*/NULL,
9340 /*bufflen=*/0, /*timeout=*/10 * HZ, /*retries=*/0,
9341 &args);
9342 }
9343
9344 /**
9345 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
9346 * power mode
9347 * @hba: per adapter instance
9348 * @pwr_mode: device power mode to set
9349 *
9350 * Return: 0 if requested power mode is set successfully;
9351 * < 0 if failed to set the requested power mode.
9352 */
9353 static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
9354 enum ufs_dev_pwr_mode pwr_mode)
9355 {
9356 struct scsi_sense_hdr sshdr;
9357 struct scsi_device *sdp;
9358 unsigned long flags;
9359 int ret, retries;
9360
9361 spin_lock_irqsave(hba->host->host_lock, flags);
9362 sdp = hba->ufs_device_wlun;
9363 if (sdp && scsi_device_online(sdp))
9364 ret = scsi_device_get(sdp);
9365 else
9366 ret = -ENODEV;
9367 spin_unlock_irqrestore(hba->host->host_lock, flags);
9368
9369 if (ret)
9370 return ret;
9371
9372 /*
9373 * If scsi commands fail, the scsi mid-layer schedules scsi error-
9374 * handling, which would wait for host to be resumed. Since we know
9375 * we are functional while we are here, skip host resume in error
9376 * handling context.
9377 */
9378 hba->host->eh_noresume = 1;
9379
9380 /*
9381 	 * This function is generally called from the power management callbacks,
9382 	 * hence set the RQF_PM flag so that it doesn't resume the already
9383 	 * suspended children.
9384 */
9385 for (retries = 3; retries > 0; --retries) {
9386 ret = ufshcd_execute_start_stop(sdp, pwr_mode, &sshdr);
9387 /*
9388 		 * scsi_execute_cmd() only returns a negative value if the request
9389 * queue is dying.
9390 */
9391 if (ret <= 0)
9392 break;
9393 }
9394 if (ret) {
9395 sdev_printk(KERN_WARNING, sdp,
9396 "START_STOP failed for power mode: %d, result %x\n",
9397 pwr_mode, ret);
9398 if (ret > 0) {
9399 if (scsi_sense_valid(&sshdr))
9400 scsi_print_sense_hdr(sdp, NULL, &sshdr);
9401 ret = -EIO;
9402 }
9403 } else {
9404 hba->curr_dev_pwr_mode = pwr_mode;
9405 }
9406
9407 scsi_device_put(sdp);
9408 hba->host->eh_noresume = 0;
9409 return ret;
9410 }
9411
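/**
 * ufshcd_link_state_transition - put the UniPro link in the requested state
 * @hba: per-adapter instance
 * @req_link_state: requested link state (active, Hibern8 or off)
 * @check_for_bkops: if true, do not turn the link off while auto-bkops is
 *	enabled
 *
 * Return: 0 upon success; < 0 upon failure.
 */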
9412 static int ufshcd_link_state_transition(struct ufs_hba *hba,
9413 enum uic_link_state req_link_state,
9414 bool check_for_bkops)
9415 {
9416 int ret = 0;
9417
9418 if (req_link_state == hba->uic_link_state)
9419 return 0;
9420
9421 if (req_link_state == UIC_LINK_HIBERN8_STATE) {
9422 ret = ufshcd_uic_hibern8_enter(hba);
9423 if (!ret) {
9424 ufshcd_set_link_hibern8(hba);
9425 } else {
9426 dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
9427 __func__, ret);
9428 goto out;
9429 }
9430 }
9431 /*
9432 * If autobkops is enabled, link can't be turned off because
9433 * turning off the link would also turn off the device, except in the
9434 * case of DeepSleep where the device is expected to remain powered.
9435 */
9436 else if ((req_link_state == UIC_LINK_OFF_STATE) &&
9437 (!check_for_bkops || !hba->auto_bkops_enabled)) {
9438 /*
9439 * Let's make sure that link is in low power mode, we are doing
9440 		 * this currently by putting the link in Hibern8. Another way to
9441 * put the link in low power mode is to send the DME end point
9442 * to device and then send the DME reset command to local
9443 * unipro. But putting the link in hibern8 is much faster.
9444 *
9445 * Note also that putting the link in Hibern8 is a requirement
9446 * for entering DeepSleep.
9447 */
9448 ret = ufshcd_uic_hibern8_enter(hba);
9449 if (ret) {
9450 dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
9451 __func__, ret);
9452 goto out;
9453 }
9454 /*
9455 * Change controller state to "reset state" which
9456 * should also put the link in off/reset state
9457 */
9458 ufshcd_hba_stop(hba);
9459 /*
9460 * TODO: Check if we need any delay to make sure that
9461 * controller is reset
9462 */
9463 ufshcd_set_link_off(hba);
9464 }
9465
9466 out:
9467 return ret;
9468 }
9469
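/**
 * ufshcd_vreg_set_lpm - put the UFS device power supplies in low power mode
 * @hba: per-adapter instance
 *
 * Depending on the device power mode and link state, either turn off all
 * device supplies, turn off only VCC, or put VCCQ/VCCQ2 in low power mode.
 * Quirk-based delays are applied before and after switching the rails.
 */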
9470 static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
9471 {
9472 bool vcc_off = false;
9473
9474 /*
9475 * It seems some UFS devices may keep drawing more than sleep current
9476 	 * (at least for 500us) from UFS rails (especially from VCCQ rail).
9477 * To avoid this situation, add 2ms delay before putting these UFS
9478 * rails in LPM mode.
9479 */
9480 if (!ufshcd_is_link_active(hba) &&
9481 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
9482 usleep_range(2000, 2100);
9483
9484 /*
9485 	 * If UFS device is in UFS_Sleep state, turn off the VCC rail to save some
9486 * power.
9487 *
9488 * If UFS device and link is in OFF state, all power supplies (VCC,
9489 * VCCQ, VCCQ2) can be turned off if power on write protect is not
9490 * required. If UFS link is inactive (Hibern8 or OFF state) and device
9491 * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
9492 *
9493 * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
9494 * in low power state which would save some power.
9495 *
9496 * If Write Booster is enabled and the device needs to flush the WB
9497 * buffer OR if bkops status is urgent for WB, keep Vcc on.
9498 */
9499 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
9500 !hba->dev_info.is_lu_power_on_wp) {
9501 ufshcd_setup_vreg(hba, false);
9502 vcc_off = true;
9503 } else if (!ufshcd_is_ufs_dev_active(hba)) {
9504 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
9505 vcc_off = true;
9506 if (ufshcd_is_link_hibern8(hba) || ufshcd_is_link_off(hba)) {
9507 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
9508 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
9509 }
9510 }
9511
9512 /*
9513 * Some UFS devices require delay after VCC power rail is turned-off.
9514 */
9515 if (vcc_off && hba->vreg_info.vcc &&
9516 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)
9517 usleep_range(5000, 5100);
9518 }
9519
9520 #ifdef CONFIG_PM
9521 static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
9522 {
9523 int ret = 0;
9524
9525 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
9526 !hba->dev_info.is_lu_power_on_wp) {
9527 ret = ufshcd_setup_vreg(hba, true);
9528 } else if (!ufshcd_is_ufs_dev_active(hba)) {
9529 if (!ufshcd_is_link_active(hba)) {
9530 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
9531 if (ret)
9532 goto vcc_disable;
9533 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
9534 if (ret)
9535 goto vccq_lpm;
9536 }
9537 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
9538 }
9539 goto out;
9540
9541 vccq_lpm:
9542 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
9543 vcc_disable:
9544 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
9545 out:
9546 return ret;
9547 }
9548 #endif /* CONFIG_PM */
9549
9550 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
9551 {
9552 if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
9553 ufshcd_setup_hba_vreg(hba, false);
9554 }
9555
9556 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
9557 {
9558 if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
9559 ufshcd_setup_hba_vreg(hba, true);
9560 }
9561
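/**
 * __ufshcd_wl_suspend - suspend the UFS device W-LUN
 * @hba: per-adapter instance
 * @pm_op: runtime PM, system PM or shutdown operation
 *
 * Determine the target device power mode and link state from the PM level,
 * handle bkops and WriteBooster flush requirements, send START STOP UNIT to
 * the device and transition the link, invoking the vendor suspend callbacks
 * around the state change.
 *
 * Return: 0 upon success; < 0 upon failure.
 */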
9562 static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
9563 {
9564 int ret = 0;
9565 bool check_for_bkops;
9566 enum ufs_pm_level pm_lvl;
9567 enum ufs_dev_pwr_mode req_dev_pwr_mode;
9568 enum uic_link_state req_link_state;
9569
9570 hba->pm_op_in_progress = true;
9571 if (pm_op != UFS_SHUTDOWN_PM) {
9572 pm_lvl = pm_op == UFS_RUNTIME_PM ?
9573 hba->rpm_lvl : hba->spm_lvl;
9574 req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
9575 req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
9576 } else {
9577 req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
9578 req_link_state = UIC_LINK_OFF_STATE;
9579 }
9580
9581 /*
9582 * If we can't transition into any of the low power modes
9583 * just gate the clocks.
9584 */
9585 ufshcd_hold(hba);
9586 hba->clk_gating.is_suspended = true;
9587
9588 if (ufshcd_is_clkscaling_supported(hba))
9589 ufshcd_clk_scaling_suspend(hba, true);
9590
9591 if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
9592 req_link_state == UIC_LINK_ACTIVE_STATE) {
9593 goto vops_suspend;
9594 }
9595
9596 if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
9597 (req_link_state == hba->uic_link_state))
9598 goto enable_scaling;
9599
9600 /* UFS device & link must be active before we enter in this function */
9601 if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
9602 /* Wait err handler finish or trigger err recovery */
9603 if (!ufshcd_eh_in_progress(hba))
9604 ufshcd_force_error_recovery(hba);
9605 ret = -EBUSY;
9606 goto enable_scaling;
9607 }
9608
9609 if (pm_op == UFS_RUNTIME_PM) {
9610 if (ufshcd_can_autobkops_during_suspend(hba)) {
9611 /*
9612 			 * The device is idle with no requests in the queue, so
9613 * allow background operations if bkops status shows
9614 * that performance might be impacted.
9615 */
9616 ret = ufshcd_urgent_bkops(hba);
9617 if (ret) {
9618 /*
9619 * If return err in suspend flow, IO will hang.
9620 * Trigger error handler and break suspend for
9621 * error recovery.
9622 */
9623 ufshcd_force_error_recovery(hba);
9624 ret = -EBUSY;
9625 goto enable_scaling;
9626 }
9627 } else {
9628 /* make sure that auto bkops is disabled */
9629 ufshcd_disable_auto_bkops(hba);
9630 }
9631 /*
9632 * If device needs to do BKOP or WB buffer flush during
9633 * Hibern8, keep device power mode as "active power mode"
9634 * and VCC supply.
9635 */
9636 hba->dev_info.b_rpm_dev_flush_capable =
9637 hba->auto_bkops_enabled ||
9638 (((req_link_state == UIC_LINK_HIBERN8_STATE) ||
9639 ((req_link_state == UIC_LINK_ACTIVE_STATE) &&
9640 ufshcd_is_auto_hibern8_enabled(hba))) &&
9641 ufshcd_wb_need_flush(hba));
9642 }
9643
9644 flush_work(&hba->eeh_work);
9645
9646 ret = ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE);
9647 if (ret)
9648 goto enable_scaling;
9649
9650 if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
9651 if (pm_op != UFS_RUNTIME_PM)
9652 /* ensure that bkops is disabled */
9653 ufshcd_disable_auto_bkops(hba);
9654
9655 if (!hba->dev_info.b_rpm_dev_flush_capable) {
9656 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
9657 if (ret && pm_op != UFS_SHUTDOWN_PM) {
9658 /*
9659 * If return err in suspend flow, IO will hang.
9660 * Trigger error handler and break suspend for
9661 * error recovery.
9662 */
9663 ufshcd_force_error_recovery(hba);
9664 ret = -EBUSY;
9665 }
9666 if (ret)
9667 goto enable_scaling;
9668 }
9669 }
9670
9671 /*
9672 * In the case of DeepSleep, the device is expected to remain powered
9673 * with the link off, so do not check for bkops.
9674 */
9675 check_for_bkops = !ufshcd_is_ufs_dev_deepsleep(hba);
9676 ret = ufshcd_link_state_transition(hba, req_link_state, check_for_bkops);
9677 if (ret && pm_op != UFS_SHUTDOWN_PM) {
9678 /*
9679 * If return err in suspend flow, IO will hang.
9680 * Trigger error handler and break suspend for
9681 * error recovery.
9682 */
9683 ufshcd_force_error_recovery(hba);
9684 ret = -EBUSY;
9685 }
9686 if (ret)
9687 goto set_dev_active;
9688
9689 vops_suspend:
9690 /*
9691 * Call vendor specific suspend callback. As these callbacks may access
9692 * vendor specific host controller register space call them before the
9693 * host clocks are ON.
9694 */
9695 ret = ufshcd_vops_suspend(hba, pm_op, POST_CHANGE);
9696 if (ret)
9697 goto set_link_active;
9698 goto out;
9699
9700 set_link_active:
9701 /*
9702 * Device hardware reset is required to exit DeepSleep. Also, for
9703 * DeepSleep, the link is off so host reset and restore will be done
9704 * further below.
9705 */
9706 if (ufshcd_is_ufs_dev_deepsleep(hba)) {
9707 ufshcd_device_reset(hba);
9708 WARN_ON(!ufshcd_is_link_off(hba));
9709 }
9710 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
9711 ufshcd_set_link_active(hba);
9712 else if (ufshcd_is_link_off(hba))
9713 ufshcd_host_reset_and_restore(hba);
9714 set_dev_active:
9715 /* Can also get here needing to exit DeepSleep */
9716 if (ufshcd_is_ufs_dev_deepsleep(hba)) {
9717 ufshcd_device_reset(hba);
9718 ufshcd_host_reset_and_restore(hba);
9719 }
9720 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
9721 ufshcd_disable_auto_bkops(hba);
9722 enable_scaling:
9723 if (ufshcd_is_clkscaling_supported(hba))
9724 ufshcd_clk_scaling_suspend(hba, false);
9725
9726 hba->dev_info.b_rpm_dev_flush_capable = false;
9727 out:
9728 if (hba->dev_info.b_rpm_dev_flush_capable) {
9729 schedule_delayed_work(&hba->rpm_dev_flush_recheck_work,
9730 msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS));
9731 }
9732
9733 if (ret) {
9734 ufshcd_update_evt_hist(hba, UFS_EVT_WL_SUSP_ERR, (u32)ret);
9735 hba->clk_gating.is_suspended = false;
9736 ufshcd_release(hba);
9737 }
9738 hba->pm_op_in_progress = false;
9739 return ret;
9740 }
9741
9742 #ifdef CONFIG_PM
9743 static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
9744 {
9745 int ret;
9746 enum uic_link_state old_link_state = hba->uic_link_state;
9747
9748 hba->pm_op_in_progress = true;
9749
9750 /*
9751 * Call vendor specific resume callback. As these callbacks may access
9752 * vendor specific host controller register space call them when the
9753 * host clocks are ON.
9754 */
9755 ret = ufshcd_vops_resume(hba, pm_op);
9756 if (ret)
9757 goto out;
9758
9759 /* For DeepSleep, the only supported option is to have the link off */
9760 WARN_ON(ufshcd_is_ufs_dev_deepsleep(hba) && !ufshcd_is_link_off(hba));
9761
9762 if (ufshcd_is_link_hibern8(hba)) {
9763 ret = ufshcd_uic_hibern8_exit(hba);
9764 if (!ret) {
9765 ufshcd_set_link_active(hba);
9766 } else {
9767 dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
9768 __func__, ret);
9769 goto vendor_suspend;
9770 }
9771 } else if (ufshcd_is_link_off(hba)) {
9772 /*
9773 * A full initialization of the host and the device is
9774 * required since the link was put to off during suspend.
9775 * Note, in the case of DeepSleep, the device will exit
9776 * DeepSleep due to device reset.
9777 */
9778 ret = ufshcd_reset_and_restore(hba);
9779 /*
9780 * ufshcd_reset_and_restore() should have already
9781 * set the link state as active
9782 */
9783 if (ret || !ufshcd_is_link_active(hba))
9784 goto vendor_suspend;
9785 }
9786
9787 if (!ufshcd_is_ufs_dev_active(hba)) {
9788 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
9789 if (ret)
9790 goto set_old_link_state;
9791 ufshcd_set_timestamp_attr(hba);
9792 }
9793
9794 if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
9795 ufshcd_enable_auto_bkops(hba);
9796 else
9797 /*
9798 * If BKOPs operations are urgently needed at this moment then
9799 * keep auto-bkops enabled or else disable it.
9800 */
9801 ufshcd_urgent_bkops(hba);
9802
9803 if (hba->ee_usr_mask)
9804 ufshcd_write_ee_control(hba);
9805
9806 if (ufshcd_is_clkscaling_supported(hba))
9807 ufshcd_clk_scaling_suspend(hba, false);
9808
9809 if (hba->dev_info.b_rpm_dev_flush_capable) {
9810 hba->dev_info.b_rpm_dev_flush_capable = false;
9811 cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
9812 }
9813
9814 /* Enable Auto-Hibernate if configured */
9815 ufshcd_auto_hibern8_enable(hba);
9816
9817 goto out;
9818
9819 set_old_link_state:
9820 ufshcd_link_state_transition(hba, old_link_state, 0);
9821 vendor_suspend:
9822 ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE);
9823 ufshcd_vops_suspend(hba, pm_op, POST_CHANGE);
9824 out:
9825 if (ret)
9826 ufshcd_update_evt_hist(hba, UFS_EVT_WL_RES_ERR, (u32)ret);
9827 hba->clk_gating.is_suspended = false;
9828 ufshcd_release(hba);
9829 hba->pm_op_in_progress = false;
9830 return ret;
9831 }
9832
9833 static int ufshcd_wl_runtime_suspend(struct device *dev)
9834 {
9835 struct scsi_device *sdev = to_scsi_device(dev);
9836 struct ufs_hba *hba;
9837 int ret;
9838 ktime_t start = ktime_get();
9839
9840 hba = shost_priv(sdev->host);
9841
9842 ret = __ufshcd_wl_suspend(hba, UFS_RUNTIME_PM);
9843 if (ret)
9844 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
9845
9846 trace_ufshcd_wl_runtime_suspend(dev_name(dev), ret,
9847 ktime_to_us(ktime_sub(ktime_get(), start)),
9848 hba->curr_dev_pwr_mode, hba->uic_link_state);
9849
9850 return ret;
9851 }
9852
9853 static int ufshcd_wl_runtime_resume(struct device *dev)
9854 {
9855 struct scsi_device *sdev = to_scsi_device(dev);
9856 struct ufs_hba *hba;
9857 int ret = 0;
9858 ktime_t start = ktime_get();
9859
9860 hba = shost_priv(sdev->host);
9861
9862 ret = __ufshcd_wl_resume(hba, UFS_RUNTIME_PM);
9863 if (ret)
9864 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
9865
9866 trace_ufshcd_wl_runtime_resume(dev_name(dev), ret,
9867 ktime_to_us(ktime_sub(ktime_get(), start)),
9868 hba->curr_dev_pwr_mode, hba->uic_link_state);
9869
9870 return ret;
9871 }
9872 #endif
9873
9874 #ifdef CONFIG_PM_SLEEP
9875 static int ufshcd_wl_suspend(struct device *dev)
9876 {
9877 struct scsi_device *sdev = to_scsi_device(dev);
9878 struct ufs_hba *hba;
9879 int ret = 0;
9880 ktime_t start = ktime_get();
9881
9882 hba = shost_priv(sdev->host);
9883 down(&hba->host_sem);
9884 hba->system_suspending = true;
9885
9886 if (pm_runtime_suspended(dev))
9887 goto out;
9888
9889 ret = __ufshcd_wl_suspend(hba, UFS_SYSTEM_PM);
9890 if (ret) {
9891 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
9892 up(&hba->host_sem);
9893 }
9894
9895 out:
9896 if (!ret)
9897 hba->is_sys_suspended = true;
9898 trace_ufshcd_wl_suspend(dev_name(dev), ret,
9899 ktime_to_us(ktime_sub(ktime_get(), start)),
9900 hba->curr_dev_pwr_mode, hba->uic_link_state);
9901
9902 return ret;
9903 }
9904
9905 static int ufshcd_wl_resume(struct device *dev)
9906 {
9907 struct scsi_device *sdev = to_scsi_device(dev);
9908 struct ufs_hba *hba;
9909 int ret = 0;
9910 ktime_t start = ktime_get();
9911
9912 hba = shost_priv(sdev->host);
9913
9914 if (pm_runtime_suspended(dev))
9915 goto out;
9916
9917 ret = __ufshcd_wl_resume(hba, UFS_SYSTEM_PM);
9918 if (ret)
9919 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
9920 out:
9921 trace_ufshcd_wl_resume(dev_name(dev), ret,
9922 ktime_to_us(ktime_sub(ktime_get(), start)),
9923 hba->curr_dev_pwr_mode, hba->uic_link_state);
9924 if (!ret)
9925 hba->is_sys_suspended = false;
9926 hba->system_suspending = false;
9927 up(&hba->host_sem);
9928 return ret;
9929 }
9930 #endif
9931
9932 /**
9933 * ufshcd_suspend - helper function for suspend operations
9934 * @hba: per adapter instance
9935 *
9936 * This function disables irqs, turns off clocks and
9937 * puts vreg and hba-vreg into LPM mode.
9938 *
9939 * Return: 0 upon success; < 0 upon failure.
9940 */
9941 static int ufshcd_suspend(struct ufs_hba *hba)
9942 {
9943 int ret;
9944
9945 if (!hba->is_powered)
9946 return 0;
9947 /*
9948 	 * Disable the host irq as there won't be any host controller
9949 	 * transaction expected till resume.
9950 */
9951 ufshcd_disable_irq(hba);
9952 ret = ufshcd_setup_clocks(hba, false);
9953 if (ret) {
9954 ufshcd_enable_irq(hba);
9955 return ret;
9956 }
9957 if (ufshcd_is_clkgating_allowed(hba)) {
9958 hba->clk_gating.state = CLKS_OFF;
9959 trace_ufshcd_clk_gating(dev_name(hba->dev),
9960 hba->clk_gating.state);
9961 }
9962
9963 ufshcd_vreg_set_lpm(hba);
9964 /* Put the host controller in low power mode if possible */
9965 ufshcd_hba_vreg_set_lpm(hba);
9966 return ret;
9967 }
9968
9969 #ifdef CONFIG_PM
9970 /**
9971 * ufshcd_resume - helper function for resume operations
9972 * @hba: per adapter instance
9973 *
9974 * This function basically turns on the regulators, clocks and
9975 * irqs of the hba.
9976 *
9977 * Return: 0 for success and non-zero for failure.
9978 */
9979 static int ufshcd_resume(struct ufs_hba *hba)
9980 {
9981 int ret;
9982
9983 if (!hba->is_powered)
9984 return 0;
9985
9986 ufshcd_hba_vreg_set_hpm(hba);
9987 ret = ufshcd_vreg_set_hpm(hba);
9988 if (ret)
9989 goto out;
9990
9991 /* Make sure clocks are enabled before accessing controller */
9992 ret = ufshcd_setup_clocks(hba, true);
9993 if (ret)
9994 goto disable_vreg;
9995
9996 /* enable the host irq as host controller would be active soon */
9997 ufshcd_enable_irq(hba);
9998
9999 goto out;
10000
10001 disable_vreg:
10002 ufshcd_vreg_set_lpm(hba);
10003 out:
10004 if (ret)
10005 ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)ret);
10006 return ret;
10007 }
10008 #endif /* CONFIG_PM */
10009
10010 #ifdef CONFIG_PM_SLEEP
10011 /**
10012 * ufshcd_system_suspend - system suspend callback
10013 * @dev: Device associated with the UFS controller.
10014 *
10015 * Executed before putting the system into a sleep state in which the contents
10016 * of main memory are preserved.
10017 *
10018 * Return: 0 for success and non-zero for failure.
10019 */
10020 int ufshcd_system_suspend(struct device *dev)
10021 {
10022 struct ufs_hba *hba = dev_get_drvdata(dev);
10023 int ret = 0;
10024 ktime_t start = ktime_get();
10025
10026 if (pm_runtime_suspended(hba->dev))
10027 goto out;
10028
10029 ret = ufshcd_suspend(hba);
10030 out:
10031 trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
10032 ktime_to_us(ktime_sub(ktime_get(), start)),
10033 hba->curr_dev_pwr_mode, hba->uic_link_state);
10034 return ret;
10035 }
10036 EXPORT_SYMBOL(ufshcd_system_suspend);
10037
10038 /**
10039 * ufshcd_system_resume - system resume callback
10040 * @dev: Device associated with the UFS controller.
10041 *
10042 * Executed after waking the system up from a sleep state in which the contents
10043 * of main memory were preserved.
10044 *
10045 * Return: 0 for success and non-zero for failure.
10046 */
10047 int ufshcd_system_resume(struct device *dev)
10048 {
10049 struct ufs_hba *hba = dev_get_drvdata(dev);
10050 ktime_t start = ktime_get();
10051 int ret = 0;
10052
10053 if (pm_runtime_suspended(hba->dev))
10054 goto out;
10055
10056 ret = ufshcd_resume(hba);
10057
10058 out:
10059 trace_ufshcd_system_resume(dev_name(hba->dev), ret,
10060 ktime_to_us(ktime_sub(ktime_get(), start)),
10061 hba->curr_dev_pwr_mode, hba->uic_link_state);
10062
10063 return ret;
10064 }
10065 EXPORT_SYMBOL(ufshcd_system_resume);
10066 #endif /* CONFIG_PM_SLEEP */
10067
10068 #ifdef CONFIG_PM
10069 /**
10070 * ufshcd_runtime_suspend - runtime suspend callback
10071 * @dev: Device associated with the UFS controller.
10072 *
10073 * Check the description of ufshcd_suspend() function for more details.
10074 *
10075 * Return: 0 for success and non-zero for failure.
10076 */
10077 int ufshcd_runtime_suspend(struct device *dev)
10078 {
10079 struct ufs_hba *hba = dev_get_drvdata(dev);
10080 int ret;
10081 ktime_t start = ktime_get();
10082
10083 ret = ufshcd_suspend(hba);
10084
10085 trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
10086 ktime_to_us(ktime_sub(ktime_get(), start)),
10087 hba->curr_dev_pwr_mode, hba->uic_link_state);
10088 return ret;
10089 }
10090 EXPORT_SYMBOL(ufshcd_runtime_suspend);
10091
10092 /**
10093 * ufshcd_runtime_resume - runtime resume routine
10094 * @dev: Device associated with the UFS controller.
10095 *
10096 * This function basically brings controller
10097 * to active state. Following operations are done in this function:
10098 *
10099 * 1. Turn on all the controller related clocks
10100 * 2. Turn ON VCC rail
10101 *
10102 * Return: 0 upon success; < 0 upon failure.
10103 */
10104 int ufshcd_runtime_resume(struct device *dev)
10105 {
10106 struct ufs_hba *hba = dev_get_drvdata(dev);
10107 int ret;
10108 ktime_t start = ktime_get();
10109
10110 ret = ufshcd_resume(hba);
10111
10112 trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
10113 ktime_to_us(ktime_sub(ktime_get(), start)),
10114 hba->curr_dev_pwr_mode, hba->uic_link_state);
10115 return ret;
10116 }
10117 EXPORT_SYMBOL(ufshcd_runtime_resume);
10118 #endif /* CONFIG_PM */
10119
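/**
 * ufshcd_wl_shutdown - shutdown handler for the UFS device W-LUN
 * @dev: device associated with the UFS device W-LUN
 *
 * Quiesce the W-LUN, set all other SCSI devices offline, power down the UFS
 * device and the link and, when both are off, suspend the host controller.
 */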
10120 static void ufshcd_wl_shutdown(struct device *dev)
10121 {
10122 struct scsi_device *sdev = to_scsi_device(dev);
10123 struct ufs_hba *hba = shost_priv(sdev->host);
10124
10125 down(&hba->host_sem);
10126 hba->shutting_down = true;
10127 up(&hba->host_sem);
10128
10129 /* Turn on everything while shutting down */
10130 ufshcd_rpm_get_sync(hba);
10131 scsi_device_quiesce(sdev);
10132 shost_for_each_device(sdev, hba->host) {
10133 if (sdev == hba->ufs_device_wlun)
10134 continue;
10135 mutex_lock(&sdev->state_mutex);
10136 scsi_device_set_state(sdev, SDEV_OFFLINE);
10137 mutex_unlock(&sdev->state_mutex);
10138 }
10139 __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
10140
10141 /*
10142 * Next, turn off the UFS controller and the UFS regulators. Disable
10143 * clocks.
10144 */
10145 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
10146 ufshcd_suspend(hba);
10147
10148 hba->is_powered = false;
10149 }
10150
10151 /**
10152 * ufshcd_remove - de-allocate SCSI host and host memory space
10153 * data structure memory
10154 * @hba: per adapter instance
10155 */
10156 void ufshcd_remove(struct ufs_hba *hba)
10157 {
10158 if (hba->ufs_device_wlun)
10159 ufshcd_rpm_get_sync(hba);
10160 ufs_hwmon_remove(hba);
10161 ufs_bsg_remove(hba);
10162 ufs_sysfs_remove_nodes(hba->dev);
10163 blk_mq_destroy_queue(hba->tmf_queue);
10164 blk_put_queue(hba->tmf_queue);
10165 blk_mq_free_tag_set(&hba->tmf_tag_set);
10166 if (hba->scsi_host_added)
10167 scsi_remove_host(hba->host);
10168 /* disable interrupts */
10169 ufshcd_disable_intr(hba, hba->intr_mask);
10170 ufshcd_hba_stop(hba);
10171 ufshcd_hba_exit(hba);
10172 }
10173 EXPORT_SYMBOL_GPL(ufshcd_remove);
10174
10175 #ifdef CONFIG_PM_SLEEP
10176 int ufshcd_system_freeze(struct device *dev)
10177 {
10178
10179 return ufshcd_system_suspend(dev);
10180
10181 }
10182 EXPORT_SYMBOL_GPL(ufshcd_system_freeze);
10183
10184 int ufshcd_system_restore(struct device *dev)
10185 {
10186
10187 struct ufs_hba *hba = dev_get_drvdata(dev);
10188 int ret;
10189
10190 ret = ufshcd_system_resume(dev);
10191 if (ret)
10192 return ret;
10193
10194 /* Configure UTRL and UTMRL base address registers */
10195 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
10196 REG_UTP_TRANSFER_REQ_LIST_BASE_L);
10197 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
10198 REG_UTP_TRANSFER_REQ_LIST_BASE_H);
10199 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
10200 REG_UTP_TASK_REQ_LIST_BASE_L);
10201 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
10202 REG_UTP_TASK_REQ_LIST_BASE_H);
10203 /*
10204 * Make sure that UTRL and UTMRL base address registers
10205 * are updated with the latest queue addresses. Only after
10206 * updating these addresses, we can queue the new commands.
10207 */
10208 ufshcd_readl(hba, REG_UTP_TASK_REQ_LIST_BASE_H);
10209
10210 return 0;
10211
10212 }
10213 EXPORT_SYMBOL_GPL(ufshcd_system_restore);
10214
10215 int ufshcd_system_thaw(struct device *dev)
10216 {
10217 return ufshcd_system_resume(dev);
10218 }
10219 EXPORT_SYMBOL_GPL(ufshcd_system_thaw);
10220 #endif /* CONFIG_PM_SLEEP */
10221
10222 /**
10223 * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
10224 * @hba: pointer to Host Bus Adapter (HBA)
10225 */
10226 void ufshcd_dealloc_host(struct ufs_hba *hba)
10227 {
10228 scsi_host_put(hba->host);
10229 }
10230 EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
10231
10232 /**
10233 * ufshcd_set_dma_mask - Set dma mask based on the controller
10234 * addressing capability
10235 * @hba: per adapter instance
10236 *
10237 * Return: 0 for success, non-zero for failure.
10238 */
10239 static int ufshcd_set_dma_mask(struct ufs_hba *hba)
10240 {
10241 if (hba->vops && hba->vops->set_dma_mask)
10242 return hba->vops->set_dma_mask(hba);
10243 if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
10244 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
10245 return 0;
10246 }
10247 return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
10248 }
10249
10250 /**
10251 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
10252 * @dev: pointer to device handle
10253 * @hba_handle: driver private handle
10254 *
10255 * Return: 0 on success, non-zero value on failure.
10256 */
10257 int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
10258 {
10259 struct Scsi_Host *host;
10260 struct ufs_hba *hba;
10261 int err = 0;
10262
10263 if (!dev) {
10264 dev_err(dev,
10265 "Invalid memory reference for dev is NULL\n");
10266 err = -ENODEV;
10267 goto out_error;
10268 }
10269
10270 host = scsi_host_alloc(&ufshcd_driver_template,
10271 sizeof(struct ufs_hba));
10272 if (!host) {
10273 dev_err(dev, "scsi_host_alloc failed\n");
10274 err = -ENOMEM;
10275 goto out_error;
10276 }
10277 host->nr_maps = HCTX_TYPE_POLL + 1;
10278 hba = shost_priv(host);
10279 hba->host = host;
10280 hba->dev = dev;
10281 hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
10282 hba->nop_out_timeout = NOP_OUT_TIMEOUT;
10283 ufshcd_set_sg_entry_size(hba, sizeof(struct ufshcd_sg_entry));
10284 INIT_LIST_HEAD(&hba->clk_list_head);
10285 spin_lock_init(&hba->outstanding_lock);
10286
10287 *hba_handle = hba;
10288
10289 out_error:
10290 return err;
10291 }
10292 EXPORT_SYMBOL(ufshcd_alloc_host);
10293
10294 /* This function exists because blk_mq_alloc_tag_set() requires this. */
10295 static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx,
10296 const struct blk_mq_queue_data *qd)
10297 {
10298 WARN_ON_ONCE(true);
10299 return BLK_STS_NOTSUPP;
10300 }
10301
10302 static const struct blk_mq_ops ufshcd_tmf_ops = {
10303 .queue_rq = ufshcd_queue_tmf,
10304 };
10305
10306 /**
10307 * ufshcd_init - Driver initialization routine
10308 * @hba: per-adapter instance
10309 * @mmio_base: base register address
10310 * @irq: Interrupt line of device
10311 *
10312 * Return: 0 on success, non-zero value on failure.
10313 */
10314 int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
10315 {
10316 int err;
10317 struct Scsi_Host *host = hba->host;
10318 struct device *dev = hba->dev;
10319 char eh_wq_name[sizeof("ufs_eh_wq_00")];
10320
10321 /*
10322 * dev_set_drvdata() must be called before any callbacks are registered
10323 * that use dev_get_drvdata() (frequency scaling, clock scaling, hwmon,
10324 * sysfs).
10325 */
10326 dev_set_drvdata(dev, hba);
10327
10328 if (!mmio_base) {
10329 dev_err(hba->dev,
10330 "Invalid memory reference for mmio_base is NULL\n");
10331 err = -ENODEV;
10332 goto out_error;
10333 }
10334
10335 hba->mmio_base = mmio_base;
10336 hba->irq = irq;
10337 hba->vps = &ufs_hba_vps;
10338
10339 err = ufshcd_hba_init(hba);
10340 if (err)
10341 goto out_error;
10342
10343 /* Read capabilities registers */
10344 err = ufshcd_hba_capabilities(hba);
10345 if (err)
10346 goto out_disable;
10347
10348 /* Get UFS version supported by the controller */
10349 hba->ufs_version = ufshcd_get_ufs_version(hba);
10350
10351 /* Get Interrupt bit mask per version */
10352 hba->intr_mask = ufshcd_get_intr_mask(hba);
10353
10354 err = ufshcd_set_dma_mask(hba);
10355 if (err) {
10356 dev_err(hba->dev, "set dma mask failed\n");
10357 goto out_disable;
10358 }
10359
10360 /* Allocate memory for host memory space */
10361 err = ufshcd_memory_alloc(hba);
10362 if (err) {
10363 dev_err(hba->dev, "Memory allocation failed\n");
10364 goto out_disable;
10365 }
10366
10367 /* Configure LRB */
10368 ufshcd_host_memory_configure(hba);
10369
10370 host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
10371 host->cmd_per_lun = hba->nutrs - UFSHCD_NUM_RESERVED;
10372 host->max_id = UFSHCD_MAX_ID;
10373 host->max_lun = UFS_MAX_LUNS;
10374 host->max_channel = UFSHCD_MAX_CHANNEL;
10375 host->unique_id = host->host_no;
10376 host->max_cmd_len = UFS_CDB_SIZE;
10377 host->queuecommand_may_block = !!(hba->caps & UFSHCD_CAP_CLK_GATING);
10378
10379 hba->max_pwr_info.is_valid = false;
10380
10381 /* Initialize work queues */
10382 snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d",
10383 hba->host->host_no);
10384 hba->eh_wq = create_singlethread_workqueue(eh_wq_name);
10385 if (!hba->eh_wq) {
10386 dev_err(hba->dev, "%s: failed to create eh workqueue\n",
10387 __func__);
10388 err = -ENOMEM;
10389 goto out_disable;
10390 }
10391 INIT_WORK(&hba->eh_work, ufshcd_err_handler);
10392 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
10393
10394 sema_init(&hba->host_sem, 1);
10395
10396 /* Initialize UIC command mutex */
10397 mutex_init(&hba->uic_cmd_mutex);
10398
10399 /* Initialize mutex for device management commands */
10400 mutex_init(&hba->dev_cmd.lock);
10401
10402 /* Initialize mutex for exception event control */
10403 mutex_init(&hba->ee_ctrl_mutex);
10404
10405 mutex_init(&hba->wb_mutex);
10406 init_rwsem(&hba->clk_scaling_lock);
10407
10408 ufshcd_init_clk_gating(hba);
10409
10410 ufshcd_init_clk_scaling(hba);
10411
10412 /*
10413 * In order to avoid any spurious interrupt immediately after
10414 * registering UFS controller interrupt handler, clear any pending UFS
10415 * interrupt status and disable all the UFS interrupts.
10416 */
10417 ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
10418 REG_INTERRUPT_STATUS);
10419 ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
10420 /*
10421 * Make sure that UFS interrupts are disabled and any pending interrupt
10422 * status is cleared before registering UFS interrupt handler.
10423 */
10424 ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
10425
10426 /* IRQ registration */
10427 err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
10428 if (err) {
10429 dev_err(hba->dev, "request irq failed\n");
10430 goto out_disable;
10431 } else {
10432 hba->is_irq_enabled = true;
10433 }
10434
10435 if (!is_mcq_supported(hba)) {
10436 if (!hba->lsdb_sup) {
10437 dev_err(hba->dev, "%s: failed to initialize (legacy doorbell mode not supported)\n",
10438 __func__);
10439 err = -EINVAL;
10440 goto out_disable;
10441 }
10442 err = scsi_add_host(host, hba->dev);
10443 if (err) {
10444 dev_err(hba->dev, "scsi_add_host failed\n");
10445 goto out_disable;
10446 }
10447 hba->scsi_host_added = true;
10448 }
10449
10450 hba->tmf_tag_set = (struct blk_mq_tag_set) {
10451 .nr_hw_queues = 1,
10452 .queue_depth = hba->nutmrs,
10453 .ops = &ufshcd_tmf_ops,
10454 .flags = BLK_MQ_F_NO_SCHED,
10455 };
10456 err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
10457 if (err < 0)
10458 goto out_remove_scsi_host;
10459 hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
10460 if (IS_ERR(hba->tmf_queue)) {
10461 err = PTR_ERR(hba->tmf_queue);
10462 goto free_tmf_tag_set;
10463 }
10464 hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs,
10465 sizeof(*hba->tmf_rqs), GFP_KERNEL);
10466 if (!hba->tmf_rqs) {
10467 err = -ENOMEM;
10468 goto free_tmf_queue;
10469 }
10470
10471 /* Reset the attached UFS device so it starts from a known state */
10472 ufshcd_device_reset(hba);
10473
10474 ufshcd_init_crypto(hba);
10475
10476 /* Host controller enable */
10477 err = ufshcd_hba_enable(hba);
10478 if (err) {
10479 dev_err(hba->dev, "Host controller enable failed\n");
10480 ufshcd_print_evt_hist(hba);
10481 ufshcd_print_host_state(hba);
10482 goto free_tmf_queue;
10483 }
10484
10485 /*
10486 * Set the default power management level for runtime and system PM.
10487 * Default power saving mode is to keep UFS link in Hibern8 state
10488 * and UFS device in sleep state.
10489 */
10490 hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
10491 UFS_SLEEP_PWR_MODE,
10492 UIC_LINK_HIBERN8_STATE);
10493 hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
10494 UFS_SLEEP_PWR_MODE,
10495 UIC_LINK_HIBERN8_STATE);
10496
10497 INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work,
10498 ufshcd_rpm_dev_flush_recheck_work);
10499
10500 /* Set the default auto-hibernate idle timer value to 150 ms (timer value 150, scale 3 = 1 ms units) */
10501 if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
10502 hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
10503 FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
10504 }
10505
10506 /* Hold auto suspend until async scan completes */
10507 pm_runtime_get_sync(dev);
10508 atomic_set(&hba->scsi_block_reqs_cnt, 0);
10509 /*
10510 * We assume that the device was not put into a sleep/power-down
10511 * state by the boot stage before the kernel started.
10512 * This assumption helps avoid doing link startup twice during
10513 * ufshcd_probe_hba().
10514 */
10515 ufshcd_set_ufs_dev_active(hba);
10516
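/*
 * The remainder of probing (link startup, device initialization and LUN
 * scanning) continues asynchronously in ufshcd_async_scan(); the
 * pm_runtime_get_sync() above keeps the controller resumed until that
 * scan completes.
 */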
10517 async_schedule(ufshcd_async_scan, hba);
10518 ufs_sysfs_add_nodes(hba->dev);
10519
10520 device_enable_async_suspend(dev);
10521 return 0;
10522
10523 free_tmf_queue:
10524 blk_mq_destroy_queue(hba->tmf_queue);
10525 blk_put_queue(hba->tmf_queue);
10526 free_tmf_tag_set:
10527 blk_mq_free_tag_set(&hba->tmf_tag_set);
10528 out_remove_scsi_host:
10529 if (hba->scsi_host_added)
10530 scsi_remove_host(hba->host);
10531 out_disable:
10532 hba->is_irq_enabled = false;
10533 ufshcd_hba_exit(hba);
10534 out_error:
10535 return err;
10536 }
10537 EXPORT_SYMBOL_GPL(ufshcd_init);
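/*
 * Typical usage from a host controller glue driver (illustrative sketch
 * only; the vops structure name below is hypothetical):
 *
 *	struct ufs_hba *hba;
 *	int err;
 *
 *	err = ufshcd_alloc_host(dev, &hba);
 *	if (err)
 *		return err;
 *	hba->vops = &my_platform_hba_vops;
 *	err = ufshcd_init(hba, mmio_base, irq);
 *	if (err)
 *		ufshcd_dealloc_host(hba);
 */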
10538
10539 void ufshcd_resume_complete(struct device *dev)
10540 {
10541 struct ufs_hba *hba = dev_get_drvdata(dev);
10542
10543 if (hba->complete_put) {
10544 ufshcd_rpm_put(hba);
10545 hba->complete_put = false;
10546 }
10547 }
10548 EXPORT_SYMBOL_GPL(ufshcd_resume_complete);
10549
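/*
 * Check whether the current runtime-PM state of the device WLUN already
 * matches the device power mode and link state that system suspend
 * (spm_lvl) would program, in which case __ufshcd_suspend_prepare() can
 * skip the runtime resume.
 */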
10550 static bool ufshcd_rpm_ok_for_spm(struct ufs_hba *hba)
10551 {
10552 struct device *dev = &hba->ufs_device_wlun->sdev_gendev;
10553 enum ufs_dev_pwr_mode dev_pwr_mode;
10554 enum uic_link_state link_state;
10555 unsigned long flags;
10556 bool res;
10557
10558 spin_lock_irqsave(&dev->power.lock, flags);
10559 dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl);
10560 link_state = ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl);
10561 res = pm_runtime_suspended(dev) &&
10562 hba->curr_dev_pwr_mode == dev_pwr_mode &&
10563 hba->uic_link_state == link_state &&
10564 !hba->dev_info.b_rpm_dev_flush_capable;
10565 spin_unlock_irqrestore(&dev->power.lock, flags);
10566
10567 return res;
10568 }
10569
10570 int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm)
10571 {
10572 struct ufs_hba *hba = dev_get_drvdata(dev);
10573 int ret;
10574
10575 /*
10576 * The SCSI core assumes that runtime PM and system PM put SCSI
10577 * devices into the same state, so it does not wake up a
10578 * runtime-suspended device for system suspend. UFS does not follow
10579 * that assumption; see ufshcd_resume_complete().
10580 */
10581 if (hba->ufs_device_wlun) {
10582 /* Prevent runtime suspend */
10583 ufshcd_rpm_get_noresume(hba);
10584 /*
10585 * Check if already runtime suspended in same state as system
10586 * suspend would be.
10587 */
10588 if (!rpm_ok_for_spm || !ufshcd_rpm_ok_for_spm(hba)) {
10589 /* RPM state is not ok for SPM, so runtime resume */
10590 ret = ufshcd_rpm_resume(hba);
10591 if (ret < 0 && ret != -EACCES) {
10592 ufshcd_rpm_put(hba);
10593 return ret;
10594 }
10595 }
10596 hba->complete_put = true;
10597 }
10598 return 0;
10599 }
10600 EXPORT_SYMBOL_GPL(__ufshcd_suspend_prepare);
10601
10602 int ufshcd_suspend_prepare(struct device *dev)
10603 {
10604 return __ufshcd_suspend_prepare(dev, true);
10605 }
10606 EXPORT_SYMBOL_GPL(ufshcd_suspend_prepare);
10607
10608 #ifdef CONFIG_PM_SLEEP
10609 static int ufshcd_wl_poweroff(struct device *dev)
10610 {
10611 struct scsi_device *sdev = to_scsi_device(dev);
10612 struct ufs_hba *hba = shost_priv(sdev->host);
10613
10614 __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
10615 return 0;
10616 }
10617 #endif
10618
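/*
 * The ufs_device_wlun SCSI driver binds only to the UFS device well-known
 * LUN; probe enables runtime PM on its request queue with a zero
 * autosuspend delay so the WLUN can be suspended as soon as it is idle.
 */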
10619 static int ufshcd_wl_probe(struct device *dev)
10620 {
10621 struct scsi_device *sdev = to_scsi_device(dev);
10622
10623 if (!is_device_wlun(sdev))
10624 return -ENODEV;
10625
10626 blk_pm_runtime_init(sdev->request_queue, dev);
10627 pm_runtime_set_autosuspend_delay(dev, 0);
10628 pm_runtime_allow(dev);
10629
10630 return 0;
10631 }
10632
10633 static int ufshcd_wl_remove(struct device *dev)
10634 {
10635 pm_runtime_forbid(dev);
10636 return 0;
10637 }
10638
10639 static const struct dev_pm_ops ufshcd_wl_pm_ops = {
10640 #ifdef CONFIG_PM_SLEEP
10641 .suspend = ufshcd_wl_suspend,
10642 .resume = ufshcd_wl_resume,
10643 .freeze = ufshcd_wl_suspend,
10644 .thaw = ufshcd_wl_resume,
10645 .poweroff = ufshcd_wl_poweroff,
10646 .restore = ufshcd_wl_resume,
10647 #endif
10648 SET_RUNTIME_PM_OPS(ufshcd_wl_runtime_suspend, ufshcd_wl_runtime_resume, NULL)
10649 };
10650
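/*
 * Compile-time sanity checks: verify that the C bitfield layout of
 * struct request_desc_header and struct utp_upiu_header matches the byte
 * offsets and bit positions defined by the UFSHCI/UFS specifications.
 */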
10651 static void ufshcd_check_header_layout(void)
10652 {
10653 /*
10654 * gcc compilers before version 10 cannot do constant-folding for
10655 * sub-byte bitfields. Hence skip the layout checks for gcc 9 and
10656 * before.
10657 */
10658 if (IS_ENABLED(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION < 100000)
10659 return;
10660
10661 BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
10662 .cci = 3})[0] != 3);
10663
10664 BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
10665 .ehs_length = 2})[1] != 2);
10666
10667 BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
10668 .enable_crypto = 1})[2]
10669 != 0x80);
10670
10671 BUILD_BUG_ON((((u8 *)&(struct request_desc_header){
10672 .command_type = 5,
10673 .data_direction = 3,
10674 .interrupt = 1,
10675 })[3]) != ((5 << 4) | (3 << 1) | 1));
10676
10677 BUILD_BUG_ON(((__le32 *)&(struct request_desc_header){
10678 .dunl = cpu_to_le32(0xdeadbeef)})[1] !=
10679 cpu_to_le32(0xdeadbeef));
10680
10681 BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
10682 .ocs = 4})[8] != 4);
10683
10684 BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
10685 .cds = 5})[9] != 5);
10686
10687 BUILD_BUG_ON(((__le32 *)&(struct request_desc_header){
10688 .dunu = cpu_to_le32(0xbadcafe)})[3] !=
10689 cpu_to_le32(0xbadcafe));
10690
10691 BUILD_BUG_ON(((u8 *)&(struct utp_upiu_header){
10692 .iid = 0xf })[4] != 0xf0);
10693
10694 BUILD_BUG_ON(((u8 *)&(struct utp_upiu_header){
10695 .command_set_type = 0xf })[4] != 0xf);
10696 }
10697
10698 /*
10699 * ufs_dev_wlun_template - describes the UFS device well-known LUN (wlun)
10700 * The ufs-device wlun is used to send power management commands;
10701 * all other LUNs are PM consumers of the ufs-device wlun.
10702 *
10703 * Currently, no sd driver is bound to wluns, so no LUN-specific PM
10704 * operations are performed for them. Per the UFS design, START STOP UNIT
10705 * (SSU) must be sent to the ufs-device wlun, hence a SCSI driver is
10706 * registered for UFS wluns only.
10707 */
10708 static struct scsi_driver ufs_dev_wlun_template = {
10709 .gendrv = {
10710 .name = "ufs_device_wlun",
10711 .owner = THIS_MODULE,
10712 .probe = ufshcd_wl_probe,
10713 .remove = ufshcd_wl_remove,
10714 .pm = &ufshcd_wl_pm_ops,
10715 .shutdown = ufshcd_wl_shutdown,
10716 },
10717 };
10718
10719 static int __init ufshcd_core_init(void)
10720 {
10721 int ret;
10722
10723 ufshcd_check_header_layout();
10724
10725 ufs_debugfs_init();
10726
10727 ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv);
10728 if (ret)
10729 ufs_debugfs_exit();
10730 return ret;
10731 }
10732
10733 static void __exit ufshcd_core_exit(void)
10734 {
10735 ufs_debugfs_exit();
10736 scsi_unregister_driver(&ufs_dev_wlun_template.gendrv);
10737 }
10738
10739 module_init(ufshcd_core_init);
10740 module_exit(ufshcd_core_exit);
10741
10742 MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
10743 MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
10744 MODULE_DESCRIPTION("Generic UFS host controller driver Core");
10745 MODULE_SOFTDEP("pre: governor_simpleondemand");
10746 MODULE_LICENSE("GPL");
10747