Lines matching refs: hba

42 struct ufs_hba *hba = dev_get_drvdata(dev); in ufs_sysfs_pm_lvl_store() local
43 struct ufs_dev_info *dev_info = &hba->dev_info; in ufs_sysfs_pm_lvl_store()
53 (!(hba->caps & UFSHCD_CAP_DEEPSLEEP) || in ufs_sysfs_pm_lvl_store()
57 spin_lock_irqsave(hba->host->host_lock, flags); in ufs_sysfs_pm_lvl_store()
59 hba->rpm_lvl = value; in ufs_sysfs_pm_lvl_store()
61 hba->spm_lvl = value; in ufs_sysfs_pm_lvl_store()
62 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufs_sysfs_pm_lvl_store()
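
The ufs_sysfs_pm_lvl_store() references above (source lines 42-62) outline a shared store helper: parse the requested level, reject DeepSleep levels the host cannot enter, then update hba->rpm_lvl or hba->spm_lvl under host_lock. A minimal sketch of that pattern follows; the rpm selector parameter, the kstrtoul() parsing, the UFS_PM_LVL_MAX bound, and the truncated half of the DeepSleep check are assumptions, not taken from the listing (which also fetches &hba->dev_info for a device-side check not reproduced here).

/* Sketch only: reconstructed from the fragments listed above. */
static ssize_t ufs_sysfs_pm_lvl_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count,
				      bool rpm)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long flags, value;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	if (value >= UFS_PM_LVL_MAX)
		return -EINVAL;

	/* Levels that map to DeepSleep need host support (the listing's
	 * '||' continues with a device-side check that is not shown). */
	if (ufs_pm_lvl_states[value].dev_state == UFS_DEEPSLEEP_PWR_MODE &&
	    !(hba->caps & UFSHCD_CAP_DEEPSLEEP))
		return -EINVAL;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (rpm)
		hba->rpm_lvl = value;
	else
		hba->spm_lvl = value;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return count;
}
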
69 struct ufs_hba *hba = dev_get_drvdata(dev); in rpm_lvl_show() local
71 return sysfs_emit(buf, "%d\n", hba->rpm_lvl); in rpm_lvl_show()
83 struct ufs_hba *hba = dev_get_drvdata(dev); in rpm_target_dev_state_show() local
86 ufs_pm_lvl_states[hba->rpm_lvl].dev_state)); in rpm_target_dev_state_show()
92 struct ufs_hba *hba = dev_get_drvdata(dev); in rpm_target_link_state_show() local
95 ufs_pm_lvl_states[hba->rpm_lvl].link_state)); in rpm_target_link_state_show()
101 struct ufs_hba *hba = dev_get_drvdata(dev); in spm_lvl_show() local
103 return sysfs_emit(buf, "%d\n", hba->spm_lvl); in spm_lvl_show()
115 struct ufs_hba *hba = dev_get_drvdata(dev); in spm_target_dev_state_show() local
118 ufs_pm_lvl_states[hba->spm_lvl].dev_state)); in spm_target_dev_state_show()
124 struct ufs_hba *hba = dev_get_drvdata(dev); in spm_target_link_state_show() local
127 ufs_pm_lvl_states[hba->spm_lvl].link_state)); in spm_target_link_state_show()
159 struct ufs_hba *hba = dev_get_drvdata(dev); in auto_hibern8_show() local
161 if (!ufshcd_is_auto_hibern8_supported(hba)) in auto_hibern8_show()
164 down(&hba->host_sem); in auto_hibern8_show()
165 if (!ufshcd_is_user_access_allowed(hba)) { in auto_hibern8_show()
170 pm_runtime_get_sync(hba->dev); in auto_hibern8_show()
171 ufshcd_hold(hba); in auto_hibern8_show()
172 ahit = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER); in auto_hibern8_show()
173 ufshcd_release(hba); in auto_hibern8_show()
174 pm_runtime_put_sync(hba->dev); in auto_hibern8_show()
179 up(&hba->host_sem); in auto_hibern8_show()
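
The auto_hibern8_show() fragments (source lines 159-179) show the full read path for the auto-hibernate idle timer: gate on controller support, serialize against user-access restrictions with host_sem, keep the host resumed and its clocks held while reading the AHIT register, then release everything in reverse order. A sketch assembling those pieces; the error codes and the final microsecond conversion (not visible in the listing) are assumptions.

static ssize_t auto_hibern8_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	u32 ahit;
	int ret;
	struct ufs_hba *hba = dev_get_drvdata(dev);

	if (!ufshcd_is_auto_hibern8_supported(hba))
		return -EOPNOTSUPP;

	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		ret = -EBUSY;
		goto out;
	}

	/* Resume the controller and pin its clocks for the MMIO read. */
	pm_runtime_get_sync(hba->dev);
	ufshcd_hold(hba);
	ahit = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
	ufshcd_release(hba);
	pm_runtime_put_sync(hba->dev);

	/* The real attribute reports microseconds; the raw-to-us
	 * conversion is not in the listing, so print the raw value. */
	ret = sysfs_emit(buf, "%u\n", ahit);
out:
	up(&hba->host_sem);
	return ret;
}
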
187 struct ufs_hba *hba = dev_get_drvdata(dev); in auto_hibern8_store() local
191 if (!ufshcd_is_auto_hibern8_supported(hba)) in auto_hibern8_store()
200 down(&hba->host_sem); in auto_hibern8_store()
201 if (!ufshcd_is_user_access_allowed(hba)) { in auto_hibern8_store()
206 ufshcd_auto_hibern8_update(hba, ufshcd_us_to_ahit(timer)); in auto_hibern8_store()
209 up(&hba->host_sem); in auto_hibern8_store()
216 struct ufs_hba *hba = dev_get_drvdata(dev); in wb_on_show() local
218 return sysfs_emit(buf, "%d\n", hba->dev_info.wb_enabled); in wb_on_show()
224 struct ufs_hba *hba = dev_get_drvdata(dev); in wb_on_store() local
228 if (!ufshcd_is_wb_allowed(hba) || (ufshcd_is_clkscaling_supported(hba) in wb_on_store()
229 && ufshcd_enable_wb_if_scaling_up(hba))) { in wb_on_store()
244 down(&hba->host_sem); in wb_on_store()
245 if (!ufshcd_is_user_access_allowed(hba)) { in wb_on_store()
250 ufshcd_rpm_get_sync(hba); in wb_on_store()
251 res = ufshcd_wb_toggle(hba, wb_enable); in wb_on_store()
252 ufshcd_rpm_put_sync(hba); in wb_on_store()
254 up(&hba->host_sem); in wb_on_store()
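
wb_on_store() (source lines 216-254) follows the common write pattern for WriteBooster controls: refuse when WriteBooster is not user-controllable (not allowed at all, or already managed by clock scaling), take host_sem, runtime-resume the UFS device, issue the toggle, and drop everything again. A sketch under those assumptions; the kstrtobool() parsing and the error codes are guesses.

static ssize_t wb_on_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	bool wb_enable;
	ssize_t res;

	/* WriteBooster toggling from sysfs only makes sense when the
	 * driver is not managing it automatically via clock scaling. */
	if (!ufshcd_is_wb_allowed(hba) ||
	    (ufshcd_is_clkscaling_supported(hba) &&
	     ufshcd_enable_wb_if_scaling_up(hba)))
		return -EOPNOTSUPP;

	if (kstrtobool(buf, &wb_enable))
		return -EINVAL;

	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		res = -EBUSY;
		goto out;
	}

	ufshcd_rpm_get_sync(hba);
	res = ufshcd_wb_toggle(hba, wb_enable);
	ufshcd_rpm_put_sync(hba);
out:
	up(&hba->host_sem);
	return res < 0 ? res : count;
}
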
262 struct ufs_hba *hba = dev_get_drvdata(dev); in enable_wb_buf_flush_show() local
264 return sysfs_emit(buf, "%d\n", hba->dev_info.wb_buf_flush_enabled); in enable_wb_buf_flush_show()
271 struct ufs_hba *hba = dev_get_drvdata(dev); in enable_wb_buf_flush_store() local
275 if (!ufshcd_is_wb_buf_flush_allowed(hba)) { in enable_wb_buf_flush_store()
286 down(&hba->host_sem); in enable_wb_buf_flush_store()
287 if (!ufshcd_is_user_access_allowed(hba)) { in enable_wb_buf_flush_store()
292 ufshcd_rpm_get_sync(hba); in enable_wb_buf_flush_store()
293 res = ufshcd_wb_toggle_buf_flush(hba, enable_wb_buf_flush); in enable_wb_buf_flush_store()
294 ufshcd_rpm_put_sync(hba); in enable_wb_buf_flush_store()
297 up(&hba->host_sem); in enable_wb_buf_flush_store()
305 struct ufs_hba *hba = dev_get_drvdata(dev); in wb_flush_threshold_show() local
307 return sysfs_emit(buf, "%u\n", hba->vps->wb_flush_threshold); in wb_flush_threshold_show()
314 struct ufs_hba *hba = dev_get_drvdata(dev); in wb_flush_threshold_store() local
327 hba->vps->wb_flush_threshold = wb_flush_threshold; in wb_flush_threshold_store()
364 struct ufs_hba *hba = dev_get_drvdata(dev); in clock_scaling_show() local
366 return sysfs_emit(buf, "%d\n", ufshcd_is_clkscaling_supported(hba)); in clock_scaling_show()
372 struct ufs_hba *hba = dev_get_drvdata(dev); in write_booster_show() local
374 return sysfs_emit(buf, "%d\n", ufshcd_is_wb_allowed(hba)); in write_booster_show()
398 struct ufs_hba *hba = dev_get_drvdata(dev); in monitor_enable_show() local
400 return sysfs_emit(buf, "%d\n", hba->monitor.enabled); in monitor_enable_show()
407 struct ufs_hba *hba = dev_get_drvdata(dev); in monitor_enable_store() local
414 spin_lock_irqsave(hba->host->host_lock, flags); in monitor_enable_store()
415 if (value == hba->monitor.enabled) in monitor_enable_store()
419 memset(&hba->monitor, 0, sizeof(hba->monitor)); in monitor_enable_store()
421 hba->monitor.enabled = true; in monitor_enable_store()
422 hba->monitor.enabled_ts = ktime_get(); in monitor_enable_store()
426 spin_unlock_irqrestore(hba->host->host_lock, flags); in monitor_enable_store()
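
monitor_enable_store() (source lines 398-426) toggles I/O statistics gathering entirely under host_lock: a no-op when the requested state matches the current one, a full memset() of hba->monitor when disabling, and an enable timestamp via ktime_get() when enabling. A sketch of that flow; the kstrtobool() parsing and the exact branch structure around the memset() are assumed.

static ssize_t monitor_enable_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long flags;
	bool value;

	if (kstrtobool(buf, &value))
		return -EINVAL;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (value == hba->monitor.enabled)
		goto out_unlock;

	if (!value) {
		/* Disabling also wipes every collected counter. */
		memset(&hba->monitor, 0, sizeof(hba->monitor));
	} else {
		hba->monitor.enabled = true;
		hba->monitor.enabled_ts = ktime_get();
	}

out_unlock:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return count;
}
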
433 struct ufs_hba *hba = dev_get_drvdata(dev); in monitor_chunk_size_show() local
435 return sysfs_emit(buf, "%lu\n", hba->monitor.chunk_size); in monitor_chunk_size_show()
442 struct ufs_hba *hba = dev_get_drvdata(dev); in monitor_chunk_size_store() local
448 spin_lock_irqsave(hba->host->host_lock, flags); in monitor_chunk_size_store()
450 if (!hba->monitor.enabled) in monitor_chunk_size_store()
451 hba->monitor.chunk_size = value; in monitor_chunk_size_store()
452 spin_unlock_irqrestore(hba->host->host_lock, flags); in monitor_chunk_size_store()
459 struct ufs_hba *hba = dev_get_drvdata(dev); in read_total_sectors_show() local
461 return sysfs_emit(buf, "%lu\n", hba->monitor.nr_sec_rw[READ]); in read_total_sectors_show()
467 struct ufs_hba *hba = dev_get_drvdata(dev); in read_total_busy_show() local
470 ktime_to_us(hba->monitor.total_busy[READ])); in read_total_busy_show()
476 struct ufs_hba *hba = dev_get_drvdata(dev); in read_nr_requests_show() local
478 return sysfs_emit(buf, "%lu\n", hba->monitor.nr_req[READ]); in read_nr_requests_show()
485 struct ufs_hba *hba = dev_get_drvdata(dev); in read_req_latency_avg_show() local
486 struct ufs_hba_monitor *m = &hba->monitor; in read_req_latency_avg_show()
496 struct ufs_hba *hba = dev_get_drvdata(dev); in read_req_latency_max_show() local
499 ktime_to_us(hba->monitor.lat_max[READ])); in read_req_latency_max_show()
506 struct ufs_hba *hba = dev_get_drvdata(dev); in read_req_latency_min_show() local
509 ktime_to_us(hba->monitor.lat_min[READ])); in read_req_latency_min_show()
516 struct ufs_hba *hba = dev_get_drvdata(dev); in read_req_latency_sum_show() local
519 ktime_to_us(hba->monitor.lat_sum[READ])); in read_req_latency_sum_show()
526 struct ufs_hba *hba = dev_get_drvdata(dev); in write_total_sectors_show() local
528 return sysfs_emit(buf, "%lu\n", hba->monitor.nr_sec_rw[WRITE]); in write_total_sectors_show()
534 struct ufs_hba *hba = dev_get_drvdata(dev); in write_total_busy_show() local
537 ktime_to_us(hba->monitor.total_busy[WRITE])); in write_total_busy_show()
543 struct ufs_hba *hba = dev_get_drvdata(dev); in write_nr_requests_show() local
545 return sysfs_emit(buf, "%lu\n", hba->monitor.nr_req[WRITE]); in write_nr_requests_show()
552 struct ufs_hba *hba = dev_get_drvdata(dev); in write_req_latency_avg_show() local
553 struct ufs_hba_monitor *m = &hba->monitor; in write_req_latency_avg_show()
563 struct ufs_hba *hba = dev_get_drvdata(dev); in write_req_latency_max_show() local
566 ktime_to_us(hba->monitor.lat_max[WRITE])); in write_req_latency_max_show()
573 struct ufs_hba *hba = dev_get_drvdata(dev); in write_req_latency_min_show() local
576 ktime_to_us(hba->monitor.lat_min[WRITE])); in write_req_latency_min_show()
583 struct ufs_hba *hba = dev_get_drvdata(dev); in write_req_latency_sum_show() local
586 ktime_to_us(hba->monitor.lat_sum[WRITE])); in write_req_latency_sum_show()
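
The *_req_latency_avg_show() references (source lines 485-486 and 552-553) grab struct ufs_hba_monitor and, by implication, derive the average from the accumulated latency and the request count exposed by the neighbouring attributes. A sketch for the read direction; the div_u64() rounding and the zero-request guard are assumptions.

static ssize_t read_req_latency_avg_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_hba_monitor *m = &hba->monitor;

	/* Avoid dividing by zero before any read request was monitored. */
	if (!m->nr_req[READ])
		return sysfs_emit(buf, "0\n");

	return sysfs_emit(buf, "%llu\n",
			  div_u64(ktime_to_us(m->lat_sum[READ]),
				  m->nr_req[READ]));
}
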
631 static ssize_t ufs_sysfs_read_desc_param(struct ufs_hba *hba, in ufs_sysfs_read_desc_param() argument
644 down(&hba->host_sem); in ufs_sysfs_read_desc_param()
645 if (!ufshcd_is_user_access_allowed(hba)) { in ufs_sysfs_read_desc_param()
650 ufshcd_rpm_get_sync(hba); in ufs_sysfs_read_desc_param()
651 ret = ufshcd_read_desc_param(hba, desc_id, desc_index, in ufs_sysfs_read_desc_param()
653 ufshcd_rpm_put_sync(hba); in ufs_sysfs_read_desc_param()
678 up(&hba->host_sem); in ufs_sysfs_read_desc_param()
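
ufs_sysfs_read_desc_param() (source lines 631-678) is the helper every descriptor attribute funnels through: take host_sem, check user access, runtime-resume the device, read the descriptor field, then format it for sysfs. The sketch below reconstructs that shape; the full parameter list, the local buffer size, and the width-dependent formatting are assumptions (the listing only shows the locking, runtime-PM and ufshcd_read_desc_param() calls).

static ssize_t ufs_sysfs_read_desc_param(struct ufs_hba *hba,
					 enum desc_idn desc_id,
					 u8 desc_index, u8 param_offset,
					 u8 *sysfs_buf, u8 param_size)
{
	u8 desc_buf[8] = {0};
	int ret;

	if (param_size > 8)
		return -EINVAL;

	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		ret = -EBUSY;
		goto out;
	}

	ufshcd_rpm_get_sync(hba);
	ret = ufshcd_read_desc_param(hba, desc_id, desc_index,
				     param_offset, desc_buf, param_size);
	ufshcd_rpm_put_sync(hba);
	if (ret) {
		ret = -EINVAL;
		goto out;
	}

	/* Width-dependent formatting (get_unaligned_be*() from
	 * <asm/unaligned.h>); widths other than 1/2/4/8 are not expected. */
	switch (param_size) {
	case 1:
		ret = sysfs_emit(sysfs_buf, "0x%02X\n", *desc_buf);
		break;
	case 2:
		ret = sysfs_emit(sysfs_buf, "0x%04X\n",
				 get_unaligned_be16(desc_buf));
		break;
	case 4:
		ret = sysfs_emit(sysfs_buf, "0x%08X\n",
				 get_unaligned_be32(desc_buf));
		break;
	case 8:
		ret = sysfs_emit(sysfs_buf, "0x%016llX\n",
				 get_unaligned_be64(desc_buf));
		break;
	default:
		ret = -EINVAL;
		break;
	}
out:
	up(&hba->host_sem);
	return ret;
}
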
686 struct ufs_hba *hba = dev_get_drvdata(dev); \
687 return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_##_duname, \
899 struct ufs_hba *hba = dev_get_drvdata(dev); \
900 return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, \
1016 struct ufs_hba *hba = dev_get_drvdata(dev); \
1021 down(&hba->host_sem); \
1022 if (!ufshcd_is_user_access_allowed(hba)) { \
1023 up(&hba->host_sem); \
1028 up(&hba->host_sem); \
1031 ufshcd_rpm_get_sync(hba); \
1032 ret = ufshcd_query_descriptor_retry(hba, \
1042 ret = ufshcd_read_string_desc(hba, index, &desc_buf, \
1048 ufshcd_rpm_put_sync(hba); \
1050 up(&hba->host_sem); \
1088 struct ufs_hba *hba = dev_get_drvdata(dev); \
1090 down(&hba->host_sem); \
1091 if (!ufshcd_is_user_access_allowed(hba)) { \
1092 up(&hba->host_sem); \
1096 index = ufshcd_wb_get_query_index(hba); \
1097 ufshcd_rpm_get_sync(hba); \
1098 ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG, \
1100 ufshcd_rpm_put_sync(hba); \
1107 up(&hba->host_sem); \
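
The backslash-continued lines around source lines 1088-1107 belong to a macro that stamps out one show handler per UFS device flag: host_sem and user-access gating, a WriteBooster-aware query index from ufshcd_wb_get_query_index(), and a READ_FLAG query under runtime PM. A sketch of such a macro; the macro name, the QUERY_FLAG_IDN token pasting, and the true/false output format are assumptions.

#define UFS_FLAG(_name, _uname) \
static ssize_t _name##_show(struct device *dev, \
			    struct device_attribute *attr, char *buf) \
{ \
	bool flag; \
	u8 index = 0; \
	int ret; \
	struct ufs_hba *hba = dev_get_drvdata(dev); \
 \
	down(&hba->host_sem); \
	if (!ufshcd_is_user_access_allowed(hba)) { \
		up(&hba->host_sem); \
		return -EBUSY; \
	} \
	index = ufshcd_wb_get_query_index(hba); \
	ufshcd_rpm_get_sync(hba); \
	ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG, \
				QUERY_FLAG_IDN##_uname, index, &flag); \
	ufshcd_rpm_put_sync(hba); \
	if (ret) { \
		ret = -EINVAL; \
		goto out; \
	} \
	ret = sysfs_emit(buf, "%s\n", flag ? "true" : "false"); \
out: \
	up(&hba->host_sem); \
	return ret; \
} \
static DEVICE_ATTR_RO(_name)
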
1154 struct ufs_hba *hba = dev_get_drvdata(dev); \
1159 down(&hba->host_sem); \
1160 if (!ufshcd_is_user_access_allowed(hba)) { \
1161 up(&hba->host_sem); \
1165 index = ufshcd_wb_get_query_index(hba); \
1166 ufshcd_rpm_get_sync(hba); \
1167 ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, \
1169 ufshcd_rpm_put_sync(hba); \
1176 up(&hba->host_sem); \
1252 struct ufs_hba *hba = shost_priv(sdev->host); \
1254 if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun)) \
1256 return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_##_duname, \
1327 struct ufs_hba *hba = shost_priv(sdev->host); in dyn_cap_needed_attribute_show() local
1331 down(&hba->host_sem); in dyn_cap_needed_attribute_show()
1332 if (!ufshcd_is_user_access_allowed(hba)) { in dyn_cap_needed_attribute_show()
1337 ufshcd_rpm_get_sync(hba); in dyn_cap_needed_attribute_show()
1338 ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, in dyn_cap_needed_attribute_show()
1340 ufshcd_rpm_put_sync(hba); in dyn_cap_needed_attribute_show()
1349 up(&hba->host_sem); in dyn_cap_needed_attribute_show()
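
dyn_cap_needed_attribute_show() (source lines 1327-1349) is the one per-LUN attribute in the listing that issues a query itself: it resolves the hba from the SCSI host via shost_priv(), maps the SCSI LUN to a UPIU LUN as the query index, and reads the attribute under host_sem and runtime PM. A sketch of that path; the attribute IDN name, the LUN mapping helper, and the output format are taken from context rather than from the listing itself.

static ssize_t dyn_cap_needed_attribute_show(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ufs_hba *hba = shost_priv(sdev->host);
	u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);
	u32 value;
	int ret;

	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		ret = -EBUSY;
		goto out;
	}

	ufshcd_rpm_get_sync(hba);
	ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				QUERY_ATTR_IDN_DYN_CAP_NEEDED, lun, 0, &value);
	ufshcd_rpm_put_sync(hba);
	if (ret) {
		ret = -EINVAL;
		goto out;
	}

	ret = sysfs_emit(buf, "0x%08X\n", value);
out:
	up(&hba->host_sem);
	return ret;
}
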