// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__

#include <linux/debugfs.h>
#include <linux/delay.h>

#include "dpu_vbif.h"
#include "dpu_hw_vbif.h"
#include "dpu_trace.h"

static struct dpu_hw_vbif *dpu_get_vbif(struct dpu_kms *dpu_kms, enum dpu_vbif vbif_idx)
{
	if (vbif_idx < ARRAY_SIZE(dpu_kms->hw_vbif))
		return dpu_kms->hw_vbif[vbif_idx];

	return NULL;
}

static const char *dpu_vbif_name(enum dpu_vbif idx)
{
	switch (idx) {
	case VBIF_RT:
		return "VBIF_RT";
	case VBIF_NRT:
		return "VBIF_NRT";
	default:
		return "??";
	}
}

/**
 * _dpu_vbif_wait_for_xin_halt - wait for the xin to halt
 * @vbif:	Pointer to hardware vbif driver
 * @xin_id:	Client interface identifier
 * @return:	0 if success; error code otherwise
 */
static int _dpu_vbif_wait_for_xin_halt(struct dpu_hw_vbif *vbif, u32 xin_id)
{
	ktime_t timeout;
	bool status;
	int rc;

	if (!vbif || !vbif->cap || !vbif->ops.get_halt_ctrl) {
		DPU_ERROR("invalid arguments vbif %d\n", vbif != NULL);
		return -EINVAL;
	}

	timeout = ktime_add_us(ktime_get(), vbif->cap->xin_halt_timeout);
	for (;;) {
		status = vbif->ops.get_halt_ctrl(vbif, xin_id);
		if (status)
			break;
		if (ktime_compare_safe(ktime_get(), timeout) > 0) {
			status = vbif->ops.get_halt_ctrl(vbif, xin_id);
			break;
		}
		usleep_range(501, 1000);
	}

	if (!status) {
		rc = -ETIMEDOUT;
		DPU_ERROR("%s client %d not halting. TIMEDOUT.\n",
				dpu_vbif_name(vbif->idx), xin_id);
	} else {
		rc = 0;
		DRM_DEBUG_ATOMIC("%s client %d is halted\n",
				dpu_vbif_name(vbif->idx), xin_id);
	}

	return rc;
}

/**
 * _dpu_vbif_apply_dynamic_ot_limit - determine OT based on usecase parameters
 * @vbif:	Pointer to hardware vbif driver
 * @ot_lim:	Pointer to OT limit to be modified
 * @params:	Pointer to usecase parameters
 */
static void _dpu_vbif_apply_dynamic_ot_limit(struct dpu_hw_vbif *vbif,
		u32 *ot_lim, struct dpu_vbif_set_ot_params *params)
{
	u64 pps;
	const struct dpu_vbif_dynamic_ot_tbl *tbl;
	u32 i;

	if (!vbif || !(vbif->cap->features & BIT(DPU_VBIF_QOS_OTLIM)))
		return;

	/* Dynamic OT setting done only for WFD */
	if (!params->is_wfd)
		return;

	pps = params->frame_rate;
	pps *= params->width;
	pps *= params->height;

	tbl = params->rd ? &vbif->cap->dynamic_ot_rd_tbl :
			&vbif->cap->dynamic_ot_wr_tbl;

	for (i = 0; i < tbl->count; i++) {
		if (pps <= tbl->cfg[i].pps) {
			*ot_lim = tbl->cfg[i].ot_limit;
			break;
		}
	}

	DRM_DEBUG_ATOMIC("%s xin:%d w:%d h:%d fps:%d pps:%llu ot:%u\n",
			dpu_vbif_name(vbif->idx), params->xin_id,
			params->width, params->height, params->frame_rate,
			pps, *ot_lim);
}
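
/*
 * Worked example (illustrative values, not from any catalog): for a WFD
 * read client at 1920x1080@60, pps = 60 * 1920 * 1080 = 124,416,000.
 * The scan above selects the first dynamic_ot_rd_tbl row whose
 * cfg[i].pps threshold is >= that value and uses its ot_limit; if every
 * threshold is smaller, *ot_lim keeps the default passed in by the
 * caller.
 */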
/**
 * _dpu_vbif_get_ot_limit - get OT based on usecase & configuration parameters
 * @vbif:	Pointer to hardware vbif driver
 * @params:	Pointer to usecase parameters
 * @return:	OT limit
 */
static u32 _dpu_vbif_get_ot_limit(struct dpu_hw_vbif *vbif,
	struct dpu_vbif_set_ot_params *params)
{
	u32 ot_lim = 0;
	u32 val;

	if (!vbif || !vbif->cap) {
		DPU_ERROR("invalid arguments vbif %d\n", vbif != NULL);
		return -EINVAL;
	}

	if (vbif->cap->default_ot_wr_limit && !params->rd)
		ot_lim = vbif->cap->default_ot_wr_limit;
	else if (vbif->cap->default_ot_rd_limit && params->rd)
		ot_lim = vbif->cap->default_ot_rd_limit;

	/*
	 * If default ot is not set from dt/catalog,
	 * then do not configure it.
	 */
	if (ot_lim == 0)
		goto exit;

	/* Modify the limits if the target and the use case requires it */
	_dpu_vbif_apply_dynamic_ot_limit(vbif, &ot_lim, params);

	if (vbif && vbif->ops.get_limit_conf) {
		val = vbif->ops.get_limit_conf(vbif,
				params->xin_id, params->rd);
		if (val == ot_lim)
			ot_lim = 0;
	}

exit:
	DRM_DEBUG_ATOMIC("%s xin:%d ot_lim:%d\n",
			dpu_vbif_name(vbif->idx), params->xin_id, ot_lim);
	return ot_lim;
}
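
/*
 * In short, the OT limit resolves in three steps: start from the
 * catalog default for the transfer direction, let the dynamic table
 * override it for WFD use cases, then return 0 (meaning "skip the
 * register write") when either no default is configured or the
 * hardware already holds the requested value.
 */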
/**
 * dpu_vbif_set_ot_limit - set OT based on usecase & configuration parameters
 * @dpu_kms:	DPU handler
 * @params:	Pointer to usecase parameters
 *
 * Note this function would block waiting for bus halt.
 */
void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
		struct dpu_vbif_set_ot_params *params)
{
	struct dpu_hw_vbif *vbif;
	struct dpu_hw_mdp *mdp;
	bool forced_on = false;
	u32 ot_lim;
	int ret;

	mdp = dpu_kms->hw_mdp;

	vbif = dpu_get_vbif(dpu_kms, params->vbif_idx);
	if (!vbif || !mdp) {
		DRM_DEBUG_ATOMIC("invalid arguments vbif %d mdp %d\n",
				vbif != NULL, mdp != NULL);
		return;
	}

	if (!mdp->ops.setup_clk_force_ctrl ||
			!vbif->ops.set_limit_conf ||
			!vbif->ops.set_halt_ctrl)
		return;

	/* set write_gather_en for all write clients */
	if (vbif->ops.set_write_gather_en && !params->rd)
		vbif->ops.set_write_gather_en(vbif, params->xin_id);

	ot_lim = _dpu_vbif_get_ot_limit(vbif, params) & 0xFF;

	if (ot_lim == 0)
		return;

	trace_dpu_perf_set_ot(params->num, params->xin_id, ot_lim,
		params->vbif_idx);

	forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);

	vbif->ops.set_limit_conf(vbif, params->xin_id, params->rd, ot_lim);

	vbif->ops.set_halt_ctrl(vbif, params->xin_id, true);

	ret = _dpu_vbif_wait_for_xin_halt(vbif, params->xin_id);
	if (ret)
		trace_dpu_vbif_wait_xin_halt_fail(vbif->idx, params->xin_id);

	vbif->ops.set_halt_ctrl(vbif, params->xin_id, false);

	if (forced_on)
		mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
}
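
/*
 * Example caller (a sketch, not a verbatim call site): a client would
 * typically zero a dpu_vbif_set_ot_params on the stack and fill in the
 * client and use-case fields before calling in. Field names come from
 * this driver; the concrete values below are hypothetical.
 *
 *	struct dpu_vbif_set_ot_params ot_params;
 *
 *	memset(&ot_params, 0, sizeof(ot_params));
 *	ot_params.xin_id = 0;		// client port on the VBIF
 *	ot_params.num = 0;		// client instance, used for tracing
 *	ot_params.width = 1920;		// source rectangle width
 *	ot_params.height = 1080;	// source rectangle height
 *	ot_params.frame_rate = 60;
 *	ot_params.is_wfd = false;	// true enables the dynamic OT table
 *	ot_params.rd = true;		// read client (false for writeback)
 *	ot_params.vbif_idx = VBIF_RT;
 *	ot_params.clk_ctrl = ...;	// the block's clk_ctrl, from the catalog
 *
 *	dpu_vbif_set_ot_limit(dpu_kms, &ot_params);
 */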
void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
		struct dpu_vbif_set_qos_params *params)
{
	struct dpu_hw_vbif *vbif;
	struct dpu_hw_mdp *mdp;
	bool forced_on = false;
	const struct dpu_vbif_qos_tbl *qos_tbl;
	int i;

	if (!params || !dpu_kms->hw_mdp) {
		DPU_ERROR("invalid arguments\n");
		return;
	}
	mdp = dpu_kms->hw_mdp;

	vbif = dpu_get_vbif(dpu_kms, params->vbif_idx);

	if (!vbif || !vbif->cap) {
		DPU_ERROR("invalid vbif %d\n", params->vbif_idx);
		return;
	}

	if (!vbif->ops.set_qos_remap || !mdp->ops.setup_clk_force_ctrl) {
		DRM_DEBUG_ATOMIC("qos remap not supported\n");
		return;
	}

	qos_tbl = params->is_rt ? &vbif->cap->qos_rt_tbl :
			&vbif->cap->qos_nrt_tbl;

	if (!qos_tbl->npriority_lvl || !qos_tbl->priority_lvl) {
		DRM_DEBUG_ATOMIC("qos tbl not defined\n");
		return;
	}

	forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);

	for (i = 0; i < qos_tbl->npriority_lvl; i++) {
		DRM_DEBUG_ATOMIC("%s xin:%d lvl:%d/%d\n",
				dpu_vbif_name(params->vbif_idx), params->xin_id, i,
				qos_tbl->priority_lvl[i]);
		vbif->ops.set_qos_remap(vbif, params->xin_id, i,
				qos_tbl->priority_lvl[i]);
	}

	if (forced_on)
		mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
}

void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms)
{
	struct dpu_hw_vbif *vbif;
	u32 i, pnd, src;

	for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
		vbif = dpu_kms->hw_vbif[i];
		if (vbif && vbif->ops.clear_errors) {
			vbif->ops.clear_errors(vbif, &pnd, &src);
			if (pnd || src) {
				DRM_DEBUG_KMS("%s: pnd 0x%X, src 0x%X\n",
						dpu_vbif_name(vbif->idx), pnd, src);
			}
		}
	}
}

void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms)
{
	struct dpu_hw_vbif *vbif;
	int i, j;

	for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
		vbif = dpu_kms->hw_vbif[i];
		if (vbif && vbif->cap && vbif->ops.set_mem_type) {
			for (j = 0; j < vbif->cap->memtype_count; j++)
				vbif->ops.set_mem_type(
						vbif, j, vbif->cap->memtype[j]);
		}
	}
}

#ifdef CONFIG_DEBUG_FS

void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
{
	char vbif_name[32];
	struct dentry *entry, *debugfs_vbif;
	int i, j;

	entry = debugfs_create_dir("vbif", debugfs_root);

	for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
		const struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];

		snprintf(vbif_name, sizeof(vbif_name), "%d", vbif->id);

		debugfs_vbif = debugfs_create_dir(vbif_name, entry);

		debugfs_create_u32("features", 0600, debugfs_vbif,
				(u32 *)&vbif->features);

		debugfs_create_u32("xin_halt_timeout", 0400, debugfs_vbif,
				(u32 *)&vbif->xin_halt_timeout);

		debugfs_create_u32("default_rd_ot_limit", 0400, debugfs_vbif,
				(u32 *)&vbif->default_ot_rd_limit);

		debugfs_create_u32("default_wr_ot_limit", 0400, debugfs_vbif,
				(u32 *)&vbif->default_ot_wr_limit);

		for (j = 0; j < vbif->dynamic_ot_rd_tbl.count; j++) {
			const struct dpu_vbif_dynamic_ot_cfg *cfg =
					&vbif->dynamic_ot_rd_tbl.cfg[j];

			snprintf(vbif_name, sizeof(vbif_name),
					"dynamic_ot_rd_%d_pps", j);
			debugfs_create_u64(vbif_name, 0400, debugfs_vbif,
					(u64 *)&cfg->pps);
			snprintf(vbif_name, sizeof(vbif_name),
					"dynamic_ot_rd_%d_ot_limit", j);
			debugfs_create_u32(vbif_name, 0400, debugfs_vbif,
					(u32 *)&cfg->ot_limit);
		}

		for (j = 0; j < vbif->dynamic_ot_wr_tbl.count; j++) {
			const struct dpu_vbif_dynamic_ot_cfg *cfg =
					&vbif->dynamic_ot_wr_tbl.cfg[j];

			snprintf(vbif_name, sizeof(vbif_name),
					"dynamic_ot_wr_%d_pps", j);
			debugfs_create_u64(vbif_name, 0400, debugfs_vbif,
					(u64 *)&cfg->pps);
			snprintf(vbif_name, sizeof(vbif_name),
					"dynamic_ot_wr_%d_ot_limit", j);
			debugfs_create_u32(vbif_name, 0400, debugfs_vbif,
					(u32 *)&cfg->ot_limit);
		}
	}
}
#endif
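
/*
 * For reference, dpu_debugfs_vbif_init() above produces a tree like the
 * following under the DRM debugfs root (entry names taken from the code;
 * the VBIF id and the number of dynamic_ot_* entries depend on the
 * catalog):
 *
 *	vbif/
 *	`-- <id>/
 *	    |-- features
 *	    |-- xin_halt_timeout
 *	    |-- default_rd_ot_limit
 *	    |-- default_wr_ot_limit
 *	    |-- dynamic_ot_rd_<j>_pps
 *	    |-- dynamic_ot_rd_<j>_ot_limit
 *	    |-- dynamic_ot_wr_<j>_pps
 *	    `-- dynamic_ot_wr_<j>_ot_limit
 */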