// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__

#include <linux/debugfs.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/pm_opp.h>
#include <linux/sort.h>
#include <linux/clk.h>
#include <linux/bitmap.h>

#include "dpu_kms.h"
#include "dpu_trace.h"
#include "dpu_crtc.h"
#include "dpu_core_perf.h"

/**
 * enum dpu_perf_mode - performance tuning mode
 * @DPU_PERF_MODE_NORMAL: performance controlled by user mode client
 * @DPU_PERF_MODE_MINIMUM: performance bounded by minimum setting
 * @DPU_PERF_MODE_FIXED: performance bounded by fixed setting
 */
enum dpu_perf_mode {
	DPU_PERF_MODE_NORMAL,
	DPU_PERF_MODE_MINIMUM,
	DPU_PERF_MODE_FIXED,
	DPU_PERF_MODE_MAX
};

static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv;

	priv = crtc->dev->dev_private;
	return to_dpu_kms(priv->kms);
}

static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
				     struct drm_crtc *crtc,
				     struct drm_crtc_state *state,
				     struct dpu_core_perf_params *perf)
{
	struct dpu_crtc_state *dpu_cstate;

	if (!kms || !kms->catalog || !crtc || !state || !perf) {
		DPU_ERROR("invalid parameters\n");
		return;
	}

	dpu_cstate = to_dpu_crtc_state(state);
	memset(perf, 0, sizeof(struct dpu_core_perf_params));

	if (!dpu_cstate->bw_control) {
		perf->bw_ctl = kms->catalog->perf.max_bw_high * 1000ULL;
		perf->max_per_pipe_ib = perf->bw_ctl;
		perf->core_clk_rate = kms->perf.max_core_clk_rate;
	} else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
		perf->bw_ctl = 0;
		perf->max_per_pipe_ib = 0;
		perf->core_clk_rate = 0;
	} else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED) {
		perf->bw_ctl = kms->perf.fix_core_ab_vote;
		perf->max_per_pipe_ib = kms->perf.fix_core_ib_vote;
		perf->core_clk_rate = kms->perf.fix_core_clk_rate;
	}

	DPU_DEBUG("crtc=%d clk_rate=%llu core_ib=%llu core_ab=%llu\n",
		  crtc->base.id, perf->core_clk_rate,
		  perf->max_per_pipe_ib, perf->bw_ctl);
}
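
/*
 * Illustrative note on units (an assumption inferred from the symmetric
 * scaling in this file, not taken from the catalog documentation):
 * _dpu_core_perf_calc_crtc() above multiplies max_bw_high by 1000 to
 * build the vote, and dpu_core_perf_crtc_check() below divides the
 * summed votes by 1000 before comparing against the same threshold.
 * With a hypothetical max_bw_high of 6800000, the uncontrolled vote is
 * 6800000 * 1000ULL = 6800000000, and a vote sum of 6800000000 maps
 * back to DIV_ROUND_UP_ULL(6800000000, 1000) = 6800000, i.e. exactly
 * at the threshold.
 */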

int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
			     struct drm_crtc_state *state)
{
	u32 bw, threshold;
	u64 bw_sum_of_intfs = 0;
	enum dpu_crtc_client_type curr_client_type;
	struct dpu_crtc_state *dpu_cstate;
	struct drm_crtc *tmp_crtc;
	struct dpu_kms *kms;

	if (!crtc || !state) {
		DPU_ERROR("invalid crtc\n");
		return -EINVAL;
	}

	kms = _dpu_crtc_get_kms(crtc);
	if (!kms->catalog) {
		DPU_ERROR("invalid parameters\n");
		return 0;
	}

	/* we only need bandwidth check on real-time clients (interfaces) */
	if (dpu_crtc_get_client_type(crtc) == NRT_CLIENT)
		return 0;

	dpu_cstate = to_dpu_crtc_state(state);

	/* obtain new values */
	_dpu_core_perf_calc_crtc(kms, crtc, state, &dpu_cstate->new_perf);

	bw_sum_of_intfs = dpu_cstate->new_perf.bw_ctl;
	curr_client_type = dpu_crtc_get_client_type(crtc);

	drm_for_each_crtc(tmp_crtc, crtc->dev) {
		if (tmp_crtc->enabled &&
		    (dpu_crtc_get_client_type(tmp_crtc) ==
		     curr_client_type) && (tmp_crtc != crtc)) {
			struct dpu_crtc_state *tmp_cstate =
				to_dpu_crtc_state(tmp_crtc->state);

			DPU_DEBUG("crtc:%d bw:%llu ctrl:%d\n",
				  tmp_crtc->base.id,
				  tmp_cstate->new_perf.bw_ctl,
				  tmp_cstate->bw_control);
			/*
			 * For the bandwidth check, only use the bandwidth
			 * if the atomic property has already been set.
			 */
			if (tmp_cstate->bw_control)
				bw_sum_of_intfs += tmp_cstate->new_perf.bw_ctl;
		}

		/* convert bandwidth to kb */
		bw = DIV_ROUND_UP_ULL(bw_sum_of_intfs, 1000);
		DPU_DEBUG("calculated bandwidth=%uk\n", bw);

		threshold = kms->catalog->perf.max_bw_high;

		DPU_DEBUG("final threshold bw limit = %u\n", threshold);

		if (!dpu_cstate->bw_control) {
			DPU_DEBUG("bypass bandwidth check\n");
		} else if (!threshold) {
			DPU_ERROR("no bandwidth limits specified\n");
			return -E2BIG;
		} else if (bw > threshold) {
			DPU_ERROR("exceeds bandwidth: %ukb > %ukb\n", bw,
				  threshold);
			return -E2BIG;
		}
	}

	return 0;
}

static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
					  struct drm_crtc *crtc)
{
	struct dpu_core_perf_params perf = { 0 };
	enum dpu_crtc_client_type curr_client_type
		= dpu_crtc_get_client_type(crtc);
	struct drm_crtc *tmp_crtc;
	struct dpu_crtc_state *dpu_cstate;
	int ret = 0;

	drm_for_each_crtc(tmp_crtc, crtc->dev) {
		if (tmp_crtc->enabled &&
		    curr_client_type == dpu_crtc_get_client_type(tmp_crtc)) {
			dpu_cstate = to_dpu_crtc_state(tmp_crtc->state);

			perf.max_per_pipe_ib = max(perf.max_per_pipe_ib,
					dpu_cstate->new_perf.max_per_pipe_ib);

			DPU_DEBUG("crtc=%d bw=%llu\n", tmp_crtc->base.id,
				  dpu_cstate->new_perf.bw_ctl);
		}
	}
	return ret;
}

/**
 * dpu_core_perf_crtc_release_bw() - request zero bandwidth
 * @crtc: pointer to a crtc
 *
 * Function checks a state variable for the crtc; if all pending commit
 * requests are done, meaning no more bandwidth is needed, the bandwidth
 * request is released.
 */
void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_kms *kms;

	if (!crtc) {
		DPU_ERROR("invalid crtc\n");
		return;
	}

	kms = _dpu_crtc_get_kms(crtc);
	if (!kms->catalog) {
		DPU_ERROR("invalid kms\n");
		return;
	}

	dpu_crtc = to_dpu_crtc(crtc);

	if (atomic_dec_return(&kms->bandwidth_ref) > 0)
		return;

	/* Release the bandwidth */
	if (kms->perf.enable_bw_release) {
		trace_dpu_cmd_release_bw(crtc->base.id);
		DPU_DEBUG("Release BW crtc=%d\n", crtc->base.id);
		dpu_crtc->cur_perf.bw_ctl = 0;
		_dpu_core_perf_crtc_update_bus(kms, crtc);
	}
}

static int _dpu_core_perf_set_core_clk_rate(struct dpu_kms *kms, u64 rate)
{
	struct dss_clk *core_clk = kms->perf.core_clk;

	if (core_clk->max_rate && (rate > core_clk->max_rate))
		rate = core_clk->max_rate;

	core_clk->rate = rate;
	return dev_pm_opp_set_rate(&kms->pdev->dev, core_clk->rate);
}

static u64 _dpu_core_perf_get_core_clk_rate(struct dpu_kms *kms)
{
	u64 clk_rate = kms->perf.perf_tune.min_core_clk;
	struct drm_crtc *crtc;
	struct dpu_crtc_state *dpu_cstate;

	drm_for_each_crtc(crtc, kms->dev) {
		if (crtc->enabled) {
			dpu_cstate = to_dpu_crtc_state(crtc->state);
			clk_rate = max(dpu_cstate->new_perf.core_clk_rate,
				       clk_rate);
			clk_rate = clk_round_rate(kms->perf.core_clk->clk,
						  clk_rate);
		}
	}

	if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED)
		clk_rate = kms->perf.fix_core_clk_rate;

	DPU_DEBUG("clk:%llu\n", clk_rate);

	return clk_rate;
}
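
/*
 * Illustrative example (hypothetical rates, not from this driver): if
 * two CRTCs are enabled and their states request core_clk_rate values
 * of 150000000 and 300000000, _dpu_core_perf_get_core_clk_rate() above
 * starts from perf_tune.min_core_clk, takes the maximum across CRTCs
 * and snaps it to a supported rate via clk_round_rate(), so the request
 * becomes whatever the clock provider rounds 300000000 to. In
 * DPU_PERF_MODE_FIXED mode the result is overridden by
 * fix_core_clk_rate regardless of the per-CRTC requests.
 */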

int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
			      int params_changed, bool stop_req)
{
	struct dpu_core_perf_params *new, *old;
	bool update_bus = false, update_clk = false;
	u64 clk_rate = 0;
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *dpu_cstate;
	struct dpu_kms *kms;
	int ret;

	if (!crtc) {
		DPU_ERROR("invalid crtc\n");
		return -EINVAL;
	}

	kms = _dpu_crtc_get_kms(crtc);
	if (!kms->catalog) {
		DPU_ERROR("invalid kms\n");
		return -EINVAL;
	}

	dpu_crtc = to_dpu_crtc(crtc);
	dpu_cstate = to_dpu_crtc_state(crtc->state);

	DPU_DEBUG("crtc:%d stop_req:%d core_clk:%llu\n",
		  crtc->base.id, stop_req, kms->perf.core_clk_rate);

	old = &dpu_crtc->cur_perf;
	new = &dpu_cstate->new_perf;

	if (crtc->enabled && !stop_req) {
		/*
		 * cases for bus bandwidth update.
		 * 1. new bandwidth vote - "ab or ib vote" is higher
		 *    than current vote for update request.
		 * 2. new bandwidth vote - "ab or ib vote" is lower
		 *    than current vote at end of commit or stop.
		 */
		if ((params_changed && ((new->bw_ctl > old->bw_ctl) ||
		     (new->max_per_pipe_ib > old->max_per_pipe_ib))) ||
		    (!params_changed && ((new->bw_ctl < old->bw_ctl) ||
		     (new->max_per_pipe_ib < old->max_per_pipe_ib)))) {
			DPU_DEBUG("crtc=%d p=%d new_bw=%llu,old_bw=%llu\n",
				  crtc->base.id, params_changed,
				  new->bw_ctl, old->bw_ctl);
			old->bw_ctl = new->bw_ctl;
			old->max_per_pipe_ib = new->max_per_pipe_ib;
			update_bus = true;
		}

		if ((params_changed &&
		     (new->core_clk_rate > old->core_clk_rate)) ||
		    (!params_changed &&
		     (new->core_clk_rate < old->core_clk_rate))) {
			old->core_clk_rate = new->core_clk_rate;
			update_clk = true;
		}
	} else {
		DPU_DEBUG("crtc=%d disable\n", crtc->base.id);
		memset(old, 0, sizeof(*old));
		memset(new, 0, sizeof(*new));
		update_bus = true;
		update_clk = true;
	}

	trace_dpu_perf_crtc_update(crtc->base.id, new->bw_ctl,
				   new->core_clk_rate, stop_req,
				   update_bus, update_clk);

	if (update_bus) {
		ret = _dpu_core_perf_crtc_update_bus(kms, crtc);
		if (ret) {
			DPU_ERROR("crtc-%d: failed to update bus bw vote\n",
				  crtc->base.id);
			return ret;
		}
	}

	/*
	 * Update the clock after the bandwidth vote to ensure
	 * bandwidth is available before the clock rate is increased.
	 */
	if (update_clk) {
		clk_rate = _dpu_core_perf_get_core_clk_rate(kms);

		trace_dpu_core_perf_update_clk(kms->dev, stop_req, clk_rate);

		ret = _dpu_core_perf_set_core_clk_rate(kms, clk_rate);
		if (ret) {
			DPU_ERROR("failed to set %s clock rate %llu\n",
				  kms->perf.core_clk->clk_name, clk_rate);
			return ret;
		}

		kms->perf.core_clk_rate = clk_rate;
		DPU_DEBUG("update clk rate = %llu Hz\n", clk_rate);
	}
	return 0;
}
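
/*
 * Illustrative debugfs usage for the perf_mode file below (the exact
 * path is an assumption; it depends on the parent dentry passed to
 * dpu_core_perf_debugfs_init() and on the DRM minor number):
 *
 *   # bound performance by the minimum settings (raises the
 *   # min_core_clk/min_bus_vote floors to the maximum values)
 *   echo 1 > /sys/kernel/debug/dri/0/core_perf/perf_mode
 *   # pin performance to the fix_core_* values
 *   echo 2 > /sys/kernel/debug/dri/0/core_perf/perf_mode
 *   # return control to the user mode client
 *   echo 0 > /sys/kernel/debug/dri/0/core_perf/perf_mode
 *   cat /sys/kernel/debug/dri/0/core_perf/perf_mode
 */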

#ifdef CONFIG_DEBUG_FS

static ssize_t _dpu_core_perf_mode_write(struct file *file,
		    const char __user *user_buf, size_t count, loff_t *ppos)
{
	struct dpu_core_perf *perf = file->private_data;
	struct dpu_perf_cfg *cfg = &perf->catalog->perf;
	u32 perf_mode = 0;
	int ret;

	ret = kstrtouint_from_user(user_buf, count, 0, &perf_mode);
	if (ret)
		return ret;

	if (perf_mode >= DPU_PERF_MODE_MAX)
		return -EINVAL;

	if (perf_mode == DPU_PERF_MODE_FIXED) {
		DRM_INFO("fix performance mode\n");
	} else if (perf_mode == DPU_PERF_MODE_MINIMUM) {
		/* run the driver with max clk and BW vote */
		perf->perf_tune.min_core_clk = perf->max_core_clk_rate;
		perf->perf_tune.min_bus_vote =
				(u64) cfg->max_bw_high * 1000;
		DRM_INFO("minimum performance mode\n");
	} else if (perf_mode == DPU_PERF_MODE_NORMAL) {
		/* reset the perf tune params to 0 */
		perf->perf_tune.min_core_clk = 0;
		perf->perf_tune.min_bus_vote = 0;
		DRM_INFO("normal performance mode\n");
	}
	perf->perf_tune.mode = perf_mode;

	return count;
}

static ssize_t _dpu_core_perf_mode_read(struct file *file,
			char __user *buff, size_t count, loff_t *ppos)
{
	struct dpu_core_perf *perf = file->private_data;
	int len;
	char buf[128];

	len = scnprintf(buf, sizeof(buf),
			"mode %d min_mdp_clk %llu min_bus_vote %llu\n",
			perf->perf_tune.mode,
			perf->perf_tune.min_core_clk,
			perf->perf_tune.min_bus_vote);

	return simple_read_from_buffer(buff, count, ppos, buf, len);
}

static const struct file_operations dpu_core_perf_mode_fops = {
	.open = simple_open,
	.read = _dpu_core_perf_mode_read,
	.write = _dpu_core_perf_mode_write,
};
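
/*
 * Note (illustrative, not from the original sources): the debugfs
 * entries created below map directly onto fields of struct
 * dpu_core_perf and the catalog's perf block. A write such as
 *
 *   echo 300000000 > .../core_perf/fix_core_clk_rate
 *
 * (path assumed as in the perf_mode example above) is only picked up
 * the next time a commit reaches dpu_core_perf_crtc_update() with
 * DPU_PERF_MODE_FIXED selected.
 */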
debugfs_create_u64("fix_core_ab_vote", 0600, entry, 438 &perf->fix_core_ab_vote); 439 440 return 0; 441 } 442 #endif 443 444 void dpu_core_perf_destroy(struct dpu_core_perf *perf) 445 { 446 if (!perf) { 447 DPU_ERROR("invalid parameters\n"); 448 return; 449 } 450 451 perf->max_core_clk_rate = 0; 452 perf->core_clk = NULL; 453 perf->catalog = NULL; 454 perf->dev = NULL; 455 } 456 457 int dpu_core_perf_init(struct dpu_core_perf *perf, 458 struct drm_device *dev, 459 struct dpu_mdss_cfg *catalog, 460 struct dss_clk *core_clk) 461 { 462 perf->dev = dev; 463 perf->catalog = catalog; 464 perf->core_clk = core_clk; 465 466 perf->max_core_clk_rate = core_clk->max_rate; 467 if (!perf->max_core_clk_rate) { 468 DPU_DEBUG("optional max core clk rate, use default\n"); 469 perf->max_core_clk_rate = DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE; 470 } 471 472 return 0; 473 } 474