1 /* 2 * Copyright © 2008 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 * Keith Packard <keithp@keithp.com> 26 * 27 */ 28 29 #include <linux/sched/mm.h> 30 #include <linux/sort.h> 31 32 #include <drm/drm_debugfs.h> 33 34 #include "gem/i915_gem_context.h" 35 #include "gt/intel_gt_buffer_pool.h" 36 #include "gt/intel_gt_clock_utils.h" 37 #include "gt/intel_gt.h" 38 #include "gt/intel_gt_pm.h" 39 #include "gt/intel_gt_requests.h" 40 #include "gt/intel_reset.h" 41 #include "gt/intel_rc6.h" 42 #include "gt/intel_rps.h" 43 #include "gt/intel_sseu_debugfs.h" 44 45 #include "i915_debugfs.h" 46 #include "i915_debugfs_params.h" 47 #include "i915_irq.h" 48 #include "i915_scheduler.h" 49 #include "i915_trace.h" 50 #include "intel_pm.h" 51 #include "intel_sideband.h" 52 53 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node) 54 { 55 return to_i915(node->minor->dev); 56 } 57 58 static int i915_capabilities(struct seq_file *m, void *data) 59 { 60 struct drm_i915_private *i915 = node_to_i915(m->private); 61 struct drm_printer p = drm_seq_file_printer(m); 62 63 seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915)); 64 65 intel_device_info_print_static(INTEL_INFO(i915), &p); 66 intel_device_info_print_runtime(RUNTIME_INFO(i915), &p); 67 intel_gt_info_print(&i915->gt.info, &p); 68 intel_driver_caps_print(&i915->caps, &p); 69 70 kernel_param_lock(THIS_MODULE); 71 i915_params_dump(&i915->params, &p); 72 kernel_param_unlock(THIS_MODULE); 73 74 return 0; 75 } 76 77 static char get_tiling_flag(struct drm_i915_gem_object *obj) 78 { 79 switch (i915_gem_object_get_tiling(obj)) { 80 default: 81 case I915_TILING_NONE: return ' '; 82 case I915_TILING_X: return 'X'; 83 case I915_TILING_Y: return 'Y'; 84 } 85 } 86 87 static char get_global_flag(struct drm_i915_gem_object *obj) 88 { 89 return READ_ONCE(obj->userfault_count) ? 'g' : ' '; 90 } 91 92 static char get_pin_mapped_flag(struct drm_i915_gem_object *obj) 93 { 94 return obj->mm.mapping ? 'M' : ' '; 95 } 96 97 static const char * 98 stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len) 99 { 100 size_t x = 0; 101 102 switch (page_sizes) { 103 case 0: 104 return ""; 105 case I915_GTT_PAGE_SIZE_4K: 106 return "4K"; 107 case I915_GTT_PAGE_SIZE_64K: 108 return "64K"; 109 case I915_GTT_PAGE_SIZE_2M: 110 return "2M"; 111 default: 112 if (!buf) 113 return "M"; 114 115 if (page_sizes & I915_GTT_PAGE_SIZE_2M) 116 x += snprintf(buf + x, len - x, "2M, "); 117 if (page_sizes & I915_GTT_PAGE_SIZE_64K) 118 x += snprintf(buf + x, len - x, "64K, "); 119 if (page_sizes & I915_GTT_PAGE_SIZE_4K) 120 x += snprintf(buf + x, len - x, "4K, "); 121 buf[x-2] = '\0'; 122 123 return buf; 124 } 125 } 126 127 void 128 i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) 129 { 130 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 131 struct intel_engine_cs *engine; 132 struct i915_vma *vma; 133 int pin_count = 0; 134 135 seq_printf(m, "%pK: %c%c%c %8zdKiB %02x %02x %s%s%s", 136 &obj->base, 137 get_tiling_flag(obj), 138 get_global_flag(obj), 139 get_pin_mapped_flag(obj), 140 obj->base.size / 1024, 141 obj->read_domains, 142 obj->write_domain, 143 i915_cache_level_str(dev_priv, obj->cache_level), 144 obj->mm.dirty ? " dirty" : "", 145 obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : ""); 146 if (obj->base.name) 147 seq_printf(m, " (name: %d)", obj->base.name); 148 149 spin_lock(&obj->vma.lock); 150 list_for_each_entry(vma, &obj->vma.list, obj_link) { 151 if (!drm_mm_node_allocated(&vma->node)) 152 continue; 153 154 spin_unlock(&obj->vma.lock); 155 156 if (i915_vma_is_pinned(vma)) 157 pin_count++; 158 159 seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s", 160 i915_vma_is_ggtt(vma) ? "g" : "pp", 161 vma->node.start, vma->node.size, 162 stringify_page_sizes(vma->page_sizes.gtt, NULL, 0)); 163 if (i915_vma_is_ggtt(vma)) { 164 switch (vma->ggtt_view.type) { 165 case I915_GGTT_VIEW_NORMAL: 166 seq_puts(m, ", normal"); 167 break; 168 169 case I915_GGTT_VIEW_PARTIAL: 170 seq_printf(m, ", partial [%08llx+%x]", 171 vma->ggtt_view.partial.offset << PAGE_SHIFT, 172 vma->ggtt_view.partial.size << PAGE_SHIFT); 173 break; 174 175 case I915_GGTT_VIEW_ROTATED: 176 seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]", 177 vma->ggtt_view.rotated.plane[0].width, 178 vma->ggtt_view.rotated.plane[0].height, 179 vma->ggtt_view.rotated.plane[0].stride, 180 vma->ggtt_view.rotated.plane[0].offset, 181 vma->ggtt_view.rotated.plane[1].width, 182 vma->ggtt_view.rotated.plane[1].height, 183 vma->ggtt_view.rotated.plane[1].stride, 184 vma->ggtt_view.rotated.plane[1].offset); 185 break; 186 187 case I915_GGTT_VIEW_REMAPPED: 188 seq_printf(m, ", remapped [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]", 189 vma->ggtt_view.remapped.plane[0].width, 190 vma->ggtt_view.remapped.plane[0].height, 191 vma->ggtt_view.remapped.plane[0].stride, 192 vma->ggtt_view.remapped.plane[0].offset, 193 vma->ggtt_view.remapped.plane[1].width, 194 vma->ggtt_view.remapped.plane[1].height, 195 vma->ggtt_view.remapped.plane[1].stride, 196 vma->ggtt_view.remapped.plane[1].offset); 197 break; 198 199 default: 200 MISSING_CASE(vma->ggtt_view.type); 201 break; 202 } 203 } 204 if (vma->fence) 205 seq_printf(m, " , fence: %d", vma->fence->id); 206 seq_puts(m, ")"); 207 208 spin_lock(&obj->vma.lock); 209 } 210 spin_unlock(&obj->vma.lock); 211 212 seq_printf(m, " (pinned x %d)", pin_count); 213 if (i915_gem_object_is_stolen(obj)) 214 seq_printf(m, " (stolen: %08llx)", obj->stolen->start); 215 if (i915_gem_object_is_framebuffer(obj)) 216 seq_printf(m, " (fb)"); 217 218 engine = i915_gem_object_last_write_engine(obj); 219 if (engine) 220 seq_printf(m, " (%s)", engine->name); 221 } 222 223 static int i915_gem_object_info(struct seq_file *m, void *data) 224 { 225 struct drm_i915_private *i915 = node_to_i915(m->private); 226 struct intel_memory_region *mr; 227 enum intel_region_id id; 228 229 seq_printf(m, "%u shrinkable [%u free] objects, %llu bytes\n", 230 i915->mm.shrink_count, 231 atomic_read(&i915->mm.free_count), 232 i915->mm.shrink_memory); 233 for_each_memory_region(mr, i915, id) 234 seq_printf(m, "%s: total:%pa, available:%pa bytes\n", 235 mr->name, &mr->total, &mr->avail); 236 237 return 0; 238 } 239 240 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) 241 static ssize_t gpu_state_read(struct file *file, char __user *ubuf, 242 size_t count, loff_t *pos) 243 { 244 struct i915_gpu_coredump *error; 245 ssize_t ret; 246 void *buf; 247 248 error = file->private_data; 249 if (!error) 250 return 0; 251 252 /* Bounce buffer required because of kernfs __user API convenience. */ 253 buf = kmalloc(count, GFP_KERNEL); 254 if (!buf) 255 return -ENOMEM; 256 257 ret = i915_gpu_coredump_copy_to_buffer(error, buf, *pos, count); 258 if (ret <= 0) 259 goto out; 260 261 if (!copy_to_user(ubuf, buf, ret)) 262 *pos += ret; 263 else 264 ret = -EFAULT; 265 266 out: 267 kfree(buf); 268 return ret; 269 } 270 271 static int gpu_state_release(struct inode *inode, struct file *file) 272 { 273 i915_gpu_coredump_put(file->private_data); 274 return 0; 275 } 276 277 static int i915_gpu_info_open(struct inode *inode, struct file *file) 278 { 279 struct drm_i915_private *i915 = inode->i_private; 280 struct i915_gpu_coredump *gpu; 281 intel_wakeref_t wakeref; 282 283 gpu = NULL; 284 with_intel_runtime_pm(&i915->runtime_pm, wakeref) 285 gpu = i915_gpu_coredump(&i915->gt, ALL_ENGINES); 286 if (IS_ERR(gpu)) 287 return PTR_ERR(gpu); 288 289 file->private_data = gpu; 290 return 0; 291 } 292 293 static const struct file_operations i915_gpu_info_fops = { 294 .owner = THIS_MODULE, 295 .open = i915_gpu_info_open, 296 .read = gpu_state_read, 297 .llseek = default_llseek, 298 .release = gpu_state_release, 299 }; 300 301 static ssize_t 302 i915_error_state_write(struct file *filp, 303 const char __user *ubuf, 304 size_t cnt, 305 loff_t *ppos) 306 { 307 struct i915_gpu_coredump *error = filp->private_data; 308 309 if (!error) 310 return 0; 311 312 drm_dbg(&error->i915->drm, "Resetting error state\n"); 313 i915_reset_error_state(error->i915); 314 315 return cnt; 316 } 317 318 static int i915_error_state_open(struct inode *inode, struct file *file) 319 { 320 struct i915_gpu_coredump *error; 321 322 error = i915_first_error_state(inode->i_private); 323 if (IS_ERR(error)) 324 return PTR_ERR(error); 325 326 file->private_data = error; 327 return 0; 328 } 329 330 static const struct file_operations i915_error_state_fops = { 331 .owner = THIS_MODULE, 332 .open = i915_error_state_open, 333 .read = gpu_state_read, 334 .write = i915_error_state_write, 335 .llseek = default_llseek, 336 .release = gpu_state_release, 337 }; 338 #endif 339 340 static int i915_frequency_info(struct seq_file *m, void *unused) 341 { 342 struct drm_i915_private *dev_priv = node_to_i915(m->private); 343 struct intel_uncore *uncore = &dev_priv->uncore; 344 struct intel_rps *rps = &dev_priv->gt.rps; 345 intel_wakeref_t wakeref; 346 347 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 348 349 if (IS_GEN(dev_priv, 5)) { 350 u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL); 351 u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK); 352 353 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf); 354 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f); 355 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >> 356 MEMSTAT_VID_SHIFT); 357 seq_printf(m, "Current P-state: %d\n", 358 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); 359 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 360 u32 rpmodectl, freq_sts; 361 362 rpmodectl = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CONTROL); 363 seq_printf(m, "Video Turbo Mode: %s\n", 364 yesno(rpmodectl & GEN6_RP_MEDIA_TURBO)); 365 seq_printf(m, "HW control enabled: %s\n", 366 yesno(rpmodectl & GEN6_RP_ENABLE)); 367 seq_printf(m, "SW control enabled: %s\n", 368 yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) == 369 GEN6_RP_MEDIA_SW_MODE)); 370 371 vlv_punit_get(dev_priv); 372 freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); 373 vlv_punit_put(dev_priv); 374 375 seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts); 376 seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq); 377 378 seq_printf(m, "actual GPU freq: %d MHz\n", 379 intel_gpu_freq(rps, (freq_sts >> 8) & 0xff)); 380 381 seq_printf(m, "current GPU freq: %d MHz\n", 382 intel_gpu_freq(rps, rps->cur_freq)); 383 384 seq_printf(m, "max GPU freq: %d MHz\n", 385 intel_gpu_freq(rps, rps->max_freq)); 386 387 seq_printf(m, "min GPU freq: %d MHz\n", 388 intel_gpu_freq(rps, rps->min_freq)); 389 390 seq_printf(m, "idle GPU freq: %d MHz\n", 391 intel_gpu_freq(rps, rps->idle_freq)); 392 393 seq_printf(m, 394 "efficient (RPe) frequency: %d MHz\n", 395 intel_gpu_freq(rps, rps->efficient_freq)); 396 } else if (INTEL_GEN(dev_priv) >= 6) { 397 u32 rp_state_limits; 398 u32 gt_perf_status; 399 u32 rp_state_cap; 400 u32 rpmodectl, rpinclimit, rpdeclimit; 401 u32 rpstat, cagf, reqf; 402 u32 rpupei, rpcurup, rpprevup; 403 u32 rpdownei, rpcurdown, rpprevdown; 404 u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask; 405 int max_freq; 406 407 rp_state_limits = intel_uncore_read(&dev_priv->uncore, GEN6_RP_STATE_LIMITS); 408 if (IS_GEN9_LP(dev_priv)) { 409 rp_state_cap = intel_uncore_read(&dev_priv->uncore, BXT_RP_STATE_CAP); 410 gt_perf_status = intel_uncore_read(&dev_priv->uncore, BXT_GT_PERF_STATUS); 411 } else { 412 rp_state_cap = intel_uncore_read(&dev_priv->uncore, GEN6_RP_STATE_CAP); 413 gt_perf_status = intel_uncore_read(&dev_priv->uncore, GEN6_GT_PERF_STATUS); 414 } 415 416 /* RPSTAT1 is in the GT power well */ 417 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); 418 419 reqf = intel_uncore_read(&dev_priv->uncore, GEN6_RPNSWREQ); 420 if (INTEL_GEN(dev_priv) >= 9) 421 reqf >>= 23; 422 else { 423 reqf &= ~GEN6_TURBO_DISABLE; 424 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 425 reqf >>= 24; 426 else 427 reqf >>= 25; 428 } 429 reqf = intel_gpu_freq(rps, reqf); 430 431 rpmodectl = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CONTROL); 432 rpinclimit = intel_uncore_read(&dev_priv->uncore, GEN6_RP_UP_THRESHOLD); 433 rpdeclimit = intel_uncore_read(&dev_priv->uncore, GEN6_RP_DOWN_THRESHOLD); 434 435 rpstat = intel_uncore_read(&dev_priv->uncore, GEN6_RPSTAT1); 436 rpupei = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK; 437 rpcurup = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK; 438 rpprevup = intel_uncore_read(&dev_priv->uncore, GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK; 439 rpdownei = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK; 440 rpcurdown = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK; 441 rpprevdown = intel_uncore_read(&dev_priv->uncore, GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK; 442 cagf = intel_rps_read_actual_frequency(rps); 443 444 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); 445 446 if (INTEL_GEN(dev_priv) >= 11) { 447 pm_ier = intel_uncore_read(&dev_priv->uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE); 448 pm_imr = intel_uncore_read(&dev_priv->uncore, GEN11_GPM_WGBOXPERF_INTR_MASK); 449 /* 450 * The equivalent to the PM ISR & IIR cannot be read 451 * without affecting the current state of the system 452 */ 453 pm_isr = 0; 454 pm_iir = 0; 455 } else if (INTEL_GEN(dev_priv) >= 8) { 456 pm_ier = intel_uncore_read(&dev_priv->uncore, GEN8_GT_IER(2)); 457 pm_imr = intel_uncore_read(&dev_priv->uncore, GEN8_GT_IMR(2)); 458 pm_isr = intel_uncore_read(&dev_priv->uncore, GEN8_GT_ISR(2)); 459 pm_iir = intel_uncore_read(&dev_priv->uncore, GEN8_GT_IIR(2)); 460 } else { 461 pm_ier = intel_uncore_read(&dev_priv->uncore, GEN6_PMIER); 462 pm_imr = intel_uncore_read(&dev_priv->uncore, GEN6_PMIMR); 463 pm_isr = intel_uncore_read(&dev_priv->uncore, GEN6_PMISR); 464 pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR); 465 } 466 pm_mask = intel_uncore_read(&dev_priv->uncore, GEN6_PMINTRMSK); 467 468 seq_printf(m, "Video Turbo Mode: %s\n", 469 yesno(rpmodectl & GEN6_RP_MEDIA_TURBO)); 470 seq_printf(m, "HW control enabled: %s\n", 471 yesno(rpmodectl & GEN6_RP_ENABLE)); 472 seq_printf(m, "SW control enabled: %s\n", 473 yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) == 474 GEN6_RP_MEDIA_SW_MODE)); 475 476 seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n", 477 pm_ier, pm_imr, pm_mask); 478 if (INTEL_GEN(dev_priv) <= 10) 479 seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n", 480 pm_isr, pm_iir); 481 seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n", 482 rps->pm_intrmsk_mbz); 483 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); 484 seq_printf(m, "Render p-state ratio: %d\n", 485 (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8); 486 seq_printf(m, "Render p-state VID: %d\n", 487 gt_perf_status & 0xff); 488 seq_printf(m, "Render p-state limit: %d\n", 489 rp_state_limits & 0xff); 490 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat); 491 seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl); 492 seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit); 493 seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit); 494 seq_printf(m, "RPNSWREQ: %dMHz\n", reqf); 495 seq_printf(m, "CAGF: %dMHz\n", cagf); 496 seq_printf(m, "RP CUR UP EI: %d (%lldns)\n", 497 rpupei, 498 intel_gt_pm_interval_to_ns(&dev_priv->gt, rpupei)); 499 seq_printf(m, "RP CUR UP: %d (%lldun)\n", 500 rpcurup, 501 intel_gt_pm_interval_to_ns(&dev_priv->gt, rpcurup)); 502 seq_printf(m, "RP PREV UP: %d (%lldns)\n", 503 rpprevup, 504 intel_gt_pm_interval_to_ns(&dev_priv->gt, rpprevup)); 505 seq_printf(m, "Up threshold: %d%%\n", 506 rps->power.up_threshold); 507 508 seq_printf(m, "RP CUR DOWN EI: %d (%lldns)\n", 509 rpdownei, 510 intel_gt_pm_interval_to_ns(&dev_priv->gt, 511 rpdownei)); 512 seq_printf(m, "RP CUR DOWN: %d (%lldns)\n", 513 rpcurdown, 514 intel_gt_pm_interval_to_ns(&dev_priv->gt, 515 rpcurdown)); 516 seq_printf(m, "RP PREV DOWN: %d (%lldns)\n", 517 rpprevdown, 518 intel_gt_pm_interval_to_ns(&dev_priv->gt, 519 rpprevdown)); 520 seq_printf(m, "Down threshold: %d%%\n", 521 rps->power.down_threshold); 522 523 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 : 524 rp_state_cap >> 16) & 0xff; 525 max_freq *= (IS_GEN9_BC(dev_priv) || 526 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1); 527 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", 528 intel_gpu_freq(rps, max_freq)); 529 530 max_freq = (rp_state_cap & 0xff00) >> 8; 531 max_freq *= (IS_GEN9_BC(dev_priv) || 532 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1); 533 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", 534 intel_gpu_freq(rps, max_freq)); 535 536 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 : 537 rp_state_cap >> 0) & 0xff; 538 max_freq *= (IS_GEN9_BC(dev_priv) || 539 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1); 540 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", 541 intel_gpu_freq(rps, max_freq)); 542 seq_printf(m, "Max overclocked frequency: %dMHz\n", 543 intel_gpu_freq(rps, rps->max_freq)); 544 545 seq_printf(m, "Current freq: %d MHz\n", 546 intel_gpu_freq(rps, rps->cur_freq)); 547 seq_printf(m, "Actual freq: %d MHz\n", cagf); 548 seq_printf(m, "Idle freq: %d MHz\n", 549 intel_gpu_freq(rps, rps->idle_freq)); 550 seq_printf(m, "Min freq: %d MHz\n", 551 intel_gpu_freq(rps, rps->min_freq)); 552 seq_printf(m, "Boost freq: %d MHz\n", 553 intel_gpu_freq(rps, rps->boost_freq)); 554 seq_printf(m, "Max freq: %d MHz\n", 555 intel_gpu_freq(rps, rps->max_freq)); 556 seq_printf(m, 557 "efficient (RPe) frequency: %d MHz\n", 558 intel_gpu_freq(rps, rps->efficient_freq)); 559 } else { 560 seq_puts(m, "no P-state info available\n"); 561 } 562 563 seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk); 564 seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq); 565 seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq); 566 567 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 568 return 0; 569 } 570 571 static const char *swizzle_string(unsigned swizzle) 572 { 573 switch (swizzle) { 574 case I915_BIT_6_SWIZZLE_NONE: 575 return "none"; 576 case I915_BIT_6_SWIZZLE_9: 577 return "bit9"; 578 case I915_BIT_6_SWIZZLE_9_10: 579 return "bit9/bit10"; 580 case I915_BIT_6_SWIZZLE_9_11: 581 return "bit9/bit11"; 582 case I915_BIT_6_SWIZZLE_9_10_11: 583 return "bit9/bit10/bit11"; 584 case I915_BIT_6_SWIZZLE_9_17: 585 return "bit9/bit17"; 586 case I915_BIT_6_SWIZZLE_9_10_17: 587 return "bit9/bit10/bit17"; 588 case I915_BIT_6_SWIZZLE_UNKNOWN: 589 return "unknown"; 590 } 591 592 return "bug"; 593 } 594 595 static int i915_swizzle_info(struct seq_file *m, void *data) 596 { 597 struct drm_i915_private *dev_priv = node_to_i915(m->private); 598 struct intel_uncore *uncore = &dev_priv->uncore; 599 intel_wakeref_t wakeref; 600 601 seq_printf(m, "bit6 swizzle for X-tiling = %s\n", 602 swizzle_string(dev_priv->ggtt.bit_6_swizzle_x)); 603 seq_printf(m, "bit6 swizzle for Y-tiling = %s\n", 604 swizzle_string(dev_priv->ggtt.bit_6_swizzle_y)); 605 606 if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) 607 seq_puts(m, "L-shaped memory detected\n"); 608 609 /* On BDW+, swizzling is not used. See detect_bit_6_swizzle() */ 610 if (INTEL_GEN(dev_priv) >= 8 || IS_VALLEYVIEW(dev_priv)) 611 return 0; 612 613 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 614 615 if (IS_GEN_RANGE(dev_priv, 3, 4)) { 616 seq_printf(m, "DDC = 0x%08x\n", 617 intel_uncore_read(uncore, DCC)); 618 seq_printf(m, "DDC2 = 0x%08x\n", 619 intel_uncore_read(uncore, DCC2)); 620 seq_printf(m, "C0DRB3 = 0x%04x\n", 621 intel_uncore_read16(uncore, C0DRB3)); 622 seq_printf(m, "C1DRB3 = 0x%04x\n", 623 intel_uncore_read16(uncore, C1DRB3)); 624 } else if (INTEL_GEN(dev_priv) >= 6) { 625 seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n", 626 intel_uncore_read(uncore, MAD_DIMM_C0)); 627 seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n", 628 intel_uncore_read(uncore, MAD_DIMM_C1)); 629 seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n", 630 intel_uncore_read(uncore, MAD_DIMM_C2)); 631 seq_printf(m, "TILECTL = 0x%08x\n", 632 intel_uncore_read(uncore, TILECTL)); 633 if (INTEL_GEN(dev_priv) >= 8) 634 seq_printf(m, "GAMTARBMODE = 0x%08x\n", 635 intel_uncore_read(uncore, GAMTARBMODE)); 636 else 637 seq_printf(m, "ARB_MODE = 0x%08x\n", 638 intel_uncore_read(uncore, ARB_MODE)); 639 seq_printf(m, "DISP_ARB_CTL = 0x%08x\n", 640 intel_uncore_read(uncore, DISP_ARB_CTL)); 641 } 642 643 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 644 645 return 0; 646 } 647 648 static int i915_rps_boost_info(struct seq_file *m, void *data) 649 { 650 struct drm_i915_private *dev_priv = node_to_i915(m->private); 651 struct intel_rps *rps = &dev_priv->gt.rps; 652 653 seq_printf(m, "RPS enabled? %s\n", yesno(intel_rps_is_enabled(rps))); 654 seq_printf(m, "RPS active? %s\n", yesno(intel_rps_is_active(rps))); 655 seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake)); 656 seq_printf(m, "Boosts outstanding? %d\n", 657 atomic_read(&rps->num_waiters)); 658 seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive)); 659 seq_printf(m, "Frequency requested %d, actual %d\n", 660 intel_gpu_freq(rps, rps->cur_freq), 661 intel_rps_read_actual_frequency(rps)); 662 seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n", 663 intel_gpu_freq(rps, rps->min_freq), 664 intel_gpu_freq(rps, rps->min_freq_softlimit), 665 intel_gpu_freq(rps, rps->max_freq_softlimit), 666 intel_gpu_freq(rps, rps->max_freq)); 667 seq_printf(m, " idle:%d, efficient:%d, boost:%d\n", 668 intel_gpu_freq(rps, rps->idle_freq), 669 intel_gpu_freq(rps, rps->efficient_freq), 670 intel_gpu_freq(rps, rps->boost_freq)); 671 672 seq_printf(m, "Wait boosts: %d\n", READ_ONCE(rps->boosts)); 673 674 return 0; 675 } 676 677 static int i915_runtime_pm_status(struct seq_file *m, void *unused) 678 { 679 struct drm_i915_private *dev_priv = node_to_i915(m->private); 680 struct pci_dev *pdev = dev_priv->drm.pdev; 681 682 if (!HAS_RUNTIME_PM(dev_priv)) 683 seq_puts(m, "Runtime power management not supported\n"); 684 685 seq_printf(m, "Runtime power status: %s\n", 686 enableddisabled(!dev_priv->power_domains.init_wakeref)); 687 688 seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake)); 689 seq_printf(m, "IRQs disabled: %s\n", 690 yesno(!intel_irqs_enabled(dev_priv))); 691 #ifdef CONFIG_PM 692 seq_printf(m, "Usage count: %d\n", 693 atomic_read(&dev_priv->drm.dev->power.usage_count)); 694 #else 695 seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n"); 696 #endif 697 seq_printf(m, "PCI device power state: %s [%d]\n", 698 pci_power_name(pdev->current_state), 699 pdev->current_state); 700 701 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) { 702 struct drm_printer p = drm_seq_file_printer(m); 703 704 print_intel_runtime_pm_wakeref(&dev_priv->runtime_pm, &p); 705 } 706 707 return 0; 708 } 709 710 static int i915_engine_info(struct seq_file *m, void *unused) 711 { 712 struct drm_i915_private *i915 = node_to_i915(m->private); 713 struct intel_engine_cs *engine; 714 intel_wakeref_t wakeref; 715 struct drm_printer p; 716 717 wakeref = intel_runtime_pm_get(&i915->runtime_pm); 718 719 seq_printf(m, "GT awake? %s [%d], %llums\n", 720 yesno(i915->gt.awake), 721 atomic_read(&i915->gt.wakeref.count), 722 ktime_to_ms(intel_gt_get_awake_time(&i915->gt))); 723 seq_printf(m, "CS timestamp frequency: %u Hz, %d ns\n", 724 i915->gt.clock_frequency, 725 i915->gt.clock_period_ns); 726 727 p = drm_seq_file_printer(m); 728 for_each_uabi_engine(engine, i915) 729 intel_engine_dump(engine, &p, "%s\n", engine->name); 730 731 intel_gt_show_timelines(&i915->gt, &p, i915_request_show_with_schedule); 732 733 intel_runtime_pm_put(&i915->runtime_pm, wakeref); 734 735 return 0; 736 } 737 738 static int i915_wa_registers(struct seq_file *m, void *unused) 739 { 740 struct drm_i915_private *i915 = node_to_i915(m->private); 741 struct intel_engine_cs *engine; 742 743 for_each_uabi_engine(engine, i915) { 744 const struct i915_wa_list *wal = &engine->ctx_wa_list; 745 const struct i915_wa *wa; 746 unsigned int count; 747 748 count = wal->count; 749 if (!count) 750 continue; 751 752 seq_printf(m, "%s: Workarounds applied: %u\n", 753 engine->name, count); 754 755 for (wa = wal->list; count--; wa++) 756 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n", 757 i915_mmio_reg_offset(wa->reg), 758 wa->set, wa->clr); 759 760 seq_printf(m, "\n"); 761 } 762 763 return 0; 764 } 765 766 static int 767 i915_wedged_get(void *data, u64 *val) 768 { 769 struct drm_i915_private *i915 = data; 770 int ret = intel_gt_terminally_wedged(&i915->gt); 771 772 switch (ret) { 773 case -EIO: 774 *val = 1; 775 return 0; 776 case 0: 777 *val = 0; 778 return 0; 779 default: 780 return ret; 781 } 782 } 783 784 static int 785 i915_wedged_set(void *data, u64 val) 786 { 787 struct drm_i915_private *i915 = data; 788 789 /* Flush any previous reset before applying for a new one */ 790 wait_event(i915->gt.reset.queue, 791 !test_bit(I915_RESET_BACKOFF, &i915->gt.reset.flags)); 792 793 intel_gt_handle_error(&i915->gt, val, I915_ERROR_CAPTURE, 794 "Manually set wedged engine mask = %llx", val); 795 return 0; 796 } 797 798 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops, 799 i915_wedged_get, i915_wedged_set, 800 "%llu\n"); 801 802 static int 803 i915_perf_noa_delay_set(void *data, u64 val) 804 { 805 struct drm_i915_private *i915 = data; 806 807 /* 808 * This would lead to infinite waits as we're doing timestamp 809 * difference on the CS with only 32bits. 810 */ 811 if (intel_gt_ns_to_clock_interval(&i915->gt, val) > U32_MAX) 812 return -EINVAL; 813 814 atomic64_set(&i915->perf.noa_programming_delay, val); 815 return 0; 816 } 817 818 static int 819 i915_perf_noa_delay_get(void *data, u64 *val) 820 { 821 struct drm_i915_private *i915 = data; 822 823 *val = atomic64_read(&i915->perf.noa_programming_delay); 824 return 0; 825 } 826 827 DEFINE_SIMPLE_ATTRIBUTE(i915_perf_noa_delay_fops, 828 i915_perf_noa_delay_get, 829 i915_perf_noa_delay_set, 830 "%llu\n"); 831 832 #define DROP_UNBOUND BIT(0) 833 #define DROP_BOUND BIT(1) 834 #define DROP_RETIRE BIT(2) 835 #define DROP_ACTIVE BIT(3) 836 #define DROP_FREED BIT(4) 837 #define DROP_SHRINK_ALL BIT(5) 838 #define DROP_IDLE BIT(6) 839 #define DROP_RESET_ACTIVE BIT(7) 840 #define DROP_RESET_SEQNO BIT(8) 841 #define DROP_RCU BIT(9) 842 #define DROP_ALL (DROP_UNBOUND | \ 843 DROP_BOUND | \ 844 DROP_RETIRE | \ 845 DROP_ACTIVE | \ 846 DROP_FREED | \ 847 DROP_SHRINK_ALL |\ 848 DROP_IDLE | \ 849 DROP_RESET_ACTIVE | \ 850 DROP_RESET_SEQNO | \ 851 DROP_RCU) 852 static int 853 i915_drop_caches_get(void *data, u64 *val) 854 { 855 *val = DROP_ALL; 856 857 return 0; 858 } 859 static int 860 gt_drop_caches(struct intel_gt *gt, u64 val) 861 { 862 int ret; 863 864 if (val & DROP_RESET_ACTIVE && 865 wait_for(intel_engines_are_idle(gt), I915_IDLE_ENGINES_TIMEOUT)) 866 intel_gt_set_wedged(gt); 867 868 if (val & DROP_RETIRE) 869 intel_gt_retire_requests(gt); 870 871 if (val & (DROP_IDLE | DROP_ACTIVE)) { 872 ret = intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT); 873 if (ret) 874 return ret; 875 } 876 877 if (val & DROP_IDLE) { 878 ret = intel_gt_pm_wait_for_idle(gt); 879 if (ret) 880 return ret; 881 } 882 883 if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(gt)) 884 intel_gt_handle_error(gt, ALL_ENGINES, 0, NULL); 885 886 if (val & DROP_FREED) 887 intel_gt_flush_buffer_pool(gt); 888 889 return 0; 890 } 891 892 static int 893 i915_drop_caches_set(void *data, u64 val) 894 { 895 struct drm_i915_private *i915 = data; 896 int ret; 897 898 DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n", 899 val, val & DROP_ALL); 900 901 ret = gt_drop_caches(&i915->gt, val); 902 if (ret) 903 return ret; 904 905 fs_reclaim_acquire(GFP_KERNEL); 906 if (val & DROP_BOUND) 907 i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND); 908 909 if (val & DROP_UNBOUND) 910 i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND); 911 912 if (val & DROP_SHRINK_ALL) 913 i915_gem_shrink_all(i915); 914 fs_reclaim_release(GFP_KERNEL); 915 916 if (val & DROP_RCU) 917 rcu_barrier(); 918 919 if (val & DROP_FREED) 920 i915_gem_drain_freed_objects(i915); 921 922 return 0; 923 } 924 925 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops, 926 i915_drop_caches_get, i915_drop_caches_set, 927 "0x%08llx\n"); 928 929 static int i915_sseu_status(struct seq_file *m, void *unused) 930 { 931 struct drm_i915_private *i915 = node_to_i915(m->private); 932 struct intel_gt *gt = &i915->gt; 933 934 return intel_sseu_status(m, gt); 935 } 936 937 static int i915_forcewake_open(struct inode *inode, struct file *file) 938 { 939 struct drm_i915_private *i915 = inode->i_private; 940 struct intel_gt *gt = &i915->gt; 941 942 atomic_inc(>->user_wakeref); 943 intel_gt_pm_get(gt); 944 if (INTEL_GEN(i915) >= 6) 945 intel_uncore_forcewake_user_get(gt->uncore); 946 947 return 0; 948 } 949 950 static int i915_forcewake_release(struct inode *inode, struct file *file) 951 { 952 struct drm_i915_private *i915 = inode->i_private; 953 struct intel_gt *gt = &i915->gt; 954 955 if (INTEL_GEN(i915) >= 6) 956 intel_uncore_forcewake_user_put(&i915->uncore); 957 intel_gt_pm_put(gt); 958 atomic_dec(>->user_wakeref); 959 960 return 0; 961 } 962 963 static const struct file_operations i915_forcewake_fops = { 964 .owner = THIS_MODULE, 965 .open = i915_forcewake_open, 966 .release = i915_forcewake_release, 967 }; 968 969 static const struct drm_info_list i915_debugfs_list[] = { 970 {"i915_capabilities", i915_capabilities, 0}, 971 {"i915_gem_objects", i915_gem_object_info, 0}, 972 {"i915_frequency_info", i915_frequency_info, 0}, 973 {"i915_swizzle_info", i915_swizzle_info, 0}, 974 {"i915_runtime_pm_status", i915_runtime_pm_status, 0}, 975 {"i915_engine_info", i915_engine_info, 0}, 976 {"i915_wa_registers", i915_wa_registers, 0}, 977 {"i915_sseu_status", i915_sseu_status, 0}, 978 {"i915_rps_boost_info", i915_rps_boost_info, 0}, 979 }; 980 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) 981 982 static const struct i915_debugfs_files { 983 const char *name; 984 const struct file_operations *fops; 985 } i915_debugfs_files[] = { 986 {"i915_perf_noa_delay", &i915_perf_noa_delay_fops}, 987 {"i915_wedged", &i915_wedged_fops}, 988 {"i915_gem_drop_caches", &i915_drop_caches_fops}, 989 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) 990 {"i915_error_state", &i915_error_state_fops}, 991 {"i915_gpu_info", &i915_gpu_info_fops}, 992 #endif 993 }; 994 995 void i915_debugfs_register(struct drm_i915_private *dev_priv) 996 { 997 struct drm_minor *minor = dev_priv->drm.primary; 998 int i; 999 1000 i915_debugfs_params(dev_priv); 1001 1002 debugfs_create_file("i915_forcewake_user", S_IRUSR, minor->debugfs_root, 1003 to_i915(minor->dev), &i915_forcewake_fops); 1004 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) { 1005 debugfs_create_file(i915_debugfs_files[i].name, 1006 S_IRUGO | S_IWUSR, 1007 minor->debugfs_root, 1008 to_i915(minor->dev), 1009 i915_debugfs_files[i].fops); 1010 } 1011 1012 drm_debugfs_create_files(i915_debugfs_list, 1013 I915_DEBUGFS_ENTRIES, 1014 minor->debugfs_root, minor); 1015 } 1016