/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/sysfs.h>
#include "intel_drv.h"
#include "i915_drv.h"

#define dev_to_drm_minor(d) dev_get_drvdata((d))

#ifdef CONFIG_PM
static u32 calc_residency(struct drm_device *dev, const u32 reg)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 raw_time; /* 32b value may overflow during fixed point math */
	u64 units = 128ULL, div = 100000ULL, bias = 100ULL;
	u32 ret;

	if (!intel_enable_rc6(dev))
		return 0;

	intel_runtime_pm_get(dev_priv);

	/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
	if (IS_VALLEYVIEW(dev)) {
		u32 reg, czcount_30ns;

		if (IS_CHERRYVIEW(dev))
			reg = CHV_CLK_CTL1;
		else
			reg = VLV_CLK_CTL2;

		czcount_30ns = I915_READ(reg) >> CLK_CTL2_CZCOUNT_30NS_SHIFT;

		if (!czcount_30ns) {
			WARN(!czcount_30ns, "bogus CZ count value");
			ret = 0;
			goto out;
		}

		units = 0;
		div = 1000000ULL;

		if (IS_CHERRYVIEW(dev)) {
			/* Special case for 320Mhz */
			if (czcount_30ns == 1) {
				div = 10000000ULL;
				units = 3125ULL;
			} else {
				/* chv counts are one less */
				czcount_30ns += 1;
			}
		}

		if (units == 0)
			units = DIV_ROUND_UP_ULL(30ULL * bias,
						 (u64)czcount_30ns);

		if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
			units <<= 8;

		div = div * bias;
	}

	raw_time = I915_READ(reg) * units;
	ret = DIV_ROUND_UP_ULL(raw_time, div);

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

static ssize_t
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
}

static ssize_t
show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_get_drvdata(kdev);
	u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}

static ssize_t
show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
	if (IS_VALLEYVIEW(dminor->dev))
		rc6p_residency = 0;
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
}

static ssize_t
show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
	if (IS_VALLEYVIEW(dminor->dev))
		rc6pp_residency = 0;
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
}

static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);

static struct attribute *rc6_attrs[] = {
	&dev_attr_rc6_enable.attr,
	&dev_attr_rc6_residency_ms.attr,
	&dev_attr_rc6p_residency_ms.attr,
	&dev_attr_rc6pp_residency_ms.attr,
	NULL
};

static struct attribute_group rc6_attr_group = {
	.name = power_group_name,
	.attrs = rc6_attrs
};
#endif

static int l3_access_valid(struct drm_device *dev, loff_t offset)
{
	if (!HAS_L3_DPF(dev))
		return -EPERM;

	if (offset % 4 != 0)
		return -EINVAL;

	if (offset >= GEN7_L3LOG_SIZE)
		return -ENXIO;

	return 0;
}

static ssize_t
i915_l3_read(struct file *filp, struct kobject *kobj,
	     struct bin_attribute *attr, char *buf,
	     loff_t offset, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct drm_minor *dminor = dev_to_drm_minor(dev);
	struct drm_device *drm_dev = dminor->dev;
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	int slice = (int)(uintptr_t)attr->private;
	int ret;

	count = round_down(count, 4);

	ret = l3_access_valid(drm_dev, offset);
	if (ret)
		return ret;

	count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);

	ret = i915_mutex_lock_interruptible(drm_dev);
	if (ret)
		return ret;

	if (dev_priv->l3_parity.remap_info[slice])
		memcpy(buf,
		       dev_priv->l3_parity.remap_info[slice] + (offset/4),
		       count);
	else
		memset(buf, 0, count);

	mutex_unlock(&drm_dev->struct_mutex);

	return count;
}

static ssize_t
i915_l3_write(struct file *filp, struct kobject *kobj,
	      struct bin_attribute *attr, char *buf,
	      loff_t offset, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct drm_minor *dminor = dev_to_drm_minor(dev);
	struct drm_device *drm_dev = dminor->dev;
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	struct intel_context *ctx;
	u32 *temp = NULL; /* Just here to make handling failures easy */
	int slice = (int)(uintptr_t)attr->private;
	int ret;

	if (!HAS_HW_CONTEXTS(drm_dev))
		return -ENXIO;

	ret = l3_access_valid(drm_dev, offset);
	if (ret)
		return ret;

	ret = i915_mutex_lock_interruptible(drm_dev);
	if (ret)
		return ret;

	if (!dev_priv->l3_parity.remap_info[slice]) {
		temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
		if (!temp) {
			mutex_unlock(&drm_dev->struct_mutex);
			return -ENOMEM;
		}
	}

	ret = i915_gpu_idle(drm_dev);
	if (ret) {
		kfree(temp);
		mutex_unlock(&drm_dev->struct_mutex);
		return ret;
	}

	/* TODO: Ideally we really want a GPU reset here to make sure errors
	 * aren't propagated. Since I cannot find a stable way to reset the GPU
	 * at this point it is left as a TODO.
	 */
	if (temp)
		dev_priv->l3_parity.remap_info[slice] = temp;

	memcpy(dev_priv->l3_parity.remap_info[slice] + (offset/4), buf, count);

	/* NB: We defer the remapping until we switch to the context */
	list_for_each_entry(ctx, &dev_priv->context_list, link)
		ctx->remap_slice |= (1<<slice);

	mutex_unlock(&drm_dev->struct_mutex);

	return count;
}

static struct bin_attribute dpf_attrs = {
	.attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)0
};

static struct bin_attribute dpf_attrs_1 = {
	.attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)1
};

static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);
	if (IS_VALLEYVIEW(dev_priv->dev)) {
		u32 freq;
		freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		ret = vlv_gpu_freq(dev_priv, (freq >> 8) & 0xff);
	} else {
		ret = dev_priv->rps.cur_freq * GT_FREQUENCY_MULTIPLIER;
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
				     struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
}

static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);
	if (IS_VALLEYVIEW(dev_priv->dev))
		ret = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
	else
		ret = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t gt_max_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);

	if (IS_VALLEYVIEW(dev_priv->dev))
		val = vlv_freq_opcode(dev_priv, val);
	else
		val /= GT_FREQUENCY_MULTIPLIER;

	if (val < dev_priv->rps.min_freq ||
	    val > dev_priv->rps.max_freq ||
	    val < dev_priv->rps.min_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	if (val > dev_priv->rps.rp0_freq)
		DRM_DEBUG("User requested overclocking to %d\n",
			  val * GT_FREQUENCY_MULTIPLIER);

	dev_priv->rps.max_freq_softlimit = val;

	if (dev_priv->rps.cur_freq > val) {
		if (IS_VALLEYVIEW(dev))
			valleyview_set_rps(dev, val);
		else
			gen6_set_rps(dev, val);
	} else if (!IS_VALLEYVIEW(dev)) {
		/* We still need gen6_set_rps to process the new max_delay and
		 * update the interrupt limits even though frequency request is
		 * unchanged. */
		gen6_set_rps(dev, dev_priv->rps.cur_freq);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

	return count;
}

static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);
	if (IS_VALLEYVIEW(dev_priv->dev))
		ret = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
	else
		ret = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t gt_min_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);

	if (IS_VALLEYVIEW(dev))
		val = vlv_freq_opcode(dev_priv, val);
	else
		val /= GT_FREQUENCY_MULTIPLIER;

	if (val < dev_priv->rps.min_freq ||
	    val > dev_priv->rps.max_freq ||
	    val > dev_priv->rps.max_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	dev_priv->rps.min_freq_softlimit = val;

	if (dev_priv->rps.cur_freq < val) {
		if (IS_VALLEYVIEW(dev))
			valleyview_set_rps(dev, val);
		else
			gen6_set_rps(dev, val);
	} else if (!IS_VALLEYVIEW(dev)) {
		/* We still need gen6_set_rps to process the new min_delay and
		 * update the interrupt limits even though frequency request is
		 * unchanged. */
		gen6_set_rps(dev, dev_priv->rps.cur_freq);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

	return count;
}

static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);

static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL);

static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);

/* For now we have a static number of RP states */
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, rp_state_cap;
	ssize_t ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);
	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	if (attr == &dev_attr_gt_RP0_freq_mhz) {
		if (IS_VALLEYVIEW(dev))
			val = vlv_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
		else
			val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER;
	} else if (attr == &dev_attr_gt_RP1_freq_mhz) {
		if (IS_VALLEYVIEW(dev))
			val = vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
		else
			val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER;
	} else if (attr == &dev_attr_gt_RPn_freq_mhz) {
		if (IS_VALLEYVIEW(dev))
			val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq);
		else
			val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER;
	} else {
		BUG();
	}
	return snprintf(buf, PAGE_SIZE, "%d\n", val);
}

static const struct attribute *gen6_attrs[] = {
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	NULL,
};

static const struct attribute *vlv_attrs[] = {
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	&dev_attr_vlv_rpe_freq_mhz.attr,
	NULL,
};

static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr, char *buf,
				loff_t off, size_t count)
{
	struct device *kdev = container_of(kobj, struct device, kobj);
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct i915_error_state_file_priv error_priv;
	struct drm_i915_error_state_buf error_str;
	ssize_t ret_count = 0;
	int ret;

	memset(&error_priv, 0, sizeof(error_priv));

	ret = i915_error_state_buf_init(&error_str, count, off);
	if (ret)
		return ret;

	error_priv.dev = dev;
	i915_error_state_get(dev, &error_priv);

	ret = i915_error_state_to_str(&error_str, &error_priv);
	if (ret)
		goto out;

	ret_count = count < error_str.bytes ? count : error_str.bytes;

	memcpy(buf, error_str.buf, ret_count);
out:
	i915_error_state_put(&error_priv);
	i915_error_state_buf_release(&error_str);

	return ret ?: ret_count;
}

static ssize_t error_state_write(struct file *file, struct kobject *kobj,
				 struct bin_attribute *attr, char *buf,
				 loff_t off, size_t count)
{
	struct device *kdev = container_of(kobj, struct device, kobj);
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	int ret;

	DRM_DEBUG_DRIVER("Resetting error state\n");

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);

	return count;
}

static struct bin_attribute error_state_attr = {
	.attr.name = "error",
	.attr.mode = S_IRUSR | S_IWUSR,
	.size = 0,
	.read = error_state_read,
	.write = error_state_write,
};

void i915_setup_sysfs(struct drm_device *dev)
{
	int ret;

#ifdef CONFIG_PM
	if (INTEL_INFO(dev)->gen >= 6) {
		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
					&rc6_attr_group);
		if (ret)
			DRM_ERROR("RC6 residency sysfs setup failed\n");
	}
#endif
	if (HAS_L3_DPF(dev)) {
		ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
		if (ret)
			DRM_ERROR("l3 parity sysfs setup failed\n");

		if (NUM_L3_SLICES(dev) > 1) {
			ret = device_create_bin_file(dev->primary->kdev,
						     &dpf_attrs_1);
			if (ret)
				DRM_ERROR("l3 parity slice 1 setup failed\n");
		}
	}

	ret = 0;
	if (IS_VALLEYVIEW(dev))
		ret = sysfs_create_files(&dev->primary->kdev->kobj, vlv_attrs);
	else if (INTEL_INFO(dev)->gen >= 6)
		ret = sysfs_create_files(&dev->primary->kdev->kobj, gen6_attrs);
	if (ret)
		DRM_ERROR("RPS sysfs setup failed\n");

	ret = sysfs_create_bin_file(&dev->primary->kdev->kobj,
				    &error_state_attr);
	if (ret)
		DRM_ERROR("error_state sysfs setup failed\n");
}

void i915_teardown_sysfs(struct drm_device *dev)
{
	sysfs_remove_bin_file(&dev->primary->kdev->kobj, &error_state_attr);
	if (IS_VALLEYVIEW(dev))
		sysfs_remove_files(&dev->primary->kdev->kobj, vlv_attrs);
	else
		sysfs_remove_files(&dev->primary->kdev->kobj, gen6_attrs);
	device_remove_bin_file(dev->primary->kdev, &dpf_attrs_1);
	device_remove_bin_file(dev->primary->kdev, &dpf_attrs);
#ifdef CONFIG_PM
	sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group);
#endif
}