xref: /openbmc/linux/drivers/gpu/drm/i915/i915_sysfs.c (revision f677b30b487ca3763c3de3f1b4d8c976c2961cd1)
/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/sysfs.h>
#include "intel_drv.h"
#include "i915_drv.h"

#define dev_to_drm_minor(d) dev_get_drvdata((d))

#ifdef CONFIG_PM
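/*
 * Convert a raw RC6 residency counter into milliseconds. On most gens the
 * counter ticks once per 1.28us, hence the fixed-point math below with
 * units = 128 and div = 100000:
 *
 *   ms = ticks * 1.28us / 1000us = ticks * 128 / 100000
 *
 * e.g. a raw count of 781250 ticks yields 781250 * 128 / 100000 = 1000ms.
 * On VLV the counter runs on the CZ clock instead; bias = 100 carries two
 * extra decimal digits of precision through the unit conversion and is
 * cancelled out again in div.
 */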
static u32 calc_residency(struct drm_device *dev, const u32 reg)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 raw_time; /* 32b value may overflow during fixed point math */
	u64 units = 128ULL, div = 100000ULL, bias = 100ULL;

	if (!intel_enable_rc6(dev))
		return 0;

	/* On VLV, residency time is in CZ units rather than 1.28us */
	if (IS_VALLEYVIEW(dev)) {
		u32 clkctl2;

		clkctl2 = I915_READ(VLV_CLK_CTL2) >>
			CLK_CTL2_CZCOUNT_30NS_SHIFT;
		if (WARN(!clkctl2, "bogus CZ count value"))
			return 0;
		units = DIV_ROUND_UP_ULL(30ULL * bias, (u64)clkctl2);
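		/*
		 * In high-range mode each counter tick spans 256x more
		 * time, so scale units up to match.
		 */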
		if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
			units <<= 8;

		div = 1000000ULL * bias;
	}

	raw_time = I915_READ(reg) * units;
	return DIV_ROUND_UP_ULL(raw_time, div);
}

static ssize_t
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
}

static ssize_t
show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}

static ssize_t
show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
	if (IS_VALLEYVIEW(dminor->dev))
		rc6p_residency = 0;
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
}

static ssize_t
show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
	if (IS_VALLEYVIEW(dminor->dev))
		rc6pp_residency = 0;
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
}

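/*
 * These attributes are merged into the device's standard power/ group,
 * e.g. /sys/class/drm/card0/power/rc6_residency_ms (the card index
 * varies by system).
 */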
static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);

static struct attribute *rc6_attrs[] = {
	&dev_attr_rc6_enable.attr,
	&dev_attr_rc6_residency_ms.attr,
	&dev_attr_rc6p_residency_ms.attr,
	&dev_attr_rc6pp_residency_ms.attr,
	NULL
};

static struct attribute_group rc6_attr_group = {
	.name = power_group_name,
	.attrs = rc6_attrs
};
#endif

static int l3_access_valid(struct drm_device *dev, loff_t offset)
{
	if (!HAS_L3_DPF(dev))
		return -EPERM;

	if (offset % 4 != 0)
		return -EINVAL;

	if (offset >= GEN7_L3LOG_SIZE)
		return -ENXIO;

	return 0;
}

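/*
 * The l3_parity bin files expose the per-slice L3 remap tables
 * (GEN7_L3LOG_SIZE bytes each) used to steer accesses around faulty L3
 * rows. Offsets must be DWORD-aligned and within the table; see
 * l3_access_valid() above.
 */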
static ssize_t
i915_l3_read(struct file *filp, struct kobject *kobj,
	     struct bin_attribute *attr, char *buf,
	     loff_t offset, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct drm_minor *dminor = dev_to_drm_minor(dev);
	struct drm_device *drm_dev = dminor->dev;
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	int slice = (int)(uintptr_t)attr->private;
	int ret;

	count = round_down(count, 4);

	ret = l3_access_valid(drm_dev, offset);
	if (ret)
		return ret;

	count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);

	ret = i915_mutex_lock_interruptible(drm_dev);
	if (ret)
		return ret;

	if (dev_priv->l3_parity.remap_info[slice])
		memcpy(buf,
		       dev_priv->l3_parity.remap_info[slice] + (offset/4),
		       count);
	else
		memset(buf, 0, count);

	mutex_unlock(&drm_dev->struct_mutex);

	return count;
}

static ssize_t
i915_l3_write(struct file *filp, struct kobject *kobj,
	      struct bin_attribute *attr, char *buf,
	      loff_t offset, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct drm_minor *dminor = dev_to_drm_minor(dev);
	struct drm_device *drm_dev = dminor->dev;
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	struct i915_hw_context *ctx;
	u32 *temp = NULL; /* Just here to make handling failures easy */
	int slice = (int)(uintptr_t)attr->private;
	int ret;

	ret = l3_access_valid(drm_dev, offset);
	if (ret)
		return ret;

	if (dev_priv->hw_contexts_disabled)
		return -ENXIO;

	ret = i915_mutex_lock_interruptible(drm_dev);
	if (ret)
		return ret;

	if (!dev_priv->l3_parity.remap_info[slice]) {
		temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
		if (!temp) {
			mutex_unlock(&drm_dev->struct_mutex);
			return -ENOMEM;
		}
	}

	ret = i915_gpu_idle(drm_dev);
	if (ret) {
		kfree(temp);
		mutex_unlock(&drm_dev->struct_mutex);
		return ret;
	}

	/* TODO: Ideally we really want a GPU reset here to make sure errors
	 * aren't propagated. Since I cannot find a stable way to reset the GPU
	 * at this point it is left as a TODO.
	 */
	if (temp)
		dev_priv->l3_parity.remap_info[slice] = temp;

	memcpy(dev_priv->l3_parity.remap_info[slice] + (offset/4), buf, count);

	/* NB: We defer the actual remapping until we next switch to each
	 * context; here every context is merely flagged as needing a remap.
	 */
	list_for_each_entry(ctx, &dev_priv->context_list, link)
		ctx->remap_slice |= (1<<slice);

	mutex_unlock(&drm_dev->struct_mutex);

	return count;
}

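/*
 * One bin_attribute per L3 slice; the slice index is stashed in
 * ->private and unpacked in the read/write handlers above.
 */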
static struct bin_attribute dpf_attrs = {
	.attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)0
};

static struct bin_attribute dpf_attrs_1 = {
	.attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)1
};

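/*
 * RPS frequency attributes. GEN6+ hardware counts frequency in steps of
 * GT_FREQUENCY_MULTIPLIER (50MHz); VLV uses PUnit opcodes converted via
 * vlv_gpu_freq()/vlv_freq_opcode(). Everything below is exposed to
 * userspace in MHz.
 */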
static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);
	if (IS_VALLEYVIEW(dev_priv->dev)) {
		u32 freq;
		freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		ret = vlv_gpu_freq(dev_priv->mem_freq, (freq >> 8) & 0xff);
	} else {
		ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
				     struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			vlv_gpu_freq(dev_priv->mem_freq,
				     dev_priv->rps.rpe_delay));
}

static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);
	if (IS_VALLEYVIEW(dev_priv->dev))
		ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.max_delay);
	else
		ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t gt_max_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, rp_state_cap, hw_max, hw_min, non_oc_max;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);

	if (IS_VALLEYVIEW(dev_priv->dev)) {
		val = vlv_freq_opcode(dev_priv->mem_freq, val);

		hw_max = valleyview_rps_max_freq(dev_priv);
		hw_min = valleyview_rps_min_freq(dev_priv);
		non_oc_max = hw_max;
	} else {
		val /= GT_FREQUENCY_MULTIPLIER;

		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		hw_max = dev_priv->rps.hw_max;
		non_oc_max = (rp_state_cap & 0xff);
		hw_min = ((rp_state_cap & 0xff0000) >> 16);
	}

	if (val < hw_min || val > hw_max ||
	    val < dev_priv->rps.min_delay) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	if (val > non_oc_max)
		DRM_DEBUG("User requested overclocking to %d\n",
			  val * GT_FREQUENCY_MULTIPLIER);

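	/* If the current frequency is above the new cap, clamp down now */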
	if (dev_priv->rps.cur_delay > val) {
		if (IS_VALLEYVIEW(dev_priv->dev))
			valleyview_set_rps(dev_priv->dev, val);
		else
			gen6_set_rps(dev_priv->dev, val);
	}

	dev_priv->rps.max_delay = val;

	mutex_unlock(&dev_priv->rps.hw_lock);

	return count;
}

static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);
	if (IS_VALLEYVIEW(dev_priv->dev))
		ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.min_delay);
	else
		ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t gt_min_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, rp_state_cap, hw_max, hw_min;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);

	if (IS_VALLEYVIEW(dev)) {
		val = vlv_freq_opcode(dev_priv->mem_freq, val);

		hw_max = valleyview_rps_max_freq(dev_priv);
		hw_min = valleyview_rps_min_freq(dev_priv);
	} else {
		val /= GT_FREQUENCY_MULTIPLIER;

		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		hw_max = dev_priv->rps.hw_max;
		hw_min = ((rp_state_cap & 0xff0000) >> 16);
	}

	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

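	/* If the current frequency is below the new floor, raise it now */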
	if (dev_priv->rps.cur_delay < val) {
		if (IS_VALLEYVIEW(dev))
			valleyview_set_rps(dev, val);
		else
			gen6_set_rps(dev_priv->dev, val);
	}

	dev_priv->rps.min_delay = val;

	mutex_unlock(&dev_priv->rps.hw_lock);

	return count;
}

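/*
 * These attributes live directly in the card's sysfs directory. Example
 * usage (path illustrative), capping the GPU at 600MHz from userspace:
 *
 *   echo 600 > /sys/class/drm/card0/gt_max_freq_mhz
 *
 * Writes are validated against the hardware limits in the store
 * handlers above.
 */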
static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);

static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL);

static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);

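/*
 * RP0 is the maximum non-overclocked frequency, RP1 the most efficient
 * frequency and RPn the minimum; all three are decoded from
 * GEN6_RP_STATE_CAP below.
 */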
/* For now we have a static number of RP states */
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, rp_state_cap;
	ssize_t ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
	mutex_unlock(&dev->struct_mutex);

	if (attr == &dev_attr_gt_RP0_freq_mhz) {
		val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER;
	} else if (attr == &dev_attr_gt_RP1_freq_mhz) {
		val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER;
	} else if (attr == &dev_attr_gt_RPn_freq_mhz) {
		val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER;
	} else {
		BUG();
	}
	return snprintf(buf, PAGE_SIZE, "%d\n", val);
}

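/* Per-platform attribute sets, registered in i915_setup_sysfs() below */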
static const struct attribute *gen6_attrs[] = {
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	NULL,
};

static const struct attribute *vlv_attrs[] = {
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_vlv_rpe_freq_mhz.attr,
	NULL,
};

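/*
 * The error bin file exposes the last captured GPU hang state; writing
 * anything to it discards the saved state (e.g. echo 1 > error).
 */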
static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr, char *buf,
				loff_t off, size_t count)
{
	struct device *kdev = container_of(kobj, struct device, kobj);
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct i915_error_state_file_priv error_priv;
	struct drm_i915_error_state_buf error_str;
	ssize_t ret_count = 0;
	int ret;

	memset(&error_priv, 0, sizeof(error_priv));

	ret = i915_error_state_buf_init(&error_str, count, off);
	if (ret)
		return ret;

	error_priv.dev = dev;
	i915_error_state_get(dev, &error_priv);

	ret = i915_error_state_to_str(&error_str, &error_priv);
	if (ret)
		goto out;

	ret_count = count < error_str.bytes ? count : error_str.bytes;

	memcpy(buf, error_str.buf, ret_count);
out:
	i915_error_state_put(&error_priv);
	i915_error_state_buf_release(&error_str);

	return ret ?: ret_count;
}

static ssize_t error_state_write(struct file *file, struct kobject *kobj,
				 struct bin_attribute *attr, char *buf,
				 loff_t off, size_t count)
{
	struct device *kdev = container_of(kobj, struct device, kobj);
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	int ret;

	DRM_DEBUG_DRIVER("Resetting error state\n");

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);

	return count;
}

static struct bin_attribute error_state_attr = {
	.attr.name = "error",
	.attr.mode = S_IRUSR | S_IWUSR,
	.size = 0,
	.read = error_state_read,
	.write = error_state_write,
};

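/*
 * Register everything on the primary (card) minor;
 * i915_teardown_sysfs() below undoes this in reverse order.
 */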
void i915_setup_sysfs(struct drm_device *dev)
{
	int ret;

#ifdef CONFIG_PM
	if (INTEL_INFO(dev)->gen >= 6) {
		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
					&rc6_attr_group);
		if (ret)
			DRM_ERROR("RC6 residency sysfs setup failed\n");
	}
#endif
	if (HAS_L3_DPF(dev)) {
		ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
		if (ret)
			DRM_ERROR("l3 parity sysfs setup failed\n");

		if (NUM_L3_SLICES(dev) > 1) {
			ret = device_create_bin_file(dev->primary->kdev,
						     &dpf_attrs_1);
			if (ret)
				DRM_ERROR("l3 parity slice 1 setup failed\n");
		}
	}

	ret = 0;
	if (IS_VALLEYVIEW(dev))
		ret = sysfs_create_files(&dev->primary->kdev->kobj, vlv_attrs);
	else if (INTEL_INFO(dev)->gen >= 6)
		ret = sysfs_create_files(&dev->primary->kdev->kobj, gen6_attrs);
	if (ret)
		DRM_ERROR("RPS sysfs setup failed\n");

	ret = sysfs_create_bin_file(&dev->primary->kdev->kobj,
				    &error_state_attr);
	if (ret)
		DRM_ERROR("error_state sysfs setup failed\n");
}

void i915_teardown_sysfs(struct drm_device *dev)
{
	sysfs_remove_bin_file(&dev->primary->kdev->kobj, &error_state_attr);
	if (IS_VALLEYVIEW(dev))
		sysfs_remove_files(&dev->primary->kdev->kobj, vlv_attrs);
	else
		sysfs_remove_files(&dev->primary->kdev->kobj, gen6_attrs);
	device_remove_bin_file(dev->primary->kdev, &dpf_attrs_1);
	device_remove_bin_file(dev->primary->kdev, &dpf_attrs);
#ifdef CONFIG_PM
	sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group);
#endif
}