/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/sysfs.h>
#include "intel_drv.h"
#include "i915_drv.h"

#define dev_to_drm_minor(d) dev_get_drvdata((d))

#ifdef CONFIG_PM
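/*
 * Read a GT residency counter and scale it to milliseconds.  On most
 * platforms the counter ticks in 1.28us units; on VLV/CHV it ticks in
 * CZ clock units, so the unit/divisor pair is derived from the CZ
 * count field below.
 */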
static u32 calc_residency(struct drm_device *dev, const u32 reg)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 raw_time; /* 32b value may overflow during fixed point math */
	u64 units = 128ULL, div = 100000ULL, bias = 100ULL;
	u32 ret;

	if (!intel_enable_rc6(dev))
		return 0;

	intel_runtime_pm_get(dev_priv);

	/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
	if (IS_VALLEYVIEW(dev)) {
		u32 clk_reg, czcount_30ns;

		if (IS_CHERRYVIEW(dev))
			clk_reg = CHV_CLK_CTL1;
		else
			clk_reg = VLV_CLK_CTL2;

		czcount_30ns = I915_READ(clk_reg) >> CLK_CTL2_CZCOUNT_30NS_SHIFT;

		if (WARN(!czcount_30ns, "bogus CZ count value")) {
			ret = 0;
			goto out;
		}

		units = 0;
		div = 1000000ULL;

		if (IS_CHERRYVIEW(dev)) {
			/* Special case for 320MHz */
			if (czcount_30ns == 1) {
				div = 10000000ULL;
				units = 3125ULL;
			} else {
				/* chv counts are one less */
				czcount_30ns += 1;
			}
		}

		if (units == 0)
			units = DIV_ROUND_UP_ULL(30ULL * bias,
						 (u64)czcount_30ns);

		if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
			units <<= 8;

		div = div * bias;
	}

	raw_time = I915_READ(reg) * units;
	ret = DIV_ROUND_UP_ULL(raw_time, div);

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

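/*
 * RC6 sysfs attributes: rc6_enable reports the enabled RC6 state mask,
 * while the *_residency_ms files report accumulated residency in
 * milliseconds.
 */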
static ssize_t
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
}

static ssize_t
show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}

static ssize_t
show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
	if (IS_VALLEYVIEW(dminor->dev))
		rc6p_residency = 0;
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
}

static ssize_t
show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
	if (IS_VALLEYVIEW(dminor->dev))
		rc6pp_residency = 0;
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
}

static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);

static struct attribute *rc6_attrs[] = {
	&dev_attr_rc6_enable.attr,
	&dev_attr_rc6_residency_ms.attr,
	NULL
};

static struct attribute_group rc6_attr_group = {
	.name = power_group_name,
	.attrs = rc6_attrs
};

static struct attribute *rc6p_attrs[] = {
	&dev_attr_rc6p_residency_ms.attr,
	&dev_attr_rc6pp_residency_ms.attr,
	NULL
};

static struct attribute_group rc6p_attr_group = {
	.name = power_group_name,
	.attrs = rc6p_attrs
};
#endif

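/*
 * L3 parity remapping is only exposed on platforms with L3 dynamic
 * parity fault handling (HAS_L3_DPF); accesses must be dword aligned
 * and fall within the GEN7_L3LOG_SIZE log.
 */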
static int l3_access_valid(struct drm_device *dev, loff_t offset)
{
	if (!HAS_L3_DPF(dev))
		return -EPERM;

	if (offset % 4 != 0)
		return -EINVAL;

	if (offset >= GEN7_L3LOG_SIZE)
		return -ENXIO;

	return 0;
}

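/*
 * Reads return the cached per-slice remap information, or zeroes if no
 * remapping has been programmed yet.
 */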
static ssize_t
i915_l3_read(struct file *filp, struct kobject *kobj,
	     struct bin_attribute *attr, char *buf,
	     loff_t offset, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct drm_minor *dminor = dev_to_drm_minor(dev);
	struct drm_device *drm_dev = dminor->dev;
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	int slice = (int)(uintptr_t)attr->private;
	int ret;

	count = round_down(count, 4);

	ret = l3_access_valid(drm_dev, offset);
	if (ret)
		return ret;

	count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);

	ret = i915_mutex_lock_interruptible(drm_dev);
	if (ret)
		return ret;

	if (dev_priv->l3_parity.remap_info[slice])
		memcpy(buf,
		       dev_priv->l3_parity.remap_info[slice] + (offset/4),
		       count);
	else
		memset(buf, 0, count);

	mutex_unlock(&drm_dev->struct_mutex);

	return count;
}

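/*
 * Writes update the cached remap information and flag every context so
 * that the new remapping is applied at the next context switch.
 */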
static ssize_t
i915_l3_write(struct file *filp, struct kobject *kobj,
	      struct bin_attribute *attr, char *buf,
	      loff_t offset, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct drm_minor *dminor = dev_to_drm_minor(dev);
	struct drm_device *drm_dev = dminor->dev;
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	struct intel_context *ctx;
	u32 *temp = NULL; /* Just here to make handling failures easy */
	int slice = (int)(uintptr_t)attr->private;
	int ret;

	if (!HAS_HW_CONTEXTS(drm_dev))
		return -ENXIO;

	ret = l3_access_valid(drm_dev, offset);
	if (ret)
		return ret;

	ret = i915_mutex_lock_interruptible(drm_dev);
	if (ret)
		return ret;

	if (!dev_priv->l3_parity.remap_info[slice]) {
		temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
		if (!temp) {
			mutex_unlock(&drm_dev->struct_mutex);
			return -ENOMEM;
		}
	}

	ret = i915_gpu_idle(drm_dev);
	if (ret) {
		kfree(temp);
		mutex_unlock(&drm_dev->struct_mutex);
		return ret;
	}

	/* TODO: Ideally we really want a GPU reset here to make sure errors
	 * aren't propagated. Since I cannot find a stable way to reset the GPU
	 * at this point it is left as a TODO.
	 */
	if (temp)
		dev_priv->l3_parity.remap_info[slice] = temp;

	memcpy(dev_priv->l3_parity.remap_info[slice] + (offset/4), buf, count);

	/* NB: We defer the remapping until we switch to the context */
	list_for_each_entry(ctx, &dev_priv->context_list, link)
		ctx->remap_slice |= (1 << slice);

	mutex_unlock(&drm_dev->struct_mutex);

	return count;
}

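/* One bin_attribute per L3 slice; ->private carries the slice index. */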
static struct bin_attribute dpf_attrs = {
	.attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)0
};

static struct bin_attribute dpf_attrs_1 = {
	.attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)1
};

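/*
 * RPS (render P-state) frequency interface.  All values are reported in
 * MHz; on VLV/CHV the PUnit works in opcode units, converted with
 * vlv_gpu_freq()/vlv_freq_opcode(), elsewhere hardware units are scaled
 * by GT_FREQUENCY_MULTIPLIER.
 */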
static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);
	if (IS_VALLEYVIEW(dev)) {
		u32 freq;
		freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		ret = vlv_gpu_freq(dev_priv, (freq >> 8) & 0xff);
	} else {
		ret = dev_priv->rps.cur_freq * GT_FREQUENCY_MULTIPLIER;
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
				     struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
}

static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);
	if (IS_VALLEYVIEW(dev))
		ret = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
	else
		ret = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

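/*
 * Writing gt_max_freq_mhz clamps the new softlimit to the hardware
 * min/max range and rejects values below the current min softlimit.
 */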
static ssize_t gt_max_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);

	if (IS_VALLEYVIEW(dev))
		val = vlv_freq_opcode(dev_priv, val);
	else
		val /= GT_FREQUENCY_MULTIPLIER;

	if (val < dev_priv->rps.min_freq ||
	    val > dev_priv->rps.max_freq ||
	    val < dev_priv->rps.min_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	if (val > dev_priv->rps.rp0_freq)
		DRM_DEBUG("User requested overclocking to %d\n",
			  val * GT_FREQUENCY_MULTIPLIER);

	dev_priv->rps.max_freq_softlimit = val;

	if (dev_priv->rps.cur_freq > val) {
		if (IS_VALLEYVIEW(dev))
			valleyview_set_rps(dev, val);
		else
			gen6_set_rps(dev, val);
	} else if (!IS_VALLEYVIEW(dev)) {
		/* We still need gen6_set_rps to process the new max_delay and
		 * update the interrupt limits even though the frequency
		 * request is unchanged. */
		gen6_set_rps(dev, dev_priv->rps.cur_freq);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

	return count;
}

static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);
	if (IS_VALLEYVIEW(dev))
		ret = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
	else
		ret = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

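/*
 * Writing gt_min_freq_mhz mirrors the max path: clamp to the hardware
 * range and reject values above the current max softlimit.
 */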
static ssize_t gt_min_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);

	if (IS_VALLEYVIEW(dev))
		val = vlv_freq_opcode(dev_priv, val);
	else
		val /= GT_FREQUENCY_MULTIPLIER;

	if (val < dev_priv->rps.min_freq ||
	    val > dev_priv->rps.max_freq ||
	    val > dev_priv->rps.max_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	dev_priv->rps.min_freq_softlimit = val;

	if (dev_priv->rps.cur_freq < val) {
		if (IS_VALLEYVIEW(dev))
			valleyview_set_rps(dev, val);
		else
			gen6_set_rps(dev, val);
	} else if (!IS_VALLEYVIEW(dev)) {
		/* We still need gen6_set_rps to process the new min_delay and
		 * update the interrupt limits even though the frequency
		 * request is unchanged. */
		gen6_set_rps(dev, dev_priv->rps.cur_freq);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

	return count;
}

static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);

static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL);

static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);

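/* RP0/RP1/RPn: maximum, efficient and minimum render P-states, respectively. */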
/* For now we have a static number of RP states */
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, rp_state_cap;
	ssize_t ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);
	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	if (attr == &dev_attr_gt_RP0_freq_mhz) {
		if (IS_VALLEYVIEW(dev))
			val = vlv_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
		else
			val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER;
	} else if (attr == &dev_attr_gt_RP1_freq_mhz) {
		if (IS_VALLEYVIEW(dev))
			val = vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
		else
			val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER;
	} else if (attr == &dev_attr_gt_RPn_freq_mhz) {
		if (IS_VALLEYVIEW(dev))
			val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq);
		else
			val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER;
	} else {
		BUG();
	}
	return snprintf(buf, PAGE_SIZE, "%d\n", val);
}

static const struct attribute *gen6_attrs[] = {
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	NULL,
};

static const struct attribute *vlv_attrs[] = {
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	&dev_attr_vlv_rpe_freq_mhz.attr,
	NULL,
};

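/*
 * "error" exposes the most recently captured GPU error state; writing
 * anything to the file clears the recorded state.
 */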
static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr, char *buf,
				loff_t off, size_t count)
{
	struct device *kdev = container_of(kobj, struct device, kobj);
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct i915_error_state_file_priv error_priv;
	struct drm_i915_error_state_buf error_str;
	ssize_t ret_count = 0;
	int ret;

	memset(&error_priv, 0, sizeof(error_priv));

	ret = i915_error_state_buf_init(&error_str, to_i915(dev), count, off);
	if (ret)
		return ret;

	error_priv.dev = dev;
	i915_error_state_get(dev, &error_priv);

	ret = i915_error_state_to_str(&error_str, &error_priv);
	if (ret)
		goto out;

	ret_count = count < error_str.bytes ? count : error_str.bytes;

	memcpy(buf, error_str.buf, ret_count);
out:
	i915_error_state_put(&error_priv);
	i915_error_state_buf_release(&error_str);

	return ret ?: ret_count;
}

static ssize_t error_state_write(struct file *file, struct kobject *kobj,
				 struct bin_attribute *attr, char *buf,
				 loff_t off, size_t count)
{
	struct device *kdev = container_of(kobj, struct device, kobj);
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	int ret;

	DRM_DEBUG_DRIVER("Resetting error state\n");

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);

	return count;
}

static struct bin_attribute error_state_attr = {
	.attr.name = "error",
	.attr.mode = S_IRUSR | S_IWUSR,
	.size = 0,
	.read = error_state_read,
	.write = error_state_write,
};

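/* Register all i915 sysfs files on the primary DRM minor. */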
void i915_setup_sysfs(struct drm_device *dev)
{
	int ret;

#ifdef CONFIG_PM
	if (HAS_RC6(dev)) {
		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
					&rc6_attr_group);
		if (ret)
			DRM_ERROR("RC6 residency sysfs setup failed\n");
	}
	if (HAS_RC6p(dev)) {
		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
					&rc6p_attr_group);
		if (ret)
			DRM_ERROR("RC6p residency sysfs setup failed\n");
	}
#endif
	if (HAS_L3_DPF(dev)) {
		ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
		if (ret)
			DRM_ERROR("l3 parity sysfs setup failed\n");

		if (NUM_L3_SLICES(dev) > 1) {
			ret = device_create_bin_file(dev->primary->kdev,
						     &dpf_attrs_1);
			if (ret)
				DRM_ERROR("l3 parity slice 1 setup failed\n");
		}
	}

	ret = 0;
	if (IS_VALLEYVIEW(dev))
		ret = sysfs_create_files(&dev->primary->kdev->kobj, vlv_attrs);
	else if (INTEL_INFO(dev)->gen >= 6)
		ret = sysfs_create_files(&dev->primary->kdev->kobj, gen6_attrs);
	if (ret)
		DRM_ERROR("RPS sysfs setup failed\n");

	ret = sysfs_create_bin_file(&dev->primary->kdev->kobj,
				    &error_state_attr);
	if (ret)
		DRM_ERROR("error_state sysfs setup failed\n");
}

void i915_teardown_sysfs(struct drm_device *dev)
{
	sysfs_remove_bin_file(&dev->primary->kdev->kobj, &error_state_attr);
	if (IS_VALLEYVIEW(dev))
		sysfs_remove_files(&dev->primary->kdev->kobj, vlv_attrs);
	else
		sysfs_remove_files(&dev->primary->kdev->kobj, gen6_attrs);
	device_remove_bin_file(dev->primary->kdev, &dpf_attrs_1);
	device_remove_bin_file(dev->primary->kdev, &dpf_attrs);
#ifdef CONFIG_PM
	sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group);
	sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6p_attr_group);
#endif
}