xref: /openbmc/linux/drivers/gpu/drm/i915/i915_sysfs.c (revision 92a2c6b2)
/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/sysfs.h>
#include "intel_drv.h"
#include "i915_drv.h"

#define dev_to_drm_minor(d) dev_get_drvdata((d))

#ifdef CONFIG_PM
static u32 calc_residency(struct drm_device *dev, const u32 reg)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 raw_time; /* 32b value may overflow during fixed-point math */
	u64 units = 128ULL, div = 100000ULL, bias = 100ULL;
	u32 ret;

	if (!intel_enable_rc6(dev))
		return 0;

	intel_runtime_pm_get(dev_priv);

	/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
	if (IS_VALLEYVIEW(dev)) {
		u32 clk_reg, czcount_30ns;

		if (IS_CHERRYVIEW(dev))
			clk_reg = CHV_CLK_CTL1;
		else
			clk_reg = VLV_CLK_CTL2;

		czcount_30ns = I915_READ(clk_reg) >> CLK_CTL2_CZCOUNT_30NS_SHIFT;

		if (WARN(!czcount_30ns, "bogus CZ count value")) {
			ret = 0;
			goto out;
		}

		units = 0;
		div = 1000000ULL;

		if (IS_CHERRYVIEW(dev)) {
			/* Special case for 320 MHz */
			if (czcount_30ns == 1) {
				div = 10000000ULL;
				units = 3125ULL;
			} else {
				/* CHV counts are one less */
				czcount_30ns += 1;
			}
		}

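		/*
		 * Editor's sketch of the arithmetic above: czcount_30ns == 1
		 * encodes a 320 MHz CZ clock, i.e. a 3.125ns period. With
		 * units = 3125 and div = 10^7 (multiplied by the common bias
		 * of 100 below), each raw count converts to exactly 3.125ns,
		 * reported in milliseconds.
		 */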
		if (units == 0)
			units = DIV_ROUND_UP_ULL(30ULL * bias,
						 (u64)czcount_30ns);

		if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
			units <<= 8;

		div = div * bias;
	}

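	/*
	 * Worked example (editor's note): on non-VLV parts each counter
	 * tick is 1.28us, so with units = 128 and div = 100000 a raw
	 * count of 781250 yields 781250 * 128 / 100000 = 1000ms.
	 */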
	raw_time = I915_READ(reg) * units;
	ret = DIV_ROUND_UP_ULL(raw_time, div);

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

static ssize_t
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
}

static ssize_t
show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}

static ssize_t
show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
}

static ssize_t
show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
}

static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);

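/*
 * Editor's note: these attributes are merged into the device's existing
 * "power" group, so assuming the usual card0 minor they appear as e.g.
 * /sys/class/drm/card0/power/rc6_residency_ms.
 */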
static struct attribute *rc6_attrs[] = {
	&dev_attr_rc6_enable.attr,
	&dev_attr_rc6_residency_ms.attr,
	NULL
};

static struct attribute_group rc6_attr_group = {
	.name = power_group_name,
	.attrs = rc6_attrs
};

static struct attribute *rc6p_attrs[] = {
	&dev_attr_rc6p_residency_ms.attr,
	&dev_attr_rc6pp_residency_ms.attr,
	NULL
};

static struct attribute_group rc6p_attr_group = {
	.name = power_group_name,
	.attrs = rc6p_attrs
};
#endif

static int l3_access_valid(struct drm_device *dev, loff_t offset)
{
	if (!HAS_L3_DPF(dev))
		return -EPERM;

	if (offset % 4 != 0)
		return -EINVAL;

	if (offset >= GEN7_L3LOG_SIZE)
		return -ENXIO;

	return 0;
}

static ssize_t
i915_l3_read(struct file *filp, struct kobject *kobj,
	     struct bin_attribute *attr, char *buf,
	     loff_t offset, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct drm_minor *dminor = dev_to_drm_minor(dev);
	struct drm_device *drm_dev = dminor->dev;
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	int slice = (int)(uintptr_t)attr->private;
	int ret;

	count = round_down(count, 4);

	ret = l3_access_valid(drm_dev, offset);
	if (ret)
		return ret;

	count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);

	ret = i915_mutex_lock_interruptible(drm_dev);
	if (ret)
		return ret;

	if (dev_priv->l3_parity.remap_info[slice])
		memcpy(buf,
		       dev_priv->l3_parity.remap_info[slice] + (offset/4),
		       count);
	else
		memset(buf, 0, count);

	mutex_unlock(&drm_dev->struct_mutex);

	return count;
}

static ssize_t
i915_l3_write(struct file *filp, struct kobject *kobj,
	      struct bin_attribute *attr, char *buf,
	      loff_t offset, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct drm_minor *dminor = dev_to_drm_minor(dev);
	struct drm_device *drm_dev = dminor->dev;
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	struct intel_context *ctx;
	u32 *temp = NULL; /* Just here to make handling failures easy */
	int slice = (int)(uintptr_t)attr->private;
	int ret;

	if (!HAS_HW_CONTEXTS(drm_dev))
		return -ENXIO;

	ret = l3_access_valid(drm_dev, offset);
	if (ret)
		return ret;

	ret = i915_mutex_lock_interruptible(drm_dev);
	if (ret)
		return ret;

	if (!dev_priv->l3_parity.remap_info[slice]) {
		temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
		if (!temp) {
			mutex_unlock(&drm_dev->struct_mutex);
			return -ENOMEM;
		}
	}

	ret = i915_gpu_idle(drm_dev);
	if (ret) {
		kfree(temp);
		mutex_unlock(&drm_dev->struct_mutex);
		return ret;
	}

	/* TODO: Ideally we really want a GPU reset here to make sure errors
	 * aren't propagated. Since I cannot find a stable way to reset the
	 * GPU at this point, it is left as a TODO.
	 */
	if (temp)
		dev_priv->l3_parity.remap_info[slice] = temp;

	memcpy(dev_priv->l3_parity.remap_info[slice] + (offset/4), buf, count);

	/* NB: We defer the remapping until we switch to the context */
	list_for_each_entry(ctx, &dev_priv->context_list, link)
		ctx->remap_slice |= (1<<slice);

	mutex_unlock(&drm_dev->struct_mutex);

	return count;
}

static struct bin_attribute dpf_attrs = {
	.attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)0
};

static struct bin_attribute dpf_attrs_1 = {
	.attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)1
};

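/*
 * Usage sketch (editor's note, card0 path assumed): the l3_parity files
 * take 4-byte-aligned reads and writes of remap entries, e.g.
 *   dd if=/sys/class/drm/card0/l3_parity bs=4 count=1 | xxd
 */
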
static ssize_t gt_act_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);
	if (IS_VALLEYVIEW(dev_priv->dev)) {
		u32 freq;
		freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff);
	} else {
		u32 rpstat = I915_READ(GEN6_RPSTAT1);
		if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
			ret = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			ret = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		ret = intel_gpu_freq(dev_priv, ret);
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

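/*
 * Editor's note: gt_act_freq_mhz above reports what the hardware is
 * actually running at (read back from the Punit or RPSTAT1), while
 * gt_cur_freq_mhz below reports the frequency software last requested;
 * the two can differ, e.g. while the GPU is idle.
 */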
static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);
	ret = intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq);
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
				     struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	return snprintf(buf, PAGE_SIZE,
			"%d\n",
			intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
}

static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);
	ret = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
	mutex_unlock(&dev_priv->rps.hw_lock);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t gt_max_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);

	val = intel_freq_opcode(dev_priv, val);

	if (val < dev_priv->rps.min_freq ||
	    val > dev_priv->rps.max_freq ||
	    val < dev_priv->rps.min_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	if (val > dev_priv->rps.rp0_freq)
		DRM_DEBUG("User requested overclocking to %d\n",
			  intel_gpu_freq(dev_priv, val));

	dev_priv->rps.max_freq_softlimit = val;

	val = clamp_t(int, dev_priv->rps.cur_freq,
		      dev_priv->rps.min_freq_softlimit,
		      dev_priv->rps.max_freq_softlimit);

	/* We still need *_set_rps to process the new max_freq_softlimit and
	 * update the interrupt limits and PMINTRMSK even though the
	 * frequency request may be unchanged. */
	if (IS_VALLEYVIEW(dev))
		valleyview_set_rps(dev, val);
	else
		gen6_set_rps(dev, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return count;
}

static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);
	ret = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
	mutex_unlock(&dev_priv->rps.hw_lock);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t gt_min_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);

	val = intel_freq_opcode(dev_priv, val);

	if (val < dev_priv->rps.min_freq ||
	    val > dev_priv->rps.max_freq ||
	    val > dev_priv->rps.max_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	dev_priv->rps.min_freq_softlimit = val;

	val = clamp_t(int, dev_priv->rps.cur_freq,
		      dev_priv->rps.min_freq_softlimit,
		      dev_priv->rps.max_freq_softlimit);

	/* We still need *_set_rps to process the new min_freq_softlimit and
	 * update the interrupt limits and PMINTRMSK even though the
	 * frequency request may be unchanged. */
	if (IS_VALLEYVIEW(dev))
		valleyview_set_rps(dev, val);
	else
		gen6_set_rps(dev, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return count;
}

static DEVICE_ATTR(gt_act_freq_mhz, S_IRUGO, gt_act_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);

static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL);

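/*
 * Example (editor's sketch, card0 path assumed): cap the GPU at 800MHz:
 *   echo 800 > /sys/class/drm/card0/gt_max_freq_mhz
 * The value is parsed with kstrtou32() and rejected with -EINVAL if it
 * falls outside the hardware limits.
 */
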
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);

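/*
 * Editor's note: RP0 is the highest non-overclocked frequency the part
 * supports and RPn the lowest, with RP1 an intermediate operating point;
 * on non-VLV hardware they are decoded from consecutive bytes of
 * GEN6_RP_STATE_CAP in gt_rp_mhz_show() below.
 */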
/* For now we have a static number of RP states */
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, rp_state_cap;
	ssize_t ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);
	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	if (attr == &dev_attr_gt_RP0_freq_mhz) {
		if (IS_VALLEYVIEW(dev))
			val = intel_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
		else
			val = intel_gpu_freq(dev_priv,
					     ((rp_state_cap & 0x0000ff) >> 0));
	} else if (attr == &dev_attr_gt_RP1_freq_mhz) {
		if (IS_VALLEYVIEW(dev))
			val = intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
		else
			val = intel_gpu_freq(dev_priv,
					     ((rp_state_cap & 0x00ff00) >> 8));
	} else if (attr == &dev_attr_gt_RPn_freq_mhz) {
		if (IS_VALLEYVIEW(dev))
			val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq);
		else
			val = intel_gpu_freq(dev_priv,
					     ((rp_state_cap & 0xff0000) >> 16));
	} else {
		BUG();
	}
	return snprintf(buf, PAGE_SIZE, "%d\n", val);
}

static const struct attribute *gen6_attrs[] = {
	&dev_attr_gt_act_freq_mhz.attr,
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	NULL,
};

static const struct attribute *vlv_attrs[] = {
	&dev_attr_gt_act_freq_mhz.attr,
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	&dev_attr_vlv_rpe_freq_mhz.attr,
	NULL,
};

static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr, char *buf,
				loff_t off, size_t count)
{
	struct device *kdev = container_of(kobj, struct device, kobj);
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct i915_error_state_file_priv error_priv;
	struct drm_i915_error_state_buf error_str;
	ssize_t ret_count = 0;
	int ret;

	memset(&error_priv, 0, sizeof(error_priv));

	ret = i915_error_state_buf_init(&error_str, to_i915(dev), count, off);
	if (ret)
		return ret;

	error_priv.dev = dev;
	i915_error_state_get(dev, &error_priv);

	ret = i915_error_state_to_str(&error_str, &error_priv);
	if (ret)
		goto out;

	ret_count = count < error_str.bytes ? count : error_str.bytes;

	memcpy(buf, error_str.buf, ret_count);
out:
	i915_error_state_put(&error_priv);
	i915_error_state_buf_release(&error_str);

	return ret ?: ret_count;
}

static ssize_t error_state_write(struct file *file, struct kobject *kobj,
				 struct bin_attribute *attr, char *buf,
				 loff_t off, size_t count)
{
	struct device *kdev = container_of(kobj, struct device, kobj);
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	int ret;

	DRM_DEBUG_DRIVER("Resetting error state\n");

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);

	return count;
}

static struct bin_attribute error_state_attr = {
	.attr.name = "error",
	.attr.mode = S_IRUSR | S_IWUSR,
	.size = 0,
	.read = error_state_read,
	.write = error_state_write,
};

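/*
 * Usage sketch (editor's note, card0 path assumed): dump the last GPU
 * error state with
 *   cat /sys/class/drm/card0/error
 * and discard it by writing anything back, e.g.
 *   echo 1 > /sys/class/drm/card0/error
 */
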
void i915_setup_sysfs(struct drm_device *dev)
{
	int ret;

#ifdef CONFIG_PM
	if (HAS_RC6(dev)) {
		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
					&rc6_attr_group);
		if (ret)
			DRM_ERROR("RC6 residency sysfs setup failed\n");
	}
	if (HAS_RC6p(dev)) {
		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
					&rc6p_attr_group);
		if (ret)
			DRM_ERROR("RC6p residency sysfs setup failed\n");
	}
#endif
	if (HAS_L3_DPF(dev)) {
		ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
		if (ret)
			DRM_ERROR("l3 parity sysfs setup failed\n");

		if (NUM_L3_SLICES(dev) > 1) {
			ret = device_create_bin_file(dev->primary->kdev,
						     &dpf_attrs_1);
			if (ret)
				DRM_ERROR("l3 parity slice 1 setup failed\n");
		}
	}

	ret = 0;
	if (IS_VALLEYVIEW(dev))
		ret = sysfs_create_files(&dev->primary->kdev->kobj, vlv_attrs);
	else if (INTEL_INFO(dev)->gen >= 6)
		ret = sysfs_create_files(&dev->primary->kdev->kobj, gen6_attrs);
	if (ret)
		DRM_ERROR("RPS sysfs setup failed\n");

	ret = sysfs_create_bin_file(&dev->primary->kdev->kobj,
				    &error_state_attr);
	if (ret)
		DRM_ERROR("error_state sysfs setup failed\n");
}

void i915_teardown_sysfs(struct drm_device *dev)
{
	sysfs_remove_bin_file(&dev->primary->kdev->kobj, &error_state_attr);
	if (IS_VALLEYVIEW(dev))
		sysfs_remove_files(&dev->primary->kdev->kobj, vlv_attrs);
	else
		sysfs_remove_files(&dev->primary->kdev->kobj, gen6_attrs);
	device_remove_bin_file(dev->primary->kdev, &dpf_attrs_1);
	device_remove_bin_file(dev->primary->kdev, &dpf_attrs);
#ifdef CONFIG_PM
	sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group);
	sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6p_attr_group);
#endif
}