xref: /openbmc/linux/drivers/gpu/drm/i915/i915_sysfs.c (revision 0a073b843bcd9a660f76e497182aac97cafddc4c)
/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>	/* kzalloc()/kfree() used by the l3 parity code */
#include <linux/stat.h>
#include <linux/sysfs.h>
#include "intel_drv.h"
#include "i915_drv.h"

#ifdef CONFIG_PM
static u32 calc_residency(struct drm_device *dev, const u32 reg)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 raw_time; /* 32b value may overflow during fixed point math */

	if (!intel_enable_rc6(dev))
		return 0;

	/* The counter ticks every 1.28us (128/100000 ms); scale to ms. */
	raw_time = I915_READ(reg) * 128ULL;
	return DIV_ROUND_UP_ULL(raw_time, 100000);
}
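
/*
 * Worked example of the conversion above, assuming the 1.28us tick:
 * a raw register count of 781250 gives 781250 * 128 / 100000 = 1000,
 * i.e. one second of RC6 residency reported in milliseconds.
 */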

static ssize_t
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
	return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
}

static ssize_t
show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
	u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}

static ssize_t
show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
	u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
}

static ssize_t
show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
	u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
}

static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);

static struct attribute *rc6_attrs[] = {
	&dev_attr_rc6_enable.attr,
	&dev_attr_rc6_residency_ms.attr,
	&dev_attr_rc6p_residency_ms.attr,
	&dev_attr_rc6pp_residency_ms.attr,
	NULL
};

static struct attribute_group rc6_attr_group = {
	.name = power_group_name,
	.attrs = rc6_attrs
};
#endif
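
/*
 * Because rc6_attr_group uses power_group_name, these attributes are
 * merged into the minor's existing "power" directory rather than a new
 * one. A sketch of the resulting userspace view (assuming the primary
 * minor is card0):
 *
 *	$ cat /sys/class/drm/card0/power/rc6_residency_ms
 *	1000
 */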

static int l3_access_valid(struct drm_device *dev, loff_t offset)
{
	if (!HAS_L3_GPU_CACHE(dev))
		return -EPERM;

	if (offset % 4 != 0)
		return -EINVAL;

	if (offset >= GEN7_L3LOG_SIZE)
		return -ENXIO;

	return 0;
}
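
/*
 * The distinct errnos let userspace tell the failure modes apart:
 * -EPERM means the hardware has no L3 GPU cache, -EINVAL a misaligned
 * offset, and -ENXIO an offset past the GEN7_L3LOG_SIZE window.
 */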

static ssize_t
i915_l3_read(struct file *filp, struct kobject *kobj,
	     struct bin_attribute *attr, char *buf,
	     loff_t offset, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
	struct drm_device *drm_dev = dminor->dev;
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	uint32_t misccpctl;
	int i, ret;

	ret = l3_access_valid(drm_dev, offset);
	if (ret)
		return ret;

	ret = i915_mutex_lock_interruptible(drm_dev);
	if (ret)
		return ret;

	/* DOP clock gating must be disabled while the L3 log is read. */
	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);

	/* buf only covers this read's [offset, offset + count) window, so
	 * index it from zero even though i starts at the register offset.
	 */
	for (i = offset; count >= 4 && i < GEN7_L3LOG_SIZE; i += 4, count -= 4)
		*((uint32_t *)(&buf[i - offset])) = I915_READ(GEN7_L3LOG_BASE + i);

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

	mutex_unlock(&drm_dev->struct_mutex);

	return i - offset;
}

static ssize_t
i915_l3_write(struct file *filp, struct kobject *kobj,
	      struct bin_attribute *attr, char *buf,
	      loff_t offset, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
	struct drm_device *drm_dev = dminor->dev;
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	u32 *temp = NULL; /* Just here to make handling failures easy */
	int ret;

	ret = l3_access_valid(drm_dev, offset);
	if (ret)
		return ret;

	ret = i915_mutex_lock_interruptible(drm_dev);
	if (ret)
		return ret;

	if (!dev_priv->l3_parity.remap_info) {
		temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
		if (!temp) {
			mutex_unlock(&drm_dev->struct_mutex);
			return -ENOMEM;
		}
	}

	ret = i915_gpu_idle(drm_dev);
	if (ret) {
		kfree(temp);
		mutex_unlock(&drm_dev->struct_mutex);
		return ret;
	}

	/* TODO: Ideally we really want a GPU reset here to make sure errors
	 * aren't propagated. Since I cannot find a stable way to reset the GPU
	 * at this point it is left as a TODO.
	 */
	if (temp)
		dev_priv->l3_parity.remap_info = temp;

	/* buf holds the bytes destined for [offset, offset + count) and
	 * starts at index zero; remap_info is a u32 array, hence offset/4.
	 */
	memcpy(dev_priv->l3_parity.remap_info + (offset/4),
	       buf,
	       count);

	i915_gem_l3_remap(drm_dev);

	mutex_unlock(&drm_dev->struct_mutex);

	return count;
}
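
/*
 * Userspace drives the "l3_parity" node below with pread()/pwrite();
 * the intel-gpu-tools L3 parity utility is one example of a consumer
 * (named here as an illustration, not a dependency).
 */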

static struct bin_attribute dpf_attrs = {
	.attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL
};

static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	mutex_lock(&dev_priv->rps.hw_lock);
	if (IS_VALLEYVIEW(dev_priv->dev))
		ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.cur_delay);
	else
		ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}
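
/*
 * On non-Valleyview parts the RPS delay values are in units of
 * GT_FREQUENCY_MULTIPLIER MHz (50 MHz in this era), so e.g. a
 * cur_delay of 18 reads back as 900 MHz. Valleyview instead encodes
 * frequencies as opcodes, hence the vlv_gpu_freq()/vlv_freq_opcode()
 * conversions.
 */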

static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	mutex_lock(&dev_priv->rps.hw_lock);
	if (IS_VALLEYVIEW(dev_priv->dev))
		ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.max_delay);
	else
		ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t gt_max_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, rp_state_cap, hw_max, hw_min, non_oc_max;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (IS_VALLEYVIEW(dev_priv->dev)) {
		val = vlv_freq_opcode(dev_priv->mem_freq, val);

		hw_max = valleyview_rps_max_freq(dev_priv);
		hw_min = valleyview_rps_min_freq(dev_priv);
		non_oc_max = hw_max;
	} else {
		val /= GT_FREQUENCY_MULTIPLIER;

		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		hw_max = dev_priv->rps.hw_max;
		non_oc_max = (rp_state_cap & 0xff);
		hw_min = ((rp_state_cap & 0xff0000) >> 16);
	}

	/* Reject values outside the hardware range; the new maximum must
	 * also keep min <= max.
	 */
	if (val < hw_min || val > hw_max ||
	    val < dev_priv->rps.min_delay) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	if (val > non_oc_max)
		DRM_DEBUG("User requested overclocking to %d\n",
			  val * GT_FREQUENCY_MULTIPLIER);

	/* If the current frequency exceeds the new cap, drop to it now. */
	if (dev_priv->rps.cur_delay > val) {
		if (IS_VALLEYVIEW(dev_priv->dev))
			valleyview_set_rps(dev_priv->dev, val);
		else
			gen6_set_rps(dev_priv->dev, val);
	}

	dev_priv->rps.max_delay = val;

	mutex_unlock(&dev_priv->rps.hw_lock);

	return count;
}
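
/*
 * Example (sketch, card0 assumed): capping the GT at 900 MHz and
 * reading the cap back:
 *
 *	# echo 900 > /sys/class/drm/card0/gt_max_freq_mhz
 *	$ cat /sys/class/drm/card0/gt_max_freq_mhz
 *	900
 */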

static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	mutex_lock(&dev_priv->rps.hw_lock);
	if (IS_VALLEYVIEW(dev_priv->dev))
		ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.min_delay);
	else
		ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t gt_min_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, rp_state_cap, hw_max, hw_min;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (IS_VALLEYVIEW(dev)) {
		val = vlv_freq_opcode(dev_priv->mem_freq, val);

		hw_max = valleyview_rps_max_freq(dev_priv);
		hw_min = valleyview_rps_min_freq(dev_priv);
	} else {
		val /= GT_FREQUENCY_MULTIPLIER;

		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		hw_max = dev_priv->rps.hw_max;
		hw_min = ((rp_state_cap & 0xff0000) >> 16);
	}

	/* The new minimum must stay in the hw range and keep min <= max. */
	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	/* If the current frequency is below the new floor, raise it now. */
	if (dev_priv->rps.cur_delay < val) {
		if (IS_VALLEYVIEW(dev))
			valleyview_set_rps(dev, val);
		else
			gen6_set_rps(dev_priv->dev, val);
	}

	dev_priv->rps.min_delay = val;

	mutex_unlock(&dev_priv->rps.hw_lock);

	return count;
}

static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);

static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);

/* For now we have a static number of RP states */
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, rp_state_cap;
	ssize_t ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
	mutex_unlock(&dev->struct_mutex);

	/* RP_STATE_CAP packs RP0 in bits 7:0, RP1 in 15:8, RPn in 23:16. */
	if (attr == &dev_attr_gt_RP0_freq_mhz) {
		val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER;
	} else if (attr == &dev_attr_gt_RP1_freq_mhz) {
		val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER;
	} else if (attr == &dev_attr_gt_RPn_freq_mhz) {
		val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER;
	} else {
		BUG();
	}
	return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
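
/*
 * Worked example with a hypothetical RP_STATE_CAP of 0x000a1c26:
 *	RP0 = 0x26 * 50 = 1900 MHz	(highest non-overclocked state)
 *	RP1 = 0x1c * 50 = 1400 MHz
 *	RPn = 0x0a * 50 =  500 MHz	(lowest state)
 */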

static const struct attribute *gen6_attrs[] = {
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	NULL,
};

void i915_setup_sysfs(struct drm_device *dev)
{
	int ret;

#ifdef CONFIG_PM
	if (INTEL_INFO(dev)->gen >= 6) {
		ret = sysfs_merge_group(&dev->primary->kdev.kobj,
					&rc6_attr_group);
		if (ret)
			DRM_ERROR("RC6 residency sysfs setup failed\n");
	}
#endif
	if (HAS_L3_GPU_CACHE(dev)) {
		ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs);
		if (ret)
			DRM_ERROR("l3 parity sysfs setup failed\n");
	}

	if (INTEL_INFO(dev)->gen >= 6) {
		ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs);
		if (ret)
			DRM_ERROR("gen6 sysfs setup failed\n");
	}
}
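
/*
 * Resulting sysfs layout (sketch; paths assume the primary minor is
 * card0):
 *
 *	/sys/class/drm/card0/power/rc6*		gen6+, CONFIG_PM only
 *	/sys/class/drm/card0/l3_parity		parts with an L3 GPU cache
 *	/sys/class/drm/card0/gt_*_freq_mhz	gen6+
 */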

void i915_teardown_sysfs(struct drm_device *dev)
{
	sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
	device_remove_bin_file(&dev->primary->kdev, &dpf_attrs);
#ifdef CONFIG_PM
	sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
#endif
}