xref: /openbmc/linux/drivers/gpu/drm/i915/i915_sysfs.c (revision 14c8d110e083d3a09ccf8cfe18ad22fe1450c2e9)
10136db58SBen Widawsky /*
20136db58SBen Widawsky  * Copyright © 2012 Intel Corporation
30136db58SBen Widawsky  *
40136db58SBen Widawsky  * Permission is hereby granted, free of charge, to any person obtaining a
50136db58SBen Widawsky  * copy of this software and associated documentation files (the "Software"),
60136db58SBen Widawsky  * to deal in the Software without restriction, including without limitation
70136db58SBen Widawsky  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
80136db58SBen Widawsky  * and/or sell copies of the Software, and to permit persons to whom the
90136db58SBen Widawsky  * Software is furnished to do so, subject to the following conditions:
100136db58SBen Widawsky  *
110136db58SBen Widawsky  * The above copyright notice and this permission notice (including the next
120136db58SBen Widawsky  * paragraph) shall be included in all copies or substantial portions of the
130136db58SBen Widawsky  * Software.
140136db58SBen Widawsky  *
150136db58SBen Widawsky  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
160136db58SBen Widawsky  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
170136db58SBen Widawsky  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
180136db58SBen Widawsky  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
190136db58SBen Widawsky  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
200136db58SBen Widawsky  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
210136db58SBen Widawsky  * IN THE SOFTWARE.
220136db58SBen Widawsky  *
230136db58SBen Widawsky  * Authors:
240136db58SBen Widawsky  *    Ben Widawsky <ben@bwidawsk.net>
250136db58SBen Widawsky  *
260136db58SBen Widawsky  */
270136db58SBen Widawsky 
280136db58SBen Widawsky #include <linux/device.h>
290136db58SBen Widawsky #include <linux/module.h>
300136db58SBen Widawsky #include <linux/stat.h>
310136db58SBen Widawsky #include <linux/sysfs.h>
3284bc7581SBen Widawsky #include "intel_drv.h"
330136db58SBen Widawsky #include "i915_drv.h"
340136db58SBen Widawsky 
35*14c8d110SDave Airlie #define dev_to_drm_minor(d) container_of((d), struct drm_minor, kdev)
36*14c8d110SDave Airlie 
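/*
 * Note (reviewer addition): every show/store callback in this file receives
 * the struct device embedded in the DRM minor, so dev_to_drm_minor()
 * (container_of above) is the common first step back to the drm_device and
 * its dev_priv.
 */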
375ab3633dSHunt Xu #ifdef CONFIG_PM
380136db58SBen Widawsky static u32 calc_residency(struct drm_device *dev, const u32 reg)
390136db58SBen Widawsky {
400136db58SBen Widawsky 	struct drm_i915_private *dev_priv = dev->dev_private;
410136db58SBen Widawsky 	u64 raw_time; /* 32b value may overflow during fixed point math */
42e454a05dSJesse Barnes 	u64 units = 128ULL, div = 100000ULL, bias = 100ULL;
430136db58SBen Widawsky 
440136db58SBen Widawsky 	if (!intel_enable_rc6(dev))
450136db58SBen Widawsky 		return 0;
460136db58SBen Widawsky 
47e454a05dSJesse Barnes 	/* On VLV, residency time is in CZ units rather than 1.28us */
48e454a05dSJesse Barnes 	if (IS_VALLEYVIEW(dev)) {
49e454a05dSJesse Barnes 		u32 clkctl2;
50e454a05dSJesse Barnes 
51e454a05dSJesse Barnes 		clkctl2 = I915_READ(VLV_CLK_CTL2) >>
52e454a05dSJesse Barnes 			CLK_CTL2_CZCOUNT_30NS_SHIFT;
53e454a05dSJesse Barnes 		if (!clkctl2) {
54e454a05dSJesse Barnes 			WARN(!clkctl2, "bogus CZ count value");
55e454a05dSJesse Barnes 			return 0;
56e454a05dSJesse Barnes 		}
57e454a05dSJesse Barnes 		units = DIV_ROUND_UP_ULL(30ULL * bias, (u64)clkctl2);
58e454a05dSJesse Barnes 		if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
59e454a05dSJesse Barnes 			units <<= 8;
60e454a05dSJesse Barnes 
61e454a05dSJesse Barnes 		div = 1000000ULL * bias;
62e454a05dSJesse Barnes 	}
63e454a05dSJesse Barnes 
64e454a05dSJesse Barnes 	raw_time = I915_READ(reg) * units;
65e454a05dSJesse Barnes 	return DIV_ROUND_UP_ULL(raw_time, div);
660136db58SBen Widawsky }
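/*
 * Worked example of the fixed-point math above (illustrative only): on
 * non-VLV parts each residency counter tick is 1.28us, which the defaults
 * express as units = 128 and div = 100000, i.e. ms = count * 128 / 100000;
 * a raw count of 781250 therefore reads back as 1000 ms.  On VLV, units
 * becomes the CZ clock period in ns scaled by bias = 100 (and by a further
 * 256 when VLV_COUNT_RANGE_HIGH is set) and div becomes 100000000, so the
 * result is still reported in milliseconds.
 */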
670136db58SBen Widawsky 
680136db58SBen Widawsky static ssize_t
69dbdfd8e9SBen Widawsky show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
700136db58SBen Widawsky {
71*14c8d110SDave Airlie 	struct drm_minor *dminor = dev_to_drm_minor(kdev);
723e2a1556SJani Nikula 	return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
730136db58SBen Widawsky }
740136db58SBen Widawsky 
750136db58SBen Widawsky static ssize_t
76dbdfd8e9SBen Widawsky show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
770136db58SBen Widawsky {
78*14c8d110SDave Airlie 	struct drm_minor *dminor = dev_to_drm_minor(kdev);
790136db58SBen Widawsky 	u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
803e2a1556SJani Nikula 	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
810136db58SBen Widawsky }
820136db58SBen Widawsky 
830136db58SBen Widawsky static ssize_t
84dbdfd8e9SBen Widawsky show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
850136db58SBen Widawsky {
86*14c8d110SDave Airlie 	struct drm_minor *dminor = dev_to_drm_minor(kdev);
870136db58SBen Widawsky 	u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
885ffd494bSJesse Barnes 	if (IS_VALLEYVIEW(dminor->dev))
895ffd494bSJesse Barnes 		rc6p_residency = 0;
903e2a1556SJani Nikula 	return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
910136db58SBen Widawsky }
920136db58SBen Widawsky 
930136db58SBen Widawsky static ssize_t
94dbdfd8e9SBen Widawsky show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
950136db58SBen Widawsky {
96*14c8d110SDave Airlie 	struct drm_minor *dminor = dev_to_drm_minor(kdev);
970136db58SBen Widawsky 	u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
985ffd494bSJesse Barnes 	if (IS_VALLEYVIEW(dminor->dev))
995ffd494bSJesse Barnes 		rc6pp_residency = 0;
1003e2a1556SJani Nikula 	return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
1010136db58SBen Widawsky }
1020136db58SBen Widawsky 
1030136db58SBen Widawsky static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
1040136db58SBen Widawsky static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
1050136db58SBen Widawsky static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
1060136db58SBen Widawsky static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
1070136db58SBen Widawsky 
1080136db58SBen Widawsky static struct attribute *rc6_attrs[] = {
1090136db58SBen Widawsky 	&dev_attr_rc6_enable.attr,
1100136db58SBen Widawsky 	&dev_attr_rc6_residency_ms.attr,
1110136db58SBen Widawsky 	&dev_attr_rc6p_residency_ms.attr,
1120136db58SBen Widawsky 	&dev_attr_rc6pp_residency_ms.attr,
1130136db58SBen Widawsky 	NULL
1140136db58SBen Widawsky };
1150136db58SBen Widawsky 
1160136db58SBen Widawsky static struct attribute_group rc6_attr_group = {
1170136db58SBen Widawsky 	.name = power_group_name,
1180136db58SBen Widawsky 	.attrs =  rc6_attrs
1190136db58SBen Widawsky };
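/*
 * Since the group is named power_group_name ("power") and is merged into
 * the minor's kobject in i915_setup_sysfs(), these files appear next to the
 * runtime PM attributes, e.g. /sys/class/drm/card0/power/rc6_residency_ms
 * (path assumes the first GPU).
 */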
1208c3f929bSBen Widawsky #endif
1210136db58SBen Widawsky 
12284bc7581SBen Widawsky static int l3_access_valid(struct drm_device *dev, loff_t offset)
12384bc7581SBen Widawsky {
124040d2baaSBen Widawsky 	if (!HAS_L3_DPF(dev))
12584bc7581SBen Widawsky 		return -EPERM;
12684bc7581SBen Widawsky 
12784bc7581SBen Widawsky 	if (offset % 4 != 0)
12884bc7581SBen Widawsky 		return -EINVAL;
12984bc7581SBen Widawsky 
13084bc7581SBen Widawsky 	if (offset >= GEN7_L3LOG_SIZE)
13184bc7581SBen Widawsky 		return -ENXIO;
13284bc7581SBen Widawsky 
13384bc7581SBen Widawsky 	return 0;
13484bc7581SBen Widawsky }
13584bc7581SBen Widawsky 
13684bc7581SBen Widawsky static ssize_t
13784bc7581SBen Widawsky i915_l3_read(struct file *filp, struct kobject *kobj,
13884bc7581SBen Widawsky 	     struct bin_attribute *attr, char *buf,
13984bc7581SBen Widawsky 	     loff_t offset, size_t count)
14084bc7581SBen Widawsky {
14184bc7581SBen Widawsky 	struct device *dev = container_of(kobj, struct device, kobj);
142*14c8d110SDave Airlie 	struct drm_minor *dminor = dev_to_drm_minor(dev);
14384bc7581SBen Widawsky 	struct drm_device *drm_dev = dminor->dev;
14484bc7581SBen Widawsky 	struct drm_i915_private *dev_priv = drm_dev->dev_private;
14535a85ac6SBen Widawsky 	int slice = (int)(uintptr_t)attr->private;
1463ccfd19dSBen Widawsky 	int ret;
14784bc7581SBen Widawsky 
1481c3dcd1cSBen Widawsky 	count = round_down(count, 4);
1491c3dcd1cSBen Widawsky 
15084bc7581SBen Widawsky 	ret = l3_access_valid(drm_dev, offset);
15184bc7581SBen Widawsky 	if (ret)
15284bc7581SBen Widawsky 		return ret;
15384bc7581SBen Widawsky 
154e5ad4026SDan Carpenter 	count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);
15533618ea5SBen Widawsky 
15684bc7581SBen Widawsky 	ret = i915_mutex_lock_interruptible(drm_dev);
15784bc7581SBen Widawsky 	if (ret)
15884bc7581SBen Widawsky 		return ret;
15984bc7581SBen Widawsky 
16035a85ac6SBen Widawsky 	if (dev_priv->l3_parity.remap_info[slice])
1611c966dd2SBen Widawsky 		memcpy(buf,
16235a85ac6SBen Widawsky 		       dev_priv->l3_parity.remap_info[slice] + (offset/4),
1631c966dd2SBen Widawsky 		       count);
1641c966dd2SBen Widawsky 	else
1651c966dd2SBen Widawsky 		memset(buf, 0, count);
1661c966dd2SBen Widawsky 
16784bc7581SBen Widawsky 	mutex_unlock(&drm_dev->struct_mutex);
16884bc7581SBen Widawsky 
1691c966dd2SBen Widawsky 	return count;
17084bc7581SBen Widawsky }
17184bc7581SBen Widawsky 
17284bc7581SBen Widawsky static ssize_t
17384bc7581SBen Widawsky i915_l3_write(struct file *filp, struct kobject *kobj,
17484bc7581SBen Widawsky 	      struct bin_attribute *attr, char *buf,
17584bc7581SBen Widawsky 	      loff_t offset, size_t count)
17684bc7581SBen Widawsky {
17784bc7581SBen Widawsky 	struct device *dev = container_of(kobj, struct device, kobj);
178*14c8d110SDave Airlie 	struct drm_minor *dminor = dev_to_drm_minor(dev);
17984bc7581SBen Widawsky 	struct drm_device *drm_dev = dminor->dev;
18084bc7581SBen Widawsky 	struct drm_i915_private *dev_priv = drm_dev->dev_private;
1813ccfd19dSBen Widawsky 	struct i915_hw_context *ctx;
18284bc7581SBen Widawsky 	u32 *temp = NULL; /* Just here to make handling failures easy */
18335a85ac6SBen Widawsky 	int slice = (int)(uintptr_t)attr->private;
18484bc7581SBen Widawsky 	int ret;
18584bc7581SBen Widawsky 
18684bc7581SBen Widawsky 	ret = l3_access_valid(drm_dev, offset);
18784bc7581SBen Widawsky 	if (ret)
18884bc7581SBen Widawsky 		return ret;
18984bc7581SBen Widawsky 
1903ccfd19dSBen Widawsky 	if (dev_priv->hw_contexts_disabled)
1913ccfd19dSBen Widawsky 		return -ENXIO;
1923ccfd19dSBen Widawsky 
19384bc7581SBen Widawsky 	ret = i915_mutex_lock_interruptible(drm_dev);
19484bc7581SBen Widawsky 	if (ret)
19584bc7581SBen Widawsky 		return ret;
19684bc7581SBen Widawsky 
19735a85ac6SBen Widawsky 	if (!dev_priv->l3_parity.remap_info[slice]) {
19884bc7581SBen Widawsky 		temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
19984bc7581SBen Widawsky 		if (!temp) {
20084bc7581SBen Widawsky 			mutex_unlock(&drm_dev->struct_mutex);
20184bc7581SBen Widawsky 			return -ENOMEM;
20284bc7581SBen Widawsky 		}
20384bc7581SBen Widawsky 	}
20484bc7581SBen Widawsky 
20584bc7581SBen Widawsky 	ret = i915_gpu_idle(drm_dev);
20684bc7581SBen Widawsky 	if (ret) {
20784bc7581SBen Widawsky 		kfree(temp);
20884bc7581SBen Widawsky 		mutex_unlock(&drm_dev->struct_mutex);
20984bc7581SBen Widawsky 		return ret;
21084bc7581SBen Widawsky 	}
21184bc7581SBen Widawsky 
21284bc7581SBen Widawsky 	/* TODO: Ideally we really want a GPU reset here to make sure errors
21384bc7581SBen Widawsky 	 * aren't propagated. Since I cannot find a stable way to reset the GPU
21484bc7581SBen Widawsky 	 * at this point it is left as a TODO.
21584bc7581SBen Widawsky 	*/
21684bc7581SBen Widawsky 	if (temp)
21735a85ac6SBen Widawsky 		dev_priv->l3_parity.remap_info[slice] = temp;
21884bc7581SBen Widawsky 
21935a85ac6SBen Widawsky 	memcpy(dev_priv->l3_parity.remap_info[slice] + (offset/4), buf, count);
22084bc7581SBen Widawsky 
2213ccfd19dSBen Widawsky 	/* NB: We defer the remapping until we switch to the context */
2223ccfd19dSBen Widawsky 	list_for_each_entry(ctx, &dev_priv->context_list, link)
2233ccfd19dSBen Widawsky 		ctx->remap_slice |= (1<<slice);
22484bc7581SBen Widawsky 
22584bc7581SBen Widawsky 	mutex_unlock(&drm_dev->struct_mutex);
22684bc7581SBen Widawsky 
22784bc7581SBen Widawsky 	return count;
22884bc7581SBen Widawsky }
22984bc7581SBen Widawsky 
23084bc7581SBen Widawsky static struct bin_attribute dpf_attrs = {
23184bc7581SBen Widawsky 	.attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
23284bc7581SBen Widawsky 	.size = GEN7_L3LOG_SIZE,
23384bc7581SBen Widawsky 	.read = i915_l3_read,
23484bc7581SBen Widawsky 	.write = i915_l3_write,
23535a85ac6SBen Widawsky 	.mmap = NULL,
23635a85ac6SBen Widawsky 	.private = (void *)0
23735a85ac6SBen Widawsky };
23835a85ac6SBen Widawsky 
23935a85ac6SBen Widawsky static struct bin_attribute dpf_attrs_1 = {
24035a85ac6SBen Widawsky 	.attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
24135a85ac6SBen Widawsky 	.size = GEN7_L3LOG_SIZE,
24235a85ac6SBen Widawsky 	.read = i915_l3_read,
24335a85ac6SBen Widawsky 	.write = i915_l3_write,
24435a85ac6SBen Widawsky 	.mmap = NULL,
24535a85ac6SBen Widawsky 	.private = (void *)1
24684bc7581SBen Widawsky };
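/*
 * The two bin files above ("l3_parity" and "l3_parity_slice_1") each expose
 * GEN7_L3LOG_SIZE bytes of per-slice L3 remap data.  Reads return the table
 * currently cached in l3_parity.remap_info[] (zeroes if none); writes only
 * record a new table and set remap_slice on every context, so the actual
 * remapping registers are programmed lazily at the next context switch.
 */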
24784bc7581SBen Widawsky 
248df6eedc8SBen Widawsky static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
249df6eedc8SBen Widawsky 				    struct device_attribute *attr, char *buf)
250df6eedc8SBen Widawsky {
251*14c8d110SDave Airlie 	struct drm_minor *minor = dev_to_drm_minor(kdev);
252df6eedc8SBen Widawsky 	struct drm_device *dev = minor->dev;
253df6eedc8SBen Widawsky 	struct drm_i915_private *dev_priv = dev->dev_private;
254df6eedc8SBen Widawsky 	int ret;
255df6eedc8SBen Widawsky 
2564fc688ceSJesse Barnes 	mutex_lock(&dev_priv->rps.hw_lock);
257177006a1SJesse Barnes 	if (IS_VALLEYVIEW(dev_priv->dev)) {
258177006a1SJesse Barnes 		u32 freq;
25964936258SJani Nikula 		freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
260177006a1SJesse Barnes 		ret = vlv_gpu_freq(dev_priv->mem_freq, (freq >> 8) & 0xff);
261177006a1SJesse Barnes 	} else {
262df6eedc8SBen Widawsky 		ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
263177006a1SJesse Barnes 	}
2644fc688ceSJesse Barnes 	mutex_unlock(&dev_priv->rps.hw_lock);
265df6eedc8SBen Widawsky 
2663e2a1556SJani Nikula 	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
267df6eedc8SBen Widawsky }
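/*
 * Frequencies are reported in MHz: on GEN6+ the rps delay values are in
 * units of GT_FREQUENCY_MULTIPLIER (50 MHz), so e.g. cur_delay == 18 reads
 * back as 900, while Valleyview queries the punit status register and
 * converts the returned opcode with vlv_gpu_freq().
 */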
268df6eedc8SBen Widawsky 
26997e4eed7SChris Wilson static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
27097e4eed7SChris Wilson 				     struct device_attribute *attr, char *buf)
27197e4eed7SChris Wilson {
272*14c8d110SDave Airlie 	struct drm_minor *minor = dev_to_drm_minor(kdev);
27397e4eed7SChris Wilson 	struct drm_device *dev = minor->dev;
27497e4eed7SChris Wilson 	struct drm_i915_private *dev_priv = dev->dev_private;
27597e4eed7SChris Wilson 
27697e4eed7SChris Wilson 	return snprintf(buf, PAGE_SIZE, "%d\n",
27797e4eed7SChris Wilson 			vlv_gpu_freq(dev_priv->mem_freq,
27897e4eed7SChris Wilson 				     dev_priv->rps.rpe_delay));
27997e4eed7SChris Wilson }
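/*
 * rps.rpe_delay is Valleyview's "RPe" (efficient) operating point; the
 * gen6_attrs set below has no equivalent file.
 */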
28097e4eed7SChris Wilson 
281df6eedc8SBen Widawsky static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
282df6eedc8SBen Widawsky {
283*14c8d110SDave Airlie 	struct drm_minor *minor = dev_to_drm_minor(kdev);
284df6eedc8SBen Widawsky 	struct drm_device *dev = minor->dev;
285df6eedc8SBen Widawsky 	struct drm_i915_private *dev_priv = dev->dev_private;
286df6eedc8SBen Widawsky 	int ret;
287df6eedc8SBen Widawsky 
2884fc688ceSJesse Barnes 	mutex_lock(&dev_priv->rps.hw_lock);
2890a073b84SJesse Barnes 	if (IS_VALLEYVIEW(dev_priv->dev))
2900a073b84SJesse Barnes 		ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.max_delay);
2910a073b84SJesse Barnes 	else
292182642b0SMika Kuoppala 		ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
2934fc688ceSJesse Barnes 	mutex_unlock(&dev_priv->rps.hw_lock);
294df6eedc8SBen Widawsky 
2953e2a1556SJani Nikula 	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
296df6eedc8SBen Widawsky }
297df6eedc8SBen Widawsky 
29846ddf194SBen Widawsky static ssize_t gt_max_freq_mhz_store(struct device *kdev,
29946ddf194SBen Widawsky 				     struct device_attribute *attr,
30046ddf194SBen Widawsky 				     const char *buf, size_t count)
30146ddf194SBen Widawsky {
302*14c8d110SDave Airlie 	struct drm_minor *minor = dev_to_drm_minor(kdev);
30346ddf194SBen Widawsky 	struct drm_device *dev = minor->dev;
30446ddf194SBen Widawsky 	struct drm_i915_private *dev_priv = dev->dev_private;
30531c77388SBen Widawsky 	u32 val, rp_state_cap, hw_max, hw_min, non_oc_max;
30646ddf194SBen Widawsky 	ssize_t ret;
30746ddf194SBen Widawsky 
30846ddf194SBen Widawsky 	ret = kstrtou32(buf, 0, &val);
30946ddf194SBen Widawsky 	if (ret)
31046ddf194SBen Widawsky 		return ret;
31146ddf194SBen Widawsky 
3124fc688ceSJesse Barnes 	mutex_lock(&dev_priv->rps.hw_lock);
31346ddf194SBen Widawsky 
3140a073b84SJesse Barnes 	if (IS_VALLEYVIEW(dev_priv->dev)) {
3150a073b84SJesse Barnes 		val = vlv_freq_opcode(dev_priv->mem_freq, val);
3160a073b84SJesse Barnes 
3170a073b84SJesse Barnes 		hw_max = valleyview_rps_max_freq(dev_priv);
3180a073b84SJesse Barnes 		hw_min = valleyview_rps_min_freq(dev_priv);
3190a073b84SJesse Barnes 		non_oc_max = hw_max;
3200a073b84SJesse Barnes 	} else {
3210a073b84SJesse Barnes 		val /= GT_FREQUENCY_MULTIPLIER;
3220a073b84SJesse Barnes 
32346ddf194SBen Widawsky 		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
32431c77388SBen Widawsky 		hw_max = dev_priv->rps.hw_max;
32531c77388SBen Widawsky 		non_oc_max = (rp_state_cap & 0xff);
32646ddf194SBen Widawsky 		hw_min = ((rp_state_cap & 0xff0000) >> 16);
3270a073b84SJesse Barnes 	}
32846ddf194SBen Widawsky 
3290a073b84SJesse Barnes 	if (val < hw_min || val > hw_max ||
3300a073b84SJesse Barnes 	    val < dev_priv->rps.min_delay) {
3314fc688ceSJesse Barnes 		mutex_unlock(&dev_priv->rps.hw_lock);
33246ddf194SBen Widawsky 		return -EINVAL;
33346ddf194SBen Widawsky 	}
33446ddf194SBen Widawsky 
33531c77388SBen Widawsky 	if (val > non_oc_max)
33631c77388SBen Widawsky 		DRM_DEBUG("User requested overclocking to %d\n",
33731c77388SBen Widawsky 			  val * GT_FREQUENCY_MULTIPLIER);
33831c77388SBen Widawsky 
3390a073b84SJesse Barnes 	if (dev_priv->rps.cur_delay > val) {
3400a073b84SJesse Barnes 		if (IS_VALLEYVIEW(dev_priv->dev))
3410a073b84SJesse Barnes 			valleyview_set_rps(dev_priv->dev, val);
3420a073b84SJesse Barnes 		else
34346ddf194SBen Widawsky 			gen6_set_rps(dev_priv->dev, val);
3440a073b84SJesse Barnes 	}
34546ddf194SBen Widawsky 
34646ddf194SBen Widawsky 	dev_priv->rps.max_delay = val;
34746ddf194SBen Widawsky 
3484fc688ceSJesse Barnes 	mutex_unlock(&dev_priv->rps.hw_lock);
34946ddf194SBen Widawsky 
35046ddf194SBen Widawsky 	return count;
35146ddf194SBen Widawsky }
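/*
 * Example usage of the writable limits (paths assume the first GPU):
 *   echo 1100 > /sys/class/drm/card0/gt_max_freq_mhz
 *   echo  400 > /sys/class/drm/card0/gt_min_freq_mhz
 * Values are in MHz; on non-VLV parts they are truncated to 50 MHz steps,
 * rejected with -EINVAL if they fall outside the hardware range or cross
 * the opposite limit, and a request above the non-overclocked maximum is
 * accepted but logged as an overclock.  gt_min_freq_mhz_store() below
 * mirrors this handling for the lower bound.
 */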
35246ddf194SBen Widawsky 
353df6eedc8SBen Widawsky static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
354df6eedc8SBen Widawsky {
355*14c8d110SDave Airlie 	struct drm_minor *minor = dev_to_drm_minor(kdev);
356df6eedc8SBen Widawsky 	struct drm_device *dev = minor->dev;
357df6eedc8SBen Widawsky 	struct drm_i915_private *dev_priv = dev->dev_private;
358df6eedc8SBen Widawsky 	int ret;
359df6eedc8SBen Widawsky 
3604fc688ceSJesse Barnes 	mutex_lock(&dev_priv->rps.hw_lock);
3610a073b84SJesse Barnes 	if (IS_VALLEYVIEW(dev_priv->dev))
3620a073b84SJesse Barnes 		ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.min_delay);
3630a073b84SJesse Barnes 	else
364df6eedc8SBen Widawsky 		ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
3654fc688ceSJesse Barnes 	mutex_unlock(&dev_priv->rps.hw_lock);
366df6eedc8SBen Widawsky 
3673e2a1556SJani Nikula 	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
368df6eedc8SBen Widawsky }
369df6eedc8SBen Widawsky 
37046ddf194SBen Widawsky static ssize_t gt_min_freq_mhz_store(struct device *kdev,
37146ddf194SBen Widawsky 				     struct device_attribute *attr,
37246ddf194SBen Widawsky 				     const char *buf, size_t count)
37346ddf194SBen Widawsky {
374*14c8d110SDave Airlie 	struct drm_minor *minor = dev_to_drm_minor(kdev);
37546ddf194SBen Widawsky 	struct drm_device *dev = minor->dev;
37646ddf194SBen Widawsky 	struct drm_i915_private *dev_priv = dev->dev_private;
37746ddf194SBen Widawsky 	u32 val, rp_state_cap, hw_max, hw_min;
37846ddf194SBen Widawsky 	ssize_t ret;
37946ddf194SBen Widawsky 
38046ddf194SBen Widawsky 	ret = kstrtou32(buf, 0, &val);
38146ddf194SBen Widawsky 	if (ret)
38246ddf194SBen Widawsky 		return ret;
38346ddf194SBen Widawsky 
3844fc688ceSJesse Barnes 	mutex_lock(&dev_priv->rps.hw_lock);
38546ddf194SBen Widawsky 
3860a073b84SJesse Barnes 	if (IS_VALLEYVIEW(dev)) {
3870a073b84SJesse Barnes 		val = vlv_freq_opcode(dev_priv->mem_freq, val);
3880a073b84SJesse Barnes 
3890a073b84SJesse Barnes 		hw_max = valleyview_rps_max_freq(dev_priv);
3900a073b84SJesse Barnes 		hw_min = valleyview_rps_min_freq(dev_priv);
3910a073b84SJesse Barnes 	} else {
3920a073b84SJesse Barnes 		val /= GT_FREQUENCY_MULTIPLIER;
3930a073b84SJesse Barnes 
39446ddf194SBen Widawsky 		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
39531c77388SBen Widawsky 		hw_max = dev_priv->rps.hw_max;
39646ddf194SBen Widawsky 		hw_min = ((rp_state_cap & 0xff0000) >> 16);
3970a073b84SJesse Barnes 	}
39846ddf194SBen Widawsky 
39946ddf194SBen Widawsky 	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
4004fc688ceSJesse Barnes 		mutex_unlock(&dev_priv->rps.hw_lock);
40146ddf194SBen Widawsky 		return -EINVAL;
40246ddf194SBen Widawsky 	}
40346ddf194SBen Widawsky 
4040a073b84SJesse Barnes 	if (dev_priv->rps.cur_delay < val) {
4050a073b84SJesse Barnes 		if (IS_VALLEYVIEW(dev))
4060a073b84SJesse Barnes 			valleyview_set_rps(dev, val);
4070a073b84SJesse Barnes 		else
40846ddf194SBen Widawsky 			gen6_set_rps(dev_priv->dev, val);
4090a073b84SJesse Barnes 	}
41046ddf194SBen Widawsky 
41146ddf194SBen Widawsky 	dev_priv->rps.min_delay = val;
41246ddf194SBen Widawsky 
4134fc688ceSJesse Barnes 	mutex_unlock(&dev_priv->rps.hw_lock);
41446ddf194SBen Widawsky 
41546ddf194SBen Widawsky 	return count;
41646ddf194SBen Widawsky 
41746ddf194SBen Widawsky }
41846ddf194SBen Widawsky 
419df6eedc8SBen Widawsky static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
42046ddf194SBen Widawsky static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
42146ddf194SBen Widawsky static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);
422df6eedc8SBen Widawsky 
42397e4eed7SChris Wilson static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL);
424ac6ae347SBen Widawsky 
425ac6ae347SBen Widawsky static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
426ac6ae347SBen Widawsky static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
427ac6ae347SBen Widawsky static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
428ac6ae347SBen Widawsky static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
429ac6ae347SBen Widawsky 
430ac6ae347SBen Widawsky /* For now we have a static number of RP states */
431ac6ae347SBen Widawsky static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
432ac6ae347SBen Widawsky {
433*14c8d110SDave Airlie 	struct drm_minor *minor = dev_to_drm_minor(kdev);
434ac6ae347SBen Widawsky 	struct drm_device *dev = minor->dev;
435ac6ae347SBen Widawsky 	struct drm_i915_private *dev_priv = dev->dev_private;
436ac6ae347SBen Widawsky 	u32 val, rp_state_cap;
437ac6ae347SBen Widawsky 	ssize_t ret;
438ac6ae347SBen Widawsky 
439ac6ae347SBen Widawsky 	ret = mutex_lock_interruptible(&dev->struct_mutex);
440ac6ae347SBen Widawsky 	if (ret)
441ac6ae347SBen Widawsky 		return ret;
442ac6ae347SBen Widawsky 	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
443ac6ae347SBen Widawsky 	mutex_unlock(&dev->struct_mutex);
444ac6ae347SBen Widawsky 
445ac6ae347SBen Widawsky 	if (attr == &dev_attr_gt_RP0_freq_mhz) {
446ac6ae347SBen Widawsky 		val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER;
447ac6ae347SBen Widawsky 	} else if (attr == &dev_attr_gt_RP1_freq_mhz) {
448ac6ae347SBen Widawsky 		val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER;
449ac6ae347SBen Widawsky 	} else if (attr == &dev_attr_gt_RPn_freq_mhz) {
450ac6ae347SBen Widawsky 		val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER;
451ac6ae347SBen Widawsky 	} else {
452ac6ae347SBen Widawsky 		BUG();
453ac6ae347SBen Widawsky 	}
4543e2a1556SJani Nikula 	return snprintf(buf, PAGE_SIZE, "%d\n", val);
455ac6ae347SBen Widawsky }
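/*
 * GEN6_RP_STATE_CAP layout as used above: bits 7:0 hold RP0 (the maximum
 * non-overclocked frequency), bits 15:8 RP1 and bits 23:16 RPn (the
 * minimum), all in 50 MHz units, hence the GT_FREQUENCY_MULTIPLIER scaling.
 */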
456ac6ae347SBen Widawsky 
457df6eedc8SBen Widawsky static const struct attribute *gen6_attrs[] = {
458df6eedc8SBen Widawsky 	&dev_attr_gt_cur_freq_mhz.attr,
459df6eedc8SBen Widawsky 	&dev_attr_gt_max_freq_mhz.attr,
460df6eedc8SBen Widawsky 	&dev_attr_gt_min_freq_mhz.attr,
461ac6ae347SBen Widawsky 	&dev_attr_gt_RP0_freq_mhz.attr,
462ac6ae347SBen Widawsky 	&dev_attr_gt_RP1_freq_mhz.attr,
463ac6ae347SBen Widawsky 	&dev_attr_gt_RPn_freq_mhz.attr,
464df6eedc8SBen Widawsky 	NULL,
465df6eedc8SBen Widawsky };
466df6eedc8SBen Widawsky 
46797e4eed7SChris Wilson static const struct attribute *vlv_attrs[] = {
46897e4eed7SChris Wilson 	&dev_attr_gt_cur_freq_mhz.attr,
46997e4eed7SChris Wilson 	&dev_attr_gt_max_freq_mhz.attr,
47097e4eed7SChris Wilson 	&dev_attr_gt_min_freq_mhz.attr,
47197e4eed7SChris Wilson 	&dev_attr_vlv_rpe_freq_mhz.attr,
47297e4eed7SChris Wilson 	NULL,
47397e4eed7SChris Wilson };
47497e4eed7SChris Wilson 
475ef86ddceSMika Kuoppala static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
476ef86ddceSMika Kuoppala 				struct bin_attribute *attr, char *buf,
477ef86ddceSMika Kuoppala 				loff_t off, size_t count)
478ef86ddceSMika Kuoppala {
480ef86ddceSMika Kuoppala 	struct device *kdev = container_of(kobj, struct device, kobj);
481*14c8d110SDave Airlie 	struct drm_minor *minor = dev_to_drm_minor(kdev);
482ef86ddceSMika Kuoppala 	struct drm_device *dev = minor->dev;
483ef86ddceSMika Kuoppala 	struct i915_error_state_file_priv error_priv;
484ef86ddceSMika Kuoppala 	struct drm_i915_error_state_buf error_str;
485ef86ddceSMika Kuoppala 	ssize_t ret_count = 0;
486ef86ddceSMika Kuoppala 	int ret;
487ef86ddceSMika Kuoppala 
488ef86ddceSMika Kuoppala 	memset(&error_priv, 0, sizeof(error_priv));
489ef86ddceSMika Kuoppala 
490ef86ddceSMika Kuoppala 	ret = i915_error_state_buf_init(&error_str, count, off);
491ef86ddceSMika Kuoppala 	if (ret)
492ef86ddceSMika Kuoppala 		return ret;
493ef86ddceSMika Kuoppala 
494ef86ddceSMika Kuoppala 	error_priv.dev = dev;
495ef86ddceSMika Kuoppala 	i915_error_state_get(dev, &error_priv);
496ef86ddceSMika Kuoppala 
497ef86ddceSMika Kuoppala 	ret = i915_error_state_to_str(&error_str, &error_priv);
498ef86ddceSMika Kuoppala 	if (ret)
499ef86ddceSMika Kuoppala 		goto out;
500ef86ddceSMika Kuoppala 
501ef86ddceSMika Kuoppala 	ret_count = count < error_str.bytes ? count : error_str.bytes;
502ef86ddceSMika Kuoppala 
503ef86ddceSMika Kuoppala 	memcpy(buf, error_str.buf, ret_count);
504ef86ddceSMika Kuoppala out:
505ef86ddceSMika Kuoppala 	i915_error_state_put(&error_priv);
506ef86ddceSMika Kuoppala 	i915_error_state_buf_release(&error_str);
507ef86ddceSMika Kuoppala 
508ef86ddceSMika Kuoppala 	return ret ?: ret_count;
509ef86ddceSMika Kuoppala }
510ef86ddceSMika Kuoppala 
511ef86ddceSMika Kuoppala static ssize_t error_state_write(struct file *file, struct kobject *kobj,
512ef86ddceSMika Kuoppala 				 struct bin_attribute *attr, char *buf,
513ef86ddceSMika Kuoppala 				 loff_t off, size_t count)
514ef86ddceSMika Kuoppala {
515ef86ddceSMika Kuoppala 	struct device *kdev = container_of(kobj, struct device, kobj);
516*14c8d110SDave Airlie 	struct drm_minor *minor = dev_to_drm_minor(kdev);
517ef86ddceSMika Kuoppala 	struct drm_device *dev = minor->dev;
518ef86ddceSMika Kuoppala 	int ret;
519ef86ddceSMika Kuoppala 
520ef86ddceSMika Kuoppala 	DRM_DEBUG_DRIVER("Resetting error state\n");
521ef86ddceSMika Kuoppala 
522ef86ddceSMika Kuoppala 	ret = mutex_lock_interruptible(&dev->struct_mutex);
523ef86ddceSMika Kuoppala 	if (ret)
524ef86ddceSMika Kuoppala 		return ret;
525ef86ddceSMika Kuoppala 
526ef86ddceSMika Kuoppala 	i915_destroy_error_state(dev);
527ef86ddceSMika Kuoppala 	mutex_unlock(&dev->struct_mutex);
528ef86ddceSMika Kuoppala 
529ef86ddceSMika Kuoppala 	return count;
530ef86ddceSMika Kuoppala }
531ef86ddceSMika Kuoppala 
532ef86ddceSMika Kuoppala static struct bin_attribute error_state_attr = {
533ef86ddceSMika Kuoppala 	.attr.name = "error",
534ef86ddceSMika Kuoppala 	.attr.mode = S_IRUSR | S_IWUSR,
535ef86ddceSMika Kuoppala 	.size = 0,
536ef86ddceSMika Kuoppala 	.read = error_state_read,
537ef86ddceSMika Kuoppala 	.write = error_state_write,
538ef86ddceSMika Kuoppala };
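/*
 * Example usage (path assumes the first GPU):
 *   cat /sys/class/drm/card0/error > /tmp/gpu-hang.txt   # dump last capture
 *   echo 1 > /sys/class/drm/card0/error                  # clear it
 * Any write clears the captured error state; the written bytes themselves
 * are ignored.
 */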
539ef86ddceSMika Kuoppala 
5400136db58SBen Widawsky void i915_setup_sysfs(struct drm_device *dev)
5410136db58SBen Widawsky {
5420136db58SBen Widawsky 	int ret;
5430136db58SBen Widawsky 
5448c3f929bSBen Widawsky #ifdef CONFIG_PM
545112abd29SDaniel Vetter 	if (INTEL_INFO(dev)->gen >= 6) {
546112abd29SDaniel Vetter 		ret = sysfs_merge_group(&dev->primary->kdev.kobj,
547112abd29SDaniel Vetter 					&rc6_attr_group);
5480136db58SBen Widawsky 		if (ret)
54984bc7581SBen Widawsky 			DRM_ERROR("RC6 residency sysfs setup failed\n");
550112abd29SDaniel Vetter 	}
5518c3f929bSBen Widawsky #endif
552040d2baaSBen Widawsky 	if (HAS_L3_DPF(dev)) {
55384bc7581SBen Widawsky 		ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs);
55484bc7581SBen Widawsky 		if (ret)
55584bc7581SBen Widawsky 			DRM_ERROR("l3 parity sysfs setup failed\n");
55635a85ac6SBen Widawsky 
55735a85ac6SBen Widawsky 		if (NUM_L3_SLICES(dev) > 1) {
55835a85ac6SBen Widawsky 			ret = device_create_bin_file(&dev->primary->kdev,
55935a85ac6SBen Widawsky 						     &dpf_attrs_1);
56035a85ac6SBen Widawsky 			if (ret)
56135a85ac6SBen Widawsky 				DRM_ERROR("l3 parity slice 1 setup failed\n");
56235a85ac6SBen Widawsky 		}
5630136db58SBen Widawsky 	}
564df6eedc8SBen Widawsky 
56597e4eed7SChris Wilson 	ret = 0;
56697e4eed7SChris Wilson 	if (IS_VALLEYVIEW(dev))
56797e4eed7SChris Wilson 		ret = sysfs_create_files(&dev->primary->kdev.kobj, vlv_attrs);
56897e4eed7SChris Wilson 	else if (INTEL_INFO(dev)->gen >= 6)
569df6eedc8SBen Widawsky 		ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs);
570df6eedc8SBen Widawsky 	if (ret)
57197e4eed7SChris Wilson 		DRM_ERROR("RPS sysfs setup failed\n");
572ef86ddceSMika Kuoppala 
573ef86ddceSMika Kuoppala 	ret = sysfs_create_bin_file(&dev->primary->kdev.kobj,
574ef86ddceSMika Kuoppala 				    &error_state_attr);
575ef86ddceSMika Kuoppala 	if (ret)
576ef86ddceSMika Kuoppala 		DRM_ERROR("error_state sysfs setup failed\n");
577112abd29SDaniel Vetter }
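/*
 * Resulting layout for the first GPU (illustrative; which files exist
 * depends on the platform):
 *   /sys/class/drm/card0/power/rc6_enable, rc6*_residency_ms  (gen6+, CONFIG_PM)
 *   /sys/class/drm/card0/l3_parity[_slice_1]                  (HAS_L3_DPF)
 *   /sys/class/drm/card0/gt_*_freq_mhz                        (gen6+/Valleyview)
 *   /sys/class/drm/card0/error                                (all platforms)
 */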
5780136db58SBen Widawsky 
5790136db58SBen Widawsky void i915_teardown_sysfs(struct drm_device *dev)
5800136db58SBen Widawsky {
581ef86ddceSMika Kuoppala 	sysfs_remove_bin_file(&dev->primary->kdev.kobj, &error_state_attr);
58297e4eed7SChris Wilson 	if (IS_VALLEYVIEW(dev))
58397e4eed7SChris Wilson 		sysfs_remove_files(&dev->primary->kdev.kobj, vlv_attrs);
58497e4eed7SChris Wilson 	else
585df6eedc8SBen Widawsky 		sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
58635a85ac6SBen Widawsky 	device_remove_bin_file(&dev->primary->kdev,  &dpf_attrs_1);
58784bc7581SBen Widawsky 	device_remove_bin_file(&dev->primary->kdev,  &dpf_attrs);
588853c70e8SBen Widawsky #ifdef CONFIG_PM
5890136db58SBen Widawsky 	sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
590853c70e8SBen Widawsky #endif
5910136db58SBen Widawsky }
592