xref: /openbmc/linux/drivers/gpu/drm/i915/i915_query.c (revision cb3908c133f1285069673f11ad651d14ae0406cf)
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/nospec.h>

#include "i915_drv.h"
#include "i915_query.h"
#include <uapi/drm/i915_drm.h>

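/*
 * Validate one query item and prime it for use: a zero item length is a
 * size probe and simply reports how many bytes the query needs, while a
 * non-zero length must be large enough for the full reply.  The item
 * header (query_sz bytes) is copied in, and the whole total_length
 * destination buffer is checked with access_ok() so that the later
 * __copy_to_user() calls are safe.
 */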
static int copy_query_item(void *query_hdr, size_t query_sz,
			   u32 total_length,
			   struct drm_i915_query_item *query_item)
{
	if (query_item->length == 0)
		return total_length;

	if (query_item->length < total_length)
		return -EINVAL;

	if (copy_from_user(query_hdr, u64_to_user_ptr(query_item->data_ptr),
			   query_sz))
		return -EFAULT;

	if (!access_ok(u64_to_user_ptr(query_item->data_ptr),
		       total_length))
		return -EFAULT;

	return 0;
}

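/*
 * DRM_I915_QUERY_TOPOLOGY_INFO: report the slice/subslice/EU layout of the
 * device.  The reply is a struct drm_i915_query_topology_info header
 * followed immediately by the slice mask, the subslice masks and the EU
 * masks; the offsets and strides of those arrays are filled into the
 * header so userspace can walk them.  Returns the total number of bytes
 * needed, or a negative error code.
 */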
static int query_topology_info(struct drm_i915_private *dev_priv,
			       struct drm_i915_query_item *query_item)
{
	const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	struct drm_i915_query_topology_info topo;
	u32 slice_length, subslice_length, eu_length, total_length;
	int ret;

	if (query_item->flags != 0)
		return -EINVAL;

	if (sseu->max_slices == 0)
		return -ENODEV;

	BUILD_BUG_ON(sizeof(u8) != sizeof(sseu->slice_mask));

	slice_length = sizeof(sseu->slice_mask);
	subslice_length = sseu->max_slices *
		DIV_ROUND_UP(sseu->max_subslices, BITS_PER_BYTE);
	eu_length = sseu->max_slices * sseu->max_subslices *
		DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE);

	total_length = sizeof(topo) + slice_length + subslice_length + eu_length;

	ret = copy_query_item(&topo, sizeof(topo), total_length,
			      query_item);
	if (ret != 0)
		return ret;

	if (topo.flags != 0)
		return -EINVAL;

	memset(&topo, 0, sizeof(topo));
	topo.max_slices = sseu->max_slices;
	topo.max_subslices = sseu->max_subslices;
	topo.max_eus_per_subslice = sseu->max_eus_per_subslice;

	topo.subslice_offset = slice_length;
	topo.subslice_stride = DIV_ROUND_UP(sseu->max_subslices, BITS_PER_BYTE);
	topo.eu_offset = slice_length + subslice_length;
	topo.eu_stride =
		DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE);

	if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr),
			   &topo, sizeof(topo)))
		return -EFAULT;

	if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr + sizeof(topo)),
			   &sseu->slice_mask, slice_length))
		return -EFAULT;

	if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr +
					   sizeof(topo) + slice_length),
			   sseu->subslice_mask, subslice_length))
		return -EFAULT;

	if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr +
					   sizeof(topo) +
					   slice_length + subslice_length),
			   sseu->eu_mask, eu_length))
		return -EFAULT;

	return total_length;
}

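/*
 * DRM_I915_QUERY_ENGINE_INFO: report every engine exposed through the uABI.
 * The reply is a struct drm_i915_query_engine_info header followed by one
 * struct drm_i915_engine_info (class, instance and capabilities) per
 * engine; num_engines is filled in by the kernel and must be zero on
 * input, as must the reserved fields.
 */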
static int
query_engine_info(struct drm_i915_private *i915,
		  struct drm_i915_query_item *query_item)
{
	struct drm_i915_query_engine_info __user *query_ptr =
				u64_to_user_ptr(query_item->data_ptr);
	struct drm_i915_engine_info __user *info_ptr;
	struct drm_i915_query_engine_info query;
	struct drm_i915_engine_info info = { };
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int len, ret;

	if (query_item->flags)
		return -EINVAL;

	len = sizeof(struct drm_i915_query_engine_info) +
	      RUNTIME_INFO(i915)->num_engines *
	      sizeof(struct drm_i915_engine_info);

	ret = copy_query_item(&query, sizeof(query), len, query_item);
	if (ret != 0)
		return ret;

	if (query.num_engines || query.rsvd[0] || query.rsvd[1] ||
	    query.rsvd[2])
		return -EINVAL;

	info_ptr = &query_ptr->engines[0];

	for_each_engine(engine, i915, id) {
		info.engine.engine_class = engine->uabi_class;
		info.engine.engine_instance = engine->instance;
		info.capabilities = engine->uabi_capabilities;

		if (__copy_to_user(info_ptr, &info, sizeof(info)))
			return -EFAULT;

		query.num_engines++;
		info_ptr++;
	}

	if (__copy_to_user(query_ptr, &query, sizeof(query)))
		return -EFAULT;

	return len;
}

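/*
 * Dispatch table for the individual queries.  It is indexed by
 * query_id - 1, so the order of the entries must match the
 * DRM_I915_QUERY_* values in uapi/drm/i915_drm.h.
 */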
static int (* const i915_query_funcs[])(struct drm_i915_private *dev_priv,
					struct drm_i915_query_item *query_item) = {
	query_topology_info,
	query_engine_info,
};

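/*
 * Main DRM_IOCTL_I915_QUERY entry point.  Each item in the array is
 * handled independently: the dispatch index is bounds-checked and
 * hardened against speculation with array_index_nospec(), and the
 * per-item result (the number of bytes needed or written on success, or
 * a negative error code) is written back into that item's length field.
 *
 * A minimal userspace sketch of the intended two-pass usage, for
 * illustration only; it assumes a libdrm-style drmIoctl() wrapper and an
 * already open DRM fd:
 *
 *	struct drm_i915_query_item item = {
 *		.query_id = DRM_I915_QUERY_ENGINE_INFO,
 *	};
 *	struct drm_i915_query query = {
 *		.num_items = 1,
 *		.items_ptr = (uintptr_t)&item,
 *	};
 *
 *	First pass with item.length == 0: the kernel writes the required
 *	size back into item.length.
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_QUERY, &query);
 *
 *	Second pass with a zeroed reply buffer (the header the kernel reads
 *	back must be clear): the buffer is filled with the reply.
 *
 *	item.data_ptr = (uintptr_t)calloc(1, item.length);
 *	drmIoctl(fd, DRM_IOCTL_I915_QUERY, &query);
 */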
int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_query *args = data;
	struct drm_i915_query_item __user *user_item_ptr =
		u64_to_user_ptr(args->items_ptr);
	u32 i;

	if (args->flags != 0)
		return -EINVAL;

	for (i = 0; i < args->num_items; i++, user_item_ptr++) {
		struct drm_i915_query_item item;
		unsigned long func_idx;
		int ret;

		if (copy_from_user(&item, user_item_ptr, sizeof(item)))
			return -EFAULT;

		if (item.query_id == 0)
			return -EINVAL;

		if (overflows_type(item.query_id - 1, unsigned long))
			return -EINVAL;

		func_idx = item.query_id - 1;

		ret = -EINVAL;
		if (func_idx < ARRAY_SIZE(i915_query_funcs)) {
			func_idx = array_index_nospec(func_idx,
						      ARRAY_SIZE(i915_query_funcs));
			ret = i915_query_funcs[func_idx](dev_priv, &item);
		}

		/* Only write the length back to userspace if they differ. */
		if (ret != item.length && put_user(ret, &user_item_ptr->length))
			return -EFAULT;
	}

	return 0;
}