/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/nospec.h>

#include "i915_drv.h"
#include "i915_perf.h"
#include "i915_query.h"
#include <uapi/drm/i915_drm.h>

/*
 * Shared header handling for DRM_IOCTL_I915_QUERY items.
 *
 * Every query follows a two-step protocol: a first call with
 * query_item->length == 0 probes for the required buffer size (returned
 * as a positive value); a second call with a large-enough buffer copies
 * in the first @query_sz bytes of the user buffer so the caller can
 * validate the user-supplied header fields.
 *
 * Returns @total_length on a size probe, 0 once @query_hdr has been
 * filled, -EINVAL if the user buffer is too small, or -EFAULT on a
 * faulting user pointer.
 */
static int copy_query_item(void *query_hdr, size_t query_sz,
			   u32 total_length,
			   struct drm_i915_query_item *query_item)
{
	if (query_item->length == 0)
		return total_length;

	if (query_item->length < total_length)
		return -EINVAL;

	if (copy_from_user(query_hdr, u64_to_user_ptr(query_item->data_ptr),
			   query_sz))
		return -EFAULT;

	return 0;
}

/*
 * DRM_I915_QUERY_TOPOLOGY_INFO: report the slice/subslice/EU topology.
 *
 * The user buffer is laid out as a struct drm_i915_query_topology_info
 * immediately followed by three variable-length mask arrays:
 *   [topo][slice_mask][subslice_mask...][eu_mask...]
 * The offsets/strides written into @topo tell userspace where each
 * array begins relative to the end of the struct.
 *
 * Returns the total payload size on success or a negative error code.
 */
static int query_topology_info(struct drm_i915_private *dev_priv,
			       struct drm_i915_query_item *query_item)
{
	const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	struct drm_i915_query_topology_info topo;
	u32 slice_length, subslice_length, eu_length, total_length;
	int ret;

	/* No flags are defined for this query. */
	if (query_item->flags != 0)
		return -EINVAL;

	/* No SSEU topology to report (e.g. pre-Gen8 style hardware). */
	if (sseu->max_slices == 0)
		return -ENODEV;

	/* The uAPI layout below assumes the slice mask fits in one byte. */
	BUILD_BUG_ON(sizeof(u8) != sizeof(sseu->slice_mask));

	slice_length = sizeof(sseu->slice_mask);
	subslice_length = sseu->max_slices * sseu->ss_stride;
	eu_length = sseu->max_slices * sseu->max_subslices * sseu->eu_stride;
	total_length = sizeof(topo) + slice_length + subslice_length +
		       eu_length;

	ret = copy_query_item(&topo, sizeof(topo), total_length,
			      query_item);
	if (ret != 0)
		return ret;

	/* Reject user headers with unknown flag bits set. */
	if (topo.flags != 0)
		return -EINVAL;

	/* Clear the header so reserved fields read back as zero. */
	memset(&topo, 0, sizeof(topo));
	topo.max_slices = sseu->max_slices;
	topo.max_subslices = sseu->max_subslices;
	topo.max_eus_per_subslice = sseu->max_eus_per_subslice;

	/* Offsets are relative to the end of the fixed-size struct. */
	topo.subslice_offset = slice_length;
	topo.subslice_stride = sseu->ss_stride;
	topo.eu_offset = slice_length + subslice_length;
	topo.eu_stride = sseu->eu_stride;

	/* Copy out the header followed by the three mask arrays in order. */
	if (copy_to_user(u64_to_user_ptr(query_item->data_ptr),
			   &topo, sizeof(topo)))
		return -EFAULT;

	if (copy_to_user(u64_to_user_ptr(query_item->data_ptr + sizeof(topo)),
			   &sseu->slice_mask, slice_length))
		return -EFAULT;

	if (copy_to_user(u64_to_user_ptr(query_item->data_ptr +
					   sizeof(topo) + slice_length),
			   sseu->subslice_mask, subslice_length))
		return -EFAULT;

	if (copy_to_user(u64_to_user_ptr(query_item->data_ptr +
					   sizeof(topo) +
					   slice_length + subslice_length),
			   sseu->eu_mask, eu_length))
		return -EFAULT;

	return total_length;
}

/*
 * DRM_I915_QUERY_ENGINE_INFO: enumerate the uabi-visible engines.
 *
 * The user buffer holds a struct drm_i915_query_engine_info header
 * followed by one struct drm_i915_engine_info per engine.
 *
 * Returns the total payload size on success or a negative error code.
 */
static int
query_engine_info(struct drm_i915_private *i915,
		  struct drm_i915_query_item *query_item)
{
	struct drm_i915_query_engine_info __user *query_ptr =
		u64_to_user_ptr(query_item->data_ptr);
	struct drm_i915_engine_info __user *info_ptr;
	struct drm_i915_query_engine_info query;
	struct drm_i915_engine_info info = { };
	unsigned int num_uabi_engines = 0;
	struct intel_engine_cs *engine;
	int len, ret;

	/* No flags are defined for this query. */
	if (query_item->flags)
		return -EINVAL;

	for_each_uabi_engine(engine, i915)
		num_uabi_engines++;

	len = sizeof(struct drm_i915_query_engine_info) +
	      num_uabi_engines * sizeof(struct drm_i915_engine_info);

	ret = copy_query_item(&query, sizeof(query), len, query_item);
	if (ret != 0)
		return ret;

	/* The header fields must be zero on input. */
	if (query.num_engines || query.rsvd[0] || query.rsvd[1] ||
	    query.rsvd[2])
		return -EINVAL;

	info_ptr = &query_ptr->engines[0];

	/*
	 * Copy out one engine descriptor per uabi engine, counting them
	 * as we go; the count is written back in the header afterwards.
	 */
	for_each_uabi_engine(engine, i915) {
		info.engine.engine_class = engine->uabi_class;
		info.engine.engine_instance = engine->uabi_instance;
		info.capabilities = engine->uabi_capabilities;

		if (copy_to_user(info_ptr, &info, sizeof(info)))
			return -EFAULT;

		query.num_engines++;
		info_ptr++;
	}

	if (copy_to_user(query_ptr, &query, sizeof(query)))
		return -EFAULT;

	return len;
}

/*
 * Validate a user-supplied register-array request prior to copying.
 *
 * @user_n_regs == 0 means the caller only wants the register count back
 * (see copy_perf_config_registers_or_number()); otherwise the supplied
 * array must be large enough to hold @kernel_n_regs entries.
 *
 * Returns 0 if the request is acceptable, -EINVAL otherwise.
 */
static int can_copy_perf_config_registers_or_number(u32 user_n_regs,
						    u64 user_regs_ptr,
						    u32 kernel_n_regs)
{
	/*
	 * We'll just put the number of registers, and won't copy the
	 * register.
	 */
	if (user_n_regs == 0)
		return 0;

	if (user_n_regs < kernel_n_regs)
		return -EINVAL;

	return 0;
}

/*
 * Copy an OA config register array to userspace, or just report its size.
 *
 * Each register is written as a (mmio offset, value) pair of u32s. When
 * the caller passed *user_n_regs == 0 only the count is returned; in
 * both cases *user_n_regs is updated to the kernel's register count.
 *
 * Returns 0 on success, -EFAULT on a faulting user pointer.
 */
static int copy_perf_config_registers_or_number(const struct i915_oa_reg *kernel_regs,
						u32 kernel_n_regs,
						u64 user_regs_ptr,
						u32 *user_n_regs)
{
	u32 __user *p = u64_to_user_ptr(user_regs_ptr);
	u32 r;

	if (*user_n_regs == 0) {
		*user_n_regs = kernel_n_regs;
		return 0;
	}

	*user_n_regs = kernel_n_regs;

	/*
	 * Batch the user-space writes under a single access_begin/end
	 * section; unsafe_put_user() jumps to Efault on a fault.
	 */
	if (!user_write_access_begin(p, 2 * sizeof(u32) * kernel_n_regs))
		return -EFAULT;

	for (r = 0; r < kernel_n_regs; r++, p += 2) {
		unsafe_put_user(i915_mmio_reg_offset(kernel_regs[r].addr),
				p, Efault);
		unsafe_put_user(kernel_regs[r].value, p + 1, Efault);
	}
	user_write_access_end();
	return 0;
Efault:
	user_write_access_end();
	return -EFAULT;
}

/*
 * DRM_I915_QUERY_PERF_CONFIG (DATA_FOR_UUID / DATA_FOR_ID): return the
 * register lists of a single OA configuration.
 *
 * The user buffer is a struct drm_i915_query_perf_config (whose uuid or
 * config field selects the configuration, depending on @use_uuid)
 * followed by a struct drm_i915_perf_oa_config describing where the
 * boolean/flex/mux register arrays should be copied.
 *
 * Returns the total payload size on success or a negative error code.
 */
static int query_perf_config_data(struct drm_i915_private *i915,
				  struct drm_i915_query_item *query_item,
				  bool use_uuid)
{
	struct drm_i915_query_perf_config __user *user_query_config_ptr =
		u64_to_user_ptr(query_item->data_ptr);
	struct drm_i915_perf_oa_config __user *user_config_ptr =
		u64_to_user_ptr(query_item->data_ptr +
				sizeof(struct drm_i915_query_perf_config));
	struct drm_i915_perf_oa_config user_config;
	struct i915_perf *perf = &i915->perf;
	struct i915_oa_config *oa_config;
	char uuid[UUID_STRING_LEN + 1];
	u64 config_id;
	u32 flags, total_size;
	int ret;

	/* Perf support was never initialized on this device. */
	if (!perf->i915)
		return -ENODEV;

	total_size =
		sizeof(struct drm_i915_query_perf_config) +
		sizeof(struct drm_i915_perf_oa_config);

	if (query_item->length == 0)
		return total_size;

	if (query_item->length < total_size) {
		DRM_DEBUG("Invalid query config data item size=%u expected=%u\n",
			  query_item->length, total_size);
		return -EINVAL;
	}

	if (get_user(flags, &user_query_config_ptr->flags))
		return -EFAULT;

	/* No flags are defined inside the query struct itself. */
	if (flags != 0)
		return -EINVAL;

	if (use_uuid) {
		struct i915_oa_config *tmp;
		int id;

		/* Local buffer must fit the uAPI uuid plus a NUL. */
		BUILD_BUG_ON(sizeof(user_query_config_ptr->uuid) >= sizeof(uuid));

		memset(&uuid, 0, sizeof(uuid));
		if (copy_from_user(uuid, user_query_config_ptr->uuid,
				   sizeof(user_query_config_ptr->uuid)))
			return -EFAULT;

		/*
		 * Walk the metrics IDR under RCU looking for a matching
		 * uuid; take a reference before dropping the read lock.
		 */
		oa_config = NULL;
		rcu_read_lock();
		idr_for_each_entry(&perf->metrics_idr, tmp, id) {
			if (!strcmp(tmp->uuid, uuid)) {
				oa_config = i915_oa_config_get(tmp);
				break;
			}
		}
		rcu_read_unlock();
	} else {
		if (get_user(config_id, &user_query_config_ptr->config))
			return -EFAULT;

		/* Returns a referenced config or NULL. */
		oa_config = i915_perf_get_oa_config(perf, config_id);
	}
	if (!oa_config)
		return -ENOENT;

	if (copy_from_user(&user_config, user_config_ptr, sizeof(user_config))) {
		ret = -EFAULT;
		goto out;
	}

	/*
	 * Validate all three destination arrays up front so we either
	 * copy everything or nothing.
	 */
	ret = can_copy_perf_config_registers_or_number(user_config.n_boolean_regs,
						       user_config.boolean_regs_ptr,
						       oa_config->b_counter_regs_len);
	if (ret)
		goto out;

	ret = can_copy_perf_config_registers_or_number(user_config.n_flex_regs,
						       user_config.flex_regs_ptr,
						       oa_config->flex_regs_len);
	if (ret)
		goto out;

	ret = can_copy_perf_config_registers_or_number(user_config.n_mux_regs,
						       user_config.mux_regs_ptr,
						       oa_config->mux_regs_len);
	if (ret)
		goto out;

	ret = copy_perf_config_registers_or_number(oa_config->b_counter_regs,
						   oa_config->b_counter_regs_len,
						   user_config.boolean_regs_ptr,
						   &user_config.n_boolean_regs);
	if (ret)
		goto out;

	ret = copy_perf_config_registers_or_number(oa_config->flex_regs,
						   oa_config->flex_regs_len,
						   user_config.flex_regs_ptr,
						   &user_config.n_flex_regs);
	if (ret)
		goto out;

	ret = copy_perf_config_registers_or_number(oa_config->mux_regs,
						   oa_config->mux_regs_len,
						   user_config.mux_regs_ptr,
						   &user_config.n_mux_regs);
	if (ret)
		goto out;

	memcpy(user_config.uuid, oa_config->uuid, sizeof(user_config.uuid));

	/* Write back the header with the updated register counts. */
	if (copy_to_user(user_config_ptr, &user_config, sizeof(user_config))) {
		ret = -EFAULT;
		goto out;
	}

	ret = total_size;

out:
	/* Drop the reference taken by the uuid/id lookup above. */
	i915_oa_config_put(oa_config);
	return ret;
}

/* Payload size for a perf-config id list holding @count u64 ids. */
static size_t sizeof_perf_config_list(size_t count)
{
	return sizeof(struct drm_i915_query_perf_config) + sizeof(u64) * count;
}

/*
 * Size needed to list every registered OA config. Starts at 1 to
 * account for the built-in test config (id 1), which does not live in
 * the metrics IDR.
 */
static size_t sizeof_perf_metrics(struct i915_perf *perf)
{
	struct i915_oa_config *tmp;
	size_t i;
	int id;

	i = 1;
	rcu_read_lock();
	idr_for_each_entry(&perf->metrics_idr, tmp, id)
		i++;
	rcu_read_unlock();

	return sizeof_perf_config_list(i);
}

/*
 * DRM_I915_QUERY_PERF_CONFIG with DRM_I915_QUERY_PERF_CONFIG_LIST:
 * return the ids of all registered OA configurations.
 *
 * The user buffer is a struct drm_i915_query_perf_config (whose config
 * field receives the number of ids) followed by that many u64 ids.
 *
 * Returns the total payload size on success or a negative error code.
 */
static int query_perf_config_list(struct drm_i915_private *i915,
				  struct drm_i915_query_item *query_item)
{
	struct drm_i915_query_perf_config __user *user_query_config_ptr =
		u64_to_user_ptr(query_item->data_ptr);
	struct i915_perf *perf = &i915->perf;
	u64 *oa_config_ids = NULL;
	int alloc, n_configs;
	u32 flags;
	int ret;

	if (!perf->i915)
		return -ENODEV;

	if (query_item->length == 0)
		return sizeof_perf_metrics(perf);

	if (get_user(flags, &user_query_config_ptr->flags))
		return -EFAULT;

	if (flags != 0)
		return -EINVAL;

	/*
	 * Snapshot the id list without holding a lock across the
	 * allocation: size the array, drop RCU, krealloc, then re-walk.
	 * If configs were added in between (n_configs > alloc) retry
	 * with the larger count.
	 */
	n_configs = 1;
	do {
		struct i915_oa_config *tmp;
		u64 *ids;
		int id;

		ids = krealloc(oa_config_ids,
			       n_configs * sizeof(*oa_config_ids),
			       GFP_KERNEL);
		if (!ids)
			return -ENOMEM;

		/* Remember the capacity and restart the count from 0. */
		alloc = fetch_and_zero(&n_configs);

		ids[n_configs++] = 1ull; /* reserved for test_config */
		rcu_read_lock();
		idr_for_each_entry(&perf->metrics_idr, tmp, id) {
			/* Keep counting past the capacity to detect growth. */
			if (n_configs < alloc)
				ids[n_configs] = id;
			n_configs++;
		}
		rcu_read_unlock();

		oa_config_ids = ids;
	} while (n_configs > alloc);

	if (query_item->length < sizeof_perf_config_list(n_configs)) {
		DRM_DEBUG("Invalid query config list item size=%u expected=%zu\n",
			  query_item->length,
			  sizeof_perf_config_list(n_configs));
		kfree(oa_config_ids);
		return -EINVAL;
	}

	/* The id count is returned through the config field of the header. */
	if (put_user(n_configs, &user_query_config_ptr->config)) {
		kfree(oa_config_ids);
		return -EFAULT;
	}

	/* The id array immediately follows the header struct. */
	ret = copy_to_user(user_query_config_ptr + 1,
			   oa_config_ids,
			   n_configs * sizeof(*oa_config_ids));
	kfree(oa_config_ids);
	if (ret)
		return -EFAULT;

	return sizeof_perf_config_list(n_configs);
}

/*
 * DRM_I915_QUERY_PERF_CONFIG: dispatch on the sub-query selected by
 * query_item->flags.
 */
static int query_perf_config(struct drm_i915_private *i915,
			     struct drm_i915_query_item *query_item)
{
	switch (query_item->flags) {
	case DRM_I915_QUERY_PERF_CONFIG_LIST:
		return query_perf_config_list(i915, query_item);
	case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID:
		return query_perf_config_data(i915, query_item, true);
	case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID:
		return query_perf_config_data(i915, query_item, false);
	default:
		return -EINVAL;
	}
}

/*
 * Query handlers indexed by (query_id - 1); the order must match the
 * DRM_I915_QUERY_* uAPI ids.
 */
static int (* const i915_query_funcs[])(struct drm_i915_private *dev_priv,
					struct drm_i915_query_item *query_item) = {
	query_topology_info,
	query_engine_info,
	query_perf_config,
};

/*
 * DRM_IOCTL_I915_QUERY entry point: iterate over the user's query items
 * and run the handler each item's query_id selects.
 *
 * Each item's length field is overwritten with the handler's result
 * (payload size, or a negative error code); the ioctl itself returns 0
 * unless a user pointer faults or the arguments are malformed.
 */
int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_query *args = data;
	struct drm_i915_query_item __user *user_item_ptr =
		u64_to_user_ptr(args->items_ptr);
	u32 i;

	if (args->flags != 0)
		return -EINVAL;

	for (i = 0; i < args->num_items; i++, user_item_ptr++) {
		struct drm_i915_query_item item;
		unsigned long func_idx;
		int ret;

		if (copy_from_user(&item, user_item_ptr, sizeof(item)))
			return -EFAULT;

		if (item.query_id == 0)
			return -EINVAL;

		/* query_id is u64; reject ids that don't fit the index type. */
		if (overflows_type(item.query_id - 1, unsigned long))
			return -EINVAL;

		func_idx = item.query_id - 1;

		ret = -EINVAL;
		if (func_idx < ARRAY_SIZE(i915_query_funcs)) {
			/* Clamp the index against speculative execution. */
			func_idx = array_index_nospec(func_idx,
						      ARRAY_SIZE(i915_query_funcs));
			ret = i915_query_funcs[func_idx](dev_priv, &item);
		}

		/* Only write the length back to userspace if they differ. */
		if (ret != item.length && put_user(ret, &user_item_ptr->length))
			return -EFAULT;
	}

	return 0;
}