/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/nospec.h>

#include "i915_drv.h"
#include "i915_perf.h"
#include "i915_query.h"
#include <uapi/drm/i915_drm.h>

static int copy_query_item(void *query_hdr, size_t query_sz,
			   u32 total_length,
			   struct drm_i915_query_item *query_item)
{
	/* A length of 0 is a request for the required buffer size. */
	if (query_item->length == 0)
		return total_length;

	if (query_item->length < total_length)
		return -EINVAL;

	if (copy_from_user(query_hdr, u64_to_user_ptr(query_item->data_ptr),
			   query_sz))
		return -EFAULT;

	return 0;
}

static int query_topology_info(struct drm_i915_private *dev_priv,
			       struct drm_i915_query_item *query_item)
{
	const struct sseu_dev_info *sseu = &to_gt(dev_priv)->info.sseu;
	struct drm_i915_query_topology_info topo;
	u32 slice_length, subslice_length, eu_length, total_length;
	int ret;

	if (query_item->flags != 0)
		return -EINVAL;

	if (sseu->max_slices == 0)
		return -ENODEV;

	BUILD_BUG_ON(sizeof(u8) != sizeof(sseu->slice_mask));

	slice_length = sizeof(sseu->slice_mask);
	subslice_length = sseu->max_slices * sseu->ss_stride;
	eu_length = sseu->max_slices * sseu->max_subslices * sseu->eu_stride;
	total_length = sizeof(topo) + slice_length + subslice_length +
		       eu_length;

	ret = copy_query_item(&topo, sizeof(topo), total_length,
			      query_item);
	if (ret != 0)
		return ret;

	if (topo.flags != 0)
		return -EINVAL;

	memset(&topo, 0, sizeof(topo));
	topo.max_slices = sseu->max_slices;
	topo.max_subslices = sseu->max_subslices;
	topo.max_eus_per_subslice = sseu->max_eus_per_subslice;

	topo.subslice_offset = slice_length;
	topo.subslice_stride = sseu->ss_stride;
	topo.eu_offset = slice_length + subslice_length;
	topo.eu_stride = sseu->eu_stride;

	if (copy_to_user(u64_to_user_ptr(query_item->data_ptr),
			 &topo, sizeof(topo)))
		return -EFAULT;

	if (copy_to_user(u64_to_user_ptr(query_item->data_ptr + sizeof(topo)),
			 &sseu->slice_mask, slice_length))
		return -EFAULT;

	if (copy_to_user(u64_to_user_ptr(query_item->data_ptr +
					 sizeof(topo) + slice_length),
			 sseu->subslice_mask, subslice_length))
		return -EFAULT;

	if (copy_to_user(u64_to_user_ptr(query_item->data_ptr +
					 sizeof(topo) +
					 slice_length + subslice_length),
			 sseu->eu_mask, eu_length))
		return -EFAULT;

	return total_length;
}
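
/*
 * Illustrative userspace sketch (not part of this file): every query
 * follows the same two-pass pattern that copy_query_item() implements.
 * A first DRM_IOCTL_I915_QUERY call with item.length == 0 reports the
 * required buffer size; a second call with an allocated buffer fills it:
 *
 *	struct drm_i915_query_item item = {
 *		.query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
 *	};
 *	struct drm_i915_query q = {
 *		.num_items = 1,
 *		.items_ptr = (uintptr_t)&item,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_QUERY, &q);	// item.length := total size
 *	item.data_ptr = (uintptr_t)malloc(item.length);
 *	ioctl(fd, DRM_IOCTL_I915_QUERY, &q);	// second pass copies the data
 */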

static int
query_engine_info(struct drm_i915_private *i915,
		  struct drm_i915_query_item *query_item)
{
	struct drm_i915_query_engine_info __user *query_ptr =
				u64_to_user_ptr(query_item->data_ptr);
	struct drm_i915_engine_info __user *info_ptr;
	struct drm_i915_query_engine_info query;
	struct drm_i915_engine_info info = { };
	unsigned int num_uabi_engines = 0;
	struct intel_engine_cs *engine;
	int len, ret;

	if (query_item->flags)
		return -EINVAL;

	for_each_uabi_engine(engine, i915)
		num_uabi_engines++;

	len = struct_size(query_ptr, engines, num_uabi_engines);

	ret = copy_query_item(&query, sizeof(query), len, query_item);
	if (ret != 0)
		return ret;

	if (query.num_engines || query.rsvd[0] || query.rsvd[1] ||
	    query.rsvd[2])
		return -EINVAL;

	info_ptr = &query_ptr->engines[0];

	for_each_uabi_engine(engine, i915) {
		info.engine.engine_class = engine->uabi_class;
		info.engine.engine_instance = engine->uabi_instance;
		info.flags = I915_ENGINE_INFO_HAS_LOGICAL_INSTANCE;
		info.capabilities = engine->uabi_capabilities;
		info.logical_instance = ilog2(engine->logical_mask);

		if (copy_to_user(info_ptr, &info, sizeof(info)))
			return -EFAULT;

		query.num_engines++;
		info_ptr++;
	}

	if (copy_to_user(query_ptr, &query, sizeof(query)))
		return -EFAULT;

	return len;
}

static int can_copy_perf_config_registers_or_number(u32 user_n_regs,
						    u64 user_regs_ptr,
						    u32 kernel_n_regs)
{
	/*
	 * We'll just put the number of registers, and won't copy the
	 * registers themselves.
	 */
	if (user_n_regs == 0)
		return 0;

	if (user_n_regs < kernel_n_regs)
		return -EINVAL;

	return 0;
}

static int copy_perf_config_registers_or_number(const struct i915_oa_reg *kernel_regs,
						u32 kernel_n_regs,
						u64 user_regs_ptr,
						u32 *user_n_regs)
{
	u32 __user *p = u64_to_user_ptr(user_regs_ptr);
	u32 r;

	if (*user_n_regs == 0) {
		*user_n_regs = kernel_n_regs;
		return 0;
	}

	*user_n_regs = kernel_n_regs;

	if (!user_write_access_begin(p, 2 * sizeof(u32) * kernel_n_regs))
		return -EFAULT;

	for (r = 0; r < kernel_n_regs; r++, p += 2) {
		unsafe_put_user(i915_mmio_reg_offset(kernel_regs[r].addr),
				p, Efault);
		unsafe_put_user(kernel_regs[r].value, p + 1, Efault);
	}
	user_write_access_end();
	return 0;
Efault:
	user_write_access_end();
	return -EFAULT;
}

static int query_perf_config_data(struct drm_i915_private *i915,
				  struct drm_i915_query_item *query_item,
				  bool use_uuid)
{
	struct drm_i915_query_perf_config __user *user_query_config_ptr =
		u64_to_user_ptr(query_item->data_ptr);
	struct drm_i915_perf_oa_config __user *user_config_ptr =
		u64_to_user_ptr(query_item->data_ptr +
				sizeof(struct drm_i915_query_perf_config));
	struct drm_i915_perf_oa_config user_config;
	struct i915_perf *perf = &i915->perf;
	struct i915_oa_config *oa_config;
	char uuid[UUID_STRING_LEN + 1];
	u64 config_id;
	u32 flags, total_size;
	int ret;

	if (!perf->i915)
		return -ENODEV;

	total_size =
		sizeof(struct drm_i915_query_perf_config) +
		sizeof(struct drm_i915_perf_oa_config);

	if (query_item->length == 0)
		return total_size;

	if (query_item->length < total_size) {
		DRM_DEBUG("Invalid query config data item size=%u expected=%u\n",
			  query_item->length, total_size);
		return -EINVAL;
	}

	if (get_user(flags, &user_query_config_ptr->flags))
		return -EFAULT;

	if (flags != 0)
		return -EINVAL;

	if (use_uuid) {
		struct i915_oa_config *tmp;
		int id;

		BUILD_BUG_ON(sizeof(user_query_config_ptr->uuid) >= sizeof(uuid));

		memset(&uuid, 0, sizeof(uuid));
		if (copy_from_user(uuid, user_query_config_ptr->uuid,
				   sizeof(user_query_config_ptr->uuid)))
			return -EFAULT;

		oa_config = NULL;
		rcu_read_lock();
		idr_for_each_entry(&perf->metrics_idr, tmp, id) {
			if (!strcmp(tmp->uuid, uuid)) {
				oa_config = i915_oa_config_get(tmp);
				break;
			}
		}
		rcu_read_unlock();
	} else {
		if (get_user(config_id, &user_query_config_ptr->config))
			return -EFAULT;

		oa_config = i915_perf_get_oa_config(perf, config_id);
	}
	if (!oa_config)
		return -ENOENT;

	if (copy_from_user(&user_config, user_config_ptr, sizeof(user_config))) {
		ret = -EFAULT;
		goto out;
	}

	ret = can_copy_perf_config_registers_or_number(user_config.n_boolean_regs,
						       user_config.boolean_regs_ptr,
						       oa_config->b_counter_regs_len);
	if (ret)
		goto out;

	ret = can_copy_perf_config_registers_or_number(user_config.n_flex_regs,
						       user_config.flex_regs_ptr,
						       oa_config->flex_regs_len);
	if (ret)
		goto out;

	ret = can_copy_perf_config_registers_or_number(user_config.n_mux_regs,
						       user_config.mux_regs_ptr,
						       oa_config->mux_regs_len);
	if (ret)
		goto out;

	ret = copy_perf_config_registers_or_number(oa_config->b_counter_regs,
						   oa_config->b_counter_regs_len,
						   user_config.boolean_regs_ptr,
						   &user_config.n_boolean_regs);
	if (ret)
		goto out;

	ret = copy_perf_config_registers_or_number(oa_config->flex_regs,
						   oa_config->flex_regs_len,
						   user_config.flex_regs_ptr,
						   &user_config.n_flex_regs);
	if (ret)
		goto out;

	ret = copy_perf_config_registers_or_number(oa_config->mux_regs,
						   oa_config->mux_regs_len,
						   user_config.mux_regs_ptr,
						   &user_config.n_mux_regs);
	if (ret)
		goto out;

	memcpy(user_config.uuid, oa_config->uuid, sizeof(user_config.uuid));

	if (copy_to_user(user_config_ptr, &user_config, sizeof(user_config))) {
		ret = -EFAULT;
		goto out;
	}

	ret = total_size;

out:
	i915_oa_config_put(oa_config);
	return ret;
}
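
/*
 * Illustrative userspace sketch (not part of this file, assumes the
 * uAPI structs from i915_drm.h): fetching the OA config registered
 * under a known UUID. The user buffer is a drm_i915_query_perf_config
 * header immediately followed by a drm_i915_perf_oa_config. With the
 * n_*_regs counts left at zero, a first pass only reports how many
 * registers each list holds; a second pass with allocated arrays then
 * receives them as <mmio offset, value> u32 pairs:
 *
 *	char buf[sizeof(struct drm_i915_query_perf_config) +
 *		 sizeof(struct drm_i915_perf_oa_config)] = { };
 *	struct drm_i915_query_perf_config *hdr = (void *)buf;
 *
 *	memcpy(hdr->uuid, uuid, sizeof(hdr->uuid));
 *	item.query_id = DRM_I915_QUERY_PERF_CONFIG;
 *	item.flags = DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID;
 *	item.length = sizeof(buf);
 *	item.data_ptr = (uintptr_t)buf;
 *	ioctl(fd, DRM_IOCTL_I915_QUERY, &q);	// counts written back into buf
 */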

static size_t sizeof_perf_config_list(size_t count)
{
	return sizeof(struct drm_i915_query_perf_config) + sizeof(u64) * count;
}

static size_t sizeof_perf_metrics(struct i915_perf *perf)
{
	struct i915_oa_config *tmp;
	size_t i;
	int id;

	i = 1;
	rcu_read_lock();
	idr_for_each_entry(&perf->metrics_idr, tmp, id)
		i++;
	rcu_read_unlock();

	return sizeof_perf_config_list(i);
}

static int query_perf_config_list(struct drm_i915_private *i915,
				  struct drm_i915_query_item *query_item)
{
	struct drm_i915_query_perf_config __user *user_query_config_ptr =
		u64_to_user_ptr(query_item->data_ptr);
	struct i915_perf *perf = &i915->perf;
	u64 *oa_config_ids = NULL;
	int alloc, n_configs;
	u32 flags;
	int ret;

	if (!perf->i915)
		return -ENODEV;

	if (query_item->length == 0)
		return sizeof_perf_metrics(perf);

	if (get_user(flags, &user_query_config_ptr->flags))
		return -EFAULT;

	if (flags != 0)
		return -EINVAL;

	n_configs = 1;
	do {
		struct i915_oa_config *tmp;
		u64 *ids;
		int id;

		ids = krealloc(oa_config_ids,
			       n_configs * sizeof(*oa_config_ids),
			       GFP_KERNEL);
		if (!ids)
			return -ENOMEM;

		alloc = fetch_and_zero(&n_configs);

		ids[n_configs++] = 1ull; /* reserved for test_config */
		rcu_read_lock();
		idr_for_each_entry(&perf->metrics_idr, tmp, id) {
			if (n_configs < alloc)
				ids[n_configs] = id;
			n_configs++;
		}
		rcu_read_unlock();

		oa_config_ids = ids;
	} while (n_configs > alloc);

	if (query_item->length < sizeof_perf_config_list(n_configs)) {
		DRM_DEBUG("Invalid query config list item size=%u expected=%zu\n",
			  query_item->length,
			  sizeof_perf_config_list(n_configs));
		kfree(oa_config_ids);
		return -EINVAL;
	}

	if (put_user(n_configs, &user_query_config_ptr->config)) {
		kfree(oa_config_ids);
		return -EFAULT;
	}

	ret = copy_to_user(user_query_config_ptr + 1,
			   oa_config_ids,
			   n_configs * sizeof(*oa_config_ids));
	kfree(oa_config_ids);
	if (ret)
		return -EFAULT;

	return sizeof_perf_config_list(n_configs);
}

static int
query_perf_config(struct drm_i915_private *i915,
		  struct drm_i915_query_item *query_item)
{
	switch (query_item->flags) {
	case DRM_I915_QUERY_PERF_CONFIG_LIST:
		return query_perf_config_list(i915, query_item);
	case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID:
		return query_perf_config_data(i915, query_item, true);
	case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID:
		return query_perf_config_data(i915, query_item, false);
	default:
		return -EINVAL;
	}
}
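
/*
 * Illustrative userspace sketch (not part of this file): listing the
 * available OA config IDs. The reply is a drm_i915_query_perf_config
 * header, whose config/n_configs field holds the count, followed by
 * that many u64 IDs; the first ID (1) is reserved for the kernel's
 * test config:
 *
 *	item.query_id = DRM_I915_QUERY_PERF_CONFIG;
 *	item.flags = DRM_I915_QUERY_PERF_CONFIG_LIST;
 *	item.length = 0;
 *	ioctl(fd, DRM_IOCTL_I915_QUERY, &q);	// first pass: size only
 *
 *	item.data_ptr = (uintptr_t)calloc(1, item.length);
 *	ioctl(fd, DRM_IOCTL_I915_QUERY, &q);	// second pass: header + IDs
 */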

static int query_memregion_info(struct drm_i915_private *i915,
				struct drm_i915_query_item *query_item)
{
	struct drm_i915_query_memory_regions __user *query_ptr =
		u64_to_user_ptr(query_item->data_ptr);
	struct drm_i915_memory_region_info __user *info_ptr =
		&query_ptr->regions[0];
	struct drm_i915_memory_region_info info = { };
	struct drm_i915_query_memory_regions query;
	struct intel_memory_region *mr;
	u32 total_length;
	int ret, id, i;

	if (query_item->flags != 0)
		return -EINVAL;

	total_length = sizeof(query);
	for_each_memory_region(mr, i915, id) {
		if (mr->private)
			continue;

		total_length += sizeof(info);
	}

	ret = copy_query_item(&query, sizeof(query), total_length, query_item);
	if (ret != 0)
		return ret;

	if (query.num_regions)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(query.rsvd); i++) {
		if (query.rsvd[i])
			return -EINVAL;
	}

	for_each_memory_region(mr, i915, id) {
		if (mr->private)
			continue;

		info.region.memory_class = mr->type;
		info.region.memory_instance = mr->instance;
		info.probed_size = mr->total;
		info.unallocated_size = mr->avail;

		if (__copy_to_user(info_ptr, &info, sizeof(info)))
			return -EFAULT;

		query.num_regions++;
		info_ptr++;
	}

	if (__copy_to_user(query_ptr, &query, sizeof(query)))
		return -EFAULT;

	return total_length;
}

/*
 * Table order must match the DRM_I915_QUERY_* ids in i915_drm.h:
 * query_id n is dispatched to i915_query_funcs[n - 1].
 */
static int (* const i915_query_funcs[])(struct drm_i915_private *dev_priv,
					struct drm_i915_query_item *query_item) = {
	query_topology_info,
	query_engine_info,
	query_perf_config,
	query_memregion_info,
};

int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_query *args = data;
	struct drm_i915_query_item __user *user_item_ptr =
		u64_to_user_ptr(args->items_ptr);
	u32 i;

	if (args->flags != 0)
		return -EINVAL;

	for (i = 0; i < args->num_items; i++, user_item_ptr++) {
		struct drm_i915_query_item item;
		unsigned long func_idx;
		int ret;

		if (copy_from_user(&item, user_item_ptr, sizeof(item)))
			return -EFAULT;

		if (item.query_id == 0)
			return -EINVAL;

		if (overflows_type(item.query_id - 1, unsigned long))
			return -EINVAL;

		func_idx = item.query_id - 1;

		ret = -EINVAL;
		if (func_idx < ARRAY_SIZE(i915_query_funcs)) {
			func_idx = array_index_nospec(func_idx,
						      ARRAY_SIZE(i915_query_funcs));
			ret = i915_query_funcs[func_idx](dev_priv, &item);
		}

		/* Only write the length back to userspace if they differ. */
		if (ret != item.length && put_user(ret, &user_item_ptr->length))
			return -EFAULT;
	}

	return 0;
}
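
/*
 * Illustrative userspace sketch (not part of this file): several items
 * can be batched in one ioctl. Each item gets its own length or error
 * code written back; the ioctl itself only fails on malformed input:
 *
 *	struct drm_i915_query_item items[2] = {
 *		{ .query_id = DRM_I915_QUERY_TOPOLOGY_INFO },
 *		{ .query_id = DRM_I915_QUERY_MEMORY_REGIONS },
 *	};
 *	struct drm_i915_query q = {
 *		.num_items = 2,
 *		.items_ptr = (uintptr_t)items,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_QUERY, &q) == 0) {
 *		// items[i].length < 0 is a per-item errno (e.g. -EINVAL);
 *		// otherwise it holds the size required for that item.
 *	}
 */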