/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/nospec.h>

#include "i915_drv.h"
#include "i915_perf.h"
#include "i915_query.h"
#include "gt/intel_engine_user.h"
#include <uapi/drm/i915_drm.h>

static int copy_query_item(void *query_hdr, size_t query_sz,
			   u32 total_length,
			   struct drm_i915_query_item *query_item)
{
	if (query_item->length == 0)
		return total_length;

	if (query_item->length < total_length)
		return -EINVAL;

	if (copy_from_user(query_hdr, u64_to_user_ptr(query_item->data_ptr),
			   query_sz))
		return -EFAULT;

	return 0;
}

static int fill_topology_info(const struct sseu_dev_info *sseu,
			      struct drm_i915_query_item *query_item,
			      const u8 *subslice_mask)
{
	struct drm_i915_query_topology_info topo;
	u32 slice_length, subslice_length, eu_length, total_length;
	int ret;

	BUILD_BUG_ON(sizeof(u8) != sizeof(sseu->slice_mask));

	if (sseu->max_slices == 0)
		return -ENODEV;

	slice_length = sizeof(sseu->slice_mask);
	subslice_length = sseu->max_slices * sseu->ss_stride;
	eu_length = sseu->max_slices * sseu->max_subslices * sseu->eu_stride;
	total_length = sizeof(topo) + slice_length + subslice_length +
		       eu_length;

	ret = copy_query_item(&topo, sizeof(topo), total_length, query_item);
	if (ret != 0)
		return ret;

	memset(&topo, 0, sizeof(topo));
	topo.max_slices = sseu->max_slices;
	topo.max_subslices = sseu->max_subslices;
	topo.max_eus_per_subslice = sseu->max_eus_per_subslice;

	topo.subslice_offset = slice_length;
	topo.subslice_stride = sseu->ss_stride;
	topo.eu_offset = slice_length + subslice_length;
	topo.eu_stride = sseu->eu_stride;

	if (copy_to_user(u64_to_user_ptr(query_item->data_ptr),
			 &topo, sizeof(topo)))
		return -EFAULT;

	if (copy_to_user(u64_to_user_ptr(query_item->data_ptr + sizeof(topo)),
			 &sseu->slice_mask, slice_length))
		return -EFAULT;

	if (copy_to_user(u64_to_user_ptr(query_item->data_ptr +
					 sizeof(topo) + slice_length),
			 subslice_mask, subslice_length))
		return -EFAULT;

	if (copy_to_user(u64_to_user_ptr(query_item->data_ptr +
					 sizeof(topo) +
					 slice_length + subslice_length),
			 sseu->eu_mask, eu_length))
		return -EFAULT;

	return total_length;
}

static int query_topology_info(struct drm_i915_private *dev_priv,
			       struct drm_i915_query_item *query_item)
{
	const struct sseu_dev_info *sseu = &to_gt(dev_priv)->info.sseu;

	if (query_item->flags != 0)
		return -EINVAL;

	return fill_topology_info(sseu, query_item, sseu->subslice_mask);
}
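
/*
 * Unlike the other query items, DRM_I915_QUERY_GEOMETRY_SUBSLICES encodes a
 * struct i915_engine_class_instance in query_item->flags to select the
 * (render) engine whose geometry subslice mask is reported, which is why
 * there is no flags == 0 check below.
 */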
static int query_geometry_subslices(struct drm_i915_private *i915,
				    struct drm_i915_query_item *query_item)
{
	const struct sseu_dev_info *sseu;
	struct intel_engine_cs *engine;
	struct i915_engine_class_instance classinstance;

	if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
		return -ENODEV;

	classinstance = *((struct i915_engine_class_instance *)&query_item->flags);

	engine = intel_engine_lookup_user(i915, (u8)classinstance.engine_class,
					  (u8)classinstance.engine_instance);

	if (!engine)
		return -EINVAL;

	if (engine->class != RENDER_CLASS)
		return -EINVAL;

	sseu = &engine->gt->info.sseu;

	return fill_topology_info(sseu, query_item, sseu->geometry_subslice_mask);
}

static int
query_engine_info(struct drm_i915_private *i915,
		  struct drm_i915_query_item *query_item)
{
	struct drm_i915_query_engine_info __user *query_ptr =
				u64_to_user_ptr(query_item->data_ptr);
	struct drm_i915_engine_info __user *info_ptr;
	struct drm_i915_query_engine_info query;
	struct drm_i915_engine_info info = { };
	unsigned int num_uabi_engines = 0;
	struct intel_engine_cs *engine;
	int len, ret;

	if (query_item->flags)
		return -EINVAL;

	for_each_uabi_engine(engine, i915)
		num_uabi_engines++;

	len = struct_size(query_ptr, engines, num_uabi_engines);

	ret = copy_query_item(&query, sizeof(query), len, query_item);
	if (ret != 0)
		return ret;

	if (query.num_engines || query.rsvd[0] || query.rsvd[1] ||
	    query.rsvd[2])
		return -EINVAL;

	info_ptr = &query_ptr->engines[0];

	for_each_uabi_engine(engine, i915) {
		info.engine.engine_class = engine->uabi_class;
		info.engine.engine_instance = engine->uabi_instance;
		info.flags = I915_ENGINE_INFO_HAS_LOGICAL_INSTANCE;
		info.capabilities = engine->uabi_capabilities;
		info.logical_instance = ilog2(engine->logical_mask);

		if (copy_to_user(info_ptr, &info, sizeof(info)))
			return -EFAULT;

		query.num_engines++;
		info_ptr++;
	}

	if (copy_to_user(query_ptr, &query, sizeof(query)))
		return -EFAULT;

	return len;
}

static int can_copy_perf_config_registers_or_number(u32 user_n_regs,
						    u64 user_regs_ptr,
						    u32 kernel_n_regs)
{
	/*
	 * We'll just put the number of registers, and won't copy the
	 * registers.
	 */
	if (user_n_regs == 0)
		return 0;

	if (user_n_regs < kernel_n_regs)
		return -EINVAL;

	return 0;
}

static int copy_perf_config_registers_or_number(const struct i915_oa_reg *kernel_regs,
						u32 kernel_n_regs,
						u64 user_regs_ptr,
						u32 *user_n_regs)
{
	u32 __user *p = u64_to_user_ptr(user_regs_ptr);
	u32 r;

	if (*user_n_regs == 0) {
		*user_n_regs = kernel_n_regs;
		return 0;
	}

	*user_n_regs = kernel_n_regs;

	if (!user_write_access_begin(p, 2 * sizeof(u32) * kernel_n_regs))
		return -EFAULT;

	for (r = 0; r < kernel_n_regs; r++, p += 2) {
		unsafe_put_user(i915_mmio_reg_offset(kernel_regs[r].addr),
				p, Efault);
		unsafe_put_user(kernel_regs[r].value, p + 1, Efault);
	}
	user_write_access_end();
	return 0;
Efault:
	user_write_access_end();
	return -EFAULT;
}
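
/*
 * DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_{UUID,ID}: the query buffer is a
 * struct drm_i915_query_perf_config immediately followed by a struct
 * drm_i915_perf_oa_config. Register lists are returned to userspace as
 * (mmio offset, value) pairs of u32, and passing n_*_regs == 0 asks only
 * for the number of registers of each type.
 */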
static int query_perf_config_data(struct drm_i915_private *i915,
				  struct drm_i915_query_item *query_item,
				  bool use_uuid)
{
	struct drm_i915_query_perf_config __user *user_query_config_ptr =
		u64_to_user_ptr(query_item->data_ptr);
	struct drm_i915_perf_oa_config __user *user_config_ptr =
		u64_to_user_ptr(query_item->data_ptr +
				sizeof(struct drm_i915_query_perf_config));
	struct drm_i915_perf_oa_config user_config;
	struct i915_perf *perf = &i915->perf;
	struct i915_oa_config *oa_config;
	char uuid[UUID_STRING_LEN + 1];
	u64 config_id;
	u32 flags, total_size;
	int ret;

	if (!perf->i915)
		return -ENODEV;

	total_size =
		sizeof(struct drm_i915_query_perf_config) +
		sizeof(struct drm_i915_perf_oa_config);

	if (query_item->length == 0)
		return total_size;

	if (query_item->length < total_size) {
		DRM_DEBUG("Invalid query config data item size=%u expected=%u\n",
			  query_item->length, total_size);
		return -EINVAL;
	}

	if (get_user(flags, &user_query_config_ptr->flags))
		return -EFAULT;

	if (flags != 0)
		return -EINVAL;

	if (use_uuid) {
		struct i915_oa_config *tmp;
		int id;

		BUILD_BUG_ON(sizeof(user_query_config_ptr->uuid) >= sizeof(uuid));

		memset(&uuid, 0, sizeof(uuid));
		if (copy_from_user(uuid, user_query_config_ptr->uuid,
				   sizeof(user_query_config_ptr->uuid)))
			return -EFAULT;

		oa_config = NULL;
		rcu_read_lock();
		idr_for_each_entry(&perf->metrics_idr, tmp, id) {
			if (!strcmp(tmp->uuid, uuid)) {
				oa_config = i915_oa_config_get(tmp);
				break;
			}
		}
		rcu_read_unlock();
	} else {
		if (get_user(config_id, &user_query_config_ptr->config))
			return -EFAULT;

		oa_config = i915_perf_get_oa_config(perf, config_id);
	}
	if (!oa_config)
		return -ENOENT;

	if (copy_from_user(&user_config, user_config_ptr, sizeof(user_config))) {
		ret = -EFAULT;
		goto out;
	}

	ret = can_copy_perf_config_registers_or_number(user_config.n_boolean_regs,
						       user_config.boolean_regs_ptr,
						       oa_config->b_counter_regs_len);
	if (ret)
		goto out;

	ret = can_copy_perf_config_registers_or_number(user_config.n_flex_regs,
						       user_config.flex_regs_ptr,
						       oa_config->flex_regs_len);
	if (ret)
		goto out;

	ret = can_copy_perf_config_registers_or_number(user_config.n_mux_regs,
						       user_config.mux_regs_ptr,
						       oa_config->mux_regs_len);
	if (ret)
		goto out;

	ret = copy_perf_config_registers_or_number(oa_config->b_counter_regs,
						   oa_config->b_counter_regs_len,
						   user_config.boolean_regs_ptr,
						   &user_config.n_boolean_regs);
	if (ret)
		goto out;

	ret = copy_perf_config_registers_or_number(oa_config->flex_regs,
						   oa_config->flex_regs_len,
						   user_config.flex_regs_ptr,
						   &user_config.n_flex_regs);
	if (ret)
		goto out;

	ret = copy_perf_config_registers_or_number(oa_config->mux_regs,
						   oa_config->mux_regs_len,
						   user_config.mux_regs_ptr,
						   &user_config.n_mux_regs);
	if (ret)
		goto out;

	memcpy(user_config.uuid, oa_config->uuid, sizeof(user_config.uuid));

	if (copy_to_user(user_config_ptr, &user_config, sizeof(user_config))) {
		ret = -EFAULT;
		goto out;
	}

	ret = total_size;

out:
	i915_oa_config_put(oa_config);
	return ret;
}

static size_t sizeof_perf_config_list(size_t count)
{
	return sizeof(struct drm_i915_query_perf_config) + sizeof(u64) * count;
}

static size_t sizeof_perf_metrics(struct i915_perf *perf)
{
	struct i915_oa_config *tmp;
	size_t i;
	int id;

	i = 1;
	rcu_read_lock();
	idr_for_each_entry(&perf->metrics_idr, tmp, id)
		i++;
	rcu_read_unlock();

	return sizeof_perf_config_list(i);
}
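
/*
 * DRM_I915_QUERY_PERF_CONFIG_LIST: the number of registered OA configs can
 * change between sizing the snapshot and filling it (the IDR is only read
 * under RCU), so the loop below retries with a larger krealloc()ed buffer
 * until a full pass counts no more entries than were allocated. ID 1 is
 * always reported first, reserved for the kernel test config.
 */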
static int query_perf_config_list(struct drm_i915_private *i915,
				  struct drm_i915_query_item *query_item)
{
	struct drm_i915_query_perf_config __user *user_query_config_ptr =
		u64_to_user_ptr(query_item->data_ptr);
	struct i915_perf *perf = &i915->perf;
	u64 *oa_config_ids = NULL;
	int alloc, n_configs;
	u32 flags;
	int ret;

	if (!perf->i915)
		return -ENODEV;

	if (query_item->length == 0)
		return sizeof_perf_metrics(perf);

	if (get_user(flags, &user_query_config_ptr->flags))
		return -EFAULT;

	if (flags != 0)
		return -EINVAL;

	n_configs = 1;
	do {
		struct i915_oa_config *tmp;
		u64 *ids;
		int id;

		ids = krealloc(oa_config_ids,
			       n_configs * sizeof(*oa_config_ids),
			       GFP_KERNEL);
		if (!ids)
			return -ENOMEM;

		alloc = fetch_and_zero(&n_configs);

		ids[n_configs++] = 1ull; /* reserved for test_config */
		rcu_read_lock();
		idr_for_each_entry(&perf->metrics_idr, tmp, id) {
			if (n_configs < alloc)
				ids[n_configs] = id;
			n_configs++;
		}
		rcu_read_unlock();

		oa_config_ids = ids;
	} while (n_configs > alloc);

	if (query_item->length < sizeof_perf_config_list(n_configs)) {
		DRM_DEBUG("Invalid query config list item size=%u expected=%zu\n",
			  query_item->length,
			  sizeof_perf_config_list(n_configs));
		kfree(oa_config_ids);
		return -EINVAL;
	}

	if (put_user(n_configs, &user_query_config_ptr->config)) {
		kfree(oa_config_ids);
		return -EFAULT;
	}

	ret = copy_to_user(user_query_config_ptr + 1,
			   oa_config_ids,
			   n_configs * sizeof(*oa_config_ids));
	kfree(oa_config_ids);
	if (ret)
		return -EFAULT;

	return sizeof_perf_config_list(n_configs);
}

static int query_perf_config(struct drm_i915_private *i915,
			     struct drm_i915_query_item *query_item)
{
	switch (query_item->flags) {
	case DRM_I915_QUERY_PERF_CONFIG_LIST:
		return query_perf_config_list(i915, query_item);
	case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID:
		return query_perf_config_data(i915, query_item, true);
	case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID:
		return query_perf_config_data(i915, query_item, false);
	default:
		return -EINVAL;
	}
}

static int query_memregion_info(struct drm_i915_private *i915,
				struct drm_i915_query_item *query_item)
{
	struct drm_i915_query_memory_regions __user *query_ptr =
		u64_to_user_ptr(query_item->data_ptr);
	struct drm_i915_memory_region_info __user *info_ptr =
		&query_ptr->regions[0];
	struct drm_i915_memory_region_info info = { };
	struct drm_i915_query_memory_regions query;
	struct intel_memory_region *mr;
	u32 total_length;
	int ret, id, i;

	if (query_item->flags != 0)
		return -EINVAL;

	total_length = sizeof(query);
	for_each_memory_region(mr, i915, id) {
		if (mr->private)
			continue;

		total_length += sizeof(info);
	}

	ret = copy_query_item(&query, sizeof(query), total_length, query_item);
	if (ret != 0)
		return ret;

	if (query.num_regions)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(query.rsvd); i++) {
		if (query.rsvd[i])
			return -EINVAL;
	}

	for_each_memory_region(mr, i915, id) {
		if (mr->private)
			continue;

		info.region.memory_class = mr->type;
		info.region.memory_instance = mr->instance;
		info.probed_size = mr->total;
		info.unallocated_size = mr->avail;

		if (__copy_to_user(info_ptr, &info, sizeof(info)))
			return -EFAULT;

		query.num_regions++;
		info_ptr++;
	}

	if (__copy_to_user(query_ptr, &query, sizeof(query)))
		return -EFAULT;

	return total_length;
}

static int query_hwconfig_blob(struct drm_i915_private *i915,
			       struct drm_i915_query_item *query_item)
{
	struct intel_gt *gt = to_gt(i915);
	struct intel_hwconfig *hwconfig = &gt->info.hwconfig;

	if (!hwconfig->size || !hwconfig->ptr)
		return -ENODEV;

	if (query_item->length == 0)
		return hwconfig->size;

	if (query_item->length < hwconfig->size)
		return -EINVAL;

	if (copy_to_user(u64_to_user_ptr(query_item->data_ptr),
			 hwconfig->ptr, hwconfig->size))
		return -EFAULT;

	return hwconfig->size;
}
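
/*
 * Dispatch table indexed by query_id - 1; the order must match the
 * DRM_I915_QUERY_* values in uapi/drm/i915_drm.h. i915_query_ioctl()
 * bounds-checks the index and sanitises it with array_index_nospec()
 * to avoid speculative out-of-bounds reads (Spectre v1).
 */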
static int (* const i915_query_funcs[])(struct drm_i915_private *dev_priv,
					struct drm_i915_query_item *query_item) = {
	query_topology_info,
	query_engine_info,
	query_perf_config,
	query_memregion_info,
	query_hwconfig_blob,
	query_geometry_subslices,
};

int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_query *args = data;
	struct drm_i915_query_item __user *user_item_ptr =
		u64_to_user_ptr(args->items_ptr);
	u32 i;

	if (args->flags != 0)
		return -EINVAL;

	for (i = 0; i < args->num_items; i++, user_item_ptr++) {
		struct drm_i915_query_item item;
		unsigned long func_idx;
		int ret;

		if (copy_from_user(&item, user_item_ptr, sizeof(item)))
			return -EFAULT;

		if (item.query_id == 0)
			return -EINVAL;

		if (overflows_type(item.query_id - 1, unsigned long))
			return -EINVAL;

		func_idx = item.query_id - 1;

		ret = -EINVAL;
		if (func_idx < ARRAY_SIZE(i915_query_funcs)) {
			func_idx = array_index_nospec(func_idx,
						      ARRAY_SIZE(i915_query_funcs));
			ret = i915_query_funcs[func_idx](dev_priv, &item);
		}

		/* Only write the length back to userspace if they differ. */
		if (ret != item.length && put_user(ret, &user_item_ptr->length))
			return -EFAULT;
	}

	return 0;
}
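
/*
 * Illustrative userspace usage (not part of the driver build): a minimal
 * sketch of the two-pass protocol implemented by i915_query_ioctl() above,
 * where fd is an open i915 DRM file descriptor. A zero-length item makes
 * the kernel write the required size back into item.length; the caller
 * then allocates a buffer and repeats the ioctl. Error handling is elided
 * for brevity.
 *
 *	struct drm_i915_query_item item = {
 *		.query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
 *	};
 *	struct drm_i915_query q = {
 *		.num_items = 1,
 *		.items_ptr = (uintptr_t)&item,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_QUERY, &q);	// item.length = required size
 *	item.data_ptr = (uintptr_t)malloc(item.length);
 *	ioctl(fd, DRM_IOCTL_I915_QUERY, &q);	// buffer now holds the data
 */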