/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/nospec.h>

#include "i915_drv.h"
#include "i915_perf.h"
#include "i915_query.h"
#include <uapi/drm/i915_drm.h>

static int copy_query_item(void *query_hdr, size_t query_sz,
			   u32 total_length,
			   struct drm_i915_query_item *query_item)
{
	if (query_item->length == 0)
		return total_length;

	if (query_item->length < total_length)
		return -EINVAL;

	if (copy_from_user(query_hdr, u64_to_user_ptr(query_item->data_ptr),
			   query_sz))
		return -EFAULT;

	if (!access_ok(u64_to_user_ptr(query_item->data_ptr),
		       total_length))
		return -EFAULT;

	return 0;
}

static int query_topology_info(struct drm_i915_private *dev_priv,
			       struct drm_i915_query_item *query_item)
{
	const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	struct drm_i915_query_topology_info topo;
	u32 slice_length, subslice_length, eu_length, total_length;
	int ret;

	if (query_item->flags != 0)
		return -EINVAL;

	if (sseu->max_slices == 0)
		return -ENODEV;

	BUILD_BUG_ON(sizeof(u8) != sizeof(sseu->slice_mask));

	slice_length = sizeof(sseu->slice_mask);
	subslice_length = sseu->max_slices * sseu->ss_stride;
	eu_length = sseu->max_slices * sseu->max_subslices * sseu->eu_stride;
	total_length = sizeof(topo) + slice_length + subslice_length +
		       eu_length;

	ret = copy_query_item(&topo, sizeof(topo), total_length,
			      query_item);
	if (ret != 0)
		return ret;

	if (topo.flags != 0)
		return -EINVAL;

	memset(&topo, 0, sizeof(topo));
	topo.max_slices = sseu->max_slices;
	topo.max_subslices = sseu->max_subslices;
	topo.max_eus_per_subslice = sseu->max_eus_per_subslice;

	topo.subslice_offset = slice_length;
	topo.subslice_stride = sseu->ss_stride;
	topo.eu_offset = slice_length + subslice_length;
	topo.eu_stride = sseu->eu_stride;

	if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr),
			   &topo, sizeof(topo)))
		return -EFAULT;

	if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr + sizeof(topo)),
			   &sseu->slice_mask, slice_length))
		return -EFAULT;

	if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr +
					   sizeof(topo) + slice_length),
			   sseu->subslice_mask, subslice_length))
		return -EFAULT;

	if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr +
					   sizeof(topo) +
					   slice_length + subslice_length),
			   sseu->eu_mask, eu_length))
		return -EFAULT;

	return total_length;
}

static int
query_engine_info(struct drm_i915_private *i915,
		  struct drm_i915_query_item *query_item)
{
	struct drm_i915_query_engine_info __user *query_ptr =
				u64_to_user_ptr(query_item->data_ptr);
	struct drm_i915_engine_info __user *info_ptr;
	struct drm_i915_query_engine_info query;
	struct drm_i915_engine_info info = { };
	unsigned int num_uabi_engines = 0;
	struct intel_engine_cs *engine;
	int len, ret;

	if (query_item->flags)
		return -EINVAL;

	for_each_uabi_engine(engine, i915)
		num_uabi_engines++;

	len = sizeof(struct drm_i915_query_engine_info) +
	      num_uabi_engines * sizeof(struct drm_i915_engine_info);

	ret = copy_query_item(&query, sizeof(query), len, query_item);
	if (ret != 0)
		return ret;

	if (query.num_engines || query.rsvd[0] || query.rsvd[1] ||
	    query.rsvd[2])
		return -EINVAL;

	info_ptr = &query_ptr->engines[0];

	for_each_uabi_engine(engine, i915) {
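		/*
		 * Report the uabi class/instance and capabilities of each
		 * engine, copying one struct drm_i915_engine_info out per
		 * engine.
		 */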
		info.engine.engine_class = engine->uabi_class;
		info.engine.engine_instance = engine->uabi_instance;
		info.capabilities = engine->uabi_capabilities;

		if (__copy_to_user(info_ptr, &info, sizeof(info)))
			return -EFAULT;

		query.num_engines++;
		info_ptr++;
	}

	if (__copy_to_user(query_ptr, &query, sizeof(query)))
		return -EFAULT;

	return len;
}

static int can_copy_perf_config_registers_or_number(u32 user_n_regs,
						    u64 user_regs_ptr,
						    u32 kernel_n_regs)
{
	/*
	 * We'll just put the number of registers, and won't copy the
	 * register.
	 */
	if (user_n_regs == 0)
		return 0;

	if (user_n_regs < kernel_n_regs)
		return -EINVAL;

	if (!access_ok(u64_to_user_ptr(user_regs_ptr),
		       2 * sizeof(u32) * kernel_n_regs))
		return -EFAULT;

	return 0;
}

static int copy_perf_config_registers_or_number(const struct i915_oa_reg *kernel_regs,
						u32 kernel_n_regs,
						u64 user_regs_ptr,
						u32 *user_n_regs)
{
	u32 r;

	if (*user_n_regs == 0) {
		*user_n_regs = kernel_n_regs;
		return 0;
	}

	*user_n_regs = kernel_n_regs;

	for (r = 0; r < kernel_n_regs; r++) {
		u32 __user *user_reg_ptr =
			u64_to_user_ptr(user_regs_ptr + sizeof(u32) * r * 2);
		u32 __user *user_val_ptr =
			u64_to_user_ptr(user_regs_ptr + sizeof(u32) * r * 2 +
					sizeof(u32));
		int ret;

		ret = __put_user(i915_mmio_reg_offset(kernel_regs[r].addr),
				 user_reg_ptr);
		if (ret)
			return -EFAULT;

		ret = __put_user(kernel_regs[r].value, user_val_ptr);
		if (ret)
			return -EFAULT;
	}

	return 0;
}

static int query_perf_config_data(struct drm_i915_private *i915,
				  struct drm_i915_query_item *query_item,
				  bool use_uuid)
{
	struct drm_i915_query_perf_config __user *user_query_config_ptr =
		u64_to_user_ptr(query_item->data_ptr);
	struct drm_i915_perf_oa_config __user *user_config_ptr =
		u64_to_user_ptr(query_item->data_ptr +
				sizeof(struct drm_i915_query_perf_config));
	struct drm_i915_perf_oa_config user_config;
	struct i915_perf *perf = &i915->perf;
	struct i915_oa_config *oa_config;
	char uuid[UUID_STRING_LEN + 1];
	u64 config_id;
	u32 flags, total_size;
	int ret;

	if (!perf->i915)
		return -ENODEV;

	total_size =
		sizeof(struct drm_i915_query_perf_config) +
		sizeof(struct drm_i915_perf_oa_config);

	if (query_item->length == 0)
		return total_size;

	if (query_item->length < total_size) {
		DRM_DEBUG("Invalid query config data item size=%u expected=%u\n",
			  query_item->length, total_size);
		return -EINVAL;
	}

	if (!access_ok(user_query_config_ptr, total_size))
		return -EFAULT;

	if (__get_user(flags, &user_query_config_ptr->flags))
		return -EFAULT;

	if (flags != 0)
		return -EINVAL;

	if (use_uuid) {
		struct i915_oa_config *tmp;
		int id;

		BUILD_BUG_ON(sizeof(user_query_config_ptr->uuid) >= sizeof(uuid));

		memset(&uuid, 0, sizeof(uuid));
		if (__copy_from_user(uuid, user_query_config_ptr->uuid,
				     sizeof(user_query_config_ptr->uuid)))
			return -EFAULT;

		oa_config = NULL;
		rcu_read_lock();
		idr_for_each_entry(&perf->metrics_idr, tmp, id) {
			if (!strcmp(tmp->uuid, uuid)) {
				oa_config = i915_oa_config_get(tmp);
				break;
			}
		}
		rcu_read_unlock();
	} else {
		if (__get_user(config_id, &user_query_config_ptr->config))
			return -EFAULT;

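		/*
		 * Lookup by numeric config id; i915_perf_get_oa_config()
		 * returns a reference which is dropped via
		 * i915_oa_config_put() at the out: label below.
		 */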
		oa_config = i915_perf_get_oa_config(perf, config_id);
	}
	if (!oa_config)
		return -ENOENT;

	if (__copy_from_user(&user_config, user_config_ptr,
			     sizeof(user_config))) {
		ret = -EFAULT;
		goto out;
	}

	ret = can_copy_perf_config_registers_or_number(user_config.n_boolean_regs,
						       user_config.boolean_regs_ptr,
						       oa_config->b_counter_regs_len);
	if (ret)
		goto out;

	ret = can_copy_perf_config_registers_or_number(user_config.n_flex_regs,
						       user_config.flex_regs_ptr,
						       oa_config->flex_regs_len);
	if (ret)
		goto out;

	ret = can_copy_perf_config_registers_or_number(user_config.n_mux_regs,
						       user_config.mux_regs_ptr,
						       oa_config->mux_regs_len);
	if (ret)
		goto out;

	ret = copy_perf_config_registers_or_number(oa_config->b_counter_regs,
						   oa_config->b_counter_regs_len,
						   user_config.boolean_regs_ptr,
						   &user_config.n_boolean_regs);
	if (ret)
		goto out;

	ret = copy_perf_config_registers_or_number(oa_config->flex_regs,
						   oa_config->flex_regs_len,
						   user_config.flex_regs_ptr,
						   &user_config.n_flex_regs);
	if (ret)
		goto out;

	ret = copy_perf_config_registers_or_number(oa_config->mux_regs,
						   oa_config->mux_regs_len,
						   user_config.mux_regs_ptr,
						   &user_config.n_mux_regs);
	if (ret)
		goto out;

	memcpy(user_config.uuid, oa_config->uuid, sizeof(user_config.uuid));

	if (__copy_to_user(user_config_ptr, &user_config,
			   sizeof(user_config))) {
		ret = -EFAULT;
		goto out;
	}

	ret = total_size;

out:
	i915_oa_config_put(oa_config);
	return ret;
}

static size_t sizeof_perf_config_list(size_t count)
{
	return sizeof(struct drm_i915_query_perf_config) + sizeof(u64) * count;
}

static size_t sizeof_perf_metrics(struct i915_perf *perf)
{
	struct i915_oa_config *tmp;
	size_t i;
	int id;

	i = 1;
	rcu_read_lock();
	idr_for_each_entry(&perf->metrics_idr, tmp, id)
		i++;
	rcu_read_unlock();

	return sizeof_perf_config_list(i);
}

static int query_perf_config_list(struct drm_i915_private *i915,
				  struct drm_i915_query_item *query_item)
{
	struct drm_i915_query_perf_config __user *user_query_config_ptr =
		u64_to_user_ptr(query_item->data_ptr);
	struct i915_perf *perf = &i915->perf;
	u64 *oa_config_ids = NULL;
	int alloc, n_configs;
	u32 flags;
	int ret;

	if (!perf->i915)
		return -ENODEV;

	if (query_item->length == 0)
		return sizeof_perf_metrics(perf);

	if (get_user(flags, &user_query_config_ptr->flags))
		return -EFAULT;

	if (flags != 0)
		return -EINVAL;

	n_configs = 1;
	do {
		struct i915_oa_config *tmp;
		u64 *ids;
		int id;

		ids = krealloc(oa_config_ids,
			       n_configs * sizeof(*oa_config_ids),
			       GFP_KERNEL);
		if (!ids)
			return -ENOMEM;

		alloc = fetch_and_zero(&n_configs);

		ids[n_configs++] = 1ull; /* reserved for test_config */
		rcu_read_lock();
		idr_for_each_entry(&perf->metrics_idr, tmp, id) {
			if (n_configs < alloc)
				ids[n_configs] = id;
			n_configs++;
		}
		rcu_read_unlock();

		oa_config_ids = ids;
	} while (n_configs > alloc);

	if (query_item->length < sizeof_perf_config_list(n_configs)) {
		DRM_DEBUG("Invalid query config list item size=%u expected=%zu\n",
			  query_item->length,
			  sizeof_perf_config_list(n_configs));
		kfree(oa_config_ids);
		return -EINVAL;
	}

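	/*
	 * The number of configurations is reported back through the 'config'
	 * field of the query header, followed by the array of u64 config ids.
	 */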
	if (put_user(n_configs, &user_query_config_ptr->config)) {
		kfree(oa_config_ids);
		return -EFAULT;
	}

	ret = copy_to_user(user_query_config_ptr + 1,
			   oa_config_ids,
			   n_configs * sizeof(*oa_config_ids));
	kfree(oa_config_ids);
	if (ret)
		return -EFAULT;

	return sizeof_perf_config_list(n_configs);
}

static int query_perf_config(struct drm_i915_private *i915,
			     struct drm_i915_query_item *query_item)
{
	switch (query_item->flags) {
	case DRM_I915_QUERY_PERF_CONFIG_LIST:
		return query_perf_config_list(i915, query_item);
	case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID:
		return query_perf_config_data(i915, query_item, true);
	case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID:
		return query_perf_config_data(i915, query_item, false);
	default:
		return -EINVAL;
	}
}

static int (* const i915_query_funcs[])(struct drm_i915_private *dev_priv,
					struct drm_i915_query_item *query_item) = {
	query_topology_info,
	query_engine_info,
	query_perf_config,
};

int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_query *args = data;
	struct drm_i915_query_item __user *user_item_ptr =
		u64_to_user_ptr(args->items_ptr);
	u32 i;

	if (args->flags != 0)
		return -EINVAL;

	for (i = 0; i < args->num_items; i++, user_item_ptr++) {
		struct drm_i915_query_item item;
		unsigned long func_idx;
		int ret;

		if (copy_from_user(&item, user_item_ptr, sizeof(item)))
			return -EFAULT;

		if (item.query_id == 0)
			return -EINVAL;

		if (overflows_type(item.query_id - 1, unsigned long))
			return -EINVAL;

		func_idx = item.query_id - 1;

		ret = -EINVAL;
		if (func_idx < ARRAY_SIZE(i915_query_funcs)) {
			func_idx = array_index_nospec(func_idx,
						      ARRAY_SIZE(i915_query_funcs));
			ret = i915_query_funcs[func_idx](dev_priv, &item);
		}

		/* Only write the length back to userspace if they differ. */
		if (ret != item.length && put_user(ret, &user_item_ptr->length))
			return -EFAULT;
	}

	return 0;
}
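
/*
 * Illustrative userspace usage (a sketch, not part of the driver): the query
 * uAPI is two-pass.  A first DRM_IOCTL_I915_QUERY call with item.length == 0
 * asks the kernel for the required buffer size (written back into
 * item.length), and a second call with data_ptr pointing at a buffer of at
 * least that size fills it in.
 *
 *	struct drm_i915_query_item item = {
 *		.query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
 *	};
 *	struct drm_i915_query query = {
 *		.num_items = 1,
 *		.items_ptr = (uintptr_t)&item,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_QUERY, &query);	 // item.length = size
 *	item.data_ptr = (uintptr_t)malloc(item.length);
 *	ioctl(fd, DRM_IOCTL_I915_QUERY, &query);	 // buffer filled in
 */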