/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/nospec.h>

#include "i915_drv.h"
#include "i915_perf.h"
#include "i915_query.h"
#include <uapi/drm/i915_drm.h>

/*
 * Common helper for the two-phase query uAPI protocol:
 *
 *  - If userspace passed length == 0, it is probing for the required
 *    buffer size; report @total_length back as the (positive) return.
 *  - If the user buffer is smaller than @total_length, fail with -EINVAL.
 *  - Otherwise copy in the first @query_sz bytes (the item's header
 *    struct) and verify with access_ok() that the whole @total_length
 *    span of the user buffer is a valid user address range.
 *
 * NOTE(review): callers rely on that access_ok() check covering the full
 * buffer — they subsequently write results with the unchecked
 * __copy_to_user() variant.
 *
 * Returns 0 on success, @total_length for a size probe, or a negative
 * error code.
 */
static int copy_query_item(void *query_hdr, size_t query_sz,
			   u32 total_length,
			   struct drm_i915_query_item *query_item)
{
	if (query_item->length == 0)
		return total_length;

	if (query_item->length < total_length)
		return -EINVAL;

	if (copy_from_user(query_hdr, u64_to_user_ptr(query_item->data_ptr),
			   query_sz))
		return -EFAULT;

	if (!access_ok(u64_to_user_ptr(query_item->data_ptr),
		       total_length))
		return -EFAULT;

	return 0;
}

/*
 * DRM_I915_QUERY_TOPOLOGY_INFO: report the slice/subslice/EU topology.
 *
 * The user buffer layout written here is:
 *
 *   struct drm_i915_query_topology_info  (header, offsets/strides filled in)
 *   u8 slice_mask                        (slice_length bytes)
 *   subslice masks                       (max_slices * ss_stride bytes)
 *   EU masks                             (max_slices * max_subslices *
 *                                         eu_stride bytes)
 *
 * Returns the total number of bytes (also used as the size probe answer)
 * or a negative error code.
 */
static int query_topology_info(struct drm_i915_private *dev_priv,
			       struct drm_i915_query_item *query_item)
{
	const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	struct drm_i915_query_topology_info topo;
	u32 slice_length, subslice_length, eu_length, total_length;
	int ret;

	/* No flags are defined for this query item. */
	if (query_item->flags != 0)
		return -EINVAL;

	/* No topology information on this device. */
	if (sseu->max_slices == 0)
		return -ENODEV;

	/* The uAPI slice mask that follows the header is a single byte. */
	BUILD_BUG_ON(sizeof(u8) != sizeof(sseu->slice_mask));

	slice_length = sizeof(sseu->slice_mask);
	subslice_length = sseu->max_slices * sseu->ss_stride;
	eu_length = sseu->max_slices * sseu->max_subslices * sseu->eu_stride;
	total_length = sizeof(topo) + slice_length + subslice_length +
		       eu_length;

	ret = copy_query_item(&topo, sizeof(topo), total_length,
			      query_item);
	if (ret != 0)
		return ret;

	/* Reject unknown flags in the user-supplied header. */
	if (topo.flags != 0)
		return -EINVAL;

	memset(&topo, 0, sizeof(topo));
	topo.max_slices = sseu->max_slices;
	topo.max_subslices = sseu->max_subslices;
	topo.max_eus_per_subslice = sseu->max_eus_per_subslice;

	/* Offsets are relative to the start of the mask data (after topo). */
	topo.subslice_offset = slice_length;
	topo.subslice_stride = sseu->ss_stride;
	topo.eu_offset = slice_length + subslice_length;
	topo.eu_stride = sseu->eu_stride;

	/*
	 * __copy_to_user() (no access check) is safe here: copy_query_item()
	 * already validated the full total_length span with access_ok().
	 */
	if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr),
			   &topo, sizeof(topo)))
		return -EFAULT;

	if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr + sizeof(topo)),
			   &sseu->slice_mask, slice_length))
		return -EFAULT;

	if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr +
					   sizeof(topo) + slice_length),
			   sseu->subslice_mask, subslice_length))
		return -EFAULT;

	if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr +
					   sizeof(topo) +
					   slice_length + subslice_length),
			   sseu->eu_mask, eu_length))
		return -EFAULT;

	return total_length;
}

/*
 * DRM_I915_QUERY_ENGINE_INFO: report class/instance/capabilities for every
 * user-visible engine.
 *
 * The user buffer holds a drm_i915_query_engine_info header followed by
 * one drm_i915_engine_info entry per engine. The header's num_engines and
 * rsvd[] must be zero on input; num_engines is filled in on output.
 *
 * Returns the total byte length written (or required, for a size probe),
 * or a negative error code.
 */
static int
query_engine_info(struct drm_i915_private *i915,
		  struct drm_i915_query_item *query_item)
{
	struct drm_i915_query_engine_info __user *query_ptr =
		u64_to_user_ptr(query_item->data_ptr);
	struct drm_i915_engine_info __user *info_ptr;
	struct drm_i915_query_engine_info query;
	struct drm_i915_engine_info info = { };
	struct intel_engine_cs *engine;
	int len, ret;

	/* No flags are defined for this query item. */
	if (query_item->flags)
		return -EINVAL;

	len = sizeof(struct drm_i915_query_engine_info) +
	      RUNTIME_INFO(i915)->num_engines *
	      sizeof(struct drm_i915_engine_info);

	ret = copy_query_item(&query, sizeof(query), len, query_item);
	if (ret != 0)
		return ret;

	/* Input header must be zeroed (forward compatibility). */
	if (query.num_engines || query.rsvd[0] || query.rsvd[1] ||
	    query.rsvd[2])
		return -EINVAL;

	info_ptr = &query_ptr->engines[0];

	for_each_uabi_engine(engine, i915) {
		info.engine.engine_class = engine->uabi_class;
		info.engine.engine_instance = engine->uabi_instance;
		info.capabilities = engine->uabi_capabilities;

		/* Buffer already validated by copy_query_item(). */
		if (__copy_to_user(info_ptr, &info, sizeof(info)))
			return -EFAULT;

		query.num_engines++;
		info_ptr++;
	}

	/* Write the header last, with the final engine count. */
	if (__copy_to_user(query_ptr, &query, sizeof(query)))
		return -EFAULT;

	return len;
}

/*
 * Validate one register list of a perf-config query before anything is
 * written. user_n_regs == 0 means "just tell me the count" (no buffer to
 * check). Otherwise the user array must have room for kernel_n_regs
 * (addr, value) u32 pairs and be a valid user address range.
 */
static int can_copy_perf_config_registers_or_number(u32 user_n_regs,
						    u64 user_regs_ptr,
						    u32 kernel_n_regs)
{
	/*
	 * We'll just put the number of registers, and won't copy the
	 * register.
	 */
	if (user_n_regs == 0)
		return 0;

	if (user_n_regs < kernel_n_regs)
		return -EINVAL;

	/* Each register is an (offset, value) pair of u32s. */
	if (!access_ok(u64_to_user_ptr(user_regs_ptr),
		       2 * sizeof(u32) * kernel_n_regs))
		return -EFAULT;

	return 0;
}

/*
 * Copy one register list out to userspace as (mmio offset, value) u32
 * pairs, or — when the user asked with *user_n_regs == 0 — only report the
 * count. *user_n_regs is always updated to the kernel's count.
 *
 * Uses __put_user(); callers must have validated the buffer first via
 * can_copy_perf_config_registers_or_number().
 */
static int copy_perf_config_registers_or_number(const struct i915_oa_reg *kernel_regs,
						u32 kernel_n_regs,
						u64 user_regs_ptr,
						u32 *user_n_regs)
{
	u32 r;

	if (*user_n_regs == 0) {
		*user_n_regs = kernel_n_regs;
		return 0;
	}

	*user_n_regs = kernel_n_regs;

	for (r = 0; r < kernel_n_regs; r++) {
		u32 __user *user_reg_ptr =
			u64_to_user_ptr(user_regs_ptr + sizeof(u32) * r * 2);
		u32 __user *user_val_ptr =
			u64_to_user_ptr(user_regs_ptr + sizeof(u32) * r * 2 +
					sizeof(u32));
		int ret;

		ret = __put_user(i915_mmio_reg_offset(kernel_regs[r].addr),
				 user_reg_ptr);
		if (ret)
			return -EFAULT;

		ret = __put_user(kernel_regs[r].value, user_val_ptr);
		if (ret)
			return -EFAULT;
	}

	return 0;
}

/*
 * DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_{UUID,ID}: copy one OA config's
 * boolean/flex/mux register lists (and its UUID) back to userspace.
 *
 * The config is looked up either by UUID string (RCU walk of the metrics
 * idr) or by numeric id, and its reference is held across all user copies
 * (i915_oa_config_get / i915_oa_config_put).
 *
 * Returns total_size on success or a negative error code.
 */
static int query_perf_config_data(struct drm_i915_private *i915,
				  struct drm_i915_query_item *query_item,
				  bool use_uuid)
{
	struct drm_i915_query_perf_config __user *user_query_config_ptr =
		u64_to_user_ptr(query_item->data_ptr);
	struct drm_i915_perf_oa_config __user *user_config_ptr =
		u64_to_user_ptr(query_item->data_ptr +
				sizeof(struct drm_i915_query_perf_config));
	struct drm_i915_perf_oa_config user_config;
	struct i915_perf *perf = &i915->perf;
	struct i915_oa_config *oa_config;
	char uuid[UUID_STRING_LEN + 1];
	u64 config_id;
	u32 flags, total_size;
	int ret;

	if (!perf->i915)
		return -ENODEV;

	total_size =
		sizeof(struct drm_i915_query_perf_config) +
		sizeof(struct drm_i915_perf_oa_config);

	/* Size probe. */
	if (query_item->length == 0)
		return total_size;

	if (query_item->length < total_size) {
		DRM_DEBUG("Invalid query config data item size=%u expected=%u\n",
			  query_item->length, total_size);
		return -EINVAL;
	}

	/* Validate once; the __get_user/__copy_* below skip the check. */
	if (!access_ok(user_query_config_ptr, total_size))
		return -EFAULT;

	if (__get_user(flags, &user_query_config_ptr->flags))
		return -EFAULT;

	if (flags != 0)
		return -EINVAL;

	if (use_uuid) {
		struct i915_oa_config *tmp;
		int id;

		/* Local uuid[] must fit the uAPI field plus a NUL. */
		BUILD_BUG_ON(sizeof(user_query_config_ptr->uuid) >= sizeof(uuid));

		memset(&uuid, 0, sizeof(uuid));
		if (__copy_from_user(uuid, user_query_config_ptr->uuid,
				     sizeof(user_query_config_ptr->uuid)))
			return -EFAULT;

		/*
		 * Walk the metrics idr under RCU looking for a matching
		 * UUID; take a reference before dropping the read lock.
		 */
		oa_config = NULL;
		rcu_read_lock();
		idr_for_each_entry(&perf->metrics_idr, tmp, id) {
			if (!strcmp(tmp->uuid, uuid)) {
				oa_config = i915_oa_config_get(tmp);
				break;
			}
		}
		rcu_read_unlock();
	} else {
		if (__get_user(config_id, &user_query_config_ptr->config))
			return -EFAULT;

		/* Returns a referenced config (or NULL). */
		oa_config = i915_perf_get_oa_config(perf, config_id);
	}
	if (!oa_config)
		return -ENOENT;

	if (__copy_from_user(&user_config, user_config_ptr,
			     sizeof(user_config))) {
		ret = -EFAULT;
		goto out;
	}

	/* Validate all three register lists before writing anything. */
	ret = can_copy_perf_config_registers_or_number(user_config.n_boolean_regs,
						       user_config.boolean_regs_ptr,
						       oa_config->b_counter_regs_len);
	if (ret)
		goto out;

	ret = can_copy_perf_config_registers_or_number(user_config.n_flex_regs,
						       user_config.flex_regs_ptr,
						       oa_config->flex_regs_len);
	if (ret)
		goto out;

	ret = can_copy_perf_config_registers_or_number(user_config.n_mux_regs,
						       user_config.mux_regs_ptr,
						       oa_config->mux_regs_len);
	if (ret)
		goto out;

	ret = copy_perf_config_registers_or_number(oa_config->b_counter_regs,
						   oa_config->b_counter_regs_len,
						   user_config.boolean_regs_ptr,
						   &user_config.n_boolean_regs);
	if (ret)
		goto out;

	ret = copy_perf_config_registers_or_number(oa_config->flex_regs,
						   oa_config->flex_regs_len,
						   user_config.flex_regs_ptr,
						   &user_config.n_flex_regs);
	if (ret)
		goto out;

	ret = copy_perf_config_registers_or_number(oa_config->mux_regs,
						   oa_config->mux_regs_len,
						   user_config.mux_regs_ptr,
						   &user_config.n_mux_regs);
	if (ret)
		goto out;

	memcpy(user_config.uuid, oa_config->uuid, sizeof(user_config.uuid));

	/* Write the header back with the register counts filled in. */
	if (__copy_to_user(user_config_ptr, &user_config,
			   sizeof(user_config))) {
		ret = -EFAULT;
		goto out;
	}

	ret = total_size;

out:
	/* Drop the reference taken at lookup, on all paths. */
	i915_oa_config_put(oa_config);
	return ret;
}

/* Byte size of a perf-config list reply holding @count u64 config ids. */
static size_t sizeof_perf_config_list(size_t count)
{
	return sizeof(struct drm_i915_query_perf_config) + sizeof(u64) * count;
}

/*
 * Current reply size for the config list: one entry per idr config plus
 * one (i starts at 1) for the reserved test config.
 */
static size_t sizeof_perf_metrics(struct i915_perf *perf)
{
	struct i915_oa_config *tmp;
	size_t i;
	int id;

	i = 1;
	rcu_read_lock();
	idr_for_each_entry(&perf->metrics_idr, tmp, id)
		i++;
	rcu_read_unlock();

	return sizeof_perf_config_list(i);
}

/*
 * DRM_I915_QUERY_PERF_CONFIG_LIST: copy out the list of OA config ids.
 *
 * Because the idr can grow between sizing the allocation and walking it,
 * the populate step runs in a retry loop: size for n_configs entries,
 * reset the counter (fetch_and_zero), refill while counting everything —
 * and if more entries were counted than allocated (n_configs > alloc),
 * go around again with the larger count.
 *
 * Returns the reply size in bytes or a negative error code.
 */
static int query_perf_config_list(struct drm_i915_private *i915,
				  struct drm_i915_query_item *query_item)
{
	struct drm_i915_query_perf_config __user *user_query_config_ptr =
		u64_to_user_ptr(query_item->data_ptr);
	struct i915_perf *perf = &i915->perf;
	u64 *oa_config_ids = NULL;
	int alloc, n_configs;
	u32 flags;
	int ret;

	if (!perf->i915)
		return -ENODEV;

	/* Size probe. */
	if (query_item->length == 0)
		return sizeof_perf_metrics(perf);

	if (get_user(flags, &user_query_config_ptr->flags))
		return -EFAULT;

	if (flags != 0)
		return -EINVAL;

	n_configs = 1;
	do {
		struct i915_oa_config *tmp;
		u64 *ids;
		int id;

		ids = krealloc(oa_config_ids,
			       n_configs * sizeof(*oa_config_ids),
			       GFP_KERNEL);
		if (!ids)
			return -ENOMEM;

		/* Remember the capacity; restart the count from zero. */
		alloc = fetch_and_zero(&n_configs);

		ids[n_configs++] = 1ull; /* reserved for test_config */
		rcu_read_lock();
		idr_for_each_entry(&perf->metrics_idr, tmp, id) {
			/* Count every entry; store only what fits. */
			if (n_configs < alloc)
				ids[n_configs] = id;
			n_configs++;
		}
		rcu_read_unlock();

		oa_config_ids = ids;
	} while (n_configs > alloc);

	if (query_item->length < sizeof_perf_config_list(n_configs)) {
		DRM_DEBUG("Invalid query config list item size=%u expected=%zu\n",
			  query_item->length,
			  sizeof_perf_config_list(n_configs));
		kfree(oa_config_ids);
		return -EINVAL;
	}

	/* Header's config field carries the number of ids that follow. */
	if (put_user(n_configs, &user_query_config_ptr->config)) {
		kfree(oa_config_ids);
		return -EFAULT;
	}

	/* The id array starts immediately after the header. */
	ret = copy_to_user(user_query_config_ptr + 1,
			   oa_config_ids,
			   n_configs * sizeof(*oa_config_ids));
	kfree(oa_config_ids);
	if (ret)
		return -EFAULT;

	return sizeof_perf_config_list(n_configs);
}

/* DRM_I915_QUERY_PERF_CONFIG: dispatch on the sub-query selected by flags. */
static int query_perf_config(struct drm_i915_private *i915,
			     struct drm_i915_query_item *query_item)
{
	switch (query_item->flags) {
	case DRM_I915_QUERY_PERF_CONFIG_LIST:
		return query_perf_config_list(i915, query_item);
	case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID:
		return query_perf_config_data(i915, query_item, true);
	case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID:
		return query_perf_config_data(i915, query_item, false);
	default:
		return -EINVAL;
	}
}

/* Dispatch table indexed by (query_id - 1); query_id is 1-based uAPI. */
static int (* const i915_query_funcs[])(struct drm_i915_private *dev_priv,
					struct drm_i915_query_item *query_item) = {
	query_topology_info,
	query_engine_info,
	query_perf_config,
};

/*
 * DRM_IOCTL_I915_QUERY entry point: process each drm_i915_query_item in
 * the user-supplied array independently. Each item's result (required/
 * written length, or a negative error) is written back to that item's
 * length field; the ioctl itself only fails on argument/copy errors.
 */
int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_query *args = data;
	struct drm_i915_query_item __user *user_item_ptr =
		u64_to_user_ptr(args->items_ptr);
	u32 i;

	if (args->flags != 0)
		return -EINVAL;

	for (i = 0; i < args->num_items; i++, user_item_ptr++) {
		struct drm_i915_query_item item;
		unsigned long func_idx;
		int ret;

		if (copy_from_user(&item, user_item_ptr, sizeof(item)))
			return -EFAULT;

		/* query_id 0 is invalid; ids are 1-based. */
		if (item.query_id == 0)
			return -EINVAL;

		/* u64 query_id - 1 must fit the index type before use. */
		if (overflows_type(item.query_id - 1, unsigned long))
			return -EINVAL;

		func_idx = item.query_id - 1;

		ret = -EINVAL;
		if (func_idx < ARRAY_SIZE(i915_query_funcs)) {
			/* Clamp under speculation (Spectre v1 hardening). */
			func_idx = array_index_nospec(func_idx,
						      ARRAY_SIZE(i915_query_funcs));
			ret = i915_query_funcs[func_idx](dev_priv, &item);
		}

		/* Only write the length back to userspace if they differ. */
		if (ret != item.length && put_user(ret, &user_item_ptr->length))
			return -EFAULT;
	}

	return 0;
}