/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/nospec.h>

#include "i915_drv.h"
#include "i915_query.h"
#include <uapi/drm/i915_drm.h>

static int copy_query_item(void *query_hdr, size_t query_sz,
			   u32 total_length,
			   struct drm_i915_query_item *query_item)
{
	if (query_item->length == 0)
		return total_length;

	if (query_item->length < total_length)
		return -EINVAL;

	if (copy_from_user(query_hdr, u64_to_user_ptr(query_item->data_ptr),
			   query_sz))
		return -EFAULT;

	if (!access_ok(u64_to_user_ptr(query_item->data_ptr),
		       total_length))
		return -EFAULT;

	return 0;
}

static int query_topology_info(struct drm_i915_private *dev_priv,
			       struct drm_i915_query_item *query_item)
{
	const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	struct drm_i915_query_topology_info topo;
	u32 slice_length, subslice_length, eu_length, total_length;
	int ret;

	if (query_item->flags != 0)
		return -EINVAL;

	if (sseu->max_slices == 0)
		return -ENODEV;

	BUILD_BUG_ON(sizeof(u8) != sizeof(sseu->slice_mask));

	slice_length = sizeof(sseu->slice_mask);
	subslice_length = sseu->max_slices *
		DIV_ROUND_UP(sseu->max_subslices, BITS_PER_BYTE);
	eu_length = sseu->max_slices * sseu->max_subslices *
		DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE);

	total_length = sizeof(topo) + slice_length + subslice_length +
		       eu_length;

	ret = copy_query_item(&topo, sizeof(topo), total_length,
			      query_item);
	if (ret != 0)
		return ret;

	if (topo.flags != 0)
		return -EINVAL;

	memset(&topo, 0, sizeof(topo));
	topo.max_slices = sseu->max_slices;
	topo.max_subslices = sseu->max_subslices;
	topo.max_eus_per_subslice = sseu->max_eus_per_subslice;

	topo.subslice_offset = slice_length;
	topo.subslice_stride = DIV_ROUND_UP(sseu->max_subslices, BITS_PER_BYTE);
	topo.eu_offset = slice_length + subslice_length;
	topo.eu_stride =
		DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE);

	if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr),
			   &topo, sizeof(topo)))
		return -EFAULT;

	if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr + sizeof(topo)),
			   &sseu->slice_mask, slice_length))
		return -EFAULT;

	if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr +
					   sizeof(topo) + slice_length),
			   sseu->subslice_mask, subslice_length))
		return -EFAULT;

	if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr +
					   sizeof(topo) +
					   slice_length + subslice_length),
			   sseu->eu_mask, eu_length))
		return -EFAULT;

	return total_length;
}

static int (* const i915_query_funcs[])(struct drm_i915_private *dev_priv,
					struct drm_i915_query_item *query_item) = {
	query_topology_info,
};

int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_query *args = data;
	struct drm_i915_query_item __user *user_item_ptr =
		u64_to_user_ptr(args->items_ptr);
	u32 i;

	if (args->flags != 0)
		return -EINVAL;

	for (i = 0; i < args->num_items; i++, user_item_ptr++) {
		struct drm_i915_query_item item;
		unsigned long func_idx;
		int ret;

		if (copy_from_user(&item, user_item_ptr, sizeof(item)))
			return -EFAULT;

		if (item.query_id == 0)
			return -EINVAL;

		if (overflows_type(item.query_id - 1, unsigned long))
			return -EINVAL;

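		/*
		 * query_id is 1-based in the uapi; translate it to a 0-based
		 * index into i915_query_funcs and, after the bounds check
		 * below, clamp it with array_index_nospec() so the index
		 * cannot be used speculatively out of bounds (Spectre v1).
		 */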
		func_idx = item.query_id - 1;

		ret = -EINVAL;
		if (func_idx < ARRAY_SIZE(i915_query_funcs)) {
			func_idx = array_index_nospec(func_idx,
						      ARRAY_SIZE(i915_query_funcs));
			ret = i915_query_funcs[func_idx](dev_priv, &item);
		}

		/* Only write the length back to userspace if they differ. */
		if (ret != item.length && put_user(ret, &user_item_ptr->length))
			return -EFAULT;
	}

	return 0;
}