// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Broadcom
 */

/**
 * DOC: VC4 V3D performance monitor module
 *
 * The V3D block provides 16 hardware counters which can count various events.
 */

#include "vc4_drv.h"
#include "vc4_regs.h"

#define VC4_PERFMONID_MIN	1
#define VC4_PERFMONID_MAX	U32_MAX

void vc4_perfmon_get(struct vc4_perfmon *perfmon)
{
	struct vc4_dev *vc4;

	if (!perfmon)
		return;

	vc4 = perfmon->dev;
	if (WARN_ON_ONCE(vc4->is_vc5))
		return;

	refcount_inc(&perfmon->refcnt);
}

void vc4_perfmon_put(struct vc4_perfmon *perfmon)
{
	struct vc4_dev *vc4;

	if (!perfmon)
		return;

	vc4 = perfmon->dev;
	if (WARN_ON_ONCE(vc4->is_vc5))
		return;

	if (refcount_dec_and_test(&perfmon->refcnt))
		kfree(perfmon);
}

void vc4_perfmon_start(struct vc4_dev *vc4, struct vc4_perfmon *perfmon)
{
	unsigned int i;
	u32 mask;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return;

	if (WARN_ON_ONCE(!perfmon || vc4->active_perfmon))
		return;

	for (i = 0; i < perfmon->ncounters; i++)
		V3D_WRITE(V3D_PCTRS(i), perfmon->events[i]);

	mask = GENMASK(perfmon->ncounters - 1, 0);
	V3D_WRITE(V3D_PCTRC, mask);
	V3D_WRITE(V3D_PCTRE, V3D_PCTRE_EN | mask);
	vc4->active_perfmon = perfmon;
}

void vc4_perfmon_stop(struct vc4_dev *vc4, struct vc4_perfmon *perfmon,
		      bool capture)
{
	unsigned int i;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return;

	if (WARN_ON_ONCE(!vc4->active_perfmon ||
			 perfmon != vc4->active_perfmon))
		return;

	if (capture) {
		for (i = 0; i < perfmon->ncounters; i++)
			perfmon->counters[i] += V3D_READ(V3D_PCTR(i));
	}

	V3D_WRITE(V3D_PCTRE, 0);
	vc4->active_perfmon = NULL;
}

struct vc4_perfmon *vc4_perfmon_find(struct vc4_file *vc4file, int id)
{
	struct vc4_dev *vc4 = vc4file->dev;
	struct vc4_perfmon *perfmon;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return NULL;

	mutex_lock(&vc4file->perfmon.lock);
	perfmon = idr_find(&vc4file->perfmon.idr, id);
	vc4_perfmon_get(perfmon);
	mutex_unlock(&vc4file->perfmon.lock);

	return perfmon;
}

void vc4_perfmon_open_file(struct vc4_file *vc4file)
{
	struct vc4_dev *vc4 = vc4file->dev;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return;

	mutex_init(&vc4file->perfmon.lock);
	idr_init_base(&vc4file->perfmon.idr, VC4_PERFMONID_MIN);
	vc4file->dev = vc4;
}

static int vc4_perfmon_idr_del(int id, void *elem, void *data)
{
	struct vc4_perfmon *perfmon = elem;

	vc4_perfmon_put(perfmon);

	return 0;
}

void vc4_perfmon_close_file(struct vc4_file *vc4file)
{
	struct vc4_dev *vc4 = vc4file->dev;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return;

	mutex_lock(&vc4file->perfmon.lock);
	idr_for_each(&vc4file->perfmon.idr, vc4_perfmon_idr_del, NULL);
	idr_destroy(&vc4file->perfmon.idr);
	mutex_unlock(&vc4file->perfmon.lock);
	mutex_destroy(&vc4file->perfmon.lock);
}

int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_file *vc4file = file_priv->driver_priv;
	struct drm_vc4_perfmon_create *req = data;
	struct vc4_perfmon *perfmon;
	unsigned int i;
	int ret;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	if (!vc4->v3d) {
		DRM_DEBUG("Creating perfmon no VC4 V3D probed\n");
		return -ENODEV;
	}

	/* Number of monitored counters cannot exceed HW limits. */
	if (req->ncounters > DRM_VC4_MAX_PERF_COUNTERS ||
	    !req->ncounters)
		return -EINVAL;

	/* Make sure all events are valid. */
	for (i = 0; i < req->ncounters; i++) {
		if (req->events[i] >= VC4_PERFCNT_NUM_EVENTS)
			return -EINVAL;
	}

	perfmon = kzalloc(struct_size(perfmon, counters, req->ncounters),
			  GFP_KERNEL);
	if (!perfmon)
		return -ENOMEM;
	perfmon->dev = vc4;

	for (i = 0; i < req->ncounters; i++)
		perfmon->events[i] = req->events[i];

	perfmon->ncounters = req->ncounters;

	refcount_set(&perfmon->refcnt, 1);

	mutex_lock(&vc4file->perfmon.lock);
	ret = idr_alloc(&vc4file->perfmon.idr, perfmon, VC4_PERFMONID_MIN,
			VC4_PERFMONID_MAX, GFP_KERNEL);
	mutex_unlock(&vc4file->perfmon.lock);

	if (ret < 0) {
		kfree(perfmon);
		return ret;
	}

	req->id = ret;
	return 0;
}

int vc4_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_file *vc4file = file_priv->driver_priv;
	struct drm_vc4_perfmon_destroy *req = data;
	struct vc4_perfmon *perfmon;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	if (!vc4->v3d) {
		DRM_DEBUG("Destroying perfmon no VC4 V3D probed\n");
		return -ENODEV;
	}

	mutex_lock(&vc4file->perfmon.lock);
	perfmon = idr_remove(&vc4file->perfmon.idr, req->id);
	mutex_unlock(&vc4file->perfmon.lock);

	if (!perfmon)
		return -EINVAL;

	vc4_perfmon_put(perfmon);
	return 0;
}

int vc4_perfmon_get_values_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_file *vc4file = file_priv->driver_priv;
	struct drm_vc4_perfmon_get_values *req = data;
	struct vc4_perfmon *perfmon;
	int ret;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	if (!vc4->v3d) {
		DRM_DEBUG("Getting perfmon no VC4 V3D probed\n");
		return -ENODEV;
	}

	mutex_lock(&vc4file->perfmon.lock);
	perfmon = idr_find(&vc4file->perfmon.idr, req->id);
	vc4_perfmon_get(perfmon);
	mutex_unlock(&vc4file->perfmon.lock);

	if (!perfmon)
		return -EINVAL;

	if (copy_to_user(u64_to_user_ptr(req->values_ptr), perfmon->counters,
			 perfmon->ncounters * sizeof(u64)))
		ret = -EFAULT;
	else
		ret = 0;

	vc4_perfmon_put(perfmon);
	return ret;
}
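
/*
 * Illustrative userspace sketch (not part of this driver): roughly how a
 * client might exercise the ioctls above, assuming a libdrm fd on the vc4
 * node. The event selector values and the drm_vc4_submit_cl.perfmonid
 * hookup are assumptions for illustration only; see
 * include/uapi/drm/vc4_drm.h for the authoritative UAPI definitions.
 *
 *	struct drm_vc4_perfmon_create create = {
 *		.ncounters = 2,
 *		.events = { 13, 14 },	// hypothetical event selectors
 *	};
 *	struct drm_vc4_perfmon_get_values get = { 0 };
 *	struct drm_vc4_perfmon_destroy destroy = { 0 };
 *	uint64_t values[2];
 *
 *	if (drmIoctl(fd, DRM_IOCTL_VC4_PERFMON_CREATE, &create))
 *		return -errno;
 *
 *	// Jobs submitted with this perfmon attached (e.g. via
 *	// drm_vc4_submit_cl.perfmonid == create.id) are bracketed by
 *	// vc4_perfmon_start()/vc4_perfmon_stop() above, which accumulate
 *	// the per-job counter deltas into perfmon->counters[].
 *
 *	get.id = create.id;
 *	get.values_ptr = (uintptr_t)values;
 *	if (drmIoctl(fd, DRM_IOCTL_VC4_PERFMON_GET_VALUES, &get))
 *		return -errno;
 *
 *	destroy.id = create.id;
 *	drmIoctl(fd, DRM_IOCTL_VC4_PERFMON_DESTROY, &destroy);
 */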