#if !defined(_AMDGPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _AMDGPU_TRACE_H_

#include <linux/stringify.h>
#include <linux/types.h>
#include <linux/tracepoint.h>

#include <drm/drmP.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM amdgpu
#define TRACE_INCLUDE_FILE amdgpu_trace

TRACE_EVENT(amdgpu_mm_rreg,
	    TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
	    TP_ARGS(did, reg, value),
	    TP_STRUCT__entry(
			     __field(unsigned, did)
			     __field(uint32_t, reg)
			     __field(uint32_t, value)
			     ),
	    TP_fast_assign(
			   __entry->did = did;
			   __entry->reg = reg;
			   __entry->value = value;
			   ),
	    TP_printk("0x%04lx, 0x%04lx, 0x%08lx",
		      (unsigned long)__entry->did,
		      (unsigned long)__entry->reg,
		      (unsigned long)__entry->value)
);

TRACE_EVENT(amdgpu_mm_wreg,
	    TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
	    TP_ARGS(did, reg, value),
	    TP_STRUCT__entry(
			     __field(unsigned, did)
			     __field(uint32_t, reg)
			     __field(uint32_t, value)
			     ),
	    TP_fast_assign(
			   __entry->did = did;
			   __entry->reg = reg;
			   __entry->value = value;
			   ),
	    TP_printk("0x%04lx, 0x%04lx, 0x%08lx",
		      (unsigned long)__entry->did,
		      (unsigned long)__entry->reg,
		      (unsigned long)__entry->value)
);

TRACE_EVENT(amdgpu_bo_create,
	    TP_PROTO(struct amdgpu_bo *bo),
	    TP_ARGS(bo),
	    TP_STRUCT__entry(
			     __field(struct amdgpu_bo *, bo)
			     __field(u32, pages)
			     __field(u32, type)
			     __field(u32, prefer)
			     __field(u32, allow)
			     __field(u32, visible)
			     ),

	    TP_fast_assign(
			   __entry->bo = bo;
			   __entry->pages = bo->tbo.num_pages;
			   __entry->type = bo->tbo.mem.mem_type;
			   __entry->prefer = bo->prefered_domains;
			   __entry->allow = bo->allowed_domains;
			   /* record CPU visibility, not the raw flags word */
			   __entry->visible = !!(bo->flags &
					AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED);
			   ),

	    TP_printk("bo=%p, pages=%u, type=%d, preferred=%d, allowed=%d, visible=%d",
		      __entry->bo, __entry->pages, __entry->type,
		      __entry->prefer, __entry->allow, __entry->visible)
);

TRACE_EVENT(amdgpu_cs,
	    TP_PROTO(struct amdgpu_cs_parser *p, int i),
	    TP_ARGS(p, i),
	    TP_STRUCT__entry(
			     __field(struct amdgpu_bo_list *, bo_list)
			     __field(u32, ring)
			     __field(u32, dw)
			     __field(u32, fences)
			     ),

	    TP_fast_assign(
			   __entry->bo_list = p->bo_list;
			   __entry->ring = p->job->ring->idx;
			   __entry->dw = p->job->ibs[i].length_dw;
			   __entry->fences = amdgpu_fence_count_emitted(p->job->ring);
			   ),
	    TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u",
		      __entry->bo_list, __entry->ring, __entry->dw,
		      __entry->fences)
);

TRACE_EVENT(amdgpu_cs_ioctl,
	    TP_PROTO(struct amdgpu_job *job),
	    TP_ARGS(job),
	    TP_STRUCT__entry(
			     __field(struct amdgpu_device *, adev)
			     __field(struct amd_sched_job *, sched_job)
			     __field(struct amdgpu_ib *, ib)
			     __field(struct fence *, fence)
			     __field(char *, ring_name)
			     __field(u32, num_ibs)
			     ),

	    TP_fast_assign(
			   __entry->adev = job->adev;
			   __entry->sched_job = &job->base;
			   __entry->ib = job->ibs;
			   __entry->fence = &job->base.s_fence->finished;
			   __entry->ring_name = job->ring->name;
			   __entry->num_ibs = job->num_ibs;
			   ),
	    TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u",
		      __entry->adev, __entry->sched_job, __entry->ib,
		      __entry->fence, __entry->ring_name, __entry->num_ibs)
);

TRACE_EVENT(amdgpu_sched_run_job,
	    TP_PROTO(struct amdgpu_job *job),
	    TP_ARGS(job),
	    TP_STRUCT__entry(
			     __field(struct amdgpu_device *, adev)
			     __field(struct amd_sched_job *, sched_job)
			     __field(struct amdgpu_ib *, ib)
			     __field(struct fence *, fence)
			     __field(char *, ring_name)
			     __field(u32, num_ibs)
			     ),

	    TP_fast_assign(
			   __entry->adev = job->adev;
			   __entry->sched_job = &job->base;
			   __entry->ib = job->ibs;
			   __entry->fence = &job->base.s_fence->finished;
			   __entry->ring_name = job->ring->name;
			   __entry->num_ibs = job->num_ibs;
			   ),
	    TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u",
		      __entry->adev, __entry->sched_job, __entry->ib,
		      __entry->fence, __entry->ring_name, __entry->num_ibs)
);
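
/*
 * Each TRACE_EVENT() above expands to a trace_<name>() helper that the
 * driver calls at the instrumented spot. A minimal sketch of a call site
 * for amdgpu_mm_rreg (illustrative only, not the driver's actual
 * register-read path; the function name is hypothetical):
 *
 *	uint32_t example_mm_rreg(struct amdgpu_device *adev, uint32_t reg)
 *	{
 *		uint32_t ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
 *
 *		trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
 *		return ret;
 *	}
 */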

TRACE_EVENT(amdgpu_vm_grab_id,
	    TP_PROTO(struct amdgpu_vm *vm, int ring, struct amdgpu_job *job),
	    TP_ARGS(vm, ring, job),
	    TP_STRUCT__entry(
			     __field(struct amdgpu_vm *, vm)
			     __field(u32, ring)
			     __field(u32, vmid)
			     __field(u64, pd_addr)
			     __field(u32, needs_flush)
			     ),

	    TP_fast_assign(
			   __entry->vm = vm;
			   __entry->ring = ring;
			   __entry->vmid = job->vm_id;
			   __entry->pd_addr = job->vm_pd_addr;
			   __entry->needs_flush = job->vm_needs_flush;
			   ),
	    TP_printk("vm=%p, ring=%u, id=%u, pd_addr=%010Lx, needs_flush=%u",
		      __entry->vm, __entry->ring, __entry->vmid,
		      __entry->pd_addr, __entry->needs_flush)
);

TRACE_EVENT(amdgpu_vm_bo_map,
	    TP_PROTO(struct amdgpu_bo_va *bo_va,
		     struct amdgpu_bo_va_mapping *mapping),
	    TP_ARGS(bo_va, mapping),
	    TP_STRUCT__entry(
			     __field(struct amdgpu_bo *, bo)
			     __field(long, start)
			     __field(long, last)
			     __field(u64, offset)
			     __field(u32, flags)
			     ),

	    TP_fast_assign(
			   __entry->bo = bo_va->bo;
			   __entry->start = mapping->it.start;
			   __entry->last = mapping->it.last;
			   __entry->offset = mapping->offset;
			   __entry->flags = mapping->flags;
			   ),
	    TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%08x",
		      __entry->bo, __entry->start, __entry->last,
		      __entry->offset, __entry->flags)
);

TRACE_EVENT(amdgpu_vm_bo_unmap,
	    TP_PROTO(struct amdgpu_bo_va *bo_va,
		     struct amdgpu_bo_va_mapping *mapping),
	    TP_ARGS(bo_va, mapping),
	    TP_STRUCT__entry(
			     __field(struct amdgpu_bo *, bo)
			     __field(long, start)
			     __field(long, last)
			     __field(u64, offset)
			     __field(u32, flags)
			     ),

	    TP_fast_assign(
			   __entry->bo = bo_va->bo;
			   __entry->start = mapping->it.start;
			   __entry->last = mapping->it.last;
			   __entry->offset = mapping->offset;
			   __entry->flags = mapping->flags;
			   ),
	    TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%08x",
		      __entry->bo, __entry->start, __entry->last,
		      __entry->offset, __entry->flags)
);

DECLARE_EVENT_CLASS(amdgpu_vm_mapping,
	    TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
	    TP_ARGS(mapping),
	    TP_STRUCT__entry(
			     __field(u64, soffset)
			     __field(u64, eoffset)
			     __field(u32, flags)
			     ),

	    TP_fast_assign(
			   __entry->soffset = mapping->it.start;
			   __entry->eoffset = mapping->it.last + 1;
			   __entry->flags = mapping->flags;
			   ),
	    TP_printk("soffs=%010llx, eoffs=%010llx, flags=%08x",
		      __entry->soffset, __entry->eoffset, __entry->flags)
);

DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_update,
	    TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
	    TP_ARGS(mapping)
);

DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_mapping,
	    TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
	    TP_ARGS(mapping)
);

TRACE_EVENT(amdgpu_vm_set_page,
	    TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
		     uint32_t incr, uint32_t flags),
	    TP_ARGS(pe, addr, count, incr, flags),
	    TP_STRUCT__entry(
			     __field(u64, pe)
			     __field(u64, addr)
			     __field(u32, count)
			     __field(u32, incr)
			     __field(u32, flags)
			     ),

	    TP_fast_assign(
			   __entry->pe = pe;
			   __entry->addr = addr;
			   __entry->count = count;
			   __entry->incr = incr;
			   __entry->flags = flags;
			   ),
	    TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%08x, count=%u",
		      __entry->pe, __entry->addr, __entry->incr,
		      __entry->flags, __entry->count)
);
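
/*
 * DECLARE_EVENT_CLASS()/DEFINE_EVENT() above define the record layout and
 * format string once and stamp out named events that share them. Adding a
 * further event to the amdgpu_vm_mapping class only takes one more
 * DEFINE_EVENT(); a sketch with a hypothetical event name:
 *
 *	DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_example,
 *		     TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
 *		     TP_ARGS(mapping)
 *	);
 */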

TRACE_EVENT(amdgpu_vm_flush,
	    TP_PROTO(uint64_t pd_addr, unsigned ring, unsigned id),
	    TP_ARGS(pd_addr, ring, id),
	    TP_STRUCT__entry(
			     __field(u64, pd_addr)
			     __field(u32, ring)
			     __field(u32, id)
			     ),

	    TP_fast_assign(
			   __entry->pd_addr = pd_addr;
			   __entry->ring = ring;
			   __entry->id = id;
			   ),
	    TP_printk("ring=%u, id=%u, pd_addr=%010Lx",
		      __entry->ring, __entry->id, __entry->pd_addr)
);

TRACE_EVENT(amdgpu_bo_list_set,
	    TP_PROTO(struct amdgpu_bo_list *list, struct amdgpu_bo *bo),
	    TP_ARGS(list, bo),
	    TP_STRUCT__entry(
			     __field(struct amdgpu_bo_list *, list)
			     __field(struct amdgpu_bo *, bo)
			     __field(u64, bo_size)
			     ),

	    TP_fast_assign(
			   __entry->list = list;
			   __entry->bo = bo;
			   __entry->bo_size = amdgpu_bo_size(bo);
			   ),
	    TP_printk("list=%p, bo=%p, bo_size=%Ld",
		      __entry->list,
		      __entry->bo,
		      __entry->bo_size)
);

TRACE_EVENT(amdgpu_cs_bo_status,
	    TP_PROTO(uint64_t total_bo, uint64_t total_size),
	    TP_ARGS(total_bo, total_size),
	    TP_STRUCT__entry(
			     __field(u64, total_bo)
			     __field(u64, total_size)
			     ),

	    TP_fast_assign(
			   __entry->total_bo = total_bo;
			   __entry->total_size = total_size;
			   ),
	    /* total_bo is the buffer count; keep the labels in argument order */
	    TP_printk("total bo count = %Ld, total bo size = %Ld",
		      __entry->total_bo, __entry->total_size)
);

TRACE_EVENT(amdgpu_ttm_bo_move,
	    TP_PROTO(struct amdgpu_bo *bo, uint32_t new_placement, uint32_t old_placement),
	    TP_ARGS(bo, new_placement, old_placement),
	    TP_STRUCT__entry(
			     __field(struct amdgpu_bo *, bo)
			     __field(u64, bo_size)
			     __field(u32, new_placement)
			     __field(u32, old_placement)
			     ),

	    TP_fast_assign(
			   __entry->bo = bo;
			   __entry->bo_size = amdgpu_bo_size(bo);
			   __entry->new_placement = new_placement;
			   __entry->old_placement = old_placement;
			   ),
	    TP_printk("bo=%p, from=%d, to=%d, size=%Ld",
		      __entry->bo, __entry->old_placement,
		      __entry->new_placement, __entry->bo_size)
);

#endif

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#include <trace/define_trace.h>
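
/*
 * Like every ftrace event header, this file must be included with
 * CREATE_TRACE_POINTS defined from exactly one translation unit so that
 * <trace/define_trace.h> emits the tracepoint definitions; all other
 * includers only get the declarations. A minimal sketch of such a file
 * (the file layout is illustrative):
 *
 *	#include "amdgpu.h"
 *
 *	#define CREATE_TRACE_POINTS
 *	#include "amdgpu_trace.h"
 *
 * The events can then be toggled at runtime through tracefs, e.g. via
 * /sys/kernel/debug/tracing/events/amdgpu/enable.
 */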