1 /* 2 * Copyright 2018 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 
 *
 * Authors: AMD
 *
 */

/* Tracepoint definitions for the amdgpu display manager (DM). */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM amdgpu_dm

#if !defined(_AMDGPU_DM_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _AMDGPU_DM_TRACE_H_

#include <linux/tracepoint.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_plane.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_encoder.h>
#include <drm/drm_atomic.h>

#include "dc/inc/core_types.h"

/*
 * Event class for DC register accesses.  @count is a running access
 * counter owned by the caller; emitting the event increments it as a
 * side effect so rreg/wreg statistics can be gathered cheaply.
 */
DECLARE_EVENT_CLASS(amdgpu_dc_reg_template,
	TP_PROTO(unsigned long *count, uint32_t reg, uint32_t value),
	TP_ARGS(count, reg, value),

	TP_STRUCT__entry(
		__field(uint32_t, reg)
		__field(uint32_t, value)
	),

	TP_fast_assign(
		__entry->reg = reg;
		__entry->value = value;
		/* Bump the caller's access counter as a side effect. */
		*count = *count + 1;
	),

	TP_printk("reg=0x%08lx, value=0x%08lx",
		(unsigned long)__entry->reg,
		(unsigned long)__entry->value)
);

/* DC register read. */
DEFINE_EVENT(amdgpu_dc_reg_template, amdgpu_dc_rreg,
	TP_PROTO(unsigned long *count, uint32_t reg, uint32_t value),
	TP_ARGS(count, reg, value));

/* DC register write. */
DEFINE_EVENT(amdgpu_dc_reg_template, amdgpu_dc_wreg,
	TP_PROTO(unsigned long *count, uint32_t reg, uint32_t value),
	TP_ARGS(count, reg, value));

/*
 * Register-access statistics at @func:@line: logs total read/write
 * counts plus the deltas since the previous event, then stores the
 * current totals back through *last_read/*last_write so the next
 * event reports deltas relative to this one.
 */
TRACE_EVENT(amdgpu_dc_performance,
	TP_PROTO(unsigned long read_count, unsigned long write_count,
		unsigned long *last_read, unsigned long *last_write,
		const char *func, unsigned int line),
	TP_ARGS(read_count, write_count, last_read, last_write, func, line),
	TP_STRUCT__entry(
		__field(uint32_t, reads)
		__field(uint32_t, writes)
		__field(uint32_t, read_delta)
		__field(uint32_t, write_delta)
		__string(func, func)
		__field(uint32_t, line)
	),
	TP_fast_assign(
		__entry->reads = read_count;
		__entry->writes = write_count;
		__entry->read_delta = read_count - *last_read;
		__entry->write_delta = write_count - *last_write;
		__assign_str(func, func);
		__entry->line = line;
		/* Remember current totals for the next event's deltas. */
		*last_read = read_count;
		*last_write = write_count;
	),
	TP_printk("%s:%d reads=%08ld (%08ld total), writes=%08ld (%08ld total)",
		__get_str(func), __entry->line,
		(unsigned long)__entry->read_delta,
		(unsigned long)__entry->reads,
		(unsigned long)__entry->write_delta,
		(unsigned long)__entry->writes)
);

/* Snapshot of a drm_connector_state taken during atomic check. */
TRACE_EVENT(amdgpu_dm_connector_atomic_check,
	TP_PROTO(const struct drm_connector_state *state),
	TP_ARGS(state),

	TP_STRUCT__entry(
		__field(uint32_t, conn_id)
		__field(const struct drm_connector_state *, conn_state)
		__field(const struct drm_atomic_state *, state)
		__field(const struct drm_crtc_commit *, commit)
		__field(uint32_t, crtc_id)
		__field(uint32_t, best_encoder_id)
		__field(enum drm_link_status, link_status)
		__field(bool, self_refresh_aware)
		__field(enum hdmi_picture_aspect, picture_aspect_ratio)
		__field(unsigned int, content_type)
		__field(unsigned int, hdcp_content_type)
		__field(unsigned int, content_protection)
		__field(unsigned int, scaling_mode)
		__field(u32, colorspace)
		__field(u8, max_requested_bpc)
		__field(u8, max_bpc)
	),

	TP_fast_assign(
		__entry->conn_id = state->connector->base.id;
		__entry->conn_state = state;
		__entry->state = state->state;
		__entry->commit = state->commit;
		/* id 0 means "no CRTC / no encoder currently bound". */
		__entry->crtc_id = state->crtc ? state->crtc->base.id : 0;
		__entry->best_encoder_id = state->best_encoder ?
			state->best_encoder->base.id : 0;
		__entry->link_status = state->link_status;
		__entry->self_refresh_aware = state->self_refresh_aware;
		__entry->picture_aspect_ratio = state->picture_aspect_ratio;
		__entry->content_type = state->content_type;
		__entry->hdcp_content_type = state->hdcp_content_type;
		__entry->content_protection = state->content_protection;
		__entry->scaling_mode = state->scaling_mode;
		__entry->colorspace = state->colorspace;
		__entry->max_requested_bpc = state->max_requested_bpc;
		__entry->max_bpc = state->max_bpc;
	),

	TP_printk("conn_id=%u conn_state=%p state=%p commit=%p crtc_id=%u "
		  "best_encoder_id=%u link_status=%d self_refresh_aware=%d "
		  "picture_aspect_ratio=%d content_type=%u "
		  "hdcp_content_type=%u content_protection=%u scaling_mode=%u "
		  "colorspace=%u max_requested_bpc=%u max_bpc=%u",
		  __entry->conn_id, __entry->conn_state, __entry->state,
		  __entry->commit, __entry->crtc_id, __entry->best_encoder_id,
		  __entry->link_status, __entry->self_refresh_aware,
		  __entry->picture_aspect_ratio, __entry->content_type,
		  __entry->hdcp_content_type, __entry->content_protection,
		  __entry->scaling_mode, __entry->colorspace,
		  __entry->max_requested_bpc, __entry->max_bpc)
);

/* Snapshot of a drm_crtc_state taken during atomic check. */
TRACE_EVENT(amdgpu_dm_crtc_atomic_check,
	TP_PROTO(const struct drm_crtc_state *state),
	TP_ARGS(state),

	TP_STRUCT__entry(
		__field(const struct drm_atomic_state *, state)
		__field(const struct drm_crtc_state *, crtc_state)
		__field(const struct drm_crtc_commit *, commit)
		__field(uint32_t, crtc_id)
		__field(bool, enable)
		__field(bool, active)
		__field(bool, planes_changed)
		__field(bool, mode_changed)
		__field(bool, active_changed)
		__field(bool, connectors_changed)
		__field(bool, zpos_changed)
		__field(bool, color_mgmt_changed)
		__field(bool, no_vblank)
		__field(bool, async_flip)
		__field(bool, vrr_enabled)
		__field(bool, self_refresh_active)
		__field(u32, plane_mask)
		__field(u32, connector_mask)
		__field(u32, encoder_mask)
	),

	TP_fast_assign(
		__entry->state = state->state;
		__entry->crtc_state = state;
		/* Unlike the connector/plane events, a CRTC state always has a CRTC. */
		__entry->crtc_id = state->crtc->base.id;
		__entry->commit = state->commit;
		__entry->enable = state->enable;
		__entry->active = state->active;
		__entry->planes_changed = state->planes_changed;
		__entry->mode_changed = state->mode_changed;
		__entry->active_changed = state->active_changed;
		__entry->connectors_changed = state->connectors_changed;
		__entry->zpos_changed = state->zpos_changed;
		__entry->color_mgmt_changed = state->color_mgmt_changed;
		__entry->no_vblank = state->no_vblank;
		__entry->async_flip = state->async_flip;
		__entry->vrr_enabled = state->vrr_enabled;
		__entry->self_refresh_active = state->self_refresh_active;
		__entry->plane_mask = state->plane_mask;
		__entry->connector_mask = state->connector_mask;
		__entry->encoder_mask = state->encoder_mask;
	),

	TP_printk("crtc_id=%u crtc_state=%p state=%p commit=%p changed("
		  "planes=%d mode=%d active=%d conn=%d zpos=%d color_mgmt=%d) "
		  "state(enable=%d active=%d async_flip=%d vrr_enabled=%d "
		  "self_refresh_active=%d no_vblank=%d) mask(plane=%x conn=%x "
		  "enc=%x)",
		  __entry->crtc_id, __entry->crtc_state, __entry->state,
		  __entry->commit, __entry->planes_changed,
		  __entry->mode_changed, __entry->active_changed,
		  __entry->connectors_changed, __entry->zpos_changed,
		  __entry->color_mgmt_changed, __entry->enable, __entry->active,
		  __entry->async_flip, __entry->vrr_enabled,
		  __entry->self_refresh_active, __entry->no_vblank,
		  __entry->plane_mask, __entry->connector_mask,
		  __entry->encoder_mask)
);

/*
 * Event class for drm_plane_state snapshots; shared by the plane
 * atomic-check and cursor-update events defined below.
 */
DECLARE_EVENT_CLASS(amdgpu_dm_plane_state_template,
	TP_PROTO(const struct drm_plane_state *state),
	TP_ARGS(state),
	TP_STRUCT__entry(
		__field(uint32_t, plane_id)
		__field(enum
			drm_plane_type, plane_type)
		__field(const struct drm_plane_state *, plane_state)
		__field(const struct drm_atomic_state *, state)
		__field(uint32_t, crtc_id)
		__field(uint32_t, fb_id)
		__field(uint32_t, fb_format)
		__field(uint8_t, fb_planes)
		__field(uint64_t, fb_modifier)
		__field(const struct dma_fence *, fence)
		__field(int32_t, crtc_x)
		__field(int32_t, crtc_y)
		__field(uint32_t, crtc_w)
		__field(uint32_t, crtc_h)
		__field(uint32_t, src_x)
		__field(uint32_t, src_y)
		__field(uint32_t, src_w)
		__field(uint32_t, src_h)
		__field(u32, alpha)
		__field(uint32_t, pixel_blend_mode)
		__field(unsigned int, rotation)
		__field(unsigned int, zpos)
		__field(unsigned int, normalized_zpos)
		__field(enum drm_color_encoding, color_encoding)
		__field(enum drm_color_range, color_range)
		__field(bool, visible)
	),

	TP_fast_assign(
		__entry->plane_id = state->plane->base.id;
		__entry->plane_type = state->plane->type;
		__entry->plane_state = state;
		__entry->state = state->state;
		/* 0 means "no CRTC bound" / "no framebuffer attached". */
		__entry->crtc_id = state->crtc ? state->crtc->base.id : 0;
		__entry->fb_id = state->fb ? state->fb->base.id : 0;
		__entry->fb_format = state->fb ? state->fb->format->format : 0;
		__entry->fb_planes = state->fb ? state->fb->format->num_planes : 0;
		__entry->fb_modifier = state->fb ?
			state->fb->modifier : 0;
		__entry->fence = state->fence;
		__entry->crtc_x = state->crtc_x;
		__entry->crtc_y = state->crtc_y;
		__entry->crtc_w = state->crtc_w;
		__entry->crtc_h = state->crtc_h;
		/* src_* are 16.16 fixed point; shift down to whole pixels. */
		__entry->src_x = state->src_x >> 16;
		__entry->src_y = state->src_y >> 16;
		__entry->src_w = state->src_w >> 16;
		__entry->src_h = state->src_h >> 16;
		__entry->alpha = state->alpha;
		__entry->pixel_blend_mode = state->pixel_blend_mode;
		__entry->rotation = state->rotation;
		__entry->zpos = state->zpos;
		__entry->normalized_zpos = state->normalized_zpos;
		__entry->color_encoding = state->color_encoding;
		__entry->color_range = state->color_range;
		__entry->visible = state->visible;
	),

	TP_printk("plane_id=%u plane_type=%d plane_state=%p state=%p "
		  "crtc_id=%u fb(id=%u fmt=%c%c%c%c planes=%u mod=%llu) "
		  "fence=%p crtc_x=%d crtc_y=%d crtc_w=%u crtc_h=%u "
		  "src_x=%u src_y=%u src_w=%u src_h=%u alpha=%u "
		  "pixel_blend_mode=%u rotation=%u zpos=%u "
		  "normalized_zpos=%u color_encoding=%d color_range=%d "
		  "visible=%d",
		  __entry->plane_id, __entry->plane_type, __entry->plane_state,
		  __entry->state, __entry->crtc_id, __entry->fb_id,
		  /*
		   * Decode the fourcc byte by byte; when fb_format is 0 (no
		   * fb) each fallback character spells out "NONE" instead.
		   */
		  (__entry->fb_format & 0xff) ? (__entry->fb_format & 0xff) : 'N',
		  ((__entry->fb_format >> 8) & 0xff) ? ((__entry->fb_format >> 8) & 0xff) : 'O',
		  ((__entry->fb_format >> 16) & 0xff) ? ((__entry->fb_format >> 16) & 0xff) : 'N',
		  ((__entry->fb_format >> 24) & 0x7f) ?
((__entry->fb_format >> 24) & 0x7f) : 'E', 297 __entry->fb_planes, 298 __entry->fb_modifier, __entry->fence, __entry->crtc_x, 299 __entry->crtc_y, __entry->crtc_w, __entry->crtc_h, 300 __entry->src_x, __entry->src_y, __entry->src_w, __entry->src_h, 301 __entry->alpha, __entry->pixel_blend_mode, __entry->rotation, 302 __entry->zpos, __entry->normalized_zpos, 303 __entry->color_encoding, __entry->color_range, 304 __entry->visible) 305 ); 306 307 DEFINE_EVENT(amdgpu_dm_plane_state_template, amdgpu_dm_plane_atomic_check, 308 TP_PROTO(const struct drm_plane_state *state), 309 TP_ARGS(state)); 310 311 DEFINE_EVENT(amdgpu_dm_plane_state_template, amdgpu_dm_atomic_update_cursor, 312 TP_PROTO(const struct drm_plane_state *state), 313 TP_ARGS(state)); 314 315 TRACE_EVENT(amdgpu_dm_atomic_state_template, 316 TP_PROTO(const struct drm_atomic_state *state), 317 TP_ARGS(state), 318 319 TP_STRUCT__entry( 320 __field(const struct drm_atomic_state *, state) 321 __field(bool, allow_modeset) 322 __field(bool, legacy_cursor_update) 323 __field(bool, async_update) 324 __field(bool, duplicated) 325 __field(int, num_connector) 326 __field(int, num_private_objs) 327 ), 328 329 TP_fast_assign( 330 __entry->state = state; 331 __entry->allow_modeset = state->allow_modeset; 332 __entry->legacy_cursor_update = state->legacy_cursor_update; 333 __entry->async_update = state->async_update; 334 __entry->duplicated = state->duplicated; 335 __entry->num_connector = state->num_connector; 336 __entry->num_private_objs = state->num_private_objs; 337 ), 338 339 TP_printk("state=%p allow_modeset=%d legacy_cursor_update=%d " 340 "async_update=%d duplicated=%d num_connector=%d " 341 "num_private_objs=%d", 342 __entry->state, __entry->allow_modeset, __entry->legacy_cursor_update, 343 __entry->async_update, __entry->duplicated, __entry->num_connector, 344 __entry->num_private_objs) 345 ); 346 347 DEFINE_EVENT(amdgpu_dm_atomic_state_template, amdgpu_dm_atomic_commit_tail_begin, 348 TP_PROTO(const struct 
		drm_atomic_state *state),
	TP_ARGS(state));

DEFINE_EVENT(amdgpu_dm_atomic_state_template, amdgpu_dm_atomic_commit_tail_finish,
	TP_PROTO(const struct drm_atomic_state *state),
	TP_ARGS(state));

DEFINE_EVENT(amdgpu_dm_atomic_state_template, amdgpu_dm_atomic_check_begin,
	TP_PROTO(const struct drm_atomic_state *state),
	TP_ARGS(state));

/* End of atomic check: records the state pointer and the check result @res. */
TRACE_EVENT(amdgpu_dm_atomic_check_finish,
	TP_PROTO(const struct drm_atomic_state *state, int res),
	TP_ARGS(state, res),

	TP_STRUCT__entry(
		__field(const struct drm_atomic_state *, state)
		__field(int, res)
		__field(bool, async_update)
		__field(bool, allow_modeset)
	),

	TP_fast_assign(
		__entry->state = state;
		__entry->res = res;
		__entry->async_update = state->async_update;
		__entry->allow_modeset = state->allow_modeset;
	),

	TP_printk("state=%p res=%d async_update=%d allow_modeset=%d",
		  __entry->state, __entry->res,
		  __entry->async_update, __entry->allow_modeset)
);

/*
 * Per-pipe DC state: stream timing, plane src/dst/clip rectangles and
 * the scaler's recout/viewport, plus surface format information.
 */
TRACE_EVENT(amdgpu_dm_dc_pipe_state,
	TP_PROTO(int pipe_idx, const struct dc_plane_state *plane_state,
		 const struct dc_stream_state *stream,
		 const struct plane_resource *plane_res,
		 int update_flags),
	TP_ARGS(pipe_idx, plane_state, stream, plane_res, update_flags),

	TP_STRUCT__entry(
		__field(int, pipe_idx)
		__field(const void *, stream)
		__field(int, stream_w)
		__field(int, stream_h)
		__field(int, dst_x)
		__field(int, dst_y)
		__field(int, dst_w)
		__field(int, dst_h)
		__field(int, src_x)
		__field(int, src_y)
		__field(int, src_w)
		__field(int, src_h)
		__field(int, clip_x)
		__field(int, clip_y)
		__field(int, clip_w)
		__field(int, clip_h)
		__field(int, recout_x)
		__field(int, recout_y)
		__field(int, recout_w)
		__field(int, recout_h)
		__field(int, viewport_x)
		__field(int, viewport_y)
		__field(int, viewport_w)
		__field(int, viewport_h)
		__field(int, flip_immediate)
		__field(int, surface_pitch)
		__field(int, format)
		__field(int, swizzle)
		__field(unsigned int, update_flags)
	),

	TP_fast_assign(
		__entry->pipe_idx = pipe_idx;
		__entry->stream = stream;
		/* Stream size = addressable portion of the timing. */
		__entry->stream_w = stream->timing.h_addressable;
		__entry->stream_h = stream->timing.v_addressable;
		__entry->dst_x = plane_state->dst_rect.x;
		__entry->dst_y = plane_state->dst_rect.y;
		__entry->dst_w = plane_state->dst_rect.width;
		__entry->dst_h = plane_state->dst_rect.height;
		__entry->src_x = plane_state->src_rect.x;
		__entry->src_y = plane_state->src_rect.y;
		__entry->src_w = plane_state->src_rect.width;
		__entry->src_h = plane_state->src_rect.height;
		__entry->clip_x = plane_state->clip_rect.x;
		__entry->clip_y = plane_state->clip_rect.y;
		__entry->clip_w = plane_state->clip_rect.width;
		__entry->clip_h = plane_state->clip_rect.height;
		__entry->recout_x = plane_res->scl_data.recout.x;
		__entry->recout_y = plane_res->scl_data.recout.y;
		__entry->recout_w = plane_res->scl_data.recout.width;
		__entry->recout_h = plane_res->scl_data.recout.height;
		__entry->viewport_x = plane_res->scl_data.viewport.x;
		__entry->viewport_y = plane_res->scl_data.viewport.y;
		__entry->viewport_w = plane_res->scl_data.viewport.width;
		__entry->viewport_h = plane_res->scl_data.viewport.height;
		__entry->flip_immediate = plane_state->flip_immediate;
		__entry->surface_pitch = plane_state->plane_size.surface_pitch;
		__entry->format = plane_state->format;
		__entry->swizzle = plane_state->tiling_info.gfx9.swizzle;
		__entry->update_flags = update_flags;
	),
	TP_printk("pipe_idx=%d stream=%p rct(%d,%d) dst=(%d,%d,%d,%d) "
		  "src=(%d,%d,%d,%d) clip=(%d,%d,%d,%d) recout=(%d,%d,%d,%d) "
		  "viewport=(%d,%d,%d,%d) flip_immediate=%d pitch=%d "
		  "format=%d swizzle=%d update_flags=%x",
		  __entry->pipe_idx,
		  __entry->stream,
		  __entry->stream_w,
		  __entry->stream_h,
460 __entry->dst_x, 461 __entry->dst_y, 462 __entry->dst_w, 463 __entry->dst_h, 464 __entry->src_x, 465 __entry->src_y, 466 __entry->src_w, 467 __entry->src_h, 468 __entry->clip_x, 469 __entry->clip_y, 470 __entry->clip_w, 471 __entry->clip_h, 472 __entry->recout_x, 473 __entry->recout_y, 474 __entry->recout_w, 475 __entry->recout_h, 476 __entry->viewport_x, 477 __entry->viewport_y, 478 __entry->viewport_w, 479 __entry->viewport_h, 480 __entry->flip_immediate, 481 __entry->surface_pitch, 482 __entry->format, 483 __entry->swizzle, 484 __entry->update_flags 485 ) 486 ); 487 488 TRACE_EVENT(amdgpu_dm_dc_clocks_state, 489 TP_PROTO(const struct dc_clocks *clk), 490 TP_ARGS(clk), 491 492 TP_STRUCT__entry( 493 __field(int, dispclk_khz) 494 __field(int, dppclk_khz) 495 __field(int, disp_dpp_voltage_level_khz) 496 __field(int, dcfclk_khz) 497 __field(int, socclk_khz) 498 __field(int, dcfclk_deep_sleep_khz) 499 __field(int, fclk_khz) 500 __field(int, phyclk_khz) 501 __field(int, dramclk_khz) 502 __field(int, p_state_change_support) 503 __field(int, prev_p_state_change_support) 504 __field(int, pwr_state) 505 __field(int, dtm_level) 506 __field(int, max_supported_dppclk_khz) 507 __field(int, max_supported_dispclk_khz) 508 __field(int, bw_dppclk_khz) 509 __field(int, bw_dispclk_khz) 510 ), 511 TP_fast_assign( 512 __entry->dispclk_khz = clk->dispclk_khz; 513 __entry->dppclk_khz = clk->dppclk_khz; 514 __entry->dcfclk_khz = clk->dcfclk_khz; 515 __entry->socclk_khz = clk->socclk_khz; 516 __entry->dcfclk_deep_sleep_khz = clk->dcfclk_deep_sleep_khz; 517 __entry->fclk_khz = clk->fclk_khz; 518 __entry->phyclk_khz = clk->phyclk_khz; 519 __entry->dramclk_khz = clk->dramclk_khz; 520 __entry->p_state_change_support = clk->p_state_change_support; 521 __entry->prev_p_state_change_support = clk->prev_p_state_change_support; 522 __entry->pwr_state = clk->pwr_state; 523 __entry->prev_p_state_change_support = clk->prev_p_state_change_support; 524 __entry->dtm_level = clk->dtm_level; 525 
__entry->max_supported_dppclk_khz = clk->max_supported_dppclk_khz; 526 __entry->max_supported_dispclk_khz = clk->max_supported_dispclk_khz; 527 __entry->bw_dppclk_khz = clk->bw_dppclk_khz; 528 __entry->bw_dispclk_khz = clk->bw_dispclk_khz; 529 ), 530 TP_printk("dispclk_khz=%d dppclk_khz=%d disp_dpp_voltage_level_khz=%d dcfclk_khz=%d socclk_khz=%d " 531 "dcfclk_deep_sleep_khz=%d fclk_khz=%d phyclk_khz=%d " 532 "dramclk_khz=%d p_state_change_support=%d " 533 "prev_p_state_change_support=%d pwr_state=%d prev_p_state_change_support=%d " 534 "dtm_level=%d max_supported_dppclk_khz=%d max_supported_dispclk_khz=%d " 535 "bw_dppclk_khz=%d bw_dispclk_khz=%d ", 536 __entry->dispclk_khz, 537 __entry->dppclk_khz, 538 __entry->disp_dpp_voltage_level_khz, 539 __entry->dcfclk_khz, 540 __entry->socclk_khz, 541 __entry->dcfclk_deep_sleep_khz, 542 __entry->fclk_khz, 543 __entry->phyclk_khz, 544 __entry->dramclk_khz, 545 __entry->p_state_change_support, 546 __entry->prev_p_state_change_support, 547 __entry->pwr_state, 548 __entry->prev_p_state_change_support, 549 __entry->dtm_level, 550 __entry->max_supported_dppclk_khz, 551 __entry->max_supported_dispclk_khz, 552 __entry->bw_dppclk_khz, 553 __entry->bw_dispclk_khz 554 ) 555 ); 556 557 TRACE_EVENT(amdgpu_dm_dce_clocks_state, 558 TP_PROTO(const struct dce_bw_output *clk), 559 TP_ARGS(clk), 560 561 TP_STRUCT__entry( 562 __field(bool, cpuc_state_change_enable) 563 __field(bool, cpup_state_change_enable) 564 __field(bool, stutter_mode_enable) 565 __field(bool, nbp_state_change_enable) 566 __field(bool, all_displays_in_sync) 567 __field(int, sclk_khz) 568 __field(int, sclk_deep_sleep_khz) 569 __field(int, yclk_khz) 570 __field(int, dispclk_khz) 571 __field(int, blackout_recovery_time_us) 572 ), 573 TP_fast_assign( 574 __entry->cpuc_state_change_enable = clk->cpuc_state_change_enable; 575 __entry->cpup_state_change_enable = clk->cpup_state_change_enable; 576 __entry->stutter_mode_enable = clk->stutter_mode_enable; 577 
__entry->nbp_state_change_enable = clk->nbp_state_change_enable; 578 __entry->all_displays_in_sync = clk->all_displays_in_sync; 579 __entry->sclk_khz = clk->sclk_khz; 580 __entry->sclk_deep_sleep_khz = clk->sclk_deep_sleep_khz; 581 __entry->yclk_khz = clk->yclk_khz; 582 __entry->dispclk_khz = clk->dispclk_khz; 583 __entry->blackout_recovery_time_us = clk->blackout_recovery_time_us; 584 ), 585 TP_printk("cpuc_state_change_enable=%d cpup_state_change_enable=%d stutter_mode_enable=%d " 586 "nbp_state_change_enable=%d all_displays_in_sync=%d sclk_khz=%d sclk_deep_sleep_khz=%d " 587 "yclk_khz=%d dispclk_khz=%d blackout_recovery_time_us=%d", 588 __entry->cpuc_state_change_enable, 589 __entry->cpup_state_change_enable, 590 __entry->stutter_mode_enable, 591 __entry->nbp_state_change_enable, 592 __entry->all_displays_in_sync, 593 __entry->sclk_khz, 594 __entry->sclk_deep_sleep_khz, 595 __entry->yclk_khz, 596 __entry->dispclk_khz, 597 __entry->blackout_recovery_time_us 598 ) 599 ); 600 601 TRACE_EVENT(amdgpu_dmub_trace_high_irq, 602 TP_PROTO(uint32_t trace_code, uint32_t tick_count, uint32_t param0, 603 uint32_t param1), 604 TP_ARGS(trace_code, tick_count, param0, param1), 605 TP_STRUCT__entry( 606 __field(uint32_t, trace_code) 607 __field(uint32_t, tick_count) 608 __field(uint32_t, param0) 609 __field(uint32_t, param1) 610 ), 611 TP_fast_assign( 612 __entry->trace_code = trace_code; 613 __entry->tick_count = tick_count; 614 __entry->param0 = param0; 615 __entry->param1 = param1; 616 ), 617 TP_printk("trace_code=%u tick_count=%u param0=%u param1=%u", 618 __entry->trace_code, __entry->tick_count, 619 __entry->param0, __entry->param1) 620 ); 621 622 TRACE_EVENT(amdgpu_refresh_rate_track, 623 TP_PROTO(int crtc_index, ktime_t refresh_rate_ns, uint32_t refresh_rate_hz), 624 TP_ARGS(crtc_index, refresh_rate_ns, refresh_rate_hz), 625 TP_STRUCT__entry( 626 __field(int, crtc_index) 627 __field(ktime_t, refresh_rate_ns) 628 __field(uint32_t, refresh_rate_hz) 629 ), 630 TP_fast_assign( 
631 __entry->crtc_index = crtc_index; 632 __entry->refresh_rate_ns = refresh_rate_ns; 633 __entry->refresh_rate_hz = refresh_rate_hz; 634 ), 635 TP_printk("crtc_index=%d refresh_rate=%dHz (%lld)", 636 __entry->crtc_index, 637 __entry->refresh_rate_hz, 638 __entry->refresh_rate_ns) 639 ); 640 641 TRACE_EVENT(dcn_fpu, 642 TP_PROTO(bool begin, const char *function, const int line, const int recursion_depth), 643 TP_ARGS(begin, function, line, recursion_depth), 644 645 TP_STRUCT__entry( 646 __field(bool, begin) 647 __field(const char *, function) 648 __field(int, line) 649 __field(int, recursion_depth) 650 ), 651 TP_fast_assign( 652 __entry->begin = begin; 653 __entry->function = function; 654 __entry->line = line; 655 __entry->recursion_depth = recursion_depth; 656 ), 657 TP_printk("%s: recursion_depth: %d: %s()+%d:", 658 __entry->begin ? "begin" : "end", 659 __entry->recursion_depth, 660 __entry->function, 661 __entry->line 662 ) 663 ); 664 665 #endif /* _AMDGPU_DM_TRACE_H_ */ 666 667 #undef TRACE_INCLUDE_PATH 668 #define TRACE_INCLUDE_PATH . 669 #define TRACE_INCLUDE_FILE amdgpu_dm_trace 670 #include <trace/define_trace.h> 671