Lines Matching +full:mc +full:- +full:sid

43 #include "sid.h"
1230 switch (rdev->family) { in si_init_golden_registers()
1298 * si_get_allowed_info_register - fetch the register for the info ioctl
1304 * Returns 0 for success or -EINVAL for an invalid register
1323 return -EINVAL; in si_get_allowed_info_register()
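si_get_allowed_info_register() is a whitelist: only a fixed set of status registers may be read back through the info ioctl. A minimal sketch of the pattern (the real switch in si.c lists more registers; the two shown here are illustrative):

	int si_get_allowed_info_register(struct radeon_device *rdev,
					 u32 reg, u32 *val)
	{
		switch (reg) {
		case GRBM_STATUS:
		case SRBM_STATUS:
			/* whitelisted status register: read it for userspace */
			*val = RREG32(reg);
			return 0;
		default:
			return -EINVAL;
		}
	}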
1331 * si_get_xclk - get the xclk
1340 u32 reference_clock = rdev->clock.spll.reference_freq; in si_get_xclk()
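The xclk is derived from the SPLL reference clock, with two overrides read from the clock-pin control registers. A hedged sketch of the remainder of si_get_xclk(), assuming the CG_CLKPIN_CNTL* bit names from sid.h:

	u32 tmp = RREG32(CG_CLKPIN_CNTL_2);
	if (tmp & MUX_TCLK_TO_XCLK)
		return TCLK;			/* xclk is muxed to TCLK */

	tmp = RREG32(CG_CLKPIN_CNTL);
	if (tmp & XTALIN_DIVIDE)
		return reference_clock / 4;	/* crystal input is divided down */

	return reference_clock;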
1580 if (!rdev->mc_fw) in si_mc_load_microcode()
1581 return -EINVAL; in si_mc_load_microcode()
1583 if (rdev->new_fw) { in si_mc_load_microcode()
1585 (const struct mc_firmware_header_v1_0 *)rdev->mc_fw->data; in si_mc_load_microcode()
1587 radeon_ucode_print_mc_hdr(&hdr->header); in si_mc_load_microcode()
1588 regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2); in si_mc_load_microcode()
1590 (rdev->mc_fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes)); in si_mc_load_microcode()
1591 ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; in si_mc_load_microcode()
1593 (rdev->mc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); in si_mc_load_microcode()
1595 ucode_size = rdev->mc_fw->size / 4; in si_mc_load_microcode()
1597 switch (rdev->family) { in si_mc_load_microcode()
1620 fw_data = (const __be32 *)rdev->mc_fw->data; in si_mc_load_microcode()
1630 /* load mc io regs */ in si_mc_load_microcode()
1632 if (rdev->new_fw) { in si_mc_load_microcode()
1640 /* load the MC ucode */ in si_mc_load_microcode()
1642 if (rdev->new_fw) in si_mc_load_microcode()
1654 for (i = 0; i < rdev->usec_timeout; i++) { in si_mc_load_microcode()
1659 for (i = 0; i < rdev->usec_timeout; i++) { in si_mc_load_microcode()
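The "/ (4 * 2)" at match 1588 reflects that every io_debug entry is an (index, data) pair of dwords; the "load mc io regs" step at match 1630 then writes those pairs through the MC_SEQ_IO_DEBUG indirect interface. A hedged sketch of that loop for the new-firmware path:

	/* load mc io regs: each entry is an (index, data) dword pair */
	for (i = 0; i < regs_size; i++) {
		WREG32(MC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
		WREG32(MC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
	}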
1684 switch (rdev->family) { in si_init_microcode()
1698 if ((rdev->pdev->revision == 0x81) && in si_init_microcode()
1699 ((rdev->pdev->device == 0x6810) || in si_init_microcode()
1700 (rdev->pdev->device == 0x6811))) in si_init_microcode()
1713 if (((rdev->pdev->device == 0x6820) && in si_init_microcode()
1714 ((rdev->pdev->revision == 0x81) || in si_init_microcode()
1715 (rdev->pdev->revision == 0x83))) || in si_init_microcode()
1716 ((rdev->pdev->device == 0x6821) && in si_init_microcode()
1717 ((rdev->pdev->revision == 0x83) || in si_init_microcode()
1718 (rdev->pdev->revision == 0x87))) || in si_init_microcode()
1719 ((rdev->pdev->revision == 0x87) && in si_init_microcode()
1720 ((rdev->pdev->device == 0x6823) || in si_init_microcode()
1721 (rdev->pdev->device == 0x682b)))) in si_init_microcode()
1734 if (((rdev->pdev->revision == 0x81) && in si_init_microcode()
1735 ((rdev->pdev->device == 0x6600) || in si_init_microcode()
1736 (rdev->pdev->device == 0x6604) || in si_init_microcode()
1737 (rdev->pdev->device == 0x6605) || in si_init_microcode()
1738 (rdev->pdev->device == 0x6610))) || in si_init_microcode()
1739 ((rdev->pdev->revision == 0x83) && in si_init_microcode()
1740 (rdev->pdev->device == 0x6610))) in si_init_microcode()
1752 if (((rdev->pdev->revision == 0x81) && in si_init_microcode()
1753 (rdev->pdev->device == 0x6660)) || in si_init_microcode()
1754 ((rdev->pdev->revision == 0x83) && in si_init_microcode()
1755 ((rdev->pdev->device == 0x6660) || in si_init_microcode()
1756 (rdev->pdev->device == 0x6663) || in si_init_microcode()
1757 (rdev->pdev->device == 0x6665) || in si_init_microcode()
1758 (rdev->pdev->device == 0x6667)))) in si_init_microcode()
1760 else if ((rdev->pdev->revision == 0xc3) && in si_init_microcode()
1761 (rdev->pdev->device == 0x6665)) in si_init_microcode()
1781 err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev); in si_init_microcode()
1784 err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev); in si_init_microcode()
1787 if (rdev->pfp_fw->size != pfp_req_size) { in si_init_microcode()
1789 rdev->pfp_fw->size, fw_name); in si_init_microcode()
1790 err = -EINVAL; in si_init_microcode()
1794 err = radeon_ucode_validate(rdev->pfp_fw); in si_init_microcode()
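As the paired request_firmware() matches above show, every image is requested twice: first under the new lowercase firmware name, then under the legacy uppercase name if that fails. A sketch of the pattern for the PFP image, using the new_chip_name/chip_name variables that si_init_microcode() derives from the family and revision checks above:

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name);
	err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
	if (err) {
		/* fall back to the legacy firmware name */
		snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
		err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
		if (err)
			goto out;
	}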
1805 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev); in si_init_microcode()
1808 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev); in si_init_microcode()
1811 if (rdev->me_fw->size != me_req_size) { in si_init_microcode()
1813 rdev->me_fw->size, fw_name); in si_init_microcode()
1814 err = -EINVAL; in si_init_microcode()
1817 err = radeon_ucode_validate(rdev->me_fw); in si_init_microcode()
1828 err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev); in si_init_microcode()
1831 err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev); in si_init_microcode()
1834 if (rdev->ce_fw->size != ce_req_size) { in si_init_microcode()
1836 rdev->ce_fw->size, fw_name); in si_init_microcode()
1837 err = -EINVAL; in si_init_microcode()
1840 err = radeon_ucode_validate(rdev->ce_fw); in si_init_microcode()
1851 err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev); in si_init_microcode()
1854 err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev); in si_init_microcode()
1857 if (rdev->rlc_fw->size != rlc_req_size) { in si_init_microcode()
1859 rdev->rlc_fw->size, fw_name); in si_init_microcode()
1860 err = -EINVAL; in si_init_microcode()
1863 err = radeon_ucode_validate(rdev->rlc_fw); in si_init_microcode()
1877 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); in si_init_microcode()
1880 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); in si_init_microcode()
1883 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); in si_init_microcode()
1887 if ((rdev->mc_fw->size != mc_req_size) && in si_init_microcode()
1888 (rdev->mc_fw->size != mc2_req_size)) { in si_init_microcode()
1890 rdev->mc_fw->size, fw_name); in si_init_microcode()
1891 err = -EINVAL; in si_init_microcode()
1893 DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size); in si_init_microcode()
1895 err = radeon_ucode_validate(rdev->mc_fw); in si_init_microcode()
1911 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); in si_init_microcode()
1914 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); in si_init_microcode()
1917 release_firmware(rdev->smc_fw); in si_init_microcode()
1918 rdev->smc_fw = NULL; in si_init_microcode()
1920 } else if (rdev->smc_fw->size != smc_req_size) { in si_init_microcode()
1922 rdev->smc_fw->size, fw_name); in si_init_microcode()
1923 err = -EINVAL; in si_init_microcode()
1926 err = radeon_ucode_validate(rdev->smc_fw); in si_init_microcode()
1937 rdev->new_fw = false; in si_init_microcode()
1940 err = -EINVAL; in si_init_microcode()
1942 rdev->new_fw = true; in si_init_microcode()
1946 if (err != -EINVAL) in si_init_microcode()
1949 release_firmware(rdev->pfp_fw); in si_init_microcode()
1950 rdev->pfp_fw = NULL; in si_init_microcode()
1951 release_firmware(rdev->me_fw); in si_init_microcode()
1952 rdev->me_fw = NULL; in si_init_microcode()
1953 release_firmware(rdev->ce_fw); in si_init_microcode()
1954 rdev->ce_fw = NULL; in si_init_microcode()
1955 release_firmware(rdev->rlc_fw); in si_init_microcode()
1956 rdev->rlc_fw = NULL; in si_init_microcode()
1957 release_firmware(rdev->mc_fw); in si_init_microcode()
1958 rdev->mc_fw = NULL; in si_init_microcode()
1959 release_firmware(rdev->smc_fw); in si_init_microcode()
1960 rdev->smc_fw = NULL; in si_init_microcode()
1972 u32 pipe_offset = radeon_crtc->crtc_id * 0x20; in dce6_line_buffer_adjust()
1979 * 0 - half lb in dce6_line_buffer_adjust()
1980 * 2 - whole lb, other crtc must be disabled in dce6_line_buffer_adjust()
1984 * non-linked crtcs for maximum line buffer allocation. in dce6_line_buffer_adjust()
1986 if (radeon_crtc->base.enabled && mode) { in dce6_line_buffer_adjust()
1999 WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, in dce6_line_buffer_adjust()
2004 for (i = 0; i < rdev->usec_timeout; i++) { in dce6_line_buffer_adjust()
2011 if (radeon_crtc->base.enabled && mode) { in dce6_line_buffer_adjust()
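The return value of dce6_line_buffer_adjust() feeds the watermark code: the chosen split is translated back into a line-buffer size in pixels. A hedged sketch of the function's tail; the 4096/8192 totals are an assumption for illustration:

	if (radeon_crtc->base.enabled && mode) {
		switch (tmp) {
		case 0:	/* half lb */
		default:
			return 4096 * 2;
		case 2:	/* whole lb, other crtc must be disabled */
			return 8192 * 2;
		}
	}

	/* controller not enabled, so no lb used */
	return 0;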
2076 yclk.full = dfixed_const(wm->yclk); in dce6_dram_bandwidth()
2078 dram_channels.full = dfixed_const(wm->dram_channels * 4); in dce6_dram_bandwidth()
2096 yclk.full = dfixed_const(wm->yclk); in dce6_dram_bandwidth_for_display()
2098 dram_channels.full = dfixed_const(wm->dram_channels * 4); in dce6_dram_bandwidth_for_display()
2116 sclk.full = dfixed_const(wm->sclk); in dce6_data_return_bandwidth()
2142 disp_clk.full = dfixed_const(wm->disp_clk); in dce6_dmif_request_bandwidth()
2148 sclk.full = dfixed_const(wm->sclk); in dce6_dmif_request_bandwidth()
2188 line_time.full = dfixed_const(wm->active_time + wm->blank_time); in dce6_average_bandwidth()
2190 bpp.full = dfixed_const(wm->bytes_per_pixel); in dce6_average_bandwidth()
2191 src_width.full = dfixed_const(wm->src_width); in dce6_average_bandwidth()
2193 bandwidth.full = dfixed_mul(bandwidth, wm->vsc); in dce6_average_bandwidth()
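The dfixed_* helpers are drm's 20.12 fixed-point routines, so dce6_average_bandwidth() computes vsc * (src_width * bytes_per_pixel) / line_time without floating point. A hedged completion of the calculation around the matches above:

	bandwidth.full = dfixed_mul(src_width, bpp);
	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
	bandwidth.full = dfixed_div(bandwidth, line_time);

	return dfixed_trunc(bandwidth);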
2206 u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */ in dce6_latency_watermark()
2207 u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) + in dce6_latency_watermark()
2208 (wm->num_heads * cursor_line_pair_return_time); in dce6_latency_watermark()
2214 if (wm->num_heads == 0) in dce6_latency_watermark()
2219 if ((wm->vsc.full > a.full) || in dce6_latency_watermark()
2220 ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) || in dce6_latency_watermark()
2221 (wm->vtaps >= 5) || in dce6_latency_watermark()
2222 ((wm->vsc.full >= a.full) && wm->interlaced)) in dce6_latency_watermark()
2228 b.full = dfixed_const(wm->num_heads); in dce6_latency_watermark()
2230 tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512); in dce6_latency_watermark()
2233 lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000); in dce6_latency_watermark()
2235 a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel); in dce6_latency_watermark()
2242 if (line_fill_time < wm->active_time) in dce6_latency_watermark()
2245 return latency + (line_fill_time - wm->active_time); in dce6_latency_watermark()
2252 (dce6_dram_bandwidth_for_display(wm) / wm->num_heads)) in dce6_average_bandwidth_vs_dram_bandwidth_for_display()
2261 (dce6_available_bandwidth(wm) / wm->num_heads)) in dce6_average_bandwidth_vs_available_bandwidth()
2269 u32 lb_partitions = wm->lb_size / wm->src_width; in dce6_check_latency_hiding()
2270 u32 line_time = wm->active_time + wm->blank_time; in dce6_check_latency_hiding()
2276 if (wm->vsc.full > a.full) in dce6_check_latency_hiding()
2279 if (lb_partitions <= (wm->vtaps + 1)) in dce6_check_latency_hiding()
2285 latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time); in dce6_check_latency_hiding()
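What follows the match above is a single comparison of the computed hiding budget against the worst-case latency plus the line-buffer delay. A hedged sketch of the tail of dce6_check_latency_hiding():

	if (latency_hiding < (wm->max_latency + wm->lb_delay))
		return true;
	else
		return false;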
2297 struct drm_display_mode *mode = &radeon_crtc->base.mode; in dce6_program_watermarks()
2309 if (radeon_crtc->base.enabled && num_heads && mode) { in dce6_program_watermarks()
2310 active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, in dce6_program_watermarks()
2311 (u32)mode->clock); in dce6_program_watermarks()
2312 line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, in dce6_program_watermarks()
2313 (u32)mode->clock); in dce6_program_watermarks()
2318 if (rdev->family == CHIP_ARUBA) in dce6_program_watermarks()
2324 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { in dce6_program_watermarks()
2330 wm_high.yclk = rdev->pm.current_mclk * 10; in dce6_program_watermarks()
2331 wm_high.sclk = rdev->pm.current_sclk * 10; in dce6_program_watermarks()
2334 wm_high.disp_clk = mode->clock; in dce6_program_watermarks()
2335 wm_high.src_width = mode->crtc_hdisplay; in dce6_program_watermarks()
2337 wm_high.blank_time = line_time - wm_high.active_time; in dce6_program_watermarks()
2339 if (mode->flags & DRM_MODE_FLAG_INTERLACE) in dce6_program_watermarks()
2341 wm_high.vsc = radeon_crtc->vsc; in dce6_program_watermarks()
2343 if (radeon_crtc->rmx_type != RMX_OFF) in dce6_program_watermarks()
2351 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { in dce6_program_watermarks()
2357 wm_low.yclk = rdev->pm.current_mclk * 10; in dce6_program_watermarks()
2358 wm_low.sclk = rdev->pm.current_sclk * 10; in dce6_program_watermarks()
2361 wm_low.disp_clk = mode->clock; in dce6_program_watermarks()
2362 wm_low.src_width = mode->crtc_hdisplay; in dce6_program_watermarks()
2364 wm_low.blank_time = line_time - wm_low.active_time; in dce6_program_watermarks()
2366 if (mode->flags & DRM_MODE_FLAG_INTERLACE) in dce6_program_watermarks()
2368 wm_low.vsc = radeon_crtc->vsc; in dce6_program_watermarks()
2370 if (radeon_crtc->rmx_type != RMX_OFF) in dce6_program_watermarks()
2387 (rdev->disp_priority == 2)) { in dce6_program_watermarks()
2395 (rdev->disp_priority == 2)) { in dce6_program_watermarks()
2402 b.full = dfixed_const(mode->clock); in dce6_program_watermarks()
2406 c.full = dfixed_mul(c, radeon_crtc->hsc); in dce6_program_watermarks()
2414 b.full = dfixed_const(mode->clock); in dce6_program_watermarks()
2418 c.full = dfixed_mul(c, radeon_crtc->hsc); in dce6_program_watermarks()
2426 radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay); in dce6_program_watermarks()
2430 arb_control3 = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset); in dce6_program_watermarks()
2434 WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, tmp); in dce6_program_watermarks()
2435 WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset, in dce6_program_watermarks()
2439 tmp = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset); in dce6_program_watermarks()
2442 WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, tmp); in dce6_program_watermarks()
2443 WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset, in dce6_program_watermarks()
2447 WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, arb_control3); in dce6_program_watermarks()
2450 WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt); in dce6_program_watermarks()
2451 WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt); in dce6_program_watermarks()
2454 radeon_crtc->line_time = line_time; in dce6_program_watermarks()
2455 radeon_crtc->wm_high = latency_watermark_a; in dce6_program_watermarks()
2456 radeon_crtc->wm_low = latency_watermark_b; in dce6_program_watermarks()
2466 if (!rdev->mode_info.mode_config_initialized) in dce6_bandwidth_update()
2471 for (i = 0; i < rdev->num_crtc; i++) { in dce6_bandwidth_update()
2472 if (rdev->mode_info.crtcs[i]->base.enabled) in dce6_bandwidth_update()
2475 for (i = 0; i < rdev->num_crtc; i += 2) { in dce6_bandwidth_update()
2476 mode0 = &rdev->mode_info.crtcs[i]->base.mode; in dce6_bandwidth_update()
2477 mode1 = &rdev->mode_info.crtcs[i+1]->base.mode; in dce6_bandwidth_update()
2478 lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1); in dce6_bandwidth_update()
2479 dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads); in dce6_bandwidth_update()
2480 lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0); in dce6_bandwidth_update()
2481 dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads); in dce6_bandwidth_update()
2490 u32 *tile = rdev->config.si.tile_mode_array; in si_tiling_mode_table_init()
2492 ARRAY_SIZE(rdev->config.si.tile_mode_array); in si_tiling_mode_table_init()
2495 switch (rdev->config.si.mem_row_size_in_kb) { in si_tiling_mode_table_init()
2511 switch(rdev->family) { in si_tiling_mode_table_init()
2514 /* non-AA compressed depth or any compressed stencil */ in si_tiling_mode_table_init()
2550 /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */ in si_tiling_mode_table_init()
2559 /* Uncompressed 16bpp depth - and stencil buffer allocated with it */ in si_tiling_mode_table_init()
2568 /* Uncompressed 32bpp depth - and stencil buffer allocated with it */ in si_tiling_mode_table_init()
2729 /* non-AA compressed depth or any compressed stencil */ in si_tiling_mode_table_init()
2765 /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */ in si_tiling_mode_table_init()
2774 /* Uncompressed 16bpp depth - and stencil buffer allocated with it */ in si_tiling_mode_table_init()
2783 /* Uncompressed 32bpp depth - and stencil buffer allocated with it */ in si_tiling_mode_table_init()
2942 DRM_ERROR("unknown asic: 0x%x\n", rdev->family); in si_tiling_mode_table_init()
3063 rdev->config.si.backend_enable_mask = enabled_rbs; in si_setup_rb()
3097 switch (rdev->family) { in si_gpu_init()
3099 rdev->config.si.max_shader_engines = 2; in si_gpu_init()
3100 rdev->config.si.max_tile_pipes = 12; in si_gpu_init()
3101 rdev->config.si.max_cu_per_sh = 8; in si_gpu_init()
3102 rdev->config.si.max_sh_per_se = 2; in si_gpu_init()
3103 rdev->config.si.max_backends_per_se = 4; in si_gpu_init()
3104 rdev->config.si.max_texture_channel_caches = 12; in si_gpu_init()
3105 rdev->config.si.max_gprs = 256; in si_gpu_init()
3106 rdev->config.si.max_gs_threads = 32; in si_gpu_init()
3107 rdev->config.si.max_hw_contexts = 8; in si_gpu_init()
3109 rdev->config.si.sc_prim_fifo_size_frontend = 0x20; in si_gpu_init()
3110 rdev->config.si.sc_prim_fifo_size_backend = 0x100; in si_gpu_init()
3111 rdev->config.si.sc_hiz_tile_fifo_size = 0x30; in si_gpu_init()
3112 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; in si_gpu_init()
3116 rdev->config.si.max_shader_engines = 2; in si_gpu_init()
3117 rdev->config.si.max_tile_pipes = 8; in si_gpu_init()
3118 rdev->config.si.max_cu_per_sh = 5; in si_gpu_init()
3119 rdev->config.si.max_sh_per_se = 2; in si_gpu_init()
3120 rdev->config.si.max_backends_per_se = 4; in si_gpu_init()
3121 rdev->config.si.max_texture_channel_caches = 8; in si_gpu_init()
3122 rdev->config.si.max_gprs = 256; in si_gpu_init()
3123 rdev->config.si.max_gs_threads = 32; in si_gpu_init()
3124 rdev->config.si.max_hw_contexts = 8; in si_gpu_init()
3126 rdev->config.si.sc_prim_fifo_size_frontend = 0x20; in si_gpu_init()
3127 rdev->config.si.sc_prim_fifo_size_backend = 0x100; in si_gpu_init()
3128 rdev->config.si.sc_hiz_tile_fifo_size = 0x30; in si_gpu_init()
3129 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; in si_gpu_init()
3134 rdev->config.si.max_shader_engines = 1; in si_gpu_init()
3135 rdev->config.si.max_tile_pipes = 4; in si_gpu_init()
3136 rdev->config.si.max_cu_per_sh = 5; in si_gpu_init()
3137 rdev->config.si.max_sh_per_se = 2; in si_gpu_init()
3138 rdev->config.si.max_backends_per_se = 4; in si_gpu_init()
3139 rdev->config.si.max_texture_channel_caches = 4; in si_gpu_init()
3140 rdev->config.si.max_gprs = 256; in si_gpu_init()
3141 rdev->config.si.max_gs_threads = 32; in si_gpu_init()
3142 rdev->config.si.max_hw_contexts = 8; in si_gpu_init()
3144 rdev->config.si.sc_prim_fifo_size_frontend = 0x20; in si_gpu_init()
3145 rdev->config.si.sc_prim_fifo_size_backend = 0x40; in si_gpu_init()
3146 rdev->config.si.sc_hiz_tile_fifo_size = 0x30; in si_gpu_init()
3147 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; in si_gpu_init()
3151 rdev->config.si.max_shader_engines = 1; in si_gpu_init()
3152 rdev->config.si.max_tile_pipes = 4; in si_gpu_init()
3153 rdev->config.si.max_cu_per_sh = 6; in si_gpu_init()
3154 rdev->config.si.max_sh_per_se = 1; in si_gpu_init()
3155 rdev->config.si.max_backends_per_se = 2; in si_gpu_init()
3156 rdev->config.si.max_texture_channel_caches = 4; in si_gpu_init()
3157 rdev->config.si.max_gprs = 256; in si_gpu_init()
3158 rdev->config.si.max_gs_threads = 16; in si_gpu_init()
3159 rdev->config.si.max_hw_contexts = 8; in si_gpu_init()
3161 rdev->config.si.sc_prim_fifo_size_frontend = 0x20; in si_gpu_init()
3162 rdev->config.si.sc_prim_fifo_size_backend = 0x40; in si_gpu_init()
3163 rdev->config.si.sc_hiz_tile_fifo_size = 0x30; in si_gpu_init()
3164 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; in si_gpu_init()
3168 rdev->config.si.max_shader_engines = 1; in si_gpu_init()
3169 rdev->config.si.max_tile_pipes = 4; in si_gpu_init()
3170 rdev->config.si.max_cu_per_sh = 5; in si_gpu_init()
3171 rdev->config.si.max_sh_per_se = 1; in si_gpu_init()
3172 rdev->config.si.max_backends_per_se = 1; in si_gpu_init()
3173 rdev->config.si.max_texture_channel_caches = 2; in si_gpu_init()
3174 rdev->config.si.max_gprs = 256; in si_gpu_init()
3175 rdev->config.si.max_gs_threads = 16; in si_gpu_init()
3176 rdev->config.si.max_hw_contexts = 8; in si_gpu_init()
3178 rdev->config.si.sc_prim_fifo_size_frontend = 0x20; in si_gpu_init()
3179 rdev->config.si.sc_prim_fifo_size_backend = 0x40; in si_gpu_init()
3180 rdev->config.si.sc_hiz_tile_fifo_size = 0x30; in si_gpu_init()
3181 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; in si_gpu_init()
3206 rdev->config.si.num_tile_pipes = rdev->config.si.max_tile_pipes; in si_gpu_init()
3207 rdev->config.si.mem_max_burst_length_bytes = 256; in si_gpu_init()
3209 rdev->config.si.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024; in si_gpu_init()
3210 if (rdev->config.si.mem_row_size_in_kb > 4) in si_gpu_init()
3211 rdev->config.si.mem_row_size_in_kb = 4; in si_gpu_init()
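A worked example of the row-size formula at match 3209, assuming the RAMCFG field tmp reads 2:

	/* tmp == 2: 4 * (1 << (8 + 2)) / 1024 = 4096 / 1024 = 4, i.e. a 4 KB
	 * row, which is exactly the 4 KB clamp applied above */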
3212 /* XXX use MC settings? */ in si_gpu_init()
3213 rdev->config.si.shader_engine_tile_size = 32; in si_gpu_init()
3214 rdev->config.si.num_gpus = 1; in si_gpu_init()
3215 rdev->config.si.multi_gpu_tile_size = 64; in si_gpu_init()
3219 switch (rdev->config.si.mem_row_size_in_kb) { in si_gpu_init()
3239 rdev->config.si.tile_config = 0; in si_gpu_init()
3240 switch (rdev->config.si.num_tile_pipes) { in si_gpu_init()
3242 rdev->config.si.tile_config |= (0 << 0); in si_gpu_init()
3245 rdev->config.si.tile_config |= (1 << 0); in si_gpu_init()
3248 rdev->config.si.tile_config |= (2 << 0); in si_gpu_init()
3253 rdev->config.si.tile_config |= (3 << 0); in si_gpu_init()
3258 rdev->config.si.tile_config |= 0 << 4; in si_gpu_init()
3261 rdev->config.si.tile_config |= 1 << 4; in si_gpu_init()
3265 rdev->config.si.tile_config |= 2 << 4; in si_gpu_init()
3268 rdev->config.si.tile_config |= in si_gpu_init()
3270 rdev->config.si.tile_config |= in si_gpu_init()
3279 if (rdev->has_uvd) { in si_gpu_init()
3287 si_setup_rb(rdev, rdev->config.si.max_shader_engines, in si_gpu_init()
3288 rdev->config.si.max_sh_per_se, in si_gpu_init()
3289 rdev->config.si.max_backends_per_se); in si_gpu_init()
3291 si_setup_spi(rdev, rdev->config.si.max_shader_engines, in si_gpu_init()
3292 rdev->config.si.max_sh_per_se, in si_gpu_init()
3293 rdev->config.si.max_cu_per_sh); in si_gpu_init()
3295 rdev->config.si.active_cus = 0; in si_gpu_init()
3296 for (i = 0; i < rdev->config.si.max_shader_engines; i++) { in si_gpu_init()
3297 for (j = 0; j < rdev->config.si.max_sh_per_se; j++) { in si_gpu_init()
3298 rdev->config.si.active_cus += in si_gpu_init()
3313 WREG32(PA_SC_FIFO_SIZE, (SC_FRONTEND_PRIM_FIFO_SIZE(rdev->config.si.sc_prim_fifo_size_frontend) | in si_gpu_init()
3314 SC_BACKEND_PRIM_FIFO_SIZE(rdev->config.si.sc_prim_fifo_size_backend) | in si_gpu_init()
3315 SC_HIZ_TILE_FIFO_SIZE(rdev->config.si.sc_hiz_tile_fifo_size) | in si_gpu_init()
3316 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.si.sc_earlyz_tile_fifo_size))); in si_gpu_init()
3361 rdev->scratch.num_reg = 7; in si_scratch_init()
3362 rdev->scratch.reg_base = SCRATCH_REG0; in si_scratch_init()
3363 for (i = 0; i < rdev->scratch.num_reg; i++) { in si_scratch_init()
3364 rdev->scratch.free[i] = true; in si_scratch_init()
3365 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4); in si_scratch_init()
3372 struct radeon_ring *ring = &rdev->ring[fence->ring]; in si_fence_ring_emit()
3373 u64 addr = rdev->fence_drv[fence->ring].gpu_addr; in si_fence_ring_emit()
3377 radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2); in si_fence_ring_emit()
3387 /* EVENT_WRITE_EOP - flush caches, send int */ in si_fence_ring_emit()
3392 radeon_ring_write(ring, fence->seq); in si_fence_ring_emit()
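The "EVENT_WRITE_EOP - flush caches, send int" comment at match 3387 refers to a packet carrying the fence address, a data select, and an interrupt select. A hedged sketch of how si_fence_ring_emit() emits it:

	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
	radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
	radeon_ring_write(ring, lower_32_bits(addr));
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
	radeon_ring_write(ring, fence->seq);
	radeon_ring_write(ring, 0);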
3401 struct radeon_ring *ring = &rdev->ring[ib->ring]; in si_ring_ib_execute()
3402 unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0; in si_ring_ib_execute()
3405 if (ib->is_const_ib) { in si_ring_ib_execute()
3413 if (ring->rptr_save_reg) { in si_ring_ib_execute()
3414 next_rptr = ring->wptr + 3 + 4 + 8; in si_ring_ib_execute()
3416 radeon_ring_write(ring, ((ring->rptr_save_reg - in si_ring_ib_execute()
3419 } else if (rdev->wb.enabled) { in si_ring_ib_execute()
3420 next_rptr = ring->wptr + 5 + 4 + 8; in si_ring_ib_execute()
3423 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); in si_ring_ib_execute()
3424 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr)); in si_ring_ib_execute()
3436 (ib->gpu_addr & 0xFFFFFFFC)); in si_ring_ib_execute()
3437 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF); in si_ring_ib_execute()
3438 radeon_ring_write(ring, ib->length_dw | (vm_id << 24)); in si_ring_ib_execute()
3440 if (!ib->is_const_ib) { in si_ring_ib_execute()
3443 radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2); in si_ring_ib_execute()
3464 if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX) in si_cp_enable()
3465 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); in si_cp_enable()
3468 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; in si_cp_enable()
3469 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false; in si_cp_enable()
3470 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false; in si_cp_enable()
3479 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw) in si_cp_load_microcode()
3480 return -EINVAL; in si_cp_load_microcode()
3484 if (rdev->new_fw) { in si_cp_load_microcode()
3486 (const struct gfx_firmware_header_v1_0 *)rdev->pfp_fw->data; in si_cp_load_microcode()
3488 (const struct gfx_firmware_header_v1_0 *)rdev->ce_fw->data; in si_cp_load_microcode()
3490 (const struct gfx_firmware_header_v1_0 *)rdev->me_fw->data; in si_cp_load_microcode()
3494 radeon_ucode_print_gfx_hdr(&pfp_hdr->header); in si_cp_load_microcode()
3495 radeon_ucode_print_gfx_hdr(&ce_hdr->header); in si_cp_load_microcode()
3496 radeon_ucode_print_gfx_hdr(&me_hdr->header); in si_cp_load_microcode()
3500 (rdev->pfp_fw->data + le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes)); in si_cp_load_microcode()
3501 fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4; in si_cp_load_microcode()
3509 (rdev->ce_fw->data + le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes)); in si_cp_load_microcode()
3510 fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4; in si_cp_load_microcode()
3518 (rdev->me_fw->data + le32_to_cpu(me_hdr->header.ucode_array_offset_bytes)); in si_cp_load_microcode()
3519 fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4; in si_cp_load_microcode()
3528 fw_data = (const __be32 *)rdev->pfp_fw->data; in si_cp_load_microcode()
3535 fw_data = (const __be32 *)rdev->ce_fw->data; in si_cp_load_microcode()
3542 fw_data = (const __be32 *)rdev->me_fw->data; in si_cp_load_microcode()
3558 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; in si_cp_start()
3570 radeon_ring_write(ring, rdev->config.si.max_hw_contexts - 1); in si_cp_start()
3612 ring = &rdev->ring[i]; in si_cp_start()
3634 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; in si_cp_fini()
3636 radeon_scratch_free(rdev, ring->rptr_save_reg); in si_cp_fini()
3638 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; in si_cp_fini()
3640 radeon_scratch_free(rdev, ring->rptr_save_reg); in si_cp_fini()
3642 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; in si_cp_fini()
3644 radeon_scratch_free(rdev, ring->rptr_save_reg); in si_cp_fini()
3663 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF); in si_cp_resume()
3665 /* ring 0 - compute and gfx */ in si_cp_resume()
3667 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; in si_cp_resume()
3668 rb_bufsz = order_base_2(ring->ring_size / 8); in si_cp_resume()
3677 ring->wptr = 0; in si_cp_resume()
3678 WREG32(CP_RB0_WPTR, ring->wptr); in si_cp_resume()
3681 WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC); in si_cp_resume()
3682 WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF); in si_cp_resume()
3684 if (rdev->wb.enabled) in si_cp_resume()
3694 WREG32(CP_RB0_BASE, ring->gpu_addr >> 8); in si_cp_resume()
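CP_RB0_CNTL encodes the ring size as log2 of the size in qwords, which is why match 3668 divides ring_size by 8. A worked example, assuming a 1 MiB ring:

	/* 1 MiB ring: 1048576 / 8 = 131072 qwords -> order_base_2() = 17 */
	rb_bufsz = order_base_2(ring->ring_size / 8);
	tmp = (order_base_2(RADEON_GPU_PAGE_SIZE / 8) << 8) | rb_bufsz;
	WREG32(CP_RB0_CNTL, tmp);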
3696 /* ring1 - compute only */ in si_cp_resume()
3698 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; in si_cp_resume()
3699 rb_bufsz = order_base_2(ring->ring_size / 8); in si_cp_resume()
3708 ring->wptr = 0; in si_cp_resume()
3709 WREG32(CP_RB1_WPTR, ring->wptr); in si_cp_resume()
3712 WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC); in si_cp_resume()
3713 WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF); in si_cp_resume()
3718 WREG32(CP_RB1_BASE, ring->gpu_addr >> 8); in si_cp_resume()
3720 /* ring2 - compute only */ in si_cp_resume()
3722 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; in si_cp_resume()
3723 rb_bufsz = order_base_2(ring->ring_size / 8); in si_cp_resume()
3732 ring->wptr = 0; in si_cp_resume()
3733 WREG32(CP_RB2_WPTR, ring->wptr); in si_cp_resume()
3736 WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC); in si_cp_resume()
3737 WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF); in si_cp_resume()
3742 WREG32(CP_RB2_BASE, ring->gpu_addr >> 8); in si_cp_resume()
3746 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true; in si_cp_resume()
3747 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = true; in si_cp_resume()
3748 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = true; in si_cp_resume()
3749 r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); in si_cp_resume()
3751 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; in si_cp_resume()
3752 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false; in si_cp_resume()
3753 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false; in si_cp_resume()
3756 r = radeon_ring_test(rdev, CAYMAN_RING_TYPE_CP1_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]); in si_cp_resume()
3758 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false; in si_cp_resume()
3760 r = radeon_ring_test(rdev, CAYMAN_RING_TYPE_CP2_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]); in si_cp_resume()
3762 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false; in si_cp_resume()
3767 if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX) in si_cp_resume()
3768 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); in si_cp_resume()
3845 /* Skip MC reset as it's most likely not hung, just busy */ in si_gpu_check_soft_reset()
3847 DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask); in si_gpu_check_soft_reset()
3863 dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask); in si_gpu_soft_reset()
3866 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", in si_gpu_soft_reset()
3868 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", in si_gpu_soft_reset()
3898 dev_warn(rdev->dev, "Wait for MC idle timed out!\n"); in si_gpu_soft_reset()
3952 dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp); in si_gpu_soft_reset()
3966 dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); in si_gpu_soft_reset()
3998 for (i = 0; i < rdev->usec_timeout; i++) { in si_set_clk_bypass_mode()
4039 dev_info(rdev->dev, "GPU pci config reset\n"); in si_gpu_pci_config_reset()
4067 dev_warn(rdev->dev, "Wait for MC idle timed out!\n"); in si_gpu_pci_config_reset()
4075 pci_clear_master(rdev->pdev); in si_gpu_pci_config_reset()
4079 for (i = 0; i < rdev->usec_timeout; i++) { in si_gpu_pci_config_reset()
4118 * si_gfx_is_lockup - Check if the GFX engine is locked up
4139 /* MC */
4158 dev_warn(rdev->dev, "Wait for MC idle timed out!\n"); in si_mc_program()
4165 rdev->mc.vram_start >> 12); in si_mc_program()
4167 rdev->mc.vram_end >> 12); in si_mc_program()
4169 rdev->vram_scratch.gpu_addr >> 12); in si_mc_program()
4170 tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16; in si_mc_program()
4171 tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); in si_mc_program()
4174 WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); in si_mc_program()
4181 dev_warn(rdev->dev, "Wait for MC idle timed out!\n"); in si_mc_program()
4192 struct radeon_mc *mc) in si_vram_gtt_location() argument
4194 if (mc->mc_vram_size > 0xFFC0000000ULL) { in si_vram_gtt_location()
4196 dev_warn(rdev->dev, "limiting VRAM\n"); in si_vram_gtt_location()
4197 mc->real_vram_size = 0xFFC0000000ULL; in si_vram_gtt_location()
4198 mc->mc_vram_size = 0xFFC0000000ULL; in si_vram_gtt_location()
4200 radeon_vram_location(rdev, &rdev->mc, 0); in si_vram_gtt_location()
4201 rdev->mc.gtt_base_align = 0; in si_vram_gtt_location()
4202 radeon_gtt_location(rdev, mc); in si_vram_gtt_location()
4211 rdev->mc.vram_is_ddr = true; in si_mc_init()
4251 rdev->mc.vram_width = numchan * chansize; in si_mc_init()
4253 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); in si_mc_init()
4254 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); in si_mc_init()
4263 rdev->mc.mc_vram_size = tmp * 1024ULL * 1024ULL; in si_mc_init()
4264 rdev->mc.real_vram_size = rdev->mc.mc_vram_size; in si_mc_init()
4265 rdev->mc.visible_vram_size = rdev->mc.aper_size; in si_mc_init()
4266 si_vram_gtt_location(rdev, &rdev->mc); in si_mc_init()
4280 /* bits 0-15 are the VM contexts0-15 */ in si_pcie_gart_tlb_flush()
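The flush itself is a single register write with one request bit per VM context; flushing only context 0 (the GART mapping) looks like:

	WREG32(VM_INVALIDATE_REQUEST, 0x1);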
4288 if (rdev->gart.robj == NULL) { in si_pcie_gart_enable()
4289 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); in si_pcie_gart_enable()
4290 return -EINVAL; in si_pcie_gart_enable()
4315 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); in si_pcie_gart_enable()
4316 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); in si_pcie_gart_enable()
4317 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); in si_pcie_gart_enable()
4319 (u32)(rdev->dummy_page.addr >> 12)); in si_pcie_gart_enable()
4328 /* empty context1-15 */ in si_pcie_gart_enable()
4331 WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn - 1); in si_pcie_gart_enable()
4339 rdev->vm_manager.saved_table_addr[i]); in si_pcie_gart_enable()
4341 WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2), in si_pcie_gart_enable()
4342 rdev->vm_manager.saved_table_addr[i]); in si_pcie_gart_enable()
4345 /* enable context1-15 */ in si_pcie_gart_enable()
4347 (u32)(rdev->dummy_page.addr >> 12)); in si_pcie_gart_enable()
4350 PAGE_TABLE_BLOCK_SIZE(radeon_vm_block_size - 9) | in si_pcie_gart_enable()
4366 (unsigned)(rdev->mc.gtt_size >> 20), in si_pcie_gart_enable()
4367 (unsigned long long)rdev->gart.table_addr); in si_pcie_gart_enable()
4368 rdev->gart.ready = true; in si_pcie_gart_enable()
4381 reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2); in si_pcie_gart_disable()
4382 rdev->vm_manager.saved_table_addr[i] = RREG32(reg); in si_pcie_gart_disable()
4459 switch (pkt->opcode) { in si_vm_packet3_ce_check()
4472 DRM_ERROR("Invalid CE packet3: 0x%x\n", pkt->opcode); in si_vm_packet3_ce_check()
4473 return -EINVAL; in si_vm_packet3_ce_check()
4492 return -EINVAL; in si_vm_packet3_cp_dma_check()
4499 return -EINVAL; in si_vm_packet3_cp_dma_check()
4513 return -EINVAL; in si_vm_packet3_cp_dma_check()
4520 return -EINVAL; in si_vm_packet3_cp_dma_check()
4533 u32 idx = pkt->idx + 1; in si_vm_packet3_gfx_check()
4537 switch (pkt->opcode) { in si_vm_packet3_gfx_check()
4588 return -EINVAL; in si_vm_packet3_gfx_check()
4596 return -EINVAL; in si_vm_packet3_gfx_check()
4598 for (i = 0; i < (pkt->count - 2); i++) { in si_vm_packet3_gfx_check()
4601 return -EINVAL; in si_vm_packet3_gfx_check()
4610 return -EINVAL; in si_vm_packet3_gfx_check()
4617 return -EINVAL; in si_vm_packet3_gfx_check()
4622 end_reg = 4 * pkt->count + start_reg - 4; in si_vm_packet3_gfx_check()
4627 return -EINVAL; in si_vm_packet3_gfx_check()
4629 for (i = 0; i < pkt->count; i++) { in si_vm_packet3_gfx_check()
4632 return -EINVAL; in si_vm_packet3_gfx_check()
4641 DRM_ERROR("Invalid GFX packet3: 0x%x\n", pkt->opcode); in si_vm_packet3_gfx_check()
4642 return -EINVAL; in si_vm_packet3_gfx_check()
4651 u32 idx = pkt->idx + 1; in si_vm_packet3_compute_check()
4655 switch (pkt->opcode) { in si_vm_packet3_compute_check()
4691 return -EINVAL; in si_vm_packet3_compute_check()
4699 return -EINVAL; in si_vm_packet3_compute_check()
4701 for (i = 0; i < (pkt->count - 2); i++) { in si_vm_packet3_compute_check()
4704 return -EINVAL; in si_vm_packet3_compute_check()
4713 return -EINVAL; in si_vm_packet3_compute_check()
4720 return -EINVAL; in si_vm_packet3_compute_check()
4729 DRM_ERROR("Invalid Compute packet3: 0x%x\n", pkt->opcode); in si_vm_packet3_compute_check()
4730 return -EINVAL; in si_vm_packet3_compute_check()
4743 pkt.type = RADEON_CP_PACKET_GET_TYPE(ib->ptr[idx]); in si_ib_parse()
4744 pkt.count = RADEON_CP_PACKET_GET_COUNT(ib->ptr[idx]); in si_ib_parse()
4748 dev_err(rdev->dev, "Packet0 not allowed!\n"); in si_ib_parse()
4749 ret = -EINVAL; in si_ib_parse()
4755 pkt.opcode = RADEON_CP_PACKET3_GET_OPCODE(ib->ptr[idx]); in si_ib_parse()
4756 if (ib->is_const_ib) in si_ib_parse()
4757 ret = si_vm_packet3_ce_check(rdev, ib->ptr, &pkt); in si_ib_parse()
4759 switch (ib->ring) { in si_ib_parse()
4761 ret = si_vm_packet3_gfx_check(rdev, ib->ptr, &pkt); in si_ib_parse()
4765 ret = si_vm_packet3_compute_check(rdev, ib->ptr, &pkt); in si_ib_parse()
4768 dev_err(rdev->dev, "Non-PM4 ring %d !\n", ib->ring); in si_ib_parse()
4769 ret = -EINVAL; in si_ib_parse()
4776 dev_err(rdev->dev, "Unknown packet type %d !\n", pkt.type); in si_ib_parse()
4777 ret = -EINVAL; in si_ib_parse()
4781 for (i = 0; i < ib->length_dw; i++) { in si_ib_parse()
4783 printk("\t0x%08x <---\n", ib->ptr[i]); in si_ib_parse()
4785 printk("\t0x%08x\n", ib->ptr[i]); in si_ib_parse()
4789 } while (idx < ib->length_dw); in si_ib_parse()
4800 rdev->vm_manager.nvm = 16; in si_vm_init()
4802 rdev->vm_manager.vram_base_offset = 0; in si_vm_init()
4812 * si_vm_decode_fault - print human readable fault info
4828 if (rdev->family == CHIP_TAHITI) { in si_vm_decode_fault()
5088 (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2); in si_vm_flush()
5101 /* bits 0-15 are the VM contexts0-15 */ in si_vm_flush()
5131 for (i = 0; i < rdev->usec_timeout; i++) { in si_wait_for_rlc_serdes()
5137 for (i = 0; i < rdev->usec_timeout; i++) { in si_wait_for_rlc_serdes()
5162 for (i = 0; i < rdev->usec_timeout; i++) { in si_enable_gui_idle_interrupt()
5234 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA)) in si_enable_dma_pg()
5258 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG)) { in si_enable_gfx_cgpg()
5282 WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8); in si_init_gfx_cgpg()
5288 WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8); in si_init_gfx_cgpg()
5313 for (i = 0; i < rdev->config.si.max_cu_per_sh; i ++) { in si_get_cu_active_bitmap()
5327 for (i = 0; i < rdev->config.si.max_shader_engines; i++) { in si_init_ao_cu_mask()
5328 for (j = 0; j < rdev->config.si.max_sh_per_se; j++) { in si_init_ao_cu_mask()
5332 for (k = 0; k < rdev->config.si.max_cu_per_sh; k++) { in si_init_ao_cu_mask()
5361 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGCG)) { in si_enable_cgcg()
5399 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGCG)) { in si_enable_mgcg()
5405 if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CP_LS) { in si_enable_mgcg()
5455 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_UVD_MGCG)) { in si_enable_uvd_mgcg()
5503 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_LS)) in si_enable_mc_ls()
5520 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_MGCG)) in si_enable_mc_mgcg()
5535 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_MGCG)) { in si_enable_dma_mgcg()
5573 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_BIF_LS)) in si_enable_bif_mgls()
5591 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_MGCG)) in si_enable_hdp_mgcg()
5607 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_LS)) in si_enable_hdp_ls()
5646 if (rdev->has_uvd) { in si_update_cg()
5664 if (rdev->has_uvd) { in si_init_cg()
5672 if (rdev->has_uvd) { in si_fini_cg()
5688 if (rdev->rlc.cs_data == NULL) in si_get_csb_size()
5696 for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) { in si_get_csb_size()
5697 for (ext = sect->section; ext->extent != NULL; ++ext) { in si_get_csb_size()
5698 if (sect->id == SECT_CONTEXT) in si_get_csb_size()
5699 count += 2 + ext->reg_count; in si_get_csb_size()
5720 if (rdev->rlc.cs_data == NULL) in si_get_csb_buffer()
5732 for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) { in si_get_csb_buffer()
5733 for (ext = sect->section; ext->extent != NULL; ++ext) { in si_get_csb_buffer()
5734 if (sect->id == SECT_CONTEXT) { in si_get_csb_buffer()
5736 cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count)); in si_get_csb_buffer()
5737 buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000); in si_get_csb_buffer()
5738 for (i = 0; i < ext->reg_count; i++) in si_get_csb_buffer()
5739 buffer[count++] = cpu_to_le32(ext->extent[i]); in si_get_csb_buffer()
5747 buffer[count++] = cpu_to_le32(PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START); in si_get_csb_buffer()
5748 switch (rdev->family) { in si_get_csb_buffer()
5776 if (rdev->pg_flags) { in si_init_pg()
5777 if (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA) { in si_init_pg()
5781 if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) { in si_init_pg()
5784 WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8); in si_init_pg()
5785 WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8); in si_init_pg()
5790 WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8); in si_init_pg()
5791 WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8); in si_init_pg()
5797 if (rdev->pg_flags) { in si_fini_pg()
5868 if (!rdev->rlc_fw) in si_rlc_resume()
5869 return -EINVAL; in si_rlc_resume()
5889 if (rdev->new_fw) { in si_rlc_resume()
5891 (const struct rlc_firmware_header_v1_0 *)rdev->rlc_fw->data; in si_rlc_resume()
5892 u32 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; in si_rlc_resume()
5894 (rdev->rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); in si_rlc_resume()
5896 radeon_ucode_print_rlc_hdr(&hdr->header); in si_rlc_resume()
5904 (const __be32 *)rdev->rlc_fw->data; in si_rlc_resume()
5928 rdev->ih.enabled = true; in si_enable_interrupts()
5943 rdev->ih.enabled = false; in si_disable_interrupts()
5944 rdev->ih.rptr = 0; in si_disable_interrupts()
5963 for (i = 0; i < rdev->num_crtc; i++) in si_disable_interrupt_state()
5965 for (i = 0; i < rdev->num_crtc; i++) in si_disable_interrupt_state()
6000 WREG32(INTERRUPT_CNTL2, rdev->dummy_page.addr >> 8); in si_irq_init()
6002 /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi in si_irq_init()
6003 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN in si_irq_init()
6006 /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */ in si_irq_init()
6010 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8); in si_irq_init()
6011 rb_bufsz = order_base_2(rdev->ih.ring_size / 4); in si_irq_init()
6017 if (rdev->wb.enabled) in si_irq_init()
6021 WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC); in si_irq_init()
6022 WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF); in si_irq_init()
6033 if (rdev->msi_enabled) in si_irq_init()
6040 pci_set_master(rdev->pdev); in si_irq_init()
6058 if (!rdev->irq.installed) { in si_irq_set()
6060 return -EINVAL; in si_irq_set()
6063 if (!rdev->ih.enabled) { in si_irq_set()
6080 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { in si_irq_set()
6084 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) { in si_irq_set()
6088 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) { in si_irq_set()
6092 if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) { in si_irq_set()
6097 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) { in si_irq_set()
6111 if (rdev->irq.dpm_thermal) { in si_irq_set()
6116 for (i = 0; i < rdev->num_crtc; i++) { in si_irq_set()
6119 rdev->irq.crtc_vblank_int[i] || in si_irq_set()
6120 atomic_read(&rdev->irq.pflip[i]), "vblank", i); in si_irq_set()
6123 for (i = 0; i < rdev->num_crtc; i++) in si_irq_set()
6131 rdev->irq.hpd[i], "HPD", i); in si_irq_set()
6147 u32 *disp_int = rdev->irq.stat_regs.evergreen.disp_int; in si_irq_ack()
6148 u32 *grph_int = rdev->irq.stat_regs.evergreen.grph_int; in si_irq_ack()
6155 if (i < rdev->num_crtc) in si_irq_ack()
6160 for (i = 0; i < rdev->num_crtc; i += 2) { in si_irq_ack()
6213 if (rdev->wb.enabled) in si_get_ih_wptr()
6214 wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]); in si_get_ih_wptr()
6224 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", in si_get_ih_wptr()
6225 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask); in si_get_ih_wptr()
6226 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask; in si_get_ih_wptr()
6231 return (wptr & rdev->ih.ptr_mask); in si_get_ih_wptr()
6236 * [7:0] - interrupt source id
6237 * [31:8] - reserved
6238 * [59:32] - interrupt source data
6239 * [63:60] - reserved
6240 * [71:64] - RINGID
6241 * [79:72] - VMID
6242 * [127:80] - reserved
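Viewed as the four dwords the driver actually reads (see the masks at matches 6281-6283 below), the layout decodes as in this illustrative struct, which does not exist in the driver:

	struct si_ih_vector {
		u32 dw0;	/* [7:0]  src_id,   [31:8]  reserved */
		u32 dw1;	/* [27:0] src_data, [31:28] reserved */
		u32 dw2;	/* [7:0]  ring_id,  [15:8]  vm_id    */
		u32 dw3;	/* reserved */
	};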
6246 u32 *disp_int = rdev->irq.stat_regs.evergreen.disp_int; in si_irq_process()
6259 if (!rdev->ih.enabled || rdev->shutdown) in si_irq_process()
6266 if (atomic_xchg(&rdev->ih.lock, 1)) in si_irq_process()
6269 rptr = rdev->ih.rptr; in si_irq_process()
6281 src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff; in si_irq_process()
6282 src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff; in si_irq_process()
6283 ring_id = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff; in si_irq_process()
6292 crtc_idx = src_id - 1; in si_irq_process()
6298 if (rdev->irq.crtc_vblank_int[crtc_idx]) { in si_irq_process()
6300 rdev->pm.vblank_sync = true; in si_irq_process()
6301 wake_up(&rdev->irq.vblank_queue); in si_irq_process()
6303 if (atomic_read(&rdev->irq.pflip[crtc_idx])) { in si_irq_process()
6318 DRM_DEBUG("IH: D%d %s - IH event w/o asserted irq bit?\n", in si_irq_process()
6332 DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1); in si_irq_process()
6334 radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1); in si_irq_process()
6344 hpd_idx = src_data - 6; in si_irq_process()
6377 dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data); in si_irq_process()
6378 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", in si_irq_process()
6380 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", in si_irq_process()
6413 rdev->pm.dpm.thermal.high_to_low = false; in si_irq_process()
6418 rdev->pm.dpm.thermal.high_to_low = true; in si_irq_process()
6435 rptr &= rdev->ih.ptr_mask; in si_irq_process()
6439 schedule_work(&rdev->dp_work); in si_irq_process()
6441 schedule_delayed_work(&rdev->hotplug_work, 0); in si_irq_process()
6442 if (queue_thermal && rdev->pm.dpm_enabled) in si_irq_process()
6443 schedule_work(&rdev->pm.dpm.thermal.work); in si_irq_process()
6444 rdev->ih.rptr = rptr; in si_irq_process()
6445 atomic_set(&rdev->ih.lock, 0); in si_irq_process()
6462 if (!rdev->has_uvd) in si_uvd_init()
6467 dev_err(rdev->dev, "failed UVD (%d) init.\n", r); in si_uvd_init()
6469 * At this point rdev->uvd.vcpu_bo is NULL which trickles down in si_uvd_init()
6474 rdev->has_uvd = false; in si_uvd_init()
6477 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL; in si_uvd_init()
6478 r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096); in si_uvd_init()
6485 if (!rdev->has_uvd) in si_uvd_start()
6490 dev_err(rdev->dev, "failed UVD resume (%d).\n", r); in si_uvd_start()
6495 dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r); in si_uvd_start()
6501 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0; in si_uvd_start()
6509 if (!rdev->has_uvd || !rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size) in si_uvd_resume()
6512 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; in si_uvd_resume()
6513 r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0)); in si_uvd_resume()
6515 dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r); in si_uvd_resume()
6520 dev_err(rdev->dev, "failed initializing UVD (%d).\n", r); in si_uvd_resume()
6529 if (!rdev->has_vce) in si_vce_init()
6534 dev_err(rdev->dev, "failed VCE (%d) init.\n", r); in si_vce_init()
6536 * At this point rdev->vce.vcpu_bo is NULL which trickles down in si_vce_init()
6541 rdev->has_vce = false; in si_vce_init()
6544 rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_obj = NULL; in si_vce_init()
6545 r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE1_INDEX], 4096); in si_vce_init()
6546 rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_obj = NULL; in si_vce_init()
6547 r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE2_INDEX], 4096); in si_vce_init()
6554 if (!rdev->has_vce) in si_vce_start()
6559 dev_err(rdev->dev, "failed VCE resume (%d).\n", r); in si_vce_start()
6564 dev_err(rdev->dev, "failed VCE resume (%d).\n", r); in si_vce_start()
6569 dev_err(rdev->dev, "failed initializing VCE1 fences (%d).\n", r); in si_vce_start()
6574 dev_err(rdev->dev, "failed initializing VCE2 fences (%d).\n", r); in si_vce_start()
6580 rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0; in si_vce_start()
6581 rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0; in si_vce_start()
6589 if (!rdev->has_vce || !rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size) in si_vce_resume()
6592 ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX]; in si_vce_resume()
6593 r = radeon_ring_init(rdev, ring, ring->ring_size, 0, VCE_CMD_NO_OP); in si_vce_resume()
6595 dev_err(rdev->dev, "failed initializing VCE1 ring (%d).\n", r); in si_vce_resume()
6598 ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX]; in si_vce_resume()
6599 r = radeon_ring_init(rdev, ring, ring->ring_size, 0, VCE_CMD_NO_OP); in si_vce_resume()
6601 dev_err(rdev->dev, "failed initializing VCE2 ring (%d).\n", r); in si_vce_resume()
6606 dev_err(rdev->dev, "failed initializing VCE (%d).\n", r); in si_vce_resume()
6621 /* scratch needs to be initialized before MC */ in si_startup()
6628 if (!rdev->pm.dpm_enabled) { in si_startup()
6631 DRM_ERROR("Failed to load MC firmware!\n"); in si_startup()
6642 if (rdev->family == CHIP_VERDE) { in si_startup()
6643 rdev->rlc.reg_list = verde_rlc_save_restore_register_list; in si_startup()
6644 rdev->rlc.reg_list_size = in si_startup()
6647 rdev->rlc.cs_data = si_cs_data; in si_startup()
6661 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r); in si_startup()
6667 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r); in si_startup()
6673 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r); in si_startup()
6679 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r); in si_startup()
6685 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r); in si_startup()
6693 if (!rdev->irq.installed) { in si_startup()
6707 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; in si_startup()
6708 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, in si_startup()
6713 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; in si_startup()
6714 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET, in si_startup()
6719 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; in si_startup()
6720 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET, in si_startup()
6725 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; in si_startup()
6726 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, in si_startup()
6731 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]; in si_startup()
6732 r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET, in si_startup()
6753 dev_err(rdev->dev, "IB initialization failed (%d).\n", r); in si_startup()
6759 dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r); in si_startup()
6779 atom_asic_init(rdev->mode_info.atom_context); in si_resume()
6784 if (rdev->pm.pm_method == PM_METHOD_DPM) in si_resume()
6787 rdev->accel_working = true; in si_resume()
6791 rdev->accel_working = false; in si_resume()
6806 if (rdev->has_uvd) { in si_suspend()
6810 if (rdev->has_vce) in si_suspend()
6828 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; in si_init()
6834 return -EINVAL; in si_init()
6837 if (!rdev->is_atom_bios) { in si_init()
6838 dev_err(rdev->dev, "Expecting atombios for SI GPU\n"); in si_init()
6839 return -EINVAL; in si_init()
6847 if (!rdev->bios) { in si_init()
6848 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); in si_init()
6849 return -EINVAL; in si_init()
6852 atom_asic_init(rdev->mode_info.atom_context); in si_init()
6875 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || in si_init()
6876 !rdev->rlc_fw || !rdev->mc_fw) { in si_init()
6887 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; in si_init()
6888 ring->ring_obj = NULL; in si_init()
6891 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; in si_init()
6892 ring->ring_obj = NULL; in si_init()
6895 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; in si_init()
6896 ring->ring_obj = NULL; in si_init()
6899 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; in si_init()
6900 ring->ring_obj = NULL; in si_init()
6903 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]; in si_init()
6904 ring->ring_obj = NULL; in si_init()
6910 rdev->ih.ring_obj = NULL; in si_init()
6917 rdev->accel_working = true; in si_init()
6920 dev_err(rdev->dev, "disabling GPU acceleration\n"); in si_init()
6930 rdev->accel_working = false; in si_init()
6933 /* Don't start up if the MC ucode is missing. in si_init()
6934 * The default clocks and voltages before the MC ucode in si_init()
6937 if (!rdev->mc_fw) { in si_init()
6938 DRM_ERROR("radeon: MC ucode required for NI+.\n"); in si_init()
6939 return -EINVAL; in si_init()
6958 if (rdev->has_uvd) { in si_fini()
6962 if (rdev->has_vce) in si_fini()
6970 kfree(rdev->bios); in si_fini()
6971 rdev->bios = NULL; in si_fini()
6975 * si_get_gpu_clock_counter - return GPU clock counter snapshot
6986 mutex_lock(&rdev->gpu_clock_mutex); in si_get_gpu_clock_counter()
6990 mutex_unlock(&rdev->gpu_clock_mutex); in si_get_gpu_clock_counter()
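The counter is latched by a capture write and then read back as two 32-bit halves, all under the mutex so concurrent callers cannot interleave the sequence. A hedged sketch of the whole function:

	uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev)
	{
		uint64_t clock;

		mutex_lock(&rdev->gpu_clock_mutex);
		WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
		clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
			((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
		mutex_unlock(&rdev->gpu_clock_mutex);
		return clock;
	}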
7085 struct pci_dev *root = rdev->pdev->bus->self; in si_pcie_gen3_enable()
7091 if (pci_is_root_bus(rdev->pdev->bus)) in si_pcie_gen3_enable()
7097 if (rdev->flags & RADEON_IS_IGP) in si_pcie_gen3_enable()
7100 if (!(rdev->flags & RADEON_IS_PCIE)) in si_pcie_gen3_enable()
7128 if (!pci_is_pcie(root) || !pci_is_pcie(rdev->pdev)) in si_pcie_gen3_enable()
7132 /* re-try equalization if gen3 is not already enabled */ in si_pcie_gen3_enable()
7139 pcie_capability_set_word(rdev->pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD); in si_pcie_gen3_enable()
7157 pcie_capability_read_word(rdev->pdev, in si_pcie_gen3_enable()
7165 pcie_capability_read_word(rdev->pdev, in si_pcie_gen3_enable()
7171 pcie_capability_read_word(rdev->pdev, in si_pcie_gen3_enable()
7190 pcie_capability_clear_and_set_word(rdev->pdev, PCI_EXP_LNKCTL, in si_pcie_gen3_enable()
7207 pcie_capability_read_word(rdev->pdev, in si_pcie_gen3_enable()
7215 pcie_capability_write_word(rdev->pdev, in si_pcie_gen3_enable()
7231 pcie_capability_read_word(rdev->pdev, PCI_EXP_LNKCTL2, &tmp16); in si_pcie_gen3_enable()
7239 pcie_capability_write_word(rdev->pdev, PCI_EXP_LNKCTL2, tmp16); in si_pcie_gen3_enable()
7245 for (i = 0; i < rdev->usec_timeout; i++) { in si_pcie_gen3_enable()
7262 if (!(rdev->flags & RADEON_IS_PCIE)) in si_program_aspm()
7320 if ((rdev->family != CHIP_OLAND) && (rdev->family != CHIP_HAINAN)) { in si_program_aspm()
7369 if ((rdev->family == CHIP_OLAND) || (rdev->family == CHIP_HAINAN)) in si_program_aspm()
7376 if ((rdev->family == CHIP_OLAND) || (rdev->family == CHIP_HAINAN)) in si_program_aspm()
7382 !pci_is_root_bus(rdev->pdev->bus)) { in si_program_aspm()
7383 struct pci_dev *root = rdev->pdev->bus->self; in si_program_aspm()
7483 return -ETIMEDOUT; in si_vce_send_vcepll_ctlreq()