cik.c (85bf20d18ac0cf6e729465efcb8758b5db2ec6c2, older revision) | cik.c (1c0a46255f8d7daf5b601668836e185fd1294e94, newer revision) |
---|---|
1/* 2 * Copyright 2012 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the --- 28 unchanged lines hidden (view full) --- 37MODULE_FIRMWARE("radeon/BONAIRE_me.bin"); 38MODULE_FIRMWARE("radeon/BONAIRE_ce.bin"); 39MODULE_FIRMWARE("radeon/BONAIRE_mec.bin"); 40MODULE_FIRMWARE("radeon/BONAIRE_mc.bin"); 41MODULE_FIRMWARE("radeon/BONAIRE_mc2.bin"); 42MODULE_FIRMWARE("radeon/BONAIRE_rlc.bin"); 43MODULE_FIRMWARE("radeon/BONAIRE_sdma.bin"); 44MODULE_FIRMWARE("radeon/BONAIRE_smc.bin"); | 1/* 2 * Copyright 2012 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the --- 28 unchanged lines hidden (view full) --- 37MODULE_FIRMWARE("radeon/BONAIRE_me.bin"); 38MODULE_FIRMWARE("radeon/BONAIRE_ce.bin"); 39MODULE_FIRMWARE("radeon/BONAIRE_mec.bin"); 40MODULE_FIRMWARE("radeon/BONAIRE_mc.bin"); 41MODULE_FIRMWARE("radeon/BONAIRE_mc2.bin"); 42MODULE_FIRMWARE("radeon/BONAIRE_rlc.bin"); 43MODULE_FIRMWARE("radeon/BONAIRE_sdma.bin"); 44MODULE_FIRMWARE("radeon/BONAIRE_smc.bin"); |
45 46MODULE_FIRMWARE("radeon/bonaire_pfp.bin"); 47MODULE_FIRMWARE("radeon/bonaire_me.bin"); 48MODULE_FIRMWARE("radeon/bonaire_ce.bin"); 49MODULE_FIRMWARE("radeon/bonaire_mec.bin"); 50MODULE_FIRMWARE("radeon/bonaire_mc.bin"); 51MODULE_FIRMWARE("radeon/bonaire_rlc.bin"); 52MODULE_FIRMWARE("radeon/bonaire_sdma.bin"); 53MODULE_FIRMWARE("radeon/bonaire_smc.bin"); 54 |
45MODULE_FIRMWARE("radeon/HAWAII_pfp.bin"); 46MODULE_FIRMWARE("radeon/HAWAII_me.bin"); 47MODULE_FIRMWARE("radeon/HAWAII_ce.bin"); 48MODULE_FIRMWARE("radeon/HAWAII_mec.bin"); 49MODULE_FIRMWARE("radeon/HAWAII_mc.bin"); 50MODULE_FIRMWARE("radeon/HAWAII_mc2.bin"); 51MODULE_FIRMWARE("radeon/HAWAII_rlc.bin"); 52MODULE_FIRMWARE("radeon/HAWAII_sdma.bin"); 53MODULE_FIRMWARE("radeon/HAWAII_smc.bin"); | 55MODULE_FIRMWARE("radeon/HAWAII_pfp.bin"); 56MODULE_FIRMWARE("radeon/HAWAII_me.bin"); 57MODULE_FIRMWARE("radeon/HAWAII_ce.bin"); 58MODULE_FIRMWARE("radeon/HAWAII_mec.bin"); 59MODULE_FIRMWARE("radeon/HAWAII_mc.bin"); 60MODULE_FIRMWARE("radeon/HAWAII_mc2.bin"); 61MODULE_FIRMWARE("radeon/HAWAII_rlc.bin"); 62MODULE_FIRMWARE("radeon/HAWAII_sdma.bin"); 63MODULE_FIRMWARE("radeon/HAWAII_smc.bin"); |
64 65MODULE_FIRMWARE("radeon/hawaii_pfp.bin"); 66MODULE_FIRMWARE("radeon/hawaii_me.bin"); 67MODULE_FIRMWARE("radeon/hawaii_ce.bin"); 68MODULE_FIRMWARE("radeon/hawaii_mec.bin"); 69MODULE_FIRMWARE("radeon/hawaii_mc.bin"); 70MODULE_FIRMWARE("radeon/hawaii_rlc.bin"); 71MODULE_FIRMWARE("radeon/hawaii_sdma.bin"); 72MODULE_FIRMWARE("radeon/hawaii_smc.bin"); 73 |
54MODULE_FIRMWARE("radeon/KAVERI_pfp.bin"); 55MODULE_FIRMWARE("radeon/KAVERI_me.bin"); 56MODULE_FIRMWARE("radeon/KAVERI_ce.bin"); 57MODULE_FIRMWARE("radeon/KAVERI_mec.bin"); 58MODULE_FIRMWARE("radeon/KAVERI_rlc.bin"); 59MODULE_FIRMWARE("radeon/KAVERI_sdma.bin"); | 74MODULE_FIRMWARE("radeon/KAVERI_pfp.bin"); 75MODULE_FIRMWARE("radeon/KAVERI_me.bin"); 76MODULE_FIRMWARE("radeon/KAVERI_ce.bin"); 77MODULE_FIRMWARE("radeon/KAVERI_mec.bin"); 78MODULE_FIRMWARE("radeon/KAVERI_rlc.bin"); 79MODULE_FIRMWARE("radeon/KAVERI_sdma.bin"); |
80 81MODULE_FIRMWARE("radeon/kaveri_pfp.bin"); 82MODULE_FIRMWARE("radeon/kaveri_me.bin"); 83MODULE_FIRMWARE("radeon/kaveri_ce.bin"); 84MODULE_FIRMWARE("radeon/kaveri_mec.bin"); 85MODULE_FIRMWARE("radeon/kaveri_mec2.bin"); 86MODULE_FIRMWARE("radeon/kaveri_rlc.bin"); 87MODULE_FIRMWARE("radeon/kaveri_sdma.bin"); 88 |
60MODULE_FIRMWARE("radeon/KABINI_pfp.bin"); 61MODULE_FIRMWARE("radeon/KABINI_me.bin"); 62MODULE_FIRMWARE("radeon/KABINI_ce.bin"); 63MODULE_FIRMWARE("radeon/KABINI_mec.bin"); 64MODULE_FIRMWARE("radeon/KABINI_rlc.bin"); 65MODULE_FIRMWARE("radeon/KABINI_sdma.bin"); | 89MODULE_FIRMWARE("radeon/KABINI_pfp.bin"); 90MODULE_FIRMWARE("radeon/KABINI_me.bin"); 91MODULE_FIRMWARE("radeon/KABINI_ce.bin"); 92MODULE_FIRMWARE("radeon/KABINI_mec.bin"); 93MODULE_FIRMWARE("radeon/KABINI_rlc.bin"); 94MODULE_FIRMWARE("radeon/KABINI_sdma.bin"); |
95 96MODULE_FIRMWARE("radeon/kabini_pfp.bin"); 97MODULE_FIRMWARE("radeon/kabini_me.bin"); 98MODULE_FIRMWARE("radeon/kabini_ce.bin"); 99MODULE_FIRMWARE("radeon/kabini_mec.bin"); 100MODULE_FIRMWARE("radeon/kabini_rlc.bin"); 101MODULE_FIRMWARE("radeon/kabini_sdma.bin"); 102 |
66MODULE_FIRMWARE("radeon/MULLINS_pfp.bin"); 67MODULE_FIRMWARE("radeon/MULLINS_me.bin"); 68MODULE_FIRMWARE("radeon/MULLINS_ce.bin"); 69MODULE_FIRMWARE("radeon/MULLINS_mec.bin"); 70MODULE_FIRMWARE("radeon/MULLINS_rlc.bin"); 71MODULE_FIRMWARE("radeon/MULLINS_sdma.bin"); 72 | 103MODULE_FIRMWARE("radeon/MULLINS_pfp.bin"); 104MODULE_FIRMWARE("radeon/MULLINS_me.bin"); 105MODULE_FIRMWARE("radeon/MULLINS_ce.bin"); 106MODULE_FIRMWARE("radeon/MULLINS_mec.bin"); 107MODULE_FIRMWARE("radeon/MULLINS_rlc.bin"); 108MODULE_FIRMWARE("radeon/MULLINS_sdma.bin"); 109 |
110MODULE_FIRMWARE("radeon/mullins_pfp.bin"); 111MODULE_FIRMWARE("radeon/mullins_me.bin"); 112MODULE_FIRMWARE("radeon/mullins_ce.bin"); 113MODULE_FIRMWARE("radeon/mullins_mec.bin"); 114MODULE_FIRMWARE("radeon/mullins_rlc.bin"); 115MODULE_FIRMWARE("radeon/mullins_sdma.bin"); 116 |
73extern int r600_ih_ring_alloc(struct radeon_device *rdev); 74extern void r600_ih_ring_fini(struct radeon_device *rdev); 75extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save); 76extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save); 77extern bool evergreen_is_display_hung(struct radeon_device *rdev); 78extern void sumo_rlc_fini(struct radeon_device *rdev); 79extern int sumo_rlc_init(struct radeon_device *rdev); 80extern void si_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); --- 1433 unchanged lines hidden (view full) --- 1514 0x88c4, 0x001f3ae3, 0x00000082, 1515 0x88d4, 0x0000001f, 0x00000010, 1516 0x30934, 0xffffffff, 0x00000000 1517}; 1518 1519 1520static void cik_init_golden_registers(struct radeon_device *rdev) 1521{ | 117extern int r600_ih_ring_alloc(struct radeon_device *rdev); 118extern void r600_ih_ring_fini(struct radeon_device *rdev); 119extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save); 120extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save); 121extern bool evergreen_is_display_hung(struct radeon_device *rdev); 122extern void sumo_rlc_fini(struct radeon_device *rdev); 123extern int sumo_rlc_init(struct radeon_device *rdev); 124extern void si_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); --- 1433 unchanged lines hidden (view full) --- 1558 0x88c4, 0x001f3ae3, 0x00000082, 1559 0x88d4, 0x0000001f, 0x00000010, 1560 0x30934, 0xffffffff, 0x00000000 1561}; 1562 1563 1564static void cik_init_golden_registers(struct radeon_device *rdev) 1565{ |
1566 /* Some of the registers might be dependent on GRBM_GFX_INDEX */ 1567 mutex_lock(&rdev->grbm_idx_mutex); |
1522 switch (rdev->family) { 1523 case CHIP_BONAIRE: 1524 radeon_program_register_sequence(rdev, 1525 bonaire_mgcg_cgcg_init, 1526 (const u32)ARRAY_SIZE(bonaire_mgcg_cgcg_init)); 1527 radeon_program_register_sequence(rdev, 1528 bonaire_golden_registers, 1529 (const u32)ARRAY_SIZE(bonaire_golden_registers)); --- 58 unchanged lines hidden (view full) --- 1588 (const u32)ARRAY_SIZE(hawaii_golden_common_registers)); 1589 radeon_program_register_sequence(rdev, 1590 hawaii_golden_spm_registers, 1591 (const u32)ARRAY_SIZE(hawaii_golden_spm_registers)); 1592 break; 1593 default: 1594 break; 1595 } | 1568 switch (rdev->family) { 1569 case CHIP_BONAIRE: 1570 radeon_program_register_sequence(rdev, 1571 bonaire_mgcg_cgcg_init, 1572 (const u32)ARRAY_SIZE(bonaire_mgcg_cgcg_init)); 1573 radeon_program_register_sequence(rdev, 1574 bonaire_golden_registers, 1575 (const u32)ARRAY_SIZE(bonaire_golden_registers)); --- 58 unchanged lines hidden (view full) --- 1634 (const u32)ARRAY_SIZE(hawaii_golden_common_registers)); 1635 radeon_program_register_sequence(rdev, 1636 hawaii_golden_spm_registers, 1637 (const u32)ARRAY_SIZE(hawaii_golden_spm_registers)); 1638 break; 1639 default: 1640 break; 1641 } |
1642 mutex_unlock(&rdev->grbm_idx_mutex); |
1596} 1597 1598/** 1599 * cik_get_xclk - get the xclk 1600 * 1601 * @rdev: radeon_device pointer 1602 * 1603 * Returns the reference clock used by the gfx engine --- 151 unchanged lines hidden (view full) --- 1755 * 1756 * @rdev: radeon_device pointer 1757 * 1758 * Load the GDDR MC ucode into the hw (CIK). 1759 * Returns 0 on success, error on failure. 1760 */ 1761int ci_mc_load_microcode(struct radeon_device *rdev) 1762{ | 1643} 1644 1645/** 1646 * cik_get_xclk - get the xclk 1647 * 1648 * @rdev: radeon_device pointer 1649 * 1650 * Returns the reference clock used by the gfx engine --- 151 unchanged lines hidden (view full) --- 1802 * 1803 * @rdev: radeon_device pointer 1804 * 1805 * Load the GDDR MC ucode into the hw (CIK). 1806 * Returns 0 on success, error on failure. 1807 */ 1808int ci_mc_load_microcode(struct radeon_device *rdev) 1809{ |
1763 const __be32 *fw_data; 1764 u32 running, blackout = 0; 1765 u32 *io_mc_regs; | 1810 const __be32 *fw_data = NULL; 1811 const __le32 *new_fw_data = NULL; 1812 u32 running, blackout = 0, tmp; 1813 u32 *io_mc_regs = NULL; 1814 const __le32 *new_io_mc_regs = NULL; |
1766 int i, regs_size, ucode_size; 1767 1768 if (!rdev->mc_fw) 1769 return -EINVAL; 1770 | 1815 int i, regs_size, ucode_size; 1816 1817 if (!rdev->mc_fw) 1818 return -EINVAL; 1819 |
1771 ucode_size = rdev->mc_fw->size / 4; | 1820 if (rdev->new_fw) { 1821 const struct mc_firmware_header_v1_0 *hdr = 1822 (const struct mc_firmware_header_v1_0 *)rdev->mc_fw->data; |
1772 | 1823 |
1773 switch (rdev->family) { 1774 case CHIP_BONAIRE: 1775 io_mc_regs = (u32 *)&bonaire_io_mc_regs; 1776 regs_size = BONAIRE_IO_MC_REGS_SIZE; 1777 break; 1778 case CHIP_HAWAII: 1779 io_mc_regs = (u32 *)&hawaii_io_mc_regs; 1780 regs_size = HAWAII_IO_MC_REGS_SIZE; 1781 break; 1782 default: 1783 return -EINVAL; | 1824 radeon_ucode_print_mc_hdr(&hdr->header); 1825 1826 regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2); 1827 new_io_mc_regs = (const __le32 *) 1828 (rdev->mc_fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes)); 1829 ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; 1830 new_fw_data = (const __le32 *) 1831 (rdev->mc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 1832 } else { 1833 ucode_size = rdev->mc_fw->size / 4; 1834 1835 switch (rdev->family) { 1836 case CHIP_BONAIRE: 1837 io_mc_regs = (u32 *)&bonaire_io_mc_regs; 1838 regs_size = BONAIRE_IO_MC_REGS_SIZE; 1839 break; 1840 case CHIP_HAWAII: 1841 io_mc_regs = (u32 *)&hawaii_io_mc_regs; 1842 regs_size = HAWAII_IO_MC_REGS_SIZE; 1843 break; 1844 default: 1845 return -EINVAL; 1846 } 1847 fw_data = (const __be32 *)rdev->mc_fw->data; |
1784 } 1785 1786 running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK; 1787 1788 if (running == 0) { 1789 if (running) { 1790 blackout = RREG32(MC_SHARED_BLACKOUT_CNTL); 1791 WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1); 1792 } 1793 1794 /* reset the engine and set to writable */ 1795 WREG32(MC_SEQ_SUP_CNTL, 0x00000008); 1796 WREG32(MC_SEQ_SUP_CNTL, 0x00000010); 1797 1798 /* load mc io regs */ 1799 for (i = 0; i < regs_size; i++) { | 1848 } 1849 1850 running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK; 1851 1852 if (running == 0) { 1853 if (running) { 1854 blackout = RREG32(MC_SHARED_BLACKOUT_CNTL); 1855 WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1); 1856 } 1857 1858 /* reset the engine and set to writable */ 1859 WREG32(MC_SEQ_SUP_CNTL, 0x00000008); 1860 WREG32(MC_SEQ_SUP_CNTL, 0x00000010); 1861 1862 /* load mc io regs */ 1863 for (i = 0; i < regs_size; i++) { |
1800 WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]); 1801 WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]); | 1864 if (rdev->new_fw) { 1865 WREG32(MC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++)); 1866 WREG32(MC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++)); 1867 } else { 1868 WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]); 1869 WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]); 1870 } |
1802 } | 1871 } |
1872 1873 tmp = RREG32(MC_SEQ_MISC0); 1874 if ((rdev->pdev->device == 0x6649) && ((tmp & 0xff00) == 0x5600)) { 1875 WREG32(MC_SEQ_IO_DEBUG_INDEX, 5); 1876 WREG32(MC_SEQ_IO_DEBUG_DATA, 0x00000023); 1877 WREG32(MC_SEQ_IO_DEBUG_INDEX, 9); 1878 WREG32(MC_SEQ_IO_DEBUG_DATA, 0x000001f0); 1879 } 1880 |
1803 /* load the MC ucode */ | 1881 /* load the MC ucode */ |
1804 fw_data = (const __be32 *)rdev->mc_fw->data; 1805 for (i = 0; i < ucode_size; i++) 1806 WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++)); | 1882 for (i = 0; i < ucode_size; i++) { 1883 if (rdev->new_fw) 1884 WREG32(MC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++)); 1885 else 1886 WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++)); 1887 } |
1807 1808 /* put the engine back into the active state */ 1809 WREG32(MC_SEQ_SUP_CNTL, 0x00000008); 1810 WREG32(MC_SEQ_SUP_CNTL, 0x00000004); 1811 WREG32(MC_SEQ_SUP_CNTL, 0x00000001); 1812 1813 /* wait for training to complete */ 1814 for (i = 0; i < rdev->usec_timeout; i++) { --- 21 unchanged lines hidden (view full) --- 1836 * 1837 * Use the firmware interface to load the ucode images into 1838 * the driver (not loaded into hw). 1839 * Returns 0 on success, error on failure. 1840 */ 1841static int cik_init_microcode(struct radeon_device *rdev) 1842{ 1843 const char *chip_name; | 1888 1889 /* put the engine back into the active state */ 1890 WREG32(MC_SEQ_SUP_CNTL, 0x00000008); 1891 WREG32(MC_SEQ_SUP_CNTL, 0x00000004); 1892 WREG32(MC_SEQ_SUP_CNTL, 0x00000001); 1893 1894 /* wait for training to complete */ 1895 for (i = 0; i < rdev->usec_timeout; i++) { --- 21 unchanged lines hidden (view full) --- 1917 * 1918 * Use the firmware interface to load the ucode images into 1919 * the driver (not loaded into hw). 1920 * Returns 0 on success, error on failure. 1921 */ 1922static int cik_init_microcode(struct radeon_device *rdev) 1923{ 1924 const char *chip_name; |
1925 const char *new_chip_name; |
1844 size_t pfp_req_size, me_req_size, ce_req_size, 1845 mec_req_size, rlc_req_size, mc_req_size = 0, 1846 sdma_req_size, smc_req_size = 0, mc2_req_size = 0; 1847 char fw_name[30]; | 1926 size_t pfp_req_size, me_req_size, ce_req_size, 1927 mec_req_size, rlc_req_size, mc_req_size = 0, 1928 sdma_req_size, smc_req_size = 0, mc2_req_size = 0; 1929 char fw_name[30]; |
1930 int new_fw = 0; |
1848 int err; | 1931 int err; |
1932 int num_fw; |
1849 1850 DRM_DEBUG("\n"); 1851 1852 switch (rdev->family) { 1853 case CHIP_BONAIRE: 1854 chip_name = "BONAIRE"; | 1933 1934 DRM_DEBUG("\n"); 1935 1936 switch (rdev->family) { 1937 case CHIP_BONAIRE: 1938 chip_name = "BONAIRE"; |
1939 new_chip_name = "bonaire"; |
1855 pfp_req_size = CIK_PFP_UCODE_SIZE * 4; 1856 me_req_size = CIK_ME_UCODE_SIZE * 4; 1857 ce_req_size = CIK_CE_UCODE_SIZE * 4; 1858 mec_req_size = CIK_MEC_UCODE_SIZE * 4; 1859 rlc_req_size = BONAIRE_RLC_UCODE_SIZE * 4; 1860 mc_req_size = BONAIRE_MC_UCODE_SIZE * 4; 1861 mc2_req_size = BONAIRE_MC2_UCODE_SIZE * 4; 1862 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4; 1863 smc_req_size = ALIGN(BONAIRE_SMC_UCODE_SIZE, 4); | 1940 pfp_req_size = CIK_PFP_UCODE_SIZE * 4; 1941 me_req_size = CIK_ME_UCODE_SIZE * 4; 1942 ce_req_size = CIK_CE_UCODE_SIZE * 4; 1943 mec_req_size = CIK_MEC_UCODE_SIZE * 4; 1944 rlc_req_size = BONAIRE_RLC_UCODE_SIZE * 4; 1945 mc_req_size = BONAIRE_MC_UCODE_SIZE * 4; 1946 mc2_req_size = BONAIRE_MC2_UCODE_SIZE * 4; 1947 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4; 1948 smc_req_size = ALIGN(BONAIRE_SMC_UCODE_SIZE, 4); |
1949 num_fw = 8; |
1864 break; 1865 case CHIP_HAWAII: 1866 chip_name = "HAWAII"; | 1950 break; 1951 case CHIP_HAWAII: 1952 chip_name = "HAWAII"; |
1953 new_chip_name = "hawaii"; |
1867 pfp_req_size = CIK_PFP_UCODE_SIZE * 4; 1868 me_req_size = CIK_ME_UCODE_SIZE * 4; 1869 ce_req_size = CIK_CE_UCODE_SIZE * 4; 1870 mec_req_size = CIK_MEC_UCODE_SIZE * 4; 1871 rlc_req_size = BONAIRE_RLC_UCODE_SIZE * 4; 1872 mc_req_size = HAWAII_MC_UCODE_SIZE * 4; 1873 mc2_req_size = HAWAII_MC2_UCODE_SIZE * 4; 1874 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4; 1875 smc_req_size = ALIGN(HAWAII_SMC_UCODE_SIZE, 4); | 1954 pfp_req_size = CIK_PFP_UCODE_SIZE * 4; 1955 me_req_size = CIK_ME_UCODE_SIZE * 4; 1956 ce_req_size = CIK_CE_UCODE_SIZE * 4; 1957 mec_req_size = CIK_MEC_UCODE_SIZE * 4; 1958 rlc_req_size = BONAIRE_RLC_UCODE_SIZE * 4; 1959 mc_req_size = HAWAII_MC_UCODE_SIZE * 4; 1960 mc2_req_size = HAWAII_MC2_UCODE_SIZE * 4; 1961 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4; 1962 smc_req_size = ALIGN(HAWAII_SMC_UCODE_SIZE, 4); |
1963 num_fw = 8; |
1876 break; 1877 case CHIP_KAVERI: 1878 chip_name = "KAVERI"; | 1964 break; 1965 case CHIP_KAVERI: 1966 chip_name = "KAVERI"; |
1967 new_chip_name = "kaveri"; |
1879 pfp_req_size = CIK_PFP_UCODE_SIZE * 4; 1880 me_req_size = CIK_ME_UCODE_SIZE * 4; 1881 ce_req_size = CIK_CE_UCODE_SIZE * 4; 1882 mec_req_size = CIK_MEC_UCODE_SIZE * 4; 1883 rlc_req_size = KV_RLC_UCODE_SIZE * 4; 1884 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4; | 1968 pfp_req_size = CIK_PFP_UCODE_SIZE * 4; 1969 me_req_size = CIK_ME_UCODE_SIZE * 4; 1970 ce_req_size = CIK_CE_UCODE_SIZE * 4; 1971 mec_req_size = CIK_MEC_UCODE_SIZE * 4; 1972 rlc_req_size = KV_RLC_UCODE_SIZE * 4; 1973 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4; |
1974 num_fw = 7; |
1885 break; 1886 case CHIP_KABINI: 1887 chip_name = "KABINI"; | 1975 break; 1976 case CHIP_KABINI: 1977 chip_name = "KABINI"; |
1978 new_chip_name = "kabini"; |
1888 pfp_req_size = CIK_PFP_UCODE_SIZE * 4; 1889 me_req_size = CIK_ME_UCODE_SIZE * 4; 1890 ce_req_size = CIK_CE_UCODE_SIZE * 4; 1891 mec_req_size = CIK_MEC_UCODE_SIZE * 4; 1892 rlc_req_size = KB_RLC_UCODE_SIZE * 4; 1893 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4; | 1979 pfp_req_size = CIK_PFP_UCODE_SIZE * 4; 1980 me_req_size = CIK_ME_UCODE_SIZE * 4; 1981 ce_req_size = CIK_CE_UCODE_SIZE * 4; 1982 mec_req_size = CIK_MEC_UCODE_SIZE * 4; 1983 rlc_req_size = KB_RLC_UCODE_SIZE * 4; 1984 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4; |
1985 num_fw = 6; |
1894 break; 1895 case CHIP_MULLINS: 1896 chip_name = "MULLINS"; | 1986 break; 1987 case CHIP_MULLINS: 1988 chip_name = "MULLINS"; |
1989 new_chip_name = "mullins"; |
1897 pfp_req_size = CIK_PFP_UCODE_SIZE * 4; 1898 me_req_size = CIK_ME_UCODE_SIZE * 4; 1899 ce_req_size = CIK_CE_UCODE_SIZE * 4; 1900 mec_req_size = CIK_MEC_UCODE_SIZE * 4; 1901 rlc_req_size = ML_RLC_UCODE_SIZE * 4; 1902 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4; | 1990 pfp_req_size = CIK_PFP_UCODE_SIZE * 4; 1991 me_req_size = CIK_ME_UCODE_SIZE * 4; 1992 ce_req_size = CIK_CE_UCODE_SIZE * 4; 1993 mec_req_size = CIK_MEC_UCODE_SIZE * 4; 1994 rlc_req_size = ML_RLC_UCODE_SIZE * 4; 1995 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4; |
1996 num_fw = 6; |
1903 break; 1904 default: BUG(); 1905 } 1906 | 1997 break; 1998 default: BUG(); 1999 } 2000 |
1907 DRM_INFO("Loading %s Microcode\n", chip_name); | 2001 DRM_INFO("Loading %s Microcode\n", new_chip_name); |
1908 | 2002 |
1909 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); | 2003 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name); |
1910 err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev); | 2004 err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev); |
1911 if (err) 1912 goto out; 1913 if (rdev->pfp_fw->size != pfp_req_size) { 1914 printk(KERN_ERR 1915 "cik_cp: Bogus length %zu in firmware \"%s\"\n", 1916 rdev->pfp_fw->size, fw_name); 1917 err = -EINVAL; 1918 goto out; | 2005 if (err) { 2006 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); 2007 err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev); 2008 if (err) 2009 goto out; 2010 if (rdev->pfp_fw->size != pfp_req_size) { 2011 printk(KERN_ERR 2012 "cik_cp: Bogus length %zu in firmware \"%s\"\n", 2013 rdev->pfp_fw->size, fw_name); 2014 err = -EINVAL; 2015 goto out; 2016 } 2017 } else { 2018 err = radeon_ucode_validate(rdev->pfp_fw); 2019 if (err) { 2020 printk(KERN_ERR 2021 "cik_fw: validation failed for firmware \"%s\"\n", 2022 fw_name); 2023 goto out; 2024 } else { 2025 new_fw++; 2026 } |
1919 } 1920 | 2027 } 2028 |
1921 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name); | 2029 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", new_chip_name); |
1922 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev); | 2030 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev); |
1923 if (err) 1924 goto out; 1925 if (rdev->me_fw->size != me_req_size) { 1926 printk(KERN_ERR 1927 "cik_cp: Bogus length %zu in firmware \"%s\"\n", 1928 rdev->me_fw->size, fw_name); 1929 err = -EINVAL; | 2031 if (err) { 2032 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name); 2033 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev); 2034 if (err) 2035 goto out; 2036 if (rdev->me_fw->size != me_req_size) { 2037 printk(KERN_ERR 2038 "cik_cp: Bogus length %zu in firmware \"%s\"\n", 2039 rdev->me_fw->size, fw_name); 2040 err = -EINVAL; 2041 } 2042 } else { 2043 err = radeon_ucode_validate(rdev->me_fw); 2044 if (err) { 2045 printk(KERN_ERR 2046 "cik_fw: validation failed for firmware \"%s\"\n", 2047 fw_name); 2048 goto out; 2049 } else { 2050 new_fw++; 2051 } |
1930 } 1931 | 2052 } 2053 |
1932 snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name); | 2054 snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", new_chip_name); |
1933 err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev); | 2055 err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev); |
1934 if (err) 1935 goto out; 1936 if (rdev->ce_fw->size != ce_req_size) { 1937 printk(KERN_ERR 1938 "cik_cp: Bogus length %zu in firmware \"%s\"\n", 1939 rdev->ce_fw->size, fw_name); 1940 err = -EINVAL; | 2056 if (err) { 2057 snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name); 2058 err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev); 2059 if (err) 2060 goto out; 2061 if (rdev->ce_fw->size != ce_req_size) { 2062 printk(KERN_ERR 2063 "cik_cp: Bogus length %zu in firmware \"%s\"\n", 2064 rdev->ce_fw->size, fw_name); 2065 err = -EINVAL; 2066 } 2067 } else { 2068 err = radeon_ucode_validate(rdev->ce_fw); 2069 if (err) { 2070 printk(KERN_ERR 2071 "cik_fw: validation failed for firmware \"%s\"\n", 2072 fw_name); 2073 goto out; 2074 } else { 2075 new_fw++; 2076 } |
1941 } 1942 | 2077 } 2078 |
1943 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name); | 2079 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", new_chip_name); |
1944 err = request_firmware(&rdev->mec_fw, fw_name, rdev->dev); | 2080 err = request_firmware(&rdev->mec_fw, fw_name, rdev->dev); |
1945 if (err) 1946 goto out; 1947 if (rdev->mec_fw->size != mec_req_size) { 1948 printk(KERN_ERR 1949 "cik_cp: Bogus length %zu in firmware \"%s\"\n", 1950 rdev->mec_fw->size, fw_name); 1951 err = -EINVAL; | 2081 if (err) { 2082 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name); 2083 err = request_firmware(&rdev->mec_fw, fw_name, rdev->dev); 2084 if (err) 2085 goto out; 2086 if (rdev->mec_fw->size != mec_req_size) { 2087 printk(KERN_ERR 2088 "cik_cp: Bogus length %zu in firmware \"%s\"\n", 2089 rdev->mec_fw->size, fw_name); 2090 err = -EINVAL; 2091 } 2092 } else { 2093 err = radeon_ucode_validate(rdev->mec_fw); 2094 if (err) { 2095 printk(KERN_ERR 2096 "cik_fw: validation failed for firmware \"%s\"\n", 2097 fw_name); 2098 goto out; 2099 } else { 2100 new_fw++; 2101 } |
1952 } 1953 | 2102 } 2103 |
1954 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name); | 2104 if (rdev->family == CHIP_KAVERI) { 2105 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec2.bin", new_chip_name); 2106 err = request_firmware(&rdev->mec2_fw, fw_name, rdev->dev); 2107 if (err) { 2108 goto out; 2109 } else { 2110 err = radeon_ucode_validate(rdev->mec2_fw); 2111 if (err) { 2112 goto out; 2113 } else { 2114 new_fw++; 2115 } 2116 } 2117 } 2118 2119 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", new_chip_name); |
1955 err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev); | 2120 err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev); |
1956 if (err) 1957 goto out; 1958 if (rdev->rlc_fw->size != rlc_req_size) { 1959 printk(KERN_ERR 1960 "cik_rlc: Bogus length %zu in firmware \"%s\"\n", 1961 rdev->rlc_fw->size, fw_name); 1962 err = -EINVAL; | 2121 if (err) { 2122 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name); 2123 err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev); 2124 if (err) 2125 goto out; 2126 if (rdev->rlc_fw->size != rlc_req_size) { 2127 printk(KERN_ERR 2128 "cik_rlc: Bogus length %zu in firmware \"%s\"\n", 2129 rdev->rlc_fw->size, fw_name); 2130 err = -EINVAL; 2131 } 2132 } else { 2133 err = radeon_ucode_validate(rdev->rlc_fw); 2134 if (err) { 2135 printk(KERN_ERR 2136 "cik_fw: validation failed for firmware \"%s\"\n", 2137 fw_name); 2138 goto out; 2139 } else { 2140 new_fw++; 2141 } |
1963 } 1964 | 2142 } 2143 |
1965 snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name); | 2144 snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", new_chip_name); |
1966 err = request_firmware(&rdev->sdma_fw, fw_name, rdev->dev); | 2145 err = request_firmware(&rdev->sdma_fw, fw_name, rdev->dev); |
1967 if (err) 1968 goto out; 1969 if (rdev->sdma_fw->size != sdma_req_size) { 1970 printk(KERN_ERR 1971 "cik_sdma: Bogus length %zu in firmware \"%s\"\n", 1972 rdev->sdma_fw->size, fw_name); 1973 err = -EINVAL; | 2146 if (err) { 2147 snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name); 2148 err = request_firmware(&rdev->sdma_fw, fw_name, rdev->dev); 2149 if (err) 2150 goto out; 2151 if (rdev->sdma_fw->size != sdma_req_size) { 2152 printk(KERN_ERR 2153 "cik_sdma: Bogus length %zu in firmware \"%s\"\n", 2154 rdev->sdma_fw->size, fw_name); 2155 err = -EINVAL; 2156 } 2157 } else { 2158 err = radeon_ucode_validate(rdev->sdma_fw); 2159 if (err) { 2160 printk(KERN_ERR 2161 "cik_fw: validation failed for firmware \"%s\"\n", 2162 fw_name); 2163 goto out; 2164 } else { 2165 new_fw++; 2166 } |
1974 } 1975 1976 /* No SMC, MC ucode on APUs */ 1977 if (!(rdev->flags & RADEON_IS_IGP)) { | 2167 } 2168 2169 /* No SMC, MC ucode on APUs */ 2170 if (!(rdev->flags & RADEON_IS_IGP)) { |
1978 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name); | 2171 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name); |
1979 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); 1980 if (err) { | 2172 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); 2173 if (err) { |
1981 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); | 2174 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name); |
1982 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); | 2175 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); |
1983 if (err) | 2176 if (err) { 2177 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); 2178 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); 2179 if (err) 2180 goto out; 2181 } 2182 if ((rdev->mc_fw->size != mc_req_size) && 2183 (rdev->mc_fw->size != mc2_req_size)){ 2184 printk(KERN_ERR 2185 "cik_mc: Bogus length %zu in firmware \"%s\"\n", 2186 rdev->mc_fw->size, fw_name); 2187 err = -EINVAL; 2188 } 2189 DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size); 2190 } else { 2191 err = radeon_ucode_validate(rdev->mc_fw); 2192 if (err) { 2193 printk(KERN_ERR 2194 "cik_fw: validation failed for firmware \"%s\"\n", 2195 fw_name); |
1984 goto out; | 2196 goto out; |
2197 } else { 2198 new_fw++; 2199 } |
1985 } | 2200 } |
1986 if ((rdev->mc_fw->size != mc_req_size) && 1987 (rdev->mc_fw->size != mc2_req_size)){ 1988 printk(KERN_ERR 1989 "cik_mc: Bogus length %zu in firmware \"%s\"\n", 1990 rdev->mc_fw->size, fw_name); 1991 err = -EINVAL; 1992 } 1993 DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size); | |
1994 | 2201 |
1995 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); | 2202 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name); |
1996 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); 1997 if (err) { | 2203 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); 2204 if (err) { |
1998 printk(KERN_ERR 1999 "smc: error loading firmware \"%s\"\n", 2000 fw_name); 2001 release_firmware(rdev->smc_fw); 2002 rdev->smc_fw = NULL; 2003 err = 0; 2004 } else if (rdev->smc_fw->size != smc_req_size) { 2005 printk(KERN_ERR 2006 "cik_smc: Bogus length %zu in firmware \"%s\"\n", 2007 rdev->smc_fw->size, fw_name); 2008 err = -EINVAL; | 2205 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); 2206 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); 2207 if (err) { 2208 printk(KERN_ERR 2209 "smc: error loading firmware \"%s\"\n", 2210 fw_name); 2211 release_firmware(rdev->smc_fw); 2212 rdev->smc_fw = NULL; 2213 err = 0; 2214 } else if (rdev->smc_fw->size != smc_req_size) { 2215 printk(KERN_ERR 2216 "cik_smc: Bogus length %zu in firmware \"%s\"\n", 2217 rdev->smc_fw->size, fw_name); 2218 err = -EINVAL; 2219 } 2220 } else { 2221 err = radeon_ucode_validate(rdev->smc_fw); 2222 if (err) { 2223 printk(KERN_ERR 2224 "cik_fw: validation failed for firmware \"%s\"\n", 2225 fw_name); 2226 goto out; 2227 } else { 2228 new_fw++; 2229 } |
2009 } 2010 } 2011 | 2230 } 2231 } 2232 |
2233 if (new_fw == 0) { 2234 rdev->new_fw = false; 2235 } else if (new_fw < num_fw) { 2236 printk(KERN_ERR "ci_fw: mixing new and old firmware!\n"); 2237 err = -EINVAL; 2238 } else { 2239 rdev->new_fw = true; 2240 } 2241 |
2012out: 2013 if (err) { 2014 if (err != -EINVAL) 2015 printk(KERN_ERR 2016 "cik_cp: Failed to load firmware \"%s\"\n", 2017 fw_name); 2018 release_firmware(rdev->pfp_fw); 2019 rdev->pfp_fw = NULL; 2020 release_firmware(rdev->me_fw); 2021 rdev->me_fw = NULL; 2022 release_firmware(rdev->ce_fw); 2023 rdev->ce_fw = NULL; | 2242out: 2243 if (err) { 2244 if (err != -EINVAL) 2245 printk(KERN_ERR 2246 "cik_cp: Failed to load firmware \"%s\"\n", 2247 fw_name); 2248 release_firmware(rdev->pfp_fw); 2249 rdev->pfp_fw = NULL; 2250 release_firmware(rdev->me_fw); 2251 rdev->me_fw = NULL; 2252 release_firmware(rdev->ce_fw); 2253 rdev->ce_fw = NULL; |
2254 release_firmware(rdev->mec_fw); 2255 rdev->mec_fw = NULL; 2256 release_firmware(rdev->mec2_fw); 2257 rdev->mec2_fw = NULL; |
2024 release_firmware(rdev->rlc_fw); 2025 rdev->rlc_fw = NULL; | 2258 release_firmware(rdev->rlc_fw); 2259 rdev->rlc_fw = NULL; |
2260 release_firmware(rdev->sdma_fw); 2261 rdev->sdma_fw = NULL; |
2026 release_firmware(rdev->mc_fw); 2027 rdev->mc_fw = NULL; 2028 release_firmware(rdev->smc_fw); 2029 rdev->smc_fw = NULL; 2030 } 2031 return err; 2032} 2033 --- 252 unchanged lines hidden (view full) --- 2286 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 2287 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 2288 NUM_BANKS(ADDR_SURF_2_BANK)); 2289 break; 2290 default: 2291 gb_tile_moden = 0; 2292 break; 2293 } | 2262 release_firmware(rdev->mc_fw); 2263 rdev->mc_fw = NULL; 2264 release_firmware(rdev->smc_fw); 2265 rdev->smc_fw = NULL; 2266 } 2267 return err; 2268} 2269 --- 252 unchanged lines hidden (view full) --- 2522 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 2523 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 2524 NUM_BANKS(ADDR_SURF_2_BANK)); 2525 break; 2526 default: 2527 gb_tile_moden = 0; 2528 break; 2529 } |
2530 rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden; |
2294 WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden); 2295 } 2296 } else if (num_pipe_configs == 8) { 2297 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { 2298 switch (reg_offset) { 2299 case 0: 2300 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2301 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | --- 887 unchanged lines hidden (view full) --- 3189 u32 se_num, u32 sh_per_se, 3190 u32 max_rb_num_per_se) 3191{ 3192 int i, j; 3193 u32 data, mask; 3194 u32 disabled_rbs = 0; 3195 u32 enabled_rbs = 0; 3196 | 2531 WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden); 2532 } 2533 } else if (num_pipe_configs == 8) { 2534 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { 2535 switch (reg_offset) { 2536 case 0: 2537 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2538 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | --- 887 unchanged lines hidden (view full) --- 3426 u32 se_num, u32 sh_per_se, 3427 u32 max_rb_num_per_se) 3428{ 3429 int i, j; 3430 u32 data, mask; 3431 u32 disabled_rbs = 0; 3432 u32 enabled_rbs = 0; 3433 |
3434 mutex_lock(&rdev->grbm_idx_mutex); |
3197 for (i = 0; i < se_num; i++) { 3198 for (j = 0; j < sh_per_se; j++) { 3199 cik_select_se_sh(rdev, i, j); 3200 data = cik_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se); 3201 if (rdev->family == CHIP_HAWAII) 3202 disabled_rbs |= data << ((i * sh_per_se + j) * HAWAII_RB_BITMAP_WIDTH_PER_SH); 3203 else 3204 disabled_rbs |= data << ((i * sh_per_se + j) * CIK_RB_BITMAP_WIDTH_PER_SH); 3205 } 3206 } 3207 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); | 3435 for (i = 0; i < se_num; i++) { 3436 for (j = 0; j < sh_per_se; j++) { 3437 cik_select_se_sh(rdev, i, j); 3438 data = cik_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se); 3439 if (rdev->family == CHIP_HAWAII) 3440 disabled_rbs |= data << ((i * sh_per_se + j) * HAWAII_RB_BITMAP_WIDTH_PER_SH); 3441 else 3442 disabled_rbs |= data << ((i * sh_per_se + j) * CIK_RB_BITMAP_WIDTH_PER_SH); 3443 } 3444 } 3445 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); |
3446 mutex_unlock(&rdev->grbm_idx_mutex); |
3208 3209 mask = 1; 3210 for (i = 0; i < max_rb_num_per_se * se_num; i++) { 3211 if (!(disabled_rbs & mask)) 3212 enabled_rbs |= mask; 3213 mask <<= 1; 3214 } 3215 3216 rdev->config.cik.backend_enable_mask = enabled_rbs; 3217 | 3447 3448 mask = 1; 3449 for (i = 0; i < max_rb_num_per_se * se_num; i++) { 3450 if (!(disabled_rbs & mask)) 3451 enabled_rbs |= mask; 3452 mask <<= 1; 3453 } 3454 3455 rdev->config.cik.backend_enable_mask = enabled_rbs; 3456 |
3457 mutex_lock(&rdev->grbm_idx_mutex); |
3218 for (i = 0; i < se_num; i++) { 3219 cik_select_se_sh(rdev, i, 0xffffffff); 3220 data = 0; 3221 for (j = 0; j < sh_per_se; j++) { 3222 switch (enabled_rbs & 3) { 3223 case 0: 3224 if (j == 0) 3225 data |= PKR_MAP(RASTER_CONFIG_RB_MAP_3); --- 11 unchanged lines hidden (view full) --- 3237 data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2); 3238 break; 3239 } 3240 enabled_rbs >>= 2; 3241 } 3242 WREG32(PA_SC_RASTER_CONFIG, data); 3243 } 3244 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); | 3458 for (i = 0; i < se_num; i++) { 3459 cik_select_se_sh(rdev, i, 0xffffffff); 3460 data = 0; 3461 for (j = 0; j < sh_per_se; j++) { 3462 switch (enabled_rbs & 3) { 3463 case 0: 3464 if (j == 0) 3465 data |= PKR_MAP(RASTER_CONFIG_RB_MAP_3); --- 11 unchanged lines hidden (view full) --- 3477 data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2); 3478 break; 3479 } 3480 enabled_rbs >>= 2; 3481 } 3482 WREG32(PA_SC_RASTER_CONFIG, data); 3483 } 3484 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); |
3485 mutex_unlock(&rdev->grbm_idx_mutex); |
3245} 3246 3247/** 3248 * cik_gpu_init - setup the 3D engine 3249 * 3250 * @rdev: radeon_device pointer 3251 * 3252 * Configures the 3D engine and tiling configuration 3253 * registers so that the 3D engine is usable. 3254 */ 3255static void cik_gpu_init(struct radeon_device *rdev) 3256{ 3257 u32 gb_addr_config = RREG32(GB_ADDR_CONFIG); 3258 u32 mc_shared_chmap, mc_arb_ramcfg; 3259 u32 hdp_host_path_cntl; 3260 u32 tmp; | 3486} 3487 3488/** 3489 * cik_gpu_init - setup the 3D engine 3490 * 3491 * @rdev: radeon_device pointer 3492 * 3493 * Configures the 3D engine and tiling configuration 3494 * registers so that the 3D engine is usable. 3495 */ 3496static void cik_gpu_init(struct radeon_device *rdev) 3497{ 3498 u32 gb_addr_config = RREG32(GB_ADDR_CONFIG); 3499 u32 mc_shared_chmap, mc_arb_ramcfg; 3500 u32 hdp_host_path_cntl; 3501 u32 tmp; |
3261 int i, j, k; | 3502 int i, j; |
3262 3263 switch (rdev->family) { 3264 case CHIP_BONAIRE: 3265 rdev->config.cik.max_shader_engines = 2; 3266 rdev->config.cik.max_tile_pipes = 4; 3267 rdev->config.cik.max_cu_per_sh = 7; 3268 rdev->config.cik.max_sh_per_se = 1; 3269 rdev->config.cik.max_backends_per_se = 2; --- 44 unchanged lines hidden (view full) --- 3314 (rdev->pdev->device == 0x131D)) { 3315 rdev->config.cik.max_cu_per_sh = 6; 3316 rdev->config.cik.max_backends_per_se = 2; 3317 } else if ((rdev->pdev->device == 0x1306) || 3318 (rdev->pdev->device == 0x1307) || 3319 (rdev->pdev->device == 0x130B) || 3320 (rdev->pdev->device == 0x130E) || 3321 (rdev->pdev->device == 0x1315) || | 3503 3504 switch (rdev->family) { 3505 case CHIP_BONAIRE: 3506 rdev->config.cik.max_shader_engines = 2; 3507 rdev->config.cik.max_tile_pipes = 4; 3508 rdev->config.cik.max_cu_per_sh = 7; 3509 rdev->config.cik.max_sh_per_se = 1; 3510 rdev->config.cik.max_backends_per_se = 2; --- 44 unchanged lines hidden (view full) --- 3555 (rdev->pdev->device == 0x131D)) { 3556 rdev->config.cik.max_cu_per_sh = 6; 3557 rdev->config.cik.max_backends_per_se = 2; 3558 } else if ((rdev->pdev->device == 0x1306) || 3559 (rdev->pdev->device == 0x1307) || 3560 (rdev->pdev->device == 0x130B) || 3561 (rdev->pdev->device == 0x130E) || 3562 (rdev->pdev->device == 0x1315) || |
3563 (rdev->pdev->device == 0x1318) || |
3322 (rdev->pdev->device == 0x131B)) { 3323 rdev->config.cik.max_cu_per_sh = 4; 3324 rdev->config.cik.max_backends_per_se = 1; 3325 } else { 3326 rdev->config.cik.max_cu_per_sh = 3; 3327 rdev->config.cik.max_backends_per_se = 1; 3328 } 3329 rdev->config.cik.max_sh_per_se = 1; --- 112 unchanged lines hidden (view full) --- 3442 WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config); 3443 3444 cik_tiling_mode_table_init(rdev); 3445 3446 cik_setup_rb(rdev, rdev->config.cik.max_shader_engines, 3447 rdev->config.cik.max_sh_per_se, 3448 rdev->config.cik.max_backends_per_se); 3449 | 3564 (rdev->pdev->device == 0x131B)) { 3565 rdev->config.cik.max_cu_per_sh = 4; 3566 rdev->config.cik.max_backends_per_se = 1; 3567 } else { 3568 rdev->config.cik.max_cu_per_sh = 3; 3569 rdev->config.cik.max_backends_per_se = 1; 3570 } 3571 rdev->config.cik.max_sh_per_se = 1; --- 112 unchanged lines hidden (view full) --- 3684 WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config); 3685 3686 cik_tiling_mode_table_init(rdev); 3687 3688 cik_setup_rb(rdev, rdev->config.cik.max_shader_engines, 3689 rdev->config.cik.max_sh_per_se, 3690 rdev->config.cik.max_backends_per_se); 3691 |
3692 rdev->config.cik.active_cus = 0; |
3450 for (i = 0; i < rdev->config.cik.max_shader_engines; i++) { 3451 for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) { | 3693 for (i = 0; i < rdev->config.cik.max_shader_engines; i++) { 3694 for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) { |
3452 for (k = 0; k < rdev->config.cik.max_cu_per_sh; k++) { 3453 rdev->config.cik.active_cus += 3454 hweight32(cik_get_cu_active_bitmap(rdev, i, j)); 3455 } | 3695 rdev->config.cik.active_cus += 3696 hweight32(cik_get_cu_active_bitmap(rdev, i, j)); |
3456 } 3457 } 3458 3459 /* set HW defaults for 3D engine */ 3460 WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60)); 3461 | 3697 } 3698 } 3699 3700 /* set HW defaults for 3D engine */ 3701 WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60)); 3702 |
3703 mutex_lock(&rdev->grbm_idx_mutex); 3704 /* 3705 * making sure that the following register writes will be broadcasted 3706 * to all the shaders 3707 */ 3708 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); |
3462 WREG32(SX_DEBUG_1, 0x20); 3463 3464 WREG32(TA_CNTL_AUX, 0x00010000); 3465 3466 tmp = RREG32(SPI_CONFIG_CNTL); 3467 tmp |= 0x03000000; 3468 WREG32(SPI_CONFIG_CNTL, tmp); 3469 --- 39 unchanged lines hidden (view full) --- 3509 tmp |= HDP_FLUSH_INVALIDATE_CACHE; 3510 WREG32(HDP_MISC_CNTL, tmp); 3511 3512 hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL); 3513 WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl); 3514 3515 WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3)); 3516 WREG32(PA_SC_ENHANCE, ENABLE_PA_SC_OUT_OF_ORDER); | 3709 WREG32(SX_DEBUG_1, 0x20); 3710 3711 WREG32(TA_CNTL_AUX, 0x00010000); 3712 3713 tmp = RREG32(SPI_CONFIG_CNTL); 3714 tmp |= 0x03000000; 3715 WREG32(SPI_CONFIG_CNTL, tmp); 3716 --- 39 unchanged lines hidden (view full) --- 3756 tmp |= HDP_FLUSH_INVALIDATE_CACHE; 3757 WREG32(HDP_MISC_CNTL, tmp); 3758 3759 hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL); 3760 WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl); 3761 3762 WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3)); 3763 WREG32(PA_SC_ENHANCE, ENABLE_PA_SC_OUT_OF_ORDER); |
3764 mutex_unlock(&rdev->grbm_idx_mutex); |
3517 3518 udelay(50); 3519} 3520 3521/* 3522 * GPU scratch registers helpers function. 3523 */ 3524/** --- 46 unchanged lines hidden (view full) --- 3571 if (r) { 3572 DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r); 3573 radeon_scratch_free(rdev, scratch); 3574 return r; 3575 } 3576 radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); 3577 radeon_ring_write(ring, ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2)); 3578 radeon_ring_write(ring, 0xDEADBEEF); | 3765 3766 udelay(50); 3767} 3768 3769/* 3770 * GPU scratch registers helpers function. 3771 */ 3772/** --- 46 unchanged lines hidden (view full) --- 3819 if (r) { 3820 DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r); 3821 radeon_scratch_free(rdev, scratch); 3822 return r; 3823 } 3824 radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); 3825 radeon_ring_write(ring, ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2)); 3826 radeon_ring_write(ring, 0xDEADBEEF); |
3579 radeon_ring_unlock_commit(rdev, ring); | 3827 radeon_ring_unlock_commit(rdev, ring, false); |
3580 3581 for (i = 0; i < rdev->usec_timeout; i++) { 3582 tmp = RREG32(scratch); 3583 if (tmp == 0xDEADBEEF) 3584 break; 3585 DRM_UDELAY(1); 3586 } 3587 if (i < rdev->usec_timeout) { --- 72 unchanged lines hidden (view full) --- 3660 radeon_ring_write(ring, (EOP_TCL1_ACTION_EN | 3661 EOP_TC_ACTION_EN | 3662 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | 3663 EVENT_INDEX(5))); 3664 radeon_ring_write(ring, addr & 0xfffffffc); 3665 radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | DATA_SEL(1) | INT_SEL(2)); 3666 radeon_ring_write(ring, fence->seq); 3667 radeon_ring_write(ring, 0); | 3828 3829 for (i = 0; i < rdev->usec_timeout; i++) { 3830 tmp = RREG32(scratch); 3831 if (tmp == 0xDEADBEEF) 3832 break; 3833 DRM_UDELAY(1); 3834 } 3835 if (i < rdev->usec_timeout) { --- 72 unchanged lines hidden (view full) --- 3908 radeon_ring_write(ring, (EOP_TCL1_ACTION_EN | 3909 EOP_TC_ACTION_EN | 3910 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | 3911 EVENT_INDEX(5))); 3912 radeon_ring_write(ring, addr & 0xfffffffc); 3913 radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | DATA_SEL(1) | INT_SEL(2)); 3914 radeon_ring_write(ring, fence->seq); 3915 radeon_ring_write(ring, 0); |
3668 /* HDP flush */ 3669 cik_hdp_flush_cp_ring_emit(rdev, fence->ring); | |
3670} 3671 3672/** 3673 * cik_fence_compute_ring_emit - emit a fence on the compute ring 3674 * 3675 * @rdev: radeon_device pointer 3676 * @fence: radeon fence object 3677 * --- 12 unchanged lines hidden (view full) --- 3690 EOP_TC_ACTION_EN | 3691 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | 3692 EVENT_INDEX(5))); 3693 radeon_ring_write(ring, DATA_SEL(1) | INT_SEL(2)); 3694 radeon_ring_write(ring, addr & 0xfffffffc); 3695 radeon_ring_write(ring, upper_32_bits(addr)); 3696 radeon_ring_write(ring, fence->seq); 3697 radeon_ring_write(ring, 0); | 3916} 3917 3918/** 3919 * cik_fence_compute_ring_emit - emit a fence on the compute ring 3920 * 3921 * @rdev: radeon_device pointer 3922 * @fence: radeon fence object 3923 * --- 12 unchanged lines hidden (view full) --- 3936 EOP_TC_ACTION_EN | 3937 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | 3938 EVENT_INDEX(5))); 3939 radeon_ring_write(ring, DATA_SEL(1) | INT_SEL(2)); 3940 radeon_ring_write(ring, addr & 0xfffffffc); 3941 radeon_ring_write(ring, upper_32_bits(addr)); 3942 radeon_ring_write(ring, fence->seq); 3943 radeon_ring_write(ring, 0); |
3698 /* HDP flush */ 3699 cik_hdp_flush_cp_ring_emit(rdev, fence->ring); | |
3700} 3701 | 3944} 3945 |
3946/** 3947 * cik_semaphore_ring_emit - emit a semaphore on the CP ring 3948 * 3949 * @rdev: radeon_device pointer 3950 * @ring: radeon ring buffer object 3951 * @semaphore: radeon semaphore object 3952 * @emit_wait: Is this a sempahore wait? 3953 * 3954 * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP 3955 * from running ahead of semaphore waits. 3956 */ |
3702bool cik_semaphore_ring_emit(struct radeon_device *rdev, 3703 struct radeon_ring *ring, 3704 struct radeon_semaphore *semaphore, 3705 bool emit_wait) 3706{ 3707 uint64_t addr = semaphore->gpu_addr; 3708 unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL; 3709 3710 radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1)); 3711 radeon_ring_write(ring, lower_32_bits(addr)); 3712 radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel); 3713 | 3957bool cik_semaphore_ring_emit(struct radeon_device *rdev, 3958 struct radeon_ring *ring, 3959 struct radeon_semaphore *semaphore, 3960 bool emit_wait) 3961{ 3962 uint64_t addr = semaphore->gpu_addr; 3963 unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL; 3964 3965 radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1)); 3966 radeon_ring_write(ring, lower_32_bits(addr)); 3967 radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel); 3968 |
3969 if (emit_wait && ring->idx == RADEON_RING_TYPE_GFX_INDEX) { 3970 /* Prevent the PFP from running ahead of the semaphore wait */ 3971 radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); 3972 radeon_ring_write(ring, 0x0); 3973 } 3974 |
|
3714 return true; 3715} 3716 3717/** 3718 * cik_copy_cpdma - copy pages using the CP DMA engine 3719 * 3720 * @rdev: radeon_device pointer 3721 * @src_offset: src GPU address 3722 * @dst_offset: dst GPU address 3723 * @num_gpu_pages: number of GPU pages to xfer | 3975 return true; 3976} 3977 3978/** 3979 * cik_copy_cpdma - copy pages using the CP DMA engine 3980 * 3981 * @rdev: radeon_device pointer 3982 * @src_offset: src GPU address 3983 * @dst_offset: dst GPU address 3984 * @num_gpu_pages: number of GPU pages to xfer |
3724 * @fence: radeon fence object | 3985 * @resv: reservation object to sync to |
3725 * 3726 * Copy GPU paging using the CP DMA engine (CIK+). 3727 * Used by the radeon ttm implementation to move pages if 3728 * registered as the asic copy callback. 3729 */ | 3986 * 3987 * Copy GPU paging using the CP DMA engine (CIK+). 3988 * Used by the radeon ttm implementation to move pages if 3989 * registered as the asic copy callback. 3990 */ |
3730int cik_copy_cpdma(struct radeon_device *rdev, 3731 uint64_t src_offset, uint64_t dst_offset, 3732 unsigned num_gpu_pages, 3733 struct radeon_fence **fence) | 3991struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev, 3992 uint64_t src_offset, uint64_t dst_offset, 3993 unsigned num_gpu_pages, 3994 struct reservation_object *resv) |
3734{ 3735 struct radeon_semaphore *sem = NULL; | 3995{ 3996 struct radeon_semaphore *sem = NULL; |
3997 struct radeon_fence *fence; |
|
3736 int ring_index = rdev->asic->copy.blit_ring_index; 3737 struct radeon_ring *ring = &rdev->ring[ring_index]; 3738 u32 size_in_bytes, cur_size_in_bytes, control; 3739 int i, num_loops; 3740 int r = 0; 3741 3742 r = radeon_semaphore_create(rdev, &sem); 3743 if (r) { 3744 DRM_ERROR("radeon: moving bo (%d).\n", r); | 3998 int ring_index = rdev->asic->copy.blit_ring_index; 3999 struct radeon_ring *ring = &rdev->ring[ring_index]; 4000 u32 size_in_bytes, cur_size_in_bytes, control; 4001 int i, num_loops; 4002 int r = 0; 4003 4004 r = radeon_semaphore_create(rdev, &sem); 4005 if (r) { 4006 DRM_ERROR("radeon: moving bo (%d).\n", r); |
3745 return r; | 4007 return ERR_PTR(r); |
3746 } 3747 3748 size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT); 3749 num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff); 3750 r = radeon_ring_lock(rdev, ring, num_loops * 7 + 18); 3751 if (r) { 3752 DRM_ERROR("radeon: moving bo (%d).\n", r); 3753 radeon_semaphore_free(rdev, &sem, NULL); | 4008 } 4009 4010 size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT); 4011 num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff); 4012 r = radeon_ring_lock(rdev, ring, num_loops * 7 + 18); 4013 if (r) { 4014 DRM_ERROR("radeon: moving bo (%d).\n", r); 4015 radeon_semaphore_free(rdev, &sem, NULL); |
3754 return r; | 4016 return ERR_PTR(r); |
3755 } 3756 | 4017 } 4018 |
3757 radeon_semaphore_sync_to(sem, *fence); | 4019 radeon_semaphore_sync_resv(rdev, sem, resv, false); |
3758 radeon_semaphore_sync_rings(rdev, sem, ring->idx); 3759 3760 for (i = 0; i < num_loops; i++) { 3761 cur_size_in_bytes = size_in_bytes; 3762 if (cur_size_in_bytes > 0x1fffff) 3763 cur_size_in_bytes = 0x1fffff; 3764 size_in_bytes -= cur_size_in_bytes; 3765 control = 0; --- 5 unchanged lines hidden (view full) --- 3771 radeon_ring_write(ring, upper_32_bits(src_offset)); 3772 radeon_ring_write(ring, lower_32_bits(dst_offset)); 3773 radeon_ring_write(ring, upper_32_bits(dst_offset)); 3774 radeon_ring_write(ring, cur_size_in_bytes); 3775 src_offset += cur_size_in_bytes; 3776 dst_offset += cur_size_in_bytes; 3777 } 3778 | 4020 radeon_semaphore_sync_rings(rdev, sem, ring->idx); 4021 4022 for (i = 0; i < num_loops; i++) { 4023 cur_size_in_bytes = size_in_bytes; 4024 if (cur_size_in_bytes > 0x1fffff) 4025 cur_size_in_bytes = 0x1fffff; 4026 size_in_bytes -= cur_size_in_bytes; 4027 control = 0; --- 5 unchanged lines hidden (view full) --- 4033 radeon_ring_write(ring, upper_32_bits(src_offset)); 4034 radeon_ring_write(ring, lower_32_bits(dst_offset)); 4035 radeon_ring_write(ring, upper_32_bits(dst_offset)); 4036 radeon_ring_write(ring, cur_size_in_bytes); 4037 src_offset += cur_size_in_bytes; 4038 dst_offset += cur_size_in_bytes; 4039 } 4040 |
3779 r = radeon_fence_emit(rdev, fence, ring->idx); | 4041 r = radeon_fence_emit(rdev, &fence, ring->idx); |
3780 if (r) { 3781 radeon_ring_unlock_undo(rdev, ring); 3782 radeon_semaphore_free(rdev, &sem, NULL); | 4042 if (r) { 4043 radeon_ring_unlock_undo(rdev, ring); 4044 radeon_semaphore_free(rdev, &sem, NULL); |
3783 return r; | 4045 return ERR_PTR(r); |
3784 } 3785 | 4046 } 4047 |
3786 radeon_ring_unlock_commit(rdev, ring); 3787 radeon_semaphore_free(rdev, &sem, *fence); | 4048 radeon_ring_unlock_commit(rdev, ring, false); 4049 radeon_semaphore_free(rdev, &sem, fence); |
3788 | 4050 |
3789 return r; | 4051 return fence; |
3790} 3791 3792/* 3793 * IB stuff 3794 */ 3795/** 3796 * cik_ring_ib_execute - emit an IB (Indirect Buffer) on the gfx ring 3797 * --- 79 unchanged lines hidden (view full) --- 3877 DRM_ERROR("radeon: failed to get ib (%d).\n", r); 3878 radeon_scratch_free(rdev, scratch); 3879 return r; 3880 } 3881 ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1); 3882 ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2); 3883 ib.ptr[2] = 0xDEADBEEF; 3884 ib.length_dw = 3; | 4052} 4053 4054/* 4055 * IB stuff 4056 */ 4057/** 4058 * cik_ring_ib_execute - emit an IB (Indirect Buffer) on the gfx ring 4059 * --- 79 unchanged lines hidden (view full) --- 4139 DRM_ERROR("radeon: failed to get ib (%d).\n", r); 4140 radeon_scratch_free(rdev, scratch); 4141 return r; 4142 } 4143 ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1); 4144 ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2); 4145 ib.ptr[2] = 0xDEADBEEF; 4146 ib.length_dw = 3; |
3885 r = radeon_ib_schedule(rdev, &ib, NULL); | 4147 r = radeon_ib_schedule(rdev, &ib, NULL, false); |
3886 if (r) { 3887 radeon_scratch_free(rdev, scratch); 3888 radeon_ib_free(rdev, &ib); 3889 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); 3890 return r; 3891 } 3892 r = radeon_fence_wait(ib.fence, false); 3893 if (r) { --- 69 unchanged lines hidden (view full) --- 3963 * 3964 * @rdev: radeon_device pointer 3965 * 3966 * Loads the gfx PFP, ME, and CE ucode. 3967 * Returns 0 for success, -EINVAL if the ucode is not available. 3968 */ 3969static int cik_cp_gfx_load_microcode(struct radeon_device *rdev) 3970{ | 4148 if (r) { 4149 radeon_scratch_free(rdev, scratch); 4150 radeon_ib_free(rdev, &ib); 4151 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); 4152 return r; 4153 } 4154 r = radeon_fence_wait(ib.fence, false); 4155 if (r) { --- 69 unchanged lines hidden (view full) --- 4225 * 4226 * @rdev: radeon_device pointer 4227 * 4228 * Loads the gfx PFP, ME, and CE ucode. 4229 * Returns 0 for success, -EINVAL if the ucode is not available. 4230 */ 4231static int cik_cp_gfx_load_microcode(struct radeon_device *rdev) 4232{ |
3971 const __be32 *fw_data; | |
3972 int i; 3973 3974 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw) 3975 return -EINVAL; 3976 3977 cik_cp_gfx_enable(rdev, false); 3978 | 4233 int i; 4234 4235 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw) 4236 return -EINVAL; 4237 4238 cik_cp_gfx_enable(rdev, false); 4239 |
3979 /* PFP */ 3980 fw_data = (const __be32 *)rdev->pfp_fw->data; 3981 WREG32(CP_PFP_UCODE_ADDR, 0); 3982 for (i = 0; i < CIK_PFP_UCODE_SIZE; i++) 3983 WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++)); 3984 WREG32(CP_PFP_UCODE_ADDR, 0); | 4240 if (rdev->new_fw) { 4241 const struct gfx_firmware_header_v1_0 *pfp_hdr = 4242 (const struct gfx_firmware_header_v1_0 *)rdev->pfp_fw->data; 4243 const struct gfx_firmware_header_v1_0 *ce_hdr = 4244 (const struct gfx_firmware_header_v1_0 *)rdev->ce_fw->data; 4245 const struct gfx_firmware_header_v1_0 *me_hdr = 4246 (const struct gfx_firmware_header_v1_0 *)rdev->me_fw->data; 4247 const __le32 *fw_data; 4248 u32 fw_size; |
3985 | 4249 |
3986 /* CE */ 3987 fw_data = (const __be32 *)rdev->ce_fw->data; 3988 WREG32(CP_CE_UCODE_ADDR, 0); 3989 for (i = 0; i < CIK_CE_UCODE_SIZE; i++) 3990 WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++)); 3991 WREG32(CP_CE_UCODE_ADDR, 0); | 4250 radeon_ucode_print_gfx_hdr(&pfp_hdr->header); 4251 radeon_ucode_print_gfx_hdr(&ce_hdr->header); 4252 radeon_ucode_print_gfx_hdr(&me_hdr->header); |
3992 | 4253 |
3993 /* ME */ 3994 fw_data = (const __be32 *)rdev->me_fw->data; 3995 WREG32(CP_ME_RAM_WADDR, 0); 3996 for (i = 0; i < CIK_ME_UCODE_SIZE; i++) 3997 WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++)); 3998 WREG32(CP_ME_RAM_WADDR, 0); | 4254 /* PFP */ 4255 fw_data = (const __le32 *) 4256 (rdev->pfp_fw->data + le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes)); 4257 fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4; 4258 WREG32(CP_PFP_UCODE_ADDR, 0); 4259 for (i = 0; i < fw_size; i++) 4260 WREG32(CP_PFP_UCODE_DATA, le32_to_cpup(fw_data++)); 4261 WREG32(CP_PFP_UCODE_ADDR, le32_to_cpu(pfp_hdr->header.ucode_version)); |
3999 | 4262 |
4000 WREG32(CP_PFP_UCODE_ADDR, 0); 4001 WREG32(CP_CE_UCODE_ADDR, 0); 4002 WREG32(CP_ME_RAM_WADDR, 0); 4003 WREG32(CP_ME_RAM_RADDR, 0); | 4263 /* CE */ 4264 fw_data = (const __le32 *) 4265 (rdev->ce_fw->data + le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes)); 4266 fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4; 4267 WREG32(CP_CE_UCODE_ADDR, 0); 4268 for (i = 0; i < fw_size; i++) 4269 WREG32(CP_CE_UCODE_DATA, le32_to_cpup(fw_data++)); 4270 WREG32(CP_CE_UCODE_ADDR, le32_to_cpu(ce_hdr->header.ucode_version)); 4271 4272 /* ME */ 4273 fw_data = (const __be32 *) 4274 (rdev->me_fw->data + le32_to_cpu(me_hdr->header.ucode_array_offset_bytes)); 4275 fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4; 4276 WREG32(CP_ME_RAM_WADDR, 0); 4277 for (i = 0; i < fw_size; i++) 4278 WREG32(CP_ME_RAM_DATA, le32_to_cpup(fw_data++)); 4279 WREG32(CP_ME_RAM_WADDR, le32_to_cpu(me_hdr->header.ucode_version)); 4280 WREG32(CP_ME_RAM_RADDR, le32_to_cpu(me_hdr->header.ucode_version)); 4281 } else { 4282 const __be32 *fw_data; 4283 4284 /* PFP */ 4285 fw_data = (const __be32 *)rdev->pfp_fw->data; 4286 WREG32(CP_PFP_UCODE_ADDR, 0); 4287 for (i = 0; i < CIK_PFP_UCODE_SIZE; i++) 4288 WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++)); 4289 WREG32(CP_PFP_UCODE_ADDR, 0); 4290 4291 /* CE */ 4292 fw_data = (const __be32 *)rdev->ce_fw->data; 4293 WREG32(CP_CE_UCODE_ADDR, 0); 4294 for (i = 0; i < CIK_CE_UCODE_SIZE; i++) 4295 WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++)); 4296 WREG32(CP_CE_UCODE_ADDR, 0); 4297 4298 /* ME */ 4299 fw_data = (const __be32 *)rdev->me_fw->data; 4300 WREG32(CP_ME_RAM_WADDR, 0); 4301 for (i = 0; i < CIK_ME_UCODE_SIZE; i++) 4302 WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++)); 4303 WREG32(CP_ME_RAM_WADDR, 0); 4304 } 4305 |
4004 return 0; 4005} 4006 4007/** 4008 * cik_cp_gfx_start - start the gfx ring 4009 * 4010 * @rdev: radeon_device pointer 4011 * --- 43 unchanged lines hidden (view full) --- 4055 radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0)); 4056 radeon_ring_write(ring, 0); 4057 4058 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); 4059 radeon_ring_write(ring, 0x00000316); 4060 radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ 4061 radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */ 4062 | 4306 return 0; 4307} 4308 4309/** 4310 * cik_cp_gfx_start - start the gfx ring 4311 * 4312 * @rdev: radeon_device pointer 4313 * --- 43 unchanged lines hidden (view full) --- 4357 radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0)); 4358 radeon_ring_write(ring, 0); 4359 4360 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); 4361 radeon_ring_write(ring, 0x00000316); 4362 radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ 4363 radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */ 4364 |
4063 radeon_ring_unlock_commit(rdev, ring); | 4365 radeon_ring_unlock_commit(rdev, ring, false); |
4064 4065 return 0; 4066} 4067 4068/** 4069 * cik_cp_gfx_fini - stop the gfx ring 4070 * 4071 * @rdev: radeon_device pointer --- 183 unchanged lines hidden (view full) --- 4255 * 4256 * @rdev: radeon_device pointer 4257 * 4258 * Loads the compute MEC1&2 ucode. 4259 * Returns 0 for success, -EINVAL if the ucode is not available. 4260 */ 4261static int cik_cp_compute_load_microcode(struct radeon_device *rdev) 4262{ | 4366 4367 return 0; 4368} 4369 4370/** 4371 * cik_cp_gfx_fini - stop the gfx ring 4372 * 4373 * @rdev: radeon_device pointer --- 183 unchanged lines hidden (view full) --- 4557 * 4558 * @rdev: radeon_device pointer 4559 * 4560 * Loads the compute MEC1&2 ucode. 4561 * Returns 0 for success, -EINVAL if the ucode is not available. 4562 */ 4563static int cik_cp_compute_load_microcode(struct radeon_device *rdev) 4564{ |
4263 const __be32 *fw_data; | |
4264 int i; 4265 4266 if (!rdev->mec_fw) 4267 return -EINVAL; 4268 4269 cik_cp_compute_enable(rdev, false); 4270 | 4565 int i; 4566 4567 if (!rdev->mec_fw) 4568 return -EINVAL; 4569 4570 cik_cp_compute_enable(rdev, false); 4571 |
4271 /* MEC1 */ 4272 fw_data = (const __be32 *)rdev->mec_fw->data; 4273 WREG32(CP_MEC_ME1_UCODE_ADDR, 0); 4274 for (i = 0; i < CIK_MEC_UCODE_SIZE; i++) 4275 WREG32(CP_MEC_ME1_UCODE_DATA, be32_to_cpup(fw_data++)); 4276 WREG32(CP_MEC_ME1_UCODE_ADDR, 0); | 4572 if (rdev->new_fw) { 4573 const struct gfx_firmware_header_v1_0 *mec_hdr = 4574 (const struct gfx_firmware_header_v1_0 *)rdev->mec_fw->data; 4575 const __le32 *fw_data; 4576 u32 fw_size; |
4277 | 4577 |
4278 if (rdev->family == CHIP_KAVERI) { | 4578 radeon_ucode_print_gfx_hdr(&mec_hdr->header); 4579 4580 /* MEC1 */ 4581 fw_data = (const __le32 *) 4582 (rdev->mec_fw->data + le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes)); 4583 fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4; 4584 WREG32(CP_MEC_ME1_UCODE_ADDR, 0); 4585 for (i = 0; i < fw_size; i++) 4586 WREG32(CP_MEC_ME1_UCODE_DATA, le32_to_cpup(fw_data++)); 4587 WREG32(CP_MEC_ME1_UCODE_ADDR, le32_to_cpu(mec_hdr->header.ucode_version)); 4588 |
4279 /* MEC2 */ | 4589 /* MEC2 */ |
4590 if (rdev->family == CHIP_KAVERI) { 4591 const struct gfx_firmware_header_v1_0 *mec2_hdr = 4592 (const struct gfx_firmware_header_v1_0 *)rdev->mec2_fw->data; 4593 4594 fw_data = (const __le32 *) 4595 (rdev->mec2_fw->data + 4596 le32_to_cpu(mec2_hdr->header.ucode_array_offset_bytes)); 4597 fw_size = le32_to_cpu(mec2_hdr->header.ucode_size_bytes) / 4; 4598 WREG32(CP_MEC_ME2_UCODE_ADDR, 0); 4599 for (i = 0; i < fw_size; i++) 4600 WREG32(CP_MEC_ME2_UCODE_DATA, le32_to_cpup(fw_data++)); 4601 WREG32(CP_MEC_ME2_UCODE_ADDR, le32_to_cpu(mec2_hdr->header.ucode_version)); 4602 } 4603 } else { 4604 const __be32 *fw_data; 4605 4606 /* MEC1 */ |
|
4280 fw_data = (const __be32 *)rdev->mec_fw->data; | 4607 fw_data = (const __be32 *)rdev->mec_fw->data; |
4281 WREG32(CP_MEC_ME2_UCODE_ADDR, 0); | 4608 WREG32(CP_MEC_ME1_UCODE_ADDR, 0); |
4282 for (i = 0; i < CIK_MEC_UCODE_SIZE; i++) | 4609 for (i = 0; i < CIK_MEC_UCODE_SIZE; i++) |
4283 WREG32(CP_MEC_ME2_UCODE_DATA, be32_to_cpup(fw_data++)); 4284 WREG32(CP_MEC_ME2_UCODE_ADDR, 0); | 4610 WREG32(CP_MEC_ME1_UCODE_DATA, be32_to_cpup(fw_data++)); 4611 WREG32(CP_MEC_ME1_UCODE_ADDR, 0); 4612 4613 if (rdev->family == CHIP_KAVERI) { 4614 /* MEC2 */ 4615 fw_data = (const __be32 *)rdev->mec_fw->data; 4616 WREG32(CP_MEC_ME2_UCODE_ADDR, 0); 4617 for (i = 0; i < CIK_MEC_UCODE_SIZE; i++) 4618 WREG32(CP_MEC_ME2_UCODE_DATA, be32_to_cpup(fw_data++)); 4619 WREG32(CP_MEC_ME2_UCODE_ADDR, 0); 4620 } |
4285 } 4286 4287 return 0; 4288} 4289 4290/** 4291 * cik_cp_compute_start - start the compute queues 4292 * --- 64 unchanged lines hidden (view full) --- 4357static int cik_mec_init(struct radeon_device *rdev) 4358{ 4359 int r; 4360 u32 *hpd; 4361 4362 /* 4363 * KV: 2 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 64 Queues total 4364 * CI/KB: 1 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 32 Queues total | 4621 } 4622 4623 return 0; 4624} 4625 4626/** 4627 * cik_cp_compute_start - start the compute queues 4628 * --- 64 unchanged lines hidden (view full) --- 4693static int cik_mec_init(struct radeon_device *rdev) 4694{ 4695 int r; 4696 u32 *hpd; 4697 4698 /* 4699 * KV: 2 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 64 Queues total 4700 * CI/KB: 1 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 32 Queues total |
4701 * Nonetheless, we assign only 1 pipe because all other pipes will 4702 * be handled by KFD |
|
4365 */ | 4703 */ |
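/*
 * Quick check of the arithmetic in the comment above: total queues are
 * MECs * pipes-per-MEC * 8 queues-per-pipe.  The first two rows come from
 * that comment; the single-MEC/single-pipe row reflects what this driver now
 * keeps for itself, with the remaining pipes left to amdkfd.
 */
#include <stdio.h>

int main(void)
{
	static const struct { const char *cfg; unsigned int mec, pipes; } t[] = {
		{ "KV: 2 MEC x 4 pipes",    2, 4 },  /* 64 queues */
		{ "CI/KB: 1 MEC x 4 pipes", 1, 4 },  /* 32 queues */
		{ "claimed by radeon here", 1, 1 },  /*  8 queues */
	};
	unsigned int i;

	for (i = 0; i < sizeof(t) / sizeof(t[0]); i++)
		printf("%-24s -> %u queues\n", t[i].cfg, t[i].mec * t[i].pipes * 8);
	return 0;
}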
4366 if (rdev->family == CHIP_KAVERI) 4367 rdev->mec.num_mec = 2; 4368 else 4369 rdev->mec.num_mec = 1; 4370 rdev->mec.num_pipe = 4; | 4704 rdev->mec.num_mec = 1; 4705 rdev->mec.num_pipe = 1; |
4371 rdev->mec.num_queue = rdev->mec.num_mec * rdev->mec.num_pipe * 8; 4372 4373 if (rdev->mec.hpd_eop_obj == NULL) { 4374 r = radeon_bo_create(rdev, 4375 rdev->mec.num_mec *rdev->mec.num_pipe * MEC_HPD_SIZE * 2, 4376 PAGE_SIZE, true, | 4706 rdev->mec.num_queue = rdev->mec.num_mec * rdev->mec.num_pipe * 8; 4707 4708 if (rdev->mec.hpd_eop_obj == NULL) { 4709 r = radeon_bo_create(rdev, 4710 rdev->mec.num_mec *rdev->mec.num_pipe * MEC_HPD_SIZE * 2, 4711 PAGE_SIZE, true, |
4377 RADEON_GEM_DOMAIN_GTT, NULL, | 4712 RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL, |
4378 &rdev->mec.hpd_eop_obj); 4379 if (r) { 4380 dev_warn(rdev->dev, "(%d) create HDP EOP bo failed\n", r); 4381 return r; 4382 } 4383 } 4384 4385 r = radeon_bo_reserve(rdev->mec.hpd_eop_obj, false); --- 97 unchanged lines hidden (view full) --- 4483 * @rdev: radeon_device pointer 4484 * 4485 * Program the compute queues and test them to make sure they 4486 * are working. 4487 * Returns 0 for success, error for failure. 4488 */ 4489static int cik_cp_compute_resume(struct radeon_device *rdev) 4490{ | 4713 &rdev->mec.hpd_eop_obj); 4714 if (r) { 4715 dev_warn(rdev->dev, "(%d) create HDP EOP bo failed\n", r); 4716 return r; 4717 } 4718 } 4719 4720 r = radeon_bo_reserve(rdev->mec.hpd_eop_obj, false); --- 97 unchanged lines hidden (view full) --- 4818 * @rdev: radeon_device pointer 4819 * 4820 * Program the compute queues and test them to make sure they 4821 * are working. 4822 * Returns 0 for success, error for failure. 4823 */ 4824static int cik_cp_compute_resume(struct radeon_device *rdev) 4825{ |
4491 int r, i, idx; | 4826 int r, i, j, idx; |
4492 u32 tmp; 4493 bool use_doorbell = true; 4494 u64 hqd_gpu_addr; 4495 u64 mqd_gpu_addr; 4496 u64 eop_gpu_addr; 4497 u64 wb_gpu_addr; 4498 u32 *buf; 4499 struct bonaire_mqd *mqd; --- 4 unchanged lines hidden (view full) --- 4504 4505 /* fix up chicken bits */ 4506 tmp = RREG32(CP_CPF_DEBUG); 4507 tmp |= (1 << 23); 4508 WREG32(CP_CPF_DEBUG, tmp); 4509 4510 /* init the pipes */ 4511 mutex_lock(&rdev->srbm_mutex); | 4827 u32 tmp; 4828 bool use_doorbell = true; 4829 u64 hqd_gpu_addr; 4830 u64 mqd_gpu_addr; 4831 u64 eop_gpu_addr; 4832 u64 wb_gpu_addr; 4833 u32 *buf; 4834 struct bonaire_mqd *mqd; --- 4 unchanged lines hidden (view full) --- 4839 4840 /* fix up chicken bits */ 4841 tmp = RREG32(CP_CPF_DEBUG); 4842 tmp |= (1 << 23); 4843 WREG32(CP_CPF_DEBUG, tmp); 4844 4845 /* init the pipes */ 4846 mutex_lock(&rdev->srbm_mutex); |
4512 for (i = 0; i < (rdev->mec.num_pipe * rdev->mec.num_mec); i++) { 4513 int me = (i < 4) ? 1 : 2; 4514 int pipe = (i < 4) ? i : (i - 4); | |
4515 | 4847 |
4516 eop_gpu_addr = rdev->mec.hpd_eop_gpu_addr + (i * MEC_HPD_SIZE * 2); | 4848 eop_gpu_addr = rdev->mec.hpd_eop_gpu_addr; |
4517 | 4849 |
4518 cik_srbm_select(rdev, me, pipe, 0, 0); | 4850 cik_srbm_select(rdev, 0, 0, 0, 0); |
4519 | 4851 |
4520 /* write the EOP addr */ 4521 WREG32(CP_HPD_EOP_BASE_ADDR, eop_gpu_addr >> 8); 4522 WREG32(CP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr) >> 8); | 4852 /* write the EOP addr */ 4853 WREG32(CP_HPD_EOP_BASE_ADDR, eop_gpu_addr >> 8); 4854 WREG32(CP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr) >> 8); |
4523 | 4855 |
4524 /* set the VMID assigned */ 4525 WREG32(CP_HPD_EOP_VMID, 0); | 4856 /* set the VMID assigned */ 4857 WREG32(CP_HPD_EOP_VMID, 0); |
4526 | 4858 |
4527 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ 4528 tmp = RREG32(CP_HPD_EOP_CONTROL); 4529 tmp &= ~EOP_SIZE_MASK; 4530 tmp |= order_base_2(MEC_HPD_SIZE / 8); 4531 WREG32(CP_HPD_EOP_CONTROL, tmp); 4532 } 4533 cik_srbm_select(rdev, 0, 0, 0, 0); | 4859 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ 4860 tmp = RREG32(CP_HPD_EOP_CONTROL); 4861 tmp &= ~EOP_SIZE_MASK; 4862 tmp |= order_base_2(MEC_HPD_SIZE / 8); 4863 WREG32(CP_HPD_EOP_CONTROL, tmp); 4864 |
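/*
 * Worked example of the EOP_SIZE encoding noted in the comment above: the
 * field stores 2^(EOP_SIZE+1) dwords, and the code programs
 * order_base_2(MEC_HPD_SIZE / 8).  A MEC_HPD_SIZE of 2048 bytes is assumed
 * here for illustration, and order_base_2() is re-implemented as a
 * stand-alone helper.
 */
#include <assert.h>

#define MEC_HPD_SIZE_SKETCH 2048u               /* bytes, assumed */

static unsigned int order_base_2_sketch(unsigned int n)
{
	unsigned int order = 0;

	while ((1u << order) < n)               /* smallest order with 2^order >= n */
		order++;
	return order;
}

int main(void)
{
	unsigned int eop_size = order_base_2_sketch(MEC_HPD_SIZE_SKETCH / 8); /* = 8 */
	unsigned int dwords = 1u << (eop_size + 1);                           /* = 512 */

	assert(dwords * 4 == MEC_HPD_SIZE_SKETCH);  /* 512 dwords == 2048 bytes */
	return 0;
}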
4534 mutex_unlock(&rdev->srbm_mutex); 4535 4536 /* init the queues. Just two for now. */ 4537 for (i = 0; i < 2; i++) { 4538 if (i == 0) 4539 idx = CAYMAN_RING_TYPE_CP1_INDEX; 4540 else 4541 idx = CAYMAN_RING_TYPE_CP2_INDEX; 4542 4543 if (rdev->ring[idx].mqd_obj == NULL) { 4544 r = radeon_bo_create(rdev, 4545 sizeof(struct bonaire_mqd), 4546 PAGE_SIZE, true, | 4865 mutex_unlock(&rdev->srbm_mutex); 4866 4867 /* init the queues. Just two for now. */ 4868 for (i = 0; i < 2; i++) { 4869 if (i == 0) 4870 idx = CAYMAN_RING_TYPE_CP1_INDEX; 4871 else 4872 idx = CAYMAN_RING_TYPE_CP2_INDEX; 4873 4874 if (rdev->ring[idx].mqd_obj == NULL) { 4875 r = radeon_bo_create(rdev, 4876 sizeof(struct bonaire_mqd), 4877 PAGE_SIZE, true, |
4547 RADEON_GEM_DOMAIN_GTT, NULL, 4548 &rdev->ring[idx].mqd_obj); | 4878 RADEON_GEM_DOMAIN_GTT, 0, NULL, 4879 NULL, &rdev->ring[idx].mqd_obj); |
4549 if (r) { 4550 dev_warn(rdev->dev, "(%d) create MQD bo failed\n", r); 4551 return r; 4552 } 4553 } 4554 4555 r = radeon_bo_reserve(rdev->ring[idx].mqd_obj, false); 4556 if (unlikely(r != 0)) { --- 45 unchanged lines hidden (view full) --- 4602 mqd->queue_state.cp_hqd_pq_doorbell_control); 4603 4604 /* disable the queue if it's active */ 4605 mqd->queue_state.cp_hqd_dequeue_request = 0; 4606 mqd->queue_state.cp_hqd_pq_rptr = 0; 4607 mqd->queue_state.cp_hqd_pq_wptr= 0; 4608 if (RREG32(CP_HQD_ACTIVE) & 1) { 4609 WREG32(CP_HQD_DEQUEUE_REQUEST, 1); | 4880 if (r) { 4881 dev_warn(rdev->dev, "(%d) create MQD bo failed\n", r); 4882 return r; 4883 } 4884 } 4885 4886 r = radeon_bo_reserve(rdev->ring[idx].mqd_obj, false); 4887 if (unlikely(r != 0)) { --- 45 unchanged lines hidden (view full) --- 4933 mqd->queue_state.cp_hqd_pq_doorbell_control); 4934 4935 /* disable the queue if it's active */ 4936 mqd->queue_state.cp_hqd_dequeue_request = 0; 4937 mqd->queue_state.cp_hqd_pq_rptr = 0; 4938 mqd->queue_state.cp_hqd_pq_wptr= 0; 4939 if (RREG32(CP_HQD_ACTIVE) & 1) { 4940 WREG32(CP_HQD_DEQUEUE_REQUEST, 1); |
4610 for (i = 0; i < rdev->usec_timeout; i++) { | 4941 for (j = 0; j < rdev->usec_timeout; j++) { |
4611 if (!(RREG32(CP_HQD_ACTIVE) & 1)) 4612 break; 4613 udelay(1); 4614 } 4615 WREG32(CP_HQD_DEQUEUE_REQUEST, mqd->queue_state.cp_hqd_dequeue_request); 4616 WREG32(CP_HQD_PQ_RPTR, mqd->queue_state.cp_hqd_pq_rptr); 4617 WREG32(CP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr); 4618 } --- 777 unchanged lines hidden (view full) --- 5396 5397 if (rdev->gart.robj == NULL) { 5398 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); 5399 return -EINVAL; 5400 } 5401 r = radeon_gart_table_vram_pin(rdev); 5402 if (r) 5403 return r; | 4942 if (!(RREG32(CP_HQD_ACTIVE) & 1)) 4943 break; 4944 udelay(1); 4945 } 4946 WREG32(CP_HQD_DEQUEUE_REQUEST, mqd->queue_state.cp_hqd_dequeue_request); 4947 WREG32(CP_HQD_PQ_RPTR, mqd->queue_state.cp_hqd_pq_rptr); 4948 WREG32(CP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr); 4949 } --- 777 unchanged lines hidden (view full) --- 5727 5728 if (rdev->gart.robj == NULL) { 5729 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); 5730 return -EINVAL; 5731 } 5732 r = radeon_gart_table_vram_pin(rdev); 5733 if (r) 5734 return r; |
5404 radeon_gart_restore(rdev); | |
5405 /* Setup TLB control */ 5406 WREG32(MC_VM_MX_L1_TLB_CNTL, 5407 (0xA << 7) | 5408 ENABLE_L1_TLB | 5409 ENABLE_L1_FRAGMENT_PROCESSING | 5410 SYSTEM_ACCESS_MODE_NOT_IN_SYS | 5411 ENABLE_ADVANCED_DRIVER_MODEL | 5412 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU); --- 17 unchanged lines hidden (view full) --- 5430 WREG32(VM_CONTEXT0_CNTL2, 0); 5431 WREG32(VM_CONTEXT0_CNTL, (ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | 5432 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT)); 5433 5434 WREG32(0x15D4, 0); 5435 WREG32(0x15D8, 0); 5436 WREG32(0x15DC, 0); 5437 | 5735 /* Setup TLB control */ 5736 WREG32(MC_VM_MX_L1_TLB_CNTL, 5737 (0xA << 7) | 5738 ENABLE_L1_TLB | 5739 ENABLE_L1_FRAGMENT_PROCESSING | 5740 SYSTEM_ACCESS_MODE_NOT_IN_SYS | 5741 ENABLE_ADVANCED_DRIVER_MODEL | 5742 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU); --- 17 unchanged lines hidden (view full) --- 5760 WREG32(VM_CONTEXT0_CNTL2, 0); 5761 WREG32(VM_CONTEXT0_CNTL, (ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | 5762 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT)); 5763 5764 WREG32(0x15D4, 0); 5765 WREG32(0x15D8, 0); 5766 WREG32(0x15DC, 0); 5767 |
5438 /* empty context1-15 */ 5439 /* FIXME start with 4G, once using 2 level pt switch to full 5440 * vm size space 5441 */ | 5768 /* restore context1-15 */ |
5442 /* set vm size, must be a multiple of 4 */ 5443 WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0); 5444 WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn); 5445 for (i = 1; i < 16; i++) { 5446 if (i < 8) 5447 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2), | 5769 /* set vm size, must be a multiple of 4 */ 5770 WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0); 5771 WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn); 5772 for (i = 1; i < 16; i++) { 5773 if (i < 8) 5774 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2), |
5448 rdev->gart.table_addr >> 12); | 5775 rdev->vm_manager.saved_table_addr[i]); |
5449 else 5450 WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2), | 5776 else 5777 WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2), |
5451 rdev->gart.table_addr >> 12); | 5778 rdev->vm_manager.saved_table_addr[i]); |
5452 } 5453 5454 /* enable context1-15 */ 5455 WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR, 5456 (u32)(rdev->dummy_page.addr >> 12)); 5457 WREG32(VM_CONTEXT1_CNTL2, 4); 5458 WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) | 5459 PAGE_TABLE_BLOCK_SIZE(radeon_vm_block_size - 9) | --- 48 unchanged lines hidden (view full) --- 5508 * cik_pcie_gart_disable - gart disable 5509 * 5510 * @rdev: radeon_device pointer 5511 * 5512 * This disables all VM page table (CIK). 5513 */ 5514static void cik_pcie_gart_disable(struct radeon_device *rdev) 5515{ | 5779 } 5780 5781 /* enable context1-15 */ 5782 WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR, 5783 (u32)(rdev->dummy_page.addr >> 12)); 5784 WREG32(VM_CONTEXT1_CNTL2, 4); 5785 WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) | 5786 PAGE_TABLE_BLOCK_SIZE(radeon_vm_block_size - 9) | --- 48 unchanged lines hidden (view full) --- 5835 * cik_pcie_gart_disable - gart disable 5836 * 5837 * @rdev: radeon_device pointer 5838 * 5839 * This disables all VM page table (CIK). 5840 */ 5841static void cik_pcie_gart_disable(struct radeon_device *rdev) 5842{ |
5843 unsigned i; 5844 5845 for (i = 1; i < 16; ++i) { 5846 uint32_t reg; 5847 if (i < 8) 5848 reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2); 5849 else 5850 reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2); 5851 rdev->vm_manager.saved_table_addr[i] = RREG32(reg); 5852 } 5853 |
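/*
 * Stand-alone sketch of the save/restore pattern used for the per-VMID
 * page-table base registers: the disable path above reads each context's
 * base into saved_table_addr[], and the GART enable path shown earlier
 * writes those saved values back.  reg_read()/reg_write() and the offset
 * helper are illustrative stand-ins for RREG32()/WREG32() and the real
 * register map.
 */
#include <stdint.h>

#define NUM_VMIDS_SKETCH 16

static uint32_t fake_mmio[2 * NUM_VMIDS_SKETCH];      /* pretend register file */
static uint32_t saved_table_addr[NUM_VMIDS_SKETCH];

static uint32_t reg_read(unsigned int reg)            { return fake_mmio[reg]; }
static void reg_write(unsigned int reg, uint32_t val) { fake_mmio[reg] = val; }

/* VMIDs 0-7 and 8-15 live in two register banks; the offsets here are made up. */
static unsigned int pt_base_reg(unsigned int vmid)
{
	return (vmid < 8) ? vmid : NUM_VMIDS_SKETCH + (vmid - 8);
}

void pt_bases_save(void)                              /* mirrors the disable path */
{
	unsigned int i;

	for (i = 1; i < NUM_VMIDS_SKETCH; i++)
		saved_table_addr[i] = reg_read(pt_base_reg(i));
}

void pt_bases_restore(void)                           /* mirrors the enable path */
{
	unsigned int i;

	for (i = 1; i < NUM_VMIDS_SKETCH; i++)
		reg_write(pt_base_reg(i), saved_table_addr[i]);
}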
|
5516 /* Disable all tables */ 5517 WREG32(VM_CONTEXT0_CNTL, 0); 5518 WREG32(VM_CONTEXT1_CNTL, 0); 5519 /* Setup TLB control */ 5520 WREG32(MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE_NOT_IN_SYS | 5521 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU); 5522 /* Setup L2 cache */ 5523 WREG32(VM_L2_CNTL, --- 48 unchanged lines hidden (view full) --- 5572 * @rdev: radeon_device pointer 5573 * 5574 * Inits cik specific vm parameters (number of VMs, base of vram for 5575 * VMIDs 1-15) (CIK). 5576 * Returns 0 for success. 5577 */ 5578int cik_vm_init(struct radeon_device *rdev) 5579{ | 5854 /* Disable all tables */ 5855 WREG32(VM_CONTEXT0_CNTL, 0); 5856 WREG32(VM_CONTEXT1_CNTL, 0); 5857 /* Setup TLB control */ 5858 WREG32(MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE_NOT_IN_SYS | 5859 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU); 5860 /* Setup L2 cache */ 5861 WREG32(VM_L2_CNTL, --- 48 unchanged lines hidden (view full) --- 5910 * @rdev: radeon_device pointer 5911 * 5912 * Inits cik specific vm parameters (number of VMs, base of vram for 5913 * VMIDs 1-15) (CIK). 5914 * Returns 0 for success. 5915 */ 5916int cik_vm_init(struct radeon_device *rdev) 5917{ |
5580 /* number of VMs */ 5581 rdev->vm_manager.nvm = 16; | 5918 /* 5919 * number of VMs 5920 * VMID 0 is reserved for System 5921 * radeon graphics/compute will use VMIDs 1-7 5922 * amdkfd will use VMIDs 8-15 5923 */ 5924 rdev->vm_manager.nvm = RADEON_NUM_OF_VMIDS; |
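/*
 * Tiny sketch of the VMID split described in the comment above; the ranges
 * (0 = system, 1-7 = radeon graphics/compute, 8-15 = amdkfd) are taken from
 * that comment and the enum names are made up for illustration.
 */
enum vmid_owner_sketch { VMID_SYSTEM, VMID_RADEON, VMID_AMDKFD };

enum vmid_owner_sketch vmid_owner(unsigned int vmid)
{
	if (vmid == 0)
		return VMID_SYSTEM;                 /* reserved for the system */
	return (vmid <= 7) ? VMID_RADEON : VMID_AMDKFD;
}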
5582 /* base offset of vram pages */ 5583 if (rdev->flags & RADEON_IS_IGP) { 5584 u64 tmp = RREG32(MC_VM_FB_OFFSET); 5585 tmp <<= 22; 5586 rdev->vm_manager.vram_base_offset = tmp; 5587 } else 5588 rdev->vm_manager.vram_base_offset = 0; 5589 --- 46 unchanged lines hidden (view full) --- 5636 * @rdev: radeon_device pointer 5637 * 5638 * Update the page table base and flush the VM TLB 5639 * using the CP (CIK). 5640 */ 5641void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) 5642{ 5643 struct radeon_ring *ring = &rdev->ring[ridx]; | 5925 /* base offset of vram pages */ 5926 if (rdev->flags & RADEON_IS_IGP) { 5927 u64 tmp = RREG32(MC_VM_FB_OFFSET); 5928 tmp <<= 22; 5929 rdev->vm_manager.vram_base_offset = tmp; 5930 } else 5931 rdev->vm_manager.vram_base_offset = 0; 5932 --- 46 unchanged lines hidden (view full) --- 5979 * @rdev: radeon_device pointer 5980 * 5981 * Update the page table base and flush the VM TLB 5982 * using the CP (CIK). 5983 */ 5984void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) 5985{ 5986 struct radeon_ring *ring = &rdev->ring[ridx]; |
5987 int usepfp = (ridx == RADEON_RING_TYPE_GFX_INDEX); |
|
5644 5645 if (vm == NULL) 5646 return; 5647 5648 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); | 5988 5989 if (vm == NULL) 5990 return; 5991 5992 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); |
5649 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | | 5993 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | |
5650 WRITE_DATA_DST_SEL(0))); 5651 if (vm->id < 8) { 5652 radeon_ring_write(ring, 5653 (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2); 5654 } else { 5655 radeon_ring_write(ring, 5656 (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2); 5657 } 5658 radeon_ring_write(ring, 0); 5659 radeon_ring_write(ring, vm->pd_gpu_addr >> 12); 5660 5661 /* update SH_MEM_* regs */ 5662 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); | 5994 WRITE_DATA_DST_SEL(0))); 5995 if (vm->id < 8) { 5996 radeon_ring_write(ring, 5997 (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2); 5998 } else { 5999 radeon_ring_write(ring, 6000 (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2); 6001 } 6002 radeon_ring_write(ring, 0); 6003 radeon_ring_write(ring, vm->pd_gpu_addr >> 12); 6004 6005 /* update SH_MEM_* regs */ 6006 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); |
5663 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | | 6007 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | |
5664 WRITE_DATA_DST_SEL(0))); 5665 radeon_ring_write(ring, SRBM_GFX_CNTL >> 2); 5666 radeon_ring_write(ring, 0); 5667 radeon_ring_write(ring, VMID(vm->id)); 5668 5669 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 6)); | 6008 WRITE_DATA_DST_SEL(0))); 6009 radeon_ring_write(ring, SRBM_GFX_CNTL >> 2); 6010 radeon_ring_write(ring, 0); 6011 radeon_ring_write(ring, VMID(vm->id)); 6012 6013 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 6)); |
5670 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | | 6014 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | |
5671 WRITE_DATA_DST_SEL(0))); 5672 radeon_ring_write(ring, SH_MEM_BASES >> 2); 5673 radeon_ring_write(ring, 0); 5674 5675 radeon_ring_write(ring, 0); /* SH_MEM_BASES */ 5676 radeon_ring_write(ring, 0); /* SH_MEM_CONFIG */ 5677 radeon_ring_write(ring, 1); /* SH_MEM_APE1_BASE */ 5678 radeon_ring_write(ring, 0); /* SH_MEM_APE1_LIMIT */ 5679 5680 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); | 6015 WRITE_DATA_DST_SEL(0))); 6016 radeon_ring_write(ring, SH_MEM_BASES >> 2); 6017 radeon_ring_write(ring, 0); 6018 6019 radeon_ring_write(ring, 0); /* SH_MEM_BASES */ 6020 radeon_ring_write(ring, 0); /* SH_MEM_CONFIG */ 6021 radeon_ring_write(ring, 1); /* SH_MEM_APE1_BASE */ 6022 radeon_ring_write(ring, 0); /* SH_MEM_APE1_LIMIT */ 6023 6024 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); |
5681 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | | 6025 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | |
5682 WRITE_DATA_DST_SEL(0))); 5683 radeon_ring_write(ring, SRBM_GFX_CNTL >> 2); 5684 radeon_ring_write(ring, 0); 5685 radeon_ring_write(ring, VMID(0)); 5686 5687 /* HDP flush */ 5688 cik_hdp_flush_cp_ring_emit(rdev, ridx); 5689 5690 /* bits 0-15 are the VM contexts0-15 */ 5691 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); | 6026 WRITE_DATA_DST_SEL(0))); 6027 radeon_ring_write(ring, SRBM_GFX_CNTL >> 2); 6028 radeon_ring_write(ring, 0); 6029 radeon_ring_write(ring, VMID(0)); 6030 6031 /* HDP flush */ 6032 cik_hdp_flush_cp_ring_emit(rdev, ridx); 6033 6034 /* bits 0-15 are the VM contexts0-15 */ 6035 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); |
5692 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | | 6036 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | |
5693 WRITE_DATA_DST_SEL(0))); 5694 radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); 5695 radeon_ring_write(ring, 0); 5696 radeon_ring_write(ring, 1 << vm->id); 5697 5698 /* compute doesn't have PFP */ | 6037 WRITE_DATA_DST_SEL(0))); 6038 radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); 6039 radeon_ring_write(ring, 0); 6040 radeon_ring_write(ring, 1 << vm->id); 6041 6042 /* compute doesn't have PFP */ |
5699 if (ridx == RADEON_RING_TYPE_GFX_INDEX) { | 6043 if (usepfp) { |
5700 /* sync PFP to ME, otherwise we might get invalid PFP reads */ 5701 radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); 5702 radeon_ring_write(ring, 0x0); 5703 } 5704} 5705 5706/* 5707 * RLC --- 25 unchanged lines hidden (view full) --- 5733 WREG32(RLC_LB_CNTL, tmp); 5734} 5735 5736static void cik_wait_for_rlc_serdes(struct radeon_device *rdev) 5737{ 5738 u32 i, j, k; 5739 u32 mask; 5740 | 6044 /* sync PFP to ME, otherwise we might get invalid PFP reads */ 6045 radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); 6046 radeon_ring_write(ring, 0x0); 6047 } 6048} 6049 6050/* 6051 * RLC --- 25 unchanged lines hidden (view full) --- 6077 WREG32(RLC_LB_CNTL, tmp); 6078} 6079 6080static void cik_wait_for_rlc_serdes(struct radeon_device *rdev) 6081{ 6082 u32 i, j, k; 6083 u32 mask; 6084 |
6085 mutex_lock(&rdev->grbm_idx_mutex); |
|
5741 for (i = 0; i < rdev->config.cik.max_shader_engines; i++) { 5742 for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) { 5743 cik_select_se_sh(rdev, i, j); 5744 for (k = 0; k < rdev->usec_timeout; k++) { 5745 if (RREG32(RLC_SERDES_CU_MASTER_BUSY) == 0) 5746 break; 5747 udelay(1); 5748 } 5749 } 5750 } 5751 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); | 6086 for (i = 0; i < rdev->config.cik.max_shader_engines; i++) { 6087 for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) { 6088 cik_select_se_sh(rdev, i, j); 6089 for (k = 0; k < rdev->usec_timeout; k++) { 6090 if (RREG32(RLC_SERDES_CU_MASTER_BUSY) == 0) 6091 break; 6092 udelay(1); 6093 } 6094 } 6095 } 6096 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); |
6097 mutex_unlock(&rdev->grbm_idx_mutex); |
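/*
 * Userspace sketch of the locking idiom used around the GRBM index
 * programming above: take the index mutex, narrow GRBM_GFX_INDEX to one
 * SE/SH, do the per-SH work, restore broadcast mode, then drop the lock so
 * another caller can never observe a half-programmed selection.  The pthread
 * mutex stands in for rdev->grbm_idx_mutex and the helpers are hypothetical.
 */
#include <pthread.h>

#define BROADCAST_SKETCH 0xffffffffu

static pthread_mutex_t grbm_idx_lock = PTHREAD_MUTEX_INITIALIZER;

static void select_se_sh(unsigned int se, unsigned int sh) { (void)se; (void)sh; }
static void program_per_sh_regs(void) { }

void program_one_sh(unsigned int se, unsigned int sh)
{
	pthread_mutex_lock(&grbm_idx_lock);
	select_se_sh(se, sh);                             /* narrow the selection */
	program_per_sh_regs();                            /* per-SE/SH register writes */
	select_se_sh(BROADCAST_SKETCH, BROADCAST_SKETCH); /* always restore broadcast */
	pthread_mutex_unlock(&grbm_idx_lock);
}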
|
5752 5753 mask = SE_MASTER_BUSY_MASK | GC_MASTER_BUSY | TC0_MASTER_BUSY | TC1_MASTER_BUSY; 5754 for (k = 0; k < rdev->usec_timeout; k++) { 5755 if ((RREG32(RLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0) 5756 break; 5757 udelay(1); 5758 } 5759} --- 99 unchanged lines hidden (view full) --- 5859 * 5860 * Initialize the RLC registers, load the ucode, 5861 * and start the RLC (CIK). 5862 * Returns 0 for success, -EINVAL if the ucode is not available. 5863 */ 5864static int cik_rlc_resume(struct radeon_device *rdev) 5865{ 5866 u32 i, size, tmp; | 6098 6099 mask = SE_MASTER_BUSY_MASK | GC_MASTER_BUSY | TC0_MASTER_BUSY | TC1_MASTER_BUSY; 6100 for (k = 0; k < rdev->usec_timeout; k++) { 6101 if ((RREG32(RLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0) 6102 break; 6103 udelay(1); 6104 } 6105} --- 99 unchanged lines hidden (view full) --- 6205 * 6206 * Initialize the RLC registers, load the ucode, 6207 * and start the RLC (CIK). 6208 * Returns 0 for success, -EINVAL if the ucode is not available. 6209 */ 6210static int cik_rlc_resume(struct radeon_device *rdev) 6211{ 6212 u32 i, size, tmp; |
5867 const __be32 *fw_data; | |
5868 5869 if (!rdev->rlc_fw) 5870 return -EINVAL; 5871 | 6213 6214 if (!rdev->rlc_fw) 6215 return -EINVAL; 6216 |
5872 switch (rdev->family) { 5873 case CHIP_BONAIRE: 5874 case CHIP_HAWAII: 5875 default: 5876 size = BONAIRE_RLC_UCODE_SIZE; 5877 break; 5878 case CHIP_KAVERI: 5879 size = KV_RLC_UCODE_SIZE; 5880 break; 5881 case CHIP_KABINI: 5882 size = KB_RLC_UCODE_SIZE; 5883 break; 5884 case CHIP_MULLINS: 5885 size = ML_RLC_UCODE_SIZE; 5886 break; 5887 } 5888 | |
5889 cik_rlc_stop(rdev); 5890 5891 /* disable CG */ 5892 tmp = RREG32(RLC_CGCG_CGLS_CTRL) & 0xfffffffc; 5893 WREG32(RLC_CGCG_CGLS_CTRL, tmp); 5894 5895 si_rlc_reset(rdev); 5896 5897 cik_init_pg(rdev); 5898 5899 cik_init_cg(rdev); 5900 5901 WREG32(RLC_LB_CNTR_INIT, 0); 5902 WREG32(RLC_LB_CNTR_MAX, 0x00008000); 5903 | 6217 cik_rlc_stop(rdev); 6218 6219 /* disable CG */ 6220 tmp = RREG32(RLC_CGCG_CGLS_CTRL) & 0xfffffffc; 6221 WREG32(RLC_CGCG_CGLS_CTRL, tmp); 6222 6223 si_rlc_reset(rdev); 6224 6225 cik_init_pg(rdev); 6226 6227 cik_init_cg(rdev); 6228 6229 WREG32(RLC_LB_CNTR_INIT, 0); 6230 WREG32(RLC_LB_CNTR_MAX, 0x00008000); 6231 |
6232 mutex_lock(&rdev->grbm_idx_mutex); |
|
5904 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); 5905 WREG32(RLC_LB_INIT_CU_MASK, 0xffffffff); 5906 WREG32(RLC_LB_PARAMS, 0x00600408); 5907 WREG32(RLC_LB_CNTL, 0x80000004); | 6233 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); 6234 WREG32(RLC_LB_INIT_CU_MASK, 0xffffffff); 6235 WREG32(RLC_LB_PARAMS, 0x00600408); 6236 WREG32(RLC_LB_CNTL, 0x80000004); |
6237 mutex_unlock(&rdev->grbm_idx_mutex); |
|
5908 5909 WREG32(RLC_MC_CNTL, 0); 5910 WREG32(RLC_UCODE_CNTL, 0); 5911 | 6238 6239 WREG32(RLC_MC_CNTL, 0); 6240 WREG32(RLC_UCODE_CNTL, 0); 6241 |
5912 fw_data = (const __be32 *)rdev->rlc_fw->data; | 6242 if (rdev->new_fw) { 6243 const struct rlc_firmware_header_v1_0 *hdr = 6244 (const struct rlc_firmware_header_v1_0 *)rdev->rlc_fw->data; 6245 const __le32 *fw_data = (const __le32 *) 6246 (rdev->rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 6247 6248 radeon_ucode_print_rlc_hdr(&hdr->header); 6249 6250 size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; |
5913 WREG32(RLC_GPM_UCODE_ADDR, 0); | 6251 WREG32(RLC_GPM_UCODE_ADDR, 0); |
5914 for (i = 0; i < size; i++) 5915 WREG32(RLC_GPM_UCODE_DATA, be32_to_cpup(fw_data++)); 5916 WREG32(RLC_GPM_UCODE_ADDR, 0); | 6252 for (i = 0; i < size; i++) 6253 WREG32(RLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++)); 6254 WREG32(RLC_GPM_UCODE_ADDR, le32_to_cpu(hdr->header.ucode_version)); 6255 } else { 6256 const __be32 *fw_data; |
5917 | 6257 |
6258 switch (rdev->family) { 6259 case CHIP_BONAIRE: 6260 case CHIP_HAWAII: 6261 default: 6262 size = BONAIRE_RLC_UCODE_SIZE; 6263 break; 6264 case CHIP_KAVERI: 6265 size = KV_RLC_UCODE_SIZE; 6266 break; 6267 case CHIP_KABINI: 6268 size = KB_RLC_UCODE_SIZE; 6269 break; 6270 case CHIP_MULLINS: 6271 size = ML_RLC_UCODE_SIZE; 6272 break; 6273 } 6274 6275 fw_data = (const __be32 *)rdev->rlc_fw->data; 6276 WREG32(RLC_GPM_UCODE_ADDR, 0); 6277 for (i = 0; i < size; i++) 6278 WREG32(RLC_GPM_UCODE_DATA, be32_to_cpup(fw_data++)); 6279 WREG32(RLC_GPM_UCODE_ADDR, 0); 6280 } 6281 |
|
5918 /* XXX - find out what chips support lbpw */ 5919 cik_enable_lbpw(rdev, false); 5920 5921 if (rdev->family == CHIP_BONAIRE) 5922 WREG32(RLC_DRIVER_DMA_STATUS, 0); 5923 5924 cik_rlc_start(rdev); 5925 --- 6 unchanged lines hidden (view full) --- 5932 5933 orig = data = RREG32(RLC_CGCG_CGLS_CTRL); 5934 5935 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGCG)) { 5936 cik_enable_gui_idle_interrupt(rdev, true); 5937 5938 tmp = cik_halt_rlc(rdev); 5939 | 6282 /* XXX - find out what chips support lbpw */ 6283 cik_enable_lbpw(rdev, false); 6284 6285 if (rdev->family == CHIP_BONAIRE) 6286 WREG32(RLC_DRIVER_DMA_STATUS, 0); 6287 6288 cik_rlc_start(rdev); 6289 --- 6 unchanged lines hidden (view full) --- 6296 6297 orig = data = RREG32(RLC_CGCG_CGLS_CTRL); 6298 6299 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGCG)) { 6300 cik_enable_gui_idle_interrupt(rdev, true); 6301 6302 tmp = cik_halt_rlc(rdev); 6303 |
6304 mutex_lock(&rdev->grbm_idx_mutex); |
|
5940 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); 5941 WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); 5942 WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); 5943 tmp2 = BPM_ADDR_MASK | CGCG_OVERRIDE_0 | CGLS_ENABLE; 5944 WREG32(RLC_SERDES_WR_CTRL, tmp2); | 6305 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); 6306 WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); 6307 WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); 6308 tmp2 = BPM_ADDR_MASK | CGCG_OVERRIDE_0 | CGLS_ENABLE; 6309 WREG32(RLC_SERDES_WR_CTRL, tmp2); |
6310 mutex_unlock(&rdev->grbm_idx_mutex); |
|
5945 5946 cik_update_rlc(rdev, tmp); 5947 5948 data |= CGCG_EN | CGLS_EN; 5949 } else { 5950 cik_enable_gui_idle_interrupt(rdev, false); 5951 5952 RREG32(CB_CGTT_SCLK_CTRL); --- 25 unchanged lines hidden (view full) --- 5978 5979 orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE); 5980 data &= 0xfffffffd; 5981 if (orig != data) 5982 WREG32(RLC_CGTT_MGCG_OVERRIDE, data); 5983 5984 tmp = cik_halt_rlc(rdev); 5985 | 6311 6312 cik_update_rlc(rdev, tmp); 6313 6314 data |= CGCG_EN | CGLS_EN; 6315 } else { 6316 cik_enable_gui_idle_interrupt(rdev, false); 6317 6318 RREG32(CB_CGTT_SCLK_CTRL); --- 25 unchanged lines hidden (view full) --- 6344 6345 orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE); 6346 data &= 0xfffffffd; 6347 if (orig != data) 6348 WREG32(RLC_CGTT_MGCG_OVERRIDE, data); 6349 6350 tmp = cik_halt_rlc(rdev); 6351 |
6352 mutex_lock(&rdev->grbm_idx_mutex); |
|
5986 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); 5987 WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); 5988 WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); 5989 data = BPM_ADDR_MASK | MGCG_OVERRIDE_0; 5990 WREG32(RLC_SERDES_WR_CTRL, data); | 6353 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); 6354 WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); 6355 WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); 6356 data = BPM_ADDR_MASK | MGCG_OVERRIDE_0; 6357 WREG32(RLC_SERDES_WR_CTRL, data); |
6358 mutex_unlock(&rdev->grbm_idx_mutex); |
|
5991 5992 cik_update_rlc(rdev, tmp); 5993 5994 if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGTS) { 5995 orig = data = RREG32(CGTS_SM_CTRL_REG); 5996 data &= ~SM_MODE_MASK; 5997 data |= SM_MODE(0x2); 5998 data |= SM_MODE_ENABLE; --- 27 unchanged lines hidden (view full) --- 6026 6027 orig = data = RREG32(CGTS_SM_CTRL_REG); 6028 data |= CGTS_OVERRIDE | CGTS_LS_OVERRIDE; 6029 if (orig != data) 6030 WREG32(CGTS_SM_CTRL_REG, data); 6031 6032 tmp = cik_halt_rlc(rdev); 6033 | 6359 6360 cik_update_rlc(rdev, tmp); 6361 6362 if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGTS) { 6363 orig = data = RREG32(CGTS_SM_CTRL_REG); 6364 data &= ~SM_MODE_MASK; 6365 data |= SM_MODE(0x2); 6366 data |= SM_MODE_ENABLE; --- 27 unchanged lines hidden (view full) --- 6394 6395 orig = data = RREG32(CGTS_SM_CTRL_REG); 6396 data |= CGTS_OVERRIDE | CGTS_LS_OVERRIDE; 6397 if (orig != data) 6398 WREG32(CGTS_SM_CTRL_REG, data); 6399 6400 tmp = cik_halt_rlc(rdev); 6401 |
6402 mutex_lock(&rdev->grbm_idx_mutex); |
|
6034 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); 6035 WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); 6036 WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); 6037 data = BPM_ADDR_MASK | MGCG_OVERRIDE_1; 6038 WREG32(RLC_SERDES_WR_CTRL, data); | 6403 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); 6404 WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); 6405 WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); 6406 data = BPM_ADDR_MASK | MGCG_OVERRIDE_1; 6407 WREG32(RLC_SERDES_WR_CTRL, data); |
6408 mutex_unlock(&rdev->grbm_idx_mutex); |
|
6039 6040 cik_update_rlc(rdev, tmp); 6041 } 6042} 6043 6044static const u32 mc_cg_registers[] = 6045{ 6046 MC_HUB_MISC_HUB_CG, --- 295 unchanged lines hidden (view full) --- 6342} 6343 6344#define CP_ME_TABLE_SIZE 96 6345#define CP_ME_TABLE_OFFSET 2048 6346#define CP_MEC_TABLE_OFFSET 4096 6347 6348void cik_init_cp_pg_table(struct radeon_device *rdev) 6349{ | 6409 6410 cik_update_rlc(rdev, tmp); 6411 } 6412} 6413 6414static const u32 mc_cg_registers[] = 6415{ 6416 MC_HUB_MISC_HUB_CG, --- 295 unchanged lines hidden (view full) --- 6712} 6713 6714#define CP_ME_TABLE_SIZE 96 6715#define CP_ME_TABLE_OFFSET 2048 6716#define CP_MEC_TABLE_OFFSET 4096 6717 6718void cik_init_cp_pg_table(struct radeon_device *rdev) 6719{ |
6350 const __be32 *fw_data; | |
6351 volatile u32 *dst_ptr; 6352 int me, i, max_me = 4; 6353 u32 bo_offset = 0; | 6720 volatile u32 *dst_ptr; 6721 int me, i, max_me = 4; 6722 u32 bo_offset = 0; |
6354 u32 table_offset; | 6723 u32 table_offset, table_size; |
6355 6356 if (rdev->family == CHIP_KAVERI) 6357 max_me = 5; 6358 6359 if (rdev->rlc.cp_table_ptr == NULL) 6360 return; 6361 6362 /* write the cp table buffer */ 6363 dst_ptr = rdev->rlc.cp_table_ptr; 6364 for (me = 0; me < max_me; me++) { | 6724 6725 if (rdev->family == CHIP_KAVERI) 6726 max_me = 5; 6727 6728 if (rdev->rlc.cp_table_ptr == NULL) 6729 return; 6730 6731 /* write the cp table buffer */ 6732 dst_ptr = rdev->rlc.cp_table_ptr; 6733 for (me = 0; me < max_me; me++) { |
6365 if (me == 0) { 6366 fw_data = (const __be32 *)rdev->ce_fw->data; 6367 table_offset = CP_ME_TABLE_OFFSET; 6368 } else if (me == 1) { 6369 fw_data = (const __be32 *)rdev->pfp_fw->data; 6370 table_offset = CP_ME_TABLE_OFFSET; 6371 } else if (me == 2) { 6372 fw_data = (const __be32 *)rdev->me_fw->data; 6373 table_offset = CP_ME_TABLE_OFFSET; | 6734 if (rdev->new_fw) { 6735 const __le32 *fw_data; 6736 const struct gfx_firmware_header_v1_0 *hdr; 6737 6738 if (me == 0) { 6739 hdr = (const struct gfx_firmware_header_v1_0 *)rdev->ce_fw->data; 6740 fw_data = (const __le32 *) 6741 (rdev->ce_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 6742 table_offset = le32_to_cpu(hdr->jt_offset); 6743 table_size = le32_to_cpu(hdr->jt_size); 6744 } else if (me == 1) { 6745 hdr = (const struct gfx_firmware_header_v1_0 *)rdev->pfp_fw->data; 6746 fw_data = (const __le32 *) 6747 (rdev->pfp_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 6748 table_offset = le32_to_cpu(hdr->jt_offset); 6749 table_size = le32_to_cpu(hdr->jt_size); 6750 } else if (me == 2) { 6751 hdr = (const struct gfx_firmware_header_v1_0 *)rdev->me_fw->data; 6752 fw_data = (const __le32 *) 6753 (rdev->me_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 6754 table_offset = le32_to_cpu(hdr->jt_offset); 6755 table_size = le32_to_cpu(hdr->jt_size); 6756 } else if (me == 3) { 6757 hdr = (const struct gfx_firmware_header_v1_0 *)rdev->mec_fw->data; 6758 fw_data = (const __le32 *) 6759 (rdev->mec_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 6760 table_offset = le32_to_cpu(hdr->jt_offset); 6761 table_size = le32_to_cpu(hdr->jt_size); 6762 } else { 6763 hdr = (const struct gfx_firmware_header_v1_0 *)rdev->mec2_fw->data; 6764 fw_data = (const __le32 *) 6765 (rdev->mec2_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 6766 table_offset = le32_to_cpu(hdr->jt_offset); 6767 table_size = le32_to_cpu(hdr->jt_size); 6768 } 6769 6770 for (i = 0; i < table_size; i ++) { 6771 dst_ptr[bo_offset + i] = 6772 cpu_to_le32(le32_to_cpu(fw_data[table_offset + i])); 6773 } 6774 bo_offset += table_size; |
6374 } else { | 6775 } else { |
6375 fw_data = (const __be32 *)rdev->mec_fw->data; 6376 table_offset = CP_MEC_TABLE_OFFSET; 6377 } | 6776 const __be32 *fw_data; 6777 table_size = CP_ME_TABLE_SIZE; |
6378 | 6778 |
6379 for (i = 0; i < CP_ME_TABLE_SIZE; i ++) { 6380 dst_ptr[bo_offset + i] = cpu_to_le32(be32_to_cpu(fw_data[table_offset + i])); | 6779 if (me == 0) { 6780 fw_data = (const __be32 *)rdev->ce_fw->data; 6781 table_offset = CP_ME_TABLE_OFFSET; 6782 } else if (me == 1) { 6783 fw_data = (const __be32 *)rdev->pfp_fw->data; 6784 table_offset = CP_ME_TABLE_OFFSET; 6785 } else if (me == 2) { 6786 fw_data = (const __be32 *)rdev->me_fw->data; 6787 table_offset = CP_ME_TABLE_OFFSET; 6788 } else { 6789 fw_data = (const __be32 *)rdev->mec_fw->data; 6790 table_offset = CP_MEC_TABLE_OFFSET; 6791 } 6792 6793 for (i = 0; i < table_size; i ++) { 6794 dst_ptr[bo_offset + i] = 6795 cpu_to_le32(be32_to_cpu(fw_data[table_offset + i])); 6796 } 6797 bo_offset += table_size; |
6381 } | 6798 } |
6382 bo_offset += CP_ME_TABLE_SIZE; | |
6383 } 6384} 6385 6386static void cik_enable_gfx_cgpg(struct radeon_device *rdev, 6387 bool enable) 6388{ 6389 u32 data, orig; 6390 --- 22 unchanged lines hidden (view full) --- 6413 } 6414} 6415 6416static u32 cik_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh) 6417{ 6418 u32 mask = 0, tmp, tmp1; 6419 int i; 6420 | 6799 } 6800} 6801 6802static void cik_enable_gfx_cgpg(struct radeon_device *rdev, 6803 bool enable) 6804{ 6805 u32 data, orig; 6806 --- 22 unchanged lines hidden (view full) --- 6829 } 6830} 6831 6832static u32 cik_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh) 6833{ 6834 u32 mask = 0, tmp, tmp1; 6835 int i; 6836 |
6837 mutex_lock(&rdev->grbm_idx_mutex); |
|
6421 cik_select_se_sh(rdev, se, sh); 6422 tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG); 6423 tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG); 6424 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); | 6838 cik_select_se_sh(rdev, se, sh); 6839 tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG); 6840 tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG); 6841 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); |
6842 mutex_unlock(&rdev->grbm_idx_mutex); |
|
6425 6426 tmp &= 0xffff0000; 6427 6428 tmp |= tmp1; 6429 tmp >>= 16; 6430 6431 for (i = 0; i < rdev->config.cik.max_cu_per_sh; i ++) { 6432 mask <<= 1; --- 467 unchanged lines hidden (view full) --- 6900 * 6901 * Enable interrupt sources on the GPU (vblanks, hpd, 6902 * etc.) (CIK). 6903 * Returns 0 for success, errors for failure. 6904 */ 6905int cik_irq_set(struct radeon_device *rdev) 6906{ 6907 u32 cp_int_cntl; | 6843 6844 tmp &= 0xffff0000; 6845 6846 tmp |= tmp1; 6847 tmp >>= 16; 6848 6849 for (i = 0; i < rdev->config.cik.max_cu_per_sh; i ++) { 6850 mask <<= 1; --- 467 unchanged lines hidden (view full) --- 7318 * 7319 * Enable interrupt sources on the GPU (vblanks, hpd, 7320 * etc.) (CIK). 7321 * Returns 0 for success, errors for failure. 7322 */ 7323int cik_irq_set(struct radeon_device *rdev) 7324{ 7325 u32 cp_int_cntl; |
6908 u32 cp_m1p0, cp_m1p1, cp_m1p2, cp_m1p3; 6909 u32 cp_m2p0, cp_m2p1, cp_m2p2, cp_m2p3; | 7326 u32 cp_m1p0; |
6910 u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; 6911 u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; 6912 u32 grbm_int_cntl = 0; 6913 u32 dma_cntl, dma_cntl1; 6914 u32 thermal_int; 6915 6916 if (!rdev->irq.installed) { 6917 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); --- 17 unchanged lines hidden (view full) --- 6935 hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN; 6936 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN; 6937 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN; 6938 6939 dma_cntl = RREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET) & ~TRAP_ENABLE; 6940 dma_cntl1 = RREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET) & ~TRAP_ENABLE; 6941 6942 cp_m1p0 = RREG32(CP_ME1_PIPE0_INT_CNTL) & ~TIME_STAMP_INT_ENABLE; | 7327 u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; 7328 u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; 7329 u32 grbm_int_cntl = 0; 7330 u32 dma_cntl, dma_cntl1; 7331 u32 thermal_int; 7332 7333 if (!rdev->irq.installed) { 7334 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); --- 17 unchanged lines hidden (view full) --- 7352 hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN; 7353 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN; 7354 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN; 7355 7356 dma_cntl = RREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET) & ~TRAP_ENABLE; 7357 dma_cntl1 = RREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET) & ~TRAP_ENABLE; 7358 7359 cp_m1p0 = RREG32(CP_ME1_PIPE0_INT_CNTL) & ~TIME_STAMP_INT_ENABLE; |
6943 cp_m1p1 = RREG32(CP_ME1_PIPE1_INT_CNTL) & ~TIME_STAMP_INT_ENABLE; 6944 cp_m1p2 = RREG32(CP_ME1_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE; 6945 cp_m1p3 = RREG32(CP_ME1_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE; 6946 cp_m2p0 = RREG32(CP_ME2_PIPE0_INT_CNTL) & ~TIME_STAMP_INT_ENABLE; 6947 cp_m2p1 = RREG32(CP_ME2_PIPE1_INT_CNTL) & ~TIME_STAMP_INT_ENABLE; 6948 cp_m2p2 = RREG32(CP_ME2_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE; 6949 cp_m2p3 = RREG32(CP_ME2_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE; | |
6950 6951 if (rdev->flags & RADEON_IS_IGP) 6952 thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL) & 6953 ~(THERM_INTH_MASK | THERM_INTL_MASK); 6954 else 6955 thermal_int = RREG32_SMC(CG_THERMAL_INT) & 6956 ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW); 6957 --- 5 unchanged lines hidden (view full) --- 6963 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) { 6964 struct radeon_ring *ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; 6965 DRM_DEBUG("si_irq_set: sw int cp1\n"); 6966 if (ring->me == 1) { 6967 switch (ring->pipe) { 6968 case 0: 6969 cp_m1p0 |= TIME_STAMP_INT_ENABLE; 6970 break; | 7360 7361 if (rdev->flags & RADEON_IS_IGP) 7362 thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL) & 7363 ~(THERM_INTH_MASK | THERM_INTL_MASK); 7364 else 7365 thermal_int = RREG32_SMC(CG_THERMAL_INT) & 7366 ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW); 7367 --- 5 unchanged lines hidden (view full) --- 7373 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) { 7374 struct radeon_ring *ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; 7375 DRM_DEBUG("si_irq_set: sw int cp1\n"); 7376 if (ring->me == 1) { 7377 switch (ring->pipe) { 7378 case 0: 7379 cp_m1p0 |= TIME_STAMP_INT_ENABLE; 7380 break; |
6971 case 1: 6972 cp_m1p1 |= TIME_STAMP_INT_ENABLE; 6973 break; 6974 case 2: 6975 cp_m1p2 |= TIME_STAMP_INT_ENABLE; 6976 break; 6977 case 3: 6978 cp_m1p2 |= TIME_STAMP_INT_ENABLE; 6979 break; | |
6980 default: 6981 DRM_DEBUG("si_irq_set: sw int cp1 invalid pipe %d\n", ring->pipe); 6982 break; 6983 } | 7381 default: 7382 DRM_DEBUG("si_irq_set: sw int cp1 invalid pipe %d\n", ring->pipe); 7383 break; 7384 } |
6984 } else if (ring->me == 2) { 6985 switch (ring->pipe) { 6986 case 0: 6987 cp_m2p0 |= TIME_STAMP_INT_ENABLE; 6988 break; 6989 case 1: 6990 cp_m2p1 |= TIME_STAMP_INT_ENABLE; 6991 break; 6992 case 2: 6993 cp_m2p2 |= TIME_STAMP_INT_ENABLE; 6994 break; 6995 case 3: 6996 cp_m2p2 |= TIME_STAMP_INT_ENABLE; 6997 break; 6998 default: 6999 DRM_DEBUG("si_irq_set: sw int cp1 invalid pipe %d\n", ring->pipe); 7000 break; 7001 } | |
7002 } else { 7003 DRM_DEBUG("si_irq_set: sw int cp1 invalid me %d\n", ring->me); 7004 } 7005 } 7006 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) { 7007 struct radeon_ring *ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; 7008 DRM_DEBUG("si_irq_set: sw int cp2\n"); 7009 if (ring->me == 1) { 7010 switch (ring->pipe) { 7011 case 0: 7012 cp_m1p0 |= TIME_STAMP_INT_ENABLE; 7013 break; | 7385 } else { 7386 DRM_DEBUG("si_irq_set: sw int cp1 invalid me %d\n", ring->me); 7387 } 7388 } 7389 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) { 7390 struct radeon_ring *ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; 7391 DRM_DEBUG("si_irq_set: sw int cp2\n"); 7392 if (ring->me == 1) { 7393 switch (ring->pipe) { 7394 case 0: 7395 cp_m1p0 |= TIME_STAMP_INT_ENABLE; 7396 break; |
7014 case 1: 7015 cp_m1p1 |= TIME_STAMP_INT_ENABLE; 7016 break; 7017 case 2: 7018 cp_m1p2 |= TIME_STAMP_INT_ENABLE; 7019 break; 7020 case 3: 7021 cp_m1p2 |= TIME_STAMP_INT_ENABLE; 7022 break; | |
7023 default: 7024 DRM_DEBUG("si_irq_set: sw int cp2 invalid pipe %d\n", ring->pipe); 7025 break; 7026 } | 7397 default: 7398 DRM_DEBUG("si_irq_set: sw int cp2 invalid pipe %d\n", ring->pipe); 7399 break; 7400 } |
7027 } else if (ring->me == 2) { 7028 switch (ring->pipe) { 7029 case 0: 7030 cp_m2p0 |= TIME_STAMP_INT_ENABLE; 7031 break; 7032 case 1: 7033 cp_m2p1 |= TIME_STAMP_INT_ENABLE; 7034 break; 7035 case 2: 7036 cp_m2p2 |= TIME_STAMP_INT_ENABLE; 7037 break; 7038 case 3: 7039 cp_m2p2 |= TIME_STAMP_INT_ENABLE; 7040 break; 7041 default: 7042 DRM_DEBUG("si_irq_set: sw int cp2 invalid pipe %d\n", ring->pipe); 7043 break; 7044 } | |
7045 } else { 7046 DRM_DEBUG("si_irq_set: sw int cp2 invalid me %d\n", ring->me); 7047 } 7048 } 7049 7050 if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) { 7051 DRM_DEBUG("cik_irq_set: sw int dma\n"); 7052 dma_cntl |= TRAP_ENABLE; --- 68 unchanged lines hidden (view full) --- 7121 } 7122 7123 WREG32(CP_INT_CNTL_RING0, cp_int_cntl); 7124 7125 WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, dma_cntl); 7126 WREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET, dma_cntl1); 7127 7128 WREG32(CP_ME1_PIPE0_INT_CNTL, cp_m1p0); | 7401 } else { 7402 DRM_DEBUG("si_irq_set: sw int cp2 invalid me %d\n", ring->me); 7403 } 7404 } 7405 7406 if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) { 7407 DRM_DEBUG("cik_irq_set: sw int dma\n"); 7408 dma_cntl |= TRAP_ENABLE; --- 68 unchanged lines hidden (view full) --- 7477 } 7478 7479 WREG32(CP_INT_CNTL_RING0, cp_int_cntl); 7480 7481 WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, dma_cntl); 7482 WREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET, dma_cntl1); 7483 7484 WREG32(CP_ME1_PIPE0_INT_CNTL, cp_m1p0); |
7129 WREG32(CP_ME1_PIPE1_INT_CNTL, cp_m1p1); 7130 WREG32(CP_ME1_PIPE2_INT_CNTL, cp_m1p2); 7131 WREG32(CP_ME1_PIPE3_INT_CNTL, cp_m1p3); 7132 WREG32(CP_ME2_PIPE0_INT_CNTL, cp_m2p0); 7133 WREG32(CP_ME2_PIPE1_INT_CNTL, cp_m2p1); 7134 WREG32(CP_ME2_PIPE2_INT_CNTL, cp_m2p2); 7135 WREG32(CP_ME2_PIPE3_INT_CNTL, cp_m2p3); | |
7136 7137 WREG32(GRBM_INT_CNTL, grbm_int_cntl); 7138 7139 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1); 7140 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2); 7141 if (rdev->num_crtc >= 4) { 7142 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3); 7143 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4); --- 217 unchanged lines hidden (view full) --- 7361 u32 wptr, tmp; 7362 7363 if (rdev->wb.enabled) 7364 wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]); 7365 else 7366 wptr = RREG32(IH_RB_WPTR); 7367 7368 if (wptr & RB_OVERFLOW) { | 7485 7486 WREG32(GRBM_INT_CNTL, grbm_int_cntl); 7487 7488 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1); 7489 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2); 7490 if (rdev->num_crtc >= 4) { 7491 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3); 7492 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4); --- 217 unchanged lines hidden (view full) --- 7710 u32 wptr, tmp; 7711 7712 if (rdev->wb.enabled) 7713 wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]); 7714 else 7715 wptr = RREG32(IH_RB_WPTR); 7716 7717 if (wptr & RB_OVERFLOW) { |
7718 wptr &= ~RB_OVERFLOW; |
|
7369 /* When a ring buffer overflow happen start parsing interrupt 7370 * from the last not overwritten vector (wptr + 16). Hopefully 7371 * this should allow us to catchup. 7372 */ | 7719 /* When a ring buffer overflow happen start parsing interrupt 7720 * from the last not overwritten vector (wptr + 16). Hopefully 7721 * this should allow us to catchup. 7722 */ |
7373 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n", 7374 wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask); | 7723 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", 7724 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask); |
7375 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask; 7376 tmp = RREG32(IH_RB_CNTL); 7377 tmp |= IH_WPTR_OVERFLOW_CLEAR; 7378 WREG32(IH_RB_CNTL, tmp); 7379 } 7380 return (wptr & rdev->ih.ptr_mask); 7381} 7382 --- 228 unchanged lines hidden (view full) --- 7611 break; 7612 case 8: /* D1 page flip */ 7613 case 10: /* D2 page flip */ 7614 case 12: /* D3 page flip */ 7615 case 14: /* D4 page flip */ 7616 case 16: /* D5 page flip */ 7617 case 18: /* D6 page flip */ 7618 DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1); | 7725 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask; 7726 tmp = RREG32(IH_RB_CNTL); 7727 tmp |= IH_WPTR_OVERFLOW_CLEAR; 7728 WREG32(IH_RB_CNTL, tmp); 7729 } 7730 return (wptr & rdev->ih.ptr_mask); 7731} 7732 --- 228 unchanged lines hidden (view full) --- 7961 break; 7962 case 8: /* D1 page flip */ 7963 case 10: /* D2 page flip */ 7964 case 12: /* D3 page flip */ 7965 case 14: /* D4 page flip */ 7966 case 16: /* D5 page flip */ 7967 case 18: /* D6 page flip */ 7968 DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1); |
7619 radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1); | 7969 if (radeon_use_pflipirq > 0) 7970 radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1); |
7620 break; 7621 case 42: /* HPD hotplug */ 7622 switch (src_data) { 7623 case 0: 7624 if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT) { 7625 rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_INTERRUPT; 7626 queue_hotplug = true; 7627 DRM_DEBUG("IH: HPD1\n"); --- 231 unchanged lines hidden (view full) --- 7859 default: 7860 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 7861 break; 7862 } 7863 7864 /* wptr/rptr are in bytes! */ 7865 rptr += 16; 7866 rptr &= rdev->ih.ptr_mask; | 7971 break; 7972 case 42: /* HPD hotplug */ 7973 switch (src_data) { 7974 case 0: 7975 if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT) { 7976 rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_INTERRUPT; 7977 queue_hotplug = true; 7978 DRM_DEBUG("IH: HPD1\n"); --- 231 unchanged lines hidden (view full) --- 8210 default: 8211 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 8212 break; 8213 } 8214 8215 /* wptr/rptr are in bytes! */ 8216 rptr += 16; 8217 rptr &= rdev->ih.ptr_mask; |
8218 WREG32(IH_RB_RPTR, rptr); |
|
7867 } 7868 if (queue_hotplug) 7869 schedule_work(&rdev->hotplug_work); | 8219 } 8220 if (queue_hotplug) 8221 schedule_work(&rdev->hotplug_work); |
7870 if (queue_reset) 7871 schedule_work(&rdev->reset_work); | 8222 if (queue_reset) { 8223 rdev->needs_reset = true; 8224 wake_up_all(&rdev->fence_queue); 8225 } |
7872 if (queue_thermal) 7873 schedule_work(&rdev->pm.dpm.thermal.work); 7874 rdev->ih.rptr = rptr; | 8226 if (queue_thermal) 8227 schedule_work(&rdev->pm.dpm.thermal.work); 8228 rdev->ih.rptr = rptr; |
7875 WREG32(IH_RB_RPTR, rdev->ih.rptr); | |
7876 atomic_set(&rdev->ih.lock, 0); 7877 7878 /* make sure wptr hasn't changed while processing */ 7879 wptr = cik_get_ih_wptr(rdev); 7880 if (wptr != rptr) 7881 goto restart_ih; 7882 7883 return IRQ_HANDLED; --- 9 unchanged lines hidden (view full) --- 7893 * 7894 * Programs the asic to a functional state (CIK). 7895 * Called by cik_init() and cik_resume(). 7896 * Returns 0 for success, error for failure. 7897 */ 7898static int cik_startup(struct radeon_device *rdev) 7899{ 7900 struct radeon_ring *ring; | 8229 atomic_set(&rdev->ih.lock, 0); 8230 8231 /* make sure wptr hasn't changed while processing */ 8232 wptr = cik_get_ih_wptr(rdev); 8233 if (wptr != rptr) 8234 goto restart_ih; 8235 8236 return IRQ_HANDLED; --- 9 unchanged lines hidden (view full) --- 8246 * 8247 * Programs the asic to a functional state (CIK). 8248 * Called by cik_init() and cik_resume(). 8249 * Returns 0 for success, error for failure. 8250 */ 8251static int cik_startup(struct radeon_device *rdev) 8252{ 8253 struct radeon_ring *ring; |
8254 u32 nop; |
|
7901 int r; 7902 7903 /* enable pcie gen2/3 link */ 7904 cik_pcie_gen3_enable(rdev); 7905 /* enable aspm */ 7906 cik_program_aspm(rdev); 7907 7908 /* scratch needs to be initialized before MC */ --- 117 unchanged lines hidden (view full) --- 8026 r = cik_irq_init(rdev); 8027 if (r) { 8028 DRM_ERROR("radeon: IH init failed (%d).\n", r); 8029 radeon_irq_kms_fini(rdev); 8030 return r; 8031 } 8032 cik_irq_set(rdev); 8033 | 8255 int r; 8256 8257 /* enable pcie gen2/3 link */ 8258 cik_pcie_gen3_enable(rdev); 8259 /* enable aspm */ 8260 cik_program_aspm(rdev); 8261 8262 /* scratch needs to be initialized before MC */ --- 117 unchanged lines hidden (view full) --- 8380 r = cik_irq_init(rdev); 8381 if (r) { 8382 DRM_ERROR("radeon: IH init failed (%d).\n", r); 8383 radeon_irq_kms_fini(rdev); 8384 return r; 8385 } 8386 cik_irq_set(rdev); 8387 |
8388 if (rdev->family == CHIP_HAWAII) { 8389 if (rdev->new_fw) 8390 nop = PACKET3(PACKET3_NOP, 0x3FFF); 8391 else 8392 nop = RADEON_CP_PACKET2; 8393 } else { 8394 nop = PACKET3(PACKET3_NOP, 0x3FFF); 8395 } 8396 |
|
8034 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 8035 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, | 8397 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 8398 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, |
8036 PACKET3(PACKET3_NOP, 0x3FFF)); | 8399 nop); |
8037 if (r) 8038 return r; 8039 8040 /* set up the compute queues */ 8041 /* type-2 packets are deprecated on MEC, use type-3 instead */ 8042 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; 8043 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET, | 8400 if (r) 8401 return r; 8402 8403 /* set up the compute queues */ 8404 /* type-2 packets are deprecated on MEC, use type-3 instead */ 8405 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; 8406 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET, |
8044 PACKET3(PACKET3_NOP, 0x3FFF)); | 8407 nop); |
8045 if (r) 8046 return r; 8047 ring->me = 1; /* first MEC */ 8048 ring->pipe = 0; /* first pipe */ 8049 ring->queue = 0; /* first queue */ 8050 ring->wptr_offs = CIK_WB_CP1_WPTR_OFFSET; 8051 8052 /* type-2 packets are deprecated on MEC, use type-3 instead */ 8053 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; 8054 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET, | 8408 if (r) 8409 return r; 8410 ring->me = 1; /* first MEC */ 8411 ring->pipe = 0; /* first pipe */ 8412 ring->queue = 0; /* first queue */ 8413 ring->wptr_offs = CIK_WB_CP1_WPTR_OFFSET; 8414 8415 /* type-2 packets are deprecated on MEC, use type-3 instead */ 8416 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; 8417 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET, |
8055 PACKET3(PACKET3_NOP, 0x3FFF)); | 8418 nop); |
8056 if (r) 8057 return r; 8058 /* dGPU only have 1 MEC */ 8059 ring->me = 1; /* first MEC */ 8060 ring->pipe = 0; /* first pipe */ 8061 ring->queue = 1; /* second queue */ 8062 ring->wptr_offs = CIK_WB_CP2_WPTR_OFFSET; 8063 --- 1097 unchanged lines hidden (view full) --- 9161static void cik_pcie_gen3_enable(struct radeon_device *rdev) 9162{ 9163 struct pci_dev *root = rdev->pdev->bus->self; 9164 int bridge_pos, gpu_pos; 9165 u32 speed_cntl, mask, current_data_rate; 9166 int ret, i; 9167 u16 tmp16; 9168 | 8419 if (r) 8420 return r; 8421 /* dGPU only have 1 MEC */ 8422 ring->me = 1; /* first MEC */ 8423 ring->pipe = 0; /* first pipe */ 8424 ring->queue = 1; /* second queue */ 8425 ring->wptr_offs = CIK_WB_CP2_WPTR_OFFSET; 8426 --- 1097 unchanged lines hidden (view full) --- 9524static void cik_pcie_gen3_enable(struct radeon_device *rdev) 9525{ 9526 struct pci_dev *root = rdev->pdev->bus->self; 9527 int bridge_pos, gpu_pos; 9528 u32 speed_cntl, mask, current_data_rate; 9529 int ret, i; 9530 u16 tmp16; 9531 |
9532 if (pci_is_root_bus(rdev->pdev->bus)) 9533 return; 9534 |
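/*
 * Sketch of what the guard above protects: the link-tuning paths dereference
 * rdev->pdev->bus->self (the upstream bridge), and a device sitting directly
 * on a root bus has no such bridge, so those paths need to bail out first.
 * The cut-down structs below are illustrative, not the kernel's struct
 * pci_dev, and the helper only mimics the pci_is_root_bus() check.
 */
#include <stdbool.h>
#include <stddef.h>

struct pci_bus_sketch;
struct pci_dev_sketch { struct pci_bus_sketch *bus; };
struct pci_bus_sketch { struct pci_dev_sketch *self; /* upstream bridge, if any */ };

bool must_skip_link_tuning(const struct pci_dev_sketch *pdev)
{
	/* No upstream bridge to configure against: skip gen3/ASPM tuning. */
	return pdev->bus == NULL || pdev->bus->self == NULL;
}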
|
9169 if (radeon_pcie_gen2 == 0) 9170 return; 9171 9172 if (rdev->flags & RADEON_IS_IGP) 9173 return; 9174 9175 if (!(rdev->flags & RADEON_IS_PCIE)) 9176 return; --- 210 unchanged lines hidden (view full) --- 9387 WREG32_PCIE_PORT(PB1_PIF_PWRDOWN_1, data); 9388 9389 orig = data = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL); 9390 data &= ~LC_DYN_LANES_PWR_STATE_MASK; 9391 data |= LC_DYN_LANES_PWR_STATE(3); 9392 if (orig != data) 9393 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data); 9394 | 9535 if (radeon_pcie_gen2 == 0) 9536 return; 9537 9538 if (rdev->flags & RADEON_IS_IGP) 9539 return; 9540 9541 if (!(rdev->flags & RADEON_IS_PCIE)) 9542 return; --- 210 unchanged lines hidden (view full) --- 9753 WREG32_PCIE_PORT(PB1_PIF_PWRDOWN_1, data); 9754 9755 orig = data = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL); 9756 data &= ~LC_DYN_LANES_PWR_STATE_MASK; 9757 data |= LC_DYN_LANES_PWR_STATE(3); 9758 if (orig != data) 9759 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data); 9760 |
9395 if (!disable_clkreq) { | 9761 if (!disable_clkreq && 9762 !pci_is_root_bus(rdev->pdev->bus)) { |
9396 struct pci_dev *root = rdev->pdev->bus->self; 9397 u32 lnkcap; 9398 9399 clk_req_support = false; 9400 pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap); 9401 if (lnkcap & PCI_EXP_LNKCAP_CLKPM) 9402 clk_req_support = true; 9403 } else { --- 61 unchanged lines hidden --- | 9763 struct pci_dev *root = rdev->pdev->bus->self; 9764 u32 lnkcap; 9765 9766 clk_req_support = false; 9767 pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap); 9768 if (lnkcap & PCI_EXP_LNKCAP_CLKPM) 9769 clk_req_support = true; 9770 } else { --- 61 unchanged lines hidden --- |