/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef __AMDGPU_RLC_H__
#define __AMDGPU_RLC_H__

#include "clearstate_defs.h"

#define AMDGPU_MAX_RLC_INSTANCES	8

/* firmware ID used in rlc toc */
typedef enum _FIRMWARE_ID_ {
	FIRMWARE_ID_INVALID = 0,
	FIRMWARE_ID_RLC_G_UCODE = 1,
	FIRMWARE_ID_RLC_TOC = 2,
	FIRMWARE_ID_RLCG_SCRATCH = 3,
	FIRMWARE_ID_RLC_SRM_ARAM = 4,
	FIRMWARE_ID_RLC_SRM_INDEX_ADDR = 5,
	FIRMWARE_ID_RLC_SRM_INDEX_DATA = 6,
	FIRMWARE_ID_RLC_P_UCODE = 7,
	FIRMWARE_ID_RLC_V_UCODE = 8,
	FIRMWARE_ID_RLX6_UCODE = 9,
	FIRMWARE_ID_RLX6_DRAM_BOOT = 10,
	FIRMWARE_ID_GLOBAL_TAP_DELAYS = 11,
	FIRMWARE_ID_SE0_TAP_DELAYS = 12,
	FIRMWARE_ID_SE1_TAP_DELAYS = 13,
	FIRMWARE_ID_GLOBAL_SE0_SE1_SKEW_DELAYS = 14,
	FIRMWARE_ID_SDMA0_UCODE = 15,
	FIRMWARE_ID_SDMA0_JT = 16,
	FIRMWARE_ID_SDMA1_UCODE = 17,
	FIRMWARE_ID_SDMA1_JT = 18,
	FIRMWARE_ID_CP_CE = 19,
	FIRMWARE_ID_CP_PFP = 20,
	FIRMWARE_ID_CP_ME = 21,
	FIRMWARE_ID_CP_MEC = 22,
	FIRMWARE_ID_CP_MES = 23,
	FIRMWARE_ID_MES_STACK = 24,
	FIRMWARE_ID_RLC_SRM_DRAM_SR = 25,
	FIRMWARE_ID_RLCG_SCRATCH_SR = 26,
	FIRMWARE_ID_RLCP_SCRATCH_SR = 27,
	FIRMWARE_ID_RLCV_SCRATCH_SR = 28,
	FIRMWARE_ID_RLX6_DRAM_SR = 29,
	FIRMWARE_ID_SDMA0_PG_CONTEXT = 30,
	FIRMWARE_ID_SDMA1_PG_CONTEXT = 31,
	FIRMWARE_ID_GLOBAL_MUX_SELECT_RAM = 32,
	FIRMWARE_ID_SE0_MUX_SELECT_RAM = 33,
	FIRMWARE_ID_SE1_MUX_SELECT_RAM = 34,
	FIRMWARE_ID_ACCUM_CTRL_RAM = 35,
	FIRMWARE_ID_RLCP_CAM = 36,
	FIRMWARE_ID_RLC_SPP_CAM_EXT = 37,
	FIRMWARE_ID_MAX = 38,
} FIRMWARE_ID;

typedef enum _SOC21_FIRMWARE_ID_ {
	SOC21_FIRMWARE_ID_INVALID = 0,
	SOC21_FIRMWARE_ID_RLC_G_UCODE = 1,
	SOC21_FIRMWARE_ID_RLC_TOC = 2,
	SOC21_FIRMWARE_ID_RLCG_SCRATCH = 3,
	SOC21_FIRMWARE_ID_RLC_SRM_ARAM = 4,
	SOC21_FIRMWARE_ID_RLC_P_UCODE = 5,
	SOC21_FIRMWARE_ID_RLC_V_UCODE = 6,
	SOC21_FIRMWARE_ID_RLX6_UCODE = 7,
	SOC21_FIRMWARE_ID_RLX6_UCODE_CORE1 = 8,
	SOC21_FIRMWARE_ID_RLX6_DRAM_BOOT = 9,
	SOC21_FIRMWARE_ID_RLX6_DRAM_BOOT_CORE1 = 10,
	SOC21_FIRMWARE_ID_SDMA_UCODE_TH0 = 11,
	SOC21_FIRMWARE_ID_SDMA_UCODE_TH1 = 12,
	SOC21_FIRMWARE_ID_CP_PFP = 13,
	SOC21_FIRMWARE_ID_CP_ME = 14,
	SOC21_FIRMWARE_ID_CP_MEC = 15,
	SOC21_FIRMWARE_ID_RS64_MES_P0 = 16,
	SOC21_FIRMWARE_ID_RS64_MES_P1 = 17,
	SOC21_FIRMWARE_ID_RS64_PFP = 18,
	SOC21_FIRMWARE_ID_RS64_ME = 19,
	SOC21_FIRMWARE_ID_RS64_MEC = 20,
	SOC21_FIRMWARE_ID_RS64_MES_P0_STACK = 21,
	SOC21_FIRMWARE_ID_RS64_MES_P1_STACK = 22,
	SOC21_FIRMWARE_ID_RS64_PFP_P0_STACK = 23,
	SOC21_FIRMWARE_ID_RS64_PFP_P1_STACK = 24,
	SOC21_FIRMWARE_ID_RS64_ME_P0_STACK = 25,
	SOC21_FIRMWARE_ID_RS64_ME_P1_STACK = 26,
	SOC21_FIRMWARE_ID_RS64_MEC_P0_STACK = 27,
	SOC21_FIRMWARE_ID_RS64_MEC_P1_STACK = 28,
	SOC21_FIRMWARE_ID_RS64_MEC_P2_STACK = 29,
	SOC21_FIRMWARE_ID_RS64_MEC_P3_STACK = 30,
	SOC21_FIRMWARE_ID_RLC_SRM_DRAM_SR = 31,
	SOC21_FIRMWARE_ID_RLCG_SCRATCH_SR = 32,
	SOC21_FIRMWARE_ID_RLCP_SCRATCH_SR = 33,
	SOC21_FIRMWARE_ID_RLCV_SCRATCH_SR = 34,
	SOC21_FIRMWARE_ID_RLX6_DRAM_SR = 35,
	SOC21_FIRMWARE_ID_RLX6_DRAM_SR_CORE1 = 36,
	SOC21_FIRMWARE_ID_MAX = 37
} SOC21_FIRMWARE_ID;

typedef struct _RLC_TABLE_OF_CONTENT {
	union {
		unsigned int DW0;
		struct {
			unsigned int offset : 25;
			unsigned int id : 7;
		};
	};

	union {
		unsigned int DW1;
		struct {
			unsigned int load_at_boot : 1;
			unsigned int load_at_vddgfx : 1;
			unsigned int load_at_reset : 1;
			unsigned int memory_destination : 2;
			unsigned int vfflr_image_code : 4;
			unsigned int load_mode_direct : 1;
			unsigned int save_for_vddgfx : 1;
			unsigned int save_for_vfflr : 1;
			unsigned int reserved : 1;
			unsigned int signed_source : 1;
			unsigned int size : 18;
		};
	};

	union {
		unsigned int DW2;
		struct {
			unsigned int indirect_addr_reg : 16;
			unsigned int index : 16;
		};
	};

	union {
		unsigned int DW3;
		struct {
			unsigned int indirect_data_reg : 16;
			unsigned int indirect_start_offset : 16;
		};
	};
} RLC_TABLE_OF_CONTENT;

#define RLC_TOC_MAX_SIZE	64

struct amdgpu_rlc_funcs {
	bool (*is_rlc_enabled)(struct amdgpu_device *adev);
	void (*set_safe_mode)(struct amdgpu_device *adev, int xcc_id);
	void (*unset_safe_mode)(struct amdgpu_device *adev, int xcc_id);
	int  (*init)(struct amdgpu_device *adev);
	u32  (*get_csb_size)(struct amdgpu_device *adev);
	void (*get_csb_buffer)(struct amdgpu_device *adev, volatile u32 *buffer);
	int  (*get_cp_table_num)(struct amdgpu_device *adev);
	int  (*resume)(struct amdgpu_device *adev);
	void (*stop)(struct amdgpu_device *adev);
	void (*reset)(struct amdgpu_device *adev);
	void (*start)(struct amdgpu_device *adev);
	void (*update_spm_vmid)(struct amdgpu_device *adev, unsigned vmid);
	bool (*is_rlcg_access_range)(struct amdgpu_device *adev, uint32_t reg);
};

struct amdgpu_rlcg_reg_access_ctrl {
	uint32_t scratch_reg0;
	uint32_t scratch_reg1;
	uint32_t scratch_reg2;
	uint32_t scratch_reg3;
	uint32_t grbm_cntl;
	uint32_t grbm_idx;
	uint32_t spare_int;
};

struct amdgpu_rlc {
	/* for power gating */
	struct amdgpu_bo *save_restore_obj;
	uint64_t save_restore_gpu_addr;
	volatile uint32_t *sr_ptr;
	const u32 *reg_list;
	u32 reg_list_size;
	/* for clear state */
	struct amdgpu_bo *clear_state_obj;
	uint64_t clear_state_gpu_addr;
	volatile uint32_t *cs_ptr;
	const struct cs_section_def *cs_data;
	u32 clear_state_size;
	/* for cp tables */
	struct amdgpu_bo *cp_table_obj;
	uint64_t cp_table_gpu_addr;
	volatile uint32_t *cp_table_ptr;
	u32 cp_table_size;

	/* safe mode for updating CG/PG state */
	bool in_safe_mode[AMDGPU_MAX_RLC_INSTANCES];
	const struct amdgpu_rlc_funcs *funcs;

	/* for firmware data */
	u32 save_and_restore_offset;
	u32 clear_state_descriptor_offset;
	u32 avail_scratch_ram_locations;
	u32 reg_restore_list_size;
	u32 reg_list_format_start;
	u32 reg_list_format_separate_start;
	u32 starting_offsets_start;
	u32 reg_list_format_size_bytes;
	u32 reg_list_size_bytes;
	u32 reg_list_format_direct_reg_list_length;
	u32 save_restore_list_cntl_size_bytes;
	u32 save_restore_list_gpm_size_bytes;
	u32 save_restore_list_srm_size_bytes;
	u32 rlc_iram_ucode_size_bytes;
	u32 rlc_dram_ucode_size_bytes;
	u32 rlcp_ucode_size_bytes;
	u32 rlcv_ucode_size_bytes;
	u32 global_tap_delays_ucode_size_bytes;
	u32 se0_tap_delays_ucode_size_bytes;
	u32 se1_tap_delays_ucode_size_bytes;
	u32 se2_tap_delays_ucode_size_bytes;
	u32 se3_tap_delays_ucode_size_bytes;

	u32 *register_list_format;
	u32 *register_restore;
	u8 *save_restore_list_cntl;
	u8 *save_restore_list_gpm;
	u8 *save_restore_list_srm;
	u8 *rlc_iram_ucode;
	u8 *rlc_dram_ucode;
	u8 *rlcp_ucode;
	u8 *rlcv_ucode;
	u8 *global_tap_delays_ucode;
	u8 *se0_tap_delays_ucode;
	u8 *se1_tap_delays_ucode;
	u8 *se2_tap_delays_ucode;
	u8 *se3_tap_delays_ucode;

	bool is_rlc_v2_1;

	/* for rlc autoload */
	struct amdgpu_bo *rlc_autoload_bo;
	u64 rlc_autoload_gpu_addr;
	void *rlc_autoload_ptr;

	/* rlc toc buffer */
	struct amdgpu_bo *rlc_toc_bo;
	uint64_t rlc_toc_gpu_addr;
	void *rlc_toc_buf;

	bool rlcg_reg_access_supported;
	/* registers for rlcg indirect reg access */
	struct amdgpu_rlcg_reg_access_ctrl reg_access_ctrl[AMDGPU_MAX_RLC_INSTANCES];
};

void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev, int xcc_id);
void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev, int xcc_id);
int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws);
int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev);
int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev);
void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev);
void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev);
int amdgpu_gfx_rlc_init_microcode(struct amdgpu_device *adev,
				  uint16_t version_major,
				  uint16_t version_minor);
#endif
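
/*
 * Usage sketch (illustrative only): callers that reprogram CG/PG-sensitive
 * registers typically bracket the writes with the safe-mode helpers
 * declared above, e.g.
 *
 *	amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
 *	... update clock-gating / power-gating registers ...
 *	amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
 *
 * xcc_id selects the RLC instance (0 on single-instance ASICs); the helpers
 * dispatch to the set_safe_mode()/unset_safe_mode() callbacks in
 * struct amdgpu_rlc_funcs and track the state in rlc->in_safe_mode[].
 */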