/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_rlc.h"

/**
 * amdgpu_gfx_rlc_enter_safe_mode - Set RLC into safe mode
 *
 * @adev: amdgpu_device pointer
 *
 * Put the RLC into safe mode if it is enabled and not already in safe mode.
 */
void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev)
{
	if (adev->gfx.rlc.in_safe_mode)
		return;

	/* if RLC is not enabled, do nothing */
	if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
		return;

	if (adev->cg_flags &
	    (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
	     AMD_CG_SUPPORT_GFX_3D_CGCG)) {
		adev->gfx.rlc.funcs->set_safe_mode(adev);
		adev->gfx.rlc.in_safe_mode = true;
	}
}

/**
 * amdgpu_gfx_rlc_exit_safe_mode - Set RLC out of safe mode
 *
 * @adev: amdgpu_device pointer
 *
 * Take the RLC out of safe mode if it is enabled and currently in safe mode.
 */
void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev)
{
	if (!(adev->gfx.rlc.in_safe_mode))
		return;

	/* if RLC is not enabled, do nothing */
	if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
		return;

	if (adev->cg_flags &
	    (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
	     AMD_CG_SUPPORT_GFX_3D_CGCG)) {
		adev->gfx.rlc.funcs->unset_safe_mode(adev);
		adev->gfx.rlc.in_safe_mode = false;
	}
}
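
/*
 * Usage sketch (illustrative only, not part of the original file): the two
 * helpers above are meant to bracket clockgating register updates in the
 * ASIC-specific GFX code so the RLC does not race with the MMIO programming.
 * gfx_vN_update_cgcg() and the register writes inside it are hypothetical
 * placeholders:
 *
 *	static void gfx_vN_update_cgcg(struct amdgpu_device *adev, bool enable)
 *	{
 *		amdgpu_gfx_rlc_enter_safe_mode(adev);
 *
 *		// program CGCG/MGCG control registers for this ASIC ...
 *
 *		amdgpu_gfx_rlc_exit_safe_mode(adev);
 *	}
 */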

/**
 * amdgpu_gfx_rlc_init_sr - Init save restore block
 *
 * @adev: amdgpu_device pointer
 * @dws: the size of save restore block
 *
 * Allocate the RLC save restore block BO and fill it with the register list.
 * Returns 0 on success or a negative error code if allocation failed.
 */
int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws)
{
	const u32 *src_ptr;
	volatile u32 *dst_ptr;
	u32 i;
	int r;

	/* allocate save restore block */
	r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->gfx.rlc.save_restore_obj,
				      &adev->gfx.rlc.save_restore_gpu_addr,
				      (void **)&adev->gfx.rlc.sr_ptr);
	if (r) {
		dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
		amdgpu_gfx_rlc_fini(adev);
		return r;
	}

	/* write the sr buffer */
	src_ptr = adev->gfx.rlc.reg_list;
	dst_ptr = adev->gfx.rlc.sr_ptr;
	for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
		dst_ptr[i] = cpu_to_le32(src_ptr[i]);
	amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
	amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);

	return 0;
}

/**
 * amdgpu_gfx_rlc_init_csb - Init clear state block
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the RLC clear state block BO and fill it with the clear state data.
 * Returns 0 on success or a negative error code if allocation failed.
 */
int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev)
{
	volatile u32 *dst_ptr;
	u32 dws;
	int r;

	/* allocate clear state block */
	adev->gfx.rlc.clear_state_size = dws = adev->gfx.rlc.funcs->get_csb_size(adev);
	r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->gfx.rlc.clear_state_obj,
				      &adev->gfx.rlc.clear_state_gpu_addr,
				      (void **)&adev->gfx.rlc.cs_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create rlc csb bo\n", r);
		amdgpu_gfx_rlc_fini(adev);
		return r;
	}

	/* set up the cs buffer */
	dst_ptr = adev->gfx.rlc.cs_ptr;
	adev->gfx.rlc.funcs->get_csb_buffer(adev, dst_ptr);
	amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
	amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
	amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);

	return 0;
}

/**
 * amdgpu_gfx_rlc_init_cpt - Init cp table
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the RLC CP table BO and fill it with the CP firmware jump tables.
 * Returns 0 on success or a negative error code if allocation failed.
 */
int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->gfx.rlc.cp_table_obj,
				      &adev->gfx.rlc.cp_table_gpu_addr,
				      (void **)&adev->gfx.rlc.cp_table_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create cp table bo\n", r);
		amdgpu_gfx_rlc_fini(adev);
		return r;
	}

	/* set up the cp table */
	amdgpu_gfx_rlc_setup_cp_table(adev);
	amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
	amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);

	return 0;
}
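
/*
 * Usage sketch (illustrative only, not part of the original file): an
 * ASIC-specific rlc_init callback would normally call the allocation helpers
 * above only for the blocks that ASIC actually uses.  gfx_vN_rlc_init() is a
 * hypothetical placeholder; the fields tested are the ones the helpers
 * consume (reg_list/reg_list_size, get_csb_size(), cp_table_size):
 *
 *	static int gfx_vN_rlc_init(struct amdgpu_device *adev)
 *	{
 *		int r;
 *
 *		if (adev->gfx.rlc.reg_list) {
 *			r = amdgpu_gfx_rlc_init_sr(adev, adev->gfx.rlc.reg_list_size);
 *			if (r)
 *				return r;
 *		}
 *
 *		if (adev->gfx.rlc.funcs->get_csb_size(adev)) {
 *			r = amdgpu_gfx_rlc_init_csb(adev);
 *			if (r)
 *				return r;
 *		}
 *
 *		if (adev->gfx.rlc.cp_table_size) {
 *			r = amdgpu_gfx_rlc_init_cpt(adev);
 *			if (r)
 *				return r;
 *		}
 *
 *		return 0;
 *	}
 */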

/**
 * amdgpu_gfx_rlc_setup_cp_table - Setup the CP table buffer
 *
 * @adev: amdgpu_device pointer
 *
 * Write the CP firmware jump tables into the CP table buffer.
 */
void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev)
{
	const __le32 *fw_data;
	volatile u32 *dst_ptr;
	int me, i, max_me;
	u32 bo_offset = 0;
	u32 table_offset, table_size;

	max_me = adev->gfx.rlc.funcs->get_cp_table_num(adev);

	/* write the cp table buffer: 0 = CE, 1 = PFP, 2 = ME, 3 = MEC, 4 = MEC2 */
	dst_ptr = adev->gfx.rlc.cp_table_ptr;
	for (me = 0; me < max_me; me++) {
		if (me == 0) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.ce_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 1) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.pfp_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 2) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.me_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 3) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.mec_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 4) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.mec2_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		}

		for (i = 0; i < table_size; i++) {
			dst_ptr[bo_offset + i] =
				cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
		}

		bo_offset += table_size;
	}
}

/**
 * amdgpu_gfx_rlc_fini - Free the BOs used by the RLC
 *
 * @adev: amdgpu_device pointer
 *
 * Free the BOs used for the RLC save restore block, clear state block and
 * CP jump table block.
 */
void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev)
{
	/* save restore block */
	if (adev->gfx.rlc.save_restore_obj) {
		amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj,
				      &adev->gfx.rlc.save_restore_gpu_addr,
				      (void **)&adev->gfx.rlc.sr_ptr);
	}

	/* clear state block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
			      &adev->gfx.rlc.clear_state_gpu_addr,
			      (void **)&adev->gfx.rlc.cs_ptr);

	/* jump table block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
			      &adev->gfx.rlc.cp_table_gpu_addr,
			      (void **)&adev->gfx.rlc.cp_table_ptr);
}
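
/*
 * For reference (illustrative only, not part of the original file): the
 * helpers in this file call back into the ASIC-specific implementation
 * through adev->gfx.rlc.funcs.  The callback names below are the ones
 * invoked above; the gfx_vN_* implementations are hypothetical placeholders:
 *
 *	static const struct amdgpu_rlc_funcs gfx_vN_rlc_funcs = {
 *		.is_rlc_enabled   = gfx_vN_is_rlc_enabled,
 *		.set_safe_mode    = gfx_vN_set_safe_mode,
 *		.unset_safe_mode  = gfx_vN_unset_safe_mode,
 *		.get_csb_size     = gfx_vN_get_csb_size,
 *		.get_csb_buffer   = gfx_vN_get_csb_buffer,
 *		.get_cp_table_num = gfx_vN_get_cp_table_num,
 *	};
 *
 *	adev->gfx.rlc.funcs = &gfx_vN_rlc_funcs;
 *
 * amdgpu_gfx_rlc_fini() is the common teardown path and is typically called
 * from the GFX IP block's sw_fini (and from the error paths above).
 */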