/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
21 * 22 */ 23 #include <drm/drmP.h> 24 #include <drm/amdgpu_drm.h> 25 #include "amdgpu.h" 26 #include "atomfirmware.h" 27 #include "amdgpu_atomfirmware.h" 28 #include "atom.h" 29 #include "atombios.h" 30 31 #define get_index_into_master_table(master_table, table_name) (offsetof(struct master_table, table_name) / sizeof(uint16_t)) 32 33 bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev) 34 { 35 int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, 36 firmwareinfo); 37 uint16_t data_offset; 38 39 if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL, 40 NULL, NULL, &data_offset)) { 41 struct atom_firmware_info_v3_1 *firmware_info = 42 (struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios + 43 data_offset); 44 45 if (le32_to_cpu(firmware_info->firmware_capability) & 46 ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION) 47 return true; 48 } 49 return false; 50 } 51 52 void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev) 53 { 54 int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, 55 firmwareinfo); 56 uint16_t data_offset; 57 58 if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL, 59 NULL, NULL, &data_offset)) { 60 struct atom_firmware_info_v3_1 *firmware_info = 61 (struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios + 62 data_offset); 63 64 adev->bios_scratch_reg_offset = 65 le32_to_cpu(firmware_info->bios_scratch_reg_startaddr); 66 } 67 } 68 69 int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev) 70 { 71 struct atom_context *ctx = adev->mode_info.atom_context; 72 int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, 73 vram_usagebyfirmware); 74 struct vram_usagebyfirmware_v2_1 * firmware_usage; 75 uint32_t start_addr, size; 76 uint16_t data_offset; 77 int usage_bytes = 0; 78 79 if (amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) { 80 
firmware_usage = (struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset); 81 DRM_DEBUG("atom firmware requested %08x %dkb fw %dkb drv\n", 82 le32_to_cpu(firmware_usage->start_address_in_kb), 83 le16_to_cpu(firmware_usage->used_by_firmware_in_kb), 84 le16_to_cpu(firmware_usage->used_by_driver_in_kb)); 85 86 start_addr = le32_to_cpu(firmware_usage->start_address_in_kb); 87 size = le16_to_cpu(firmware_usage->used_by_firmware_in_kb); 88 89 if ((uint32_t)(start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) == 90 (uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION << 91 ATOM_VRAM_OPERATION_FLAGS_SHIFT)) { 92 /* Firmware request VRAM reservation for SR-IOV */ 93 adev->fw_vram_usage.start_offset = (start_addr & 94 (~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10; 95 adev->fw_vram_usage.size = size << 10; 96 /* Use the default scratch size */ 97 usage_bytes = 0; 98 } else { 99 usage_bytes = le16_to_cpu(firmware_usage->used_by_driver_in_kb) << 10; 100 } 101 } 102 ctx->scratch_size_bytes = 0; 103 if (usage_bytes == 0) 104 usage_bytes = 20 * 1024; 105 /* allocate some scratch memory */ 106 ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL); 107 if (!ctx->scratch) 108 return -ENOMEM; 109 ctx->scratch_size_bytes = usage_bytes; 110 return 0; 111 } 112 113 union igp_info { 114 struct atom_integrated_system_info_v1_11 v11; 115 }; 116 117 /* 118 * Return vram width from integrated system info table, if available, 119 * or 0 if not. 
120 */ 121 int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev) 122 { 123 struct amdgpu_mode_info *mode_info = &adev->mode_info; 124 int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, 125 integratedsysteminfo); 126 u16 data_offset, size; 127 union igp_info *igp_info; 128 u8 frev, crev; 129 130 /* get any igp specific overrides */ 131 if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, &size, 132 &frev, &crev, &data_offset)) { 133 igp_info = (union igp_info *) 134 (mode_info->atom_context->bios + data_offset); 135 switch (crev) { 136 case 11: 137 return igp_info->v11.umachannelnumber * 64; 138 default: 139 return 0; 140 } 141 } 142 143 return 0; 144 } 145 146 union firmware_info { 147 struct atom_firmware_info_v3_1 v31; 148 }; 149 150 union smu_info { 151 struct atom_smu_info_v3_1 v31; 152 }; 153 154 union umc_info { 155 struct atom_umc_info_v3_1 v31; 156 }; 157 158 int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev) 159 { 160 struct amdgpu_mode_info *mode_info = &adev->mode_info; 161 struct amdgpu_pll *spll = &adev->clock.spll; 162 struct amdgpu_pll *mpll = &adev->clock.mpll; 163 uint8_t frev, crev; 164 uint16_t data_offset; 165 int ret = -EINVAL, index; 166 167 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, 168 firmwareinfo); 169 if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, 170 &frev, &crev, &data_offset)) { 171 union firmware_info *firmware_info = 172 (union firmware_info *)(mode_info->atom_context->bios + 173 data_offset); 174 175 adev->clock.default_sclk = 176 le32_to_cpu(firmware_info->v31.bootup_sclk_in10khz); 177 adev->clock.default_mclk = 178 le32_to_cpu(firmware_info->v31.bootup_mclk_in10khz); 179 180 adev->pm.current_sclk = adev->clock.default_sclk; 181 adev->pm.current_mclk = adev->clock.default_mclk; 182 183 /* not technically a clock, but... 
*/ 184 adev->mode_info.firmware_flags = 185 le32_to_cpu(firmware_info->v31.firmware_capability); 186 187 ret = 0; 188 } 189 190 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, 191 smu_info); 192 if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, 193 &frev, &crev, &data_offset)) { 194 union smu_info *smu_info = 195 (union smu_info *)(mode_info->atom_context->bios + 196 data_offset); 197 198 /* system clock */ 199 spll->reference_freq = le32_to_cpu(smu_info->v31.core_refclk_10khz); 200 201 spll->reference_div = 0; 202 spll->min_post_div = 1; 203 spll->max_post_div = 1; 204 spll->min_ref_div = 2; 205 spll->max_ref_div = 0xff; 206 spll->min_feedback_div = 4; 207 spll->max_feedback_div = 0xff; 208 spll->best_vco = 0; 209 210 ret = 0; 211 } 212 213 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, 214 umc_info); 215 if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, 216 &frev, &crev, &data_offset)) { 217 union umc_info *umc_info = 218 (union umc_info *)(mode_info->atom_context->bios + 219 data_offset); 220 221 /* memory clock */ 222 mpll->reference_freq = le32_to_cpu(umc_info->v31.mem_refclk_10khz); 223 224 mpll->reference_div = 0; 225 mpll->min_post_div = 1; 226 mpll->max_post_div = 1; 227 mpll->min_ref_div = 2; 228 mpll->max_ref_div = 0xff; 229 mpll->min_feedback_div = 4; 230 mpll->max_feedback_div = 0xff; 231 mpll->best_vco = 0; 232 233 ret = 0; 234 } 235 236 return ret; 237 } 238