/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
22 * 23 */ 24 25 #include "i915_drv.h" 26 27 void intel_device_info_dump(struct drm_i915_private *dev_priv) 28 { 29 const struct intel_device_info *info = &dev_priv->info; 30 31 DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x", 32 info->gen, 33 dev_priv->drm.pdev->device, 34 dev_priv->drm.pdev->revision); 35 #define PRINT_FLAG(name) \ 36 DRM_DEBUG_DRIVER("i915 device info: " #name ": %s", yesno(info->name)) 37 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG); 38 #undef PRINT_FLAG 39 } 40 41 static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv) 42 { 43 struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu; 44 u32 fuse, eu_dis; 45 46 fuse = I915_READ(CHV_FUSE_GT); 47 48 sseu->slice_mask = BIT(0); 49 50 if (!(fuse & CHV_FGT_DISABLE_SS0)) { 51 sseu->subslice_mask |= BIT(0); 52 eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK | 53 CHV_FGT_EU_DIS_SS0_R1_MASK); 54 sseu->eu_total += 8 - hweight32(eu_dis); 55 } 56 57 if (!(fuse & CHV_FGT_DISABLE_SS1)) { 58 sseu->subslice_mask |= BIT(1); 59 eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK | 60 CHV_FGT_EU_DIS_SS1_R1_MASK); 61 sseu->eu_total += 8 - hweight32(eu_dis); 62 } 63 64 /* 65 * CHV expected to always have a uniform distribution of EU 66 * across subslices. 67 */ 68 sseu->eu_per_subslice = sseu_subslice_total(sseu) ? 69 sseu->eu_total / sseu_subslice_total(sseu) : 70 0; 71 /* 72 * CHV supports subslice power gating on devices with more than 73 * one subslice, and supports EU power gating on devices with 74 * more than one EU pair per subslice. 
75 */ 76 sseu->has_slice_pg = 0; 77 sseu->has_subslice_pg = sseu_subslice_total(sseu) > 1; 78 sseu->has_eu_pg = (sseu->eu_per_subslice > 2); 79 } 80 81 static void gen9_sseu_info_init(struct drm_i915_private *dev_priv) 82 { 83 struct intel_device_info *info = mkwrite_device_info(dev_priv); 84 struct sseu_dev_info *sseu = &info->sseu; 85 int s_max = 3, ss_max = 4, eu_max = 8; 86 int s, ss; 87 u32 fuse2, eu_disable; 88 u8 eu_mask = 0xff; 89 90 fuse2 = I915_READ(GEN8_FUSE2); 91 sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT; 92 93 /* 94 * The subslice disable field is global, i.e. it applies 95 * to each of the enabled slices. 96 */ 97 sseu->subslice_mask = (1 << ss_max) - 1; 98 sseu->subslice_mask &= ~((fuse2 & GEN9_F2_SS_DIS_MASK) >> 99 GEN9_F2_SS_DIS_SHIFT); 100 101 /* 102 * Iterate through enabled slices and subslices to 103 * count the total enabled EU. 104 */ 105 for (s = 0; s < s_max; s++) { 106 if (!(sseu->slice_mask & BIT(s))) 107 /* skip disabled slice */ 108 continue; 109 110 eu_disable = I915_READ(GEN9_EU_DISABLE(s)); 111 for (ss = 0; ss < ss_max; ss++) { 112 int eu_per_ss; 113 114 if (!(sseu->subslice_mask & BIT(ss))) 115 /* skip disabled subslice */ 116 continue; 117 118 eu_per_ss = eu_max - hweight8((eu_disable >> (ss*8)) & 119 eu_mask); 120 121 /* 122 * Record which subslice(s) has(have) 7 EUs. we 123 * can tune the hash used to spread work among 124 * subslices if they are unbalanced. 125 */ 126 if (eu_per_ss == 7) 127 sseu->subslice_7eu[s] |= BIT(ss); 128 129 sseu->eu_total += eu_per_ss; 130 } 131 } 132 133 /* 134 * SKL is expected to always have a uniform distribution 135 * of EU across subslices with the exception that any one 136 * EU in any one subslice may be fused off for die 137 * recovery. BXT is expected to be perfectly uniform in EU 138 * distribution. 139 */ 140 sseu->eu_per_subslice = sseu_subslice_total(sseu) ? 
141 DIV_ROUND_UP(sseu->eu_total, 142 sseu_subslice_total(sseu)) : 0; 143 /* 144 * SKL supports slice power gating on devices with more than 145 * one slice, and supports EU power gating on devices with 146 * more than one EU pair per subslice. BXT supports subslice 147 * power gating on devices with more than one subslice, and 148 * supports EU power gating on devices with more than one EU 149 * pair per subslice. 150 */ 151 sseu->has_slice_pg = 152 (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) && 153 hweight8(sseu->slice_mask) > 1; 154 sseu->has_subslice_pg = 155 IS_BROXTON(dev_priv) && sseu_subslice_total(sseu) > 1; 156 sseu->has_eu_pg = sseu->eu_per_subslice > 2; 157 158 if (IS_BROXTON(dev_priv)) { 159 #define IS_SS_DISABLED(ss) (!(sseu->subslice_mask & BIT(ss))) 160 /* 161 * There is a HW issue in 2x6 fused down parts that requires 162 * Pooled EU to be enabled as a WA. The pool configuration 163 * changes depending upon which subslice is fused down. This 164 * doesn't affect if the device has all 3 subslices enabled. 165 */ 166 /* WaEnablePooledEuFor2x6:bxt */ 167 info->has_pooled_eu = ((hweight8(sseu->subslice_mask) == 3) || 168 (hweight8(sseu->subslice_mask) == 2 && 169 INTEL_REVID(dev_priv) < BXT_REVID_C0)); 170 171 sseu->min_eu_in_pool = 0; 172 if (info->has_pooled_eu) { 173 if (IS_SS_DISABLED(2) || IS_SS_DISABLED(0)) 174 sseu->min_eu_in_pool = 3; 175 else if (IS_SS_DISABLED(1)) 176 sseu->min_eu_in_pool = 6; 177 else 178 sseu->min_eu_in_pool = 9; 179 } 180 #undef IS_SS_DISABLED 181 } 182 } 183 184 static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv) 185 { 186 struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu; 187 const int s_max = 3, ss_max = 3, eu_max = 8; 188 int s, ss; 189 u32 fuse2, eu_disable[3]; /* s_max */ 190 191 fuse2 = I915_READ(GEN8_FUSE2); 192 sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT; 193 /* 194 * The subslice disable field is global, i.e. 
it applies 195 * to each of the enabled slices. 196 */ 197 sseu->subslice_mask = BIT(ss_max) - 1; 198 sseu->subslice_mask &= ~((fuse2 & GEN8_F2_SS_DIS_MASK) >> 199 GEN8_F2_SS_DIS_SHIFT); 200 201 eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK; 202 eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) | 203 ((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) << 204 (32 - GEN8_EU_DIS0_S1_SHIFT)); 205 eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) | 206 ((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) << 207 (32 - GEN8_EU_DIS1_S2_SHIFT)); 208 209 /* 210 * Iterate through enabled slices and subslices to 211 * count the total enabled EU. 212 */ 213 for (s = 0; s < s_max; s++) { 214 if (!(sseu->slice_mask & BIT(s))) 215 /* skip disabled slice */ 216 continue; 217 218 for (ss = 0; ss < ss_max; ss++) { 219 u32 n_disabled; 220 221 if (!(sseu->subslice_mask & BIT(ss))) 222 /* skip disabled subslice */ 223 continue; 224 225 n_disabled = hweight8(eu_disable[s] >> (ss * eu_max)); 226 227 /* 228 * Record which subslices have 7 EUs. 229 */ 230 if (eu_max - n_disabled == 7) 231 sseu->subslice_7eu[s] |= 1 << ss; 232 233 sseu->eu_total += eu_max - n_disabled; 234 } 235 } 236 237 /* 238 * BDW is expected to always have a uniform distribution of EU across 239 * subslices with the exception that any one EU in any one subslice may 240 * be fused off for die recovery. 241 */ 242 sseu->eu_per_subslice = sseu_subslice_total(sseu) ? 243 DIV_ROUND_UP(sseu->eu_total, 244 sseu_subslice_total(sseu)) : 0; 245 246 /* 247 * BDW supports slice power gating on devices with more than 248 * one slice. 249 */ 250 sseu->has_slice_pg = hweight8(sseu->slice_mask) > 1; 251 sseu->has_subslice_pg = 0; 252 sseu->has_eu_pg = 0; 253 } 254 255 /* 256 * Determine various intel_device_info fields at runtime. 
257 * 258 * Use it when either: 259 * - it's judged too laborious to fill n static structures with the limit 260 * when a simple if statement does the job, 261 * - run-time checks (eg read fuse/strap registers) are needed. 262 * 263 * This function needs to be called: 264 * - after the MMIO has been setup as we are reading registers, 265 * - after the PCH has been detected, 266 * - before the first usage of the fields it can tweak. 267 */ 268 void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) 269 { 270 struct intel_device_info *info = mkwrite_device_info(dev_priv); 271 enum pipe pipe; 272 273 /* 274 * Skylake and Broxton currently don't expose the topmost plane as its 275 * use is exclusive with the legacy cursor and we only want to expose 276 * one of those, not both. Until we can safely expose the topmost plane 277 * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported, 278 * we don't expose the topmost plane at all to prevent ABI breakage 279 * down the line. 280 */ 281 if (IS_BROXTON(dev_priv)) { 282 info->num_sprites[PIPE_A] = 2; 283 info->num_sprites[PIPE_B] = 2; 284 info->num_sprites[PIPE_C] = 1; 285 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 286 for_each_pipe(dev_priv, pipe) 287 info->num_sprites[pipe] = 2; 288 } else if (INTEL_GEN(dev_priv) >= 5) { 289 for_each_pipe(dev_priv, pipe) 290 info->num_sprites[pipe] = 1; 291 } 292 293 if (i915.disable_display) { 294 DRM_INFO("Display disabled (module parameter)\n"); 295 info->num_pipes = 0; 296 } else if (info->num_pipes > 0 && 297 (IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) && 298 HAS_PCH_SPLIT(dev_priv)) { 299 u32 fuse_strap = I915_READ(FUSE_STRAP); 300 u32 sfuse_strap = I915_READ(SFUSE_STRAP); 301 302 /* 303 * SFUSE_STRAP is supposed to have a bit signalling the display 304 * is fused off. Unfortunately it seems that, at least in 305 * certain cases, fused off display means that PCH display 306 * reads don't land anywhere. In that case, we read 0s. 
307 * 308 * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK 309 * should be set when taking over after the firmware. 310 */ 311 if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE || 312 sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED || 313 (dev_priv->pch_type == PCH_CPT && 314 !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) { 315 DRM_INFO("Display fused off, disabling\n"); 316 info->num_pipes = 0; 317 } else if (fuse_strap & IVB_PIPE_C_DISABLE) { 318 DRM_INFO("PipeC fused off\n"); 319 info->num_pipes -= 1; 320 } 321 } else if (info->num_pipes > 0 && IS_GEN9(dev_priv)) { 322 u32 dfsm = I915_READ(SKL_DFSM); 323 u8 disabled_mask = 0; 324 bool invalid; 325 int num_bits; 326 327 if (dfsm & SKL_DFSM_PIPE_A_DISABLE) 328 disabled_mask |= BIT(PIPE_A); 329 if (dfsm & SKL_DFSM_PIPE_B_DISABLE) 330 disabled_mask |= BIT(PIPE_B); 331 if (dfsm & SKL_DFSM_PIPE_C_DISABLE) 332 disabled_mask |= BIT(PIPE_C); 333 334 num_bits = hweight8(disabled_mask); 335 336 switch (disabled_mask) { 337 case BIT(PIPE_A): 338 case BIT(PIPE_B): 339 case BIT(PIPE_A) | BIT(PIPE_B): 340 case BIT(PIPE_A) | BIT(PIPE_C): 341 invalid = true; 342 break; 343 default: 344 invalid = false; 345 } 346 347 if (num_bits > info->num_pipes || invalid) 348 DRM_ERROR("invalid pipe fuse configuration: 0x%x\n", 349 disabled_mask); 350 else 351 info->num_pipes -= num_bits; 352 } 353 354 /* Initialize slice/subslice/EU info */ 355 if (IS_CHERRYVIEW(dev_priv)) 356 cherryview_sseu_info_init(dev_priv); 357 else if (IS_BROADWELL(dev_priv)) 358 broadwell_sseu_info_init(dev_priv); 359 else if (INTEL_INFO(dev_priv)->gen >= 9) 360 gen9_sseu_info_init(dev_priv); 361 362 info->has_snoop = !info->has_llc; 363 364 /* Snooping is broken on BXT A stepping. 
*/ 365 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) 366 info->has_snoop = false; 367 368 DRM_DEBUG_DRIVER("slice mask: %04x\n", info->sseu.slice_mask); 369 DRM_DEBUG_DRIVER("slice total: %u\n", hweight8(info->sseu.slice_mask)); 370 DRM_DEBUG_DRIVER("subslice total: %u\n", 371 sseu_subslice_total(&info->sseu)); 372 DRM_DEBUG_DRIVER("subslice mask %04x\n", info->sseu.subslice_mask); 373 DRM_DEBUG_DRIVER("subslice per slice: %u\n", 374 hweight8(info->sseu.subslice_mask)); 375 DRM_DEBUG_DRIVER("EU total: %u\n", info->sseu.eu_total); 376 DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->sseu.eu_per_subslice); 377 DRM_DEBUG_DRIVER("has slice power gating: %s\n", 378 info->sseu.has_slice_pg ? "y" : "n"); 379 DRM_DEBUG_DRIVER("has subslice power gating: %s\n", 380 info->sseu.has_subslice_pg ? "y" : "n"); 381 DRM_DEBUG_DRIVER("has EU power gating: %s\n", 382 info->sseu.has_eu_pg ? "y" : "n"); 383 } 384