--- mtrr.c (ebda79e5057778be1ad8ed072e4229894dfc66b7)
+++ mtrr.c (9ae38b4fb13597ce1821376d23958bbe4976c759)
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * vMTRR implementation
  *
  * Copyright (C) 2006 Qumranet, Inc.
  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
  * Copyright(C) 2015 Intel Corporation.
  *
--- 17 unchanged lines hidden ---
 #define IA32_MTRR_DEF_TYPE_TYPE_MASK	(0xff)
 
 static bool is_mtrr_base_msr(unsigned int msr)
 {
 	/* MTRR base MSRs use even numbers, masks use odd numbers. */
 	return !(msr & 0x1);
 }
 
+static struct kvm_mtrr_range *var_mtrr_msr_to_range(struct kvm_vcpu *vcpu,
+						    unsigned int msr)
+{
+	int index = (msr - 0x200) / 2;
+
+	return &vcpu->arch.mtrr_state.var_ranges[index];
+}
+
 static bool msr_mtrr_valid(unsigned msr)
 {
 	switch (msr) {
 	case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
 	case MSR_MTRRfix64K_00000:
 	case MSR_MTRRfix16K_80000:
 	case MSR_MTRRfix16K_A0000:
 	case MSR_MTRRfix4K_C0000:
--- 267 unchanged lines hidden ---
 	 */
 	*end = (*start | ~mask) + 1;
 }
 
 static void update_mtrr(struct kvm_vcpu *vcpu, u32 msr)
 {
 	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
 	gfn_t start, end;
-	int index;
 
 	if (msr == MSR_IA32_CR_PAT || !tdp_enabled ||
 	    !kvm_arch_has_noncoherent_dma(vcpu->kvm))
 		return;
 
 	if (!mtrr_is_enabled(mtrr_state) && msr != MSR_MTRRdefType)
 		return;
 
 	/* fixed MTRRs. */
 	if (fixed_msr_to_range(msr, &start, &end)) {
 		if (!fixed_mtrr_is_enabled(mtrr_state))
 			return;
 	} else if (msr == MSR_MTRRdefType) {
 		start = 0x0;
 		end = ~0ULL;
 	} else {
 		/* variable range MTRRs. */
-		index = (msr - 0x200) / 2;
-		var_mtrr_range(&mtrr_state->var_ranges[index], &start, &end);
+		var_mtrr_range(var_mtrr_msr_to_range(vcpu, msr), &start, &end);
 	}
 
 	kvm_zap_gfn_range(vcpu->kvm, gpa_to_gfn(start), gpa_to_gfn(end));
 }
 
 static bool var_mtrr_range_is_valid(struct kvm_mtrr_range *range)
 {
 	return (range->mask & (1 << 11)) != 0;
 }
 
 static void set_var_mtrr_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 {
 	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
 	struct kvm_mtrr_range *tmp, *cur;
-	int index;
 
-	index = (msr - 0x200) / 2;
-	cur = &mtrr_state->var_ranges[index];
+	cur = var_mtrr_msr_to_range(vcpu, msr);
 
 	/* remove the entry if it's in the list. */
 	if (var_mtrr_range_is_valid(cur))
-		list_del(&mtrr_state->var_ranges[index].node);
+		list_del(&cur->node);
 
 	/*
 	 * Set all illegal GPA bits in the mask, since those bits must
 	 * implicitly be 0.  The bits are then cleared when reading them.
 	 */
 	if (is_mtrr_base_msr(msr))
 		cur->base = data;
 	else
--- 51 unchanged lines hidden ---
 	index = fixed_msr_to_range_index(msr);
 	if (index >= 0)
 		*pdata = *(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index];
 	else if (msr == MSR_MTRRdefType)
 		*pdata = vcpu->arch.mtrr_state.deftype;
 	else if (msr == MSR_IA32_CR_PAT)
 		*pdata = vcpu->arch.pat;
 	else {	/* Variable MTRRs */
-		index = (msr - 0x200) / 2;
 		if (is_mtrr_base_msr(msr))
-			*pdata = vcpu->arch.mtrr_state.var_ranges[index].base;
+			*pdata = var_mtrr_msr_to_range(vcpu, msr)->base;
 		else
-			*pdata = vcpu->arch.mtrr_state.var_ranges[index].mask;
+			*pdata = var_mtrr_msr_to_range(vcpu, msr)->mask;
 
 		*pdata &= ~kvm_vcpu_reserved_gpa_bits_raw(vcpu);
 	}
 
 	return 0;
 }
 
 void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu)
--- 286 unchanged lines hidden ---
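
The diff above is a pure refactor: the open-coded "index = (msr - 0x200) / 2" lookups in update_mtrr(), set_var_mtrr_msr() and the variable-MTRR branch of the MSR read path are replaced by the new var_mtrr_msr_to_range() helper. The arithmetic works because the variable-range MTRR MSRs are laid out as base/mask pairs starting at 0x200 (MTRRphysBase0, MTRRphysMask0, MTRRphysBase1, ...), so the offset divided by two gives the range index and bit 0 distinguishes base from mask, which is what is_mtrr_base_msr() tests. Below is a minimal standalone sketch of that mapping, written as user-space C for illustration only, not kernel code; the MTRR_PHYS_BASE_MSR/MTRR_PHYS_MASK_MSR macro names are made up here.

/*
 * Standalone illustration (not kernel code) of the MSR-to-range mapping
 * that var_mtrr_msr_to_range() encapsulates: variable-range MTRR MSRs
 * come in base/mask pairs starting at 0x200, so (msr - 0x200) / 2 picks
 * the range and bit 0 picks base vs. mask.
 */
#include <stdio.h>

#define MTRR_PHYS_BASE_MSR(i)	(0x200 + 2 * (i))	/* hypothetical name for MTRRphysBaseN */
#define MTRR_PHYS_MASK_MSR(i)	(0x201 + 2 * (i))	/* hypothetical name for MTRRphysMaskN */

static int var_mtrr_index(unsigned int msr)
{
	return (msr - 0x200) / 2;
}

static int is_base_msr(unsigned int msr)
{
	/* Base MSRs are even, mask MSRs are odd. */
	return !(msr & 0x1);
}

int main(void)
{
	unsigned int msr;

	for (msr = MTRR_PHYS_BASE_MSR(0); msr <= MTRR_PHYS_MASK_MSR(2); msr++)
		printf("MSR 0x%x -> var range %d (%s)\n",
		       msr, var_mtrr_index(msr),
		       is_base_msr(msr) ? "base" : "mask");

	return 0;
}

Run on its own, this prints MSRs 0x200 through 0x205 mapped to ranges 0 through 2, alternating base and mask, which is exactly the lookup the kernel helper now centralizes in one place.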