author    Sean Christopherson <sean.j.christopherson@intel.com>    2020-01-08 12:24:43 -0800
committer Paolo Bonzini <pbonzini@redhat.com>                      2020-01-27 20:00:06 +0100
commit    83f06fa7a6fd9d5758e5f8438e2137f25f6f2e6b (patch)
tree      a5f95d0f15028314a0d9265bde706a214fd5562f /arch/x86/kvm/mmu/paging_tmpl.h
parent    f9fa2509e5ca8229b4baca295865b542803bf25d (diff)
KVM: x86/mmu: Rely on host page tables to find HugeTLB mappings
Remove KVM's HugeTLB specific logic and instead rely on walking the host
page tables (already done for THP) to identify HugeTLB mappings.
Eliminating the HugeTLB-only logic avoids taking mmap_sem and calling
find_vma() for all hugepage compatible page faults, and simplifies KVM's
page fault code by consolidating all hugepage adjustments into a common
helper.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
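For context before the diff: the consolidated call pattern in the fault
path reads roughly as sketched below. This is an illustrative reduction,
not kernel code; sketch_page_fault() and its stub body are assumptions,
while max_mapping_level() and kvm_mmu_hugepage_adjust() are the helpers
this patch wires into the shadow-MMU path.

/*
 * Simplified sketch of the fault path after this patch. Only the call
 * shape mirrors the diff below; everything else is elided.
 */
static int sketch_page_fault(struct kvm_vcpu *vcpu, gfn_t gfn,
			     kvm_pfn_t pfn, int max_level)
{
	int hlevel;

	/*
	 * A single helper now caps max_level by consulting the host
	 * page tables, covering THP and HugeTLB alike, with no
	 * mmap_sem acquisition and no find_vma() call.
	 */
	max_level = max_mapping_level(vcpu, gfn, max_level);

	/* ... pfn lookup, mmu_lock, mmu_notifier retry, etc. elided ... */

	/*
	 * The per-fault hugepage adjustment returns the final mapping
	 * level instead of filling an out-parameter (&hlevel before
	 * this patch).
	 */
	hlevel = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn);

	return hlevel;
}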
Diffstat (limited to 'arch/x86/kvm/mmu/paging_tmpl.h')
-rw-r--r--  arch/x86/kvm/mmu/paging_tmpl.h | 15 ++++++---------
1 file changed, 6 insertions(+), 9 deletions(-)
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 1ad87f0e19d0..472c32cdf2ff 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -628,14 +628,14 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
  */
 static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
 			struct guest_walker *gw,
-			int write_fault, int hlevel, int max_level,
+			int write_fault, int max_level,
 			kvm_pfn_t pfn, bool map_writable, bool prefault,
 			bool lpage_disallowed)
 {
 	struct kvm_mmu_page *sp = NULL;
 	struct kvm_shadow_walk_iterator it;
 	unsigned direct_access, access = gw->pt_access;
-	int top_level, ret;
+	int top_level, hlevel, ret;
 	gfn_t gfn, base_gfn;
 
 	direct_access = gw->pte_access;
@@ -688,7 +688,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
 	gfn = gw->gfn | ((addr & PT_LVL_OFFSET_MASK(gw->level)) >> PAGE_SHIFT);
 	base_gfn = gfn;
 
-	transparent_hugepage_adjust(vcpu, gw->gfn, max_level, &pfn, &hlevel);
+	hlevel = kvm_mmu_hugepage_adjust(vcpu, gw->gfn, max_level, &pfn);
 
 	trace_kvm_mmu_spte_requested(addr, gw->level, pfn);
 
@@ -790,7 +790,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
 	struct guest_walker walker;
 	int r;
 	kvm_pfn_t pfn;
-	int level;
 	unsigned long mmu_seq;
 	bool map_writable, is_self_change_mapping;
 	bool lpage_disallowed = (error_code & PFERR_FETCH_MASK) &&
@@ -840,9 +839,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
 	else
 		max_level = walker.level;
 
-	level = mapping_level(vcpu, walker.gfn, &max_level);
-	if (level > PT_PAGE_TABLE_LEVEL)
-		walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
+	max_level = max_mapping_level(vcpu, walker.gfn, max_level);
 
 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 	smp_rmb();
@@ -882,8 +879,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
 	kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
 	if (make_mmu_pages_available(vcpu) < 0)
 		goto out_unlock;
-	r = FNAME(fetch)(vcpu, addr, &walker, write_fault, level, max_level,
-			 pfn, map_writable, prefault, lpage_disallowed);
+	r = FNAME(fetch)(vcpu, addr, &walker, write_fault, max_level, pfn,
+			 map_writable, prefault, lpage_disallowed);
 	kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
 
 out_unlock:
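
For reference, the idea that lets HugeTLB fall out of the generic path is
that the host mapping level is visible in the host page tables themselves.
A minimal sketch under stated assumptions: lookup_address_in_mm() is the
walker this series uses on the THP path, gfn_to_hva() is the standard
gfn-to-host-VA lookup, and sketch_host_mapping_level() plus the simplified
error handling are illustrative, not quoted from the kernel.

/*
 * Sketch: derive the host mapping level for a gfn by walking the host
 * page tables, instead of HugeTLB-specific find_vma() checks. The real
 * fault path does this under mmu_lock with mmu_notifier retry
 * protection; that locking is omitted here.
 */
static int sketch_host_mapping_level(struct kvm *kvm, gfn_t gfn)
{
	unsigned long hva = gfn_to_hva(kvm, gfn);	/* host VA backing the gfn */
	unsigned int level;
	pte_t *pte;

	pte = lookup_address_in_mm(kvm->mm, hva, &level);
	if (!pte || !pte_present(*pte))
		return PT_PAGE_TABLE_LEVEL;		/* assume 4K on any doubt */

	/*
	 * The level from the walk (4K=1, 2M=2, 1G=3) lines up with KVM's
	 * PT_*_LEVEL constants, so a HugeTLB backing shows up exactly
	 * like a THP backing: as a large host mapping.
	 */
	return level;
}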