KVM: pass kvm_memory_slot to gfn_to_page_many_atomic
The memory slot is already available from gfn_to_memslot_dirty_bitmap.
Isn't it a shame to look it up again?  Plus, it makes
gfn_to_page_many_atomic agnostic of multiple VCPU address spaces.

Reviewed-by: Radim Krcmar <rkrcmar@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent f36f3f2846
commit d9ef13c2b3
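For context on "already available": on this path the fault handler has just
called gfn_to_memslot_dirty_bitmap(), which itself walks the memslots.  A
rough sketch of that helper, paraphrased from kernels of this era rather
than quoted from this tree:

	/*
	 * Sketch of the pre-existing helper (paraphrased): it already
	 * resolves the memslot for this gfn, rejecting invalid slots and,
	 * when dirty logging must be avoided, slots that have a dirty
	 * bitmap.  Its return value is exactly what the reworked
	 * gfn_to_page_many_atomic() below wants to receive.
	 */
	static struct kvm_memory_slot *
	gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
				    bool no_dirty_log)
	{
		struct kvm_memory_slot *slot;

		slot = gfn_to_memslot(vcpu->kvm, gfn);
		if (!slot || slot->flags & KVM_MEMSLOT_INVALID ||
		    (no_dirty_log && slot->dirty_bitmap))
			slot = NULL;

		return slot;
	}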
arch/x86/kvm/mmu.c
@@ -2728,15 +2728,17 @@ static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
 				    u64 *start, u64 *end)
 {
 	struct page *pages[PTE_PREFETCH_NUM];
+	struct kvm_memory_slot *slot;
 	unsigned access = sp->role.access;
 	int i, ret;
 	gfn_t gfn;
 
 	gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt);
-	if (!gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK))
+	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK);
+	if (!slot)
 		return -1;
 
-	ret = gfn_to_page_many_atomic(vcpu->kvm, gfn, pages, end - start);
+	ret = gfn_to_page_many_atomic(slot, gfn, pages, end - start);
 	if (ret <= 0)
 		return -1;
 
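Note the shape of the change above: the slot returned by
gfn_to_memslot_dirty_bitmap() is kept in a local rather than being discarded
after the NULL test, and the same slot is then handed to
gfn_to_page_many_atomic(), so the memslot walk happens once per prefetch
instead of twice.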
include/linux/kvm_host.h
@@ -526,8 +526,8 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm);
 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 				   struct kvm_memory_slot *slot);
 
-int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
-			    int nr_pages);
+int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
+			    struct page **pages, int nr_pages);
 
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
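With the new prototype, every caller follows the same slot-first pattern.  A
minimal hypothetical sketch (prefetch_pages() is an illustrative name, not a
function in this tree, and gfn_to_memslot_dirty_bitmap() is assumed visible
to the caller):

	/*
	 * Hypothetical usage sketch: resolve the memslot once, check it,
	 * then reuse it for the atomic multi-page lookup instead of
	 * letting the helper redo gfn_to_memslot() internally.
	 */
	static int prefetch_pages(struct kvm_vcpu *vcpu, gfn_t gfn,
				  struct page **pages, int nr_pages)
	{
		struct kvm_memory_slot *slot;

		slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, false);
		if (!slot)
			return -1;

		return gfn_to_page_many_atomic(slot, gfn, pages, nr_pages);
	}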
virt/kvm/kvm_main.c
@@ -1428,13 +1428,13 @@ pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);
 
-int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
-			    int nr_pages)
+int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
+			    struct page **pages, int nr_pages)
 {
 	unsigned long addr;
 	gfn_t entry;
 
-	addr = gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, &entry);
+	addr = gfn_to_hva_many(slot, gfn, &entry);
 	if (kvm_is_error_hva(addr))
 		return -1;
 
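The hunk ends before the function's tail; in kernels of this vintage the
body continues roughly as below (paraphrased, not part of this diff).
gfn_to_hva_many() also reports through 'entry' how many pages remain in the
slot starting at gfn, so the contiguous host range can be pinned in one
fast pass:

	/* Approximate continuation, paraphrased from kernels of this era. */
	if (entry < nr_pages)
		return 0;

	return __get_user_pages_fast(addr, nr_pages, 1, pages);
	}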