@@ -803,13 +803,15 @@ void stage2_unmap_vm(struct kvm *kvm)
 	int idx;
 
 	idx = srcu_read_lock(&kvm->srcu);
+	down_read(&current->mm->mmap_sem);
 	spin_lock(&kvm->mmu_lock);
 
 	slots = kvm_memslots(kvm);
 	kvm_for_each_memslot(memslot, slots)
 		stage2_unmap_memslot(kvm, memslot);
 
 	spin_unlock(&kvm->mmu_lock);
+	up_read(&current->mm->mmap_sem);
 	srcu_read_unlock(&kvm->srcu, idx);
 }
 
@@ -1771,6 +1773,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 	    (KVM_PHYS_SIZE >> PAGE_SHIFT))
 		return -EFAULT;
 
+	down_read(&current->mm->mmap_sem);
 	/*
 	 * A memory region could potentially cover multiple VMAs, and any holes
 	 * between them, so iterate over all of them to find out if we can map
@@ -1814,8 +1817,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 			pa += vm_start - vma->vm_start;
 
 			/* IO region dirty page logging not allowed */
-			if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES)
-				return -EINVAL;
+			if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
+				ret = -EINVAL;
+				goto out;
+			}
 
 			ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
 						    vm_end - vm_start,
@@ -1827,14 +1832,16 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 	} while (hva < reg_end);
 
 	if (change == KVM_MR_FLAGS_ONLY)
-		return ret;
+		goto out;
 
 	spin_lock(&kvm->mmu_lock);
 	if (ret)
 		unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size);
 	else
 		stage2_flush_memslot(kvm, memslot);
 	spin_unlock(&kvm->mmu_lock);
+out:
+	up_read(&current->mm->mmap_sem);
 	return ret;
 }
 
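Both hunks apply the same pattern: take current->mm->mmap_sem for reading before touching the VMA layout, and convert every early return inside the locked region into "ret = ...; goto out;" so the semaphore is always released on the way out. Below is a minimal, self-contained userspace sketch of that single-exit cleanup idiom. It is an illustration only: the pthread rwlock stands in for mmap_sem, and the flag bits and helper names (region_is_io, wants_dirty_logging, prepare_region) are invented for this sketch, not kernel APIs.

/*
 * Sketch of the single-exit cleanup idiom used in the diff above:
 * once a lock is taken, every failure path funnels through one
 * "out:" label so the lock cannot be leaked.  A pthread rwlock
 * stands in for mmap_sem; the kernel code uses down_read()/up_read()
 * on current->mm->mmap_sem instead.
 */
#include <pthread.h>
#include <errno.h>
#include <stdio.h>

static pthread_rwlock_t fake_mmap_sem = PTHREAD_RWLOCK_INITIALIZER;

/* Stand-ins for per-region checks done while the lock is held. */
static int region_is_io(int flags)        { return flags & 1; }
static int wants_dirty_logging(int flags) { return flags & 2; }

static int prepare_region(int flags)
{
	int ret = 0;

	pthread_rwlock_rdlock(&fake_mmap_sem);	/* down_read(mmap_sem) */

	/* An early "return -EINVAL" here would leak the lock, hence goto. */
	if (region_is_io(flags) && wants_dirty_logging(flags)) {
		ret = -EINVAL;
		goto out;
	}

	/* ... do the real work while the layout is stable ... */

out:
	pthread_rwlock_unlock(&fake_mmap_sem);	/* up_read(mmap_sem) */
	return ret;
}

int main(void)
{
	printf("ok:   %d\n", prepare_region(0));  /* 0 */
	printf("fail: %d\n", prepare_region(3));  /* -EINVAL, lock still released */
	return 0;
}

This is why the second hunk rewrites the bare "return -EINVAL" as a braced block ending in "goto out", and why "out:" sits after the mmu_lock critical section but before "return ret": every exit, success or failure, passes through the up_read().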