[Devel] [PATCH RHEL7 COMMIT] kvm: unlock kvm_lock in case no VMs to shrink
Konstantin Khorenko
khorenko at virtuozzo.com
Tue Dec 24 13:29:58 MSK 2019
The commit is pushed to "branch-rh7-3.10.0-1062.7.1.vz7.130.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-1062.7.1.vz7.130.5
------>
commit c3ad21d014361bb21e170c0244d84edb13d46272
Author: Konstantin Khorenko <khorenko at virtuozzo.com>
Date: Tue Jul 23 16:42:49 2019 +0300
kvm: unlock kvm_lock in case no VMs to shrink
If vm_list is empty, kvm_lock is acquired in mmu_shrink_scan() and
never released; fix this.
https://jira.sw.ru/browse/PSBM-100474
Fixes: bbacd5e44b5b ("kvm: move actual VM memory shrink out of kvm_lock")
https://jira.sw.ru/browse/PSBM-96262
Signed-off-by: Konstantin Khorenko <khorenko at virtuozzo.com>
Reviewed-by: Kirill Tkhai <ktkhai at virtuozzo.com>
---
arch/x86/kvm/mmu.c | 51 ++++++++++++++++++++++++++++++---------------------
1 file changed, 30 insertions(+), 21 deletions(-)
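Note (not part of the commit): the shape of the bug and of the fix, as a
minimal standalone sketch in plain C with pthreads. This is not kernel
code; struct vm, vm_list, the lock and the function names are all made up
for illustration. The buggy shape only unlocks from inside the loop body,
so an empty list leaks the lock; the fixed shape picks a victim under the
lock, unlocks on every path, and does the actual shrink outside the lock.

	#include <pthread.h>
	#include <stddef.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	struct vm { struct vm *next; };
	static struct vm *vm_list;	/* empty list: NULL */

	/* Buggy shape: the only unlock is inside the loop body, so an
	 * empty list leaves the lock held forever. */
	static void shrink_scan_buggy(void)
	{
		pthread_mutex_lock(&lock);
		for (struct vm *v = vm_list; v; v = v->next) {
			pthread_mutex_unlock(&lock);
			/* ... shrink this VM outside the lock ... */
			break;
		}
		/* vm_list empty: loop body never ran, lock still held */
	}

	/* Fixed shape: pick a victim under the lock, then unlock exactly
	 * once on every path before doing the actual work. */
	static void shrink_scan_fixed(void)
	{
		struct vm *victim = NULL;

		pthread_mutex_lock(&lock);
		for (struct vm *v = vm_list; v; v = v->next) {
			victim = v;	/* "found = 1" in the patch */
			break;
		}
		pthread_mutex_unlock(&lock);	/* released on every path */

		if (!victim)
			return;
		/* ... shrink the victim outside the lock ... */
	}

	int main(void)
	{
		shrink_scan_fixed();	/* safe even with an empty list */
		shrink_scan_buggy();	/* leaks the lock; harmless only
					 * because we exit right after */
		return 0;
	}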
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 5cbd5e9f50d80..dc28b530ff512 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -5840,13 +5840,12 @@ mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
struct kvm *kvm, *tmp;
int nr_to_scan = sc->nr_to_scan;
unsigned long freed = 0;
+ int idx, found = 0;
+ LIST_HEAD(invalid_list);

mutex_lock(&kvm_lock);

list_for_each_entry_safe(kvm, tmp, &vm_list, vm_list) {
- int idx;
- LIST_HEAD(invalid_list);
-
/*
* Never scan more than sc->nr_to_scan VM instances.
* Will not hit this condition practically since we do not try
@@ -5854,7 +5853,6 @@ mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
* !n_used_mmu_pages so many times.
*/
if (!nr_to_scan--) {
- mutex_unlock(&kvm_lock);
break;
}

@@ -5883,30 +5881,41 @@ mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
*/
if (!kvm_try_get_kvm(kvm))
continue;

- mutex_unlock(&kvm_lock);
-
- idx = srcu_read_lock(&kvm->srcu);
- spin_lock(&kvm->mmu_lock);
- if (kvm_has_zapped_obsolete_pages(kvm)) {
- kvm_mmu_commit_zap_page(kvm,
- &kvm->arch.zapped_obsolete_pages);
- goto unlock;
- }
+ /*
+ * We have found a VM to shrink; as we shrink only one VM per
+ * function call, break out of the loop and do the actual shrink
+ * outside it.
+ */
+ found = 1;
+ break;
+ }
- if (prepare_zap_oldest_mmu_page(kvm, &invalid_list))
- freed++;
- kvm_mmu_commit_zap_page(kvm, &invalid_list);
+ mutex_unlock(&kvm_lock);
-unlock:
- spin_unlock(&kvm->mmu_lock);
- srcu_read_unlock(&kvm->srcu, idx);
+ /* If no VM was found to shrink, just exit. */
+ if (!found)
+ return freed;
- kvm_put_kvm(kvm);
+ idx = srcu_read_lock(&kvm->srcu);
+ spin_lock(&kvm->mmu_lock);
- break;
+ if (kvm_has_zapped_obsolete_pages(kvm)) {
+ kvm_mmu_commit_zap_page(kvm,
+ &kvm->arch.zapped_obsolete_pages);
+ goto unlock;
}
+ if (prepare_zap_oldest_mmu_page(kvm, &invalid_list))
+ freed++;
+ kvm_mmu_commit_zap_page(kvm, &invalid_list);
+
+unlock:
+ spin_unlock(&kvm->mmu_lock);
+ srcu_read_unlock(&kvm->srcu, idx);
+
+ kvm_put_kvm(kvm);
+
return freed;
}
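Note (not part of the commit): on the design choice the new comment
describes, i.e. shrinking a single VM per call: a shrinker-style caller
typically invokes the scan callback repeatedly while it reports progress,
so per-call work can stay small. A toy model of that contract in plain C;
this is not the kernel's shrinker core, and all names are invented:

	#include <stdio.h>

	static int vms_with_pages = 3;	/* pretend 3 VMs have pages to zap */

	/* Models mmu_shrink_scan(): frees from at most one VM per call
	 * and reports how much was freed. */
	static unsigned long scan_once(void)
	{
		if (!vms_with_pages)
			return 0;	/* nothing left to shrink */
		vms_with_pages--;
		return 1;		/* freed pages of exactly one VM */
	}

	int main(void)
	{
		unsigned long freed, total = 0;

		/* Caller keeps going while the callback makes progress,
		 * so shrinking one VM per call is enough. */
		while ((freed = scan_once()) > 0)
			total += freed;

		printf("freed %lu batches\n", total);
		return 0;
	}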