[Devel] [PATCH vz8 7/7] kvm: unlock kvm_lock in case no VMs to shrink

Konstantin Khorenko khorenko at virtuozzo.com
Thu Jun 10 16:36:28 MSK 2021


This patch has been merged into
[PATCH vz8 5/7] kvm: move actual VM memory shrink out of kvm_lock
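
For reference, the bug being fixed: in the pre-fix code every
mutex_unlock(&kvm_lock) sat inside the search loop, so if vm_list was
empty the lock was taken and never dropped. A simplified sketch of the
broken shape (condensed from the quoted diff below, details elided):

	mutex_lock(&kvm_lock);

	list_for_each_entry_safe(kvm, tmp, &vm_list, vm_list) {
		if (!nr_to_scan--) {
			mutex_unlock(&kvm_lock);
			break;
		}
		/* ... skip VMs with nothing to shrink ... */
		if (!kvm_try_get_kvm(kvm))
			continue;
		mutex_unlock(&kvm_lock);
		/* ... shrink this VM, then kvm_put_kvm(kvm) ... */
		break;
	}
	/*
	 * BUG: with an empty vm_list (or if every VM is skipped via
	 * "continue") the loop never reaches a mutex_unlock(), and we
	 * return with kvm_lock still held.
	 */
	return freed;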

--
Best regards,

Konstantin Khorenko,
Virtuozzo Linux Kernel Team

On 06/08/2021 08:58 PM, Valeriy Vdovin wrote:
> From: Konstantin Khorenko <khorenko at virtuozzo.com>
>
> If vm_list is empty, kvm_lock is acquired in mmu_shrink_scan() and
> never released; fix this.
>
> https://jira.sw.ru/browse/PSBM-100474
>
> Fixes: bbacd5e44b5b ("kvm: move actual VM memory shrink out of kvm_lock")
> https://jira.sw.ru/browse/PSBM-96262
>
> Signed-off-by: Konstantin Khorenko <khorenko at virtuozzo.com>
> Reviewed-by: Kirill Tkhai <ktkhai at virtuozzo.com>
> (cherry-picked from c3ad21d014361bb21e170c0244d84edb13d46272)
> https://jira.sw.ru/browse/PSBM-127849
> Signed-off-by: Valeriy Vdovin <valeriy.vdovin at virtuozzo.com>
> ---
>  arch/x86/kvm/mmu/mmu.c | 49 +++++++++++++++++++++++++-----------------
>  1 file changed, 29 insertions(+), 20 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 79ec523a29c9..474f441711ea 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -6077,12 +6077,12 @@ mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
>  	struct kvm *kvm, *tmp;
>  	int nr_to_scan = sc->nr_to_scan;
>  	unsigned long freed = 0;
> +	int idx, found = 0;
> +	LIST_HEAD(invalid_list);
>
>  	mutex_lock(&kvm_lock);
>
>  	list_for_each_entry_safe(kvm, tmp, &vm_list, vm_list) {
> -		int idx;
> -		LIST_HEAD(invalid_list);
>
>  		/*
>  		 * Never scan more than sc->nr_to_scan VM instances.
> @@ -6091,7 +6091,6 @@ mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
>  		 * !n_used_mmu_pages so many times.
>  		 */
>  		if (!nr_to_scan--) {
> -			mutex_unlock(&kvm_lock);
>  			break;
>  		}
>
> @@ -6120,29 +6119,39 @@ mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
>  		 */
>  		if (!kvm_try_get_kvm(kvm))
>  			continue;
> -		mutex_unlock(&kvm_lock);
> +		/*
> +		 * We found a VM to shrink, and as we shrink only one VM per
> +		 * function call, break out of the loop and do the actual
> +		 * shrink outside of it.
> +		 */
> +		found = 1;
> +		break;
> +	}
>
> -		idx = srcu_read_lock(&kvm->srcu);
> -		spin_lock(&kvm->mmu_lock);
> +	mutex_unlock(&kvm_lock);
>
> -		if (kvm_has_zapped_obsolete_pages(kvm)) {
> -			kvm_mmu_commit_zap_page(kvm,
> -			      &kvm->arch.zapped_obsolete_pages);
> -			goto unlock;
> -		}
> +	/* If no VM to shrink was found, just exit. */
> +	if (!found)
> +		return freed;
>
> -		if (prepare_zap_oldest_mmu_page(kvm, &invalid_list))
> -			freed++;
> -		kvm_mmu_commit_zap_page(kvm, &invalid_list);
> +	idx = srcu_read_lock(&kvm->srcu);
> +	spin_lock(&kvm->mmu_lock);
>
> -unlock:
> -		spin_unlock(&kvm->mmu_lock);
> -		srcu_read_unlock(&kvm->srcu, idx);
> +	if (kvm_has_zapped_obsolete_pages(kvm)) {
> +		kvm_mmu_commit_zap_page(kvm,
> +					&kvm->arch.zapped_obsolete_pages);
> +		goto unlock;
> +	}
>
> -		kvm_put_kvm(kvm);
> +	if (prepare_zap_oldest_mmu_page(kvm, &invalid_list))
> +		freed++;
> +	kvm_mmu_commit_zap_page(kvm, &invalid_list);
>
> -		break;
> -	}
> +unlock:
> +	spin_unlock(&kvm->mmu_lock);
> +	srcu_read_unlock(&kvm->srcu, idx);
> +
> +	kvm_put_kvm(kvm);
>
>  	return freed;
>  }
>
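
After the patch, the loop only selects and pins a victim VM; there is a
single mutex_unlock(&kvm_lock) after the loop, so the mutex is released
on every path, including an empty vm_list. The shrink itself runs on a
VM pinned via kvm_try_get_kvm(), outside kvm_lock. The resulting
pattern, sketched (simplified, see the quoted diff for the real code):

	mutex_lock(&kvm_lock);
	list_for_each_entry_safe(kvm, tmp, &vm_list, vm_list) {
		if (!nr_to_scan--)
			break;
		/* ... skip VMs with nothing to shrink ... */
		if (!kvm_try_get_kvm(kvm))	/* pin before dropping the lock */
			continue;
		found = 1;
		break;
	}
	mutex_unlock(&kvm_lock);	/* single unlock point, hit on all paths */

	if (!found)
		return freed;

	/* shrink the pinned VM without holding kvm_lock, then unpin it */
	...
	kvm_put_kvm(kvm);

	return freed;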

