[Devel] [PATCH rh7 v2 1/7] mm: vmscan: pass memcg to get_scan_count()

Konstantin Khorenko khorenko at virtuozzo.com
Tue Feb 26 19:29:07 MSK 2019


Temporarily reverted the first 6 patches of this patchset in vz7.83.16,
until the crash is fixed: https://jira.sw.ru/browse/PSBM-91933

--
Best regards,

Konstantin Khorenko,
Virtuozzo Linux Kernel Team

On 02/19/2019 11:29 AM, Andrey Ryabinin wrote:
> From: Vladimir Davydov <vdavydov at virtuozzo.com>
>
> memcg will come in handy in get_scan_count().  It can already be used for
> getting swappiness immediately in get_scan_count() instead of passing it
> around.  The following patches will add more memcg-related values, which
> will be used there.
>
> Signed-off-by: Vladimir Davydov <vdavydov at virtuozzo.com>
> Acked-by: Johannes Weiner <hannes at cmpxchg.org>
> Cc: Michal Hocko <mhocko at kernel.org>
> Signed-off-by: Andrew Morton <akpm at linux-foundation.org>
> Signed-off-by: Linus Torvalds <torvalds at linux-foundation.org>
> (cherry picked from commit 3337767850b490eec5ca822f871241c981664737)
> Signed-off-by: Andrey Ryabinin <aryabinin at virtuozzo.com>
> ---
>  mm/vmscan.c | 32 ++++++++++++++------------------
>  1 file changed, 14 insertions(+), 18 deletions(-)
>
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index 54555dcf7209..6ade994f23ec 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -103,9 +103,6 @@ struct scan_control {
>  	/* Reclaim only slab */
>  	bool slab_only;
>
> -	/* anon vs. file LRUs scanning "ratio" */
> -	int swappiness;
> -
>  	/*
>  	 * The memory cgroup that hit its limit and as a result is the
>  	 * primary target of this reclaim invocation.
> @@ -2115,9 +2112,12 @@ enum scan_balance {
>   * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan
>   * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan
>   */
> -static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
> -			   unsigned long *nr, unsigned long *lru_pages)
> +
> +static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
> +			   struct scan_control *sc, unsigned long *nr,
> +			   unsigned long *lru_pages)
>  {
> +	int swappiness = mem_cgroup_swappiness(memcg);
>  	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
>  	u64 fraction[2];
>  	u64 denominator = 0;	/* gcc */
> @@ -2159,7 +2159,7 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
>  	 * using the memory controller's swap limit feature would be
>  	 * too expensive.
>  	 */
> -	if (!global_reclaim(sc) && !sc->swappiness) {
> +	if (!global_reclaim(sc) && !swappiness) {
>  		scan_balance = SCAN_FILE;
>  		goto out;
>  	}
> @@ -2169,7 +2169,7 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
>  	 * system is close to OOM, scan both anon and file equally
>  	 * (unless the swappiness setting disagrees with swapping).
>  	 */
> -	if (!sc->priority && sc->swappiness) {
> +	if (!sc->priority && swappiness) {
>  		scan_balance = SCAN_EQUAL;
>  		goto out;
>  	}
> @@ -2216,7 +2216,7 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
>  	 * With swappiness at 100, anonymous and file have the same priority.
>  	 * This scanning priority is essentially the inverse of IO cost.
>  	 */
> -	anon_prio = sc->swappiness;
> +	anon_prio = swappiness;
>  	file_prio = 200 - anon_prio;
>
>  	/*
> @@ -2318,9 +2318,10 @@ static inline void init_tlb_ubc(void)
>  /*
>   * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
>   */
> -static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc,
> -			  unsigned long *lru_pages)
> +static void shrink_zone_memcg(struct zone *zone, struct mem_cgroup *memcg,
> +			      struct scan_control *sc, unsigned long *lru_pages)
>  {
> +	struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
>  	unsigned long nr[NR_LRU_LISTS];
>  	unsigned long targets[NR_LRU_LISTS];
>  	unsigned long nr_to_scan;
> @@ -2330,7 +2331,7 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc,
>  	struct blk_plug plug;
>  	bool scan_adjusted;
>
> -	get_scan_count(lruvec, sc, nr, lru_pages);
> +	get_scan_count(lruvec, memcg, sc, nr, lru_pages);
>
>  	/* Record the original scan target for proportional adjustments later */
>  	memcpy(targets, nr, sizeof(nr));
> @@ -2541,7 +2542,6 @@ static void shrink_zone(struct zone *zone, struct scan_control *sc,
>  		memcg = mem_cgroup_iter(root, NULL, &reclaim);
>  		do {
>  			unsigned long lru_pages, scanned;
> -			struct lruvec *lruvec;
>
>  			if (!sc->may_thrash && mem_cgroup_low(root, memcg))
>  				continue;
> @@ -2549,9 +2549,7 @@ static void shrink_zone(struct zone *zone, struct scan_control *sc,
>  			scanned = sc->nr_scanned;
>
>  			if (!slab_only) {
> -				lruvec = mem_cgroup_zone_lruvec(zone, memcg);
> -				sc->swappiness = mem_cgroup_swappiness(memcg);
> -				shrink_lruvec(lruvec, sc, &lru_pages);
> +				shrink_zone_memcg(zone, memcg, sc, &lru_pages);
>  				zone_lru_pages += lru_pages;
>  			}
>
> @@ -3124,11 +3122,9 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
>  		.may_swap = !noswap,
>  		.order = 0,
>  		.priority = 0,
> -		.swappiness = mem_cgroup_swappiness(memcg),
>  		.target_mem_cgroup = memcg,
>  		.stat = &stat,
>  	};
> -	struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
>  	unsigned long lru_pages;
>
>  	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
> @@ -3145,7 +3141,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
>  	 * will pick up pages from other mem cgroup's as well. We hack
>  	 * the priority and make it zero.
>  	 */
> -	shrink_lruvec(lruvec, &sc, &lru_pages);
> +	shrink_zone_memcg(zone, memcg, &sc, &lru_pages);
>
>  	trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
>
>
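To make the shape of the change easy to see outside the diff, here is a
standalone sketch of the pattern (simplified stand-ins for the kernel
structures and for mem_cgroup_swappiness(); not the actual vmscan code).
Before the patch, callers stored mem_cgroup_swappiness(memcg) in
sc->swappiness and get_scan_count() read it back; after the patch,
get_scan_count() takes the memcg and derives swappiness itself, so the
follow-up patches can pull further per-memcg values the same way:

/*
 * Simplified sketch of the refactor, not the actual kernel code.
 */
#include <stdio.h>

struct mem_cgroup {
	int swappiness;		/* per-cgroup anon vs. file bias, 0..100 */
};

/* stand-in for the kernel's mem_cgroup_swappiness() */
static int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
	return memcg->swappiness;
}

/* after the patch: swappiness is derived from the memcg argument */
static void get_scan_count(struct mem_cgroup *memcg)
{
	int swappiness = mem_cgroup_swappiness(memcg);
	int anon_prio = swappiness;		/* anon scan priority */
	int file_prio = 200 - anon_prio;	/* file scan priority */

	printf("swappiness=%d -> anon_prio=%d file_prio=%d\n",
	       swappiness, anon_prio, file_prio);
}

int main(void)
{
	struct mem_cgroup memcg = { .swappiness = 60 };	/* kernel default */

	get_scan_count(&memcg);	/* prints anon_prio=60 file_prio=140 */
	return 0;
}

With the default swappiness of 60 this yields anon_prio = 60 and
file_prio = 140; at swappiness 100 the two are equal, matching the
"same priority" comment in the hunk above.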


