[Devel] [PATCH VZ10 3/8] mm/memcontrol: remove excess folio get in memcg_numa_isolate_pages()
Pavel Tikhomirov
ptikhomirov at virtuozzo.com
Tue Sep 2 13:59:13 MSK 2025
Don't take an excess page/folio reference. Before this patch we had
folio_get_nontail_page() + get_page_unless_zero(); we don't need this
double reference, similar to how we don't need it in
isolate_lru_folios().
Now we only take a reference if we are really trying to isolate the
folio. We also check whether our isolation is successful and, if not,
we release the reference, similar to isolate_lru_folios().
https://virtuozzo.atlassian.net/browse/VSTOR-114298
Fixes: c92459bc18307 ("mm: memcontrol: add memory.numa_migrate file")
Signed-off-by: Pavel Tikhomirov <ptikhomirov at virtuozzo.com>
---
mm/memcontrol.c | 32 ++++++++++----------------------
1 file changed, 10 insertions(+), 22 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 4a2994eb6e377..2c707716d0557 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3535,23 +3535,13 @@ static long memcg_numa_isolate_pages(struct lruvec *lruvec, enum lru_list lru,
* like it's done in isolate_migratepages_block() in commit
* 89f6c88a6ab4 ("mm: __isolate_lru_page_prepare() in
* isolate_migratepages_block()")
- */
-
- /*
+ *
* The code block below is taken from
* isolate_migratepages_block(), mode variable is defined and
* set to ISOLATE_ASYNC_MIGRATE in order to keep the original
* code unchanged where possible.
*/
-
- /*
- * Be careful not to clear PageLRU until after we're
- * sure the page is not being freed elsewhere -- the
- * page release code relies on it.
- */
- folio = folio_get_nontail_page(page);
- if (unlikely(!folio))
- goto isolate_fail;
+ folio = page_folio(page);
/*
* Migration will fail if an anonymous page is pinned in memory,
@@ -3560,18 +3550,18 @@ static long memcg_numa_isolate_pages(struct lruvec *lruvec, enum lru_list lru,
*/
mapping = folio_mapping(folio);
if (!mapping && (folio_ref_count(folio) - 1) > folio_mapcount(folio))
- goto isolate_fail_put;
+ goto isolate_fail;
/* Only take pages on LRU: a check now makes later tests safe */
if (!folio_test_lru(folio))
- goto isolate_fail_put;
+ goto isolate_fail;
is_unevictable = folio_test_unevictable(folio);
/* Compaction might skip unevictable pages but CMA takes them */
/* if (is_unevictable) */
if (!(mode & ISOLATE_UNEVICTABLE) && is_unevictable)
- goto isolate_fail_put;
+ goto isolate_fail;
/*
* To minimise LRU disruption, the caller can indicate with
@@ -3580,7 +3570,7 @@ static long memcg_numa_isolate_pages(struct lruvec *lruvec, enum lru_list lru,
* for the most part. PageWriteback would require blocking.
*/
if ((mode & ISOLATE_ASYNC_MIGRATE) && folio_test_writeback(folio))
- goto isolate_fail_put;
+ goto isolate_fail;
is_dirty = folio_test_dirty(folio);
@@ -3609,7 +3599,7 @@ static long memcg_numa_isolate_pages(struct lruvec *lruvec, enum lru_list lru,
* wasted cycles.
*/
if (!folio_trylock(folio))
- goto isolate_fail_put;
+ goto isolate_fail;
mapping = folio_mapping(folio);
if ((mode & ISOLATE_ASYNC_MIGRATE) && is_dirty) {
@@ -3619,7 +3609,7 @@ static long memcg_numa_isolate_pages(struct lruvec *lruvec, enum lru_list lru,
is_inaccessible = mapping && mapping_inaccessible(mapping);
folio_unlock(folio);
if (!migrate_dirty || is_inaccessible)
- goto isolate_fail_put;
+ goto isolate_fail;
}
/*
@@ -3628,20 +3618,18 @@ static long memcg_numa_isolate_pages(struct lruvec *lruvec, enum lru_list lru,
* page release code relies on it.
*/
if (unlikely(!get_page_unless_zero(page)))
- goto isolate_fail_put;
+ goto isolate_fail;
if (!TestClearPageLRU(page)) {
/* Another thread is already isolating this page */
put_page(page);
- goto isolate_fail_put;
+ goto isolate_fail;
}
nr_pages = thp_nr_pages(page);
taken += nr_pages;
nr_zone_taken[page_zonenum(page)] += nr_pages;
move_to = dst;
-isolate_fail_put:
- folio_put(folio);
isolate_fail:
list_move(&page->lru, move_to);
}
--
2.50.1
More information about the Devel
mailing list