[Devel] [PATCH rh7] tswap: Add support for zero-filled pages
Kirill Tkhai
ktkhai at virtuozzo.com
Thu Aug 3 12:54:11 MSK 2017
This patch makes tswap avoid allocating a new page if the
swapped-out page is zero-filled, and use the ZERO_PAGE()
pointer to decode it instead.

The same optimization is implemented in zram, and it may help
VMs reduce memory usage.
Signed-off-by: Kirill Tkhai <ktkhai at virtuozzo.com>
---
mm/tswap.c | 65 +++++++++++++++++++++++++++++++++++++++++++++++-------------
1 file changed, 51 insertions(+), 14 deletions(-)
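
As a side note for readers without the tree at hand, the detection itself
is just a word-wise scan of the page. Below is a minimal userspace sketch
of the same idea; buf_is_zero_filled() and the fixed 4096-byte PAGE_SIZE
are illustrative stand-ins, not kernel API:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096	/* illustrative; the kernel macro is per-arch */

/* Userspace analogue of is_zero_filled_page(): scan the page as
 * machine words and stop at the first non-zero one. */
static bool buf_is_zero_filled(const void *page)
{
	const unsigned long *v = page;
	size_t i;

	for (i = 0; i < PAGE_SIZE / sizeof(*v); i++) {
		if (v[i] != 0)
			return false;
	}
	return true;
}

int main(void)
{
	static unsigned long page[PAGE_SIZE / sizeof(unsigned long)];

	printf("zero page:  %d\n", buf_is_zero_filled(page));	/* 1 */
	page[0] = 1;
	printf("dirty page: %d\n", buf_is_zero_filled(page));	/* 0 */
	return 0;
}

Scanning in unsigned long chunks rather than bytes keeps the check cheap,
and non-zero pages typically fail it within the first few words.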
diff --git a/mm/tswap.c b/mm/tswap.c
index 15f5adc2dc9..6a3cb917059 100644
--- a/mm/tswap.c
+++ b/mm/tswap.c
@@ -54,16 +54,20 @@ static void tswap_lru_add(struct page *page)
 {
 	struct tswap_lru *lru = &tswap_lru_node[page_to_nid(page)];
 
-	list_add_tail(&page->lru, &lru->list);
-	lru->nr_items++;
+	if (page != ZERO_PAGE(0)) {
+		list_add_tail(&page->lru, &lru->list);
+		lru->nr_items++;
+	}
 }
 
 static void tswap_lru_del(struct page *page)
 {
 	struct tswap_lru *lru = &tswap_lru_node[page_to_nid(page)];
 
-	list_del(&page->lru);
-	lru->nr_items--;
+	if (page != ZERO_PAGE(0)) {
+		list_del(&page->lru);
+		lru->nr_items--;
+	}
 }
 
 static struct page *tswap_lookup_page(swp_entry_t entry)
@@ -73,7 +77,7 @@ static struct page *tswap_lookup_page(swp_entry_t entry)
 	spin_lock(&tswap_lock);
 	page = radix_tree_lookup(&tswap_page_tree, entry.val);
 	spin_unlock(&tswap_lock);
-	BUG_ON(page && page_private(page) != entry.val);
+	BUG_ON(page && page != ZERO_PAGE(0) && page_private(page) != entry.val);
 	return page;
 }
 
@@ -85,7 +89,8 @@ static int tswap_insert_page(swp_entry_t entry, struct page *page)
 	if (err)
 		return err;
 
-	set_page_private(page, entry.val);
+	if (page != ZERO_PAGE(0))
+		set_page_private(page, entry.val);
 	spin_lock(&tswap_lock);
 	err = radix_tree_insert(&tswap_page_tree, entry.val, page);
 	if (!err) {
@@ -111,7 +116,7 @@ static struct page *tswap_delete_page(swp_entry_t entry, struct page *expected)
 	spin_unlock(&tswap_lock);
 	if (page) {
 		BUG_ON(expected && page != expected);
-		BUG_ON(page_private(page) != entry.val);
+		BUG_ON(page_private(page) != entry.val && page != ZERO_PAGE(0));
 	}
 	return page;
 }
@@ -274,26 +279,57 @@ static void tswap_frontswap_init(unsigned type)
 	 */
 }
 
+static bool is_zero_filled_page(struct page *page)
+{
+	bool zero_filled = true;
+	unsigned long *v;
+	int i;
+
+	v = kmap_atomic(page);
+	for (i = 0; i < PAGE_SIZE / sizeof(*v); i++) {
+		if (v[i] != 0) {
+			zero_filled = false;
+			break;
+		}
+	}
+	kunmap_atomic(v);
+	return zero_filled;
+}
+
 static int tswap_frontswap_store(unsigned type, pgoff_t offset,
 				 struct page *page)
 {
 	swp_entry_t entry = swp_entry(type, offset);
+	int zero_filled = -1, err = 0;
 	struct page *cache_page;
-	int err = 0;
 
 	if (!tswap_active)
 		return -1;
 
 	cache_page = tswap_lookup_page(entry);
-	if (cache_page)
-		goto copy;
+	if (cache_page) {
+		zero_filled = is_zero_filled_page(page);
+		/* If the type of the page has not changed, just reuse it */
+		if (zero_filled == (cache_page == ZERO_PAGE(0)))
+			goto copy;
+		tswap_delete_page(entry, NULL);
+		put_page(cache_page);
+	}
 
 	if (!(current->flags & PF_MEMCG_RECLAIM))
 		return -1;
 
-	cache_page = alloc_page(TSWAP_GFP_MASK | __GFP_HIGHMEM);
-	if (!cache_page)
-		return -1;
+	if (zero_filled == -1)
+		zero_filled = is_zero_filled_page(page);
+
+	if (!zero_filled) {
+		cache_page = alloc_page(TSWAP_GFP_MASK | __GFP_HIGHMEM);
+		if (!cache_page)
+			return -1;
+	} else {
+		cache_page = ZERO_PAGE(0);
+		get_page(cache_page);
+	}
 
 	err = tswap_insert_page(entry, cache_page);
 	if (err) {
@@ -306,7 +342,8 @@ static int tswap_frontswap_store(unsigned type, pgoff_t offset,
 		return -1;
 	}
 copy:
-	copy_highpage(cache_page, page);
+	if (cache_page != ZERO_PAGE(0))
+		copy_highpage(cache_page, page);
 	return 0;
 }
 
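
Note that only the store path changes: the load path can keep copying from
the cached page unchanged, since a copy from ZERO_PAGE(0) yields exactly
the zero-filled contents, and the get_page() taken at store time keeps the
zero page's reference count balanced against the generic put_page() on
release. A toy userspace model of this sentinel-reuse scheme (all names
hypothetical, reference counting elided):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Shared all-zeroes sentinel standing in for ZERO_PAGE(0). */
static const unsigned char zero_page[PAGE_SIZE];

/* Store: keep a private copy only for non-zero pages; zero-filled
 * pages are represented by the shared sentinel. */
static const unsigned char *cache_store(const unsigned char *page)
{
	unsigned char *copy;

	if (memcmp(page, zero_page, PAGE_SIZE) == 0)
		return zero_page;	/* no allocation needed */

	copy = malloc(PAGE_SIZE);
	if (copy)
		memcpy(copy, page, PAGE_SIZE);
	return copy;
}

/* Load: a plain copy works for both cases, because reading the
 * sentinel yields zeroes -- which is why no load-side change is
 * needed in the patch above. */
static void cache_load(unsigned char *dst, const unsigned char *cached)
{
	memcpy(dst, cached, PAGE_SIZE);
}

int main(void)
{
	unsigned char page[PAGE_SIZE] = { 0 }, out[PAGE_SIZE];
	const unsigned char *cached = cache_store(page);

	cache_load(out, cached);
	printf("stored in sentinel: %d\n", cached == zero_page);	/* 1 */
	printf("roundtrip zero ok:  %d\n", out[0] == 0);		/* 1 */

	page[0] = 0xff;
	cached = cache_store(page);
	printf("private copy used:  %d\n", cached != zero_page);	/* 1 */
	free((void *)cached);	/* private copies are freed; the sentinel never is */
	return 0;
}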