[Devel] [PATCH rh7 7/8] radix: add very exceptional entries
Andrey Ryabinin
aryabinin at virtuozzo.com
Tue Feb 12 18:39:14 MSK 2019
Add very exceptional entries and use them instead of simple exceptional
entries for shadow entries. We need this for tracking refaults of anon pages.
Common exceptional entries are already used for swapped shmem pages, so we
need something else.
https://pmc.acronis.com/browse/VSTOR-19037
Signed-off-by: Andrey Ryabinin <aryabinin at virtuozzo.com>
---
fs/proc/task_mmu.c | 3 ++-
include/linux/radix-tree.h | 15 ++++++++++++++-
mm/memcontrol.c | 3 ++-
mm/mincore.c | 3 ++-
mm/shmem.c | 12 ++++++++----
mm/workingset.c | 4 ++--
6 files changed, 30 insertions(+), 10 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 12fd9d6261ef..deb52a267590 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -518,7 +518,8 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
if (!page)
return;
- if (radix_tree_exceptional_entry(page))
+ if (radix_tree_exceptional_entry(page) &&
+ !radix_tree_very_exceptional_entry(page))
mss->swap += PAGE_SIZE;
else
page_cache_release(page);
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index bc416a818d2c..e2ab68baff1f 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -35,7 +35,7 @@
* 00 - data pointer
* 01 - internal entry
* 10 - exceptional entry
- * 11 - this bit combination is currently unused/reserved
+ * 11 - very exceptional entry
*
* The internal entry may be a pointer to the next level in the tree, a
* sibling entry, or an indicator that the entry in this slot has been moved
@@ -54,6 +54,7 @@
* EXCEPTIONAL_ENTRY tests the bit, EXCEPTIONAL_SHIFT shifts content past it.
*/
#define RADIX_TREE_EXCEPTIONAL_ENTRY 2
+#define RADIX_TREE_VERY_EXCEPTIONAL_ENTRY 3
#define RADIX_TREE_EXCEPTIONAL_SHIFT 2
static inline bool radix_tree_is_internal_node(void *ptr)
@@ -251,6 +252,18 @@ static inline int radix_tree_exceptional_entry(void *arg)
return (unsigned long)arg & RADIX_TREE_EXCEPTIONAL_ENTRY;
}
+/**
+ * radix_tree_very_exceptional_entry - radix_tree_deref_slot gave very exceptional entry?
+ * @arg: value returned by radix_tree_deref_slot
+ * Returns: non-0 if very exceptional entry, 0 for a well-aligned pointer or a plain exceptional entry.
+ */
+static inline int radix_tree_very_exceptional_entry(void *arg)
+{
+ /* Not unlikely because radix_tree_exception often tested first */
+ return ((unsigned long)arg & RADIX_TREE_ENTRY_MASK) ==
+ RADIX_TREE_VERY_EXCEPTIONAL_ENTRY;
+}
+
/**
* radix_tree_exception - radix_tree_deref_slot returned either exception?
* @arg: value returned by radix_tree_deref_slot
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index b6b3edee80db..5170d97df131 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6716,7 +6716,8 @@ static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
/* shmem/tmpfs may report page out on swap: account for that too. */
if (shmem_mapping(mapping)) {
page = __find_get_page(mapping, pgoff);
- if (radix_tree_exceptional_entry(page)) {
+ if (radix_tree_exceptional_entry(page) &&
+ !radix_tree_very_exceptional_entry(page)) {
swp_entry_t swp = radix_to_swp_entry(page);
if (do_swap_account)
*entry = swp;
diff --git a/mm/mincore.c b/mm/mincore.c
index a6374c866557..4aae5290e07d 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -77,7 +77,8 @@ static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
* shmem/tmpfs may return swap: account for swapcache
* page too.
*/
- if (radix_tree_exceptional_entry(page)) {
+ if (radix_tree_exceptional_entry(page) &&
+ !radix_tree_very_exceptional_entry(page)) {
swp_entry_t swp = radix_to_swp_entry(page);
page = find_get_page(swap_address_space(swp), swp.val);
}
diff --git a/mm/shmem.c b/mm/shmem.c
index 266e78679811..cda801a5496b 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -484,7 +484,8 @@ unsigned long shmem_partial_swap_usage(struct address_space *mapping,
continue;
}
- if (radix_tree_exceptional_entry(page))
+ if (radix_tree_exceptional_entry(page) &&
+ !radix_tree_very_exceptional_entry(page))
swapped++;
if (need_resched()) {
@@ -599,7 +600,8 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
if (index >= end)
break;
- if (radix_tree_exceptional_entry(page)) {
+ if (radix_tree_exceptional_entry(page) &&
+ !radix_tree_very_exceptional_entry(page)) {
if (unfalloc)
continue;
nr_swaps_freed += !shmem_free_swap(mapping,
@@ -673,7 +675,8 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
if (index >= end)
break;
- if (radix_tree_exceptional_entry(page)) {
+ if (radix_tree_exceptional_entry(page) &&
+ !radix_tree_very_exceptional_entry(page)) {
if (unfalloc)
continue;
if (shmem_free_swap(mapping, index, page)) {
@@ -1221,7 +1224,8 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
repeat:
swap.val = 0;
page = __find_lock_page(mapping, index);
- if (radix_tree_exceptional_entry(page)) {
+ if (radix_tree_exceptional_entry(page) &&
+ !radix_tree_very_exceptional_entry(page)) {
swap = radix_to_swp_entry(page);
page = NULL;
}
diff --git a/mm/workingset.c b/mm/workingset.c
index 9f9264bb08ad..0b4cf96bb026 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -176,7 +176,7 @@ static void *pack_shadow(int memcgid, struct zone *zone, unsigned long eviction)
eviction = (eviction << ZONES_SHIFT) | zone_idx(zone);
eviction = (eviction << RADIX_TREE_EXCEPTIONAL_SHIFT);
- return (void *)(eviction | RADIX_TREE_EXCEPTIONAL_ENTRY);
+ return (void *)(eviction | RADIX_TREE_VERY_EXCEPTIONAL_ENTRY);
}
static void unpack_shadow(void *shadow, int *memcgidp, struct zone **zonep,
@@ -445,7 +445,7 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
goto out_invalid;
for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
if (node->slots[i]) {
- if (WARN_ON_ONCE(!radix_tree_exceptional_entry(node->slots[i])))
+ if (WARN_ON_ONCE(!radix_tree_very_exceptional_entry(node->slots[i])))
goto out_invalid;
if (WARN_ON_ONCE(!mapping->nrexceptional))
goto out_invalid;
--
2.19.2
More information about the Devel
mailing list