[Devel] [RFC v14-rc3][PATCH 22/36] Prepare to support shared memory
Oren Laadan
orenl at cs.columbia.edu
Tue Apr 7 05:27:30 PDT 2009
Export the functionality needed to retrieve specific pages of shared
memory given a shmem-fs inode; the next two patches will use this to
add support for c/r of shared memory.
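For illustration only, a checkpoint-side caller in those later patches
might fetch pages roughly like this; the helper name and error handling
are made up here, and only the shmem_getpage() signature and the
SGP_READ semantics come from this patch (as with other in-tree callers,
the sketch assumes the page comes back locked and with a reference
held, and that a hole yields a NULL page):

/* Hypothetical helper: look up one page of a shmem inode for dumping */
static struct page *cr_shmem_get_page(struct inode *inode, unsigned long idx)
{
	struct page *page = NULL;
	int ret;

	/* SGP_READ: don't allocate; a hole simply yields page == NULL */
	ret = shmem_getpage(inode, idx, &page, SGP_READ, NULL);
	if (ret < 0)
		return ERR_PTR(ret);
	if (page)
		unlock_page(page);	/* returned locked, as for shmem_fault() */
	return page;			/* caller drops the page reference when done */
}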
Handling of shared memory depends on the type of the vma; to classify a
vma we extend 'struct vm_operations_struct' with a new method,
'cr_vma_type()', through which a vma reports an integer that
reflects its type.
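For illustration only (this patch adds just the hook; the type
constants and the shmem implementation appear in the following patches,
so the names below are made up), a shmem-backed vma could later
identify itself along these lines:

#ifdef CONFIG_CHECKPOINT
/* Sketch: report this vma as shmem-backed; CR_VMA_SHM is a made-up constant */
static int shmem_cr_vma_type(struct vm_area_struct *vma)
{
	return CR_VMA_SHM;
}
#endif

static struct vm_operations_struct shmem_vm_ops = {
	.fault		= shmem_fault,
	/* ... other methods unchanged ... */
#ifdef CONFIG_CHECKPOINT
	.cr_vma_type	= shmem_cr_vma_type,
#endif
};

Generic c/r code can then call vma->vm_ops->cr_vma_type(vma) when the
method is set, presumably falling back to a default type for vmas that
do not provide it.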
mm/shmem.c:
- shmem_getpage() made non-static; its declaration and 'enum sgp_type' move to include/linux/mm.h
include/linux/mm.h:
- 'struct vm_operations_struct' extended with a '->cr_vma_type' method
Changelog[v14]:
- Introduce patch
Signed-off-by: Oren Laadan <orenl at cs.columbia.edu>
---
 include/linux/mm.h |   14 ++++++++++++++
 mm/shmem.c         |   15 ++-------------
 2 files changed, 16 insertions(+), 13 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 065cdf8..e9bdc00 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -218,6 +218,9 @@ struct vm_operations_struct {
 	int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
 		const nodemask_t *to, unsigned long flags);
 #endif
+#ifdef CONFIG_CHECKPOINT
+	int (*cr_vma_type)(struct vm_area_struct *vma);
+#endif
 };
 
 struct mmu_gather;
@@ -323,6 +326,17 @@ void put_pages_list(struct list_head *pages);
 
 void split_page(struct page *page, unsigned int order);
 
+/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
+enum sgp_type {
+	SGP_READ,	/* don't exceed i_size, don't allocate page */
+	SGP_CACHE,	/* don't exceed i_size, may allocate page */
+	SGP_DIRTY,	/* like SGP_CACHE, but set new page dirty */
+	SGP_WRITE,	/* may exceed i_size, may allocate page */
+};
+
+extern int shmem_getpage(struct inode *inode, unsigned long idx,
+			struct page **pagep, enum sgp_type sgp, int *type);
+
 /*
  * Compound pages have a destructor function. Provide a
  * prototype for that function and accessor functions.
diff --git a/mm/shmem.c b/mm/shmem.c
index 4103a23..53118f0 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -83,14 +83,6 @@ static struct vfsmount *shm_mnt;
 /* Pretend that each entry is of this size in directory's i_size */
 #define BOGO_DIRENT_SIZE 20
 
-/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
-enum sgp_type {
-	SGP_READ,	/* don't exceed i_size, don't allocate page */
-	SGP_CACHE,	/* don't exceed i_size, may allocate page */
-	SGP_DIRTY,	/* like SGP_CACHE, but set new page dirty */
-	SGP_WRITE,	/* may exceed i_size, may allocate page */
-};
-
 #ifdef CONFIG_TMPFS
 static unsigned long shmem_default_max_blocks(void)
 {
@@ -103,9 +95,6 @@ static unsigned long shmem_default_max_inodes(void)
 }
 #endif
 
-static int shmem_getpage(struct inode *inode, unsigned long idx,
-			struct page **pagep, enum sgp_type sgp, int *type);
-
 static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
 {
 	/*
@@ -1187,8 +1176,8 @@ static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
  * vm. If we swap it in we mark it dirty since we also free the swap
  * entry since a page cannot live in both the swap and page cache
  */
-static int shmem_getpage(struct inode *inode, unsigned long idx,
-			struct page **pagep, enum sgp_type sgp, int *type)
+int shmem_getpage(struct inode *inode, unsigned long idx,
+			struct page **pagep, enum sgp_type sgp, int *type)
 {
 	struct address_space *mapping = inode->i_mapping;
 	struct shmem_inode_info *info = SHMEM_I(inode);
--
1.5.4.3
_______________________________________________
Containers mailing list
Containers at lists.linux-foundation.org
https://lists.linux-foundation.org/mailman/listinfo/containers