[PATCH 04/18] mm, hugetlb: region manipulation functions take resv_map rather list_head

2013-07-28 Thread Joonsoo Kim
To change a protection method for region tracking to a fine-grained one,
we pass the resv_map, instead of list_head, to region manipulation
functions. This doesn't introduce any functional change, and it is just
for preparing a next step.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 35f6b12..24c0111 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -150,8 +150,9 @@ struct file_region {
long to;
 };
 
-static long region_add(struct list_head *head, long f, long t)
+static long region_add(struct resv_map *resv, long f, long t)
 {
+   struct list_head *head = &resv->regions;
struct file_region *rg, *nrg, *trg;
 
/* Locate the region we are either in or before. */
@@ -186,8 +187,9 @@ static long region_add(struct list_head *head, long f, long 
t)
return 0;
 }
 
-static long region_chg(struct list_head *head, long f, long t)
+static long region_chg(struct resv_map *resv, long f, long t)
 {
+   struct list_head *head = &resv->regions;
struct file_region *rg, *nrg;
long chg = 0;
 
@@ -235,8 +237,9 @@ static long region_chg(struct list_head *head, long f, long 
t)
return chg;
 }
 
-static long region_truncate(struct list_head *head, long end)
+static long region_truncate(struct resv_map *resv, long end)
 {
+   struct list_head *head = &resv->regions;
struct file_region *rg, *trg;
long chg = 0;
 
@@ -265,8 +268,9 @@ static long region_truncate(struct list_head *head, long 
end)
return chg;
 }
 
-static long region_count(struct list_head *head, long f, long t)
+static long region_count(struct resv_map *resv, long f, long t)
 {
+   struct list_head *head = &resv->regions;
struct file_region *rg;
long chg = 0;
 
@@ -392,7 +396,7 @@ void resv_map_release(struct kref *ref)
struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
 
/* Clear out any active regions before we release the map. */
-   region_truncate(&resv_map->regions, 0);
+   region_truncate(resv_map, 0);
kfree(resv_map);
 }
 
@@ -1083,7 +1087,7 @@ static long vma_needs_reservation(struct hstate *h,
pgoff_t idx = vma_hugecache_offset(h, vma, addr);
struct resv_map *resv = inode->i_mapping->private_data;
 
-   return region_chg(&resv->regions, idx, idx + 1);
+   return region_chg(resv, idx, idx + 1);
 
} else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
return 1;
@@ -1093,7 +1097,7 @@ static long vma_needs_reservation(struct hstate *h,
pgoff_t idx = vma_hugecache_offset(h, vma, addr);
struct resv_map *resv = vma_resv_map(vma);
 
-   err = region_chg(&resv->regions, idx, idx + 1);
+   err = region_chg(resv, idx, idx + 1);
if (err < 0)
return err;
return 0;
@@ -1109,14 +1113,14 @@ static void vma_commit_reservation(struct hstate *h,
pgoff_t idx = vma_hugecache_offset(h, vma, addr);
struct resv_map *resv = inode->i_mapping->private_data;
 
-   region_add(&resv->regions, idx, idx + 1);
+   region_add(resv, idx, idx + 1);
 
} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
pgoff_t idx = vma_hugecache_offset(h, vma, addr);
struct resv_map *resv = vma_resv_map(vma);
 
/* Mark this page used in the map. */
-   region_add(&resv->regions, idx, idx + 1);
+   region_add(resv, idx, idx + 1);
}
 }
 
@@ -2203,7 +2207,7 @@ static void hugetlb_vm_op_close(struct vm_area_struct 
*vma)
end = vma_hugecache_offset(h, vma, vma->vm_end);
 
reserve = (end - start) -
-   region_count(&resv->regions, start, end);
+   region_count(resv, start, end);
 
resv_map_put(vma);
 
@@ -3078,7 +3082,7 @@ int hugetlb_reserve_pages(struct inode *inode,
if (!vma || vma->vm_flags & VM_MAYSHARE) {
resv_map = inode->i_mapping->private_data;
 
-   chg = region_chg(&resv_map->regions, from, to);
+   chg = region_chg(resv_map, from, to);
 
} else {
resv_map = resv_map_alloc();
@@ -3124,7 +3128,7 @@ int hugetlb_reserve_pages(struct inode *inode,
 * else has to be done for private mappings here
 */
if (!vma || vma->vm_flags & VM_MAYSHARE)
-   region_add(&resv_map->regions, from, to);
+   region_add(resv_map, from, to);
return 0;
 out_err:
if (vma)
@@ -3140,7 +3144,7 @@ void hugetlb_unreserve_pages(struct inode *inode, long 
offset, long freed)
struct hugepage_subpool *spool = subpool_inode(inode);
 
if (resv_map)
-   chg = region_truncate(&resv_map->regions, offset);
+   chg = region_truncate(resv_map, offset);
spin_lock(&inode->i_lock);
inode->i_blocks -= (blocks_per_huge_page(h) * freed);

[PATCH 04/18] mm, hugetlb: region manipulation functions take resv_map rather list_head

2013-07-28 Thread Joonsoo Kim
To change a protection method for region tracking to a fine-grained one,
we pass the resv_map, instead of list_head, to region manipulation
functions. This doesn't introduce any functional change, and it is just
for preparing a next step.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 35f6b12..24c0111 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -150,8 +150,9 @@ struct file_region {
long to;
 };
 
-static long region_add(struct list_head *head, long f, long t)
+static long region_add(struct resv_map *resv, long f, long t)
 {
+   struct list_head *head = &resv->regions;
struct file_region *rg, *nrg, *trg;
 
/* Locate the region we are either in or before. */
@@ -186,8 +187,9 @@ static long region_add(struct list_head *head, long f, long 
t)
return 0;
 }
 
-static long region_chg(struct list_head *head, long f, long t)
+static long region_chg(struct resv_map *resv, long f, long t)
 {
+   struct list_head *head = &resv->regions;
struct file_region *rg, *nrg;
long chg = 0;
 
@@ -235,8 +237,9 @@ static long region_chg(struct list_head *head, long f, long 
t)
return chg;
 }
 
-static long region_truncate(struct list_head *head, long end)
+static long region_truncate(struct resv_map *resv, long end)
 {
+   struct list_head *head = &resv->regions;
struct file_region *rg, *trg;
long chg = 0;
 
@@ -265,8 +268,9 @@ static long region_truncate(struct list_head *head, long 
end)
return chg;
 }
 
-static long region_count(struct list_head *head, long f, long t)
+static long region_count(struct resv_map *resv, long f, long t)
 {
+   struct list_head *head = &resv->regions;
struct file_region *rg;
long chg = 0;
 
@@ -392,7 +396,7 @@ void resv_map_release(struct kref *ref)
struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
 
/* Clear out any active regions before we release the map. */
-   region_truncate(&resv_map->regions, 0);
+   region_truncate(resv_map, 0);
kfree(resv_map);
 }
 
@@ -1083,7 +1087,7 @@ static long vma_needs_reservation(struct hstate *h,
pgoff_t idx = vma_hugecache_offset(h, vma, addr);
struct resv_map *resv = inode->i_mapping->private_data;
 
-   return region_chg(&resv->regions, idx, idx + 1);
+   return region_chg(resv, idx, idx + 1);
 
} else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
return 1;
@@ -1093,7 +1097,7 @@ static long vma_needs_reservation(struct hstate *h,
pgoff_t idx = vma_hugecache_offset(h, vma, addr);
struct resv_map *resv = vma_resv_map(vma);
 
-   err = region_chg(&resv->regions, idx, idx + 1);
+   err = region_chg(resv, idx, idx + 1);
if (err < 0)
return err;
return 0;
@@ -1109,14 +1113,14 @@ static void vma_commit_reservation(struct hstate *h,
pgoff_t idx = vma_hugecache_offset(h, vma, addr);
struct resv_map *resv = inode->i_mapping->private_data;
 
-   region_add(&resv->regions, idx, idx + 1);
+   region_add(resv, idx, idx + 1);
 
} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
pgoff_t idx = vma_hugecache_offset(h, vma, addr);
struct resv_map *resv = vma_resv_map(vma);
 
/* Mark this page used in the map. */
-   region_add(&resv->regions, idx, idx + 1);
+   region_add(resv, idx, idx + 1);
}
 }
 
@@ -2203,7 +2207,7 @@ static void hugetlb_vm_op_close(struct vm_area_struct 
*vma)
end = vma_hugecache_offset(h, vma, vma->vm_end);
 
reserve = (end - start) -
-   region_count(&resv->regions, start, end);
+   region_count(resv, start, end);
 
resv_map_put(vma);
 
@@ -3078,7 +3082,7 @@ int hugetlb_reserve_pages(struct inode *inode,
if (!vma || vma->vm_flags & VM_MAYSHARE) {
resv_map = inode->i_mapping->private_data;
 
-   chg = region_chg(&resv_map->regions, from, to);
+   chg = region_chg(resv_map, from, to);
 
} else {
resv_map = resv_map_alloc();
@@ -3124,7 +3128,7 @@ int hugetlb_reserve_pages(struct inode *inode,
 * else has to be done for private mappings here
 */
if (!vma || vma->vm_flags & VM_MAYSHARE)
-   region_add(&resv_map->regions, from, to);
+   region_add(resv_map, from, to);
return 0;
 out_err:
if (vma)
@@ -3140,7 +3144,7 @@ void hugetlb_unreserve_pages(struct inode *inode, long 
offset, long freed)
struct hugepage_subpool *spool = subpool_inode(inode);
 
if (resv_map)
-   chg = region_truncate(&resv_map->regions, offset);
+   chg = region_truncate(resv_map, offset);
spin_lock(&inode->i_lock);