[linux-mm-cc] [PATCH 07/12] avoid OOM: variable for number of pages used by ccache

IKEDA Munehiro m-ikeda at ds.jp.nec.com
Mon Jul 23 06:08:18 EDT 2007


Introduce a variable to track how many pages are used
by ccache.

In v1, the condition for deciding whether to free a page and
(probably) the placement of the lock were incorrect in
merge_chunk().  This patch fixes them.
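
For reference, the bookkeeping pattern is: bump a shared counter
whenever ccache grows by a page, and only release a page (and
decrement) while more than one page remains.  Below is a minimal
userspace sketch of that pattern using C11 atomics instead of the
kernel's atomic_t; the names are illustrative stand-ins, not the
actual ccache symbols.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for cc_pages bookkeeping. */
static atomic_int cc_pages;          /* pages currently held by the cache */

/* Grow the cache by one page and account for it. */
static void *cc_expand(void)
{
	void *page = malloc(4096);    /* stand-in for alloc_page() */
	if (!page)
		return NULL;
	atomic_fetch_add(&cc_pages, 1);
	return page;
}

/* Shrink the cache, but never release the last remaining page. */
static void cc_shrink(void *page)
{
	if (atomic_load(&cc_pages) > 1) {
		free(page);           /* stand-in for __free_page() */
		atomic_fetch_sub(&cc_pages, 1);
	}
	/* else: the last page stays in the cache for reuse */
}

int main(void)
{
	void *a = cc_expand();
	void *b = cc_expand();
	cc_shrink(b);                 /* freed, counter drops to 1 */
	cc_shrink(a);                 /* retained, counter stays at 1 */
	printf("pages in cache: %d\n", atomic_load(&cc_pages));
	return 0;
}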

Signed-off-by: IKEDA, Munehiro <m-ikeda at ds.jp.nec.com>
---
 mm/ccache.c |   11 ++++++++---
 1 files changed, 8 insertions(+), 3 deletions(-)

diff --git a/mm/ccache.c b/mm/ccache.c
index 5ee1c3e..3717f1f 100644
--- a/mm/ccache.c
+++ b/mm/ccache.c
@@ -18,6 +18,7 @@ static DEFINE_SPINLOCK(ccache_lock);
 static int anon_cc_started = 0, fs_backed_cc_started = 0;
 unsigned long max_anon_cc_size = 0, max_fs_backed_cc_size = 0;
 static atomic_t anon_cc_size, fs_backed_cc_size;	/* current sizes */
+static atomic_t cc_pages;	/* num of pages used by ccache */
 const unsigned long ccache_size_limit = MAX_SWAP_OFFSET - 1;
 
 static struct list_head pages_head, mcl_head, lru_anon, lru_fs_backed;
@@ -169,6 +170,7 @@ static int expand_ccache(void)
 	list_add_tail(&chunk->chunks, &mcl_head);
 	list_add_tail(&page->lru, &pages_head);
 	spin_unlock(&ccache_lock);
+	atomic_inc(&cc_pages);
 	CC_DEBUG2("success");
 	return 0;
 out:
@@ -268,10 +270,13 @@ repeat:
 		spin_lock(&ccache_lock);
 		list_del_init(&page->lru);
 		list_del_init(&chunk->chunks);
+		if (atomic_read(&cc_pages) > 1) {
+			CC_DEBUG("freeing page");
+			__free_page(page);
+			kfree(chunk);
+			atomic_dec(&cc_pages);
+		}
 		spin_unlock(&ccache_lock);
-		CC_DEBUG("freeing page");
-		__free_page(page);
-		kfree(chunk);
 	} else {
 		/* add to free list */
 		spin_lock(&ccache_lock);
-- 
1.4.4.4
