[linux-mm-cc] [PATCH 02/12] clean up merge_chunk
IKEDA Munehiro
m-ikeda at ds.jp.nec.com
Fri Jul 20 06:44:46 EDT 2007
This is code clean up patch for merge_chunk().
The code in cc_readpage() just after the call to merge_chunk(),
which frees the page or adds the chunk to the free list,
is moved into merge_chunk().
Because merge_chunk() and this code are logically continuous,
they shouldn't be implemented separately, IMHO.
Signed-off-by: IKEDA, Munehiro <m-ikeda at ds.jp.nec.com>
---
mm/ccache.c | 73 ++++++++++++++++++++++++++++++----------------------------
1 files changed, 38 insertions(+), 35 deletions(-)
diff --git a/mm/ccache.c b/mm/ccache.c
index 12ebbd5..5cc0d25 100644
--- a/mm/ccache.c
+++ b/mm/ccache.c
@@ -217,7 +217,7 @@ out:
return -ENOMEM;
}
-static int merge_chunk(struct chunk *chunk)
+static void merge_chunk(struct chunk *chunk)
{
struct chunk *tmp_chunk = NULL;
void *page_start = (void *)((unsigned long)(chunk->start_addr) &
@@ -240,17 +240,45 @@ repeat:
SetChunkMerged(tmp_chunk);
tmp_chunk->start_addr = (void *)ATM_POISON;
}
+
+ if (back) {
+ back = 0;
+ tmp_chunk = NULL;
+ if (chunk->chunks.next != &mcl_head)
+ tmp_chunk = list_entry(chunk->chunks.next,
+ struct chunk, chunks);
+ goto repeat;
+ }
+
/* and now forward */
- if (!back) {
- SetChunkFree(chunk);
+ SetChunkFree(chunk);
+ spin_unlock(&ccache_lock);
+
+ /*
+ * NOTE: should keep some min no. of free pages
+ * in ccache. So, shouldn't free it immediately.
+ */
+ /*
+ * free chunk and page if it spans whole page,
+ * otherwise, add it to free list.
+ */
+ if (ChunkSize(chunk) == PAGE_SIZE) {
+ struct page *page;
+ page = virt_to_page(chunk->start_addr);
+ spin_lock(&ccache_lock);
+ list_del_init(&page->lru);
+ list_del_init(&chunk->chunks);
+ spin_unlock(&ccache_lock);
+ CC_DEBUG("freeing page");
+ __free_page(page);
+ kfree(chunk);
+ } else {
+ /* add to free list */
+ spin_lock(&ccache_lock);
+ chunk->next = free_head;
+ free_head = chunk;
spin_unlock(&ccache_lock);
- return 0;
}
- back = 0;
- tmp_chunk = NULL;
- if (chunk->chunks.next != &mcl_head)
- tmp_chunk = list_entry(chunk->chunks.next, struct chunk, chunks);
- goto repeat;
}
/*
@@ -576,7 +604,7 @@ static struct page *cc_readpage(struct chunk_head *ch)
{
int ret = -ENOMEM, algo_idx;
unsigned int comp_size=0;
- struct page *decomp_page, *comp_page, *tmp_page;
+ struct page *decomp_page, *comp_page;
void *comp_data;
struct chunk *chunk, *tmp;
CC_DEBUG2("start");
@@ -624,31 +652,6 @@ static struct page *cc_readpage(struct chunk_head *ch)
comp_size += ChunkSize(chunk);
tmp = chunk->next;
merge_chunk(chunk);
-
- /*
- * NOTE: should keep some min no. of free pages
- * in ccache. So, shouldn't free it immediately.
- */
- /*
- * free chunk and page if it spans whole page,
- * otherwise, add it to free list.
- */
- if (ChunkSize(chunk) == PAGE_SIZE) {
- tmp_page = virt_to_page(chunk->start_addr);
- spin_lock(&ccache_lock);
- list_del_init(&tmp_page->lru);
- list_del_init(&chunk->chunks);
- spin_unlock(&ccache_lock);
- CC_DEBUG("freeing page");
- __free_page(tmp_page);
- kfree(chunk);
- } else {
- /* add to free list */
- spin_lock(&ccache_lock);
- chunk->next = free_head;
- free_head = chunk;
- spin_unlock(&ccache_lock);
- }
chunk = tmp;
}
comp_data -= comp_size;
--
1.4.4.4
More information about the linux-mm-cc
mailing list