[linux-mm-cc] [PATCH] locking refinement

nai.xia at gmail.com
Mon Apr 21 05:56:01 EDT 2008


Hi Nitin,

I have written a patch that tries to improve the locking semantics of compcache.
The patch is based on the following observations:

1. TLSF is a fast real-time allocator and the main execution paths of
compcache_make_request() are rather short, so it is beneficial to use spin_lock
instead of mutex_lock wherever we are sure a path will never sleep.

2. The core algorithm of TLSF is atomic; the only reason tlsf_malloc is not is
that it may call get_mem(). While it is smart to let TLSF grow the pool
automatically, tlsf_malloc may still fail under contention (after trying
MAX_RETRY_EXPAND times). The client cannot control this contention directly,
even though the client is the one responsible for it.

3. As Andrew Morton suggested, "kmap_atomic() is (much) preferred over
kmap()", so we can use kmap_atomic() on the paths that are atomic (a short
sketch of this pattern follows).



So my patch makes the following changes:

1. Make compcache.compress_workmem and compcache.compress_buffer per-cpu
buffers. This makes compression safe inside a kmap_atomic()~kunmap_atomic()
section (or in any other context where preemption is disabled) without
acquiring the spinlock.

2. Split tlsf_malloc into three functions: tlsf_malloc_atomic,
tlsf_grow_nosleep and tlsf_grow. The core part of memory allocation is now
atomic and the growing behaviour is controlled by the caller, which lets the
caller separate the atomic and non-atomic paths (see the sketch after this
list).

3. The core algorithm of TLSF is now protected by a spinlock instead of a
may-sleep mutex, because it is atomic.

4. The execution paths of compcache_make_request() are now split into atomic
and non-atomic paths. The spinlock protects the atomic ones; a wait queue
handles the rare case where a sleeping allocation is _really_ needed.
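
The calling convention of the split allocator (change 2) then looks roughly
like this. This is a sketch of the contract only; the real write path in the
patch below additionally serializes sleeping growers with the grow_inprogress
flag and the wait queue, and pool_alloc() is a hypothetical helper, not part
of the patch:

/*
 * tlsf_malloc_atomic() and tlsf_grow_nosleep() never sleep;
 * tlsf_grow() may sleep, so this helper may too.
 */
static void *pool_alloc(size_t len, void *pool)
{
	int err;
	void *addr = tlsf_malloc_atomic(len, pool, &err);

	if (!addr && err == TLSF_ALLOC_NEEDGROW) {
		/* try a non-sleeping grow first, sleep only as a last resort */
		if (tlsf_grow_nosleep(pool) || tlsf_grow(pool))
			addr = tlsf_malloc_atomic(len, pool, &err);
	}
	return addr;
}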



I've already done some testing myself. From what I observed, a non-waiting
__vmalloc is enough more than 99% of the time, so I believe the patch will
help parallel workloads where lock contention is heavy.

I understand that you may not want to merge my patch while you are stabilizing
the kernel patch. But looking to the future, once compcache is in mainline,
scalability improvements will be considered sooner or later, right? :)

Looking forward to your comments :)


diff --git a/compcache.c b/compcache.c
index 6330e93..cfa656a 100644
--- a/compcache.c
+++ b/compcache.c
@@ -23,6 +23,7 @@
 #include <linux/vmalloc.h>
 #include <asm/pgtable.h>
 #include <asm/string.h>
+#include <linux/smp.h>
 
 #include "sub-projects/compression/lzo-kmod/lzo.h"
 #include "sub-projects/allocators/tlsf-kmod/tlsf.h"
@@ -110,6 +111,7 @@ static int compcache_make_request(struct request_queue *queue, struct bio *bio)
 	size_t clen, page_no;
 	void *user_mem;
 	struct page *page;
+	int errno;
 
 	if (!valid_swap_request(bio)) {
 		stat_inc(&stats.invalid_io);
@@ -121,10 +123,10 @@ static int compcache_make_request(struct request_queue *queue, struct bio *bio)
 
 	page = bio->bi_io_vec[0].bv_page;
 	page_no = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
-	user_mem = kmap(page);
 
 	if (bio_data_dir(bio) == READ) {
 		stat_inc(&stats.num_reads);
+		user_mem = kmap_atomic(page, KM_USER0);
 		/*
 		 * This is attempt to read before any previous write
 		 * to this location. This happens due to readahead when
@@ -137,7 +139,7 @@ static int compcache_make_request(struct request_queue *queue, struct bio *bio)
 				bio->bi_size,
 				bio->bi_io_vec[0].bv_offset);
 			memset(user_mem, 0, PAGE_SIZE);
-			kunmap(page);
+			kunmap_atomic(page, KM_USER0);
 			set_bit(BIO_UPTODATE, &bio->bi_flags);
 			BIO_ENDIO(bio, 0);
 			return 0;
@@ -145,9 +147,8 @@ static int compcache_make_request(struct request_queue *queue, struct bio *bio)
 
 		/* Page is stored uncompressed since its incompressible */
 		if (unlikely(compcache.table[page_no].len == PAGE_SIZE)) {
-			memcpy(user_mem, compcache.table[page_no].addr,
-							PAGE_SIZE);
-			kunmap(page);
+			memcpy(user_mem, compcache.table[page_no].addr, PAGE_SIZE);
+			kunmap_atomic(page, KM_USER0);
 			set_bit(BIO_UPTODATE, &bio->bi_flags);
 			BIO_ENDIO(bio, 0);
 			return 0;
@@ -170,12 +171,13 @@ static int compcache_make_request(struct request_queue *queue, struct bio *bio)
 		}
 
 		CC_DEBUG2("Page decompressed: page_no=%zu\n", page_no);
-		kunmap(page);
+		kunmap_atomic(page, KM_USER0);
 		set_bit(BIO_UPTODATE, &bio->bi_flags);
 		BIO_ENDIO(bio, 0);
 		return 0;
 	} else {	/* Write */
-		unsigned char *src = compcache.compress_buffer;
+		unsigned char *src, *workmem ;
+		int cpu;
 		stat_inc(&stats.num_writes);
 		/*
 		 * System swaps to same sector again when the stored page
@@ -191,11 +193,14 @@ static int compcache_make_request(struct request_queue *queue, struct bio *bio)
 			compcache.table[page_no].len = 0;
 		}
 
-		mutex_lock(&compcache.lock);
+retry:
+		user_mem = kmap_atomic(page, KM_USER0);
+		cpu = smp_processor_id();
+		src = per_cpu_ptr(compcache.compress_buffer, cpu);
+		workmem = per_cpu_ptr(compcache.compress_workmem, cpu);
 		ret = lzo1x_1_compress(user_mem, PAGE_SIZE,
-			src, &clen, compcache.compress_workmem);
+			src, &clen, workmem); 
 		if (unlikely(ret != LZO_E_OK)) {
-			mutex_unlock(&compcache.lock);
 			pr_err(C "Compression failed! err=%d\n", ret);
 			compcache.table[page_no].addr = NULL;
 			compcache.table[page_no].len = 0;
@@ -213,14 +218,56 @@ static int compcache_make_request(struct request_queue *queue, struct bio *bio)
 			CC_DEBUG2("Page compressed: page_no=%zu, len=%zu\n",
 				page_no, clen);
 		}
-		if ((compcache.table[page_no].addr = tlsf_malloc(clen,
-					compcache.mem_pool)) == NULL) {
-			mutex_unlock(&compcache.lock);
-			pr_err(C "Error allocating memory for compressed "
-				"page: %zu, size=%zu \n", page_no, clen);
-			compcache.table[page_no].len = 0;
-			stat_inc(&stats.failed_writes);
-			goto out;
+
+
+		spin_lock(&compcache.spinlock);
+retry_atomic:
+		compcache.table[page_no].addr = tlsf_malloc_atomic(clen, 
+						compcache.mem_pool, &errno);
+		if (compcache.table[page_no].addr == NULL) {
+			if ( errno != TLSF_ALLOC_NEEDGROW ) {
+				spin_unlock(&compcache.spinlock);
+				goto out;
+			}
+			if ( !compcache.grow_inprogress ) {
+				if ( tlsf_grow_nosleep(compcache.mem_pool) ) {
+					CC_DEBUG2("nosleep grow success\n");
+					goto retry_atomic;
+				}
+				else {
+					CC_DEBUG2("nosleep grow failed\n");
+					compcache.grow_inprogress = 1;
+					spin_unlock(&compcache.spinlock);
+					kunmap_atomic(page, KM_USER0);
+					if ( tlsf_grow(compcache.mem_pool) ) {
+						CC_DEBUG2("*****sleeping grow success\n");
+						compcache.grow_inprogress = 0;
+						wake_up(&compcache.queue);
+						goto retry;
+					} else { /* no memory */
+					
+						pr_err(C "Error allocating memory" 
+						"page: %zu, size=%zu \n", page_no, clen);
+						
+						compcache.table[page_no].len = 0;
+						
+						stat_inc(&stats.failed_writes);
+						goto out_nomap;
+					}
+				}
+			} else { /* someone is doing a sleeping allocation */
+				spin_unlock(&compcache.spinlock);
+				kunmap_atomic(page, KM_USER0);
+				CC_DEBUG("Compcache sleeping\n");
+
+				/* no need to use smp_mb here, because
+				 * wait_event already has it before testing the
+				 * condition 
+				 * */
+				wait_event(compcache.queue, 
+						compcache.grow_inprogress == 0); 
+				goto retry;
+			}
 		}
 		
 		memcpy(compcache.table[page_no].addr, src, clen);
@@ -232,17 +279,18 @@ static int compcache_make_request(struct request_queue *queue, struct bio *bio)
 		stat_inc_if_less(&stats.pages_expand, PAGE_SIZE - 1, clen);
 		stat_inc_if_less(&stats.good_compress, clen,
 						PAGE_SIZE / 2 + 1);
-		mutex_unlock(&compcache.lock);
+		
+		spin_unlock(&compcache.spinlock);
 		
 		compcache.table[page_no].len = clen;
 
-		kunmap(page);
+		kunmap_atomic(page, KM_USER0);
 		set_bit(BIO_UPTODATE, &bio->bi_flags);
 		BIO_ENDIO(bio, 0);
 		return 0;
 	}
 out:
-	kunmap(page);
+	kunmap_atomic(page, KM_USER0);
 out_nomap:
 	BIO_IO_ERROR(bio);
 	return 0;
@@ -261,6 +309,11 @@ static void *get_mem(size_t size)
 	return __vmalloc(size, GFP_NOIO, PAGE_KERNEL);
 }
 
+static void *get_mem_nosleep(size_t size)
+{
+	return __vmalloc(size, __GFP_NORETRY, PAGE_KERNEL);
+}
+
 static void put_mem(void *ptr)
 {
 	vfree(ptr);
@@ -273,6 +326,9 @@ static int __init compcache_init(void)
 	struct sysinfo i;
 
 	mutex_init(&compcache.lock);
+	spin_lock_init(&compcache.spinlock);
+	compcache.grow_inprogress = 0;
+	init_waitqueue_head(&compcache.queue);
 
 	if (compcache_size_kbytes == 0) {
 		pr_info(C "compcache size not provided."
@@ -290,14 +346,14 @@ static int __init compcache_init(void)
 	pr_info(C "Compressed swap size set to: %zu KB\n", compcache.size >> 10);
 	compcache.size >>= SECTOR_SHIFT;
 
-	compcache.compress_workmem = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
+	compcache.compress_workmem = percpu_alloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
 	if (compcache.compress_workmem == NULL) {
 		pr_err(C "Error allocating compressor working memory\n");
 		ret = -ENOMEM;
 		goto fail;
 	}
 
-	compcache.compress_buffer = kmalloc(2 * PAGE_SIZE, GFP_KERNEL);
+	compcache.compress_buffer = percpu_alloc(2 * PAGE_SIZE, GFP_KERNEL);
 	if (compcache.compress_buffer == NULL) {
 		pr_err(C "Error allocating compressor buffer space\n");
 		ret = -ENOMEM;
@@ -358,7 +414,7 @@ static int __init compcache_init(void)
 	add_disk(compcache.disk);
 
 	compcache.mem_pool = tlsf_create_memory_pool("compcache",
-				get_mem, put_mem,
+				get_mem, put_mem, get_mem_nosleep, 
 				INIT_SIZE, 0, GROW_SIZE);
 	if (compcache.mem_pool == NULL) {
 		pr_err(C "Error creating memory pool\n");
@@ -391,9 +447,9 @@ fail:
 	if (compcache.table[0].addr)
 		free_page((unsigned long)compcache.table[0].addr);
 	if (compcache.compress_workmem)
-		kfree(compcache.compress_workmem);
+		percpu_free(compcache.compress_workmem);
 	if (compcache.compress_buffer)
-		kfree(compcache.compress_buffer);
+		percpu_free(compcache.compress_buffer);
 	if (compcache.table)
 		vfree(compcache.table);
 	if (compcache.mem_pool)
@@ -414,16 +470,23 @@ static void __exit compcache_exit(void)
 	unregister_blkdev(compcache.disk->major, compcache.disk->disk_name);
 	del_gendisk(compcache.disk);
 	free_page((unsigned long)compcache.table[0].addr);
-	kfree(compcache.compress_workmem);
-	kfree(compcache.compress_buffer);
+	CC_DEBUG("free table0 ok!\n");
+	percpu_free(compcache.compress_workmem);
+	percpu_free(compcache.compress_buffer);
 
 	/* Free all pages that are still in compcache */
 	for (i = 1; i < num_pages; i++)
-		if (compcache.table[i].addr)
+		if (compcache.table[i].addr){
+			CC_DEBUG("Going to free page%d......!",i);
 			tlsf_free(compcache.table[i].addr, compcache.mem_pool);
+			CC_DEBUG("done!\n");
+		}
+
+	CC_DEBUG("free all pages ok!\n");
 	vfree(compcache.table);
+	CC_DEBUG("free table ok!\n");
 	tlsf_destroy_memory_pool(compcache.mem_pool);
-
+	CC_DEBUG("destroy memory pool ok!\n");
 #ifdef CONFIG_COMPCACHE_PROC
 	remove_proc_entry("compcache", &proc_root);
 #endif
diff --git a/compcache.h b/compcache.h
index d428fc4..ceb005b 100644
--- a/compcache.h
+++ b/compcache.h
@@ -129,6 +129,9 @@ struct compcache {
 	void *compress_buffer;
 	struct table *table;
 	struct mutex lock;
+	spinlock_t spinlock;
+	int grow_inprogress;
+	wait_queue_head_t queue;
 	struct gendisk *disk;
 	size_t size;            /* In sectors */
 };
diff --git a/sub-projects/allocators/tlsf-kmod/tlsf.c b/sub-projects/allocators/tlsf-kmod/tlsf.c
index 157ddd5..495693a 100644
--- a/sub-projects/allocators/tlsf-kmod/tlsf.c
+++ b/sub-projects/allocators/tlsf-kmod/tlsf.c
@@ -320,6 +320,7 @@ static inline void ADD_REGION(void *region, size_t region_size,
 void *tlsf_create_memory_pool(const char *name,
 			get_memory get_mem,
 			put_memory put_mem,
+			get_memory get_mem_nosleep,
 			size_t init_size,
 			size_t max_size,
 			size_t grow_size)
@@ -353,6 +354,7 @@ void *tlsf_create_memory_pool(const char *name,
 	pool->grow_size = grow_size;
 	pool->get_mem = get_mem;
 	pool->put_mem = put_mem;
+	pool->get_mem_nosleep = get_mem_nosleep;
 	strncpy(pool->name, name, MAX_POOL_NAME_LEN);
 	pool->name[MAX_POOL_NAME_LEN - 1] = '\0';
 #if DEBUG
@@ -364,7 +366,7 @@ void *tlsf_create_memory_pool(const char *name,
 	ADD_REGION(region, init_size, pool);
 	pool->init_region = region;
 
-	mutex_init(&pool->lock);
+	spin_lock_init(&pool->spinlock);
 
 	list_add_tail(&pool->list, &pool_list_head);
 
@@ -455,56 +457,93 @@ void tlsf_destroy_memory_pool(void *mem_pool)
 }
 EXPORT_SYMBOL_GPL(tlsf_destroy_memory_pool);
 
+
+
+/**
+ * tlsf_grow_nosleep - force the pool to grow by grow_size, never sleeps;
+ * returns 1 if the pool grew successfully, 0 if it failed
+ * @mem_pool: pool to grow
+ */
+int tlsf_grow_nosleep(void *mem_pool)
+{
+	struct pool *pool = (struct pool *)mem_pool;
+	struct bhdr *region;
+	int ret=0;
+
+	if (pool->get_mem_nosleep && 
+		(region = pool->get_mem_nosleep(pool->grow_size)) ) {
+		
+		spin_lock(&pool->spinlock);
+		ADD_REGION(region, pool->grow_size, pool);
+		spin_unlock(&pool->spinlock);
+		ret = 1;
+	} 
+	
+	return ret;
+}
+EXPORT_SYMBOL_GPL(tlsf_grow_nosleep);
+
+/**
+ * tlsf_grow - force the pool to grow by grow_size, may sleep;
+ * returns 1 if the pool grew successfully, 0 if it failed
+ * @mem_pool: pool to grow
+ */
+int tlsf_grow(void *mem_pool)
+{
+	struct pool *pool = (struct pool *)mem_pool;
+	struct bhdr *region;
+	int ret=0;
+
+	if (pool->get_mem && (region = pool->get_mem(pool->grow_size)) ) {
+		
+		spin_lock(&pool->spinlock);
+		ADD_REGION(region, pool->grow_size, pool);
+		spin_unlock(&pool->spinlock);
+		ret = 1;
+	} 
+	
+	return ret;
+}
+EXPORT_SYMBOL_GPL(tlsf_grow);
+
 /**
- * tlsf_malloc - allocate memory from given pool
+ * tlsf_malloc_atomic - atomically allocate memory from given pool
  * @size: no. of bytes
  * @mem_pool: pool to allocate from
+ * @errno: if the size is too big, *errno is returned as TLSF_ALLOC_FATAL
+ * 	   if the pool needs growing, *errno is TLSF_ALLOC_NEEDGROW,
+ * 	   you should ignore *errno if tlsf_malloc_atomic returns non-NULL.
  */
-void *tlsf_malloc(size_t size, void *mem_pool)
+void *tlsf_malloc_atomic(size_t size, void *mem_pool, int *errno)
 {
 	struct pool *pool = (struct pool *)mem_pool;
-	struct bhdr *b, *b2, *next_b, *region;
+	struct bhdr *b, *b2, *next_b;
 	int fl, sl;
 	size_t tmp_size;
 #if STATS
 	size_t used, total;
 #endif
 
-#if DEBUG
-	unsigned int retries = 0;
-#endif
+	*errno = TLSF_ALLOC_NEEDGROW;
 
 	size = (size < MIN_BLOCK_SIZE) ? MIN_BLOCK_SIZE : ROUNDUP_SIZE(size);
 	/* Rounding up the requested size and calculating fl and sl */
 
-	mutex_lock(&pool->lock);
-retry_find:
+	spin_lock(&pool->spinlock);
 	MAPPING_SEARCH(&size, &fl, &sl);
 
 	/* Searching a free block */
 	if (!(b = FIND_SUITABLE_BLOCK(pool, &fl, &sl))) {
-#if DEBUG
-		/*
-		 * This can happen if there are too many users
-		 * allocating from this pool simultaneously.
-		 */
-		if (unlikely(retries == MAX_RETRY_EXPAND))
-			goto out_locked;
-		retries++;
-#endif
 		/* Not found */
-		if (size > (pool->grow_size - 2 * BHDR_OVERHEAD))
-			goto out_locked;
+		if (size > (pool->grow_size - 2 * BHDR_OVERHEAD)) 
+			*errno = TLSF_ALLOC_FATAL;
+
 		if (pool->max_size && (pool->init_size +
 				pool->num_regions * pool->grow_size
-				> pool->max_size))
-			goto out_locked;
-		mutex_unlock(&pool->lock);
-		if ((region = pool->get_mem(pool->grow_size)) == NULL)
-			goto out;
-		mutex_lock(&pool->lock);
-		ADD_REGION(region, pool->grow_size, pool);
-		goto retry_find;
+				> pool->max_size)) 
+			*errno = TLSF_ALLOC_FATAL;
+
+		goto out;
 	}
 	EXTRACT_BLOCK_HDR(b, pool, fl, sl);
 
@@ -540,18 +579,17 @@ retry_find:
 	stat_setmax(&pool->peak_used, used);
 	stat_setmax(&pool->peak_total, total);
 
-	mutex_unlock(&pool->lock);
+	spin_unlock(&pool->spinlock);
+
 	return (void *)b->ptr.buffer;
 
 	/* Failed alloc */
-out_locked:
-	mutex_unlock(&pool->lock);
-
 out:
+	spin_unlock(&pool->spinlock);
 	stat_inc(&pool->count_failed_alloc);
 	return NULL;
 }
-EXPORT_SYMBOL_GPL(tlsf_malloc);
+EXPORT_SYMBOL_GPL(tlsf_malloc_atomic);
 
 /**
  * tlsf_free - free memory from given pool
@@ -561,7 +599,7 @@ EXPORT_SYMBOL_GPL(tlsf_malloc);
 void tlsf_free(void *ptr, void *mem_pool)
 {
 	struct pool *pool = (struct pool *)mem_pool;
-	struct bhdr *b, *tmp_b;
+	struct bhdr *b, *tmp_b, *freeb=NULL;
 	int fl = 0, sl = 0;
 #if STATS
 	size_t used, total;
@@ -571,7 +609,7 @@ void tlsf_free(void *ptr, void *mem_pool)
 
 	b = (struct bhdr *) ((char *) ptr - BHDR_OVERHEAD);
 
-	mutex_lock(&pool->lock);
+	spin_lock(&pool->spinlock);
 	b->size |= FREE_BLOCK;
 	pool->used_size -= (b->size & BLOCK_SIZE_MASK) + BHDR_OVERHEAD;
 	b->ptr.free_ptr = (struct free_ptr) { NULL, NULL};
@@ -594,7 +632,8 @@ void tlsf_free(void *ptr, void *mem_pool)
 	MAPPING_INSERT(b->size & BLOCK_SIZE_MASK, &fl, &sl);
 
 	if ((b->prev_hdr == NULL) && ((tmp_b->size & BLOCK_SIZE_MASK) == 0)) {
-		pool->put_mem(b);
+		//pool->put_mem(b);
+		freeb = b;
 		pool->num_regions--;
 		pool->used_size -= BHDR_OVERHEAD; /* sentinel block header */
 		stat_inc(&pool->count_region_free);
@@ -614,29 +653,31 @@ out:
 	stat_setmax(&pool->peak_used, used);
 	stat_setmax(&pool->peak_total, total);
 
-	mutex_unlock(&pool->lock);
+	spin_unlock(&pool->spinlock);
+
+	pool->put_mem(freeb);
 }
 EXPORT_SYMBOL_GPL(tlsf_free);
 
 /**
- * tlsf_calloc - allocate and zero-out memory from given pool
+ * tlsf_calloc - atomically allocate and zero-out memory from given pool
  * @size: no. of bytes
  * @mem_pool: pool to allocate from
  */
-void *tlsf_calloc(size_t nelem, size_t elem_size, void *mem_pool)
+void *tlsf_calloc_atomic(size_t nelem, size_t elem_size, void *mem_pool, int *errno)
 {
 	void *ptr;
 
 	if (nelem == 0 || elem_size == 0)
 		return NULL;
 
-	if ((ptr = tlsf_malloc(nelem * elem_size, mem_pool)) == NULL)
+	if ((ptr = tlsf_malloc_atomic(nelem * elem_size, mem_pool, errno)) == NULL)
 		return NULL;
 	memset(ptr, 0, nelem * elem_size);
 
 	return ptr;
 }
-EXPORT_SYMBOL_GPL(tlsf_calloc);
+EXPORT_SYMBOL_GPL(tlsf_calloc_atomic);
 
 static int __init tlsf_init(void)
 {
diff --git a/sub-projects/allocators/tlsf-kmod/tlsf.h b/sub-projects/allocators/tlsf-kmod/tlsf.h
index 6f47444..6c5d93f 100644
--- a/sub-projects/allocators/tlsf-kmod/tlsf.h
+++ b/sub-projects/allocators/tlsf-kmod/tlsf.h
@@ -39,6 +39,7 @@ typedef void (put_memory)(void *ptr);
 extern void *tlsf_create_memory_pool(const char *name,
 					get_memory get_mem,
 					put_memory put_mem,
+					get_memory get_mem_nosleep,
 					size_t init_size,
 					size_t max_size,
 					size_t grow_size);
@@ -53,18 +54,38 @@ extern void *tlsf_create_memory_pool(const char *name,
 extern void tlsf_destroy_memory_pool(void *mem_pool);
 
 /**
- * tlsf_malloc - allocate memory from given pool
+ * tlsf_malloc_atomic - atomically allocate memory from given pool 
  * @size: no. of bytes
  * @mem_pool: pool to allocate from
+ * @errno: if the size is too big, *errno is returned as TLSF_ALLOC_FATAL
+ * 	   if the pool needs growing, *errno is TLSF_ALLOC_NEEDGROW,
+ * 	   you should ignore *errno if tlsf_malloc_atomic returns non-NULL.
  */
-extern void *tlsf_malloc(size_t size, void *mem_pool);
+extern void *tlsf_malloc_atomic(size_t size, void *mem_pool, int *errno);
 
 /**
- * tlsf_calloc - allocate and zero-out memory from given pool
+ * tlsf_grow - force the pool to grow by grow_size, may sleep;
+ * returns 1 if the pool grew successfully, 0 if it failed
+ * @mem_pool: pool to grow
+ */
+extern int tlsf_grow(void *mem_pool);
+
+
+/**
+ * tlsf_grow_nosleep - force the pool to grow by grow_size, never sleeps;
+ * returns 1 if the pool grew successfully, 0 if it failed
+ * @mem_pool: pool to grow
+ */
+
+extern int tlsf_grow_nosleep(void *mem_pool);
+
+
+/**
+ * tlsf_calloc_atomic - allocate and zero-out memory from given pool
  * @size: no. of bytes
  * @mem_pool: pool to allocate from
  */
-extern void *tlsf_calloc(size_t nelem, size_t elem_size, void *mem_pool);
+extern void *tlsf_calloc_atomic(size_t nelem, size_t elem_size, void *mem_pool, int *errno);
 
 /**
  * tlsf_free - free memory from given pool
@@ -90,4 +111,8 @@ extern size_t tlsf_get_used_size(void *mem_pool);
  */
 extern size_t tlsf_get_total_size(void *mem_pool);
 
+#define TLSF_ALLOC_NEEDGROW 0
+#define TLSF_ALLOC_FATAL 1 /* allocation will not be satisfied even with growing */
+
+
 #endif
diff --git a/sub-projects/allocators/tlsf-kmod/tlsf_int.h b/sub-projects/allocators/tlsf-kmod/tlsf_int.h
index 7f97994..d60d38e 100644
--- a/sub-projects/allocators/tlsf-kmod/tlsf_int.h
+++ b/sub-projects/allocators/tlsf-kmod/tlsf_int.h
@@ -135,6 +135,8 @@ struct pool {
 	struct bhdr *matrix[REAL_FLI][MAX_SLI];
 
 	struct mutex lock;
+	spinlock_t spinlock;
+
 
 	size_t init_size;
 	size_t max_size;
@@ -148,6 +150,9 @@ struct pool {
 	get_memory *get_mem;
 	put_memory *put_mem;
 
+	/* User provided for expanding pool without sleeping */
+	get_memory *get_mem_nosleep;
+
 	struct list_head list;
 
 #if STATS

