Diffstat (limited to 'mm/cma.c')
-rw-r--r--  mm/cma.c  22
1 file changed, 16 insertions, 6 deletions
diff --git a/mm/cma.c b/mm/cma.c
index de5bc0c81fc2..95a8788e54d3 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -86,6 +86,7 @@ static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
spin_lock_irqsave(&cma->lock, flags);
bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
+ cma->available_count += count;
spin_unlock_irqrestore(&cma->lock, flags);
}
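The hunk above pairs the bitmap clear with the counter credit inside the same critical section, so available_count can never be observed out of step with the bitmap; the final hunk of this diff does the mirror-image debit on the allocation side. Below is a minimal user-space model of that pairing, assuming a single-word bitmap and nbits < 64; struct cma_model and its helpers are hypothetical stand-ins for struct cma, cma->lock and the kernel bitmap API, with a pthread mutex standing in for the spinlock.

    #include <pthread.h>
    #include <stdbool.h>

    /* Hypothetical stand-in for struct cma: one 64-bit word of bitmap,
     * guarded by a mutex where the kernel uses a spinlock. */
    struct cma_model {
    	pthread_mutex_t lock;
    	unsigned long long bitmap;     /* 1 = allocated unit */
    	unsigned long count;           /* total units (<= 64) */
    	unsigned long available_count; /* free units */
    };

    /* Mirrors cma_clear_bitmap(): clear the bits and credit the
     * counter under the same lock, so the two stay in sync. */
    static void model_free(struct cma_model *m, unsigned int bit,
    		       unsigned int nbits)
    {
    	pthread_mutex_lock(&m->lock);
    	m->bitmap &= ~(((1ULL << nbits) - 1) << bit);
    	m->available_count += nbits;
    	pthread_mutex_unlock(&m->lock);
    }

    /* Mirrors the __cma_alloc() hunk further down: fail fast when the
     * request cannot possibly fit, otherwise mark the bits and debit
     * the counter before dropping the lock. */
    static bool model_alloc(struct cma_model *m, unsigned int bit,
    			unsigned int nbits)
    {
    	unsigned long long mask = ((1ULL << nbits) - 1) << bit;
    	bool ok = false;

    	pthread_mutex_lock(&m->lock);
    	if (nbits <= m->available_count && !(m->bitmap & mask)) {
    		m->bitmap |= mask;
    		m->available_count -= nbits;
    		ok = true;
    	}
    	pthread_mutex_unlock(&m->lock);
    	return ok;
    }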
@@ -133,7 +134,7 @@ out_error:
free_reserved_page(pfn_to_page(pfn));
}
totalcma_pages -= cma->count;
- cma->count = 0;
+ cma->available_count = cma->count = 0;
pr_err("CMA area %s could not be activated\n", cma->name);
}
@@ -206,7 +207,7 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
snprintf(cma->name, CMA_MAX_NAME, "cma%d\n", cma_area_count);
cma->base_pfn = PFN_DOWN(base);
- cma->count = size >> PAGE_SHIFT;
+ cma->available_count = cma->count = size >> PAGE_SHIFT;
cma->order_per_bit = order_per_bit;
*res_cma = cma;
cma_area_count++;
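For concreteness: with the usual 4 KiB pages, a 64 MiB reservation comes out as 16384 pages, all of them initially available. A quick user-space check of the arithmetic in the hunk above (PAGE_SHIFT = 12 and the sample base/size are assumptions of this sketch, not values from the patch):

    #include <stdio.h>

    #define PAGE_SHIFT 12                 /* assume 4 KiB pages */
    #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

    int main(void)
    {
    	unsigned long long base = 0x40000000ULL; /* sample base */
    	unsigned long long size = 64ULL << 20;   /* 64 MiB */
    	unsigned long base_pfn = PFN_DOWN(base);
    	unsigned long available_count, count;

    	/* Mirrors the hunk above: both counters start equal. */
    	available_count = count = size >> PAGE_SHIFT;
    	printf("base_pfn=%lu count=%lu available=%lu\n",
    	       base_pfn, count, available_count); /* count=16384 */
    	return 0;
    }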
@@ -390,7 +391,7 @@ static void cma_debug_show_areas(struct cma *cma)
{
unsigned long next_zero_bit, next_set_bit, nr_zero;
unsigned long start = 0;
- unsigned long nr_part, nr_total = 0;
+ unsigned long nr_part;
unsigned long nbits = cma_bitmap_maxno(cma);
spin_lock_irq(&cma->lock);
@@ -402,12 +403,12 @@ static void cma_debug_show_areas(struct cma *cma)
next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
nr_zero = next_set_bit - next_zero_bit;
nr_part = nr_zero << cma->order_per_bit;
- pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part,
+ pr_cont("%s%lu@%lu", start ? "+" : "", nr_part,
next_zero_bit);
- nr_total += nr_part;
start = next_zero_bit + nr_zero;
}
- pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
+ pr_cont("=> %lu free of %lu total pages\n", cma->available_count,
+ cma->count);
spin_unlock_irq(&cma->lock);
}
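Two things happen in this hunk: the summary line now reports the maintained available_count instead of a locally accumulated nr_total, and the '+' separator is gated on start instead, which works because start is zero exactly when the first range is printed and strictly positive afterwards (every printed range advances it by at least one bit). A standalone sketch of the resulting output, assuming order_per_bit == 0 so one bit maps to one page; the sample bitmap and the open-coded bit scanning are illustrative only (the kernel uses find_next_zero_bit()/find_next_bit() and pr_cont()):

    #include <stdio.h>

    int main(void)
    {
    	/* 16-unit sample map, 1 = allocated: units 2-3 and 9 taken. */
    	unsigned int bitmap = (3u << 2) | (1u << 9);
    	unsigned int nbits = 16, count = 16, available = 13;
    	unsigned int start = 0;

    	while (start < nbits) {
    		unsigned int zero = start, set;

    		while (zero < nbits && (bitmap & (1u << zero)))
    			zero++;
    		if (zero >= nbits)
    			break;
    		set = zero;
    		while (set < nbits && !(bitmap & (1u << set)))
    			set++;
    		/* As in the patch: start is nonzero exactly after the
    		 * first range has been printed, so it gates the '+'. */
    		printf("%s%u@%u", start ? "+" : "", set - zero, zero);
    		start = set;
    	}
    	/* Prints: 2@0+5@4+6@10 => 13 free of 16 total pages */
    	printf(" => %u free of %u total pages\n", available, count);
    	return 0;
    }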
@@ -444,6 +445,14 @@ static struct page *__cma_alloc(struct cma *cma, unsigned long count,
for (;;) {
spin_lock_irq(&cma->lock);
+ /*
+ * If the request is larger than the available number
+ * of pages, stop right away.
+ */
+ if (count > cma->available_count) {
+ spin_unlock_irq(&cma->lock);
+ break;
+ }
bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
bitmap_maxno, start, bitmap_count, mask,
offset);
@@ -452,6 +461,7 @@ static struct page *__cma_alloc(struct cma *cma, unsigned long count,
break;
}
bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
+ cma->available_count -= count;
/*
* It's safe to drop the lock here. We've marked this region for
* our exclusive use. If the migration fails we will take the