From: Joonsoo Kim <iamjoonsoo.kim@lge.com>
To: Andrew Morton <akpm@linux-foundation.org>,
	"Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>,
	Marek Szyprowski <m.szyprowski@samsung.com>,
	Michal Nazarewicz <mina86@mina86.com>
Cc: Minchan Kim <minchan@kernel.org>,
	Russell King - ARM Linux <linux@arm.linux.org.uk>,
	Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
	Paolo Bonzini <pbonzini@redhat.com>,
	Gleb Natapov <gleb@kernel.org>, Alexander Graf <agraf@suse.de>,
	Benjamin Herrenschmidt <benh@kernel.crashing.org>,
	Paul Mackerras <paulus@samba.org>,
	linux-mm@kvack.org, linux-kernel@vger.kernel.org,
	linux-arm-kernel@lists.infradead.org, kvm@vger.kernel.org,
	kvm-ppc@vger.kernel.org, linuxppc-dev@lists.ozlabs.org,
	Zhang Yanfei <zhangyanfei@cn.fujitsu.com>,
	Joonsoo Kim <iamjoonsoo.kim@lge.com>
Subject: [PATCH v3 -next 4/9] DMA, CMA: support arbitrary bitmap granularity
Date: Mon, 16 Jun 2014 14:40:46 +0900
Message-ID: <1402897251-23639-5-git-send-email-iamjoonsoo.kim@lge.com>
In-Reply-To: <1402897251-23639-1-git-send-email-iamjoonsoo.kim@lge.com>

PPC KVM's CMA area management needs arbitrary bitmap granularity: it
reserves a very large memory region and manages it with a bitmap in
which one bit stands for several pages, to reduce management overhead.
Support arbitrary bitmap granularity here in preparation for the
upcoming generalization.
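
For example (numbers are illustrative, not taken from this patch): with
4KB pages and order_per_bit = 4, one bitmap bit covers 2^4 = 16 pages,
so a 1GB area needs 262144 / 16 = 16384 bitmap bits (2KB of bitmap)
instead of 262144 bits (32KB). Passing order_per_bit = 0, as the
existing DMA reservation path below does, keeps the current
one-bit-per-page behaviour.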

v3: use consistent local variable name (Minchan)
    use unsigned int for order_per_bit (Michal)
    change clear_cma_bitmap to cma_clear_bitmap for consistency (Michal)
    remove unneeded local variable bitmap_maxno (Michal)

Acked-by: Michal Nazarewicz <mina86@mina86.com>
Acked-by: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Acked-by: Minchan Kim <minchan@kernel.org>
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>

diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 5f62c28..c6eeb2c 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -38,6 +38,7 @@ struct cma {
 	unsigned long	base_pfn;
 	unsigned long	count;
 	unsigned long	*bitmap;
+	unsigned int order_per_bit; /* Order of pages represented by one bit */
 	struct mutex	lock;
 };
 
@@ -157,9 +158,37 @@ void __init dma_contiguous_reserve(phys_addr_t limit)
 
 static DEFINE_MUTEX(cma_mutex);
 
+static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order)
+{
+	return (1 << (align_order - cma->order_per_bit)) - 1;
+}
+
+static unsigned long cma_bitmap_maxno(struct cma *cma)
+{
+	return cma->count >> cma->order_per_bit;
+}
+
+static unsigned long cma_bitmap_pages_to_bits(struct cma *cma,
+						unsigned long pages)
+{
+	return ALIGN(pages, 1 << cma->order_per_bit) >> cma->order_per_bit;
+}
+
+static void cma_clear_bitmap(struct cma *cma, unsigned long pfn, int count)
+{
+	unsigned long bitmap_no, bitmap_count;
+
+	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
+	bitmap_count = cma_bitmap_pages_to_bits(cma, count);
+
+	mutex_lock(&cma->lock);
+	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
+	mutex_unlock(&cma->lock);
+}
+
 static int __init cma_activate_area(struct cma *cma)
 {
-	int bitmap_size = BITS_TO_LONGS(cma->count) * sizeof(long);
+	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
 	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
 	unsigned i = cma->count >> pageblock_order;
 	struct zone *zone;
@@ -215,9 +244,9 @@ static int __init cma_init_reserved_areas(void)
 core_initcall(cma_init_reserved_areas);
 
 static int __init __dma_contiguous_reserve_area(phys_addr_t size,
-				phys_addr_t base, phys_addr_t limit,
-				phys_addr_t alignment,
-				struct cma **res_cma, bool fixed)
+			phys_addr_t base, phys_addr_t limit,
+			phys_addr_t alignment, unsigned int order_per_bit,
+			struct cma **res_cma, bool fixed)
 {
 	struct cma *cma = &cma_areas[cma_area_count];
 	int ret = 0;
@@ -249,6 +278,10 @@ static int __init __dma_contiguous_reserve_area(phys_addr_t size,
 	size = ALIGN(size, alignment);
 	limit &= ~(alignment - 1);
 
+	/* size should be aligned with order_per_bit */
+	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
+		return -EINVAL;
+
 	/* Reserve memory */
 	if (base && fixed) {
 		if (memblock_is_region_reserved(base, size) ||
@@ -273,6 +306,7 @@ static int __init __dma_contiguous_reserve_area(phys_addr_t size,
 	 */
 	cma->base_pfn = PFN_DOWN(base);
 	cma->count = size >> PAGE_SHIFT;
+	cma->order_per_bit = order_per_bit;
 	*res_cma = cma;
 	cma_area_count++;
 
@@ -308,7 +342,7 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
 {
 	int ret;
 
-	ret = __dma_contiguous_reserve_area(size, base, limit, 0,
+	ret = __dma_contiguous_reserve_area(size, base, limit, 0, 0,
 						res_cma, fixed);
 	if (ret)
 		return ret;
@@ -320,17 +354,11 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
 	return 0;
 }
 
-static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
-{
-	mutex_lock(&cma->lock);
-	bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
-	mutex_unlock(&cma->lock);
-}
-
 static struct page *__dma_alloc_from_contiguous(struct cma *cma, int count,
 				       unsigned int align)
 {
-	unsigned long mask, pfn, pageno, start = 0;
+	unsigned long mask, pfn, start = 0;
+	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
 	struct page *page = NULL;
 	int ret;
 
@@ -343,18 +371,19 @@ static struct page *__dma_alloc_from_contiguous(struct cma *cma, int count,
 	if (!count)
 		return NULL;
 
-	mask = (1 << align) - 1;
-
+	mask = cma_bitmap_aligned_mask(cma, align);
+	bitmap_maxno = cma_bitmap_maxno(cma);
+	bitmap_count = cma_bitmap_pages_to_bits(cma, count);
 
 	for (;;) {
 		mutex_lock(&cma->lock);
-		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
-						    start, count, mask);
-		if (pageno >= cma->count) {
+		bitmap_no = bitmap_find_next_zero_area(cma->bitmap,
+				bitmap_maxno, start, bitmap_count, mask);
+		if (bitmap_no >= bitmap_maxno) {
 			mutex_unlock(&cma->lock);
 			break;
 		}
-		bitmap_set(cma->bitmap, pageno, count);
+		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
 		/*
 		 * It's safe to drop the lock here. We've marked this region for
 		 * our exclusive use. If the migration fails we will take the
@@ -362,7 +391,7 @@ static struct page *__dma_alloc_from_contiguous(struct cma *cma, int count,
 		 */
 		mutex_unlock(&cma->lock);
 
-		pfn = cma->base_pfn + pageno;
+		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
 		mutex_lock(&cma_mutex);
 		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
 		mutex_unlock(&cma_mutex);
@@ -370,14 +399,14 @@ static struct page *__dma_alloc_from_contiguous(struct cma *cma, int count,
 			page = pfn_to_page(pfn);
 			break;
 		} else if (ret != -EBUSY) {
-			clear_cma_bitmap(cma, pfn, count);
+			cma_clear_bitmap(cma, pfn, count);
 			break;
 		}
-		clear_cma_bitmap(cma, pfn, count);
+		cma_clear_bitmap(cma, pfn, count);
 		pr_debug("%s(): memory range at %p is busy, retrying\n",
 			 __func__, pfn_to_page(pfn));
 		/* try again with a bit different memory target */
-		start = pageno + mask + 1;
+		start = bitmap_no + mask + 1;
 	}
 
 	pr_debug("%s(): returned %p\n", __func__, page);
@@ -424,7 +453,7 @@ static bool __dma_release_from_contiguous(struct cma *cma, struct page *pages,
 	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
 
 	free_contig_range(pfn, count);
-	clear_cma_bitmap(cma, pfn, count);
+	cma_clear_bitmap(cma, pfn, count);
 
 	return true;
 }
-- 
1.7.9.5
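
[Editorial appendix, not part of the patch] A minimal userspace sketch
that mirrors the new bitmap-index arithmetic, so the pfn/bit
conversions above can be sanity-checked. The helper names are copied
from the patch; the trimmed struct cma and the example values (a
hypothetical 1GB area with order_per_bit = 4) are made up for
illustration.

#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

struct cma {
	unsigned long base_pfn;
	unsigned long count;		/* area size in pages */
	unsigned int order_per_bit;	/* one bit covers 1 << order_per_bit pages */
};

/* Number of bitmap bits backing the whole area. */
static unsigned long cma_bitmap_maxno(struct cma *cma)
{
	return cma->count >> cma->order_per_bit;
}

/* Round a page count up to whole bitmap bits. */
static unsigned long cma_bitmap_pages_to_bits(struct cma *cma,
						unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}

int main(void)
{
	/* Hypothetical 1GB area (262144 4KB pages), 16 pages per bit. */
	struct cma cma = {
		.base_pfn = 0x40000,
		.count = 262144,
		.order_per_bit = 4,
	};
	unsigned long pfn = cma.base_pfn + 160;	/* a page inside the area */

	printf("bits for whole area : %lu\n", cma_bitmap_maxno(&cma));		/* 16384 */
	printf("bits for 100 pages  : %lu\n",
	       cma_bitmap_pages_to_bits(&cma, 100));				/* 7 */
	printf("bit index of the pfn: %lu\n",
	       (pfn - cma.base_pfn) >> cma.order_per_bit);			/* 10 */
	return 0;
}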

