public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH 9/9] ttm/dma: Implement set_page_caching implementation in the TTM DMA pool code.
  2011-09-29 20:33 [PATCH] TTM DMA pool v1.8 Konrad Rzeszutek Wilk
@ 2011-09-29 20:33 ` Konrad Rzeszutek Wilk
  0 siblings, 0 replies; 3+ messages in thread
From: Konrad Rzeszutek Wilk @ 2011-09-29 20:33 UTC (permalink / raw)
  To: linux-kernel, thellstrom, dri-devel, bskeggs, j.glisse, thomas,
	airlied, airlied, alexdeucher
  Cc: xen-devel, Konrad Rzeszutek Wilk

. which is pretty much like the other TTM pool except it
also handles moving the page to another pool list.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
---
 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c |   96 ++++++++++++++++++++++++++++++
 1 files changed, 96 insertions(+), 0 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 5909d28..cea031e 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -1307,11 +1307,107 @@ static int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
 	mutex_unlock(&_manager->lock);
 	return 0;
 }
+#ifdef CONFIG_X86
+static int ttm_dma_page_set_page_caching(struct page *p,
+					 int flags,
+					 enum ttm_caching_state c_old,
+					 enum ttm_caching_state c_new,
+					 struct device *dev)
+{
+	struct dma_pool *src, *dst;
+	enum pool_type type;
+	struct dma_page *dma_p;
+	bool found = false;
+	unsigned long irq_flags;
+	int ret = 0;
+
+	if (!p)
+		return 0;
+
+	if (PageHighMem(p))
+		return 0;
+
+	type = ttm_to_type(flags, c_old);
+	src = ttm_dma_find_pool(dev, type);
+	if (!src) {
+		WARN_ON(!src);
+		return -ENOMEM;
+	}
+	type = ttm_to_type(flags, c_new);
+	dst = ttm_dma_find_pool(dev, type);
+	if (!dst) {
+		gfp_t gfp_flags;
+		if (flags & TTM_PAGE_FLAG_DMA32)
+			gfp_flags = GFP_USER | GFP_DMA32;
+		else
+			gfp_flags = GFP_HIGHUSER;
+
+		if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+			gfp_flags |= __GFP_ZERO;
+
+		dst = ttm_dma_pool_init(dev, gfp_flags, type);
+		if (IS_ERR_OR_NULL(dst))
+			return -ENOMEM;
+	}
+
+	dev_dbg(dev, "(%d) Caching %p (%p) from %x to %x.\n", current->pid,
+		p, page_address(p), c_old, c_new);
+
+	if (c_old != tt_cached) {
+		/* p isn't in the default caching state, set it to
+		 * writeback first to free its current memtype. */
+
+		ret = set_pages_wb(p, 1);
+		if (ret)
+			return ret;
+	}
 
+	if (c_new == tt_wc)
+		ret = set_memory_wc((unsigned long) page_address(p), 1);
+	else if (c_new == tt_uncached)
+		ret = set_pages_uc(p, 1);
+
+	if (ret)
+		return ret;
+
+	dev_dbg(src->dev, "(%s:%d) Moving %p (%p) to %s.\n", src->name,
+		current->pid, p, page_address(p), dst->name);
+
+	/* To make it faster we only take the spinlock on list
+	 * removal, and later on adding the page to the destination pool. */
+	spin_lock_irqsave(&src->lock, irq_flags);
+	list_for_each_entry(dma_p, &src->page_list, page_list) {
+		if (virt_to_page(dma_p->vaddr) != p) {
+			pr_debug("%s: (%s:%d) Skipping %p (%p) (DMA:0x%lx)\n",
+				src->dev_name, src->name, current->pid,
+				dma_p->vaddr,
+				virt_to_page(dma_p->vaddr),
+				(unsigned long)dma_p->dma);
+			continue;
+		}
+		list_del(&dma_p->page_list);
+		src->npages_in_use -= 1;
+		found = true;
+		break;
+	}
+	spin_unlock_irqrestore(&src->lock, irq_flags);
+	if (!found)
+		return -ENODEV;
+
+	spin_lock_irqsave(&dst->lock, irq_flags);
+	list_add(&dma_p->page_list, &dst->page_list);
+	dst->npages_in_use++;
+	spin_unlock_irqrestore(&dst->lock, irq_flags);
+	return 0;
+}
+#endif
 struct ttm_page_alloc_func ttm_page_alloc_dma = {
 	.get_pages	= ttm_dma_get_pages,
 	.put_pages	= ttm_dma_put_pages,
 	.alloc_init	= ttm_dma_page_alloc_init,
 	.alloc_fini	= ttm_dma_page_alloc_fini,
 	.debugfs	= ttm_dma_page_alloc_debugfs,
+#ifdef CONFIG_X86
+	.set_caching	= ttm_dma_page_set_page_caching,
+#endif
 };
-- 
1.7.4.1


^ permalink raw reply related	[flat|nested] 3+ messages in thread

* Re: [PATCH 9/9] ttm/dma: Implement set_page_caching implementation in the TTM DMA pool code.
@ 2011-09-29 22:16 Kevin Shanahan
  2011-09-29 22:45 ` Konrad Rzeszutek Wilk
  0 siblings, 1 reply; 3+ messages in thread
From: Kevin Shanahan @ 2011-09-29 22:16 UTC (permalink / raw)
  To: Konrad Rzeszutek Wilk; +Cc: linux-kernel

On 2011-09-29 20:33:52, Konrad Rzeszutek Wilk wrote:
> . which is pretty much like the other TTM pool except it
> also handles moving the page to another pool list.
> 
> Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
> ---
>  drivers/gpu/drm/ttm/ttm_page_alloc_dma.c |   96 ++++++++++++++++++++++++++++++
>  1 files changed, 96 insertions(+), 0 deletions(-)
> 
> diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
> index 5909d28..cea031e 100644
> --- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
> +++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
> @@ -1307,11 +1307,107 @@ static int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
>  	mutex_unlock(&_manager->lock);
>  	return 0;
>  }
> +#ifdef CONFIG_X86
> +static int ttm_dma_page_set_page_caching(struct page *p,
> +					 int flags,
> +					 enum ttm_caching_state c_old,
> +					 enum ttm_caching_state c_new,
> +					 struct device *dev)
> +{
> +	struct dma_pool *src, *dst;
> +	enum pool_type type;
> +	struct dma_page *dma_p;
> +	bool found = false;
> +	unsigned long irq_flags;
> +	int ret = 0;
> +
> +	if (!p)
> +		return 0;
> +
> +	if (PageHighMem(p))
> +		return 0;
> +
> +	type = ttm_to_type(flags, c_old);
> +	src = ttm_dma_find_pool(dev, type);
> +	if (!src) {
> +		WARN_ON(!src);
> +		return -ENOMEM;
> +	}
> +	type = ttm_to_type(flags, c_new);
> +	dst = ttm_dma_find_pool(dev, type);
> +	if (!dst) {
> +		gfp_t gfp_flags;
> +		if (flags & TTM_PAGE_FLAG_DMA32)
> +			gfp_flags = GFP_USER | GFP_DMA32;
> +		else
> +			gfp_flags = GFP_HIGHUSER;
> +
> +		if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
> +			gfp_flags |= __GFP_ZERO;
> +
> +		dst = ttm_dma_pool_init(dev, gfp_flags, type);
> +		if (IS_ERR_OR_NULL(dst))
> +			return -ENOMEM;
> +	}
> +
> +	dev_dbg(dev, "(%d) Caching %p (%p) from %x to %x.\n", current->pid,
> +		p, page_address(p), c_old, c_new);
> +
> +	if (c_old != tt_cached) {
> +		/* p isn't in the default caching state, set it to
> +		 * writeback first to free its current memtype. */
> +
> +		ret = set_pages_wb(p, 1);
> +		if (ret)
> +			return ret;
> +	}
>  
> +	if (c_new == tt_wc)
> +		ret = set_memory_wc((unsigned long) page_address(p), 1);
> +	else if (c_new == tt_uncached)
> +		ret = set_pages_uc(p, 1);
> +
> +	if (ret)
> +		return ret;
> +
> +	dev_dbg(src->dev, "(%s:%d) Moving %p (%p) to %s.\n", src->name,
> +		current->pid, p, page_address(p), dst->name);
> +
> +	/* To make it faster we only take the spinlock on list
> +	 * removal, and later on adding the page to the destination pool. */
> +	spin_lock_irqsave(&src->lock, irq_flags);
> +	list_for_each_entry(dma_p, &src->page_list, page_list) {
> +		if (virt_to_page(dma_p->vaddr) != p) {
> +			pr_debug("%s: (%s:%d) Skipping %p (%p) (DMA:0x%lx)\n",
> +				src->dev_name, src->name, current->pid,
> +				dma_p->vaddr,
> +				virt_to_page(dma_p->vaddr),
> +				(unsigned long)dma_p->dma);
> +			continue;
> +		}
> +		list_del(&dma_p->page_list);
> +		src->npages_in_use -= 1;
> +		found = true;
> +		break;
> +	}
> +	spin_lock_irqsave(&src->lock, irq_flags);
        ^^^^^^^^^^^^^^^^^

Was this meant to be spin_unlock_irqrestore?

Cheers,
Kevin.

> +	if (!found)
> +		return -ENODEV;
> +
> +	spin_lock_irqsave(&dst->lock, irq_flags);
> +	list_add(&dma_p->page_list, &dst->page_list);
> +	dst->npages_in_use++;
> +	spin_unlock_irqrestore(&dst->lock, irq_flags);
> +	return 0;
> +};
> +#endif
>  struct ttm_page_alloc_func ttm_page_alloc_dma = {
>  	.get_pages	= ttm_dma_get_pages,
>  	.put_pages	= ttm_dma_put_pages,
>  	.alloc_init	= ttm_dma_page_alloc_init,
>  	.alloc_fini	= ttm_dma_page_alloc_fini,
>  	.debugfs	= ttm_dma_page_alloc_debugfs,
> +#ifdef CONFIG_X86
> +	.set_caching	= ttm_dma_page_set_page_caching,
> +#endif
>  };
> -- 
> 1.7.4.1
> 
> --
> To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html
> Please read the FAQ at  http://www.tux.org/lkml/etc.

^ permalink raw reply	[flat|nested] 3+ messages in thread

* Re: [PATCH 9/9] ttm/dma: Implement set_page_caching implementation in the TTM DMA pool code.
  2011-09-29 22:16 [PATCH 9/9] ttm/dma: Implement set_page_caching implementation in the TTM DMA pool code Kevin Shanahan
@ 2011-09-29 22:45 ` Konrad Rzeszutek Wilk
  0 siblings, 0 replies; 3+ messages in thread
From: Konrad Rzeszutek Wilk @ 2011-09-29 22:45 UTC (permalink / raw)
  To: Kevin Shanahan; +Cc: linux-kernel

On Fri, Sep 30, 2011 at 07:46:15AM +0930, Kevin Shanahan wrote:
> On 2011-09-29 20:33:52, Konrad Rzeszutek Wilk wrote:
> > . which is pretty much like the other TTM pool except it
> > also handles moving the page to another pool list.
> > 
> > Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
> > ---
> >  drivers/gpu/drm/ttm/ttm_page_alloc_dma.c |   96 ++++++++++++++++++++++++++++++
> >  1 files changed, 96 insertions(+), 0 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
> > index 5909d28..cea031e 100644
> > --- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
> > +++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
> > @@ -1307,11 +1307,107 @@ static int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
> >  	mutex_unlock(&_manager->lock);
> >  	return 0;
> >  }
> > +#ifdef CONFIG_X86
> > +static int ttm_dma_page_set_page_caching(struct page *p,
> > +					 int flags,
> > +					 enum ttm_caching_state c_old,
> > +					 enum ttm_caching_state c_new,
> > +					 struct device *dev)
> > +{
> > +	struct dma_pool *src, *dst;
> > +	enum pool_type type;
> > +	struct dma_page *dma_p;
> > +	bool found = false;
> > +	unsigned long irq_flags;
> > +	int ret = 0;
> > +
> > +	if (!p)
> > +		return 0;
> > +
> > +	if (PageHighMem(p))
> > +		return 0;
> > +
> > +	type = ttm_to_type(flags, c_old);
> > +	src = ttm_dma_find_pool(dev, type);
> > +	if (!src) {
> > +		WARN_ON(!src);
> > +		return -ENOMEM;
> > +	}
> > +	type = ttm_to_type(flags, c_new);
> > +	dst = ttm_dma_find_pool(dev, type);
> > +	if (!dst) {
> > +		gfp_t gfp_flags;
> > +		if (flags & TTM_PAGE_FLAG_DMA32)
> > +			gfp_flags = GFP_USER | GFP_DMA32;
> > +		else
> > +			gfp_flags = GFP_HIGHUSER;
> > +
> > +		if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
> > +			gfp_flags |= __GFP_ZERO;
> > +
> > +		dst = ttm_dma_pool_init(dev, gfp_flags, type);
> > +		if (IS_ERR_OR_NULL(dst))
> > +			return -ENOMEM;
> > +	}
> > +
> > +	dev_dbg(dev, "(%d) Caching %p (%p) from %x to %x.\n", current->pid,
> > +		p, page_address(p), c_old, c_new);
> > +
> > +	if (c_old != tt_cached) {
> > +		/* p isn't in the default caching state, set it to
> > +		 * writeback first to free its current memtype. */
> > +
> > +		ret = set_pages_wb(p, 1);
> > +		if (ret)
> > +			return ret;
> > +	}
> >  
> > +	if (c_new == tt_wc)
> > +		ret = set_memory_wc((unsigned long) page_address(p), 1);
> > +	else if (c_new == tt_uncached)
> > +		ret = set_pages_uc(p, 1);
> > +
> > +	if (ret)
> > +		return ret;
> > +
> > +	dev_dbg(src->dev, "(%s:%d) Moving %p (%p) to %s.\n", src->name,
> > +		current->pid, p, page_address(p), dst->name);
> > +
> > +	/* To make it faster we only take the spinlock on list
> > +	 * removal, and later on adding the page to the destination pool. */
> > +	spin_lock_irqsave(&src->lock, irq_flags);
> > +	list_for_each_entry(dma_p, &src->page_list, page_list) {
> > +		if (virt_to_page(dma_p->vaddr) != p) {
> > +			pr_debug("%s: (%s:%d) Skipping %p (%p) (DMA:0x%lx)\n",
> > +				src->dev_name, src->name, current->pid,
> > +				dma_p->vaddr,
> > +				virt_to_page(dma_p->vaddr),
> > +				(unsigned long)dma_p->dma);
> > +			continue;
> > +		}
> > +		list_del(&dma_p->page_list);
> > +		src->npages_in_use -= 1;
> > +		found = true;
> > +		break;
> > +	}
> > +	spin_lock_irqsave(&src->lock, irq_flags);
>         ^^^^^^^^^^^^^^^^^
> 
> Was this meant to be spin_unlock_irqrestore?

Yup! Fixing it up. Hmm.... how did it run. This is called from the TTM shrinker
when the memory pressure is low, unless I am reading the code wrong.

^ permalink raw reply	[flat|nested] 3+ messages in thread

end of thread, other threads:[~2011-09-29 22:45 UTC | newest]

Thread overview: 3+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2011-09-29 22:16 [PATCH 9/9] ttm/dma: Implement set_page_caching implementation in the TTM DMA pool code Kevin Shanahan
2011-09-29 22:45 ` Konrad Rzeszutek Wilk
  -- strict thread matches above, loose matches on Subject: below --
2011-09-29 20:33 [PATCH] TTM DMA pool v1.8 Konrad Rzeszutek Wilk
2011-09-29 20:33 ` [PATCH 9/9] ttm/dma: Implement set_page_caching implementation in the TTM DMA pool code Konrad Rzeszutek Wilk

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox