public inbox for linux-omap@vger.kernel.org
 help / color / mirror / Atom feed
* RE: [PATCH 1/4] [OMAPZOOM] [UPDATE]DSPBRIDGE: Memory lock for DMM.
@ 2009-04-01  0:54 Guzman Lugo, Fernando
  2009-04-01  7:25 ` Artem Bityutskiy
  0 siblings, 1 reply; 9+ messages in thread
From: Guzman Lugo, Fernando @ 2009-04-01  0:54 UTC (permalink / raw)
  To: Pandita, Vikram; +Cc: linux-omap@vger.kernel.org



Hi, 

	New update of this patch with the comments from Ameya Palande about some build warnings.

>From a5ab7e038b72e62358279ef3c4e64b2f260ceeee Mon Sep 17 00:00:00 2001
From: Hari Kanigeri <h-kanigeri2@ti.com>
Date: Thu, 26 Mar 2009 15:47:50 -0500
Subject: [PATCH] DSPBRIDGE: Memory lock for DMM.

Lock down the pages that are mapped to DSP virtual memory to prevent from
getting swapped out

Signed-off-by: Hari Kanigeri <h-kanigeri2@ti.com>
---
 arch/arm/plat-omap/include/dspbridge/dbdefs.h |    3 +
 drivers/dsp/bridge/hw/hw_mmu.h                |    1 +
 drivers/dsp/bridge/wmd/io_sm.c                |   24 +++--
 drivers/dsp/bridge/wmd/tiomap3430.c           |  133 +++++++++++++++++++++++-
 4 files changed, 144 insertions(+), 17 deletions(-)

diff --git a/arch/arm/plat-omap/include/dspbridge/dbdefs.h b/arch/arm/plat-omap/include/dspbridge/dbdefs.h
index 7f5a2bf..9782693 100644
--- a/arch/arm/plat-omap/include/dspbridge/dbdefs.h
+++ b/arch/arm/plat-omap/include/dspbridge/dbdefs.h
@@ -571,6 +571,9 @@ bit 6 - MMU element size = 64bit (valid only for non mixed page entries)
 
 #define DSP_MAPVMALLOCADDR         0x00000080
 
+#define DSP_MAPDONOTLOCK	   0x00000100
+
+
 #define GEM_CACHE_LINE_SIZE     128
 #define GEM_L1P_PREFETCH_SIZE   128
 
diff --git a/drivers/dsp/bridge/hw/hw_mmu.h b/drivers/dsp/bridge/hw/hw_mmu.h
index 065f0dd..b1e2458 100644
--- a/drivers/dsp/bridge/hw/hw_mmu.h
+++ b/drivers/dsp/bridge/hw/hw_mmu.h
@@ -51,6 +51,7 @@ struct HW_MMUMapAttrs_t {
 	enum HW_Endianism_t     endianism;
 	enum HW_ElementSize_t   elementSize;
 	enum HW_MMUMixedSize_t  mixedSize;
+	bool donotlockmpupage;
 } ;
 
 extern HW_STATUS HW_MMU_Enable(const void __iomem *baseAddress);
diff --git a/drivers/dsp/bridge/wmd/io_sm.c b/drivers/dsp/bridge/wmd/io_sm.c
index bd936eb..301bd72 100755
--- a/drivers/dsp/bridge/wmd/io_sm.c
+++ b/drivers/dsp/bridge/wmd/io_sm.c
@@ -553,6 +553,8 @@ func_cont1:
 	mapAttrs = DSP_MAPLITTLEENDIAN;
 	mapAttrs |= DSP_MAPPHYSICALADDR;
 	mapAttrs |= DSP_MAPELEMSIZE32;
+	mapAttrs |= DSP_MAPDONOTLOCK;
+
 	while (numBytes && DSP_SUCCEEDED(status)) {
 		/* To find the max. page size with which both PA & VA are
 		 * aligned */
@@ -690,18 +692,18 @@ func_cont:
 	mapAttrs = DSP_MAPLITTLEENDIAN;
 	mapAttrs |= DSP_MAPPHYSICALADDR;
 	mapAttrs |= DSP_MAPELEMSIZE32;
+	mapAttrs |= DSP_MAPDONOTLOCK;
+
 	/* Map the L4 peripherals */
-	{
-		i = 0;
-		while (L4PeripheralTable[i].physAddr && DSP_SUCCEEDED(status)) {
-				status = hIOMgr->pIntfFxns->pfnBrdMemMap
-					(hIOMgr->hWmdContext,
-					L4PeripheralTable[i].physAddr,
-					L4PeripheralTable[i].dspVirtAddr,
-					HW_PAGE_SIZE_4KB, mapAttrs);
-				DBC_Assert(DSP_SUCCEEDED(status));
-				i++;
-		}
+	i = 0;
+	while (L4PeripheralTable[i].physAddr && DSP_SUCCEEDED(status)) {
+		status = hIOMgr->pIntfFxns->pfnBrdMemMap
+			(hIOMgr->hWmdContext, L4PeripheralTable[i].physAddr,
+			L4PeripheralTable[i].dspVirtAddr, HW_PAGE_SIZE_4KB,
+			mapAttrs);
+		if (DSP_FAILED(status))
+			break;
+		i++;
 	}
 
 	if (DSP_SUCCEEDED(status)) {
diff --git a/drivers/dsp/bridge/wmd/tiomap3430.c b/drivers/dsp/bridge/wmd/tiomap3430.c
index 983465a..c9849e3 100755
--- a/drivers/dsp/bridge/wmd/tiomap3430.c
+++ b/drivers/dsp/bridge/wmd/tiomap3430.c
@@ -28,6 +28,8 @@
 
 /*  ----------------------------------- Host OS */
 #include <dspbridge/host_os.h>
+#include <linux/mm.h>
+#include <linux/mmzone.h>
 #include "../arch/arm/mach-omap2/prcm-regs.h"
 #include "../arch/arm/mach-omap2/cm-regbits-34xx.h"
 #include "../arch/arm/mach-omap2/ti-compat.h"
@@ -90,6 +92,7 @@
 #define MMU_LARGE_PAGE_MASK      0xFFFF0000
 #define MMU_SMALL_PAGE_MASK      0xFFFFF000
 #define PAGES_II_LVL_TABLE   512
+#define phys_to_page(phys)      pfn_to_page((phys) >> PAGE_SHIFT)
 
 #define MMU_GFLUSH 0x60
 
@@ -1372,6 +1375,11 @@ static DSP_STATUS WMD_BRD_MemMap(struct WMD_DEV_CONTEXT *hDevContext,
 			return DSP_EINVALIDARG;
 		}
 	}
+	if (attrs & DSP_MAPDONOTLOCK)
+		hwAttrs.donotlockmpupage = 1;
+	else
+		hwAttrs.donotlockmpupage = 0;
+
 	if (attrs & DSP_MAPVMALLOCADDR) {
 		status = MemMapVmalloc(hDevContext, ulMpuAddr, ulVirtAddr,
 				       ulNumBytes, ulMapAttr);
@@ -1488,12 +1496,20 @@ static DSP_STATUS WMD_BRD_MemUnMap(struct WMD_DEV_CONTEXT *hDevContext,
 	u32 remBytes;
 	u32 remBytesL2;
 	u32 vaCurr;
+	struct page *pg = NULL;
 	DSP_STATUS status = DSP_SOK;
 	struct WMD_DEV_CONTEXT *pDevContext = hDevContext;
 	struct PgTableAttrs *pt = pDevContext->pPtAttrs;
+	u32 pacount = 0;
+	u32 *pPhysAddrPageTbl = NULL;
+	u32 temp;
+	u32 patemp = 0;
+	u32 pAddr;
+	u32 numof4KPages = 0;
 
 	DBG_Trace(DBG_ENTER, "> WMD_BRD_MemUnMap hDevContext %x, va %x, "
 		  "NumBytes %x\n", hDevContext, ulVirtAddr, ulNumBytes);
+	pPhysAddrPageTbl = DMM_GetPhysicalAddrTable();
 	vaCurr = ulVirtAddr;
 	remBytes = ulNumBytes;
 	remBytesL2 = 0;
@@ -1542,6 +1558,19 @@ static DSP_STATUS WMD_BRD_MemUnMap(struct WMD_DEV_CONTEXT *hDevContext,
 				/* vaCurr aligned to pteSize? */
 				if ((pteSize != 0) && (remBytesL2 >= pteSize) &&
 				   !(vaCurr & (pteSize - 1))) {
+					/* Collect Physical addresses from VA */
+					pAddr = (pteVal & ~(pteSize - 1));
+					if (pteSize == HW_PAGE_SIZE_64KB)
+						numof4KPages = 16;
+					else
+						numof4KPages = 1;
+					temp = 0;
+					while (temp++ < numof4KPages) {
+						pPhysAddrPageTbl[pacount++] =
+									pAddr;
+						pAddr += HW_PAGE_SIZE_4KB;
+					}
+
 					if (HW_MMU_PteClear(pteAddrL2,
 						vaCurr, pteSize) == RET_OK) {
 						status = DSP_SOK;
@@ -1602,6 +1631,20 @@ static DSP_STATUS WMD_BRD_MemUnMap(struct WMD_DEV_CONTEXT *hDevContext,
 	 * get flushed */
 EXIT_LOOP:
 	flush_all(pDevContext);
+	temp = 0;
+	while (temp < pacount) {
+		patemp = pPhysAddrPageTbl[temp];
+		if (pfn_valid(__phys_to_pfn(patemp))) {
+			pg = phys_to_page(patemp);
+			if (page_count(pg) <= 0)
+				printk(KERN_INFO "DSPBRIDGE:UNMAP function: "
+					"COUNT 0 FOR PA 0x%x, size = 0x%x\n",
+					patemp, ulNumBytes);
+			SetPageDirty(pg);
+			page_cache_release(pg);
+		}
+		temp++;
+	}
 	DBG_Trace(DBG_LEVEL1, "WMD_BRD_MemUnMap vaCurr %x, pteAddrL1 %x "
 		  "pteAddrL2 %x\n", vaCurr, pteAddrL1, pteAddrL2);
 	DBG_Trace(DBG_ENTER, "< WMD_BRD_MemUnMap status %x remBytes %x, "
@@ -1633,11 +1676,20 @@ static DSP_STATUS TIOMAP_VirtToPhysical(struct mm_struct *mm, u32 ulMpuAddr,
 	u32 temp = 0;
 	u32 numUsrPgs;
 	struct task_struct *curr_task = current;
+	struct vm_area_struct *vma;
+	u32  write = 0;
+
 
 	DBG_Trace(DBG_ENTER, "TIOMAP_VirtToPhysical: START:ulMpuAddr=%x, "
 		  "ulNumBytes=%x\n", ulMpuAddr, ulNumBytes);
 	if (physicalAddrTable == NULL)
 		return DSP_EMEMORY;
+	down_read(&mm->mmap_sem);
+	vma = find_vma(mm, ulMpuAddr);
+	up_read(&mm->mmap_sem);
+
+	if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
+		write = 1;
 	while (ulNumBytes) {
 		DBG_Trace(DBG_LEVEL4, "TIOMAP_VirtToPhysical:Read the next PGD "
 			  "and PMD entry\n");
@@ -1660,7 +1712,7 @@ static DSP_STATUS TIOMAP_VirtToPhysical(struct mm_struct *mm, u32 ulMpuAddr,
 			 * page tables
 			 */
 			numUsrPgs = get_user_pages(curr_task, mm, ulMpuAddr, 1,
-							true, 0, NULL, NULL);
+							write, 1, NULL, NULL);
 			up_read(&mm->mmap_sem);
 			/* Get the first level page table entry information */
 			/* Read the pointer to first level page table entry */
@@ -1704,7 +1756,7 @@ static DSP_STATUS TIOMAP_VirtToPhysical(struct mm_struct *mm, u32 ulMpuAddr,
 					 * the page tables */
 					if (numUsrPgs <= PAGES_II_LVL_TABLE) {
 						get_user_pages(curr_task, mm,
-						ulMpuAddr, numUsrPgs, true,  0,
+						ulMpuAddr, numUsrPgs, write,  1,
 						NULL, NULL);
 						DBG_Trace(DBG_LEVEL4,
 						"get_user_pages, numUsrPgs"
@@ -1712,7 +1764,7 @@ static DSP_STATUS TIOMAP_VirtToPhysical(struct mm_struct *mm, u32 ulMpuAddr,
 					} else {
 						get_user_pages(curr_task, mm,
 						ulMpuAddr, PAGES_II_LVL_TABLE,
-						true, 0, NULL, NULL);
+						write, 1, NULL, NULL);
 						DBG_Trace(DBG_LEVEL4,
 						"get_user_pages, numUsrPgs"
 						"= %d\n", PAGES_II_LVL_TABLE);
@@ -1737,7 +1789,12 @@ static DSP_STATUS TIOMAP_VirtToPhysical(struct mm_struct *mm, u32 ulMpuAddr,
 					pAddr = pteVal & MMU_LARGE_PAGE_MASK;
 					chunkSz = HW_PAGE_SIZE_64KB;
 					numEntries = 16;
-					numof4KPages = 16;
+					if (ulNumBytes >= HW_PAGE_SIZE_64KB)
+						numof4KPages = 16;
+					else {
+						numof4KPages = ulNumBytes /
+							HW_PAGE_SIZE_4KB;
+					}
 					break;
 				case HW_PAGE_SIZE_4KB:
 					pAddr = pteVal & MMU_SMALL_PAGE_MASK;
@@ -1769,7 +1826,10 @@ static DSP_STATUS TIOMAP_VirtToPhysical(struct mm_struct *mm, u32 ulMpuAddr,
 					ulMpuAddr += chunkSz;
 					/* Update the number of bytes that
 					 * are copied */
-					ulNumBytes -= chunkSz;
+					if (chunkSz > ulNumBytes)
+						ulNumBytes = 0;
+					else
+						ulNumBytes -= chunkSz;
 					DBG_Trace(DBG_LEVEL4,
 						"TIOMAP_VirtToPhysical: mpuCurr"
 						" = %x, pagesize = %x, "
@@ -1792,10 +1852,16 @@ static DSP_STATUS TIOMAP_VirtToPhysical(struct mm_struct *mm, u32 ulMpuAddr,
 			switch (pteSize) {
 			case HW_PAGE_SIZE_16MB:
 				pAddr = pteVal & MMU_SSECTION_ADDR_MASK;
+				if (ulNumBytes >= HW_PAGE_SIZE_16MB) {
 					chunkSz = HW_PAGE_SIZE_16MB;
 					numEntries = 16;
 					numof4KPages = 4096;
-					break;
+				} else {
+					chunkSz = HW_PAGE_SIZE_1MB;
+					numEntries = 1;
+					numof4KPages = 256;
+				}
+				break;
 			case HW_PAGE_SIZE_1MB:
 				pAddr = pteVal & MMU_SECTION_ADDR_MASK;
 					chunkSz = HW_PAGE_SIZE_1MB;
@@ -1909,9 +1975,65 @@ static DSP_STATUS PteSet(struct PgTableAttrs *pt, u32 pa, u32 va,
 	u32 L2BaseVa = 0;
 	u32 L2BasePa = 0;
 	u32 L2PageNum = 0;
+	u32 num4KEntries = 0;
+	u32 temp = 0;
+	struct page *pg = NULL;
+	u32 patemp;
+
 	DSP_STATUS status = DSP_SOK;
 	DBG_Trace(DBG_ENTER, "> PteSet pPgTableAttrs %x, pa %x, va %x, "
 		 "size %x, attrs %x\n", pt, pa, va, size, attrs);
+	/* Lock the MPU pages that are getting mapped if this
+	 * attribute is set */
+	if (attrs->donotlockmpupage == 0) {
+		switch (size) {
+		case HW_PAGE_SIZE_64KB:
+			num4KEntries = 16;
+			break;
+		case HW_PAGE_SIZE_4KB:
+			num4KEntries = 1;
+			break;
+		case HW_PAGE_SIZE_16MB:
+			num4KEntries = 4096;
+			break;
+		case HW_PAGE_SIZE_1MB:
+			num4KEntries = 256;
+			break;
+		default:
+			return DSP_EFAIL;
+		}
+		patemp = pa;
+		while (temp++ < num4KEntries) {
+			/* FIXME: This is a hack to avoid getting pages for
+			 *  video overlay		*/
+			if (pfn_valid(__phys_to_pfn(patemp))) {
+				pg = phys_to_page(patemp);
+				get_page(pg);
+				if (page_count(pg) <= 1) {
+					printk(KERN_EMERG "DSPBRIDGE:MAP  "
+						"function: COUNT 0 FOR PA "
+						"0x%x\n", patemp);
+					printk(KERN_EMERG "Bad page state"
+						"in process '%s'\n"
+						"page:%p flags:0x%0*lx "
+						"mapping:%p mapcount:%d "
+						"count:%d\n"
+						"Trying to fix it up, but "
+						"a reboot is needed\n"
+						"Backtrace:\n",
+						current->comm, pg,
+						(int)(2*sizeof(unsigned long)),
+						(unsigned long)pg->flags,
+						pg->mapping, page_mapcount(pg),
+						page_count(pg));
+					dump_stack();
+					BUG_ON(1);
+				}
+			}
+			patemp += HW_PAGE_SIZE_4KB;
+		}
+	}
+	attrs->donotlockmpupage = 0;
 	L1BaseVa = pt->L1BaseVa;
 	pgTblVa = L1BaseVa;
 	if ((size == HW_PAGE_SIZE_64KB) || (size == HW_PAGE_SIZE_4KB)) {
-- 
1.5.6.4


^ permalink raw reply related	[flat|nested] 9+ messages in thread

* Re: [PATCH 1/4] [OMAPZOOM] [UPDATE]DSPBRIDGE: Memory lock for DMM.
  2009-04-01  0:54 [PATCH 1/4] [OMAPZOOM] [UPDATE]DSPBRIDGE: Memory lock for DMM Guzman Lugo, Fernando
@ 2009-04-01  7:25 ` Artem Bityutskiy
  0 siblings, 0 replies; 9+ messages in thread
From: Artem Bityutskiy @ 2009-04-01  7:25 UTC (permalink / raw)
  To: Guzman Lugo, Fernando; +Cc: Pandita, Vikram, linux-omap@vger.kernel.org

Guzman Lugo, Fernando wrote:
> +		patemp = pa;
> +		while (temp++ < num4KEntries) {
> +			/* FIXME: This is a hack to avoid getting pages for
> +			 *  video overlay		*/
> +			if (pfn_valid(__phys_to_pfn(patemp))) {
> +				pg = phys_to_page(patemp);
> +				get_page(pg);
> +				if (page_count(pg) <= 1) {
> +					printk(KERN_EMERG "DSPBRIDGE:MAP  "
> +						"function: COUNT 0 FOR PA "
> +						"0x%x\n", patemp);
> +					printk(KERN_EMERG "Bad page state"
> +						"in process '%s'\n"
> +						"page:%p flags:0x%0*lx "
> +						"mapping:%p mapcount:%d "
> +						"count:%d\n"
> +						"Trying to fix it up, but "
> +						"a reboot is needed\n"
> +						"Backtrace:\n",
> +						current->comm, pg,
> +						(int)(2*sizeof(unsigned long)),
> +						(unsigned long)pg->flags,
> +						pg->mapping, page_mapcount(pg),
> +						page_count(pg));
> +					dump_stack();
> +					BUG_ON(1);
> +				}
> +			}

Sorry for repeating myself, I just thought I have a good
suggestion. You could try the Obfuscated C code contest
and win some money with this code:

http://www.ioccc.org

:-)

-- 
Best Regards,
Artem Bityutskiy (Артём Битюцкий)
--
To unsubscribe from this list: send the line "unsubscribe linux-omap" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 9+ messages in thread

* RE: [PATCH 1/4] [OMAPZOOM] [UPDATE] DSPBRIDGE: Memory lock for DMM.
@ 2009-04-07 19:26 Guzman Lugo, Fernando
  2009-04-07 20:19 ` Ameya Palande
  0 siblings, 1 reply; 9+ messages in thread
From: Guzman Lugo, Fernando @ 2009-04-07 19:26 UTC (permalink / raw)
  To: Pandita, Vikram; +Cc: linux-omap@vger.kernel.org


Hi,

	Patch updated with the fix in page_count(pg).


>From a5ab7e038b72e62358279ef3c4e64b2f260ceeee Mon Sep 17 00:00:00 2001
From: Hari Kanigeri <h-kanigeri2@ti.com>
Date: Thu, 26 Mar 2009 15:47:50 -0500
Subject: [PATCH] DSPBRIDGE: Memory lock for DMM.

Lock down the pages that are mapped to DSP virtual memory to prevent from
getting swapped out

Signed-off-by: Hari Kanigeri <h-kanigeri2@ti.com>
---
 arch/arm/plat-omap/include/dspbridge/dbdefs.h |    3 +
 drivers/dsp/bridge/hw/hw_mmu.h                |    1 +
 drivers/dsp/bridge/wmd/io_sm.c                |   24 +++--
 drivers/dsp/bridge/wmd/tiomap3430.c           |  133 +++++++++++++++++++++++-
 4 files changed, 144 insertions(+), 17 deletions(-)

diff --git a/arch/arm/plat-omap/include/dspbridge/dbdefs.h b/arch/arm/plat-omap/include/dspbridge/dbdefs.h
index 7f5a2bf..9782693 100644
--- a/arch/arm/plat-omap/include/dspbridge/dbdefs.h
+++ b/arch/arm/plat-omap/include/dspbridge/dbdefs.h
@@ -571,6 +571,9 @@ bit 6 - MMU element size = 64bit (valid only for non mixed page entries)
 
 #define DSP_MAPVMALLOCADDR         0x00000080
 
+#define DSP_MAPDONOTLOCK	   0x00000100
+
+
 #define GEM_CACHE_LINE_SIZE     128
 #define GEM_L1P_PREFETCH_SIZE   128
 
diff --git a/drivers/dsp/bridge/hw/hw_mmu.h b/drivers/dsp/bridge/hw/hw_mmu.h
index 065f0dd..b1e2458 100644
--- a/drivers/dsp/bridge/hw/hw_mmu.h
+++ b/drivers/dsp/bridge/hw/hw_mmu.h
@@ -51,6 +51,7 @@ struct HW_MMUMapAttrs_t {
 	enum HW_Endianism_t     endianism;
 	enum HW_ElementSize_t   elementSize;
 	enum HW_MMUMixedSize_t  mixedSize;
+	bool donotlockmpupage;
 } ;
 
 extern HW_STATUS HW_MMU_Enable(const void __iomem *baseAddress);
diff --git a/drivers/dsp/bridge/wmd/io_sm.c b/drivers/dsp/bridge/wmd/io_sm.c
index bd936eb..301bd72 100755
--- a/drivers/dsp/bridge/wmd/io_sm.c
+++ b/drivers/dsp/bridge/wmd/io_sm.c
@@ -553,6 +553,8 @@ func_cont1:
 	mapAttrs = DSP_MAPLITTLEENDIAN;
 	mapAttrs |= DSP_MAPPHYSICALADDR;
 	mapAttrs |= DSP_MAPELEMSIZE32;
+	mapAttrs |= DSP_MAPDONOTLOCK;
+
 	while (numBytes && DSP_SUCCEEDED(status)) {
 		/* To find the max. page size with which both PA & VA are
 		 * aligned */
@@ -690,18 +692,18 @@ func_cont:
 	mapAttrs = DSP_MAPLITTLEENDIAN;
 	mapAttrs |= DSP_MAPPHYSICALADDR;
 	mapAttrs |= DSP_MAPELEMSIZE32;
+	mapAttrs |= DSP_MAPDONOTLOCK;
+
 	/* Map the L4 peripherals */
-	{
-		i = 0;
-		while (L4PeripheralTable[i].physAddr && DSP_SUCCEEDED(status)) {
-				status = hIOMgr->pIntfFxns->pfnBrdMemMap
-					(hIOMgr->hWmdContext,
-					L4PeripheralTable[i].physAddr,
-					L4PeripheralTable[i].dspVirtAddr,
-					HW_PAGE_SIZE_4KB, mapAttrs);
-				DBC_Assert(DSP_SUCCEEDED(status));
-				i++;
-		}
+	i = 0;
+	while (L4PeripheralTable[i].physAddr && DSP_SUCCEEDED(status)) {
+		status = hIOMgr->pIntfFxns->pfnBrdMemMap
+			(hIOMgr->hWmdContext, L4PeripheralTable[i].physAddr,
+			L4PeripheralTable[i].dspVirtAddr, HW_PAGE_SIZE_4KB,
+			mapAttrs);
+		if (DSP_FAILED(status))
+			break;
+		i++;
 	}
 
 	if (DSP_SUCCEEDED(status)) {
diff --git a/drivers/dsp/bridge/wmd/tiomap3430.c b/drivers/dsp/bridge/wmd/tiomap3430.c
index 983465a..c9849e3 100755
--- a/drivers/dsp/bridge/wmd/tiomap3430.c
+++ b/drivers/dsp/bridge/wmd/tiomap3430.c
@@ -28,6 +28,8 @@
 
 /*  ----------------------------------- Host OS */
 #include <dspbridge/host_os.h>
+#include <linux/mm.h>
+#include <linux/mmzone.h>
 #include "../arch/arm/mach-omap2/prcm-regs.h"
 #include "../arch/arm/mach-omap2/cm-regbits-34xx.h"
 #include "../arch/arm/mach-omap2/ti-compat.h"
@@ -90,6 +92,7 @@
 #define MMU_LARGE_PAGE_MASK      0xFFFF0000
 #define MMU_SMALL_PAGE_MASK      0xFFFFF000
 #define PAGES_II_LVL_TABLE   512
+#define phys_to_page(phys)      pfn_to_page((phys) >> PAGE_SHIFT)
 
 #define MMU_GFLUSH 0x60
 
@@ -1372,6 +1375,11 @@ static DSP_STATUS WMD_BRD_MemMap(struct WMD_DEV_CONTEXT *hDevContext,
 			return DSP_EINVALIDARG;
 		}
 	}
+	if (attrs & DSP_MAPDONOTLOCK)
+		hwAttrs.donotlockmpupage = 1;
+	else
+		hwAttrs.donotlockmpupage = 0;
+
 	if (attrs & DSP_MAPVMALLOCADDR) {
 		status = MemMapVmalloc(hDevContext, ulMpuAddr, ulVirtAddr,
 				       ulNumBytes, ulMapAttr);
@@ -1488,12 +1496,20 @@ static DSP_STATUS WMD_BRD_MemUnMap(struct WMD_DEV_CONTEXT *hDevContext,
 	u32 remBytes;
 	u32 remBytesL2;
 	u32 vaCurr;
+	struct page *pg = NULL;
 	DSP_STATUS status = DSP_SOK;
 	struct WMD_DEV_CONTEXT *pDevContext = hDevContext;
 	struct PgTableAttrs *pt = pDevContext->pPtAttrs;
+	u32 pacount = 0;
+	u32 *pPhysAddrPageTbl = NULL;
+	u32 temp;
+	u32 patemp = 0;
+	u32 pAddr;
+	u32 numof4KPages = 0;
 
 	DBG_Trace(DBG_ENTER, "> WMD_BRD_MemUnMap hDevContext %x, va %x, "
 		  "NumBytes %x\n", hDevContext, ulVirtAddr, ulNumBytes);
+	pPhysAddrPageTbl = DMM_GetPhysicalAddrTable();
 	vaCurr = ulVirtAddr;
 	remBytes = ulNumBytes;
 	remBytesL2 = 0;
@@ -1542,6 +1558,19 @@ static DSP_STATUS WMD_BRD_MemUnMap(struct WMD_DEV_CONTEXT *hDevContext,
 				/* vaCurr aligned to pteSize? */
 				if ((pteSize != 0) && (remBytesL2 >= pteSize) &&
 				   !(vaCurr & (pteSize - 1))) {
+					/* Collect Physical addresses from VA */
+					pAddr = (pteVal & ~(pteSize - 1));
+					if (pteSize == HW_PAGE_SIZE_64KB)
+						numof4KPages = 16;
+					else
+						numof4KPages = 1;
+					temp = 0;
+					while (temp++ < numof4KPages) {
+						pPhysAddrPageTbl[pacount++] =
+									pAddr;
+						pAddr += HW_PAGE_SIZE_4KB;
+					}
+
 					if (HW_MMU_PteClear(pteAddrL2,
 						vaCurr, pteSize) == RET_OK) {
 						status = DSP_SOK;
@@ -1602,6 +1631,20 @@ static DSP_STATUS WMD_BRD_MemUnMap(struct WMD_DEV_CONTEXT *hDevContext,
 	 * get flushed */
 EXIT_LOOP:
 	flush_all(pDevContext);
+	temp = 0;
+	while (temp < pacount) {
+		patemp = pPhysAddrPageTbl[temp];
+		if (pfn_valid(__phys_to_pfn(patemp))) {
+			pg = phys_to_page(patemp);
+			if (page_count(pg) < 1)
+				printk(KERN_INFO "DSPBRIDGE:UNMAP function: "
+					"COUNT 0 FOR PA 0x%x, size = 0x%x\n",
+					patemp, ulNumBytes);
+			SetPageDirty(pg);
+			page_cache_release(pg);
+		}
+		temp++;
+	}
 	DBG_Trace(DBG_LEVEL1, "WMD_BRD_MemUnMap vaCurr %x, pteAddrL1 %x "
 		  "pteAddrL2 %x\n", vaCurr, pteAddrL1, pteAddrL2);
 	DBG_Trace(DBG_ENTER, "< WMD_BRD_MemUnMap status %x remBytes %x, "
@@ -1633,11 +1676,20 @@ static DSP_STATUS TIOMAP_VirtToPhysical(struct mm_struct *mm, u32 ulMpuAddr,
 	u32 temp = 0;
 	u32 numUsrPgs;
 	struct task_struct *curr_task = current;
+	struct vm_area_struct *vma;
+	u32  write = 0;
+
 
 	DBG_Trace(DBG_ENTER, "TIOMAP_VirtToPhysical: START:ulMpuAddr=%x, "
 		  "ulNumBytes=%x\n", ulMpuAddr, ulNumBytes);
 	if (physicalAddrTable == NULL)
 		return DSP_EMEMORY;
+	down_read(&mm->mmap_sem);
+	vma = find_vma(mm, ulMpuAddr);
+	up_read(&mm->mmap_sem);
+
+	if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
+		write = 1;
 	while (ulNumBytes) {
 		DBG_Trace(DBG_LEVEL4, "TIOMAP_VirtToPhysical:Read the next PGD "
 			  "and PMD entry\n");
@@ -1660,7 +1712,7 @@ static DSP_STATUS TIOMAP_VirtToPhysical(struct mm_struct *mm, u32 ulMpuAddr,
 			 * page tables
 			 */
 			numUsrPgs = get_user_pages(curr_task, mm, ulMpuAddr, 1,
-							true, 0, NULL, NULL);
+							write, 1, NULL, NULL);
 			up_read(&mm->mmap_sem);
 			/* Get the first level page table entry information */
 			/* Read the pointer to first level page table entry */
@@ -1704,7 +1756,7 @@ static DSP_STATUS TIOMAP_VirtToPhysical(struct mm_struct *mm, u32 ulMpuAddr,
 					 * the page tables */
 					if (numUsrPgs <= PAGES_II_LVL_TABLE) {
 						get_user_pages(curr_task, mm,
-						ulMpuAddr, numUsrPgs, true,  0,
+						ulMpuAddr, numUsrPgs, write,  1,
 						NULL, NULL);
 						DBG_Trace(DBG_LEVEL4,
 						"get_user_pages, numUsrPgs"
@@ -1712,7 +1764,7 @@ static DSP_STATUS TIOMAP_VirtToPhysical(struct mm_struct *mm, u32 ulMpuAddr,
 					} else {
 						get_user_pages(curr_task, mm,
 						ulMpuAddr, PAGES_II_LVL_TABLE,
-						true, 0, NULL, NULL);
+						write, 1, NULL, NULL);
 						DBG_Trace(DBG_LEVEL4,
 						"get_user_pages, numUsrPgs"
 						"= %d\n", PAGES_II_LVL_TABLE);
@@ -1737,7 +1789,12 @@ static DSP_STATUS TIOMAP_VirtToPhysical(struct mm_struct *mm, u32 ulMpuAddr,
 					pAddr = pteVal & MMU_LARGE_PAGE_MASK;
 					chunkSz = HW_PAGE_SIZE_64KB;
 					numEntries = 16;
-					numof4KPages = 16;
+					if (ulNumBytes >= HW_PAGE_SIZE_64KB)
+						numof4KPages = 16;
+					else {
+						numof4KPages = ulNumBytes /
+							HW_PAGE_SIZE_4KB;
+					}
 					break;
 				case HW_PAGE_SIZE_4KB:
 					pAddr = pteVal & MMU_SMALL_PAGE_MASK;
@@ -1769,7 +1826,10 @@ static DSP_STATUS TIOMAP_VirtToPhysical(struct mm_struct *mm, u32 ulMpuAddr,
 					ulMpuAddr += chunkSz;
 					/* Update the number of bytes that
 					 * are copied */
-					ulNumBytes -= chunkSz;
+					if (chunkSz > ulNumBytes)
+						ulNumBytes = 0;
+					else
+						ulNumBytes -= chunkSz;
 					DBG_Trace(DBG_LEVEL4,
 						"TIOMAP_VirtToPhysical: mpuCurr"
 						" = %x, pagesize = %x, "
@@ -1792,10 +1852,16 @@ static DSP_STATUS TIOMAP_VirtToPhysical(struct mm_struct *mm, u32 ulMpuAddr,
 			switch (pteSize) {
 			case HW_PAGE_SIZE_16MB:
 				pAddr = pteVal & MMU_SSECTION_ADDR_MASK;
+				if (ulNumBytes >= HW_PAGE_SIZE_16MB) {
 					chunkSz = HW_PAGE_SIZE_16MB;
 					numEntries = 16;
 					numof4KPages = 4096;
-					break;
+				} else {
+					chunkSz = HW_PAGE_SIZE_1MB;
+					numEntries = 1;
+					numof4KPages = 256;
+				}
+				break;
 			case HW_PAGE_SIZE_1MB:
 				pAddr = pteVal & MMU_SECTION_ADDR_MASK;
 					chunkSz = HW_PAGE_SIZE_1MB;
@@ -1909,9 +1975,65 @@ static DSP_STATUS PteSet(struct PgTableAttrs *pt, u32 pa, u32 va,
 	u32 L2BaseVa = 0;
 	u32 L2BasePa = 0;
 	u32 L2PageNum = 0;
+	u32 num4KEntries = 0;
+	u32 temp = 0;
+	struct page *pg = NULL;
+	u32 patemp;
+
 	DSP_STATUS status = DSP_SOK;
 	DBG_Trace(DBG_ENTER, "> PteSet pPgTableAttrs %x, pa %x, va %x, "
 		 "size %x, attrs %x\n", pt, pa, va, size, attrs);
+	/* Lock the MPU pages that are getting mapped if this
+	 * attribute is set */
+	if (attrs->donotlockmpupage == 0) {
+		switch (size) {
+		case HW_PAGE_SIZE_64KB:
+			num4KEntries = 16;
+			break;
+		case HW_PAGE_SIZE_4KB:
+			num4KEntries = 1;
+			break;
+		case HW_PAGE_SIZE_16MB:
+			num4KEntries = 4096;
+			break;
+		case HW_PAGE_SIZE_1MB:
+			num4KEntries = 256;
+			break;
+		default:
+			return DSP_EFAIL;
+		}
+		patemp = pa;
+		while (temp++ < num4KEntries) {
+			/* FIXME: This is a hack to avoid getting pages for
+			 *  video overlay		*/
+			if (pfn_valid(__phys_to_pfn(patemp))) {
+				pg = phys_to_page(patemp);
+				get_page(pg);
+				if (page_count(pg) < 1) {
+					printk(KERN_EMERG "DSPBRIDGE:MAP  "
+						"function: COUNT 0 FOR PA "
+						"0x%x\n", patemp);
+					printk(KERN_EMERG "Bad page state"
+						"in process '%s'\n"
+						"page:%p flags:0x%0*lx "
+						"mapping:%p mapcount:%d "
+						"count:%d\n"
+						"Trying to fix it up, but "
+						"a reboot is needed\n"
+						"Backtrace:\n",
+						current->comm, pg,
+						(int)(2*sizeof(unsigned long)),
+						(unsigned long)pg->flags,
+						pg->mapping, page_mapcount(pg),
+						page_count(pg));
+					dump_stack();
+					BUG_ON(1);
+				}
+			}
+			patemp += HW_PAGE_SIZE_4KB;
+		}
+	}
+	attrs->donotlockmpupage = 0;
 	L1BaseVa = pt->L1BaseVa;
 	pgTblVa = L1BaseVa;
 	if ((size == HW_PAGE_SIZE_64KB) || (size == HW_PAGE_SIZE_4KB)) {
-- 
1.5.6.4


^ permalink raw reply related	[flat|nested] 9+ messages in thread

* Re: [PATCH 1/4] [OMAPZOOM] [UPDATE] DSPBRIDGE: Memory lock for DMM.
  2009-04-07 19:26 [PATCH 1/4] [OMAPZOOM] [UPDATE] DSPBRIDGE: " Guzman Lugo, Fernando
@ 2009-04-07 20:19 ` Ameya Palande
  2009-04-07 20:27   ` Kanigeri, Hari
  0 siblings, 1 reply; 9+ messages in thread
From: Ameya Palande @ 2009-04-07 20:19 UTC (permalink / raw)
  To: Guzman Lugo, Fernando; +Cc: Pandita, Vikram, linux-omap@vger.kernel.org

Hi Fernando,

On Tue, Apr 7, 2009 at 10:26 PM, Guzman Lugo, Fernando <x0095840@ti.com> wrote:
>
> Hi,
>
>        Patch updated wit the fix in page_count(pg).
>
>
> From a5ab7e038b72e62358279ef3c4e64b2f260ceeee Mon Sep 17 00:00:00 2001
> From: Hari Kanigeri <h-kanigeri2@ti.com>
> Date: Thu, 26 Mar 2009 15:47:50 -0500
> Subject: [PATCH] DSPBRIDGE: Memory lock for DMM.
>
> Lock down the pages that are mapped to DSP virtual memory to prevent from
> getting swapped out
>
> Signed-off-by: Hari Kanigeri <h-kanigeri2@ti.com>
> ---
> +                       if (pfn_valid(__phys_to_pfn(patemp))) {
> +                               pg = phys_to_page(patemp);
> +                               get_page(pg);

get_page() makes sure that page->count is not zero and atomically increments it.
That means after execution of get_page(); page->count will be always 1
or greater
than 1.

If this is the case then what is the point of having following code?
if (page_count(pg) < 1)  <-- This will always evaluate to FALSE.

> +                               if (page_count(pg) < 1) {
> +                                       printk(KERN_EMERG "DSPBRIDGE:MAP  "
> +                                               "function: COUNT 0 FOR PA "
> +                                               "0x%x\n", patemp);
> +                                       printk(KERN_EMERG "Bad page state"
> +                                               "in process '%s'\n"
> +                                               "page:%p flags:0x%0*lx "
> +                                               "mapping:%p mapcount:%d "
> +                                               "count:%d\n"
> +                                               "Trying to fix it up, but "
> +                                               "a reboot is needed\n"
> +                                               "Backtrace:\n",
> +                                               current->comm, pg,
> +                                               (int)(2*sizeof(unsigned long)),
> +                                               (unsigned long)pg->flags,
> +                                               pg->mapping, page_mapcount(pg),
> +                                               page_count(pg));
> +                                       dump_stack();
> +                                       BUG_ON(1);
> +                               }

Cheers,
Ameya.

^ permalink raw reply	[flat|nested] 9+ messages in thread

* RE: [PATCH 1/4] [OMAPZOOM] [UPDATE] DSPBRIDGE: Memory lock for DMM.
  2009-04-07 20:19 ` Ameya Palande
@ 2009-04-07 20:27   ` Kanigeri, Hari
  2009-04-07 20:49     ` Ameya Palande
  0 siblings, 1 reply; 9+ messages in thread
From: Kanigeri, Hari @ 2009-04-07 20:27 UTC (permalink / raw)
  To: Ameya Palande, Guzman Lugo, Fernando
  Cc: Pandita, Vikram, linux-omap@vger.kernel.org

Ameya,

> If this is the case then what is the point of having following code?
> if (page_count(pg) < 1)  <-- This will always evaluate to FALSE.

-- As you mentioned, this case shouldn't be hit. This check is only to ensure that the translation from user-space VA to physical address is taking place correctly, and to flag the user if something went bad.


Thank you,
Best regards,
Hari

> -----Original Message-----
> From: linux-omap-owner@vger.kernel.org [mailto:linux-omap-
> owner@vger.kernel.org] On Behalf Of Ameya Palande
> Sent: Tuesday, April 07, 2009 3:20 PM
> To: Guzman Lugo, Fernando
> Cc: Pandita, Vikram; linux-omap@vger.kernel.org
> Subject: Re: [PATCH 1/4] [OMAPZOOM] [UPDATE] DSPBRIDGE: Memory lock for
> DMM.
> 
> Hi Fernando,
> 
> On Tue, Apr 7, 2009 at 10:26 PM, Guzman Lugo, Fernando <x0095840@ti.com>
> wrote:
> >
> > Hi,
> >
> >        Patch updated wit the fix in page_count(pg).
> >
> >
> > From a5ab7e038b72e62358279ef3c4e64b2f260ceeee Mon Sep 17 00:00:00 2001
> > From: Hari Kanigeri <h-kanigeri2@ti.com>
> > Date: Thu, 26 Mar 2009 15:47:50 -0500
> > Subject: [PATCH] DSPBRIDGE: Memory lock for DMM.
> >
> > Lock down the pages that are mapped to DSP virtual memory to prevent
> from
> > getting swapped out
> >
> > Signed-off-by: Hari Kanigeri <h-kanigeri2@ti.com>
> > ---
> > +                       if (pfn_valid(__phys_to_pfn(patemp))) {
> > +                               pg = phys_to_page(patemp);
> > +                               get_page(pg);
> 
> get_page() makes sure that page->count is not zero and atomically
> increments it.
> That means after execution of get_page(); page->count will be always 1
> or greater
> than 1.
> 
> If this is the case then what is the point of having following code?
> if (page_count(pg) < 1)  <-- This will always evaluate to FALSE.
> 
> > +                               if (page_count(pg) < 1) {
> > +                                       printk(KERN_EMERG "DSPBRIDGE:MAP
>  "
> > +                                               "function: COUNT 0 FOR
> PA "
> > +                                               "0x%x\n", patemp);
> > +                                       printk(KERN_EMERG "Bad page
> state"
> > +                                               "in process '%s'\n"
> > +                                               "page:%p flags:0x%0*lx "
> > +                                               "mapping:%p mapcount:%d
> "
> > +                                               "count:%d\n"
> > +                                               "Trying to fix it up,
> but "
> > +                                               "a reboot is needed\n"
> > +                                               "Backtrace:\n",
> > +                                               current->comm, pg,
> > +                                               (int)(2*sizeof(unsigned
> long)),
> > +                                               (unsigned long)pg-
> >flags,
> > +                                               pg->mapping,
> page_mapcount(pg),
> > +                                               page_count(pg));
> > +                                       dump_stack();
> > +                                       BUG_ON(1);
> > +                               }
> 
> Cheers,
> Ameya.
> N�����r��y���b�X��ǧv�^�)޺{.n�+����{��f��{ay�\x1dʇڙ�,j\r��f���h���z�\x1e�w���
> 
> ���j:+v���w�j�m����\r����zZ+�����ݢj"��!�i

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH 1/4] [OMAPZOOM] [UPDATE] DSPBRIDGE: Memory lock for DMM.
  2009-04-07 20:27   ` Kanigeri, Hari
@ 2009-04-07 20:49     ` Ameya Palande
  2009-04-07 20:59       ` Kanigeri, Hari
  0 siblings, 1 reply; 9+ messages in thread
From: Ameya Palande @ 2009-04-07 20:49 UTC (permalink / raw)
  To: Kanigeri, Hari
  Cc: Guzman Lugo, Fernando, Pandita, Vikram,
	linux-omap@vger.kernel.org

Hi Hari,

On Tue, Apr 7, 2009 at 11:27 PM, Kanigeri, Hari <h-kanigeri2@ti.com> wrote:
> Ameya,
>
>> If this is the case then what is the point of having following code?
>> if (page_count(pg) < 1)  <-- This will always evaluate to FALSE.
>
> -- As you mentioned, this case shouldn't be hit. This check is only to ensure that the translations from user space VA to physical address are taking place correctly, and to flag the user if something went bad.

get_page() function has following check inside it:
VM_BUG_ON(atomic_read(&page->_count) == 0);

Why do we need to repeat it again?
I guess we can completely remove the following check, which is in my
opinion completely
redundant:

if (page_count(pg) < 1) {
}

Cheers,
Ameya.
--
To unsubscribe from this list: send the line "unsubscribe linux-omap" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 9+ messages in thread

* RE: [PATCH 1/4] [OMAPZOOM] [UPDATE] DSPBRIDGE: Memory lock for DMM.
  2009-04-07 20:49     ` Ameya Palande
@ 2009-04-07 20:59       ` Kanigeri, Hari
  2009-04-07 21:20         ` Ameya Palande
  0 siblings, 1 reply; 9+ messages in thread
From: Kanigeri, Hari @ 2009-04-07 20:59 UTC (permalink / raw)
  To: Ameya Palande
  Cc: Guzman Lugo, Fernando, Pandita, Vikram,
	linux-omap@vger.kernel.org

Ameya,

> 
> get_page() function has following check inside it:
> VM_BUG_ON(atomic_read(&page->_count) == 0);
> 
> Why we need to repeat it again?
> I guess we can completely remove following check which is in my
> opinion completely
> redundant:

-- This check in get_page is a conditional check, and this is to catch the cases when the page count starts at "0", which I think is still a possible condition if the user space buffer was never touched. So I am not sure if we can rely on this.

> 
> if (page_count(pg) < 1) {
> }

-- This is just to extend the check for the bad page count.

Thank you,
Best regards,
Hari

> -----Original Message-----
> From: Ameya Palande [mailto:2ameya@gmail.com]
> Sent: Tuesday, April 07, 2009 3:49 PM
> To: Kanigeri, Hari
> Cc: Guzman Lugo, Fernando; Pandita, Vikram; linux-omap@vger.kernel.org
> Subject: Re: [PATCH 1/4] [OMAPZOOM] [UPDATE] DSPBRIDGE: Memory lock for
> DMM.
> 
> Hi Hari,
> 
> On Tue, Apr 7, 2009 at 11:27 PM, Kanigeri, Hari <h-kanigeri2@ti.com>
> wrote:
> > Ameya,
> >
> >> If this is the case then what is the point of having following code?
> >> if (page_count(pg) < 1)  <-- This will always evaluate to FALSE.
> >
> > -- As you mentioned this case shouldn't be hit. This check is to only to
> ensure that the >translations from user space VA to Physical address is
> taking place correctly and flag the >user if something went bad.
> 
> get_page() function has following check inside it:
> VM_BUG_ON(atomic_read(&page->_count) == 0);
> 
> Why we need to repeat it again?
> I guess we can completely remove following check which is in my
> opinion completely
> redundant:
> 
> if (page_count(pg) < 1) {
> }
> 
> Cheers,
> Ameya.

--
To unsubscribe from this list: send the line "unsubscribe linux-omap" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH 1/4] [OMAPZOOM] [UPDATE] DSPBRIDGE: Memory lock for DMM.
  2009-04-07 20:59       ` Kanigeri, Hari
@ 2009-04-07 21:20         ` Ameya Palande
  2009-04-07 21:36           ` Kanigeri, Hari
  0 siblings, 1 reply; 9+ messages in thread
From: Ameya Palande @ 2009-04-07 21:20 UTC (permalink / raw)
  To: Kanigeri, Hari
  Cc: Guzman Lugo, Fernando, Pandita, Vikram,
	linux-omap@vger.kernel.org

Hi Hari,

On Tue, Apr 7, 2009 at 11:59 PM, Kanigeri, Hari <h-kanigeri2@ti.com> wrote:
> Ameya,
>
>>
>> get_page() function has following check inside it:
>> VM_BUG_ON(atomic_read(&page->_count) == 0);
>>
>> Why we need to repeat it again?
>> I guess we can completely remove following check which is in my
>> opinion completely
>> redundant:
>
> -- This check in get_page is a conditional check, and this is to catch the cases when the page count starts at "0", which I think is still a possible condition if the user space buffer was never touched. So I am not sure if we can rely on this.

Thanks for the explanation :) I missed the point that VM_BUG_ON is a
conditional check.
So ideally the user space process should touch all the buffers before
sending them to
the bridge driver so that page_count is never zero?

Also I didn't understand the meaning of the following message which is printed
by printk when page_count is less than 1.

"Trying to fix it up, but a reboot is needed\n"

Where the fixing effort is happening?

Cheers,
Ameya.

^ permalink raw reply	[flat|nested] 9+ messages in thread

* RE: [PATCH 1/4] [OMAPZOOM] [UPDATE] DSPBRIDGE: Memory lock for DMM.
  2009-04-07 21:20         ` Ameya Palande
@ 2009-04-07 21:36           ` Kanigeri, Hari
  0 siblings, 0 replies; 9+ messages in thread
From: Kanigeri, Hari @ 2009-04-07 21:36 UTC (permalink / raw)
  To: Ameya Palande
  Cc: Guzman Lugo, Fernando, Pandita, Vikram,
	linux-omap@vger.kernel.org

Ameya,

> So ideally user space process should touch all the buffers before
> sending them to
> bridge driver so that page_count is never zero?

-- For output buffers from DSP, this is not needed. 

> Also I didn't understand the meaning of the following message which is
> printed
> by printk when page_count is less than 1.
> 
> "Trying to fix it up, but a reboot is needed\n"
> 
> Where the fixing effort is happening?

-- Good catch. There is no specific reason :). We can remove this. I am not sure, but I think this warning statement was borrowed from one of the kernel's bad-page panic messages.

Thank you,
Best regards,
Hari

> -----Original Message-----
> From: Ameya Palande [mailto:2ameya@gmail.com]
> Sent: Tuesday, April 07, 2009 4:20 PM
> To: Kanigeri, Hari
> Cc: Guzman Lugo, Fernando; Pandita, Vikram; linux-omap@vger.kernel.org
> Subject: Re: [PATCH 1/4] [OMAPZOOM] [UPDATE] DSPBRIDGE: Memory lock for
> DMM.
> 
> Hi Hari,
> 
> On Tue, Apr 7, 2009 at 11:59 PM, Kanigeri, Hari <h-kanigeri2@ti.com>
> wrote:
> > Ameya,
> >
> >>
> >> get_page() function has following check inside it:
> >> VM_BUG_ON(atomic_read(&page->_count) == 0);
> >>
> >> Why we need to repeat it again?
> >> I guess we can completely remove following check which is in my
> >> opinion completely
> >> redundant:
> >
> > -- This check in get_page is conditional check and this is to catch the
> cases when page >count starts with "0", which I think is still a possible
> condition if the user space buffer >was never touched. So I am not sure if
> we can rely on this.
> 
> Thanks for the explanation :) I missed the point that VM_BUG_ON is a
> conditional check.
> So ideally user space process should touch all the buffers before
> sending them to
> bridge driver so that page_count is never zero?
> 
> Also I didn't understand the meaning of the following message which is
> printed
> by printk when page_count is less than 1.
> 
> "Trying to fix it up, but a reboot is needed\n"
> 
> Where the fixing effort is happening?
> 
> Cheers,
> Ameya.


^ permalink raw reply	[flat|nested] 9+ messages in thread

end of thread, other threads:[~2009-04-07 21:36 UTC | newest]

Thread overview: 9+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2009-04-01  0:54 [PATCH 1/4] [OMAPZOOM] [UPDATE]DSPBRIDGE: Memory lock for DMM Guzman Lugo, Fernando
2009-04-01  7:25 ` Artem Bityutskiy
  -- strict thread matches above, loose matches on Subject: below --
2009-04-07 19:26 [PATCH 1/4] [OMAPZOOM] [UPDATE] DSPBRIDGE: " Guzman Lugo, Fernando
2009-04-07 20:19 ` Ameya Palande
2009-04-07 20:27   ` Kanigeri, Hari
2009-04-07 20:49     ` Ameya Palande
2009-04-07 20:59       ` Kanigeri, Hari
2009-04-07 21:20         ` Ameya Palande
2009-04-07 21:36           ` Kanigeri, Hari

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox